"fmt"
"io"
"os"
- "template"
+ "text/template"
)
func main() {
package main
import (
- "cmath"
"fmt"
"math"
+ "math/cmplx"
)
-type Test struct{
- f, g complex128
- out complex128
+type Test struct {
+ f, g complex128
+ out complex128
}
var nan = math.NaN()
func calike(a, b complex128) bool {
switch {
- case cmath.IsInf(a) && cmath.IsInf(b):
+ case cmplx.IsInf(a) && cmplx.IsInf(b):
return true
- case cmath.IsNaN(a) && cmath.IsNaN(b):
+ case cmplx.IsNaN(a) && cmplx.IsNaN(b):
return true
}
return a == b
func main() {
bad := false
for _, t := range tests {
- x := t.f/t.g
+ x := t.f / t.g
if !calike(x, t.out) {
if !bad {
fmt.Printf("BUG\n")
package main
import (
- "http"
"io/ioutil" // GCCGO_ERROR "imported and not used"
+ "net/http"
"os"
)
package main
-import "rand"
+import "math/rand"
const Count = 1e5
package main
-import "rand"
+import "math/rand"
const Count = 1e5
import (
"flag"
- "rand"
+ "math/rand"
"runtime"
"unsafe"
)
import (
"fmt"
"os"
- "utf8"
+ "unicode/utf8"
)
func main() {
s := "\000\123\x00\xca\xFE\u0123\ubabe\U0000babe\U0010FFFFx"
- expect := []int{ 0, 0123, 0, 0xFFFD, 0xFFFD, 0x123, 0xbabe, 0xbabe, 0x10FFFF, 'x' }
+ expect := []rune{0, 0123, 0, 0xFFFD, 0xFFFD, 0x123, 0xbabe, 0xbabe, 0x10FFFF, 'x'}
offset := 0
- var i, c int
+ var i int
+ var c rune
ok := true
cnum := 0
for i, c = range s {
- rune, size := utf8.DecodeRuneInString(s[i:len(s)]) // check it another way
+ r, size := utf8.DecodeRuneInString(s[i:len(s)]) // check it another way
if i != offset {
fmt.Printf("unexpected offset %d not %d\n", i, offset)
ok = false
}
- if rune != expect[cnum] {
- fmt.Printf("unexpected rune %d from DecodeRuneInString: %x not %x\n", i, rune, expect[cnum])
+ if r != expect[cnum] {
+ fmt.Printf("unexpected rune %d from DecodeRuneInString: %x not %x\n", i, r, expect[cnum])
ok = false
}
if c != expect[cnum] {
- fmt.Printf("unexpected rune %d from range: %x not %x\n", i, rune, expect[cnum])
+ fmt.Printf("unexpected rune %d from range: %x not %x\n", i, r, expect[cnum])
ok = false
}
offset += size
package main
-import "utf8"
+import "unicode/utf8"
func main() {
- var chars [6] int
+ var chars [6]rune
chars[0] = 'a'
chars[1] = 'b'
chars[2] = 'c'
s += string(chars[i])
}
var l = len(s)
- for w, i, j := 0,0,0; i < l; i += w {
- var r int
+ for w, i, j := 0, 0, 0; i < l; i += w {
+ var r rune
r, w = utf8.DecodeRuneInString(s[i:len(s)])
- if w == 0 { panic("zero width in string") }
- if r != chars[j] { panic("wrong value from string") }
+ if w == 0 {
+ panic("zero width in string")
+ }
+ if r != chars[j] {
+ panic("wrong value from string")
+ }
j++
}
// encoded as bytes: 'a' 'b' 'c' e6 97 a5 e6 9c ac e8 aa 9e
const L = 12
- if L != l { panic("wrong length constructing array") }
+ if L != l {
+ panic("wrong length constructing array")
+ }
a := make([]byte, L)
a[0] = 'a'
a[1] = 'b'
a[9] = 0xe8
a[10] = 0xaa
a[11] = 0x9e
- for w, i, j := 0,0,0; i < L; i += w {
- var r int
+ for w, i, j := 0, 0, 0; i < L; i += w {
+ var r rune
r, w = utf8.DecodeRune(a[i:L])
- if w == 0 { panic("zero width in bytes") }
- if r != chars[j] { panic("wrong value from bytes") }
+ if w == 0 {
+ panic("zero width in bytes")
+ }
+ if r != chars[j] {
+ panic("wrong value from bytes")
+ }
j++
}
}
-780c85032b17
+2f4482b89a6b
The first line of this file holds the Mercurial revision number of the
last merge done from the master library sources.
toolexeclibgodir = $(toolexeclibdir)/go/$(gcc_version)/$(target_alias)
toolexeclibgo_DATA = \
- asn1.gox \
- big.gox \
bufio.gox \
bytes.gox \
- cmath.gox \
crypto.gox \
- csv.gox \
errors.gox \
- exec.gox \
expvar.gox \
flag.gox \
fmt.gox \
- gob.gox \
hash.gox \
html.gox \
- http.gox \
image.gox \
io.gox \
- json.gox \
log.gox \
math.gox \
- mail.gox \
mime.gox \
net.gox \
os.gox \
patch.gox \
path.gox \
- rand.gox \
reflect.gox \
regexp.gox \
- rpc.gox \
runtime.gox \
- scanner.gox \
- smtp.gox \
sort.gox \
strconv.gox \
strings.gox \
sync.gox \
syscall.gox \
- syslog.gox \
- tabwriter.gox \
- template.gox \
testing.gox \
time.gox \
unicode.gox \
- url.gox \
- utf16.gox \
- utf8.gox \
- websocket.gox \
- xml.gox
+ websocket.gox
toolexeclibgoarchivedir = $(toolexeclibgodir)/archive
toolexeclibgoencoding_DATA = \
encoding/ascii85.gox \
+ encoding/asn1.gox \
encoding/base32.gox \
encoding/base64.gox \
encoding/binary.gox \
+ encoding/csv.gox \
encoding/git85.gox \
+ encoding/gob.gox \
encoding/hex.gox \
- encoding/pem.gox
+ encoding/json.gox \
+ encoding/pem.gox \
+ encoding/xml.gox
if LIBGO_IS_LINUX
# exp_inotify_gox = exp/inotify.gox
toolexeclibgoexpsql_DATA = \
exp/sql/driver.gox
-toolexeclibgoexptemplatedir = $(toolexeclibgoexpdir)/template
-
-toolexeclibgoexptemplate_DATA = \
- exp/template/html.gox
-
toolexeclibgogodir = $(toolexeclibgodir)/go
toolexeclibgogo_DATA = \
hash/crc64.gox \
hash/fnv.gox
-toolexeclibgohttpdir = $(toolexeclibgodir)/http
+toolexeclibgohtmldir = $(toolexeclibgodir)/html
-toolexeclibgohttp_DATA = \
- http/cgi.gox \
- http/fcgi.gox \
- http/httptest.gox \
- http/pprof.gox
+toolexeclibgohtml_DATA = \
+ html/template.gox
toolexeclibgoimagedir = $(toolexeclibgodir)/image
toolexeclibgoio_DATA = \
io/ioutil.gox
+toolexeclibgologdir = $(toolexeclibgodir)/log
+
+toolexeclibgolog_DATA = \
+ log/syslog.gox
+
+toolexeclibgomathdir = $(toolexeclibgodir)/math
+
+toolexeclibgomath_DATA = \
+ math/big.gox \
+ math/cmplx.gox \
+ math/rand.gox
+
toolexeclibgomimedir = $(toolexeclibgodir)/mime
toolexeclibgomime_DATA = \
toolexeclibgonet_DATA = \
net/dict.gox \
- net/textproto.gox
+ net/http.gox \
+ net/mail.gox \
+ net/rpc.gox \
+ net/smtp.gox \
+ net/textproto.gox \
+ net/url.gox
+
+toolexeclibgonethttpdir = $(toolexeclibgonetdir)/http
+
+toolexeclibgonethttp_DATA = \
+ net/http/cgi.gox \
+ net/http/fcgi.gox \
+ net/http/httptest.gox \
+ net/http/httputil.gox \
+ net/http/pprof.gox
+
+toolexeclibgonetrpcdir = $(toolexeclibgonetdir)/rpc
+
+toolexeclibgonetrpc_DATA = \
+ net/rpc/jsonrpc.gox
toolexeclibgoolddir = $(toolexeclibgodir)/old
toolexeclibgoosdir = $(toolexeclibgodir)/os
toolexeclibgoos_DATA = \
+ os/exec.gox \
os/user.gox \
os/signal.gox
toolexeclibgoregexp_DATA = \
regexp/syntax.gox
-toolexeclibgorpcdir = $(toolexeclibgodir)/rpc
-
-toolexeclibgorpc_DATA = \
- rpc/jsonrpc.gox
-
toolexeclibgoruntimedir = $(toolexeclibgodir)/runtime
toolexeclibgoruntime_DATA = \
runtime/debug.gox \
runtime/pprof.gox
-toolexeclibgotemplatedir = $(toolexeclibgodir)/template
-
-toolexeclibgotemplate_DATA = \
- template/parse.gox
-
toolexeclibgosyncdir = $(toolexeclibgodir)/sync
toolexeclibgosync_DATA = \
testing/quick.gox \
testing/script.gox
+toolexeclibgotextdir = $(toolexeclibgodir)/text
+
+toolexeclibgotext_DATA = \
+ text/scanner.gox \
+ text/tabwriter.gox \
+ text/template.gox
+
+toolexeclibgotexttemplatedir = $(toolexeclibgotextdir)/template
+
+toolexeclibgotexttemplate_DATA = \
+ text/template/parse.gox
+
+toolexeclibgounicodedir = $(toolexeclibgodir)/unicode
+
+toolexeclibgounicode_DATA = \
+ unicode/utf16.gox \
+ unicode/utf8.gox
+
if HAVE_SYS_MMAN_H
runtime_mem_file = runtime/mem.c
else
runtime1.c \
sema.c \
sigqueue.c \
- string.c
+ string.c \
+ time.c
goc2c.$(OBJEXT): runtime/goc2c.c
$(CC_FOR_BUILD) -c $(CFLAGS_FOR_BUILD) $<
./goc2c --gcc --go-prefix libgo_runtime $< > $@.tmp
mv -f $@.tmp $@
+time.c: $(srcdir)/runtime/time.goc goc2c
+ ./goc2c --gcc --go-prefix libgo_time $< > $@.tmp
+ mv -f $@.tmp $@
+
%.c: $(srcdir)/runtime/%.goc goc2c
./goc2c --gcc $< > $@.tmp
mv -f $@.tmp $@
-go_asn1_files = \
- go/asn1/asn1.go \
- go/asn1/common.go \
- go/asn1/marshal.go
-
-go_big_files = \
- go/big/arith.go \
- go/big/int.go \
- go/big/nat.go \
- go/big/rat.go
-
go_bufio_files = \
go/bufio/bufio.go
go_bytes_c_files = \
go/bytes/indexbyte.c
-go_cmath_files = \
- go/cmath/abs.go \
- go/cmath/asin.go \
- go/cmath/conj.go \
- go/cmath/exp.go \
- go/cmath/isinf.go \
- go/cmath/isnan.go \
- go/cmath/log.go \
- go/cmath/phase.go \
- go/cmath/polar.go \
- go/cmath/pow.go \
- go/cmath/rect.go \
- go/cmath/sin.go \
- go/cmath/sqrt.go \
- go/cmath/tan.go
-
go_crypto_files = \
go/crypto/crypto.go
-go_csv_files = \
- go/csv/reader.go \
- go/csv/writer.go
-
go_errors_files = \
go/errors/errors.go
-go_exec_files = \
- go/exec/exec.go \
- go/exec/lp_unix.go
-
go_expvar_files = \
go/expvar/expvar.go
go/fmt/print.go \
go/fmt/scan.go
-go_gob_files = \
- go/gob/decode.go \
- go/gob/decoder.go \
- go/gob/doc.go \
- go/gob/encode.go \
- go/gob/encoder.go \
- go/gob/error.go \
- go/gob/type.go
-
go_hash_files = \
go/hash/hash.go
go/html/render.go \
go/html/token.go
-go_http_files = \
- go/http/chunked.go \
- go/http/client.go \
- go/http/cookie.go \
- go/http/dump.go \
- go/http/filetransport.go \
- go/http/fs.go \
- go/http/header.go \
- go/http/lex.go \
- go/http/persist.go \
- go/http/request.go \
- go/http/response.go \
- go/http/reverseproxy.go \
- go/http/server.go \
- go/http/sniff.go \
- go/http/status.go \
- go/http/transfer.go \
- go/http/transport.go
-
go_image_files = \
go/image/format.go \
go/image/geom.go \
go/io/io.go \
go/io/pipe.go
-go_json_files = \
- go/json/decode.go \
- go/json/encode.go \
- go/json/indent.go \
- go/json/scanner.go \
- go/json/stream.go \
- go/json/tags.go
-
go_log_files = \
go/log/log.go
go/math/tanh.go \
go/math/unsafe.go
-go_mail_files = \
- go/mail/message.go
-
go_mime_files = \
go/mime/grammar.go \
go/mime/mediatype.go \
go/path/match.go \
go/path/path.go
-go_rand_files = \
- go/rand/exp.go \
- go/rand/normal.go \
- go/rand/rand.go \
- go/rand/rng.go \
- go/rand/zipf.go
-
go_reflect_files = \
go/reflect/deepequal.go \
go/reflect/type.go \
go/regexp/exec.go \
go/regexp/regexp.go
-go_rpc_files = \
- go/rpc/client.go \
- go/rpc/debug.go \
- go/rpc/server.go
+go_net_rpc_files = \
+ go/net/rpc/client.go \
+ go/net/rpc/debug.go \
+ go/net/rpc/server.go
go_runtime_files = \
go/runtime/debug.go \
$(SHELL) $(srcdir)/../move-if-change version.go.tmp version.go
$(STAMP) $@
-go_scanner_files = \
- go/scanner/scanner.go
-
-go_smtp_files = \
- go/smtp/auth.go \
- go/smtp/smtp.go
-
go_sort_files = \
go/sort/search.go \
go/sort/sort.go
go/sync/waitgroup.go
if LIBGO_IS_SOLARIS
-go_syslog_file = go/syslog/syslog_libc.go
+go_syslog_file = go/log/syslog/syslog_libc.go
else
if LIBGO_IS_IRIX
-go_syslog_file = go/syslog/syslog_libc.go
+go_syslog_file = go/log/syslog/syslog_libc.go
else
-go_syslog_file = go/syslog/syslog_unix.go
+go_syslog_file = go/log/syslog/syslog_unix.go
endif
endif
-go_syslog_files = \
- go/syslog/syslog.go \
+go_log_syslog_files = \
+ go/log/syslog/syslog.go \
$(go_syslog_file)
go_syslog_c_files = \
- go/syslog/syslog_c.c
-
-go_tabwriter_files = \
- go/tabwriter/tabwriter.go
-
-go_template_files = \
- go/template/doc.go \
- go/template/exec.go \
- go/template/funcs.go \
- go/template/helper.go \
- go/template/parse.go \
- go/template/set.go
+ go/log/syslog/syslog_c.c
go_testing_files = \
go/testing/benchmark.go \
go/unicode/letter.go \
go/unicode/tables.go
-go_url_files = \
- go/url/url.go
-
-go_utf16_files = \
- go/utf16/utf16.go
-
-go_utf8_files = \
- go/utf8/string.go \
- go/utf8/utf8.go
-
go_websocket_files = \
go/websocket/client.go \
go/websocket/hixie.go \
go/websocket/server.go \
go/websocket/websocket.go
-go_xml_files = \
- go/xml/marshal.go \
- go/xml/read.go \
- go/xml/xml.go
go_archive_tar_files = \
go/archive/tar/common.go \
go_encoding_ascii85_files = \
go/encoding/ascii85/ascii85.go
+go_encoding_asn1_files = \
+ go/encoding/asn1/asn1.go \
+ go/encoding/asn1/common.go \
+ go/encoding/asn1/marshal.go
go_encoding_base32_files = \
go/encoding/base32/base32.go
go_encoding_base64_files = \
go_encoding_binary_files = \
go/encoding/binary/binary.go \
go/encoding/binary/varint.go
+go_encoding_csv_files = \
+ go/encoding/csv/reader.go \
+ go/encoding/csv/writer.go
go_encoding_git85_files = \
go/encoding/git85/git.go
+go_encoding_gob_files = \
+ go/encoding/gob/decode.go \
+ go/encoding/gob/decoder.go \
+ go/encoding/gob/doc.go \
+ go/encoding/gob/encode.go \
+ go/encoding/gob/encoder.go \
+ go/encoding/gob/error.go \
+ go/encoding/gob/type.go
go_encoding_hex_files = \
go/encoding/hex/hex.go
+go_encoding_json_files = \
+ go/encoding/json/decode.go \
+ go/encoding/json/encode.go \
+ go/encoding/json/indent.go \
+ go/encoding/json/scanner.go \
+ go/encoding/json/stream.go \
+ go/encoding/json/tags.go
go_encoding_pem_files = \
go/encoding/pem/pem.go
+go_encoding_xml_files = \
+ go/encoding/xml/marshal.go \
+ go/encoding/xml/read.go \
+ go/encoding/xml/xml.go
go_exp_ebnf_files = \
go/exp/ebnf/ebnf.go \
go_exp_ssh_files = \
go/exp/ssh/channel.go \
go/exp/ssh/client.go \
+ go/exp/ssh/client_auth.go \
go/exp/ssh/common.go \
go/exp/ssh/doc.go \
go/exp/ssh/messages.go \
go/exp/sql/driver/driver.go \
go/exp/sql/driver/types.go
-go_exp_template_html_files = \
- go/exp/template/html/attr.go \
- go/exp/template/html/clone.go \
- go/exp/template/html/content.go \
- go/exp/template/html/context.go \
- go/exp/template/html/css.go \
- go/exp/template/html/doc.go \
- go/exp/template/html/error.go \
- go/exp/template/html/escape.go \
- go/exp/template/html/html.go \
- go/exp/template/html/js.go \
- go/exp/template/html/transition.go \
- go/exp/template/html/url.go
-
go_go_ast_files = \
go/go/ast/ast.go \
go/go/ast/filter.go \
+ go/go/ast/import.go \
go/go/ast/print.go \
go/go/ast/resolve.go \
go/go/ast/scope.go \
go_hash_fnv_files = \
go/hash/fnv/fnv.go
-go_http_cgi_files = \
- go/http/cgi/child.go \
- go/http/cgi/host.go
-go_http_fcgi_files = \
- go/http/fcgi/child.go \
- go/http/fcgi/fcgi.go
-go_http_httptest_files = \
- go/http/httptest/recorder.go \
- go/http/httptest/server.go
-go_http_pprof_files = \
- go/http/pprof/pprof.go
+go_html_template_files = \
+ go/html/template/attr.go \
+ go/html/template/clone.go \
+ go/html/template/content.go \
+ go/html/template/context.go \
+ go/html/template/css.go \
+ go/html/template/doc.go \
+ go/html/template/error.go \
+ go/html/template/escape.go \
+ go/html/template/html.go \
+ go/html/template/js.go \
+ go/html/template/template.go \
+ go/html/template/transition.go \
+ go/html/template/url.go
go_image_bmp_files = \
go/image/bmp/reader.go
go/io/ioutil/ioutil.go \
go/io/ioutil/tempfile.go
+go_math_big_files = \
+ go/math/big/arith.go \
+ go/math/big/int.go \
+ go/math/big/nat.go \
+ go/math/big/rat.go
+go_math_cmplx_files = \
+ go/math/cmplx/abs.go \
+ go/math/cmplx/asin.go \
+ go/math/cmplx/conj.go \
+ go/math/cmplx/exp.go \
+ go/math/cmplx/isinf.go \
+ go/math/cmplx/isnan.go \
+ go/math/cmplx/log.go \
+ go/math/cmplx/phase.go \
+ go/math/cmplx/polar.go \
+ go/math/cmplx/pow.go \
+ go/math/cmplx/rect.go \
+ go/math/cmplx/sin.go \
+ go/math/cmplx/sqrt.go \
+ go/math/cmplx/tan.go
+go_math_rand_files = \
+ go/math/rand/exp.go \
+ go/math/rand/normal.go \
+ go/math/rand/rand.go \
+ go/math/rand/rng.go \
+ go/math/rand/zipf.go
+
go_mime_multipart_files = \
go/mime/multipart/formdata.go \
go/mime/multipart/multipart.go \
go_net_dict_files = \
go/net/dict/dict.go
-
+go_net_http_files = \
+ go/net/http/chunked.go \
+ go/net/http/client.go \
+ go/net/http/cookie.go \
+ go/net/http/filetransport.go \
+ go/net/http/fs.go \
+ go/net/http/header.go \
+ go/net/http/lex.go \
+ go/net/http/request.go \
+ go/net/http/response.go \
+ go/net/http/server.go \
+ go/net/http/sniff.go \
+ go/net/http/status.go \
+ go/net/http/transfer.go \
+ go/net/http/transport.go
+go_net_mail_files = \
+ go/net/mail/message.go
+go_net_smtp_files = \
+ go/net/smtp/auth.go \
+ go/net/smtp/smtp.go
go_net_textproto_files = \
go/net/textproto/header.go \
go/net/textproto/pipeline.go \
go/net/textproto/reader.go \
go/net/textproto/textproto.go \
go/net/textproto/writer.go
+go_net_url_files = \
+ go/net/url/url.go
+
+go_net_http_cgi_files = \
+ go/net/http/cgi/child.go \
+ go/net/http/cgi/host.go
+go_net_http_fcgi_files = \
+ go/net/http/fcgi/child.go \
+ go/net/http/fcgi/fcgi.go
+go_net_http_httptest_files = \
+ go/net/http/httptest/recorder.go \
+ go/net/http/httptest/server.go
+go_net_http_pprof_files = \
+ go/net/http/pprof/pprof.go
+go_net_http_httputil_files = \
+ go/net/http/httputil/chunked.go \
+ go/net/http/httputil/dump.go \
+ go/net/http/httputil/persist.go \
+ go/net/http/httputil/reverseproxy.go
+
go_old_netchan_files = \
go/old/netchan/common.go \
go/old/template/format.go \
go/old/template/parse.go
+go_os_exec_files = \
+ go/os/exec/exec.go \
+ go/os/exec/lp_unix.go
+
go_os_user_files = \
go/os/user/user.go \
go/os/user/lookup_unix.go
go/regexp/syntax/regexp.go \
go/regexp/syntax/simplify.go
-go_rpc_jsonrpc_files = \
- go/rpc/jsonrpc/client.go \
- go/rpc/jsonrpc/server.go
+go_net_rpc_jsonrpc_files = \
+ go/net/rpc/jsonrpc/client.go \
+ go/net/rpc/jsonrpc/server.go
go_runtime_debug_files = \
go/runtime/debug/stack.go
go_runtime_pprof_files = \
go/runtime/pprof/pprof.go
-go_template_parse_files = \
- go/template/parse/lex.go \
- go/template/parse/node.go \
- go/template/parse/parse.go \
- go/template/parse/set.go
+go_text_tabwriter_files = \
+ go/text/tabwriter/tabwriter.go
+go_text_template_files = \
+ go/text/template/doc.go \
+ go/text/template/exec.go \
+ go/text/template/funcs.go \
+ go/text/template/helper.go \
+ go/text/template/parse.go \
+ go/text/template/set.go
+go_text_template_parse_files = \
+ go/text/template/parse/lex.go \
+ go/text/template/parse/node.go \
+ go/text/template/parse/parse.go \
+ go/text/template/parse/set.go
go_sync_atomic_files = \
go/sync/atomic/doc.go
go_testing_script_files = \
go/testing/script/script.go
+go_text_scanner_files = \
+ go/text/scanner/scanner.go
+
+go_unicode_utf16_files = \
+ go/unicode/utf16/utf16.go
+go_unicode_utf8_files = \
+ go/unicode/utf8/string.go \
+ go/unicode/utf8/utf8.go
+
# Define Syscall and Syscall6.
if LIBGO_IS_RTEMS
syscall_syscall_file = go/syscall/syscall_stubs.go
endif
libgo_go_objs = \
- asn1/asn1.lo \
- big/big.lo \
bufio/bufio.lo \
bytes/bytes.lo \
bytes/index.lo \
- cmath/cmath.lo \
crypto/crypto.lo \
- csv/csv.lo \
errors/errors.lo \
- exec/exec.lo \
expvar/expvar.lo \
flag/flag.lo \
fmt/fmt.lo \
- gob/gob.lo \
hash/hash.lo \
html/html.lo \
- http/http.lo \
image/image.lo \
io/io.lo \
- json/json.lo \
log/log.lo \
math/math.lo \
- mail/mail.lo \
- mime/mime.lo \
net/net.lo \
+ os/exec.lo \
os/os.lo \
patch/patch.lo \
path/path.lo \
- rand/rand.lo \
reflect/reflect.lo \
regexp/regexp.lo \
- rpc/rpc.lo \
runtime/runtime.lo \
- scanner/scanner.lo \
- smtp/smtp.lo \
sort/sort.lo \
strconv/strconv.lo \
strings/strings.lo \
sync/sync.lo \
- syslog/syslog.lo \
- syslog/syslog_c.lo \
- tabwriter/tabwriter.lo \
- template/template.lo \
time/time.lo \
unicode/unicode.lo \
- url/url.lo \
- utf16/utf16.lo \
- utf8/utf8.lo \
websocket/websocket.lo \
- xml/xml.lo \
archive/tar.lo \
archive/zip.lo \
compress/bzip2.lo \
debug/macho.lo \
debug/pe.lo \
encoding/ascii85.lo \
+ encoding/asn1.lo \
encoding/base32.lo \
encoding/base64.lo \
encoding/binary.lo \
+ encoding/csv.lo \
encoding/git85.lo \
+ encoding/gob.lo \
encoding/hex.lo \
+ encoding/json.lo \
encoding/pem.lo \
+ encoding/xml.lo \
exp/ebnf.lo \
exp/gui.lo \
exp/norm.lo \
exp/types.lo \
exp/gui/x11.lo \
exp/sql/driver.lo \
- exp/template/html.lo \
+ html/template.lo \
go/ast.lo \
go/build.lo \
go/doc.lo \
hash/crc32.lo \
hash/crc64.lo \
hash/fnv.lo \
- http/cgi.lo \
- http/fcgi.lo \
- http/httptest.lo \
- http/pprof.lo \
+ net/http/cgi.lo \
+ net/http/fcgi.lo \
+ net/http/httptest.lo \
+ net/http/httputil.lo \
+ net/http/pprof.lo \
image/bmp.lo \
image/color.lo \
image/draw.lo \
image/ycbcr.lo \
index/suffixarray.lo \
io/ioutil.lo \
+ log/syslog.lo \
+ log/syslog/syslog_c.lo \
+ math/big.lo \
+ math/cmplx.lo \
+ math/rand.lo \
+ mime/mime.lo \
mime/multipart.lo \
net/dict.lo \
+ net/http.lo \
+ net/mail.lo \
+ net/rpc.lo \
+ net/smtp.lo \
net/textproto.lo \
+ net/url.lo \
old/netchan.lo \
old/regexp.lo \
old/template.lo \
os/signal.lo \
path/filepath.lo \
regexp/syntax.lo \
- rpc/jsonrpc.lo \
+ net/rpc/jsonrpc.lo \
runtime/debug.lo \
runtime/pprof.lo \
sync/atomic.lo \
syscall/syscall.lo \
syscall/errno.lo \
syscall/wait.lo \
- template/parse.lo \
+ text/scanner.lo \
+ text/tabwriter.lo \
+ text/template.lo \
+ text/template/parse.lo \
testing/testing.lo \
testing/iotest.lo \
testing/quick.lo \
- testing/script.lo
+ testing/script.lo \
+ unicode/utf16.lo \
+ unicode/utf8.lo
libgo_la_SOURCES = $(runtime_files)
$(toolexeclibgoexp_DATA) \
$(toolexeclibgogo_DATA) \
$(toolexeclibgohash_DATA) \
- $(toolexeclibgohttp_DATA) \
$(toolexeclibgoimage_DATA) \
$(toolexeclibgoindex_DATA) \
$(toolexeclibgoio_DATA) \
+ $(toolexeclibgolog_DATA) \
+ $(toolexeclibgomath_DATA) \
$(toolexeclibgomime_DATA) \
$(toolexeclibgonet_DATA) \
+ $(toolexeclibgonethttp_DATA) \
$(toolexeclibgoos_DATA) \
$(toolexeclibgopath_DATA) \
$(toolexeclibgorpc_DATA) \
$(toolexeclibgoruntime_DATA) \
$(toolexeclibgosync_DATA) \
- $(toolexeclibgotesting_DATA)
-
-@go_include@ asn1/asn1.lo.dep
-asn1/asn1.lo.dep: $(go_asn1_files)
- $(BUILDDEPS)
-asn1/asn1.lo: $(go_asn1_files)
- $(BUILDPACKAGE)
-asn1/check: $(CHECK_DEPS)
- @$(CHECK)
-.PHONY: asn1/check
-
-@go_include@ big/big.lo.dep
-big/big.lo.dep: $(go_big_files)
- $(BUILDDEPS)
-big/big.lo: $(go_big_files)
- $(BUILDPACKAGE)
-big/check: $(CHECK_DEPS)
- @$(CHECK)
-.PHONY: big/check
+ $(toolexeclibgotesting_DATA) \
+ $(toolexeclibgotext_DATA) \
+ $(toolexeclibgotexttemplate_DATA) \
+ $(toolexeclibgounicode_DATA)
@go_include@ bufio/bufio.lo.dep
bufio/bufio.lo.dep: $(go_bufio_files)
@$(CHECK)
.PHONY: bytes/check
-@go_include@ cmath/cmath.lo.dep
-cmath/cmath.lo.dep: $(go_cmath_files)
- $(BUILDDEPS)
-cmath/cmath.lo: $(go_cmath_files)
- $(BUILDPACKAGE)
-cmath/check: $(CHECK_DEPS)
- @$(CHECK)
-.PHONY: cmath/check
-
@go_include@ crypto/crypto.lo.dep
crypto/crypto.lo.dep: $(go_crypto_files)
$(BUILDDEPS)
@$(CHECK)
.PHONY: crypto/check
-@go_include@ csv/csv.lo.dep
-csv/csv.lo.dep: $(go_csv_files)
- $(BUILDDEPS)
-csv/csv.lo: $(go_csv_files)
- $(BUILDPACKAGE)
-csv/check: $(CHECK_DEPS)
- @$(CHECK)
-.PHONY: csv/check
-
@go_include@ errors/errors.lo.dep
errors/errors.lo.dep: $(go_errors_files)
$(BUILDDEPS)
@$(CHECK)
.PHONY: errors/check
-@go_include@ exec/exec.lo.dep
-exec/exec.lo.dep: $(go_exec_files)
- $(BUILDDEPS)
-exec/exec.lo: $(go_exec_files)
- $(BUILDPACKAGE)
-exec/check: $(CHECK_DEPS)
- @$(CHECK)
-.PHONY: exec/check
-
@go_include@ expvar/expvar.lo.dep
expvar/expvar.lo.dep: $(go_expvar_files)
$(BUILDDEPS)
@$(CHECK)
.PHONY: fmt/check
-@go_include@ gob/gob.lo.dep
-gob/gob.lo.dep: $(go_gob_files)
- $(BUILDDEPS)
-gob/gob.lo: $(go_gob_files)
- $(BUILDPACKAGE)
-gob/check: $(CHECK_DEPS)
- @$(CHECK)
-.PHONY: gob/check
-
@go_include@ hash/hash.lo.dep
hash/hash.lo.dep: $(go_hash_files)
$(BUILDDEPS)
@$(CHECK)
.PHONY: html/check
-@go_include@ http/http.lo.dep
-http/http.lo.dep: $(go_http_files)
- $(BUILDDEPS)
-http/http.lo: $(go_http_files)
- $(BUILDPACKAGE)
-http/check: $(CHECK_DEPS)
- @$(CHECK)
-.PHONY: http/check
-
@go_include@ image/image.lo.dep
image/image.lo.dep: $(go_image_files)
$(BUILDDEPS)
@$(CHECK)
.PHONY: io/check
-@go_include@ json/json.lo.dep
-json/json.lo.dep: $(go_json_files)
- $(BUILDDEPS)
-json/json.lo: $(go_json_files)
- $(BUILDPACKAGE)
-json/check: $(CHECK_DEPS)
- @$(CHECK)
-.PHONY: json/check
-
@go_include@ log/log.lo.dep
log/log.lo.dep: $(go_log_files)
$(BUILDDEPS)
@$(CHECK)
.PHONY: math/check
-@go_include@ mail/mail.lo.dep
-mail/mail.lo.dep: $(go_mail_files)
- $(BUILDDEPS)
-mail/mail.lo: $(go_mail_files)
- $(BUILDPACKAGE)
-mail/check: $(CHECK_DEPS)
- @$(CHECK)
-.PHONY: mail/check
-
@go_include@ mime/mime.lo.dep
mime/mime.lo.dep: $(go_mime_files)
$(BUILDDEPS)
@$(CHECK)
.PHONY: path/check
-@go_include@ rand/rand.lo.dep
-rand/rand.lo.dep: $(go_rand_files)
- $(BUILDDEPS)
-rand/rand.lo: $(go_rand_files)
- $(BUILDPACKAGE)
-rand/check: $(CHECK_DEPS)
- @$(CHECK)
-.PHONY: rand/check
-
@go_include@ reflect/reflect.lo.dep
reflect/reflect.lo.dep: $(go_reflect_files)
$(BUILDDEPS)
@$(CHECK)
.PHONY: regexp/check
-@go_include@ rpc/rpc.lo.dep
-rpc/rpc.lo.dep: $(go_rpc_files)
- $(BUILDDEPS)
-rpc/rpc.lo: $(go_rpc_files)
- $(BUILDPACKAGE)
-rpc/check: $(CHECK_DEPS)
- @$(CHECK)
-.PHONY: rpc/check
-
@go_include@ runtime/runtime.lo.dep
runtime/runtime.lo.dep: $(go_runtime_files)
$(BUILDDEPS)
@$(CHECK)
.PHONY: runtime/check
-@go_include@ scanner/scanner.lo.dep
-scanner/scanner.lo.dep: $(go_scanner_files)
- $(BUILDDEPS)
-scanner/scanner.lo: $(go_scanner_files)
- $(BUILDPACKAGE)
-scanner/check: $(CHECK_DEPS)
- @$(CHECK)
-.PHONY: scanner/check
-
-@go_include@ smtp/smtp.lo.dep
-smtp/smtp.lo.dep: $(go_smtp_files)
+@go_include@ text/scanner.lo.dep
+text/scanner.lo.dep: $(go_text_scanner_files)
$(BUILDDEPS)
-smtp/smtp.lo: $(go_smtp_files)
+text/scanner.lo: $(go_text_scanner_files)
$(BUILDPACKAGE)
-smtp/check: $(CHECK_DEPS)
+text/scanner/check: $(CHECK_DEPS)
+ @$(MKDIR_P) text/scanner
@$(CHECK)
-.PHONY: smtp/check
+.PHONY: text/scanner/check
@go_include@ sort/sort.lo.dep
sort/sort.lo.dep: $(go_sort_files)
@$(CHECK)
.PHONY: sync/check
-@go_include@ syslog/syslog.lo.dep
-syslog/syslog.lo.dep: $(go_syslog_files)
- $(BUILDDEPS)
-syslog/syslog.lo: $(go_syslog_files)
- $(BUILDPACKAGE)
-syslog/syslog_c.lo: $(go_syslog_c_files) syslog/syslog.lo
- $(LTCOMPILE) -c -o $@ $(srcdir)/go/syslog/syslog_c.c
-syslog/check: $(CHECK_DEPS)
- @$(CHECK)
-.PHONY: syslog/check
-
-@go_include@ tabwriter/tabwriter.lo.dep
-tabwriter/tabwriter.lo.dep: $(go_tabwriter_files)
- $(BUILDDEPS)
-tabwriter/tabwriter.lo: $(go_tabwriter_files)
- $(BUILDPACKAGE)
-tabwriter/check: $(CHECK_DEPS)
- @$(CHECK)
-.PHONY: tabwriter/check
-
-@go_include@ template/template.lo.dep
-template/template.lo.dep: $(go_template_files)
- $(BUILDDEPS)
-template/template.lo: $(go_template_files)
- $(BUILDPACKAGE)
-template/check: $(CHECK_DEPS)
- @$(CHECK)
-.PHONY: template/check
-
@go_include@ testing/testing.lo.dep
testing/testing.lo.dep: $(go_testing_files)
$(BUILDDEPS)
@$(CHECK)
.PHONY: unicode/check
-@go_include@ url/url.lo.dep
-url/url.lo.dep: $(go_url_files)
- $(BUILDDEPS)
-url/url.lo: $(go_url_files)
- $(BUILDPACKAGE)
-url/check: $(CHECK_DEPS)
- @$(CHECK)
-.PHONY: url/check
-
-@go_include@ utf16/utf16.lo.dep
-utf16/utf16.lo.dep: $(go_utf16_files)
- $(BUILDDEPS)
-utf16/utf16.lo: $(go_utf16_files)
- $(BUILDPACKAGE)
-utf16/check: $(CHECK_DEPS)
- @$(CHECK)
-.PHONY: utf16/check
-
-@go_include@ utf8/utf8.lo.dep
-utf8/utf8.lo.dep: $(go_utf8_files)
- $(BUILDDEPS)
-utf8/utf8.lo: $(go_utf8_files)
- $(BUILDPACKAGE)
-utf8/check: $(CHECK_DEPS)
- @$(CHECK)
-.PHONY: utf8/check
-
@go_include@ websocket/websocket.lo.dep
websocket/websocket.lo.dep: $(go_websocket_files)
$(BUILDDEPS)
@$(CHECK)
.PHONY: websocket/check
-@go_include@ xml/xml.lo.dep
-xml/xml.lo.dep: $(go_xml_files)
- $(BUILDDEPS)
-xml/xml.lo: $(go_xml_files)
- $(BUILDPACKAGE)
-xml/check: $(CHECK_DEPS)
- @$(CHECK)
-.PHONY: xml/check
-
@go_include@ archive/tar.lo.dep
archive/tar.lo.dep: $(go_archive_tar_files)
$(BUILDDEPS)
@$(CHECK)
.PHONY: debug/pe/check
+@go_include@ encoding/asn1.lo.dep
+encoding/asn1.lo.dep: $(go_encoding_asn1_files)
+ $(BUILDDEPS)
+encoding/asn1.lo: $(go_encoding_asn1_files)
+ $(BUILDPACKAGE)
+encoding/asn1/check: $(CHECK_DEPS)
+ @$(MKDIR_P) encoding/asn1
+ @$(CHECK)
+.PHONY: encoding/asn1/check
+
@go_include@ encoding/ascii85.lo.dep
encoding/ascii85.lo.dep: $(go_encoding_ascii85_files)
$(BUILDDEPS)
@$(CHECK)
.PHONY: encoding/binary/check
+@go_include@ encoding/csv.lo.dep
+encoding/csv.lo.dep: $(go_encoding_csv_files)
+ $(BUILDDEPS)
+encoding/csv.lo: $(go_encoding_csv_files)
+ $(BUILDPACKAGE)
+encoding/csv/check: $(CHECK_DEPS)
+ @$(MKDIR_P) encoding/csv
+ @$(CHECK)
+.PHONY: encoding/csv/check
+
@go_include@ encoding/git85.lo.dep
encoding/git85.lo.dep: $(go_encoding_git85_files)
$(BUILDDEPS)
@$(CHECK)
.PHONY: encoding/git85/check
+@go_include@ encoding/gob.lo.dep
+encoding/gob.lo.dep: $(go_encoding_gob_files)
+ $(BUILDDEPS)
+encoding/gob.lo: $(go_encoding_gob_files)
+ $(BUILDPACKAGE)
+encoding/gob/check: $(CHECK_DEPS)
+ @$(MKDIR_P) encoding/gob
+ @$(CHECK)
+.PHONY: encoding/gob/check
+
@go_include@ encoding/hex.lo.dep
encoding/hex.lo.dep: $(go_encoding_hex_files)
$(BUILDDEPS)
@$(CHECK)
.PHONY: encoding/hex/check
+@go_include@ encoding/json.lo.dep
+encoding/json.lo.dep: $(go_encoding_json_files)
+ $(BUILDDEPS)
+encoding/json.lo: $(go_encoding_json_files)
+ $(BUILDPACKAGE)
+encoding/json/check: $(CHECK_DEPS)
+ @$(MKDIR_P) encoding/json
+ @$(CHECK)
+.PHONY: encoding/json/check
+
@go_include@ encoding/pem.lo.dep
encoding/pem.lo.dep: $(go_encoding_pem_files)
$(BUILDDEPS)
@$(CHECK)
.PHONY: encoding/pem/check
+@go_include@ encoding/xml.lo.dep
+encoding/xml.lo.dep: $(go_encoding_xml_files)
+ $(BUILDDEPS)
+encoding/xml.lo: $(go_encoding_xml_files)
+ $(BUILDPACKAGE)
+encoding/xml/check: $(CHECK_DEPS)
+ @$(MKDIR_P) encoding/xml
+ @$(CHECK)
+.PHONY: encoding/xml/check
+
@go_include@ exp/ebnf.lo.dep
exp/ebnf.lo.dep: $(go_exp_ebnf_files)
$(BUILDDEPS)
@$(CHECK)
.PHONY: exp/sql/driver/check
-@go_include@ exp/template/html.lo.dep
-exp/template/html.lo.dep: $(go_exp_template_html_files)
+@go_include@ html/template.lo.dep
+html/template.lo.dep: $(go_html_template_files)
$(BUILDDEPS)
-exp/template/html.lo: $(go_exp_template_html_files)
+html/template.lo: $(go_html_template_files)
$(BUILDPACKAGE)
-exp/template/html/check: $(CHECK_DEPS)
- @$(MKDIR_P) exp/template/html
+html/template/check: $(CHECK_DEPS)
+ @$(MKDIR_P) html/template
@$(CHECK)
-.PHONY: exp/template/html/check
+.PHONY: html/template/check
@go_include@ go/ast.lo.dep
go/ast.lo.dep: $(go_go_ast_files)
@$(CHECK)
.PHONY: hash/fnv/check
-@go_include@ http/cgi.lo.dep
-http/cgi.lo.dep: $(go_http_cgi_files)
- $(BUILDDEPS)
-http/cgi.lo: $(go_http_cgi_files)
- $(BUILDPACKAGE)
-http/cgi/check: $(CHECK_DEPS)
- @$(MKDIR_P) http/cgi
- @$(CHECK)
-.PHONY: http/cgi/check
-
-@go_include@ http/fcgi.lo.dep
-http/fcgi.lo.dep: $(go_http_fcgi_files)
- $(BUILDDEPS)
-http/fcgi.lo: $(go_http_fcgi_files)
- $(BUILDPACKAGE)
-http/fcgi/check: $(CHECK_DEPS)
- @$(MKDIR_P) http/fcgi
- @$(CHECK)
-.PHONY: http/fcgi/check
-
-@go_include@ http/httptest.lo.dep
-http/httptest.lo.dep: $(go_http_httptest_files)
- $(BUILDDEPS)
-http/httptest.lo: $(go_http_httptest_files)
- $(BUILDPACKAGE)
-http/httptest/check: $(CHECK_DEPS)
- @$(MKDIR_P) http/httptest
- @$(CHECK)
-.PHONY: http/httptest/check
-
-@go_include@ http/pprof.lo.dep
-http/pprof.lo.dep: $(go_http_pprof_files)
- $(BUILDDEPS)
-http/pprof.lo: $(go_http_pprof_files)
- $(BUILDPACKAGE)
-http/pprof/check: $(CHECK_DEPS)
- @$(MKDIR_P) http/pprof
- @$(CHECK)
-.PHONY: http/pprof/check
-
@go_include@ image/bmp.lo.dep
image/bmp.lo.dep: $(go_image_bmp_files)
$(BUILDDEPS)
@$(CHECK)
.PHONY: io/ioutil/check
+@go_include@ log/syslog.lo.dep
+log/syslog.lo.dep: $(go_log_syslog_files)
+ $(BUILDDEPS)
+log/syslog.lo: $(go_log_syslog_files)
+ $(BUILDPACKAGE)
+log/syslog/syslog_c.lo: $(go_syslog_c_files) log/syslog.lo
+ $(LTCOMPILE) -c -o $@ $(srcdir)/go/log/syslog/syslog_c.c
+log/syslog/check: $(CHECK_DEPS)
+ @$(MKDIR_P) log/syslog
+ @$(CHECK)
+.PHONY: log/syslog/check
+
+@go_include@ math/big.lo.dep
+math/big.lo.dep: $(go_math_big_files)
+ $(BUILDDEPS)
+math/big.lo: $(go_math_big_files)
+ $(BUILDPACKAGE)
+math/big/check: $(CHECK_DEPS)
+ @$(MKDIR_P) math/big
+ @$(CHECK)
+.PHONY: math/big/check
+
+@go_include@ math/cmplx.lo.dep
+math/cmplx.lo.dep: $(go_math_cmplx_files)
+ $(BUILDDEPS)
+math/cmplx.lo: $(go_math_cmplx_files)
+ $(BUILDPACKAGE)
+math/cmplx/check: $(CHECK_DEPS)
+ @$(MKDIR_P) math/cmplx
+ @$(CHECK)
+.PHONY: math/cmplx/check
+
+@go_include@ math/rand.lo.dep
+math/rand.lo.dep: $(go_math_rand_files)
+ $(BUILDDEPS)
+math/rand.lo: $(go_math_rand_files)
+ $(BUILDPACKAGE)
+math/rand/check: $(CHECK_DEPS)
+ @$(MKDIR_P) math/rand
+ @$(CHECK)
+.PHONY: math/rand/check
+
@go_include@ mime/multipart.lo.dep
mime/multipart.lo.dep: $(go_mime_multipart_files)
$(BUILDDEPS)
net/dict.lo: $(go_net_dict_files)
$(BUILDPACKAGE)
+@go_include@ net/http.lo.dep
+net/http.lo.dep: $(go_net_http_files)
+ $(BUILDDEPS)
+net/http.lo: $(go_net_http_files)
+ $(BUILDPACKAGE)
+net/http/check: $(CHECK_DEPS)
+ @$(MKDIR_P) net/http
+ @$(CHECK)
+.PHONY: net/http/check
+
+@go_include@ net/mail.lo.dep
+net/mail.lo.dep: $(go_net_mail_files)
+ $(BUILDDEPS)
+net/mail.lo: $(go_net_mail_files)
+ $(BUILDPACKAGE)
+net/mail/check: $(CHECK_DEPS)
+ @$(MKDIR_P) net/mail
+ @$(CHECK)
+.PHONY: net/mail/check
+
+@go_include@ net/rpc.lo.dep
+net/rpc.lo.dep: $(go_net_rpc_files)
+ $(BUILDDEPS)
+net/rpc.lo: $(go_net_rpc_files)
+ $(BUILDPACKAGE)
+net/rpc/check: $(CHECK_DEPS)
+ @$(MKDIR_P) net/rpc
+ @$(CHECK)
+.PHONY: net/rpc/check
+
+@go_include@ net/smtp.lo.dep
+net/smtp.lo.dep: $(go_net_smtp_files)
+ $(BUILDDEPS)
+net/smtp.lo: $(go_net_smtp_files)
+ $(BUILDPACKAGE)
+net/smtp/check: $(CHECK_DEPS)
+ @$(MKDIR_P) net/smtp
+ @$(CHECK)
+.PHONY: net/smtp/check
+
+@go_include@ net/url.lo.dep
+net/url.lo.dep: $(go_net_url_files)
+ $(BUILDDEPS)
+net/url.lo: $(go_net_url_files)
+ $(BUILDPACKAGE)
+net/url/check: $(CHECK_DEPS)
+ @$(MKDIR_P) net/url
+ @$(CHECK)
+.PHONY: net/url/check
+
@go_include@ net/textproto.lo.dep
net/textproto.lo.dep: $(go_net_textproto_files)
$(BUILDDEPS)
@$(CHECK)
.PHONY: net/textproto/check
+@go_include@ net/http/cgi.lo.dep
+net/http/cgi.lo.dep: $(go_net_http_cgi_files)
+ $(BUILDDEPS)
+net/http/cgi.lo: $(go_net_http_cgi_files)
+ $(BUILDPACKAGE)
+net/http/cgi/check: $(CHECK_DEPS)
+ @$(MKDIR_P) net/http/cgi
+ @$(CHECK)
+.PHONY: net/http/cgi/check
+
+@go_include@ net/http/fcgi.lo.dep
+net/http/fcgi.lo.dep: $(go_net_http_fcgi_files)
+ $(BUILDDEPS)
+net/http/fcgi.lo: $(go_net_http_fcgi_files)
+ $(BUILDPACKAGE)
+net/http/fcgi/check: $(CHECK_DEPS)
+ @$(MKDIR_P) net/http/fcgi
+ @$(CHECK)
+.PHONY: net/http/fcgi/check
+
+@go_include@ net/http/httptest.lo.dep
+net/http/httptest.lo.dep: $(go_net_http_httptest_files)
+ $(BUILDDEPS)
+net/http/httptest.lo: $(go_net_http_httptest_files)
+ $(BUILDPACKAGE)
+net/http/httptest/check: $(CHECK_DEPS)
+ @$(MKDIR_P) net/http/httptest
+ @$(CHECK)
+.PHONY: net/http/httptest/check
+
+@go_include@ net/http/httputil.lo.dep
+net/http/httputil.lo.dep: $(go_net_http_httputil_files)
+ $(BUILDDEPS)
+net/http/httputil.lo: $(go_net_http_httputil_files)
+ $(BUILDPACKAGE)
+net/http/httputil/check: $(CHECK_DEPS)
+ @$(MKDIR_P) net/http/httputil
+ @$(CHECK)
+.PHONY: net/http/httputil/check
+
+@go_include@ net/http/pprof.lo.dep
+net/http/pprof.lo.dep: $(go_net_http_pprof_files)
+ $(BUILDDEPS)
+net/http/pprof.lo: $(go_net_http_pprof_files)
+ $(BUILDPACKAGE)
+net/http/pprof/check: $(CHECK_DEPS)
+ @$(MKDIR_P) net/http/pprof
+ @$(CHECK)
+.PHONY: net/http/pprof/check
+
+@go_include@ net/rpc/jsonrpc.lo.dep
+net/rpc/jsonrpc.lo.dep: $(go_net_rpc_jsonrpc_files)
+ $(BUILDDEPS)
+net/rpc/jsonrpc.lo: $(go_net_rpc_jsonrpc_files)
+ $(BUILDPACKAGE)
+net/rpc/jsonrpc/check: $(CHECK_DEPS)
+ @$(MKDIR_P) net/rpc/jsonrpc
+ @$(CHECK)
+.PHONY: net/rpc/jsonrpc/check
+
@go_include@ old/netchan.lo.dep
old/netchan.lo.dep: $(go_old_netchan_files)
$(BUILDDEPS)
@$(CHECK)
.PHONY: old/template/check
+@go_include@ os/exec.lo.dep
+os/exec.lo.dep: $(go_os_exec_files)
+ $(BUILDDEPS)
+os/exec.lo: $(go_os_exec_files)
+ $(BUILDPACKAGE)
+os/exec/check: $(CHECK_DEPS)
+ @$(MKDIR_P) os/exec
+ @$(CHECK)
+.PHONY: os/exec/check
+
@go_include@ os/user.lo.dep
os/user.lo.dep: $(go_os_user_files)
$(BUILDDEPS)
@$(CHECK)
.PHONY: regexp/syntax/check
-@go_include@ rpc/jsonrpc.lo.dep
-rpc/jsonrpc.lo.dep: $(go_rpc_jsonrpc_files)
- $(BUILDDEPS)
-rpc/jsonrpc.lo: $(go_rpc_jsonrpc_files)
- $(BUILDPACKAGE)
-rpc/jsonrpc/check: $(CHECK_DEPS)
- @$(MKDIR_P) rpc/jsonrpc
- @$(CHECK)
-.PHONY: rpc/jsonrpc/check
-
@go_include@ runtime/debug.lo.dep
runtime/debug.lo.dep: $(go_runtime_debug_files)
$(BUILDDEPS)
@$(CHECK)
.PHONY: sync/atomic/check
-@go_include@ template/parse.lo.dep
-template/parse.lo.dep: $(go_template_parse_files)
+@go_include@ text/tabwriter.lo.dep
+text/tabwriter.lo.dep: $(go_text_tabwriter_files)
+ $(BUILDDEPS)
+text/tabwriter.lo: $(go_text_tabwriter_files)
+ $(BUILDPACKAGE)
+text/tabwriter/check: $(CHECK_DEPS)
+ @$(MKDIR_P) text/tabwriter
+ @$(CHECK)
+.PHONY: text/tabwriter/check
+
+@go_include@ text/template.lo.dep
+text/template.lo.dep: $(go_text_template_files)
$(BUILDDEPS)
-template/parse.lo: $(go_template_parse_files)
+text/template.lo: $(go_text_template_files)
$(BUILDPACKAGE)
-template/parse/check: $(CHECK_DEPS)
- @$(MKDIR_P) template/parse
+text/template/check: $(CHECK_DEPS)
+ @$(MKDIR_P) text/template
@$(CHECK)
-.PHONY: template/parse/check
+.PHONY: text/template/check
+
+@go_include@ text/template/parse.lo.dep
+text/template/parse.lo.dep: $(go_text_template_parse_files)
+ $(BUILDDEPS)
+text/template/parse.lo: $(go_text_template_parse_files)
+ $(BUILDPACKAGE)
+text/template/parse/check: $(CHECK_DEPS)
+ @$(MKDIR_P) text/template/parse
+ @$(CHECK)
+.PHONY: text/template/parse/check
@go_include@ testing/iotest.lo.dep
testing/iotest.lo.dep: $(go_testing_iotest_files)
@$(CHECK)
.PHONY: testing/script/check
+@go_include@ unicode/utf16.lo.dep
+unicode/utf16.lo.dep: $(go_unicode_utf16_files)
+ $(BUILDDEPS)
+unicode/utf16.lo: $(go_unicode_utf16_files)
+ $(BUILDPACKAGE)
+unicode/utf16/check: $(CHECK_DEPS)
+ @$(MKDIR_P) unicode/utf16
+ @$(CHECK)
+.PHONY: unicode/utf16/check
+
+@go_include@ unicode/utf8.lo.dep
+unicode/utf8.lo.dep: $(go_unicode_utf8_files)
+ $(BUILDDEPS)
+unicode/utf8.lo: $(go_unicode_utf8_files)
+ $(BUILDPACKAGE)
+unicode/utf8/check: $(CHECK_DEPS)
+ @$(MKDIR_P) unicode/utf8
+ @$(CHECK)
+.PHONY: unicode/utf8/check
+
@go_include@ syscall/syscall.lo.dep
syscall/syscall.lo.dep: $(go_syscall_files)
$(BUILDDEPS)
f=`echo $< | sed -e 's/.lo$$/.o/'`; \
$(OBJCOPY) -j .go_export $$f $@.tmp && mv -f $@.tmp $@
-asn1.gox: asn1/asn1.lo
- $(BUILDGOX)
-big.gox: big/big.lo
- $(BUILDGOX)
bufio.gox: bufio/bufio.lo
$(BUILDGOX)
bytes.gox: bytes/bytes.lo
$(BUILDGOX)
-cmath.gox: cmath/cmath.lo
- $(BUILDGOX)
crypto.gox: crypto/crypto.lo
$(BUILDGOX)
-csv.gox: csv/csv.lo
- $(BUILDGOX)
errors.gox: errors/errors.lo
$(BUILDGOX)
-exec.gox: exec/exec.lo
- $(BUILDGOX)
expvar.gox: expvar/expvar.lo
$(BUILDGOX)
flag.gox: flag/flag.lo
$(BUILDGOX)
fmt.gox: fmt/fmt.lo
$(BUILDGOX)
-gob.gox: gob/gob.lo
- $(BUILDGOX)
hash.gox: hash/hash.lo
$(BUILDGOX)
html.gox: html/html.lo
$(BUILDGOX)
-http.gox: http/http.lo
- $(BUILDGOX)
image.gox: image/image.lo
$(BUILDGOX)
io.gox: io/io.lo
$(BUILDGOX)
-json.gox: json/json.lo
- $(BUILDGOX)
log.gox: log/log.lo
$(BUILDGOX)
math.gox: math/math.lo
$(BUILDGOX)
-mail.gox: mail/mail.lo
- $(BUILDGOX)
mime.gox: mime/mime.lo
$(BUILDGOX)
net.gox: net/net.lo
$(BUILDGOX)
path.gox: path/path.lo
$(BUILDGOX)
-rand.gox: rand/rand.lo
- $(BUILDGOX)
reflect.gox: reflect/reflect.lo
$(BUILDGOX)
regexp.gox: regexp/regexp.lo
$(BUILDGOX)
-rpc.gox: rpc/rpc.lo
- $(BUILDGOX)
runtime.gox: runtime/runtime.lo
$(BUILDGOX)
-scanner.gox: scanner/scanner.lo
- $(BUILDGOX)
-smtp.gox: smtp/smtp.lo
- $(BUILDGOX)
sort.gox: sort/sort.lo
$(BUILDGOX)
strconv.gox: strconv/strconv.lo
$(BUILDGOX)
sync.gox: sync/sync.lo
$(BUILDGOX)
-syslog.gox: syslog/syslog.lo
- $(BUILDGOX)
syscall.gox: syscall/syscall.lo
$(BUILDGOX)
-tabwriter.gox: tabwriter/tabwriter.lo
- $(BUILDGOX)
-template.gox: template/template.lo
- $(BUILDGOX)
testing.gox: testing/testing.lo
$(BUILDGOX)
time.gox: time/time.lo
$(BUILDGOX)
unicode.gox: unicode/unicode.lo
$(BUILDGOX)
-url.gox: url/url.lo
- $(BUILDGOX)
-utf16.gox: utf16/utf16.lo
- $(BUILDGOX)
-utf8.gox: utf8/utf8.lo
- $(BUILDGOX)
websocket.gox: websocket/websocket.lo
$(BUILDGOX)
-xml.gox: xml/xml.lo
- $(BUILDGOX)
archive/tar.gox: archive/tar.lo
$(BUILDGOX)
encoding/ascii85.gox: encoding/ascii85.lo
$(BUILDGOX)
+encoding/asn1.gox: encoding/asn1.lo
+ $(BUILDGOX)
encoding/base32.gox: encoding/base32.lo
$(BUILDGOX)
encoding/base64.gox: encoding/base64.lo
$(BUILDGOX)
encoding/binary.gox: encoding/binary.lo
$(BUILDGOX)
+encoding/csv.gox: encoding/csv.lo
+ $(BUILDGOX)
encoding/git85.gox: encoding/git85.lo
$(BUILDGOX)
+encoding/gob.gox: encoding/gob.lo
+ $(BUILDGOX)
encoding/hex.gox: encoding/hex.lo
$(BUILDGOX)
+encoding/json.gox: encoding/json.lo
+ $(BUILDGOX)
encoding/pem.gox: encoding/pem.lo
$(BUILDGOX)
+encoding/xml.gox: encoding/xml.lo
+ $(BUILDGOX)
exp/ebnf.gox: exp/ebnf.lo
$(BUILDGOX)
exp/sql/driver.gox: exp/sql/driver.lo
$(BUILDGOX)
-exp/template/html.gox: exp/template/html.lo
+html/template.gox: html/template.lo
$(BUILDGOX)
go/ast.gox: go/ast.lo
hash/fnv.gox: hash/fnv.lo
$(BUILDGOX)
-http/cgi.gox: http/cgi.lo
- $(BUILDGOX)
-http/fcgi.gox: http/fcgi.lo
- $(BUILDGOX)
-http/httptest.gox: http/httptest.lo
- $(BUILDGOX)
-http/pprof.gox: http/pprof.lo
- $(BUILDGOX)
-
image/bmp.gox: image/bmp.lo
$(BUILDGOX)
image/color.gox: image/color.lo
io/ioutil.gox: io/ioutil.lo
$(BUILDGOX)
+log/syslog.gox: log/syslog.lo
+ $(BUILDGOX)
+
+math/big.gox: math/big.lo
+ $(BUILDGOX)
+math/cmplx.gox: math/cmplx.lo
+ $(BUILDGOX)
+math/rand.gox: math/rand.lo
+ $(BUILDGOX)
+
mime/multipart.gox: mime/multipart.lo
$(BUILDGOX)
net/dict.gox: net/dict.lo
$(BUILDGOX)
+net/http.gox: net/http.lo
+ $(BUILDGOX)
+net/mail.gox: net/mail.lo
+ $(BUILDGOX)
+net/rpc.gox: net/rpc.lo
+ $(BUILDGOX)
+net/smtp.gox: net/smtp.lo
+ $(BUILDGOX)
net/textproto.gox: net/textproto.lo
$(BUILDGOX)
+net/url.gox: net/url.lo
+ $(BUILDGOX)
+
+net/http/cgi.gox: net/http/cgi.lo
+ $(BUILDGOX)
+net/http/fcgi.gox: net/http/fcgi.lo
+ $(BUILDGOX)
+net/http/httptest.gox: net/http/httptest.lo
+ $(BUILDGOX)
+net/http/httputil.gox: net/http/httputil.lo
+ $(BUILDGOX)
+net/http/pprof.gox: net/http/pprof.lo
+ $(BUILDGOX)
+
+net/rpc/jsonrpc.gox: net/rpc/jsonrpc.lo
+ $(BUILDGOX)
old/netchan.gox: old/netchan.lo
$(BUILDGOX)
old/template.gox: old/template.lo
$(BUILDGOX)
+os/exec.gox: os/exec.lo
+ $(BUILDGOX)
os/user.gox: os/user.lo
$(BUILDGOX)
os/signal.gox: os/signal.lo
regexp/syntax.gox: regexp/syntax.lo
$(BUILDGOX)
-rpc/jsonrpc.gox: rpc/jsonrpc.lo
- $(BUILDGOX)
-
runtime/debug.gox: runtime/debug.lo
$(BUILDGOX)
runtime/pprof.gox: runtime/pprof.lo
sync/atomic.gox: sync/atomic.lo
$(BUILDGOX)
-template/parse.gox: template/parse.lo
+text/scanner.gox: text/scanner.lo
+ $(BUILDGOX)
+text/tabwriter.gox: text/tabwriter.lo
+ $(BUILDGOX)
+text/template.gox: text/template.lo
+ $(BUILDGOX)
+text/template/parse.gox: text/template/parse.lo
$(BUILDGOX)
testing/iotest.gox: testing/iotest.lo
testing/script.gox: testing/script.lo
$(BUILDGOX)
+unicode/utf16.gox: unicode/utf16.lo
+ $(BUILDGOX)
+unicode/utf8.gox: unicode/utf8.lo
+ $(BUILDGOX)
+
if LIBGO_IS_LINUX
# exp_inotify_check = exp/inotify/check
exp_inotify_check =
endif
TEST_PACKAGES = \
- asn1/check \
- big/check \
bufio/check \
bytes/check \
- cmath/check \
- csv/check \
errors/check \
- exec/check \
expvar/check \
flag/check \
fmt/check \
- gob/check \
html/check \
- http/check \
image/check \
io/check \
- json/check \
log/check \
math/check \
- mail/check \
mime/check \
net/check \
os/check \
patch/check \
path/check \
- rand/check \
reflect/check \
regexp/check \
- rpc/check \
runtime/check \
- scanner/check \
- smtp/check \
sort/check \
strconv/check \
strings/check \
sync/check \
- syslog/check \
- tabwriter/check \
- template/check \
time/check \
unicode/check \
- url/check \
- utf16/check \
- utf8/check \
websocket/check \
- xml/check \
archive/tar/check \
archive/zip/check \
compress/bzip2/check \
debug/macho/check \
debug/pe/check \
encoding/ascii85/check \
+ encoding/asn1/check \
encoding/base32/check \
encoding/base64/check \
encoding/binary/check \
+ encoding/csv/check \
encoding/git85/check \
+ encoding/gob/check \
encoding/hex/check \
+ encoding/json/check \
encoding/pem/check \
+ encoding/xml/check \
exp/ebnf/check \
$(exp_inotify_check) \
exp/norm/check \
exp/sql/check \
exp/ssh/check \
exp/terminal/check \
- exp/template/html/check \
+ html/template/check \
go/ast/check \
$(go_build_check_omitted_since_it_calls_6g) \
go/parser/check \
hash/crc32/check \
hash/crc64/check \
hash/fnv/check \
- http/cgi/check \
- http/fcgi/check \
image/draw/check \
image/jpeg/check \
image/png/check \
image/ycbcr/check \
index/suffixarray/check \
io/ioutil/check \
+ log/syslog/check \
+ math/big/check \
+ math/cmplx/check \
+ math/rand/check \
mime/multipart/check \
+ net/http/check \
+ net/http/cgi/check \
+ net/http/fcgi/check \
+ net/http/httputil/check \
+ net/mail/check \
+ net/rpc/check \
+ net/smtp/check \
net/textproto/check \
+ net/url/check \
+ net/rpc/jsonrpc/check \
old/netchan/check \
old/regexp/check \
old/template/check \
+ os/exec/check \
os/user/check \
os/signal/check \
path/filepath/check \
regexp/syntax/check \
- rpc/jsonrpc/check \
sync/atomic/check \
- template/parse/check \
+ text/scanner/check \
+ text/tabwriter/check \
+ text/template/check \
+ text/template/parse/check \
testing/quick/check \
- testing/script/check
+ testing/script/check \
+ unicode/utf16/check \
+ unicode/utf8/check
check: check-tail
check-recursive: check-head
"$(DESTDIR)$(toolexeclibgoexpdir)" \
"$(DESTDIR)$(toolexeclibgoexpguidir)" \
"$(DESTDIR)$(toolexeclibgoexpsqldir)" \
- "$(DESTDIR)$(toolexeclibgoexptemplatedir)" \
"$(DESTDIR)$(toolexeclibgogodir)" \
"$(DESTDIR)$(toolexeclibgohashdir)" \
- "$(DESTDIR)$(toolexeclibgohttpdir)" \
+ "$(DESTDIR)$(toolexeclibgohtmldir)" \
"$(DESTDIR)$(toolexeclibgoimagedir)" \
"$(DESTDIR)$(toolexeclibgoindexdir)" \
"$(DESTDIR)$(toolexeclibgoiodir)" \
+ "$(DESTDIR)$(toolexeclibgologdir)" \
+ "$(DESTDIR)$(toolexeclibgomathdir)" \
"$(DESTDIR)$(toolexeclibgomimedir)" \
"$(DESTDIR)$(toolexeclibgonetdir)" \
+ "$(DESTDIR)$(toolexeclibgonethttpdir)" \
+ "$(DESTDIR)$(toolexeclibgonetrpcdir)" \
"$(DESTDIR)$(toolexeclibgoolddir)" \
"$(DESTDIR)$(toolexeclibgoosdir)" \
"$(DESTDIR)$(toolexeclibgopathdir)" \
"$(DESTDIR)$(toolexeclibgoregexpdir)" \
- "$(DESTDIR)$(toolexeclibgorpcdir)" \
"$(DESTDIR)$(toolexeclibgoruntimedir)" \
"$(DESTDIR)$(toolexeclibgosyncdir)" \
- "$(DESTDIR)$(toolexeclibgotemplatedir)" \
- "$(DESTDIR)$(toolexeclibgotestingdir)"
+ "$(DESTDIR)$(toolexeclibgotestingdir)" \
+ "$(DESTDIR)$(toolexeclibgotextdir)" \
+ "$(DESTDIR)$(toolexeclibgotexttemplatedir)" \
+ "$(DESTDIR)$(toolexeclibgounicodedir)"
LIBRARIES = $(toolexeclib_LIBRARIES)
ARFLAGS = cru
libgobegin_a_AR = $(AR) $(ARFLAGS)
libgobegin_a_OBJECTS = $(am_libgobegin_a_OBJECTS)
LTLIBRARIES = $(toolexeclib_LTLIBRARIES)
am__DEPENDENCIES_1 =
-am__DEPENDENCIES_2 = asn1/asn1.lo big/big.lo bufio/bufio.lo \
- bytes/bytes.lo bytes/index.lo cmath/cmath.lo crypto/crypto.lo \
- csv/csv.lo errors/errors.lo exec/exec.lo expvar/expvar.lo \
- flag/flag.lo fmt/fmt.lo gob/gob.lo hash/hash.lo html/html.lo \
- http/http.lo image/image.lo io/io.lo json/json.lo log/log.lo \
- math/math.lo mail/mail.lo mime/mime.lo net/net.lo os/os.lo \
- patch/patch.lo path/path.lo rand/rand.lo reflect/reflect.lo \
- regexp/regexp.lo rpc/rpc.lo runtime/runtime.lo \
- scanner/scanner.lo smtp/smtp.lo sort/sort.lo \
- strconv/strconv.lo strings/strings.lo sync/sync.lo \
- syslog/syslog.lo syslog/syslog_c.lo tabwriter/tabwriter.lo \
- template/template.lo time/time.lo unicode/unicode.lo \
- url/url.lo utf16/utf16.lo utf8/utf8.lo websocket/websocket.lo \
- xml/xml.lo archive/tar.lo archive/zip.lo compress/bzip2.lo \
- compress/flate.lo compress/gzip.lo compress/lzw.lo \
- compress/zlib.lo container/heap.lo container/list.lo \
- container/ring.lo crypto/aes.lo crypto/bcrypt.lo \
- crypto/blowfish.lo crypto/cast5.lo crypto/cipher.lo \
- crypto/des.lo crypto/dsa.lo crypto/ecdsa.lo crypto/elliptic.lo \
- crypto/hmac.lo crypto/md4.lo crypto/md5.lo crypto/ocsp.lo \
- crypto/openpgp.lo crypto/rand.lo crypto/rc4.lo \
+am__DEPENDENCIES_2 = bufio/bufio.lo bytes/bytes.lo bytes/index.lo \
+ crypto/crypto.lo errors/errors.lo expvar/expvar.lo \
+ flag/flag.lo fmt/fmt.lo hash/hash.lo html/html.lo \
+ image/image.lo io/io.lo log/log.lo math/math.lo net/net.lo \
+ os/exec.lo os/os.lo patch/patch.lo path/path.lo \
+ reflect/reflect.lo regexp/regexp.lo runtime/runtime.lo \
+ sort/sort.lo strconv/strconv.lo strings/strings.lo \
+ sync/sync.lo time/time.lo unicode/unicode.lo \
+ websocket/websocket.lo archive/tar.lo archive/zip.lo \
+ compress/bzip2.lo compress/flate.lo compress/gzip.lo \
+ compress/lzw.lo compress/zlib.lo container/heap.lo \
+ container/list.lo container/ring.lo crypto/aes.lo \
+ crypto/bcrypt.lo crypto/blowfish.lo crypto/cast5.lo \
+ crypto/cipher.lo crypto/des.lo crypto/dsa.lo crypto/ecdsa.lo \
+ crypto/elliptic.lo crypto/hmac.lo crypto/md4.lo crypto/md5.lo \
+ crypto/ocsp.lo crypto/openpgp.lo crypto/rand.lo crypto/rc4.lo \
crypto/ripemd160.lo crypto/rsa.lo crypto/sha1.lo \
crypto/sha256.lo crypto/sha512.lo crypto/subtle.lo \
crypto/tls.lo crypto/twofish.lo crypto/x509.lo crypto/xtea.lo \
crypto/openpgp/error.lo crypto/openpgp/packet.lo \
crypto/openpgp/s2k.lo crypto/x509/pkix.lo debug/dwarf.lo \
debug/elf.lo debug/gosym.lo debug/macho.lo debug/pe.lo \
- encoding/ascii85.lo encoding/base32.lo encoding/base64.lo \
- encoding/binary.lo encoding/git85.lo encoding/hex.lo \
- encoding/pem.lo exp/ebnf.lo exp/gui.lo exp/norm.lo exp/spdy.lo \
- exp/sql.lo exp/ssh.lo exp/terminal.lo exp/types.lo \
- exp/gui/x11.lo exp/sql/driver.lo exp/template/html.lo \
- go/ast.lo go/build.lo go/doc.lo go/parser.lo go/printer.lo \
- go/scanner.lo go/token.lo hash/adler32.lo hash/crc32.lo \
- hash/crc64.lo hash/fnv.lo http/cgi.lo http/fcgi.lo \
- http/httptest.lo http/pprof.lo image/bmp.lo image/color.lo \
- image/draw.lo image/gif.lo image/jpeg.lo image/png.lo \
- image/tiff.lo image/ycbcr.lo index/suffixarray.lo io/ioutil.lo \
- mime/multipart.lo net/dict.lo net/textproto.lo old/netchan.lo \
- old/regexp.lo old/template.lo $(am__DEPENDENCIES_1) os/user.lo \
- os/signal.lo path/filepath.lo regexp/syntax.lo rpc/jsonrpc.lo \
- runtime/debug.lo runtime/pprof.lo sync/atomic.lo \
- sync/atomic_c.lo syscall/syscall.lo syscall/errno.lo \
- syscall/wait.lo template/parse.lo testing/testing.lo \
- testing/iotest.lo testing/quick.lo testing/script.lo
+ encoding/ascii85.lo encoding/asn1.lo encoding/base32.lo \
+ encoding/base64.lo encoding/binary.lo encoding/csv.lo \
+ encoding/git85.lo encoding/gob.lo encoding/hex.lo \
+ encoding/json.lo encoding/pem.lo encoding/xml.lo exp/ebnf.lo \
+ exp/gui.lo exp/norm.lo exp/spdy.lo exp/sql.lo exp/ssh.lo \
+ exp/terminal.lo exp/types.lo exp/gui/x11.lo exp/sql/driver.lo \
+ html/template.lo go/ast.lo go/build.lo go/doc.lo go/parser.lo \
+ go/printer.lo go/scanner.lo go/token.lo hash/adler32.lo \
+ hash/crc32.lo hash/crc64.lo hash/fnv.lo net/http/cgi.lo \
+ net/http/fcgi.lo net/http/httptest.lo net/http/httputil.lo \
+ net/http/pprof.lo image/bmp.lo image/color.lo image/draw.lo \
+ image/gif.lo image/jpeg.lo image/png.lo image/tiff.lo \
+ image/ycbcr.lo index/suffixarray.lo io/ioutil.lo log/syslog.lo \
+ log/syslog/syslog_c.lo math/big.lo math/cmplx.lo math/rand.lo \
+ mime/mime.lo mime/multipart.lo net/dict.lo net/http.lo \
+ net/mail.lo net/rpc.lo net/smtp.lo net/textproto.lo net/url.lo \
+ old/netchan.lo old/regexp.lo old/template.lo \
+ $(am__DEPENDENCIES_1) os/user.lo os/signal.lo path/filepath.lo \
+ regexp/syntax.lo net/rpc/jsonrpc.lo runtime/debug.lo \
+ runtime/pprof.lo sync/atomic.lo sync/atomic_c.lo \
+ syscall/syscall.lo syscall/errno.lo syscall/wait.lo \
+ text/scanner.lo text/tabwriter.lo text/template.lo \
+ text/template/parse.lo testing/testing.lo testing/iotest.lo \
+ testing/quick.lo testing/script.lo unicode/utf16.lo \
+ unicode/utf8.lo
libgo_la_DEPENDENCIES = $(am__DEPENDENCIES_2) $(am__DEPENDENCIES_1) \
$(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \
$(am__DEPENDENCIES_1)
runtime/mheap.c runtime/msize.c runtime/proc.c \
runtime/runtime.c runtime/thread.c runtime/yield.c \
runtime/rtems-task-variable-add.c iface.c malloc.c map.c \
- mprof.c reflect.c runtime1.c sema.c sigqueue.c string.c
+ mprof.c reflect.c runtime1.c sema.c sigqueue.c string.c time.c
@LIBGO_IS_LINUX_FALSE@am__objects_1 = lock_sema.lo thread-sema.lo
@LIBGO_IS_LINUX_TRUE@am__objects_1 = lock_futex.lo thread-linux.lo
@HAVE_SYS_MMAN_H_FALSE@am__objects_2 = mem_posix_memalign.lo
mfinal.lo mfixalloc.lo mgc0.lo mheap.lo msize.lo proc.lo \
runtime.lo thread.lo yield.lo $(am__objects_3) iface.lo \
malloc.lo map.lo mprof.lo reflect.lo runtime1.lo sema.lo \
- sigqueue.lo string.lo
+ sigqueue.lo string.lo time.lo
am_libgo_la_OBJECTS = $(am__objects_4)
libgo_la_OBJECTS = $(am_libgo_la_OBJECTS)
libgo_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
$(toolexeclibgocryptox509_DATA) $(toolexeclibgodebug_DATA) \
$(toolexeclibgoencoding_DATA) $(toolexeclibgoexp_DATA) \
$(toolexeclibgoexpgui_DATA) $(toolexeclibgoexpsql_DATA) \
- $(toolexeclibgoexptemplate_DATA) $(toolexeclibgogo_DATA) \
- $(toolexeclibgohash_DATA) $(toolexeclibgohttp_DATA) \
- $(toolexeclibgoimage_DATA) $(toolexeclibgoindex_DATA) \
- $(toolexeclibgoio_DATA) $(toolexeclibgomime_DATA) \
- $(toolexeclibgonet_DATA) $(toolexeclibgoold_DATA) \
- $(toolexeclibgoos_DATA) $(toolexeclibgopath_DATA) \
- $(toolexeclibgoregexp_DATA) $(toolexeclibgorpc_DATA) \
+ $(toolexeclibgogo_DATA) $(toolexeclibgohash_DATA) \
+ $(toolexeclibgohtml_DATA) $(toolexeclibgoimage_DATA) \
+ $(toolexeclibgoindex_DATA) $(toolexeclibgoio_DATA) \
+ $(toolexeclibgolog_DATA) $(toolexeclibgomath_DATA) \
+ $(toolexeclibgomime_DATA) $(toolexeclibgonet_DATA) \
+ $(toolexeclibgonethttp_DATA) $(toolexeclibgonetrpc_DATA) \
+ $(toolexeclibgoold_DATA) $(toolexeclibgoos_DATA) \
+ $(toolexeclibgopath_DATA) $(toolexeclibgoregexp_DATA) \
$(toolexeclibgoruntime_DATA) $(toolexeclibgosync_DATA) \
- $(toolexeclibgotemplate_DATA) $(toolexeclibgotesting_DATA)
+ $(toolexeclibgotesting_DATA) $(toolexeclibgotext_DATA) \
+ $(toolexeclibgotexttemplate_DATA) $(toolexeclibgounicode_DATA)
RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \
distclean-recursive maintainer-clean-recursive
AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \
toolexeclib_LIBRARIES = libgobegin.a
toolexeclibgodir = $(toolexeclibdir)/go/$(gcc_version)/$(target_alias)
toolexeclibgo_DATA = \
- asn1.gox \
- big.gox \
bufio.gox \
bytes.gox \
- cmath.gox \
crypto.gox \
- csv.gox \
errors.gox \
- exec.gox \
expvar.gox \
flag.gox \
fmt.gox \
- gob.gox \
hash.gox \
html.gox \
- http.gox \
image.gox \
io.gox \
- json.gox \
log.gox \
math.gox \
- mail.gox \
mime.gox \
net.gox \
os.gox \
patch.gox \
path.gox \
- rand.gox \
reflect.gox \
regexp.gox \
- rpc.gox \
runtime.gox \
- scanner.gox \
- smtp.gox \
sort.gox \
strconv.gox \
strings.gox \
sync.gox \
syscall.gox \
- syslog.gox \
- tabwriter.gox \
- template.gox \
testing.gox \
time.gox \
unicode.gox \
- url.gox \
- utf16.gox \
- utf8.gox \
- websocket.gox \
- xml.gox
+ websocket.gox
toolexeclibgoarchivedir = $(toolexeclibgodir)/archive
toolexeclibgoarchive_DATA = \
toolexeclibgoencodingdir = $(toolexeclibgodir)/encoding
toolexeclibgoencoding_DATA = \
encoding/ascii85.gox \
+ encoding/asn1.gox \
encoding/base32.gox \
encoding/base64.gox \
encoding/binary.gox \
+ encoding/csv.gox \
encoding/git85.gox \
+ encoding/gob.gox \
encoding/hex.gox \
- encoding/pem.gox
+ encoding/json.gox \
+ encoding/pem.gox \
+ encoding/xml.gox
@LIBGO_IS_LINUX_FALSE@exp_inotify_gox =
toolexeclibgoexpsql_DATA = \
exp/sql/driver.gox
-toolexeclibgoexptemplatedir = $(toolexeclibgoexpdir)/template
-toolexeclibgoexptemplate_DATA = \
- exp/template/html.gox
-
toolexeclibgogodir = $(toolexeclibgodir)/go
toolexeclibgogo_DATA = \
go/ast.gox \
hash/crc64.gox \
hash/fnv.gox
-toolexeclibgohttpdir = $(toolexeclibgodir)/http
-toolexeclibgohttp_DATA = \
- http/cgi.gox \
- http/fcgi.gox \
- http/httptest.gox \
- http/pprof.gox
+toolexeclibgohtmldir = $(toolexeclibgodir)/html
+toolexeclibgohtml_DATA = \
+ html/template.gox
toolexeclibgoimagedir = $(toolexeclibgodir)/image
toolexeclibgoimage_DATA = \
toolexeclibgoio_DATA = \
io/ioutil.gox
+toolexeclibgologdir = $(toolexeclibgodir)/log
+toolexeclibgolog_DATA = \
+ log/syslog.gox
+
+toolexeclibgomathdir = $(toolexeclibgodir)/math
+toolexeclibgomath_DATA = \
+ math/big.gox \
+ math/cmplx.gox \
+ math/rand.gox
+
toolexeclibgomimedir = $(toolexeclibgodir)/mime
toolexeclibgomime_DATA = \
mime/multipart.gox
toolexeclibgonetdir = $(toolexeclibgodir)/net
toolexeclibgonet_DATA = \
net/dict.gox \
- net/textproto.gox
+ net/http.gox \
+ net/mail.gox \
+ net/rpc.gox \
+ net/smtp.gox \
+ net/textproto.gox \
+ net/url.gox
+
+toolexeclibgonethttpdir = $(toolexeclibgonetdir)/http
+toolexeclibgonethttp_DATA = \
+ net/http/cgi.gox \
+ net/http/fcgi.gox \
+ net/http/httptest.gox \
+ net/http/httputil.gox \
+ net/http/pprof.gox
+
+toolexeclibgonetrpcdir = $(toolexeclibgonetdir)/rpc
+toolexeclibgonetrpc_DATA = \
+ net/rpc/jsonrpc.gox
toolexeclibgoolddir = $(toolexeclibgodir)/old
toolexeclibgoold_DATA = \
toolexeclibgoosdir = $(toolexeclibgodir)/os
toolexeclibgoos_DATA = \
+ os/exec.gox \
os/user.gox \
os/signal.gox
toolexeclibgoregexp_DATA = \
regexp/syntax.gox
-toolexeclibgorpcdir = $(toolexeclibgodir)/rpc
-toolexeclibgorpc_DATA = \
- rpc/jsonrpc.gox
-
toolexeclibgoruntimedir = $(toolexeclibgodir)/runtime
toolexeclibgoruntime_DATA = \
runtime/debug.gox \
runtime/pprof.gox
-toolexeclibgotemplatedir = $(toolexeclibgodir)/template
-toolexeclibgotemplate_DATA = \
- template/parse.gox
-
toolexeclibgosyncdir = $(toolexeclibgodir)/sync
toolexeclibgosync_DATA = \
sync/atomic.gox
testing/quick.gox \
testing/script.gox
+toolexeclibgotextdir = $(toolexeclibgodir)/text
+toolexeclibgotext_DATA = \
+ text/scanner.gox \
+ text/tabwriter.gox \
+ text/template.gox
+
+toolexeclibgotexttemplatedir = $(toolexeclibgotextdir)/template
+toolexeclibgotexttemplate_DATA = \
+ text/template/parse.gox
+
+toolexeclibgounicodedir = $(toolexeclibgodir)/unicode
+toolexeclibgounicode_DATA = \
+ unicode/utf16.gox \
+ unicode/utf8.gox
+
@HAVE_SYS_MMAN_H_FALSE@runtime_mem_file = runtime/mem_posix_memalign.c
@HAVE_SYS_MMAN_H_TRUE@runtime_mem_file = runtime/mem.c
@LIBGO_IS_RTEMS_FALSE@rtems_task_variable_add_file =
runtime1.c \
sema.c \
sigqueue.c \
- string.c
-
-go_asn1_files = \
- go/asn1/asn1.go \
- go/asn1/common.go \
- go/asn1/marshal.go
-
-go_big_files = \
- go/big/arith.go \
- go/big/int.go \
- go/big/nat.go \
- go/big/rat.go
+ string.c \
+ time.c
go_bufio_files = \
go/bufio/bufio.go
go_bytes_c_files = \
go/bytes/indexbyte.c
-go_cmath_files = \
- go/cmath/abs.go \
- go/cmath/asin.go \
- go/cmath/conj.go \
- go/cmath/exp.go \
- go/cmath/isinf.go \
- go/cmath/isnan.go \
- go/cmath/log.go \
- go/cmath/phase.go \
- go/cmath/polar.go \
- go/cmath/pow.go \
- go/cmath/rect.go \
- go/cmath/sin.go \
- go/cmath/sqrt.go \
- go/cmath/tan.go
-
go_crypto_files = \
go/crypto/crypto.go
-go_csv_files = \
- go/csv/reader.go \
- go/csv/writer.go
-
go_errors_files = \
go/errors/errors.go
-go_exec_files = \
- go/exec/exec.go \
- go/exec/lp_unix.go
-
go_expvar_files = \
go/expvar/expvar.go
go/fmt/print.go \
go/fmt/scan.go
-go_gob_files = \
- go/gob/decode.go \
- go/gob/decoder.go \
- go/gob/doc.go \
- go/gob/encode.go \
- go/gob/encoder.go \
- go/gob/error.go \
- go/gob/type.go
-
go_hash_files = \
go/hash/hash.go
go/html/render.go \
go/html/token.go
-go_http_files = \
- go/http/chunked.go \
- go/http/client.go \
- go/http/cookie.go \
- go/http/dump.go \
- go/http/filetransport.go \
- go/http/fs.go \
- go/http/header.go \
- go/http/lex.go \
- go/http/persist.go \
- go/http/request.go \
- go/http/response.go \
- go/http/reverseproxy.go \
- go/http/server.go \
- go/http/sniff.go \
- go/http/status.go \
- go/http/transfer.go \
- go/http/transport.go
-
go_image_files = \
go/image/format.go \
go/image/geom.go \
go/io/io.go \
go/io/pipe.go
-go_json_files = \
- go/json/decode.go \
- go/json/encode.go \
- go/json/indent.go \
- go/json/scanner.go \
- go/json/stream.go \
- go/json/tags.go
-
go_log_files = \
go/log/log.go
go/math/tanh.go \
go/math/unsafe.go
-go_mail_files = \
- go/mail/message.go
-
go_mime_files = \
go/mime/grammar.go \
go/mime/mediatype.go \
go/path/match.go \
go/path/path.go
-go_rand_files = \
- go/rand/exp.go \
- go/rand/normal.go \
- go/rand/rand.go \
- go/rand/rng.go \
- go/rand/zipf.go
-
go_reflect_files = \
go/reflect/deepequal.go \
go/reflect/type.go \
go/regexp/exec.go \
go/regexp/regexp.go
-go_rpc_files = \
- go/rpc/client.go \
- go/rpc/debug.go \
- go/rpc/server.go
+go_net_rpc_files = \
+ go/net/rpc/client.go \
+ go/net/rpc/debug.go \
+ go/net/rpc/server.go
go_runtime_files = \
go/runtime/debug.go \
go/runtime/type.go \
version.go
-go_scanner_files = \
- go/scanner/scanner.go
-
-go_smtp_files = \
- go/smtp/auth.go \
- go/smtp/smtp.go
-
go_sort_files = \
go/sort/search.go \
go/sort/sort.go
go/sync/rwmutex.go \
go/sync/waitgroup.go
-@LIBGO_IS_IRIX_FALSE@@LIBGO_IS_SOLARIS_FALSE@go_syslog_file = go/syslog/syslog_unix.go
-@LIBGO_IS_IRIX_TRUE@@LIBGO_IS_SOLARIS_FALSE@go_syslog_file = go/syslog/syslog_libc.go
-@LIBGO_IS_SOLARIS_TRUE@go_syslog_file = go/syslog/syslog_libc.go
-go_syslog_files = \
- go/syslog/syslog.go \
+@LIBGO_IS_IRIX_FALSE@@LIBGO_IS_SOLARIS_FALSE@go_syslog_file = go/log/syslog/syslog_unix.go
+@LIBGO_IS_IRIX_TRUE@@LIBGO_IS_SOLARIS_FALSE@go_syslog_file = go/log/syslog/syslog_libc.go
+@LIBGO_IS_SOLARIS_TRUE@go_syslog_file = go/log/syslog/syslog_libc.go
+go_log_syslog_files = \
+ go/log/syslog/syslog.go \
$(go_syslog_file)
go_syslog_c_files = \
- go/syslog/syslog_c.c
-
-go_tabwriter_files = \
- go/tabwriter/tabwriter.go
-
-go_template_files = \
- go/template/doc.go \
- go/template/exec.go \
- go/template/funcs.go \
- go/template/helper.go \
- go/template/parse.go \
- go/template/set.go
+ go/log/syslog/syslog_c.c
go_testing_files = \
go/testing/benchmark.go \
go/unicode/letter.go \
go/unicode/tables.go
-go_url_files = \
- go/url/url.go
-
-go_utf16_files = \
- go/utf16/utf16.go
-
-go_utf8_files = \
- go/utf8/string.go \
- go/utf8/utf8.go
-
go_websocket_files = \
go/websocket/client.go \
go/websocket/hixie.go \
go/websocket/server.go \
go/websocket/websocket.go
-go_xml_files = \
- go/xml/marshal.go \
- go/xml/read.go \
- go/xml/xml.go
-
go_archive_tar_files = \
go/archive/tar/common.go \
go/archive/tar/reader.go \
go_encoding_ascii85_files = \
go/encoding/ascii85/ascii85.go
+go_encoding_asn1_files = \
+ go/encoding/asn1/asn1.go \
+ go/encoding/asn1/common.go \
+ go/encoding/asn1/marshal.go
+
go_encoding_base32_files = \
go/encoding/base32/base32.go
go/encoding/binary/binary.go \
go/encoding/binary/varint.go
+go_encoding_csv_files = \
+ go/encoding/csv/reader.go \
+ go/encoding/csv/writer.go
+
go_encoding_git85_files = \
go/encoding/git85/git.go
+go_encoding_gob_files = \
+ go/encoding/gob/decode.go \
+ go/encoding/gob/decoder.go \
+ go/encoding/gob/doc.go \
+ go/encoding/gob/encode.go \
+ go/encoding/gob/encoder.go \
+ go/encoding/gob/error.go \
+ go/encoding/gob/type.go
+
go_encoding_hex_files = \
go/encoding/hex/hex.go
+go_encoding_json_files = \
+ go/encoding/json/decode.go \
+ go/encoding/json/encode.go \
+ go/encoding/json/indent.go \
+ go/encoding/json/scanner.go \
+ go/encoding/json/stream.go \
+ go/encoding/json/tags.go
+
go_encoding_pem_files = \
go/encoding/pem/pem.go
+go_encoding_xml_files = \
+ go/encoding/xml/marshal.go \
+ go/encoding/xml/read.go \
+ go/encoding/xml/xml.go
+
go_exp_ebnf_files = \
go/exp/ebnf/ebnf.go \
go/exp/ebnf/parser.go
go_exp_ssh_files = \
go/exp/ssh/channel.go \
go/exp/ssh/client.go \
+ go/exp/ssh/client_auth.go \
go/exp/ssh/common.go \
go/exp/ssh/doc.go \
go/exp/ssh/messages.go \
go/exp/sql/driver/driver.go \
go/exp/sql/driver/types.go
-go_exp_template_html_files = \
- go/exp/template/html/attr.go \
- go/exp/template/html/clone.go \
- go/exp/template/html/content.go \
- go/exp/template/html/context.go \
- go/exp/template/html/css.go \
- go/exp/template/html/doc.go \
- go/exp/template/html/error.go \
- go/exp/template/html/escape.go \
- go/exp/template/html/html.go \
- go/exp/template/html/js.go \
- go/exp/template/html/transition.go \
- go/exp/template/html/url.go
-
go_go_ast_files = \
go/go/ast/ast.go \
go/go/ast/filter.go \
+ go/go/ast/import.go \
go/go/ast/print.go \
go/go/ast/resolve.go \
go/go/ast/scope.go \
go_hash_fnv_files = \
go/hash/fnv/fnv.go
-go_http_cgi_files = \
- go/http/cgi/child.go \
- go/http/cgi/host.go
-
-go_http_fcgi_files = \
- go/http/fcgi/child.go \
- go/http/fcgi/fcgi.go
-
-go_http_httptest_files = \
- go/http/httptest/recorder.go \
- go/http/httptest/server.go
-
-go_http_pprof_files = \
- go/http/pprof/pprof.go
+go_html_template_files = \
+ go/html/template/attr.go \
+ go/html/template/clone.go \
+ go/html/template/content.go \
+ go/html/template/context.go \
+ go/html/template/css.go \
+ go/html/template/doc.go \
+ go/html/template/error.go \
+ go/html/template/escape.go \
+ go/html/template/html.go \
+ go/html/template/js.go \
+ go/html/template/template.go \
+ go/html/template/transition.go \
+ go/html/template/url.go
go_image_bmp_files = \
go/image/bmp/reader.go
go/io/ioutil/ioutil.go \
go/io/ioutil/tempfile.go
+go_math_big_files = \
+ go/math/big/arith.go \
+ go/math/big/int.go \
+ go/math/big/nat.go \
+ go/math/big/rat.go
+
+go_math_cmplx_files = \
+ go/math/cmplx/abs.go \
+ go/math/cmplx/asin.go \
+ go/math/cmplx/conj.go \
+ go/math/cmplx/exp.go \
+ go/math/cmplx/isinf.go \
+ go/math/cmplx/isnan.go \
+ go/math/cmplx/log.go \
+ go/math/cmplx/phase.go \
+ go/math/cmplx/polar.go \
+ go/math/cmplx/pow.go \
+ go/math/cmplx/rect.go \
+ go/math/cmplx/sin.go \
+ go/math/cmplx/sqrt.go \
+ go/math/cmplx/tan.go
+
+go_math_rand_files = \
+ go/math/rand/exp.go \
+ go/math/rand/normal.go \
+ go/math/rand/rand.go \
+ go/math/rand/rng.go \
+ go/math/rand/zipf.go
+
go_mime_multipart_files = \
go/mime/multipart/formdata.go \
go/mime/multipart/multipart.go \
go_net_dict_files = \
go/net/dict/dict.go
+go_net_http_files = \
+ go/net/http/chunked.go \
+ go/net/http/client.go \
+ go/net/http/cookie.go \
+ go/net/http/filetransport.go \
+ go/net/http/fs.go \
+ go/net/http/header.go \
+ go/net/http/lex.go \
+ go/net/http/request.go \
+ go/net/http/response.go \
+ go/net/http/server.go \
+ go/net/http/sniff.go \
+ go/net/http/status.go \
+ go/net/http/transfer.go \
+ go/net/http/transport.go
+
+go_net_mail_files = \
+ go/net/mail/message.go
+
+go_net_smtp_files = \
+ go/net/smtp/auth.go \
+ go/net/smtp/smtp.go
+
go_net_textproto_files = \
go/net/textproto/header.go \
go/net/textproto/pipeline.go \
go/net/textproto/textproto.go \
go/net/textproto/writer.go
+go_net_url_files = \
+ go/net/url/url.go
+
+go_net_http_cgi_files = \
+ go/net/http/cgi/child.go \
+ go/net/http/cgi/host.go
+
+go_net_http_fcgi_files = \
+ go/net/http/fcgi/child.go \
+ go/net/http/fcgi/fcgi.go
+
+go_net_http_httptest_files = \
+ go/net/http/httptest/recorder.go \
+ go/net/http/httptest/server.go
+
+go_net_http_pprof_files = \
+ go/net/http/pprof/pprof.go
+
+go_net_http_httputil_files = \
+ go/net/http/httputil/chunked.go \
+ go/net/http/httputil/dump.go \
+ go/net/http/httputil/persist.go \
+ go/net/http/httputil/reverseproxy.go
+
go_old_netchan_files = \
go/old/netchan/common.go \
go/old/netchan/export.go \
go/old/template/format.go \
go/old/template/parse.go
+go_os_exec_files = \
+ go/os/exec/exec.go \
+ go/os/exec/lp_unix.go
+
go_os_user_files = \
go/os/user/user.go \
go/os/user/lookup_unix.go
go/regexp/syntax/regexp.go \
go/regexp/syntax/simplify.go
-go_rpc_jsonrpc_files = \
- go/rpc/jsonrpc/client.go \
- go/rpc/jsonrpc/server.go
+go_net_rpc_jsonrpc_files = \
+ go/net/rpc/jsonrpc/client.go \
+ go/net/rpc/jsonrpc/server.go
go_runtime_debug_files = \
go/runtime/debug/stack.go
go_runtime_pprof_files = \
go/runtime/pprof/pprof.go
-go_template_parse_files = \
- go/template/parse/lex.go \
- go/template/parse/node.go \
- go/template/parse/parse.go \
- go/template/parse/set.go
+go_text_tabwriter_files = \
+ go/text/tabwriter/tabwriter.go
+
+go_text_template_files = \
+ go/text/template/doc.go \
+ go/text/template/exec.go \
+ go/text/template/funcs.go \
+ go/text/template/helper.go \
+ go/text/template/parse.go \
+ go/text/template/set.go
+
+go_text_template_parse_files = \
+ go/text/template/parse/lex.go \
+ go/text/template/parse/node.go \
+ go/text/template/parse/parse.go \
+ go/text/template/parse/set.go
go_sync_atomic_files = \
go/sync/atomic/doc.go
go_testing_script_files = \
go/testing/script/script.go
+go_text_scanner_files = \
+ go/text/scanner/scanner.go
+
+go_unicode_utf16_files = \
+ go/unicode/utf16/utf16.go
+
+go_unicode_utf8_files = \
+ go/unicode/utf8/string.go \
+ go/unicode/utf8/utf8.go
+
@LIBGO_IS_RTEMS_FALSE@syscall_syscall_file = go/syscall/syscall_unix.go
# Define Syscall and Syscall6.
# os_lib_inotify_lo = os/inotify.lo
@LIBGO_IS_LINUX_TRUE@os_lib_inotify_lo =
libgo_go_objs = \
- asn1/asn1.lo \
- big/big.lo \
bufio/bufio.lo \
bytes/bytes.lo \
bytes/index.lo \
- cmath/cmath.lo \
crypto/crypto.lo \
- csv/csv.lo \
errors/errors.lo \
- exec/exec.lo \
expvar/expvar.lo \
flag/flag.lo \
fmt/fmt.lo \
- gob/gob.lo \
hash/hash.lo \
html/html.lo \
- http/http.lo \
image/image.lo \
io/io.lo \
- json/json.lo \
log/log.lo \
math/math.lo \
- mail/mail.lo \
- mime/mime.lo \
net/net.lo \
+ os/exec.lo \
os/os.lo \
patch/patch.lo \
path/path.lo \
- rand/rand.lo \
reflect/reflect.lo \
regexp/regexp.lo \
- rpc/rpc.lo \
runtime/runtime.lo \
- scanner/scanner.lo \
- smtp/smtp.lo \
sort/sort.lo \
strconv/strconv.lo \
strings/strings.lo \
sync/sync.lo \
- syslog/syslog.lo \
- syslog/syslog_c.lo \
- tabwriter/tabwriter.lo \
- template/template.lo \
time/time.lo \
unicode/unicode.lo \
- url/url.lo \
- utf16/utf16.lo \
- utf8/utf8.lo \
websocket/websocket.lo \
- xml/xml.lo \
archive/tar.lo \
archive/zip.lo \
compress/bzip2.lo \
debug/macho.lo \
debug/pe.lo \
encoding/ascii85.lo \
+ encoding/asn1.lo \
encoding/base32.lo \
encoding/base64.lo \
encoding/binary.lo \
+ encoding/csv.lo \
encoding/git85.lo \
+ encoding/gob.lo \
encoding/hex.lo \
+ encoding/json.lo \
encoding/pem.lo \
+ encoding/xml.lo \
exp/ebnf.lo \
exp/gui.lo \
exp/norm.lo \
exp/types.lo \
exp/gui/x11.lo \
exp/sql/driver.lo \
- exp/template/html.lo \
+ html/template.lo \
go/ast.lo \
go/build.lo \
go/doc.lo \
hash/crc32.lo \
hash/crc64.lo \
hash/fnv.lo \
- http/cgi.lo \
- http/fcgi.lo \
- http/httptest.lo \
- http/pprof.lo \
+ net/http/cgi.lo \
+ net/http/fcgi.lo \
+ net/http/httptest.lo \
+ net/http/httputil.lo \
+ net/http/pprof.lo \
image/bmp.lo \
image/color.lo \
image/draw.lo \
image/ycbcr.lo \
index/suffixarray.lo \
io/ioutil.lo \
+ log/syslog.lo \
+ log/syslog/syslog_c.lo \
+ math/big.lo \
+ math/cmplx.lo \
+ math/rand.lo \
+ mime/mime.lo \
mime/multipart.lo \
net/dict.lo \
+ net/http.lo \
+ net/mail.lo \
+ net/rpc.lo \
+ net/smtp.lo \
net/textproto.lo \
+ net/url.lo \
old/netchan.lo \
old/regexp.lo \
old/template.lo \
os/signal.lo \
path/filepath.lo \
regexp/syntax.lo \
- rpc/jsonrpc.lo \
+ net/rpc/jsonrpc.lo \
runtime/debug.lo \
runtime/pprof.lo \
sync/atomic.lo \
syscall/syscall.lo \
syscall/errno.lo \
syscall/wait.lo \
- template/parse.lo \
+ text/scanner.lo \
+ text/tabwriter.lo \
+ text/template.lo \
+ text/template/parse.lo \
testing/testing.lo \
testing/iotest.lo \
testing/quick.lo \
- testing/script.lo
+ testing/script.lo \
+ unicode/utf16.lo \
+ unicode/utf8.lo
libgo_la_SOURCES = $(runtime_files)
libgo_la_LDFLAGS = $(PTHREAD_CFLAGS) $(AM_LDFLAGS)
$(toolexeclibgoexp_DATA) \
$(toolexeclibgogo_DATA) \
$(toolexeclibgohash_DATA) \
- $(toolexeclibgohttp_DATA) \
$(toolexeclibgoimage_DATA) \
$(toolexeclibgoindex_DATA) \
$(toolexeclibgoio_DATA) \
+ $(toolexeclibgolog_DATA) \
+ $(toolexeclibgomath_DATA) \
$(toolexeclibgomime_DATA) \
$(toolexeclibgonet_DATA) \
+ $(toolexeclibgonethttp_DATA) \
$(toolexeclibgoos_DATA) \
$(toolexeclibgopath_DATA) \
$(toolexeclibgorpc_DATA) \
$(toolexeclibgoruntime_DATA) \
$(toolexeclibgosync_DATA) \
- $(toolexeclibgotesting_DATA)
+ $(toolexeclibgotesting_DATA) \
+ $(toolexeclibgotext_DATA) \
+ $(toolexeclibgotexttemplate_DATA) \
+ $(toolexeclibgounicode_DATA)
# How to build a .gox file from a .lo file.
# exp_inotify_check = exp/inotify/check
@LIBGO_IS_LINUX_TRUE@exp_inotify_check =
TEST_PACKAGES = \
- asn1/check \
- big/check \
bufio/check \
bytes/check \
- cmath/check \
- csv/check \
errors/check \
- exec/check \
expvar/check \
flag/check \
fmt/check \
- gob/check \
html/check \
- http/check \
image/check \
io/check \
- json/check \
log/check \
math/check \
- mail/check \
mime/check \
net/check \
os/check \
patch/check \
path/check \
- rand/check \
reflect/check \
regexp/check \
- rpc/check \
runtime/check \
- scanner/check \
- smtp/check \
sort/check \
strconv/check \
strings/check \
sync/check \
- syslog/check \
- tabwriter/check \
- template/check \
time/check \
unicode/check \
- url/check \
- utf16/check \
- utf8/check \
websocket/check \
- xml/check \
archive/tar/check \
archive/zip/check \
compress/bzip2/check \
debug/macho/check \
debug/pe/check \
encoding/ascii85/check \
+ encoding/asn1/check \
encoding/base32/check \
encoding/base64/check \
encoding/binary/check \
+ encoding/csv/check \
encoding/git85/check \
+ encoding/gob/check \
encoding/hex/check \
+ encoding/json/check \
encoding/pem/check \
+ encoding/xml/check \
exp/ebnf/check \
$(exp_inotify_check) \
exp/norm/check \
exp/sql/check \
exp/ssh/check \
exp/terminal/check \
- exp/template/html/check \
+ html/template/check \
go/ast/check \
$(go_build_check_omitted_since_it_calls_6g) \
go/parser/check \
hash/crc32/check \
hash/crc64/check \
hash/fnv/check \
- http/cgi/check \
- http/fcgi/check \
image/draw/check \
image/jpeg/check \
image/png/check \
image/ycbcr/check \
index/suffixarray/check \
io/ioutil/check \
+ log/syslog/check \
+ math/big/check \
+ math/cmplx/check \
+ math/rand/check \
mime/multipart/check \
+ net/http/check \
+ net/http/cgi/check \
+ net/http/fcgi/check \
+ net/http/httputil/check \
+ net/mail/check \
+ net/rpc/check \
+ net/smtp/check \
net/textproto/check \
+ net/url/check \
+ net/rpc/jsonrpc/check \
old/netchan/check \
old/regexp/check \
old/template/check \
+ os/exec/check \
os/user/check \
os/signal/check \
path/filepath/check \
regexp/syntax/check \
- rpc/jsonrpc/check \
sync/atomic/check \
- template/parse/check \
+ text/scanner/check \
+ text/tabwriter/check \
+ text/template/check \
+ text/template/parse/check \
testing/quick/check \
- testing/script/check
+ testing/script/check \
+ unicode/utf16/check \
+ unicode/utf8/check
MOSTLYCLEAN_FILES = libgo.head libgo.sum.sep libgo.log.sep
CLEANFILES = *.go *.gox goc2c *.c s-version libgo.sum libgo.log
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/thread-linux.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/thread-sema.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/thread.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/time.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/yield.Plo@am__quote@
.c.o:
test -n "$$files" || exit 0; \
echo " ( cd '$(DESTDIR)$(toolexeclibgoexpsqldir)' && rm -f" $$files ")"; \
cd "$(DESTDIR)$(toolexeclibgoexpsqldir)" && rm -f $$files
-install-toolexeclibgoexptemplateDATA: $(toolexeclibgoexptemplate_DATA)
- @$(NORMAL_INSTALL)
- test -z "$(toolexeclibgoexptemplatedir)" || $(MKDIR_P) "$(DESTDIR)$(toolexeclibgoexptemplatedir)"
- @list='$(toolexeclibgoexptemplate_DATA)'; test -n "$(toolexeclibgoexptemplatedir)" || list=; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(toolexeclibgoexptemplatedir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(toolexeclibgoexptemplatedir)" || exit $$?; \
- done
-
-uninstall-toolexeclibgoexptemplateDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(toolexeclibgoexptemplate_DATA)'; test -n "$(toolexeclibgoexptemplatedir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- test -n "$$files" || exit 0; \
- echo " ( cd '$(DESTDIR)$(toolexeclibgoexptemplatedir)' && rm -f" $$files ")"; \
- cd "$(DESTDIR)$(toolexeclibgoexptemplatedir)" && rm -f $$files
install-toolexeclibgogoDATA: $(toolexeclibgogo_DATA)
@$(NORMAL_INSTALL)
test -z "$(toolexeclibgogodir)" || $(MKDIR_P) "$(DESTDIR)$(toolexeclibgogodir)"
test -n "$$files" || exit 0; \
echo " ( cd '$(DESTDIR)$(toolexeclibgohashdir)' && rm -f" $$files ")"; \
cd "$(DESTDIR)$(toolexeclibgohashdir)" && rm -f $$files
-install-toolexeclibgohttpDATA: $(toolexeclibgohttp_DATA)
+install-toolexeclibgohtmlDATA: $(toolexeclibgohtml_DATA)
@$(NORMAL_INSTALL)
- test -z "$(toolexeclibgohttpdir)" || $(MKDIR_P) "$(DESTDIR)$(toolexeclibgohttpdir)"
- @list='$(toolexeclibgohttp_DATA)'; test -n "$(toolexeclibgohttpdir)" || list=; \
+ test -z "$(toolexeclibgohtmldir)" || $(MKDIR_P) "$(DESTDIR)$(toolexeclibgohtmldir)"
+ @list='$(toolexeclibgohtml_DATA)'; test -n "$(toolexeclibgohtmldir)" || list=; \
for p in $$list; do \
if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
echo "$$d$$p"; \
done | $(am__base_list) | \
while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(toolexeclibgohttpdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(toolexeclibgohttpdir)" || exit $$?; \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(toolexeclibgohtmldir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(toolexeclibgohtmldir)" || exit $$?; \
done
-uninstall-toolexeclibgohttpDATA:
+uninstall-toolexeclibgohtmlDATA:
@$(NORMAL_UNINSTALL)
- @list='$(toolexeclibgohttp_DATA)'; test -n "$(toolexeclibgohttpdir)" || list=; \
+ @list='$(toolexeclibgohtml_DATA)'; test -n "$(toolexeclibgohtmldir)" || list=; \
files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
test -n "$$files" || exit 0; \
- echo " ( cd '$(DESTDIR)$(toolexeclibgohttpdir)' && rm -f" $$files ")"; \
- cd "$(DESTDIR)$(toolexeclibgohttpdir)" && rm -f $$files
+ echo " ( cd '$(DESTDIR)$(toolexeclibgohtmldir)' && rm -f" $$files ")"; \
+ cd "$(DESTDIR)$(toolexeclibgohtmldir)" && rm -f $$files
install-toolexeclibgoimageDATA: $(toolexeclibgoimage_DATA)
@$(NORMAL_INSTALL)
test -z "$(toolexeclibgoimagedir)" || $(MKDIR_P) "$(DESTDIR)$(toolexeclibgoimagedir)"
test -n "$$files" || exit 0; \
echo " ( cd '$(DESTDIR)$(toolexeclibgoiodir)' && rm -f" $$files ")"; \
cd "$(DESTDIR)$(toolexeclibgoiodir)" && rm -f $$files
+install-toolexeclibgologDATA: $(toolexeclibgolog_DATA)
+ @$(NORMAL_INSTALL)
+ test -z "$(toolexeclibgologdir)" || $(MKDIR_P) "$(DESTDIR)$(toolexeclibgologdir)"
+ @list='$(toolexeclibgolog_DATA)'; test -n "$(toolexeclibgologdir)" || list=; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(toolexeclibgologdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(toolexeclibgologdir)" || exit $$?; \
+ done
+
+uninstall-toolexeclibgologDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(toolexeclibgolog_DATA)'; test -n "$(toolexeclibgologdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ test -n "$$files" || exit 0; \
+ echo " ( cd '$(DESTDIR)$(toolexeclibgologdir)' && rm -f" $$files ")"; \
+ cd "$(DESTDIR)$(toolexeclibgologdir)" && rm -f $$files
+install-toolexeclibgomathDATA: $(toolexeclibgomath_DATA)
+ @$(NORMAL_INSTALL)
+ test -z "$(toolexeclibgomathdir)" || $(MKDIR_P) "$(DESTDIR)$(toolexeclibgomathdir)"
+ @list='$(toolexeclibgomath_DATA)'; test -n "$(toolexeclibgomathdir)" || list=; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(toolexeclibgomathdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(toolexeclibgomathdir)" || exit $$?; \
+ done
+
+uninstall-toolexeclibgomathDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(toolexeclibgomath_DATA)'; test -n "$(toolexeclibgomathdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ test -n "$$files" || exit 0; \
+ echo " ( cd '$(DESTDIR)$(toolexeclibgomathdir)' && rm -f" $$files ")"; \
+ cd "$(DESTDIR)$(toolexeclibgomathdir)" && rm -f $$files
install-toolexeclibgomimeDATA: $(toolexeclibgomime_DATA)
@$(NORMAL_INSTALL)
test -z "$(toolexeclibgomimedir)" || $(MKDIR_P) "$(DESTDIR)$(toolexeclibgomimedir)"
test -n "$$files" || exit 0; \
echo " ( cd '$(DESTDIR)$(toolexeclibgonetdir)' && rm -f" $$files ")"; \
cd "$(DESTDIR)$(toolexeclibgonetdir)" && rm -f $$files
+install-toolexeclibgonethttpDATA: $(toolexeclibgonethttp_DATA)
+ @$(NORMAL_INSTALL)
+ test -z "$(toolexeclibgonethttpdir)" || $(MKDIR_P) "$(DESTDIR)$(toolexeclibgonethttpdir)"
+ @list='$(toolexeclibgonethttp_DATA)'; test -n "$(toolexeclibgonethttpdir)" || list=; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(toolexeclibgonethttpdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(toolexeclibgonethttpdir)" || exit $$?; \
+ done
+
+uninstall-toolexeclibgonethttpDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(toolexeclibgonethttp_DATA)'; test -n "$(toolexeclibgonethttpdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ test -n "$$files" || exit 0; \
+ echo " ( cd '$(DESTDIR)$(toolexeclibgonethttpdir)' && rm -f" $$files ")"; \
+ cd "$(DESTDIR)$(toolexeclibgonethttpdir)" && rm -f $$files
+install-toolexeclibgonetrpcDATA: $(toolexeclibgonetrpc_DATA)
+ @$(NORMAL_INSTALL)
+ test -z "$(toolexeclibgonetrpcdir)" || $(MKDIR_P) "$(DESTDIR)$(toolexeclibgonetrpcdir)"
+ @list='$(toolexeclibgonetrpc_DATA)'; test -n "$(toolexeclibgonetrpcdir)" || list=; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(toolexeclibgonetrpcdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(toolexeclibgonetrpcdir)" || exit $$?; \
+ done
+
+uninstall-toolexeclibgonetrpcDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(toolexeclibgonetrpc_DATA)'; test -n "$(toolexeclibgonetrpcdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ test -n "$$files" || exit 0; \
+ echo " ( cd '$(DESTDIR)$(toolexeclibgonetrpcdir)' && rm -f" $$files ")"; \
+ cd "$(DESTDIR)$(toolexeclibgonetrpcdir)" && rm -f $$files
install-toolexeclibgooldDATA: $(toolexeclibgoold_DATA)
@$(NORMAL_INSTALL)
test -z "$(toolexeclibgoolddir)" || $(MKDIR_P) "$(DESTDIR)$(toolexeclibgoolddir)"
test -n "$$files" || exit 0; \
echo " ( cd '$(DESTDIR)$(toolexeclibgoregexpdir)' && rm -f" $$files ")"; \
cd "$(DESTDIR)$(toolexeclibgoregexpdir)" && rm -f $$files
-install-toolexeclibgorpcDATA: $(toolexeclibgorpc_DATA)
- @$(NORMAL_INSTALL)
- test -z "$(toolexeclibgorpcdir)" || $(MKDIR_P) "$(DESTDIR)$(toolexeclibgorpcdir)"
- @list='$(toolexeclibgorpc_DATA)'; test -n "$(toolexeclibgorpcdir)" || list=; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(toolexeclibgorpcdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(toolexeclibgorpcdir)" || exit $$?; \
- done
-
-uninstall-toolexeclibgorpcDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(toolexeclibgorpc_DATA)'; test -n "$(toolexeclibgorpcdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- test -n "$$files" || exit 0; \
- echo " ( cd '$(DESTDIR)$(toolexeclibgorpcdir)' && rm -f" $$files ")"; \
- cd "$(DESTDIR)$(toolexeclibgorpcdir)" && rm -f $$files
install-toolexeclibgoruntimeDATA: $(toolexeclibgoruntime_DATA)
@$(NORMAL_INSTALL)
test -z "$(toolexeclibgoruntimedir)" || $(MKDIR_P) "$(DESTDIR)$(toolexeclibgoruntimedir)"
test -n "$$files" || exit 0; \
echo " ( cd '$(DESTDIR)$(toolexeclibgosyncdir)' && rm -f" $$files ")"; \
cd "$(DESTDIR)$(toolexeclibgosyncdir)" && rm -f $$files
-install-toolexeclibgotemplateDATA: $(toolexeclibgotemplate_DATA)
+install-toolexeclibgotestingDATA: $(toolexeclibgotesting_DATA)
@$(NORMAL_INSTALL)
- test -z "$(toolexeclibgotemplatedir)" || $(MKDIR_P) "$(DESTDIR)$(toolexeclibgotemplatedir)"
- @list='$(toolexeclibgotemplate_DATA)'; test -n "$(toolexeclibgotemplatedir)" || list=; \
+ test -z "$(toolexeclibgotestingdir)" || $(MKDIR_P) "$(DESTDIR)$(toolexeclibgotestingdir)"
+ @list='$(toolexeclibgotesting_DATA)'; test -n "$(toolexeclibgotestingdir)" || list=; \
for p in $$list; do \
if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
echo "$$d$$p"; \
done | $(am__base_list) | \
while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(toolexeclibgotemplatedir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(toolexeclibgotemplatedir)" || exit $$?; \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(toolexeclibgotestingdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(toolexeclibgotestingdir)" || exit $$?; \
done
-uninstall-toolexeclibgotemplateDATA:
+uninstall-toolexeclibgotestingDATA:
@$(NORMAL_UNINSTALL)
- @list='$(toolexeclibgotemplate_DATA)'; test -n "$(toolexeclibgotemplatedir)" || list=; \
+ @list='$(toolexeclibgotesting_DATA)'; test -n "$(toolexeclibgotestingdir)" || list=; \
files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
test -n "$$files" || exit 0; \
- echo " ( cd '$(DESTDIR)$(toolexeclibgotemplatedir)' && rm -f" $$files ")"; \
- cd "$(DESTDIR)$(toolexeclibgotemplatedir)" && rm -f $$files
-install-toolexeclibgotestingDATA: $(toolexeclibgotesting_DATA)
+ echo " ( cd '$(DESTDIR)$(toolexeclibgotestingdir)' && rm -f" $$files ")"; \
+ cd "$(DESTDIR)$(toolexeclibgotestingdir)" && rm -f $$files
+install-toolexeclibgotextDATA: $(toolexeclibgotext_DATA)
@$(NORMAL_INSTALL)
- test -z "$(toolexeclibgotestingdir)" || $(MKDIR_P) "$(DESTDIR)$(toolexeclibgotestingdir)"
- @list='$(toolexeclibgotesting_DATA)'; test -n "$(toolexeclibgotestingdir)" || list=; \
+ test -z "$(toolexeclibgotextdir)" || $(MKDIR_P) "$(DESTDIR)$(toolexeclibgotextdir)"
+ @list='$(toolexeclibgotext_DATA)'; test -n "$(toolexeclibgotextdir)" || list=; \
for p in $$list; do \
if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
echo "$$d$$p"; \
done | $(am__base_list) | \
while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(toolexeclibgotestingdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(toolexeclibgotestingdir)" || exit $$?; \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(toolexeclibgotextdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(toolexeclibgotextdir)" || exit $$?; \
done
-uninstall-toolexeclibgotestingDATA:
+uninstall-toolexeclibgotextDATA:
@$(NORMAL_UNINSTALL)
- @list='$(toolexeclibgotesting_DATA)'; test -n "$(toolexeclibgotestingdir)" || list=; \
+ @list='$(toolexeclibgotext_DATA)'; test -n "$(toolexeclibgotextdir)" || list=; \
files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
test -n "$$files" || exit 0; \
- echo " ( cd '$(DESTDIR)$(toolexeclibgotestingdir)' && rm -f" $$files ")"; \
- cd "$(DESTDIR)$(toolexeclibgotestingdir)" && rm -f $$files
+ echo " ( cd '$(DESTDIR)$(toolexeclibgotextdir)' && rm -f" $$files ")"; \
+ cd "$(DESTDIR)$(toolexeclibgotextdir)" && rm -f $$files
+install-toolexeclibgotexttemplateDATA: $(toolexeclibgotexttemplate_DATA)
+ @$(NORMAL_INSTALL)
+ test -z "$(toolexeclibgotexttemplatedir)" || $(MKDIR_P) "$(DESTDIR)$(toolexeclibgotexttemplatedir)"
+ @list='$(toolexeclibgotexttemplate_DATA)'; test -n "$(toolexeclibgotexttemplatedir)" || list=; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(toolexeclibgotexttemplatedir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(toolexeclibgotexttemplatedir)" || exit $$?; \
+ done
+
+uninstall-toolexeclibgotexttemplateDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(toolexeclibgotexttemplate_DATA)'; test -n "$(toolexeclibgotexttemplatedir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ test -n "$$files" || exit 0; \
+ echo " ( cd '$(DESTDIR)$(toolexeclibgotexttemplatedir)' && rm -f" $$files ")"; \
+ cd "$(DESTDIR)$(toolexeclibgotexttemplatedir)" && rm -f $$files
+install-toolexeclibgounicodeDATA: $(toolexeclibgounicode_DATA)
+ @$(NORMAL_INSTALL)
+ test -z "$(toolexeclibgounicodedir)" || $(MKDIR_P) "$(DESTDIR)$(toolexeclibgounicodedir)"
+ @list='$(toolexeclibgounicode_DATA)'; test -n "$(toolexeclibgounicodedir)" || list=; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(toolexeclibgounicodedir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(toolexeclibgounicodedir)" || exit $$?; \
+ done
+
+uninstall-toolexeclibgounicodeDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(toolexeclibgounicode_DATA)'; test -n "$(toolexeclibgounicodedir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ test -n "$$files" || exit 0; \
+ echo " ( cd '$(DESTDIR)$(toolexeclibgounicodedir)' && rm -f" $$files ")"; \
+ cd "$(DESTDIR)$(toolexeclibgounicodedir)" && rm -f $$files
# This directory's subdirectories are mostly independent; you can cd
# into them and run `make' without going through this Makefile.
config.h
installdirs: installdirs-recursive
installdirs-am:
- for dir in "$(DESTDIR)$(toolexeclibdir)" "$(DESTDIR)$(toolexeclibdir)" "$(DESTDIR)$(toolexeclibgodir)" "$(DESTDIR)$(toolexeclibgoarchivedir)" "$(DESTDIR)$(toolexeclibgocompressdir)" "$(DESTDIR)$(toolexeclibgocontainerdir)" "$(DESTDIR)$(toolexeclibgocryptodir)" "$(DESTDIR)$(toolexeclibgocryptoopenpgpdir)" "$(DESTDIR)$(toolexeclibgocryptox509dir)" "$(DESTDIR)$(toolexeclibgodebugdir)" "$(DESTDIR)$(toolexeclibgoencodingdir)" "$(DESTDIR)$(toolexeclibgoexpdir)" "$(DESTDIR)$(toolexeclibgoexpguidir)" "$(DESTDIR)$(toolexeclibgoexpsqldir)" "$(DESTDIR)$(toolexeclibgoexptemplatedir)" "$(DESTDIR)$(toolexeclibgogodir)" "$(DESTDIR)$(toolexeclibgohashdir)" "$(DESTDIR)$(toolexeclibgohttpdir)" "$(DESTDIR)$(toolexeclibgoimagedir)" "$(DESTDIR)$(toolexeclibgoindexdir)" "$(DESTDIR)$(toolexeclibgoiodir)" "$(DESTDIR)$(toolexeclibgomimedir)" "$(DESTDIR)$(toolexeclibgonetdir)" "$(DESTDIR)$(toolexeclibgoolddir)" "$(DESTDIR)$(toolexeclibgoosdir)" "$(DESTDIR)$(toolexeclibgopathdir)" "$(DESTDIR)$(toolexeclibgoregexpdir)" "$(DESTDIR)$(toolexeclibgorpcdir)" "$(DESTDIR)$(toolexeclibgoruntimedir)" "$(DESTDIR)$(toolexeclibgosyncdir)" "$(DESTDIR)$(toolexeclibgotemplatedir)" "$(DESTDIR)$(toolexeclibgotestingdir)"; do \
+ for dir in "$(DESTDIR)$(toolexeclibdir)" "$(DESTDIR)$(toolexeclibdir)" "$(DESTDIR)$(toolexeclibgodir)" "$(DESTDIR)$(toolexeclibgoarchivedir)" "$(DESTDIR)$(toolexeclibgocompressdir)" "$(DESTDIR)$(toolexeclibgocontainerdir)" "$(DESTDIR)$(toolexeclibgocryptodir)" "$(DESTDIR)$(toolexeclibgocryptoopenpgpdir)" "$(DESTDIR)$(toolexeclibgocryptox509dir)" "$(DESTDIR)$(toolexeclibgodebugdir)" "$(DESTDIR)$(toolexeclibgoencodingdir)" "$(DESTDIR)$(toolexeclibgoexpdir)" "$(DESTDIR)$(toolexeclibgoexpguidir)" "$(DESTDIR)$(toolexeclibgoexpsqldir)" "$(DESTDIR)$(toolexeclibgogodir)" "$(DESTDIR)$(toolexeclibgohashdir)" "$(DESTDIR)$(toolexeclibgohtmldir)" "$(DESTDIR)$(toolexeclibgoimagedir)" "$(DESTDIR)$(toolexeclibgoindexdir)" "$(DESTDIR)$(toolexeclibgoiodir)" "$(DESTDIR)$(toolexeclibgologdir)" "$(DESTDIR)$(toolexeclibgomathdir)" "$(DESTDIR)$(toolexeclibgomimedir)" "$(DESTDIR)$(toolexeclibgonetdir)" "$(DESTDIR)$(toolexeclibgonethttpdir)" "$(DESTDIR)$(toolexeclibgonetrpcdir)" "$(DESTDIR)$(toolexeclibgoolddir)" "$(DESTDIR)$(toolexeclibgoosdir)" "$(DESTDIR)$(toolexeclibgopathdir)" "$(DESTDIR)$(toolexeclibgoregexpdir)" "$(DESTDIR)$(toolexeclibgoruntimedir)" "$(DESTDIR)$(toolexeclibgosyncdir)" "$(DESTDIR)$(toolexeclibgotestingdir)" "$(DESTDIR)$(toolexeclibgotextdir)" "$(DESTDIR)$(toolexeclibgotexttemplatedir)" "$(DESTDIR)$(toolexeclibgounicodedir)"; do \
test -z "$$dir" || $(MKDIR_P) "$$dir"; \
done
install: install-recursive
install-toolexeclibgodebugDATA \
install-toolexeclibgoencodingDATA install-toolexeclibgoexpDATA \
install-toolexeclibgoexpguiDATA \
- install-toolexeclibgoexpsqlDATA \
- install-toolexeclibgoexptemplateDATA \
- install-toolexeclibgogoDATA install-toolexeclibgohashDATA \
- install-toolexeclibgohttpDATA install-toolexeclibgoimageDATA \
- install-toolexeclibgoindexDATA install-toolexeclibgoioDATA \
- install-toolexeclibgomimeDATA install-toolexeclibgonetDATA \
- install-toolexeclibgooldDATA install-toolexeclibgoosDATA \
- install-toolexeclibgopathDATA install-toolexeclibgoregexpDATA \
- install-toolexeclibgorpcDATA install-toolexeclibgoruntimeDATA \
- install-toolexeclibgosyncDATA \
- install-toolexeclibgotemplateDATA \
- install-toolexeclibgotestingDATA
+ install-toolexeclibgoexpsqlDATA install-toolexeclibgogoDATA \
+ install-toolexeclibgohashDATA install-toolexeclibgohtmlDATA \
+ install-toolexeclibgoimageDATA install-toolexeclibgoindexDATA \
+ install-toolexeclibgoioDATA install-toolexeclibgologDATA \
+ install-toolexeclibgomathDATA install-toolexeclibgomimeDATA \
+ install-toolexeclibgonetDATA install-toolexeclibgonethttpDATA \
+ install-toolexeclibgonetrpcDATA install-toolexeclibgooldDATA \
+ install-toolexeclibgoosDATA install-toolexeclibgopathDATA \
+ install-toolexeclibgoregexpDATA \
+ install-toolexeclibgoruntimeDATA install-toolexeclibgosyncDATA \
+ install-toolexeclibgotestingDATA install-toolexeclibgotextDATA \
+ install-toolexeclibgotexttemplateDATA \
+ install-toolexeclibgounicodeDATA
install-html: install-html-recursive
uninstall-toolexeclibgoexpDATA \
uninstall-toolexeclibgoexpguiDATA \
uninstall-toolexeclibgoexpsqlDATA \
- uninstall-toolexeclibgoexptemplateDATA \
uninstall-toolexeclibgogoDATA uninstall-toolexeclibgohashDATA \
- uninstall-toolexeclibgohttpDATA \
+ uninstall-toolexeclibgohtmlDATA \
uninstall-toolexeclibgoimageDATA \
uninstall-toolexeclibgoindexDATA uninstall-toolexeclibgoioDATA \
+ uninstall-toolexeclibgologDATA uninstall-toolexeclibgomathDATA \
uninstall-toolexeclibgomimeDATA uninstall-toolexeclibgonetDATA \
+ uninstall-toolexeclibgonethttpDATA \
+ uninstall-toolexeclibgonetrpcDATA \
uninstall-toolexeclibgooldDATA uninstall-toolexeclibgoosDATA \
uninstall-toolexeclibgopathDATA \
uninstall-toolexeclibgoregexpDATA \
- uninstall-toolexeclibgorpcDATA \
uninstall-toolexeclibgoruntimeDATA \
uninstall-toolexeclibgosyncDATA \
- uninstall-toolexeclibgotemplateDATA \
- uninstall-toolexeclibgotestingDATA
+ uninstall-toolexeclibgotestingDATA \
+ uninstall-toolexeclibgotextDATA \
+ uninstall-toolexeclibgotexttemplateDATA \
+ uninstall-toolexeclibgounicodeDATA
.MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) all all-multi \
clean-multi ctags-recursive distclean-multi install-am \
install-toolexeclibgodebugDATA \
install-toolexeclibgoencodingDATA install-toolexeclibgoexpDATA \
install-toolexeclibgoexpguiDATA \
- install-toolexeclibgoexpsqlDATA \
- install-toolexeclibgoexptemplateDATA \
- install-toolexeclibgogoDATA install-toolexeclibgohashDATA \
- install-toolexeclibgohttpDATA install-toolexeclibgoimageDATA \
- install-toolexeclibgoindexDATA install-toolexeclibgoioDATA \
- install-toolexeclibgomimeDATA install-toolexeclibgonetDATA \
- install-toolexeclibgooldDATA install-toolexeclibgoosDATA \
- install-toolexeclibgopathDATA install-toolexeclibgoregexpDATA \
- install-toolexeclibgorpcDATA install-toolexeclibgoruntimeDATA \
- install-toolexeclibgosyncDATA \
- install-toolexeclibgotemplateDATA \
- install-toolexeclibgotestingDATA installcheck installcheck-am \
+ install-toolexeclibgoexpsqlDATA install-toolexeclibgogoDATA \
+ install-toolexeclibgohashDATA install-toolexeclibgohtmlDATA \
+ install-toolexeclibgoimageDATA install-toolexeclibgoindexDATA \
+ install-toolexeclibgoioDATA install-toolexeclibgologDATA \
+ install-toolexeclibgomathDATA install-toolexeclibgomimeDATA \
+ install-toolexeclibgonetDATA install-toolexeclibgonethttpDATA \
+ install-toolexeclibgonetrpcDATA install-toolexeclibgooldDATA \
+ install-toolexeclibgoosDATA install-toolexeclibgopathDATA \
+ install-toolexeclibgoregexpDATA \
+ install-toolexeclibgoruntimeDATA install-toolexeclibgosyncDATA \
+ install-toolexeclibgotestingDATA install-toolexeclibgotextDATA \
+ install-toolexeclibgotexttemplateDATA \
+ install-toolexeclibgounicodeDATA installcheck installcheck-am \
installdirs installdirs-am maintainer-clean \
maintainer-clean-generic maintainer-clean-multi mostlyclean \
mostlyclean-compile mostlyclean-generic mostlyclean-libtool \
uninstall-toolexeclibgoexpDATA \
uninstall-toolexeclibgoexpguiDATA \
uninstall-toolexeclibgoexpsqlDATA \
- uninstall-toolexeclibgoexptemplateDATA \
uninstall-toolexeclibgogoDATA uninstall-toolexeclibgohashDATA \
- uninstall-toolexeclibgohttpDATA \
+ uninstall-toolexeclibgohtmlDATA \
uninstall-toolexeclibgoimageDATA \
uninstall-toolexeclibgoindexDATA uninstall-toolexeclibgoioDATA \
+ uninstall-toolexeclibgologDATA uninstall-toolexeclibgomathDATA \
uninstall-toolexeclibgomimeDATA uninstall-toolexeclibgonetDATA \
+ uninstall-toolexeclibgonethttpDATA \
+ uninstall-toolexeclibgonetrpcDATA \
uninstall-toolexeclibgooldDATA uninstall-toolexeclibgoosDATA \
uninstall-toolexeclibgopathDATA \
uninstall-toolexeclibgoregexpDATA \
- uninstall-toolexeclibgorpcDATA \
uninstall-toolexeclibgoruntimeDATA \
uninstall-toolexeclibgosyncDATA \
- uninstall-toolexeclibgotemplateDATA \
- uninstall-toolexeclibgotestingDATA
+ uninstall-toolexeclibgotestingDATA \
+ uninstall-toolexeclibgotextDATA \
+ uninstall-toolexeclibgotexttemplateDATA \
+ uninstall-toolexeclibgounicodeDATA
goc2c.$(OBJEXT): runtime/goc2c.c
./goc2c --gcc --go-prefix libgo_runtime $< > $@.tmp
mv -f $@.tmp $@
+time.c: $(srcdir)/runtime/time.goc goc2c
+ ./goc2c --gcc --go-prefix libgo_time $< > $@.tmp
+ mv -f $@.tmp $@
+
%.c: $(srcdir)/runtime/%.goc goc2c
./goc2c --gcc $< > $@.tmp
mv -f $@.tmp $@
$(SHELL) $(srcdir)/../move-if-change epoll.go.tmp epoll.go
$(STAMP) $@
-@go_include@ asn1/asn1.lo.dep
-asn1/asn1.lo.dep: $(go_asn1_files)
- $(BUILDDEPS)
-asn1/asn1.lo: $(go_asn1_files)
- $(BUILDPACKAGE)
-asn1/check: $(CHECK_DEPS)
- @$(CHECK)
-.PHONY: asn1/check
-
-@go_include@ big/big.lo.dep
-big/big.lo.dep: $(go_big_files)
- $(BUILDDEPS)
-big/big.lo: $(go_big_files)
- $(BUILDPACKAGE)
-big/check: $(CHECK_DEPS)
- @$(CHECK)
-.PHONY: big/check
-
@go_include@ bufio/bufio.lo.dep
bufio/bufio.lo.dep: $(go_bufio_files)
$(BUILDDEPS)
@$(CHECK)
.PHONY: bytes/check
-@go_include@ cmath/cmath.lo.dep
-cmath/cmath.lo.dep: $(go_cmath_files)
- $(BUILDDEPS)
-cmath/cmath.lo: $(go_cmath_files)
- $(BUILDPACKAGE)
-cmath/check: $(CHECK_DEPS)
- @$(CHECK)
-.PHONY: cmath/check
-
@go_include@ crypto/crypto.lo.dep
crypto/crypto.lo.dep: $(go_crypto_files)
$(BUILDDEPS)
@$(CHECK)
.PHONY: crypto/check
-@go_include@ csv/csv.lo.dep
-csv/csv.lo.dep: $(go_csv_files)
- $(BUILDDEPS)
-csv/csv.lo: $(go_csv_files)
- $(BUILDPACKAGE)
-csv/check: $(CHECK_DEPS)
- @$(CHECK)
-.PHONY: csv/check
-
@go_include@ errors/errors.lo.dep
errors/errors.lo.dep: $(go_errors_files)
$(BUILDDEPS)
@$(CHECK)
.PHONY: errors/check
-@go_include@ exec/exec.lo.dep
-exec/exec.lo.dep: $(go_exec_files)
- $(BUILDDEPS)
-exec/exec.lo: $(go_exec_files)
- $(BUILDPACKAGE)
-exec/check: $(CHECK_DEPS)
- @$(CHECK)
-.PHONY: exec/check
-
@go_include@ expvar/expvar.lo.dep
expvar/expvar.lo.dep: $(go_expvar_files)
$(BUILDDEPS)
@$(CHECK)
.PHONY: fmt/check
-@go_include@ gob/gob.lo.dep
-gob/gob.lo.dep: $(go_gob_files)
- $(BUILDDEPS)
-gob/gob.lo: $(go_gob_files)
- $(BUILDPACKAGE)
-gob/check: $(CHECK_DEPS)
- @$(CHECK)
-.PHONY: gob/check
-
@go_include@ hash/hash.lo.dep
hash/hash.lo.dep: $(go_hash_files)
$(BUILDDEPS)
@$(CHECK)
.PHONY: html/check
-@go_include@ http/http.lo.dep
-http/http.lo.dep: $(go_http_files)
- $(BUILDDEPS)
-http/http.lo: $(go_http_files)
- $(BUILDPACKAGE)
-http/check: $(CHECK_DEPS)
- @$(CHECK)
-.PHONY: http/check
-
@go_include@ image/image.lo.dep
image/image.lo.dep: $(go_image_files)
$(BUILDDEPS)
@$(CHECK)
.PHONY: io/check
-@go_include@ json/json.lo.dep
-json/json.lo.dep: $(go_json_files)
- $(BUILDDEPS)
-json/json.lo: $(go_json_files)
- $(BUILDPACKAGE)
-json/check: $(CHECK_DEPS)
- @$(CHECK)
-.PHONY: json/check
-
@go_include@ log/log.lo.dep
log/log.lo.dep: $(go_log_files)
$(BUILDDEPS)
@$(CHECK)
.PHONY: math/check
-@go_include@ mail/mail.lo.dep
-mail/mail.lo.dep: $(go_mail_files)
- $(BUILDDEPS)
-mail/mail.lo: $(go_mail_files)
- $(BUILDPACKAGE)
-mail/check: $(CHECK_DEPS)
- @$(CHECK)
-.PHONY: mail/check
-
@go_include@ mime/mime.lo.dep
mime/mime.lo.dep: $(go_mime_files)
$(BUILDDEPS)
@$(CHECK)
.PHONY: path/check
-@go_include@ rand/rand.lo.dep
-rand/rand.lo.dep: $(go_rand_files)
- $(BUILDDEPS)
-rand/rand.lo: $(go_rand_files)
- $(BUILDPACKAGE)
-rand/check: $(CHECK_DEPS)
- @$(CHECK)
-.PHONY: rand/check
-
@go_include@ reflect/reflect.lo.dep
reflect/reflect.lo.dep: $(go_reflect_files)
$(BUILDDEPS)
@$(CHECK)
.PHONY: regexp/check
-@go_include@ rpc/rpc.lo.dep
-rpc/rpc.lo.dep: $(go_rpc_files)
- $(BUILDDEPS)
-rpc/rpc.lo: $(go_rpc_files)
- $(BUILDPACKAGE)
-rpc/check: $(CHECK_DEPS)
- @$(CHECK)
-.PHONY: rpc/check
-
@go_include@ runtime/runtime.lo.dep
runtime/runtime.lo.dep: $(go_runtime_files)
$(BUILDDEPS)
@$(CHECK)
.PHONY: runtime/check
-@go_include@ scanner/scanner.lo.dep
-scanner/scanner.lo.dep: $(go_scanner_files)
- $(BUILDDEPS)
-scanner/scanner.lo: $(go_scanner_files)
- $(BUILDPACKAGE)
-scanner/check: $(CHECK_DEPS)
- @$(CHECK)
-.PHONY: scanner/check
-
-@go_include@ smtp/smtp.lo.dep
-smtp/smtp.lo.dep: $(go_smtp_files)
+@go_include@ text/scanner.lo.dep
+text/scanner.lo.dep: $(go_text_scanner_files)
$(BUILDDEPS)
-smtp/smtp.lo: $(go_smtp_files)
+text/scanner.lo: $(go_text_scanner_files)
$(BUILDPACKAGE)
-smtp/check: $(CHECK_DEPS)
+text/scanner/check: $(CHECK_DEPS)
+ @$(MKDIR_P) text/scanner
@$(CHECK)
-.PHONY: smtp/check
+.PHONY: text/scanner/check
@go_include@ sort/sort.lo.dep
sort/sort.lo.dep: $(go_sort_files)
@$(CHECK)
.PHONY: sync/check
-@go_include@ syslog/syslog.lo.dep
-syslog/syslog.lo.dep: $(go_syslog_files)
- $(BUILDDEPS)
-syslog/syslog.lo: $(go_syslog_files)
- $(BUILDPACKAGE)
-syslog/syslog_c.lo: $(go_syslog_c_files) syslog/syslog.lo
- $(LTCOMPILE) -c -o $@ $(srcdir)/go/syslog/syslog_c.c
-syslog/check: $(CHECK_DEPS)
- @$(CHECK)
-.PHONY: syslog/check
-
-@go_include@ tabwriter/tabwriter.lo.dep
-tabwriter/tabwriter.lo.dep: $(go_tabwriter_files)
- $(BUILDDEPS)
-tabwriter/tabwriter.lo: $(go_tabwriter_files)
- $(BUILDPACKAGE)
-tabwriter/check: $(CHECK_DEPS)
- @$(CHECK)
-.PHONY: tabwriter/check
-
-@go_include@ template/template.lo.dep
-template/template.lo.dep: $(go_template_files)
- $(BUILDDEPS)
-template/template.lo: $(go_template_files)
- $(BUILDPACKAGE)
-template/check: $(CHECK_DEPS)
- @$(CHECK)
-.PHONY: template/check
-
@go_include@ testing/testing.lo.dep
testing/testing.lo.dep: $(go_testing_files)
$(BUILDDEPS)
@$(CHECK)
.PHONY: unicode/check
-@go_include@ url/url.lo.dep
-url/url.lo.dep: $(go_url_files)
- $(BUILDDEPS)
-url/url.lo: $(go_url_files)
- $(BUILDPACKAGE)
-url/check: $(CHECK_DEPS)
- @$(CHECK)
-.PHONY: url/check
-
-@go_include@ utf16/utf16.lo.dep
-utf16/utf16.lo.dep: $(go_utf16_files)
- $(BUILDDEPS)
-utf16/utf16.lo: $(go_utf16_files)
- $(BUILDPACKAGE)
-utf16/check: $(CHECK_DEPS)
- @$(CHECK)
-.PHONY: utf16/check
-
-@go_include@ utf8/utf8.lo.dep
-utf8/utf8.lo.dep: $(go_utf8_files)
- $(BUILDDEPS)
-utf8/utf8.lo: $(go_utf8_files)
- $(BUILDPACKAGE)
-utf8/check: $(CHECK_DEPS)
- @$(CHECK)
-.PHONY: utf8/check
-
@go_include@ websocket/websocket.lo.dep
websocket/websocket.lo.dep: $(go_websocket_files)
$(BUILDDEPS)
@$(CHECK)
.PHONY: websocket/check
-@go_include@ xml/xml.lo.dep
-xml/xml.lo.dep: $(go_xml_files)
- $(BUILDDEPS)
-xml/xml.lo: $(go_xml_files)
- $(BUILDPACKAGE)
-xml/check: $(CHECK_DEPS)
- @$(CHECK)
-.PHONY: xml/check
-
@go_include@ archive/tar.lo.dep
archive/tar.lo.dep: $(go_archive_tar_files)
$(BUILDDEPS)
@$(CHECK)
.PHONY: debug/pe/check
+@go_include@ encoding/asn1.lo.dep
+encoding/asn1.lo.dep: $(go_encoding_asn1_files)
+ $(BUILDDEPS)
+encoding/asn1.lo: $(go_encoding_asn1_files)
+ $(BUILDPACKAGE)
+encoding/asn1/check: $(CHECK_DEPS)
+ @$(MKDIR_P) encoding/asn1
+ @$(CHECK)
+.PHONY: encoding/asn1/check
+
@go_include@ encoding/ascii85.lo.dep
encoding/ascii85.lo.dep: $(go_encoding_ascii85_files)
$(BUILDDEPS)
@$(CHECK)
.PHONY: encoding/binary/check
+@go_include@ encoding/csv.lo.dep
+encoding/csv.lo.dep: $(go_encoding_csv_files)
+ $(BUILDDEPS)
+encoding/csv.lo: $(go_encoding_csv_files)
+ $(BUILDPACKAGE)
+encoding/csv/check: $(CHECK_DEPS)
+ @$(MKDIR_P) encoding/csv
+ @$(CHECK)
+.PHONY: encoding/csv/check
+
@go_include@ encoding/git85.lo.dep
encoding/git85.lo.dep: $(go_encoding_git85_files)
$(BUILDDEPS)
@$(CHECK)
.PHONY: encoding/git85/check
+@go_include@ encoding/gob.lo.dep
+encoding/gob.lo.dep: $(go_encoding_gob_files)
+ $(BUILDDEPS)
+encoding/gob.lo: $(go_encoding_gob_files)
+ $(BUILDPACKAGE)
+encoding/gob/check: $(CHECK_DEPS)
+ @$(MKDIR_P) encoding/gob
+ @$(CHECK)
+.PHONY: encoding/gob/check
+
@go_include@ encoding/hex.lo.dep
encoding/hex.lo.dep: $(go_encoding_hex_files)
$(BUILDDEPS)
@$(CHECK)
.PHONY: encoding/hex/check
+@go_include@ encoding/json.lo.dep
+encoding/json.lo.dep: $(go_encoding_json_files)
+ $(BUILDDEPS)
+encoding/json.lo: $(go_encoding_json_files)
+ $(BUILDPACKAGE)
+encoding/json/check: $(CHECK_DEPS)
+ @$(MKDIR_P) encoding/json
+ @$(CHECK)
+.PHONY: encoding/json/check
+
@go_include@ encoding/pem.lo.dep
encoding/pem.lo.dep: $(go_encoding_pem_files)
$(BUILDDEPS)
@$(CHECK)
.PHONY: encoding/pem/check
+@go_include@ encoding/xml.lo.dep
+encoding/xml.lo.dep: $(go_encoding_xml_files)
+ $(BUILDDEPS)
+encoding/xml.lo: $(go_encoding_xml_files)
+ $(BUILDPACKAGE)
+encoding/xml/check: $(CHECK_DEPS)
+ @$(MKDIR_P) encoding/xml
+ @$(CHECK)
+.PHONY: encoding/xml/check
+
@go_include@ exp/ebnf.lo.dep
exp/ebnf.lo.dep: $(go_exp_ebnf_files)
$(BUILDDEPS)
@$(CHECK)
.PHONY: exp/sql/driver/check
-@go_include@ exp/template/html.lo.dep
-exp/template/html.lo.dep: $(go_exp_template_html_files)
+@go_include@ html/template.lo.dep
+html/template.lo.dep: $(go_html_template_files)
$(BUILDDEPS)
-exp/template/html.lo: $(go_exp_template_html_files)
+html/template.lo: $(go_html_template_files)
$(BUILDPACKAGE)
-exp/template/html/check: $(CHECK_DEPS)
- @$(MKDIR_P) exp/template/html
+html/template/check: $(CHECK_DEPS)
+ @$(MKDIR_P) html/template
@$(CHECK)
-.PHONY: exp/template/html/check
+.PHONY: html/template/check
@go_include@ go/ast.lo.dep
go/ast.lo.dep: $(go_go_ast_files)
@$(CHECK)
.PHONY: hash/fnv/check
-@go_include@ http/cgi.lo.dep
-http/cgi.lo.dep: $(go_http_cgi_files)
- $(BUILDDEPS)
-http/cgi.lo: $(go_http_cgi_files)
- $(BUILDPACKAGE)
-http/cgi/check: $(CHECK_DEPS)
- @$(MKDIR_P) http/cgi
- @$(CHECK)
-.PHONY: http/cgi/check
-
-@go_include@ http/fcgi.lo.dep
-http/fcgi.lo.dep: $(go_http_fcgi_files)
- $(BUILDDEPS)
-http/fcgi.lo: $(go_http_fcgi_files)
- $(BUILDPACKAGE)
-http/fcgi/check: $(CHECK_DEPS)
- @$(MKDIR_P) http/fcgi
- @$(CHECK)
-.PHONY: http/fcgi/check
-
-@go_include@ http/httptest.lo.dep
-http/httptest.lo.dep: $(go_http_httptest_files)
- $(BUILDDEPS)
-http/httptest.lo: $(go_http_httptest_files)
- $(BUILDPACKAGE)
-http/httptest/check: $(CHECK_DEPS)
- @$(MKDIR_P) http/httptest
- @$(CHECK)
-.PHONY: http/httptest/check
-
-@go_include@ http/pprof.lo.dep
-http/pprof.lo.dep: $(go_http_pprof_files)
- $(BUILDDEPS)
-http/pprof.lo: $(go_http_pprof_files)
- $(BUILDPACKAGE)
-http/pprof/check: $(CHECK_DEPS)
- @$(MKDIR_P) http/pprof
- @$(CHECK)
-.PHONY: http/pprof/check
-
@go_include@ image/bmp.lo.dep
image/bmp.lo.dep: $(go_image_bmp_files)
$(BUILDDEPS)
@$(CHECK)
.PHONY: io/ioutil/check
+@go_include@ log/syslog.lo.dep
+log/syslog.lo.dep: $(go_log_syslog_files)
+ $(BUILDDEPS)
+log/syslog.lo: $(go_log_syslog_files)
+ $(BUILDPACKAGE)
+log/syslog/syslog_c.lo: $(go_syslog_c_files) log/syslog.lo
+ $(LTCOMPILE) -c -o $@ $(srcdir)/go/log/syslog/syslog_c.c
+log/syslog/check: $(CHECK_DEPS)
+ @$(MKDIR_P) log/syslog
+ @$(CHECK)
+.PHONY: log/syslog/check
+
+@go_include@ math/big.lo.dep
+math/big.lo.dep: $(go_math_big_files)
+ $(BUILDDEPS)
+math/big.lo: $(go_math_big_files)
+ $(BUILDPACKAGE)
+math/big/check: $(CHECK_DEPS)
+ @$(MKDIR_P) math/big
+ @$(CHECK)
+.PHONY: math/big/check
+
+@go_include@ math/cmplx.lo.dep
+math/cmplx.lo.dep: $(go_math_cmplx_files)
+ $(BUILDDEPS)
+math/cmplx.lo: $(go_math_cmplx_files)
+ $(BUILDPACKAGE)
+math/cmplx/check: $(CHECK_DEPS)
+ @$(MKDIR_P) math/cmplx
+ @$(CHECK)
+.PHONY: math/cmplx/check
+
+@go_include@ math/rand.lo.dep
+math/rand.lo.dep: $(go_math_rand_files)
+ $(BUILDDEPS)
+math/rand.lo: $(go_math_rand_files)
+ $(BUILDPACKAGE)
+math/rand/check: $(CHECK_DEPS)
+ @$(MKDIR_P) math/rand
+ @$(CHECK)
+.PHONY: math/rand/check
+
@go_include@ mime/multipart.lo.dep
mime/multipart.lo.dep: $(go_mime_multipart_files)
$(BUILDDEPS)
net/dict.lo: $(go_net_dict_files)
$(BUILDPACKAGE)
+@go_include@ net/http.lo.dep
+net/http.lo.dep: $(go_net_http_files)
+ $(BUILDDEPS)
+net/http.lo: $(go_net_http_files)
+ $(BUILDPACKAGE)
+net/http/check: $(CHECK_DEPS)
+ @$(MKDIR_P) net/http
+ @$(CHECK)
+.PHONY: net/http/check
+
+@go_include@ net/mail.lo.dep
+net/mail.lo.dep: $(go_net_mail_files)
+ $(BUILDDEPS)
+net/mail.lo: $(go_net_mail_files)
+ $(BUILDPACKAGE)
+net/mail/check: $(CHECK_DEPS)
+ @$(MKDIR_P) net/mail
+ @$(CHECK)
+.PHONY: net/mail/check
+
+@go_include@ net/rpc.lo.dep
+net/rpc.lo.dep: $(go_net_rpc_files)
+ $(BUILDDEPS)
+net/rpc.lo: $(go_net_rpc_files)
+ $(BUILDPACKAGE)
+net/rpc/check: $(CHECK_DEPS)
+ @$(MKDIR_P) net/rpc
+ @$(CHECK)
+.PHONY: net/rpc/check
+
+@go_include@ net/smtp.lo.dep
+net/smtp.lo.dep: $(go_net_smtp_files)
+ $(BUILDDEPS)
+net/smtp.lo: $(go_net_smtp_files)
+ $(BUILDPACKAGE)
+net/smtp/check: $(CHECK_DEPS)
+ @$(MKDIR_P) net/smtp
+ @$(CHECK)
+.PHONY: net/smtp/check
+
+@go_include@ net/url.lo.dep
+net/url.lo.dep: $(go_net_url_files)
+ $(BUILDDEPS)
+net/url.lo: $(go_net_url_files)
+ $(BUILDPACKAGE)
+net/url/check: $(CHECK_DEPS)
+ @$(MKDIR_P) net/url
+ @$(CHECK)
+.PHONY: net/url/check
+
@go_include@ net/textproto.lo.dep
net/textproto.lo.dep: $(go_net_textproto_files)
$(BUILDDEPS)
@$(CHECK)
.PHONY: net/textproto/check
+@go_include@ net/http/cgi.lo.dep
+net/http/cgi.lo.dep: $(go_net_http_cgi_files)
+ $(BUILDDEPS)
+net/http/cgi.lo: $(go_net_http_cgi_files)
+ $(BUILDPACKAGE)
+net/http/cgi/check: $(CHECK_DEPS)
+ @$(MKDIR_P) net/http/cgi
+ @$(CHECK)
+.PHONY: net/http/cgi/check
+
+@go_include@ net/http/fcgi.lo.dep
+net/http/fcgi.lo.dep: $(go_net_http_fcgi_files)
+ $(BUILDDEPS)
+net/http/fcgi.lo: $(go_net_http_fcgi_files)
+ $(BUILDPACKAGE)
+net/http/fcgi/check: $(CHECK_DEPS)
+ @$(MKDIR_P) net/http/fcgi
+ @$(CHECK)
+.PHONY: net/http/fcgi/check
+
+@go_include@ net/http/httptest.lo.dep
+net/http/httptest.lo.dep: $(go_net_http_httptest_files)
+ $(BUILDDEPS)
+net/http/httptest.lo: $(go_net_http_httptest_files)
+ $(BUILDPACKAGE)
+net/http/httptest/check: $(CHECK_DEPS)
+ @$(MKDIR_P) net/http/httptest
+ @$(CHECK)
+.PHONY: net/http/httptest/check
+
+@go_include@ net/http/httputil.lo.dep
+net/http/httputil.lo.dep: $(go_net_http_httputil_files)
+ $(BUILDDEPS)
+net/http/httputil.lo: $(go_net_http_httputil_files)
+ $(BUILDPACKAGE)
+net/http/httputil/check: $(CHECK_DEPS)
+ @$(MKDIR_P) net/http/httputil
+ @$(CHECK)
+.PHONY: net/http/httputil/check
+
+@go_include@ net/http/pprof.lo.dep
+net/http/pprof.lo.dep: $(go_net_http_pprof_files)
+ $(BUILDDEPS)
+net/http/pprof.lo: $(go_net_http_pprof_files)
+ $(BUILDPACKAGE)
+net/http/pprof/check: $(CHECK_DEPS)
+ @$(MKDIR_P) net/http/pprof
+ @$(CHECK)
+.PHONY: net/http/pprof/check
+
+@go_include@ net/rpc/jsonrpc.lo.dep
+net/rpc/jsonrpc.lo.dep: $(go_net_rpc_jsonrpc_files)
+ $(BUILDDEPS)
+net/rpc/jsonrpc.lo: $(go_net_rpc_jsonrpc_files)
+ $(BUILDPACKAGE)
+net/rpc/jsonrpc/check: $(CHECK_DEPS)
+ @$(MKDIR_P) net/rpc/jsonrpc
+ @$(CHECK)
+.PHONY: net/rpc/jsonrpc/check
+
@go_include@ old/netchan.lo.dep
old/netchan.lo.dep: $(go_old_netchan_files)
$(BUILDDEPS)
@$(CHECK)
.PHONY: old/template/check
+@go_include@ os/exec.lo.dep
+os/exec.lo.dep: $(go_os_exec_files)
+ $(BUILDDEPS)
+os/exec.lo: $(go_os_exec_files)
+ $(BUILDPACKAGE)
+os/exec/check: $(CHECK_DEPS)
+ @$(MKDIR_P) os/exec
+ @$(CHECK)
+.PHONY: os/exec/check
+
@go_include@ os/user.lo.dep
os/user.lo.dep: $(go_os_user_files)
$(BUILDDEPS)
@$(CHECK)
.PHONY: regexp/syntax/check
-@go_include@ rpc/jsonrpc.lo.dep
-rpc/jsonrpc.lo.dep: $(go_rpc_jsonrpc_files)
- $(BUILDDEPS)
-rpc/jsonrpc.lo: $(go_rpc_jsonrpc_files)
- $(BUILDPACKAGE)
-rpc/jsonrpc/check: $(CHECK_DEPS)
- @$(MKDIR_P) rpc/jsonrpc
- @$(CHECK)
-.PHONY: rpc/jsonrpc/check
-
@go_include@ runtime/debug.lo.dep
runtime/debug.lo.dep: $(go_runtime_debug_files)
$(BUILDDEPS)
@$(CHECK)
.PHONY: sync/atomic/check
-@go_include@ template/parse.lo.dep
-template/parse.lo.dep: $(go_template_parse_files)
+@go_include@ text/tabwriter.lo.dep
+text/tabwriter.lo.dep: $(go_text_tabwriter_files)
+ $(BUILDDEPS)
+text/tabwriter.lo: $(go_text_tabwriter_files)
+ $(BUILDPACKAGE)
+text/tabwriter/check: $(CHECK_DEPS)
+ @$(MKDIR_P) text/tabwriter
+ @$(CHECK)
+.PHONY: text/tabwriter/check
+
+@go_include@ text/template.lo.dep
+text/template.lo.dep: $(go_text_template_files)
$(BUILDDEPS)
-template/parse.lo: $(go_template_parse_files)
+text/template.lo: $(go_text_template_files)
$(BUILDPACKAGE)
-template/parse/check: $(CHECK_DEPS)
- @$(MKDIR_P) template/parse
+text/template/check: $(CHECK_DEPS)
@$(CHECK)
-.PHONY: template/parse/check
+.PHONY: text/template/check
+
+@go_include@ text/template/parse.lo.dep
+text/template/parse.lo.dep: $(go_text_template_parse_files)
+ $(BUILDDEPS)
+text/template/parse.lo: $(go_text_template_parse_files)
+ $(BUILDPACKAGE)
+text/template/parse/check: $(CHECK_DEPS)
+ @$(MKDIR_P) text/template/parse
+ @$(CHECK)
+.PHONY: text/template/parse/check
@go_include@ testing/iotest.lo.dep
testing/iotest.lo.dep: $(go_testing_iotest_files)
@$(CHECK)
.PHONY: testing/script/check
+@go_include@ unicode/utf16.lo.dep
+unicode/utf16.lo.dep: $(go_unicode_utf16_files)
+ $(BUILDDEPS)
+unicode/utf16.lo: $(go_unicode_utf16_files)
+ $(BUILDPACKAGE)
+unicode/utf16/check: $(CHECK_DEPS)
+ @$(MKDIR_P) unicode/utf16
+ @$(CHECK)
+.PHONY: unicode/utf16/check
+
+@go_include@ unicode/utf8.lo.dep
+unicode/utf8.lo.dep: $(go_unicode_utf8_files)
+ $(BUILDDEPS)
+unicode/utf8.lo: $(go_unicode_utf8_files)
+ $(BUILDPACKAGE)
+unicode/utf8/check: $(CHECK_DEPS)
+ @$(MKDIR_P) unicode/utf8
+ @$(CHECK)
+.PHONY: unicode/utf8/check
+
@go_include@ syscall/syscall.lo.dep
syscall/syscall.lo.dep: $(go_syscall_files)
$(BUILDDEPS)
syscall/wait.lo: go/syscall/wait.c
$(LTCOMPILE) -c -o $@ $<
-asn1.gox: asn1/asn1.lo
- $(BUILDGOX)
-big.gox: big/big.lo
- $(BUILDGOX)
bufio.gox: bufio/bufio.lo
$(BUILDGOX)
bytes.gox: bytes/bytes.lo
$(BUILDGOX)
-cmath.gox: cmath/cmath.lo
- $(BUILDGOX)
crypto.gox: crypto/crypto.lo
$(BUILDGOX)
-csv.gox: csv/csv.lo
- $(BUILDGOX)
errors.gox: errors/errors.lo
$(BUILDGOX)
-exec.gox: exec/exec.lo
- $(BUILDGOX)
expvar.gox: expvar/expvar.lo
$(BUILDGOX)
flag.gox: flag/flag.lo
$(BUILDGOX)
fmt.gox: fmt/fmt.lo
$(BUILDGOX)
-gob.gox: gob/gob.lo
- $(BUILDGOX)
hash.gox: hash/hash.lo
$(BUILDGOX)
html.gox: html/html.lo
$(BUILDGOX)
-http.gox: http/http.lo
- $(BUILDGOX)
image.gox: image/image.lo
$(BUILDGOX)
io.gox: io/io.lo
$(BUILDGOX)
-json.gox: json/json.lo
- $(BUILDGOX)
log.gox: log/log.lo
$(BUILDGOX)
math.gox: math/math.lo
$(BUILDGOX)
-mail.gox: mail/mail.lo
- $(BUILDGOX)
mime.gox: mime/mime.lo
$(BUILDGOX)
net.gox: net/net.lo
$(BUILDGOX)
path.gox: path/path.lo
$(BUILDGOX)
-rand.gox: rand/rand.lo
- $(BUILDGOX)
reflect.gox: reflect/reflect.lo
$(BUILDGOX)
regexp.gox: regexp/regexp.lo
$(BUILDGOX)
-rpc.gox: rpc/rpc.lo
- $(BUILDGOX)
runtime.gox: runtime/runtime.lo
$(BUILDGOX)
-scanner.gox: scanner/scanner.lo
- $(BUILDGOX)
-smtp.gox: smtp/smtp.lo
- $(BUILDGOX)
sort.gox: sort/sort.lo
$(BUILDGOX)
strconv.gox: strconv/strconv.lo
$(BUILDGOX)
sync.gox: sync/sync.lo
$(BUILDGOX)
-syslog.gox: syslog/syslog.lo
- $(BUILDGOX)
syscall.gox: syscall/syscall.lo
$(BUILDGOX)
-tabwriter.gox: tabwriter/tabwriter.lo
- $(BUILDGOX)
-template.gox: template/template.lo
- $(BUILDGOX)
testing.gox: testing/testing.lo
$(BUILDGOX)
time.gox: time/time.lo
$(BUILDGOX)
unicode.gox: unicode/unicode.lo
$(BUILDGOX)
-url.gox: url/url.lo
- $(BUILDGOX)
-utf16.gox: utf16/utf16.lo
- $(BUILDGOX)
-utf8.gox: utf8/utf8.lo
- $(BUILDGOX)
websocket.gox: websocket/websocket.lo
$(BUILDGOX)
-xml.gox: xml/xml.lo
- $(BUILDGOX)
archive/tar.gox: archive/tar.lo
$(BUILDGOX)
encoding/ascii85.gox: encoding/ascii85.lo
$(BUILDGOX)
+encoding/asn1.gox: encoding/asn1.lo
+ $(BUILDGOX)
encoding/base32.gox: encoding/base32.lo
$(BUILDGOX)
encoding/base64.gox: encoding/base64.lo
$(BUILDGOX)
encoding/binary.gox: encoding/binary.lo
$(BUILDGOX)
+encoding/csv.gox: encoding/csv.lo
+ $(BUILDGOX)
encoding/git85.gox: encoding/git85.lo
$(BUILDGOX)
+encoding/gob.gox: encoding/gob.lo
+ $(BUILDGOX)
encoding/hex.gox: encoding/hex.lo
$(BUILDGOX)
+encoding/json.gox: encoding/json.lo
+ $(BUILDGOX)
encoding/pem.gox: encoding/pem.lo
$(BUILDGOX)
+encoding/xml.gox: encoding/xml.lo
+ $(BUILDGOX)
exp/ebnf.gox: exp/ebnf.lo
$(BUILDGOX)
exp/sql/driver.gox: exp/sql/driver.lo
$(BUILDGOX)
-exp/template/html.gox: exp/template/html.lo
+html/template.gox: html/template.lo
$(BUILDGOX)
go/ast.gox: go/ast.lo
hash/fnv.gox: hash/fnv.lo
$(BUILDGOX)
-http/cgi.gox: http/cgi.lo
- $(BUILDGOX)
-http/fcgi.gox: http/fcgi.lo
- $(BUILDGOX)
-http/httptest.gox: http/httptest.lo
- $(BUILDGOX)
-http/pprof.gox: http/pprof.lo
- $(BUILDGOX)
-
image/bmp.gox: image/bmp.lo
$(BUILDGOX)
image/color.gox: image/color.lo
io/ioutil.gox: io/ioutil.lo
$(BUILDGOX)
+log/syslog.gox: log/syslog.lo
+ $(BUILDGOX)
+
+math/big.gox: math/big.lo
+ $(BUILDGOX)
+math/cmplx.gox: math/cmplx.lo
+ $(BUILDGOX)
+math/rand.gox: math/rand.lo
+ $(BUILDGOX)
+
mime/multipart.gox: mime/multipart.lo
$(BUILDGOX)
net/dict.gox: net/dict.lo
$(BUILDGOX)
+net/http.gox: net/http.lo
+ $(BUILDGOX)
+net/mail.gox: net/mail.lo
+ $(BUILDGOX)
+net/rpc.gox: net/rpc.lo
+ $(BUILDGOX)
+net/smtp.gox: net/smtp.lo
+ $(BUILDGOX)
net/textproto.gox: net/textproto.lo
$(BUILDGOX)
+net/url.gox: net/url.lo
+ $(BUILDGOX)
+
+net/http/cgi.gox: net/http/cgi.lo
+ $(BUILDGOX)
+net/http/fcgi.gox: net/http/fcgi.lo
+ $(BUILDGOX)
+net/http/httptest.gox: net/http/httptest.lo
+ $(BUILDGOX)
+net/http/httputil.gox: net/http/httputil.lo
+ $(BUILDGOX)
+net/http/pprof.gox: net/http/pprof.lo
+ $(BUILDGOX)
+
+net/rpc/jsonrpc.gox: net/rpc/jsonrpc.lo
+ $(BUILDGOX)
old/netchan.gox: old/netchan.lo
$(BUILDGOX)
old/template.gox: old/template.lo
$(BUILDGOX)
+os/exec.gox: os/exec.lo
+ $(BUILDGOX)
os/user.gox: os/user.lo
$(BUILDGOX)
os/signal.gox: os/signal.lo
regexp/syntax.gox: regexp/syntax.lo
$(BUILDGOX)
-rpc/jsonrpc.gox: rpc/jsonrpc.lo
- $(BUILDGOX)
-
runtime/debug.gox: runtime/debug.lo
$(BUILDGOX)
runtime/pprof.gox: runtime/pprof.lo
sync/atomic.gox: sync/atomic.lo
$(BUILDGOX)
-template/parse.gox: template/parse.lo
+text/scanner.gox: text/scanner.lo
+ $(BUILDGOX)
+text/tabwriter.gox: text/tabwriter.lo
+ $(BUILDGOX)
+text/template.gox: text/template.lo
+ $(BUILDGOX)
+text/template/parse.gox: text/template/parse.lo
$(BUILDGOX)
testing/iotest.gox: testing/iotest.lo
testing/script.gox: testing/script.lo
$(BUILDGOX)
+unicode/utf16.gox: unicode/utf16.lo
+ $(BUILDGOX)
+unicode/utf8.gox: unicode/utf8.lo
+ $(BUILDGOX)
+
check: check-tail
check-recursive: check-head
// tr := tar.NewReader(r)
// for {
// hdr, err := tr.Next()
-// if err == os.EOF {
+// if err == io.EOF {
// // end of tar archive
// break
// }
}
// Read reads from the current entry in the tar archive.
-// It returns 0, os.EOF when it reaches the end of that entry,
+// It returns 0, io.EOF when it reaches the end of that entry,
// until Next is called to advance to the next entry.
func (tr *Reader) Read(b []byte) (n int, err error) {
if tr.nb == 0 {
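Since this weekly replaces os.EOF with io.EOF, here is a minimal sketch of the read loop the comment above describes; the archive name "example.tar" and the buffer size are purely illustrative.

package main

import (
	"archive/tar"
	"fmt"
	"io"
	"os"
)

func main() {
	f, err := os.Open("example.tar") // illustrative file name
	if err != nil {
		fmt.Println(err)
		return
	}
	defer f.Close()
	tr := tar.NewReader(f)
	if _, err := tr.Next(); err != nil { // position at the first entry
		fmt.Println(err)
		return
	}
	buf := make([]byte, 4096)
	for {
		n, rerr := tr.Read(buf)
		os.Stdout.Write(buf[:n])
		if rerr == io.EOF {
			break // end of this entry; call Next to advance to the next one
		}
		if rerr != nil {
			fmt.Println(rerr)
			return
		}
	}
}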
import (
"bufio"
"compress/flate"
+ "encoding/binary"
"errors"
"hash"
"hash/crc32"
- "encoding/binary"
"io"
"io/ioutil"
"os"
f.Close()
return nil, err
}
+ r.f = f
return r, nil
}
if err == FormatError {
return
}
- defer z.Close()
+ defer func() {
+ if err := z.Close(); err != nil {
+ t.Errorf("error %q when closing zip file", err)
+ }
+ }()
// bail here if no Files expected to be tested
// (there may actually be files in the zip, but we don't care)
import (
"bytes"
"io/ioutil"
- "rand"
+ "math/rand"
"testing"
)
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package asn1 implements parsing of DER-encoded ASN.1 data structures,
-// as defined in ITU-T Rec X.690.
-//
-// See also ``A Layman's Guide to a Subset of ASN.1, BER, and DER,''
-// http://luca.ntop.org/Teaching/Appunti/asn1.html.
-package asn1
-
-// ASN.1 is a syntax for specifying abstract objects and BER, DER, PER, XER etc
-// are different encoding formats for those objects. Here, we'll be dealing
-// with DER, the Distinguished Encoding Rules. DER is used in X.509 because
-// it's fast to parse and, unlike BER, has a unique encoding for every object.
-// When calculating hashes over objects, it's important that the resulting
-// bytes be the same at both ends and DER removes this margin of error.
-//
-// ASN.1 is very complex and this package doesn't attempt to implement
-// everything by any means.
-
-import (
- "big"
- "fmt"
- "reflect"
- "time"
-)
-
-// A StructuralError suggests that the ASN.1 data is valid, but the Go type
-// which is receiving it doesn't match.
-type StructuralError struct {
- Msg string
-}
-
-func (e StructuralError) Error() string { return "ASN.1 structure error: " + e.Msg }
-
-// A SyntaxError suggests that the ASN.1 data is invalid.
-type SyntaxError struct {
- Msg string
-}
-
-func (e SyntaxError) Error() string { return "ASN.1 syntax error: " + e.Msg }
-
-// We start by dealing with each of the primitive types in turn.
-
-// BOOLEAN
-
-func parseBool(bytes []byte) (ret bool, err error) {
- if len(bytes) != 1 {
- err = SyntaxError{"invalid boolean"}
- return
- }
-
- return bytes[0] != 0, nil
-}
-
-// INTEGER
-
-// parseInt64 treats the given bytes as a big-endian, signed integer and
-// returns the result.
-func parseInt64(bytes []byte) (ret int64, err error) {
- if len(bytes) > 8 {
- // We'll overflow an int64 in this case.
- err = StructuralError{"integer too large"}
- return
- }
- for bytesRead := 0; bytesRead < len(bytes); bytesRead++ {
- ret <<= 8
- ret |= int64(bytes[bytesRead])
- }
-
- // Shift up and down in order to sign extend the result.
- ret <<= 64 - uint8(len(bytes))*8
- ret >>= 64 - uint8(len(bytes))*8
- return
-}
-
-// parseInt treats the given bytes as a big-endian, signed integer and returns
-// the result.
-func parseInt(bytes []byte) (int, error) {
- ret64, err := parseInt64(bytes)
- if err != nil {
- return 0, err
- }
- if ret64 != int64(int(ret64)) {
- return 0, StructuralError{"integer too large"}
- }
- return int(ret64), nil
-}
-
-var bigOne = big.NewInt(1)
-
-// parseBigInt treats the given bytes as a big-endian, signed integer and returns
-// the result.
-func parseBigInt(bytes []byte) *big.Int {
- ret := new(big.Int)
- if len(bytes) > 0 && bytes[0]&0x80 == 0x80 {
- // This is a negative number.
- notBytes := make([]byte, len(bytes))
- for i := range notBytes {
- notBytes[i] = ^bytes[i]
- }
- ret.SetBytes(notBytes)
- ret.Add(ret, bigOne)
- ret.Neg(ret)
- return ret
- }
- ret.SetBytes(bytes)
- return ret
-}
-
-// BIT STRING
-
-// BitString is the structure to use when you want an ASN.1 BIT STRING type. A
-// bit string is padded up to the nearest byte in memory and the number of
-// valid bits is recorded. Padding bits will be zero.
-type BitString struct {
- Bytes []byte // bits packed into bytes.
- BitLength int // length in bits.
-}
-
-// At returns the bit at the given index. If the index is out of range it
-// returns 0.
-func (b BitString) At(i int) int {
- if i < 0 || i >= b.BitLength {
- return 0
- }
- x := i / 8
- y := 7 - uint(i%8)
- return int(b.Bytes[x]>>y) & 1
-}
-
-// RightAlign returns a slice where the padding bits are at the beginning. The
-// slice may share memory with the BitString.
-func (b BitString) RightAlign() []byte {
- shift := uint(8 - (b.BitLength % 8))
- if shift == 8 || len(b.Bytes) == 0 {
- return b.Bytes
- }
-
- a := make([]byte, len(b.Bytes))
- a[0] = b.Bytes[0] >> shift
- for i := 1; i < len(b.Bytes); i++ {
- a[i] = b.Bytes[i-1] << (8 - shift)
- a[i] |= b.Bytes[i] >> shift
- }
-
- return a
-}
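As a quick illustration of the MSB-first bit numbering that At and RightAlign assume, a sketch reusing the data from this package's TestBitStringAt; it is written against the encoding/asn1 import path this merge moves the package to.

package main

import (
	"encoding/asn1"
	"fmt"
)

func main() {
	// 0x82 0x40 = 10000010 01000000, so bits 0, 6 and 9 are set.
	bs := asn1.BitString{Bytes: []byte{0x82, 0x40}, BitLength: 16}
	fmt.Println(bs.At(0), bs.At(1), bs.At(6), bs.At(9)) // 1 0 1 1
}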
-
-// parseBitString parses an ASN.1 bit string from the given byte slice and returns it.
-func parseBitString(bytes []byte) (ret BitString, err error) {
- if len(bytes) == 0 {
- err = SyntaxError{"zero length BIT STRING"}
- return
- }
- paddingBits := int(bytes[0])
- if paddingBits > 7 ||
- len(bytes) == 1 && paddingBits > 0 ||
- bytes[len(bytes)-1]&((1<<bytes[0])-1) != 0 {
- err = SyntaxError{"invalid padding bits in BIT STRING"}
- return
- }
- ret.BitLength = (len(bytes)-1)*8 - paddingBits
- ret.Bytes = bytes[1:]
- return
-}
-
-// OBJECT IDENTIFIER
-
-// An ObjectIdentifier represents an ASN.1 OBJECT IDENTIFIER.
-type ObjectIdentifier []int
-
-// Equal returns true iff oi and other represent the same identifier.
-func (oi ObjectIdentifier) Equal(other ObjectIdentifier) bool {
- if len(oi) != len(other) {
- return false
- }
- for i := 0; i < len(oi); i++ {
- if oi[i] != other[i] {
- return false
- }
- }
-
- return true
-}
-
-// parseObjectIdentifier parses an OBJECT IDENTIFIER from the given bytes and
-// returns it. An object identifier is a sequence of variable length integers
-// that are assigned in a hierarchy.
-func parseObjectIdentifier(bytes []byte) (s []int, err error) {
- if len(bytes) == 0 {
- err = SyntaxError{"zero length OBJECT IDENTIFIER"}
- return
- }
-
- // In the worst case, we get two elements from the first byte (which is
- // encoded differently) and then every varint is a single byte long.
- s = make([]int, len(bytes)+1)
-
- // The first byte is 40*value1 + value2:
- s[0] = int(bytes[0]) / 40
- s[1] = int(bytes[0]) % 40
- i := 2
- for offset := 1; offset < len(bytes); i++ {
- var v int
- v, offset, err = parseBase128Int(bytes, offset)
- if err != nil {
- return
- }
- s[i] = v
- }
- s = s[0:i]
- return
-}
-
-// ENUMERATED
-
-// An Enumerated is represented as a plain int.
-type Enumerated int
-
-// FLAG
-
-// A Flag accepts any data and is set to true if present.
-type Flag bool
-
-// parseBase128Int parses a base-128 encoded int from the given offset in the
-// given byte slice. It returns the value and the new offset.
-func parseBase128Int(bytes []byte, initOffset int) (ret, offset int, err error) {
- offset = initOffset
- for shifted := 0; offset < len(bytes); shifted++ {
- if shifted > 4 {
- err = StructuralError{"base 128 integer too large"}
- return
- }
- ret <<= 7
- b := bytes[offset]
- ret |= int(b & 0x7f)
- offset++
- if b&0x80 == 0 {
- return
- }
- }
- err = SyntaxError{"truncated base 128 integer"}
- return
-}
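parseBase128Int decodes the variable-length arcs inside an OBJECT IDENTIFIER. A small sketch of the observable effect, reusing the OID bytes from this package's unmarshal tests and, again, the new encoding/asn1 import path:

package main

import (
	"encoding/asn1"
	"fmt"
)

func main() {
	// DER OBJECT IDENTIFIER 1.2.840.113549: the arcs 840 and 113549
	// are each base-128 encoded across several bytes.
	der := []byte{0x06, 0x06, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d}
	var oid asn1.ObjectIdentifier
	if _, err := asn1.Unmarshal(der, &oid); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(oid[2], oid[3]) // 840 113549
}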
-
-// UTCTime
-
-func parseUTCTime(bytes []byte) (ret *time.Time, err error) {
- s := string(bytes)
- ret, err = time.Parse("0601021504Z0700", s)
- if err == nil {
- return
- }
- ret, err = time.Parse("060102150405Z0700", s)
- return
-}
-
-// parseGeneralizedTime parses the GeneralizedTime from the given byte slice
-// and returns the resulting time.
-func parseGeneralizedTime(bytes []byte) (ret *time.Time, err error) {
- return time.Parse("20060102150405Z0700", string(bytes))
-}
-
-// PrintableString
-
-// parsePrintableString parses an ASN.1 PrintableString from the given byte
-// array and returns it.
-func parsePrintableString(bytes []byte) (ret string, err error) {
- for _, b := range bytes {
- if !isPrintable(b) {
- err = SyntaxError{"PrintableString contains invalid character"}
- return
- }
- }
- ret = string(bytes)
- return
-}
-
-// isPrintable returns true iff the given b is in the ASN.1 PrintableString set.
-func isPrintable(b byte) bool {
- return 'a' <= b && b <= 'z' ||
- 'A' <= b && b <= 'Z' ||
- '0' <= b && b <= '9' ||
- '\'' <= b && b <= ')' ||
- '+' <= b && b <= '/' ||
- b == ' ' ||
- b == ':' ||
- b == '=' ||
- b == '?' ||
- // This is technically not allowed in a PrintableString.
- // However, x509 certificates with wildcard strings don't
- // always use the correct string type so we permit it.
- b == '*'
-}
-
-// IA5String
-
-// parseIA5String parses an ASN.1 IA5String (ASCII string) from the given
-// byte slice and returns it.
-func parseIA5String(bytes []byte) (ret string, err error) {
- for _, b := range bytes {
- if b >= 0x80 {
- err = SyntaxError{"IA5String contains invalid character"}
- return
- }
- }
- ret = string(bytes)
- return
-}
-
-// T61String
-
-// parseT61String parses an ASN.1 T61String (8-bit clean string) from the given
-// byte slice and returns it.
-func parseT61String(bytes []byte) (ret string, err error) {
- return string(bytes), nil
-}
-
-// UTF8String
-
-// parseUTF8String parses an ASN.1 UTF8String (raw UTF-8) from the given byte
-// array and returns it.
-func parseUTF8String(bytes []byte) (ret string, err error) {
- return string(bytes), nil
-}
-
-// A RawValue represents an undecoded ASN.1 object.
-type RawValue struct {
- Class, Tag int
- IsCompound bool
- Bytes []byte
- FullBytes []byte // includes the tag and length
-}
-
-// RawContent is used to signal that the undecoded DER data needs to be
-// preserved for a struct. To use it, the first field of the struct must have
-// this type. It's an error for any of the other fields to have this type.
-type RawContent []byte
-
-// Tagging
-
-// parseTagAndLength parses an ASN.1 tag and length pair from the given offset
-// into a byte slice. It returns the parsed data and the new offset. SET and
-// SET OF (tag 17) are mapped to SEQUENCE and SEQUENCE OF (tag 16) since we
-// don't distinguish between ordered and unordered objects in this code.
-func parseTagAndLength(bytes []byte, initOffset int) (ret tagAndLength, offset int, err error) {
- offset = initOffset
- b := bytes[offset]
- offset++
- ret.class = int(b >> 6)
- ret.isCompound = b&0x20 == 0x20
- ret.tag = int(b & 0x1f)
-
- // If the bottom five bits are set, then the tag number is actually base 128
- // encoded afterwards
- if ret.tag == 0x1f {
- ret.tag, offset, err = parseBase128Int(bytes, offset)
- if err != nil {
- return
- }
- }
- if offset >= len(bytes) {
- err = SyntaxError{"truncated tag or length"}
- return
- }
- b = bytes[offset]
- offset++
- if b&0x80 == 0 {
- // The length is encoded in the bottom 7 bits.
- ret.length = int(b & 0x7f)
- } else {
- // Bottom 7 bits give the number of length bytes to follow.
- numBytes := int(b & 0x7f)
- // We risk overflowing a signed 32-bit number if we accept more than 3 bytes.
- if numBytes > 3 {
- err = StructuralError{"length too large"}
- return
- }
- if numBytes == 0 {
- err = SyntaxError{"indefinite length found (not DER)"}
- return
- }
- ret.length = 0
- for i := 0; i < numBytes; i++ {
- if offset >= len(bytes) {
- err = SyntaxError{"truncated tag or length"}
- return
- }
- b = bytes[offset]
- offset++
- ret.length <<= 8
- ret.length |= int(b)
- }
- }
-
- return
-}
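The class, tag, compound flag and length that parseTagAndLength extracts are visible through the exported RawValue type. A hedged sketch, with illustrative SEQUENCE bytes and the new encoding/asn1 import path:

package main

import (
	"encoding/asn1"
	"fmt"
)

func main() {
	der := []byte{0x30, 0x03, 0x02, 0x01, 0x2a} // SEQUENCE { INTEGER 42 }
	var raw asn1.RawValue
	if _, err := asn1.Unmarshal(der, &raw); err != nil {
		fmt.Println(err)
		return
	}
	// 0x30: class 0 (universal), compound bit set, tag 16 (SEQUENCE);
	// 0x03: short-form length covering the three content bytes.
	fmt.Println(raw.Class, raw.Tag, raw.IsCompound, raw.Bytes) // 0 16 true [2 1 42]
}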
-
-// parseSequenceOf is used for SEQUENCE OF and SET OF values. It tries to parse
-// a number of ASN.1 values from the given byte slice and returns them as a
-// slice of Go values of the given type.
-func parseSequenceOf(bytes []byte, sliceType reflect.Type, elemType reflect.Type) (ret reflect.Value, err error) {
- expectedTag, compoundType, ok := getUniversalType(elemType)
- if !ok {
- err = StructuralError{"unknown Go type for slice"}
- return
- }
-
- // First we iterate over the input and count the number of elements,
- // checking that the types are correct in each case.
- numElements := 0
- for offset := 0; offset < len(bytes); {
- var t tagAndLength
- t, offset, err = parseTagAndLength(bytes, offset)
- if err != nil {
- return
- }
- // We pretend that GENERAL STRINGs are PRINTABLE STRINGs so
- // that a sequence of them can be parsed into a []string.
- if t.tag == tagGeneralString {
- t.tag = tagPrintableString
- }
- if t.class != classUniversal || t.isCompound != compoundType || t.tag != expectedTag {
- err = StructuralError{"sequence tag mismatch"}
- return
- }
- if invalidLength(offset, t.length, len(bytes)) {
- err = SyntaxError{"truncated sequence"}
- return
- }
- offset += t.length
- numElements++
- }
- ret = reflect.MakeSlice(sliceType, numElements, numElements)
- params := fieldParameters{}
- offset := 0
- for i := 0; i < numElements; i++ {
- offset, err = parseField(ret.Index(i), bytes, offset, params)
- if err != nil {
- return
- }
- }
- return
-}
-
-var (
- bitStringType = reflect.TypeOf(BitString{})
- objectIdentifierType = reflect.TypeOf(ObjectIdentifier{})
- enumeratedType = reflect.TypeOf(Enumerated(0))
- flagType = reflect.TypeOf(Flag(false))
- timeType = reflect.TypeOf(&time.Time{})
- rawValueType = reflect.TypeOf(RawValue{})
- rawContentsType = reflect.TypeOf(RawContent(nil))
- bigIntType = reflect.TypeOf(new(big.Int))
-)
-
-// invalidLength returns true iff offset + length > sliceLength, or if the
-// addition would overflow.
-func invalidLength(offset, length, sliceLength int) bool {
- return offset+length < offset || offset+length > sliceLength
-}
-
-// parseField is the main parsing function. Given a byte slice and an offset
-// into the array, it will try to parse a suitable ASN.1 value out and store it
-// in the given Value.
-func parseField(v reflect.Value, bytes []byte, initOffset int, params fieldParameters) (offset int, err error) {
- offset = initOffset
- fieldType := v.Type()
-
- // If we have run out of data, it may be that there are optional elements at the end.
- if offset == len(bytes) {
- if !setDefaultValue(v, params) {
- err = SyntaxError{"sequence truncated"}
- }
- return
- }
-
- // Deal with raw values.
- if fieldType == rawValueType {
- var t tagAndLength
- t, offset, err = parseTagAndLength(bytes, offset)
- if err != nil {
- return
- }
- if invalidLength(offset, t.length, len(bytes)) {
- err = SyntaxError{"data truncated"}
- return
- }
- result := RawValue{t.class, t.tag, t.isCompound, bytes[offset : offset+t.length], bytes[initOffset : offset+t.length]}
- offset += t.length
- v.Set(reflect.ValueOf(result))
- return
- }
-
- // Deal with the ANY type.
- if ifaceType := fieldType; ifaceType.Kind() == reflect.Interface && ifaceType.NumMethod() == 0 {
- var t tagAndLength
- t, offset, err = parseTagAndLength(bytes, offset)
- if err != nil {
- return
- }
- if invalidLength(offset, t.length, len(bytes)) {
- err = SyntaxError{"data truncated"}
- return
- }
- var result interface{}
- if !t.isCompound && t.class == classUniversal {
- innerBytes := bytes[offset : offset+t.length]
- switch t.tag {
- case tagPrintableString:
- result, err = parsePrintableString(innerBytes)
- case tagIA5String:
- result, err = parseIA5String(innerBytes)
- case tagT61String:
- result, err = parseT61String(innerBytes)
- case tagUTF8String:
- result, err = parseUTF8String(innerBytes)
- case tagInteger:
- result, err = parseInt64(innerBytes)
- case tagBitString:
- result, err = parseBitString(innerBytes)
- case tagOID:
- result, err = parseObjectIdentifier(innerBytes)
- case tagUTCTime:
- result, err = parseUTCTime(innerBytes)
- case tagOctetString:
- result = innerBytes
- default:
- // If we don't know how to handle the type, we just leave Value as nil.
- }
- }
- offset += t.length
- if err != nil {
- return
- }
- if result != nil {
- v.Set(reflect.ValueOf(result))
- }
- return
- }
- universalTag, compoundType, ok1 := getUniversalType(fieldType)
- if !ok1 {
- err = StructuralError{fmt.Sprintf("unknown Go type: %v", fieldType)}
- return
- }
-
- t, offset, err := parseTagAndLength(bytes, offset)
- if err != nil {
- return
- }
- if params.explicit {
- expectedClass := classContextSpecific
- if params.application {
- expectedClass = classApplication
- }
- if t.class == expectedClass && t.tag == *params.tag && (t.length == 0 || t.isCompound) {
- if t.length > 0 {
- t, offset, err = parseTagAndLength(bytes, offset)
- if err != nil {
- return
- }
- } else {
- if fieldType != flagType {
- err = StructuralError{"Zero length explicit tag was not an asn1.Flag"}
- return
- }
- v.SetBool(true)
- return
- }
- } else {
- // The tags didn't match, it might be an optional element.
- ok := setDefaultValue(v, params)
- if ok {
- offset = initOffset
- } else {
- err = StructuralError{"explicitly tagged member didn't match"}
- }
- return
- }
- }
-
- // Special case for strings: all the ASN.1 string types map to the Go
- // type string. getUniversalType returns the tag for PrintableString
- // when it sees a string, so if we see a different string type on the
- // wire, we change the universal type to match.
- if universalTag == tagPrintableString {
- switch t.tag {
- case tagIA5String, tagGeneralString, tagT61String, tagUTF8String:
- universalTag = t.tag
- }
- }
-
- // Special case for time: UTCTime and GeneralizedTime both map to the
- // Go type time.Time.
- if universalTag == tagUTCTime && t.tag == tagGeneralizedTime {
- universalTag = tagGeneralizedTime
- }
-
- expectedClass := classUniversal
- expectedTag := universalTag
-
- if !params.explicit && params.tag != nil {
- expectedClass = classContextSpecific
- expectedTag = *params.tag
- }
-
- if !params.explicit && params.application && params.tag != nil {
- expectedClass = classApplication
- expectedTag = *params.tag
- }
-
- // We have unwrapped any explicit tagging at this point.
- if t.class != expectedClass || t.tag != expectedTag || t.isCompound != compoundType {
- // Tags don't match. Again, it could be an optional element.
- ok := setDefaultValue(v, params)
- if ok {
- offset = initOffset
- } else {
- err = StructuralError{fmt.Sprintf("tags don't match (%d vs %+v) %+v %s @%d", expectedTag, t, params, fieldType.Name(), offset)}
- }
- return
- }
- if invalidLength(offset, t.length, len(bytes)) {
- err = SyntaxError{"data truncated"}
- return
- }
- innerBytes := bytes[offset : offset+t.length]
- offset += t.length
-
- // We deal with the structures defined in this package first.
- switch fieldType {
- case objectIdentifierType:
- newSlice, err1 := parseObjectIdentifier(innerBytes)
- v.Set(reflect.MakeSlice(v.Type(), len(newSlice), len(newSlice)))
- if err1 == nil {
- reflect.Copy(v, reflect.ValueOf(newSlice))
- }
- err = err1
- return
- case bitStringType:
- bs, err1 := parseBitString(innerBytes)
- if err1 == nil {
- v.Set(reflect.ValueOf(bs))
- }
- err = err1
- return
- case timeType:
- var time *time.Time
- var err1 error
- if universalTag == tagUTCTime {
- time, err1 = parseUTCTime(innerBytes)
- } else {
- time, err1 = parseGeneralizedTime(innerBytes)
- }
- if err1 == nil {
- v.Set(reflect.ValueOf(time))
- }
- err = err1
- return
- case enumeratedType:
- parsedInt, err1 := parseInt(innerBytes)
- if err1 == nil {
- v.SetInt(int64(parsedInt))
- }
- err = err1
- return
- case flagType:
- v.SetBool(true)
- return
- case bigIntType:
- parsedInt := parseBigInt(innerBytes)
- v.Set(reflect.ValueOf(parsedInt))
- return
- }
- switch val := v; val.Kind() {
- case reflect.Bool:
- parsedBool, err1 := parseBool(innerBytes)
- if err1 == nil {
- val.SetBool(parsedBool)
- }
- err = err1
- return
- case reflect.Int, reflect.Int32:
- parsedInt, err1 := parseInt(innerBytes)
- if err1 == nil {
- val.SetInt(int64(parsedInt))
- }
- err = err1
- return
- case reflect.Int64:
- parsedInt, err1 := parseInt64(innerBytes)
- if err1 == nil {
- val.SetInt(parsedInt)
- }
- err = err1
- return
- // TODO(dfc) Add support for the remaining integer types
- case reflect.Struct:
- structType := fieldType
-
- if structType.NumField() > 0 &&
- structType.Field(0).Type == rawContentsType {
- bytes := bytes[initOffset:offset]
- val.Field(0).Set(reflect.ValueOf(RawContent(bytes)))
- }
-
- innerOffset := 0
- for i := 0; i < structType.NumField(); i++ {
- field := structType.Field(i)
- if i == 0 && field.Type == rawContentsType {
- continue
- }
- innerOffset, err = parseField(val.Field(i), innerBytes, innerOffset, parseFieldParameters(field.Tag.Get("asn1")))
- if err != nil {
- return
- }
- }
- // We allow extra bytes at the end of the SEQUENCE because
- // adding elements to the end has been used in X.509 as the
- // version numbers have increased.
- return
- case reflect.Slice:
- sliceType := fieldType
- if sliceType.Elem().Kind() == reflect.Uint8 {
- val.Set(reflect.MakeSlice(sliceType, len(innerBytes), len(innerBytes)))
- reflect.Copy(val, reflect.ValueOf(innerBytes))
- return
- }
- newSlice, err1 := parseSequenceOf(innerBytes, sliceType, sliceType.Elem())
- if err1 == nil {
- val.Set(newSlice)
- }
- err = err1
- return
- case reflect.String:
- var v string
- switch universalTag {
- case tagPrintableString:
- v, err = parsePrintableString(innerBytes)
- case tagIA5String:
- v, err = parseIA5String(innerBytes)
- case tagT61String:
- v, err = parseT61String(innerBytes)
- case tagUTF8String:
- v, err = parseUTF8String(innerBytes)
- case tagGeneralString:
-// GeneralString is specified in ISO-2022/ECMA-35.
- // A brief review suggests that it includes structures
- // that allow the encoding to change midstring and
- // such. We give up and pass it as an 8-bit string.
- v, err = parseT61String(innerBytes)
- default:
- err = SyntaxError{fmt.Sprintf("internal error: unknown string type %d", universalTag)}
- }
- if err == nil {
- val.SetString(v)
- }
- return
- }
- err = StructuralError{"unsupported: " + v.Type().String()}
- return
-}
-
-// setDefaultValue is used to install a default value, from a tag string, into
-// a Value. It is successful if the field was optional, even if a default value
-// wasn't provided or it failed to install it into the Value.
-func setDefaultValue(v reflect.Value, params fieldParameters) (ok bool) {
- if !params.optional {
- return
- }
- ok = true
- if params.defaultValue == nil {
- return
- }
- switch val := v; val.Kind() {
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- val.SetInt(*params.defaultValue)
- }
- return
-}
-
-// Unmarshal parses the DER-encoded ASN.1 data structure b
-// and uses the reflect package to fill in an arbitrary value pointed at by val.
-// Because Unmarshal uses the reflect package, the structs
-// being written to must use upper case field names.
-//
-// An ASN.1 INTEGER can be written to an int, int32 or int64.
-// If the encoded value does not fit in the Go type,
-// Unmarshal returns a parse error.
-//
-// An ASN.1 BIT STRING can be written to a BitString.
-//
-// An ASN.1 OCTET STRING can be written to a []byte.
-//
-// An ASN.1 OBJECT IDENTIFIER can be written to an
-// ObjectIdentifier.
-//
-// An ASN.1 ENUMERATED can be written to an Enumerated.
-//
-// An ASN.1 UTCTIME or GENERALIZEDTIME can be written to a *time.Time.
-//
-// An ASN.1 PrintableString or IA5String can be written to a string.
-//
-// Any of the above ASN.1 values can be written to an interface{}.
-// The value stored in the interface has the corresponding Go type.
-// For integers, that type is int64.
-//
-// An ASN.1 SEQUENCE OF x or SET OF x can be written
-// to a slice if an x can be written to the slice's element type.
-//
-// An ASN.1 SEQUENCE or SET can be written to a struct
-// if each of the elements in the sequence can be
-// written to the corresponding element in the struct.
-//
-// The following tags on struct fields have special meaning to Unmarshal:
-//
-// optional marks the field as ASN.1 OPTIONAL
-// [explicit] tag:x specifies the ASN.1 tag number; implies ASN.1 CONTEXT SPECIFIC
-// default:x sets the default value for optional integer fields
-//
-// If the type of the first field of a structure is RawContent then the raw
-// ASN.1 contents of the struct will be stored in it.
-//
-// Other ASN.1 types are not supported; if it encounters them,
-// Unmarshal returns a parse error.
-func Unmarshal(b []byte, val interface{}) (rest []byte, err error) {
- return UnmarshalWithParams(b, val, "")
-}
-
-// UnmarshalWithParams allows field parameters to be specified for the
-// top-level element. The form of the params is the same as the field tags.
-func UnmarshalWithParams(b []byte, val interface{}, params string) (rest []byte, err error) {
- v := reflect.ValueOf(val).Elem()
- offset, err := parseField(v, b, 0, parseFieldParameters(params))
- if err != nil {
- return nil, err
- }
- return b[offset:], nil
-}
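To round off the Unmarshal documentation above, a minimal sketch of the struct-tag driven decoding it describes; the message type, its field tags and the DER bytes are illustrative only, and the import path assumes the encoding/asn1 location this merge moves the package to.

package main

import (
	"encoding/asn1"
	"fmt"
)

type message struct {
	Version int `asn1:"optional,explicit,default:0,tag:0"`
	Body    string
}

func main() {
	// SEQUENCE { [0] EXPLICIT INTEGER 1, PrintableString "hi" }
	der := []byte{0x30, 0x09, 0xa0, 0x03, 0x02, 0x01, 0x01, 0x13, 0x02, 'h', 'i'}
	var m message
	if _, err := asn1.Unmarshal(der, &m); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("%+v\n", m) // {Version:1 Body:hi}
}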
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package asn1
-
-import (
- "bytes"
- "reflect"
- "testing"
- "time"
-)
-
-type int64Test struct {
- in []byte
- ok bool
- out int64
-}
-
-var int64TestData = []int64Test{
- {[]byte{0x00}, true, 0},
- {[]byte{0x7f}, true, 127},
- {[]byte{0x00, 0x80}, true, 128},
- {[]byte{0x01, 0x00}, true, 256},
- {[]byte{0x80}, true, -128},
- {[]byte{0xff, 0x7f}, true, -129},
- {[]byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, true, -1},
- {[]byte{0xff}, true, -1},
- {[]byte{0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, true, -9223372036854775808},
- {[]byte{0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, false, 0},
-}
-
-func TestParseInt64(t *testing.T) {
- for i, test := range int64TestData {
- ret, err := parseInt64(test.in)
- if (err == nil) != test.ok {
- t.Errorf("#%d: Incorrect error result (did fail? %v, expected: %v)", i, err == nil, test.ok)
- }
- if test.ok && ret != test.out {
- t.Errorf("#%d: Bad result: %v (expected %v)", i, ret, test.out)
- }
- }
-}
-
-type int32Test struct {
- in []byte
- ok bool
- out int32
-}
-
-var int32TestData = []int32Test{
- {[]byte{0x00}, true, 0},
- {[]byte{0x7f}, true, 127},
- {[]byte{0x00, 0x80}, true, 128},
- {[]byte{0x01, 0x00}, true, 256},
- {[]byte{0x80}, true, -128},
- {[]byte{0xff, 0x7f}, true, -129},
- {[]byte{0xff, 0xff, 0xff, 0xff}, true, -1},
- {[]byte{0xff}, true, -1},
- {[]byte{0x80, 0x00, 0x00, 0x00}, true, -2147483648},
- {[]byte{0x80, 0x00, 0x00, 0x00, 0x00}, false, 0},
-}
-
-func TestParseInt32(t *testing.T) {
- for i, test := range int32TestData {
- ret, err := parseInt(test.in)
- if (err == nil) != test.ok {
- t.Errorf("#%d: Incorrect error result (did fail? %v, expected: %v)", i, err == nil, test.ok)
- }
- if test.ok && int32(ret) != test.out {
- t.Errorf("#%d: Bad result: %v (expected %v)", i, ret, test.out)
- }
- }
-}
-
-var bigIntTests = []struct {
- in []byte
- base10 string
-}{
- {[]byte{0xff}, "-1"},
- {[]byte{0x00}, "0"},
- {[]byte{0x01}, "1"},
- {[]byte{0x00, 0xff}, "255"},
- {[]byte{0xff, 0x00}, "-256"},
- {[]byte{0x01, 0x00}, "256"},
-}
-
-func TestParseBigInt(t *testing.T) {
- for i, test := range bigIntTests {
- ret := parseBigInt(test.in)
- if ret.String() != test.base10 {
- t.Errorf("#%d: bad result from %x, got %s want %s", i, test.in, ret.String(), test.base10)
- }
- fw := newForkableWriter()
- marshalBigInt(fw, ret)
- result := fw.Bytes()
- if !bytes.Equal(result, test.in) {
- t.Errorf("#%d: got %x from marshaling %s, want %x", i, result, ret, test.in)
- }
- }
-}
-
-type bitStringTest struct {
- in []byte
- ok bool
- out []byte
- bitLength int
-}
-
-var bitStringTestData = []bitStringTest{
- {[]byte{}, false, []byte{}, 0},
- {[]byte{0x00}, true, []byte{}, 0},
- {[]byte{0x07, 0x00}, true, []byte{0x00}, 1},
- {[]byte{0x07, 0x01}, false, []byte{}, 0},
- {[]byte{0x07, 0x40}, false, []byte{}, 0},
- {[]byte{0x08, 0x00}, false, []byte{}, 0},
-}
-
-func TestBitString(t *testing.T) {
- for i, test := range bitStringTestData {
- ret, err := parseBitString(test.in)
- if (err == nil) != test.ok {
- t.Errorf("#%d: Incorrect error result (did fail? %v, expected: %v)", i, err == nil, test.ok)
- }
- if err == nil {
- if test.bitLength != ret.BitLength || bytes.Compare(ret.Bytes, test.out) != 0 {
- t.Errorf("#%d: Bad result: %v (expected %v %v)", i, ret, test.out, test.bitLength)
- }
- }
- }
-}
-
-func TestBitStringAt(t *testing.T) {
- bs := BitString{[]byte{0x82, 0x40}, 16}
- if bs.At(0) != 1 {
- t.Error("#1: Failed")
- }
- if bs.At(1) != 0 {
- t.Error("#2: Failed")
- }
- if bs.At(6) != 1 {
- t.Error("#3: Failed")
- }
- if bs.At(9) != 1 {
- t.Error("#4: Failed")
- }
-}
-
-type bitStringRightAlignTest struct {
- in []byte
- inlen int
- out []byte
-}
-
-var bitStringRightAlignTests = []bitStringRightAlignTest{
- {[]byte{0x80}, 1, []byte{0x01}},
- {[]byte{0x80, 0x80}, 9, []byte{0x01, 0x01}},
- {[]byte{}, 0, []byte{}},
- {[]byte{0xce}, 8, []byte{0xce}},
- {[]byte{0xce, 0x47}, 16, []byte{0xce, 0x47}},
- {[]byte{0x34, 0x50}, 12, []byte{0x03, 0x45}},
-}
-
-func TestBitStringRightAlign(t *testing.T) {
- for i, test := range bitStringRightAlignTests {
- bs := BitString{test.in, test.inlen}
- out := bs.RightAlign()
- if bytes.Compare(out, test.out) != 0 {
- t.Errorf("#%d got: %x want: %x", i, out, test.out)
- }
- }
-}
-
-type objectIdentifierTest struct {
- in []byte
- ok bool
- out []int
-}
-
-var objectIdentifierTestData = []objectIdentifierTest{
- {[]byte{}, false, []int{}},
- {[]byte{85}, true, []int{2, 5}},
- {[]byte{85, 0x02}, true, []int{2, 5, 2}},
- {[]byte{85, 0x02, 0xc0, 0x00}, true, []int{2, 5, 2, 0x2000}},
- {[]byte{85, 0x02, 0xc0, 0x80, 0x80, 0x80, 0x80}, false, []int{}},
-}
-
-func TestObjectIdentifier(t *testing.T) {
- for i, test := range objectIdentifierTestData {
- ret, err := parseObjectIdentifier(test.in)
- if (err == nil) != test.ok {
- t.Errorf("#%d: Incorrect error result (did fail? %v, expected: %v)", i, err == nil, test.ok)
- }
- if err == nil {
- if !reflect.DeepEqual(test.out, ret) {
- t.Errorf("#%d: Bad result: %v (expected %v)", i, ret, test.out)
- }
- }
- }
-}
-
-type timeTest struct {
- in string
- ok bool
- out *time.Time
-}
-
-var utcTestData = []timeTest{
- {"910506164540-0700", true, &time.Time{1991, 05, 06, 16, 45, 40, 0, -7 * 60 * 60, ""}},
- {"910506164540+0730", true, &time.Time{1991, 05, 06, 16, 45, 40, 0, 7*60*60 + 30*60, ""}},
- {"910506234540Z", true, &time.Time{1991, 05, 06, 23, 45, 40, 0, 0, "UTC"}},
- {"9105062345Z", true, &time.Time{1991, 05, 06, 23, 45, 0, 0, 0, "UTC"}},
- {"a10506234540Z", false, nil},
- {"91a506234540Z", false, nil},
- {"9105a6234540Z", false, nil},
- {"910506a34540Z", false, nil},
- {"910506334a40Z", false, nil},
- {"91050633444aZ", false, nil},
- {"910506334461Z", false, nil},
- {"910506334400Za", false, nil},
-}
-
-func TestUTCTime(t *testing.T) {
- for i, test := range utcTestData {
- ret, err := parseUTCTime([]byte(test.in))
- if (err == nil) != test.ok {
- t.Errorf("#%d: Incorrect error result (did fail? %v, expected: %v)", i, err == nil, test.ok)
- }
- if err == nil {
- if !reflect.DeepEqual(test.out, ret) {
- t.Errorf("#%d: Bad result: %v (expected %v)", i, ret, test.out)
- }
- }
- }
-}
-
-var generalizedTimeTestData = []timeTest{
- {"20100102030405Z", true, &time.Time{2010, 01, 02, 03, 04, 05, 0, 0, "UTC"}},
- {"20100102030405", false, nil},
- {"20100102030405+0607", true, &time.Time{2010, 01, 02, 03, 04, 05, 0, 6*60*60 + 7*60, ""}},
- {"20100102030405-0607", true, &time.Time{2010, 01, 02, 03, 04, 05, 0, -6*60*60 - 7*60, ""}},
-}
-
-func TestGeneralizedTime(t *testing.T) {
- for i, test := range generalizedTimeTestData {
- ret, err := parseGeneralizedTime([]byte(test.in))
- if (err == nil) != test.ok {
- t.Errorf("#%d: Incorrect error result (did fail? %v, expected: %v)", i, err == nil, test.ok)
- }
- if err == nil {
- if !reflect.DeepEqual(test.out, ret) {
- t.Errorf("#%d: Bad result: %v (expected %v)", i, ret, test.out)
- }
- }
- }
-}
-
-type tagAndLengthTest struct {
- in []byte
- ok bool
- out tagAndLength
-}
-
-var tagAndLengthData = []tagAndLengthTest{
- {[]byte{0x80, 0x01}, true, tagAndLength{2, 0, 1, false}},
- {[]byte{0xa0, 0x01}, true, tagAndLength{2, 0, 1, true}},
- {[]byte{0x02, 0x00}, true, tagAndLength{0, 2, 0, false}},
- {[]byte{0xfe, 0x00}, true, tagAndLength{3, 30, 0, true}},
- {[]byte{0x1f, 0x01, 0x00}, true, tagAndLength{0, 1, 0, false}},
- {[]byte{0x1f, 0x81, 0x00, 0x00}, true, tagAndLength{0, 128, 0, false}},
- {[]byte{0x1f, 0x81, 0x80, 0x01, 0x00}, true, tagAndLength{0, 0x4001, 0, false}},
- {[]byte{0x00, 0x81, 0x01}, true, tagAndLength{0, 0, 1, false}},
- {[]byte{0x00, 0x82, 0x01, 0x00}, true, tagAndLength{0, 0, 256, false}},
- {[]byte{0x00, 0x83, 0x01, 0x00}, false, tagAndLength{}},
- {[]byte{0x1f, 0x85}, false, tagAndLength{}},
- {[]byte{0x30, 0x80}, false, tagAndLength{}},
-}
-
-func TestParseTagAndLength(t *testing.T) {
- for i, test := range tagAndLengthData {
- tagAndLength, _, err := parseTagAndLength(test.in, 0)
- if (err == nil) != test.ok {
- t.Errorf("#%d: Incorrect error result (did pass? %v, expected: %v)", i, err == nil, test.ok)
- }
- if err == nil && !reflect.DeepEqual(test.out, tagAndLength) {
- t.Errorf("#%d: Bad result: %v (expected %v)", i, tagAndLength, test.out)
- }
- }
-}
-
-type parseFieldParametersTest struct {
- in string
- out fieldParameters
-}
-
-func newInt(n int) *int { return &n }
-
-func newInt64(n int64) *int64 { return &n }
-
-func newString(s string) *string { return &s }
-
-func newBool(b bool) *bool { return &b }
-
-var parseFieldParametersTestData []parseFieldParametersTest = []parseFieldParametersTest{
- {"", fieldParameters{}},
- {"ia5", fieldParameters{stringType: tagIA5String}},
- {"printable", fieldParameters{stringType: tagPrintableString}},
- {"optional", fieldParameters{optional: true}},
- {"explicit", fieldParameters{explicit: true, tag: new(int)}},
- {"application", fieldParameters{application: true, tag: new(int)}},
- {"optional,explicit", fieldParameters{optional: true, explicit: true, tag: new(int)}},
- {"default:42", fieldParameters{defaultValue: newInt64(42)}},
- {"tag:17", fieldParameters{tag: newInt(17)}},
- {"optional,explicit,default:42,tag:17", fieldParameters{optional: true, explicit: true, defaultValue: newInt64(42), tag: newInt(17)}},
- {"optional,explicit,default:42,tag:17,rubbish1", fieldParameters{true, true, false, newInt64(42), newInt(17), 0, false}},
- {"set", fieldParameters{set: true}},
-}
-
-func TestParseFieldParameters(t *testing.T) {
- for i, test := range parseFieldParametersTestData {
- f := parseFieldParameters(test.in)
- if !reflect.DeepEqual(f, test.out) {
- t.Errorf("#%d: Bad result: %v (expected %v)", i, f, test.out)
- }
- }
-}
-
-type TestObjectIdentifierStruct struct {
- OID ObjectIdentifier
-}
-
-type TestContextSpecificTags struct {
- A int `asn1:"tag:1"`
-}
-
-type TestContextSpecificTags2 struct {
- A int `asn1:"explicit,tag:1"`
- B int
-}
-
-type TestElementsAfterString struct {
- S string
- A, B int
-}
-
-var unmarshalTestData = []struct {
- in []byte
- out interface{}
-}{
- {[]byte{0x02, 0x01, 0x42}, newInt(0x42)},
- {[]byte{0x30, 0x08, 0x06, 0x06, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d}, &TestObjectIdentifierStruct{[]int{1, 2, 840, 113549}}},
- {[]byte{0x03, 0x04, 0x06, 0x6e, 0x5d, 0xc0}, &BitString{[]byte{110, 93, 192}, 18}},
- {[]byte{0x30, 0x09, 0x02, 0x01, 0x01, 0x02, 0x01, 0x02, 0x02, 0x01, 0x03}, &[]int{1, 2, 3}},
- {[]byte{0x02, 0x01, 0x10}, newInt(16)},
- {[]byte{0x13, 0x04, 't', 'e', 's', 't'}, newString("test")},
- {[]byte{0x16, 0x04, 't', 'e', 's', 't'}, newString("test")},
- {[]byte{0x16, 0x04, 't', 'e', 's', 't'}, &RawValue{0, 22, false, []byte("test"), []byte("\x16\x04test")}},
- {[]byte{0x04, 0x04, 1, 2, 3, 4}, &RawValue{0, 4, false, []byte{1, 2, 3, 4}, []byte{4, 4, 1, 2, 3, 4}}},
- {[]byte{0x30, 0x03, 0x81, 0x01, 0x01}, &TestContextSpecificTags{1}},
- {[]byte{0x30, 0x08, 0xa1, 0x03, 0x02, 0x01, 0x01, 0x02, 0x01, 0x02}, &TestContextSpecificTags2{1, 2}},
- {[]byte{0x01, 0x01, 0x00}, newBool(false)},
- {[]byte{0x01, 0x01, 0x01}, newBool(true)},
- {[]byte{0x30, 0x0b, 0x13, 0x03, 0x66, 0x6f, 0x6f, 0x02, 0x01, 0x22, 0x02, 0x01, 0x33}, &TestElementsAfterString{"foo", 0x22, 0x33}},
-}
-
-func TestUnmarshal(t *testing.T) {
- for i, test := range unmarshalTestData {
- pv := reflect.New(reflect.TypeOf(test.out).Elem())
- val := pv.Interface()
- _, err := Unmarshal(test.in, val)
- if err != nil {
- t.Errorf("Unmarshal failed at index %d %v", i, err)
- }
- if !reflect.DeepEqual(val, test.out) {
- t.Errorf("#%d:\nhave %#v\nwant %#v", i, val, test.out)
- }
- }
-}
-
-type Certificate struct {
- TBSCertificate TBSCertificate
- SignatureAlgorithm AlgorithmIdentifier
- SignatureValue BitString
-}
-
-type TBSCertificate struct {
- Version int `asn1:"optional,explicit,default:0,tag:0"`
- SerialNumber RawValue
- SignatureAlgorithm AlgorithmIdentifier
- Issuer RDNSequence
- Validity Validity
- Subject RDNSequence
- PublicKey PublicKeyInfo
-}
-
-type AlgorithmIdentifier struct {
- Algorithm ObjectIdentifier
-}
-
-type RDNSequence []RelativeDistinguishedNameSET
-
-type RelativeDistinguishedNameSET []AttributeTypeAndValue
-
-type AttributeTypeAndValue struct {
- Type ObjectIdentifier
- Value interface{}
-}
-
-type Validity struct {
- NotBefore, NotAfter *time.Time
-}
-
-type PublicKeyInfo struct {
- Algorithm AlgorithmIdentifier
- PublicKey BitString
-}
-
-func TestCertificate(t *testing.T) {
- // This is a minimal, self-signed certificate that should parse correctly.
- var cert Certificate
- if _, err := Unmarshal(derEncodedSelfSignedCertBytes, &cert); err != nil {
- t.Errorf("Unmarshal failed: %v", err)
- }
- if !reflect.DeepEqual(cert, derEncodedSelfSignedCert) {
- t.Errorf("Bad result:\ngot: %+v\nwant: %+v", cert, derEncodedSelfSignedCert)
- }
-}
-
-func TestCertificateWithNUL(t *testing.T) {
- // This is the paypal NUL-hack certificate. It should fail to parse because
- // NUL isn't a permitted character in a PrintableString.
-
- var cert Certificate
- if _, err := Unmarshal(derEncodedPaypalNULCertBytes, &cert); err == nil {
- t.Error("Unmarshal succeeded, should not have")
- }
-}
-
-type rawStructTest struct {
- Raw RawContent
- A int
-}
-
-func TestRawStructs(t *testing.T) {
- var s rawStructTest
- input := []byte{0x30, 0x03, 0x02, 0x01, 0x50}
-
- rest, err := Unmarshal(input, &s)
- if len(rest) != 0 {
- t.Errorf("incomplete parse: %x", rest)
- return
- }
- if err != nil {
- t.Error(err)
- return
- }
- if s.A != 0x50 {
- t.Errorf("bad value for A: got %d want %d", s.A, 0x50)
- }
- if bytes.Compare([]byte(s.Raw), input) != 0 {
- t.Errorf("bad value for Raw: got %x want %x", s.Raw, input)
- }
-}
-
-var derEncodedSelfSignedCert = Certificate{
- TBSCertificate: TBSCertificate{
- Version: 0,
- SerialNumber: RawValue{Class: 0, Tag: 2, IsCompound: false, Bytes: []uint8{0x0, 0x8c, 0xc3, 0x37, 0x92, 0x10, 0xec, 0x2c, 0x98}, FullBytes: []byte{2, 9, 0x0, 0x8c, 0xc3, 0x37, 0x92, 0x10, 0xec, 0x2c, 0x98}},
- SignatureAlgorithm: AlgorithmIdentifier{Algorithm: ObjectIdentifier{1, 2, 840, 113549, 1, 1, 5}},
- Issuer: RDNSequence{
- RelativeDistinguishedNameSET{AttributeTypeAndValue{Type: ObjectIdentifier{2, 5, 4, 6}, Value: "XX"}},
- RelativeDistinguishedNameSET{AttributeTypeAndValue{Type: ObjectIdentifier{2, 5, 4, 8}, Value: "Some-State"}},
- RelativeDistinguishedNameSET{AttributeTypeAndValue{Type: ObjectIdentifier{2, 5, 4, 7}, Value: "City"}},
- RelativeDistinguishedNameSET{AttributeTypeAndValue{Type: ObjectIdentifier{2, 5, 4, 10}, Value: "Internet Widgits Pty Ltd"}},
- RelativeDistinguishedNameSET{AttributeTypeAndValue{Type: ObjectIdentifier{2, 5, 4, 3}, Value: "false.example.com"}},
- RelativeDistinguishedNameSET{AttributeTypeAndValue{Type: ObjectIdentifier{1, 2, 840, 113549, 1, 9, 1}, Value: "false@example.com"}},
- },
- Validity: Validity{NotBefore: &time.Time{Year: 2009, Month: 10, Day: 8, Hour: 0, Minute: 25, Second: 53, ZoneOffset: 0, Zone: "UTC"}, NotAfter: &time.Time{Year: 2010, Month: 10, Day: 8, Hour: 0, Minute: 25, Second: 53, ZoneOffset: 0, Zone: "UTC"}},
- Subject: RDNSequence{
- RelativeDistinguishedNameSET{AttributeTypeAndValue{Type: ObjectIdentifier{2, 5, 4, 6}, Value: "XX"}},
- RelativeDistinguishedNameSET{AttributeTypeAndValue{Type: ObjectIdentifier{2, 5, 4, 8}, Value: "Some-State"}},
- RelativeDistinguishedNameSET{AttributeTypeAndValue{Type: ObjectIdentifier{2, 5, 4, 7}, Value: "City"}},
- RelativeDistinguishedNameSET{AttributeTypeAndValue{Type: ObjectIdentifier{2, 5, 4, 10}, Value: "Internet Widgits Pty Ltd"}},
- RelativeDistinguishedNameSET{AttributeTypeAndValue{Type: ObjectIdentifier{2, 5, 4, 3}, Value: "false.example.com"}},
- RelativeDistinguishedNameSET{AttributeTypeAndValue{Type: ObjectIdentifier{1, 2, 840, 113549, 1, 9, 1}, Value: "false@example.com"}},
- },
- PublicKey: PublicKeyInfo{
- Algorithm: AlgorithmIdentifier{Algorithm: ObjectIdentifier{1, 2, 840, 113549, 1, 1, 1}},
- PublicKey: BitString{
- Bytes: []uint8{
- 0x30, 0x48, 0x2, 0x41, 0x0, 0xcd, 0xb7,
- 0x63, 0x9c, 0x32, 0x78, 0xf0, 0x6, 0xaa, 0x27, 0x7f, 0x6e, 0xaf, 0x42,
- 0x90, 0x2b, 0x59, 0x2d, 0x8c, 0xbc, 0xbe, 0x38, 0xa1, 0xc9, 0x2b, 0xa4,
- 0x69, 0x5a, 0x33, 0x1b, 0x1d, 0xea, 0xde, 0xad, 0xd8, 0xe9, 0xa5, 0xc2,
- 0x7e, 0x8c, 0x4c, 0x2f, 0xd0, 0xa8, 0x88, 0x96, 0x57, 0x72, 0x2a, 0x4f,
- 0x2a, 0xf7, 0x58, 0x9c, 0xf2, 0xc7, 0x70, 0x45, 0xdc, 0x8f, 0xde, 0xec,
- 0x35, 0x7d, 0x2, 0x3, 0x1, 0x0, 0x1,
- },
- BitLength: 592,
- },
- },
- },
- SignatureAlgorithm: AlgorithmIdentifier{Algorithm: ObjectIdentifier{1, 2, 840, 113549, 1, 1, 5}},
- SignatureValue: BitString{
- Bytes: []uint8{
- 0xa6, 0x7b, 0x6, 0xec, 0x5e, 0xce,
- 0x92, 0x77, 0x2c, 0xa4, 0x13, 0xcb, 0xa3, 0xca, 0x12, 0x56, 0x8f, 0xdc, 0x6c,
- 0x7b, 0x45, 0x11, 0xcd, 0x40, 0xa7, 0xf6, 0x59, 0x98, 0x4, 0x2, 0xdf, 0x2b,
- 0x99, 0x8b, 0xb9, 0xa4, 0xa8, 0xcb, 0xeb, 0x34, 0xc0, 0xf0, 0xa7, 0x8c, 0xf8,
- 0xd9, 0x1e, 0xde, 0x14, 0xa5, 0xed, 0x76, 0xbf, 0x11, 0x6f, 0xe3, 0x60, 0xaa,
- 0xfa, 0x88, 0x21, 0x49, 0x4, 0x35,
- },
- BitLength: 512,
- },
-}
-
-var derEncodedSelfSignedCertBytes = []byte{
- 0x30, 0x82, 0x02, 0x18, 0x30,
- 0x82, 0x01, 0xc2, 0x02, 0x09, 0x00, 0x8c, 0xc3, 0x37, 0x92, 0x10, 0xec, 0x2c,
- 0x98, 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01,
- 0x05, 0x05, 0x00, 0x30, 0x81, 0x92, 0x31, 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55,
- 0x04, 0x06, 0x13, 0x02, 0x58, 0x58, 0x31, 0x13, 0x30, 0x11, 0x06, 0x03, 0x55,
- 0x04, 0x08, 0x13, 0x0a, 0x53, 0x6f, 0x6d, 0x65, 0x2d, 0x53, 0x74, 0x61, 0x74,
- 0x65, 0x31, 0x0d, 0x30, 0x0b, 0x06, 0x03, 0x55, 0x04, 0x07, 0x13, 0x04, 0x43,
- 0x69, 0x74, 0x79, 0x31, 0x21, 0x30, 0x1f, 0x06, 0x03, 0x55, 0x04, 0x0a, 0x13,
- 0x18, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x20, 0x57, 0x69, 0x64,
- 0x67, 0x69, 0x74, 0x73, 0x20, 0x50, 0x74, 0x79, 0x20, 0x4c, 0x74, 0x64, 0x31,
- 0x1a, 0x30, 0x18, 0x06, 0x03, 0x55, 0x04, 0x03, 0x13, 0x11, 0x66, 0x61, 0x6c,
- 0x73, 0x65, 0x2e, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x2e, 0x63, 0x6f,
- 0x6d, 0x31, 0x20, 0x30, 0x1e, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d,
- 0x01, 0x09, 0x01, 0x16, 0x11, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x40, 0x65, 0x78,
- 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x30, 0x1e, 0x17, 0x0d,
- 0x30, 0x39, 0x31, 0x30, 0x30, 0x38, 0x30, 0x30, 0x32, 0x35, 0x35, 0x33, 0x5a,
- 0x17, 0x0d, 0x31, 0x30, 0x31, 0x30, 0x30, 0x38, 0x30, 0x30, 0x32, 0x35, 0x35,
- 0x33, 0x5a, 0x30, 0x81, 0x92, 0x31, 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, 0x04,
- 0x06, 0x13, 0x02, 0x58, 0x58, 0x31, 0x13, 0x30, 0x11, 0x06, 0x03, 0x55, 0x04,
- 0x08, 0x13, 0x0a, 0x53, 0x6f, 0x6d, 0x65, 0x2d, 0x53, 0x74, 0x61, 0x74, 0x65,
- 0x31, 0x0d, 0x30, 0x0b, 0x06, 0x03, 0x55, 0x04, 0x07, 0x13, 0x04, 0x43, 0x69,
- 0x74, 0x79, 0x31, 0x21, 0x30, 0x1f, 0x06, 0x03, 0x55, 0x04, 0x0a, 0x13, 0x18,
- 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x20, 0x57, 0x69, 0x64, 0x67,
- 0x69, 0x74, 0x73, 0x20, 0x50, 0x74, 0x79, 0x20, 0x4c, 0x74, 0x64, 0x31, 0x1a,
- 0x30, 0x18, 0x06, 0x03, 0x55, 0x04, 0x03, 0x13, 0x11, 0x66, 0x61, 0x6c, 0x73,
- 0x65, 0x2e, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d,
- 0x31, 0x20, 0x30, 0x1e, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01,
- 0x09, 0x01, 0x16, 0x11, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x40, 0x65, 0x78, 0x61,
- 0x6d, 0x70, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x30, 0x5c, 0x30, 0x0d, 0x06,
- 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x01, 0x05, 0x00, 0x03,
- 0x4b, 0x00, 0x30, 0x48, 0x02, 0x41, 0x00, 0xcd, 0xb7, 0x63, 0x9c, 0x32, 0x78,
- 0xf0, 0x06, 0xaa, 0x27, 0x7f, 0x6e, 0xaf, 0x42, 0x90, 0x2b, 0x59, 0x2d, 0x8c,
- 0xbc, 0xbe, 0x38, 0xa1, 0xc9, 0x2b, 0xa4, 0x69, 0x5a, 0x33, 0x1b, 0x1d, 0xea,
- 0xde, 0xad, 0xd8, 0xe9, 0xa5, 0xc2, 0x7e, 0x8c, 0x4c, 0x2f, 0xd0, 0xa8, 0x88,
- 0x96, 0x57, 0x72, 0x2a, 0x4f, 0x2a, 0xf7, 0x58, 0x9c, 0xf2, 0xc7, 0x70, 0x45,
- 0xdc, 0x8f, 0xde, 0xec, 0x35, 0x7d, 0x02, 0x03, 0x01, 0x00, 0x01, 0x30, 0x0d,
- 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x05, 0x05, 0x00,
- 0x03, 0x41, 0x00, 0xa6, 0x7b, 0x06, 0xec, 0x5e, 0xce, 0x92, 0x77, 0x2c, 0xa4,
- 0x13, 0xcb, 0xa3, 0xca, 0x12, 0x56, 0x8f, 0xdc, 0x6c, 0x7b, 0x45, 0x11, 0xcd,
- 0x40, 0xa7, 0xf6, 0x59, 0x98, 0x04, 0x02, 0xdf, 0x2b, 0x99, 0x8b, 0xb9, 0xa4,
- 0xa8, 0xcb, 0xeb, 0x34, 0xc0, 0xf0, 0xa7, 0x8c, 0xf8, 0xd9, 0x1e, 0xde, 0x14,
- 0xa5, 0xed, 0x76, 0xbf, 0x11, 0x6f, 0xe3, 0x60, 0xaa, 0xfa, 0x88, 0x21, 0x49,
- 0x04, 0x35,
-}
-
-var derEncodedPaypalNULCertBytes = []byte{
- 0x30, 0x82, 0x06, 0x44, 0x30,
- 0x82, 0x05, 0xad, 0xa0, 0x03, 0x02, 0x01, 0x02, 0x02, 0x03, 0x00, 0xf0, 0x9b,
- 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x05,
- 0x05, 0x00, 0x30, 0x82, 0x01, 0x12, 0x31, 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55,
- 0x04, 0x06, 0x13, 0x02, 0x45, 0x53, 0x31, 0x12, 0x30, 0x10, 0x06, 0x03, 0x55,
- 0x04, 0x08, 0x13, 0x09, 0x42, 0x61, 0x72, 0x63, 0x65, 0x6c, 0x6f, 0x6e, 0x61,
- 0x31, 0x12, 0x30, 0x10, 0x06, 0x03, 0x55, 0x04, 0x07, 0x13, 0x09, 0x42, 0x61,
- 0x72, 0x63, 0x65, 0x6c, 0x6f, 0x6e, 0x61, 0x31, 0x29, 0x30, 0x27, 0x06, 0x03,
- 0x55, 0x04, 0x0a, 0x13, 0x20, 0x49, 0x50, 0x53, 0x20, 0x43, 0x65, 0x72, 0x74,
- 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x41, 0x75, 0x74,
- 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x20, 0x73, 0x2e, 0x6c, 0x2e, 0x31, 0x2e,
- 0x30, 0x2c, 0x06, 0x03, 0x55, 0x04, 0x0a, 0x14, 0x25, 0x67, 0x65, 0x6e, 0x65,
- 0x72, 0x61, 0x6c, 0x40, 0x69, 0x70, 0x73, 0x63, 0x61, 0x2e, 0x63, 0x6f, 0x6d,
- 0x20, 0x43, 0x2e, 0x49, 0x2e, 0x46, 0x2e, 0x20, 0x20, 0x42, 0x2d, 0x42, 0x36,
- 0x32, 0x32, 0x31, 0x30, 0x36, 0x39, 0x35, 0x31, 0x2e, 0x30, 0x2c, 0x06, 0x03,
- 0x55, 0x04, 0x0b, 0x13, 0x25, 0x69, 0x70, 0x73, 0x43, 0x41, 0x20, 0x43, 0x4c,
- 0x41, 0x53, 0x45, 0x41, 0x31, 0x20, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69,
- 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72,
- 0x69, 0x74, 0x79, 0x31, 0x2e, 0x30, 0x2c, 0x06, 0x03, 0x55, 0x04, 0x03, 0x13,
- 0x25, 0x69, 0x70, 0x73, 0x43, 0x41, 0x20, 0x43, 0x4c, 0x41, 0x53, 0x45, 0x41,
- 0x31, 0x20, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x20, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x31,
- 0x20, 0x30, 0x1e, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09,
- 0x01, 0x16, 0x11, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x6c, 0x40, 0x69, 0x70,
- 0x73, 0x63, 0x61, 0x2e, 0x63, 0x6f, 0x6d, 0x30, 0x1e, 0x17, 0x0d, 0x30, 0x39,
- 0x30, 0x32, 0x32, 0x34, 0x32, 0x33, 0x30, 0x34, 0x31, 0x37, 0x5a, 0x17, 0x0d,
- 0x31, 0x31, 0x30, 0x32, 0x32, 0x34, 0x32, 0x33, 0x30, 0x34, 0x31, 0x37, 0x5a,
- 0x30, 0x81, 0x94, 0x31, 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, 0x13,
- 0x02, 0x55, 0x53, 0x31, 0x13, 0x30, 0x11, 0x06, 0x03, 0x55, 0x04, 0x08, 0x13,
- 0x0a, 0x43, 0x61, 0x6c, 0x69, 0x66, 0x6f, 0x72, 0x6e, 0x69, 0x61, 0x31, 0x16,
- 0x30, 0x14, 0x06, 0x03, 0x55, 0x04, 0x07, 0x13, 0x0d, 0x53, 0x61, 0x6e, 0x20,
- 0x46, 0x72, 0x61, 0x6e, 0x63, 0x69, 0x73, 0x63, 0x6f, 0x31, 0x11, 0x30, 0x0f,
- 0x06, 0x03, 0x55, 0x04, 0x0a, 0x13, 0x08, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69,
- 0x74, 0x79, 0x31, 0x14, 0x30, 0x12, 0x06, 0x03, 0x55, 0x04, 0x0b, 0x13, 0x0b,
- 0x53, 0x65, 0x63, 0x75, 0x72, 0x65, 0x20, 0x55, 0x6e, 0x69, 0x74, 0x31, 0x2f,
- 0x30, 0x2d, 0x06, 0x03, 0x55, 0x04, 0x03, 0x13, 0x26, 0x77, 0x77, 0x77, 0x2e,
- 0x70, 0x61, 0x79, 0x70, 0x61, 0x6c, 0x2e, 0x63, 0x6f, 0x6d, 0x00, 0x73, 0x73,
- 0x6c, 0x2e, 0x73, 0x65, 0x63, 0x75, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x6e, 0x65,
- 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x63, 0x63, 0x30, 0x81, 0x9f, 0x30, 0x0d,
- 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x01, 0x05, 0x00,
- 0x03, 0x81, 0x8d, 0x00, 0x30, 0x81, 0x89, 0x02, 0x81, 0x81, 0x00, 0xd2, 0x69,
- 0xfa, 0x6f, 0x3a, 0x00, 0xb4, 0x21, 0x1b, 0xc8, 0xb1, 0x02, 0xd7, 0x3f, 0x19,
- 0xb2, 0xc4, 0x6d, 0xb4, 0x54, 0xf8, 0x8b, 0x8a, 0xcc, 0xdb, 0x72, 0xc2, 0x9e,
- 0x3c, 0x60, 0xb9, 0xc6, 0x91, 0x3d, 0x82, 0xb7, 0x7d, 0x99, 0xff, 0xd1, 0x29,
- 0x84, 0xc1, 0x73, 0x53, 0x9c, 0x82, 0xdd, 0xfc, 0x24, 0x8c, 0x77, 0xd5, 0x41,
- 0xf3, 0xe8, 0x1e, 0x42, 0xa1, 0xad, 0x2d, 0x9e, 0xff, 0x5b, 0x10, 0x26, 0xce,
- 0x9d, 0x57, 0x17, 0x73, 0x16, 0x23, 0x38, 0xc8, 0xd6, 0xf1, 0xba, 0xa3, 0x96,
- 0x5b, 0x16, 0x67, 0x4a, 0x4f, 0x73, 0x97, 0x3a, 0x4d, 0x14, 0xa4, 0xf4, 0xe2,
- 0x3f, 0x8b, 0x05, 0x83, 0x42, 0xd1, 0xd0, 0xdc, 0x2f, 0x7a, 0xe5, 0xb6, 0x10,
- 0xb2, 0x11, 0xc0, 0xdc, 0x21, 0x2a, 0x90, 0xff, 0xae, 0x97, 0x71, 0x5a, 0x49,
- 0x81, 0xac, 0x40, 0xf3, 0x3b, 0xb8, 0x59, 0xb2, 0x4f, 0x02, 0x03, 0x01, 0x00,
- 0x01, 0xa3, 0x82, 0x03, 0x21, 0x30, 0x82, 0x03, 0x1d, 0x30, 0x09, 0x06, 0x03,
- 0x55, 0x1d, 0x13, 0x04, 0x02, 0x30, 0x00, 0x30, 0x11, 0x06, 0x09, 0x60, 0x86,
- 0x48, 0x01, 0x86, 0xf8, 0x42, 0x01, 0x01, 0x04, 0x04, 0x03, 0x02, 0x06, 0x40,
- 0x30, 0x0b, 0x06, 0x03, 0x55, 0x1d, 0x0f, 0x04, 0x04, 0x03, 0x02, 0x03, 0xf8,
- 0x30, 0x13, 0x06, 0x03, 0x55, 0x1d, 0x25, 0x04, 0x0c, 0x30, 0x0a, 0x06, 0x08,
- 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x03, 0x01, 0x30, 0x1d, 0x06, 0x03, 0x55,
- 0x1d, 0x0e, 0x04, 0x16, 0x04, 0x14, 0x61, 0x8f, 0x61, 0x34, 0x43, 0x55, 0x14,
- 0x7f, 0x27, 0x09, 0xce, 0x4c, 0x8b, 0xea, 0x9b, 0x7b, 0x19, 0x25, 0xbc, 0x6e,
- 0x30, 0x1f, 0x06, 0x03, 0x55, 0x1d, 0x23, 0x04, 0x18, 0x30, 0x16, 0x80, 0x14,
- 0x0e, 0x07, 0x60, 0xd4, 0x39, 0xc9, 0x1b, 0x5b, 0x5d, 0x90, 0x7b, 0x23, 0xc8,
- 0xd2, 0x34, 0x9d, 0x4a, 0x9a, 0x46, 0x39, 0x30, 0x09, 0x06, 0x03, 0x55, 0x1d,
- 0x11, 0x04, 0x02, 0x30, 0x00, 0x30, 0x1c, 0x06, 0x03, 0x55, 0x1d, 0x12, 0x04,
- 0x15, 0x30, 0x13, 0x81, 0x11, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x6c, 0x40,
- 0x69, 0x70, 0x73, 0x63, 0x61, 0x2e, 0x63, 0x6f, 0x6d, 0x30, 0x72, 0x06, 0x09,
- 0x60, 0x86, 0x48, 0x01, 0x86, 0xf8, 0x42, 0x01, 0x0d, 0x04, 0x65, 0x16, 0x63,
- 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x20,
- 0x49, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x4e,
- 0x4f, 0x54, 0x20, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x45, 0x44, 0x2e,
- 0x20, 0x43, 0x4c, 0x41, 0x53, 0x45, 0x41, 0x31, 0x20, 0x53, 0x65, 0x72, 0x76,
- 0x65, 0x72, 0x20, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74,
- 0x65, 0x20, 0x69, 0x73, 0x73, 0x75, 0x65, 0x64, 0x20, 0x62, 0x79, 0x20, 0x68,
- 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x69, 0x70,
- 0x73, 0x63, 0x61, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x30, 0x2f, 0x06, 0x09, 0x60,
- 0x86, 0x48, 0x01, 0x86, 0xf8, 0x42, 0x01, 0x02, 0x04, 0x22, 0x16, 0x20, 0x68,
- 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x69, 0x70,
- 0x73, 0x63, 0x61, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x69, 0x70, 0x73, 0x63, 0x61,
- 0x32, 0x30, 0x30, 0x32, 0x2f, 0x30, 0x43, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01,
- 0x86, 0xf8, 0x42, 0x01, 0x04, 0x04, 0x36, 0x16, 0x34, 0x68, 0x74, 0x74, 0x70,
- 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x69, 0x70, 0x73, 0x63, 0x61,
- 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x69, 0x70, 0x73, 0x63, 0x61, 0x32, 0x30, 0x30,
- 0x32, 0x2f, 0x69, 0x70, 0x73, 0x63, 0x61, 0x32, 0x30, 0x30, 0x32, 0x43, 0x4c,
- 0x41, 0x53, 0x45, 0x41, 0x31, 0x2e, 0x63, 0x72, 0x6c, 0x30, 0x46, 0x06, 0x09,
- 0x60, 0x86, 0x48, 0x01, 0x86, 0xf8, 0x42, 0x01, 0x03, 0x04, 0x39, 0x16, 0x37,
- 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x69,
- 0x70, 0x73, 0x63, 0x61, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x69, 0x70, 0x73, 0x63,
- 0x61, 0x32, 0x30, 0x30, 0x32, 0x2f, 0x72, 0x65, 0x76, 0x6f, 0x63, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x43, 0x4c, 0x41, 0x53, 0x45, 0x41, 0x31, 0x2e, 0x68, 0x74,
- 0x6d, 0x6c, 0x3f, 0x30, 0x43, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x86, 0xf8,
- 0x42, 0x01, 0x07, 0x04, 0x36, 0x16, 0x34, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a,
- 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x69, 0x70, 0x73, 0x63, 0x61, 0x2e, 0x63,
- 0x6f, 0x6d, 0x2f, 0x69, 0x70, 0x73, 0x63, 0x61, 0x32, 0x30, 0x30, 0x32, 0x2f,
- 0x72, 0x65, 0x6e, 0x65, 0x77, 0x61, 0x6c, 0x43, 0x4c, 0x41, 0x53, 0x45, 0x41,
- 0x31, 0x2e, 0x68, 0x74, 0x6d, 0x6c, 0x3f, 0x30, 0x41, 0x06, 0x09, 0x60, 0x86,
- 0x48, 0x01, 0x86, 0xf8, 0x42, 0x01, 0x08, 0x04, 0x34, 0x16, 0x32, 0x68, 0x74,
- 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x69, 0x70, 0x73,
- 0x63, 0x61, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x69, 0x70, 0x73, 0x63, 0x61, 0x32,
- 0x30, 0x30, 0x32, 0x2f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x4c, 0x41,
- 0x53, 0x45, 0x41, 0x31, 0x2e, 0x68, 0x74, 0x6d, 0x6c, 0x30, 0x81, 0x83, 0x06,
- 0x03, 0x55, 0x1d, 0x1f, 0x04, 0x7c, 0x30, 0x7a, 0x30, 0x39, 0xa0, 0x37, 0xa0,
- 0x35, 0x86, 0x33, 0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77,
- 0x2e, 0x69, 0x70, 0x73, 0x63, 0x61, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x69, 0x70,
- 0x73, 0x63, 0x61, 0x32, 0x30, 0x30, 0x32, 0x2f, 0x69, 0x70, 0x73, 0x63, 0x61,
- 0x32, 0x30, 0x30, 0x32, 0x43, 0x4c, 0x41, 0x53, 0x45, 0x41, 0x31, 0x2e, 0x63,
- 0x72, 0x6c, 0x30, 0x3d, 0xa0, 0x3b, 0xa0, 0x39, 0x86, 0x37, 0x68, 0x74, 0x74,
- 0x70, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x62, 0x61, 0x63, 0x6b, 0x2e, 0x69,
- 0x70, 0x73, 0x63, 0x61, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x69, 0x70, 0x73, 0x63,
- 0x61, 0x32, 0x30, 0x30, 0x32, 0x2f, 0x69, 0x70, 0x73, 0x63, 0x61, 0x32, 0x30,
- 0x30, 0x32, 0x43, 0x4c, 0x41, 0x53, 0x45, 0x41, 0x31, 0x2e, 0x63, 0x72, 0x6c,
- 0x30, 0x32, 0x06, 0x08, 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x01, 0x01, 0x04,
- 0x26, 0x30, 0x24, 0x30, 0x22, 0x06, 0x08, 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07,
- 0x30, 0x01, 0x86, 0x16, 0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f, 0x2f, 0x6f, 0x63,
- 0x73, 0x70, 0x2e, 0x69, 0x70, 0x73, 0x63, 0x61, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
- 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x05,
- 0x05, 0x00, 0x03, 0x81, 0x81, 0x00, 0x68, 0xee, 0x79, 0x97, 0x97, 0xdd, 0x3b,
- 0xef, 0x16, 0x6a, 0x06, 0xf2, 0x14, 0x9a, 0x6e, 0xcd, 0x9e, 0x12, 0xf7, 0xaa,
- 0x83, 0x10, 0xbd, 0xd1, 0x7c, 0x98, 0xfa, 0xc7, 0xae, 0xd4, 0x0e, 0x2c, 0x9e,
- 0x38, 0x05, 0x9d, 0x52, 0x60, 0xa9, 0x99, 0x0a, 0x81, 0xb4, 0x98, 0x90, 0x1d,
- 0xae, 0xbb, 0x4a, 0xd7, 0xb9, 0xdc, 0x88, 0x9e, 0x37, 0x78, 0x41, 0x5b, 0xf7,
- 0x82, 0xa5, 0xf2, 0xba, 0x41, 0x25, 0x5a, 0x90, 0x1a, 0x1e, 0x45, 0x38, 0xa1,
- 0x52, 0x58, 0x75, 0x94, 0x26, 0x44, 0xfb, 0x20, 0x07, 0xba, 0x44, 0xcc, 0xe5,
- 0x4a, 0x2d, 0x72, 0x3f, 0x98, 0x47, 0xf6, 0x26, 0xdc, 0x05, 0x46, 0x05, 0x07,
- 0x63, 0x21, 0xab, 0x46, 0x9b, 0x9c, 0x78, 0xd5, 0x54, 0x5b, 0x3d, 0x0c, 0x1e,
- 0xc8, 0x64, 0x8c, 0xb5, 0x50, 0x23, 0x82, 0x6f, 0xdb, 0xb8, 0x22, 0x1c, 0x43,
- 0x96, 0x07, 0xa8, 0xbb,
-}
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package asn1
-
-import (
- "reflect"
- "strconv"
- "strings"
-)
-
-// ASN.1 objects have metadata preceding them:
-// the tag: the type of the object
-// a flag denoting if this object is compound or not
-// the class type: the namespace of the tag
-// the length of the object, in bytes
-
-// Here are some standard tags and classes
-
-const (
- tagBoolean = 1
- tagInteger = 2
- tagBitString = 3
- tagOctetString = 4
- tagOID = 6
- tagEnum = 10
- tagUTF8String = 12
- tagSequence = 16
- tagSet = 17
- tagPrintableString = 19
- tagT61String = 20
- tagIA5String = 22
- tagUTCTime = 23
- tagGeneralizedTime = 24
- tagGeneralString = 27
-)
-
-const (
- classUniversal = 0
- classApplication = 1
- classContextSpecific = 2
- classPrivate = 3
-)
-
-type tagAndLength struct {
- class, tag, length int
- isCompound bool
-}
-
-// ASN.1 has IMPLICIT and EXPLICIT tags, which can be translated as "instead
-// of" and "in addition to". When not specified, every primitive type has a
-// default tag in the UNIVERSAL class.
-//
-// For example: a BIT STRING is tagged [UNIVERSAL 3] by default (although ASN.1
-// doesn't actually have a UNIVERSAL keyword). However, by saying [IMPLICIT
-// CONTEXT-SPECIFIC 42], that means that the tag is replaced by another.
-//
-// On the other hand, if it said [EXPLICIT CONTEXT-SPECIFIC 10], then an
-// /additional/ tag would wrap the default tag. This explicit tag will have the
-// compound flag set.
-//
-// (This is used in order to remove ambiguity with optional elements.)
-//
-// You can layer EXPLICIT and IMPLICIT tags to an arbitrary depth, however we
-// don't support that here. We support a single layer of EXPLICIT or IMPLICIT
-// tagging with tag strings on the fields of a structure.
-
-// fieldParameters is the parsed representation of tag string from a structure field.
-type fieldParameters struct {
- optional bool // true iff the field is OPTIONAL
- explicit bool // true iff an EXPLICIT tag is in use.
- application bool // true iff an APPLICATION tag is in use.
- defaultValue *int64 // a default value for INTEGER typed fields (maybe nil).
- tag *int // the EXPLICIT or IMPLICIT tag (maybe nil).
- stringType int // the string tag to use when marshaling.
- set bool // true iff this should be encoded as a SET
-
- // Invariants:
- // if explicit is set, tag is non-nil.
-}
-
-// Given a tag string with the format specified in the package comment,
-// parseFieldParameters will parse it into a fieldParameters structure,
-// ignoring unknown parts of the string.
-func parseFieldParameters(str string) (ret fieldParameters) {
- for _, part := range strings.Split(str, ",") {
- switch {
- case part == "optional":
- ret.optional = true
- case part == "explicit":
- ret.explicit = true
- if ret.tag == nil {
- ret.tag = new(int)
- }
- case part == "ia5":
- ret.stringType = tagIA5String
- case part == "printable":
- ret.stringType = tagPrintableString
- case strings.HasPrefix(part, "default:"):
- i, err := strconv.Atoi64(part[8:])
- if err == nil {
- ret.defaultValue = new(int64)
- *ret.defaultValue = i
- }
- case strings.HasPrefix(part, "tag:"):
- i, err := strconv.Atoi(part[4:])
- if err == nil {
- ret.tag = new(int)
- *ret.tag = i
- }
- case part == "set":
- ret.set = true
- case part == "application":
- ret.application = true
- if ret.tag == nil {
- ret.tag = new(int)
- }
- }
- }
- return
-}
-
-// Given a reflected Go type, getUniversalType returns the default tag number
-// and expected compound flag.
-func getUniversalType(t reflect.Type) (tagNumber int, isCompound, ok bool) {
- switch t {
- case objectIdentifierType:
- return tagOID, false, true
- case bitStringType:
- return tagBitString, false, true
- case timeType:
- return tagUTCTime, false, true
- case enumeratedType:
- return tagEnum, false, true
- case bigIntType:
- return tagInteger, false, true
- }
- switch t.Kind() {
- case reflect.Bool:
- return tagBoolean, false, true
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return tagInteger, false, true
- case reflect.Struct:
- return tagSequence, true, true
- case reflect.Slice:
- if t.Elem().Kind() == reflect.Uint8 {
- return tagOctetString, false, true
- }
- if strings.HasSuffix(t.Name(), "SET") {
- return tagSet, true, true
- }
- return tagSequence, true, true
- case reflect.String:
- return tagPrintableString, false, true
- }
- return 0, false, false
-}
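A short illustration of the tag-string options parsed above (an added sketch, not part of the changeset; it assumes the package's post-reorganization import path encoding/asn1 and reuses the implicit/explicit test vectors that appear in marshal_test.go further below):

package main

import (
	"encoding/asn1" // assumed post-reorganization import path
	"fmt"
)

type implicitTagTest struct {
	A int `asn1:"implicit,tag:5"` // "tag:5" without "explicit": the default INTEGER tag is replaced
}

type explicitTagTest struct {
	A int `asn1:"explicit,tag:5"` // "explicit,tag:5": an extra compound [5] tag wraps the INTEGER
}

func main() {
	imp, err1 := asn1.Marshal(implicitTagTest{64})
	exp, err2 := asn1.Marshal(explicitTagTest{64})
	if err1 != nil || err2 != nil {
		panic("marshal failed")
	}
	// Expected: 3003850140 and 3005a503020140, the vectors used in marshal_test.go below.
	fmt.Printf("%x\n%x\n", imp, exp)
}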
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package asn1
-
-import (
- "big"
- "bytes"
- "fmt"
- "io"
- "reflect"
- "time"
-)
-
-// A forkableWriter is an in-memory buffer that can be
-// 'forked' to create new forkableWriters that bracket the
-// original. After
-// pre, post := w.fork();
-// the overall sequence of bytes represented is logically w+pre+post.
-type forkableWriter struct {
- *bytes.Buffer
- pre, post *forkableWriter
-}
-
-func newForkableWriter() *forkableWriter {
- return &forkableWriter{bytes.NewBuffer(nil), nil, nil}
-}
-
-func (f *forkableWriter) fork() (pre, post *forkableWriter) {
- if f.pre != nil || f.post != nil {
- panic("have already forked")
- }
- f.pre = newForkableWriter()
- f.post = newForkableWriter()
- return f.pre, f.post
-}
-
-func (f *forkableWriter) Len() (l int) {
- l += f.Buffer.Len()
- if f.pre != nil {
- l += f.pre.Len()
- }
- if f.post != nil {
- l += f.post.Len()
- }
- return
-}
-
-func (f *forkableWriter) writeTo(out io.Writer) (n int, err error) {
- n, err = out.Write(f.Bytes())
- if err != nil {
- return
- }
-
- var nn int
-
- if f.pre != nil {
- nn, err = f.pre.writeTo(out)
- n += nn
- if err != nil {
- return
- }
- }
-
- if f.post != nil {
- nn, err = f.post.writeTo(out)
- n += nn
- }
- return
-}
-
-func marshalBase128Int(out *forkableWriter, n int64) (err error) {
- if n == 0 {
- err = out.WriteByte(0)
- return
- }
-
- l := 0
- for i := n; i > 0; i >>= 7 {
- l++
- }
-
- for i := l - 1; i >= 0; i-- {
- o := byte(n >> uint(i*7))
- o &= 0x7f
- if i != 0 {
- o |= 0x80
- }
- err = out.WriteByte(o)
- if err != nil {
- return
- }
- }
-
- return nil
-}
-
-func marshalInt64(out *forkableWriter, i int64) (err error) {
- n := int64Length(i)
-
- for ; n > 0; n-- {
- err = out.WriteByte(byte(i >> uint((n-1)*8)))
- if err != nil {
- return
- }
- }
-
- return nil
-}
-
-func int64Length(i int64) (numBytes int) {
- numBytes = 1
-
- for i > 127 {
- numBytes++
- i >>= 8
- }
-
- for i < -128 {
- numBytes++
- i >>= 8
- }
-
- return
-}
-
-func marshalBigInt(out *forkableWriter, n *big.Int) (err error) {
- if n.Sign() < 0 {
- // A negative number has to be converted to two's-complement
- // form. So we'll subtract 1 and invert. If the
- // most-significant-bit isn't set then we'll need to pad the
- // beginning with 0xff in order to keep the number negative.
- nMinus1 := new(big.Int).Neg(n)
- nMinus1.Sub(nMinus1, bigOne)
- bytes := nMinus1.Bytes()
- for i := range bytes {
- bytes[i] ^= 0xff
- }
- if len(bytes) == 0 || bytes[0]&0x80 == 0 {
- err = out.WriteByte(0xff)
- if err != nil {
- return
- }
- }
- _, err = out.Write(bytes)
- } else if n.Sign() == 0 {
- // Zero is written as a single 0 zero rather than no bytes.
- err = out.WriteByte(0x00)
- } else {
- bytes := n.Bytes()
- if len(bytes) > 0 && bytes[0]&0x80 != 0 {
- // We'll have to pad this with 0x00 in order to stop it
- // looking like a negative number.
- err = out.WriteByte(0)
- if err != nil {
- return
- }
- }
- _, err = out.Write(bytes)
- }
- return
-}
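As a concrete instance of the negative branch above (an added note, not in the original source): for n = -256, nMinus1 is 255, whose single byte 0xff inverts to 0x00; since that leading byte has its top bit clear, a 0xff pad is written first, so the INTEGER content bytes come out as ff 00.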
-
-func marshalLength(out *forkableWriter, i int) (err error) {
- n := lengthLength(i)
-
- for ; n > 0; n-- {
- err = out.WriteByte(byte(i >> uint((n-1)*8)))
- if err != nil {
- return
- }
- }
-
- return nil
-}
-
-func lengthLength(i int) (numBytes int) {
- numBytes = 1
- for i > 255 {
- numBytes++
- i >>= 8
- }
- return
-}
-
-func marshalTagAndLength(out *forkableWriter, t tagAndLength) (err error) {
- b := uint8(t.class) << 6
- if t.isCompound {
- b |= 0x20
- }
- if t.tag >= 31 {
- b |= 0x1f
- err = out.WriteByte(b)
- if err != nil {
- return
- }
- err = marshalBase128Int(out, int64(t.tag))
- if err != nil {
- return
- }
- } else {
- b |= uint8(t.tag)
- err = out.WriteByte(b)
- if err != nil {
- return
- }
- }
-
- if t.length >= 128 {
- l := lengthLength(t.length)
- err = out.WriteByte(0x80 | byte(l))
- if err != nil {
- return
- }
- err = marshalLength(out, t.length)
- if err != nil {
- return
- }
- } else {
- err = out.WriteByte(byte(t.length))
- if err != nil {
- return
- }
- }
-
- return nil
-}
-
-func marshalBitString(out *forkableWriter, b BitString) (err error) {
- paddingBits := byte((8 - b.BitLength%8) % 8)
- err = out.WriteByte(paddingBits)
- if err != nil {
- return
- }
- _, err = out.Write(b.Bytes)
- return
-}
-
-func marshalObjectIdentifier(out *forkableWriter, oid []int) (err error) {
- if len(oid) < 2 || oid[0] > 6 || oid[1] >= 40 {
- return StructuralError{"invalid object identifier"}
- }
-
- err = out.WriteByte(byte(oid[0]*40 + oid[1]))
- if err != nil {
- return
- }
- for i := 2; i < len(oid); i++ {
- err = marshalBase128Int(out, int64(oid[i]))
- if err != nil {
- return
- }
- }
-
- return
-}
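To make the packing above concrete (added note): for the OBJECT IDENTIFIER 1.2.3.4, the first byte is 1*40+2 = 0x2a and the remaining components follow as base-128 integers, giving content bytes 2a 03 04; with the tag and length prepended this is the 06032a0304 vector that appears in marshal_test.go below.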
-
-func marshalPrintableString(out *forkableWriter, s string) (err error) {
- b := []byte(s)
- for _, c := range b {
- if !isPrintable(c) {
- return StructuralError{"PrintableString contains invalid character"}
- }
- }
-
- _, err = out.Write(b)
- return
-}
-
-func marshalIA5String(out *forkableWriter, s string) (err error) {
- b := []byte(s)
- for _, c := range b {
- if c > 127 {
- return StructuralError{"IA5String contains invalid character"}
- }
- }
-
- _, err = out.Write(b)
- return
-}
-
-func marshalTwoDigits(out *forkableWriter, v int) (err error) {
- err = out.WriteByte(byte('0' + (v/10)%10))
- if err != nil {
- return
- }
- return out.WriteByte(byte('0' + v%10))
-}
-
-func marshalUTCTime(out *forkableWriter, t *time.Time) (err error) {
- switch {
- case 1950 <= t.Year && t.Year < 2000:
- err = marshalTwoDigits(out, int(t.Year-1900))
- case 2000 <= t.Year && t.Year < 2050:
- err = marshalTwoDigits(out, int(t.Year-2000))
- default:
- return StructuralError{"Cannot represent time as UTCTime"}
- }
-
- if err != nil {
- return
- }
-
- err = marshalTwoDigits(out, t.Month)
- if err != nil {
- return
- }
-
- err = marshalTwoDigits(out, t.Day)
- if err != nil {
- return
- }
-
- err = marshalTwoDigits(out, t.Hour)
- if err != nil {
- return
- }
-
- err = marshalTwoDigits(out, t.Minute)
- if err != nil {
- return
- }
-
- err = marshalTwoDigits(out, t.Second)
- if err != nil {
- return
- }
-
- switch {
- case t.ZoneOffset/60 == 0:
- err = out.WriteByte('Z')
- return
- case t.ZoneOffset > 0:
- err = out.WriteByte('+')
- case t.ZoneOffset < 0:
- err = out.WriteByte('-')
- }
-
- if err != nil {
- return
- }
-
- offsetMinutes := t.ZoneOffset / 60
- if offsetMinutes < 0 {
- offsetMinutes = -offsetMinutes
- }
-
- err = marshalTwoDigits(out, offsetMinutes/60)
- if err != nil {
- return
- }
-
- err = marshalTwoDigits(out, offsetMinutes%60)
- return
-}
-
-func stripTagAndLength(in []byte) []byte {
- _, offset, err := parseTagAndLength(in, 0)
- if err != nil {
- return in
- }
- return in[offset:]
-}
-
-func marshalBody(out *forkableWriter, value reflect.Value, params fieldParameters) (err error) {
- switch value.Type() {
- case timeType:
- return marshalUTCTime(out, value.Interface().(*time.Time))
- case bitStringType:
- return marshalBitString(out, value.Interface().(BitString))
- case objectIdentifierType:
- return marshalObjectIdentifier(out, value.Interface().(ObjectIdentifier))
- case bigIntType:
- return marshalBigInt(out, value.Interface().(*big.Int))
- }
-
- switch v := value; v.Kind() {
- case reflect.Bool:
- if v.Bool() {
- return out.WriteByte(255)
- } else {
- return out.WriteByte(0)
- }
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return marshalInt64(out, int64(v.Int()))
- case reflect.Struct:
- t := v.Type()
-
- startingField := 0
-
- // If the first element of the structure is a non-empty
- // RawContents, then we don't bother serializing the rest.
- if t.NumField() > 0 && t.Field(0).Type == rawContentsType {
- s := v.Field(0)
- if s.Len() > 0 {
- bytes := make([]byte, s.Len())
- for i := 0; i < s.Len(); i++ {
- bytes[i] = uint8(s.Index(i).Uint())
- }
- /* The RawContents will contain the tag and
- * length fields but we'll also be writing
- * those ourselves, so we strip them out of
- * bytes */
- _, err = out.Write(stripTagAndLength(bytes))
- return
- } else {
- startingField = 1
- }
- }
-
- for i := startingField; i < t.NumField(); i++ {
- var pre *forkableWriter
- pre, out = out.fork()
- err = marshalField(pre, v.Field(i), parseFieldParameters(t.Field(i).Tag.Get("asn1")))
- if err != nil {
- return
- }
- }
- return
- case reflect.Slice:
- sliceType := v.Type()
- if sliceType.Elem().Kind() == reflect.Uint8 {
- bytes := make([]byte, v.Len())
- for i := 0; i < v.Len(); i++ {
- bytes[i] = uint8(v.Index(i).Uint())
- }
- _, err = out.Write(bytes)
- return
- }
-
- var params fieldParameters
- for i := 0; i < v.Len(); i++ {
- var pre *forkableWriter
- pre, out = out.fork()
- err = marshalField(pre, v.Index(i), params)
- if err != nil {
- return
- }
- }
- return
- case reflect.String:
- if params.stringType == tagIA5String {
- return marshalIA5String(out, v.String())
- } else {
- return marshalPrintableString(out, v.String())
- }
- return
- }
-
- return StructuralError{"unknown Go type"}
-}
-
-func marshalField(out *forkableWriter, v reflect.Value, params fieldParameters) (err error) {
- // If the field is an interface{} then recurse into it.
- if v.Kind() == reflect.Interface && v.Type().NumMethod() == 0 {
- return marshalField(out, v.Elem(), params)
- }
-
- if params.optional && reflect.DeepEqual(v.Interface(), reflect.Zero(v.Type()).Interface()) {
- return
- }
-
- if v.Type() == rawValueType {
- rv := v.Interface().(RawValue)
- if len(rv.FullBytes) != 0 {
- _, err = out.Write(rv.FullBytes)
- } else {
- err = marshalTagAndLength(out, tagAndLength{rv.Class, rv.Tag, len(rv.Bytes), rv.IsCompound})
- if err != nil {
- return
- }
- _, err = out.Write(rv.Bytes)
- }
- return
- }
-
- tag, isCompound, ok := getUniversalType(v.Type())
- if !ok {
- err = StructuralError{fmt.Sprintf("unknown Go type: %v", v.Type())}
- return
- }
- class := classUniversal
-
- if params.stringType != 0 {
- if tag != tagPrintableString {
- return StructuralError{"Explicit string type given to non-string member"}
- }
- tag = params.stringType
- }
-
- if params.set {
- if tag != tagSequence {
- return StructuralError{"Non sequence tagged as set"}
- }
- tag = tagSet
- }
-
- tags, body := out.fork()
-
- err = marshalBody(body, v, params)
- if err != nil {
- return
- }
-
- bodyLen := body.Len()
-
- var explicitTag *forkableWriter
- if params.explicit {
- explicitTag, tags = tags.fork()
- }
-
- if !params.explicit && params.tag != nil {
- // implicit tag.
- tag = *params.tag
- class = classContextSpecific
- }
-
- err = marshalTagAndLength(tags, tagAndLength{class, tag, bodyLen, isCompound})
- if err != nil {
- return
- }
-
- if params.explicit {
- err = marshalTagAndLength(explicitTag, tagAndLength{
- class: classContextSpecific,
- tag: *params.tag,
- length: bodyLen + tags.Len(),
- isCompound: true,
- })
- }
-
- return nil
-}
-
-// Marshal returns the ASN.1 encoding of val.
-func Marshal(val interface{}) ([]byte, error) {
- var out bytes.Buffer
- v := reflect.ValueOf(val)
- f := newForkableWriter()
- err := marshalField(f, v, fieldParameters{})
- if err != nil {
- return nil, err
- }
- _, err = f.writeTo(&out)
- return out.Bytes(), nil
-}
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package asn1
-
-import (
- "bytes"
- "encoding/hex"
- "testing"
- "time"
-)
-
-type intStruct struct {
- A int
-}
-
-type twoIntStruct struct {
- A int
- B int
-}
-
-type nestedStruct struct {
- A intStruct
-}
-
-type rawContentsStruct struct {
- Raw RawContent
- A int
-}
-
-type implicitTagTest struct {
- A int `asn1:"implicit,tag:5"`
-}
-
-type explicitTagTest struct {
- A int `asn1:"explicit,tag:5"`
-}
-
-type ia5StringTest struct {
- A string `asn1:"ia5"`
-}
-
-type printableStringTest struct {
- A string `asn1:"printable"`
-}
-
-type optionalRawValueTest struct {
- A RawValue `asn1:"optional"`
-}
-
-type testSET []int
-
-func setPST(t *time.Time) *time.Time {
- t.ZoneOffset = -28800
- return t
-}
-
-type marshalTest struct {
- in interface{}
- out string // hex encoded
-}
-
-var marshalTests = []marshalTest{
- {10, "02010a"},
- {127, "02017f"},
- {128, "02020080"},
- {-128, "020180"},
- {-129, "0202ff7f"},
- {intStruct{64}, "3003020140"},
- {twoIntStruct{64, 65}, "3006020140020141"},
- {nestedStruct{intStruct{127}}, "3005300302017f"},
- {[]byte{1, 2, 3}, "0403010203"},
- {implicitTagTest{64}, "3003850140"},
- {explicitTagTest{64}, "3005a503020140"},
- {time.SecondsToUTC(0), "170d3730303130313030303030305a"},
- {time.SecondsToUTC(1258325776), "170d3039313131353232353631365a"},
- {setPST(time.SecondsToUTC(1258325776)), "17113039313131353232353631362d30383030"},
- {BitString{[]byte{0x80}, 1}, "03020780"},
- {BitString{[]byte{0x81, 0xf0}, 12}, "03030481f0"},
- {ObjectIdentifier([]int{1, 2, 3, 4}), "06032a0304"},
- {ObjectIdentifier([]int{1, 2, 840, 133549, 1, 1, 5}), "06092a864888932d010105"},
- {"test", "130474657374"},
- {
- "" +
- "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" +
- "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" +
- "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" +
- "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", // This is 127 times 'x'
- "137f" +
- "7878787878787878787878787878787878787878787878787878787878787878" +
- "7878787878787878787878787878787878787878787878787878787878787878" +
- "7878787878787878787878787878787878787878787878787878787878787878" +
- "78787878787878787878787878787878787878787878787878787878787878",
- },
- {
- "" +
- "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" +
- "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" +
- "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" +
- "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", // This is 128 times 'x'
- "138180" +
- "7878787878787878787878787878787878787878787878787878787878787878" +
- "7878787878787878787878787878787878787878787878787878787878787878" +
- "7878787878787878787878787878787878787878787878787878787878787878" +
- "7878787878787878787878787878787878787878787878787878787878787878",
- },
- {ia5StringTest{"test"}, "3006160474657374"},
- {optionalRawValueTest{}, "3000"},
- {printableStringTest{"test"}, "3006130474657374"},
- {printableStringTest{"test*"}, "30071305746573742a"},
- {rawContentsStruct{nil, 64}, "3003020140"},
- {rawContentsStruct{[]byte{0x30, 3, 1, 2, 3}, 64}, "3003010203"},
- {RawValue{Tag: 1, Class: 2, IsCompound: false, Bytes: []byte{1, 2, 3}}, "8103010203"},
- {testSET([]int{10}), "310302010a"},
-}
-
-func TestMarshal(t *testing.T) {
- for i, test := range marshalTests {
- data, err := Marshal(test.in)
- if err != nil {
- t.Errorf("#%d failed: %s", i, err)
- }
- out, _ := hex.DecodeString(test.out)
- if bytes.Compare(out, data) != 0 {
- t.Errorf("#%d got: %x want %x", i, data, out)
- }
- }
-}
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file provides Go implementations of elementary multi-precision
-// arithmetic operations on word vectors. Needed for platforms without
-// assembly implementations of these routines.
-
-package big
-
-// TODO(gri) Decide if Word needs to remain exported.
-
-type Word uintptr
-
-const (
- // Compute the size _S of a Word in bytes.
- _m = ^Word(0)
- _logS = _m>>8&1 + _m>>16&1 + _m>>32&1
- _S = 1 << _logS
-
- _W = _S << 3 // word size in bits
- _B = 1 << _W // digit base
- _M = _B - 1 // digit mask
-
- _W2 = _W / 2 // half word size in bits
- _B2 = 1 << _W2 // half digit base
- _M2 = _B2 - 1 // half digit mask
-)
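Worked out for the two interesting cases (added note): on a 64-bit platform ^Word(0) has all bits set, so each of the three shift-and-mask terms is 1, _logS is 3, _S is 8 bytes and _W is 64 bits; on a 32-bit platform the _m>>32 term is 0, so _logS is 2, _S is 4 and _W is 32.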
-
-// ----------------------------------------------------------------------------
-// Elementary operations on words
-//
-// These operations are used by the vector operations below.
-
-// z1<<_W + z0 = x+y+c, with c == 0 or 1
-func addWW_g(x, y, c Word) (z1, z0 Word) {
- yc := y + c
- z0 = x + yc
- if z0 < x || yc < y {
- z1 = 1
- }
- return
-}
-
-// z1<<_W + z0 = x-y-c, with c == 0 or 1
-func subWW_g(x, y, c Word) (z1, z0 Word) {
- yc := y + c
- z0 = x - yc
- if z0 > x || yc < y {
- z1 = 1
- }
- return
-}
-
-// z1<<_W + z0 = x*y
-func mulWW(x, y Word) (z1, z0 Word) { return mulWW_g(x, y) }
-// Adapted from Warren, Hacker's Delight, p. 132.
-func mulWW_g(x, y Word) (z1, z0 Word) {
- x0 := x & _M2
- x1 := x >> _W2
- y0 := y & _M2
- y1 := y >> _W2
- w0 := x0 * y0
- t := x1*y0 + w0>>_W2
- w1 := t & _M2
- w2 := t >> _W2
- w1 += x0 * y1
- z1 = x1*y1 + w2 + w1>>_W2
- z0 = x * y
- return
-}
-
-// z1<<_W + z0 = x*y + c
-func mulAddWWW_g(x, y, c Word) (z1, z0 Word) {
- z1, zz0 := mulWW(x, y)
- if z0 = zz0 + c; z0 < zz0 {
- z1++
- }
- return
-}
-
-// Length of x in bits.
-func bitLen(x Word) (n int) {
- for ; x >= 0x100; x >>= 8 {
- n += 8
- }
- for ; x > 0; x >>= 1 {
- n++
- }
- return
-}
-
-// log2 computes the integer binary logarithm of x.
-// The result is the integer n for which 2^n <= x < 2^(n+1).
-// If x == 0, the result is -1.
-func log2(x Word) int {
- return bitLen(x) - 1
-}
-
-// Number of leading zeros in x.
-func leadingZeros(x Word) uint {
- return uint(_W - bitLen(x))
-}
-
-// q = (u1<<_W + u0 - r)/y
-func divWW(x1, x0, y Word) (q, r Word) { return divWW_g(x1, x0, y) }
-// Adapted from Warren, Hacker's Delight, p. 152.
-func divWW_g(u1, u0, v Word) (q, r Word) {
- if u1 >= v {
- return 1<<_W - 1, 1<<_W - 1
- }
-
- s := leadingZeros(v)
- v <<= s
-
- vn1 := v >> _W2
- vn0 := v & _M2
- un32 := u1<<s | u0>>(_W-s)
- un10 := u0 << s
- un1 := un10 >> _W2
- un0 := un10 & _M2
- q1 := un32 / vn1
- rhat := un32 - q1*vn1
-
-again1:
- if q1 >= _B2 || q1*vn0 > _B2*rhat+un1 {
- q1--
- rhat += vn1
- if rhat < _B2 {
- goto again1
- }
- }
-
- un21 := un32*_B2 + un1 - q1*v
- q0 := un21 / vn1
- rhat = un21 - q0*vn1
-
-again2:
- if q0 >= _B2 || q0*vn0 > _B2*rhat+un0 {
- q0--
- rhat += vn1
- if rhat < _B2 {
- goto again2
- }
- }
-
- return q1*_B2 + q0, (un21*_B2 + un0 - q0*v) >> s
-}
-
-func addVV(z, x, y []Word) (c Word) { return addVV_g(z, x, y) }
-func addVV_g(z, x, y []Word) (c Word) {
- for i := range z {
- c, z[i] = addWW_g(x[i], y[i], c)
- }
- return
-}
-
-func subVV(z, x, y []Word) (c Word) { return subVV_g(z, x, y) }
-func subVV_g(z, x, y []Word) (c Word) {
- for i := range z {
- c, z[i] = subWW_g(x[i], y[i], c)
- }
- return
-}
-
-func addVW(z, x []Word, y Word) (c Word) { return addVW_g(z, x, y) }
-func addVW_g(z, x []Word, y Word) (c Word) {
- c = y
- for i := range z {
- c, z[i] = addWW_g(x[i], c, 0)
- }
- return
-}
-
-func subVW(z, x []Word, y Word) (c Word) { return subVW_g(z, x, y) }
-func subVW_g(z, x []Word, y Word) (c Word) {
- c = y
- for i := range z {
- c, z[i] = subWW_g(x[i], c, 0)
- }
- return
-}
-
-func shlVU(z, x []Word, s uint) (c Word) { return shlVU_g(z, x, s) }
-func shlVU_g(z, x []Word, s uint) (c Word) {
- if n := len(z); n > 0 {
- ŝ := _W - s
- w1 := x[n-1]
- c = w1 >> ŝ
- for i := n - 1; i > 0; i-- {
- w := w1
- w1 = x[i-1]
- z[i] = w<<s | w1>>ŝ
- }
- z[0] = w1 << s
- }
- return
-}
-
-func shrVU(z, x []Word, s uint) (c Word) { return shrVU_g(z, x, s) }
-func shrVU_g(z, x []Word, s uint) (c Word) {
- if n := len(z); n > 0 {
- ŝ := _W - s
- w1 := x[0]
- c = w1 << ŝ
- for i := 0; i < n-1; i++ {
- w := w1
- w1 = x[i+1]
- z[i] = w>>s | w1<<ŝ
- }
- z[n-1] = w1 >> s
- }
- return
-}
-
-func mulAddVWW(z, x []Word, y, r Word) (c Word) { return mulAddVWW_g(z, x, y, r) }
-func mulAddVWW_g(z, x []Word, y, r Word) (c Word) {
- c = r
- for i := range z {
- c, z[i] = mulAddWWW_g(x[i], y, c)
- }
- return
-}
-
-func addMulVVW(z, x []Word, y Word) (c Word) { return addMulVVW_g(z, x, y) }
-func addMulVVW_g(z, x []Word, y Word) (c Word) {
- for i := range z {
- z1, z0 := mulAddWWW_g(x[i], y, z[i])
- c, z[i] = addWW_g(z0, c, 0)
- c += z1
- }
- return
-}
-
-func divWVW(z []Word, xn Word, x []Word, y Word) (r Word) { return divWVW_g(z, xn, x, y) }
-func divWVW_g(z []Word, xn Word, x []Word, y Word) (r Word) {
- r = xn
- for i := len(z) - 1; i >= 0; i-- {
- z[i], r = divWW_g(r, x[i], y)
- }
- return
-}
+++ /dev/null
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package big
-
-// implemented in arith_$GOARCH.s
-func mulWW(x, y Word) (z1, z0 Word)
-func divWW(x1, x0, y Word) (q, r Word)
-func addVV(z, x, y []Word) (c Word)
-func subVV(z, x, y []Word) (c Word)
-func addVW(z, x []Word, y Word) (c Word)
-func subVW(z, x []Word, y Word) (c Word)
-func shlVU(z, x []Word, s uint) (c Word)
-func shrVU(z, x []Word, s uint) (c Word)
-func mulAddVWW(z, x []Word, y, r Word) (c Word)
-func addMulVVW(z, x []Word, y Word) (c Word)
-func divWVW(z []Word, xn Word, x []Word, y Word) (r Word)
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package big
-
-import "testing"
-
-type funWW func(x, y, c Word) (z1, z0 Word)
-type argWW struct {
- x, y, c, z1, z0 Word
-}
-
-var sumWW = []argWW{
- {0, 0, 0, 0, 0},
- {0, 1, 0, 0, 1},
- {0, 0, 1, 0, 1},
- {0, 1, 1, 0, 2},
- {12345, 67890, 0, 0, 80235},
- {12345, 67890, 1, 0, 80236},
- {_M, 1, 0, 1, 0},
- {_M, 0, 1, 1, 0},
- {_M, 1, 1, 1, 1},
- {_M, _M, 0, 1, _M - 1},
- {_M, _M, 1, 1, _M},
-}
-
-func testFunWW(t *testing.T, msg string, f funWW, a argWW) {
- z1, z0 := f(a.x, a.y, a.c)
- if z1 != a.z1 || z0 != a.z0 {
- t.Errorf("%s%+v\n\tgot z1:z0 = %#x:%#x; want %#x:%#x", msg, a, z1, z0, a.z1, a.z0)
- }
-}
-
-func TestFunWW(t *testing.T) {
- for _, a := range sumWW {
- arg := a
- testFunWW(t, "addWW_g", addWW_g, arg)
-
- arg = argWW{a.y, a.x, a.c, a.z1, a.z0}
- testFunWW(t, "addWW_g symmetric", addWW_g, arg)
-
- arg = argWW{a.z0, a.x, a.c, a.z1, a.y}
- testFunWW(t, "subWW_g", subWW_g, arg)
-
- arg = argWW{a.z0, a.y, a.c, a.z1, a.x}
- testFunWW(t, "subWW_g symmetric", subWW_g, arg)
- }
-}
-
-type funVV func(z, x, y []Word) (c Word)
-type argVV struct {
- z, x, y nat
- c Word
-}
-
-var sumVV = []argVV{
- {},
- {nat{0}, nat{0}, nat{0}, 0},
- {nat{1}, nat{1}, nat{0}, 0},
- {nat{0}, nat{_M}, nat{1}, 1},
- {nat{80235}, nat{12345}, nat{67890}, 0},
- {nat{_M - 1}, nat{_M}, nat{_M}, 1},
- {nat{0, 0, 0, 0}, nat{_M, _M, _M, _M}, nat{1, 0, 0, 0}, 1},
- {nat{0, 0, 0, _M}, nat{_M, _M, _M, _M - 1}, nat{1, 0, 0, 0}, 0},
- {nat{0, 0, 0, 0}, nat{_M, 0, _M, 0}, nat{1, _M, 0, _M}, 1},
-}
-
-func testFunVV(t *testing.T, msg string, f funVV, a argVV) {
- z := make(nat, len(a.z))
- c := f(z, a.x, a.y)
- for i, zi := range z {
- if zi != a.z[i] {
- t.Errorf("%s%+v\n\tgot z[%d] = %#x; want %#x", msg, a, i, zi, a.z[i])
- break
- }
- }
- if c != a.c {
- t.Errorf("%s%+v\n\tgot c = %#x; want %#x", msg, a, c, a.c)
- }
-}
-
-func TestFunVV(t *testing.T) {
- for _, a := range sumVV {
- arg := a
- testFunVV(t, "addVV_g", addVV_g, arg)
- testFunVV(t, "addVV", addVV, arg)
-
- arg = argVV{a.z, a.y, a.x, a.c}
- testFunVV(t, "addVV_g symmetric", addVV_g, arg)
- testFunVV(t, "addVV symmetric", addVV, arg)
-
- arg = argVV{a.x, a.z, a.y, a.c}
- testFunVV(t, "subVV_g", subVV_g, arg)
- testFunVV(t, "subVV", subVV, arg)
-
- arg = argVV{a.y, a.z, a.x, a.c}
- testFunVV(t, "subVV_g symmetric", subVV_g, arg)
- testFunVV(t, "subVV symmetric", subVV, arg)
- }
-}
-
-type funVW func(z, x []Word, y Word) (c Word)
-type argVW struct {
- z, x nat
- y Word
- c Word
-}
-
-var sumVW = []argVW{
- {},
- {nil, nil, 2, 2},
- {nat{0}, nat{0}, 0, 0},
- {nat{1}, nat{0}, 1, 0},
- {nat{1}, nat{1}, 0, 0},
- {nat{0}, nat{_M}, 1, 1},
- {nat{0, 0, 0, 0}, nat{_M, _M, _M, _M}, 1, 1},
-}
-
-var prodVW = []argVW{
- {},
- {nat{0}, nat{0}, 0, 0},
- {nat{0}, nat{_M}, 0, 0},
- {nat{0}, nat{0}, _M, 0},
- {nat{1}, nat{1}, 1, 0},
- {nat{22793}, nat{991}, 23, 0},
- {nat{0, 0, 0, 22793}, nat{0, 0, 0, 991}, 23, 0},
- {nat{0, 0, 0, 0}, nat{7893475, 7395495, 798547395, 68943}, 0, 0},
- {nat{0, 0, 0, 0}, nat{0, 0, 0, 0}, 894375984, 0},
- {nat{_M << 1 & _M}, nat{_M}, 1 << 1, _M >> (_W - 1)},
- {nat{_M << 7 & _M}, nat{_M}, 1 << 7, _M >> (_W - 7)},
- {nat{_M << 7 & _M, _M, _M, _M}, nat{_M, _M, _M, _M}, 1 << 7, _M >> (_W - 7)},
-}
-
-var lshVW = []argVW{
- {},
- {nat{0}, nat{0}, 0, 0},
- {nat{0}, nat{0}, 1, 0},
- {nat{0}, nat{0}, 20, 0},
-
- {nat{_M}, nat{_M}, 0, 0},
- {nat{_M << 1 & _M}, nat{_M}, 1, 1},
- {nat{_M << 20 & _M}, nat{_M}, 20, _M >> (_W - 20)},
-
- {nat{_M, _M, _M}, nat{_M, _M, _M}, 0, 0},
- {nat{_M << 1 & _M, _M, _M}, nat{_M, _M, _M}, 1, 1},
- {nat{_M << 20 & _M, _M, _M}, nat{_M, _M, _M}, 20, _M >> (_W - 20)},
-}
-
-var rshVW = []argVW{
- {},
- {nat{0}, nat{0}, 0, 0},
- {nat{0}, nat{0}, 1, 0},
- {nat{0}, nat{0}, 20, 0},
-
- {nat{_M}, nat{_M}, 0, 0},
- {nat{_M >> 1}, nat{_M}, 1, _M << (_W - 1) & _M},
- {nat{_M >> 20}, nat{_M}, 20, _M << (_W - 20) & _M},
-
- {nat{_M, _M, _M}, nat{_M, _M, _M}, 0, 0},
- {nat{_M, _M, _M >> 1}, nat{_M, _M, _M}, 1, _M << (_W - 1) & _M},
- {nat{_M, _M, _M >> 20}, nat{_M, _M, _M}, 20, _M << (_W - 20) & _M},
-}
-
-func testFunVW(t *testing.T, msg string, f funVW, a argVW) {
- z := make(nat, len(a.z))
- c := f(z, a.x, a.y)
- for i, zi := range z {
- if zi != a.z[i] {
- t.Errorf("%s%+v\n\tgot z[%d] = %#x; want %#x", msg, a, i, zi, a.z[i])
- break
- }
- }
- if c != a.c {
- t.Errorf("%s%+v\n\tgot c = %#x; want %#x", msg, a, c, a.c)
- }
-}
-
-func makeFunVW(f func(z, x []Word, s uint) (c Word)) funVW {
- return func(z, x []Word, s Word) (c Word) {
- return f(z, x, uint(s))
- }
-}
-
-func TestFunVW(t *testing.T) {
- for _, a := range sumVW {
- arg := a
- testFunVW(t, "addVW_g", addVW_g, arg)
- testFunVW(t, "addVW", addVW, arg)
-
- arg = argVW{a.x, a.z, a.y, a.c}
- testFunVW(t, "subVW_g", subVW_g, arg)
- testFunVW(t, "subVW", subVW, arg)
- }
-
- shlVW_g := makeFunVW(shlVU_g)
- shlVW := makeFunVW(shlVU)
- for _, a := range lshVW {
- arg := a
- testFunVW(t, "shlVU_g", shlVW_g, arg)
- testFunVW(t, "shlVU", shlVW, arg)
- }
-
- shrVW_g := makeFunVW(shrVU_g)
- shrVW := makeFunVW(shrVU)
- for _, a := range rshVW {
- arg := a
- testFunVW(t, "shrVU_g", shrVW_g, arg)
- testFunVW(t, "shrVU", shrVW, arg)
- }
-}
-
-type funVWW func(z, x []Word, y, r Word) (c Word)
-type argVWW struct {
- z, x nat
- y, r Word
- c Word
-}
-
-var prodVWW = []argVWW{
- {},
- {nat{0}, nat{0}, 0, 0, 0},
- {nat{991}, nat{0}, 0, 991, 0},
- {nat{0}, nat{_M}, 0, 0, 0},
- {nat{991}, nat{_M}, 0, 991, 0},
- {nat{0}, nat{0}, _M, 0, 0},
- {nat{991}, nat{0}, _M, 991, 0},
- {nat{1}, nat{1}, 1, 0, 0},
- {nat{992}, nat{1}, 1, 991, 0},
- {nat{22793}, nat{991}, 23, 0, 0},
- {nat{22800}, nat{991}, 23, 7, 0},
- {nat{0, 0, 0, 22793}, nat{0, 0, 0, 991}, 23, 0, 0},
- {nat{7, 0, 0, 22793}, nat{0, 0, 0, 991}, 23, 7, 0},
- {nat{0, 0, 0, 0}, nat{7893475, 7395495, 798547395, 68943}, 0, 0, 0},
- {nat{991, 0, 0, 0}, nat{7893475, 7395495, 798547395, 68943}, 0, 991, 0},
- {nat{0, 0, 0, 0}, nat{0, 0, 0, 0}, 894375984, 0, 0},
- {nat{991, 0, 0, 0}, nat{0, 0, 0, 0}, 894375984, 991, 0},
- {nat{_M << 1 & _M}, nat{_M}, 1 << 1, 0, _M >> (_W - 1)},
- {nat{_M<<1&_M + 1}, nat{_M}, 1 << 1, 1, _M >> (_W - 1)},
- {nat{_M << 7 & _M}, nat{_M}, 1 << 7, 0, _M >> (_W - 7)},
- {nat{_M<<7&_M + 1<<6}, nat{_M}, 1 << 7, 1 << 6, _M >> (_W - 7)},
- {nat{_M << 7 & _M, _M, _M, _M}, nat{_M, _M, _M, _M}, 1 << 7, 0, _M >> (_W - 7)},
- {nat{_M<<7&_M + 1<<6, _M, _M, _M}, nat{_M, _M, _M, _M}, 1 << 7, 1 << 6, _M >> (_W - 7)},
-}
-
-func testFunVWW(t *testing.T, msg string, f funVWW, a argVWW) {
- z := make(nat, len(a.z))
- c := f(z, a.x, a.y, a.r)
- for i, zi := range z {
- if zi != a.z[i] {
- t.Errorf("%s%+v\n\tgot z[%d] = %#x; want %#x", msg, a, i, zi, a.z[i])
- break
- }
- }
- if c != a.c {
- t.Errorf("%s%+v\n\tgot c = %#x; want %#x", msg, a, c, a.c)
- }
-}
-
-// TODO(gri) mulAddVWW and divWVW are symmetric operations but
-// their signature is not symmetric. Try to unify.
-
-type funWVW func(z []Word, xn Word, x []Word, y Word) (r Word)
-type argWVW struct {
- z nat
- xn Word
- x nat
- y Word
- r Word
-}
-
-func testFunWVW(t *testing.T, msg string, f funWVW, a argWVW) {
- z := make(nat, len(a.z))
- r := f(z, a.xn, a.x, a.y)
- for i, zi := range z {
- if zi != a.z[i] {
- t.Errorf("%s%+v\n\tgot z[%d] = %#x; want %#x", msg, a, i, zi, a.z[i])
- break
- }
- }
- if r != a.r {
- t.Errorf("%s%+v\n\tgot r = %#x; want %#x", msg, a, r, a.r)
- }
-}
-
-func TestFunVWW(t *testing.T) {
- for _, a := range prodVWW {
- arg := a
- testFunVWW(t, "mulAddVWW_g", mulAddVWW_g, arg)
- testFunVWW(t, "mulAddVWW", mulAddVWW, arg)
-
- if a.y != 0 && a.r < a.y {
- arg := argWVW{a.x, a.c, a.z, a.y, a.r}
- testFunWVW(t, "divWVW_g", divWVW_g, arg)
- testFunWVW(t, "divWVW", divWVW, arg)
- }
- }
-}
-
-var mulWWTests = []struct {
- x, y Word
- q, r Word
-}{
- {_M, _M, _M - 1, 1},
- // 32 bit only: {0xc47dfa8c, 50911, 0x98a4, 0x998587f4},
-}
-
-func TestMulWW(t *testing.T) {
- for i, test := range mulWWTests {
- q, r := mulWW_g(test.x, test.y)
- if q != test.q || r != test.r {
- t.Errorf("#%d got (%x, %x) want (%x, %x)", i, q, r, test.q, test.r)
- }
- }
-}
-
-var mulAddWWWTests = []struct {
- x, y, c Word
- q, r Word
-}{
- // TODO(agl): These will only work on 64-bit platforms.
- // {15064310297182388543, 0xe7df04d2d35d5d80, 13537600649892366549, 13644450054494335067, 10832252001440893781},
- // {15064310297182388543, 0xdab2f18048baa68d, 13644450054494335067, 12869334219691522700, 14233854684711418382},
- {_M, _M, 0, _M - 1, 1},
- {_M, _M, _M, _M, 0},
-}
-
-func TestMulAddWWW(t *testing.T) {
- for i, test := range mulAddWWWTests {
- q, r := mulAddWWW_g(test.x, test.y, test.c)
- if q != test.q || r != test.r {
- t.Errorf("#%d got (%x, %x) want (%x, %x)", i, q, r, test.q, test.r)
- }
- }
-}
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file prints execution times for the Mul benchmark
-// given different Karatsuba thresholds. The result may be
-// used to manually fine-tune the threshold constant. The
-// results are somewhat fragile; use repeated runs to get
-// a clear picture.
-
-// Usage: gotest -calibrate
-
-package big
-
-import (
- "flag"
- "fmt"
- "testing"
- "time"
-)
-
-var calibrate = flag.Bool("calibrate", false, "run calibration test")
-
-// measure returns the time to run f
-func measure(f func()) int64 {
- const N = 100
- start := time.Nanoseconds()
- for i := N; i > 0; i-- {
- f()
- }
- stop := time.Nanoseconds()
- return (stop - start) / N
-}
-
-func computeThresholds() {
- fmt.Printf("Multiplication times for varying Karatsuba thresholds\n")
- fmt.Printf("(run repeatedly for good results)\n")
-
- // determine Tk, the work load execution time using basic multiplication
- karatsubaThreshold = 1e9 // disable karatsuba
- Tb := measure(benchmarkMulLoad)
- fmt.Printf("Tb = %dns\n", Tb)
-
- // thresholds
- n := 8 // any lower values for the threshold lead to very slow multiplies
- th1 := -1
- th2 := -1
-
- var deltaOld int64
- for count := -1; count != 0; count-- {
- // determine Tk, the work load execution time using Karatsuba multiplication
- karatsubaThreshold = n // enable karatsuba
- Tk := measure(benchmarkMulLoad)
-
- // improvement over Tb
- delta := (Tb - Tk) * 100 / Tb
-
- fmt.Printf("n = %3d Tk = %8dns %4d%%", n, Tk, delta)
-
- // determine break-even point
- if Tk < Tb && th1 < 0 {
- th1 = n
- fmt.Print(" break-even point")
- }
-
- // determine diminishing return
- if 0 < delta && delta < deltaOld && th2 < 0 {
- th2 = n
- fmt.Print(" diminishing return")
- }
- deltaOld = delta
-
- fmt.Println()
-
- // trigger counter
- if th1 >= 0 && th2 >= 0 && count < 0 {
- count = 20 // this many extra measurements after we got both thresholds
- }
-
- n++
- }
-}
-
-func TestCalibrate(t *testing.T) {
- if *calibrate {
- computeThresholds()
- }
-}
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// A little test program and benchmark for rational arithmetics.
-// Computes a Hilbert matrix, its inverse, multiplies them
-// and verifies that the product is the identity matrix.
-
-package big
-
-import (
- "fmt"
- "testing"
-)
-
-type matrix struct {
- n, m int
- a []*Rat
-}
-
-func (a *matrix) at(i, j int) *Rat {
- if !(0 <= i && i < a.n && 0 <= j && j < a.m) {
- panic("index out of range")
- }
- return a.a[i*a.m+j]
-}
-
-func (a *matrix) set(i, j int, x *Rat) {
- if !(0 <= i && i < a.n && 0 <= j && j < a.m) {
- panic("index out of range")
- }
- a.a[i*a.m+j] = x
-}
-
-func newMatrix(n, m int) *matrix {
- if !(0 <= n && 0 <= m) {
- panic("illegal matrix")
- }
- a := new(matrix)
- a.n = n
- a.m = m
- a.a = make([]*Rat, n*m)
- return a
-}
-
-func newUnit(n int) *matrix {
- a := newMatrix(n, n)
- for i := 0; i < n; i++ {
- for j := 0; j < n; j++ {
- x := NewRat(0, 1)
- if i == j {
- x.SetInt64(1)
- }
- a.set(i, j, x)
- }
- }
- return a
-}
-
-func newHilbert(n int) *matrix {
- a := newMatrix(n, n)
- for i := 0; i < n; i++ {
- for j := 0; j < n; j++ {
- a.set(i, j, NewRat(1, int64(i+j+1)))
- }
- }
- return a
-}
-
-func newInverseHilbert(n int) *matrix {
- a := newMatrix(n, n)
- for i := 0; i < n; i++ {
- for j := 0; j < n; j++ {
- x1 := new(Rat).SetInt64(int64(i + j + 1))
- x2 := new(Rat).SetInt(new(Int).Binomial(int64(n+i), int64(n-j-1)))
- x3 := new(Rat).SetInt(new(Int).Binomial(int64(n+j), int64(n-i-1)))
- x4 := new(Rat).SetInt(new(Int).Binomial(int64(i+j), int64(i)))
-
- x1.Mul(x1, x2)
- x1.Mul(x1, x3)
- x1.Mul(x1, x4)
- x1.Mul(x1, x4)
-
- if (i+j)&1 != 0 {
- x1.Neg(x1)
- }
-
- a.set(i, j, x1)
- }
- }
- return a
-}
-
-func (a *matrix) mul(b *matrix) *matrix {
- if a.m != b.n {
- panic("illegal matrix multiply")
- }
- c := newMatrix(a.n, b.m)
- for i := 0; i < c.n; i++ {
- for j := 0; j < c.m; j++ {
- x := NewRat(0, 1)
- for k := 0; k < a.m; k++ {
- x.Add(x, new(Rat).Mul(a.at(i, k), b.at(k, j)))
- }
- c.set(i, j, x)
- }
- }
- return c
-}
-
-func (a *matrix) eql(b *matrix) bool {
- if a.n != b.n || a.m != b.m {
- return false
- }
- for i := 0; i < a.n; i++ {
- for j := 0; j < a.m; j++ {
- if a.at(i, j).Cmp(b.at(i, j)) != 0 {
- return false
- }
- }
- }
- return true
-}
-
-func (a *matrix) String() string {
- s := ""
- for i := 0; i < a.n; i++ {
- for j := 0; j < a.m; j++ {
- s += fmt.Sprintf("\t%s", a.at(i, j))
- }
- s += "\n"
- }
- return s
-}
-
-func doHilbert(t *testing.T, n int) {
- a := newHilbert(n)
- b := newInverseHilbert(n)
- I := newUnit(n)
- ab := a.mul(b)
- if !ab.eql(I) {
- if t == nil {
- panic("Hilbert failed")
- }
- t.Errorf("a = %s\n", a)
- t.Errorf("b = %s\n", b)
- t.Errorf("a*b = %s\n", ab)
- t.Errorf("I = %s\n", I)
- }
-}
-
-func TestHilbert(t *testing.T) {
- doHilbert(t, 10)
-}
-
-func BenchmarkHilbert(b *testing.B) {
- for i := 0; i < b.N; i++ {
- doHilbert(nil, 10)
- }
-}
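For a 2x2 instance of the property being tested (added note): the Hilbert matrix [[1, 1/2], [1/2, 1/3]] has inverse [[4, -6], [-6, 12]], and their product is [[1*4 + (1/2)*(-6), 1*(-6) + (1/2)*12], [(1/2)*4 + (1/3)*(-6), (1/2)*(-6) + (1/3)*12]] = [[1, 0], [0, 1]], the identity. doHilbert performs the same check for n = 10 using exact Rat arithmetic.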
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file implements signed multi-precision integers.
-
-package big
-
-import (
- "errors"
- "fmt"
- "io"
- "rand"
- "strings"
-)
-
-// An Int represents a signed multi-precision integer.
-// The zero value for an Int represents the value 0.
-type Int struct {
- neg bool // sign
- abs nat // absolute value of the integer
-}
-
-var intOne = &Int{false, natOne}
-
-// Sign returns:
-//
-// -1 if x < 0
-// 0 if x == 0
-// +1 if x > 0
-//
-func (x *Int) Sign() int {
- if len(x.abs) == 0 {
- return 0
- }
- if x.neg {
- return -1
- }
- return 1
-}
-
-// SetInt64 sets z to x and returns z.
-func (z *Int) SetInt64(x int64) *Int {
- neg := false
- if x < 0 {
- neg = true
- x = -x
- }
- z.abs = z.abs.setUint64(uint64(x))
- z.neg = neg
- return z
-}
-
-// NewInt allocates and returns a new Int set to x.
-func NewInt(x int64) *Int {
- return new(Int).SetInt64(x)
-}
-
-// Set sets z to x and returns z.
-func (z *Int) Set(x *Int) *Int {
- if z != x {
- z.abs = z.abs.set(x.abs)
- z.neg = x.neg
- }
- return z
-}
-
-// Abs sets z to |x| (the absolute value of x) and returns z.
-func (z *Int) Abs(x *Int) *Int {
- z.Set(x)
- z.neg = false
- return z
-}
-
-// Neg sets z to -x and returns z.
-func (z *Int) Neg(x *Int) *Int {
- z.Set(x)
- z.neg = len(z.abs) > 0 && !z.neg // 0 has no sign
- return z
-}
-
-// Add sets z to the sum x+y and returns z.
-func (z *Int) Add(x, y *Int) *Int {
- neg := x.neg
- if x.neg == y.neg {
- // x + y == x + y
- // (-x) + (-y) == -(x + y)
- z.abs = z.abs.add(x.abs, y.abs)
- } else {
- // x + (-y) == x - y == -(y - x)
- // (-x) + y == y - x == -(x - y)
- if x.abs.cmp(y.abs) >= 0 {
- z.abs = z.abs.sub(x.abs, y.abs)
- } else {
- neg = !neg
- z.abs = z.abs.sub(y.abs, x.abs)
- }
- }
- z.neg = len(z.abs) > 0 && neg // 0 has no sign
- return z
-}
-
-// Sub sets z to the difference x-y and returns z.
-func (z *Int) Sub(x, y *Int) *Int {
- neg := x.neg
- if x.neg != y.neg {
- // x - (-y) == x + y
- // (-x) - y == -(x + y)
- z.abs = z.abs.add(x.abs, y.abs)
- } else {
- // x - y == x - y == -(y - x)
- // (-x) - (-y) == y - x == -(x - y)
- if x.abs.cmp(y.abs) >= 0 {
- z.abs = z.abs.sub(x.abs, y.abs)
- } else {
- neg = !neg
- z.abs = z.abs.sub(y.abs, x.abs)
- }
- }
- z.neg = len(z.abs) > 0 && neg // 0 has no sign
- return z
-}
-
-// Mul sets z to the product x*y and returns z.
-func (z *Int) Mul(x, y *Int) *Int {
- // x * y == x * y
- // x * (-y) == -(x * y)
- // (-x) * y == -(x * y)
- // (-x) * (-y) == x * y
- z.abs = z.abs.mul(x.abs, y.abs)
- z.neg = len(z.abs) > 0 && x.neg != y.neg // 0 has no sign
- return z
-}
-
-// MulRange sets z to the product of all integers
-// in the range [a, b] inclusively and returns z.
-// If a > b (empty range), the result is 1.
-func (z *Int) MulRange(a, b int64) *Int {
- switch {
- case a > b:
- return z.SetInt64(1) // empty range
- case a <= 0 && b >= 0:
- return z.SetInt64(0) // range includes 0
- }
- // a <= b && (b < 0 || a > 0)
-
- neg := false
- if a < 0 {
- neg = (b-a)&1 == 0
- a, b = -b, -a
- }
-
- z.abs = z.abs.mulRange(uint64(a), uint64(b))
- z.neg = neg
- return z
-}
-
-// Binomial sets z to the binomial coefficient of (n, k) and returns z.
-func (z *Int) Binomial(n, k int64) *Int {
- var a, b Int
- a.MulRange(n-k+1, n)
- b.MulRange(1, k)
- return z.Quo(&a, &b)
-}
-
-// Quo sets z to the quotient x/y for y != 0 and returns z.
-// If y == 0, a division-by-zero run-time panic occurs.
-// Quo implements truncated division (like Go); see QuoRem for more details.
-func (z *Int) Quo(x, y *Int) *Int {
- z.abs, _ = z.abs.div(nil, x.abs, y.abs)
- z.neg = len(z.abs) > 0 && x.neg != y.neg // 0 has no sign
- return z
-}
-
-// Rem sets z to the remainder x%y for y != 0 and returns z.
-// If y == 0, a division-by-zero run-time panic occurs.
-// Rem implements truncated modulus (like Go); see QuoRem for more details.
-func (z *Int) Rem(x, y *Int) *Int {
- _, z.abs = nat{}.div(z.abs, x.abs, y.abs)
- z.neg = len(z.abs) > 0 && x.neg // 0 has no sign
- return z
-}
-
-// QuoRem sets z to the quotient x/y and r to the remainder x%y
-// and returns the pair (z, r) for y != 0.
-// If y == 0, a division-by-zero run-time panic occurs.
-//
-// QuoRem implements T-division and modulus (like Go):
-//
-// q = x/y with the result truncated to zero
-// r = x - y*q
-//
-// (See Daan Leijen, ``Division and Modulus for Computer Scientists''.)
-//
-func (z *Int) QuoRem(x, y, r *Int) (*Int, *Int) {
- z.abs, r.abs = z.abs.div(r.abs, x.abs, y.abs)
- z.neg, r.neg = len(z.abs) > 0 && x.neg != y.neg, len(r.abs) > 0 && x.neg // 0 has no sign
- return z, r
-}
-
-// Div sets z to the quotient x/y for y != 0 and returns z.
-// If y == 0, a division-by-zero run-time panic occurs.
-// Div implements Euclidean division (unlike Go); see DivMod for more details.
-func (z *Int) Div(x, y *Int) *Int {
- y_neg := y.neg // z may be an alias for y
- var r Int
- z.QuoRem(x, y, &r)
- if r.neg {
- if y_neg {
- z.Add(z, intOne)
- } else {
- z.Sub(z, intOne)
- }
- }
- return z
-}
-
-// Mod sets z to the modulus x%y for y != 0 and returns z.
-// If y == 0, a division-by-zero run-time panic occurs.
-// Mod implements Euclidean modulus (unlike Go); see DivMod for more details.
-func (z *Int) Mod(x, y *Int) *Int {
- y0 := y // save y
- if z == y || alias(z.abs, y.abs) {
- y0 = new(Int).Set(y)
- }
- var q Int
- q.QuoRem(x, y, z)
- if z.neg {
- if y0.neg {
- z.Sub(z, y0)
- } else {
- z.Add(z, y0)
- }
- }
- return z
-}
-
-// DivMod sets z to the quotient x div y and m to the modulus x mod y
-// and returns the pair (z, m) for y != 0.
-// If y == 0, a division-by-zero run-time panic occurs.
-//
-// DivMod implements Euclidean division and modulus (unlike Go):
-//
-// q = x div y such that
-// m = x - y*q with 0 <= m < |q|
-//
-// (See Raymond T. Boute, ``The Euclidean definition of the functions
-// div and mod''. ACM Transactions on Programming Languages and
-// Systems (TOPLAS), 14(2):127-144, New York, NY, USA, 4/1992.
-// ACM press.)
-//
-func (z *Int) DivMod(x, y, m *Int) (*Int, *Int) {
- y0 := y // save y
- if z == y || alias(z.abs, y.abs) {
- y0 = new(Int).Set(y)
- }
- z.QuoRem(x, y, m)
- if m.neg {
- if y0.neg {
- z.Add(z, intOne)
- m.Sub(m, y0)
- } else {
- z.Sub(z, intOne)
- m.Add(m, y0)
- }
- }
- return z, m
-}
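A minimal sketch of how the two division pairs defined above differ for negative operands (added example, not part of the changeset; it assumes the post-reorganization import path math/big):

package main

import (
	"fmt"
	"math/big" // imported as "big" before the import path reorganization
)

func main() {
	x, y := big.NewInt(-7), big.NewInt(3)

	q, r := new(big.Int), new(big.Int)
	q.QuoRem(x, y, r) // truncated, Go-style: q = -2, r = -1

	d, m := new(big.Int), new(big.Int)
	d.DivMod(x, y, m) // Euclidean: d = -3, m = 2, and m is never negative

	fmt.Println(q, r, d, m) // -2 -1 -3 2
}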
-
-// Cmp compares x and y and returns:
-//
-// -1 if x < y
-// 0 if x == y
-// +1 if x > y
-//
-func (x *Int) Cmp(y *Int) (r int) {
- // x cmp y == x cmp y
- // x cmp (-y) == x
- // (-x) cmp y == y
- // (-x) cmp (-y) == -(x cmp y)
- switch {
- case x.neg == y.neg:
- r = x.abs.cmp(y.abs)
- if x.neg {
- r = -r
- }
- case x.neg:
- r = -1
- default:
- r = 1
- }
- return
-}
-
-func (x *Int) String() string {
- switch {
- case x == nil:
- return "<nil>"
- case x.neg:
- return "-" + x.abs.decimalString()
- }
- return x.abs.decimalString()
-}
-
-func charset(ch rune) string {
- switch ch {
- case 'b':
- return lowercaseDigits[0:2]
- case 'o':
- return lowercaseDigits[0:8]
- case 'd', 's', 'v':
- return lowercaseDigits[0:10]
- case 'x':
- return lowercaseDigits[0:16]
- case 'X':
- return uppercaseDigits[0:16]
- }
- return "" // unknown format
-}
-
-// write count copies of text to s
-func writeMultiple(s fmt.State, text string, count int) {
- if len(text) > 0 {
- b := []byte(text)
- for ; count > 0; count-- {
- s.Write(b)
- }
- }
-}
-
-// Format is a support routine for fmt.Formatter. It accepts
-// the formats 'b' (binary), 'o' (octal), 'd' (decimal), 'x'
-// (lowercase hexadecimal), and 'X' (uppercase hexadecimal).
-// Also supported are the full suite of package fmt's format
-// verbs for integral types, including '+', '-', and ' '
-// for sign control, '#' for leading zero in octal and for
-// hexadecimal, a leading "0x" or "0X" for "%#x" and "%#X"
-// respectively, specification of minimum digits precision,
-// output field width, space or zero padding, and left or
-// right justification.
-//
-func (x *Int) Format(s fmt.State, ch rune) {
- cs := charset(ch)
-
- // special cases
- switch {
- case cs == "":
- // unknown format
- fmt.Fprintf(s, "%%!%c(big.Int=%s)", ch, x.String())
- return
- case x == nil:
- fmt.Fprint(s, "<nil>")
- return
- }
-
- // determine sign character
- sign := ""
- switch {
- case x.neg:
- sign = "-"
- case s.Flag('+'): // supersedes ' ' when both specified
- sign = "+"
- case s.Flag(' '):
- sign = " "
- }
-
- // determine prefix characters for indicating output base
- prefix := ""
- if s.Flag('#') {
- switch ch {
- case 'o': // octal
- prefix = "0"
- case 'x': // hexadecimal
- prefix = "0x"
- case 'X':
- prefix = "0X"
- }
- }
-
- // determine digits with base set by len(cs) and digit characters from cs
- digits := x.abs.string(cs)
-
- // number of characters for the three classes of number padding
- var left int // space characters to left of digits for right justification ("%8d")
- var zeroes int // zero characters (actually cs[0]) as left-most digits ("%.8d")
- var right int // space characters to right of digits for left justification ("%-8d")
-
- // determine number padding from precision: the least number of digits to output
- precision, precisionSet := s.Precision()
- if precisionSet {
- switch {
- case len(digits) < precision:
- zeroes = precision - len(digits) // count of zero padding
- case digits == "0" && precision == 0:
- return // print nothing if zero value (x == 0) and zero precision ("." or ".0")
- }
- }
-
- // determine field pad from width: the least number of characters to output
- length := len(sign) + len(prefix) + zeroes + len(digits)
- if width, widthSet := s.Width(); widthSet && length < width { // pad as specified
- switch d := width - length; {
- case s.Flag('-'):
- // pad on the right with spaces; supersedes '0' when both specified
- right = d
- case s.Flag('0') && !precisionSet:
- // pad with zeroes unless precision also specified
- zeroes = d
- default:
- // pad on the left with spaces
- left = d
- }
- }
-
- // print number as [left pad][sign][prefix][zero pad][digits][right pad]
- writeMultiple(s, " ", left)
- writeMultiple(s, sign, 1)
- writeMultiple(s, prefix, 1)
- writeMultiple(s, "0", zeroes)
- writeMultiple(s, digits, 1)
- writeMultiple(s, " ", right)
-}
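For example (added note), with x set to 255 the verb %#x prints 0xff, %+d prints +255, and %08d pads with the zero digit to 00000255, all assembled by the left/zeroes/right bookkeeping above.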
-
-// scan sets z to the integer value corresponding to the longest possible prefix
-// read from r representing a signed integer number in a given conversion base.
-// It returns z, the actual conversion base used, and an error, if any. In the
-// error case, the value of z is undefined but the returned value is nil. The
-// syntax follows the syntax of integer literals in Go.
-//
-// The base argument must be 0 or a value from 2 through MaxBase. If the base
-// is 0, the string prefix determines the actual conversion base. A prefix of
-// ``0x'' or ``0X'' selects base 16; the ``0'' prefix selects base 8, and a
-// ``0b'' or ``0B'' prefix selects base 2. Otherwise the selected base is 10.
-//
-func (z *Int) scan(r io.RuneScanner, base int) (*Int, int, error) {
- // determine sign
- ch, _, err := r.ReadRune()
- if err != nil {
- return nil, 0, err
- }
- neg := false
- switch ch {
- case '-':
- neg = true
- case '+': // nothing to do
- default:
- r.UnreadRune()
- }
-
- // determine mantissa
- z.abs, base, err = z.abs.scan(r, base)
- if err != nil {
- return nil, base, err
- }
- z.neg = len(z.abs) > 0 && neg // 0 has no sign
-
- return z, base, nil
-}
-
-// Scan is a support routine for fmt.Scanner; it sets z to the value of
-// the scanned number. It accepts the formats 'b' (binary), 'o' (octal),
-// 'd' (decimal), 'x' (lowercase hexadecimal), and 'X' (uppercase hexadecimal).
-func (z *Int) Scan(s fmt.ScanState, ch rune) error {
- s.SkipSpace() // skip leading space characters
- base := 0
- switch ch {
- case 'b':
- base = 2
- case 'o':
- base = 8
- case 'd':
- base = 10
- case 'x', 'X':
- base = 16
- case 's', 'v':
- // let scan determine the base
- default:
- return errors.New("Int.Scan: invalid verb")
- }
- _, _, err := z.scan(s, base)
- return err
-}
-
-// Int64 returns the int64 representation of x.
-// If x cannot be represented in an int64, the result is undefined.
-func (x *Int) Int64() int64 {
- if len(x.abs) == 0 {
- return 0
- }
- v := int64(x.abs[0])
- if _W == 32 && len(x.abs) > 1 {
- v |= int64(x.abs[1]) << 32
- }
- if x.neg {
- v = -v
- }
- return v
-}
-
-// SetString sets z to the value of s, interpreted in the given base,
-// and returns z and a boolean indicating success. If SetString fails,
-// the value of z is undefined but the returned value is nil.
-//
-// The base argument must be 0 or a value from 2 through MaxBase. If the base
-// is 0, the string prefix determines the actual conversion base. A prefix of
-// ``0x'' or ``0X'' selects base 16; the ``0'' prefix selects base 8, and a
-// ``0b'' or ``0B'' prefix selects base 2. Otherwise the selected base is 10.
-//
-func (z *Int) SetString(s string, base int) (*Int, bool) {
- r := strings.NewReader(s)
- _, _, err := z.scan(r, base)
- if err != nil {
- return nil, false
- }
- _, _, err = r.ReadRune()
- if err != io.EOF {
- return nil, false
- }
- return z, true // err == os.EOF => scan consumed all of s
-}
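So, for instance (added note), z.SetString("0x1f", 0) selects base 16 from the prefix and yields 31, while z.SetString("0x1fz", 0) leaves the trailing 'z' unconsumed, the ReadRune check above sees something other than io.EOF, and the call returns (nil, false).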
-
-// SetBytes interprets buf as the bytes of a big-endian unsigned
-// integer, sets z to that value, and returns z.
-func (z *Int) SetBytes(buf []byte) *Int {
- z.abs = z.abs.setBytes(buf)
- z.neg = false
- return z
-}
-
-// Bytes returns the absolute value of z as a big-endian byte slice.
-func (z *Int) Bytes() []byte {
- buf := make([]byte, len(z.abs)*_S)
- return buf[z.abs.bytes(buf):]
-}
-
-// BitLen returns the length of the absolute value of z in bits.
-// The bit length of 0 is 0.
-func (z *Int) BitLen() int {
- return z.abs.bitLen()
-}
-
-// Exp sets z = x**y mod m. If m is nil, z = x**y.
-// See Knuth, volume 2, section 4.6.3.
-func (z *Int) Exp(x, y, m *Int) *Int {
- if y.neg || len(y.abs) == 0 {
- neg := x.neg
- z.SetInt64(1)
- z.neg = neg
- return z
- }
-
- var mWords nat
- if m != nil {
- mWords = m.abs
- }
-
- z.abs = z.abs.expNN(x.abs, y.abs, mWords)
- z.neg = len(z.abs) > 0 && x.neg && y.abs[0]&1 == 1 // 0 has no sign
- return z
-}
-
-// GcdInt sets d to the greatest common divisor of a and b, which must be
-// positive numbers.
-// If x and y are not nil, GcdInt sets x and y such that d = a*x + b*y.
-// If either a or b is not positive, GcdInt sets d = x = y = 0.
-func GcdInt(d, x, y, a, b *Int) {
- if a.neg || b.neg {
- d.SetInt64(0)
- if x != nil {
- x.SetInt64(0)
- }
- if y != nil {
- y.SetInt64(0)
- }
- return
- }
-
- A := new(Int).Set(a)
- B := new(Int).Set(b)
-
- X := new(Int)
- Y := new(Int).SetInt64(1)
-
- lastX := new(Int).SetInt64(1)
- lastY := new(Int)
-
- q := new(Int)
- temp := new(Int)
-
- for len(B.abs) > 0 {
- r := new(Int)
- q, r = q.QuoRem(A, B, r)
-
- A, B = B, r
-
- temp.Set(X)
- X.Mul(X, q)
- X.neg = !X.neg
- X.Add(X, lastX)
- lastX.Set(temp)
-
- temp.Set(Y)
- Y.Mul(Y, q)
- Y.neg = !Y.neg
- Y.Add(Y, lastY)
- lastY.Set(temp)
- }
-
- if x != nil {
- *x = *lastX
- }
-
- if y != nil {
- *y = *lastY
- }
-
- *d = *A
-}
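Editorial sketch of the Bézout identity GcdInt establishes, using the values
from gcdTests in the test file below (a = 120, b = 23):

	d, x, y := new(Int), new(Int), new(Int)
	GcdInt(d, x, y, NewInt(120), NewInt(23))
	// d = 1, x = -9, y = 47, and indeed 120*(-9) + 23*47 = 1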
-
-// ProbablyPrime performs n Miller-Rabin tests to check whether z is prime.
-// If it returns true, z is prime with probability 1 - 1/4^n.
-// If it returns false, z is not prime.
-func ProbablyPrime(z *Int, n int) bool {
- return !z.neg && z.abs.probablyPrime(n)
-}
-
-// Rand sets z to a pseudo-random number in [0, n) and returns z.
-func (z *Int) Rand(rnd *rand.Rand, n *Int) *Int {
- z.neg = false
- if n.neg || len(n.abs) == 0 {
- z.abs = nil
- return z
- }
- z.abs = z.abs.random(rnd, n.abs, n.abs.bitLen())
- return z
-}
-
-// ModInverse sets z to the multiplicative inverse of g in the group ℤ/pℤ (where
-// p is a prime) and returns z.
-func (z *Int) ModInverse(g, p *Int) *Int {
- var d Int
- GcdInt(&d, z, nil, g, p)
- // x and y are such that g*x + p*y = d. Since p is prime, d = 1. Taking
- // that modulo p results in g*x = 1, therefore x is the inverse element.
- if z.neg {
- z.Add(z, p)
- }
- return z
-}
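Editorial worked example: for g = 3 and p = 7, GcdInt yields 1 = 3*(-2) + 7*1,
so z = -2; since that is negative, p is added to give 5, and 3*5 = 15 == 1 (mod 7).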
-
-// Lsh sets z = x << n and returns z.
-func (z *Int) Lsh(x *Int, n uint) *Int {
- z.abs = z.abs.shl(x.abs, n)
- z.neg = x.neg
- return z
-}
-
-// Rsh sets z = x >> n and returns z.
-func (z *Int) Rsh(x *Int, n uint) *Int {
- if x.neg {
- // (-x) >> s == ^(x-1) >> s == ^((x-1) >> s) == -(((x-1) >> s) + 1)
- t := z.abs.sub(x.abs, natOne) // no underflow because |x| > 0
- t = t.shr(t, n)
- z.abs = t.add(t, natOne)
- z.neg = true // z cannot be zero if x is negative
- return z
- }
-
- z.abs = z.abs.shr(x.abs, n)
- z.neg = false
- return z
-}
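Editorial worked example of the negative branch: for x = -100 and n = 3,
|x|-1 = 99, 99>>3 = 12, and -(12+1) = -13, which is floor(-100/8) as expected
(compare rshTests in the test file below).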
-
-// Bit returns the value of the i'th bit of z. That is, it
-// returns (z>>i)&1. The bit index i must be >= 0.
-func (z *Int) Bit(i int) uint {
- if i < 0 {
- panic("negative bit index")
- }
- if z.neg {
- t := nat{}.sub(z.abs, natOne)
- return t.bit(uint(i)) ^ 1
- }
-
- return z.abs.bit(uint(i))
-}
-
-// SetBit sets the i'th bit of z to bit and returns z.
-// That is, if bit is 1 SetBit sets z = x | (1 << i);
-// if bit is 0 it sets z = x &^ (1 << i). If bit is not 0 or 1,
-// SetBit will panic.
-func (z *Int) SetBit(x *Int, i int, b uint) *Int {
- if i < 0 {
- panic("negative bit index")
- }
- if x.neg {
- t := z.abs.sub(x.abs, natOne)
- t = t.setBit(t, uint(i), b^1)
- z.abs = t.add(t, natOne)
- z.neg = len(z.abs) > 0
- return z
- }
- z.abs = z.abs.setBit(x.abs, uint(i), b)
- z.neg = false
- return z
-}
-
-// And sets z = x & y and returns z.
-func (z *Int) And(x, y *Int) *Int {
- if x.neg == y.neg {
- if x.neg {
- // (-x) & (-y) == ^(x-1) & ^(y-1) == ^((x-1) | (y-1)) == -(((x-1) | (y-1)) + 1)
- x1 := nat{}.sub(x.abs, natOne)
- y1 := nat{}.sub(y.abs, natOne)
- z.abs = z.abs.add(z.abs.or(x1, y1), natOne)
- z.neg = true // z cannot be zero if x and y are negative
- return z
- }
-
- // x & y == x & y
- z.abs = z.abs.and(x.abs, y.abs)
- z.neg = false
- return z
- }
-
- // x.neg != y.neg
- if x.neg {
- x, y = y, x // & is symmetric
- }
-
- // x & (-y) == x & ^(y-1) == x &^ (y-1)
- y1 := nat{}.sub(y.abs, natOne)
- z.abs = z.abs.andNot(x.abs, y1)
- z.neg = false
- return z
-}
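Editorial worked example of the both-negative branch: for x = -5, y = -3 the
code computes (5-1)|(3-1) = 4|2 = 6 and returns -(6+1) = -7, matching the
two's-complement result ...11011 & ...11101 = ...11001.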
-
-// AndNot sets z = x &^ y and returns z.
-func (z *Int) AndNot(x, y *Int) *Int {
- if x.neg == y.neg {
- if x.neg {
- // (-x) &^ (-y) == ^(x-1) &^ ^(y-1) == ^(x-1) & (y-1) == (y-1) &^ (x-1)
- x1 := nat{}.sub(x.abs, natOne)
- y1 := nat{}.sub(y.abs, natOne)
- z.abs = z.abs.andNot(y1, x1)
- z.neg = false
- return z
- }
-
- // x &^ y == x &^ y
- z.abs = z.abs.andNot(x.abs, y.abs)
- z.neg = false
- return z
- }
-
- if x.neg {
- // (-x) &^ y == ^(x-1) &^ y == ^(x-1) & ^y == ^((x-1) | y) == -(((x-1) | y) + 1)
- x1 := nat{}.sub(x.abs, natOne)
- z.abs = z.abs.add(z.abs.or(x1, y.abs), natOne)
- z.neg = true // z cannot be zero if x is negative and y is positive
- return z
- }
-
- // x &^ (-y) == x &^ ^(y-1) == x & (y-1)
- y1 := nat{}.sub(y.abs, natOne)
- z.abs = z.abs.and(x.abs, y1)
- z.neg = false
- return z
-}
-
-// Or sets z = x | y and returns z.
-func (z *Int) Or(x, y *Int) *Int {
- if x.neg == y.neg {
- if x.neg {
- // (-x) | (-y) == ^(x-1) | ^(y-1) == ^((x-1) & (y-1)) == -(((x-1) & (y-1)) + 1)
- x1 := nat{}.sub(x.abs, natOne)
- y1 := nat{}.sub(y.abs, natOne)
- z.abs = z.abs.add(z.abs.and(x1, y1), natOne)
- z.neg = true // z cannot be zero if x and y are negative
- return z
- }
-
- // x | y == x | y
- z.abs = z.abs.or(x.abs, y.abs)
- z.neg = false
- return z
- }
-
- // x.neg != y.neg
- if x.neg {
- x, y = y, x // | is symmetric
- }
-
- // x | (-y) == x | ^(y-1) == ^((y-1) &^ x) == -(^((y-1) &^ x) + 1)
- y1 := nat{}.sub(y.abs, natOne)
- z.abs = z.abs.add(z.abs.andNot(y1, x.abs), natOne)
- z.neg = true // z cannot be zero if one of x or y is negative
- return z
-}
-
-// Xor sets z = x ^ y and returns z.
-func (z *Int) Xor(x, y *Int) *Int {
- if x.neg == y.neg {
- if x.neg {
- // (-x) ^ (-y) == ^(x-1) ^ ^(y-1) == (x-1) ^ (y-1)
- x1 := nat{}.sub(x.abs, natOne)
- y1 := nat{}.sub(y.abs, natOne)
- z.abs = z.abs.xor(x1, y1)
- z.neg = false
- return z
- }
-
- // x ^ y == x ^ y
- z.abs = z.abs.xor(x.abs, y.abs)
- z.neg = false
- return z
- }
-
- // x.neg != y.neg
- if x.neg {
- x, y = y, x // ^ is symmetric
- }
-
- // x ^ (-y) == x ^ ^(y-1) == ^(x ^ (y-1)) == -((x ^ (y-1)) + 1)
- y1 := nat{}.sub(y.abs, natOne)
- z.abs = z.abs.add(z.abs.xor(x.abs, y1), natOne)
- z.neg = true // z cannot be zero if only one of x or y is negative
- return z
-}
-
-// Not sets z = ^x and returns z.
-func (z *Int) Not(x *Int) *Int {
- if x.neg {
- // ^(-x) == ^(^(x-1)) == x-1
- z.abs = z.abs.sub(x.abs, natOne)
- z.neg = false
- return z
- }
-
- // ^x == -x-1 == -(x+1)
- z.abs = z.abs.add(x.abs, natOne)
- z.neg = true // z cannot be zero if x is positive
- return z
-}
-
-// Gob codec version. Permits backward-compatible changes to the encoding.
-const intGobVersion byte = 1
-
-// GobEncode implements the gob.GobEncoder interface.
-func (z *Int) GobEncode() ([]byte, error) {
- buf := make([]byte, 1+len(z.abs)*_S) // extra byte for version and sign bit
- i := z.abs.bytes(buf) - 1 // i >= 0
- b := intGobVersion << 1 // make space for sign bit
- if z.neg {
- b |= 1
- }
- buf[i] = b
- return buf[i:], nil
-}
-
-// GobDecode implements the gob.GobDecoder interface.
-func (z *Int) GobDecode(buf []byte) error {
- if len(buf) == 0 {
- return errors.New("Int.GobDecode: no data")
- }
- b := buf[0]
- if b>>1 != intGobVersion {
- return errors.New(fmt.Sprintf("Int.GobDecode: encoding version %d not supported", b>>1))
- }
- z.neg = b&1 != 0
- z.abs = z.abs.setBytes(buf[1:])
- return nil
-}
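Editorial worked example of the encoding: -258 becomes the three bytes
0x03 0x01 0x02, where the leading byte is intGobVersion<<1 with the sign bit
set (1<<1 | 1 = 0x03), followed by the big-endian magnitude 0x0102.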
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package big
-
-import (
- "bytes"
- "encoding/hex"
- "fmt"
- "gob"
- "testing"
- "testing/quick"
-)
-
-func isNormalized(x *Int) bool {
- if len(x.abs) == 0 {
- return !x.neg
- }
- // len(x.abs) > 0
- return x.abs[len(x.abs)-1] != 0
-}
-
-type funZZ func(z, x, y *Int) *Int
-type argZZ struct {
- z, x, y *Int
-}
-
-var sumZZ = []argZZ{
- {NewInt(0), NewInt(0), NewInt(0)},
- {NewInt(1), NewInt(1), NewInt(0)},
- {NewInt(1111111110), NewInt(123456789), NewInt(987654321)},
- {NewInt(-1), NewInt(-1), NewInt(0)},
- {NewInt(864197532), NewInt(-123456789), NewInt(987654321)},
- {NewInt(-1111111110), NewInt(-123456789), NewInt(-987654321)},
-}
-
-var prodZZ = []argZZ{
- {NewInt(0), NewInt(0), NewInt(0)},
- {NewInt(0), NewInt(1), NewInt(0)},
- {NewInt(1), NewInt(1), NewInt(1)},
- {NewInt(-991 * 991), NewInt(991), NewInt(-991)},
- // TODO(gri) add larger products
-}
-
-func TestSignZ(t *testing.T) {
- var zero Int
- for _, a := range sumZZ {
- s := a.z.Sign()
- e := a.z.Cmp(&zero)
- if s != e {
- t.Errorf("got %d; want %d for z = %v", s, e, a.z)
- }
- }
-}
-
-func TestSetZ(t *testing.T) {
- for _, a := range sumZZ {
- var z Int
- z.Set(a.z)
- if !isNormalized(&z) {
- t.Errorf("%v is not normalized", z)
- }
- if (&z).Cmp(a.z) != 0 {
- t.Errorf("got z = %v; want %v", z, a.z)
- }
- }
-}
-
-func TestAbsZ(t *testing.T) {
- var zero Int
- for _, a := range sumZZ {
- var z Int
- z.Abs(a.z)
- var e Int
- e.Set(a.z)
- if e.Cmp(&zero) < 0 {
- e.Sub(&zero, &e)
- }
- if z.Cmp(&e) != 0 {
- t.Errorf("got z = %v; want %v", z, e)
- }
- }
-}
-
-func testFunZZ(t *testing.T, msg string, f funZZ, a argZZ) {
- var z Int
- f(&z, a.x, a.y)
- if !isNormalized(&z) {
- t.Errorf("%s%v is not normalized", z, msg)
- }
- if (&z).Cmp(a.z) != 0 {
- t.Errorf("%s%+v\n\tgot z = %v; want %v", msg, a, &z, a.z)
- }
-}
-
-func TestSumZZ(t *testing.T) {
- AddZZ := func(z, x, y *Int) *Int { return z.Add(x, y) }
- SubZZ := func(z, x, y *Int) *Int { return z.Sub(x, y) }
- for _, a := range sumZZ {
- arg := a
- testFunZZ(t, "AddZZ", AddZZ, arg)
-
- arg = argZZ{a.z, a.y, a.x}
- testFunZZ(t, "AddZZ symmetric", AddZZ, arg)
-
- arg = argZZ{a.x, a.z, a.y}
- testFunZZ(t, "SubZZ", SubZZ, arg)
-
- arg = argZZ{a.y, a.z, a.x}
- testFunZZ(t, "SubZZ symmetric", SubZZ, arg)
- }
-}
-
-func TestProdZZ(t *testing.T) {
- MulZZ := func(z, x, y *Int) *Int { return z.Mul(x, y) }
- for _, a := range prodZZ {
- arg := a
- testFunZZ(t, "MulZZ", MulZZ, arg)
-
- arg = argZZ{a.z, a.y, a.x}
- testFunZZ(t, "MulZZ symmetric", MulZZ, arg)
- }
-}
-
-// mulBytes returns x*y via grade school multiplication. Both inputs
-// and the result are assumed to be in big-endian representation (to
-// match the semantics of Int.Bytes and Int.SetBytes).
-func mulBytes(x, y []byte) []byte {
- z := make([]byte, len(x)+len(y))
-
- // multiply
- k0 := len(z) - 1
- for j := len(y) - 1; j >= 0; j-- {
- d := int(y[j])
- if d != 0 {
- k := k0
- carry := 0
- for i := len(x) - 1; i >= 0; i-- {
- t := int(z[k]) + int(x[i])*d + carry
- z[k], carry = byte(t), t>>8
- k--
- }
- z[k] = byte(carry)
- }
- k0--
- }
-
- // normalize (remove leading 0's)
- i := 0
- for i < len(z) && z[i] == 0 {
- i++
- }
-
- return z[i:]
-}
-
-func checkMul(a, b []byte) bool {
- var x, y, z1 Int
- x.SetBytes(a)
- y.SetBytes(b)
- z1.Mul(&x, &y)
-
- var z2 Int
- z2.SetBytes(mulBytes(a, b))
-
- return z1.Cmp(&z2) == 0
-}
-
-func TestMul(t *testing.T) {
- if err := quick.Check(checkMul, nil); err != nil {
- t.Error(err)
- }
-}
-
-var mulRangesZ = []struct {
- a, b int64
- prod string
-}{
- // entirely positive ranges are covered by mulRangesN
- {-1, 1, "0"},
- {-2, -1, "2"},
- {-3, -2, "6"},
- {-3, -1, "-6"},
- {1, 3, "6"},
- {-10, -10, "-10"},
- {0, -1, "1"}, // empty range
- {-1, -100, "1"}, // empty range
- {-1, 1, "0"}, // range includes 0
- {-1e9, 0, "0"}, // range includes 0
- {-1e9, 1e9, "0"}, // range includes 0
- {-10, -1, "3628800"}, // 10!
- {-20, -2, "-2432902008176640000"}, // -20!
- {-99, -1,
- "-933262154439441526816992388562667004907159682643816214685929" +
- "638952175999932299156089414639761565182862536979208272237582" +
- "511852109168640000000000000000000000", // -99!
- },
-}
-
-func TestMulRangeZ(t *testing.T) {
- var tmp Int
- // test entirely positive ranges
- for i, r := range mulRangesN {
- prod := tmp.MulRange(int64(r.a), int64(r.b)).String()
- if prod != r.prod {
- t.Errorf("#%da: got %s; want %s", i, prod, r.prod)
- }
- }
- // test other ranges
- for i, r := range mulRangesZ {
- prod := tmp.MulRange(r.a, r.b).String()
- if prod != r.prod {
- t.Errorf("#%db: got %s; want %s", i, prod, r.prod)
- }
- }
-}
-
-var stringTests = []struct {
- in string
- out string
- base int
- val int64
- ok bool
-}{
- {in: "", ok: false},
- {in: "a", ok: false},
- {in: "z", ok: false},
- {in: "+", ok: false},
- {in: "-", ok: false},
- {in: "0b", ok: false},
- {in: "0x", ok: false},
- {in: "2", base: 2, ok: false},
- {in: "0b2", base: 0, ok: false},
- {in: "08", ok: false},
- {in: "8", base: 8, ok: false},
- {in: "0xg", base: 0, ok: false},
- {in: "g", base: 16, ok: false},
- {"0", "0", 0, 0, true},
- {"0", "0", 10, 0, true},
- {"0", "0", 16, 0, true},
- {"+0", "0", 0, 0, true},
- {"-0", "0", 0, 0, true},
- {"10", "10", 0, 10, true},
- {"10", "10", 10, 10, true},
- {"10", "10", 16, 16, true},
- {"-10", "-10", 16, -16, true},
- {"+10", "10", 16, 16, true},
- {"0x10", "16", 0, 16, true},
- {in: "0x10", base: 16, ok: false},
- {"-0x10", "-16", 0, -16, true},
- {"+0x10", "16", 0, 16, true},
- {"00", "0", 0, 0, true},
- {"0", "0", 8, 0, true},
- {"07", "7", 0, 7, true},
- {"7", "7", 8, 7, true},
- {"023", "19", 0, 19, true},
- {"23", "23", 8, 19, true},
- {"cafebabe", "cafebabe", 16, 0xcafebabe, true},
- {"0b0", "0", 0, 0, true},
- {"-111", "-111", 2, -7, true},
- {"-0b111", "-7", 0, -7, true},
- {"0b1001010111", "599", 0, 0x257, true},
- {"1001010111", "1001010111", 2, 0x257, true},
-}
-
-func format(base int) string {
- switch base {
- case 2:
- return "%b"
- case 8:
- return "%o"
- case 16:
- return "%x"
- }
- return "%d"
-}
-
-func TestGetString(t *testing.T) {
- z := new(Int)
- for i, test := range stringTests {
- if !test.ok {
- continue
- }
- z.SetInt64(test.val)
-
- if test.base == 10 {
- s := z.String()
- if s != test.out {
- t.Errorf("#%da got %s; want %s", i, s, test.out)
- }
- }
-
- s := fmt.Sprintf(format(test.base), z)
- if s != test.out {
- t.Errorf("#%db got %s; want %s", i, s, test.out)
- }
- }
-}
-
-func TestSetString(t *testing.T) {
- tmp := new(Int)
- for i, test := range stringTests {
- // initialize to a non-zero value so that issues with parsing
- // 0 are detected
- tmp.SetInt64(1234567890)
- n1, ok1 := new(Int).SetString(test.in, test.base)
- n2, ok2 := tmp.SetString(test.in, test.base)
- expected := NewInt(test.val)
- if ok1 != test.ok || ok2 != test.ok {
- t.Errorf("#%d (input '%s') ok incorrect (should be %t)", i, test.in, test.ok)
- continue
- }
- if !ok1 {
- if n1 != nil {
- t.Errorf("#%d (input '%s') n1 != nil", i, test.in)
- }
- continue
- }
- if !ok2 {
- if n2 != nil {
- t.Errorf("#%d (input '%s') n2 != nil", i, test.in)
- }
- continue
- }
-
- if ok1 && !isNormalized(n1) {
- t.Errorf("#%d (input '%s'): %v is not normalized", i, test.in, *n1)
- }
- if ok2 && !isNormalized(n2) {
- t.Errorf("#%d (input '%s'): %v is not normalized", i, test.in, *n2)
- }
-
- if n1.Cmp(expected) != 0 {
- t.Errorf("#%d (input '%s') got: %s want: %d", i, test.in, n1, test.val)
- }
- if n2.Cmp(expected) != 0 {
- t.Errorf("#%d (input '%s') got: %s want: %d", i, test.in, n2, test.val)
- }
- }
-}
-
-var formatTests = []struct {
- input string
- format string
- output string
-}{
- {"<nil>", "%x", "<nil>"},
- {"<nil>", "%#x", "<nil>"},
- {"<nil>", "%#y", "%!y(big.Int=<nil>)"},
-
- {"10", "%b", "1010"},
- {"10", "%o", "12"},
- {"10", "%d", "10"},
- {"10", "%v", "10"},
- {"10", "%x", "a"},
- {"10", "%X", "A"},
- {"-10", "%X", "-A"},
- {"10", "%y", "%!y(big.Int=10)"},
- {"-10", "%y", "%!y(big.Int=-10)"},
-
- {"10", "%#b", "1010"},
- {"10", "%#o", "012"},
- {"10", "%#d", "10"},
- {"10", "%#v", "10"},
- {"10", "%#x", "0xa"},
- {"10", "%#X", "0XA"},
- {"-10", "%#X", "-0XA"},
- {"10", "%#y", "%!y(big.Int=10)"},
- {"-10", "%#y", "%!y(big.Int=-10)"},
-
- {"1234", "%d", "1234"},
- {"1234", "%3d", "1234"},
- {"1234", "%4d", "1234"},
- {"-1234", "%d", "-1234"},
- {"1234", "% 5d", " 1234"},
- {"1234", "%+5d", "+1234"},
- {"1234", "%-5d", "1234 "},
- {"1234", "%x", "4d2"},
- {"1234", "%X", "4D2"},
- {"-1234", "%3x", "-4d2"},
- {"-1234", "%4x", "-4d2"},
- {"-1234", "%5x", " -4d2"},
- {"-1234", "%-5x", "-4d2 "},
- {"1234", "%03d", "1234"},
- {"1234", "%04d", "1234"},
- {"1234", "%05d", "01234"},
- {"1234", "%06d", "001234"},
- {"-1234", "%06d", "-01234"},
- {"1234", "%+06d", "+01234"},
- {"1234", "% 06d", " 01234"},
- {"1234", "%-6d", "1234 "},
- {"1234", "%-06d", "1234 "},
- {"-1234", "%-06d", "-1234 "},
-
- {"1234", "%.3d", "1234"},
- {"1234", "%.4d", "1234"},
- {"1234", "%.5d", "01234"},
- {"1234", "%.6d", "001234"},
- {"-1234", "%.3d", "-1234"},
- {"-1234", "%.4d", "-1234"},
- {"-1234", "%.5d", "-01234"},
- {"-1234", "%.6d", "-001234"},
-
- {"1234", "%8.3d", " 1234"},
- {"1234", "%8.4d", " 1234"},
- {"1234", "%8.5d", " 01234"},
- {"1234", "%8.6d", " 001234"},
- {"-1234", "%8.3d", " -1234"},
- {"-1234", "%8.4d", " -1234"},
- {"-1234", "%8.5d", " -01234"},
- {"-1234", "%8.6d", " -001234"},
-
- {"1234", "%+8.3d", " +1234"},
- {"1234", "%+8.4d", " +1234"},
- {"1234", "%+8.5d", " +01234"},
- {"1234", "%+8.6d", " +001234"},
- {"-1234", "%+8.3d", " -1234"},
- {"-1234", "%+8.4d", " -1234"},
- {"-1234", "%+8.5d", " -01234"},
- {"-1234", "%+8.6d", " -001234"},
-
- {"1234", "% 8.3d", " 1234"},
- {"1234", "% 8.4d", " 1234"},
- {"1234", "% 8.5d", " 01234"},
- {"1234", "% 8.6d", " 001234"},
- {"-1234", "% 8.3d", " -1234"},
- {"-1234", "% 8.4d", " -1234"},
- {"-1234", "% 8.5d", " -01234"},
- {"-1234", "% 8.6d", " -001234"},
-
- {"1234", "%.3x", "4d2"},
- {"1234", "%.4x", "04d2"},
- {"1234", "%.5x", "004d2"},
- {"1234", "%.6x", "0004d2"},
- {"-1234", "%.3x", "-4d2"},
- {"-1234", "%.4x", "-04d2"},
- {"-1234", "%.5x", "-004d2"},
- {"-1234", "%.6x", "-0004d2"},
-
- {"1234", "%8.3x", " 4d2"},
- {"1234", "%8.4x", " 04d2"},
- {"1234", "%8.5x", " 004d2"},
- {"1234", "%8.6x", " 0004d2"},
- {"-1234", "%8.3x", " -4d2"},
- {"-1234", "%8.4x", " -04d2"},
- {"-1234", "%8.5x", " -004d2"},
- {"-1234", "%8.6x", " -0004d2"},
-
- {"1234", "%+8.3x", " +4d2"},
- {"1234", "%+8.4x", " +04d2"},
- {"1234", "%+8.5x", " +004d2"},
- {"1234", "%+8.6x", " +0004d2"},
- {"-1234", "%+8.3x", " -4d2"},
- {"-1234", "%+8.4x", " -04d2"},
- {"-1234", "%+8.5x", " -004d2"},
- {"-1234", "%+8.6x", " -0004d2"},
-
- {"1234", "% 8.3x", " 4d2"},
- {"1234", "% 8.4x", " 04d2"},
- {"1234", "% 8.5x", " 004d2"},
- {"1234", "% 8.6x", " 0004d2"},
- {"1234", "% 8.7x", " 00004d2"},
- {"1234", "% 8.8x", " 000004d2"},
- {"-1234", "% 8.3x", " -4d2"},
- {"-1234", "% 8.4x", " -04d2"},
- {"-1234", "% 8.5x", " -004d2"},
- {"-1234", "% 8.6x", " -0004d2"},
- {"-1234", "% 8.7x", "-00004d2"},
- {"-1234", "% 8.8x", "-000004d2"},
-
- {"1234", "%-8.3d", "1234 "},
- {"1234", "%-8.4d", "1234 "},
- {"1234", "%-8.5d", "01234 "},
- {"1234", "%-8.6d", "001234 "},
- {"1234", "%-8.7d", "0001234 "},
- {"1234", "%-8.8d", "00001234"},
- {"-1234", "%-8.3d", "-1234 "},
- {"-1234", "%-8.4d", "-1234 "},
- {"-1234", "%-8.5d", "-01234 "},
- {"-1234", "%-8.6d", "-001234 "},
- {"-1234", "%-8.7d", "-0001234"},
- {"-1234", "%-8.8d", "-00001234"},
-
- {"16777215", "%b", "111111111111111111111111"}, // 2**24 - 1
-
- {"0", "%.d", ""},
- {"0", "%.0d", ""},
- {"0", "%3.d", ""},
-}
-
-func TestFormat(t *testing.T) {
- for i, test := range formatTests {
- var x *Int
- if test.input != "<nil>" {
- var ok bool
- x, ok = new(Int).SetString(test.input, 0)
- if !ok {
- t.Errorf("#%d failed reading input %s", i, test.input)
- }
- }
- output := fmt.Sprintf(test.format, x)
- if output != test.output {
- t.Errorf("#%d got %q; want %q, {%q, %q, %q}", i, output, test.output, test.input, test.format, test.output)
- }
- }
-}
-
-var scanTests = []struct {
- input string
- format string
- output string
- remaining int
-}{
- {"1010", "%b", "10", 0},
- {"0b1010", "%v", "10", 0},
- {"12", "%o", "10", 0},
- {"012", "%v", "10", 0},
- {"10", "%d", "10", 0},
- {"10", "%v", "10", 0},
- {"a", "%x", "10", 0},
- {"0xa", "%v", "10", 0},
- {"A", "%X", "10", 0},
- {"-A", "%X", "-10", 0},
- {"+0b1011001", "%v", "89", 0},
- {"0xA", "%v", "10", 0},
- {"0 ", "%v", "0", 1},
- {"2+3", "%v", "2", 2},
- {"0XABC 12", "%v", "2748", 3},
-}
-
-func TestScan(t *testing.T) {
- var buf bytes.Buffer
- for i, test := range scanTests {
- x := new(Int)
- buf.Reset()
- buf.WriteString(test.input)
- if _, err := fmt.Fscanf(&buf, test.format, x); err != nil {
- t.Errorf("#%d error: %s", i, err)
- }
- if x.String() != test.output {
- t.Errorf("#%d got %s; want %s", i, x.String(), test.output)
- }
- if buf.Len() != test.remaining {
- t.Errorf("#%d got %d bytes remaining; want %d", i, buf.Len(), test.remaining)
- }
- }
-}
-
-// Examples from the Go Language Spec, section "Arithmetic operators"
-var divisionSignsTests = []struct {
- x, y int64
- q, r int64 // T-division
- d, m int64 // Euclidean division
-}{
- {5, 3, 1, 2, 1, 2},
- {-5, 3, -1, -2, -2, 1},
- {5, -3, -1, 2, -1, 2},
- {-5, -3, 1, -2, 2, 1},
- {1, 2, 0, 1, 0, 1},
- {8, 4, 2, 0, 2, 0},
-}
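Editorial note: the q/r columns are truncated (T-)division as performed by
Quo/Rem, which round toward zero (so -5/3 gives q = -1, r = -2), while the
d/m columns are Euclidean division as performed by Div/Mod, which keep
0 <= m < |y| (so -5/3 gives d = -2, m = 1).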
-
-func TestDivisionSigns(t *testing.T) {
- for i, test := range divisionSignsTests {
- x := NewInt(test.x)
- y := NewInt(test.y)
- q := NewInt(test.q)
- r := NewInt(test.r)
- d := NewInt(test.d)
- m := NewInt(test.m)
-
- q1 := new(Int).Quo(x, y)
- r1 := new(Int).Rem(x, y)
- if !isNormalized(q1) {
- t.Errorf("#%d Quo: %v is not normalized", i, *q1)
- }
- if !isNormalized(r1) {
- t.Errorf("#%d Rem: %v is not normalized", i, *r1)
- }
- if q1.Cmp(q) != 0 || r1.Cmp(r) != 0 {
- t.Errorf("#%d QuoRem: got (%s, %s), want (%s, %s)", i, q1, r1, q, r)
- }
-
- q2, r2 := new(Int).QuoRem(x, y, new(Int))
- if !isNormalized(q2) {
- t.Errorf("#%d Quo: %v is not normalized", i, *q2)
- }
- if !isNormalized(r2) {
- t.Errorf("#%d Rem: %v is not normalized", i, *r2)
- }
- if q2.Cmp(q) != 0 || r2.Cmp(r) != 0 {
- t.Errorf("#%d QuoRem: got (%s, %s), want (%s, %s)", i, q2, r2, q, r)
- }
-
- d1 := new(Int).Div(x, y)
- m1 := new(Int).Mod(x, y)
- if !isNormalized(d1) {
- t.Errorf("#%d Div: %v is not normalized", i, *d1)
- }
- if !isNormalized(m1) {
- t.Errorf("#%d Mod: %v is not normalized", i, *m1)
- }
- if d1.Cmp(d) != 0 || m1.Cmp(m) != 0 {
- t.Errorf("#%d DivMod: got (%s, %s), want (%s, %s)", i, d1, m1, d, m)
- }
-
- d2, m2 := new(Int).DivMod(x, y, new(Int))
- if !isNormalized(d2) {
- t.Errorf("#%d Div: %v is not normalized", i, *d2)
- }
- if !isNormalized(m2) {
- t.Errorf("#%d Mod: %v is not normalized", i, *m2)
- }
- if d2.Cmp(d) != 0 || m2.Cmp(m) != 0 {
- t.Errorf("#%d DivMod: got (%s, %s), want (%s, %s)", i, d2, m2, d, m)
- }
- }
-}
-
-func checkSetBytes(b []byte) bool {
- hex1 := hex.EncodeToString(new(Int).SetBytes(b).Bytes())
- hex2 := hex.EncodeToString(b)
-
- for len(hex1) < len(hex2) {
- hex1 = "0" + hex1
- }
-
- for len(hex1) > len(hex2) {
- hex2 = "0" + hex2
- }
-
- return hex1 == hex2
-}
-
-func TestSetBytes(t *testing.T) {
- if err := quick.Check(checkSetBytes, nil); err != nil {
- t.Error(err)
- }
-}
-
-func checkBytes(b []byte) bool {
- // skip leading zero bytes since Bytes never returns them
- for len(b) > 0 && b[0] == 0 {
- b = b[1:]
- }
- b2 := new(Int).SetBytes(b).Bytes()
- return bytes.Compare(b, b2) == 0
-}
-
-func TestBytes(t *testing.T) {
- if err := quick.Check(checkBytes, nil); err != nil {
- t.Error(err)
- }
-}
-
-func checkQuo(x, y []byte) bool {
- u := new(Int).SetBytes(x)
- v := new(Int).SetBytes(y)
-
- if len(v.abs) == 0 {
- return true
- }
-
- r := new(Int)
- q, r := new(Int).QuoRem(u, v, r)
-
- if r.Cmp(v) >= 0 {
- return false
- }
-
- uprime := new(Int).Set(q)
- uprime.Mul(uprime, v)
- uprime.Add(uprime, r)
-
- return uprime.Cmp(u) == 0
-}
-
-var quoTests = []struct {
- x, y string
- q, r string
-}{
- {
- "476217953993950760840509444250624797097991362735329973741718102894495832294430498335824897858659711275234906400899559094370964723884706254265559534144986498357",
- "9353930466774385905609975137998169297361893554149986716853295022578535724979483772383667534691121982974895531435241089241440253066816724367338287092081996",
- "50911",
- "1",
- },
- {
- "11510768301994997771168",
- "1328165573307167369775",
- "8",
- "885443715537658812968",
- },
-}
-
-func TestQuo(t *testing.T) {
- if err := quick.Check(checkQuo, nil); err != nil {
- t.Error(err)
- }
-
- for i, test := range quoTests {
- x, _ := new(Int).SetString(test.x, 10)
- y, _ := new(Int).SetString(test.y, 10)
- expectedQ, _ := new(Int).SetString(test.q, 10)
- expectedR, _ := new(Int).SetString(test.r, 10)
-
- r := new(Int)
- q, r := new(Int).QuoRem(x, y, r)
-
- if q.Cmp(expectedQ) != 0 || r.Cmp(expectedR) != 0 {
- t.Errorf("#%d got (%s, %s) want (%s, %s)", i, q, r, expectedQ, expectedR)
- }
- }
-}
-
-func TestQuoStepD6(t *testing.T) {
- // See Knuth, Volume 2, section 4.3.1, exercise 21. This code exercises
- // a code path which triggers only about once in 10^19 cases.
-
- u := &Int{false, nat{0, 0, 1 + 1<<(_W-1), _M ^ (1 << (_W - 1))}}
- v := &Int{false, nat{5, 2 + 1<<(_W-1), 1 << (_W - 1)}}
-
- r := new(Int)
- q, r := new(Int).QuoRem(u, v, r)
- const expectedQ64 = "18446744073709551613"
- const expectedR64 = "3138550867693340382088035895064302439801311770021610913807"
- const expectedQ32 = "4294967293"
- const expectedR32 = "39614081266355540837921718287"
- if q.String() != expectedQ64 && q.String() != expectedQ32 ||
- r.String() != expectedR64 && r.String() != expectedR32 {
- t.Errorf("got (%s, %s) want (%s, %s) or (%s, %s)", q, r, expectedQ64, expectedR64, expectedQ32, expectedR32)
- }
-}
-
-var bitLenTests = []struct {
- in string
- out int
-}{
- {"-1", 1},
- {"0", 0},
- {"1", 1},
- {"2", 2},
- {"4", 3},
- {"0xabc", 12},
- {"0x8000", 16},
- {"0x80000000", 32},
- {"0x800000000000", 48},
- {"0x8000000000000000", 64},
- {"0x80000000000000000000", 80},
- {"-0x4000000000000000000000", 87},
-}
-
-func TestBitLen(t *testing.T) {
- for i, test := range bitLenTests {
- x, ok := new(Int).SetString(test.in, 0)
- if !ok {
- t.Errorf("#%d test input invalid: %s", i, test.in)
- continue
- }
-
- if n := x.BitLen(); n != test.out {
- t.Errorf("#%d got %d want %d", i, n, test.out)
- }
- }
-}
-
-var expTests = []struct {
- x, y, m string
- out string
-}{
- {"5", "0", "", "1"},
- {"-5", "0", "", "-1"},
- {"5", "1", "", "5"},
- {"-5", "1", "", "-5"},
- {"-2", "3", "2", "0"},
- {"5", "2", "", "25"},
- {"1", "65537", "2", "1"},
- {"0x8000000000000000", "2", "", "0x40000000000000000000000000000000"},
- {"0x8000000000000000", "2", "6719", "4944"},
- {"0x8000000000000000", "3", "6719", "5447"},
- {"0x8000000000000000", "1000", "6719", "1603"},
- {"0x8000000000000000", "1000000", "6719", "3199"},
- {
- "2938462938472983472983659726349017249287491026512746239764525612965293865296239471239874193284792387498274256129746192347",
- "298472983472983471903246121093472394872319615612417471234712061",
- "29834729834729834729347290846729561262544958723956495615629569234729836259263598127342374289365912465901365498236492183464",
- "23537740700184054162508175125554701713153216681790245129157191391322321508055833908509185839069455749219131480588829346291",
- },
-}
-
-func TestExp(t *testing.T) {
- for i, test := range expTests {
- x, ok1 := new(Int).SetString(test.x, 0)
- y, ok2 := new(Int).SetString(test.y, 0)
- out, ok3 := new(Int).SetString(test.out, 0)
-
- var ok4 bool
- var m *Int
-
- if len(test.m) == 0 {
- m, ok4 = nil, true
- } else {
- m, ok4 = new(Int).SetString(test.m, 0)
- }
-
- if !ok1 || !ok2 || !ok3 || !ok4 {
- t.Errorf("#%d: error in input", i)
- continue
- }
-
- z := y.Exp(x, y, m)
- if !isNormalized(z) {
- t.Errorf("#%d: %v is not normalized", i, *z)
- }
- if z.Cmp(out) != 0 {
- t.Errorf("#%d: got %s want %s", i, z, out)
- }
- }
-}
-
-func checkGcd(aBytes, bBytes []byte) bool {
- a := new(Int).SetBytes(aBytes)
- b := new(Int).SetBytes(bBytes)
-
- x := new(Int)
- y := new(Int)
- d := new(Int)
-
- GcdInt(d, x, y, a, b)
- x.Mul(x, a)
- y.Mul(y, b)
- x.Add(x, y)
-
- return x.Cmp(d) == 0
-}
-
-var gcdTests = []struct {
- a, b int64
- d, x, y int64
-}{
- {120, 23, 1, -9, 47},
-}
-
-func TestGcd(t *testing.T) {
- for i, test := range gcdTests {
- a := NewInt(test.a)
- b := NewInt(test.b)
-
- x := new(Int)
- y := new(Int)
- d := new(Int)
-
- expectedX := NewInt(test.x)
- expectedY := NewInt(test.y)
- expectedD := NewInt(test.d)
-
- GcdInt(d, x, y, a, b)
-
- if expectedX.Cmp(x) != 0 ||
- expectedY.Cmp(y) != 0 ||
- expectedD.Cmp(d) != 0 {
- t.Errorf("#%d got (%s %s %s) want (%s %s %s)", i, x, y, d, expectedX, expectedY, expectedD)
- }
- }
-
- if err := quick.Check(checkGcd, nil); err != nil {
- t.Error(err)
- }
-}
-
-var primes = []string{
- "2",
- "3",
- "5",
- "7",
- "11",
-
- "13756265695458089029",
- "13496181268022124907",
- "10953742525620032441",
- "17908251027575790097",
-
- // http://code.google.com/p/go/issues/detail?id=638
- "18699199384836356663",
-
- "98920366548084643601728869055592650835572950932266967461790948584315647051443",
- "94560208308847015747498523884063394671606671904944666360068158221458669711639",
-
- // http://primes.utm.edu/lists/small/small3.html
- "449417999055441493994709297093108513015373787049558499205492347871729927573118262811508386655998299074566974373711472560655026288668094291699357843464363003144674940345912431129144354948751003607115263071543163",
- "230975859993204150666423538988557839555560243929065415434980904258310530753006723857139742334640122533598517597674807096648905501653461687601339782814316124971547968912893214002992086353183070342498989426570593",
- "5521712099665906221540423207019333379125265462121169655563495403888449493493629943498064604536961775110765377745550377067893607246020694972959780839151452457728855382113555867743022746090187341871655890805971735385789993",
- "203956878356401977405765866929034577280193993314348263094772646453283062722701277632936616063144088173312372882677123879538709400158306567338328279154499698366071906766440037074217117805690872792848149112022286332144876183376326512083574821647933992961249917319836219304274280243803104015000563790123",
-}
-
-var composites = []string{
- "21284175091214687912771199898307297748211672914763848041968395774954376176754",
- "6084766654921918907427900243509372380954290099172559290432744450051395395951",
- "84594350493221918389213352992032324280367711247940675652888030554255915464401",
- "82793403787388584738507275144194252681",
-}
-
-func TestProbablyPrime(t *testing.T) {
- nreps := 20
- if testing.Short() {
- nreps = 1
- }
- for i, s := range primes {
- p, _ := new(Int).SetString(s, 10)
- if !ProbablyPrime(p, nreps) {
- t.Errorf("#%d prime found to be non-prime (%s)", i, s)
- }
- }
-
- for i, s := range composites {
- c, _ := new(Int).SetString(s, 10)
- if ProbablyPrime(c, nreps) {
- t.Errorf("#%d composite found to be prime (%s)", i, s)
- }
- if testing.Short() {
- break
- }
- }
-}
-
-type intShiftTest struct {
- in string
- shift uint
- out string
-}
-
-var rshTests = []intShiftTest{
- {"0", 0, "0"},
- {"-0", 0, "0"},
- {"0", 1, "0"},
- {"0", 2, "0"},
- {"1", 0, "1"},
- {"1", 1, "0"},
- {"1", 2, "0"},
- {"2", 0, "2"},
- {"2", 1, "1"},
- {"-1", 0, "-1"},
- {"-1", 1, "-1"},
- {"-1", 10, "-1"},
- {"-100", 2, "-25"},
- {"-100", 3, "-13"},
- {"-100", 100, "-1"},
- {"4294967296", 0, "4294967296"},
- {"4294967296", 1, "2147483648"},
- {"4294967296", 2, "1073741824"},
- {"18446744073709551616", 0, "18446744073709551616"},
- {"18446744073709551616", 1, "9223372036854775808"},
- {"18446744073709551616", 2, "4611686018427387904"},
- {"18446744073709551616", 64, "1"},
- {"340282366920938463463374607431768211456", 64, "18446744073709551616"},
- {"340282366920938463463374607431768211456", 128, "1"},
-}
-
-func TestRsh(t *testing.T) {
- for i, test := range rshTests {
- in, _ := new(Int).SetString(test.in, 10)
- expected, _ := new(Int).SetString(test.out, 10)
- out := new(Int).Rsh(in, test.shift)
-
- if !isNormalized(out) {
- t.Errorf("#%d: %v is not normalized", i, *out)
- }
- if out.Cmp(expected) != 0 {
- t.Errorf("#%d: got %s want %s", i, out, expected)
- }
- }
-}
-
-func TestRshSelf(t *testing.T) {
- for i, test := range rshTests {
- z, _ := new(Int).SetString(test.in, 10)
- expected, _ := new(Int).SetString(test.out, 10)
- z.Rsh(z, test.shift)
-
- if !isNormalized(z) {
- t.Errorf("#%d: %v is not normalized", i, *z)
- }
- if z.Cmp(expected) != 0 {
- t.Errorf("#%d: got %s want %s", i, z, expected)
- }
- }
-}
-
-var lshTests = []intShiftTest{
- {"0", 0, "0"},
- {"0", 1, "0"},
- {"0", 2, "0"},
- {"1", 0, "1"},
- {"1", 1, "2"},
- {"1", 2, "4"},
- {"2", 0, "2"},
- {"2", 1, "4"},
- {"2", 2, "8"},
- {"-87", 1, "-174"},
- {"4294967296", 0, "4294967296"},
- {"4294967296", 1, "8589934592"},
- {"4294967296", 2, "17179869184"},
- {"18446744073709551616", 0, "18446744073709551616"},
- {"9223372036854775808", 1, "18446744073709551616"},
- {"4611686018427387904", 2, "18446744073709551616"},
- {"1", 64, "18446744073709551616"},
- {"18446744073709551616", 64, "340282366920938463463374607431768211456"},
- {"1", 128, "340282366920938463463374607431768211456"},
-}
-
-func TestLsh(t *testing.T) {
- for i, test := range lshTests {
- in, _ := new(Int).SetString(test.in, 10)
- expected, _ := new(Int).SetString(test.out, 10)
- out := new(Int).Lsh(in, test.shift)
-
- if !isNormalized(out) {
- t.Errorf("#%d: %v is not normalized", i, *out)
- }
- if out.Cmp(expected) != 0 {
- t.Errorf("#%d: got %s want %s", i, out, expected)
- }
- }
-}
-
-func TestLshSelf(t *testing.T) {
- for i, test := range lshTests {
- z, _ := new(Int).SetString(test.in, 10)
- expected, _ := new(Int).SetString(test.out, 10)
- z.Lsh(z, test.shift)
-
- if !isNormalized(z) {
- t.Errorf("#%d: %v is not normalized", i, *z)
- }
- if z.Cmp(expected) != 0 {
- t.Errorf("#%d: got %s want %s", i, z, expected)
- }
- }
-}
-
-func TestLshRsh(t *testing.T) {
- for i, test := range rshTests {
- in, _ := new(Int).SetString(test.in, 10)
- out := new(Int).Lsh(in, test.shift)
- out = out.Rsh(out, test.shift)
-
- if !isNormalized(out) {
- t.Errorf("#%d: %v is not normalized", i, *out)
- }
- if in.Cmp(out) != 0 {
- t.Errorf("#%d: got %s want %s", i, out, in)
- }
- }
- for i, test := range lshTests {
- in, _ := new(Int).SetString(test.in, 10)
- out := new(Int).Lsh(in, test.shift)
- out.Rsh(out, test.shift)
-
- if !isNormalized(out) {
- t.Errorf("#%d: %v is not normalized", i, *out)
- }
- if in.Cmp(out) != 0 {
- t.Errorf("#%d: got %s want %s", i, out, in)
- }
- }
-}
-
-var int64Tests = []int64{
- 0,
- 1,
- -1,
- 4294967295,
- -4294967295,
- 4294967296,
- -4294967296,
- 9223372036854775807,
- -9223372036854775807,
- -9223372036854775808,
-}
-
-func TestInt64(t *testing.T) {
- for i, testVal := range int64Tests {
- in := NewInt(testVal)
- out := in.Int64()
-
- if out != testVal {
- t.Errorf("#%d got %d want %d", i, out, testVal)
- }
- }
-}
-
-var bitwiseTests = []struct {
- x, y string
- and, or, xor, andNot string
-}{
- {"0x00", "0x00", "0x00", "0x00", "0x00", "0x00"},
- {"0x00", "0x01", "0x00", "0x01", "0x01", "0x00"},
- {"0x01", "0x00", "0x00", "0x01", "0x01", "0x01"},
- {"-0x01", "0x00", "0x00", "-0x01", "-0x01", "-0x01"},
- {"-0xaf", "-0x50", "-0xf0", "-0x0f", "0xe1", "0x41"},
- {"0x00", "-0x01", "0x00", "-0x01", "-0x01", "0x00"},
- {"0x01", "0x01", "0x01", "0x01", "0x00", "0x00"},
- {"-0x01", "-0x01", "-0x01", "-0x01", "0x00", "0x00"},
- {"0x07", "0x08", "0x00", "0x0f", "0x0f", "0x07"},
- {"0x05", "0x0f", "0x05", "0x0f", "0x0a", "0x00"},
- {"0x013ff6", "0x9a4e", "0x1a46", "0x01bffe", "0x01a5b8", "0x0125b0"},
- {"-0x013ff6", "0x9a4e", "0x800a", "-0x0125b2", "-0x01a5bc", "-0x01c000"},
- {"-0x013ff6", "-0x9a4e", "-0x01bffe", "-0x1a46", "0x01a5b8", "0x8008"},
- {
- "0x1000009dc6e3d9822cba04129bcbe3401",
- "0xb9bd7d543685789d57cb918e833af352559021483cdb05cc21fd",
- "0x1000001186210100001000009048c2001",
- "0xb9bd7d543685789d57cb918e8bfeff7fddb2ebe87dfbbdfe35fd",
- "0xb9bd7d543685789d57ca918e8ae69d6fcdb2eae87df2b97215fc",
- "0x8c40c2d8822caa04120b8321400",
- },
- {
- "0x1000009dc6e3d9822cba04129bcbe3401",
- "-0xb9bd7d543685789d57cb918e833af352559021483cdb05cc21fd",
- "0x8c40c2d8822caa04120b8321401",
- "-0xb9bd7d543685789d57ca918e82229142459020483cd2014001fd",
- "-0xb9bd7d543685789d57ca918e8ae69d6fcdb2eae87df2b97215fe",
- "0x1000001186210100001000009048c2000",
- },
- {
- "-0x1000009dc6e3d9822cba04129bcbe3401",
- "-0xb9bd7d543685789d57cb918e833af352559021483cdb05cc21fd",
- "-0xb9bd7d543685789d57cb918e8bfeff7fddb2ebe87dfbbdfe35fd",
- "-0x1000001186210100001000009048c2001",
- "0xb9bd7d543685789d57ca918e8ae69d6fcdb2eae87df2b97215fc",
- "0xb9bd7d543685789d57ca918e82229142459020483cd2014001fc",
- },
-}
-
-type bitFun func(z, x, y *Int) *Int
-
-func testBitFun(t *testing.T, msg string, f bitFun, x, y *Int, exp string) {
- expected := new(Int)
- expected.SetString(exp, 0)
-
- out := f(new(Int), x, y)
- if out.Cmp(expected) != 0 {
- t.Errorf("%s: got %s want %s", msg, out, expected)
- }
-}
-
-func testBitFunSelf(t *testing.T, msg string, f bitFun, x, y *Int, exp string) {
- self := new(Int)
- self.Set(x)
- expected := new(Int)
- expected.SetString(exp, 0)
-
- self = f(self, self, y)
- if self.Cmp(expected) != 0 {
- t.Errorf("%s: got %s want %s", msg, self, expected)
- }
-}
-
-func altBit(x *Int, i int) uint {
- z := new(Int).Rsh(x, uint(i))
- z = z.And(z, NewInt(1))
- if z.Cmp(new(Int)) != 0 {
- return 1
- }
- return 0
-}
-
-func altSetBit(z *Int, x *Int, i int, b uint) *Int {
- one := NewInt(1)
- m := one.Lsh(one, uint(i))
- switch b {
- case 1:
- return z.Or(x, m)
- case 0:
- return z.AndNot(x, m)
- }
- panic("set bit is not 0 or 1")
-}
-
-func testBitset(t *testing.T, x *Int) {
- n := x.BitLen()
- z := new(Int).Set(x)
- z1 := new(Int).Set(x)
- for i := 0; i < n+10; i++ {
- old := z.Bit(i)
- old1 := altBit(z1, i)
- if old != old1 {
- t.Errorf("bitset: inconsistent value for Bit(%s, %d), got %v want %v", z1, i, old, old1)
- }
- z := new(Int).SetBit(z, i, 1)
- z1 := altSetBit(new(Int), z1, i, 1)
- if z.Bit(i) == 0 {
- t.Errorf("bitset: bit %d of %s got 0 want 1", i, x)
- }
- if z.Cmp(z1) != 0 {
- t.Errorf("bitset: inconsistent value after SetBit 1, got %s want %s", z, z1)
- }
- z.SetBit(z, i, 0)
- altSetBit(z1, z1, i, 0)
- if z.Bit(i) != 0 {
- t.Errorf("bitset: bit %d of %s got 1 want 0", i, x)
- }
- if z.Cmp(z1) != 0 {
- t.Errorf("bitset: inconsistent value after SetBit 0, got %s want %s", z, z1)
- }
- altSetBit(z1, z1, i, old)
- z.SetBit(z, i, old)
- if z.Cmp(z1) != 0 {
- t.Errorf("bitset: inconsistent value after SetBit old, got %s want %s", z, z1)
- }
- }
- if z.Cmp(x) != 0 {
- t.Errorf("bitset: got %s want %s", z, x)
- }
-}
-
-var bitsetTests = []struct {
- x string
- i int
- b uint
-}{
- {"0", 0, 0},
- {"0", 200, 0},
- {"1", 0, 1},
- {"1", 1, 0},
- {"-1", 0, 1},
- {"-1", 200, 1},
- {"0x2000000000000000000000000000", 108, 0},
- {"0x2000000000000000000000000000", 109, 1},
- {"0x2000000000000000000000000000", 110, 0},
- {"-0x2000000000000000000000000001", 108, 1},
- {"-0x2000000000000000000000000001", 109, 0},
- {"-0x2000000000000000000000000001", 110, 1},
-}
-
-func TestBitSet(t *testing.T) {
- for _, test := range bitwiseTests {
- x := new(Int)
- x.SetString(test.x, 0)
- testBitset(t, x)
- x = new(Int)
- x.SetString(test.y, 0)
- testBitset(t, x)
- }
- for i, test := range bitsetTests {
- x := new(Int)
- x.SetString(test.x, 0)
- b := x.Bit(test.i)
- if b != test.b {
- t.Errorf("#%d want %v got %v", i, test.b, b)
- }
- }
-}
-
-func BenchmarkBitset(b *testing.B) {
- z := new(Int)
- z.SetBit(z, 512, 1)
- b.ResetTimer()
- b.StartTimer()
- for i := b.N - 1; i >= 0; i-- {
- z.SetBit(z, i&512, 1)
- }
-}
-
-func BenchmarkBitsetNeg(b *testing.B) {
- z := NewInt(-1)
- z.SetBit(z, 512, 0)
- b.ResetTimer()
- b.StartTimer()
- for i := b.N - 1; i >= 0; i-- {
- z.SetBit(z, i&512, 0)
- }
-}
-
-func BenchmarkBitsetOrig(b *testing.B) {
- z := new(Int)
- altSetBit(z, z, 512, 1)
- b.ResetTimer()
- b.StartTimer()
- for i := b.N - 1; i >= 0; i-- {
- altSetBit(z, z, i&512, 1)
- }
-}
-
-func BenchmarkBitsetNegOrig(b *testing.B) {
- z := NewInt(-1)
- altSetBit(z, z, 512, 0)
- b.ResetTimer()
- b.StartTimer()
- for i := b.N - 1; i >= 0; i-- {
- altSetBit(z, z, i&512, 0)
- }
-}
-
-func TestBitwise(t *testing.T) {
- x := new(Int)
- y := new(Int)
- for _, test := range bitwiseTests {
- x.SetString(test.x, 0)
- y.SetString(test.y, 0)
-
- testBitFun(t, "and", (*Int).And, x, y, test.and)
- testBitFunSelf(t, "and", (*Int).And, x, y, test.and)
- testBitFun(t, "andNot", (*Int).AndNot, x, y, test.andNot)
- testBitFunSelf(t, "andNot", (*Int).AndNot, x, y, test.andNot)
- testBitFun(t, "or", (*Int).Or, x, y, test.or)
- testBitFunSelf(t, "or", (*Int).Or, x, y, test.or)
- testBitFun(t, "xor", (*Int).Xor, x, y, test.xor)
- testBitFunSelf(t, "xor", (*Int).Xor, x, y, test.xor)
- }
-}
-
-var notTests = []struct {
- in string
- out string
-}{
- {"0", "-1"},
- {"1", "-2"},
- {"7", "-8"},
- {"0", "-1"},
- {"-81910", "81909"},
- {
- "298472983472983471903246121093472394872319615612417471234712061",
- "-298472983472983471903246121093472394872319615612417471234712062",
- },
-}
-
-func TestNot(t *testing.T) {
- in := new(Int)
- out := new(Int)
- expected := new(Int)
- for i, test := range notTests {
- in.SetString(test.in, 10)
- expected.SetString(test.out, 10)
- out = out.Not(in)
- if out.Cmp(expected) != 0 {
- t.Errorf("#%d: got %s want %s", i, out, expected)
- }
- out = out.Not(out)
- if out.Cmp(in) != 0 {
- t.Errorf("#%d: got %s want %s", i, out, in)
- }
- }
-}
-
-var modInverseTests = []struct {
- element string
- prime string
-}{
- {"1", "7"},
- {"1", "13"},
- {"239487239847", "2410312426921032588552076022197566074856950548502459942654116941958108831682612228890093858261341614673227141477904012196503648957050582631942730706805009223062734745341073406696246014589361659774041027169249453200378729434170325843778659198143763193776859869524088940195577346119843545301547043747207749969763750084308926339295559968882457872412993810129130294592999947926365264059284647209730384947211681434464714438488520940127459844288859336526896320919633919"},
-}
-
-func TestModInverse(t *testing.T) {
- var element, prime Int
- one := NewInt(1)
- for i, test := range modInverseTests {
- (&element).SetString(test.element, 10)
- (&prime).SetString(test.prime, 10)
- inverse := new(Int).ModInverse(&element, &prime)
- inverse.Mul(inverse, &element)
- inverse.Mod(inverse, &prime)
- if inverse.Cmp(one) != 0 {
- t.Errorf("#%d: failed (e·e^(-1)=%s)", i, inverse)
- }
- }
-}
-
-// used by TestIntGobEncoding and TestRatGobEncoding
-var gobEncodingTests = []string{
- "0",
- "1",
- "2",
- "10",
- "42",
- "1234567890",
- "298472983472983471903246121093472394872319615612417471234712061",
-}
-
-func TestIntGobEncoding(t *testing.T) {
- var medium bytes.Buffer
- enc := gob.NewEncoder(&medium)
- dec := gob.NewDecoder(&medium)
- for i, test := range gobEncodingTests {
- for j := 0; j < 2; j++ {
- medium.Reset() // empty buffer for each test case (in case of failures)
- stest := test
- if j != 0 {
- // negative numbers
- stest = "-" + test
- }
- var tx Int
- tx.SetString(stest, 10)
- if err := enc.Encode(&tx); err != nil {
- t.Errorf("#%d%c: encoding failed: %s", i, 'a'+j, err)
- }
- var rx Int
- if err := dec.Decode(&rx); err != nil {
- t.Errorf("#%d%c: decoding failed: %s", i, 'a'+j, err)
- }
- if rx.Cmp(&tx) != 0 {
- t.Errorf("#%d%c: transmission failed: got %s want %s", i, 'a'+j, &rx, &tx)
- }
- }
- }
-}
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package big implements multi-precision arithmetic (big numbers).
-// The following numeric types are supported:
-//
-// - Int signed integers
-// - Rat rational numbers
-//
-// All methods on Int take the result as the receiver; if it is one
-// of the operands it may be overwritten (and its memory reused).
-// To enable chaining of operations, the result is also returned.
-//
-package big
-
-// This file contains operations on unsigned multi-precision integers.
-// These are the building blocks for the operations on signed integers
-// and rationals.
-
-import (
- "errors"
- "io"
- "rand"
-)
-
-// An unsigned integer x of the form
-//
-// x = x[n-1]*_B^(n-1) + x[n-2]*_B^(n-2) + ... + x[1]*_B + x[0]
-//
-// with 0 <= x[i] < _B and 0 <= i < n is stored in a slice of length n,
-// with the digits x[i] as the slice elements.
-//
-// A number is normalized if the slice contains no leading 0 digits.
-// During arithmetic operations, denormalized values may occur but are
-// always normalized before returning the final result. The normalized
-// representation of 0 is the empty or nil slice (length = 0).
-//
-type nat []Word
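Editorial worked example of the representation: with 32-bit Words (_B = 2**32)
the value 2**32 + 5 is stored least-significant word first as nat{5, 1}, and
the value 0 is the empty (or nil) slice.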
-
-var (
- natOne = nat{1}
- natTwo = nat{2}
- natTen = nat{10}
-)
-
-func (z nat) clear() {
- for i := range z {
- z[i] = 0
- }
-}
-
-func (z nat) norm() nat {
- i := len(z)
- for i > 0 && z[i-1] == 0 {
- i--
- }
- return z[0:i]
-}
-
-func (z nat) make(n int) nat {
- if n <= cap(z) {
- return z[0:n] // reuse z
- }
- // Choosing a good value for e has significant performance impact
- // because it increases the chance that a value can be reused.
- const e = 4 // extra capacity
- return make(nat, n, n+e)
-}
-
-func (z nat) setWord(x Word) nat {
- if x == 0 {
- return z.make(0)
- }
- z = z.make(1)
- z[0] = x
- return z
-}
-
-func (z nat) setUint64(x uint64) nat {
- // single-digit values
- if w := Word(x); uint64(w) == x {
- return z.setWord(w)
- }
-
- // compute number of words n required to represent x
- n := 0
- for t := x; t > 0; t >>= _W {
- n++
- }
-
- // split x into n words
- z = z.make(n)
- for i := range z {
- z[i] = Word(x & _M)
- x >>= _W
- }
-
- return z
-}
-
-func (z nat) set(x nat) nat {
- z = z.make(len(x))
- copy(z, x)
- return z
-}
-
-func (z nat) add(x, y nat) nat {
- m := len(x)
- n := len(y)
-
- switch {
- case m < n:
- return z.add(y, x)
- case m == 0:
- // n == 0 because m >= n; result is 0
- return z.make(0)
- case n == 0:
- // result is x
- return z.set(x)
- }
- // m > 0
-
- z = z.make(m + 1)
- c := addVV(z[0:n], x, y)
- if m > n {
- c = addVW(z[n:m], x[n:], c)
- }
- z[m] = c
-
- return z.norm()
-}
-
-func (z nat) sub(x, y nat) nat {
- m := len(x)
- n := len(y)
-
- switch {
- case m < n:
- panic("underflow")
- case m == 0:
- // n == 0 because m >= n; result is 0
- return z.make(0)
- case n == 0:
- // result is x
- return z.set(x)
- }
- // m > 0
-
- z = z.make(m)
- c := subVV(z[0:n], x, y)
- if m > n {
- c = subVW(z[n:], x[n:], c)
- }
- if c != 0 {
- panic("underflow")
- }
-
- return z.norm()
-}
-
-func (x nat) cmp(y nat) (r int) {
- m := len(x)
- n := len(y)
- if m != n || m == 0 {
- switch {
- case m < n:
- r = -1
- case m > n:
- r = 1
- }
- return
- }
-
- i := m - 1
- for i > 0 && x[i] == y[i] {
- i--
- }
-
- switch {
- case x[i] < y[i]:
- r = -1
- case x[i] > y[i]:
- r = 1
- }
- return
-}
-
-func (z nat) mulAddWW(x nat, y, r Word) nat {
- m := len(x)
- if m == 0 || y == 0 {
- return z.setWord(r) // result is r
- }
- // m > 0
-
- z = z.make(m + 1)
- z[m] = mulAddVWW(z[0:m], x, y, r)
-
- return z.norm()
-}
-
-// basicMul multiplies x and y and leaves the result in z.
-// The (non-normalized) result is placed in z[0 : len(x) + len(y)].
-func basicMul(z, x, y nat) {
- z[0 : len(x)+len(y)].clear() // initialize z
- for i, d := range y {
- if d != 0 {
- z[len(x)+i] = addMulVVW(z[i:i+len(x)], x, d)
- }
- }
-}
-
-// Fast version of z[0:n+n>>1].add(z[0:n+n>>1], x[0:n]) w/o bounds checks.
-// Factored out for readability - do not use outside karatsuba.
-func karatsubaAdd(z, x nat, n int) {
- if c := addVV(z[0:n], z, x); c != 0 {
- addVW(z[n:n+n>>1], z[n:], c)
- }
-}
-
-// Like karatsubaAdd, but does subtract.
-func karatsubaSub(z, x nat, n int) {
- if c := subVV(z[0:n], z, x); c != 0 {
- subVW(z[n:n+n>>1], z[n:], c)
- }
-}
-
-// Operands that are shorter than karatsubaThreshold are multiplied using
-// "grade school" multiplication; for longer operands the Karatsuba algorithm
-// is used.
-var karatsubaThreshold int = 32 // computed by calibrate.go
-
-// karatsuba multiplies x and y and leaves the result in z.
-// Both x and y must have the same length n and n must be a
-// power of 2. The result vector z must have len(z) >= 6*n.
-// The (non-normalized) result is placed in z[0 : 2*n].
-func karatsuba(z, x, y nat) {
- n := len(y)
-
- // Switch to basic multiplication if numbers are odd or small.
- // (n is always even if karatsubaThreshold is even, but be
- // conservative)
- if n&1 != 0 || n < karatsubaThreshold || n < 2 {
- basicMul(z, x, y)
- return
- }
- // n&1 == 0 && n >= karatsubaThreshold && n >= 2
-
- // Karatsuba multiplication is based on the observation that
- // for two numbers x and y with:
- //
- // x = x1*b + x0
- // y = y1*b + y0
- //
- // the product x*y can be obtained with 3 products z2, z1, z0
- // instead of 4:
- //
- // x*y = x1*y1*b*b + (x1*y0 + x0*y1)*b + x0*y0
- // = z2*b*b + z1*b + z0
- //
- // with:
- //
- // xd = x1 - x0
- // yd = y0 - y1
- //
- // z1 = xd*yd + z1 + z0
- // = (x1-x0)*(y0 - y1) + z1 + z0
- // = x1*y0 - x1*y1 - x0*y0 + x0*y1 + z1 + z0
- // = x1*y0 - z1 - z0 + x0*y1 + z1 + z0
- // = x1*y0 + x0*y1
-
- // split x, y into "digits"
- n2 := n >> 1 // n2 >= 1
- x1, x0 := x[n2:], x[0:n2] // x = x1*b + x0
- y1, y0 := y[n2:], y[0:n2] // y = y1*b + y0
-
- // z is used for the result and temporary storage:
- //
- // 6*n 5*n 4*n 3*n 2*n 1*n 0*n
- // z = [z2 copy|z0 copy| xd*yd | yd:xd | x1*y1 | x0*y0 ]
- //
- // For each recursive call of karatsuba, an unused slice of
- // z is passed in that has (at least) half the length of the
- // caller's z.
-
- // compute z0 and z2 with the result "in place" in z
- karatsuba(z, x0, y0) // z0 = x0*y0
- karatsuba(z[n:], x1, y1) // z2 = x1*y1
-
- // compute xd (or the negative value if underflow occurs)
- s := 1 // sign of product xd*yd
- xd := z[2*n : 2*n+n2]
- if subVV(xd, x1, x0) != 0 { // x1-x0
- s = -s
- subVV(xd, x0, x1) // x0-x1
- }
-
- // compute yd (or the negative value if underflow occurs)
- yd := z[2*n+n2 : 3*n]
- if subVV(yd, y0, y1) != 0 { // y0-y1
- s = -s
- subVV(yd, y1, y0) // y1-y0
- }
-
- // p = (x1-x0)*(y0-y1) == x1*y0 - x1*y1 - x0*y0 + x0*y1 for s > 0
- // p = (x0-x1)*(y0-y1) == x0*y0 - x0*y1 - x1*y0 + x1*y1 for s < 0
- p := z[n*3:]
- karatsuba(p, xd, yd)
-
- // save original z2:z0
- // (ok to use upper half of z since we're done recursing)
- r := z[n*4:]
- copy(r, z)
-
- // add up all partial products
- //
- // 2*n n 0
- // z = [ z2 | z0 ]
- // + [ z0 ]
- // + [ z2 ]
- // + [ p ]
- //
- karatsubaAdd(z[n2:], r, n)
- karatsubaAdd(z[n2:], r[n:], n)
- if s > 0 {
- karatsubaAdd(z[n2:], p, n)
- } else {
- karatsubaSub(z[n2:], p, n)
- }
-}
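A toy editorial illustration (not part of the library) of the
three-multiplication identity used by karatsuba above, with int64 halves and
base b = 100 standing in for machine words:

	package main

	import "fmt"

	// karatsubaToy multiplies two numbers in [0, 10000) using only the three
	// products z2, z0 and (x1-x0)*(y0-y1), as in the comments above.
	func karatsubaToy(x, y int64) int64 {
		const b = 100
		x1, x0 := x/b, x%b
		y1, y0 := y/b, y%b
		z2 := x1 * y1
		z0 := x0 * y0
		z1 := (x1-x0)*(y0-y1) + z2 + z0 // == x1*y0 + x0*y1
		return z2*b*b + z1*b + z0
	}

	func main() {
		fmt.Println(karatsubaToy(1234, 5678), 1234*5678) // both print 7006652
	}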
-
-// alias returns true if x and y share the same base array.
-func alias(x, y nat) bool {
- return cap(x) > 0 && cap(y) > 0 && &x[0:cap(x)][cap(x)-1] == &y[0:cap(y)][cap(y)-1]
-}
-
-// addAt implements z += x*(1<<(_W*i)); z must be long enough.
-// (we don't use nat.add because we need z to stay the same
-// slice, and we don't need to normalize z after each addition)
-func addAt(z, x nat, i int) {
- if n := len(x); n > 0 {
- if c := addVV(z[i:i+n], z[i:], x); c != 0 {
- j := i + n
- if j < len(z) {
- addVW(z[j:], z[j:], c)
- }
- }
- }
-}
-
-func max(x, y int) int {
- if x > y {
- return x
- }
- return y
-}
-
-// karatsubaLen computes an approximation to the maximum k <= n such that
-// k = p<<i for a number p <= karatsubaThreshold and an i >= 0. Thus, the
-// result is the largest number that can be divided repeatedly by 2 before
-// becoming about the value of karatsubaThreshold.
-func karatsubaLen(n int) int {
- i := uint(0)
- for n > karatsubaThreshold {
- n >>= 1
- i++
- }
- return n << i
-}
-
-func (z nat) mul(x, y nat) nat {
- m := len(x)
- n := len(y)
-
- switch {
- case m < n:
- return z.mul(y, x)
- case m == 0 || n == 0:
- return z.make(0)
- case n == 1:
- return z.mulAddWW(x, y[0], 0)
- }
- // m >= n > 1
-
- // determine if z can be reused
- if alias(z, x) || alias(z, y) {
- z = nil // z is an alias for x or y - cannot reuse
- }
-
- // use basic multiplication if the numbers are small
- if n < karatsubaThreshold || n < 2 {
- z = z.make(m + n)
- basicMul(z, x, y)
- return z.norm()
- }
- // m >= n && n >= karatsubaThreshold && n >= 2
-
- // determine Karatsuba length k such that
- //
- // x = x1*b + x0
- // y = y1*b + y0 (and k <= len(y), which implies k <= len(x))
- // b = 1<<(_W*k) ("base" of digits xi, yi)
- //
- k := karatsubaLen(n)
- // k <= n
-
- // multiply x0 and y0 via Karatsuba
- x0 := x[0:k] // x0 is not normalized
- y0 := y[0:k] // y0 is not normalized
- z = z.make(max(6*k, m+n)) // enough space for karatsuba of x0*y0 and full result of x*y
- karatsuba(z, x0, y0)
- z = z[0 : m+n] // z has final length but may be incomplete, upper portion is garbage
-
- // If x1 and/or y1 are not 0, add missing terms to z explicitly:
- //
- // m+n 2*k 0
- // z = [ ... | x0*y0 ]
- // + [ x1*y1 ]
- // + [ x1*y0 ]
- // + [ x0*y1 ]
- //
- if k < n || m != n {
- x1 := x[k:] // x1 is normalized because x is
- y1 := y[k:] // y1 is normalized because y is
- var t nat
- t = t.mul(x1, y1)
- copy(z[2*k:], t)
- z[2*k+len(t):].clear() // upper portion of z is garbage
- t = t.mul(x1, y0.norm())
- addAt(z, t, k)
- t = t.mul(x0.norm(), y1)
- addAt(z, t, k)
- }
-
- return z.norm()
-}
-
-// mulRange computes the product of all the unsigned integers in the
-// range [a, b] inclusively. If a > b (empty range), the result is 1.
-func (z nat) mulRange(a, b uint64) nat {
- switch {
- case a == 0:
- // cut long ranges short (optimization)
- return z.setUint64(0)
- case a > b:
- return z.setUint64(1)
- case a == b:
- return z.setUint64(a)
- case a+1 == b:
- return z.mul(nat{}.setUint64(a), nat{}.setUint64(b))
- }
- m := (a + b) / 2
- return z.mul(nat{}.mulRange(a, m), nat{}.mulRange(m+1, b))
-}
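Editorial worked example: mulRange(1, 5) splits at m = 3 into
mulRange(1, 3)*mulRange(4, 5) = 6*20 = 120 = 5!, so factorial-like products
are built from balanced halves rather than one long chain of small factors.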
-
-// q = (x-r)/y, with 0 <= r < y
-func (z nat) divW(x nat, y Word) (q nat, r Word) {
- m := len(x)
- switch {
- case y == 0:
- panic("division by zero")
- case y == 1:
- q = z.set(x) // result is x
- return
- case m == 0:
- q = z.make(0) // result is 0
- return
- }
- // m > 0
- z = z.make(m)
- r = divWVW(z, 0, x, y)
- q = z.norm()
- return
-}
-
-func (z nat) div(z2, u, v nat) (q, r nat) {
- if len(v) == 0 {
- panic("division by zero")
- }
-
- if u.cmp(v) < 0 {
- q = z.make(0)
- r = z2.set(u)
- return
- }
-
- if len(v) == 1 {
- var rprime Word
- q, rprime = z.divW(u, v[0])
- if rprime > 0 {
- r = z2.make(1)
- r[0] = rprime
- } else {
- r = z2.make(0)
- }
- return
- }
-
- q, r = z.divLarge(z2, u, v)
- return
-}
-
-// q = (uIn-r)/v, with 0 <= r < y
-// Uses z as storage for q, and u as storage for r if possible.
-// See Knuth, Volume 2, section 4.3.1, Algorithm D.
-// Preconditions:
-// len(v) >= 2
-// len(uIn) >= len(v)
-func (z nat) divLarge(u, uIn, v nat) (q, r nat) {
- n := len(v)
- m := len(uIn) - n
-
- // determine if z can be reused
- // TODO(gri) should find a better solution - this if statement
- // is very costly (see e.g. time pidigits -s -n 10000)
- if alias(z, uIn) || alias(z, v) {
- z = nil // z is an alias for uIn or v - cannot reuse
- }
- q = z.make(m + 1)
-
- qhatv := make(nat, n+1)
- if alias(u, uIn) || alias(u, v) {
- u = nil // u is an alias for uIn or v - cannot reuse
- }
- u = u.make(len(uIn) + 1)
- u.clear()
-
- // D1.
- shift := leadingZeros(v[n-1])
- if shift > 0 {
- // do not modify v, it may be used by another goroutine simultaneously
- v1 := make(nat, n)
- shlVU(v1, v, shift)
- v = v1
- }
- u[len(uIn)] = shlVU(u[0:len(uIn)], uIn, shift)
-
- // D2.
- for j := m; j >= 0; j-- {
- // D3.
- qhat := Word(_M)
- if u[j+n] != v[n-1] {
- var rhat Word
- qhat, rhat = divWW(u[j+n], u[j+n-1], v[n-1])
-
- // x1 | x2 = q̂v_{n-2}
- x1, x2 := mulWW(qhat, v[n-2])
- // test if q̂v_{n-2} > br̂ + u_{j+n-2}
- for greaterThan(x1, x2, rhat, u[j+n-2]) {
- qhat--
- prevRhat := rhat
- rhat += v[n-1]
- // v[n-1] >= 0, so this tests for overflow.
- if rhat < prevRhat {
- break
- }
- x1, x2 = mulWW(qhat, v[n-2])
- }
- }
-
- // D4.
- qhatv[n] = mulAddVWW(qhatv[0:n], v, qhat, 0)
-
- c := subVV(u[j:j+len(qhatv)], u[j:], qhatv)
- if c != 0 {
- c := addVV(u[j:j+n], u[j:], v)
- u[j+n] += c
- qhat--
- }
-
- q[j] = qhat
- }
-
- q = q.norm()
- shrVU(u, u, shift)
- r = u.norm()
-
- return q, r
-}
-
-// Length of x in bits. x must be normalized.
-func (x nat) bitLen() int {
- if i := len(x) - 1; i >= 0 {
- return i*_W + bitLen(x[i])
- }
- return 0
-}
-
-// MaxBase is the largest number base accepted for string conversions.
-const MaxBase = 'z' - 'a' + 10 + 1 // = hexValue('z') + 1
-
-func hexValue(ch rune) Word {
- d := MaxBase + 1 // illegal base
- switch {
- case '0' <= ch && ch <= '9':
- d = int(ch - '0')
- case 'a' <= ch && ch <= 'z':
- d = int(ch - 'a' + 10)
- case 'A' <= ch && ch <= 'Z':
- d = int(ch - 'A' + 10)
- }
- return Word(d)
-}
-
-// scan sets z to the natural number corresponding to the longest possible prefix
-// read from r representing an unsigned integer in a given conversion base.
-// It returns z, the actual conversion base used, and an error, if any. In the
-// error case, the value of z is undefined. The syntax follows the syntax of
-// unsigned integer literals in Go.
-//
-// The base argument must be 0 or a value from 2 through MaxBase. If the base
-// is 0, the string prefix determines the actual conversion base. A prefix of
-// ``0x'' or ``0X'' selects base 16; the ``0'' prefix selects base 8, and a
-// ``0b'' or ``0B'' prefix selects base 2. Otherwise the selected base is 10.
-//
-func (z nat) scan(r io.RuneScanner, base int) (nat, int, error) {
- // reject illegal bases
- if base < 0 || base == 1 || MaxBase < base {
- return z, 0, errors.New("illegal number base")
- }
-
- // one char look-ahead
- ch, _, err := r.ReadRune()
- if err != nil {
- return z, 0, err
- }
-
- // determine base if necessary
- b := Word(base)
- if base == 0 {
- b = 10
- if ch == '0' {
- switch ch, _, err = r.ReadRune(); err {
- case nil:
- b = 8
- switch ch {
- case 'x', 'X':
- b = 16
- case 'b', 'B':
- b = 2
- }
- if b == 2 || b == 16 {
- if ch, _, err = r.ReadRune(); err != nil {
- return z, 0, err
- }
- }
- case io.EOF:
- return z.make(0), 10, nil
- default:
- return z, 10, err
- }
- }
- }
-
- // convert string
- // - group as many digits d as possible together into a "super-digit" dd with "super-base" bb
- // - only when bb does not fit into a word anymore, do a full number mulAddWW using bb and dd
- z = z.make(0)
- bb := Word(1)
- dd := Word(0)
- for max := _M / b; ; {
- d := hexValue(ch)
- if d >= b {
- r.UnreadRune() // ch does not belong to number anymore
- break
- }
-
- if bb <= max {
- bb *= b
- dd = dd*b + d
- } else {
- // bb * b would overflow
- z = z.mulAddWW(z, bb, dd)
- bb = b
- dd = d
- }
-
- if ch, _, err = r.ReadRune(); err != nil {
- if err != io.EOF {
- return z, int(b), err
- }
- break
- }
- }
-
- switch {
- case bb > 1:
- // there was at least one mantissa digit
- z = z.mulAddWW(z, bb, dd)
- case base == 0 && b == 8:
- // there was only the octal prefix 0 (possibly followed by digits > 7);
- // return base 10, not 8
- return z, 10, nil
- case base != 0 || b != 8:
- // there was neither a mantissa digit nor the octal prefix 0
- return z, int(b), errors.New("syntax error scanning number")
- }
-
- return z.norm(), int(b), nil
-}
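As a rough usage sketch, the same prefix rules are visible through the exported Int.SetString when the base argument is 0:

	package main

	import (
		"fmt"
		"math/big"
	)

	func main() {
		for _, s := range []string{"0x1f", "017", "0b101", "99"} {
			z, ok := new(big.Int).SetString(s, 0) // base 0: infer the base from the prefix
			fmt.Println(s, z, ok)                 // 31, 15, 5, 99 in turn
		}
	}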
-
-// Character sets for string conversion.
-const (
- lowercaseDigits = "0123456789abcdefghijklmnopqrstuvwxyz"
- uppercaseDigits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
-)
-
-// decimalString returns a decimal representation of x.
-// It calls x.string with the charset "0123456789".
-func (x nat) decimalString() string {
- return x.string(lowercaseDigits[0:10])
-}
-
-// string converts x to a string using digits from a charset; a digit with
-// value d is represented by charset[d]. The conversion base is determined
-// by len(charset), which must be >= 2.
-func (x nat) string(charset string) string {
- b := Word(len(charset))
-
- // special cases
- switch {
- case b < 2 || b > 256:
- panic("illegal base")
- case len(x) == 0:
- return string(charset[0])
- }
-
- // allocate buffer for conversion
- i := x.bitLen()/log2(b) + 1 // +1: round up
- s := make([]byte, i)
-
- // special case: power of two bases can avoid divisions completely
- if b == b&-b {
- // shift is base-b digit size in bits
- shift := uint(trailingZeroBits(b)) // shift > 0 because b >= 2
- mask := Word(1)<<shift - 1
- w := x[0]
- nbits := uint(_W) // number of unprocessed bits in w
-
- // convert less-significant words
- for k := 1; k < len(x); k++ {
- // convert full digits
- for nbits >= shift {
- i--
- s[i] = charset[w&mask]
- w >>= shift
- nbits -= shift
- }
-
- // convert any partial leading digit and advance to next word
- if nbits == 0 {
- // no partial digit remaining, just advance
- w = x[k]
- nbits = _W
- } else {
- // partial digit in current (k-1) and next (k) word
- w |= x[k] << nbits
- i--
- s[i] = charset[w&mask]
-
- // advance
- w = x[k] >> (shift - nbits)
- nbits = _W - (shift - nbits)
- }
- }
-
- // convert digits of most-significant word (omit leading zeros)
- for nbits >= 0 && w != 0 {
- i--
- s[i] = charset[w&mask]
- w >>= shift
- nbits -= shift
- }
-
- return string(s[i:])
- }
-
- // general case: extract groups of digits by multiprecision division
-
- // maximize ndigits where b**ndigits < 2^_W; bb (big base) is b**ndigits
- bb := Word(1)
- ndigits := 0
- for max := Word(_M / b); bb <= max; bb *= b {
- ndigits++
- }
-
- // preserve x, create local copy for use in repeated divisions
- q := nat{}.set(x)
- var r Word
-
- // convert
- if b == 10 { // hard-coding for 10 here speeds this up by 1.25x
- for len(q) > 0 {
- // extract least significant, base bb "digit"
- q, r = q.divW(q, bb) // N.B. >82% of time is here. Optimize divW
- if len(q) == 0 {
- // skip leading zeros in most-significant group of digits
- for j := 0; j < ndigits && r != 0; j++ {
- i--
- s[i] = charset[r%10]
- r /= 10
- }
- } else {
- for j := 0; j < ndigits; j++ {
- i--
- s[i] = charset[r%10]
- r /= 10
- }
- }
- }
- } else {
- for len(q) > 0 {
- // extract least significant group of digits
- q, r = q.divW(q, bb) // N.B. >82% of time is here. Optimize divW
- if len(q) == 0 {
- // skip leading zeros in most-significant group of digits
- for j := 0; j < ndigits && r != 0; j++ {
- i--
- s[i] = charset[r%b]
- r /= b
- }
- } else {
- for j := 0; j < ndigits; j++ {
- i--
- s[i] = charset[r%b]
- r /= b
- }
- }
- }
- }
-
- return string(s[i:])
-}
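For illustration, this charset-driven conversion is what backs the exported base formatting of Int values:

	package main

	import (
		"fmt"
		"math/big"
	)

	func main() {
		x := big.NewInt(0xdeadbeef)
		// The verbs 'd', 'o', 'x' and 'X' select decimal, octal and hexadecimal output.
		fmt.Printf("%d %o %x %X\n", x, x, x, x) // 3735928559 33653337357 deadbeef DEADBEEF
	}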
-
-const deBruijn32 = 0x077CB531
-
-var deBruijn32Lookup = []byte{
- 0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
- 31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9,
-}
-
-const deBruijn64 = 0x03f79d71b4ca8b09
-
-var deBruijn64Lookup = []byte{
- 0, 1, 56, 2, 57, 49, 28, 3, 61, 58, 42, 50, 38, 29, 17, 4,
- 62, 47, 59, 36, 45, 43, 51, 22, 53, 39, 33, 30, 24, 18, 12, 5,
- 63, 55, 48, 27, 60, 41, 37, 16, 46, 35, 44, 21, 52, 32, 23, 11,
- 54, 26, 40, 15, 34, 20, 31, 10, 25, 14, 19, 9, 13, 8, 7, 6,
-}
-
-// trailingZeroBits returns the number of consecutive zero bits on the right
-// side of the given Word.
-// See Knuth, volume 4, section 7.3.1
-func trailingZeroBits(x Word) int {
- // x & -x leaves only the right-most bit set in the word. Let k be the
- // index of that bit. Since only a single bit is set, the value is two
- // to the power of k. Multiplying by a power of two is equivalent to
- // left shifting, in this case by k bits. The de Bruijn constant is
- // chosen so that all consecutive five-bit (32-bit constant) or six-bit
- // (64-bit constant) substrings are distinct. Therefore, if we have a
- // left-shifted version of this constant, we can find by how many bits
- // it was shifted by looking at which substring ended up at the top of
- // the word.
- switch _W {
- case 32:
- return int(deBruijn32Lookup[((x&-x)*deBruijn32)>>27])
- case 64:
- return int(deBruijn64Lookup[((x&-x)*(deBruijn64&_M))>>58])
- default:
- panic("Unknown word size")
- }
-
- return 0
-}
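A minimal standalone sketch of the 32-bit case of this de Bruijn trick (the constant and lookup table are the ones above; trailingZeros32 is a hypothetical helper name):

	package main

	import "fmt"

	const deBruijn32 = 0x077CB531

	var deBruijn32Lookup = [32]byte{
		0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
		31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9,
	}

	func trailingZeros32(x uint32) int {
		// x&-x isolates the lowest set bit; multiplying by the de Bruijn
		// constant shifts it left by that bit's index, leaving a distinct
		// 5-bit pattern in the top five bits of the word.
		return int(deBruijn32Lookup[(x&-x)*deBruijn32>>27])
	}

	func main() {
		for i := 0; i < 32; i++ {
			if got := trailingZeros32(1 << uint(i)); got != i {
				fmt.Printf("bit %d: got %d\n", i, got)
			}
		}
		fmt.Println("ok")
	}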
-
-// z = x << s
-func (z nat) shl(x nat, s uint) nat {
- m := len(x)
- if m == 0 {
- return z.make(0)
- }
- // m > 0
-
- n := m + int(s/_W)
- z = z.make(n + 1)
- z[n] = shlVU(z[n-m:n], x, s%_W)
- z[0 : n-m].clear()
-
- return z.norm()
-}
-
-// z = x >> s
-func (z nat) shr(x nat, s uint) nat {
- m := len(x)
- n := m - int(s/_W)
- if n <= 0 {
- return z.make(0)
- }
- // n > 0
-
- z = z.make(n)
- shrVU(z, x[m-n:], s%_W)
-
- return z.norm()
-}
-
-func (z nat) setBit(x nat, i uint, b uint) nat {
- j := int(i / _W)
- m := Word(1) << (i % _W)
- n := len(x)
- switch b {
- case 0:
- z = z.make(n)
- copy(z, x)
- if j >= n {
- // no need to grow
- return z
- }
- z[j] &^= m
- return z.norm()
- case 1:
- if j >= n {
- n = j + 1
- }
- z = z.make(n)
- copy(z, x)
- z[j] |= m
- // no need to normalize
- return z
- }
- panic("set bit is not 0 or 1")
-}
-
-func (z nat) bit(i uint) uint {
- j := int(i / _W)
- if j >= len(z) {
- return 0
- }
- return uint(z[j] >> (i % _W) & 1)
-}
-
-func (z nat) and(x, y nat) nat {
- m := len(x)
- n := len(y)
- if m > n {
- m = n
- }
- // m <= n
-
- z = z.make(m)
- for i := 0; i < m; i++ {
- z[i] = x[i] & y[i]
- }
-
- return z.norm()
-}
-
-func (z nat) andNot(x, y nat) nat {
- m := len(x)
- n := len(y)
- if n > m {
- n = m
- }
- // m >= n
-
- z = z.make(m)
- for i := 0; i < n; i++ {
- z[i] = x[i] &^ y[i]
- }
- copy(z[n:m], x[n:m])
-
- return z.norm()
-}
-
-func (z nat) or(x, y nat) nat {
- m := len(x)
- n := len(y)
- s := x
- if m < n {
- n, m = m, n
- s = y
- }
- // m >= n
-
- z = z.make(m)
- for i := 0; i < n; i++ {
- z[i] = x[i] | y[i]
- }
- copy(z[n:m], s[n:m])
-
- return z.norm()
-}
-
-func (z nat) xor(x, y nat) nat {
- m := len(x)
- n := len(y)
- s := x
- if m < n {
- n, m = m, n
- s = y
- }
- // m >= n
-
- z = z.make(m)
- for i := 0; i < n; i++ {
- z[i] = x[i] ^ y[i]
- }
- copy(z[n:m], s[n:m])
-
- return z.norm()
-}
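A quick sketch of the corresponding exported bitwise operations on Int:

	package main

	import (
		"fmt"
		"math/big"
	)

	func main() {
		x, y := big.NewInt(12), big.NewInt(10) // 1100 and 1010 in binary
		fmt.Println(new(big.Int).And(x, y))    // 8
		fmt.Println(new(big.Int).Or(x, y))     // 14
		fmt.Println(new(big.Int).Xor(x, y))    // 6
		fmt.Println(new(big.Int).AndNot(x, y)) // 4
	}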
-
-// greaterThan returns true iff (x1<<_W + x2) > (y1<<_W + y2)
-func greaterThan(x1, x2, y1, y2 Word) bool {
- return x1 > y1 || x1 == y1 && x2 > y2
-}
-
-// modW returns x % d.
-func (x nat) modW(d Word) (r Word) {
- // TODO(agl): we don't actually need to store the q value.
- var q nat
- q = q.make(len(x))
- return divWVW(q, 0, x, d)
-}
-
-// powersOfTwoDecompose finds q and k with x = q * 1<<k and q is odd, or q and k are 0.
-func (x nat) powersOfTwoDecompose() (q nat, k int) {
- if len(x) == 0 {
- return x, 0
- }
-
- // One of the words must be non-zero by definition,
- // so this loop will terminate with i < len(x), and
- // i is the number of 0 words.
- i := 0
- for x[i] == 0 {
- i++
- }
- n := trailingZeroBits(x[i]) // x[i] != 0
-
- q = make(nat, len(x)-i)
- shrVU(q, x[i:], uint(n))
-
- q = q.norm()
- k = i*_W + n
- return
-}
-
-// random creates a random integer in [0..limit), using the space in z if
-// possible. n is the bit length of limit.
-func (z nat) random(rand *rand.Rand, limit nat, n int) nat {
- bitLengthOfMSW := uint(n % _W)
- if bitLengthOfMSW == 0 {
- bitLengthOfMSW = _W
- }
- mask := Word((1 << bitLengthOfMSW) - 1)
- z = z.make(len(limit))
-
- for {
- for i := range z {
- switch _W {
- case 32:
- z[i] = Word(rand.Uint32())
- case 64:
- z[i] = Word(rand.Uint32()) | Word(rand.Uint32())<<32
- }
- }
-
- z[len(limit)-1] &= mask
-
- if z.cmp(limit) < 0 {
- break
- }
- }
-
- return z.norm()
-}
-
-// If m != nil, expNN calculates x**y mod m. Otherwise it calculates x**y. It
-// reuses the storage of z if possible.
-func (z nat) expNN(x, y, m nat) nat {
- if alias(z, x) || alias(z, y) {
- // We cannot allow in place modification of x or y.
- z = nil
- }
-
- if len(y) == 0 {
- z = z.make(1)
- z[0] = 1
- return z
- }
-
- if m != nil {
- // We likely end up being as long as the modulus.
- z = z.make(len(m))
- }
- z = z.set(x)
- v := y[len(y)-1]
- // It's invalid for the most significant word to be zero, therefore we
- // will find a one bit.
- shift := leadingZeros(v) + 1
- v <<= shift
- var q nat
-
- const mask = 1 << (_W - 1)
-
- // We walk through the bits of the exponent one by one. Each time we
- // see a bit, we square, thus doubling the power. If the bit is a one,
- // we also multiply by x, thus adding one to the power.
-
- w := _W - int(shift)
- for j := 0; j < w; j++ {
- z = z.mul(z, z)
-
- if v&mask != 0 {
- z = z.mul(z, x)
- }
-
- if m != nil {
- q, z = q.div(z, z, m)
- }
-
- v <<= 1
- }
-
- for i := len(y) - 2; i >= 0; i-- {
- v = y[i]
-
- for j := 0; j < _W; j++ {
- z = z.mul(z, z)
-
- if v&mask != 0 {
- z = z.mul(z, x)
- }
-
- if m != nil {
- q, z = q.div(z, z, m)
- }
-
- v <<= 1
- }
- }
-
- return z
-}
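For illustration, the exported entry point for this routine is Int.Exp; a minimal sketch using one of the vectors exercised by the tests:

	package main

	import (
		"fmt"
		"math/big"
	)

	func main() {
		x, _ := new(big.Int).SetString("0x8000000000000000", 0)
		y, m := big.NewInt(3), big.NewInt(6719)
		fmt.Println(new(big.Int).Exp(x, y, m)) // 5447
	}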
-
-// probablyPrime performs reps Miller-Rabin tests to check whether n is prime.
-// If it returns true, n is prime with probability 1 - 1/4^reps.
-// If it returns false, n is not prime.
-func (n nat) probablyPrime(reps int) bool {
- if len(n) == 0 {
- return false
- }
-
- if len(n) == 1 {
- if n[0] < 2 {
- return false
- }
-
- if n[0]%2 == 0 {
- return n[0] == 2
- }
-
- // We have to exclude these cases because we reject all
- // multiples of these numbers below.
- switch n[0] {
- case 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53:
- return true
- }
- }
-
- const primesProduct32 = 0xC0CFD797 // Π {p ∈ primes, 2 < p <= 29}
- const primesProduct64 = 0xE221F97C30E94E1D // Π {p ∈ primes, 2 < p <= 53}
-
- var r Word
- switch _W {
- case 32:
- r = n.modW(primesProduct32)
- case 64:
- r = n.modW(primesProduct64 & _M)
- default:
- panic("Unknown word size")
- }
-
- if r%3 == 0 || r%5 == 0 || r%7 == 0 || r%11 == 0 ||
- r%13 == 0 || r%17 == 0 || r%19 == 0 || r%23 == 0 || r%29 == 0 {
- return false
- }
-
- if _W == 64 && (r%31 == 0 || r%37 == 0 || r%41 == 0 ||
- r%43 == 0 || r%47 == 0 || r%53 == 0) {
- return false
- }
-
- nm1 := nat{}.sub(n, natOne)
- // 1<<k * q = nm1;
- q, k := nm1.powersOfTwoDecompose()
-
- nm3 := nat{}.sub(nm1, natTwo)
- rand := rand.New(rand.NewSource(int64(n[0])))
-
- var x, y, quotient nat
- nm3Len := nm3.bitLen()
-
-NextRandom:
- for i := 0; i < reps; i++ {
- x = x.random(rand, nm3, nm3Len)
- x = x.add(x, natTwo)
- y = y.expNN(x, q, n)
- if y.cmp(natOne) == 0 || y.cmp(nm1) == 0 {
- continue
- }
- for j := 1; j < k; j++ {
- y = y.mul(y, y)
- quotient, y = quotient.div(y, y, n)
- if y.cmp(nm1) == 0 {
- continue NextRandom
- }
- if y.cmp(natOne) == 0 {
- return false
- }
- }
- return false
- }
-
- return true
-}
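As a usage sketch, the exported wrapper for this test is Int.ProbablyPrime:

	package main

	import (
		"fmt"
		"math/big"
	)

	func main() {
		p := big.NewInt(2147483647) // 2^31 - 1, a Mersenne prime
		c := big.NewInt(2147483645) // composite (divisible by 5)
		fmt.Println(p.ProbablyPrime(20), c.ProbablyPrime(20)) // true false
	}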
-
-// bytes writes the value of z into buf using big-endian encoding.
-// len(buf) must be >= len(z)*_S. The value of z is encoded in the
-// slice buf[i:]. The number i of unused bytes at the beginning of
-// buf is returned as result.
-func (z nat) bytes(buf []byte) (i int) {
- i = len(buf)
- for _, d := range z {
- for j := 0; j < _S; j++ {
- i--
- buf[i] = byte(d)
- d >>= 8
- }
- }
-
- for i < len(buf) && buf[i] == 0 {
- i++
- }
-
- return
-}
-
-// setBytes interprets buf as the bytes of a big-endian unsigned
-// integer, sets z to that value, and returns z.
-func (z nat) setBytes(buf []byte) nat {
- z = z.make((len(buf) + _S - 1) / _S)
-
- k := 0
- s := uint(0)
- var d Word
- for i := len(buf); i > 0; i-- {
- d |= Word(buf[i-1]) << s
- if s += 8; s == _S*8 {
- z[k] = d
- k++
- s = 0
- d = 0
- }
- }
- if k < len(z) {
- z[k] = d
- }
-
- return z.norm()
-}
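A short sketch of the exported big-endian byte round trip built on these two helpers:

	package main

	import (
		"fmt"
		"math/big"
	)

	func main() {
		x := new(big.Int).SetBytes([]byte{0x01, 0x00, 0x00})
		fmt.Println(x)                 // 65536
		fmt.Printf("% x\n", x.Bytes()) // 01 00 00
	}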
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package big
-
-import (
- "fmt"
- "io"
- "strings"
- "testing"
-)
-
-var cmpTests = []struct {
- x, y nat
- r int
-}{
- {nil, nil, 0},
- {nil, nat{}, 0},
- {nat{}, nil, 0},
- {nat{}, nat{}, 0},
- {nat{0}, nat{0}, 0},
- {nat{0}, nat{1}, -1},
- {nat{1}, nat{0}, 1},
- {nat{1}, nat{1}, 0},
- {nat{0, _M}, nat{1}, 1},
- {nat{1}, nat{0, _M}, -1},
- {nat{1, _M}, nat{0, _M}, 1},
- {nat{0, _M}, nat{1, _M}, -1},
- {nat{16, 571956, 8794, 68}, nat{837, 9146, 1, 754489}, -1},
- {nat{34986, 41, 105, 1957}, nat{56, 7458, 104, 1957}, 1},
-}
-
-func TestCmp(t *testing.T) {
- for i, a := range cmpTests {
- r := a.x.cmp(a.y)
- if r != a.r {
- t.Errorf("#%d got r = %v; want %v", i, r, a.r)
- }
- }
-}
-
-type funNN func(z, x, y nat) nat
-type argNN struct {
- z, x, y nat
-}
-
-var sumNN = []argNN{
- {},
- {nat{1}, nil, nat{1}},
- {nat{1111111110}, nat{123456789}, nat{987654321}},
- {nat{0, 0, 0, 1}, nil, nat{0, 0, 0, 1}},
- {nat{0, 0, 0, 1111111110}, nat{0, 0, 0, 123456789}, nat{0, 0, 0, 987654321}},
- {nat{0, 0, 0, 1}, nat{0, 0, _M}, nat{0, 0, 1}},
-}
-
-var prodNN = []argNN{
- {},
- {nil, nil, nil},
- {nil, nat{991}, nil},
- {nat{991}, nat{991}, nat{1}},
- {nat{991 * 991}, nat{991}, nat{991}},
- {nat{0, 0, 991 * 991}, nat{0, 991}, nat{0, 991}},
- {nat{1 * 991, 2 * 991, 3 * 991, 4 * 991}, nat{1, 2, 3, 4}, nat{991}},
- {nat{4, 11, 20, 30, 20, 11, 4}, nat{1, 2, 3, 4}, nat{4, 3, 2, 1}},
-}
-
-func TestSet(t *testing.T) {
- for _, a := range sumNN {
- z := nat{}.set(a.z)
- if z.cmp(a.z) != 0 {
- t.Errorf("got z = %v; want %v", z, a.z)
- }
- }
-}
-
-func testFunNN(t *testing.T, msg string, f funNN, a argNN) {
- z := f(nil, a.x, a.y)
- if z.cmp(a.z) != 0 {
- t.Errorf("%s%+v\n\tgot z = %v; want %v", msg, a, z, a.z)
- }
-}
-
-func TestFunNN(t *testing.T) {
- for _, a := range sumNN {
- arg := a
- testFunNN(t, "add", nat.add, arg)
-
- arg = argNN{a.z, a.y, a.x}
- testFunNN(t, "add symmetric", nat.add, arg)
-
- arg = argNN{a.x, a.z, a.y}
- testFunNN(t, "sub", nat.sub, arg)
-
- arg = argNN{a.y, a.z, a.x}
- testFunNN(t, "sub symmetric", nat.sub, arg)
- }
-
- for _, a := range prodNN {
- arg := a
- testFunNN(t, "mul", nat.mul, arg)
-
- arg = argNN{a.z, a.y, a.x}
- testFunNN(t, "mul symmetric", nat.mul, arg)
- }
-}
-
-var mulRangesN = []struct {
- a, b uint64
- prod string
-}{
- {0, 0, "0"},
- {1, 1, "1"},
- {1, 2, "2"},
- {1, 3, "6"},
- {10, 10, "10"},
- {0, 100, "0"},
- {0, 1e9, "0"},
- {1, 0, "1"}, // empty range
- {100, 1, "1"}, // empty range
- {1, 10, "3628800"}, // 10!
- {1, 20, "2432902008176640000"}, // 20!
- {1, 100,
- "933262154439441526816992388562667004907159682643816214685929" +
- "638952175999932299156089414639761565182862536979208272237582" +
- "51185210916864000000000000000000000000", // 100!
- },
-}
-
-func TestMulRangeN(t *testing.T) {
- for i, r := range mulRangesN {
- prod := nat{}.mulRange(r.a, r.b).decimalString()
- if prod != r.prod {
- t.Errorf("#%d: got %s; want %s", i, prod, r.prod)
- }
- }
-}
-
-var mulArg, mulTmp nat
-
-func init() {
- const n = 1000
- mulArg = make(nat, n)
- for i := 0; i < n; i++ {
- mulArg[i] = _M
- }
-}
-
-func benchmarkMulLoad() {
- for j := 1; j <= 10; j++ {
- x := mulArg[0 : j*100]
- mulTmp.mul(x, x)
- }
-}
-
-func BenchmarkMul(b *testing.B) {
- for i := 0; i < b.N; i++ {
- benchmarkMulLoad()
- }
-}
-
-func toString(x nat, charset string) string {
- base := len(charset)
-
- // special cases
- switch {
- case base < 2:
- panic("illegal base")
- case len(x) == 0:
- return string(charset[0])
- }
-
- // allocate buffer for conversion
- i := x.bitLen()/log2(Word(base)) + 1 // +1: round up
- s := make([]byte, i)
-
- // don't destroy x
- q := nat{}.set(x)
-
- // convert
- for len(q) > 0 {
- i--
- var r Word
- q, r = q.divW(q, Word(base))
- s[i] = charset[r]
- }
-
- return string(s[i:])
-}
-
-var strTests = []struct {
- x nat // nat value to be converted
- c string // conversion charset
- s string // expected result
-}{
- {nil, "01", "0"},
- {nat{1}, "01", "1"},
- {nat{0xc5}, "01", "11000101"},
- {nat{03271}, lowercaseDigits[0:8], "3271"},
- {nat{10}, lowercaseDigits[0:10], "10"},
- {nat{1234567890}, uppercaseDigits[0:10], "1234567890"},
- {nat{0xdeadbeef}, lowercaseDigits[0:16], "deadbeef"},
- {nat{0xdeadbeef}, uppercaseDigits[0:16], "DEADBEEF"},
- {nat{0x229be7}, lowercaseDigits[0:17], "1a2b3c"},
- {nat{0x309663e6}, uppercaseDigits[0:32], "O9COV6"},
-}
-
-func TestString(t *testing.T) {
- for _, a := range strTests {
- s := a.x.string(a.c)
- if s != a.s {
- t.Errorf("string%+v\n\tgot s = %s; want %s", a, s, a.s)
- }
-
- x, b, err := nat{}.scan(strings.NewReader(a.s), len(a.c))
- if x.cmp(a.x) != 0 {
- t.Errorf("scan%+v\n\tgot z = %v; want %v", a, x, a.x)
- }
- if b != len(a.c) {
- t.Errorf("scan%+v\n\tgot b = %d; want %d", a, b, len(a.c))
- }
- if err != nil {
- t.Errorf("scan%+v\n\tgot error = %s", a, err)
- }
- }
-}
-
-var natScanTests = []struct {
- s string // string to be scanned
- base int // input base
- x nat // expected nat
- b int // expected base
- ok bool // expected success
- next rune // next character (or 0, if at EOF)
-}{
- // error: illegal base
- {base: -1},
- {base: 1},
- {base: 37},
-
- // error: no mantissa
- {},
- {s: "?"},
- {base: 10},
- {base: 36},
- {s: "?", base: 10},
- {s: "0x"},
- {s: "345", base: 2},
-
- // no errors
- {"0", 0, nil, 10, true, 0},
- {"0", 10, nil, 10, true, 0},
- {"0", 36, nil, 36, true, 0},
- {"1", 0, nat{1}, 10, true, 0},
- {"1", 10, nat{1}, 10, true, 0},
- {"0 ", 0, nil, 10, true, ' '},
- {"08", 0, nil, 10, true, '8'},
- {"018", 0, nat{1}, 8, true, '8'},
- {"0b1", 0, nat{1}, 2, true, 0},
- {"0b11000101", 0, nat{0xc5}, 2, true, 0},
- {"03271", 0, nat{03271}, 8, true, 0},
- {"10ab", 0, nat{10}, 10, true, 'a'},
- {"1234567890", 0, nat{1234567890}, 10, true, 0},
- {"xyz", 36, nat{(33*36+34)*36 + 35}, 36, true, 0},
- {"xyz?", 36, nat{(33*36+34)*36 + 35}, 36, true, '?'},
- {"0x", 16, nil, 16, true, 'x'},
- {"0xdeadbeef", 0, nat{0xdeadbeef}, 16, true, 0},
- {"0XDEADBEEF", 0, nat{0xdeadbeef}, 16, true, 0},
-}
-
-func TestScanBase(t *testing.T) {
- for _, a := range natScanTests {
- r := strings.NewReader(a.s)
- x, b, err := nat{}.scan(r, a.base)
- if err == nil && !a.ok {
- t.Errorf("scan%+v\n\texpected error", a)
- }
- if err != nil {
- if a.ok {
- t.Errorf("scan%+v\n\tgot error = %s", a, err)
- }
- continue
- }
- if x.cmp(a.x) != 0 {
- t.Errorf("scan%+v\n\tgot z = %v; want %v", a, x, a.x)
- }
- if b != a.b {
- t.Errorf("scan%+v\n\tgot b = %d; want %d", a, b, a.base)
- }
- next, _, err := r.ReadRune()
- if err == io.EOF {
- next = 0
- err = nil
- }
- if err == nil && next != a.next {
- t.Errorf("scan%+v\n\tgot next = %q; want %q", a, next, a.next)
- }
- }
-}
-
-var pi = "3" +
- "14159265358979323846264338327950288419716939937510582097494459230781640628620899862803482534211706798214808651" +
- "32823066470938446095505822317253594081284811174502841027019385211055596446229489549303819644288109756659334461" +
- "28475648233786783165271201909145648566923460348610454326648213393607260249141273724587006606315588174881520920" +
- "96282925409171536436789259036001133053054882046652138414695194151160943305727036575959195309218611738193261179" +
- "31051185480744623799627495673518857527248912279381830119491298336733624406566430860213949463952247371907021798" +
- "60943702770539217176293176752384674818467669405132000568127145263560827785771342757789609173637178721468440901" +
- "22495343014654958537105079227968925892354201995611212902196086403441815981362977477130996051870721134999999837" +
- "29780499510597317328160963185950244594553469083026425223082533446850352619311881710100031378387528865875332083" +
- "81420617177669147303598253490428755468731159562863882353787593751957781857780532171226806613001927876611195909" +
- "21642019893809525720106548586327886593615338182796823030195203530185296899577362259941389124972177528347913151" +
- "55748572424541506959508295331168617278558890750983817546374649393192550604009277016711390098488240128583616035" +
- "63707660104710181942955596198946767837449448255379774726847104047534646208046684259069491293313677028989152104" +
- "75216205696602405803815019351125338243003558764024749647326391419927260426992279678235478163600934172164121992" +
- "45863150302861829745557067498385054945885869269956909272107975093029553211653449872027559602364806654991198818" +
- "34797753566369807426542527862551818417574672890977772793800081647060016145249192173217214772350141441973568548" +
- "16136115735255213347574184946843852332390739414333454776241686251898356948556209921922218427255025425688767179" +
- "04946016534668049886272327917860857843838279679766814541009538837863609506800642251252051173929848960841284886" +
- "26945604241965285022210661186306744278622039194945047123713786960956364371917287467764657573962413890865832645" +
- "99581339047802759009946576407895126946839835259570982582262052248940772671947826848260147699090264013639443745" +
- "53050682034962524517493996514314298091906592509372216964615157098583874105978859597729754989301617539284681382" +
- "68683868942774155991855925245953959431049972524680845987273644695848653836736222626099124608051243884390451244" +
- "13654976278079771569143599770012961608944169486855584840635342207222582848864815845602850601684273945226746767" +
- "88952521385225499546667278239864565961163548862305774564980355936345681743241125150760694794510965960940252288" +
- "79710893145669136867228748940560101503308617928680920874760917824938589009714909675985261365549781893129784821" +
- "68299894872265880485756401427047755513237964145152374623436454285844479526586782105114135473573952311342716610" +
- "21359695362314429524849371871101457654035902799344037420073105785390621983874478084784896833214457138687519435" +
- "06430218453191048481005370614680674919278191197939952061419663428754440643745123718192179998391015919561814675" +
- "14269123974894090718649423196156794520809514655022523160388193014209376213785595663893778708303906979207734672" +
- "21825625996615014215030680384477345492026054146659252014974428507325186660021324340881907104863317346496514539" +
- "05796268561005508106658796998163574736384052571459102897064140110971206280439039759515677157700420337869936007" +
- "23055876317635942187312514712053292819182618612586732157919841484882916447060957527069572209175671167229109816" +
- "90915280173506712748583222871835209353965725121083579151369882091444210067510334671103141267111369908658516398" +
- "31501970165151168517143765761835155650884909989859982387345528331635507647918535893226185489632132933089857064" +
- "20467525907091548141654985946163718027098199430992448895757128289059232332609729971208443357326548938239119325" +
- "97463667305836041428138830320382490375898524374417029132765618093773444030707469211201913020330380197621101100" +
- "44929321516084244485963766983895228684783123552658213144957685726243344189303968642624341077322697802807318915" +
- "44110104468232527162010526522721116603966655730925471105578537634668206531098965269186205647693125705863566201" +
- "85581007293606598764861179104533488503461136576867532494416680396265797877185560845529654126654085306143444318" +
- "58676975145661406800700237877659134401712749470420562230538994561314071127000407854733269939081454664645880797" +
- "27082668306343285878569830523580893306575740679545716377525420211495576158140025012622859413021647155097925923" +
- "09907965473761255176567513575178296664547791745011299614890304639947132962107340437518957359614589019389713111" +
- "79042978285647503203198691514028708085990480109412147221317947647772622414254854540332157185306142288137585043" +
- "06332175182979866223717215916077166925474873898665494945011465406284336639379003976926567214638530673609657120" +
- "91807638327166416274888800786925602902284721040317211860820419000422966171196377921337575114959501566049631862" +
- "94726547364252308177036751590673502350728354056704038674351362222477158915049530984448933309634087807693259939" +
- "78054193414473774418426312986080998886874132604721569516239658645730216315981931951673538129741677294786724229" +
- "24654366800980676928238280689964004824354037014163149658979409243237896907069779422362508221688957383798623001" +
- "59377647165122893578601588161755782973523344604281512627203734314653197777416031990665541876397929334419521541" +
- "34189948544473456738316249934191318148092777710386387734317720754565453220777092120190516609628049092636019759" +
- "88281613323166636528619326686336062735676303544776280350450777235547105859548702790814356240145171806246436267" +
- "94561275318134078330336254232783944975382437205835311477119926063813346776879695970309833913077109870408591337"
-
-// Test case for BenchmarkScanPi.
-func TestScanPi(t *testing.T) {
- var x nat
- z, _, err := x.scan(strings.NewReader(pi), 10)
- if err != nil {
- t.Errorf("scanning pi: %s", err)
- }
- if s := z.decimalString(); s != pi {
- t.Errorf("scanning pi: got %s", s)
- }
-}
-
-func BenchmarkScanPi(b *testing.B) {
- for i := 0; i < b.N; i++ {
- var x nat
- x.scan(strings.NewReader(pi), 10)
- }
-}
-
-const (
- // 314**271
- // base 2: 2249 digits
- // base 8: 751 digits
- // base 10: 678 digits
- // base 16: 563 digits
- shortBase = 314
- shortExponent = 271
-
- // 3141**2718
- // base 2: 31577 digits
- // base 8: 10527 digits
- // base 10: 9507 digits
- // base 16: 7895 digits
- mediumBase = 3141
- mediumExponent = 2718
-
- // 31415**27182
- // base 2: 406078 digits
- // base 8: 135360 digits
- // base 10: 122243 digits
- // base 16: 101521 digits
- longBase = 31415
- longExponent = 27182
-)
-
-func BenchmarkScanShort2(b *testing.B) {
- ScanHelper(b, 2, shortBase, shortExponent)
-}
-
-func BenchmarkScanShort8(b *testing.B) {
- ScanHelper(b, 8, shortBase, shortExponent)
-}
-
-func BenchmarkScanShort10(b *testing.B) {
- ScanHelper(b, 10, shortBase, shortExponent)
-}
-
-func BenchmarkScanShort16(b *testing.B) {
- ScanHelper(b, 16, shortBase, shortExponent)
-}
-
-func BenchmarkScanMedium2(b *testing.B) {
- ScanHelper(b, 2, mediumBase, mediumExponent)
-}
-
-func BenchmarkScanMedium8(b *testing.B) {
- ScanHelper(b, 8, mediumBase, mediumExponent)
-}
-
-func BenchmarkScanMedium10(b *testing.B) {
- ScanHelper(b, 10, mediumBase, mediumExponent)
-}
-
-func BenchmarkScanMedium16(b *testing.B) {
- ScanHelper(b, 16, mediumBase, mediumExponent)
-}
-
-func BenchmarkScanLong2(b *testing.B) {
- ScanHelper(b, 2, longBase, longExponent)
-}
-
-func BenchmarkScanLong8(b *testing.B) {
- ScanHelper(b, 8, longBase, longExponent)
-}
-
-func BenchmarkScanLong10(b *testing.B) {
- ScanHelper(b, 10, longBase, longExponent)
-}
-
-func BenchmarkScanLong16(b *testing.B) {
- ScanHelper(b, 16, longBase, longExponent)
-}
-
-func ScanHelper(b *testing.B, base int, xv, yv Word) {
- b.StopTimer()
- var x, y, z nat
- x = x.setWord(xv)
- y = y.setWord(yv)
- z = z.expNN(x, y, nil)
-
- var s string
- s = z.string(lowercaseDigits[0:base])
- if t := toString(z, lowercaseDigits[0:base]); t != s {
- panic(fmt.Sprintf("scanning: got %s; want %s", s, t))
- }
- b.StartTimer()
-
- for i := 0; i < b.N; i++ {
- x.scan(strings.NewReader(s), base)
- }
-}
-
-func BenchmarkStringShort2(b *testing.B) {
- StringHelper(b, 2, shortBase, shortExponent)
-}
-
-func BenchmarkStringShort8(b *testing.B) {
- StringHelper(b, 8, shortBase, shortExponent)
-}
-
-func BenchmarkStringShort10(b *testing.B) {
- StringHelper(b, 10, shortBase, shortExponent)
-}
-
-func BenchmarkStringShort16(b *testing.B) {
- StringHelper(b, 16, shortBase, shortExponent)
-}
-
-func BenchmarkStringMedium2(b *testing.B) {
- StringHelper(b, 2, mediumBase, mediumExponent)
-}
-
-func BenchmarkStringMedium8(b *testing.B) {
- StringHelper(b, 8, mediumBase, mediumExponent)
-}
-
-func BenchmarkStringMedium10(b *testing.B) {
- StringHelper(b, 10, mediumBase, mediumExponent)
-}
-
-func BenchmarkStringMedium16(b *testing.B) {
- StringHelper(b, 16, mediumBase, mediumExponent)
-}
-
-func BenchmarkStringLong2(b *testing.B) {
- StringHelper(b, 2, longBase, longExponent)
-}
-
-func BenchmarkStringLong8(b *testing.B) {
- StringHelper(b, 8, longBase, longExponent)
-}
-
-func BenchmarkStringLong10(b *testing.B) {
- StringHelper(b, 10, longBase, longExponent)
-}
-
-func BenchmarkStringLong16(b *testing.B) {
- StringHelper(b, 16, longBase, longExponent)
-}
-
-func StringHelper(b *testing.B, base int, xv, yv Word) {
- b.StopTimer()
- var x, y, z nat
- x = x.setWord(xv)
- y = y.setWord(yv)
- z = z.expNN(x, y, nil)
- b.StartTimer()
-
- for i := 0; i < b.N; i++ {
- z.string(lowercaseDigits[0:base])
- }
-}
-
-func TestLeadingZeros(t *testing.T) {
- var x Word = _B >> 1
- for i := 0; i <= _W; i++ {
- if int(leadingZeros(x)) != i {
- t.Errorf("failed at %x: got %d want %d", x, leadingZeros(x), i)
- }
- x >>= 1
- }
-}
-
-type shiftTest struct {
- in nat
- shift uint
- out nat
-}
-
-var leftShiftTests = []shiftTest{
- {nil, 0, nil},
- {nil, 1, nil},
- {natOne, 0, natOne},
- {natOne, 1, natTwo},
- {nat{1 << (_W - 1)}, 1, nat{0}},
- {nat{1 << (_W - 1), 0}, 1, nat{0, 1}},
-}
-
-func TestShiftLeft(t *testing.T) {
- for i, test := range leftShiftTests {
- var z nat
- z = z.shl(test.in, test.shift)
- for j, d := range test.out {
- if j >= len(z) || z[j] != d {
- t.Errorf("#%d: got: %v want: %v", i, z, test.out)
- break
- }
- }
- }
-}
-
-var rightShiftTests = []shiftTest{
- {nil, 0, nil},
- {nil, 1, nil},
- {natOne, 0, natOne},
- {natOne, 1, nil},
- {natTwo, 1, natOne},
- {nat{0, 1}, 1, nat{1 << (_W - 1)}},
- {nat{2, 1, 1}, 1, nat{1<<(_W-1) + 1, 1 << (_W - 1)}},
-}
-
-func TestShiftRight(t *testing.T) {
- for i, test := range rightShiftTests {
- var z nat
- z = z.shr(test.in, test.shift)
- for j, d := range test.out {
- if j >= len(z) || z[j] != d {
- t.Errorf("#%d: got: %v want: %v", i, z, test.out)
- break
- }
- }
- }
-}
-
-type modWTest struct {
- in string
- dividend string
- out string
-}
-
-var modWTests32 = []modWTest{
- {"23492635982634928349238759823742", "252341", "220170"},
-}
-
-var modWTests64 = []modWTest{
- {"6527895462947293856291561095690465243862946", "524326975699234", "375066989628668"},
-}
-
-func runModWTests(t *testing.T, tests []modWTest) {
- for i, test := range tests {
- in, _ := new(Int).SetString(test.in, 10)
- d, _ := new(Int).SetString(test.dividend, 10)
- out, _ := new(Int).SetString(test.out, 10)
-
- r := in.abs.modW(d.abs[0])
- if r != out.abs[0] {
- t.Errorf("#%d failed: got %s want %s", i, r, out)
- }
- }
-}
-
-func TestModW(t *testing.T) {
- if _W >= 32 {
- runModWTests(t, modWTests32)
- }
- if _W >= 64 {
- runModWTests(t, modWTests64)
- }
-}
-
-func TestTrailingZeroBits(t *testing.T) {
- var x Word
- x--
- for i := 0; i < _W; i++ {
- if trailingZeroBits(x) != i {
- t.Errorf("Failed at step %d: x: %x got: %d", i, x, trailingZeroBits(x))
- }
- x <<= 1
- }
-}
-
-var expNNTests = []struct {
- x, y, m string
- out string
-}{
- {"0x8000000000000000", "2", "", "0x40000000000000000000000000000000"},
- {"0x8000000000000000", "2", "6719", "4944"},
- {"0x8000000000000000", "3", "6719", "5447"},
- {"0x8000000000000000", "1000", "6719", "1603"},
- {"0x8000000000000000", "1000000", "6719", "3199"},
- {
- "2938462938472983472983659726349017249287491026512746239764525612965293865296239471239874193284792387498274256129746192347",
- "298472983472983471903246121093472394872319615612417471234712061",
- "29834729834729834729347290846729561262544958723956495615629569234729836259263598127342374289365912465901365498236492183464",
- "23537740700184054162508175125554701713153216681790245129157191391322321508055833908509185839069455749219131480588829346291",
- },
-}
-
-func TestExpNN(t *testing.T) {
- for i, test := range expNNTests {
- x, _, _ := nat{}.scan(strings.NewReader(test.x), 0)
- y, _, _ := nat{}.scan(strings.NewReader(test.y), 0)
- out, _, _ := nat{}.scan(strings.NewReader(test.out), 0)
-
- var m nat
-
- if len(test.m) > 0 {
- m, _, _ = nat{}.scan(strings.NewReader(test.m), 0)
- }
-
- z := nat{}.expNN(x, y, m)
- if z.cmp(out) != 0 {
- t.Errorf("#%d got %v want %v", i, z, out)
- }
- }
-}
+++ /dev/null
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file implements multi-precision rational numbers.
-
-package big
-
-import (
- "encoding/binary"
- "errors"
- "fmt"
- "strings"
-)
-
-// A Rat represents a quotient a/b of arbitrary precision.
-// The zero value for a Rat represents the value 0.
-type Rat struct {
- a Int
- b nat // len(b) == 0 acts like b == 1
-}
-
-// NewRat creates a new Rat with numerator a and denominator b.
-func NewRat(a, b int64) *Rat {
- return new(Rat).SetFrac64(a, b)
-}
-
-// SetFrac sets z to a/b and returns z.
-func (z *Rat) SetFrac(a, b *Int) *Rat {
- z.a.neg = a.neg != b.neg
- babs := b.abs
- if len(babs) == 0 {
- panic("division by zero")
- }
- if &z.a == b || alias(z.a.abs, babs) {
- babs = nat{}.set(babs) // make a copy
- }
- z.a.abs = z.a.abs.set(a.abs)
- z.b = z.b.set(babs)
- return z.norm()
-}
-
-// SetFrac64 sets z to a/b and returns z.
-func (z *Rat) SetFrac64(a, b int64) *Rat {
- z.a.SetInt64(a)
- if b == 0 {
- panic("division by zero")
- }
- if b < 0 {
- b = -b
- z.a.neg = !z.a.neg
- }
- z.b = z.b.setUint64(uint64(b))
- return z.norm()
-}
-
-// SetInt sets z to x (by making a copy of x) and returns z.
-func (z *Rat) SetInt(x *Int) *Rat {
- z.a.Set(x)
- z.b = z.b.make(0)
- return z
-}
-
-// SetInt64 sets z to x and returns z.
-func (z *Rat) SetInt64(x int64) *Rat {
- z.a.SetInt64(x)
- z.b = z.b.make(0)
- return z
-}
-
-// Set sets z to x (by making a copy of x) and returns z.
-func (z *Rat) Set(x *Rat) *Rat {
- if z != x {
- z.a.Set(&x.a)
- z.b = z.b.set(x.b)
- }
- return z
-}
-
-// Abs sets z to |x| (the absolute value of x) and returns z.
-func (z *Rat) Abs(x *Rat) *Rat {
- z.Set(x)
- z.a.neg = false
- return z
-}
-
-// Neg sets z to -x and returns z.
-func (z *Rat) Neg(x *Rat) *Rat {
- z.Set(x)
- z.a.neg = len(z.a.abs) > 0 && !z.a.neg // 0 has no sign
- return z
-}
-
-// Inv sets z to 1/x and returns z.
-func (z *Rat) Inv(x *Rat) *Rat {
- if len(x.a.abs) == 0 {
- panic("division by zero")
- }
- z.Set(x)
- a := z.b
- if len(a) == 0 {
- a = a.setWord(1) // materialize numerator
- }
- b := z.a.abs
- if b.cmp(natOne) == 0 {
- b = b.make(0) // normalize denominator
- }
- z.a.abs, z.b = a, b // sign doesn't change
- return z
-}
-
-// Sign returns:
-//
-// -1 if x < 0
-// 0 if x == 0
-// +1 if x > 0
-//
-func (x *Rat) Sign() int {
- return x.a.Sign()
-}
-
-// IsInt returns true if the denominator of x is 1.
-func (x *Rat) IsInt() bool {
- return len(x.b) == 0 || x.b.cmp(natOne) == 0
-}
-
-// Num returns the numerator of x; it may be <= 0.
-// The result is a reference to x's numerator; it
-// may change if a new value is assigned to x.
-func (x *Rat) Num() *Int {
- return &x.a
-}
-
-// Denom returns the denominator of x; it is always > 0.
-// The result is a reference to x's denominator; it
-// may change if a new value is assigned to x.
-func (x *Rat) Denom() *Int {
- if len(x.b) == 0 {
- return &Int{abs: nat{1}}
- }
- return &Int{abs: x.b}
-}
-
-func gcd(x, y nat) nat {
- // Euclidean algorithm.
- var a, b nat
- a = a.set(x)
- b = b.set(y)
- for len(b) != 0 {
- var q, r nat
- _, r = q.div(r, a, b)
- a = b
- b = r
- }
- return a
-}
-
-func (z *Rat) norm() *Rat {
- switch {
- case len(z.a.abs) == 0:
- // z == 0 - normalize sign and denominator
- z.a.neg = false
- z.b = z.b.make(0)
- case len(z.b) == 0:
- // z is normalized int - nothing to do
- case z.b.cmp(natOne) == 0:
- // z is int - normalize denominator
- z.b = z.b.make(0)
- default:
- if f := gcd(z.a.abs, z.b); f.cmp(natOne) != 0 {
- z.a.abs, _ = z.a.abs.div(nil, z.a.abs, f)
- z.b, _ = z.b.div(nil, z.b, f)
- }
- }
- return z
-}
-
-// mulDenom sets z to the denominator product x*y (by taking into
-// account that 0 values for x or y must be interpreted as 1) and
-// returns z.
-func mulDenom(z, x, y nat) nat {
- switch {
- case len(x) == 0:
- return z.set(y)
- case len(y) == 0:
- return z.set(x)
- }
- return z.mul(x, y)
-}
-
-// scaleDenom computes x*f.
-// If f == 0 (zero value of denominator), the result is (a copy of) x.
-func scaleDenom(x *Int, f nat) *Int {
- var z Int
- if len(f) == 0 {
- return z.Set(x)
- }
- z.abs = z.abs.mul(x.abs, f)
- z.neg = x.neg
- return &z
-}
-
-// Cmp compares x and y and returns:
-//
-// -1 if x < y
-// 0 if x == y
-// +1 if x > y
-//
-func (x *Rat) Cmp(y *Rat) int {
- return scaleDenom(&x.a, y.b).Cmp(scaleDenom(&y.a, x.b))
-}
-
-// Add sets z to the sum x+y and returns z.
-func (z *Rat) Add(x, y *Rat) *Rat {
- a1 := scaleDenom(&x.a, y.b)
- a2 := scaleDenom(&y.a, x.b)
- z.a.Add(a1, a2)
- z.b = mulDenom(z.b, x.b, y.b)
- return z.norm()
-}
-
-// Sub sets z to the difference x-y and returns z.
-func (z *Rat) Sub(x, y *Rat) *Rat {
- a1 := scaleDenom(&x.a, y.b)
- a2 := scaleDenom(&y.a, x.b)
- z.a.Sub(a1, a2)
- z.b = mulDenom(z.b, x.b, y.b)
- return z.norm()
-}
-
-// Mul sets z to the product x*y and returns z.
-func (z *Rat) Mul(x, y *Rat) *Rat {
- z.a.Mul(&x.a, &y.a)
- z.b = mulDenom(z.b, x.b, y.b)
- return z.norm()
-}
-
-// Quo sets z to the quotient x/y and returns z.
-// If y == 0, a division-by-zero run-time panic occurs.
-func (z *Rat) Quo(x, y *Rat) *Rat {
- if len(y.a.abs) == 0 {
- panic("division by zero")
- }
- a := scaleDenom(&x.a, y.b)
- b := scaleDenom(&y.a, x.b)
- z.a.abs = a.abs
- z.b = b.abs
- z.a.neg = a.neg != b.neg
- return z.norm()
-}
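A minimal sketch of these operations through the exported Rat API:

	package main

	import (
		"fmt"
		"math/big"
	)

	func main() {
		x, y := big.NewRat(1, 3), big.NewRat(1, 6)
		fmt.Println(new(big.Rat).Add(x, y)) // 1/2
		fmt.Println(new(big.Rat).Mul(x, y)) // 1/18
		fmt.Println(new(big.Rat).Quo(x, y)) // 2/1
	}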
-
-func ratTok(ch rune) bool {
- return strings.IndexRune("+-/0123456789.eE", ch) >= 0
-}
-
-// Scan is a support routine for fmt.Scanner. It accepts the formats
-// 'e', 'E', 'f', 'F', 'g', 'G', and 'v'. All formats are equivalent.
-func (z *Rat) Scan(s fmt.ScanState, ch rune) error {
- tok, err := s.Token(true, ratTok)
- if err != nil {
- return err
- }
- if strings.IndexRune("efgEFGv", ch) < 0 {
- return errors.New("Rat.Scan: invalid verb")
- }
- if _, ok := z.SetString(string(tok)); !ok {
- return errors.New("Rat.Scan: invalid syntax")
- }
- return nil
-}
-
-// SetString sets z to the value of s and returns z and a boolean indicating
-// success. s can be given as a fraction "a/b" or as a floating-point number
-// optionally followed by an exponent. If the operation failed, the value of
-// z is undefined but the returned value is nil.
-func (z *Rat) SetString(s string) (*Rat, bool) {
- if len(s) == 0 {
- return nil, false
- }
-
- // check for a quotient
- sep := strings.Index(s, "/")
- if sep >= 0 {
- if _, ok := z.a.SetString(s[0:sep], 10); !ok {
- return nil, false
- }
- s = s[sep+1:]
- var err error
- if z.b, _, err = z.b.scan(strings.NewReader(s), 10); err != nil {
- return nil, false
- }
- return z.norm(), true
- }
-
- // check for a decimal point
- sep = strings.Index(s, ".")
- // check for an exponent
- e := strings.IndexAny(s, "eE")
- var exp Int
- if e >= 0 {
- if e < sep {
- // The E must come after the decimal point.
- return nil, false
- }
- if _, ok := exp.SetString(s[e+1:], 10); !ok {
- return nil, false
- }
- s = s[0:e]
- }
- if sep >= 0 {
- s = s[0:sep] + s[sep+1:]
- exp.Sub(&exp, NewInt(int64(len(s)-sep)))
- }
-
- if _, ok := z.a.SetString(s, 10); !ok {
- return nil, false
- }
- powTen := nat{}.expNN(natTen, exp.abs, nil)
- if exp.neg {
- z.b = powTen
- z.norm()
- } else {
- z.a.abs = z.a.abs.mul(z.a.abs, powTen)
- z.b = z.b.make(0)
- }
-
- return z, true
-}
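For illustration, a few inputs in the two accepted forms (the expected results match the test table below):

	package main

	import (
		"fmt"
		"math/big"
	)

	func main() {
		for _, s := range []string{"2/4", "1.e1", "-.1"} {
			x, ok := new(big.Rat).SetString(s)
			fmt.Println(s, x.RatString(), ok) // 1/2, 10, -1/10 in turn
		}
	}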
-
-// String returns a string representation of z in the form "a/b" (even if b == 1).
-func (z *Rat) String() string {
- s := "/1"
- if len(z.b) != 0 {
- s = "/" + z.b.decimalString()
- }
- return z.a.String() + s
-}
-
-// RatString returns a string representation of z in the form "a/b" if b != 1,
-// and in the form "a" if b == 1.
-func (z *Rat) RatString() string {
- if z.IsInt() {
- return z.a.String()
- }
- return z.String()
-}
-
-// FloatString returns a string representation of z in decimal form with prec
-// digits of precision after the decimal point and the last digit rounded.
-func (z *Rat) FloatString(prec int) string {
- if z.IsInt() {
- s := z.a.String()
- if prec > 0 {
- s += "." + strings.Repeat("0", prec)
- }
- return s
- }
- // z.b != 0
-
- q, r := nat{}.div(nat{}, z.a.abs, z.b)
-
- p := natOne
- if prec > 0 {
- p = nat{}.expNN(natTen, nat{}.setUint64(uint64(prec)), nil)
- }
-
- r = r.mul(r, p)
- r, r2 := r.div(nat{}, r, z.b)
-
- // see if we need to round up
- r2 = r2.add(r2, r2)
- if z.b.cmp(r2) <= 0 {
- r = r.add(r, natOne)
- if r.cmp(p) >= 0 {
- q = nat{}.add(q, natOne)
- r = nat{}.sub(r, p)
- }
- }
-
- s := q.decimalString()
- if z.a.neg {
- s = "-" + s
- }
-
- if prec > 0 {
- rs := r.decimalString()
- leadingZeros := prec - len(rs)
- s += "." + strings.Repeat("0", leadingZeros) + rs
- }
-
- return s
-}
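A quick sketch of the rounding behavior:

	package main

	import (
		"fmt"
		"math/big"
	)

	func main() {
		x := big.NewRat(-2, 3)
		fmt.Println(x.FloatString(4)) // -0.6667
		fmt.Println(x.FloatString(1)) // -0.7
	}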
-
-// Gob codec version. Permits backward-compatible changes to the encoding.
-const ratGobVersion byte = 1
-
-// GobEncode implements the gob.GobEncoder interface.
-func (z *Rat) GobEncode() ([]byte, error) {
- buf := make([]byte, 1+4+(len(z.a.abs)+len(z.b))*_S) // extra bytes for version and sign bit (1), and numerator length (4)
- i := z.b.bytes(buf)
- j := z.a.abs.bytes(buf[0:i])
- n := i - j
- if int(uint32(n)) != n {
- // this should never happen
- return nil, errors.New("Rat.GobEncode: numerator too large")
- }
- binary.BigEndian.PutUint32(buf[j-4:j], uint32(n))
- j -= 1 + 4
- b := ratGobVersion << 1 // make space for sign bit
- if z.a.neg {
- b |= 1
- }
- buf[j] = b
- return buf[j:], nil
-}
-
-// GobDecode implements the gob.GobDecoder interface.
-func (z *Rat) GobDecode(buf []byte) error {
- if len(buf) == 0 {
- return errors.New("Rat.GobDecode: no data")
- }
- b := buf[0]
- if b>>1 != ratGobVersion {
- return errors.New(fmt.Sprintf("Rat.GobDecode: encoding version %d not supported", b>>1))
- }
- const j = 1 + 4
- i := j + binary.BigEndian.Uint32(buf[j-4:j])
- z.a.neg = b&1 != 0
- z.a.abs = z.a.abs.setBytes(buf[j:i])
- z.b = z.b.setBytes(buf[i:])
- return nil
-}
+++ /dev/null
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package big
-
-import (
- "bytes"
- "fmt"
- "gob"
- "testing"
-)
-
-func TestZeroRat(t *testing.T) {
- var x, y, z Rat
- y.SetFrac64(0, 42)
-
- if x.Cmp(&y) != 0 {
- t.Errorf("x and y should be both equal and zero")
- }
-
- if s := x.String(); s != "0/1" {
- t.Errorf("got x = %s, want 0/1", s)
- }
-
- if s := x.RatString(); s != "0" {
- t.Errorf("got x = %s, want 0", s)
- }
-
- z.Add(&x, &y)
- if s := z.RatString(); s != "0" {
- t.Errorf("got x+y = %s, want 0", s)
- }
-
- z.Sub(&x, &y)
- if s := z.RatString(); s != "0" {
- t.Errorf("got x-y = %s, want 0", s)
- }
-
- z.Mul(&x, &y)
- if s := z.RatString(); s != "0" {
- t.Errorf("got x*y = %s, want 0", s)
- }
-
- // check for division by zero
- defer func() {
- if s := recover(); s == nil || s.(string) != "division by zero" {
- panic(s)
- }
- }()
- z.Quo(&x, &y)
-}
-
-var setStringTests = []struct {
- in, out string
- ok bool
-}{
- {"0", "0", true},
- {"-0", "0", true},
- {"1", "1", true},
- {"-1", "-1", true},
- {"1.", "1", true},
- {"1e0", "1", true},
- {"1.e1", "10", true},
- {in: "1e", ok: false},
- {in: "1.e", ok: false},
- {in: "1e+14e-5", ok: false},
- {in: "1e4.5", ok: false},
- {in: "r", ok: false},
- {in: "a/b", ok: false},
- {in: "a.b", ok: false},
- {"-0.1", "-1/10", true},
- {"-.1", "-1/10", true},
- {"2/4", "1/2", true},
- {".25", "1/4", true},
- {"-1/5", "-1/5", true},
- {"8129567.7690E14", "812956776900000000000", true},
- {"78189e+4", "781890000", true},
- {"553019.8935e+8", "55301989350000", true},
- {"98765432109876543210987654321e-10", "98765432109876543210987654321/10000000000", true},
- {"9877861857500000E-7", "3951144743/4", true},
- {"2169378.417e-3", "2169378417/1000000", true},
- {"884243222337379604041632732738665534", "884243222337379604041632732738665534", true},
- {"53/70893980658822810696", "53/70893980658822810696", true},
- {"106/141787961317645621392", "53/70893980658822810696", true},
- {"204211327800791583.81095", "4084226556015831676219/20000", true},
-}
-
-func TestRatSetString(t *testing.T) {
- for i, test := range setStringTests {
- x, ok := new(Rat).SetString(test.in)
-
- if ok {
- if !test.ok {
- t.Errorf("#%d SetString(%q) expected failure", i, test.in)
- } else if x.RatString() != test.out {
- t.Errorf("#%d SetString(%q) got %s want %s", i, test.in, x.RatString(), test.out)
- }
- } else if x != nil {
- t.Errorf("#%d SetString(%q) got %p want nil", i, test.in, x)
- }
- }
-}
-
-func TestRatScan(t *testing.T) {
- var buf bytes.Buffer
- for i, test := range setStringTests {
- x := new(Rat)
- buf.Reset()
- buf.WriteString(test.in)
-
- _, err := fmt.Fscanf(&buf, "%v", x)
- if err == nil != test.ok {
- if test.ok {
- t.Errorf("#%d error: %s", i, err)
- } else {
- t.Errorf("#%d expected error", i)
- }
- continue
- }
- if err == nil && x.RatString() != test.out {
- t.Errorf("#%d got %s want %s", i, x.RatString(), test.out)
- }
- }
-}
-
-var floatStringTests = []struct {
- in string
- prec int
- out string
-}{
- {"0", 0, "0"},
- {"0", 4, "0.0000"},
- {"1", 0, "1"},
- {"1", 2, "1.00"},
- {"-1", 0, "-1"},
- {".25", 2, "0.25"},
- {".25", 1, "0.3"},
- {".25", 3, "0.250"},
- {"-1/3", 3, "-0.333"},
- {"-2/3", 4, "-0.6667"},
- {"0.96", 1, "1.0"},
- {"0.999", 2, "1.00"},
- {"0.9", 0, "1"},
- {".25", -1, "0"},
- {".55", -1, "1"},
-}
-
-func TestFloatString(t *testing.T) {
- for i, test := range floatStringTests {
- x, _ := new(Rat).SetString(test.in)
-
- if x.FloatString(test.prec) != test.out {
- t.Errorf("#%d got %s want %s", i, x.FloatString(test.prec), test.out)
- }
- }
-}
-
-func TestRatSign(t *testing.T) {
- zero := NewRat(0, 1)
- for _, a := range setStringTests {
- x, ok := new(Rat).SetString(a.in)
- if !ok {
- continue
- }
- s := x.Sign()
- e := x.Cmp(zero)
- if s != e {
- t.Errorf("got %d; want %d for z = %v", s, e, &x)
- }
- }
-}
-
-var ratCmpTests = []struct {
- rat1, rat2 string
- out int
-}{
- {"0", "0/1", 0},
- {"1/1", "1", 0},
- {"-1", "-2/2", 0},
- {"1", "0", 1},
- {"0/1", "1/1", -1},
- {"-5/1434770811533343057144", "-5/1434770811533343057145", -1},
- {"49832350382626108453/8964749413", "49832350382626108454/8964749413", -1},
- {"-37414950961700930/7204075375675961", "37414950961700930/7204075375675961", -1},
- {"37414950961700930/7204075375675961", "74829901923401860/14408150751351922", 0},
-}
-
-func TestRatCmp(t *testing.T) {
- for i, test := range ratCmpTests {
- x, _ := new(Rat).SetString(test.rat1)
- y, _ := new(Rat).SetString(test.rat2)
-
- out := x.Cmp(y)
- if out != test.out {
- t.Errorf("#%d got out = %v; want %v", i, out, test.out)
- }
- }
-}
-
-func TestIsInt(t *testing.T) {
- one := NewInt(1)
- for _, a := range setStringTests {
- x, ok := new(Rat).SetString(a.in)
- if !ok {
- continue
- }
- i := x.IsInt()
- e := x.Denom().Cmp(one) == 0
- if i != e {
- t.Errorf("got IsInt(%v) == %v; want %v", x, i, e)
- }
- }
-}
-
-func TestRatAbs(t *testing.T) {
- zero := new(Rat)
- for _, a := range setStringTests {
- x, ok := new(Rat).SetString(a.in)
- if !ok {
- continue
- }
- e := new(Rat).Set(x)
- if e.Cmp(zero) < 0 {
- e.Sub(zero, e)
- }
- z := new(Rat).Abs(x)
- if z.Cmp(e) != 0 {
- t.Errorf("got Abs(%v) = %v; want %v", x, z, e)
- }
- }
-}
-
-func TestRatNeg(t *testing.T) {
- zero := new(Rat)
- for _, a := range setStringTests {
- x, ok := new(Rat).SetString(a.in)
- if !ok {
- continue
- }
- e := new(Rat).Sub(zero, x)
- z := new(Rat).Neg(x)
- if z.Cmp(e) != 0 {
- t.Errorf("got Neg(%v) = %v; want %v", x, z, e)
- }
- }
-}
-
-func TestRatInv(t *testing.T) {
- zero := new(Rat)
- for _, a := range setStringTests {
- x, ok := new(Rat).SetString(a.in)
- if !ok {
- continue
- }
- if x.Cmp(zero) == 0 {
- continue // avoid division by zero
- }
- e := new(Rat).SetFrac(x.Denom(), x.Num())
- z := new(Rat).Inv(x)
- if z.Cmp(e) != 0 {
- t.Errorf("got Inv(%v) = %v; want %v", x, z, e)
- }
- }
-}
-
-type ratBinFun func(z, x, y *Rat) *Rat
-type ratBinArg struct {
- x, y, z string
-}
-
-func testRatBin(t *testing.T, i int, name string, f ratBinFun, a ratBinArg) {
- x, _ := new(Rat).SetString(a.x)
- y, _ := new(Rat).SetString(a.y)
- z, _ := new(Rat).SetString(a.z)
- out := f(new(Rat), x, y)
-
- if out.Cmp(z) != 0 {
- t.Errorf("%s #%d got %s want %s", name, i, out, z)
- }
-}
-
-var ratBinTests = []struct {
- x, y string
- sum, prod string
-}{
- {"0", "0", "0", "0"},
- {"0", "1", "1", "0"},
- {"-1", "0", "-1", "0"},
- {"-1", "1", "0", "-1"},
- {"1", "1", "2", "1"},
- {"1/2", "1/2", "1", "1/4"},
- {"1/4", "1/3", "7/12", "1/12"},
- {"2/5", "-14/3", "-64/15", "-28/15"},
- {"4707/49292519774798173060", "-3367/70976135186689855734", "84058377121001851123459/1749296273614329067191168098769082663020", "-1760941/388732505247628681598037355282018369560"},
- {"-61204110018146728334/3", "-31052192278051565633/2", "-215564796870448153567/6", "950260896245257153059642991192710872711/3"},
- {"-854857841473707320655/4237645934602118692642972629634714039", "-18/31750379913563777419", "-27/133467566250814981", "15387441146526731771790/134546868362786310073779084329032722548987800600710485341"},
- {"618575745270541348005638912139/19198433543745179392300736", "-19948846211000086/637313996471", "27674141753240653/30123979153216", "-6169936206128396568797607742807090270137721977/6117715203873571641674006593837351328"},
- {"-3/26206484091896184128", "5/2848423294177090248", "15310893822118706237/9330894968229805033368778458685147968", "-5/24882386581946146755650075889827061248"},
- {"26946729/330400702820", "41563965/225583428284", "1238218672302860271/4658307703098666660055", "224002580204097/14906584649915733312176"},
- {"-8259900599013409474/7", "-84829337473700364773/56707961321161574960", "-468402123685491748914621885145127724451/396955729248131024720", "350340947706464153265156004876107029701/198477864624065512360"},
- {"575775209696864/1320203974639986246357", "29/712593081308", "410331716733912717985762465/940768218243776489278275419794956", "808/45524274987585732633"},
- {"1786597389946320496771/2066653520653241", "6269770/1992362624741777", "3559549865190272133656109052308126637/4117523232840525481453983149257", "8967230/3296219033"},
- {"-36459180403360509753/32150500941194292113930", "9381566963714/9633539", "301622077145533298008420642898530153/309723104686531919656937098270", "-3784609207827/3426986245"},
-}
-
-func TestRatBin(t *testing.T) {
- for i, test := range ratBinTests {
- arg := ratBinArg{test.x, test.y, test.sum}
- testRatBin(t, i, "Add", (*Rat).Add, arg)
-
- arg = ratBinArg{test.y, test.x, test.sum}
- testRatBin(t, i, "Add symmetric", (*Rat).Add, arg)
-
- arg = ratBinArg{test.sum, test.x, test.y}
- testRatBin(t, i, "Sub", (*Rat).Sub, arg)
-
- arg = ratBinArg{test.sum, test.y, test.x}
- testRatBin(t, i, "Sub symmetric", (*Rat).Sub, arg)
-
- arg = ratBinArg{test.x, test.y, test.prod}
- testRatBin(t, i, "Mul", (*Rat).Mul, arg)
-
- arg = ratBinArg{test.y, test.x, test.prod}
- testRatBin(t, i, "Mul symmetric", (*Rat).Mul, arg)
-
- if test.x != "0" {
- arg = ratBinArg{test.prod, test.x, test.y}
- testRatBin(t, i, "Quo", (*Rat).Quo, arg)
- }
-
- if test.y != "0" {
- arg = ratBinArg{test.prod, test.y, test.x}
- testRatBin(t, i, "Quo symmetric", (*Rat).Quo, arg)
- }
- }
-}
-
-func TestIssue820(t *testing.T) {
- x := NewRat(3, 1)
- y := NewRat(2, 1)
- z := y.Quo(x, y)
- q := NewRat(3, 2)
- if z.Cmp(q) != 0 {
- t.Errorf("got %s want %s", z, q)
- }
-
- y = NewRat(3, 1)
- x = NewRat(2, 1)
- z = y.Quo(x, y)
- q = NewRat(2, 3)
- if z.Cmp(q) != 0 {
- t.Errorf("got %s want %s", z, q)
- }
-
- x = NewRat(3, 1)
- z = x.Quo(x, x)
- q = NewRat(3, 3)
- if z.Cmp(q) != 0 {
- t.Errorf("got %s want %s", z, q)
- }
-}
-
-var setFrac64Tests = []struct {
- a, b int64
- out string
-}{
- {0, 1, "0"},
- {0, -1, "0"},
- {1, 1, "1"},
- {-1, 1, "-1"},
- {1, -1, "-1"},
- {-1, -1, "1"},
- {-9223372036854775808, -9223372036854775808, "1"},
-}
-
-func TestRatSetFrac64Rat(t *testing.T) {
- for i, test := range setFrac64Tests {
- x := new(Rat).SetFrac64(test.a, test.b)
- if x.RatString() != test.out {
- t.Errorf("#%d got %s want %s", i, x.RatString(), test.out)
- }
- }
-}
-
-func TestRatGobEncoding(t *testing.T) {
- var medium bytes.Buffer
- enc := gob.NewEncoder(&medium)
- dec := gob.NewDecoder(&medium)
- for i, test := range gobEncodingTests {
- for j := 0; j < 4; j++ {
- medium.Reset() // empty buffer for each test case (in case of failures)
- stest := test
- if j&1 != 0 {
- // negative numbers
- stest = "-" + test
- }
- if j&2 != 0 {
- // fractions
- stest = stest + "." + test
- }
- var tx Rat
- tx.SetString(stest)
- if err := enc.Encode(&tx); err != nil {
- t.Errorf("#%d%c: encoding failed: %s", i, 'a'+j, err)
- }
- var rx Rat
- if err := dec.Decode(&rx); err != nil {
- t.Errorf("#%d%c: decoding failed: %s", i, 'a'+j, err)
- }
- if rx.Cmp(&tx) != 0 {
- t.Errorf("#%d%c: transmission failed: got %s want %s", i, 'a'+j, &rx, &tx)
- }
- }
- }
-}
-
-func TestIssue2379(t *testing.T) {
- // 1) no aliasing
- q := NewRat(3, 2)
- x := new(Rat)
- x.SetFrac(NewInt(3), NewInt(2))
- if x.Cmp(q) != 0 {
- t.Errorf("1) got %s want %s", x, q)
- }
-
- // 2) aliasing of numerator
- x = NewRat(2, 3)
- x.SetFrac(NewInt(3), x.Num())
- if x.Cmp(q) != 0 {
- t.Errorf("2) got %s want %s", x, q)
- }
-
- // 3) aliasing of denominator
- x = NewRat(2, 3)
- x.SetFrac(x.Denom(), NewInt(2))
- if x.Cmp(q) != 0 {
- t.Errorf("3) got %s want %s", x, q)
- }
-
- // 4) aliasing of numerator and denominator
- x = NewRat(2, 3)
- x.SetFrac(x.Denom(), x.Num())
- if x.Cmp(q) != 0 {
- t.Errorf("4) got %s want %s", x, q)
- }
-
- // 5) numerator and denominator are the same
- q = NewRat(1, 1)
- x = new(Rat)
- n := NewInt(7)
- x.SetFrac(n, n)
- if x.Cmp(q) != 0 {
- t.Errorf("5) got %s want %s", x, q)
- }
-}
"bytes"
"io"
"strconv"
- "utf8"
+ "unicode/utf8"
)
const (
// It returns the number of bytes read into p.
// It calls Read at most once on the underlying Reader,
// hence n may be less than len(p).
-// At EOF, the count will be zero and err will be os.EOF.
+// At EOF, the count will be zero and err will be io.EOF.
func (b *Reader) Read(p []byte) (n int, err error) {
n = len(p)
if n == 0 {
// returning a slice pointing at the bytes in the buffer.
// The bytes stop being valid at the next read call.
// If ReadSlice encounters an error before finding a delimiter,
-// it returns all the data in the buffer and the error itself (often os.EOF).
+// it returns all the data in the buffer and the error itself (often io.EOF).
// ReadSlice fails with error ErrBufferFull if the buffer fills without a delim.
// Because the data returned from ReadSlice will be overwritten
// by the next I/O operation, most clients should use
}
if len(line) == 0 {
+ if err != nil {
+ line = nil
+ }
return
}
err = nil
// ReadBytes reads until the first occurrence of delim in the input,
// returning a slice containing the data up to and including the delimiter.
// If ReadBytes encounters an error before finding a delimiter,
-// it returns the data read before the error and the error itself (often os.EOF).
+// it returns the data read before the error and the error itself (often io.EOF).
// ReadBytes returns err != nil if and only if the returned data does not end in
// delim.
func (b *Reader) ReadBytes(delim byte) (line []byte, err error) {
// ReadString reads until the first occurrence of delim in the input,
// returning a string containing the data up to and including the delimiter.
// If ReadString encounters an error before finding a delimiter,
-// it returns the data read before the error and the error itself (often os.EOF).
+// it returns the data read before the error and the error itself (often io.EOF).
// ReadString returns err != nil if and only if the returned data does not end in
// delim.
func (b *Reader) ReadString(delim byte) (line string, err error) {
"strings"
"testing"
"testing/iotest"
- "utf8"
+ "unicode/utf8"
)
// Reads from a reader and rot13s the result.
}
}
+func TestReadLineNonNilLineOrError(t *testing.T) {
+ r := NewReader(strings.NewReader("line 1\n"))
+ for i := 0; i < 2; i++ {
+ l, _, err := r.ReadLine()
+ if l != nil && err != nil {
+ t.Fatalf("on line %d/2; ReadLine=%#v, %v; want non-nil line or Error, but not both",
+ i+1, l, err)
+ }
+ }
+}
+
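The change above makes bufio.Reader.ReadLine return either a non-nil line or a non-nil error, never both, and the new test checks exactly that. A minimal caller-side sketch of how that contract reads in practice (the input string is only illustrative):

    package main

    import (
        "bufio"
        "fmt"
        "io"
        "strings"
    )

    func main() {
        r := bufio.NewReader(strings.NewReader("first line\nsecond line\n"))
        for {
            line, isPrefix, err := r.ReadLine()
            // Per the contract above: either line is non-nil or err is non-nil,
            // never both, so the two cases can be handled independently.
            if line != nil {
                fmt.Printf("read %q (isPrefix=%v)\n", line, isPrefix)
            }
            if err == io.EOF {
                break
            }
            if err != nil {
                fmt.Println("read error:", err)
                break
            }
        }
    }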
type readLineResult struct {
line []byte
isPrefix bool
// license that can be found in the LICENSE file.
/*
- Package builtin provides documentation for Go's built-in functions.
- The functions documented here are not actually in package builtin
+ Package builtin provides documentation for Go's predeclared identifiers.
+ The items documented here are not actually in package builtin
but their descriptions here allow godoc to present documentation
- for the language's special functions.
+ for the language's special identifiers.
*/
package builtin
+// bool is the set of boolean values, true and false.
+type bool bool
+
+// uint8 is the set of all unsigned 8-bit integers.
+// Range: 0 through 255.
+type uint8 uint8
+
+// uint16 is the set of all unsigned 16-bit integers.
+// Range: 0 through 65535.
+type uint16 uint16
+
+// uint32 is the set of all unsigned 32-bit integers.
+// Range: 0 through 4294967295.
+type uint32 uint32
+
+// uint64 is the set of all unsigned 64-bit integers.
+// Range: 0 through 18446744073709551615.
+type uint64 uint64
+
+// int8 is the set of all signed 8-bit integers.
+// Range: -128 through 127.
+type int8 int8
+
+// int16 is the set of all signed 16-bit integers.
+// Range: -32768 through 32767.
+type int16 int16
+
+// int32 is the set of all signed 32-bit integers.
+// Range: -2147483648 through 2147483647.
+type int32 int32
+
+// int64 is the set of all signed 64-bit integers.
+// Range: -9223372036854775808 through 9223372036854775807.
+type int64 int64
+
+// float32 is the set of all IEEE-754 32-bit floating-point numbers.
+type float32 float32
+
+// float64 is the set of all IEEE-754 64-bit floating-point numbers.
+type float64 float64
+
+// complex64 is the set of all complex numbers with float32 real and
+// imaginary parts.
+type complex64 complex64
+
+// complex128 is the set of all complex numbers with float64 real and
+// imaginary parts.
+type complex128 complex128
+
+// string is the set of all strings of 8-bit bytes, conventionally but not
+// necessarily representing UTF-8-encoded text. A string may be empty, but
+// not nil. Values of string type are immutable.
+type string string
+
+// int is a signed integer type that is at least 32 bits in size. It is a
+// distinct type, however, and not an alias for, say, int32.
+type int int
+
+// uint is an unsigned integer type that is at least 32 bits in size. It is a
+// distinct type, however, and not an alias for, say, uint32.
+type uint uint
+
+// uintptr is an integer type that is large enough to hold the bit pattern of
+// any pointer.
+type uintptr uintptr
+
+// byte is an alias for uint8 and is equivalent to uint8 in all ways. It is
+// used, by convention, to distinguish byte values from 8-bit unsigned
+// integer values.
+type byte byte
+
+// rune is an alias for int and is equivalent to int in all ways. It is
+// used, by convention, to distinguish character values from integer values.
+// In a future version of Go, it will change to an alias of int32.
+type rune rune
+
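The byte and rune documentation above exists mainly to distinguish raw bytes from decoded code points. A small illustrative sketch of that difference when walking a string (the sample string is arbitrary and not part of the patch):

    package main

    import "fmt"

    func main() {
        s := "héllo" // 'é' occupies two bytes in UTF-8
        // Indexing a string yields byte values.
        for i := 0; i < len(s); i++ {
            fmt.Printf("byte %d: %#x\n", i, s[i])
        }
        // Ranging over a string decodes UTF-8 and yields rune values.
        for i, r := range s {
            fmt.Printf("rune at %d: %q\n", i, r)
        }
    }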
// Type is here for the purposes of documentation only. It is a stand-in
// for any Go type, but represents the same type for any given function
// invocation.
// FloatType is here for the purposes of documentation only. It is a stand-in
// for either float type: float32 or float64.
-type FloatType int
+type FloatType float32
// ComplexType is here for the purposes of documentation only. It is a
// stand-in for either complex type: complex64 or complex128.
-type ComplexType int
+type ComplexType complex64
// The append built-in function appends elements to the end of a slice. If
// it has sufficient capacity, the destination is resliced to accommodate the
// nil. Thus the return value from recover reports whether the goroutine is
// panicking.
func recover() interface{}
+
+// The error built-in interface type is the conventional interface for
+// representing an error condition, with the nil value representing no error.
+type error interface {
+ Error() string
+}
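Any type with an Error() string method satisfies the error interface documented above. A hedged sketch with a made-up parseError type (the type, its fields, and mightFail are illustrative, not part of the library):

    package main

    import "fmt"

    // parseError is a hypothetical type that satisfies the built-in error interface.
    type parseError struct {
        line int
        msg  string
    }

    // Error implements the single method required by the error interface.
    func (e *parseError) Error() string {
        return fmt.Sprintf("line %d: %s", e.line, e.msg)
    }

    func mightFail(ok bool) error {
        if !ok {
            return &parseError{line: 7, msg: "unexpected token"}
        }
        return nil // the nil error value conventionally means "no error"
    }

    func main() {
        if err := mightFail(false); err != nil {
            fmt.Println(err)
        }
    }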
import (
"errors"
"io"
- "utf8"
+ "unicode/utf8"
)
// A Buffer is a variable-sized buffer of bytes with Read and Write methods.
// ReadFrom reads data from r until EOF and appends it to the buffer.
// The return value n is the number of bytes read.
-// Any error except os.EOF encountered during the read
+// Any error except io.EOF encountered during the read
// is also returned.
func (b *Buffer) ReadFrom(r io.Reader) (n int64, err error) {
b.lastRead = opInvalid
// Read reads the next len(p) bytes from the buffer or until the buffer
// is drained. The return value n is the number of bytes read. If the
-// buffer has no data to return, err is os.EOF even if len(p) is zero;
+// buffer has no data to return, err is io.EOF even if len(p) is zero;
// otherwise it is nil.
func (b *Buffer) Read(p []byte) (n int, err error) {
b.lastRead = opInvalid
}
// ReadByte reads and returns the next byte from the buffer.
-// If no byte is available, it returns error os.EOF.
+// If no byte is available, it returns error io.EOF.
func (b *Buffer) ReadByte() (c byte, err error) {
b.lastRead = opInvalid
if b.off >= len(b.buf) {
// ReadRune reads and returns the next UTF-8-encoded
// Unicode code point from the buffer.
-// If no bytes are available, the error returned is os.EOF.
+// If no bytes are available, the error returned is io.EOF.
// If the bytes are an erroneous UTF-8 encoding, it
// consumes one byte and returns U+FFFD, 1.
func (b *Buffer) ReadRune() (r rune, size int, err error) {
// ReadBytes reads until the first occurrence of delim in the input,
// returning a slice containing the data up to and including the delimiter.
// If ReadBytes encounters an error before finding a delimiter,
-// it returns the data read before the error and the error itself (often os.EOF).
+// it returns the data read before the error and the error itself (often io.EOF).
// ReadBytes returns err != nil if and only if the returned data does not end in
// delim.
func (b *Buffer) ReadBytes(delim byte) (line []byte, err error) {
// ReadString reads until the first occurrence of delim in the input,
// returning a string containing the data up to and including the delimiter.
// If ReadString encounters an error before finding a delimiter,
-// it returns the data read before the error and the error itself (often os.EOF).
+// it returns the data read before the error and the error itself (often io.EOF).
// ReadString returns err != nil if and only if the returned data does not end
// in delim.
func (b *Buffer) ReadString(delim byte) (line string, err error) {
import (
. "bytes"
"io"
- "rand"
+ "math/rand"
"testing"
- "utf8"
+ "unicode/utf8"
)
const N = 10000 // make this bigger for a larger (and slower) test
import (
"unicode"
- "utf8"
+ "unicode/utf8"
)
// Compare returns an integer comparing the two byte arrays lexicographically.
return n
}
+// Contains reports whether subslice is within b.
+func Contains(b, subslice []byte) bool {
+ return Index(b, subslice) != -1
+}
+
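Contains above is a thin wrapper over Index, so the two agree by construction. A short usage sketch (the sample data is arbitrary):

    package main

    import (
        "bytes"
        "fmt"
    )

    func main() {
        data := []byte("hello, world")
        // Contains(b, subslice) is equivalent to Index(b, subslice) != -1.
        fmt.Println(bytes.Contains(data, []byte("world"))) // true
        fmt.Println(bytes.Contains(data, []byte("moon")))  // false
        fmt.Println(bytes.Index(data, []byte("world")))    // 7
    }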
// Index returns the index of the first instance of sep in s, or -1 if sep is not present in s.
func Index(s, sep []byte) int {
n := len(sep)
"reflect"
"testing"
"unicode"
- "utf8"
+ "unicode/utf8"
)
func eq(a, b []string) bool {
+++ /dev/null
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package cmath provides basic constants and mathematical functions for
-// complex numbers.
-package cmath
-
-import "math"
-
-// Abs returns the absolute value (also called the modulus) of x.
-func Abs(x complex128) float64 { return math.Hypot(real(x), imag(x)) }
+++ /dev/null
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cmath
-
-import "math"
-
-// The original C code, the long comment, and the constants
-// below are from http://netlib.sandia.gov/cephes/c9x-complex/clog.c.
-// The go code is a simplified version of the original C.
-//
-// Cephes Math Library Release 2.8: June, 2000
-// Copyright 1984, 1987, 1989, 1992, 2000 by Stephen L. Moshier
-//
-// The readme file at http://netlib.sandia.gov/cephes/ says:
-// Some software in this archive may be from the book _Methods and
-// Programs for Mathematical Functions_ (Prentice-Hall or Simon & Schuster
-// International, 1989) or from the Cephes Mathematical Library, a
-// commercial product. In either event, it is copyrighted by the author.
-// What you see here may be used freely but it comes with no support or
-// guarantee.
-//
-// The two known misprints in the book are repaired here in the
-// source listings for the gamma function and the incomplete beta
-// integral.
-//
-// Stephen L. Moshier
-// moshier@na-net.ornl.gov
-
-// Complex circular arc sine
-//
-// DESCRIPTION:
-//
-// Inverse complex sine:
-// 2
-// w = -i clog( iz + csqrt( 1 - z ) ).
-//
-// casin(z) = -i casinh(iz)
-//
-// ACCURACY:
-//
-// Relative error:
-// arithmetic domain # trials peak rms
-// DEC -10,+10 10100 2.1e-15 3.4e-16
-// IEEE -10,+10 30000 2.2e-14 2.7e-15
-// Larger relative error can be observed for z near zero.
-// Also tested by csin(casin(z)) = z.
-
-// Asin returns the inverse sine of x.
-func Asin(x complex128) complex128 {
- if imag(x) == 0 {
- if math.Abs(real(x)) > 1 {
- return complex(math.Pi/2, 0) // DOMAIN error
- }
- return complex(math.Asin(real(x)), 0)
- }
- ct := complex(-imag(x), real(x)) // i * x
- xx := x * x
- x1 := complex(1-real(xx), -imag(xx)) // 1 - x*x
- x2 := Sqrt(x1) // x2 = sqrt(1 - x*x)
- w := Log(ct + x2)
- return complex(imag(w), -real(w)) // -i * w
-}
-
-// Asinh returns the inverse hyperbolic sine of x.
-func Asinh(x complex128) complex128 {
- // TODO check range
- if imag(x) == 0 {
- if math.Abs(real(x)) > 1 {
- return complex(math.Pi/2, 0) // DOMAIN error
- }
- return complex(math.Asinh(real(x)), 0)
- }
- xx := x * x
- x1 := complex(1+real(xx), imag(xx)) // 1 + x*x
- return Log(x + Sqrt(x1)) // log(x + sqrt(1 + x*x))
-}
-
-// Complex circular arc cosine
-//
-// DESCRIPTION:
-//
-// w = arccos z = PI/2 - arcsin z.
-//
-// ACCURACY:
-//
-// Relative error:
-// arithmetic domain # trials peak rms
-// DEC -10,+10 5200 1.6e-15 2.8e-16
-// IEEE -10,+10 30000 1.8e-14 2.2e-15
-
-// Acos returns the inverse cosine of x.
-func Acos(x complex128) complex128 {
- w := Asin(x)
- return complex(math.Pi/2-real(w), -imag(w))
-}
-
-// Acosh returns the inverse hyperbolic cosine of x.
-func Acosh(x complex128) complex128 {
- w := Acos(x)
- if imag(w) <= 0 {
- return complex(-imag(w), real(w)) // i * w
- }
- return complex(imag(w), -real(w)) // -i * w
-}
-
-// Complex circular arc tangent
-//
-// DESCRIPTION:
-//
-// If
-// z = x + iy,
-//
-// then
-// 1 ( 2x )
-// Re w = - arctan(-----------) + k PI
-// 2 ( 2 2)
-// (1 - x - y )
-//
-// ( 2 2)
-// 1 (x + (y+1) )
-// Im w = - log(------------)
-// 4 ( 2 2)
-// (x + (y-1) )
-//
-// Where k is an arbitrary integer.
-//
-// catan(z) = -i catanh(iz).
-//
-// ACCURACY:
-//
-// Relative error:
-// arithmetic domain # trials peak rms
-// DEC -10,+10 5900 1.3e-16 7.8e-18
-// IEEE -10,+10 30000 2.3e-15 8.5e-17
-// The check catan( ctan(z) ) = z, with |x| and |y| < PI/2,
-// had peak relative error 1.5e-16, rms relative error
-// 2.9e-17. See also clog().
-
-// Atan returns the inverse tangent of x.
-func Atan(x complex128) complex128 {
- if real(x) == 0 && imag(x) > 1 {
- return NaN()
- }
-
- x2 := real(x) * real(x)
- a := 1 - x2 - imag(x)*imag(x)
- if a == 0 {
- return NaN()
- }
- t := 0.5 * math.Atan2(2*real(x), a)
- w := reducePi(t)
-
- t = imag(x) - 1
- b := x2 + t*t
- if b == 0 {
- return NaN()
- }
- t = imag(x) + 1
- c := (x2 + t*t) / b
- return complex(w, 0.25*math.Log(c))
-}
-
-// Atanh returns the inverse hyperbolic tangent of x.
-func Atanh(x complex128) complex128 {
- z := complex(-imag(x), real(x)) // z = i * x
- z = Atan(z)
- return complex(imag(z), -real(z)) // z = -i * z
-}
+++ /dev/null
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cmath
-
-import (
- "math"
- "testing"
-)
-
-var vc26 = []complex128{
- (4.97901192488367350108546816 + 7.73887247457810456552351752i),
- (7.73887247457810456552351752 - 0.27688005719200159404635997i),
- (-0.27688005719200159404635997 - 5.01060361827107492160848778i),
- (-5.01060361827107492160848778 + 9.63629370719841737980004837i),
- (9.63629370719841737980004837 + 2.92637723924396464525443662i),
- (2.92637723924396464525443662 + 5.22908343145930665230025625i),
- (5.22908343145930665230025625 + 2.72793991043601025126008608i),
- (2.72793991043601025126008608 + 1.82530809168085506044576505i),
- (1.82530809168085506044576505 - 8.68592476857560136238589621i),
- (-8.68592476857560136238589621 + 4.97901192488367350108546816i),
-}
-var vc = []complex128{
- (4.9790119248836735e+00 + 7.7388724745781045e+00i),
- (7.7388724745781045e+00 - 2.7688005719200159e-01i),
- (-2.7688005719200159e-01 - 5.0106036182710749e+00i),
- (-5.0106036182710749e+00 + 9.6362937071984173e+00i),
- (9.6362937071984173e+00 + 2.9263772392439646e+00i),
- (2.9263772392439646e+00 + 5.2290834314593066e+00i),
- (5.2290834314593066e+00 + 2.7279399104360102e+00i),
- (2.7279399104360102e+00 + 1.8253080916808550e+00i),
- (1.8253080916808550e+00 - 8.6859247685756013e+00i),
- (-8.6859247685756013e+00 + 4.9790119248836735e+00i),
-}
-
-// The expected results below were computed by the high precision calculators
-// at http://keisan.casio.com/. More exact input values (array vc[], above)
-// were obtained by printing them with "%.26f". The answers were calculated
-// to 26 digits (by using the "Digit number" drop-down control of each
-// calculator).
-
-var abs = []float64{
- 9.2022120669932650313380972e+00,
- 7.7438239742296106616261394e+00,
- 5.0182478202557746902556648e+00,
- 1.0861137372799545160704002e+01,
- 1.0070841084922199607011905e+01,
- 5.9922447613166942183705192e+00,
- 5.8978784056736762299945176e+00,
- 3.2822866700678709020367184e+00,
- 8.8756430028990417290744307e+00,
- 1.0011785496777731986390856e+01,
-}
-
-var acos = []complex128{
- (1.0017679804707456328694569 - 2.9138232718554953784519807i),
- (0.03606427612041407369636057 + 2.7358584434576260925091256i),
- (1.6249365462333796703711823 + 2.3159537454335901187730929i),
- (2.0485650849650740120660391 - 3.0795576791204117911123886i),
- (0.29621132089073067282488147 - 3.0007392508200622519398814i),
- (1.0664555914934156601503632 - 2.4872865024796011364747111i),
- (0.48681307452231387690013905 - 2.463655912283054555225301i),
- (0.6116977071277574248407752 - 1.8734458851737055262693056i),
- (1.3649311280370181331184214 + 2.8793528632328795424123832i),
- (2.6189310485682988308904501 - 2.9956543302898767795858704i),
-}
-var acosh = []complex128{
- (2.9138232718554953784519807 + 1.0017679804707456328694569i),
- (2.7358584434576260925091256 - 0.03606427612041407369636057i),
- (2.3159537454335901187730929 - 1.6249365462333796703711823i),
- (3.0795576791204117911123886 + 2.0485650849650740120660391i),
- (3.0007392508200622519398814 + 0.29621132089073067282488147i),
- (2.4872865024796011364747111 + 1.0664555914934156601503632i),
- (2.463655912283054555225301 + 0.48681307452231387690013905i),
- (1.8734458851737055262693056 + 0.6116977071277574248407752i),
- (2.8793528632328795424123832 - 1.3649311280370181331184214i),
- (2.9956543302898767795858704 + 2.6189310485682988308904501i),
-}
-var asin = []complex128{
- (0.56902834632415098636186476 + 2.9138232718554953784519807i),
- (1.5347320506744825455349611 - 2.7358584434576260925091256i),
- (-0.054140219438483051139860579 - 2.3159537454335901187730929i),
- (-0.47776875817017739283471738 + 3.0795576791204117911123886i),
- (1.2745850059041659464064402 + 3.0007392508200622519398814i),
- (0.50434073530148095908095852 + 2.4872865024796011364747111i),
- (1.0839832522725827423311826 + 2.463655912283054555225301i),
- (0.9590986196671391943905465 + 1.8734458851737055262693056i),
- (0.20586519875787848611290031 - 2.8793528632328795424123832i),
- (-1.0481347217734022116591284 + 2.9956543302898767795858704i),
-}
-var asinh = []complex128{
- (2.9113760469415295679342185 + 0.99639459545704326759805893i),
- (2.7441755423994259061579029 - 0.035468308789000500601119392i),
- (-2.2962136462520690506126678 - 1.5144663565690151885726707i),
- (-3.0771233459295725965402455 + 1.0895577967194013849422294i),
- (3.0048366100923647417557027 + 0.29346979169819220036454168i),
- (2.4800059370795363157364643 + 1.0545868606049165710424232i),
- (2.4718773838309585611141821 + 0.47502344364250803363708842i),
- (1.8910743588080159144378396 + 0.56882925572563602341139174i),
- (2.8735426423367341878069406 - 1.362376149648891420997548i),
- (-2.9981750586172477217567878 + 0.5183571985225367505624207i),
-}
-var atan = []complex128{
- (1.5115747079332741358607654 + 0.091324403603954494382276776i),
- (1.4424504323482602560806727 - 0.0045416132642803911503770933i),
- (-1.5593488703630532674484026 - 0.20163295409248362456446431i),
- (-1.5280619472445889867794105 + 0.081721556230672003746956324i),
- (1.4759909163240799678221039 + 0.028602969320691644358773586i),
- (1.4877353772046548932715555 + 0.14566877153207281663773599i),
- (1.4206983927779191889826 + 0.076830486127880702249439993i),
- (1.3162236060498933364869556 + 0.16031313000467530644933363i),
- (1.5473450684303703578810093 - 0.11064907507939082484935782i),
- (-1.4841462340185253987375812 + 0.049341850305024399493142411i),
-}
-var atanh = []complex128{
- (0.058375027938968509064640438 + 1.4793488495105334458167782i),
- (0.12977343497790381229915667 - 1.5661009410463561327262499i),
- (-0.010576456067347252072200088 - 1.3743698658402284549750563i),
- (-0.042218595678688358882784918 + 1.4891433968166405606692604i),
- (0.095218997991316722061828397 + 1.5416884098777110330499698i),
- (0.079965459366890323857556487 + 1.4252510353873192700350435i),
- (0.15051245471980726221708301 + 1.4907432533016303804884461i),
- (0.25082072933993987714470373 + 1.392057665392187516442986i),
- (0.022896108815797135846276662 - 1.4609224989282864208963021i),
- (-0.08665624101841876130537396 + 1.5207902036935093480142159i),
-}
-var conj = []complex128{
- (4.9790119248836735e+00 - 7.7388724745781045e+00i),
- (7.7388724745781045e+00 + 2.7688005719200159e-01i),
- (-2.7688005719200159e-01 + 5.0106036182710749e+00i),
- (-5.0106036182710749e+00 - 9.6362937071984173e+00i),
- (9.6362937071984173e+00 - 2.9263772392439646e+00i),
- (2.9263772392439646e+00 - 5.2290834314593066e+00i),
- (5.2290834314593066e+00 - 2.7279399104360102e+00i),
- (2.7279399104360102e+00 - 1.8253080916808550e+00i),
- (1.8253080916808550e+00 + 8.6859247685756013e+00i),
- (-8.6859247685756013e+00 - 4.9790119248836735e+00i),
-}
-var cos = []complex128{
- (3.024540920601483938336569e+02 + 1.1073797572517071650045357e+03i),
- (1.192858682649064973252758e-01 + 2.7857554122333065540970207e-01i),
- (7.2144394304528306603857962e+01 - 2.0500129667076044169954205e+01i),
- (2.24921952538403984190541e+03 - 7.317363745602773587049329e+03i),
- (-9.148222970032421760015498e+00 + 1.953124661113563541862227e+00i),
- (-9.116081175857732248227078e+01 - 1.992669213569952232487371e+01i),
- (3.795639179042704640002918e+00 + 6.623513350981458399309662e+00i),
- (-2.9144840732498869560679084e+00 - 1.214620271628002917638748e+00i),
- (-7.45123482501299743872481e+02 + 2.8641692314488080814066734e+03i),
- (-5.371977967039319076416747e+01 + 4.893348341339375830564624e+01i),
-}
-var cosh = []complex128{
- (8.34638383523018249366948e+00 + 7.2181057886425846415112064e+01i),
- (1.10421967379919366952251e+03 - 3.1379638689277575379469861e+02i),
- (3.051485206773701584738512e-01 - 2.6805384730105297848044485e-01i),
- (-7.33294728684187933370938e+01 + 1.574445942284918251038144e+01i),
- (-7.478643293945957535757355e+03 + 1.6348382209913353929473321e+03i),
- (4.622316522966235701630926e+00 - 8.088695185566375256093098e+00i),
- (-8.544333183278877406197712e+01 + 3.7505836120128166455231717e+01i),
- (-1.934457815021493925115198e+00 + 7.3725859611767228178358673e+00i),
- (-2.352958770061749348353548e+00 - 2.034982010440878358915409e+00i),
- (7.79756457532134748165069e+02 + 2.8549350716819176560377717e+03i),
-}
-var exp = []complex128{
- (1.669197736864670815125146e+01 + 1.4436895109507663689174096e+02i),
- (2.2084389286252583447276212e+03 - 6.2759289284909211238261917e+02i),
- (2.227538273122775173434327e-01 + 7.2468284028334191250470034e-01i),
- (-6.5182985958153548997881627e-03 - 1.39965837915193860879044e-03i),
- (-1.4957286524084015746110777e+04 + 3.269676455931135688988042e+03i),
- (9.218158701983105935659273e+00 - 1.6223985291084956009304582e+01i),
- (-1.7088175716853040841444505e+02 + 7.501382609870410713795546e+01i),
- (-3.852461315830959613132505e+00 + 1.4808420423156073221970892e+01i),
- (-4.586775503301407379786695e+00 - 4.178501081246873415144744e+00i),
- (4.451337963005453491095747e-05 - 1.62977574205442915935263e-04i),
-}
-var log = []complex128{
- (2.2194438972179194425697051e+00 + 9.9909115046919291062461269e-01i),
- (2.0468956191154167256337289e+00 - 3.5762575021856971295156489e-02i),
- (1.6130808329853860438751244e+00 - 1.6259990074019058442232221e+00i),
- (2.3851910394823008710032651e+00 + 2.0502936359659111755031062e+00i),
- (2.3096442270679923004800651e+00 + 2.9483213155446756211881774e-01i),
- (1.7904660933974656106951860e+00 + 1.0605860367252556281902109e+00i),
- (1.7745926939841751666177512e+00 + 4.8084556083358307819310911e-01i),
- (1.1885403350045342425648780e+00 + 5.8969634164776659423195222e-01i),
- (2.1833107837679082586772505e+00 - 1.3636647724582455028314573e+00i),
- (2.3037629487273259170991671e+00 + 2.6210913895386013290915234e+00i),
-}
-var log10 = []complex128{
- (9.6389223745559042474184943e-01 + 4.338997735671419492599631e-01i),
- (8.8895547241376579493490892e-01 - 1.5531488990643548254864806e-02i),
- (7.0055210462945412305244578e-01 - 7.0616239649481243222248404e-01i),
- (1.0358753067322445311676952e+00 + 8.9043121238134980156490909e-01i),
- (1.003065742975330237172029e+00 + 1.2804396782187887479857811e-01i),
- (7.7758954439739162532085157e-01 + 4.6060666333341810869055108e-01i),
- (7.7069581462315327037689152e-01 + 2.0882857371769952195512475e-01i),
- (5.1617650901191156135137239e-01 + 2.5610186717615977620363299e-01i),
- (9.4819982567026639742663212e-01 - 5.9223208584446952284914289e-01i),
- (1.0005115362454417135973429e+00 + 1.1383255270407412817250921e+00i),
-}
-
-type ff struct {
- r, theta float64
-}
-
-var polar = []ff{
- {9.2022120669932650313380972e+00, 9.9909115046919291062461269e-01},
- {7.7438239742296106616261394e+00, -3.5762575021856971295156489e-02},
- {5.0182478202557746902556648e+00, -1.6259990074019058442232221e+00},
- {1.0861137372799545160704002e+01, 2.0502936359659111755031062e+00},
- {1.0070841084922199607011905e+01, 2.9483213155446756211881774e-01},
- {5.9922447613166942183705192e+00, 1.0605860367252556281902109e+00},
- {5.8978784056736762299945176e+00, 4.8084556083358307819310911e-01},
- {3.2822866700678709020367184e+00, 5.8969634164776659423195222e-01},
- {8.8756430028990417290744307e+00, -1.3636647724582455028314573e+00},
- {1.0011785496777731986390856e+01, 2.6210913895386013290915234e+00},
-}
-var pow = []complex128{
- (-2.499956739197529585028819e+00 + 1.759751724335650228957144e+00i),
- (7.357094338218116311191939e+04 - 5.089973412479151648145882e+04i),
- (1.320777296067768517259592e+01 - 3.165621914333901498921986e+01i),
- (-3.123287828297300934072149e-07 - 1.9849567521490553032502223E-7i),
- (8.0622651468477229614813e+04 - 7.80028727944573092944363e+04i),
- (-1.0268824572103165858577141e+00 - 4.716844738244989776610672e-01i),
- (-4.35953819012244175753187e+01 + 2.2036445974645306917648585e+02i),
- (8.3556092283250594950239e-01 - 1.2261571947167240272593282e+01i),
- (1.582292972120769306069625e+03 + 1.273564263524278244782512e+04i),
- (6.592208301642122149025369e-08 + 2.584887236651661903526389e-08i),
-}
-var sin = []complex128{
- (-1.1073801774240233539648544e+03 + 3.024539773002502192425231e+02i),
- (1.0317037521400759359744682e+00 - 3.2208979799929570242818e-02i),
- (-2.0501952097271429804261058e+01 - 7.2137981348240798841800967e+01i),
- (7.3173638080346338642193078e+03 + 2.249219506193664342566248e+03i),
- (-1.964375633631808177565226e+00 - 9.0958264713870404464159683e+00i),
- (1.992783647158514838337674e+01 - 9.11555769410191350416942e+01i),
- (-6.680335650741921444300349e+00 + 3.763353833142432513086117e+00i),
- (1.2794028166657459148245993e+00 - 2.7669092099795781155109602e+00i),
- (2.8641693949535259594188879e+03 + 7.451234399649871202841615e+02i),
- (-4.893811726244659135553033e+01 - 5.371469305562194635957655e+01i),
-}
-var sinh = []complex128{
- (8.34559353341652565758198e+00 + 7.2187893208650790476628899e+01i),
- (1.1042192548260646752051112e+03 - 3.1379650595631635858792056e+02i),
- (-8.239469336509264113041849e-02 + 9.9273668758439489098514519e-01i),
- (7.332295456982297798219401e+01 - 1.574585908122833444899023e+01i),
- (-7.4786432301380582103534216e+03 + 1.63483823493980029604071e+03i),
- (4.595842179016870234028347e+00 - 8.135290105518580753211484e+00i),
- (-8.543842533574163435246793e+01 + 3.750798997857594068272375e+01i),
- (-1.918003500809465688017307e+00 + 7.4358344619793504041350251e+00i),
- (-2.233816733239658031433147e+00 - 2.143519070805995056229335e+00i),
- (-7.797564130187551181105341e+02 - 2.8549352346594918614806877e+03i),
-}
-var sqrt = []complex128{
- (2.6628203086086130543813948e+00 + 1.4531345674282185229796902e+00i),
- (2.7823278427251986247149295e+00 - 4.9756907317005224529115567e-02i),
- (1.5397025302089642757361015e+00 - 1.6271336573016637535695727e+00i),
- (1.7103411581506875260277898e+00 + 2.8170677122737589676157029e+00i),
- (3.1390392472953103383607947e+00 + 4.6612625849858653248980849e-01i),
- (2.1117080764822417640789287e+00 + 1.2381170223514273234967850e+00i),
- (2.3587032281672256703926939e+00 + 5.7827111903257349935720172e-01i),
- (1.7335262588873410476661577e+00 + 5.2647258220721269141550382e-01i),
- (2.3131094974708716531499282e+00 - 1.8775429304303785570775490e+00i),
- (8.1420535745048086240947359e-01 + 3.0575897587277248522656113e+00i),
-}
-var tan = []complex128{
- (-1.928757919086441129134525e-07 + 1.0000003267499169073251826e+00i),
- (1.242412685364183792138948e+00 - 3.17149693883133370106696e+00i),
- (-4.6745126251587795225571826e-05 - 9.9992439225263959286114298e-01i),
- (4.792363401193648192887116e-09 + 1.0000000070589333451557723e+00i),
- (2.345740824080089140287315e-03 + 9.947733046570988661022763e-01i),
- (-2.396030789494815566088809e-05 + 9.9994781345418591429826779e-01i),
- (-7.370204836644931340905303e-03 + 1.0043553413417138987717748e+00i),
- (-3.691803847992048527007457e-02 + 9.6475071993469548066328894e-01i),
- (-2.781955256713729368401878e-08 - 1.000000049848910609006646e+00i),
- (9.4281590064030478879791249e-05 + 9.9999119340863718183758545e-01i),
-}
-var tanh = []complex128{
- (1.0000921981225144748819918e+00 + 2.160986245871518020231507e-05i),
- (9.9999967727531993209562591e-01 - 1.9953763222959658873657676e-07i),
- (-1.765485739548037260789686e+00 + 1.7024216325552852445168471e+00i),
- (-9.999189442732736452807108e-01 + 3.64906070494473701938098e-05i),
- (9.9999999224622333738729767e-01 - 3.560088949517914774813046e-09i),
- (1.0029324933367326862499343e+00 - 4.948790309797102353137528e-03i),
- (9.9996113064788012488693567e-01 - 4.226995742097032481451259e-05i),
- (1.0074784189316340029873945e+00 - 4.194050814891697808029407e-03i),
- (9.9385534229718327109131502e-01 + 5.144217985914355502713437e-02i),
- (-1.0000000491604982429364892e+00 - 2.901873195374433112227349e-08i),
-}
-
-// special cases
-var vcAbsSC = []complex128{
- NaN(),
-}
-var absSC = []float64{
- math.NaN(),
-}
-var vcAcosSC = []complex128{
- NaN(),
-}
-var acosSC = []complex128{
- NaN(),
-}
-var vcAcoshSC = []complex128{
- NaN(),
-}
-var acoshSC = []complex128{
- NaN(),
-}
-var vcAsinSC = []complex128{
- NaN(),
-}
-var asinSC = []complex128{
- NaN(),
-}
-var vcAsinhSC = []complex128{
- NaN(),
-}
-var asinhSC = []complex128{
- NaN(),
-}
-var vcAtanSC = []complex128{
- NaN(),
-}
-var atanSC = []complex128{
- NaN(),
-}
-var vcAtanhSC = []complex128{
- NaN(),
-}
-var atanhSC = []complex128{
- NaN(),
-}
-var vcConjSC = []complex128{
- NaN(),
-}
-var conjSC = []complex128{
- NaN(),
-}
-var vcCosSC = []complex128{
- NaN(),
-}
-var cosSC = []complex128{
- NaN(),
-}
-var vcCoshSC = []complex128{
- NaN(),
-}
-var coshSC = []complex128{
- NaN(),
-}
-var vcExpSC = []complex128{
- NaN(),
-}
-var expSC = []complex128{
- NaN(),
-}
-var vcIsNaNSC = []complex128{
- complex(math.Inf(-1), math.Inf(-1)),
- complex(math.Inf(-1), math.NaN()),
- complex(math.NaN(), math.Inf(-1)),
- complex(0, math.NaN()),
- complex(math.NaN(), 0),
- complex(math.Inf(1), math.Inf(1)),
- complex(math.Inf(1), math.NaN()),
- complex(math.NaN(), math.Inf(1)),
- complex(math.NaN(), math.NaN()),
-}
-var isNaNSC = []bool{
- false,
- false,
- false,
- true,
- true,
- false,
- false,
- false,
- true,
-}
-var vcLogSC = []complex128{
- NaN(),
-}
-var logSC = []complex128{
- NaN(),
-}
-var vcLog10SC = []complex128{
- NaN(),
-}
-var log10SC = []complex128{
- NaN(),
-}
-var vcPolarSC = []complex128{
- NaN(),
-}
-var polarSC = []ff{
- {math.NaN(), math.NaN()},
-}
-var vcPowSC = [][2]complex128{
- {NaN(), NaN()},
-}
-var powSC = []complex128{
- NaN(),
-}
-var vcSinSC = []complex128{
- NaN(),
-}
-var sinSC = []complex128{
- NaN(),
-}
-var vcSinhSC = []complex128{
- NaN(),
-}
-var sinhSC = []complex128{
- NaN(),
-}
-var vcSqrtSC = []complex128{
- NaN(),
-}
-var sqrtSC = []complex128{
- NaN(),
-}
-var vcTanSC = []complex128{
- NaN(),
-}
-var tanSC = []complex128{
- NaN(),
-}
-var vcTanhSC = []complex128{
- NaN(),
-}
-var tanhSC = []complex128{
- NaN(),
-}
-
-// functions borrowed from pkg/math/all_test.go
-func tolerance(a, b, e float64) bool {
- d := a - b
- if d < 0 {
- d = -d
- }
-
- if a != 0 {
- e = e * a
- if e < 0 {
- e = -e
- }
- }
- return d < e
-}
-func soclose(a, b, e float64) bool { return tolerance(a, b, e) }
-func veryclose(a, b float64) bool { return tolerance(a, b, 4e-16) }
-func alike(a, b float64) bool {
- switch {
- case a != a && b != b: // math.IsNaN(a) && math.IsNaN(b):
- return true
- case a == b:
- return math.Signbit(a) == math.Signbit(b)
- }
- return false
-}
-
-func cTolerance(a, b complex128, e float64) bool {
- d := Abs(a - b)
- if a != 0 {
- e = e * Abs(a)
- if e < 0 {
- e = -e
- }
- }
- return d < e
-}
-func cSoclose(a, b complex128, e float64) bool { return cTolerance(a, b, e) }
-func cVeryclose(a, b complex128) bool { return cTolerance(a, b, 4e-16) }
-func cAlike(a, b complex128) bool {
- switch {
- case IsNaN(a) && IsNaN(b):
- return true
- case a == b:
- return math.Signbit(real(a)) == math.Signbit(real(b)) && math.Signbit(imag(a)) == math.Signbit(imag(b))
- }
- return false
-}
-
-func TestAbs(t *testing.T) {
- for i := 0; i < len(vc); i++ {
- if f := Abs(vc[i]); !veryclose(abs[i], f) {
- t.Errorf("Abs(%g) = %g, want %g", vc[i], f, abs[i])
- }
- }
- for i := 0; i < len(vcAbsSC); i++ {
- if f := Abs(vcAbsSC[i]); !alike(absSC[i], f) {
- t.Errorf("Abs(%g) = %g, want %g", vcAbsSC[i], f, absSC[i])
- }
- }
-}
-func TestAcos(t *testing.T) {
- for i := 0; i < len(vc); i++ {
- if f := Acos(vc[i]); !cSoclose(acos[i], f, 1e-14) {
- t.Errorf("Acos(%g) = %g, want %g", vc[i], f, acos[i])
- }
- }
- for i := 0; i < len(vcAcosSC); i++ {
- if f := Acos(vcAcosSC[i]); !cAlike(acosSC[i], f) {
- t.Errorf("Acos(%g) = %g, want %g", vcAcosSC[i], f, acosSC[i])
- }
- }
-}
-func TestAcosh(t *testing.T) {
- for i := 0; i < len(vc); i++ {
- if f := Acosh(vc[i]); !cSoclose(acosh[i], f, 1e-14) {
- t.Errorf("Acosh(%g) = %g, want %g", vc[i], f, acosh[i])
- }
- }
- for i := 0; i < len(vcAcoshSC); i++ {
- if f := Acosh(vcAcoshSC[i]); !cAlike(acoshSC[i], f) {
- t.Errorf("Acosh(%g) = %g, want %g", vcAcoshSC[i], f, acoshSC[i])
- }
- }
-}
-func TestAsin(t *testing.T) {
- for i := 0; i < len(vc); i++ {
- if f := Asin(vc[i]); !cSoclose(asin[i], f, 1e-14) {
- t.Errorf("Asin(%g) = %g, want %g", vc[i], f, asin[i])
- }
- }
- for i := 0; i < len(vcAsinSC); i++ {
- if f := Asin(vcAsinSC[i]); !cAlike(asinSC[i], f) {
- t.Errorf("Asin(%g) = %g, want %g", vcAsinSC[i], f, asinSC[i])
- }
- }
-}
-func TestAsinh(t *testing.T) {
- for i := 0; i < len(vc); i++ {
- if f := Asinh(vc[i]); !cSoclose(asinh[i], f, 4e-15) {
- t.Errorf("Asinh(%g) = %g, want %g", vc[i], f, asinh[i])
- }
- }
- for i := 0; i < len(vcAsinhSC); i++ {
- if f := Asinh(vcAsinhSC[i]); !cAlike(asinhSC[i], f) {
- t.Errorf("Asinh(%g) = %g, want %g", vcAsinhSC[i], f, asinhSC[i])
- }
- }
-}
-func TestAtan(t *testing.T) {
- for i := 0; i < len(vc); i++ {
- if f := Atan(vc[i]); !cVeryclose(atan[i], f) {
- t.Errorf("Atan(%g) = %g, want %g", vc[i], f, atan[i])
- }
- }
- for i := 0; i < len(vcAtanSC); i++ {
- if f := Atan(vcAtanSC[i]); !cAlike(atanSC[i], f) {
- t.Errorf("Atan(%g) = %g, want %g", vcAtanSC[i], f, atanSC[i])
- }
- }
-}
-func TestAtanh(t *testing.T) {
- for i := 0; i < len(vc); i++ {
- if f := Atanh(vc[i]); !cVeryclose(atanh[i], f) {
- t.Errorf("Atanh(%g) = %g, want %g", vc[i], f, atanh[i])
- }
- }
- for i := 0; i < len(vcAtanhSC); i++ {
- if f := Atanh(vcAtanhSC[i]); !cAlike(atanhSC[i], f) {
- t.Errorf("Atanh(%g) = %g, want %g", vcAtanhSC[i], f, atanhSC[i])
- }
- }
-}
-func TestConj(t *testing.T) {
- for i := 0; i < len(vc); i++ {
- if f := Conj(vc[i]); !cVeryclose(conj[i], f) {
- t.Errorf("Conj(%g) = %g, want %g", vc[i], f, conj[i])
- }
- }
- for i := 0; i < len(vcConjSC); i++ {
- if f := Conj(vcConjSC[i]); !cAlike(conjSC[i], f) {
- t.Errorf("Conj(%g) = %g, want %g", vcConjSC[i], f, conjSC[i])
- }
- }
-}
-func TestCos(t *testing.T) {
- for i := 0; i < len(vc); i++ {
- if f := Cos(vc[i]); !cSoclose(cos[i], f, 3e-15) {
- t.Errorf("Cos(%g) = %g, want %g", vc[i], f, cos[i])
- }
- }
- for i := 0; i < len(vcCosSC); i++ {
- if f := Cos(vcCosSC[i]); !cAlike(cosSC[i], f) {
- t.Errorf("Cos(%g) = %g, want %g", vcCosSC[i], f, cosSC[i])
- }
- }
-}
-func TestCosh(t *testing.T) {
- for i := 0; i < len(vc); i++ {
- if f := Cosh(vc[i]); !cSoclose(cosh[i], f, 2e-15) {
- t.Errorf("Cosh(%g) = %g, want %g", vc[i], f, cosh[i])
- }
- }
- for i := 0; i < len(vcCoshSC); i++ {
- if f := Cosh(vcCoshSC[i]); !cAlike(coshSC[i], f) {
- t.Errorf("Cosh(%g) = %g, want %g", vcCoshSC[i], f, coshSC[i])
- }
- }
-}
-func TestExp(t *testing.T) {
- for i := 0; i < len(vc); i++ {
- if f := Exp(vc[i]); !cSoclose(exp[i], f, 1e-15) {
- t.Errorf("Exp(%g) = %g, want %g", vc[i], f, exp[i])
- }
- }
- for i := 0; i < len(vcExpSC); i++ {
- if f := Exp(vcExpSC[i]); !cAlike(expSC[i], f) {
- t.Errorf("Exp(%g) = %g, want %g", vcExpSC[i], f, expSC[i])
- }
- }
-}
-func TestIsNaN(t *testing.T) {
- for i := 0; i < len(vcIsNaNSC); i++ {
- if f := IsNaN(vcIsNaNSC[i]); isNaNSC[i] != f {
- t.Errorf("IsNaN(%v) = %v, want %v", vcIsNaNSC[i], f, isNaNSC[i])
- }
- }
-}
-func TestLog(t *testing.T) {
- for i := 0; i < len(vc); i++ {
- if f := Log(vc[i]); !cVeryclose(log[i], f) {
- t.Errorf("Log(%g) = %g, want %g", vc[i], f, log[i])
- }
- }
- for i := 0; i < len(vcLogSC); i++ {
- if f := Log(vcLogSC[i]); !cAlike(logSC[i], f) {
- t.Errorf("Log(%g) = %g, want %g", vcLogSC[i], f, logSC[i])
- }
- }
-}
-func TestLog10(t *testing.T) {
- for i := 0; i < len(vc); i++ {
- if f := Log10(vc[i]); !cVeryclose(log10[i], f) {
- t.Errorf("Log10(%g) = %g, want %g", vc[i], f, log10[i])
- }
- }
- for i := 0; i < len(vcLog10SC); i++ {
- if f := Log10(vcLog10SC[i]); !cAlike(log10SC[i], f) {
- t.Errorf("Log10(%g) = %g, want %g", vcLog10SC[i], f, log10SC[i])
- }
- }
-}
-func TestPolar(t *testing.T) {
- for i := 0; i < len(vc); i++ {
- if r, theta := Polar(vc[i]); !veryclose(polar[i].r, r) && !veryclose(polar[i].theta, theta) {
- t.Errorf("Polar(%g) = %g, %g want %g, %g", vc[i], r, theta, polar[i].r, polar[i].theta)
- }
- }
- for i := 0; i < len(vcPolarSC); i++ {
- if r, theta := Polar(vcPolarSC[i]); !alike(polarSC[i].r, r) && !alike(polarSC[i].theta, theta) {
- t.Errorf("Polar(%g) = %g, %g, want %g, %g", vcPolarSC[i], r, theta, polarSC[i].r, polarSC[i].theta)
- }
- }
-}
-func TestPow(t *testing.T) {
- var a = complex(3.0, 3.0)
- for i := 0; i < len(vc); i++ {
- if f := Pow(a, vc[i]); !cSoclose(pow[i], f, 4e-15) {
- t.Errorf("Pow(%g, %g) = %g, want %g", a, vc[i], f, pow[i])
- }
- }
- for i := 0; i < len(vcPowSC); i++ {
- if f := Pow(vcPowSC[i][0], vcPowSC[i][0]); !cAlike(powSC[i], f) {
- t.Errorf("Pow(%g, %g) = %g, want %g", vcPowSC[i][0], vcPowSC[i][0], f, powSC[i])
- }
- }
-}
-func TestRect(t *testing.T) {
- for i := 0; i < len(vc); i++ {
- if f := Rect(polar[i].r, polar[i].theta); !cVeryclose(vc[i], f) {
- t.Errorf("Rect(%g, %g) = %g want %g", polar[i].r, polar[i].theta, f, vc[i])
- }
- }
- for i := 0; i < len(vcPolarSC); i++ {
- if f := Rect(polarSC[i].r, polarSC[i].theta); !cAlike(vcPolarSC[i], f) {
- t.Errorf("Rect(%g, %g) = %g, want %g", polarSC[i].r, polarSC[i].theta, f, vcPolarSC[i])
- }
- }
-}
-func TestSin(t *testing.T) {
- for i := 0; i < len(vc); i++ {
- if f := Sin(vc[i]); !cSoclose(sin[i], f, 2e-15) {
- t.Errorf("Sin(%g) = %g, want %g", vc[i], f, sin[i])
- }
- }
- for i := 0; i < len(vcSinSC); i++ {
- if f := Sin(vcSinSC[i]); !cAlike(sinSC[i], f) {
- t.Errorf("Sin(%g) = %g, want %g", vcSinSC[i], f, sinSC[i])
- }
- }
-}
-func TestSinh(t *testing.T) {
- for i := 0; i < len(vc); i++ {
- if f := Sinh(vc[i]); !cSoclose(sinh[i], f, 2e-15) {
- t.Errorf("Sinh(%g) = %g, want %g", vc[i], f, sinh[i])
- }
- }
- for i := 0; i < len(vcSinhSC); i++ {
- if f := Sinh(vcSinhSC[i]); !cAlike(sinhSC[i], f) {
- t.Errorf("Sinh(%g) = %g, want %g", vcSinhSC[i], f, sinhSC[i])
- }
- }
-}
-func TestSqrt(t *testing.T) {
- for i := 0; i < len(vc); i++ {
- if f := Sqrt(vc[i]); !cVeryclose(sqrt[i], f) {
- t.Errorf("Sqrt(%g) = %g, want %g", vc[i], f, sqrt[i])
- }
- }
- for i := 0; i < len(vcSqrtSC); i++ {
- if f := Sqrt(vcSqrtSC[i]); !cAlike(sqrtSC[i], f) {
- t.Errorf("Sqrt(%g) = %g, want %g", vcSqrtSC[i], f, sqrtSC[i])
- }
- }
-}
-func TestTan(t *testing.T) {
- for i := 0; i < len(vc); i++ {
- if f := Tan(vc[i]); !cSoclose(tan[i], f, 3e-15) {
- t.Errorf("Tan(%g) = %g, want %g", vc[i], f, tan[i])
- }
- }
- for i := 0; i < len(vcTanSC); i++ {
- if f := Tan(vcTanSC[i]); !cAlike(tanSC[i], f) {
- t.Errorf("Tan(%g) = %g, want %g", vcTanSC[i], f, tanSC[i])
- }
- }
-}
-func TestTanh(t *testing.T) {
- for i := 0; i < len(vc); i++ {
- if f := Tanh(vc[i]); !cSoclose(tanh[i], f, 2e-15) {
- t.Errorf("Tanh(%g) = %g, want %g", vc[i], f, tanh[i])
- }
- }
- for i := 0; i < len(vcTanhSC); i++ {
- if f := Tanh(vcTanhSC[i]); !cAlike(tanhSC[i], f) {
- t.Errorf("Tanh(%g) = %g, want %g", vcTanhSC[i], f, tanhSC[i])
- }
- }
-}
-
-func BenchmarkAbs(b *testing.B) {
- for i := 0; i < b.N; i++ {
- Abs(complex(2.5, 3.5))
- }
-}
-func BenchmarkAcos(b *testing.B) {
- for i := 0; i < b.N; i++ {
- Acos(complex(2.5, 3.5))
- }
-}
-func BenchmarkAcosh(b *testing.B) {
- for i := 0; i < b.N; i++ {
- Acosh(complex(2.5, 3.5))
- }
-}
-func BenchmarkAsin(b *testing.B) {
- for i := 0; i < b.N; i++ {
- Asin(complex(2.5, 3.5))
- }
-}
-func BenchmarkAsinh(b *testing.B) {
- for i := 0; i < b.N; i++ {
- Asinh(complex(2.5, 3.5))
- }
-}
-func BenchmarkAtan(b *testing.B) {
- for i := 0; i < b.N; i++ {
- Atan(complex(2.5, 3.5))
- }
-}
-func BenchmarkAtanh(b *testing.B) {
- for i := 0; i < b.N; i++ {
- Atanh(complex(2.5, 3.5))
- }
-}
-func BenchmarkConj(b *testing.B) {
- for i := 0; i < b.N; i++ {
- Conj(complex(2.5, 3.5))
- }
-}
-func BenchmarkCos(b *testing.B) {
- for i := 0; i < b.N; i++ {
- Cos(complex(2.5, 3.5))
- }
-}
-func BenchmarkCosh(b *testing.B) {
- for i := 0; i < b.N; i++ {
- Cosh(complex(2.5, 3.5))
- }
-}
-func BenchmarkExp(b *testing.B) {
- for i := 0; i < b.N; i++ {
- Exp(complex(2.5, 3.5))
- }
-}
-func BenchmarkLog(b *testing.B) {
- for i := 0; i < b.N; i++ {
- Log(complex(2.5, 3.5))
- }
-}
-func BenchmarkLog10(b *testing.B) {
- for i := 0; i < b.N; i++ {
- Log10(complex(2.5, 3.5))
- }
-}
-func BenchmarkPhase(b *testing.B) {
- for i := 0; i < b.N; i++ {
- Phase(complex(2.5, 3.5))
- }
-}
-func BenchmarkPolar(b *testing.B) {
- for i := 0; i < b.N; i++ {
- Polar(complex(2.5, 3.5))
- }
-}
-func BenchmarkPow(b *testing.B) {
- for i := 0; i < b.N; i++ {
- Pow(complex(2.5, 3.5), complex(2.5, 3.5))
- }
-}
-func BenchmarkRect(b *testing.B) {
- for i := 0; i < b.N; i++ {
- Rect(2.5, 1.5)
- }
-}
-func BenchmarkSin(b *testing.B) {
- for i := 0; i < b.N; i++ {
- Sin(complex(2.5, 3.5))
- }
-}
-func BenchmarkSinh(b *testing.B) {
- for i := 0; i < b.N; i++ {
- Sinh(complex(2.5, 3.5))
- }
-}
-func BenchmarkSqrt(b *testing.B) {
- for i := 0; i < b.N; i++ {
- Sqrt(complex(2.5, 3.5))
- }
-}
-func BenchmarkTan(b *testing.B) {
- for i := 0; i < b.N; i++ {
- Tan(complex(2.5, 3.5))
- }
-}
-func BenchmarkTanh(b *testing.B) {
- for i := 0; i < b.N; i++ {
- Tanh(complex(2.5, 3.5))
- }
-}
+++ /dev/null
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cmath
-
-// Conj returns the complex conjugate of x.
-func Conj(x complex128) complex128 { return complex(real(x), -imag(x)) }
+++ /dev/null
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cmath
-
-import "math"
-
-// The original C code, the long comment, and the constants
-// below are from http://netlib.sandia.gov/cephes/c9x-complex/clog.c.
-// The go code is a simplified version of the original C.
-//
-// Cephes Math Library Release 2.8: June, 2000
-// Copyright 1984, 1987, 1989, 1992, 2000 by Stephen L. Moshier
-//
-// The readme file at http://netlib.sandia.gov/cephes/ says:
-// Some software in this archive may be from the book _Methods and
-// Programs for Mathematical Functions_ (Prentice-Hall or Simon & Schuster
-// International, 1989) or from the Cephes Mathematical Library, a
-// commercial product. In either event, it is copyrighted by the author.
-// What you see here may be used freely but it comes with no support or
-// guarantee.
-//
-// The two known misprints in the book are repaired here in the
-// source listings for the gamma function and the incomplete beta
-// integral.
-//
-// Stephen L. Moshier
-// moshier@na-net.ornl.gov
-
-// Complex exponential function
-//
-// DESCRIPTION:
-//
-// Returns the complex exponential of the complex argument z.
-//
-// If
-// z = x + iy,
-// r = exp(x),
-// then
-// w = r cos y + i r sin y.
-//
-// ACCURACY:
-//
-// Relative error:
-// arithmetic domain # trials peak rms
-// DEC -10,+10 8700 3.7e-17 1.1e-17
-// IEEE -10,+10 30000 3.0e-16 8.7e-17
-
-// Exp returns e**x, the base-e exponential of x.
-func Exp(x complex128) complex128 {
- r := math.Exp(real(x))
- s, c := math.Sincos(imag(x))
- return complex(r*c, r*s)
-}
+++ /dev/null
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cmath
-
-import "math"
-
-// IsInf returns true if either real(x) or imag(x) is an infinity.
-func IsInf(x complex128) bool {
- if math.IsInf(real(x), 0) || math.IsInf(imag(x), 0) {
- return true
- }
- return false
-}
-
-// Inf returns a complex infinity, complex(+Inf, +Inf).
-func Inf() complex128 {
- inf := math.Inf(1)
- return complex(inf, inf)
-}
+++ /dev/null
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cmath
-
-import "math"
-
-// IsNaN returns true if either real(x) or imag(x) is NaN
-// and neither is an infinity.
-func IsNaN(x complex128) bool {
- switch {
- case math.IsInf(real(x), 0) || math.IsInf(imag(x), 0):
- return false
- case math.IsNaN(real(x)) || math.IsNaN(imag(x)):
- return true
- }
- return false
-}
-
-// NaN returns a complex ``not-a-number'' value.
-func NaN() complex128 {
- nan := math.NaN()
- return complex(nan, nan)
-}
+++ /dev/null
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cmath
-
-import "math"
-
-// The original C code, the long comment, and the constants
-// below are from http://netlib.sandia.gov/cephes/c9x-complex/clog.c.
-// The go code is a simplified version of the original C.
-//
-// Cephes Math Library Release 2.8: June, 2000
-// Copyright 1984, 1987, 1989, 1992, 2000 by Stephen L. Moshier
-//
-// The readme file at http://netlib.sandia.gov/cephes/ says:
-// Some software in this archive may be from the book _Methods and
-// Programs for Mathematical Functions_ (Prentice-Hall or Simon & Schuster
-// International, 1989) or from the Cephes Mathematical Library, a
-// commercial product. In either event, it is copyrighted by the author.
-// What you see here may be used freely but it comes with no support or
-// guarantee.
-//
-// The two known misprints in the book are repaired here in the
-// source listings for the gamma function and the incomplete beta
-// integral.
-//
-// Stephen L. Moshier
-// moshier@na-net.ornl.gov
-
-// Complex natural logarithm
-//
-// DESCRIPTION:
-//
-// Returns complex logarithm to the base e (2.718...) of
-// the complex argument z.
-//
-// If
-// z = x + iy, r = sqrt( x**2 + y**2 ),
-// then
-// w = log(r) + i arctan(y/x).
-//
-// The arctangent ranges from -PI to +PI.
-//
-// ACCURACY:
-//
-// Relative error:
-// arithmetic domain # trials peak rms
-// DEC -10,+10 7000 8.5e-17 1.9e-17
-// IEEE -10,+10 30000 5.0e-15 1.1e-16
-//
-// Larger relative error can be observed for z near 1 +i0.
-// In IEEE arithmetic the peak absolute error is 5.2e-16, rms
-// absolute error 1.0e-16.
-
-// Log returns the natural logarithm of x.
-func Log(x complex128) complex128 {
- return complex(math.Log(Abs(x)), Phase(x))
-}
-
-// Log10 returns the decimal logarithm of x.
-func Log10(x complex128) complex128 {
- return math.Log10E * Log(x)
-}
+++ /dev/null
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cmath
-
-import "math"
-
-// Phase returns the phase (also called the argument) of x.
-// The returned value is in the range [-Pi, Pi].
-func Phase(x complex128) float64 { return math.Atan2(imag(x), real(x)) }
+++ /dev/null
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cmath
-
-// Polar returns the absolute value r and phase θ of x,
-// such that x = r * e**θi.
-// The phase is in the range [-Pi, Pi].
-func Polar(x complex128) (r, θ float64) {
- return Abs(x), Phase(x)
-}
+++ /dev/null
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cmath
-
-import "math"
-
-// The original C code, the long comment, and the constants
-// below are from http://netlib.sandia.gov/cephes/c9x-complex/clog.c.
-// The go code is a simplified version of the original C.
-//
-// Cephes Math Library Release 2.8: June, 2000
-// Copyright 1984, 1987, 1989, 1992, 2000 by Stephen L. Moshier
-//
-// The readme file at http://netlib.sandia.gov/cephes/ says:
-// Some software in this archive may be from the book _Methods and
-// Programs for Mathematical Functions_ (Prentice-Hall or Simon & Schuster
-// International, 1989) or from the Cephes Mathematical Library, a
-// commercial product. In either event, it is copyrighted by the author.
-// What you see here may be used freely but it comes with no support or
-// guarantee.
-//
-// The two known misprints in the book are repaired here in the
-// source listings for the gamma function and the incomplete beta
-// integral.
-//
-// Stephen L. Moshier
-// moshier@na-net.ornl.gov
-
-// Complex power function
-//
-// DESCRIPTION:
-//
-// Raises complex A to the complex Zth power.
-// Definition is per AMS55 # 4.2.8,
-// analytically equivalent to cpow(a,z) = cexp(z clog(a)).
-//
-// ACCURACY:
-//
-// Relative error:
-// arithmetic domain # trials peak rms
-// IEEE -10,+10 30000 9.4e-15 1.5e-15
-
-// Pow returns x**y, the base-x exponential of y.
-func Pow(x, y complex128) complex128 {
- modulus := Abs(x)
- if modulus == 0 {
- return complex(0, 0)
- }
- r := math.Pow(modulus, real(y))
- arg := Phase(x)
- theta := real(y) * arg
- if imag(y) != 0 {
- r *= math.Exp(-imag(y) * arg)
- theta += imag(y) * math.Log(modulus)
- }
- s, c := math.Sincos(theta)
- return complex(r*c, r*s)
-}
+++ /dev/null
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cmath
-
-import "math"
-
-// Rect returns the complex number x with polar coordinates r, θ.
-func Rect(r, θ float64) complex128 {
- s, c := math.Sincos(θ)
- return complex(r*c, r*s)
-}
+++ /dev/null
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cmath
-
-import "math"
-
-// The original C code, the long comment, and the constants
-// below are from http://netlib.sandia.gov/cephes/c9x-complex/clog.c.
-// The go code is a simplified version of the original C.
-//
-// Cephes Math Library Release 2.8: June, 2000
-// Copyright 1984, 1987, 1989, 1992, 2000 by Stephen L. Moshier
-//
-// The readme file at http://netlib.sandia.gov/cephes/ says:
-// Some software in this archive may be from the book _Methods and
-// Programs for Mathematical Functions_ (Prentice-Hall or Simon & Schuster
-// International, 1989) or from the Cephes Mathematical Library, a
-// commercial product. In either event, it is copyrighted by the author.
-// What you see here may be used freely but it comes with no support or
-// guarantee.
-//
-// The two known misprints in the book are repaired here in the
-// source listings for the gamma function and the incomplete beta
-// integral.
-//
-// Stephen L. Moshier
-// moshier@na-net.ornl.gov
-
-// Complex circular sine
-//
-// DESCRIPTION:
-//
-// If
-// z = x + iy,
-//
-// then
-//
-// w = sin x cosh y + i cos x sinh y.
-//
-// csin(z) = -i csinh(iz).
-//
-// ACCURACY:
-//
-// Relative error:
-// arithmetic domain # trials peak rms
-// DEC -10,+10 8400 5.3e-17 1.3e-17
-// IEEE -10,+10 30000 3.8e-16 1.0e-16
-// Also tested by csin(casin(z)) = z.
-
-// Sin returns the sine of x.
-func Sin(x complex128) complex128 {
- s, c := math.Sincos(real(x))
- sh, ch := sinhcosh(imag(x))
- return complex(s*ch, c*sh)
-}
-
-// Complex hyperbolic sine
-//
-// DESCRIPTION:
-//
-// csinh z = (cexp(z) - cexp(-z))/2
-// = sinh x * cos y + i cosh x * sin y .
-//
-// ACCURACY:
-//
-// Relative error:
-// arithmetic domain # trials peak rms
-// IEEE -10,+10 30000 3.1e-16 8.2e-17
-
-// Sinh returns the hyperbolic sine of x.
-func Sinh(x complex128) complex128 {
- s, c := math.Sincos(imag(x))
- sh, ch := sinhcosh(real(x))
- return complex(c*sh, s*ch)
-}
-
-// Complex circular cosine
-//
-// DESCRIPTION:
-//
-// If
-// z = x + iy,
-//
-// then
-//
-// w = cos x cosh y - i sin x sinh y.
-//
-// ACCURACY:
-//
-// Relative error:
-// arithmetic domain # trials peak rms
-// DEC -10,+10 8400 4.5e-17 1.3e-17
-// IEEE -10,+10 30000 3.8e-16 1.0e-16
-
-// Cos returns the cosine of x.
-func Cos(x complex128) complex128 {
- s, c := math.Sincos(real(x))
- sh, ch := sinhcosh(imag(x))
- return complex(c*ch, -s*sh)
-}
-
-// Complex hyperbolic cosine
-//
-// DESCRIPTION:
-//
-// ccosh(z) = cosh x cos y + i sinh x sin y .
-//
-// ACCURACY:
-//
-// Relative error:
-// arithmetic domain # trials peak rms
-// IEEE -10,+10 30000 2.9e-16 8.1e-17
-
-// Cosh returns the hyperbolic cosine of x.
-func Cosh(x complex128) complex128 {
- s, c := math.Sincos(imag(x))
- sh, ch := sinhcosh(real(x))
- return complex(c*ch, s*sh)
-}
-
-// calculate sinh and cosh
-func sinhcosh(x float64) (sh, ch float64) {
- if math.Abs(x) <= 0.5 {
- return math.Sinh(x), math.Cosh(x)
- }
- e := math.Exp(x)
- ei := 0.5 / e
- e *= 0.5
- return e - ei, e + ei
-}
+++ /dev/null
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cmath
-
-import "math"
-
-// The original C code, the long comment, and the constants
-// below are from http://netlib.sandia.gov/cephes/c9x-complex/clog.c.
-// The go code is a simplified version of the original C.
-//
-// Cephes Math Library Release 2.8: June, 2000
-// Copyright 1984, 1987, 1989, 1992, 2000 by Stephen L. Moshier
-//
-// The readme file at http://netlib.sandia.gov/cephes/ says:
-// Some software in this archive may be from the book _Methods and
-// Programs for Mathematical Functions_ (Prentice-Hall or Simon & Schuster
-// International, 1989) or from the Cephes Mathematical Library, a
-// commercial product. In either event, it is copyrighted by the author.
-// What you see here may be used freely but it comes with no support or
-// guarantee.
-//
-// The two known misprints in the book are repaired here in the
-// source listings for the gamma function and the incomplete beta
-// integral.
-//
-// Stephen L. Moshier
-// moshier@na-net.ornl.gov
-
-// Complex square root
-//
-// DESCRIPTION:
-//
-// If z = x + iy, r = |z|, then
-//
-// 1/2
-// Re w = [ (r + x)/2 ] ,
-//
-// 1/2
-// Im w = [ (r - x)/2 ] .
-//
-// Cancellation error in r-x or r+x is avoided by using the
-// identity 2 Re w Im w = y.
-//
-// Note that -w is also a square root of z. The root chosen
-// is always in the right half plane and Im w has the same sign as y.
-//
-// ACCURACY:
-//
-// Relative error:
-// arithmetic domain # trials peak rms
-// DEC -10,+10 25000 3.2e-17 9.6e-18
-// IEEE -10,+10 1,000,000 2.9e-16 6.1e-17
-
-// Sqrt returns the square root of x.
-func Sqrt(x complex128) complex128 {
- if imag(x) == 0 {
- if real(x) == 0 {
- return complex(0, 0)
- }
- if real(x) < 0 {
- return complex(0, math.Sqrt(-real(x)))
- }
- return complex(math.Sqrt(real(x)), 0)
- }
- if real(x) == 0 {
- if imag(x) < 0 {
- r := math.Sqrt(-0.5 * imag(x))
- return complex(r, -r)
- }
- r := math.Sqrt(0.5 * imag(x))
- return complex(r, r)
- }
- a := real(x)
- b := imag(x)
- var scale float64
- // Rescale to avoid internal overflow or underflow.
- if math.Abs(a) > 4 || math.Abs(b) > 4 {
- a *= 0.25
- b *= 0.25
- scale = 2
- } else {
- a *= 1.8014398509481984e16 // 2**54
- b *= 1.8014398509481984e16
- scale = 7.450580596923828125e-9 // 2**-27
- }
- r := math.Hypot(a, b)
- var t float64
- if a > 0 {
- t = math.Sqrt(0.5*r + 0.5*a)
- r = scale * math.Abs((0.5*b)/t)
- t *= scale
- } else {
- r = math.Sqrt(0.5*r - 0.5*a)
- t = scale * math.Abs((0.5*b)/r)
- r *= scale
- }
- if b < 0 {
- return complex(t, -r)
- }
- return complex(t, r)
-}
+++ /dev/null
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cmath
-
-import "math"
-
-// The original C code, the long comment, and the constants
-// below are from http://netlib.sandia.gov/cephes/c9x-complex/clog.c.
-// The go code is a simplified version of the original C.
-//
-// Cephes Math Library Release 2.8: June, 2000
-// Copyright 1984, 1987, 1989, 1992, 2000 by Stephen L. Moshier
-//
-// The readme file at http://netlib.sandia.gov/cephes/ says:
-// Some software in this archive may be from the book _Methods and
-// Programs for Mathematical Functions_ (Prentice-Hall or Simon & Schuster
-// International, 1989) or from the Cephes Mathematical Library, a
-// commercial product. In either event, it is copyrighted by the author.
-// What you see here may be used freely but it comes with no support or
-// guarantee.
-//
-// The two known misprints in the book are repaired here in the
-// source listings for the gamma function and the incomplete beta
-// integral.
-//
-// Stephen L. Moshier
-// moshier@na-net.ornl.gov
-
-// Complex circular tangent
-//
-// DESCRIPTION:
-//
-// If
-// z = x + iy,
-//
-// then
-//
-// sin 2x + i sinh 2y
-// w = --------------------.
-// cos 2x + cosh 2y
-//
-// On the real axis the denominator is zero at odd multiples
-// of PI/2. The denominator is evaluated by its Taylor
-// series near these points.
-//
-// ctan(z) = -i ctanh(iz).
-//
-// ACCURACY:
-//
-// Relative error:
-// arithmetic domain # trials peak rms
-// DEC -10,+10 5200 7.1e-17 1.6e-17
-// IEEE -10,+10 30000 7.2e-16 1.2e-16
-// Also tested by ctan * ccot = 1 and catan(ctan(z)) = z.
-
-// Tan returns the tangent of x.
-func Tan(x complex128) complex128 {
- d := math.Cos(2*real(x)) + math.Cosh(2*imag(x))
- if math.Abs(d) < 0.25 {
- d = tanSeries(x)
- }
- if d == 0 {
- return Inf()
- }
- return complex(math.Sin(2*real(x))/d, math.Sinh(2*imag(x))/d)
-}
-
-// Complex hyperbolic tangent
-//
-// DESCRIPTION:
-//
-// tanh z = (sinh 2x + i sin 2y) / (cosh 2x + cos 2y) .
-//
-// ACCURACY:
-//
-// Relative error:
-// arithmetic domain # trials peak rms
-// IEEE -10,+10 30000 1.7e-14 2.4e-16
-
-// Tanh returns the hyperbolic tangent of x.
-func Tanh(x complex128) complex128 {
- d := math.Cosh(2*real(x)) + math.Cos(2*imag(x))
- if d == 0 {
- return Inf()
- }
- return complex(math.Sinh(2*real(x))/d, math.Sin(2*imag(x))/d)
-}
-
-// Program to subtract nearest integer multiple of PI
-func reducePi(x float64) float64 {
- const (
- // extended precision value of PI:
- DP1 = 3.14159265160560607910E0 // ?? 0x400921fb54000000
- DP2 = 1.98418714791870343106E-9 // ?? 0x3e210b4610000000
- DP3 = 1.14423774522196636802E-17 // ?? 0x3c6a62633145c06e
- )
- t := x / math.Pi
- if t >= 0 {
- t += 0.5
- } else {
- t -= 0.5
- }
- t = float64(int64(t)) // int64(t) = the multiple
- return ((x - t*DP1) - t*DP2) - t*DP3
-}
-
-// Taylor series expansion for cosh(2y) - cos(2x)
-func tanSeries(z complex128) float64 {
- const MACHEP = 1.0 / (1 << 53)
- x := math.Abs(2 * real(z))
- y := math.Abs(2 * imag(z))
- x = reducePi(x)
- x = x * x
- y = y * y
- x2 := 1.0
- y2 := 1.0
- f := 1.0
- rn := 0.0
- d := 0.0
- for {
- rn += 1
- f *= rn
- rn += 1
- f *= rn
- x2 *= x
- y2 *= y
- t := y2 + x2
- t /= f
- d += t
-
- rn += 1
- f *= rn
- rn += 1
- f *= rn
- x2 *= x
- y2 *= y
- t = y2 - x2
- t /= f
- d += t
- if math.Abs(t/d) <= MACHEP {
- break
- }
- }
- return d
-}
-
-// Complex circular cotangent
-//
-// DESCRIPTION:
-//
-// If
-// z = x + iy,
-//
-// then
-//
-// w = (sin 2x - i sinh 2y) / (cosh 2y - cos 2x).
-//
-// On the real axis, the denominator has zeros at even
-// multiples of PI/2. Near these points it is evaluated
-// by a Taylor series.
-//
-// ACCURACY:
-//
-// Relative error:
-// arithmetic domain # trials peak rms
-// DEC -10,+10 3000 6.5e-17 1.6e-17
-// IEEE -10,+10 30000 9.2e-16 1.2e-16
-// Also tested by ctan * ccot = 1 + i0.
-
-// Cot returns the cotangent of x.
-func Cot(x complex128) complex128 {
- d := math.Cosh(2*imag(x)) - math.Cos(2*real(x))
- if math.Abs(d) < 0.25 {
- d = tanSeries(x)
- }
- if d == 0 {
- return Inf()
- }
- return complex(math.Sin(2*real(x))/d, -math.Sinh(2*imag(x))/d)
-}
// ReadBits64 reads the given number of bits and returns them in the
// least-significant part of a uint64. In the event of an error, it returns 0
-// and the error can be obtained by calling Error().
+// and the error can be obtained by calling Err().
func (br *bitReader) ReadBits64(bits uint) (n uint64) {
for bits > br.bits {
b, err := br.r.ReadByte()
return n != 0
}
-func (br *bitReader) Error() error {
+func (br *bitReader) Err() error {
return br.err
}
if !bz2.setupDone {
err = bz2.setup()
- brErr := bz2.br.Error()
+ brErr := bz2.br.Err()
if brErr != nil {
err = brErr
}
}
n, err = bz2.read(buf)
- brErr := bz2.br.Error()
+ brErr := bz2.br.Err()
if brErr != nil {
err = brErr
}
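The renamed Err method reflects the sticky-error style this reader uses: individual reads return zero values on failure and the caller checks for an error once, afterwards. A minimal stand-alone sketch of that pattern (errReader and ReadByteOrZero are illustrative names, not part of the bzip2 code):

package main

import (
	"fmt"
	"io"
	"strings"
)

// errReader saves the first error it sees; reads after that return zero values.
type errReader struct {
	r   io.ByteReader
	err error
}

func (er *errReader) ReadByteOrZero() byte {
	if er.err != nil {
		return 0
	}
	b, err := er.r.ReadByte()
	if err != nil {
		er.err = err
		return 0
	}
	return b
}

func (er *errReader) Err() error { return er.err }

func main() {
	er := &errReader{r: strings.NewReader("ab")}
	x, y, z := er.ReadByteOrZero(), er.ReadByteOrZero(), er.ReadByteOrZero()
	fmt.Println(x, y, z)  // 97 98 0
	fmt.Println(er.Err()) // EOF
}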
+++ /dev/null
-2.718281828459045235360287471352662497757247093699959574966967627724076630353547594571382178525166427427466391932003059921817413596629043572900334295260595630738132328627943490763233829880753195251019011573834187930702154089149934884167509244761460668082264800168477411853742345442437107539077744992069551702761838606261331384583000752044933826560297606737113200709328709127443747047230696977209310141692836819025515108657463772111252389784425056953696770785449969967946864454905987931636889230098793127736178215424999229576351482208269895193668033182528869398496465105820939239829488793320362509443117301238197068416140397019837679320683282376464804295311802328782509819455815301756717361332069811250996181881593041690351598888519345807273866738589422879228499892086805825749279610484198444363463244968487560233624827041978623209002160990235304369941849146314093431738143640546253152096183690888707016768396424378140592714563549061303107208510383750510115747704171898610687396965521267154688957035035402123407849819334321068170121005627880235193033224745015853904730419957777093503660416997329725088687696640355570716226844716256079882651787134195124665201030592123667719432527867539855894489697096409754591856956380236370162112047742722836489613422516445078182442352948636372141740238893441247963574370263755294448337998016125492278509257782562092622648326277933386566481627725164019105900491644998289315056604725802778631864155195653244258698294695930801915298721172556347546396447910145904090586298496791287406870504895858671747985466775757320568128845920541334053922000113786300945560688166740016984205580403363795376452030402432256613527836951177883863874439662532249850654995886234281899707733276171783928034946501434558897071942586398772754710962953741521115136835062752602326484728703920764310059584116612054529703023647254929666938115137322753645098889031360205724817658511806303644281231496550704751025446501172721155519486685080036853228183152196003735625279449515828418829478761085263981395599006737648292244375287184624578036192981971399147564488262603903381441823262515097482798777996437308997038886778227138360577297882412561190717663946507063304527954661855096666185664709711344474016070462621568071748187784437143698821855967095910259686200235371858874856965220005031173439207321139080329363447972735595527734907178379342163701205005451326383544000186323991490705479778056697853358048966906295119432473099587655236812859041383241160722602998330535370876138939639177957454016137223618789365260538155841587186925538606164779834025435128439612946035291332594279490433729908573158029095863138268329147711639633709240031689458636060645845925126994655724839186564209752685082307544254599376917041977780085362730941710163434907696423722294352366125572508814779223151974778060569672538017180776360346245927877846585065605078084421152969752189087401966090665180351650179250461950136658543663271254963990854914420001457476081930221206602433009641270489439039717719518069908699860663658323227870937650226014929101151717763594460202324930028040186772391028809786660565118326004368850881715723866984224220102495055188169480322100251542649463981287367765892768816359831247788652014117411091360116499507662907794364600585194199856016264790761532103872755712699251827568798930276176114616254935649590379804583818232336861201624373656984670378585330527583333793990752166069238053369887956513728559388349989470741618155012539706464817194670834819721448889879067650379590366967249499254527903372963616265897603949857674139735944102374432970935547798262961
459144293645142861715858733974679189757121195618738578364475844842355558105002561149239151889309946342841393608038309166281881150371528496705974162562823609216807515017772538740256425347087908913729172282861151591568372524163077225440633787593105982676094420326192428531701878177296023541306067213604600038966109364709514141718577701418060644363681546444005331608778314317444081194942297559931401188868331483280270655383300469329011574414756313999722170380461709289457909627166226074071874997535921275608441473782330327033016823719364800217328573493594756433412994302485023573221459784328264142168487872167336701061509424345698440187331281010794512722373788612605816566805371439612788873252737389039289050686532413806279602593038772769778379286840932536588073398845721874602100531148335132385004782716937621800490479559795929059165547050577751430817511269898518840871856402603530558373783242292418562564425502267215598027401261797192804713960068916382866527700975276706977703643926022437284184088325184877047263844037953016690546593746161932384036389313136432713768884102681121989127522305625675625470172508634976536728860596675274086862740791285657699631378975303466061666980421826772456053066077389962421834085988207186468262321508028828635974683965435885668550377313129658797581050121491620765676995065971534476347032085321560367482860837865680307306265763346977429563464371670939719306087696349532884683361303882943104080029687386911706666614680001512114344225602387447432525076938707777519329994213727721125884360871583483562696166198057252661220679754062106208064988291845439530152998209250300549825704339055357016865312052649561485724925738620691740369521353373253166634546658859728665945113644137033139367211856955395210845840724432383558606310680696492485123263269951460359603729725319836842336390463213671011619282171115028280160448805880238203198149309636959673583274202498824568494127386056649135252670604623445054922758115170931492187959271800194096886698683703730220047531433818109270803001720593553052070070607223399946399057131158709963577735902719628506114651483752620956534671329002599439766311454590268589897911583709341937044115512192011716488056694593813118384376562062784631049034629395002945834116482411496975832601180073169943739350696629571241027323913874175492307186245454322203955273529524024590380574450289224688628533654221381572213116328811205214648980518009202471939171055539011394331668151582884368760696110250517100739276238555338627255353883096067164466237092264680967125406186950214317621166814009759528149390722260111268115310838731761732323526360583817315103459573653822353499293582283685100781088463434998351840445170427018938199424341009057537625776757111809008816418331920196262341628816652137471732547772778348877436651882875215668571950637193656539038944936642176400312152787022236646363575550356557694888654950027085392361710550213114741374410613444554419210133617299628569489919336918472947858072915608851039678195942983318648075608367955149663644896559294818785178403877332624705194505041984774201418394773120281588684570729054405751060128525805659470304683634459265255213700806875200959345360731622611872817392807462309468536782310609792159936001994623799343421068781349734695924646975250624695861690917857397659519939299399556754271465491045686070209901260681870498417807917392407194599632306025470790177452751318680998228473086076653686685551646770291133682756310722334672611370549079536583453863719623585631261838715677411873852772292259474337378569553845624680101390572787101651296663676445187246565373040
2443684140814488732957847348490003019477888020460324660842875351848364959195082888323206522128104190448047247949291342284951970022601310430062410717971502793433263407995960531446053230488528972917659876016667811937932372453857209607582277178483361613582612896226118129455927462767137794487586753657544861407611931125958512655759734573015333642630767985443385761715333462325270572005303988289499034259566232975782488735029259166825894456894655992658454762694528780516501720674785417887982276806536650641910973434528878338621726156269582654478205672987756426325321594294418039943217000090542650763095588465895171709147607437136893319469090981904501290307099566226620303182649365733698419555776963787624918852865686607600566025605445711337286840205574416030837052312242587223438854123179481388550075689381124935386318635287083799845692619981794523364087429591180747453419551420351726184200845509170845682368200897739455842679214273477560879644279202708312150156406341341617166448069815483764491573900121217041547872591998943825364950514771379399147205219529079396137621107238494290616357604596231253506068537651423115349665683715116604220796394466621163255157729070978473156278277598788136491951257483328793771571459091064841642678309949723674420175862269402159407924480541255360431317992696739157542419296607312393763542139230617876753958711436104089409966089471418340698362993675362621545247298464213752891079884381306095552622720837518629837066787224430195793793786072107254277289071732854874374355781966511716618330881129120245204048682200072344035025448202834254187884653602591506445271657700044521097735585897622655484941621714989532383421600114062950718490427789258552743035221396835679018076406042138307308774460170842688272261177180842664333651780002171903449234264266292261456004337383868335555343453004264818473989215627086095650629340405264943244261445665921291225648893569655009154306426134252668472594914314239398845432486327461842846655985332312210466259890141712103446084271616619001257195870793217569698544013397622096749454185407118446433946990162698351607848924514058940946395267807354579700307051163682519487701189764002827648414160587206184185297189154019688253289309149665345753571427318482016384644832499037886069008072709327673127581966563941148961716832980455139729506687604740915420428429993541025829113502241690769431668574242522509026939034814856451303069925199590436384028429267412573422447765584177886171737265462085498294498946787350929581652632072258992368768457017823038096567883112289305809140572610865884845873101658151167533327674887014829167419701512559782572707406431808601428149024146780472327597684269633935773542930186739439716388611764209004068663398856841681003872389214483176070116684503887212364367043314091155733280182977988736590916659612402021778558854876176161989370794380056663364884365089144805571039765214696027662583599051987042300179465536788
+++ /dev/null
-3.141592653589793238462643383279502884197169399375105820974944592307816406286208998628034825342117067982148086513282306647093844609550582231725359408128481117450284102701938521105559644622948954930381964428810975665933446128475648233786783165271201909145648566923460348610454326648213393607260249141273724587006606315588174881520920962829254091715364367892590360011330530548820466521384146951941511609433057270365759591953092186117381932611793105118548074462379962749567351885752724891227938183011949129833673362440656643086021394946395224737190702179860943702770539217176293176752384674818467669405132000568127145263560827785771342757789609173637178721468440901224953430146549585371050792279689258923542019956112129021960864034418159813629774771309960518707211349999998372978049951059731732816096318595024459455346908302642522308253344685035261931188171010003137838752886587533208381420617177669147303598253490428755468731159562863882353787593751957781857780532171226806613001927876611195909216420198938095257201065485863278865936153381827968230301952035301852968995773622599413891249721775283479131515574857242454150695950829533116861727855889075098381754637464939319255060400927701671139009848824012858361603563707660104710181942955596198946767837449448255379774726847104047534646208046684259069491293313677028989152104752162056966024058038150193511253382430035587640247496473263914199272604269922796782354781636009341721641219924586315030286182974555706749838505494588586926995690927210797509302955321165344987202755960236480665499119881834797753566369807426542527862551818417574672890977772793800081647060016145249192173217214772350141441973568548161361157352552133475741849468438523323907394143334547762416862518983569485562099219222184272550254256887671790494601653466804988627232791786085784383827967976681454100953883786360950680064225125205117392984896084128488626945604241965285022210661186306744278622039194945047123713786960956364371917287467764657573962413890865832645995813390478027590099465764078951269468398352595709825822620522489407726719478268482601476990902640136394437455305068203496252451749399651431429809190659250937221696461515709858387410597885959772975498930161753928468138268683868942774155991855925245953959431049972524680845987273644695848653836736222626099124608051243884390451244136549762780797715691435997700129616089441694868555848406353422072225828488648158456028506016842739452267467678895252138522549954666727823986456596116354886230577456498035593634568174324112515076069479451096596094025228879710893145669136867228748940560101503308617928680920874760917824938589009714909675985261365549781893129784821682998948722658804857564014270477555132379641451523746234364542858444795265867821051141354735739523113427166102135969536231442952484937187110145765403590279934403742007310578539062198387447808478489683321445713868751943506430218453191048481005370614680674919278191197939952061419663428754440643745123718192179998391015919561814675142691239748940907186494231961567945208095146550225231603881930142093762137855956638937787083039069792077346722182562599661501421503068038447734549202605414665925201497442850732518666002132434088190710486331734649651453905796268561005508106658796998163574736384052571459102897064140110971206280439039759515677157700420337869936007230558763176359421873125147120532928191826186125867321579198414848829164470609575270695722091756711672291098169091528017350671274858322287183520935396572512108357915136988209144421006751033467110314126711136990865851639831501970165151168517143765761835
155650884909989859982387345528331635507647918535893226185489632132933089857064204675259070915481416549859461637180270981994309924488957571282890592323326097299712084433573265489382391193259746366730583604142813883032038249037589852437441702913276561809377344403070746921120191302033038019762110110044929321516084244485963766983895228684783123552658213144957685726243344189303968642624341077322697802807318915441101044682325271620105265227211166039666557309254711055785376346682065310989652691862056476931257058635662018558100729360659876486117910453348850346113657686753249441668039626579787718556084552965412665408530614344431858676975145661406800700237877659134401712749470420562230538994561314071127000407854733269939081454664645880797270826683063432858785698305235808933065757406795457163775254202114955761581400250126228594130216471550979259230990796547376125517656751357517829666454779174501129961489030463994713296210734043751895735961458901938971311179042978285647503203198691514028708085990480109412147221317947647772622414254854540332157185306142288137585043063321751829798662237172159160771669254748738986654949450114654062843366393790039769265672146385306736096571209180763832716641627488880078692560290228472104031721186082041900042296617119637792133757511495950156604963186294726547364252308177036751590673502350728354056704038674351362222477158915049530984448933309634087807693259939780541934144737744184263129860809988868741326047215695162396586457302163159819319516735381297416772947867242292465436680098067692823828068996400482435403701416314965897940924323789690706977942236250822168895738379862300159377647165122893578601588161755782973523344604281512627203734314653197777416031990665541876397929334419521541341899485444734567383162499341913181480927777103863877343177207545654532207770921201905166096280490926360197598828161332316663652861932668633606273567630354477628035045077723554710585954870279081435624014517180624643626794561275318134078330336254232783944975382437205835311477119926063813346776879695970309833913077109870408591337464144282277263465947047458784778720192771528073176790770715721344473060570073349243693113835049316312840425121925651798069411352801314701304781643788518529092854520116583934196562134914341595625865865570552690496520985803385072242648293972858478316305777756068887644624824685792603953527734803048029005876075825104747091643961362676044925627420420832085661190625454337213153595845068772460290161876679524061634252257719542916299193064553779914037340432875262888963995879475729174642635745525407909145135711136941091193932519107602082520261879853188770584297259167781314969900901921169717372784768472686084900337702424291651300500516832336435038951702989392233451722013812806965011784408745196012122859937162313017114448464090389064495444006198690754851602632750529834918740786680881833851022833450850486082503930213321971551843063545500766828294930413776552793975175461395398468339363830474611996653858153842056853386218672523340283087112328278921250771262946322956398989893582116745627010218356462201349671518819097303811980049734072396103685406643193950979019069963955245300545058068550195673022921913933918568034490398205955100226353536192041994745538593810234395544959778377902374216172711172364343543947822181852862408514006660443325888569867054315470696574745855033232334210730154594051655379068662733379958511562578432298827372319898757141595781119635833005940873068121602876496286744604774649159950549737425626901049037781986835938146574126804925648798556145372347867330390468838343634655
3794986419270563872931748723320837601123029911367938627089438799362016295154133714248928307220126901475466847653576164773794675200490757155527819653621323926406160136358155907422020203187277605277219005561484255518792530343513984425322341576233610642506390497500865627109535919465897514131034822769306247435363256916078154781811528436679570611086153315044521274739245449454236828860613408414863776700961207151249140430272538607648236341433462351897576645216413767969031495019108575984423919862916421939949072362346468441173940326591840443780513338945257423995082965912285085558215725031071257012668302402929525220118726767562204154205161841634847565169998116141010029960783869092916030288400269104140792886215078424516709087000699282120660418371806535567252532567532861291042487761825829765157959847035622262934860034158722980534989650226291748788202734209222245339856264766914905562842503912757710284027998066365825488926488025456610172967026640765590429099456815065265305371829412703369313785178609040708667114965583434347693385781711386455873678123014587687126603489139095620099393610310291616152881384379099042317473363948045759314931405297634757481193567091101377517210080315590248530906692037671922033229094334676851422144773793937517034436619910403375111735471918550464490263655128162288244625759163330391072253837421821408835086573917715096828874782656995995744906617583441375223970968340800535598491754173818839994469748676265516582765848358845314277568790029095170283529716344562129640435231176006651012412006597558512761785838292041974844236080071930457618932349229279650198751872127267507981255470958904556357921221033346697499235630254947802490114195212382815309114079073860251522742995818072471625916685451333123948049470791191532673430282441860414263639548000448002670496248201792896476697583183271314251702969234889627668440323260927524960357996469256504936818360900323809293459588970695365349406034021665443755890045632882250545255640564482465151875471196218443965825337543885690941130315095261793780029741207665147939425902989695946995565761218656196733786236256125216320862869222103274889218654364802296780705765615144632046927906821207388377814233562823608963208068222468012248261177185896381409183903673672220888321513755600372798394004152970028783076670944474560134556417254370906979396122571429894671543578468788614445812314593571984922528471605049221242470141214780573455105008019086996033027634787081081754501193071412233908663938339529425786905076431006383519834389341596131854347546495569781038293097164651438407007073604112373599843452251610507027056235266012764848308407611830130527932054274628654036036745328651057065874882256981579367897669742205750596834408697350201410206723585020072452256326513410559240190274216248439140359989535394590944070469120914093870012645600162374288021092764579310657922955249887275846101264836999892256959688159205600101655256375678
// Any type that implements heap.Interface may be used as a
// min-heap with the following invariants (established after
-// Init has been called):
+// Init has been called or if the data is empty or sorted):
//
// !h.Less(j, i) for 0 <= i < h.Len() and j = 2*i+1 or 2*i+2 and j < h.Len()
//
+// Note that Push and Pop in this interface are for package heap's
+// implementation to call. To add and remove things from the heap,
+// use heap.Push and heap.Pop.
type Interface interface {
sort.Interface
- Push(x interface{})
- Pop() interface{}
+ Push(x interface{}) // add x as element Len()
+ Pop() interface{} // remove and return element Len() - 1.
}
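To make the new Push/Pop comments concrete, a minimal sketch of a type satisfying heap.Interface, in the spirit of the myHeap test type below (intHeap here is illustrative):

package main

import (
	"container/heap"
	"fmt"
)

// intHeap is a min-heap of ints implementing heap.Interface.
type intHeap []int

func (h intHeap) Len() int            { return len(h) }
func (h intHeap) Less(i, j int) bool  { return h[i] < h[j] }
func (h intHeap) Swap(i, j int)       { h[i], h[j] = h[j], h[i] }
func (h *intHeap) Push(x interface{}) { *h = append(*h, x.(int)) } // add x as element Len()
func (h *intHeap) Pop() interface{} { // remove and return element Len() - 1
	old := *h
	n := len(old)
	x := old[n-1]
	*h = old[:n-1]
	return x
}

func main() {
	h := &intHeap{5, 2, 9}
	heap.Init(h)
	heap.Push(h, 1)
	fmt.Println(heap.Pop(h)) // 1, the minimum
}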
// A heap must be initialized before any of the heap operations
package heap_test
import (
- "testing"
. "container/heap"
+ "testing"
)
type myHeap []int
package dsa
import (
- "big"
"errors"
"io"
+ "math/big"
)
// Parameters represents the domain parameters for a key. These parameters can
package dsa
import (
- "big"
"crypto/rand"
+ "math/big"
"testing"
)
// http://www.secg.org/download/aid-780/sec1-v2.pdf
import (
- "big"
"crypto/elliptic"
"io"
+ "math/big"
)
// PublicKey represents an ECDSA public key.
package ecdsa
import (
- "big"
"crypto/elliptic"
- "crypto/sha1"
"crypto/rand"
+ "crypto/sha1"
"encoding/hex"
+ "math/big"
"testing"
)
// reverse the transform than to operate in affine coordinates.
import (
- "big"
"io"
+ "math/big"
"sync"
)
package elliptic
import (
- "big"
"crypto/rand"
"fmt"
+ "math/big"
"testing"
)
package hmac
import (
- "hash"
"fmt"
+ "hash"
"testing"
)
package ocsp
import (
- "asn1"
"crypto"
"crypto/rsa"
_ "crypto/sha1"
"crypto/x509"
"crypto/x509/pkix"
+ "encoding/asn1"
"time"
)
}
// Decode reads a PGP armored block from the given Reader. It will ignore
-// leading garbage. If it doesn't find a block, it will return nil, os.EOF. The
+// leading garbage. If it doesn't find a block, it will return nil, io.EOF. The
// given Reader is not usable after calling this function: an arbitrary amount
// of data may have been read past the end of the block.
func Decode(in io.Reader) (p *Block, err error) {
package elgamal
import (
- "big"
"crypto/rand"
"crypto/subtle"
"errors"
"io"
+ "math/big"
)
// PublicKey represents an ElGamal public key.
package elgamal
import (
- "big"
"bytes"
"crypto/rand"
+ "math/big"
"testing"
)
package packet
import (
- "big"
"crypto/openpgp/elgamal"
error_ "crypto/openpgp/error"
"crypto/rand"
"crypto/rsa"
"encoding/binary"
"io"
+ "math/big"
"strconv"
)
package packet
import (
- "big"
"bytes"
"crypto/rand"
"crypto/rsa"
"fmt"
+ "math/big"
"testing"
)
package packet
import (
- "big"
"crypto/aes"
"crypto/cast5"
"crypto/cipher"
error_ "crypto/openpgp/error"
"io"
+ "math/big"
)
// readFull is the same as io.ReadFull except that reading zero bytes returns
package packet
import (
- "big"
"bytes"
"crypto/cipher"
"crypto/dsa"
"crypto/sha1"
"io"
"io/ioutil"
+ "math/big"
"strconv"
)
package packet
import (
- "big"
"crypto/dsa"
"crypto/openpgp/elgamal"
error_ "crypto/openpgp/error"
"fmt"
"hash"
"io"
+ "math/big"
"strconv"
)
import (
"bytes"
- "crypto/sha1"
"crypto/rand"
+ "crypto/sha1"
"encoding/hex"
"testing"
)
package rand
import (
- "big"
"io"
+ "math/big"
"os"
)
package rsa
import (
- "big"
"crypto"
"crypto/subtle"
"errors"
"io"
+ "math/big"
)
// This file implements encryption and decryption using PKCS#1 v1.5 padding.
package rsa
import (
- "big"
"bytes"
"crypto"
"crypto/rand"
"encoding/base64"
"encoding/hex"
"io"
+ "math/big"
"testing"
"testing/quick"
)
// TODO(agl): Add support for PSS padding.
import (
- "big"
"crypto/rand"
"crypto/subtle"
"errors"
"hash"
"io"
+ "math/big"
)
var bigZero = big.NewInt(0)
package rsa
import (
- "big"
"bytes"
"crypto/rand"
"crypto/sha1"
+ "math/big"
"testing"
)
// RFC suggests that EOF without an alertCloseNotify is
// an error, but popular web sites seem to do this,
// so we can't make it an error.
- // if err == os.EOF {
+ // if err == io.EOF {
// err = io.ErrUnexpectedEOF
// }
if e, ok := err.(net.Error); !ok || !e.Temporary() {
package main
import (
- "big"
- "crypto/x509/pkix"
"crypto/rand"
"crypto/rsa"
"crypto/x509"
+ "crypto/x509/pkix"
"encoding/pem"
"flag"
"log"
+ "math/big"
"os"
"time"
)
package tls
import (
- "rand"
+ "math/rand"
"reflect"
"testing"
"testing/quick"
package tls
import (
- "big"
"bytes"
"crypto/rsa"
"encoding/hex"
"flag"
"io"
+ "math/big"
"net"
"strconv"
"strings"
package tls
import (
- "big"
"crypto"
"crypto/elliptic"
"crypto/md5"
"crypto/x509"
"errors"
"io"
+ "math/big"
)
// rsaKeyAgreement implements the standard TLS key agreement where the client
package x509
import (
- "asn1"
- "big"
- "errors"
"crypto/rsa"
+ "encoding/asn1"
+ "errors"
+ "math/big"
)
// pkcs1PrivateKey is a structure which mirrors the PKCS#1 ASN.1 for an RSA private key.
package pkix
import (
- "asn1"
- "big"
+ "encoding/asn1"
+ "math/big"
"time"
)
package x509
import (
- "asn1"
- "big"
"bytes"
"crypto"
"crypto/dsa"
"crypto/rsa"
"crypto/sha1"
"crypto/x509/pkix"
+ "encoding/asn1"
"encoding/pem"
"errors"
"io"
+ "math/big"
"time"
)
package x509
import (
- "asn1"
- "big"
"bytes"
"crypto/dsa"
"crypto/rand"
"crypto/rsa"
"crypto/x509/pkix"
+ "encoding/asn1"
"encoding/base64"
"encoding/hex"
"encoding/pem"
+ "math/big"
"testing"
"time"
)
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package csv reads and writes comma-separated values (CSV) files.
-//
-// A csv file contains zero or more records of one or more fields per record.
-// Each record is separated by the newline character. The final record may
-// optionally be followed by a newline character.
-//
-// field1,field2,field3
-//
-// White space is considered part of a field.
-//
-// Carriage returns before newline characters are silently removed.
-//
-// Blank lines are ignored. A line with only whitespace characters (excluding
-// the ending newline character) is not considered a blank line.
-//
-// Fields which start and stop with the quote character " are called
-// quoted-fields. The beginning and ending quote are not part of the
-// field.
-//
-// The source:
-//
-// normal string,"quoted-field"
-//
-// results in the fields
-//
-// {`normal string`, `quoted-field`}
-//
-// Within a quoted-field a quote character followed by a second quote
-// character is considered a single quote.
-//
-// "the ""word"" is true","a ""quoted-field"""
-//
-// results in
-//
-// {`the "word" is true`, `a "quoted-field"`}
-//
-// Newlines and commas may be included in a quoted-field
-//
-// "Multi-line
-// field","comma is ,"
-//
-// results in
-//
-// {`Multi-line
-// field`, `comma is ,`}
-package csv
-
-import (
- "bufio"
- "bytes"
- "errors"
- "fmt"
- "io"
- "unicode"
-)
-
-// A ParseError is returned for parsing errors.
-// The first line is 1. The first column is 0.
-type ParseError struct {
- Line int // Line where the error occurred
- Column int // Column (rune index) where the error occurred
- Err error // The actual error
-}
-
-func (e *ParseError) Error() string {
- return fmt.Sprintf("line %d, column %d: %s", e.Line, e.Column, e.Err)
-}
-
-// These are the errors that can be returned in ParseError.Err
-var (
- ErrTrailingComma = errors.New("extra delimiter at end of line")
- ErrBareQuote = errors.New("bare \" in non-quoted-field")
- ErrQuote = errors.New("extraneous \" in field")
- ErrFieldCount = errors.New("wrong number of fields in line")
-)
-
-// A Reader reads records from a CSV-encoded file.
-//
-// As returned by NewReader, a Reader expects input conforming to RFC 4180.
-// The exported fields can be changed to customize the details before the
-// first call to Read or ReadAll.
-//
-// Comma is the field delimiter. It defaults to ','.
-//
-// Comment, if not 0, is the comment character. Lines beginning with the
-// Comment character are ignored.
-//
-// If FieldsPerRecord is positive, Read requires each record to
-// have the given number of fields. If FieldsPerRecord is 0, Read sets it to
-// the number of fields in the first record, so that future records must
-// have the same field count.
-//
-// If LazyQuotes is true, a quote may appear in an unquoted field and a
-// non-doubled quote may appear in a quoted field.
-//
-// If TrailingComma is true, the last field may be an unquoted empty field.
-//
-// If TrimLeadingSpace is true, leading white space in a field is ignored.
-type Reader struct {
- Comma rune // Field delimiter (set to ',' by NewReader)
- Comment rune // Comment character for start of line
- FieldsPerRecord int // Number of expected fields per record
- LazyQuotes bool // Allow lazy quotes
- TrailingComma bool // Allow trailing comma
- TrimLeadingSpace bool // Trim leading space
- line int
- column int
- r *bufio.Reader
- field bytes.Buffer
-}
-
-// NewReader returns a new Reader that reads from r.
-func NewReader(r io.Reader) *Reader {
- return &Reader{
- Comma: ',',
- r: bufio.NewReader(r),
- }
-}
-
-// error creates a new ParseError based on err.
-func (r *Reader) error(err error) error {
- return &ParseError{
- Line: r.line,
- Column: r.column,
- Err: err,
- }
-}
-
-// Read reads one record from r. The record is a slice of strings with each
-// string representing one field.
-func (r *Reader) Read() (record []string, err error) {
- for {
- record, err = r.parseRecord()
- if record != nil {
- break
- }
- if err != nil {
- return nil, err
- }
- }
-
- if r.FieldsPerRecord > 0 {
- if len(record) != r.FieldsPerRecord {
- r.column = 0 // report at start of record
- return record, r.error(ErrFieldCount)
- }
- } else if r.FieldsPerRecord == 0 {
- r.FieldsPerRecord = len(record)
- }
- return record, nil
-}
-
-// ReadAll reads all the remaining records from r.
-// Each record is a slice of fields.
-func (r *Reader) ReadAll() (records [][]string, err error) {
- for {
- record, err := r.Read()
- if err == io.EOF {
- return records, nil
- }
- if err != nil {
- return nil, err
- }
- records = append(records, record)
- }
- panic("unreachable")
-}
-
-// readRune reads one rune from r, folding \r\n to \n and keeping track
-// of how far into the line we have read. r.column will point to the start
-// of this rune, not the end of this rune.
-func (r *Reader) readRune() (rune, error) {
- r1, _, err := r.r.ReadRune()
-
- // Handle \r\n here. We make the simplifying assumption that
- // any time \r is followed by \n, it can be folded to \n.
- // We will not detect files which contain both \r\n and bare \n.
- if r1 == '\r' {
- r1, _, err = r.r.ReadRune()
- if err == nil {
- if r1 != '\n' {
- r.r.UnreadRune()
- r1 = '\r'
- }
- }
- }
- r.column++
- return r1, err
-}
-
-// unreadRune puts the last rune read from r back.
-func (r *Reader) unreadRune() {
- r.r.UnreadRune()
- r.column--
-}
-
-// skip reads runes up to and including the rune delim or until error.
-func (r *Reader) skip(delim rune) error {
- for {
- r1, err := r.readRune()
- if err != nil {
- return err
- }
- if r1 == delim {
- return nil
- }
- }
- panic("unreachable")
-}
-
-// parseRecord reads and parses a single csv record from r.
-func (r *Reader) parseRecord() (fields []string, err error) {
- // Each record starts on a new line. We increment our line
- // number (lines start at 1, not 0) and set column to -1
- // so as we increment in readRune it points to the character we read.
- r.line++
- r.column = -1
-
- // Peek at the first rune. If it is an error we are done.
- // If we support comments and it is the comment character,
- // then skip to the end of line.
-
- r1, _, err := r.r.ReadRune()
- if err != nil {
- return nil, err
- }
-
- if r.Comment != 0 && r1 == r.Comment {
- return nil, r.skip('\n')
- }
- r.r.UnreadRune()
-
- // At this point we have at least one field.
- for {
- haveField, delim, err := r.parseField()
- if haveField {
- fields = append(fields, r.field.String())
- }
- if delim == '\n' || err == io.EOF {
- return fields, err
- } else if err != nil {
- return nil, err
- }
- }
- panic("unreachable")
-}
-
-// parseField parses the next field in the record. The read field is
-// located in r.field. Delim is the first character not part of the field
-// (r.Comma or '\n').
-func (r *Reader) parseField() (haveField bool, delim rune, err error) {
- r.field.Reset()
-
- r1, err := r.readRune()
- if err != nil {
- // If we have EOF and are not at the start of a line
- // then we return the empty field. We have already
- // checked for trailing commas if needed.
- if err == io.EOF && r.column != 0 {
- return true, 0, err
- }
- return false, 0, err
- }
-
- if r.TrimLeadingSpace {
- for r1 != '\n' && unicode.IsSpace(r1) {
- r1, err = r.readRune()
- if err != nil {
- return false, 0, err
- }
- }
- }
-
- switch r1 {
- case r.Comma:
- // will check below
-
- case '\n':
- // We are a trailing empty field or a blank line
- if r.column == 0 {
- return false, r1, nil
- }
- return true, r1, nil
-
- case '"':
- // quoted field
- Quoted:
- for {
- r1, err = r.readRune()
- if err != nil {
- if err == io.EOF {
- if r.LazyQuotes {
- return true, 0, err
- }
- return false, 0, r.error(ErrQuote)
- }
- return false, 0, err
- }
- switch r1 {
- case '"':
- r1, err = r.readRune()
- if err != nil || r1 == r.Comma {
- break Quoted
- }
- if r1 == '\n' {
- return true, r1, nil
- }
- if r1 != '"' {
- if !r.LazyQuotes {
- r.column--
- return false, 0, r.error(ErrQuote)
- }
- // accept the bare quote
- r.field.WriteRune('"')
- }
- case '\n':
- r.line++
- r.column = -1
- }
- r.field.WriteRune(r1)
- }
-
- default:
- // unquoted field
- for {
- r.field.WriteRune(r1)
- r1, err = r.readRune()
- if err != nil || r1 == r.Comma {
- break
- }
- if r1 == '\n' {
- return true, r1, nil
- }
- if !r.LazyQuotes && r1 == '"' {
- return false, 0, r.error(ErrBareQuote)
- }
- }
- }
-
- if err != nil {
- if err == io.EOF {
- return true, 0, err
- }
- return false, 0, err
- }
-
- if !r.TrailingComma {
- // We don't allow trailing commas. See if we
- // are at the end of the line (being mindful
- // of trimming spaces).
- c := r.column
- r1, err = r.readRune()
- if r.TrimLeadingSpace {
- for r1 != '\n' && unicode.IsSpace(r1) {
- r1, err = r.readRune()
- if err != nil {
- break
- }
- }
- }
- if err == io.EOF || r1 == '\n' {
- r.column = c // report the comma
- return false, 0, r.error(ErrTrailingComma)
- }
- r.unreadRune()
- }
- return true, r1, nil
-}
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package csv
-
-import (
- "reflect"
- "strings"
- "testing"
-)
-
-var readTests = []struct {
- Name string
- Input string
- Output [][]string
- UseFieldsPerRecord bool // false (default) means FieldsPerRecord is -1
-
- // These fields are copied into the Reader
- Comma rune
- Comment rune
- FieldsPerRecord int
- LazyQuotes bool
- TrailingComma bool
- TrimLeadingSpace bool
-
- Error string
- Line int // Expected error line if != 0
- Column int // Expected error column if line != 0
-}{
- {
- Name: "Simple",
- Input: "a,b,c\n",
- Output: [][]string{{"a", "b", "c"}},
- },
- {
- Name: "CRLF",
- Input: "a,b\r\nc,d\r\n",
- Output: [][]string{{"a", "b"}, {"c", "d"}},
- },
- {
- Name: "BareCR",
- Input: "a,b\rc,d\r\n",
- Output: [][]string{{"a", "b\rc", "d"}},
- },
- {
- Name: "RFC4180test",
- UseFieldsPerRecord: true,
- Input: `#field1,field2,field3
-"aaa","bb
-b","ccc"
-"a,a","b""bb","ccc"
-zzz,yyy,xxx
-`,
- Output: [][]string{
- {"#field1", "field2", "field3"},
- {"aaa", "bb\nb", "ccc"},
- {"a,a", `b"bb`, "ccc"},
- {"zzz", "yyy", "xxx"},
- },
- },
- {
- Name: "NoEOLTest",
- Input: "a,b,c",
- Output: [][]string{{"a", "b", "c"}},
- },
- {
- Name: "Semicolon",
- Comma: ';',
- Input: "a;b;c\n",
- Output: [][]string{{"a", "b", "c"}},
- },
- {
- Name: "MultiLine",
- Input: `"two
-line","one line","three
-line
-field"`,
- Output: [][]string{{"two\nline", "one line", "three\nline\nfield"}},
- },
- {
- Name: "BlankLine",
- Input: "a,b,c\n\nd,e,f\n\n",
- Output: [][]string{
- {"a", "b", "c"},
- {"d", "e", "f"},
- },
- },
- {
- Name: "TrimSpace",
- Input: " a, b, c\n",
- TrimLeadingSpace: true,
- Output: [][]string{{"a", "b", "c"}},
- },
- {
- Name: "LeadingSpace",
- Input: " a, b, c\n",
- Output: [][]string{{" a", " b", " c"}},
- },
- {
- Name: "Comment",
- Comment: '#',
- Input: "#1,2,3\na,b,c\n#comment",
- Output: [][]string{{"a", "b", "c"}},
- },
- {
- Name: "NoComment",
- Input: "#1,2,3\na,b,c",
- Output: [][]string{{"#1", "2", "3"}, {"a", "b", "c"}},
- },
- {
- Name: "LazyQuotes",
- LazyQuotes: true,
- Input: `a "word","1"2",a","b`,
- Output: [][]string{{`a "word"`, `1"2`, `a"`, `b`}},
- },
- {
- Name: "BareQuotes",
- LazyQuotes: true,
- Input: `a "word","1"2",a"`,
- Output: [][]string{{`a "word"`, `1"2`, `a"`}},
- },
- {
- Name: "BareDoubleQuotes",
- LazyQuotes: true,
- Input: `a""b,c`,
- Output: [][]string{{`a""b`, `c`}},
- },
- {
- Name: "BadDoubleQuotes",
- Input: `a""b,c`,
- Error: `bare " in non-quoted-field`, Line: 1, Column: 1,
- },
- {
- Name: "TrimQuote",
- Input: ` "a"," b",c`,
- TrimLeadingSpace: true,
- Output: [][]string{{"a", " b", "c"}},
- },
- {
- Name: "BadBareQuote",
- Input: `a "word","b"`,
- Error: `bare " in non-quoted-field`, Line: 1, Column: 2,
- },
- {
- Name: "BadTrailingQuote",
- Input: `"a word",b"`,
- Error: `bare " in non-quoted-field`, Line: 1, Column: 10,
- },
- {
- Name: "ExtraneousQuote",
- Input: `"a "word","b"`,
- Error: `extraneous " in field`, Line: 1, Column: 3,
- },
- {
- Name: "BadFieldCount",
- UseFieldsPerRecord: true,
- Input: "a,b,c\nd,e",
- Error: "wrong number of fields", Line: 2,
- },
- {
- Name: "BadFieldCount1",
- UseFieldsPerRecord: true,
- FieldsPerRecord: 2,
- Input: `a,b,c`,
- Error: "wrong number of fields", Line: 1,
- },
- {
- Name: "FieldCount",
- Input: "a,b,c\nd,e",
- Output: [][]string{{"a", "b", "c"}, {"d", "e"}},
- },
- {
- Name: "BadTrailingCommaEOF",
- Input: "a,b,c,",
- Error: "extra delimiter at end of line", Line: 1, Column: 5,
- },
- {
- Name: "BadTrailingCommaEOL",
- Input: "a,b,c,\n",
- Error: "extra delimiter at end of line", Line: 1, Column: 5,
- },
- {
- Name: "BadTrailingCommaSpaceEOF",
- TrimLeadingSpace: true,
- Input: "a,b,c, ",
- Error: "extra delimiter at end of line", Line: 1, Column: 5,
- },
- {
- Name: "BadTrailingCommaSpaceEOL",
- TrimLeadingSpace: true,
- Input: "a,b,c, \n",
- Error: "extra delimiter at end of line", Line: 1, Column: 5,
- },
- {
- Name: "BadTrailingCommaLine3",
- TrimLeadingSpace: true,
- Input: "a,b,c\nd,e,f\ng,hi,",
- Error: "extra delimiter at end of line", Line: 3, Column: 4,
- },
- {
- Name: "NotTrailingComma3",
- Input: "a,b,c, \n",
- Output: [][]string{{"a", "b", "c", " "}},
- },
- {
- Name: "CommaFieldTest",
- TrailingComma: true,
- Input: `x,y,z,w
-x,y,z,
-x,y,,
-x,,,
-,,,
-"x","y","z","w"
-"x","y","z",""
-"x","y","",""
-"x","","",""
-"","","",""
-`,
- Output: [][]string{
- {"x", "y", "z", "w"},
- {"x", "y", "z", ""},
- {"x", "y", "", ""},
- {"x", "", "", ""},
- {"", "", "", ""},
- {"x", "y", "z", "w"},
- {"x", "y", "z", ""},
- {"x", "y", "", ""},
- {"x", "", "", ""},
- {"", "", "", ""},
- },
- },
- {
- Name: "Issue 2366",
- TrailingComma: true,
- TrimLeadingSpace: true,
- Input: "a,b,\nc,d,e",
- Output: [][]string{
- {"a", "b", ""},
- {"c", "d", "e"},
- },
- },
- {
- Name: "Issue 2366a",
- TrailingComma: false,
- TrimLeadingSpace: true,
- Input: "a,b,\nc,d,e",
- Error: "extra delimiter at end of line",
- },
-}
-
-func TestRead(t *testing.T) {
- for _, tt := range readTests {
- r := NewReader(strings.NewReader(tt.Input))
- r.Comment = tt.Comment
- if tt.UseFieldsPerRecord {
- r.FieldsPerRecord = tt.FieldsPerRecord
- } else {
- r.FieldsPerRecord = -1
- }
- r.LazyQuotes = tt.LazyQuotes
- r.TrailingComma = tt.TrailingComma
- r.TrimLeadingSpace = tt.TrimLeadingSpace
- if tt.Comma != 0 {
- r.Comma = tt.Comma
- }
- out, err := r.ReadAll()
- perr, _ := err.(*ParseError)
- if tt.Error != "" {
- if err == nil || !strings.Contains(err.Error(), tt.Error) {
- t.Errorf("%s: error %v, want error %q", tt.Name, err, tt.Error)
- } else if tt.Line != 0 && (tt.Line != perr.Line || tt.Column != perr.Column) {
- t.Errorf("%s: error at %d:%d expected %d:%d", tt.Name, perr.Line, perr.Column, tt.Line, tt.Column)
- }
- } else if err != nil {
- t.Errorf("%s: unexpected error %v", tt.Name, err)
- } else if !reflect.DeepEqual(out, tt.Output) {
- t.Errorf("%s: out=%q want %q", tt.Name, out, tt.Output)
- }
- }
-}
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package csv
-
-import (
- "bufio"
- "io"
- "strings"
- "unicode"
- "utf8"
-)
-
-// A Writer writes records to a CSV encoded file.
-//
-// As returned by NewWriter, a Writer writes records terminated by a
-// newline and uses ',' as the field delimiter. The exported fields can be
-// changed to customize the details before the first call to Write or WriteAll.
-//
-// Comma is the field delimiter.
-//
-// If UseCRLF is true, the Writer ends each record with \r\n instead of \n.
-type Writer struct {
- Comma rune // Field delimiter (set to ',' by NewWriter)
- UseCRLF bool // True to use \r\n as the line terminator
- w *bufio.Writer
-}
-
-// NewWriter returns a new Writer that writes to w.
-func NewWriter(w io.Writer) *Writer {
- return &Writer{
- Comma: ',',
- w: bufio.NewWriter(w),
- }
-}
-
-// Write writes a single CSV record to w along with any necessary quoting.
-// A record is a slice of strings with each string being one field.
-func (w *Writer) Write(record []string) (err error) {
- for n, field := range record {
- if n > 0 {
- if _, err = w.w.WriteRune(w.Comma); err != nil {
- return
- }
- }
-
- // If we don't have to have a quoted field then just
- // write out the field and continue to the next field.
- if !w.fieldNeedsQuotes(field) {
- if _, err = w.w.WriteString(field); err != nil {
- return
- }
- continue
- }
- if err = w.w.WriteByte('"'); err != nil {
- return
- }
-
- for _, r1 := range field {
- switch r1 {
- case '"':
- _, err = w.w.WriteString(`""`)
- case '\r':
- if !w.UseCRLF {
- err = w.w.WriteByte('\r')
- }
- case '\n':
- if w.UseCRLF {
- _, err = w.w.WriteString("\r\n")
- } else {
- err = w.w.WriteByte('\n')
- }
- default:
- _, err = w.w.WriteRune(r1)
- }
- if err != nil {
- return
- }
- }
-
- if err = w.w.WriteByte('"'); err != nil {
- return
- }
- }
- if w.UseCRLF {
- _, err = w.w.WriteString("\r\n")
- } else {
- err = w.w.WriteByte('\n')
- }
- return
-}
-
-// Flush writes any buffered data to the underlying io.Writer.
-func (w *Writer) Flush() {
- w.w.Flush()
-}
-
-// WriteAll writes multiple CSV records to w using Write and then calls Flush.
-func (w *Writer) WriteAll(records [][]string) (err error) {
- for _, record := range records {
- err = w.Write(record)
- if err != nil {
- break
- }
- }
- w.Flush()
- return err
-}
-
-// fieldNeedsQuotes returns true if our field must be enclosed in quotes.
-// Empty fields, fields with a Comma, fields with a quote or newline, and
-// fields which start with a space must be enclosed in quotes.
-func (w *Writer) fieldNeedsQuotes(field string) bool {
- if len(field) == 0 || strings.IndexRune(field, w.Comma) >= 0 || strings.IndexAny(field, "\"\r\n") >= 0 {
- return true
- }
-
- r1, _ := utf8.DecodeRuneInString(field)
- return unicode.IsSpace(r1)
-}
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package csv
-
-import (
- "bytes"
- "testing"
-)
-
-var writeTests = []struct {
- Input [][]string
- Output string
- UseCRLF bool
-}{
- {Input: [][]string{{"abc"}}, Output: "abc\n"},
- {Input: [][]string{{"abc"}}, Output: "abc\r\n", UseCRLF: true},
- {Input: [][]string{{`"abc"`}}, Output: `"""abc"""` + "\n"},
- {Input: [][]string{{`a"b`}}, Output: `"a""b"` + "\n"},
- {Input: [][]string{{`"a"b"`}}, Output: `"""a""b"""` + "\n"},
- {Input: [][]string{{" abc"}}, Output: `" abc"` + "\n"},
- {Input: [][]string{{"abc,def"}}, Output: `"abc,def"` + "\n"},
- {Input: [][]string{{"abc", "def"}}, Output: "abc,def\n"},
- {Input: [][]string{{"abc"}, {"def"}}, Output: "abc\ndef\n"},
- {Input: [][]string{{"abc\ndef"}}, Output: "\"abc\ndef\"\n"},
- {Input: [][]string{{"abc\ndef"}}, Output: "\"abc\r\ndef\"\r\n", UseCRLF: true},
-}
-
-func TestWrite(t *testing.T) {
- for n, tt := range writeTests {
- b := &bytes.Buffer{}
- f := NewWriter(b)
- f.UseCRLF = tt.UseCRLF
- err := f.WriteAll(tt.Input)
- if err != nil {
- t.Errorf("Unexpected error: %s\n", err)
- }
- out := b.String()
- if out != tt.Output {
- t.Errorf("#%d: out=%q want %q", n, out, tt.Output)
- }
- }
-}
import (
"debug/elf"
"os"
- "testing"
"syscall"
+ "testing"
)
func dotest() bool {
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package asn1 implements parsing of DER-encoded ASN.1 data structures,
+// as defined in ITU-T Rec X.690.
+//
+// See also ``A Layman's Guide to a Subset of ASN.1, BER, and DER,''
+// http://luca.ntop.org/Teaching/Appunti/asn1.html.
+package asn1
+
+// ASN.1 is a syntax for specifying abstract objects and BER, DER, PER, XER etc
+// are different encoding formats for those objects. Here, we'll be dealing
+// with DER, the Distinguished Encoding Rules. DER is used in X.509 because
+// it's fast to parse and, unlike BER, has a unique encoding for every object.
+// When calculating hashes over objects, it's important that the resulting
+// bytes be the same at both ends and DER removes this margin of error.
+//
+// ASN.1 is very complex and this package doesn't attempt to implement
+// everything by any means.
+
+import (
+ "fmt"
+ "math/big"
+ "reflect"
+ "time"
+)
+
+// A StructuralError suggests that the ASN.1 data is valid, but the Go type
+// which is receiving it doesn't match.
+type StructuralError struct {
+ Msg string
+}
+
+func (e StructuralError) Error() string { return "ASN.1 structure error: " + e.Msg }
+
+// A SyntaxError suggests that the ASN.1 data is invalid.
+type SyntaxError struct {
+ Msg string
+}
+
+func (e SyntaxError) Error() string { return "ASN.1 syntax error: " + e.Msg }
+
+// We start by dealing with each of the primitive types in turn.
+
+// BOOLEAN
+
+func parseBool(bytes []byte) (ret bool, err error) {
+ if len(bytes) != 1 {
+ err = SyntaxError{"invalid boolean"}
+ return
+ }
+
+ return bytes[0] != 0, nil
+}
+
+// INTEGER
+
+// parseInt64 treats the given bytes as a big-endian, signed integer and
+// returns the result.
+func parseInt64(bytes []byte) (ret int64, err error) {
+ if len(bytes) > 8 {
+ // We'll overflow an int64 in this case.
+ err = StructuralError{"integer too large"}
+ return
+ }
+ for bytesRead := 0; bytesRead < len(bytes); bytesRead++ {
+ ret <<= 8
+ ret |= int64(bytes[bytesRead])
+ }
+
+ // Shift up and down in order to sign extend the result.
+ ret <<= 64 - uint8(len(bytes))*8
+ ret >>= 64 - uint8(len(bytes))*8
+ return
+}
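A small stand-alone check of the shift-up/shift-down sign extension used in parseInt64 above (the two content bytes are an arbitrary example of a negative DER INTEGER):

package main

import "fmt"

func main() {
	bytes := []byte{0xff, 0x7f} // DER INTEGER content for -129
	var ret int64
	for _, b := range bytes {
		ret = ret<<8 | int64(b)
	}
	// ret is now 0x0000ff7f; shifting up and back down propagates the sign bit.
	ret <<= 64 - uint8(len(bytes))*8
	ret >>= 64 - uint8(len(bytes))*8
	fmt.Println(ret) // -129
}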
+
+// parseInt treats the given bytes as a big-endian, signed integer and returns
+// the result.
+func parseInt(bytes []byte) (int, error) {
+ ret64, err := parseInt64(bytes)
+ if err != nil {
+ return 0, err
+ }
+ if ret64 != int64(int(ret64)) {
+ return 0, StructuralError{"integer too large"}
+ }
+ return int(ret64), nil
+}
+
+var bigOne = big.NewInt(1)
+
+// parseBigInt treats the given bytes as a big-endian, signed integer and returns
+// the result.
+func parseBigInt(bytes []byte) *big.Int {
+ ret := new(big.Int)
+ if len(bytes) > 0 && bytes[0]&0x80 == 0x80 {
+ // This is a negative number.
+ notBytes := make([]byte, len(bytes))
+ for i := range notBytes {
+ notBytes[i] = ^bytes[i]
+ }
+ ret.SetBytes(notBytes)
+ ret.Add(ret, bigOne)
+ ret.Neg(ret)
+ return ret
+ }
+ ret.SetBytes(bytes)
+ return ret
+}
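A worked example of the negative branch in parseBigInt: for the single content byte 0xfe, complementing gives 0x01, adding one gives 2, and negating yields -2. The same steps written directly against math/big:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	bytes := []byte{0xfe} // two's-complement DER content for -2
	notBytes := make([]byte, len(bytes))
	for i := range notBytes {
		notBytes[i] = ^bytes[i] // 0x01
	}
	ret := new(big.Int)
	ret.SetBytes(notBytes)
	ret.Add(ret, big.NewInt(1)) // 2
	ret.Neg(ret)                // -2
	fmt.Println(ret)            // -2
}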
+
+// BIT STRING
+
+// BitString is the structure to use when you want an ASN.1 BIT STRING type. A
+// bit string is padded up to the nearest byte in memory and the number of
+// valid bits is recorded. Padding bits will be zero.
+type BitString struct {
+ Bytes []byte // bits packed into bytes.
+ BitLength int // length in bits.
+}
+
+// At returns the bit at the given index. If the index is out of range it
+// returns 0.
+func (b BitString) At(i int) int {
+ if i < 0 || i >= b.BitLength {
+ return 0
+ }
+ x := i / 8
+ y := 7 - uint(i%8)
+ return int(b.Bytes[x]>>y) & 1
+}
+
+// RightAlign returns a slice where the padding bits are at the beginning. The
+// slice may share memory with the BitString.
+func (b BitString) RightAlign() []byte {
+ shift := uint(8 - (b.BitLength % 8))
+ if shift == 8 || len(b.Bytes) == 0 {
+ return b.Bytes
+ }
+
+ a := make([]byte, len(b.Bytes))
+ a[0] = b.Bytes[0] >> shift
+ for i := 1; i < len(b.Bytes); i++ {
+ a[i] = b.Bytes[i-1] << (8 - shift)
+ a[i] |= b.Bytes[i] >> shift
+ }
+
+ return a
+}
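For illustration, the classic X.690 BIT STRING example with content bytes 6e 5d c0 and 6 padding bits has 3*8-6 = 18 valid bits; the indexing below mirrors BitString.At (plain code, not using the type, just to show the bit arithmetic):

package main

import "fmt"

func main() {
	// Content bytes after the padding count has been stripped:
	// 0x6e 0x5d 0xc0 with 6 padding bits => 18 valid bits.
	bytes := []byte{0x6e, 0x5d, 0xc0}
	bitLength := len(bytes)*8 - 6

	at := func(i int) int { // same indexing as BitString.At above
		if i < 0 || i >= bitLength {
			return 0
		}
		return int(bytes[i/8]>>(7-uint(i%8))) & 1
	}
	fmt.Println(bitLength, at(0), at(1), at(17), at(18)) // 18 0 1 1 0
}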
+
+// parseBitString parses an ASN.1 bit string from the given byte slice and returns it.
+func parseBitString(bytes []byte) (ret BitString, err error) {
+ if len(bytes) == 0 {
+ err = SyntaxError{"zero length BIT STRING"}
+ return
+ }
+ paddingBits := int(bytes[0])
+ if paddingBits > 7 ||
+ len(bytes) == 1 && paddingBits > 0 ||
+ bytes[len(bytes)-1]&((1<<bytes[0])-1) != 0 {
+ err = SyntaxError{"invalid padding bits in BIT STRING"}
+ return
+ }
+ ret.BitLength = (len(bytes)-1)*8 - paddingBits
+ ret.Bytes = bytes[1:]
+ return
+}
+
+// OBJECT IDENTIFIER
+
+// An ObjectIdentifier represents an ASN.1 OBJECT IDENTIFIER.
+type ObjectIdentifier []int
+
+// Equal returns true iff oi and other represent the same identifier.
+func (oi ObjectIdentifier) Equal(other ObjectIdentifier) bool {
+ if len(oi) != len(other) {
+ return false
+ }
+ for i := 0; i < len(oi); i++ {
+ if oi[i] != other[i] {
+ return false
+ }
+ }
+
+ return true
+}
+
+// parseObjectIdentifier parses an OBJECT IDENTIFIER from the given bytes and
+// returns it. An object identifier is a sequence of variable length integers
+// that are assigned in a hierarchy.
+func parseObjectIdentifier(bytes []byte) (s []int, err error) {
+ if len(bytes) == 0 {
+ err = SyntaxError{"zero length OBJECT IDENTIFIER"}
+ return
+ }
+
+ // In the worst case, we get two elements from the first byte (which is
+ // encoded differently) and then every varint is a single byte long.
+ s = make([]int, len(bytes)+1)
+
+ // The first byte is 40*value1 + value2:
+ s[0] = int(bytes[0]) / 40
+ s[1] = int(bytes[0]) % 40
+ i := 2
+ for offset := 1; offset < len(bytes); i++ {
+ var v int
+ v, offset, err = parseBase128Int(bytes, offset)
+ if err != nil {
+ return
+ }
+ s[i] = v
+ }
+ s = s[0:i]
+ return
+}
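As a worked example of what parseObjectIdentifier and parseBase128Int handle: the OID 1.2.840.113549 is encoded as the content bytes 2a 86 48 86 f7 0d, where 0x2a packs the first two arcs as 40*1+2 and each later arc is a base-128 varint whose non-final bytes have the high bit set. A compact stand-alone decoder sketch:

package main

import "fmt"

func main() {
	bytes := []byte{0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d} // content of OID 1.2.840.113549

	oid := []int{int(bytes[0]) / 40, int(bytes[0]) % 40} // first byte packs two arcs
	v := 0
	for _, b := range bytes[1:] {
		v = v<<7 | int(b&0x7f)
		if b&0x80 == 0 { // high bit clear ends this varint
			oid = append(oid, v)
			v = 0
		}
	}
	fmt.Println(oid) // [1 2 840 113549]
}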
+
+// ENUMERATED
+
+// An Enumerated is represented as a plain int.
+type Enumerated int
+
+// FLAG
+
+// A Flag accepts any data and is set to true if present.
+type Flag bool
+
+// parseBase128Int parses a base-128 encoded int from the given offset in the
+// given byte slice. It returns the value and the new offset.
+func parseBase128Int(bytes []byte, initOffset int) (ret, offset int, err error) {
+ offset = initOffset
+ for shifted := 0; offset < len(bytes); shifted++ {
+ if shifted > 4 {
+ err = StructuralError{"base 128 integer too large"}
+ return
+ }
+ ret <<= 7
+ b := bytes[offset]
+ ret |= int(b & 0x7f)
+ offset++
+ if b&0x80 == 0 {
+ return
+ }
+ }
+ err = SyntaxError{"truncated base 128 integer"}
+ return
+}
+
+// UTCTime
+
+func parseUTCTime(bytes []byte) (ret *time.Time, err error) {
+ s := string(bytes)
+ ret, err = time.Parse("0601021504Z0700", s)
+ if err == nil {
+ return
+ }
+ ret, err = time.Parse("060102150405Z0700", s)
+ return
+}
+
+// parseGeneralizedTime parses the GeneralizedTime from the given byte slice
+// and returns the resulting time.
+func parseGeneralizedTime(bytes []byte) (ret *time.Time, err error) {
+ return time.Parse("20060102150405Z0700", string(bytes))
+}
+
+// PrintableString
+
+// parsePrintableString parses an ASN.1 PrintableString from the given byte
+// array and returns it.
+func parsePrintableString(bytes []byte) (ret string, err error) {
+ for _, b := range bytes {
+ if !isPrintable(b) {
+ err = SyntaxError{"PrintableString contains invalid character"}
+ return
+ }
+ }
+ ret = string(bytes)
+ return
+}
+
+// isPrintable returns true iff the given b is in the ASN.1 PrintableString set.
+func isPrintable(b byte) bool {
+ return 'a' <= b && b <= 'z' ||
+ 'A' <= b && b <= 'Z' ||
+ '0' <= b && b <= '9' ||
+ '\'' <= b && b <= ')' ||
+ '+' <= b && b <= '/' ||
+ b == ' ' ||
+ b == ':' ||
+ b == '=' ||
+ b == '?' ||
+ // This is technically not allowed in a PrintableString.
+ // However, x509 certificates with wildcard strings don't
+ // always use the correct string type so we permit it.
+ b == '*'
+}
+
+// IA5String
+
+// parseIA5String parses an ASN.1 IA5String (ASCII string) from the given
+// byte slice and returns it.
+func parseIA5String(bytes []byte) (ret string, err error) {
+ for _, b := range bytes {
+ if b >= 0x80 {
+ err = SyntaxError{"IA5String contains invalid character"}
+ return
+ }
+ }
+ ret = string(bytes)
+ return
+}
+
+// T61String
+
+// parseT61String parses an ASN.1 T61String (8-bit clean string) from the given
+// byte slice and returns it.
+func parseT61String(bytes []byte) (ret string, err error) {
+ return string(bytes), nil
+}
+
+// UTF8String
+
+// parseUTF8String parses an ASN.1 UTF8String (raw UTF-8) from the given byte
+// slice and returns it.
+func parseUTF8String(bytes []byte) (ret string, err error) {
+ return string(bytes), nil
+}
+
+// A RawValue represents an undecoded ASN.1 object.
+type RawValue struct {
+ Class, Tag int
+ IsCompound bool
+ Bytes []byte
+ FullBytes []byte // includes the tag and length
+}
+
+// RawContent is used to signal that the undecoded DER data needs to be
+// preserved for a struct. To use it, the first field of the struct must have
+// this type. It's an error for any of the other fields to have this type.
+type RawContent []byte
+
+// Tagging
+
+// parseTagAndLength parses an ASN.1 tag and length pair from the given offset
+// into a byte slice. It returns the parsed data and the new offset. SET and
+// SET OF (tag 17) are mapped to SEQUENCE and SEQUENCE OF (tag 16) since we
+// don't distinguish between ordered and unordered objects in this code.
+func parseTagAndLength(bytes []byte, initOffset int) (ret tagAndLength, offset int, err error) {
+ offset = initOffset
+ b := bytes[offset]
+ offset++
+ ret.class = int(b >> 6)
+ ret.isCompound = b&0x20 == 0x20
+ ret.tag = int(b & 0x1f)
+
+ // If the bottom five bits are set, then the tag number is actually base 128
+ // encoded afterwards
+ if ret.tag == 0x1f {
+ ret.tag, offset, err = parseBase128Int(bytes, offset)
+ if err != nil {
+ return
+ }
+ }
+ if offset >= len(bytes) {
+ err = SyntaxError{"truncated tag or length"}
+ return
+ }
+ b = bytes[offset]
+ offset++
+ if b&0x80 == 0 {
+ // The length is encoded in the bottom 7 bits.
+ ret.length = int(b & 0x7f)
+ } else {
+ // Bottom 7 bits give the number of length bytes to follow.
+ numBytes := int(b & 0x7f)
+ // We risk overflowing a signed 32-bit number if we accept more than 3 bytes.
+ if numBytes > 3 {
+ err = StructuralError{"length too large"}
+ return
+ }
+ if numBytes == 0 {
+ err = SyntaxError{"indefinite length found (not DER)"}
+ return
+ }
+ ret.length = 0
+ for i := 0; i < numBytes; i++ {
+ if offset >= len(bytes) {
+ err = SyntaxError{"truncated tag or length"}
+ return
+ }
+ b = bytes[offset]
+ offset++
+ ret.length <<= 8
+ ret.length |= int(b)
+ }
+ }
+
+ return
+}
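// Illustrative sketch (editor's annotation, not part of the patch): the
// self-signed test certificate below begins 0x30 0x82 0x02 0x18.
// parseTagAndLength reads 0x30 as class 0 (UNIVERSAL), compound, tag 16
// (SEQUENCE); the length byte 0x82 announces a two-byte long-form length,
// and 0x02 0x18 gives 0x0218 = 536 content bytes, with the new offset at 4.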
+
+// parseSequenceOf is used for SEQUENCE OF and SET OF values. It tries to parse
+// a number of ASN.1 values from the given byte slice and returns them as a
+// slice of Go values of the given type.
+func parseSequenceOf(bytes []byte, sliceType reflect.Type, elemType reflect.Type) (ret reflect.Value, err error) {
+ expectedTag, compoundType, ok := getUniversalType(elemType)
+ if !ok {
+ err = StructuralError{"unknown Go type for slice"}
+ return
+ }
+
+ // First we iterate over the input and count the number of elements,
+ // checking that the types are correct in each case.
+ numElements := 0
+ for offset := 0; offset < len(bytes); {
+ var t tagAndLength
+ t, offset, err = parseTagAndLength(bytes, offset)
+ if err != nil {
+ return
+ }
+ // We pretend that GENERAL STRINGs are PRINTABLE STRINGs so
+ // that a sequence of them can be parsed into a []string.
+ if t.tag == tagGeneralString {
+ t.tag = tagPrintableString
+ }
+ if t.class != classUniversal || t.isCompound != compoundType || t.tag != expectedTag {
+ err = StructuralError{"sequence tag mismatch"}
+ return
+ }
+ if invalidLength(offset, t.length, len(bytes)) {
+ err = SyntaxError{"truncated sequence"}
+ return
+ }
+ offset += t.length
+ numElements++
+ }
+ ret = reflect.MakeSlice(sliceType, numElements, numElements)
+ params := fieldParameters{}
+ offset := 0
+ for i := 0; i < numElements; i++ {
+ offset, err = parseField(ret.Index(i), bytes, offset, params)
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+var (
+ bitStringType = reflect.TypeOf(BitString{})
+ objectIdentifierType = reflect.TypeOf(ObjectIdentifier{})
+ enumeratedType = reflect.TypeOf(Enumerated(0))
+ flagType = reflect.TypeOf(Flag(false))
+ timeType = reflect.TypeOf(&time.Time{})
+ rawValueType = reflect.TypeOf(RawValue{})
+ rawContentsType = reflect.TypeOf(RawContent(nil))
+ bigIntType = reflect.TypeOf(new(big.Int))
+)
+
+// invalidLength returns true iff offset + length > sliceLength, or if the
+// addition would overflow.
+func invalidLength(offset, length, sliceLength int) bool {
+ return offset+length < offset || offset+length > sliceLength
+}
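// Illustrative sketch (editor's annotation, not part of the patch): the first
// clause is the overflow guard. If a corrupt length makes offset+length wrap
// past the maximum int, the sum comes out smaller than offset and the element
// is rejected even though the comparison against sliceLength alone might pass.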
+
+// parseField is the main parsing function. Given a byte slice and an offset
+// into that slice, it will try to parse a suitable ASN.1 value out and store it
+// in the given Value.
+func parseField(v reflect.Value, bytes []byte, initOffset int, params fieldParameters) (offset int, err error) {
+ offset = initOffset
+ fieldType := v.Type()
+
+ // If we have run out of data, it may be that there are optional elements at the end.
+ if offset == len(bytes) {
+ if !setDefaultValue(v, params) {
+ err = SyntaxError{"sequence truncated"}
+ }
+ return
+ }
+
+ // Deal with raw values.
+ if fieldType == rawValueType {
+ var t tagAndLength
+ t, offset, err = parseTagAndLength(bytes, offset)
+ if err != nil {
+ return
+ }
+ if invalidLength(offset, t.length, len(bytes)) {
+ err = SyntaxError{"data truncated"}
+ return
+ }
+ result := RawValue{t.class, t.tag, t.isCompound, bytes[offset : offset+t.length], bytes[initOffset : offset+t.length]}
+ offset += t.length
+ v.Set(reflect.ValueOf(result))
+ return
+ }
+
+ // Deal with the ANY type.
+ if ifaceType := fieldType; ifaceType.Kind() == reflect.Interface && ifaceType.NumMethod() == 0 {
+ var t tagAndLength
+ t, offset, err = parseTagAndLength(bytes, offset)
+ if err != nil {
+ return
+ }
+ if invalidLength(offset, t.length, len(bytes)) {
+ err = SyntaxError{"data truncated"}
+ return
+ }
+ var result interface{}
+ if !t.isCompound && t.class == classUniversal {
+ innerBytes := bytes[offset : offset+t.length]
+ switch t.tag {
+ case tagPrintableString:
+ result, err = parsePrintableString(innerBytes)
+ case tagIA5String:
+ result, err = parseIA5String(innerBytes)
+ case tagT61String:
+ result, err = parseT61String(innerBytes)
+ case tagUTF8String:
+ result, err = parseUTF8String(innerBytes)
+ case tagInteger:
+ result, err = parseInt64(innerBytes)
+ case tagBitString:
+ result, err = parseBitString(innerBytes)
+ case tagOID:
+ result, err = parseObjectIdentifier(innerBytes)
+ case tagUTCTime:
+ result, err = parseUTCTime(innerBytes)
+ case tagOctetString:
+ result = innerBytes
+ default:
+ // If we don't know how to handle the type, we just leave Value as nil.
+ }
+ }
+ offset += t.length
+ if err != nil {
+ return
+ }
+ if result != nil {
+ v.Set(reflect.ValueOf(result))
+ }
+ return
+ }
+ universalTag, compoundType, ok1 := getUniversalType(fieldType)
+ if !ok1 {
+ err = StructuralError{fmt.Sprintf("unknown Go type: %v", fieldType)}
+ return
+ }
+
+ t, offset, err := parseTagAndLength(bytes, offset)
+ if err != nil {
+ return
+ }
+ if params.explicit {
+ expectedClass := classContextSpecific
+ if params.application {
+ expectedClass = classApplication
+ }
+ if t.class == expectedClass && t.tag == *params.tag && (t.length == 0 || t.isCompound) {
+ if t.length > 0 {
+ t, offset, err = parseTagAndLength(bytes, offset)
+ if err != nil {
+ return
+ }
+ } else {
+ if fieldType != flagType {
+ err = StructuralError{"Zero length explicit tag was not an asn1.Flag"}
+ return
+ }
+ v.SetBool(true)
+ return
+ }
+ } else {
+ // The tags didn't match, it might be an optional element.
+ ok := setDefaultValue(v, params)
+ if ok {
+ offset = initOffset
+ } else {
+ err = StructuralError{"explicitly tagged member didn't match"}
+ }
+ return
+ }
+ }
+
+ // Special case for strings: all the ASN.1 string types map to the Go
+ // type string. getUniversalType returns the tag for PrintableString
+ // when it sees a string, so if we see a different string type on the
+ // wire, we change the universal type to match.
+ if universalTag == tagPrintableString {
+ switch t.tag {
+ case tagIA5String, tagGeneralString, tagT61String, tagUTF8String:
+ universalTag = t.tag
+ }
+ }
+
+ // Special case for time: UTCTime and GeneralizedTime both map to the
+ // Go type time.Time.
+ if universalTag == tagUTCTime && t.tag == tagGeneralizedTime {
+ universalTag = tagGeneralizedTime
+ }
+
+ expectedClass := classUniversal
+ expectedTag := universalTag
+
+ if !params.explicit && params.tag != nil {
+ expectedClass = classContextSpecific
+ expectedTag = *params.tag
+ }
+
+ if !params.explicit && params.application && params.tag != nil {
+ expectedClass = classApplication
+ expectedTag = *params.tag
+ }
+
+ // We have unwrapped any explicit tagging at this point.
+ if t.class != expectedClass || t.tag != expectedTag || t.isCompound != compoundType {
+ // Tags don't match. Again, it could be an optional element.
+ ok := setDefaultValue(v, params)
+ if ok {
+ offset = initOffset
+ } else {
+ err = StructuralError{fmt.Sprintf("tags don't match (%d vs %+v) %+v %s @%d", expectedTag, t, params, fieldType.Name(), offset)}
+ }
+ return
+ }
+ if invalidLength(offset, t.length, len(bytes)) {
+ err = SyntaxError{"data truncated"}
+ return
+ }
+ innerBytes := bytes[offset : offset+t.length]
+ offset += t.length
+
+ // We deal with the structures defined in this package first.
+ switch fieldType {
+ case objectIdentifierType:
+ newSlice, err1 := parseObjectIdentifier(innerBytes)
+ v.Set(reflect.MakeSlice(v.Type(), len(newSlice), len(newSlice)))
+ if err1 == nil {
+ reflect.Copy(v, reflect.ValueOf(newSlice))
+ }
+ err = err1
+ return
+ case bitStringType:
+ bs, err1 := parseBitString(innerBytes)
+ if err1 == nil {
+ v.Set(reflect.ValueOf(bs))
+ }
+ err = err1
+ return
+ case timeType:
+ var time *time.Time
+ var err1 error
+ if universalTag == tagUTCTime {
+ time, err1 = parseUTCTime(innerBytes)
+ } else {
+ time, err1 = parseGeneralizedTime(innerBytes)
+ }
+ if err1 == nil {
+ v.Set(reflect.ValueOf(time))
+ }
+ err = err1
+ return
+ case enumeratedType:
+ parsedInt, err1 := parseInt(innerBytes)
+ if err1 == nil {
+ v.SetInt(int64(parsedInt))
+ }
+ err = err1
+ return
+ case flagType:
+ v.SetBool(true)
+ return
+ case bigIntType:
+ parsedInt := parseBigInt(innerBytes)
+ v.Set(reflect.ValueOf(parsedInt))
+ return
+ }
+ switch val := v; val.Kind() {
+ case reflect.Bool:
+ parsedBool, err1 := parseBool(innerBytes)
+ if err1 == nil {
+ val.SetBool(parsedBool)
+ }
+ err = err1
+ return
+ case reflect.Int, reflect.Int32:
+ parsedInt, err1 := parseInt(innerBytes)
+ if err1 == nil {
+ val.SetInt(int64(parsedInt))
+ }
+ err = err1
+ return
+ case reflect.Int64:
+ parsedInt, err1 := parseInt64(innerBytes)
+ if err1 == nil {
+ val.SetInt(parsedInt)
+ }
+ err = err1
+ return
+ // TODO(dfc) Add support for the remaining integer types
+ case reflect.Struct:
+ structType := fieldType
+
+ if structType.NumField() > 0 &&
+ structType.Field(0).Type == rawContentsType {
+ bytes := bytes[initOffset:offset]
+ val.Field(0).Set(reflect.ValueOf(RawContent(bytes)))
+ }
+
+ innerOffset := 0
+ for i := 0; i < structType.NumField(); i++ {
+ field := structType.Field(i)
+ if i == 0 && field.Type == rawContentsType {
+ continue
+ }
+ innerOffset, err = parseField(val.Field(i), innerBytes, innerOffset, parseFieldParameters(field.Tag.Get("asn1")))
+ if err != nil {
+ return
+ }
+ }
+ // We allow extra bytes at the end of the SEQUENCE because
+ // adding elements to the end has been used in X.509 as the
+ // version numbers have increased.
+ return
+ case reflect.Slice:
+ sliceType := fieldType
+ if sliceType.Elem().Kind() == reflect.Uint8 {
+ val.Set(reflect.MakeSlice(sliceType, len(innerBytes), len(innerBytes)))
+ reflect.Copy(val, reflect.ValueOf(innerBytes))
+ return
+ }
+ newSlice, err1 := parseSequenceOf(innerBytes, sliceType, sliceType.Elem())
+ if err1 == nil {
+ val.Set(newSlice)
+ }
+ err = err1
+ return
+ case reflect.String:
+ var v string
+ switch universalTag {
+ case tagPrintableString:
+ v, err = parsePrintableString(innerBytes)
+ case tagIA5String:
+ v, err = parseIA5String(innerBytes)
+ case tagT61String:
+ v, err = parseT61String(innerBytes)
+ case tagUTF8String:
+ v, err = parseUTF8String(innerBytes)
+ case tagGeneralString:
+// GeneralString is specified in ISO-2022/ECMA-35.
+ // A brief review suggests that it includes structures
+ // that allow the encoding to change midstring and
+ // such. We give up and pass it as an 8-bit string.
+ v, err = parseT61String(innerBytes)
+ default:
+ err = SyntaxError{fmt.Sprintf("internal error: unknown string type %d", universalTag)}
+ }
+ if err == nil {
+ val.SetString(v)
+ }
+ return
+ }
+ err = StructuralError{"unsupported: " + v.Type().String()}
+ return
+}
+
+// setDefaultValue is used to install a default value, from a tag string, into
+// a Value. It is successful if the field was optional, even if a default value
+// wasn't provided or it failed to install it into the Value.
+func setDefaultValue(v reflect.Value, params fieldParameters) (ok bool) {
+ if !params.optional {
+ return
+ }
+ ok = true
+ if params.defaultValue == nil {
+ return
+ }
+ switch val := v; val.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ val.SetInt(*params.defaultValue)
+ }
+ return
+}
+
+// Unmarshal parses the DER-encoded ASN.1 data structure b
+// and uses the reflect package to fill in an arbitrary value pointed at by val.
+// Because Unmarshal uses the reflect package, the structs
+// being written to must use upper case field names.
+//
+// An ASN.1 INTEGER can be written to an int, int32 or int64.
+// If the encoded value does not fit in the Go type,
+// Unmarshal returns a parse error.
+//
+// An ASN.1 BIT STRING can be written to a BitString.
+//
+// An ASN.1 OCTET STRING can be written to a []byte.
+//
+// An ASN.1 OBJECT IDENTIFIER can be written to an
+// ObjectIdentifier.
+//
+// An ASN.1 ENUMERATED can be written to an Enumerated.
+//
+// An ASN.1 UTCTIME or GENERALIZEDTIME can be written to a *time.Time.
+//
+// An ASN.1 PrintableString or IA5String can be written to a string.
+//
+// Any of the above ASN.1 values can be written to an interface{}.
+// The value stored in the interface has the corresponding Go type.
+// For integers, that type is int64.
+//
+// An ASN.1 SEQUENCE OF x or SET OF x can be written
+// to a slice if an x can be written to the slice's element type.
+//
+// An ASN.1 SEQUENCE or SET can be written to a struct
+// if each of the elements in the sequence can be
+// written to the corresponding element in the struct.
+//
+// The following tags on struct fields have special meaning to Unmarshal:
+//
+// optional marks the field as ASN.1 OPTIONAL
+// [explicit] tag:x specifies the ASN.1 tag number; implies ASN.1 CONTEXT SPECIFIC
+// default:x sets the default value for optional integer fields
+//
+// If the type of the first field of a structure is RawContent then the raw
+// ASN.1 contents of the struct will be stored in it.
+//
+// Other ASN.1 types are not supported; if it encounters them,
+// Unmarshal returns a parse error.
+func Unmarshal(b []byte, val interface{}) (rest []byte, err error) {
+ return UnmarshalWithParams(b, val, "")
+}
+
+// UnmarshalWithParams allows field parameters to be specified for the
+// top-level element. The form of the params is the same as the field tags.
+func UnmarshalWithParams(b []byte, val interface{}, params string) (rest []byte, err error) {
+ v := reflect.ValueOf(val).Elem()
+ offset, err := parseField(v, b, 0, parseFieldParameters(params))
+ if err != nil {
+ return nil, err
+ }
+ return b[offset:], nil
+}
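// Illustrative sketch (editor's annotation, not part of the patch): a minimal
// caller of Unmarshal as documented above. The "encoding/asn1" import path is
// an assumption about where this package is installed; the struct tags are the
// ones listed in the Unmarshal comment.

package main

import (
    "encoding/asn1"
    "fmt"
)

type message struct {
    Version int    `asn1:"optional,explicit,default:0,tag:0"`
    Body    string // a PrintableString on the wire
}

func main() {
    // SEQUENCE { PrintableString "foo" }; Version is absent, so its default applies.
    der := []byte{0x30, 0x05, 0x13, 0x03, 'f', 'o', 'o'}
    var m message
    if _, err := asn1.Unmarshal(der, &m); err != nil {
        fmt.Println("parse error:", err)
        return
    }
    fmt.Println(m.Version, m.Body) // prints: 0 foo
}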
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package asn1
+
+import (
+ "bytes"
+ "reflect"
+ "testing"
+ "time"
+)
+
+type int64Test struct {
+ in []byte
+ ok bool
+ out int64
+}
+
+var int64TestData = []int64Test{
+ {[]byte{0x00}, true, 0},
+ {[]byte{0x7f}, true, 127},
+ {[]byte{0x00, 0x80}, true, 128},
+ {[]byte{0x01, 0x00}, true, 256},
+ {[]byte{0x80}, true, -128},
+ {[]byte{0xff, 0x7f}, true, -129},
+ {[]byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, true, -1},
+ {[]byte{0xff}, true, -1},
+ {[]byte{0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, true, -9223372036854775808},
+ {[]byte{0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, false, 0},
+}
+
+func TestParseInt64(t *testing.T) {
+ for i, test := range int64TestData {
+ ret, err := parseInt64(test.in)
+ if (err == nil) != test.ok {
+ t.Errorf("#%d: Incorrect error result (did fail? %v, expected: %v)", i, err == nil, test.ok)
+ }
+ if test.ok && ret != test.out {
+ t.Errorf("#%d: Bad result: %v (expected %v)", i, ret, test.out)
+ }
+ }
+}
+
+type int32Test struct {
+ in []byte
+ ok bool
+ out int32
+}
+
+var int32TestData = []int32Test{
+ {[]byte{0x00}, true, 0},
+ {[]byte{0x7f}, true, 127},
+ {[]byte{0x00, 0x80}, true, 128},
+ {[]byte{0x01, 0x00}, true, 256},
+ {[]byte{0x80}, true, -128},
+ {[]byte{0xff, 0x7f}, true, -129},
+ {[]byte{0xff, 0xff, 0xff, 0xff}, true, -1},
+ {[]byte{0xff}, true, -1},
+ {[]byte{0x80, 0x00, 0x00, 0x00}, true, -2147483648},
+ {[]byte{0x80, 0x00, 0x00, 0x00, 0x00}, false, 0},
+}
+
+func TestParseInt32(t *testing.T) {
+ for i, test := range int32TestData {
+ ret, err := parseInt(test.in)
+ if (err == nil) != test.ok {
+ t.Errorf("#%d: Incorrect error result (did fail? %v, expected: %v)", i, err == nil, test.ok)
+ }
+ if test.ok && int32(ret) != test.out {
+ t.Errorf("#%d: Bad result: %v (expected %v)", i, ret, test.out)
+ }
+ }
+}
+
+var bigIntTests = []struct {
+ in []byte
+ base10 string
+}{
+ {[]byte{0xff}, "-1"},
+ {[]byte{0x00}, "0"},
+ {[]byte{0x01}, "1"},
+ {[]byte{0x00, 0xff}, "255"},
+ {[]byte{0xff, 0x00}, "-256"},
+ {[]byte{0x01, 0x00}, "256"},
+}
+
+func TestParseBigInt(t *testing.T) {
+ for i, test := range bigIntTests {
+ ret := parseBigInt(test.in)
+ if ret.String() != test.base10 {
+ t.Errorf("#%d: bad result from %x, got %s want %s", i, test.in, ret.String(), test.base10)
+ }
+ fw := newForkableWriter()
+ marshalBigInt(fw, ret)
+ result := fw.Bytes()
+ if !bytes.Equal(result, test.in) {
+ t.Errorf("#%d: got %x from marshaling %s, want %x", i, result, ret, test.in)
+ }
+ }
+}
+
+type bitStringTest struct {
+ in []byte
+ ok bool
+ out []byte
+ bitLength int
+}
+
+var bitStringTestData = []bitStringTest{
+ {[]byte{}, false, []byte{}, 0},
+ {[]byte{0x00}, true, []byte{}, 0},
+ {[]byte{0x07, 0x00}, true, []byte{0x00}, 1},
+ {[]byte{0x07, 0x01}, false, []byte{}, 0},
+ {[]byte{0x07, 0x40}, false, []byte{}, 0},
+ {[]byte{0x08, 0x00}, false, []byte{}, 0},
+}
+
+func TestBitString(t *testing.T) {
+ for i, test := range bitStringTestData {
+ ret, err := parseBitString(test.in)
+ if (err == nil) != test.ok {
+ t.Errorf("#%d: Incorrect error result (did fail? %v, expected: %v)", i, err == nil, test.ok)
+ }
+ if err == nil {
+ if test.bitLength != ret.BitLength || bytes.Compare(ret.Bytes, test.out) != 0 {
+ t.Errorf("#%d: Bad result: %v (expected %v %v)", i, ret, test.out, test.bitLength)
+ }
+ }
+ }
+}
+
+func TestBitStringAt(t *testing.T) {
+ bs := BitString{[]byte{0x82, 0x40}, 16}
+ if bs.At(0) != 1 {
+ t.Error("#1: Failed")
+ }
+ if bs.At(1) != 0 {
+ t.Error("#2: Failed")
+ }
+ if bs.At(6) != 1 {
+ t.Error("#3: Failed")
+ }
+ if bs.At(9) != 1 {
+ t.Error("#4: Failed")
+ }
+}
+
+type bitStringRightAlignTest struct {
+ in []byte
+ inlen int
+ out []byte
+}
+
+var bitStringRightAlignTests = []bitStringRightAlignTest{
+ {[]byte{0x80}, 1, []byte{0x01}},
+ {[]byte{0x80, 0x80}, 9, []byte{0x01, 0x01}},
+ {[]byte{}, 0, []byte{}},
+ {[]byte{0xce}, 8, []byte{0xce}},
+ {[]byte{0xce, 0x47}, 16, []byte{0xce, 0x47}},
+ {[]byte{0x34, 0x50}, 12, []byte{0x03, 0x45}},
+}
+
+func TestBitStringRightAlign(t *testing.T) {
+ for i, test := range bitStringRightAlignTests {
+ bs := BitString{test.in, test.inlen}
+ out := bs.RightAlign()
+ if bytes.Compare(out, test.out) != 0 {
+ t.Errorf("#%d got: %x want: %x", i, out, test.out)
+ }
+ }
+}
+
+type objectIdentifierTest struct {
+ in []byte
+ ok bool
+ out []int
+}
+
+var objectIdentifierTestData = []objectIdentifierTest{
+ {[]byte{}, false, []int{}},
+ {[]byte{85}, true, []int{2, 5}},
+ {[]byte{85, 0x02}, true, []int{2, 5, 2}},
+ {[]byte{85, 0x02, 0xc0, 0x00}, true, []int{2, 5, 2, 0x2000}},
+ {[]byte{85, 0x02, 0xc0, 0x80, 0x80, 0x80, 0x80}, false, []int{}},
+}
+
+func TestObjectIdentifier(t *testing.T) {
+ for i, test := range objectIdentifierTestData {
+ ret, err := parseObjectIdentifier(test.in)
+ if (err == nil) != test.ok {
+ t.Errorf("#%d: Incorrect error result (did fail? %v, expected: %v)", i, err == nil, test.ok)
+ }
+ if err == nil {
+ if !reflect.DeepEqual(test.out, ret) {
+ t.Errorf("#%d: Bad result: %v (expected %v)", i, ret, test.out)
+ }
+ }
+ }
+}
+
+type timeTest struct {
+ in string
+ ok bool
+ out *time.Time
+}
+
+var utcTestData = []timeTest{
+ {"910506164540-0700", true, &time.Time{1991, 05, 06, 16, 45, 40, 0, -7 * 60 * 60, ""}},
+ {"910506164540+0730", true, &time.Time{1991, 05, 06, 16, 45, 40, 0, 7*60*60 + 30*60, ""}},
+ {"910506234540Z", true, &time.Time{1991, 05, 06, 23, 45, 40, 0, 0, "UTC"}},
+ {"9105062345Z", true, &time.Time{1991, 05, 06, 23, 45, 0, 0, 0, "UTC"}},
+ {"a10506234540Z", false, nil},
+ {"91a506234540Z", false, nil},
+ {"9105a6234540Z", false, nil},
+ {"910506a34540Z", false, nil},
+ {"910506334a40Z", false, nil},
+ {"91050633444aZ", false, nil},
+ {"910506334461Z", false, nil},
+ {"910506334400Za", false, nil},
+}
+
+func TestUTCTime(t *testing.T) {
+ for i, test := range utcTestData {
+ ret, err := parseUTCTime([]byte(test.in))
+ if (err == nil) != test.ok {
+ t.Errorf("#%d: Incorrect error result (did fail? %v, expected: %v)", i, err == nil, test.ok)
+ }
+ if err == nil {
+ if !reflect.DeepEqual(test.out, ret) {
+ t.Errorf("#%d: Bad result: %v (expected %v)", i, ret, test.out)
+ }
+ }
+ }
+}
+
+var generalizedTimeTestData = []timeTest{
+ {"20100102030405Z", true, &time.Time{2010, 01, 02, 03, 04, 05, 0, 0, "UTC"}},
+ {"20100102030405", false, nil},
+ {"20100102030405+0607", true, &time.Time{2010, 01, 02, 03, 04, 05, 0, 6*60*60 + 7*60, ""}},
+ {"20100102030405-0607", true, &time.Time{2010, 01, 02, 03, 04, 05, 0, -6*60*60 - 7*60, ""}},
+}
+
+func TestGeneralizedTime(t *testing.T) {
+ for i, test := range generalizedTimeTestData {
+ ret, err := parseGeneralizedTime([]byte(test.in))
+ if (err == nil) != test.ok {
+ t.Errorf("#%d: Incorrect error result (did fail? %v, expected: %v)", i, err == nil, test.ok)
+ }
+ if err == nil {
+ if !reflect.DeepEqual(test.out, ret) {
+ t.Errorf("#%d: Bad result: %v (expected %v)", i, ret, test.out)
+ }
+ }
+ }
+}
+
+type tagAndLengthTest struct {
+ in []byte
+ ok bool
+ out tagAndLength
+}
+
+var tagAndLengthData = []tagAndLengthTest{
+ {[]byte{0x80, 0x01}, true, tagAndLength{2, 0, 1, false}},
+ {[]byte{0xa0, 0x01}, true, tagAndLength{2, 0, 1, true}},
+ {[]byte{0x02, 0x00}, true, tagAndLength{0, 2, 0, false}},
+ {[]byte{0xfe, 0x00}, true, tagAndLength{3, 30, 0, true}},
+ {[]byte{0x1f, 0x01, 0x00}, true, tagAndLength{0, 1, 0, false}},
+ {[]byte{0x1f, 0x81, 0x00, 0x00}, true, tagAndLength{0, 128, 0, false}},
+ {[]byte{0x1f, 0x81, 0x80, 0x01, 0x00}, true, tagAndLength{0, 0x4001, 0, false}},
+ {[]byte{0x00, 0x81, 0x01}, true, tagAndLength{0, 0, 1, false}},
+ {[]byte{0x00, 0x82, 0x01, 0x00}, true, tagAndLength{0, 0, 256, false}},
+ {[]byte{0x00, 0x83, 0x01, 0x00}, false, tagAndLength{}},
+ {[]byte{0x1f, 0x85}, false, tagAndLength{}},
+ {[]byte{0x30, 0x80}, false, tagAndLength{}},
+}
+
+func TestParseTagAndLength(t *testing.T) {
+ for i, test := range tagAndLengthData {
+ tagAndLength, _, err := parseTagAndLength(test.in, 0)
+ if (err == nil) != test.ok {
+ t.Errorf("#%d: Incorrect error result (did pass? %v, expected: %v)", i, err == nil, test.ok)
+ }
+ if err == nil && !reflect.DeepEqual(test.out, tagAndLength) {
+ t.Errorf("#%d: Bad result: %v (expected %v)", i, tagAndLength, test.out)
+ }
+ }
+}
+
+type parseFieldParametersTest struct {
+ in string
+ out fieldParameters
+}
+
+func newInt(n int) *int { return &n }
+
+func newInt64(n int64) *int64 { return &n }
+
+func newString(s string) *string { return &s }
+
+func newBool(b bool) *bool { return &b }
+
+var parseFieldParametersTestData []parseFieldParametersTest = []parseFieldParametersTest{
+ {"", fieldParameters{}},
+ {"ia5", fieldParameters{stringType: tagIA5String}},
+ {"printable", fieldParameters{stringType: tagPrintableString}},
+ {"optional", fieldParameters{optional: true}},
+ {"explicit", fieldParameters{explicit: true, tag: new(int)}},
+ {"application", fieldParameters{application: true, tag: new(int)}},
+ {"optional,explicit", fieldParameters{optional: true, explicit: true, tag: new(int)}},
+ {"default:42", fieldParameters{defaultValue: newInt64(42)}},
+ {"tag:17", fieldParameters{tag: newInt(17)}},
+ {"optional,explicit,default:42,tag:17", fieldParameters{optional: true, explicit: true, defaultValue: newInt64(42), tag: newInt(17)}},
+ {"optional,explicit,default:42,tag:17,rubbish1", fieldParameters{true, true, false, newInt64(42), newInt(17), 0, false}},
+ {"set", fieldParameters{set: true}},
+}
+
+func TestParseFieldParameters(t *testing.T) {
+ for i, test := range parseFieldParametersTestData {
+ f := parseFieldParameters(test.in)
+ if !reflect.DeepEqual(f, test.out) {
+ t.Errorf("#%d: Bad result: %v (expected %v)", i, f, test.out)
+ }
+ }
+}
+
+type TestObjectIdentifierStruct struct {
+ OID ObjectIdentifier
+}
+
+type TestContextSpecificTags struct {
+ A int `asn1:"tag:1"`
+}
+
+type TestContextSpecificTags2 struct {
+ A int `asn1:"explicit,tag:1"`
+ B int
+}
+
+type TestElementsAfterString struct {
+ S string
+ A, B int
+}
+
+var unmarshalTestData = []struct {
+ in []byte
+ out interface{}
+}{
+ {[]byte{0x02, 0x01, 0x42}, newInt(0x42)},
+ {[]byte{0x30, 0x08, 0x06, 0x06, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d}, &TestObjectIdentifierStruct{[]int{1, 2, 840, 113549}}},
+ {[]byte{0x03, 0x04, 0x06, 0x6e, 0x5d, 0xc0}, &BitString{[]byte{110, 93, 192}, 18}},
+ {[]byte{0x30, 0x09, 0x02, 0x01, 0x01, 0x02, 0x01, 0x02, 0x02, 0x01, 0x03}, &[]int{1, 2, 3}},
+ {[]byte{0x02, 0x01, 0x10}, newInt(16)},
+ {[]byte{0x13, 0x04, 't', 'e', 's', 't'}, newString("test")},
+ {[]byte{0x16, 0x04, 't', 'e', 's', 't'}, newString("test")},
+ {[]byte{0x16, 0x04, 't', 'e', 's', 't'}, &RawValue{0, 22, false, []byte("test"), []byte("\x16\x04test")}},
+ {[]byte{0x04, 0x04, 1, 2, 3, 4}, &RawValue{0, 4, false, []byte{1, 2, 3, 4}, []byte{4, 4, 1, 2, 3, 4}}},
+ {[]byte{0x30, 0x03, 0x81, 0x01, 0x01}, &TestContextSpecificTags{1}},
+ {[]byte{0x30, 0x08, 0xa1, 0x03, 0x02, 0x01, 0x01, 0x02, 0x01, 0x02}, &TestContextSpecificTags2{1, 2}},
+ {[]byte{0x01, 0x01, 0x00}, newBool(false)},
+ {[]byte{0x01, 0x01, 0x01}, newBool(true)},
+ {[]byte{0x30, 0x0b, 0x13, 0x03, 0x66, 0x6f, 0x6f, 0x02, 0x01, 0x22, 0x02, 0x01, 0x33}, &TestElementsAfterString{"foo", 0x22, 0x33}},
+}
+
+func TestUnmarshal(t *testing.T) {
+ for i, test := range unmarshalTestData {
+ pv := reflect.New(reflect.TypeOf(test.out).Elem())
+ val := pv.Interface()
+ _, err := Unmarshal(test.in, val)
+ if err != nil {
+ t.Errorf("Unmarshal failed at index %d %v", i, err)
+ }
+ if !reflect.DeepEqual(val, test.out) {
+ t.Errorf("#%d:\nhave %#v\nwant %#v", i, val, test.out)
+ }
+ }
+}
+
+type Certificate struct {
+ TBSCertificate TBSCertificate
+ SignatureAlgorithm AlgorithmIdentifier
+ SignatureValue BitString
+}
+
+type TBSCertificate struct {
+ Version int `asn1:"optional,explicit,default:0,tag:0"`
+ SerialNumber RawValue
+ SignatureAlgorithm AlgorithmIdentifier
+ Issuer RDNSequence
+ Validity Validity
+ Subject RDNSequence
+ PublicKey PublicKeyInfo
+}
+
+type AlgorithmIdentifier struct {
+ Algorithm ObjectIdentifier
+}
+
+type RDNSequence []RelativeDistinguishedNameSET
+
+type RelativeDistinguishedNameSET []AttributeTypeAndValue
+
+type AttributeTypeAndValue struct {
+ Type ObjectIdentifier
+ Value interface{}
+}
+
+type Validity struct {
+ NotBefore, NotAfter *time.Time
+}
+
+type PublicKeyInfo struct {
+ Algorithm AlgorithmIdentifier
+ PublicKey BitString
+}
+
+func TestCertificate(t *testing.T) {
+ // This is a minimal, self-signed certificate that should parse correctly.
+ var cert Certificate
+ if _, err := Unmarshal(derEncodedSelfSignedCertBytes, &cert); err != nil {
+ t.Errorf("Unmarshal failed: %v", err)
+ }
+ if !reflect.DeepEqual(cert, derEncodedSelfSignedCert) {
+ t.Errorf("Bad result:\ngot: %+v\nwant: %+v", cert, derEncodedSelfSignedCert)
+ }
+}
+
+func TestCertificateWithNUL(t *testing.T) {
+ // This is the paypal NUL-hack certificate. It should fail to parse because
+ // NUL isn't a permitted character in a PrintableString.
+
+ var cert Certificate
+ if _, err := Unmarshal(derEncodedPaypalNULCertBytes, &cert); err == nil {
+ t.Error("Unmarshal succeeded, should not have")
+ }
+}
+
+type rawStructTest struct {
+ Raw RawContent
+ A int
+}
+
+func TestRawStructs(t *testing.T) {
+ var s rawStructTest
+ input := []byte{0x30, 0x03, 0x02, 0x01, 0x50}
+
+ rest, err := Unmarshal(input, &s)
+ if len(rest) != 0 {
+ t.Errorf("incomplete parse: %x", rest)
+ return
+ }
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ if s.A != 0x50 {
+ t.Errorf("bad value for A: got %d want %d", s.A, 0x50)
+ }
+ if bytes.Compare([]byte(s.Raw), input) != 0 {
+ t.Errorf("bad value for Raw: got %x want %x", s.Raw, input)
+ }
+}
+
+var derEncodedSelfSignedCert = Certificate{
+ TBSCertificate: TBSCertificate{
+ Version: 0,
+ SerialNumber: RawValue{Class: 0, Tag: 2, IsCompound: false, Bytes: []uint8{0x0, 0x8c, 0xc3, 0x37, 0x92, 0x10, 0xec, 0x2c, 0x98}, FullBytes: []byte{2, 9, 0x0, 0x8c, 0xc3, 0x37, 0x92, 0x10, 0xec, 0x2c, 0x98}},
+ SignatureAlgorithm: AlgorithmIdentifier{Algorithm: ObjectIdentifier{1, 2, 840, 113549, 1, 1, 5}},
+ Issuer: RDNSequence{
+ RelativeDistinguishedNameSET{AttributeTypeAndValue{Type: ObjectIdentifier{2, 5, 4, 6}, Value: "XX"}},
+ RelativeDistinguishedNameSET{AttributeTypeAndValue{Type: ObjectIdentifier{2, 5, 4, 8}, Value: "Some-State"}},
+ RelativeDistinguishedNameSET{AttributeTypeAndValue{Type: ObjectIdentifier{2, 5, 4, 7}, Value: "City"}},
+ RelativeDistinguishedNameSET{AttributeTypeAndValue{Type: ObjectIdentifier{2, 5, 4, 10}, Value: "Internet Widgits Pty Ltd"}},
+ RelativeDistinguishedNameSET{AttributeTypeAndValue{Type: ObjectIdentifier{2, 5, 4, 3}, Value: "false.example.com"}},
+ RelativeDistinguishedNameSET{AttributeTypeAndValue{Type: ObjectIdentifier{1, 2, 840, 113549, 1, 9, 1}, Value: "false@example.com"}},
+ },
+ Validity: Validity{NotBefore: &time.Time{Year: 2009, Month: 10, Day: 8, Hour: 0, Minute: 25, Second: 53, ZoneOffset: 0, Zone: "UTC"}, NotAfter: &time.Time{Year: 2010, Month: 10, Day: 8, Hour: 0, Minute: 25, Second: 53, ZoneOffset: 0, Zone: "UTC"}},
+ Subject: RDNSequence{
+ RelativeDistinguishedNameSET{AttributeTypeAndValue{Type: ObjectIdentifier{2, 5, 4, 6}, Value: "XX"}},
+ RelativeDistinguishedNameSET{AttributeTypeAndValue{Type: ObjectIdentifier{2, 5, 4, 8}, Value: "Some-State"}},
+ RelativeDistinguishedNameSET{AttributeTypeAndValue{Type: ObjectIdentifier{2, 5, 4, 7}, Value: "City"}},
+ RelativeDistinguishedNameSET{AttributeTypeAndValue{Type: ObjectIdentifier{2, 5, 4, 10}, Value: "Internet Widgits Pty Ltd"}},
+ RelativeDistinguishedNameSET{AttributeTypeAndValue{Type: ObjectIdentifier{2, 5, 4, 3}, Value: "false.example.com"}},
+ RelativeDistinguishedNameSET{AttributeTypeAndValue{Type: ObjectIdentifier{1, 2, 840, 113549, 1, 9, 1}, Value: "false@example.com"}},
+ },
+ PublicKey: PublicKeyInfo{
+ Algorithm: AlgorithmIdentifier{Algorithm: ObjectIdentifier{1, 2, 840, 113549, 1, 1, 1}},
+ PublicKey: BitString{
+ Bytes: []uint8{
+ 0x30, 0x48, 0x2, 0x41, 0x0, 0xcd, 0xb7,
+ 0x63, 0x9c, 0x32, 0x78, 0xf0, 0x6, 0xaa, 0x27, 0x7f, 0x6e, 0xaf, 0x42,
+ 0x90, 0x2b, 0x59, 0x2d, 0x8c, 0xbc, 0xbe, 0x38, 0xa1, 0xc9, 0x2b, 0xa4,
+ 0x69, 0x5a, 0x33, 0x1b, 0x1d, 0xea, 0xde, 0xad, 0xd8, 0xe9, 0xa5, 0xc2,
+ 0x7e, 0x8c, 0x4c, 0x2f, 0xd0, 0xa8, 0x88, 0x96, 0x57, 0x72, 0x2a, 0x4f,
+ 0x2a, 0xf7, 0x58, 0x9c, 0xf2, 0xc7, 0x70, 0x45, 0xdc, 0x8f, 0xde, 0xec,
+ 0x35, 0x7d, 0x2, 0x3, 0x1, 0x0, 0x1,
+ },
+ BitLength: 592,
+ },
+ },
+ },
+ SignatureAlgorithm: AlgorithmIdentifier{Algorithm: ObjectIdentifier{1, 2, 840, 113549, 1, 1, 5}},
+ SignatureValue: BitString{
+ Bytes: []uint8{
+ 0xa6, 0x7b, 0x6, 0xec, 0x5e, 0xce,
+ 0x92, 0x77, 0x2c, 0xa4, 0x13, 0xcb, 0xa3, 0xca, 0x12, 0x56, 0x8f, 0xdc, 0x6c,
+ 0x7b, 0x45, 0x11, 0xcd, 0x40, 0xa7, 0xf6, 0x59, 0x98, 0x4, 0x2, 0xdf, 0x2b,
+ 0x99, 0x8b, 0xb9, 0xa4, 0xa8, 0xcb, 0xeb, 0x34, 0xc0, 0xf0, 0xa7, 0x8c, 0xf8,
+ 0xd9, 0x1e, 0xde, 0x14, 0xa5, 0xed, 0x76, 0xbf, 0x11, 0x6f, 0xe3, 0x60, 0xaa,
+ 0xfa, 0x88, 0x21, 0x49, 0x4, 0x35,
+ },
+ BitLength: 512,
+ },
+}
+
+var derEncodedSelfSignedCertBytes = []byte{
+ 0x30, 0x82, 0x02, 0x18, 0x30,
+ 0x82, 0x01, 0xc2, 0x02, 0x09, 0x00, 0x8c, 0xc3, 0x37, 0x92, 0x10, 0xec, 0x2c,
+ 0x98, 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01,
+ 0x05, 0x05, 0x00, 0x30, 0x81, 0x92, 0x31, 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55,
+ 0x04, 0x06, 0x13, 0x02, 0x58, 0x58, 0x31, 0x13, 0x30, 0x11, 0x06, 0x03, 0x55,
+ 0x04, 0x08, 0x13, 0x0a, 0x53, 0x6f, 0x6d, 0x65, 0x2d, 0x53, 0x74, 0x61, 0x74,
+ 0x65, 0x31, 0x0d, 0x30, 0x0b, 0x06, 0x03, 0x55, 0x04, 0x07, 0x13, 0x04, 0x43,
+ 0x69, 0x74, 0x79, 0x31, 0x21, 0x30, 0x1f, 0x06, 0x03, 0x55, 0x04, 0x0a, 0x13,
+ 0x18, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x20, 0x57, 0x69, 0x64,
+ 0x67, 0x69, 0x74, 0x73, 0x20, 0x50, 0x74, 0x79, 0x20, 0x4c, 0x74, 0x64, 0x31,
+ 0x1a, 0x30, 0x18, 0x06, 0x03, 0x55, 0x04, 0x03, 0x13, 0x11, 0x66, 0x61, 0x6c,
+ 0x73, 0x65, 0x2e, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x2e, 0x63, 0x6f,
+ 0x6d, 0x31, 0x20, 0x30, 0x1e, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d,
+ 0x01, 0x09, 0x01, 0x16, 0x11, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x40, 0x65, 0x78,
+ 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x30, 0x1e, 0x17, 0x0d,
+ 0x30, 0x39, 0x31, 0x30, 0x30, 0x38, 0x30, 0x30, 0x32, 0x35, 0x35, 0x33, 0x5a,
+ 0x17, 0x0d, 0x31, 0x30, 0x31, 0x30, 0x30, 0x38, 0x30, 0x30, 0x32, 0x35, 0x35,
+ 0x33, 0x5a, 0x30, 0x81, 0x92, 0x31, 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, 0x04,
+ 0x06, 0x13, 0x02, 0x58, 0x58, 0x31, 0x13, 0x30, 0x11, 0x06, 0x03, 0x55, 0x04,
+ 0x08, 0x13, 0x0a, 0x53, 0x6f, 0x6d, 0x65, 0x2d, 0x53, 0x74, 0x61, 0x74, 0x65,
+ 0x31, 0x0d, 0x30, 0x0b, 0x06, 0x03, 0x55, 0x04, 0x07, 0x13, 0x04, 0x43, 0x69,
+ 0x74, 0x79, 0x31, 0x21, 0x30, 0x1f, 0x06, 0x03, 0x55, 0x04, 0x0a, 0x13, 0x18,
+ 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x20, 0x57, 0x69, 0x64, 0x67,
+ 0x69, 0x74, 0x73, 0x20, 0x50, 0x74, 0x79, 0x20, 0x4c, 0x74, 0x64, 0x31, 0x1a,
+ 0x30, 0x18, 0x06, 0x03, 0x55, 0x04, 0x03, 0x13, 0x11, 0x66, 0x61, 0x6c, 0x73,
+ 0x65, 0x2e, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d,
+ 0x31, 0x20, 0x30, 0x1e, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01,
+ 0x09, 0x01, 0x16, 0x11, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x40, 0x65, 0x78, 0x61,
+ 0x6d, 0x70, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x30, 0x5c, 0x30, 0x0d, 0x06,
+ 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x01, 0x05, 0x00, 0x03,
+ 0x4b, 0x00, 0x30, 0x48, 0x02, 0x41, 0x00, 0xcd, 0xb7, 0x63, 0x9c, 0x32, 0x78,
+ 0xf0, 0x06, 0xaa, 0x27, 0x7f, 0x6e, 0xaf, 0x42, 0x90, 0x2b, 0x59, 0x2d, 0x8c,
+ 0xbc, 0xbe, 0x38, 0xa1, 0xc9, 0x2b, 0xa4, 0x69, 0x5a, 0x33, 0x1b, 0x1d, 0xea,
+ 0xde, 0xad, 0xd8, 0xe9, 0xa5, 0xc2, 0x7e, 0x8c, 0x4c, 0x2f, 0xd0, 0xa8, 0x88,
+ 0x96, 0x57, 0x72, 0x2a, 0x4f, 0x2a, 0xf7, 0x58, 0x9c, 0xf2, 0xc7, 0x70, 0x45,
+ 0xdc, 0x8f, 0xde, 0xec, 0x35, 0x7d, 0x02, 0x03, 0x01, 0x00, 0x01, 0x30, 0x0d,
+ 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x05, 0x05, 0x00,
+ 0x03, 0x41, 0x00, 0xa6, 0x7b, 0x06, 0xec, 0x5e, 0xce, 0x92, 0x77, 0x2c, 0xa4,
+ 0x13, 0xcb, 0xa3, 0xca, 0x12, 0x56, 0x8f, 0xdc, 0x6c, 0x7b, 0x45, 0x11, 0xcd,
+ 0x40, 0xa7, 0xf6, 0x59, 0x98, 0x04, 0x02, 0xdf, 0x2b, 0x99, 0x8b, 0xb9, 0xa4,
+ 0xa8, 0xcb, 0xeb, 0x34, 0xc0, 0xf0, 0xa7, 0x8c, 0xf8, 0xd9, 0x1e, 0xde, 0x14,
+ 0xa5, 0xed, 0x76, 0xbf, 0x11, 0x6f, 0xe3, 0x60, 0xaa, 0xfa, 0x88, 0x21, 0x49,
+ 0x04, 0x35,
+}
+
+var derEncodedPaypalNULCertBytes = []byte{
+ 0x30, 0x82, 0x06, 0x44, 0x30,
+ 0x82, 0x05, 0xad, 0xa0, 0x03, 0x02, 0x01, 0x02, 0x02, 0x03, 0x00, 0xf0, 0x9b,
+ 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x05,
+ 0x05, 0x00, 0x30, 0x82, 0x01, 0x12, 0x31, 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55,
+ 0x04, 0x06, 0x13, 0x02, 0x45, 0x53, 0x31, 0x12, 0x30, 0x10, 0x06, 0x03, 0x55,
+ 0x04, 0x08, 0x13, 0x09, 0x42, 0x61, 0x72, 0x63, 0x65, 0x6c, 0x6f, 0x6e, 0x61,
+ 0x31, 0x12, 0x30, 0x10, 0x06, 0x03, 0x55, 0x04, 0x07, 0x13, 0x09, 0x42, 0x61,
+ 0x72, 0x63, 0x65, 0x6c, 0x6f, 0x6e, 0x61, 0x31, 0x29, 0x30, 0x27, 0x06, 0x03,
+ 0x55, 0x04, 0x0a, 0x13, 0x20, 0x49, 0x50, 0x53, 0x20, 0x43, 0x65, 0x72, 0x74,
+ 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x41, 0x75, 0x74,
+ 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x20, 0x73, 0x2e, 0x6c, 0x2e, 0x31, 0x2e,
+ 0x30, 0x2c, 0x06, 0x03, 0x55, 0x04, 0x0a, 0x14, 0x25, 0x67, 0x65, 0x6e, 0x65,
+ 0x72, 0x61, 0x6c, 0x40, 0x69, 0x70, 0x73, 0x63, 0x61, 0x2e, 0x63, 0x6f, 0x6d,
+ 0x20, 0x43, 0x2e, 0x49, 0x2e, 0x46, 0x2e, 0x20, 0x20, 0x42, 0x2d, 0x42, 0x36,
+ 0x32, 0x32, 0x31, 0x30, 0x36, 0x39, 0x35, 0x31, 0x2e, 0x30, 0x2c, 0x06, 0x03,
+ 0x55, 0x04, 0x0b, 0x13, 0x25, 0x69, 0x70, 0x73, 0x43, 0x41, 0x20, 0x43, 0x4c,
+ 0x41, 0x53, 0x45, 0x41, 0x31, 0x20, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69,
+ 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72,
+ 0x69, 0x74, 0x79, 0x31, 0x2e, 0x30, 0x2c, 0x06, 0x03, 0x55, 0x04, 0x03, 0x13,
+ 0x25, 0x69, 0x70, 0x73, 0x43, 0x41, 0x20, 0x43, 0x4c, 0x41, 0x53, 0x45, 0x41,
+ 0x31, 0x20, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x20, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x31,
+ 0x20, 0x30, 0x1e, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09,
+ 0x01, 0x16, 0x11, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x6c, 0x40, 0x69, 0x70,
+ 0x73, 0x63, 0x61, 0x2e, 0x63, 0x6f, 0x6d, 0x30, 0x1e, 0x17, 0x0d, 0x30, 0x39,
+ 0x30, 0x32, 0x32, 0x34, 0x32, 0x33, 0x30, 0x34, 0x31, 0x37, 0x5a, 0x17, 0x0d,
+ 0x31, 0x31, 0x30, 0x32, 0x32, 0x34, 0x32, 0x33, 0x30, 0x34, 0x31, 0x37, 0x5a,
+ 0x30, 0x81, 0x94, 0x31, 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, 0x13,
+ 0x02, 0x55, 0x53, 0x31, 0x13, 0x30, 0x11, 0x06, 0x03, 0x55, 0x04, 0x08, 0x13,
+ 0x0a, 0x43, 0x61, 0x6c, 0x69, 0x66, 0x6f, 0x72, 0x6e, 0x69, 0x61, 0x31, 0x16,
+ 0x30, 0x14, 0x06, 0x03, 0x55, 0x04, 0x07, 0x13, 0x0d, 0x53, 0x61, 0x6e, 0x20,
+ 0x46, 0x72, 0x61, 0x6e, 0x63, 0x69, 0x73, 0x63, 0x6f, 0x31, 0x11, 0x30, 0x0f,
+ 0x06, 0x03, 0x55, 0x04, 0x0a, 0x13, 0x08, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69,
+ 0x74, 0x79, 0x31, 0x14, 0x30, 0x12, 0x06, 0x03, 0x55, 0x04, 0x0b, 0x13, 0x0b,
+ 0x53, 0x65, 0x63, 0x75, 0x72, 0x65, 0x20, 0x55, 0x6e, 0x69, 0x74, 0x31, 0x2f,
+ 0x30, 0x2d, 0x06, 0x03, 0x55, 0x04, 0x03, 0x13, 0x26, 0x77, 0x77, 0x77, 0x2e,
+ 0x70, 0x61, 0x79, 0x70, 0x61, 0x6c, 0x2e, 0x63, 0x6f, 0x6d, 0x00, 0x73, 0x73,
+ 0x6c, 0x2e, 0x73, 0x65, 0x63, 0x75, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x6e, 0x65,
+ 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x63, 0x63, 0x30, 0x81, 0x9f, 0x30, 0x0d,
+ 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x01, 0x05, 0x00,
+ 0x03, 0x81, 0x8d, 0x00, 0x30, 0x81, 0x89, 0x02, 0x81, 0x81, 0x00, 0xd2, 0x69,
+ 0xfa, 0x6f, 0x3a, 0x00, 0xb4, 0x21, 0x1b, 0xc8, 0xb1, 0x02, 0xd7, 0x3f, 0x19,
+ 0xb2, 0xc4, 0x6d, 0xb4, 0x54, 0xf8, 0x8b, 0x8a, 0xcc, 0xdb, 0x72, 0xc2, 0x9e,
+ 0x3c, 0x60, 0xb9, 0xc6, 0x91, 0x3d, 0x82, 0xb7, 0x7d, 0x99, 0xff, 0xd1, 0x29,
+ 0x84, 0xc1, 0x73, 0x53, 0x9c, 0x82, 0xdd, 0xfc, 0x24, 0x8c, 0x77, 0xd5, 0x41,
+ 0xf3, 0xe8, 0x1e, 0x42, 0xa1, 0xad, 0x2d, 0x9e, 0xff, 0x5b, 0x10, 0x26, 0xce,
+ 0x9d, 0x57, 0x17, 0x73, 0x16, 0x23, 0x38, 0xc8, 0xd6, 0xf1, 0xba, 0xa3, 0x96,
+ 0x5b, 0x16, 0x67, 0x4a, 0x4f, 0x73, 0x97, 0x3a, 0x4d, 0x14, 0xa4, 0xf4, 0xe2,
+ 0x3f, 0x8b, 0x05, 0x83, 0x42, 0xd1, 0xd0, 0xdc, 0x2f, 0x7a, 0xe5, 0xb6, 0x10,
+ 0xb2, 0x11, 0xc0, 0xdc, 0x21, 0x2a, 0x90, 0xff, 0xae, 0x97, 0x71, 0x5a, 0x49,
+ 0x81, 0xac, 0x40, 0xf3, 0x3b, 0xb8, 0x59, 0xb2, 0x4f, 0x02, 0x03, 0x01, 0x00,
+ 0x01, 0xa3, 0x82, 0x03, 0x21, 0x30, 0x82, 0x03, 0x1d, 0x30, 0x09, 0x06, 0x03,
+ 0x55, 0x1d, 0x13, 0x04, 0x02, 0x30, 0x00, 0x30, 0x11, 0x06, 0x09, 0x60, 0x86,
+ 0x48, 0x01, 0x86, 0xf8, 0x42, 0x01, 0x01, 0x04, 0x04, 0x03, 0x02, 0x06, 0x40,
+ 0x30, 0x0b, 0x06, 0x03, 0x55, 0x1d, 0x0f, 0x04, 0x04, 0x03, 0x02, 0x03, 0xf8,
+ 0x30, 0x13, 0x06, 0x03, 0x55, 0x1d, 0x25, 0x04, 0x0c, 0x30, 0x0a, 0x06, 0x08,
+ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x03, 0x01, 0x30, 0x1d, 0x06, 0x03, 0x55,
+ 0x1d, 0x0e, 0x04, 0x16, 0x04, 0x14, 0x61, 0x8f, 0x61, 0x34, 0x43, 0x55, 0x14,
+ 0x7f, 0x27, 0x09, 0xce, 0x4c, 0x8b, 0xea, 0x9b, 0x7b, 0x19, 0x25, 0xbc, 0x6e,
+ 0x30, 0x1f, 0x06, 0x03, 0x55, 0x1d, 0x23, 0x04, 0x18, 0x30, 0x16, 0x80, 0x14,
+ 0x0e, 0x07, 0x60, 0xd4, 0x39, 0xc9, 0x1b, 0x5b, 0x5d, 0x90, 0x7b, 0x23, 0xc8,
+ 0xd2, 0x34, 0x9d, 0x4a, 0x9a, 0x46, 0x39, 0x30, 0x09, 0x06, 0x03, 0x55, 0x1d,
+ 0x11, 0x04, 0x02, 0x30, 0x00, 0x30, 0x1c, 0x06, 0x03, 0x55, 0x1d, 0x12, 0x04,
+ 0x15, 0x30, 0x13, 0x81, 0x11, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x6c, 0x40,
+ 0x69, 0x70, 0x73, 0x63, 0x61, 0x2e, 0x63, 0x6f, 0x6d, 0x30, 0x72, 0x06, 0x09,
+ 0x60, 0x86, 0x48, 0x01, 0x86, 0xf8, 0x42, 0x01, 0x0d, 0x04, 0x65, 0x16, 0x63,
+ 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x20,
+ 0x49, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x4e,
+ 0x4f, 0x54, 0x20, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x45, 0x44, 0x2e,
+ 0x20, 0x43, 0x4c, 0x41, 0x53, 0x45, 0x41, 0x31, 0x20, 0x53, 0x65, 0x72, 0x76,
+ 0x65, 0x72, 0x20, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74,
+ 0x65, 0x20, 0x69, 0x73, 0x73, 0x75, 0x65, 0x64, 0x20, 0x62, 0x79, 0x20, 0x68,
+ 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x69, 0x70,
+ 0x73, 0x63, 0x61, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x30, 0x2f, 0x06, 0x09, 0x60,
+ 0x86, 0x48, 0x01, 0x86, 0xf8, 0x42, 0x01, 0x02, 0x04, 0x22, 0x16, 0x20, 0x68,
+ 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x69, 0x70,
+ 0x73, 0x63, 0x61, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x69, 0x70, 0x73, 0x63, 0x61,
+ 0x32, 0x30, 0x30, 0x32, 0x2f, 0x30, 0x43, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01,
+ 0x86, 0xf8, 0x42, 0x01, 0x04, 0x04, 0x36, 0x16, 0x34, 0x68, 0x74, 0x74, 0x70,
+ 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x69, 0x70, 0x73, 0x63, 0x61,
+ 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x69, 0x70, 0x73, 0x63, 0x61, 0x32, 0x30, 0x30,
+ 0x32, 0x2f, 0x69, 0x70, 0x73, 0x63, 0x61, 0x32, 0x30, 0x30, 0x32, 0x43, 0x4c,
+ 0x41, 0x53, 0x45, 0x41, 0x31, 0x2e, 0x63, 0x72, 0x6c, 0x30, 0x46, 0x06, 0x09,
+ 0x60, 0x86, 0x48, 0x01, 0x86, 0xf8, 0x42, 0x01, 0x03, 0x04, 0x39, 0x16, 0x37,
+ 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x69,
+ 0x70, 0x73, 0x63, 0x61, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x69, 0x70, 0x73, 0x63,
+ 0x61, 0x32, 0x30, 0x30, 0x32, 0x2f, 0x72, 0x65, 0x76, 0x6f, 0x63, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x43, 0x4c, 0x41, 0x53, 0x45, 0x41, 0x31, 0x2e, 0x68, 0x74,
+ 0x6d, 0x6c, 0x3f, 0x30, 0x43, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x86, 0xf8,
+ 0x42, 0x01, 0x07, 0x04, 0x36, 0x16, 0x34, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a,
+ 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x69, 0x70, 0x73, 0x63, 0x61, 0x2e, 0x63,
+ 0x6f, 0x6d, 0x2f, 0x69, 0x70, 0x73, 0x63, 0x61, 0x32, 0x30, 0x30, 0x32, 0x2f,
+ 0x72, 0x65, 0x6e, 0x65, 0x77, 0x61, 0x6c, 0x43, 0x4c, 0x41, 0x53, 0x45, 0x41,
+ 0x31, 0x2e, 0x68, 0x74, 0x6d, 0x6c, 0x3f, 0x30, 0x41, 0x06, 0x09, 0x60, 0x86,
+ 0x48, 0x01, 0x86, 0xf8, 0x42, 0x01, 0x08, 0x04, 0x34, 0x16, 0x32, 0x68, 0x74,
+ 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x69, 0x70, 0x73,
+ 0x63, 0x61, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x69, 0x70, 0x73, 0x63, 0x61, 0x32,
+ 0x30, 0x30, 0x32, 0x2f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x4c, 0x41,
+ 0x53, 0x45, 0x41, 0x31, 0x2e, 0x68, 0x74, 0x6d, 0x6c, 0x30, 0x81, 0x83, 0x06,
+ 0x03, 0x55, 0x1d, 0x1f, 0x04, 0x7c, 0x30, 0x7a, 0x30, 0x39, 0xa0, 0x37, 0xa0,
+ 0x35, 0x86, 0x33, 0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77,
+ 0x2e, 0x69, 0x70, 0x73, 0x63, 0x61, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x69, 0x70,
+ 0x73, 0x63, 0x61, 0x32, 0x30, 0x30, 0x32, 0x2f, 0x69, 0x70, 0x73, 0x63, 0x61,
+ 0x32, 0x30, 0x30, 0x32, 0x43, 0x4c, 0x41, 0x53, 0x45, 0x41, 0x31, 0x2e, 0x63,
+ 0x72, 0x6c, 0x30, 0x3d, 0xa0, 0x3b, 0xa0, 0x39, 0x86, 0x37, 0x68, 0x74, 0x74,
+ 0x70, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x62, 0x61, 0x63, 0x6b, 0x2e, 0x69,
+ 0x70, 0x73, 0x63, 0x61, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x69, 0x70, 0x73, 0x63,
+ 0x61, 0x32, 0x30, 0x30, 0x32, 0x2f, 0x69, 0x70, 0x73, 0x63, 0x61, 0x32, 0x30,
+ 0x30, 0x32, 0x43, 0x4c, 0x41, 0x53, 0x45, 0x41, 0x31, 0x2e, 0x63, 0x72, 0x6c,
+ 0x30, 0x32, 0x06, 0x08, 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x01, 0x01, 0x04,
+ 0x26, 0x30, 0x24, 0x30, 0x22, 0x06, 0x08, 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07,
+ 0x30, 0x01, 0x86, 0x16, 0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f, 0x2f, 0x6f, 0x63,
+ 0x73, 0x70, 0x2e, 0x69, 0x70, 0x73, 0x63, 0x61, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
+ 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x05,
+ 0x05, 0x00, 0x03, 0x81, 0x81, 0x00, 0x68, 0xee, 0x79, 0x97, 0x97, 0xdd, 0x3b,
+ 0xef, 0x16, 0x6a, 0x06, 0xf2, 0x14, 0x9a, 0x6e, 0xcd, 0x9e, 0x12, 0xf7, 0xaa,
+ 0x83, 0x10, 0xbd, 0xd1, 0x7c, 0x98, 0xfa, 0xc7, 0xae, 0xd4, 0x0e, 0x2c, 0x9e,
+ 0x38, 0x05, 0x9d, 0x52, 0x60, 0xa9, 0x99, 0x0a, 0x81, 0xb4, 0x98, 0x90, 0x1d,
+ 0xae, 0xbb, 0x4a, 0xd7, 0xb9, 0xdc, 0x88, 0x9e, 0x37, 0x78, 0x41, 0x5b, 0xf7,
+ 0x82, 0xa5, 0xf2, 0xba, 0x41, 0x25, 0x5a, 0x90, 0x1a, 0x1e, 0x45, 0x38, 0xa1,
+ 0x52, 0x58, 0x75, 0x94, 0x26, 0x44, 0xfb, 0x20, 0x07, 0xba, 0x44, 0xcc, 0xe5,
+ 0x4a, 0x2d, 0x72, 0x3f, 0x98, 0x47, 0xf6, 0x26, 0xdc, 0x05, 0x46, 0x05, 0x07,
+ 0x63, 0x21, 0xab, 0x46, 0x9b, 0x9c, 0x78, 0xd5, 0x54, 0x5b, 0x3d, 0x0c, 0x1e,
+ 0xc8, 0x64, 0x8c, 0xb5, 0x50, 0x23, 0x82, 0x6f, 0xdb, 0xb8, 0x22, 0x1c, 0x43,
+ 0x96, 0x07, 0xa8, 0xbb,
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package asn1
+
+import (
+ "reflect"
+ "strconv"
+ "strings"
+)
+
+// ASN.1 objects have metadata preceding them:
+// the tag: the type of the object
+// a flag denoting if this object is compound or not
+// the class type: the namespace of the tag
+// the length of the object, in bytes
+
+// Here are some standard tags and classes
+
+const (
+ tagBoolean = 1
+ tagInteger = 2
+ tagBitString = 3
+ tagOctetString = 4
+ tagOID = 6
+ tagEnum = 10
+ tagUTF8String = 12
+ tagSequence = 16
+ tagSet = 17
+ tagPrintableString = 19
+ tagT61String = 20
+ tagIA5String = 22
+ tagUTCTime = 23
+ tagGeneralizedTime = 24
+ tagGeneralString = 27
+)
+
+const (
+ classUniversal = 0
+ classApplication = 1
+ classContextSpecific = 2
+ classPrivate = 3
+)
+
+type tagAndLength struct {
+ class, tag, length int
+ isCompound bool
+}
+
+// ASN.1 has IMPLICIT and EXPLICIT tags, which can be translated as "instead
+// of" and "in addition to". When not specified, every primitive type has a
+// default tag in the UNIVERSAL class.
+//
+// For example: a BIT STRING is tagged [UNIVERSAL 3] by default (although ASN.1
+// doesn't actually have a UNIVERSAL keyword). However, tagging it [IMPLICIT
+// CONTEXT-SPECIFIC 42] means that the default tag is replaced by another.
+//
+// On the other hand, if it said [EXPLICIT CONTEXT-SPECIFIC 10], then an
+// /additional/ tag would wrap the default tag. This explicit tag will have the
+// compound flag set.
+//
+// (This is used in order to remove ambiguity with optional elements.)
+//
+// You can layer EXPLICIT and IMPLICIT tags to an arbitrary depth; however, we
+// don't support that here. We support a single layer of EXPLICIT or IMPLICIT
+// tagging with tag strings on the fields of a structure.
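// Illustrative sketch (editor's annotation, not part of the patch): for the
// INTEGER 5, whose default encoding is 02 01 05,
//
//     [IMPLICIT CONTEXT-SPECIFIC 0] replaces the tag:      80 01 05
//     [EXPLICIT CONTEXT-SPECIFIC 0] wraps the default tag: a0 03 02 01 05
//
// The explicit wrapper 0xa0 has the compound bit (0x20) set, which is what
// the explicit-tag branch of parseField checks for.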
+
+// fieldParameters is the parsed representation of a tag string from a structure field.
+type fieldParameters struct {
+ optional bool // true iff the field is OPTIONAL
+ explicit bool // true iff an EXPLICIT tag is in use.
+ application bool // true iff an APPLICATION tag is in use.
+ defaultValue *int64 // a default value for INTEGER typed fields (maybe nil).
+ tag *int // the EXPLICIT or IMPLICIT tag (maybe nil).
+ stringType int // the string tag to use when marshaling.
+ set bool // true iff this should be encoded as a SET
+
+ // Invariants:
+ // if explicit is set, tag is non-nil.
+}
+
+// Given a tag string with the format specified in the package comment,
+// parseFieldParameters will parse it into a fieldParameters structure,
+// ignoring unknown parts of the string.
+func parseFieldParameters(str string) (ret fieldParameters) {
+ for _, part := range strings.Split(str, ",") {
+ switch {
+ case part == "optional":
+ ret.optional = true
+ case part == "explicit":
+ ret.explicit = true
+ if ret.tag == nil {
+ ret.tag = new(int)
+ }
+ case part == "ia5":
+ ret.stringType = tagIA5String
+ case part == "printable":
+ ret.stringType = tagPrintableString
+ case strings.HasPrefix(part, "default:"):
+ i, err := strconv.Atoi64(part[8:])
+ if err == nil {
+ ret.defaultValue = new(int64)
+ *ret.defaultValue = i
+ }
+ case strings.HasPrefix(part, "tag:"):
+ i, err := strconv.Atoi(part[4:])
+ if err == nil {
+ ret.tag = new(int)
+ *ret.tag = i
+ }
+ case part == "set":
+ ret.set = true
+ case part == "application":
+ ret.application = true
+ if ret.tag == nil {
+ ret.tag = new(int)
+ }
+ }
+ }
+ return
+}
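// Illustrative sketch (editor's annotation, not part of the patch): a helper
// of this shape could sit in the package's tests, mirroring the
// parseFieldParametersTestData table.
func fieldParametersSketch() fieldParameters {
    // "optional,explicit,default:0,tag:0" is the tag string used on the
    // Version field of TBSCertificate in the tests.
    p := parseFieldParameters("optional,explicit,default:0,tag:0")
    // p.optional and p.explicit are true, *p.defaultValue == 0 and *p.tag == 0.
    return p
}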
+
+// Given a reflected Go type, getUniversalType returns the default tag number
+// and expected compound flag.
+func getUniversalType(t reflect.Type) (tagNumber int, isCompound, ok bool) {
+ switch t {
+ case objectIdentifierType:
+ return tagOID, false, true
+ case bitStringType:
+ return tagBitString, false, true
+ case timeType:
+ return tagUTCTime, false, true
+ case enumeratedType:
+ return tagEnum, false, true
+ case bigIntType:
+ return tagInteger, false, true
+ }
+ switch t.Kind() {
+ case reflect.Bool:
+ return tagBoolean, false, true
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return tagInteger, false, true
+ case reflect.Struct:
+ return tagSequence, true, true
+ case reflect.Slice:
+ if t.Elem().Kind() == reflect.Uint8 {
+ return tagOctetString, false, true
+ }
+ if strings.HasSuffix(t.Name(), "SET") {
+ return tagSet, true, true
+ }
+ return tagSequence, true, true
+ case reflect.String:
+ return tagPrintableString, false, true
+ }
+ return 0, false, false
+}
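// Illustrative sketch (editor's annotation, not part of the patch): a few
// concrete mappings, as used by parseField and parseSequenceOf:
//
//     reflect.TypeOf(0)           -> tagInteger, not compound
//     reflect.TypeOf([]byte(nil)) -> tagOctetString, not compound
//     reflect.TypeOf("")          -> tagPrintableString, not compound
//     any struct type             -> tagSequence, compound
//     a slice type whose name ends in "SET" (such as
//     RelativeDistinguishedNameSET in the tests) -> tagSet, compound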
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package asn1
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "math/big"
+ "reflect"
+ "time"
+)
+
+// A forkableWriter is an in-memory buffer that can be
+// 'forked' to create new forkableWriters that bracket the
+// original. After
+// pre, post := w.fork();
+// the overall sequence of bytes represented is logically w+pre+post.
+type forkableWriter struct {
+ *bytes.Buffer
+ pre, post *forkableWriter
+}
+
+func newForkableWriter() *forkableWriter {
+ return &forkableWriter{bytes.NewBuffer(nil), nil, nil}
+}
+
+func (f *forkableWriter) fork() (pre, post *forkableWriter) {
+ if f.pre != nil || f.post != nil {
+ panic("have already forked")
+ }
+ f.pre = newForkableWriter()
+ f.post = newForkableWriter()
+ return f.pre, f.post
+}
+
+func (f *forkableWriter) Len() (l int) {
+ l += f.Buffer.Len()
+ if f.pre != nil {
+ l += f.pre.Len()
+ }
+ if f.post != nil {
+ l += f.post.Len()
+ }
+ return
+}
+
+func (f *forkableWriter) writeTo(out io.Writer) (n int, err error) {
+ n, err = out.Write(f.Bytes())
+ if err != nil {
+ return
+ }
+
+ var nn int
+
+ if f.pre != nil {
+ nn, err = f.pre.writeTo(out)
+ n += nn
+ if err != nil {
+ return
+ }
+ }
+
+ if f.post != nil {
+ nn, err = f.post.writeTo(out)
+ n += nn
+ }
+ return
+}
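// Illustrative sketch (editor's annotation, not part of the patch): how a
// caller inside this package can use fork to fill in a prefix only after the
// body has been written; the marshaling helpers below lean on the same ordering.
func forkSketch() []byte {
    w := newForkableWriter()
    header, body := w.fork()           // final byte order is w, then header, then body
    body.WriteString("payload")        // write the body first...
    header.WriteByte(byte(body.Len())) // ...then its length in front of it
    var buf bytes.Buffer
    w.writeTo(&buf) // buf now holds 0x07 followed by "payload"
    return buf.Bytes()
}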
+
+func marshalBase128Int(out *forkableWriter, n int64) (err error) {
+ if n == 0 {
+ err = out.WriteByte(0)
+ return
+ }
+
+ l := 0
+ for i := n; i > 0; i >>= 7 {
+ l++
+ }
+
+ for i := l - 1; i >= 0; i-- {
+ o := byte(n >> uint(i*7))
+ o &= 0x7f
+ if i != 0 {
+ o |= 0x80
+ }
+ err = out.WriteByte(o)
+ if err != nil {
+ return
+ }
+ }
+
+ return nil
+}
+
+func marshalInt64(out *forkableWriter, i int64) (err error) {
+ n := int64Length(i)
+
+ for ; n > 0; n-- {
+ err = out.WriteByte(byte(i >> uint((n-1)*8)))
+ if err != nil {
+ return
+ }
+ }
+
+ return nil
+}
+
+func int64Length(i int64) (numBytes int) {
+ numBytes = 1
+
+ for i > 127 {
+ numBytes++
+ i >>= 8
+ }
+
+ for i < -128 {
+ numBytes++
+ i >>= 8
+ }
+
+ return
+}
+
+func marshalBigInt(out *forkableWriter, n *big.Int) (err error) {
+ if n.Sign() < 0 {
+ // A negative number has to be converted to two's-complement
+ // form. So we'll subtract 1 and invert. If the
+ // most-significant-bit isn't set then we'll need to pad the
+ // beginning with 0xff in order to keep the number negative.
+ nMinus1 := new(big.Int).Neg(n)
+ nMinus1.Sub(nMinus1, bigOne)
+ bytes := nMinus1.Bytes()
+ for i := range bytes {
+ bytes[i] ^= 0xff
+ }
+ if len(bytes) == 0 || bytes[0]&0x80 == 0 {
+ err = out.WriteByte(0xff)
+ if err != nil {
+ return
+ }
+ }
+ _, err = out.Write(bytes)
+ } else if n.Sign() == 0 {
+// Zero is written as a single zero byte rather than as no bytes.
+ err = out.WriteByte(0x00)
+ } else {
+ bytes := n.Bytes()
+ if len(bytes) > 0 && bytes[0]&0x80 != 0 {
+ // We'll have to pad this with 0x00 in order to stop it
+ // looking like a negative number.
+ err = out.WriteByte(0)
+ if err != nil {
+ return
+ }
+ }
+ _, err = out.Write(bytes)
+ }
+ return
+}
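// Illustrative sketch (editor's annotation, not part of the patch): tracing
// marshalBigInt against the bigIntTests table in the tests above:
//
//     n = -256: -n - 1 = 255 -> bytes ff -> inverted 00 -> top bit clear, pad ff -> ff 00
//     n = 255:  bytes ff -> top bit set, pad with 00 -> 00 ff
//     n = 256:  bytes 01 00 -> top bit clear, no padding -> 01 00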
+
+func marshalLength(out *forkableWriter, i int) (err error) {
+ n := lengthLength(i)
+
+ for ; n > 0; n-- {
+ err = out.WriteByte(byte(i >> uint((n-1)*8)))
+ if err != nil {
+ return
+ }
+ }
+
+ return nil
+}
+
+func lengthLength(i int) (numBytes int) {
+ numBytes = 1
+ for i > 255 {
+ numBytes++
+ i >>= 8
+ }
+ return
+}
+
+func marshalTagAndLength(out *forkableWriter, t tagAndLength) (err error) {
+ b := uint8(t.class) << 6
+ if t.isCompound {
+ b |= 0x20
+ }
+ if t.tag >= 31 {
+ b |= 0x1f
+ err = out.WriteByte(b)
+ if err != nil {
+ return
+ }
+ err = marshalBase128Int(out, int64(t.tag))
+ if err != nil {
+ return
+ }
+ } else {
+ b |= uint8(t.tag)
+ err = out.WriteByte(b)
+ if err != nil {
+ return
+ }
+ }
+
+ if t.length >= 128 {
+ l := lengthLength(t.length)
+ err = out.WriteByte(0x80 | byte(l))
+ if err != nil {
+ return
+ }
+ err = marshalLength(out, t.length)
+ if err != nil {
+ return
+ }
+ } else {
+ err = out.WriteByte(byte(t.length))
+ if err != nil {
+ return
+ }
+ }
+
+ return nil
+}
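// Illustrative sketch (editor's annotation, not part of the patch): a
// UNIVERSAL SEQUENCE holding 536 content bytes is emitted as 0x30 (class 0,
// compound, tag 16) followed by the long-form length 0x82 0x02 0x18, the
// inverse of the parseTagAndLength walk-through earlier in this patch.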
+
+func marshalBitString(out *forkableWriter, b BitString) (err error) {
+ paddingBits := byte((8 - b.BitLength%8) % 8)
+ err = out.WriteByte(paddingBits)
+ if err != nil {
+ return
+ }
+ _, err = out.Write(b.Bytes)
+ return
+}
+
+func marshalObjectIdentifier(out *forkableWriter, oid []int) (err error) {
+ if len(oid) < 2 || oid[0] > 6 || oid[1] >= 40 {
+ return StructuralError{"invalid object identifier"}
+ }
+
+ err = out.WriteByte(byte(oid[0]*40 + oid[1]))
+ if err != nil {
+ return
+ }
+ for i := 2; i < len(oid); i++ {
+ err = marshalBase128Int(out, int64(oid[i]))
+ if err != nil {
+ return
+ }
+ }
+
+ return
+}
+
+func marshalPrintableString(out *forkableWriter, s string) (err error) {
+ b := []byte(s)
+ for _, c := range b {
+ if !isPrintable(c) {
+ return StructuralError{"PrintableString contains invalid character"}
+ }
+ }
+
+ _, err = out.Write(b)
+ return
+}
+
+func marshalIA5String(out *forkableWriter, s string) (err error) {
+ b := []byte(s)
+ for _, c := range b {
+ if c > 127 {
+ return StructuralError{"IA5String contains invalid character"}
+ }
+ }
+
+ _, err = out.Write(b)
+ return
+}
+
+func marshalTwoDigits(out *forkableWriter, v int) (err error) {
+ err = out.WriteByte(byte('0' + (v/10)%10))
+ if err != nil {
+ return
+ }
+ return out.WriteByte(byte('0' + v%10))
+}
+
+func marshalUTCTime(out *forkableWriter, t *time.Time) (err error) {
+ switch {
+ case 1950 <= t.Year && t.Year < 2000:
+ err = marshalTwoDigits(out, int(t.Year-1900))
+ case 2000 <= t.Year && t.Year < 2050:
+ err = marshalTwoDigits(out, int(t.Year-2000))
+ default:
+ return StructuralError{"Cannot represent time as UTCTime"}
+ }
+
+ if err != nil {
+ return
+ }
+
+ err = marshalTwoDigits(out, t.Month)
+ if err != nil {
+ return
+ }
+
+ err = marshalTwoDigits(out, t.Day)
+ if err != nil {
+ return
+ }
+
+ err = marshalTwoDigits(out, t.Hour)
+ if err != nil {
+ return
+ }
+
+ err = marshalTwoDigits(out, t.Minute)
+ if err != nil {
+ return
+ }
+
+ err = marshalTwoDigits(out, t.Second)
+ if err != nil {
+ return
+ }
+
+ switch {
+ case t.ZoneOffset/60 == 0:
+ err = out.WriteByte('Z')
+ return
+ case t.ZoneOffset > 0:
+ err = out.WriteByte('+')
+ case t.ZoneOffset < 0:
+ err = out.WriteByte('-')
+ }
+
+ if err != nil {
+ return
+ }
+
+ offsetMinutes := t.ZoneOffset / 60
+ if offsetMinutes < 0 {
+ offsetMinutes = -offsetMinutes
+ }
+
+ err = marshalTwoDigits(out, offsetMinutes/60)
+ if err != nil {
+ return
+ }
+
+ err = marshalTwoDigits(out, offsetMinutes%60)
+ return
+}
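The setPST entry in the marshalTests table below gives a readable illustration of this layout: a two-digit year from the 1950-2049 window, then MMDDHHMMSS, then either 'Z' or a signed HHMM offset. A quick decode of that vector's content octets (a sketch, not part of the patch):

package main

import (
	"encoding/hex"
	"fmt"
)

func main() {
	// Content octets of the setPST marshalTests entry (tag 0x17, length 0x11 stripped).
	b, _ := hex.DecodeString("3039313131353232353631362d30383030")
	fmt.Println(string(b)) // 091115225616-0800
}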
+
+func stripTagAndLength(in []byte) []byte {
+ _, offset, err := parseTagAndLength(in, 0)
+ if err != nil {
+ return in
+ }
+ return in[offset:]
+}
+
+func marshalBody(out *forkableWriter, value reflect.Value, params fieldParameters) (err error) {
+ switch value.Type() {
+ case timeType:
+ return marshalUTCTime(out, value.Interface().(*time.Time))
+ case bitStringType:
+ return marshalBitString(out, value.Interface().(BitString))
+ case objectIdentifierType:
+ return marshalObjectIdentifier(out, value.Interface().(ObjectIdentifier))
+ case bigIntType:
+ return marshalBigInt(out, value.Interface().(*big.Int))
+ }
+
+ switch v := value; v.Kind() {
+ case reflect.Bool:
+ if v.Bool() {
+ return out.WriteByte(255)
+ } else {
+ return out.WriteByte(0)
+ }
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return marshalInt64(out, int64(v.Int()))
+ case reflect.Struct:
+ t := v.Type()
+
+ startingField := 0
+
+ // If the first element of the structure is a non-empty
+ // RawContents, then we don't bother serializing the rest.
+ if t.NumField() > 0 && t.Field(0).Type == rawContentsType {
+ s := v.Field(0)
+ if s.Len() > 0 {
+ bytes := make([]byte, s.Len())
+ for i := 0; i < s.Len(); i++ {
+ bytes[i] = uint8(s.Index(i).Uint())
+ }
+ /* The RawContents will contain the tag and
+ * length fields but we'll also be writing
+ * those ourselves, so we strip them out of
+ * bytes */
+ _, err = out.Write(stripTagAndLength(bytes))
+ return
+ } else {
+ startingField = 1
+ }
+ }
+
+ for i := startingField; i < t.NumField(); i++ {
+ var pre *forkableWriter
+ pre, out = out.fork()
+ err = marshalField(pre, v.Field(i), parseFieldParameters(t.Field(i).Tag.Get("asn1")))
+ if err != nil {
+ return
+ }
+ }
+ return
+ case reflect.Slice:
+ sliceType := v.Type()
+ if sliceType.Elem().Kind() == reflect.Uint8 {
+ bytes := make([]byte, v.Len())
+ for i := 0; i < v.Len(); i++ {
+ bytes[i] = uint8(v.Index(i).Uint())
+ }
+ _, err = out.Write(bytes)
+ return
+ }
+
+ var params fieldParameters
+ for i := 0; i < v.Len(); i++ {
+ var pre *forkableWriter
+ pre, out = out.fork()
+ err = marshalField(pre, v.Index(i), params)
+ if err != nil {
+ return
+ }
+ }
+ return
+ case reflect.String:
+ if params.stringType == tagIA5String {
+ return marshalIA5String(out, v.String())
+ } else {
+ return marshalPrintableString(out, v.String())
+ }
+ return
+ }
+
+ return StructuralError{"unknown Go type"}
+}
+
+func marshalField(out *forkableWriter, v reflect.Value, params fieldParameters) (err error) {
+ // If the field is an interface{} then recurse into it.
+ if v.Kind() == reflect.Interface && v.Type().NumMethod() == 0 {
+ return marshalField(out, v.Elem(), params)
+ }
+
+ if params.optional && reflect.DeepEqual(v.Interface(), reflect.Zero(v.Type()).Interface()) {
+ return
+ }
+
+ if v.Type() == rawValueType {
+ rv := v.Interface().(RawValue)
+ if len(rv.FullBytes) != 0 {
+ _, err = out.Write(rv.FullBytes)
+ } else {
+ err = marshalTagAndLength(out, tagAndLength{rv.Class, rv.Tag, len(rv.Bytes), rv.IsCompound})
+ if err != nil {
+ return
+ }
+ _, err = out.Write(rv.Bytes)
+ }
+ return
+ }
+
+ tag, isCompound, ok := getUniversalType(v.Type())
+ if !ok {
+ err = StructuralError{fmt.Sprintf("unknown Go type: %v", v.Type())}
+ return
+ }
+ class := classUniversal
+
+ if params.stringType != 0 {
+ if tag != tagPrintableString {
+ return StructuralError{"Explicit string type given to non-string member"}
+ }
+ tag = params.stringType
+ }
+
+ if params.set {
+ if tag != tagSequence {
+ return StructuralError{"Non sequence tagged as set"}
+ }
+ tag = tagSet
+ }
+
+ tags, body := out.fork()
+
+ err = marshalBody(body, v, params)
+ if err != nil {
+ return
+ }
+
+ bodyLen := body.Len()
+
+ var explicitTag *forkableWriter
+ if params.explicit {
+ explicitTag, tags = tags.fork()
+ }
+
+ if !params.explicit && params.tag != nil {
+ // implicit tag.
+ tag = *params.tag
+ class = classContextSpecific
+ }
+
+ err = marshalTagAndLength(tags, tagAndLength{class, tag, bodyLen, isCompound})
+ if err != nil {
+ return
+ }
+
+ if params.explicit {
+ err = marshalTagAndLength(explicitTag, tagAndLength{
+ class: classContextSpecific,
+ tag: *params.tag,
+ length: bodyLen + tags.Len(),
+ isCompound: true,
+ })
+ }
+
+ return nil
+}
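The explicit/implicit branches above correspond to the implicitTagTest and explicitTagTest vectors in the test file below: an implicit tag replaces the universal INTEGER tag, while an explicit tag adds an outer constructed wrapper around the untouched INTEGER encoding. A small sketch (not part of the patch; it assumes the standard encoding/asn1 package, whereas this tree imports the package as plain "asn1"):

package main

import (
	"encoding/asn1"
	"fmt"
)

type implicitTagTest struct {
	A int `asn1:"implicit,tag:5"`
}

type explicitTagTest struct {
	A int `asn1:"explicit,tag:5"`
}

func main() {
	i, _ := asn1.Marshal(implicitTagTest{64})
	e, _ := asn1.Marshal(explicitTagTest{64})
	fmt.Printf("% x\n", i) // 30 03 85 01 40       (context tag 5 replaces 02)
	fmt.Printf("% x\n", e) // 30 05 a5 03 02 01 40 (context tag 5 wraps 02 01 40)
}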
+
+// Marshal returns the ASN.1 encoding of val.
+func Marshal(val interface{}) ([]byte, error) {
+ var out bytes.Buffer
+ v := reflect.ValueOf(val)
+ f := newForkableWriter()
+ err := marshalField(f, v, fieldParameters{})
+ if err != nil {
+ return nil, err
+ }
+ _, err = f.writeTo(&out)
+ return out.Bytes(), err
+}
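A minimal usage sketch for Marshal (not part of the patch; it assumes the later encoding/asn1 import path, while in this tree the package is imported as just "asn1"): a struct is encoded as a SEQUENCE and each int field as an INTEGER.

package main

import (
	"encoding/asn1"
	"fmt"
)

type pair struct {
	X, Y int
}

func main() {
	der, err := asn1.Marshal(pair{1, 2})
	if err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", der) // 30 06 02 01 01 02 01 02: a SEQUENCE of two INTEGERs
}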
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package asn1
+
+import (
+ "bytes"
+ "encoding/hex"
+ "testing"
+ "time"
+)
+
+type intStruct struct {
+ A int
+}
+
+type twoIntStruct struct {
+ A int
+ B int
+}
+
+type nestedStruct struct {
+ A intStruct
+}
+
+type rawContentsStruct struct {
+ Raw RawContent
+ A int
+}
+
+type implicitTagTest struct {
+ A int `asn1:"implicit,tag:5"`
+}
+
+type explicitTagTest struct {
+ A int `asn1:"explicit,tag:5"`
+}
+
+type ia5StringTest struct {
+ A string `asn1:"ia5"`
+}
+
+type printableStringTest struct {
+ A string `asn1:"printable"`
+}
+
+type optionalRawValueTest struct {
+ A RawValue `asn1:"optional"`
+}
+
+type testSET []int
+
+func setPST(t *time.Time) *time.Time {
+ t.ZoneOffset = -28800
+ return t
+}
+
+type marshalTest struct {
+ in interface{}
+ out string // hex encoded
+}
+
+var marshalTests = []marshalTest{
+ {10, "02010a"},
+ {127, "02017f"},
+ {128, "02020080"},
+ {-128, "020180"},
+ {-129, "0202ff7f"},
+ {intStruct{64}, "3003020140"},
+ {twoIntStruct{64, 65}, "3006020140020141"},
+ {nestedStruct{intStruct{127}}, "3005300302017f"},
+ {[]byte{1, 2, 3}, "0403010203"},
+ {implicitTagTest{64}, "3003850140"},
+ {explicitTagTest{64}, "3005a503020140"},
+ {time.SecondsToUTC(0), "170d3730303130313030303030305a"},
+ {time.SecondsToUTC(1258325776), "170d3039313131353232353631365a"},
+ {setPST(time.SecondsToUTC(1258325776)), "17113039313131353232353631362d30383030"},
+ {BitString{[]byte{0x80}, 1}, "03020780"},
+ {BitString{[]byte{0x81, 0xf0}, 12}, "03030481f0"},
+ {ObjectIdentifier([]int{1, 2, 3, 4}), "06032a0304"},
+ {ObjectIdentifier([]int{1, 2, 840, 133549, 1, 1, 5}), "06092a864888932d010105"},
+ {"test", "130474657374"},
+ {
+ "" +
+ "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" +
+ "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" +
+ "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" +
+ "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", // This is 127 times 'x'
+ "137f" +
+ "7878787878787878787878787878787878787878787878787878787878787878" +
+ "7878787878787878787878787878787878787878787878787878787878787878" +
+ "7878787878787878787878787878787878787878787878787878787878787878" +
+ "78787878787878787878787878787878787878787878787878787878787878",
+ },
+ {
+ "" +
+ "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" +
+ "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" +
+ "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" +
+ "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", // This is 128 times 'x'
+ "138180" +
+ "7878787878787878787878787878787878787878787878787878787878787878" +
+ "7878787878787878787878787878787878787878787878787878787878787878" +
+ "7878787878787878787878787878787878787878787878787878787878787878" +
+ "7878787878787878787878787878787878787878787878787878787878787878",
+ },
+ {ia5StringTest{"test"}, "3006160474657374"},
+ {optionalRawValueTest{}, "3000"},
+ {printableStringTest{"test"}, "3006130474657374"},
+ {printableStringTest{"test*"}, "30071305746573742a"},
+ {rawContentsStruct{nil, 64}, "3003020140"},
+ {rawContentsStruct{[]byte{0x30, 3, 1, 2, 3}, 64}, "3003010203"},
+ {RawValue{Tag: 1, Class: 2, IsCompound: false, Bytes: []byte{1, 2, 3}}, "8103010203"},
+ {testSET([]int{10}), "310302010a"},
+}
+
+func TestMarshal(t *testing.T) {
+ for i, test := range marshalTests {
+ data, err := Marshal(test.in)
+ if err != nil {
+ t.Errorf("#%d failed: %s", i, err)
+ }
+ out, _ := hex.DecodeString(test.out)
+ if bytes.Compare(out, data) != 0 {
+ t.Errorf("#%d got: %x want %x", i, data, out)
+ }
+ }
+}
import (
"errors"
- "math"
"io"
+ "math"
"reflect"
)
import (
"bytes"
"io"
- "bytes"
"math"
"reflect"
"testing"
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package csv reads and writes comma-separated values (CSV) files.
+//
+// A csv file contains zero or more records of one or more fields per record.
+// Each record is separated by the newline character. The final record may
+// optionally be followed by a newline character.
+//
+// field1,field2,field3
+//
+// White space is considered part of a field.
+//
+// Carriage returns before newline characters are silently removed.
+//
+// Blank lines are ignored. A line with only whitespace characters (excluding
+// the ending newline character) is not considered a blank line.
+//
+// Fields which start and stop with the quote character " are called
+// quoted-fields. The beginning and ending quote are not part of the
+// field.
+//
+// The source:
+//
+// normal string,"quoted-field"
+//
+// results in the fields
+//
+// {`normal string`, `quoted-field`}
+//
+// Within a quoted-field a quote character followed by a second quote
+// character is considered a single quote.
+//
+// "the ""word"" is true","a ""quoted-field"""
+//
+// results in
+//
+// {`the "word" is true`, `a "quoted-field"`}
+//
+// Newlines and commas may be included in a quoted-field
+//
+// "Multi-line
+// field","comma is ,"
+//
+// results in
+//
+// {`Multi-line
+// field`, `comma is ,`}
+package csv
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "unicode"
+)
+
+// A ParseError is returned for parsing errors.
+// The first line is 1. The first column is 0.
+type ParseError struct {
+ Line int // Line where the error occurred
+ Column int // Column (rune index) where the error occurred
+ Err error // The actual error
+}
+
+func (e *ParseError) Error() string {
+ return fmt.Sprintf("line %d, column %d: %s", e.Line, e.Column, e.Err)
+}
+
+// These are the errors that can be returned in ParseError.Err
+var (
+ ErrTrailingComma = errors.New("extra delimiter at end of line")
+ ErrBareQuote = errors.New("bare \" in non-quoted-field")
+ ErrQuote = errors.New("extraneous \" in field")
+ ErrFieldCount = errors.New("wrong number of fields in line")
+)
+
+// A Reader reads records from a CSV-encoded file.
+//
+// As returned by NewReader, a Reader expects input conforming to RFC 4180.
+// The exported fields can be changed to customize the details before the
+// first call to Read or ReadAll.
+//
+// Comma is the field delimiter. It defaults to ','.
+//
+// Comment, if not 0, is the comment character. Lines beginning with the
+// Comment character are ignored.
+//
+// If FieldsPerRecord is positive, Read requires each record to
+// have the given number of fields. If FieldsPerRecord is 0, Read sets it to
+// the number of fields in the first record, so that future records must
+// have the same field count. If FieldsPerRecord is negative, no check is
+// made and records may have a variable number of fields.
+//
+// If LazyQuotes is true, a quote may appear in an unquoted field and a
+// non-doubled quote may appear in a quoted field.
+//
+// If TrailingComma is true, the last field may be an unquoted empty field.
+//
+// If TrimLeadingSpace is true, leading white space in a field is ignored.
+type Reader struct {
+ Comma rune // Field delimiter (set to ',' by NewReader)
+ Comment rune // Comment character for start of line
+ FieldsPerRecord int // Number of expected fields per record
+ LazyQuotes bool // Allow lazy quotes
+ TrailingComma bool // Allow trailing comma
+ TrimLeadingSpace bool // Trim leading space
+ line int
+ column int
+ r *bufio.Reader
+ field bytes.Buffer
+}
+
+// NewReader returns a new Reader that reads from r.
+func NewReader(r io.Reader) *Reader {
+ return &Reader{
+ Comma: ',',
+ r: bufio.NewReader(r),
+ }
+}
+
+// error creates a new ParseError based on err.
+func (r *Reader) error(err error) error {
+ return &ParseError{
+ Line: r.line,
+ Column: r.column,
+ Err: err,
+ }
+}
+
+// Read reads one record from r. The record is a slice of strings with each
+// string representing one field.
+func (r *Reader) Read() (record []string, err error) {
+ for {
+ record, err = r.parseRecord()
+ if record != nil {
+ break
+ }
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if r.FieldsPerRecord > 0 {
+ if len(record) != r.FieldsPerRecord {
+ r.column = 0 // report at start of record
+ return record, r.error(ErrFieldCount)
+ }
+ } else if r.FieldsPerRecord == 0 {
+ r.FieldsPerRecord = len(record)
+ }
+ return record, nil
+}
+
+// ReadAll reads all the remaining records from r.
+// Each record is a slice of fields.
+func (r *Reader) ReadAll() (records [][]string, err error) {
+ for {
+ record, err := r.Read()
+ if err == io.EOF {
+ return records, nil
+ }
+ if err != nil {
+ return nil, err
+ }
+ records = append(records, record)
+ }
+ panic("unreachable")
+}
+
+// readRune reads one rune from r, folding \r\n to \n and keeping track
+// of how far into the line we have read. r.column will point to the start
+// of this rune, not the end of this rune.
+func (r *Reader) readRune() (rune, error) {
+ r1, _, err := r.r.ReadRune()
+
+ // Handle \r\n here. We make the simplifying assumption that
+ // anytime \r is followed by \n that it can be folded to \n.
+ // We will not detect files which contain both \r\n and bare \n.
+ if r1 == '\r' {
+ r1, _, err = r.r.ReadRune()
+ if err == nil {
+ if r1 != '\n' {
+ r.r.UnreadRune()
+ r1 = '\r'
+ }
+ }
+ }
+ r.column++
+ return r1, err
+}
+
+// unreadRune puts the last rune read from r back.
+func (r *Reader) unreadRune() {
+ r.r.UnreadRune()
+ r.column--
+}
+
+// skip reads runes up to and including the rune delim or until error.
+func (r *Reader) skip(delim rune) error {
+ for {
+ r1, err := r.readRune()
+ if err != nil {
+ return err
+ }
+ if r1 == delim {
+ return nil
+ }
+ }
+ panic("unreachable")
+}
+
+// parseRecord reads and parses a single csv record from r.
+func (r *Reader) parseRecord() (fields []string, err error) {
+ // Each record starts on a new line. We increment our line
+ // number (lines start at 1, not 0) and set column to -1
+ // so that, as readRune increments it, it points to the character we read.
+ r.line++
+ r.column = -1
+
+ // Peek at the first rune. If it is an error we are done.
+ // If we support comments and it is the comment character
+ // then skip to the end of line.
+
+ r1, _, err := r.r.ReadRune()
+ if err != nil {
+ return nil, err
+ }
+
+ if r.Comment != 0 && r1 == r.Comment {
+ return nil, r.skip('\n')
+ }
+ r.r.UnreadRune()
+
+ // At this point we have at least one field.
+ for {
+ haveField, delim, err := r.parseField()
+ if haveField {
+ fields = append(fields, r.field.String())
+ }
+ if delim == '\n' || err == io.EOF {
+ return fields, err
+ } else if err != nil {
+ return nil, err
+ }
+ }
+ panic("unreachable")
+}
+
+// parseField parses the next field in the record. The read field is
+// located in r.field. Delim is the first character not part of the field
+// (r.Comma or '\n').
+func (r *Reader) parseField() (haveField bool, delim rune, err error) {
+ r.field.Reset()
+
+ r1, err := r.readRune()
+ if err != nil {
+ // If we have EOF and are not at the start of a line
+ // then we return the empty field. We have already
+ // checked for trailing commas if needed.
+ if err == io.EOF && r.column != 0 {
+ return true, 0, err
+ }
+ return false, 0, err
+ }
+
+ if r.TrimLeadingSpace {
+ for r1 != '\n' && unicode.IsSpace(r1) {
+ r1, err = r.readRune()
+ if err != nil {
+ return false, 0, err
+ }
+ }
+ }
+
+ switch r1 {
+ case r.Comma:
+ // will check below
+
+ case '\n':
+ // We are a trailing empty field or a blank line
+ if r.column == 0 {
+ return false, r1, nil
+ }
+ return true, r1, nil
+
+ case '"':
+ // quoted field
+ Quoted:
+ for {
+ r1, err = r.readRune()
+ if err != nil {
+ if err == io.EOF {
+ if r.LazyQuotes {
+ return true, 0, err
+ }
+ return false, 0, r.error(ErrQuote)
+ }
+ return false, 0, err
+ }
+ switch r1 {
+ case '"':
+ r1, err = r.readRune()
+ if err != nil || r1 == r.Comma {
+ break Quoted
+ }
+ if r1 == '\n' {
+ return true, r1, nil
+ }
+ if r1 != '"' {
+ if !r.LazyQuotes {
+ r.column--
+ return false, 0, r.error(ErrQuote)
+ }
+ // accept the bare quote
+ r.field.WriteRune('"')
+ }
+ case '\n':
+ r.line++
+ r.column = -1
+ }
+ r.field.WriteRune(r1)
+ }
+
+ default:
+ // unquoted field
+ for {
+ r.field.WriteRune(r1)
+ r1, err = r.readRune()
+ if err != nil || r1 == r.Comma {
+ break
+ }
+ if r1 == '\n' {
+ return true, r1, nil
+ }
+ if !r.LazyQuotes && r1 == '"' {
+ return false, 0, r.error(ErrBareQuote)
+ }
+ }
+ }
+
+ if err != nil {
+ if err == io.EOF {
+ return true, 0, err
+ }
+ return false, 0, err
+ }
+
+ if !r.TrailingComma {
+ // We don't allow trailing commas. See if we
+ // are at the end of the line (being mindful
+ // of trimming spaces).
+ c := r.column
+ r1, err = r.readRune()
+ if r.TrimLeadingSpace {
+ for r1 != '\n' && unicode.IsSpace(r1) {
+ r1, err = r.readRune()
+ if err != nil {
+ break
+ }
+ }
+ }
+ if err == io.EOF || r1 == '\n' {
+ r.column = c // report the comma
+ return false, 0, r.error(ErrTrailingComma)
+ }
+ r.unreadRune()
+ }
+ return true, r1, nil
+}
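To tie the Reader documentation above to concrete behaviour, here is a small usage sketch (not part of the patch; it assumes the later encoding/csv import path, while this tree imports the package as plain "csv"): comment lines are skipped, and a quoted field may contain the delimiter or span several lines.

package main

import (
	"encoding/csv"
	"fmt"
	"strings"
)

func main() {
	in := "# a comment line\na,\"quoted, field\"\nc,\"multi\nline\"\n"
	r := csv.NewReader(strings.NewReader(in))
	r.Comment = '#' // lines starting with '#' are skipped
	records, err := r.ReadAll()
	if err != nil {
		panic(err)
	}
	for _, rec := range records {
		fmt.Printf("%q\n", rec)
	}
	// ["a" "quoted, field"]
	// ["c" "multi\nline"]
}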
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package csv
+
+import (
+ "reflect"
+ "strings"
+ "testing"
+)
+
+var readTests = []struct {
+ Name string
+ Input string
+ Output [][]string
+ UseFieldsPerRecord bool // false (default) means FieldsPerRecord is -1
+
+ // These fields are copied into the Reader
+ Comma rune
+ Comment rune
+ FieldsPerRecord int
+ LazyQuotes bool
+ TrailingComma bool
+ TrimLeadingSpace bool
+
+ Error string
+ Line int // Expected error line if != 0
+ Column int // Expected error column if line != 0
+}{
+ {
+ Name: "Simple",
+ Input: "a,b,c\n",
+ Output: [][]string{{"a", "b", "c"}},
+ },
+ {
+ Name: "CRLF",
+ Input: "a,b\r\nc,d\r\n",
+ Output: [][]string{{"a", "b"}, {"c", "d"}},
+ },
+ {
+ Name: "BareCR",
+ Input: "a,b\rc,d\r\n",
+ Output: [][]string{{"a", "b\rc", "d"}},
+ },
+ {
+ Name: "RFC4180test",
+ UseFieldsPerRecord: true,
+ Input: `#field1,field2,field3
+"aaa","bb
+b","ccc"
+"a,a","b""bb","ccc"
+zzz,yyy,xxx
+`,
+ Output: [][]string{
+ {"#field1", "field2", "field3"},
+ {"aaa", "bb\nb", "ccc"},
+ {"a,a", `b"bb`, "ccc"},
+ {"zzz", "yyy", "xxx"},
+ },
+ },
+ {
+ Name: "NoEOLTest",
+ Input: "a,b,c",
+ Output: [][]string{{"a", "b", "c"}},
+ },
+ {
+ Name: "Semicolon",
+ Comma: ';',
+ Input: "a;b;c\n",
+ Output: [][]string{{"a", "b", "c"}},
+ },
+ {
+ Name: "MultiLine",
+ Input: `"two
+line","one line","three
+line
+field"`,
+ Output: [][]string{{"two\nline", "one line", "three\nline\nfield"}},
+ },
+ {
+ Name: "BlankLine",
+ Input: "a,b,c\n\nd,e,f\n\n",
+ Output: [][]string{
+ {"a", "b", "c"},
+ {"d", "e", "f"},
+ },
+ },
+ {
+ Name: "TrimSpace",
+ Input: " a, b, c\n",
+ TrimLeadingSpace: true,
+ Output: [][]string{{"a", "b", "c"}},
+ },
+ {
+ Name: "LeadingSpace",
+ Input: " a, b, c\n",
+ Output: [][]string{{" a", " b", " c"}},
+ },
+ {
+ Name: "Comment",
+ Comment: '#',
+ Input: "#1,2,3\na,b,c\n#comment",
+ Output: [][]string{{"a", "b", "c"}},
+ },
+ {
+ Name: "NoComment",
+ Input: "#1,2,3\na,b,c",
+ Output: [][]string{{"#1", "2", "3"}, {"a", "b", "c"}},
+ },
+ {
+ Name: "LazyQuotes",
+ LazyQuotes: true,
+ Input: `a "word","1"2",a","b`,
+ Output: [][]string{{`a "word"`, `1"2`, `a"`, `b`}},
+ },
+ {
+ Name: "BareQuotes",
+ LazyQuotes: true,
+ Input: `a "word","1"2",a"`,
+ Output: [][]string{{`a "word"`, `1"2`, `a"`}},
+ },
+ {
+ Name: "BareDoubleQuotes",
+ LazyQuotes: true,
+ Input: `a""b,c`,
+ Output: [][]string{{`a""b`, `c`}},
+ },
+ {
+ Name: "BadDoubleQuotes",
+ Input: `a""b,c`,
+ Error: `bare " in non-quoted-field`, Line: 1, Column: 1,
+ },
+ {
+ Name: "TrimQuote",
+ Input: ` "a"," b",c`,
+ TrimLeadingSpace: true,
+ Output: [][]string{{"a", " b", "c"}},
+ },
+ {
+ Name: "BadBareQuote",
+ Input: `a "word","b"`,
+ Error: `bare " in non-quoted-field`, Line: 1, Column: 2,
+ },
+ {
+ Name: "BadTrailingQuote",
+ Input: `"a word",b"`,
+ Error: `bare " in non-quoted-field`, Line: 1, Column: 10,
+ },
+ {
+ Name: "ExtraneousQuote",
+ Input: `"a "word","b"`,
+ Error: `extraneous " in field`, Line: 1, Column: 3,
+ },
+ {
+ Name: "BadFieldCount",
+ UseFieldsPerRecord: true,
+ Input: "a,b,c\nd,e",
+ Error: "wrong number of fields", Line: 2,
+ },
+ {
+ Name: "BadFieldCount1",
+ UseFieldsPerRecord: true,
+ FieldsPerRecord: 2,
+ Input: `a,b,c`,
+ Error: "wrong number of fields", Line: 1,
+ },
+ {
+ Name: "FieldCount",
+ Input: "a,b,c\nd,e",
+ Output: [][]string{{"a", "b", "c"}, {"d", "e"}},
+ },
+ {
+ Name: "BadTrailingCommaEOF",
+ Input: "a,b,c,",
+ Error: "extra delimiter at end of line", Line: 1, Column: 5,
+ },
+ {
+ Name: "BadTrailingCommaEOL",
+ Input: "a,b,c,\n",
+ Error: "extra delimiter at end of line", Line: 1, Column: 5,
+ },
+ {
+ Name: "BadTrailingCommaSpaceEOF",
+ TrimLeadingSpace: true,
+ Input: "a,b,c, ",
+ Error: "extra delimiter at end of line", Line: 1, Column: 5,
+ },
+ {
+ Name: "BadTrailingCommaSpaceEOL",
+ TrimLeadingSpace: true,
+ Input: "a,b,c, \n",
+ Error: "extra delimiter at end of line", Line: 1, Column: 5,
+ },
+ {
+ Name: "BadTrailingCommaLine3",
+ TrimLeadingSpace: true,
+ Input: "a,b,c\nd,e,f\ng,hi,",
+ Error: "extra delimiter at end of line", Line: 3, Column: 4,
+ },
+ {
+ Name: "NotTrailingComma3",
+ Input: "a,b,c, \n",
+ Output: [][]string{{"a", "b", "c", " "}},
+ },
+ {
+ Name: "CommaFieldTest",
+ TrailingComma: true,
+ Input: `x,y,z,w
+x,y,z,
+x,y,,
+x,,,
+,,,
+"x","y","z","w"
+"x","y","z",""
+"x","y","",""
+"x","","",""
+"","","",""
+`,
+ Output: [][]string{
+ {"x", "y", "z", "w"},
+ {"x", "y", "z", ""},
+ {"x", "y", "", ""},
+ {"x", "", "", ""},
+ {"", "", "", ""},
+ {"x", "y", "z", "w"},
+ {"x", "y", "z", ""},
+ {"x", "y", "", ""},
+ {"x", "", "", ""},
+ {"", "", "", ""},
+ },
+ },
+ {
+ Name: "Issue 2366",
+ TrailingComma: true,
+ TrimLeadingSpace: true,
+ Input: "a,b,\nc,d,e",
+ Output: [][]string{
+ {"a", "b", ""},
+ {"c", "d", "e"},
+ },
+ },
+ {
+ Name: "Issue 2366a",
+ TrailingComma: false,
+ TrimLeadingSpace: true,
+ Input: "a,b,\nc,d,e",
+ Error: "extra delimiter at end of line",
+ },
+}
+
+func TestRead(t *testing.T) {
+ for _, tt := range readTests {
+ r := NewReader(strings.NewReader(tt.Input))
+ r.Comment = tt.Comment
+ if tt.UseFieldsPerRecord {
+ r.FieldsPerRecord = tt.FieldsPerRecord
+ } else {
+ r.FieldsPerRecord = -1
+ }
+ r.LazyQuotes = tt.LazyQuotes
+ r.TrailingComma = tt.TrailingComma
+ r.TrimLeadingSpace = tt.TrimLeadingSpace
+ if tt.Comma != 0 {
+ r.Comma = tt.Comma
+ }
+ out, err := r.ReadAll()
+ perr, _ := err.(*ParseError)
+ if tt.Error != "" {
+ if err == nil || !strings.Contains(err.Error(), tt.Error) {
+ t.Errorf("%s: error %v, want error %q", tt.Name, err, tt.Error)
+ } else if tt.Line != 0 && (tt.Line != perr.Line || tt.Column != perr.Column) {
+ t.Errorf("%s: error at %d:%d expected %d:%d", tt.Name, perr.Line, perr.Column, tt.Line, tt.Column)
+ }
+ } else if err != nil {
+ t.Errorf("%s: unexpected error %v", tt.Name, err)
+ } else if !reflect.DeepEqual(out, tt.Output) {
+ t.Errorf("%s: out=%q want %q", tt.Name, out, tt.Output)
+ }
+ }
+}
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package csv
+
+import (
+ "bufio"
+ "io"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+// A Writer writes records to a CSV encoded file.
+//
+// As returned by NewWriter, a Writer writes records terminated by a
+// newline and uses ',' as the field delimiter. The exported fields can be
+// changed to customize the details before the first call to Write or WriteAll.
+//
+// Comma is the field delimiter.
+//
+// If UseCRLF is true, the Writer ends each record with \r\n instead of \n.
+type Writer struct {
+ Comma rune // Field delimiter (set to ',' by NewWriter)
+ UseCRLF bool // True to use \r\n as the line terminator
+ w *bufio.Writer
+}
+
+// NewWriter returns a new Writer that writes to w.
+func NewWriter(w io.Writer) *Writer {
+ return &Writer{
+ Comma: ',',
+ w: bufio.NewWriter(w),
+ }
+}
+
+// Write writes a single CSV record to w along with any necessary quoting.
+// A record is a slice of strings with each string being one field.
+func (w *Writer) Write(record []string) (err error) {
+ for n, field := range record {
+ if n > 0 {
+ if _, err = w.w.WriteRune(w.Comma); err != nil {
+ return
+ }
+ }
+
+ // If we don't have to have a quoted field then just
+ // write out the field and continue to the next field.
+ if !w.fieldNeedsQuotes(field) {
+ if _, err = w.w.WriteString(field); err != nil {
+ return
+ }
+ continue
+ }
+ if err = w.w.WriteByte('"'); err != nil {
+ return
+ }
+
+ for _, r1 := range field {
+ switch r1 {
+ case '"':
+ _, err = w.w.WriteString(`""`)
+ case '\r':
+ if !w.UseCRLF {
+ err = w.w.WriteByte('\r')
+ }
+ case '\n':
+ if w.UseCRLF {
+ _, err = w.w.WriteString("\r\n")
+ } else {
+ err = w.w.WriteByte('\n')
+ }
+ default:
+ _, err = w.w.WriteRune(r1)
+ }
+ if err != nil {
+ return
+ }
+ }
+
+ if err = w.w.WriteByte('"'); err != nil {
+ return
+ }
+ }
+ if w.UseCRLF {
+ _, err = w.w.WriteString("\r\n")
+ } else {
+ err = w.w.WriteByte('\n')
+ }
+ return
+}
+
+// Flush writes any buffered data to the underlying io.Writer.
+func (w *Writer) Flush() {
+ w.w.Flush()
+}
+
+// WriteAll writes multiple CSV records to w using Write and then calls Flush.
+func (w *Writer) WriteAll(records [][]string) (err error) {
+ for _, record := range records {
+ err = w.Write(record)
+ if err != nil {
+ break
+ }
+ }
+ w.Flush()
+ return err
+}
+
+// fieldNeedsQuotes returns true if our field must be enclosed in quotes.
+// Empty fields, fields with a Comma, fields with a quote or newline, and
+// fields which start with a space must be enclosed in quotes.
+func (w *Writer) fieldNeedsQuotes(field string) bool {
+ if len(field) == 0 || strings.IndexRune(field, w.Comma) >= 0 || strings.IndexAny(field, "\"\r\n") >= 0 {
+ return true
+ }
+
+ r1, _ := utf8.DecodeRuneInString(field)
+ return unicode.IsSpace(r1)
+}
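A matching sketch for the Writer (again not part of the patch, assuming the encoding/csv import path): fields containing the delimiter, a quote, a newline, or a leading space are quoted, with embedded quotes doubled, exactly as fieldNeedsQuotes above decides.

package main

import (
	"encoding/csv"
	"os"
)

func main() {
	w := csv.NewWriter(os.Stdout)
	// WriteAll calls Flush for us.
	w.WriteAll([][]string{
		{"plain", "needs,quoting", `has "quotes"`},
		{" leading space", "multi\nline"},
	})
	// plain,"needs,quoting","has ""quotes"""
	// " leading space","multi
	// line"
}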
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package csv
+
+import (
+ "bytes"
+ "testing"
+)
+
+var writeTests = []struct {
+ Input [][]string
+ Output string
+ UseCRLF bool
+}{
+ {Input: [][]string{{"abc"}}, Output: "abc\n"},
+ {Input: [][]string{{"abc"}}, Output: "abc\r\n", UseCRLF: true},
+ {Input: [][]string{{`"abc"`}}, Output: `"""abc"""` + "\n"},
+ {Input: [][]string{{`a"b`}}, Output: `"a""b"` + "\n"},
+ {Input: [][]string{{`"a"b"`}}, Output: `"""a""b"""` + "\n"},
+ {Input: [][]string{{" abc"}}, Output: `" abc"` + "\n"},
+ {Input: [][]string{{"abc,def"}}, Output: `"abc,def"` + "\n"},
+ {Input: [][]string{{"abc", "def"}}, Output: "abc,def\n"},
+ {Input: [][]string{{"abc"}, {"def"}}, Output: "abc\ndef\n"},
+ {Input: [][]string{{"abc\ndef"}}, Output: "\"abc\ndef\"\n"},
+ {Input: [][]string{{"abc\ndef"}}, Output: "\"abc\r\ndef\"\r\n", UseCRLF: true},
+}
+
+func TestWrite(t *testing.T) {
+ for n, tt := range writeTests {
+ b := &bytes.Buffer{}
+ f := NewWriter(b)
+ f.UseCRLF = tt.UseCRLF
+ err := f.WriteAll(tt.Input)
+ if err != nil {
+ t.Errorf("Unexpected error: %s\n", err)
+ }
+ out := b.String()
+ if out != tt.Output {
+ t.Errorf("#%d: out=%q want %q", n, out, tt.Output)
+ }
+ }
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gob
+
+import (
+ "bytes"
+ "errors"
+ "math"
+ "reflect"
+ "strings"
+ "testing"
+ "unsafe"
+)
+
+// Guarantee encoding format by comparing some encodings to hand-written values
+type EncodeT struct {
+ x uint64
+ b []byte
+}
+
+var encodeT = []EncodeT{
+ {0x00, []byte{0x00}},
+ {0x0F, []byte{0x0F}},
+ {0xFF, []byte{0xFF, 0xFF}},
+ {0xFFFF, []byte{0xFE, 0xFF, 0xFF}},
+ {0xFFFFFF, []byte{0xFD, 0xFF, 0xFF, 0xFF}},
+ {0xFFFFFFFF, []byte{0xFC, 0xFF, 0xFF, 0xFF, 0xFF}},
+ {0xFFFFFFFFFF, []byte{0xFB, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}},
+ {0xFFFFFFFFFFFF, []byte{0xFA, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}},
+ {0xFFFFFFFFFFFFFF, []byte{0xF9, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}},
+ {0xFFFFFFFFFFFFFFFF, []byte{0xF8, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}},
+ {0x1111, []byte{0xFE, 0x11, 0x11}},
+ {0x1111111111111111, []byte{0xF8, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11}},
+ {0x8888888888888888, []byte{0xF8, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88}},
+ {1 << 63, []byte{0xF8, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}},
+}
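These hand-written vectors follow gob's unsigned-integer wire format: a value below 128 is sent as a single byte, and anything larger as its minimal big-endian bytes preceded by the byte count, negated. A stand-alone sketch (not the real encodeUint) that reproduces the table:

package main

import "fmt"

func encodeUint(x uint64) []byte {
	if x < 128 {
		return []byte{byte(x)}
	}
	var b []byte
	for v := x; v > 0; v >>= 8 {
		b = append([]byte{byte(v)}, b...) // build the big-endian byte string
	}
	return append([]byte{byte(-int8(len(b)))}, b...) // prefix with negated byte count
}

func main() {
	fmt.Printf("% x\n", encodeUint(0x0F))   // 0f
	fmt.Printf("% x\n", encodeUint(0xFF))   // ff ff
	fmt.Printf("% x\n", encodeUint(0x1111)) // fe 11 11
}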
+
+// testError is meant to be used as a deferred function to turn a panic(gobError) into a
+// plain test.Error call.
+func testError(t *testing.T) {
+ if e := recover(); e != nil {
+ t.Error(e.(gobError).err) // Will re-panic if not one of our errors, such as a runtime error.
+ }
+ return
+}
+
+// Test basic encode/decode routines for unsigned integers
+func TestUintCodec(t *testing.T) {
+ defer testError(t)
+ b := new(bytes.Buffer)
+ encState := newEncoderState(b)
+ for _, tt := range encodeT {
+ b.Reset()
+ encState.encodeUint(tt.x)
+ if !bytes.Equal(tt.b, b.Bytes()) {
+ t.Errorf("encodeUint: %#x encode: expected % x got % x", tt.x, tt.b, b.Bytes())
+ }
+ }
+ decState := newDecodeState(b)
+ for u := uint64(0); ; u = (u + 1) * 7 {
+ b.Reset()
+ encState.encodeUint(u)
+ v := decState.decodeUint()
+ if u != v {
+ t.Errorf("Encode/Decode: sent %#x received %#x", u, v)
+ }
+ if u&(1<<63) != 0 {
+ break
+ }
+ }
+}
+
+func verifyInt(i int64, t *testing.T) {
+ defer testError(t)
+ var b = new(bytes.Buffer)
+ encState := newEncoderState(b)
+ encState.encodeInt(i)
+ decState := newDecodeState(b)
+ decState.buf = make([]byte, 8)
+ j := decState.decodeInt()
+ if i != j {
+ t.Errorf("Encode/Decode: sent %#x received %#x", uint64(i), uint64(j))
+ }
+}
+
+// Test basic encode/decode routines for signed integers
+func TestIntCodec(t *testing.T) {
+ for u := uint64(0); ; u = (u + 1) * 7 {
+ // Do positive and negative values
+ i := int64(u)
+ verifyInt(i, t)
+ verifyInt(-i, t)
+ verifyInt(^i, t)
+ if u&(1<<63) != 0 {
+ break
+ }
+ }
+ verifyInt(-1<<63, t) // a tricky case
+}
+
+// The result of encoding a true boolean with field number 7
+var boolResult = []byte{0x07, 0x01}
+// The result of encoding a number 17 with field number 7
+var signedResult = []byte{0x07, 2 * 17}
+var unsignedResult = []byte{0x07, 17}
+var floatResult = []byte{0x07, 0xFE, 0x31, 0x40}
+// The result of encoding a number 17+19i with field number 7
+var complexResult = []byte{0x07, 0xFE, 0x31, 0x40, 0xFE, 0x33, 0x40}
+// The result of encoding "hello" with field number 7
+var bytesResult = []byte{0x07, 0x05, 'h', 'e', 'l', 'l', 'o'}
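signedResult holds 2*17 because signed values are sent with the sign in the low bit, and floatResult follows gob's floating-point rule: the float64 bit pattern is byte-reversed (so small values have few significant bytes) and then sent as an unsigned integer. A quick check of the 17.0 case (a sketch, not the codec itself):

package main

import (
	"fmt"
	"math"
)

func main() {
	bits := math.Float64bits(17.0) // 0x4031000000000000
	var rev uint64
	for i := uint(0); i < 8; i++ {
		rev = rev<<8 | (bits>>(8*i))&0xFF
	}
	fmt.Printf("%#x\n", rev) // 0x3140, which the unsigned encoding writes as fe 31 40
}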
+
+func newDecodeState(buf *bytes.Buffer) *decoderState {
+ d := new(decoderState)
+ d.b = buf
+ d.buf = make([]byte, uint64Size)
+ return d
+}
+
+func newEncoderState(b *bytes.Buffer) *encoderState {
+ b.Reset()
+ state := &encoderState{enc: nil, b: b}
+ state.fieldnum = -1
+ return state
+}
+
+// Test instruction execution for encoding.
+// Do not run the machine yet; instead do individual instructions crafted by hand.
+func TestScalarEncInstructions(t *testing.T) {
+ var b = new(bytes.Buffer)
+
+ // bool
+ {
+ data := struct{ a bool }{true}
+ instr := &encInstr{encBool, 6, 0, 0}
+ state := newEncoderState(b)
+ instr.op(instr, state, unsafe.Pointer(&data))
+ if !bytes.Equal(boolResult, b.Bytes()) {
+ t.Errorf("bool enc instructions: expected % x got % x", boolResult, b.Bytes())
+ }
+ }
+
+ // int
+ {
+ b.Reset()
+ data := struct{ a int }{17}
+ instr := &encInstr{encInt, 6, 0, 0}
+ state := newEncoderState(b)
+ instr.op(instr, state, unsafe.Pointer(&data))
+ if !bytes.Equal(signedResult, b.Bytes()) {
+ t.Errorf("int enc instructions: expected % x got % x", signedResult, b.Bytes())
+ }
+ }
+
+ // uint
+ {
+ b.Reset()
+ data := struct{ a uint }{17}
+ instr := &encInstr{encUint, 6, 0, 0}
+ state := newEncoderState(b)
+ instr.op(instr, state, unsafe.Pointer(&data))
+ if !bytes.Equal(unsignedResult, b.Bytes()) {
+ t.Errorf("uint enc instructions: expected % x got % x", unsignedResult, b.Bytes())
+ }
+ }
+
+ // int8
+ {
+ b.Reset()
+ data := struct{ a int8 }{17}
+ instr := &encInstr{encInt8, 6, 0, 0}
+ state := newEncoderState(b)
+ instr.op(instr, state, unsafe.Pointer(&data))
+ if !bytes.Equal(signedResult, b.Bytes()) {
+ t.Errorf("int8 enc instructions: expected % x got % x", signedResult, b.Bytes())
+ }
+ }
+
+ // uint8
+ {
+ b.Reset()
+ data := struct{ a uint8 }{17}
+ instr := &encInstr{encUint8, 6, 0, 0}
+ state := newEncoderState(b)
+ instr.op(instr, state, unsafe.Pointer(&data))
+ if !bytes.Equal(unsignedResult, b.Bytes()) {
+ t.Errorf("uint8 enc instructions: expected % x got % x", unsignedResult, b.Bytes())
+ }
+ }
+
+ // int16
+ {
+ b.Reset()
+ data := struct{ a int16 }{17}
+ instr := &encInstr{encInt16, 6, 0, 0}
+ state := newEncoderState(b)
+ instr.op(instr, state, unsafe.Pointer(&data))
+ if !bytes.Equal(signedResult, b.Bytes()) {
+ t.Errorf("int16 enc instructions: expected % x got % x", signedResult, b.Bytes())
+ }
+ }
+
+ // uint16
+ {
+ b.Reset()
+ data := struct{ a uint16 }{17}
+ instr := &encInstr{encUint16, 6, 0, 0}
+ state := newEncoderState(b)
+ instr.op(instr, state, unsafe.Pointer(&data))
+ if !bytes.Equal(unsignedResult, b.Bytes()) {
+ t.Errorf("uint16 enc instructions: expected % x got % x", unsignedResult, b.Bytes())
+ }
+ }
+
+ // int32
+ {
+ b.Reset()
+ data := struct{ a int32 }{17}
+ instr := &encInstr{encInt32, 6, 0, 0}
+ state := newEncoderState(b)
+ instr.op(instr, state, unsafe.Pointer(&data))
+ if !bytes.Equal(signedResult, b.Bytes()) {
+ t.Errorf("int32 enc instructions: expected % x got % x", signedResult, b.Bytes())
+ }
+ }
+
+ // uint32
+ {
+ b.Reset()
+ data := struct{ a uint32 }{17}
+ instr := &encInstr{encUint32, 6, 0, 0}
+ state := newEncoderState(b)
+ instr.op(instr, state, unsafe.Pointer(&data))
+ if !bytes.Equal(unsignedResult, b.Bytes()) {
+ t.Errorf("uint32 enc instructions: expected % x got % x", unsignedResult, b.Bytes())
+ }
+ }
+
+ // int64
+ {
+ b.Reset()
+ data := struct{ a int64 }{17}
+ instr := &encInstr{encInt64, 6, 0, 0}
+ state := newEncoderState(b)
+ instr.op(instr, state, unsafe.Pointer(&data))
+ if !bytes.Equal(signedResult, b.Bytes()) {
+ t.Errorf("int64 enc instructions: expected % x got % x", signedResult, b.Bytes())
+ }
+ }
+
+ // uint64
+ {
+ b.Reset()
+ data := struct{ a uint64 }{17}
+ instr := &encInstr{encUint64, 6, 0, 0}
+ state := newEncoderState(b)
+ instr.op(instr, state, unsafe.Pointer(&data))
+ if !bytes.Equal(unsignedResult, b.Bytes()) {
+ t.Errorf("uint64 enc instructions: expected % x got % x", unsignedResult, b.Bytes())
+ }
+ }
+
+ // float32
+ {
+ b.Reset()
+ data := struct{ a float32 }{17}
+ instr := &encInstr{encFloat32, 6, 0, 0}
+ state := newEncoderState(b)
+ instr.op(instr, state, unsafe.Pointer(&data))
+ if !bytes.Equal(floatResult, b.Bytes()) {
+ t.Errorf("float32 enc instructions: expected % x got % x", floatResult, b.Bytes())
+ }
+ }
+
+ // float64
+ {
+ b.Reset()
+ data := struct{ a float64 }{17}
+ instr := &encInstr{encFloat64, 6, 0, 0}
+ state := newEncoderState(b)
+ instr.op(instr, state, unsafe.Pointer(&data))
+ if !bytes.Equal(floatResult, b.Bytes()) {
+ t.Errorf("float64 enc instructions: expected % x got % x", floatResult, b.Bytes())
+ }
+ }
+
+ // bytes == []uint8
+ {
+ b.Reset()
+ data := struct{ a []byte }{[]byte("hello")}
+ instr := &encInstr{encUint8Array, 6, 0, 0}
+ state := newEncoderState(b)
+ instr.op(instr, state, unsafe.Pointer(&data))
+ if !bytes.Equal(bytesResult, b.Bytes()) {
+ t.Errorf("bytes enc instructions: expected % x got % x", bytesResult, b.Bytes())
+ }
+ }
+
+ // string
+ {
+ b.Reset()
+ data := struct{ a string }{"hello"}
+ instr := &encInstr{encString, 6, 0, 0}
+ state := newEncoderState(b)
+ instr.op(instr, state, unsafe.Pointer(&data))
+ if !bytes.Equal(bytesResult, b.Bytes()) {
+ t.Errorf("string enc instructions: expected % x got % x", bytesResult, b.Bytes())
+ }
+ }
+}
+
+func execDec(typ string, instr *decInstr, state *decoderState, t *testing.T, p unsafe.Pointer) {
+ defer testError(t)
+ v := int(state.decodeUint())
+ if v+state.fieldnum != 6 {
+ t.Fatalf("decoding field number %d, got %d", 6, v+state.fieldnum)
+ }
+ instr.op(instr, state, decIndirect(p, instr.indir))
+ state.fieldnum = 6
+}
+
+func newDecodeStateFromData(data []byte) *decoderState {
+ b := bytes.NewBuffer(data)
+ state := newDecodeState(b)
+ state.fieldnum = -1
+ return state
+}
+
+// Test instruction execution for decoding.
+// Do not run the machine yet; instead do individual instructions crafted by hand.
+func TestScalarDecInstructions(t *testing.T) {
+ ovfl := errors.New("overflow")
+
+ // bool
+ {
+ var data struct {
+ a bool
+ }
+ instr := &decInstr{decBool, 6, 0, 0, ovfl}
+ state := newDecodeStateFromData(boolResult)
+ execDec("bool", instr, state, t, unsafe.Pointer(&data))
+ if data.a != true {
+ t.Errorf("bool a = %v not true", data.a)
+ }
+ }
+ // int
+ {
+ var data struct {
+ a int
+ }
+ instr := &decInstr{decOpTable[reflect.Int], 6, 0, 0, ovfl}
+ state := newDecodeStateFromData(signedResult)
+ execDec("int", instr, state, t, unsafe.Pointer(&data))
+ if data.a != 17 {
+ t.Errorf("int a = %v not 17", data.a)
+ }
+ }
+
+ // uint
+ {
+ var data struct {
+ a uint
+ }
+ instr := &decInstr{decOpTable[reflect.Uint], 6, 0, 0, ovfl}
+ state := newDecodeStateFromData(unsignedResult)
+ execDec("uint", instr, state, t, unsafe.Pointer(&data))
+ if data.a != 17 {
+ t.Errorf("uint a = %v not 17", data.a)
+ }
+ }
+
+ // int8
+ {
+ var data struct {
+ a int8
+ }
+ instr := &decInstr{decInt8, 6, 0, 0, ovfl}
+ state := newDecodeStateFromData(signedResult)
+ execDec("int8", instr, state, t, unsafe.Pointer(&data))
+ if data.a != 17 {
+ t.Errorf("int8 a = %v not 17", data.a)
+ }
+ }
+
+ // uint8
+ {
+ var data struct {
+ a uint8
+ }
+ instr := &decInstr{decUint8, 6, 0, 0, ovfl}
+ state := newDecodeStateFromData(unsignedResult)
+ execDec("uint8", instr, state, t, unsafe.Pointer(&data))
+ if data.a != 17 {
+ t.Errorf("uint8 a = %v not 17", data.a)
+ }
+ }
+
+ // int16
+ {
+ var data struct {
+ a int16
+ }
+ instr := &decInstr{decInt16, 6, 0, 0, ovfl}
+ state := newDecodeStateFromData(signedResult)
+ execDec("int16", instr, state, t, unsafe.Pointer(&data))
+ if data.a != 17 {
+ t.Errorf("int16 a = %v not 17", data.a)
+ }
+ }
+
+ // uint16
+ {
+ var data struct {
+ a uint16
+ }
+ instr := &decInstr{decUint16, 6, 0, 0, ovfl}
+ state := newDecodeStateFromData(unsignedResult)
+ execDec("uint16", instr, state, t, unsafe.Pointer(&data))
+ if data.a != 17 {
+ t.Errorf("uint16 a = %v not 17", data.a)
+ }
+ }
+
+ // int32
+ {
+ var data struct {
+ a int32
+ }
+ instr := &decInstr{decInt32, 6, 0, 0, ovfl}
+ state := newDecodeStateFromData(signedResult)
+ execDec("int32", instr, state, t, unsafe.Pointer(&data))
+ if data.a != 17 {
+ t.Errorf("int32 a = %v not 17", data.a)
+ }
+ }
+
+ // uint32
+ {
+ var data struct {
+ a uint32
+ }
+ instr := &decInstr{decUint32, 6, 0, 0, ovfl}
+ state := newDecodeStateFromData(unsignedResult)
+ execDec("uint32", instr, state, t, unsafe.Pointer(&data))
+ if data.a != 17 {
+ t.Errorf("uint32 a = %v not 17", data.a)
+ }
+ }
+
+ // uintptr
+ {
+ var data struct {
+ a uintptr
+ }
+ instr := &decInstr{decOpTable[reflect.Uintptr], 6, 0, 0, ovfl}
+ state := newDecodeStateFromData(unsignedResult)
+ execDec("uintptr", instr, state, t, unsafe.Pointer(&data))
+ if data.a != 17 {
+ t.Errorf("uintptr a = %v not 17", data.a)
+ }
+ }
+
+ // int64
+ {
+ var data struct {
+ a int64
+ }
+ instr := &decInstr{decInt64, 6, 0, 0, ovfl}
+ state := newDecodeStateFromData(signedResult)
+ execDec("int64", instr, state, t, unsafe.Pointer(&data))
+ if data.a != 17 {
+ t.Errorf("int64 a = %v not 17", data.a)
+ }
+ }
+
+ // uint64
+ {
+ var data struct {
+ a uint64
+ }
+ instr := &decInstr{decUint64, 6, 0, 0, ovfl}
+ state := newDecodeStateFromData(unsignedResult)
+ execDec("uint64", instr, state, t, unsafe.Pointer(&data))
+ if data.a != 17 {
+ t.Errorf("uint64 a = %v not 17", data.a)
+ }
+ }
+
+ // float32
+ {
+ var data struct {
+ a float32
+ }
+ instr := &decInstr{decFloat32, 6, 0, 0, ovfl}
+ state := newDecodeStateFromData(floatResult)
+ execDec("float32", instr, state, t, unsafe.Pointer(&data))
+ if data.a != 17 {
+ t.Errorf("float32 a = %v not 17", data.a)
+ }
+ }
+
+ // float64
+ {
+ var data struct {
+ a float64
+ }
+ instr := &decInstr{decFloat64, 6, 0, 0, ovfl}
+ state := newDecodeStateFromData(floatResult)
+ execDec("float64", instr, state, t, unsafe.Pointer(&data))
+ if data.a != 17 {
+ t.Errorf("float64 a = %v not 17", data.a)
+ }
+ }
+
+ // complex64
+ {
+ var data struct {
+ a complex64
+ }
+ instr := &decInstr{decOpTable[reflect.Complex64], 6, 0, 0, ovfl}
+ state := newDecodeStateFromData(complexResult)
+ execDec("complex", instr, state, t, unsafe.Pointer(&data))
+ if data.a != 17+19i {
+ t.Errorf("complex a = %v not 17+19i", data.a)
+ }
+ }
+
+ // complex128
+ {
+ var data struct {
+ a complex128
+ }
+ instr := &decInstr{decOpTable[reflect.Complex128], 6, 0, 0, ovfl}
+ state := newDecodeStateFromData(complexResult)
+ execDec("complex", instr, state, t, unsafe.Pointer(&data))
+ if data.a != 17+19i {
+ t.Errorf("complex a = %v not 17+19i", data.a)
+ }
+ }
+
+ // bytes == []uint8
+ {
+ var data struct {
+ a []byte
+ }
+ instr := &decInstr{decUint8Slice, 6, 0, 0, ovfl}
+ state := newDecodeStateFromData(bytesResult)
+ execDec("bytes", instr, state, t, unsafe.Pointer(&data))
+ if string(data.a) != "hello" {
+ t.Errorf(`bytes a = %q not "hello"`, string(data.a))
+ }
+ }
+
+ // string
+ {
+ var data struct {
+ a string
+ }
+ instr := &decInstr{decString, 6, 0, 0, ovfl}
+ state := newDecodeStateFromData(bytesResult)
+ execDec("bytes", instr, state, t, unsafe.Pointer(&data))
+ if data.a != "hello" {
+ t.Errorf(`bytes a = %q not "hello"`, data.a)
+ }
+ }
+}
+
+func TestEndToEnd(t *testing.T) {
+ type T2 struct {
+ T string
+ }
+ s1 := "string1"
+ s2 := "string2"
+ type T1 struct {
+ A, B, C int
+ M map[string]*float64
+ EmptyMap map[string]int // to check that we receive a non-nil map.
+ N *[3]float64
+ Strs *[2]string
+ Int64s *[]int64
+ RI complex64
+ S string
+ Y []byte
+ T *T2
+ }
+ pi := 3.14159
+ e := 2.71828
+ t1 := &T1{
+ A: 17,
+ B: 18,
+ C: -5,
+ M: map[string]*float64{"pi": &pi, "e": &e},
+ EmptyMap: make(map[string]int),
+ N: &[3]float64{1.5, 2.5, 3.5},
+ Strs: &[2]string{s1, s2},
+ Int64s: &[]int64{77, 89, 123412342134},
+ RI: 17 - 23i,
+ S: "Now is the time",
+ Y: []byte("hello, sailor"),
+ T: &T2{"this is T2"},
+ }
+ b := new(bytes.Buffer)
+ err := NewEncoder(b).Encode(t1)
+ if err != nil {
+ t.Error("encode:", err)
+ }
+ var _t1 T1
+ err = NewDecoder(b).Decode(&_t1)
+ if err != nil {
+ t.Fatal("decode:", err)
+ }
+ if !reflect.DeepEqual(t1, &_t1) {
+ t.Errorf("encode expected %v got %v", *t1, _t1)
+ }
+ // Be absolutely sure the received map is non-nil.
+ if t1.EmptyMap == nil {
+ t.Errorf("nil map sent")
+ }
+ if _t1.EmptyMap == nil {
+ t.Errorf("nil map received")
+ }
+}
+
+func TestOverflow(t *testing.T) {
+ type inputT struct {
+ Maxi int64
+ Mini int64
+ Maxu uint64
+ Maxf float64
+ Minf float64
+ Maxc complex128
+ Minc complex128
+ }
+ var it inputT
+ var err error
+ b := new(bytes.Buffer)
+ enc := NewEncoder(b)
+ dec := NewDecoder(b)
+
+ // int8
+ b.Reset()
+ it = inputT{
+ Maxi: math.MaxInt8 + 1,
+ }
+ type outi8 struct {
+ Maxi int8
+ Mini int8
+ }
+ var o1 outi8
+ enc.Encode(it)
+ err = dec.Decode(&o1)
+ if err == nil || err.Error() != `value for "Maxi" out of range` {
+ t.Error("wrong overflow error for int8:", err)
+ }
+ it = inputT{
+ Mini: math.MinInt8 - 1,
+ }
+ b.Reset()
+ enc.Encode(it)
+ err = dec.Decode(&o1)
+ if err == nil || err.Error() != `value for "Mini" out of range` {
+ t.Error("wrong underflow error for int8:", err)
+ }
+
+ // int16
+ b.Reset()
+ it = inputT{
+ Maxi: math.MaxInt16 + 1,
+ }
+ type outi16 struct {
+ Maxi int16
+ Mini int16
+ }
+ var o2 outi16
+ enc.Encode(it)
+ err = dec.Decode(&o2)
+ if err == nil || err.Error() != `value for "Maxi" out of range` {
+ t.Error("wrong overflow error for int16:", err)
+ }
+ it = inputT{
+ Mini: math.MinInt16 - 1,
+ }
+ b.Reset()
+ enc.Encode(it)
+ err = dec.Decode(&o2)
+ if err == nil || err.Error() != `value for "Mini" out of range` {
+ t.Error("wrong underflow error for int16:", err)
+ }
+
+ // int32
+ b.Reset()
+ it = inputT{
+ Maxi: math.MaxInt32 + 1,
+ }
+ type outi32 struct {
+ Maxi int32
+ Mini int32
+ }
+ var o3 outi32
+ enc.Encode(it)
+ err = dec.Decode(&o3)
+ if err == nil || err.Error() != `value for "Maxi" out of range` {
+ t.Error("wrong overflow error for int32:", err)
+ }
+ it = inputT{
+ Mini: math.MinInt32 - 1,
+ }
+ b.Reset()
+ enc.Encode(it)
+ err = dec.Decode(&o3)
+ if err == nil || err.Error() != `value for "Mini" out of range` {
+ t.Error("wrong underflow error for int32:", err)
+ }
+
+ // uint8
+ b.Reset()
+ it = inputT{
+ Maxu: math.MaxUint8 + 1,
+ }
+ type outu8 struct {
+ Maxu uint8
+ }
+ var o4 outu8
+ enc.Encode(it)
+ err = dec.Decode(&o4)
+ if err == nil || err.Error() != `value for "Maxu" out of range` {
+ t.Error("wrong overflow error for uint8:", err)
+ }
+
+ // uint16
+ b.Reset()
+ it = inputT{
+ Maxu: math.MaxUint16 + 1,
+ }
+ type outu16 struct {
+ Maxu uint16
+ }
+ var o5 outu16
+ enc.Encode(it)
+ err = dec.Decode(&o5)
+ if err == nil || err.Error() != `value for "Maxu" out of range` {
+ t.Error("wrong overflow error for uint16:", err)
+ }
+
+ // uint32
+ b.Reset()
+ it = inputT{
+ Maxu: math.MaxUint32 + 1,
+ }
+ type outu32 struct {
+ Maxu uint32
+ }
+ var o6 outu32
+ enc.Encode(it)
+ err = dec.Decode(&o6)
+ if err == nil || err.Error() != `value for "Maxu" out of range` {
+ t.Error("wrong overflow error for uint32:", err)
+ }
+
+ // float32
+ b.Reset()
+ it = inputT{
+ Maxf: math.MaxFloat32 * 2,
+ }
+ type outf32 struct {
+ Maxf float32
+ Minf float32
+ }
+ var o7 outf32
+ enc.Encode(it)
+ err = dec.Decode(&o7)
+ if err == nil || err.Error() != `value for "Maxf" out of range` {
+ t.Error("wrong overflow error for float32:", err)
+ }
+
+ // complex64
+ b.Reset()
+ it = inputT{
+ Maxc: complex(math.MaxFloat32*2, math.MaxFloat32*2),
+ }
+ type outc64 struct {
+ Maxc complex64
+ Minc complex64
+ }
+ var o8 outc64
+ enc.Encode(it)
+ err = dec.Decode(&o8)
+ if err == nil || err.Error() != `value for "Maxc" out of range` {
+ t.Error("wrong overflow error for complex64:", err)
+ }
+}
+
+func TestNesting(t *testing.T) {
+ type RT struct {
+ A string
+ Next *RT
+ }
+ rt := new(RT)
+ rt.A = "level1"
+ rt.Next = new(RT)
+ rt.Next.A = "level2"
+ b := new(bytes.Buffer)
+ NewEncoder(b).Encode(rt)
+ var drt RT
+ dec := NewDecoder(b)
+ err := dec.Decode(&drt)
+ if err != nil {
+ t.Fatal("decoder error:", err)
+ }
+ if drt.A != rt.A {
+ t.Errorf("nesting: encode expected %v got %v", *rt, drt)
+ }
+ if drt.Next == nil {
+ t.Errorf("nesting: recursion failed")
+ }
+ if drt.Next.A != rt.Next.A {
+ t.Errorf("nesting: encode expected %v got %v", *rt.Next, *drt.Next)
+ }
+}
+
+// These three structures have the same data with different indirections
+type T0 struct {
+ A int
+ B int
+ C int
+ D int
+}
+type T1 struct {
+ A int
+ B *int
+ C **int
+ D ***int
+}
+type T2 struct {
+ A ***int
+ B **int
+ C *int
+ D int
+}
+
+func TestAutoIndirection(t *testing.T) {
+ // First transfer t1 into t0
+ var t1 T1
+ t1.A = 17
+ t1.B = new(int)
+ *t1.B = 177
+ t1.C = new(*int)
+ *t1.C = new(int)
+ **t1.C = 1777
+ t1.D = new(**int)
+ *t1.D = new(*int)
+ **t1.D = new(int)
+ ***t1.D = 17777
+ b := new(bytes.Buffer)
+ enc := NewEncoder(b)
+ enc.Encode(t1)
+ dec := NewDecoder(b)
+ var t0 T0
+ dec.Decode(&t0)
+ if t0.A != 17 || t0.B != 177 || t0.C != 1777 || t0.D != 17777 {
+ t.Errorf("t1->t0: expected {17 177 1777 17777}; got %v", t0)
+ }
+
+ // Now transfer t2 into t0
+ var t2 T2
+ t2.D = 17777
+ t2.C = new(int)
+ *t2.C = 1777
+ t2.B = new(*int)
+ *t2.B = new(int)
+ **t2.B = 177
+ t2.A = new(**int)
+ *t2.A = new(*int)
+ **t2.A = new(int)
+ ***t2.A = 17
+ b.Reset()
+ enc.Encode(t2)
+ t0 = T0{}
+ dec.Decode(&t0)
+ if t0.A != 17 || t0.B != 177 || t0.C != 1777 || t0.D != 17777 {
+ t.Errorf("t2->t0 expected {17 177 1777 17777}; got %v", t0)
+ }
+
+ // Now transfer t0 into t1
+ t0 = T0{17, 177, 1777, 17777}
+ b.Reset()
+ enc.Encode(t0)
+ t1 = T1{}
+ dec.Decode(&t1)
+ if t1.A != 17 || *t1.B != 177 || **t1.C != 1777 || ***t1.D != 17777 {
+ t.Errorf("t0->t1 expected {17 177 1777 17777}; got {%d %d %d %d}", t1.A, *t1.B, **t1.C, ***t1.D)
+ }
+
+ // Now transfer t0 into t2
+ b.Reset()
+ enc.Encode(t0)
+ t2 = T2{}
+ dec.Decode(&t2)
+ if ***t2.A != 17 || **t2.B != 177 || *t2.C != 1777 || t2.D != 17777 {
+ t.Errorf("t0->t2 expected {17 177 1777 17777}; got {%d %d %d %d}", ***t2.A, **t2.B, *t2.C, t2.D)
+ }
+
+ // Now do t2 again but without pre-allocated pointers.
+ b.Reset()
+ enc.Encode(t0)
+ ***t2.A = 0
+ **t2.B = 0
+ *t2.C = 0
+ t2.D = 0
+ dec.Decode(&t2)
+ if ***t2.A != 17 || **t2.B != 177 || *t2.C != 1777 || t2.D != 17777 {
+ t.Errorf("t0->t2 expected {17 177 1777 17777}; got {%d %d %d %d}", ***t2.A, **t2.B, *t2.C, t2.D)
+ }
+}
+
+type RT0 struct {
+ A int
+ B string
+ C float64
+}
+type RT1 struct {
+ C float64
+ B string
+ A int
+ NotSet string
+}
+
+func TestReorderedFields(t *testing.T) {
+ var rt0 RT0
+ rt0.A = 17
+ rt0.B = "hello"
+ rt0.C = 3.14159
+ b := new(bytes.Buffer)
+ NewEncoder(b).Encode(rt0)
+ dec := NewDecoder(b)
+ var rt1 RT1
+ // Wire type is RT0, local type is RT1.
+ err := dec.Decode(&rt1)
+ if err != nil {
+ t.Fatal("decode error:", err)
+ }
+ if rt0.A != rt1.A || rt0.B != rt1.B || rt0.C != rt1.C {
+ t.Errorf("rt1->rt0: expected %v; got %v", rt0, rt1)
+ }
+}
+
+// Like an RT0 but with fields we'll ignore on the decode side.
+type IT0 struct {
+ A int64
+ B string
+ Ignore_d []int
+ Ignore_e [3]float64
+ Ignore_f bool
+ Ignore_g string
+ Ignore_h []byte
+ Ignore_i *RT1
+ Ignore_m map[string]int
+ C float64
+}
+
+func TestIgnoredFields(t *testing.T) {
+ var it0 IT0
+ it0.A = 17
+ it0.B = "hello"
+ it0.C = 3.14159
+ it0.Ignore_d = []int{1, 2, 3}
+ it0.Ignore_e[0] = 1.0
+ it0.Ignore_e[1] = 2.0
+ it0.Ignore_e[2] = 3.0
+ it0.Ignore_f = true
+ it0.Ignore_g = "pay no attention"
+ it0.Ignore_h = []byte("to the curtain")
+ it0.Ignore_i = &RT1{3.1, "hi", 7, "hello"}
+ it0.Ignore_m = map[string]int{"one": 1, "two": 2}
+
+ b := new(bytes.Buffer)
+ NewEncoder(b).Encode(it0)
+ dec := NewDecoder(b)
+ var rt1 RT1
+ // Wire type is IT0, local type is RT1.
+ err := dec.Decode(&rt1)
+ if err != nil {
+ t.Error("error: ", err)
+ }
+ if int(it0.A) != rt1.A || it0.B != rt1.B || it0.C != rt1.C {
+ t.Errorf("rt0->rt1: expected %v; got %v", it0, rt1)
+ }
+}
+
+func TestBadRecursiveType(t *testing.T) {
+ type Rec ***Rec
+ var rec Rec
+ b := new(bytes.Buffer)
+ err := NewEncoder(b).Encode(&rec)
+ if err == nil {
+ t.Error("expected error; got none")
+ } else if strings.Index(err.Error(), "recursive") < 0 {
+ t.Error("expected recursive type error; got", err)
+ }
+ // Can't test decode easily because we can't encode one, so we can't pass one to a Decoder.
+}
+
+type Bad0 struct {
+ CH chan int
+ C float64
+}
+
+func TestInvalidField(t *testing.T) {
+ var bad0 Bad0
+ bad0.CH = make(chan int)
+ b := new(bytes.Buffer)
+ dummyEncoder := new(Encoder) // sufficient for this purpose.
+ dummyEncoder.encode(b, reflect.ValueOf(&bad0), userType(reflect.TypeOf(&bad0)))
+ if err := dummyEncoder.err; err == nil {
+ t.Error("expected error; got none")
+ } else if strings.Index(err.Error(), "type") < 0 {
+ t.Error("expected type error; got", err)
+ }
+}
+
+type Indirect struct {
+ A ***[3]int
+ S ***[]int
+ M ****map[string]int
+}
+
+type Direct struct {
+ A [3]int
+ S []int
+ M map[string]int
+}
+
+func TestIndirectSliceMapArray(t *testing.T) {
+ // Marshal indirect, unmarshal to direct.
+ i := new(Indirect)
+ i.A = new(**[3]int)
+ *i.A = new(*[3]int)
+ **i.A = new([3]int)
+ ***i.A = [3]int{1, 2, 3}
+ i.S = new(**[]int)
+ *i.S = new(*[]int)
+ **i.S = new([]int)
+ ***i.S = []int{4, 5, 6}
+ i.M = new(***map[string]int)
+ *i.M = new(**map[string]int)
+ **i.M = new(*map[string]int)
+ ***i.M = new(map[string]int)
+ ****i.M = map[string]int{"one": 1, "two": 2, "three": 3}
+ b := new(bytes.Buffer)
+ NewEncoder(b).Encode(i)
+ dec := NewDecoder(b)
+ var d Direct
+ err := dec.Decode(&d)
+ if err != nil {
+ t.Error("error: ", err)
+ }
+ if len(d.A) != 3 || d.A[0] != 1 || d.A[1] != 2 || d.A[2] != 3 {
+ t.Errorf("indirect to direct: d.A is %v not %v", d.A, ***i.A)
+ }
+ if len(d.S) != 3 || d.S[0] != 4 || d.S[1] != 5 || d.S[2] != 6 {
+ t.Errorf("indirect to direct: d.S is %v not %v", d.S, ***i.S)
+ }
+ if len(d.M) != 3 || d.M["one"] != 1 || d.M["two"] != 2 || d.M["three"] != 3 {
+ t.Errorf("indirect to direct: d.M is %v not %v", d.M, ***i.M)
+ }
+ // Marshal direct, unmarshal to indirect.
+ d.A = [3]int{11, 22, 33}
+ d.S = []int{44, 55, 66}
+ d.M = map[string]int{"four": 4, "five": 5, "six": 6}
+ i = new(Indirect)
+ b.Reset()
+ NewEncoder(b).Encode(d)
+ dec = NewDecoder(b)
+ err = dec.Decode(&i)
+ if err != nil {
+ t.Fatal("error: ", err)
+ }
+ if len(***i.A) != 3 || (***i.A)[0] != 11 || (***i.A)[1] != 22 || (***i.A)[2] != 33 {
+ t.Errorf("direct to indirect: ***i.A is %v not %v", ***i.A, d.A)
+ }
+ if len(***i.S) != 3 || (***i.S)[0] != 44 || (***i.S)[1] != 55 || (***i.S)[2] != 66 {
+ t.Errorf("direct to indirect: ***i.S is %v not %v", ***i.S, ***i.S)
+ }
+ if len(****i.M) != 3 || (****i.M)["four"] != 4 || (****i.M)["five"] != 5 || (****i.M)["six"] != 6 {
+ t.Errorf("direct to indirect: ****i.M is %v not %v", ****i.M, d.M)
+ }
+}
+
+// An interface with several implementations
+type Squarer interface {
+ Square() int
+}
+
+type Int int
+
+func (i Int) Square() int {
+ return int(i * i)
+}
+
+type Float float64
+
+func (f Float) Square() int {
+ return int(f * f)
+}
+
+type Vector []int
+
+func (v Vector) Square() int {
+ sum := 0
+ for _, x := range v {
+ sum += x * x
+ }
+ return sum
+}
+
+type Point struct {
+ X, Y int
+}
+
+func (p Point) Square() int {
+ return p.X*p.X + p.Y*p.Y
+}
+
+// A struct with interfaces in it.
+type InterfaceItem struct {
+ I int
+ Sq1, Sq2, Sq3 Squarer
+ F float64
+ Sq []Squarer
+}
+
+// The same struct without interfaces
+type NoInterfaceItem struct {
+ I int
+ F float64
+}
+
+func TestInterface(t *testing.T) {
+ iVal := Int(3)
+ fVal := Float(5)
+ // Sending a Vector will require that the receiver define a type in the middle of
+ // receiving the value for item2.
+ vVal := Vector{1, 2, 3}
+ b := new(bytes.Buffer)
+ item1 := &InterfaceItem{1, iVal, fVal, vVal, 11.5, []Squarer{iVal, fVal, nil, vVal}}
+ // Register the types.
+ Register(Int(0))
+ Register(Float(0))
+ Register(Vector{})
+ err := NewEncoder(b).Encode(item1)
+ if err != nil {
+ t.Error("expected no encode error; got", err)
+ }
+
+ item2 := InterfaceItem{}
+ err = NewDecoder(b).Decode(&item2)
+ if err != nil {
+ t.Fatal("decode:", err)
+ }
+ if item2.I != item1.I {
+ t.Error("normal int did not decode correctly")
+ }
+ if item2.Sq1 == nil || item2.Sq1.Square() != iVal.Square() {
+ t.Error("Int did not decode correctly")
+ }
+ if item2.Sq2 == nil || item2.Sq2.Square() != fVal.Square() {
+ t.Error("Float did not decode correctly")
+ }
+ if item2.Sq3 == nil || item2.Sq3.Square() != vVal.Square() {
+ t.Error("Vector did not decode correctly")
+ }
+ if item2.F != item1.F {
+ t.Error("normal float did not decode correctly")
+ }
+ // Now check that we received a slice of Squarers correctly, including a nil element
+ if len(item1.Sq) != len(item2.Sq) {
+ t.Fatalf("[]Squarer length wrong: got %d; expected %d", len(item2.Sq), len(item1.Sq))
+ }
+ for i, v1 := range item1.Sq {
+ v2 := item2.Sq[i]
+ if v1 == nil || v2 == nil {
+ if v1 != nil || v2 != nil {
+ t.Errorf("item %d inconsistent nils", i)
+ }
+ continue
+ }
+ if v1.Square() != v2.Square() {
+ t.Errorf("item %d inconsistent values: %v %v", i, v1, v2)
+ }
+ }
+}
+
+// A struct with all basic types, stored in interfaces.
+type BasicInterfaceItem struct {
+ Int, Int8, Int16, Int32, Int64 interface{}
+ Uint, Uint8, Uint16, Uint32, Uint64 interface{}
+ Float32, Float64 interface{}
+ Complex64, Complex128 interface{}
+ Bool interface{}
+ String interface{}
+ Bytes interface{}
+}
+
+func TestInterfaceBasic(t *testing.T) {
+ b := new(bytes.Buffer)
+ item1 := &BasicInterfaceItem{
+ int(1), int8(1), int16(1), int32(1), int64(1),
+ uint(1), uint8(1), uint16(1), uint32(1), uint64(1),
+ float32(1), 1.0,
+ complex64(1i), complex128(1i),
+ true,
+ "hello",
+ []byte("sailor"),
+ }
+ err := NewEncoder(b).Encode(item1)
+ if err != nil {
+ t.Error("expected no encode error; got", err)
+ }
+
+ item2 := &BasicInterfaceItem{}
+ err = NewDecoder(b).Decode(&item2)
+ if err != nil {
+ t.Fatal("decode:", err)
+ }
+ if !reflect.DeepEqual(item1, item2) {
+ t.Errorf("encode expected %v got %v", item1, item2)
+ }
+ // Hand check a couple for correct types.
+ if v, ok := item2.Bool.(bool); !ok || !v {
+ t.Error("boolean should be true")
+ }
+ if v, ok := item2.String.(string); !ok || v != item1.String.(string) {
+ t.Errorf("string should be %v is %v", item1.String, v)
+ }
+}
+
+type String string
+
+type PtrInterfaceItem struct {
+ Str1 interface{} // basic
+ Str2 interface{} // derived
+}
+
+// We'll send pointers; should receive values.
+// Also check that we can register T but send *T.
+func TestInterfacePointer(t *testing.T) {
+ b := new(bytes.Buffer)
+ str1 := "howdy"
+ str2 := String("kiddo")
+ item1 := &PtrInterfaceItem{
+ &str1,
+ &str2,
+ }
+ // Register the type.
+ Register(str2)
+ err := NewEncoder(b).Encode(item1)
+ if err != nil {
+ t.Error("expected no encode error; got", err)
+ }
+
+ item2 := &PtrInterfaceItem{}
+ err = NewDecoder(b).Decode(&item2)
+ if err != nil {
+ t.Fatal("decode:", err)
+ }
+ // Hand test for correct types and values.
+ if v, ok := item2.Str1.(string); !ok || v != str1 {
+ t.Errorf("basic string failed: %q should be %q", v, str1)
+ }
+ if v, ok := item2.Str2.(String); !ok || v != str2 {
+ t.Errorf("derived type String failed: %q should be %q", v, str2)
+ }
+}
+
+func TestIgnoreInterface(t *testing.T) {
+ iVal := Int(3)
+ fVal := Float(5)
+ // Sending a Point will require that the receiver define a type in the middle of
+ // receiving the value for item2.
+ pVal := Point{2, 3}
+ b := new(bytes.Buffer)
+ item1 := &InterfaceItem{1, iVal, fVal, pVal, 11.5, nil}
+ // Register the types.
+ Register(Int(0))
+ Register(Float(0))
+ Register(Point{})
+ err := NewEncoder(b).Encode(item1)
+ if err != nil {
+ t.Error("expected no encode error; got", err)
+ }
+
+ item2 := NoInterfaceItem{}
+ err = NewDecoder(b).Decode(&item2)
+ if err != nil {
+ t.Fatal("decode:", err)
+ }
+ if item2.I != item1.I {
+ t.Error("normal int did not decode correctly")
+ }
+ if item2.F != item1.F {
+ t.Error("normal float did not decode correctly")
+ }
+}
+
+type U struct {
+ A int
+ B string
+ c float64
+ D uint
+}
+
+func TestUnexportedFields(t *testing.T) {
+ var u0 U
+ u0.A = 17
+ u0.B = "hello"
+ u0.c = 3.14159
+ u0.D = 23
+ b := new(bytes.Buffer)
+ NewEncoder(b).Encode(u0)
+ dec := NewDecoder(b)
+ var u1 U
+ u1.c = 1234.
+ err := dec.Decode(&u1)
+ if err != nil {
+ t.Fatal("decode error:", err)
+ }
+ if u0.A != u1.A || u0.B != u1.B || u0.D != u1.D {
+ t.Errorf("u1->u0: expected %v; got %v", u0, u1)
+ }
+ if u1.c != 1234. {
+ t.Error("u1.c modified")
+ }
+}
+
+var singletons = []interface{}{
+ true,
+ 7,
+ 3.2,
+ "hello",
+ [3]int{11, 22, 33},
+ []float32{0.5, 0.25, 0.125},
+ map[string]int{"one": 1, "two": 2},
+}
+
+func TestDebugSingleton(t *testing.T) {
+ if debugFunc == nil {
+ return
+ }
+ b := new(bytes.Buffer)
+ // Accumulate a number of values and print them out all at once.
+ for _, x := range singletons {
+ err := NewEncoder(b).Encode(x)
+ if err != nil {
+ t.Fatal("encode:", err)
+ }
+ }
+ debugFunc(b)
+}
+
+// A type that won't be defined in the gob until we send it in an interface value.
+type OnTheFly struct {
+ A int
+}
+
+type DT struct {
+ // X OnTheFly
+ A int
+ B string
+ C float64
+ I interface{}
+ J interface{}
+ I_nil interface{}
+ M map[string]int
+ T [3]int
+ S []string
+}
+
+func TestDebugStruct(t *testing.T) {
+ if debugFunc == nil {
+ return
+ }
+ Register(OnTheFly{})
+ var dt DT
+ dt.A = 17
+ dt.B = "hello"
+ dt.C = 3.14159
+ dt.I = 271828
+ dt.J = OnTheFly{3}
+ dt.I_nil = nil
+ dt.M = map[string]int{"one": 1, "two": 2}
+ dt.T = [3]int{11, 22, 33}
+ dt.S = []string{"hi", "joe"}
+ b := new(bytes.Buffer)
+ err := NewEncoder(b).Encode(dt)
+ if err != nil {
+ t.Fatal("encode:", err)
+ }
+ debugBuffer := bytes.NewBuffer(b.Bytes())
+ dt2 := &DT{}
+ err = NewDecoder(b).Decode(&dt2)
+ if err != nil {
+ t.Error("decode:", err)
+ }
+ debugFunc(debugBuffer)
+}
--- /dev/null
+package gob
+
+// This file is not normally included in the gob package. Used only for debugging the package itself.
+// Add debug.go to the files listed in the Makefile to add Debug to the gob package.
+// Except for reading uints, it is an implementation of a reader that is independent of
+// the one implemented by Decoder.
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+ "sync"
+)
+
+var dumpBytes = false // If true, print the remaining bytes in the input buffer at each item.
+
+// Init installs the debugging facility. If this file is not compiled in the
+// package, the tests in codec_test.go are no-ops.
+func init() {
+ debugFunc = Debug
+}
+
+var (
+ blanks = bytes.Repeat([]byte{' '}, 3*10)
+ empty = []byte(": <empty>\n")
+ tabs = strings.Repeat("\t", 100)
+)
+
+// tab indents itself when printed.
+type tab int
+
+func (t tab) String() string {
+ n := int(t)
+ if n > len(tabs) {
+ n = len(tabs)
+ }
+ return tabs[0:n]
+}
+
+func (t tab) print() {
+ fmt.Fprint(os.Stderr, t)
+}
+
+// A peekReader wraps an io.Reader, allowing one to peek ahead to see
+// what's coming without stealing the data from the client of the Reader.
+type peekReader struct {
+ r io.Reader
+ data []byte // read-ahead data
+}
+
+// newPeekReader returns a peekReader that wraps r.
+func newPeekReader(r io.Reader) *peekReader {
+ return &peekReader{r: r}
+}
+
+// Read is the usual method. It will first take data that has been read ahead.
+func (p *peekReader) Read(b []byte) (n int, err error) {
+ if len(p.data) == 0 {
+ return p.r.Read(b)
+ }
+ // Satisfy what's possible from the read-ahead data.
+ n = copy(b, p.data)
+ // Move data down to beginning of slice, to avoid endless growth
+ copy(p.data, p.data[n:])
+ p.data = p.data[:len(p.data)-n]
+ return
+}
+
+// peek returns as many bytes as possible from the unread
+// portion of the stream, up to the length of b.
+func (p *peekReader) peek(b []byte) (n int, err error) {
+ if len(p.data) > 0 {
+ n = copy(b, p.data)
+ if n == len(b) {
+ return
+ }
+ b = b[n:]
+ }
+ if len(b) == 0 {
+ return
+ }
+ m, e := io.ReadFull(p.r, b)
+ if m > 0 {
+ p.data = append(p.data, b[:m]...)
+ }
+ n += m
+ if e == io.ErrUnexpectedEOF {
+ // That means m > 0 but we reached EOF. If we got data
+ // we won't complain about not being able to peek enough.
+ if n > 0 {
+ e = nil
+ } else {
+ e = io.EOF
+ }
+ }
+ return n, e
+}
+
+type debugger struct {
+ mutex sync.Mutex
+ remain int // the number of bytes known to remain in the input
+ remainingKnown bool // the value of 'remain' is valid
+ r *peekReader
+ wireType map[typeId]*wireType
+ tmp []byte // scratch space for decoding uints.
+}
+
+// dump prints the given message and, if dumpBytes is set, the remaining bytes of the input.
+// It arranges to print the output aligned from call to
+// call, to make it easy to see what has been consumed.
+func (deb *debugger) dump(format string, args ...interface{}) {
+ if !dumpBytes {
+ return
+ }
+ fmt.Fprintf(os.Stderr, format+" ", args...)
+ if !deb.remainingKnown {
+ return
+ }
+ if deb.remain < 0 {
+ fmt.Fprintf(os.Stderr, "remaining byte count is negative! %d\n", deb.remain)
+ return
+ }
+ data := make([]byte, deb.remain)
+ n, _ := deb.r.peek(data)
+ if n == 0 {
+ os.Stderr.Write(empty)
+ return
+ }
+ b := new(bytes.Buffer)
+ fmt.Fprintf(b, "[%d]{\n", deb.remain)
+ // Blanks until first byte
+ lineLength := 0
+ if n := len(data); n%10 != 0 {
+ lineLength = 10 - n%10
+ fmt.Fprintf(b, "\t%s", blanks[:lineLength*3])
+ }
+ // 10 bytes per line
+ for len(data) > 0 {
+ if lineLength == 0 {
+ fmt.Fprint(b, "\t")
+ }
+ m := 10 - lineLength
+ lineLength = 0
+ if m > len(data) {
+ m = len(data)
+ }
+ fmt.Fprintf(b, "% x\n", data[:m])
+ data = data[m:]
+ }
+ fmt.Fprint(b, "}\n")
+ os.Stderr.Write(b.Bytes())
+}
+
+// Debug prints a human-readable representation of the gob data read from r.
+// It is a no-op unless debugging was enabled when the package was built.
+func Debug(r io.Reader) {
+ err := debug(r)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "gob debug: %s\n", err)
+ }
+}
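+
+// A minimal usage sketch (assuming debug.go is compiled into the package as
+// described above; the value and variable names here are only illustrative):
+//
+//	var buf bytes.Buffer
+//	if err := NewEncoder(&buf).Encode(someValue); err != nil {
+//		// handle the error
+//	}
+//	Debug(bytes.NewBuffer(buf.Bytes()))
+//
+// Debug consumes the reader, so hand it a copy of the bytes if the original
+// stream is still needed, as TestDebugStruct does in codec_test.go.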
+
+// debug implements Debug, but catches panics and returns
+// them as errors to be printed by Debug.
+func debug(r io.Reader) (err error) {
+ defer catchError(&err)
+ fmt.Fprintln(os.Stderr, "Start of debugging")
+ deb := &debugger{
+ r: newPeekReader(r),
+ wireType: make(map[typeId]*wireType),
+ tmp: make([]byte, 16),
+ }
+ if b, ok := r.(*bytes.Buffer); ok {
+ deb.remain = b.Len()
+ deb.remainingKnown = true
+ }
+ deb.gobStream()
+ return
+}
+
+// note that we've consumed some bytes
+func (deb *debugger) consumed(n int) {
+ if deb.remainingKnown {
+ deb.remain -= n
+ }
+}
+
+// int64 decodes and returns the next integer, which must be present.
+// Don't call this if you could be at EOF.
+func (deb *debugger) int64() int64 {
+ return toInt(deb.uint64())
+}
+
+// uint64 decodes and returns the next unsigned integer, which must be present.
+// Don't call this if you could be at EOF.
+// TODO: handle errors better.
+func (deb *debugger) uint64() uint64 {
+ n, w, err := decodeUintReader(deb.r, deb.tmp)
+ if err != nil {
+ errorf("debug: read error: %s", err)
+ }
+ deb.consumed(w)
+ return n
+}
+
+// GobStream:
+// DelimitedMessage* (until EOF)
+func (deb *debugger) gobStream() {
+ // Make sure we're single-threaded through here.
+ deb.mutex.Lock()
+ defer deb.mutex.Unlock()
+
+ for deb.delimitedMessage(0) {
+ }
+}
+
+// DelimitedMessage:
+// uint(lengthOfMessage) Message
+func (deb *debugger) delimitedMessage(indent tab) bool {
+ for {
+ n := deb.loadBlock(true)
+ if n < 0 {
+ return false
+ }
+ deb.dump("Delimited message of length %d", n)
+ deb.message(indent)
+ }
+ return true
+}
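+
+// For illustration: a stream carrying two messages with payloads of 3 and 5
+// bytes reads as 03 <3 payload bytes> 05 <5 payload bytes>, since every
+// message is preceded by its byte count encoded as an unsigned integer.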
+
+// loadBlock preps us to read a message
+// of the length specified next in the input. It returns
+// the length of the block. The argument tells whether
+// an EOF is acceptable now. If it is and one is found,
+// the return value is negative.
+func (deb *debugger) loadBlock(eofOK bool) int {
+ n64, w, err := decodeUintReader(deb.r, deb.tmp) // deb.uint64 will error at EOF
+ if err != nil {
+ if eofOK && err == io.EOF {
+ return -1
+ }
+ errorf("debug: unexpected error: %s", err)
+ }
+ deb.consumed(w)
+ n := int(n64)
+ if n < 0 {
+ errorf("huge value for message length: %d", n64)
+ }
+ return int(n)
+}
+
+// Message:
+// TypeSequence TypedValue
+// TypeSequence
+// (TypeDefinition DelimitedTypeDefinition*)?
+// DelimitedTypeDefinition:
+// uint(lengthOfTypeDefinition) TypeDefinition
+// TypedValue:
+// int(typeId) Value
+func (deb *debugger) message(indent tab) bool {
+ for {
+ // Convert the uint64 to a signed integer typeId
+ uid := deb.int64()
+ id := typeId(uid)
+ deb.dump("type id=%d", id)
+ if id < 0 {
+ deb.typeDefinition(indent, -id)
+ n := deb.loadBlock(false)
+ deb.dump("Message of length %d", n)
+ continue
+ } else {
+ deb.value(indent, id)
+ break
+ }
+ }
+ return true
+}
+
+// Helper methods to make it easy to scan a type descriptor.
+
+// common returns the CommonType at the input point.
+func (deb *debugger) common() CommonType {
+ fieldNum := -1
+ name := ""
+ id := typeId(0)
+ for {
+ delta := deb.delta(-1)
+ if delta == 0 {
+ break
+ }
+ fieldNum += delta
+ switch fieldNum {
+ case 0:
+ name = deb.string()
+ case 1:
+ // Id typeId
+ id = deb.typeId()
+ default:
+ errorf("corrupted CommonType")
+ }
+ }
+ return CommonType{name, id}
+}
+
+// uint returns the unsigned int at the input point, as a uint (not uint64).
+func (deb *debugger) uint() uint {
+ return uint(deb.uint64())
+}
+
+// int returns the signed int at the input point, as an int (not int64).
+func (deb *debugger) int() int {
+ return int(deb.int64())
+}
+
+// typeId returns the type id at the input point.
+func (deb *debugger) typeId() typeId {
+ return typeId(deb.int64())
+}
+
+// string returns the string at the input point.
+func (deb *debugger) string() string {
+ x := int(deb.uint64())
+ b := make([]byte, x)
+ nb, _ := deb.r.Read(b)
+ if nb != x {
+ errorf("corrupted type")
+ }
+ deb.consumed(nb)
+ return string(b)
+}
+
+// delta returns the field delta at the input point. The expect argument,
+// if non-negative, identifies what the value should be.
+func (deb *debugger) delta(expect int) int {
+ delta := int(deb.uint64())
+ if delta < 0 || (expect >= 0 && delta != expect) {
+ errorf("decode: corrupted type: delta %d expected %d", delta, expect)
+ }
+ return delta
+}
+
+// TypeDefinition:
+// [int(-typeId) (already read)] encodingOfWireType
+func (deb *debugger) typeDefinition(indent tab, id typeId) {
+ deb.dump("type definition for id %d", id)
+ // Encoding is of a wireType. Decode the structure as usual
+ fieldNum := -1
+ wire := new(wireType)
+ // A wireType defines a single field.
+ delta := deb.delta(-1)
+ fieldNum += delta
+ switch fieldNum {
+ case 0: // array type, one field of {{Common}, elem, length}
+ // Field number 0 is CommonType
+ deb.delta(1)
+ com := deb.common()
+ // Field number 1 is type Id of elem
+ deb.delta(1)
+ id := deb.typeId()
+ // Field number 2 is length
+ deb.delta(1)
+ length := deb.int()
+ wire.ArrayT = &arrayType{com, id, length}
+
+ case 1: // slice type, one field of {{Common}, elem}
+ // Field number 0 is CommonType
+ deb.delta(1)
+ com := deb.common()
+ // Field number 1 is type Id of elem
+ deb.delta(1)
+ id := deb.typeId()
+ wire.SliceT = &sliceType{com, id}
+
+ case 2: // struct type, one field of {{Common}, []fieldType}
+ // Field number 0 is CommonType
+ deb.delta(1)
+ com := deb.common()
+ // Field number 1 is slice of FieldType
+ deb.delta(1)
+ numField := int(deb.uint())
+ field := make([]*fieldType, numField)
+ for i := 0; i < numField; i++ {
+ field[i] = new(fieldType)
+ deb.delta(1) // field 0 of fieldType: name
+ field[i].Name = deb.string()
+ deb.delta(1) // field 1 of fieldType: id
+ field[i].Id = deb.typeId()
+ deb.delta(0) // end of fieldType
+ }
+ wire.StructT = &structType{com, field}
+
+ case 3: // map type, one field of {{Common}, key, elem}
+ // Field number 0 is CommonType
+ deb.delta(1)
+ com := deb.common()
+ // Field number 1 is type Id of key
+ deb.delta(1)
+ keyId := deb.typeId()
+ // Field number 2 is type Id of elem
+ deb.delta(1)
+ elemId := deb.typeId()
+ wire.MapT = &mapType{com, keyId, elemId}
+ case 4: // GobEncoder type, one field of {{Common}}
+ // Field number 0 is CommonType
+ deb.delta(1)
+ com := deb.common()
+ wire.GobEncoderT = &gobEncoderType{com}
+ default:
+ errorf("bad field in type %d", fieldNum)
+ }
+ deb.printWireType(indent, wire)
+ deb.delta(0) // end inner type (arrayType, etc.)
+ deb.delta(0) // end wireType
+ // Remember we've seen this type.
+ deb.wireType[id] = wire
+}
+
+// Value:
+// SingletonValue | StructValue
+func (deb *debugger) value(indent tab, id typeId) {
+ wire, ok := deb.wireType[id]
+ if ok && wire.StructT != nil {
+ deb.structValue(indent, id)
+ } else {
+ deb.singletonValue(indent, id)
+ }
+}
+
+// SingletonValue:
+// uint(0) FieldValue
+func (deb *debugger) singletonValue(indent tab, id typeId) {
+ deb.dump("Singleton value")
+ // is it a builtin type?
+ wire := deb.wireType[id]
+ _, ok := builtinIdToType[id]
+ if !ok && wire == nil {
+ errorf("type id %d not defined", id)
+ }
+ m := deb.uint64()
+ if m != 0 {
+ errorf("expected zero; got %d", m)
+ }
+ deb.fieldValue(indent, id)
+}
+
+// InterfaceValue:
+// NilInterfaceValue | NonNilInterfaceValue
+func (deb *debugger) interfaceValue(indent tab) {
+ deb.dump("Start of interface value")
+ if nameLen := deb.uint64(); nameLen == 0 {
+ deb.nilInterfaceValue(indent)
+ } else {
+ deb.nonNilInterfaceValue(indent, int(nameLen))
+ }
+}
+
+// NilInterfaceValue:
+// uint(0) [already read]
+func (deb *debugger) nilInterfaceValue(indent tab) int {
+ fmt.Fprintf(os.Stderr, "%snil interface\n", indent)
+ return 0
+}
+
+// NonNilInterfaceValue:
+// ConcreteTypeName TypeSequence InterfaceContents
+// ConcreteTypeName:
+// uint(lengthOfName) [already read=n] name
+// InterfaceContents:
+// int(concreteTypeId) DelimitedValue
+// DelimitedValue:
+// uint(length) Value
+func (deb *debugger) nonNilInterfaceValue(indent tab, nameLen int) {
+ // ConcreteTypeName
+ b := make([]byte, nameLen)
+ deb.r.Read(b) // TODO: CHECK THESE READS!!
+ deb.consumed(nameLen)
+ name := string(b)
+
+ for {
+ id := deb.typeId()
+ if id < 0 {
+ deb.typeDefinition(indent, -id)
+ n := deb.loadBlock(false)
+ deb.dump("Nested message of length %d", n)
+ } else {
+ // DelimitedValue
+ x := deb.uint64() // in case we want to ignore the value; we don't.
+ fmt.Fprintf(os.Stderr, "%sinterface value, type %q id=%d; valueLength %d\n", indent, name, id, x)
+ deb.value(indent, id)
+ break
+ }
+ }
+}
+
+// printCommonType prints a common type; used by printWireType.
+func (deb *debugger) printCommonType(indent tab, kind string, common *CommonType) {
+ indent.print()
+ fmt.Fprintf(os.Stderr, "%s %q id=%d\n", kind, common.Name, common.Id)
+}
+
+// printWireType prints the contents of a wireType.
+func (deb *debugger) printWireType(indent tab, wire *wireType) {
+ fmt.Fprintf(os.Stderr, "%stype definition {\n", indent)
+ indent++
+ switch {
+ case wire.ArrayT != nil:
+ deb.printCommonType(indent, "array", &wire.ArrayT.CommonType)
+ fmt.Fprintf(os.Stderr, "%slen %d\n", indent+1, wire.ArrayT.Len)
+ fmt.Fprintf(os.Stderr, "%selemid %d\n", indent+1, wire.ArrayT.Elem)
+ case wire.MapT != nil:
+ deb.printCommonType(indent, "map", &wire.MapT.CommonType)
+ fmt.Fprintf(os.Stderr, "%skey id=%d\n", indent+1, wire.MapT.Key)
+ fmt.Fprintf(os.Stderr, "%selem id=%d\n", indent+1, wire.MapT.Elem)
+ case wire.SliceT != nil:
+ deb.printCommonType(indent, "slice", &wire.SliceT.CommonType)
+ fmt.Fprintf(os.Stderr, "%selem id=%d\n", indent+1, wire.SliceT.Elem)
+ case wire.StructT != nil:
+ deb.printCommonType(indent, "struct", &wire.StructT.CommonType)
+ for i, field := range wire.StructT.Field {
+ fmt.Fprintf(os.Stderr, "%sfield %d:\t%s\tid=%d\n", indent+1, i, field.Name, field.Id)
+ }
+ case wire.GobEncoderT != nil:
+ deb.printCommonType(indent, "GobEncoder", &wire.GobEncoderT.CommonType)
+ }
+ indent--
+ fmt.Fprintf(os.Stderr, "%s}\n", indent)
+}
+
+// fieldValue prints a value of any type, such as a struct field.
+// FieldValue:
+// builtinValue | ArrayValue | MapValue | SliceValue | StructValue | InterfaceValue
+func (deb *debugger) fieldValue(indent tab, id typeId) {
+ _, ok := builtinIdToType[id]
+ if ok {
+ if id == tInterface {
+ deb.interfaceValue(indent)
+ } else {
+ deb.printBuiltin(indent, id)
+ }
+ return
+ }
+ wire, ok := deb.wireType[id]
+ if !ok {
+ errorf("type id %d not defined", id)
+ }
+ switch {
+ case wire.ArrayT != nil:
+ deb.arrayValue(indent, wire)
+ case wire.MapT != nil:
+ deb.mapValue(indent, wire)
+ case wire.SliceT != nil:
+ deb.sliceValue(indent, wire)
+ case wire.StructT != nil:
+ deb.structValue(indent, id)
+ case wire.GobEncoderT != nil:
+ deb.gobEncoderValue(indent, id)
+ default:
+ panic("bad wire type for field")
+ }
+}
+
+// printBuiltin prints a value of a fundamental (builtin) type, that is,
+// one whose type is known to gobs at bootstrap time.
+func (deb *debugger) printBuiltin(indent tab, id typeId) {
+ switch id {
+ case tBool:
+ x := deb.int64()
+ if x == 0 {
+ fmt.Fprintf(os.Stderr, "%sfalse\n", indent)
+ } else {
+ fmt.Fprintf(os.Stderr, "%strue\n", indent)
+ }
+ case tInt:
+ x := deb.int64()
+ fmt.Fprintf(os.Stderr, "%s%d\n", indent, x)
+ case tUint:
+ x := deb.uint64()
+ fmt.Fprintf(os.Stderr, "%s%d\n", indent, x)
+ case tFloat:
+ x := deb.uint64()
+ fmt.Fprintf(os.Stderr, "%s%g\n", indent, floatFromBits(x))
+ case tComplex:
+ r := deb.uint64()
+ i := deb.uint64()
+ fmt.Fprintf(os.Stderr, "%s%g+%gi\n", indent, floatFromBits(r), floatFromBits(i))
+ case tBytes:
+ x := int(deb.uint64())
+ b := make([]byte, x)
+ deb.r.Read(b)
+ deb.consumed(x)
+ fmt.Fprintf(os.Stderr, "%s{% x}=%q\n", indent, b, b)
+ case tString:
+ x := int(deb.uint64())
+ b := make([]byte, x)
+ deb.r.Read(b)
+ deb.consumed(x)
+ fmt.Fprintf(os.Stderr, "%s%q\n", indent, b)
+ default:
+ panic("unknown builtin")
+ }
+}
+
+// ArrayValue:
+// uint(n) FieldValue*n
+func (deb *debugger) arrayValue(indent tab, wire *wireType) {
+ elemId := wire.ArrayT.Elem
+ u := deb.uint64()
+ length := int(u)
+ for i := 0; i < length; i++ {
+ deb.fieldValue(indent, elemId)
+ }
+ if length != wire.ArrayT.Len {
+ fmt.Fprintf(os.Stderr, "%s(wrong length for array: %d should be %d)\n", indent, length, wire.ArrayT.Len)
+ }
+}
+
+// MapValue:
+// uint(n) (FieldValue FieldValue)*n [n (key, value) pairs]
+func (deb *debugger) mapValue(indent tab, wire *wireType) {
+ keyId := wire.MapT.Key
+ elemId := wire.MapT.Elem
+ u := deb.uint64()
+ length := int(u)
+ for i := 0; i < length; i++ {
+ deb.fieldValue(indent+1, keyId)
+ deb.fieldValue(indent+1, elemId)
+ }
+}
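+
+// For illustration: a map[string]int{"a": 1} value arrives as the pair count
+// followed by alternating key and element values, i.e. 01 (one pair),
+// 01 61 (the string "a"), 02 (the int 1).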
+
+// SliceValue:
+// uint(n) (n FieldValue)
+func (deb *debugger) sliceValue(indent tab, wire *wireType) {
+ elemId := wire.SliceT.Elem
+ u := deb.uint64()
+ length := int(u)
+ deb.dump("Start of slice of length %d", length)
+
+ for i := 0; i < length; i++ {
+ deb.fieldValue(indent, elemId)
+ }
+}
+
+// StructValue:
+// (uint(fieldDelta) FieldValue)*
+func (deb *debugger) structValue(indent tab, id typeId) {
+ deb.dump("Start of struct value of %q id=%d\n<<\n", id.name(), id)
+ fmt.Fprintf(os.Stderr, "%s%s struct {\n", indent, id.name())
+ wire, ok := deb.wireType[id]
+ if !ok {
+ errorf("type id %d not defined", id)
+ }
+ strct := wire.StructT
+ fieldNum := -1
+ indent++
+ for {
+ delta := deb.uint64()
+ if delta == 0 { // struct terminator is zero delta fieldnum
+ break
+ }
+ fieldNum += int(delta)
+ if fieldNum < 0 || fieldNum >= len(strct.Field) {
+ deb.dump("field number out of range: prevField=%d delta=%d", fieldNum-int(delta), delta)
+ break
+ }
+ fmt.Fprintf(os.Stderr, "%sfield %d:\t%s\n", indent, fieldNum, wire.StructT.Field[fieldNum].Name)
+ deb.fieldValue(indent+1, strct.Field[fieldNum].Id)
+ }
+ indent--
+ fmt.Fprintf(os.Stderr, "%s} // end %s struct\n", indent, id.name())
+ deb.dump(">> End of struct value of type %d %q", id, id.name())
+}
+
+// GobEncoderValue:
+// uint(n) byte*n
+func (deb *debugger) gobEncoderValue(indent tab, id typeId) {
+ len := deb.uint64()
+ deb.dump("GobEncoder value of %q id=%d, length %d\n", id.name(), id, len)
+ fmt.Fprintf(os.Stderr, "%s%s (implements GobEncoder)\n", indent, id.name())
+ data := make([]byte, len)
+ _, err := deb.r.Read(data)
+ if err != nil {
+ errorf("gobEncoder data read: %s", err)
+ }
+ fmt.Fprintf(os.Stderr, "%s[% .2x]\n", indent+1, data)
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gob
+
+// TODO(rsc): When garbage collector changes, revisit
+// the allocations in this file that use unsafe.Pointer.
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "math"
+ "reflect"
+ "unsafe"
+)
+
+var (
+ errBadUint = errors.New("gob: encoded unsigned integer out of range")
+ errBadType = errors.New("gob: unknown type id or corrupted data")
+ errRange = errors.New("gob: bad data: field numbers out of bounds")
+)
+
+// decoderState is the execution state of an instance of the decoder. A new state
+// is created for nested objects.
+type decoderState struct {
+ dec *Decoder
+ // The buffer is stored with an extra indirection because it may be replaced
+ // if we load a type during decode (when reading an interface value).
+ b *bytes.Buffer
+ fieldnum int // the last field number read.
+ buf []byte
+ next *decoderState // for free list
+}
+
+// We pass the bytes.Buffer separately for easier testing of the infrastructure
+// without requiring a full Decoder.
+func (dec *Decoder) newDecoderState(buf *bytes.Buffer) *decoderState {
+ d := dec.freeList
+ if d == nil {
+ d = new(decoderState)
+ d.dec = dec
+ d.buf = make([]byte, uint64Size)
+ } else {
+ dec.freeList = d.next
+ }
+ d.b = buf
+ return d
+}
+
+func (dec *Decoder) freeDecoderState(d *decoderState) {
+ d.next = dec.freeList
+ dec.freeList = d
+}
+
+func overflow(name string) error {
+ return errors.New(`value for "` + name + `" out of range`)
+}
+
+// decodeUintReader reads an encoded unsigned integer from an io.Reader.
+// Used only by the Decoder to read the message length.
+func decodeUintReader(r io.Reader, buf []byte) (x uint64, width int, err error) {
+ width = 1
+ _, err = r.Read(buf[0:width])
+ if err != nil {
+ return
+ }
+ b := buf[0]
+ if b <= 0x7f {
+ return uint64(b), width, nil
+ }
+ n := -int(int8(b))
+ if n > uint64Size {
+ err = errBadUint
+ return
+ }
+ width, err = io.ReadFull(r, buf[0:n])
+ if err != nil {
+ if err == io.EOF {
+ err = io.ErrUnexpectedEOF
+ }
+ return
+ }
+ // Could check that the high byte is zero but it's not worth it.
+ for _, b := range buf[0:width] {
+ x = x<<8 | uint64(b)
+ }
+ width++ // +1 for length byte
+ return
+}
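+
+// Worked examples of the encoding read above (illustrative only): a value
+// below 0x80 occupies a single byte, so 7 arrives as 07; anything larger
+// arrives as a negated byte count followed by the value in big-endian order,
+// so 256 arrives as FE 01 00 (-2 as an int8, then the two value bytes).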
+
+// decodeUint reads an encoded unsigned integer from state.r.
+// Does not check for overflow.
+func (state *decoderState) decodeUint() (x uint64) {
+ b, err := state.b.ReadByte()
+ if err != nil {
+ error_(err)
+ }
+ if b <= 0x7f {
+ return uint64(b)
+ }
+ n := -int(int8(b))
+ if n > uint64Size {
+ error_(errBadUint)
+ }
+ width, err := state.b.Read(state.buf[0:n])
+ if err != nil {
+ error_(err)
+ }
+ // No need to check that the read filled the buffer; it's safe to loop over however many bytes arrived.
+ // Could check that the high byte is zero but it's not worth it.
+ for _, b := range state.buf[0:width] {
+ x = x<<8 | uint64(b)
+ }
+ return x
+}
+
+// decodeInt reads an encoded signed integer from state.r.
+// Does not check for overflow.
+func (state *decoderState) decodeInt() int64 {
+ x := state.decodeUint()
+ if x&1 != 0 {
+ return ^int64(x >> 1)
+ }
+ return int64(x >> 1)
+}
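+
+// Illustrative examples of the signed encoding reversed above: the encoder
+// sends non-negative i as i<<1 and negative i as ^i<<1 | 1, so 0 arrives as
+// 00, -1 as 01, 1 as 02, 7 as 0E and -7 as 0D.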
+
+// decOp is the signature of a decoding operator for a given type.
+type decOp func(i *decInstr, state *decoderState, p unsafe.Pointer)
+
+// The 'instructions' of the decoding machine
+type decInstr struct {
+ op decOp
+ field int // field number of the wire type
+ indir int // how many pointer indirections to reach the value in the struct
+ offset uintptr // offset in the structure of the field to encode
+ ovfl error // error message for overflow/underflow (for arrays, of the elements)
+}
+
+// Since the encoder writes no zeros, if we arrive at a decoder we have
+// a value to extract and store. The field number has already been read
+// (it's how we knew to call this decoder).
+// Each decoder is responsible for handling any indirections associated
+// with the data structure. If any pointer so reached is nil, allocation must
+// be done.
+
+// Walk the pointer hierarchy, allocating if we find a nil. Stop one before the end.
+func decIndirect(p unsafe.Pointer, indir int) unsafe.Pointer {
+ for ; indir > 1; indir-- {
+ if *(*unsafe.Pointer)(p) == nil {
+ // Allocation required
+ *(*unsafe.Pointer)(p) = unsafe.Pointer(new(unsafe.Pointer))
+ }
+ p = *(*unsafe.Pointer)(p)
+ }
+ return p
+}
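+
+// For example: with indir == 3 the loop above dereferences (allocating any
+// nil pointers it meets) twice and returns the address of the innermost
+// pointer, leaving the final allocation of the value itself to the caller's
+// decode op.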
+
+// ignoreUint discards a uint value with no destination.
+func ignoreUint(i *decInstr, state *decoderState, p unsafe.Pointer) {
+ state.decodeUint()
+}
+
+// ignoreTwoUints discards a uint value with no destination. It's used to skip
+// complex values.
+func ignoreTwoUints(i *decInstr, state *decoderState, p unsafe.Pointer) {
+ state.decodeUint()
+ state.decodeUint()
+}
+
+// decBool decodes a uint and stores it as a boolean through p.
+func decBool(i *decInstr, state *decoderState, p unsafe.Pointer) {
+ if i.indir > 0 {
+ if *(*unsafe.Pointer)(p) == nil {
+ *(*unsafe.Pointer)(p) = unsafe.Pointer(new(bool))
+ }
+ p = *(*unsafe.Pointer)(p)
+ }
+ *(*bool)(p) = state.decodeUint() != 0
+}
+
+// decInt8 decodes an integer and stores it as an int8 through p.
+func decInt8(i *decInstr, state *decoderState, p unsafe.Pointer) {
+ if i.indir > 0 {
+ if *(*unsafe.Pointer)(p) == nil {
+ *(*unsafe.Pointer)(p) = unsafe.Pointer(new(int8))
+ }
+ p = *(*unsafe.Pointer)(p)
+ }
+ v := state.decodeInt()
+ if v < math.MinInt8 || math.MaxInt8 < v {
+ error_(i.ovfl)
+ } else {
+ *(*int8)(p) = int8(v)
+ }
+}
+
+// decUint8 decodes an unsigned integer and stores it as a uint8 through p.
+func decUint8(i *decInstr, state *decoderState, p unsafe.Pointer) {
+ if i.indir > 0 {
+ if *(*unsafe.Pointer)(p) == nil {
+ *(*unsafe.Pointer)(p) = unsafe.Pointer(new(uint8))
+ }
+ p = *(*unsafe.Pointer)(p)
+ }
+ v := state.decodeUint()
+ if math.MaxUint8 < v {
+ error_(i.ovfl)
+ } else {
+ *(*uint8)(p) = uint8(v)
+ }
+}
+
+// decInt16 decodes an integer and stores it as an int16 through p.
+func decInt16(i *decInstr, state *decoderState, p unsafe.Pointer) {
+ if i.indir > 0 {
+ if *(*unsafe.Pointer)(p) == nil {
+ *(*unsafe.Pointer)(p) = unsafe.Pointer(new(int16))
+ }
+ p = *(*unsafe.Pointer)(p)
+ }
+ v := state.decodeInt()
+ if v < math.MinInt16 || math.MaxInt16 < v {
+ error_(i.ovfl)
+ } else {
+ *(*int16)(p) = int16(v)
+ }
+}
+
+// decUint16 decodes an unsigned integer and stores it as a uint16 through p.
+func decUint16(i *decInstr, state *decoderState, p unsafe.Pointer) {
+ if i.indir > 0 {
+ if *(*unsafe.Pointer)(p) == nil {
+ *(*unsafe.Pointer)(p) = unsafe.Pointer(new(uint16))
+ }
+ p = *(*unsafe.Pointer)(p)
+ }
+ v := state.decodeUint()
+ if math.MaxUint16 < v {
+ error_(i.ovfl)
+ } else {
+ *(*uint16)(p) = uint16(v)
+ }
+}
+
+// decInt32 decodes an integer and stores it as an int32 through p.
+func decInt32(i *decInstr, state *decoderState, p unsafe.Pointer) {
+ if i.indir > 0 {
+ if *(*unsafe.Pointer)(p) == nil {
+ *(*unsafe.Pointer)(p) = unsafe.Pointer(new(int32))
+ }
+ p = *(*unsafe.Pointer)(p)
+ }
+ v := state.decodeInt()
+ if v < math.MinInt32 || math.MaxInt32 < v {
+ error_(i.ovfl)
+ } else {
+ *(*int32)(p) = int32(v)
+ }
+}
+
+// decUint32 decodes an unsigned integer and stores it as a uint32 through p.
+func decUint32(i *decInstr, state *decoderState, p unsafe.Pointer) {
+ if i.indir > 0 {
+ if *(*unsafe.Pointer)(p) == nil {
+ *(*unsafe.Pointer)(p) = unsafe.Pointer(new(uint32))
+ }
+ p = *(*unsafe.Pointer)(p)
+ }
+ v := state.decodeUint()
+ if math.MaxUint32 < v {
+ error_(i.ovfl)
+ } else {
+ *(*uint32)(p) = uint32(v)
+ }
+}
+
+// decInt64 decodes an integer and stores it as an int64 through p.
+func decInt64(i *decInstr, state *decoderState, p unsafe.Pointer) {
+ if i.indir > 0 {
+ if *(*unsafe.Pointer)(p) == nil {
+ *(*unsafe.Pointer)(p) = unsafe.Pointer(new(int64))
+ }
+ p = *(*unsafe.Pointer)(p)
+ }
+ *(*int64)(p) = int64(state.decodeInt())
+}
+
+// decUint64 decodes an unsigned integer and stores it as a uint64 through p.
+func decUint64(i *decInstr, state *decoderState, p unsafe.Pointer) {
+ if i.indir > 0 {
+ if *(*unsafe.Pointer)(p) == nil {
+ *(*unsafe.Pointer)(p) = unsafe.Pointer(new(uint64))
+ }
+ p = *(*unsafe.Pointer)(p)
+ }
+ *(*uint64)(p) = uint64(state.decodeUint())
+}
+
+// Floating-point numbers are transmitted as uint64s holding the bits
+// of the underlying representation. They are sent byte-reversed, with
+// the exponent end coming out first, so integer floating point numbers
+// (for example) transmit more compactly. This routine does the
+// unswizzling.
+func floatFromBits(u uint64) float64 {
+ var v uint64
+ for i := 0; i < 8; i++ {
+ v <<= 8
+ v |= u & 0xFF
+ u >>= 8
+ }
+ return math.Float64frombits(v)
+}
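+
+// A worked example of the unswizzling above (illustrative only): float64(17)
+// has bit pattern 0x4031000000000000; byte-reversed it travels as the unsigned
+// value 0x3140, i.e. the three wire bytes FE 31 40, which this routine turns
+// back into 17.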
+
+// storeFloat32 decodes an unsigned integer, treats it as a 32-bit floating-point
+// number, and stores it through p. It's a helper function for float32 and complex64.
+func storeFloat32(i *decInstr, state *decoderState, p unsafe.Pointer) {
+ v := floatFromBits(state.decodeUint())
+ av := v
+ if av < 0 {
+ av = -av
+ }
+ // +Inf is OK in both 32- and 64-bit floats. Underflow is always OK.
+ if math.MaxFloat32 < av && av <= math.MaxFloat64 {
+ error_(i.ovfl)
+ } else {
+ *(*float32)(p) = float32(v)
+ }
+}
+
+// decFloat32 decodes an unsigned integer, treats it as a 32-bit floating-point
+// number, and stores it through p.
+func decFloat32(i *decInstr, state *decoderState, p unsafe.Pointer) {
+ if i.indir > 0 {
+ if *(*unsafe.Pointer)(p) == nil {
+ *(*unsafe.Pointer)(p) = unsafe.Pointer(new(float32))
+ }
+ p = *(*unsafe.Pointer)(p)
+ }
+ storeFloat32(i, state, p)
+}
+
+// decFloat64 decodes an unsigned integer, treats it as a 64-bit floating-point
+// number, and stores it through p.
+func decFloat64(i *decInstr, state *decoderState, p unsafe.Pointer) {
+ if i.indir > 0 {
+ if *(*unsafe.Pointer)(p) == nil {
+ *(*unsafe.Pointer)(p) = unsafe.Pointer(new(float64))
+ }
+ p = *(*unsafe.Pointer)(p)
+ }
+ *(*float64)(p) = floatFromBits(uint64(state.decodeUint()))
+}
+
+// decComplex64 decodes a pair of unsigned integers, treats them as a
+// pair of floating point numbers, and stores them as a complex64 through p.
+// The real part comes first.
+func decComplex64(i *decInstr, state *decoderState, p unsafe.Pointer) {
+ if i.indir > 0 {
+ if *(*unsafe.Pointer)(p) == nil {
+ *(*unsafe.Pointer)(p) = unsafe.Pointer(new(complex64))
+ }
+ p = *(*unsafe.Pointer)(p)
+ }
+ storeFloat32(i, state, p)
+ storeFloat32(i, state, unsafe.Pointer(uintptr(p)+unsafe.Sizeof(float32(0))))
+}
+
+// decComplex128 decodes a pair of unsigned integers, treats them as a
+// pair of floating point numbers, and stores them as a complex128 through p.
+// The real part comes first.
+func decComplex128(i *decInstr, state *decoderState, p unsafe.Pointer) {
+ if i.indir > 0 {
+ if *(*unsafe.Pointer)(p) == nil {
+ *(*unsafe.Pointer)(p) = unsafe.Pointer(new(complex128))
+ }
+ p = *(*unsafe.Pointer)(p)
+ }
+ real := floatFromBits(uint64(state.decodeUint()))
+ imag := floatFromBits(uint64(state.decodeUint()))
+ *(*complex128)(p) = complex(real, imag)
+}
+
+// decUint8Slice decodes a byte slice and stores through p a slice header
+// describing the data.
+// uint8 slices are encoded as an unsigned count followed by the raw bytes.
+func decUint8Slice(i *decInstr, state *decoderState, p unsafe.Pointer) {
+ if i.indir > 0 {
+ if *(*unsafe.Pointer)(p) == nil {
+ *(*unsafe.Pointer)(p) = unsafe.Pointer(new([]uint8))
+ }
+ p = *(*unsafe.Pointer)(p)
+ }
+ n := int(state.decodeUint())
+ if n < 0 {
+ errorf("negative length decoding []byte")
+ }
+ slice := (*[]uint8)(p)
+ if cap(*slice) < n {
+ *slice = make([]uint8, n)
+ } else {
+ *slice = (*slice)[0:n]
+ }
+ if _, err := state.b.Read(*slice); err != nil {
+ errorf("error decoding []byte: %s", err)
+ }
+}
+
+// decString decodes byte array and stores through p a string header
+// describing the data.
+// Strings are encoded as an unsigned count followed by the raw bytes.
+func decString(i *decInstr, state *decoderState, p unsafe.Pointer) {
+ if i.indir > 0 {
+ if *(*unsafe.Pointer)(p) == nil {
+ *(*unsafe.Pointer)(p) = unsafe.Pointer(new(string))
+ }
+ p = *(*unsafe.Pointer)(p)
+ }
+ b := make([]byte, state.decodeUint())
+ state.b.Read(b)
+ // It would be a shame to do the obvious thing here,
+ // *(*string)(p) = string(b)
+ // because we've already allocated the storage and this would
+ // allocate again and copy. So we do this ugly hack, which is even
+ // more unsafe than it looks as it depends on the memory
+ // representation of a string matching the beginning of the memory
+ // representation of a byte slice (a byte slice is longer).
+ *(*string)(p) = *(*string)(unsafe.Pointer(&b))
+}
+
+// ignoreUint8Array skips over the data for a byte slice value with no destination.
+func ignoreUint8Array(i *decInstr, state *decoderState, p unsafe.Pointer) {
+ b := make([]byte, state.decodeUint())
+ state.b.Read(b)
+}
+
+// Execution engine
+
+// The decoder engine is an array of instructions indexed by field number of the incoming
+// data. It is executed with random access according to field number.
+type decEngine struct {
+ instr []decInstr
+ numInstr int // the number of active instructions
+}
+
+// allocate makes sure storage is available for an object of underlying type rtyp
+// that is indir levels of indirection through p.
+func allocate(rtyp reflect.Type, p uintptr, indir int) uintptr {
+ if indir == 0 {
+ return p
+ }
+ up := unsafe.Pointer(p)
+ if indir > 1 {
+ up = decIndirect(up, indir)
+ }
+ if *(*unsafe.Pointer)(up) == nil {
+ // Allocate object.
+ *(*unsafe.Pointer)(up) = unsafe.New(rtyp)
+ }
+ return *(*uintptr)(up)
+}
+
+// decodeSingle decodes a top-level value that is not a struct and stores it through p.
+// Such values are preceded by a zero, making them have the memory layout of a
+// struct field (although with an illegal field number).
+func (dec *Decoder) decodeSingle(engine *decEngine, ut *userTypeInfo, basep uintptr) (err error) {
+ state := dec.newDecoderState(&dec.buf)
+ state.fieldnum = singletonField
+ delta := int(state.decodeUint())
+ if delta != 0 {
+ errorf("decode: corrupted data: non-zero delta for singleton")
+ }
+ instr := &engine.instr[singletonField]
+ if instr.indir != ut.indir {
+ return errors.New("gob: internal error: inconsistent indirection")
+ }
+ ptr := unsafe.Pointer(basep) // offset will be zero
+ if instr.indir > 1 {
+ ptr = decIndirect(ptr, instr.indir)
+ }
+ instr.op(instr, state, ptr)
+ dec.freeDecoderState(state)
+ return nil
+}
+
+// decodeStruct decodes a top-level struct and stores it through p.
+// Indir is for the value, not the type. At the time of the call it may
+// differ from ut.indir, which was computed when the engine was built.
+// This state cannot arise for decodeSingle, which is called directly
+// from the user's value, not from the innards of an engine.
+func (dec *Decoder) decodeStruct(engine *decEngine, ut *userTypeInfo, p uintptr, indir int) {
+ p = allocate(ut.base, p, indir)
+ state := dec.newDecoderState(&dec.buf)
+ state.fieldnum = -1
+ basep := p
+ for state.b.Len() > 0 {
+ delta := int(state.decodeUint())
+ if delta < 0 {
+ errorf("decode: corrupted data: negative delta")
+ }
+ if delta == 0 { // struct terminator is zero delta fieldnum
+ break
+ }
+ fieldnum := state.fieldnum + delta
+ if fieldnum >= len(engine.instr) {
+ error_(errRange)
+ break
+ }
+ instr := &engine.instr[fieldnum]
+ p := unsafe.Pointer(basep + instr.offset)
+ if instr.indir > 1 {
+ p = decIndirect(p, instr.indir)
+ }
+ instr.op(instr, state, p)
+ state.fieldnum = fieldnum
+ }
+ dec.freeDecoderState(state)
+}
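+
+// Illustrative sketch of the field stream consumed above: because the encoder
+// writes no zero-valued fields, fields arrive in increasing field-number
+// order, each preceded by its delta from the previous field number (starting
+// from -1), and a zero delta terminates the struct. A struct whose fields 0
+// and 2 are the only non-zero ones therefore arrives as:
+// delta 1, value, delta 2, value, 0.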
+
+// ignoreStruct discards the data for a struct with no destination.
+func (dec *Decoder) ignoreStruct(engine *decEngine) {
+ state := dec.newDecoderState(&dec.buf)
+ state.fieldnum = -1
+ for state.b.Len() > 0 {
+ delta := int(state.decodeUint())
+ if delta < 0 {
+ errorf("ignore decode: corrupted data: negative delta")
+ }
+ if delta == 0 { // struct terminator is zero delta fieldnum
+ break
+ }
+ fieldnum := state.fieldnum + delta
+ if fieldnum >= len(engine.instr) {
+ error_(errRange)
+ }
+ instr := &engine.instr[fieldnum]
+ instr.op(instr, state, unsafe.Pointer(nil))
+ state.fieldnum = fieldnum
+ }
+ dec.freeDecoderState(state)
+}
+
+// ignoreSingle discards the data for a top-level non-struct value with no
+// destination. It's used when calling Decode with a nil value.
+func (dec *Decoder) ignoreSingle(engine *decEngine) {
+ state := dec.newDecoderState(&dec.buf)
+ state.fieldnum = singletonField
+ delta := int(state.decodeUint())
+ if delta != 0 {
+ errorf("decode: corrupted data: non-zero delta for singleton")
+ }
+ instr := &engine.instr[singletonField]
+ instr.op(instr, state, unsafe.Pointer(nil))
+ dec.freeDecoderState(state)
+}
+
+// decodeArrayHelper does the work for decoding arrays and slices.
+func (dec *Decoder) decodeArrayHelper(state *decoderState, p uintptr, elemOp decOp, elemWid uintptr, length, elemIndir int, ovfl error) {
+ instr := &decInstr{elemOp, 0, elemIndir, 0, ovfl}
+ for i := 0; i < length; i++ {
+ up := unsafe.Pointer(p)
+ if elemIndir > 1 {
+ up = decIndirect(up, elemIndir)
+ }
+ elemOp(instr, state, up)
+ p += uintptr(elemWid)
+ }
+}
+
+// decodeArray decodes an array and stores it through p, that is, p points to the zeroth element.
+// The length is an unsigned integer preceding the elements. Even though the length is redundant
+// (it's part of the type), it's a useful check and is included in the encoding.
+func (dec *Decoder) decodeArray(atyp reflect.Type, state *decoderState, p uintptr, elemOp decOp, elemWid uintptr, length, indir, elemIndir int, ovfl error) {
+ if indir > 0 {
+ p = allocate(atyp, p, 1) // All but the last level has been allocated by dec.Indirect
+ }
+ if n := state.decodeUint(); n != uint64(length) {
+ errorf("length mismatch in decodeArray")
+ }
+ dec.decodeArrayHelper(state, p, elemOp, elemWid, length, elemIndir, ovfl)
+}
+
+// decodeIntoValue is a helper for map decoding. Since maps are decoded using reflection,
+// unlike the other items we can't use a pointer directly.
+func decodeIntoValue(state *decoderState, op decOp, indir int, v reflect.Value, ovfl error) reflect.Value {
+ instr := &decInstr{op, 0, indir, 0, ovfl}
+ up := unsafe.Pointer(unsafeAddr(v))
+ if indir > 1 {
+ up = decIndirect(up, indir)
+ }
+ op(instr, state, up)
+ return v
+}
+
+// decodeMap decodes a map and stores its header through p.
+// Maps are encoded as a length followed by key:value pairs.
+// Because the internals of maps are not visible to us, we must
+// use reflection rather than pointer magic.
+func (dec *Decoder) decodeMap(mtyp reflect.Type, state *decoderState, p uintptr, keyOp, elemOp decOp, indir, keyIndir, elemIndir int, ovfl error) {
+ if indir > 0 {
+ p = allocate(mtyp, p, 1) // All but the last level has been allocated by dec.Indirect
+ }
+ up := unsafe.Pointer(p)
+ if *(*unsafe.Pointer)(up) == nil { // maps are represented as a pointer in the runtime
+ // Allocate map.
+ *(*unsafe.Pointer)(up) = unsafe.Pointer(reflect.MakeMap(mtyp).Pointer())
+ }
+ // Maps cannot be accessed by moving addresses around the way
+ // that slices etc. can. We must recover a full reflection value for
+ // the iteration.
+ v := reflect.ValueOf(unsafe.Unreflect(mtyp, unsafe.Pointer(p)))
+ n := int(state.decodeUint())
+ for i := 0; i < n; i++ {
+ key := decodeIntoValue(state, keyOp, keyIndir, allocValue(mtyp.Key()), ovfl)
+ elem := decodeIntoValue(state, elemOp, elemIndir, allocValue(mtyp.Elem()), ovfl)
+ v.SetMapIndex(key, elem)
+ }
+}
+
+// ignoreArrayHelper does the work for discarding arrays and slices.
+func (dec *Decoder) ignoreArrayHelper(state *decoderState, elemOp decOp, length int) {
+ instr := &decInstr{elemOp, 0, 0, 0, errors.New("no error")}
+ for i := 0; i < length; i++ {
+ elemOp(instr, state, nil)
+ }
+}
+
+// ignoreArray discards the data for an array value with no destination.
+func (dec *Decoder) ignoreArray(state *decoderState, elemOp decOp, length int) {
+ if n := state.decodeUint(); n != uint64(length) {
+ errorf("length mismatch in ignoreArray")
+ }
+ dec.ignoreArrayHelper(state, elemOp, length)
+}
+
+// ignoreMap discards the data for a map value with no destination.
+func (dec *Decoder) ignoreMap(state *decoderState, keyOp, elemOp decOp) {
+ n := int(state.decodeUint())
+ keyInstr := &decInstr{keyOp, 0, 0, 0, errors.New("no error")}
+ elemInstr := &decInstr{elemOp, 0, 0, 0, errors.New("no error")}
+ for i := 0; i < n; i++ {
+ keyOp(keyInstr, state, nil)
+ elemOp(elemInstr, state, nil)
+ }
+}
+
+// decodeSlice decodes a slice and stores the slice header through p.
+// Slices are encoded as an unsigned length followed by the elements.
+func (dec *Decoder) decodeSlice(atyp reflect.Type, state *decoderState, p uintptr, elemOp decOp, elemWid uintptr, indir, elemIndir int, ovfl error) {
+ n := int(uintptr(state.decodeUint()))
+ if indir > 0 {
+ up := unsafe.Pointer(p)
+ if *(*unsafe.Pointer)(up) == nil {
+ // Allocate the slice header.
+ *(*unsafe.Pointer)(up) = unsafe.Pointer(new([]unsafe.Pointer))
+ }
+ p = *(*uintptr)(up)
+ }
+ // Allocate storage for the slice elements, that is, the underlying array,
+ // if the existing slice does not have the capacity.
+ // Always write a header at p.
+ hdrp := (*reflect.SliceHeader)(unsafe.Pointer(p))
+ if hdrp.Cap < n {
+ hdrp.Data = uintptr(unsafe.NewArray(atyp.Elem(), n))
+ hdrp.Cap = n
+ }
+ hdrp.Len = n
+ dec.decodeArrayHelper(state, hdrp.Data, elemOp, elemWid, n, elemIndir, ovfl)
+}
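+
+// For illustration: a []int{1, 2, 3} value arrives as the length followed by
+// the signed encodings of its elements, i.e. the bytes 03 02 04 06.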
+
+// ignoreSlice skips over the data for a slice value with no destination.
+func (dec *Decoder) ignoreSlice(state *decoderState, elemOp decOp) {
+ dec.ignoreArrayHelper(state, elemOp, int(state.decodeUint()))
+}
+
+// setInterfaceValue sets an interface value to a concrete value,
+// but first it checks that the assignment will succeed.
+func setInterfaceValue(ivalue reflect.Value, value reflect.Value) {
+ if !value.Type().AssignableTo(ivalue.Type()) {
+ errorf("cannot assign value of type %s to %s", value.Type(), ivalue.Type())
+ }
+ ivalue.Set(value)
+}
+
+// decodeInterface decodes an interface value and stores it through p.
+// Interfaces are encoded as the name of a concrete type followed by a value.
+// If the name is empty, the value is nil and no value is sent.
+func (dec *Decoder) decodeInterface(ityp reflect.Type, state *decoderState, p uintptr, indir int) {
+ // Create a writable interface reflect.Value. We need one even for the nil case.
+ ivalue := allocValue(ityp)
+ // Read the name of the concrete type.
+ b := make([]byte, state.decodeUint())
+ state.b.Read(b)
+ name := string(b)
+ if name == "" {
+ // Copy the representation of the nil interface value to the target.
+ // This is horribly unsafe and special.
+ *(*[2]uintptr)(unsafe.Pointer(p)) = ivalue.InterfaceData()
+ return
+ }
+ // The concrete type must be registered.
+ typ, ok := nameToConcreteType[name]
+ if !ok {
+ errorf("name not registered for interface: %q", name)
+ }
+ // Read the type id of the concrete value.
+ concreteId := dec.decodeTypeSequence(true)
+ if concreteId < 0 {
+ error_(dec.err)
+ }
+ // Byte count of value is next; we don't care what it is (it's there
+ // in case we want to ignore the value by skipping it completely).
+ state.decodeUint()
+ // Read the concrete value.
+ value := allocValue(typ)
+ dec.decodeValue(concreteId, value)
+ if dec.err != nil {
+ error_(dec.err)
+ }
+ // Allocate the destination interface value.
+ if indir > 0 {
+ p = allocate(ityp, p, 1) // All but the last level has been allocated by dec.Indirect
+ }
+ // Assign the concrete value to the interface.
+ // Tread carefully; it might not satisfy the interface.
+ setInterfaceValue(ivalue, value)
+ // Copy the representation of the interface value to the target.
+ // This is horribly unsafe and special.
+ *(*[2]uintptr)(unsafe.Pointer(p)) = ivalue.InterfaceData()
+}
+
+// ignoreInterface discards the data for an interface value with no destination.
+func (dec *Decoder) ignoreInterface(state *decoderState) {
+ // Read the name of the concrete type.
+ b := make([]byte, state.decodeUint())
+ _, err := state.b.Read(b)
+ if err != nil {
+ error_(err)
+ }
+ id := dec.decodeTypeSequence(true)
+ if id < 0 {
+ error_(dec.err)
+ }
+ // At this point, the decoder buffer contains a delimited value. Just toss it.
+ state.b.Next(int(state.decodeUint()))
+}
+
+// decodeGobDecoder decodes something implementing the GobDecoder interface.
+// The data is encoded as a byte slice.
+func (dec *Decoder) decodeGobDecoder(state *decoderState, v reflect.Value) {
+ // Read the bytes for the value.
+ b := make([]byte, state.decodeUint())
+ _, err := state.b.Read(b)
+ if err != nil {
+ error_(err)
+ }
+ // We know it's a GobDecoder, so just call the method directly.
+ err = v.Interface().(GobDecoder).GobDecode(b)
+ if err != nil {
+ error_(err)
+ }
+}
+
+// ignoreGobDecoder discards the data for a GobDecoder value with no destination.
+func (dec *Decoder) ignoreGobDecoder(state *decoderState) {
+ // Read the bytes for the value.
+ b := make([]byte, state.decodeUint())
+ _, err := state.b.Read(b)
+ if err != nil {
+ error_(err)
+ }
+}
+
+// Index by Go types.
+var decOpTable = [...]decOp{
+ reflect.Bool: decBool,
+ reflect.Int8: decInt8,
+ reflect.Int16: decInt16,
+ reflect.Int32: decInt32,
+ reflect.Int64: decInt64,
+ reflect.Uint8: decUint8,
+ reflect.Uint16: decUint16,
+ reflect.Uint32: decUint32,
+ reflect.Uint64: decUint64,
+ reflect.Float32: decFloat32,
+ reflect.Float64: decFloat64,
+ reflect.Complex64: decComplex64,
+ reflect.Complex128: decComplex128,
+ reflect.String: decString,
+}
+
+// Indexed by gob types.
+var decIgnoreOpMap = map[typeId]decOp{
+ tBool: ignoreUint,
+ tInt: ignoreUint,
+ tUint: ignoreUint,
+ tFloat: ignoreUint,
+ tBytes: ignoreUint8Array,
+ tString: ignoreUint8Array,
+ tComplex: ignoreTwoUints,
+}
+
+// decOpFor returns the decoding op for the base type under rt and
+// the indirection count to reach it.
+func (dec *Decoder) decOpFor(wireId typeId, rt reflect.Type, name string, inProgress map[reflect.Type]*decOp) (*decOp, int) {
+ ut := userType(rt)
+ // If the type implements GobDecoder, we handle it without further processing.
+ if ut.isGobDecoder {
+ return dec.gobDecodeOpFor(ut)
+ }
+ // If this type is already in progress, it's a recursive type (e.g. map[string]*T).
+ // Return the pointer to the op we're already building.
+ if opPtr := inProgress[rt]; opPtr != nil {
+ return opPtr, ut.indir
+ }
+ typ := ut.base
+ indir := ut.indir
+ var op decOp
+ k := typ.Kind()
+ if int(k) < len(decOpTable) {
+ op = decOpTable[k]
+ }
+ if op == nil {
+ inProgress[rt] = &op
+ // Special cases
+ switch t := typ; t.Kind() {
+ case reflect.Array:
+ name = "element of " + name
+ elemId := dec.wireType[wireId].ArrayT.Elem
+ elemOp, elemIndir := dec.decOpFor(elemId, t.Elem(), name, inProgress)
+ ovfl := overflow(name)
+ op = func(i *decInstr, state *decoderState, p unsafe.Pointer) {
+ state.dec.decodeArray(t, state, uintptr(p), *elemOp, t.Elem().Size(), t.Len(), i.indir, elemIndir, ovfl)
+ }
+
+ case reflect.Map:
+ name = "element of " + name
+ keyId := dec.wireType[wireId].MapT.Key
+ elemId := dec.wireType[wireId].MapT.Elem
+ keyOp, keyIndir := dec.decOpFor(keyId, t.Key(), name, inProgress)
+ elemOp, elemIndir := dec.decOpFor(elemId, t.Elem(), name, inProgress)
+ ovfl := overflow(name)
+ op = func(i *decInstr, state *decoderState, p unsafe.Pointer) {
+ up := unsafe.Pointer(p)
+ state.dec.decodeMap(t, state, uintptr(up), *keyOp, *elemOp, i.indir, keyIndir, elemIndir, ovfl)
+ }
+
+ case reflect.Slice:
+ name = "element of " + name
+ if t.Elem().Kind() == reflect.Uint8 {
+ op = decUint8Slice
+ break
+ }
+ var elemId typeId
+ if tt, ok := builtinIdToType[wireId]; ok {
+ elemId = tt.(*sliceType).Elem
+ } else {
+ elemId = dec.wireType[wireId].SliceT.Elem
+ }
+ elemOp, elemIndir := dec.decOpFor(elemId, t.Elem(), name, inProgress)
+ ovfl := overflow(name)
+ op = func(i *decInstr, state *decoderState, p unsafe.Pointer) {
+ state.dec.decodeSlice(t, state, uintptr(p), *elemOp, t.Elem().Size(), i.indir, elemIndir, ovfl)
+ }
+
+ case reflect.Struct:
+ // Generate a closure that calls out to the engine for the nested type.
+ enginePtr, err := dec.getDecEnginePtr(wireId, userType(typ))
+ if err != nil {
+ error_(err)
+ }
+ op = func(i *decInstr, state *decoderState, p unsafe.Pointer) {
+ // indirect through enginePtr to delay evaluation for recursive structs.
+ dec.decodeStruct(*enginePtr, userType(typ), uintptr(p), i.indir)
+ }
+ case reflect.Interface:
+ op = func(i *decInstr, state *decoderState, p unsafe.Pointer) {
+ state.dec.decodeInterface(t, state, uintptr(p), i.indir)
+ }
+ }
+ }
+ if op == nil {
+ errorf("decode can't handle type %s", rt)
+ }
+ return &op, indir
+}
+
+// decIgnoreOpFor returns the decoding op for a field that has no destination.
+func (dec *Decoder) decIgnoreOpFor(wireId typeId) decOp {
+ op, ok := decIgnoreOpMap[wireId]
+ if !ok {
+ if wireId == tInterface {
+ // Special case because it's a method: the ignored item might
+ // define types and we need to record their state in the decoder.
+ op = func(i *decInstr, state *decoderState, p unsafe.Pointer) {
+ state.dec.ignoreInterface(state)
+ }
+ return op
+ }
+ // Special cases
+ wire := dec.wireType[wireId]
+ switch {
+ case wire == nil:
+ errorf("bad data: undefined type %s", wireId.string())
+ case wire.ArrayT != nil:
+ elemId := wire.ArrayT.Elem
+ elemOp := dec.decIgnoreOpFor(elemId)
+ op = func(i *decInstr, state *decoderState, p unsafe.Pointer) {
+ state.dec.ignoreArray(state, elemOp, wire.ArrayT.Len)
+ }
+
+ case wire.MapT != nil:
+ keyId := dec.wireType[wireId].MapT.Key
+ elemId := dec.wireType[wireId].MapT.Elem
+ keyOp := dec.decIgnoreOpFor(keyId)
+ elemOp := dec.decIgnoreOpFor(elemId)
+ op = func(i *decInstr, state *decoderState, p unsafe.Pointer) {
+ state.dec.ignoreMap(state, keyOp, elemOp)
+ }
+
+ case wire.SliceT != nil:
+ elemId := wire.SliceT.Elem
+ elemOp := dec.decIgnoreOpFor(elemId)
+ op = func(i *decInstr, state *decoderState, p unsafe.Pointer) {
+ state.dec.ignoreSlice(state, elemOp)
+ }
+
+ case wire.StructT != nil:
+ // Generate a closure that calls out to the engine for the nested type.
+ enginePtr, err := dec.getIgnoreEnginePtr(wireId)
+ if err != nil {
+ error_(err)
+ }
+ op = func(i *decInstr, state *decoderState, p unsafe.Pointer) {
+ // indirect through enginePtr to delay evaluation for recursive structs
+ state.dec.ignoreStruct(*enginePtr)
+ }
+
+ case wire.GobEncoderT != nil:
+ op = func(i *decInstr, state *decoderState, p unsafe.Pointer) {
+ state.dec.ignoreGobDecoder(state)
+ }
+ }
+ }
+ if op == nil {
+ errorf("bad data: ignore can't handle type %s", wireId.string())
+ }
+ return op
+}
+
+// gobDecodeOpFor returns the op for a type that is known to implement
+// GobDecoder.
+func (dec *Decoder) gobDecodeOpFor(ut *userTypeInfo) (*decOp, int) {
+ rcvrType := ut.user
+ if ut.decIndir == -1 {
+ rcvrType = reflect.PtrTo(rcvrType)
+ } else if ut.decIndir > 0 {
+ for i := int8(0); i < ut.decIndir; i++ {
+ rcvrType = rcvrType.Elem()
+ }
+ }
+ var op decOp
+ op = func(i *decInstr, state *decoderState, p unsafe.Pointer) {
+ // Caller has gotten us to within one indirection of our value.
+ if i.indir > 0 {
+ if *(*unsafe.Pointer)(p) == nil {
+ *(*unsafe.Pointer)(p) = unsafe.New(ut.base)
+ }
+ }
+ // Now p is a pointer to the base type. Do we need to climb out to
+ // get to the receiver type?
+ var v reflect.Value
+ if ut.decIndir == -1 {
+ v = reflect.ValueOf(unsafe.Unreflect(rcvrType, unsafe.Pointer(&p)))
+ } else {
+ v = reflect.ValueOf(unsafe.Unreflect(rcvrType, p))
+ }
+ state.dec.decodeGobDecoder(state, v)
+ }
+ return &op, int(ut.indir)
+}
+
+// compatibleType asks: Are these two gob Types compatible?
+// Answers the question for basic types, arrays, maps and slices, plus
+// GobEncoder/Decoder pairs.
+// Structs are considered ok; fields will be checked later.
+func (dec *Decoder) compatibleType(fr reflect.Type, fw typeId, inProgress map[reflect.Type]typeId) bool {
+ if rhs, ok := inProgress[fr]; ok {
+ return rhs == fw
+ }
+ inProgress[fr] = fw
+ ut := userType(fr)
+ wire, ok := dec.wireType[fw]
+ // If fr is a GobDecoder, the wire type must be GobEncoder.
+ // And if fr is not a GobDecoder, the wire type must not be either.
+ if ut.isGobDecoder != (ok && wire.GobEncoderT != nil) { // the parentheses look odd but are correct.
+ return false
+ }
+ if ut.isGobDecoder { // This test trumps all others.
+ return true
+ }
+ switch t := ut.base; t.Kind() {
+ default:
+ // chan, etc: cannot handle.
+ return false
+ case reflect.Bool:
+ return fw == tBool
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return fw == tInt
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return fw == tUint
+ case reflect.Float32, reflect.Float64:
+ return fw == tFloat
+ case reflect.Complex64, reflect.Complex128:
+ return fw == tComplex
+ case reflect.String:
+ return fw == tString
+ case reflect.Interface:
+ return fw == tInterface
+ case reflect.Array:
+ if !ok || wire.ArrayT == nil {
+ return false
+ }
+ array := wire.ArrayT
+ return t.Len() == array.Len && dec.compatibleType(t.Elem(), array.Elem, inProgress)
+ case reflect.Map:
+ if !ok || wire.MapT == nil {
+ return false
+ }
+ MapType := wire.MapT
+ return dec.compatibleType(t.Key(), MapType.Key, inProgress) && dec.compatibleType(t.Elem(), MapType.Elem, inProgress)
+ case reflect.Slice:
+ // Is it an array of bytes?
+ if t.Elem().Kind() == reflect.Uint8 {
+ return fw == tBytes
+ }
+ // Extract and compare element types.
+ var sw *sliceType
+ if tt, ok := builtinIdToType[fw]; ok {
+ sw = tt.(*sliceType)
+ } else {
+ sw = dec.wireType[fw].SliceT
+ }
+ elem := userType(t.Elem()).base
+ return sw != nil && dec.compatibleType(elem, sw.Elem, inProgress)
+ case reflect.Struct:
+ return true
+ }
+ return true
+}
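+
+// Examples of the rules above: a local int16 field may receive a wire tInt,
+// any unsigned local integer may receive a wire tUint, a local []byte matches
+// tBytes, and an array matches only when the element types are compatible and
+// the lengths agree.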
+
+// typeString returns a human-readable description of the type identified by remoteId.
+func (dec *Decoder) typeString(remoteId typeId) string {
+ if t := idToType[remoteId]; t != nil {
+ // globally known type.
+ return t.string()
+ }
+ return dec.wireType[remoteId].string()
+}
+
+// compileSingle compiles the decoder engine for a non-struct top-level value, including
+// GobDecoders.
+func (dec *Decoder) compileSingle(remoteId typeId, ut *userTypeInfo) (engine *decEngine, err error) {
+ rt := ut.user
+ engine = new(decEngine)
+ engine.instr = make([]decInstr, 1) // one item
+ name := rt.String() // best we can do
+ if !dec.compatibleType(rt, remoteId, make(map[reflect.Type]typeId)) {
+ return nil, errors.New("gob: wrong type received for local value " + name + ": " + dec.typeString(remoteId))
+ }
+ op, indir := dec.decOpFor(remoteId, rt, name, make(map[reflect.Type]*decOp))
+ ovfl := errors.New(`value for "` + name + `" out of range`)
+ engine.instr[singletonField] = decInstr{*op, singletonField, indir, 0, ovfl}
+ engine.numInstr = 1
+ return
+}
+
+// compileIgnoreSingle compiles the decoder engine for a non-struct top-level value that will be discarded.
+func (dec *Decoder) compileIgnoreSingle(remoteId typeId) (engine *decEngine, err error) {
+ engine = new(decEngine)
+ engine.instr = make([]decInstr, 1) // one item
+ op := dec.decIgnoreOpFor(remoteId)
+ ovfl := overflow(dec.typeString(remoteId))
+ engine.instr[0] = decInstr{op, 0, 0, 0, ovfl}
+ engine.numInstr = 1
+ return
+}
+
+// compileDec compiles the decoder engine for a value. If the value is not a struct,
+// it calls out to compileSingle.
+func (dec *Decoder) compileDec(remoteId typeId, ut *userTypeInfo) (engine *decEngine, err error) {
+ rt := ut.base
+ srt := rt
+ if srt.Kind() != reflect.Struct ||
+ ut.isGobDecoder {
+ return dec.compileSingle(remoteId, ut)
+ }
+ var wireStruct *structType
+ // Builtin types can come from the global pool; the rest must be defined by the decoder.
+ // Also we know we're decoding a struct now, so the client must have sent one.
+ if t, ok := builtinIdToType[remoteId]; ok {
+ wireStruct, _ = t.(*structType)
+ } else {
+ wire := dec.wireType[remoteId]
+ if wire == nil {
+ error_(errBadType)
+ }
+ wireStruct = wire.StructT
+ }
+ if wireStruct == nil {
+ errorf("type mismatch in decoder: want struct type %s; got non-struct", rt)
+ }
+ engine = new(decEngine)
+ engine.instr = make([]decInstr, len(wireStruct.Field))
+ seen := make(map[reflect.Type]*decOp)
+ // Loop over the fields of the wire type.
+ for fieldnum := 0; fieldnum < len(wireStruct.Field); fieldnum++ {
+ wireField := wireStruct.Field[fieldnum]
+ if wireField.Name == "" {
+ errorf("empty name for remote field of type %s", wireStruct.Name)
+ }
+ ovfl := overflow(wireField.Name)
+ // Find the field of the local type with the same name.
+ localField, present := srt.FieldByName(wireField.Name)
+ // TODO(r): anonymous names
+ if !present || !isExported(wireField.Name) {
+ op := dec.decIgnoreOpFor(wireField.Id)
+ engine.instr[fieldnum] = decInstr{op, fieldnum, 0, 0, ovfl}
+ continue
+ }
+ if !dec.compatibleType(localField.Type, wireField.Id, make(map[reflect.Type]typeId)) {
+ errorf("wrong type (%s) for received field %s.%s", localField.Type, wireStruct.Name, wireField.Name)
+ }
+ op, indir := dec.decOpFor(wireField.Id, localField.Type, localField.Name, seen)
+ engine.instr[fieldnum] = decInstr{*op, fieldnum, indir, uintptr(localField.Offset), ovfl}
+ engine.numInstr++
+ }
+ return
+}
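compileDec is where the field-matching rules from the package comment are enforced: wire fields are matched to local struct fields by name, and anything unmatched is consumed with an ignore op. A minimal standalone sketch (the types Sent and Received are hypothetical, invented for this illustration, not part of the patch) showing that behavior through the public API:

package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
	"log"
)

// The receiver omits field A; per the rules above it is simply skipped.
type Sent struct{ A, B int }
type Received struct{ B int }

func main() {
	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(Sent{A: 7, B: 42}); err != nil {
		log.Fatal(err)
	}
	var r Received
	if err := gob.NewDecoder(&buf).Decode(&r); err != nil {
		log.Fatal(err)
	}
	fmt.Println(r.B) // 42; the transmitted A was dropped by the ignore op
}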
+
+// getDecEnginePtr returns the engine for the specified type.
+func (dec *Decoder) getDecEnginePtr(remoteId typeId, ut *userTypeInfo) (enginePtr **decEngine, err error) {
+ rt := ut.base
+ decoderMap, ok := dec.decoderCache[rt]
+ if !ok {
+ decoderMap = make(map[typeId]**decEngine)
+ dec.decoderCache[rt] = decoderMap
+ }
+ if enginePtr, ok = decoderMap[remoteId]; !ok {
+ // To handle recursive types, mark this engine as underway before compiling.
+ enginePtr = new(*decEngine)
+ decoderMap[remoteId] = enginePtr
+ *enginePtr, err = dec.compileDec(remoteId, ut)
+ if err != nil {
+ delete(decoderMap, remoteId)
+ }
+ }
+ return
+}
+
+// emptyStruct is the type we compile into when ignoring a struct value.
+type emptyStruct struct{}
+
+var emptyStructType = reflect.TypeOf(emptyStruct{})
+
+// getIgnoreEnginePtr returns the engine for the specified type when the value is to be discarded.
+func (dec *Decoder) getIgnoreEnginePtr(wireId typeId) (enginePtr **decEngine, err error) {
+ var ok bool
+ if enginePtr, ok = dec.ignorerCache[wireId]; !ok {
+ // To handle recursive types, mark this engine as underway before compiling.
+ enginePtr = new(*decEngine)
+ dec.ignorerCache[wireId] = enginePtr
+ wire := dec.wireType[wireId]
+ if wire != nil && wire.StructT != nil {
+ *enginePtr, err = dec.compileDec(wireId, userType(emptyStructType))
+ } else {
+ *enginePtr, err = dec.compileIgnoreSingle(wireId)
+ }
+ if err != nil {
+ delete(dec.ignorerCache, wireId)
+ }
+ }
+ return
+}
+
+// decodeValue decodes the data stream representing a value and stores it in val.
+func (dec *Decoder) decodeValue(wireId typeId, val reflect.Value) {
+ defer catchError(&dec.err)
+ // If the value is nil, it means we should just ignore this item.
+ if !val.IsValid() {
+ dec.decodeIgnoredValue(wireId)
+ return
+ }
+ // Dereference down to the underlying type.
+ ut := userType(val.Type())
+ base := ut.base
+ var enginePtr **decEngine
+ enginePtr, dec.err = dec.getDecEnginePtr(wireId, ut)
+ if dec.err != nil {
+ return
+ }
+ engine := *enginePtr
+ if st := base; st.Kind() == reflect.Struct && !ut.isGobDecoder {
+ if engine.numInstr == 0 && st.NumField() > 0 && len(dec.wireType[wireId].StructT.Field) > 0 {
+ name := base.Name()
+ errorf("type mismatch: no fields matched compiling decoder for %s", name)
+ }
+ dec.decodeStruct(engine, ut, uintptr(unsafeAddr(val)), ut.indir)
+ } else {
+ dec.decodeSingle(engine, ut, uintptr(unsafeAddr(val)))
+ }
+}
+
+// decodeIgnoredValue decodes the data stream representing a value of the specified type and discards it.
+func (dec *Decoder) decodeIgnoredValue(wireId typeId) {
+ var enginePtr **decEngine
+ enginePtr, dec.err = dec.getIgnoreEnginePtr(wireId)
+ if dec.err != nil {
+ return
+ }
+ wire := dec.wireType[wireId]
+ if wire != nil && wire.StructT != nil {
+ dec.ignoreStruct(*enginePtr)
+ } else {
+ dec.ignoreSingle(*enginePtr)
+ }
+}
+
+func init() {
+ var iop, uop decOp
+ switch reflect.TypeOf(int(0)).Bits() {
+ case 32:
+ iop = decInt32
+ uop = decUint32
+ case 64:
+ iop = decInt64
+ uop = decUint64
+ default:
+ panic("gob: unknown size of int/uint")
+ }
+ decOpTable[reflect.Int] = iop
+ decOpTable[reflect.Uint] = uop
+
+ // Finally uintptr
+ switch reflect.TypeOf(uintptr(0)).Bits() {
+ case 32:
+ uop = decUint32
+ case 64:
+ uop = decUint64
+ default:
+ panic("gob: unknown size of uintptr")
+ }
+ decOpTable[reflect.Uintptr] = uop
+}
+
+// Gob assumes it can call UnsafeAddr on any Value
+// in order to get a pointer it can copy data from.
+// Values that have just been created and do not point
+// into existing structs or slices cannot be addressed,
+// so simulate it by returning a pointer to a copy.
+// Each call allocates once.
+func unsafeAddr(v reflect.Value) uintptr {
+ if v.CanAddr() {
+ return v.UnsafeAddr()
+ }
+ x := reflect.New(v.Type()).Elem()
+ x.Set(v)
+ return x.UnsafeAddr()
+}
+
+// Gob depends on being able to take the address
+// of zeroed Values it creates, so use this wrapper instead
+// of the standard reflect.Zero.
+// Each call allocates once.
+func allocValue(t reflect.Type) reflect.Value {
+ return reflect.New(t).Elem()
+}
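The two helpers above exist because reflect refuses to hand out addresses for values that do not live in addressable storage. A standalone sketch of that distinction, assuming nothing beyond the standard reflect package:

package main

import (
	"fmt"
	"reflect"
)

func main() {
	v := reflect.ValueOf(42)
	fmt.Println(v.CanAddr()) // false: a freshly wrapped value cannot be addressed

	// Copy the value into freshly allocated memory, as unsafeAddr does.
	x := reflect.New(v.Type()).Elem()
	x.Set(v)
	fmt.Println(x.CanAddr()) // true: the copy is addressable
}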
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gob
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "io"
+ "reflect"
+ "sync"
+)
+
+// A Decoder manages the receipt of type and data information read from the
+// remote side of a connection.
+type Decoder struct {
+ mutex sync.Mutex // each item must be received atomically
+ r io.Reader // source of the data
+ buf bytes.Buffer // buffer for more efficient i/o from r
+ wireType map[typeId]*wireType // map from remote ID to local description
+ decoderCache map[reflect.Type]map[typeId]**decEngine // cache of compiled engines
+ ignorerCache map[typeId]**decEngine // ditto for ignored objects
+ freeList *decoderState // list of free decoderStates; avoids reallocation
+ countBuf []byte // used for decoding integers while parsing messages
+ tmp []byte // temporary storage for i/o; saves reallocating
+ err error
+}
+
+// NewDecoder returns a new decoder that reads from the io.Reader.
+// If r does not also implement io.ByteReader, it will be wrapped in a
+// bufio.Reader.
+func NewDecoder(r io.Reader) *Decoder {
+ dec := new(Decoder)
+ // We use the ability to read bytes as a plausible surrogate for buffering.
+ if _, ok := r.(io.ByteReader); !ok {
+ r = bufio.NewReader(r)
+ }
+ dec.r = r
+ dec.wireType = make(map[typeId]*wireType)
+ dec.decoderCache = make(map[reflect.Type]map[typeId]**decEngine)
+ dec.ignorerCache = make(map[typeId]**decEngine)
+ dec.countBuf = make([]byte, 9) // counts may be uint64s (unlikely!), require 9 bytes
+
+ return dec
+}
+
+// recvType loads the definition of a type.
+func (dec *Decoder) recvType(id typeId) {
+ // Have we already seen this type? That's an error
+ if id < firstUserId || dec.wireType[id] != nil {
+ dec.err = errors.New("gob: duplicate type received")
+ return
+ }
+
+ // Type:
+ wire := new(wireType)
+ dec.decodeValue(tWireType, reflect.ValueOf(wire))
+ if dec.err != nil {
+ return
+ }
+ // Remember we've seen this type.
+ dec.wireType[id] = wire
+}
+
+var errBadCount = errors.New("invalid message length")
+
+// recvMessage reads the next count-delimited item from the input. It is the converse
+// of Encoder.writeMessage. It returns false on EOF or other error reading the message.
+func (dec *Decoder) recvMessage() bool {
+ // Read a count.
+ nbytes, _, err := decodeUintReader(dec.r, dec.countBuf)
+ if err != nil {
+ dec.err = err
+ return false
+ }
+ if nbytes >= 1<<31 {
+ dec.err = errBadCount
+ return false
+ }
+ dec.readMessage(int(nbytes))
+ return dec.err == nil
+}
+
+// readMessage reads the next nbytes bytes from the input.
+func (dec *Decoder) readMessage(nbytes int) {
+ // Allocate the buffer.
+ if cap(dec.tmp) < nbytes {
+ dec.tmp = make([]byte, nbytes+100) // room to grow
+ }
+ dec.tmp = dec.tmp[:nbytes]
+
+ // Read the data
+ _, dec.err = io.ReadFull(dec.r, dec.tmp)
+ if dec.err != nil {
+ if dec.err == io.EOF {
+ dec.err = io.ErrUnexpectedEOF
+ }
+ return
+ }
+ dec.buf.Write(dec.tmp)
+}
+
+// toInt turns an encoded uint64 into an int, according to the marshaling rules.
+func toInt(x uint64) int64 {
+ i := int64(x >> 1)
+ if x&1 != 0 {
+ i = ^i
+ }
+ return i
+}
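toInt reverses the signed-integer encoding described in doc.go: bit 0 is a complement flag and the remaining bits carry the magnitude. A small illustrative program (not part of the patch) applying the same rule to a few encoded values:

package main

import "fmt"

func main() {
	for _, x := range []uint64{0, 1, 2, 3, 0x0E} {
		i := int64(x >> 1) // drop the complement flag in bit 0
		if x&1 != 0 {
			i = ^i // flag set: complement to recover the negative value
		}
		fmt.Printf("%#x -> %d\n", x, i) // 0, -1, 1, -2, 7
	}
}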
+
+func (dec *Decoder) nextInt() int64 {
+ n, _, err := decodeUintReader(&dec.buf, dec.countBuf)
+ if err != nil {
+ dec.err = err
+ }
+ return toInt(n)
+}
+
+func (dec *Decoder) nextUint() uint64 {
+ n, _, err := decodeUintReader(&dec.buf, dec.countBuf)
+ if err != nil {
+ dec.err = err
+ }
+ return n
+}
+
+// decodeTypeSequence parses:
+// TypeSequence
+// (TypeDefinition DelimitedTypeDefinition*)?
+// and returns the type id of the next value. It returns -1 at
+// EOF. Upon return, the remainder of dec.buf is the value to be
+// decoded. If this is an interface value, it can be ignored by
+// simply resetting that buffer.
+func (dec *Decoder) decodeTypeSequence(isInterface bool) typeId {
+ for dec.err == nil {
+ if dec.buf.Len() == 0 {
+ if !dec.recvMessage() {
+ break
+ }
+ }
+ // Receive a type id.
+ id := typeId(dec.nextInt())
+ if id >= 0 {
+ // Value follows.
+ return id
+ }
+ // Type definition for (-id) follows.
+ dec.recvType(-id)
+ // When decoding an interface, after a type there may be a
+ // DelimitedValue still in the buffer. Skip its count.
+ // (Alternatively, the buffer is empty and the byte count
+ // will be absorbed by recvMessage.)
+ if dec.buf.Len() > 0 {
+ if !isInterface {
+ dec.err = errors.New("extra data in buffer")
+ break
+ }
+ dec.nextUint()
+ }
+ }
+ return -1
+}
+
+// Decode reads the next value from the connection and stores
+// it in the data represented by the empty interface value.
+// If e is nil, the value will be discarded. Otherwise,
+// the value underlying e must be a pointer to the
+// correct type for the next data item received.
+func (dec *Decoder) Decode(e interface{}) error {
+ if e == nil {
+ return dec.DecodeValue(reflect.Value{})
+ }
+ value := reflect.ValueOf(e)
+ // If e represents a value as opposed to a pointer, the answer won't
+ // get back to the caller. Make sure it's a pointer.
+ if value.Type().Kind() != reflect.Ptr {
+ dec.err = errors.New("gob: attempt to decode into a non-pointer")
+ return dec.err
+ }
+ return dec.DecodeValue(value)
+}
+
+// DecodeValue reads the next value from the connection.
+// If v is the zero reflect.Value (v.Kind() == Invalid), DecodeValue discards the value.
+// Otherwise, it stores the value into v. In that case, v must represent
+// a non-nil pointer to data or be an assignable reflect.Value (v.CanSet()).
+func (dec *Decoder) DecodeValue(v reflect.Value) error {
+ if v.IsValid() {
+ if v.Kind() == reflect.Ptr && !v.IsNil() {
+ // That's okay, we'll store through the pointer.
+ } else if !v.CanSet() {
+ return errors.New("gob: DecodeValue of unassignable value")
+ }
+ }
+ // Make sure we're single-threaded through here.
+ dec.mutex.Lock()
+ defer dec.mutex.Unlock()
+
+ dec.buf.Reset() // In case data lingers from previous invocation.
+ dec.err = nil
+ id := dec.decodeTypeSequence(false)
+ if dec.err == nil {
+ dec.decodeValue(id, v)
+ }
+ return dec.err
+}
+
+// If debug.go is compiled into the program, debugFunc prints a human-readable
+// representation of the gob data read from r by calling that file's Debug function.
+// Otherwise it is nil.
+var debugFunc func(io.Reader)
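For orientation, here is a minimal round trip through the Encoder and Decoder added by this patch, borrowing the Point example from doc.go (the encoding/gob import path matches the one used by the dump program later in this patch):

package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
	"log"
)

type Point struct{ X, Y int }

func main() {
	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(Point{22, 33}); err != nil {
		log.Fatal(err)
	}
	var p Point
	if err := gob.NewDecoder(&buf).Decode(&p); err != nil {
		log.Fatal(err)
	}
	fmt.Println(p) // {22 33}
}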
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package gob manages streams of gobs - binary values exchanged between an
+Encoder (transmitter) and a Decoder (receiver). A typical use is transporting
+arguments and results of remote procedure calls (RPCs) such as those provided by
+package "rpc".
+
+A stream of gobs is self-describing. Each data item in the stream is preceded by
+a specification of its type, expressed in terms of a small set of predefined
+types. Pointers are not transmitted, but the things they point to are
+transmitted; that is, the values are flattened. Recursive types work fine, but
+recursive values (data with cycles) are problematic. This may change.
+
+To use gobs, create an Encoder and present it with a series of data items as
+values or addresses that can be dereferenced to values. The Encoder makes sure
+all type information is sent before it is needed. At the receive side, a
+Decoder retrieves values from the encoded stream and unpacks them into local
+variables.
+
+The source and destination values/types need not correspond exactly. For structs,
+fields (identified by name) that are in the source but absent from the receiving
+variable will be ignored. Fields that are in the receiving variable but missing
+from the transmitted type or value will be ignored in the destination. If a field
+with the same name is present in both, their types must be compatible. Both the
+receiver and transmitter will do all necessary indirection and dereferencing to
+convert between gobs and actual Go values. For instance, a gob type that is
+schematically,
+
+ struct { A, B int }
+
+can be sent from or received into any of these Go types:
+
+ struct { A, B int } // the same
+ *struct { A, B int } // extra indirection of the struct
+ struct { *A, **B int } // extra indirection of the fields
+ struct { A, B int64 } // different concrete value type; see below
+
+It may also be received into any of these:
+
+ struct { A, B int } // the same
+ struct { B, A int } // ordering doesn't matter; matching is by name
+ struct { A, B, C int } // extra field (C) ignored
+ struct { B int } // missing field (A) ignored; data will be dropped
+ struct { B, C int } // missing field (A) ignored; extra field (C) ignored.
+
+Attempting to receive into these types will draw a decode error:
+
+ struct { A int; B uint } // change of signedness for B
+ struct { A int; B float } // change of type for B
+ struct { } // no field names in common
+ struct { C, D int } // no field names in common
+
+Integers are transmitted two ways: arbitrary precision signed integers or
+arbitrary precision unsigned integers. There is no int8, int16 etc.
+discrimination in the gob format; there are only signed and unsigned integers. As
+described below, the transmitter sends the value in a variable-length encoding;
+the receiver accepts the value and stores it in the destination variable.
+Floating-point numbers are always sent using IEEE-754 64-bit precision (see
+below).
+
+Signed integers may be received into any signed integer variable: int, int16, etc.;
+unsigned integers may be received into any unsigned integer variable; and floating
+point values may be received into any floating point variable. However,
+the destination variable must be able to represent the value or the decode
+operation will fail.
+
+Structs, arrays and slices are also supported. Strings and arrays of bytes are
+supported with a special, efficient representation (see below). When a slice is
+decoded, if the existing slice has capacity the slice will be extended in place;
+if not, a new array is allocated. Regardless, the length of the resulting slice
+reports the number of elements decoded.
+
+Functions and channels cannot be sent in a gob. Attempting
+to encode a value that contains one will fail.
+
+The rest of this comment documents the encoding, details that are not important
+for most users. Details are presented bottom-up.
+
+An unsigned integer is sent one of two ways. If it is less than 128, it is sent
+as a byte with that value. Otherwise it is sent as a minimal-length big-endian
+(high byte first) byte stream holding the value, preceded by one byte holding the
+byte count, negated. Thus 0 is transmitted as (00), 7 is transmitted as (07) and
+256 is transmitted as (FE 01 00).
+
+A boolean is encoded within an unsigned integer: 0 for false, 1 for true.
+
+A signed integer, i, is encoded within an unsigned integer, u. Within u, bits 1
+upward contain the value; bit 0 says whether they should be complemented upon
+receipt. The encode algorithm looks like this:
+
+ var u uint
+ if i < 0 {
+ u = (^i << 1) | 1 // complement i, bit 0 is 1
+ } else {
+ u = (i << 1) // do not complement i, bit 0 is 0
+ }
+ encodeUnsigned(u)
+
+The low bit is therefore analogous to a sign bit, but making it the complement bit
+instead guarantees that the largest negative integer is not a special case. For
+example, -129=^128=(^256>>1) encodes as (FE 01 01).
+
+Floating-point numbers are always sent as a representation of a float64 value.
+That value is converted to a uint64 using math.Float64bits. The uint64 is then
+byte-reversed and sent as a regular unsigned integer. The byte-reversal means the
+exponent and high-precision part of the mantissa go first. Since the low bits are
+often zero, this can save encoding bytes. For instance, 17.0 is encoded in only
+three bytes (FE 31 40).
+
+Strings and slices of bytes are sent as an unsigned count followed by that many
+uninterpreted bytes of the value.
+
+All other slices and arrays are sent as an unsigned count followed by that many
+elements using the standard gob encoding for their type, recursively.
+
+Maps are sent as an unsigned count followed by that many key, element
+pairs. Empty but non-nil maps are sent, so if the sender has allocated
+a map, the receiver will allocate a map even if no elements are
+transmitted.
+
+Structs are sent as a sequence of (field number, field value) pairs. The field
+value is sent using the standard gob encoding for its type, recursively. If a
+field has the zero value for its type, it is omitted from the transmission. The
+field number is defined by the type of the encoded struct: the first field of the
+encoded type is field 0, the second is field 1, etc. When encoding a value, the
+field numbers are delta encoded for efficiency and the fields are always sent in
+order of increasing field number; the deltas are therefore unsigned. The
+initialization for the delta encoding sets the field number to -1, so an unsigned
+integer field 0 with value 7 is transmitted as unsigned delta = 1, unsigned value
+= 7 or (01 07). Finally, after all the fields have been sent a terminating mark
+denotes the end of the struct. That mark is a delta=0 value, which has
+representation (00).
+
+Interface types are not checked for compatibility; all interface types are
+treated, for transmission, as members of a single "interface" type, analogous to
+int or []byte - in effect they're all treated as interface{}. Interface values
+are transmitted as a string identifying the concrete type being sent (a name
+that must be pre-defined by calling Register), followed by a byte count of the
+length of the following data (so the value can be skipped if it cannot be
+stored), followed by the usual encoding of the concrete (dynamic) value stored in
+the interface value. (A nil interface value is identified by the empty string
+and transmits no value.) Upon receipt, the decoder verifies that the unpacked
+concrete item satisfies the interface of the receiving variable.
+
+The representation of types is described below. When a type is defined on a given
+connection between an Encoder and Decoder, it is assigned a signed integer type
+id. When Encoder.Encode(v) is called, it makes sure there is an id assigned for
+the type of v and all its elements and then it sends the pair (typeid, encoded-v)
+where typeid is the type id of the encoded type of v and encoded-v is the gob
+encoding of the value v.
+
+To define a type, the encoder chooses an unused, positive type id and sends the
+pair (-type id, encoded-type) where encoded-type is the gob encoding of a wireType
+description, constructed from these types:
+
+ type wireType struct {
+ ArrayT *ArrayType
+ SliceT *SliceType
+ StructT *StructType
+ MapT *MapType
+ }
+ type ArrayType struct {
+ CommonType
+ Elem typeId
+ Len int
+ }
+ type CommonType struct {
+ Name string // the name of the struct type
+ Id int // the id of the type, repeated so it's inside the type
+ }
+ type SliceType struct {
+ CommonType
+ Elem typeId
+ }
+ type StructType struct {
+ CommonType
+ Field []*fieldType // the fields of the struct.
+ }
+ type FieldType struct {
+ Name string // the name of the field.
+ Id int // the type id of the field, which must be already defined
+ }
+ type MapType struct {
+ CommonType
+ Key typeId
+ Elem typeId
+ }
+
+If there are nested type ids, the types for all inner type ids must be defined
+before the top-level type id is used to describe an encoded-v.
+
+For simplicity in setup, the connection is defined to understand these types a
+priori, as well as the basic gob types int, uint, etc. Their ids are:
+
+ bool 1
+ int 2
+ uint 3
+ float 4
+ []byte 5
+ string 6
+ complex 7
+ interface 8
+ // gap for reserved ids.
+ WireType 16
+ ArrayType 17
+ CommonType 18
+ SliceType 19
+ StructType 20
+ FieldType 21
+ // 22 is slice of fieldType.
+ MapType 23
+
+Finally, each message created by a call to Encode is preceded by an encoded
+unsigned integer count of the number of bytes remaining in the message. After
+the initial type name, interface values are wrapped the same way; in effect, the
+interface value acts like a recursive invocation of Encode.
+
+In summary, a gob stream looks like
+
+ (byteCount (-type id, encoding of a wireType)* (type id, encoding of a value))*
+
+where * signifies zero or more repetitions and the type id of a value must
+be predefined or be defined before the value in the stream.
+
+See "Gobs of data" for a design discussion of the gob wire format:
+http://blog.golang.org/2011/03/gobs-of-data.html
+*/
+package gob
+
+/*
+Grammar:
+
+Tokens starting with a lower case letter are terminals; int(n)
+and uint(n) represent the signed/unsigned encodings of the value n.
+
+GobStream:
+ DelimitedMessage*
+DelimitedMessage:
+ uint(lengthOfMessage) Message
+Message:
+ TypeSequence TypedValue
+TypeSequence
+ (TypeDefinition DelimitedTypeDefinition*)?
+DelimitedTypeDefinition:
+ uint(lengthOfTypeDefinition) TypeDefinition
+TypedValue:
+ int(typeId) Value
+TypeDefinition:
+ int(-typeId) encodingOfWireType
+Value:
+ SingletonValue | StructValue
+SingletonValue:
+ uint(0) FieldValue
+FieldValue:
+ builtinValue | ArrayValue | MapValue | SliceValue | StructValue | InterfaceValue
+InterfaceValue:
+ NilInterfaceValue | NonNilInterfaceValue
+NilInterfaceValue:
+ uint(0)
+NonNilInterfaceValue:
+ ConcreteTypeName TypeSequence InterfaceContents
+ConcreteTypeName:
+ uint(lengthOfName) [already read=n] name
+InterfaceContents:
+ int(concreteTypeId) DelimitedValue
+DelimitedValue:
+ uint(length) Value
+ArrayValue:
+ uint(n) FieldValue*n [n elements]
+MapValue:
+ uint(n) (FieldValue FieldValue)*n [n (key, value) pairs]
+SliceValue:
+ uint(n) FieldValue*n [n elements]
+StructValue:
+ (uint(fieldDelta) FieldValue)*
+*/
+
+/*
+For implementers and the curious, here is an encoded example. Given
+ type Point struct {X, Y int}
+and the value
+ p := Point{22, 33}
+the bytes transmitted that encode p will be:
+ 1f ff 81 03 01 01 05 50 6f 69 6e 74 01 ff 82 00
+ 01 02 01 01 58 01 04 00 01 01 59 01 04 00 00 00
+ 07 ff 82 01 2c 01 42 00
+They are determined as follows.
+
+Since this is the first transmission of type Point, the type descriptor
+for Point itself must be sent before the value. This is the first type
+we've sent on this Encoder, so it has type id 65 (0 through 64 are
+reserved).
+
+ 1f // This item (a type descriptor) is 31 bytes long.
+ ff 81 // The negative of the id for the type we're defining, -65.
+ // This is one byte (indicated by FF = -1) followed by
+ // ^-65<<1 | 1. The low 1 bit signals to complement the
+ // rest upon receipt.
+
+ // Now we send a type descriptor, which is itself a struct (wireType).
+ // The type of wireType itself is known (it's built in, as is the type of
+ // all its components), so we just need to send a *value* of type wireType
+ // that represents type "Point".
+ // Here starts the encoding of that value.
+ // Set the field number implicitly to -1; this is done at the beginning
+ // of every struct, including nested structs.
+ 03 // Add 3 to field number; now 2 (wireType.structType; this is a struct).
+ // structType starts with an embedded commonType, which appears
+ // as a regular structure here too.
+ 01 // add 1 to field number (now 0); start of embedded commonType.
+ 01 // add 1 to field number (now 0, the name of the type)
+ 05 // string is (unsigned) 5 bytes long
+ 50 6f 69 6e 74 // wireType.structType.commonType.name = "Point"
+ 01 // add 1 to field number (now 1, the id of the type)
+ ff 82 // wireType.structType.commonType._id = 65
+ 00 // end of embedded wiretype.structType.commonType struct
+ 01 // add 1 to field number (now 1, the field array in wireType.structType)
+ 02 // There are two fields in the type (len(structType.field))
+ 01 // Start of first field structure; add 1 to get field number 0: field[0].name
+ 01 // 1 byte
+ 58 // structType.field[0].name = "X"
+ 01 // Add 1 to get field number 1: field[0].id
+ 04 // structType.field[0].typeId is 2 (signed int).
+ 00 // End of structType.field[0]; start structType.field[1]; set field number to -1.
+ 01 // Add 1 to get field number 0: field[1].name
+ 01 // 1 byte
+ 59 // structType.field[1].name = "Y"
+ 01 // Add 1 to get field number 1: field[1].id
+ 04 // structType.field[1].typeId is 2 (signed int).
+ 00 // End of structType.field[1]; end of structType.field.
+ 00 // end of wireType.structType structure
+ 00 // end of wireType structure
+
+Now we can send the Point value. Again the field number resets to -1:
+
+ 07 // this value is 7 bytes long
+ ff 82 // the type number, 65 (1 byte (-FF) followed by 65<<1)
+ 01 // add one to field number, yielding field 0
+ 2c // encoding of signed "22" (0x2c = 44 = 22<<1); Point.x = 22
+ 01 // add one to field number, yielding field 1
+ 42 // encoding of signed "33" (0x42 = 66 = 33<<1); Point.y = 33
+ 00 // end of structure
+
+The type encoding is long and fairly intricate but we send it only once.
+If p is transmitted a second time, the type is already known so the
+output will be just:
+
+ 07 ff 82 01 2c 01 42 00
+
+A single non-struct value at top level is transmitted like a field with
+delta tag 0. For instance, a signed integer with value 3 presented as
+the argument to Encode will emit:
+
+ 03 04 00 06
+
+Which represents:
+
+ 03 // this value is 3 bytes long
+ 04 // the type number, 2, represents an integer
+ 00 // tag delta 0
+ 06 // value 3
+
+*/
--- /dev/null
+package main
+
+// Need to compile package gob with debug.go to build this program.
+
+import (
+ "encoding/gob"
+ "fmt"
+ "os"
+)
+
+func main() {
+ var err error
+ file := os.Stdin
+ if len(os.Args) > 1 {
+ file, err = os.Open(os.Args[1])
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "dump: %s\n", err)
+ os.Exit(1)
+ }
+ }
+ gob.Debug(file)
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gob
+
+import (
+ "bytes"
+ "math"
+ "reflect"
+ "unsafe"
+)
+
+const uint64Size = int(unsafe.Sizeof(uint64(0)))
+
+// encoderState is the global execution state of an instance of the encoder.
+// Field numbers are delta encoded and always increase. The field
+// number is initialized to -1 so 0 comes out as delta(1). A delta of
+// 0 terminates the structure.
+type encoderState struct {
+ enc *Encoder
+ b *bytes.Buffer
+ sendZero bool // encoding an array element or map key/value pair; send zero values
+ fieldnum int // the last field number written.
+ buf [1 + uint64Size]byte // buffer used by the encoder; here to avoid allocation.
+ next *encoderState // for free list
+}
+
+func (enc *Encoder) newEncoderState(b *bytes.Buffer) *encoderState {
+ e := enc.freeList
+ if e == nil {
+ e = new(encoderState)
+ e.enc = enc
+ } else {
+ enc.freeList = e.next
+ }
+ e.sendZero = false
+ e.fieldnum = 0
+ e.b = b
+ return e
+}
+
+func (enc *Encoder) freeEncoderState(e *encoderState) {
+ e.next = enc.freeList
+ enc.freeList = e
+}
+
+// Unsigned integers have a two-state encoding. If the number is less
+// than 128 (0 through 0x7F), its value is written directly.
+// Otherwise the value is written in big-endian byte order preceded
+// by the byte length, negated.
+
+// encodeUint writes an encoded unsigned integer to state.b.
+func (state *encoderState) encodeUint(x uint64) {
+ if x <= 0x7F {
+ err := state.b.WriteByte(uint8(x))
+ if err != nil {
+ error_(err)
+ }
+ return
+ }
+ i := uint64Size
+ for x > 0 {
+ state.buf[i] = uint8(x)
+ x >>= 8
+ i--
+ }
+ state.buf[i] = uint8(i - uint64Size) // = loop count, negated
+ _, err := state.b.Write(state.buf[i : uint64Size+1])
+ if err != nil {
+ error_(err)
+ }
+}
+
+// encodeInt writes an encoded signed integer to state.b.
+// The low bit of the encoding says whether to bit complement the (other bits of the)
+// uint to recover the int.
+func (state *encoderState) encodeInt(i int64) {
+ var x uint64
+ if i < 0 {
+ x = uint64(^i<<1) | 1
+ } else {
+ x = uint64(i << 1)
+ }
+ state.encodeUint(uint64(x))
+}
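Taken together, encodeInt and encodeUint produce the wire bytes quoted in doc.go. A throwaway sketch (re-deriving the rule rather than calling these unexported methods) that reproduces the documented encodings of 256 and -129:

package main

import "fmt"

// encode replicates the unsigned wire rule: values below 128 are a single
// byte; otherwise a negated byte count precedes the big-endian value bytes.
func encode(x uint64) []byte {
	if x <= 0x7F {
		return []byte{byte(x)}
	}
	var b []byte
	for x > 0 {
		b = append([]byte{byte(x)}, b...)
		x >>= 8
	}
	return append([]byte{byte(-len(b))}, b...)
}

func main() {
	fmt.Printf("% x\n", encode(256)) // fe 01 00

	i := int64(-129)
	u := uint64(^i<<1) | 1           // the complement trick from encodeInt
	fmt.Printf("% x\n", encode(u))   // fe 01 01, as stated in doc.go
}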
+
+// encOp is the signature of an encoding operator for a given type.
+type encOp func(i *encInstr, state *encoderState, p unsafe.Pointer)
+
+// The 'instructions' of the encoding machine
+type encInstr struct {
+ op encOp
+ field int // field number
+ indir int // how many pointer indirections to reach the value in the struct
+ offset uintptr // offset in the structure of the field to encode
+}
+
+// update emits a field number and updates the state to record its value for delta encoding.
+// If the instruction pointer is nil, it does nothing
+func (state *encoderState) update(instr *encInstr) {
+ if instr != nil {
+ state.encodeUint(uint64(instr.field - state.fieldnum))
+ state.fieldnum = instr.field
+ }
+}
+
+// Each encoder for a composite is responsible for handling any
+// indirections associated with the elements of the data structure.
+// If any pointer so reached is nil, no bytes are written. If the
+// data item is zero, no bytes are written. Single values - ints,
+// strings etc. - are indirected before calling their encoders.
+// Otherwise, the output (for a scalar) is the field number, as an
+// encoded integer, followed by the field data in its appropriate
+// format.
+
+// encIndirect dereferences p indir times and returns the result.
+func encIndirect(p unsafe.Pointer, indir int) unsafe.Pointer {
+ for ; indir > 0; indir-- {
+ p = *(*unsafe.Pointer)(p)
+ if p == nil {
+ return unsafe.Pointer(nil)
+ }
+ }
+ return p
+}
+
+// encBool encodes the bool with address p as an unsigned 0 or 1.
+func encBool(i *encInstr, state *encoderState, p unsafe.Pointer) {
+ b := *(*bool)(p)
+ if b || state.sendZero {
+ state.update(i)
+ if b {
+ state.encodeUint(1)
+ } else {
+ state.encodeUint(0)
+ }
+ }
+}
+
+// encInt encodes the int with address p.
+func encInt(i *encInstr, state *encoderState, p unsafe.Pointer) {
+ v := int64(*(*int)(p))
+ if v != 0 || state.sendZero {
+ state.update(i)
+ state.encodeInt(v)
+ }
+}
+
+// encUint encodes the uint with address p.
+func encUint(i *encInstr, state *encoderState, p unsafe.Pointer) {
+ v := uint64(*(*uint)(p))
+ if v != 0 || state.sendZero {
+ state.update(i)
+ state.encodeUint(v)
+ }
+}
+
+// encInt8 encodes the int8 with address p.
+func encInt8(i *encInstr, state *encoderState, p unsafe.Pointer) {
+ v := int64(*(*int8)(p))
+ if v != 0 || state.sendZero {
+ state.update(i)
+ state.encodeInt(v)
+ }
+}
+
+// encUint8 encodes the uint8 with address p.
+func encUint8(i *encInstr, state *encoderState, p unsafe.Pointer) {
+ v := uint64(*(*uint8)(p))
+ if v != 0 || state.sendZero {
+ state.update(i)
+ state.encodeUint(v)
+ }
+}
+
+// encInt16 encodes the int16 with address p.
+func encInt16(i *encInstr, state *encoderState, p unsafe.Pointer) {
+ v := int64(*(*int16)(p))
+ if v != 0 || state.sendZero {
+ state.update(i)
+ state.encodeInt(v)
+ }
+}
+
+// encUint16 encodes the uint16 with address p.
+func encUint16(i *encInstr, state *encoderState, p unsafe.Pointer) {
+ v := uint64(*(*uint16)(p))
+ if v != 0 || state.sendZero {
+ state.update(i)
+ state.encodeUint(v)
+ }
+}
+
+// encInt32 encodes the int32 with address p.
+func encInt32(i *encInstr, state *encoderState, p unsafe.Pointer) {
+ v := int64(*(*int32)(p))
+ if v != 0 || state.sendZero {
+ state.update(i)
+ state.encodeInt(v)
+ }
+}
+
+// encUint32 encodes the uint32 with address p.
+func encUint32(i *encInstr, state *encoderState, p unsafe.Pointer) {
+ v := uint64(*(*uint32)(p))
+ if v != 0 || state.sendZero {
+ state.update(i)
+ state.encodeUint(v)
+ }
+}
+
+// encInt64 encodes the int64 with address p.
+func encInt64(i *encInstr, state *encoderState, p unsafe.Pointer) {
+ v := *(*int64)(p)
+ if v != 0 || state.sendZero {
+ state.update(i)
+ state.encodeInt(v)
+ }
+}
+
+// encUint64 encodes the uint64 with address p.
+func encUint64(i *encInstr, state *encoderState, p unsafe.Pointer) {
+ v := *(*uint64)(p)
+ if v != 0 || state.sendZero {
+ state.update(i)
+ state.encodeUint(v)
+ }
+}
+
+// encUintptr encodes the uintptr with address p.
+func encUintptr(i *encInstr, state *encoderState, p unsafe.Pointer) {
+ v := uint64(*(*uintptr)(p))
+ if v != 0 || state.sendZero {
+ state.update(i)
+ state.encodeUint(v)
+ }
+}
+
+// floatBits returns a uint64 holding the bits of a floating-point number.
+// Floating-point numbers are transmitted as uint64s holding the bits
+// of the underlying representation. They are sent byte-reversed, with
+// the exponent end coming out first, so integer floating point numbers
+// (for example) transmit more compactly. This routine does the
+// swizzling.
+func floatBits(f float64) uint64 {
+ u := math.Float64bits(f)
+ var v uint64
+ for i := 0; i < 8; i++ {
+ v <<= 8
+ v |= u & 0xFF
+ u >>= 8
+ }
+ return v
+}
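A quick check of the swizzling, matching the doc.go claim that 17.0 travels as only three bytes (FE 31 40): this standalone sketch repeats the loop above on math.Float64bits(17.0).

package main

import (
	"fmt"
	"math"
)

func main() {
	u := math.Float64bits(17.0) // 0x4031000000000000
	var v uint64
	for i := 0; i < 8; i++ { // byte-reverse, as floatBits does
		v <<= 8
		v |= u & 0xFF
		u >>= 8
	}
	fmt.Printf("%#x\n", v) // 0x3140, which encodeUint emits as fe 31 40
}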
+
+// encFloat32 encodes the float32 with address p.
+func encFloat32(i *encInstr, state *encoderState, p unsafe.Pointer) {
+ f := *(*float32)(p)
+ if f != 0 || state.sendZero {
+ v := floatBits(float64(f))
+ state.update(i)
+ state.encodeUint(v)
+ }
+}
+
+// encFloat64 encodes the float64 with address p.
+func encFloat64(i *encInstr, state *encoderState, p unsafe.Pointer) {
+ f := *(*float64)(p)
+ if f != 0 || state.sendZero {
+ state.update(i)
+ v := floatBits(f)
+ state.encodeUint(v)
+ }
+}
+
+// encComplex64 encodes the complex64 with address p.
+// Complex numbers are just a pair of floating-point numbers, real part first.
+func encComplex64(i *encInstr, state *encoderState, p unsafe.Pointer) {
+ c := *(*complex64)(p)
+ if c != 0+0i || state.sendZero {
+ rpart := floatBits(float64(real(c)))
+ ipart := floatBits(float64(imag(c)))
+ state.update(i)
+ state.encodeUint(rpart)
+ state.encodeUint(ipart)
+ }
+}
+
+// encComplex128 encodes the complex128 with address p.
+func encComplex128(i *encInstr, state *encoderState, p unsafe.Pointer) {
+ c := *(*complex128)(p)
+ if c != 0+0i || state.sendZero {
+ rpart := floatBits(real(c))
+ ipart := floatBits(imag(c))
+ state.update(i)
+ state.encodeUint(rpart)
+ state.encodeUint(ipart)
+ }
+}
+
+// encUint8Array encodes the byte slice whose header has address p.
+// Byte arrays are encoded as an unsigned count followed by the raw bytes.
+func encUint8Array(i *encInstr, state *encoderState, p unsafe.Pointer) {
+ b := *(*[]byte)(p)
+ if len(b) > 0 || state.sendZero {
+ state.update(i)
+ state.encodeUint(uint64(len(b)))
+ state.b.Write(b)
+ }
+}
+
+// encString encodes the string whose header has address p.
+// Strings are encoded as an unsigned count followed by the raw bytes.
+func encString(i *encInstr, state *encoderState, p unsafe.Pointer) {
+ s := *(*string)(p)
+ if len(s) > 0 || state.sendZero {
+ state.update(i)
+ state.encodeUint(uint64(len(s)))
+ state.b.WriteString(s)
+ }
+}
+
+// encStructTerminator encodes the end of an encoded struct
+// as delta field number of 0.
+func encStructTerminator(i *encInstr, state *encoderState, p unsafe.Pointer) {
+ state.encodeUint(0)
+}
+
+// Execution engine
+
+// encEngine is an array of instructions indexed by field number of the encoding
+// data, typically a struct. It is executed top to bottom, walking the struct.
+type encEngine struct {
+ instr []encInstr
+}
+
+const singletonField = 0
+
+// encodeSingle encodes a single top-level non-struct value.
+func (enc *Encoder) encodeSingle(b *bytes.Buffer, engine *encEngine, basep uintptr) {
+ state := enc.newEncoderState(b)
+ state.fieldnum = singletonField
+ // There is no surrounding struct to frame the transmission, so we must
+ // generate data even if the item is zero. To do this, set sendZero.
+ state.sendZero = true
+ instr := &engine.instr[singletonField]
+ p := unsafe.Pointer(basep) // offset will be zero
+ if instr.indir > 0 {
+ if p = encIndirect(p, instr.indir); p == nil {
+ return
+ }
+ }
+ instr.op(instr, state, p)
+ enc.freeEncoderState(state)
+}
+
+// encodeStruct encodes a single struct value.
+func (enc *Encoder) encodeStruct(b *bytes.Buffer, engine *encEngine, basep uintptr) {
+ state := enc.newEncoderState(b)
+ state.fieldnum = -1
+ for i := 0; i < len(engine.instr); i++ {
+ instr := &engine.instr[i]
+ p := unsafe.Pointer(basep + instr.offset)
+ if instr.indir > 0 {
+ if p = encIndirect(p, instr.indir); p == nil {
+ continue
+ }
+ }
+ instr.op(instr, state, p)
+ }
+ enc.freeEncoderState(state)
+}
+
+// encodeArray encodes the array whose 0th element is at p.
+func (enc *Encoder) encodeArray(b *bytes.Buffer, p uintptr, op encOp, elemWid uintptr, elemIndir int, length int) {
+ state := enc.newEncoderState(b)
+ state.fieldnum = -1
+ state.sendZero = true
+ state.encodeUint(uint64(length))
+ for i := 0; i < length; i++ {
+ elemp := p
+ up := unsafe.Pointer(elemp)
+ if elemIndir > 0 {
+ if up = encIndirect(up, elemIndir); up == nil {
+ errorf("encodeArray: nil element")
+ }
+ elemp = uintptr(up)
+ }
+ op(nil, state, unsafe.Pointer(elemp))
+ p += uintptr(elemWid)
+ }
+ enc.freeEncoderState(state)
+}
+
+// encodeReflectValue is a helper for maps. It encodes the value v.
+func encodeReflectValue(state *encoderState, v reflect.Value, op encOp, indir int) {
+ for i := 0; i < indir && v.IsValid(); i++ {
+ v = reflect.Indirect(v)
+ }
+ if !v.IsValid() {
+ errorf("encodeReflectValue: nil element")
+ }
+ op(nil, state, unsafe.Pointer(unsafeAddr(v)))
+}
+
+// encodeMap encodes a map as unsigned count followed by key:value pairs.
+// Because map internals are not exposed, we must use reflection rather than
+// addresses.
+func (enc *Encoder) encodeMap(b *bytes.Buffer, mv reflect.Value, keyOp, elemOp encOp, keyIndir, elemIndir int) {
+ state := enc.newEncoderState(b)
+ state.fieldnum = -1
+ state.sendZero = true
+ keys := mv.MapKeys()
+ state.encodeUint(uint64(len(keys)))
+ for _, key := range keys {
+ encodeReflectValue(state, key, keyOp, keyIndir)
+ encodeReflectValue(state, mv.MapIndex(key), elemOp, elemIndir)
+ }
+ enc.freeEncoderState(state)
+}
+
+// encodeInterface encodes the interface value iv.
+// To send an interface, we send a string identifying the concrete type, followed
+// by the type identifier (which might require defining that type right now), followed
+// by the concrete value. A nil value gets sent as the empty string for the name,
+// followed by no value.
+func (enc *Encoder) encodeInterface(b *bytes.Buffer, iv reflect.Value) {
+ state := enc.newEncoderState(b)
+ state.fieldnum = -1
+ state.sendZero = true
+ if iv.IsNil() {
+ state.encodeUint(0)
+ return
+ }
+
+ ut := userType(iv.Elem().Type())
+ name, ok := concreteTypeToName[ut.base]
+ if !ok {
+ errorf("type not registered for interface: %s", ut.base)
+ }
+ // Send the name.
+ state.encodeUint(uint64(len(name)))
+ _, err := state.b.WriteString(name)
+ if err != nil {
+ error_(err)
+ }
+ // Define the type id if necessary.
+ enc.sendTypeDescriptor(enc.writer(), state, ut)
+ // Send the type id.
+ enc.sendTypeId(state, ut)
+ // Encode the value into a new buffer. Any nested type definitions
+ // should be written to b, before the encoded value.
+ enc.pushWriter(b)
+ data := new(bytes.Buffer)
+ data.Write(spaceForLength)
+ enc.encode(data, iv.Elem(), ut)
+ if enc.err != nil {
+ error_(enc.err)
+ }
+ enc.popWriter()
+ enc.writeMessage(b, data)
+ if enc.err != nil {
+ error_(enc.err)
+ }
+ enc.freeEncoderState(state)
+}
+
+// isZero returns whether the value is the zero of its type.
+func isZero(val reflect.Value) bool {
+ switch val.Kind() {
+ case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+ return val.Len() == 0
+ case reflect.Bool:
+ return !val.Bool()
+ case reflect.Complex64, reflect.Complex128:
+ return val.Complex() == 0
+ case reflect.Chan, reflect.Func, reflect.Ptr:
+ return val.IsNil()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return val.Int() == 0
+ case reflect.Float32, reflect.Float64:
+ return val.Float() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return val.Uint() == 0
+ }
+ panic("unknown type in isZero " + val.Type().String())
+}
+
+// encodeGobEncoder encodes a value that implements the GobEncoder interface.
+// The data is sent as a byte array.
+func (enc *Encoder) encodeGobEncoder(b *bytes.Buffer, v reflect.Value) {
+ // TODO: should we catch panics from the called method?
+ // We know it's a GobEncoder, so just call the method directly.
+ data, err := v.Interface().(GobEncoder).GobEncode()
+ if err != nil {
+ error_(err)
+ }
+ state := enc.newEncoderState(b)
+ state.fieldnum = -1
+ state.encodeUint(uint64(len(data)))
+ state.b.Write(data)
+ enc.freeEncoderState(state)
+}
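encodeGobEncoder simply frames whatever bytes the user's GobEncode method returns. A hypothetical type (invented for this sketch; any wire format would do) showing the method pair that this operator and its decoding counterpart call:

package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
)

// Celsius chooses a textual wire form for itself via GobEncode/GobDecode.
type Celsius float64

func (c Celsius) GobEncode() ([]byte, error) {
	return []byte(fmt.Sprintf("%g", float64(c))), nil
}

func (c *Celsius) GobDecode(data []byte) error {
	_, err := fmt.Sscanf(string(data), "%g", (*float64)(c))
	return err
}

func main() {
	var buf bytes.Buffer
	gob.NewEncoder(&buf).Encode(Celsius(36.6))
	var c Celsius
	gob.NewDecoder(&buf).Decode(&c)
	fmt.Println(c) // 36.6
}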
+
+var encOpTable = [...]encOp{
+ reflect.Bool: encBool,
+ reflect.Int: encInt,
+ reflect.Int8: encInt8,
+ reflect.Int16: encInt16,
+ reflect.Int32: encInt32,
+ reflect.Int64: encInt64,
+ reflect.Uint: encUint,
+ reflect.Uint8: encUint8,
+ reflect.Uint16: encUint16,
+ reflect.Uint32: encUint32,
+ reflect.Uint64: encUint64,
+ reflect.Uintptr: encUintptr,
+ reflect.Float32: encFloat32,
+ reflect.Float64: encFloat64,
+ reflect.Complex64: encComplex64,
+ reflect.Complex128: encComplex128,
+ reflect.String: encString,
+}
+
+// encOpFor returns (a pointer to) the encoding op for the base type under rt and
+// the indirection count to reach it.
+func (enc *Encoder) encOpFor(rt reflect.Type, inProgress map[reflect.Type]*encOp) (*encOp, int) {
+ ut := userType(rt)
+ // If the type implements GobEncoder, we handle it without further processing.
+ if ut.isGobEncoder {
+ return enc.gobEncodeOpFor(ut)
+ }
+ // If this type is already in progress, it's a recursive type (e.g. map[string]*T).
+ // Return the pointer to the op we're already building.
+ if opPtr := inProgress[rt]; opPtr != nil {
+ return opPtr, ut.indir
+ }
+ typ := ut.base
+ indir := ut.indir
+ k := typ.Kind()
+ var op encOp
+ if int(k) < len(encOpTable) {
+ op = encOpTable[k]
+ }
+ if op == nil {
+ inProgress[rt] = &op
+ // Special cases
+ switch t := typ; t.Kind() {
+ case reflect.Slice:
+ if t.Elem().Kind() == reflect.Uint8 {
+ op = encUint8Array
+ break
+ }
+ // Slices have a header; we decode it to find the underlying array.
+ elemOp, indir := enc.encOpFor(t.Elem(), inProgress)
+ op = func(i *encInstr, state *encoderState, p unsafe.Pointer) {
+ slice := (*reflect.SliceHeader)(p)
+ if !state.sendZero && slice.Len == 0 {
+ return
+ }
+ state.update(i)
+ state.enc.encodeArray(state.b, slice.Data, *elemOp, t.Elem().Size(), indir, int(slice.Len))
+ }
+ case reflect.Array:
+ // True arrays have size in the type.
+ elemOp, indir := enc.encOpFor(t.Elem(), inProgress)
+ op = func(i *encInstr, state *encoderState, p unsafe.Pointer) {
+ state.update(i)
+ state.enc.encodeArray(state.b, uintptr(p), *elemOp, t.Elem().Size(), indir, t.Len())
+ }
+ case reflect.Map:
+ keyOp, keyIndir := enc.encOpFor(t.Key(), inProgress)
+ elemOp, elemIndir := enc.encOpFor(t.Elem(), inProgress)
+ op = func(i *encInstr, state *encoderState, p unsafe.Pointer) {
+ // Maps cannot be accessed by moving addresses around the way
+ // that slices etc. can. We must recover a full reflection value for
+ // the iteration.
+ v := reflect.ValueOf(unsafe.Unreflect(t, unsafe.Pointer(p)))
+ mv := reflect.Indirect(v)
+ // We send zero-length (but non-nil) maps because the
+ // receiver might want to use the map. (Maps don't use append.)
+ if !state.sendZero && mv.IsNil() {
+ return
+ }
+ state.update(i)
+ state.enc.encodeMap(state.b, mv, *keyOp, *elemOp, keyIndir, elemIndir)
+ }
+ case reflect.Struct:
+ // Generate a closure that calls out to the engine for the nested type.
+ enc.getEncEngine(userType(typ))
+ info := mustGetTypeInfo(typ)
+ op = func(i *encInstr, state *encoderState, p unsafe.Pointer) {
+ state.update(i)
+ // indirect through info to delay evaluation for recursive structs
+ state.enc.encodeStruct(state.b, info.encoder, uintptr(p))
+ }
+ case reflect.Interface:
+ op = func(i *encInstr, state *encoderState, p unsafe.Pointer) {
+ // Interfaces transmit the name and contents of the concrete
+ // value they contain.
+ v := reflect.ValueOf(unsafe.Unreflect(t, unsafe.Pointer(p)))
+ iv := reflect.Indirect(v)
+ if !state.sendZero && (!iv.IsValid() || iv.IsNil()) {
+ return
+ }
+ state.update(i)
+ state.enc.encodeInterface(state.b, iv)
+ }
+ }
+ }
+ if op == nil {
+ errorf("can't happen: encode type %s", rt)
+ }
+ return &op, indir
+}
+
+// gobEncodeOpFor returns the op for a type that is known to implement
+// GobEncoder.
+func (enc *Encoder) gobEncodeOpFor(ut *userTypeInfo) (*encOp, int) {
+ rt := ut.user
+ if ut.encIndir == -1 {
+ rt = reflect.PtrTo(rt)
+ } else if ut.encIndir > 0 {
+ for i := int8(0); i < ut.encIndir; i++ {
+ rt = rt.Elem()
+ }
+ }
+ var op encOp
+ op = func(i *encInstr, state *encoderState, p unsafe.Pointer) {
+ var v reflect.Value
+ if ut.encIndir == -1 {
+ // Need to climb up one level to turn value into pointer.
+ v = reflect.ValueOf(unsafe.Unreflect(rt, unsafe.Pointer(&p)))
+ } else {
+ v = reflect.ValueOf(unsafe.Unreflect(rt, p))
+ }
+ if !state.sendZero && isZero(v) {
+ return
+ }
+ state.update(i)
+ state.enc.encodeGobEncoder(state.b, v)
+ }
+ return &op, int(ut.encIndir) // encIndir: op will get called with p == address of receiver.
+}
+
+// compileEnc returns the engine to compile the type.
+func (enc *Encoder) compileEnc(ut *userTypeInfo) *encEngine {
+ srt := ut.base
+ engine := new(encEngine)
+ seen := make(map[reflect.Type]*encOp)
+ rt := ut.base
+ if ut.isGobEncoder {
+ rt = ut.user
+ }
+ if !ut.isGobEncoder &&
+ srt.Kind() == reflect.Struct {
+ for fieldNum, wireFieldNum := 0, 0; fieldNum < srt.NumField(); fieldNum++ {
+ f := srt.Field(fieldNum)
+ if !isExported(f.Name) {
+ continue
+ }
+ op, indir := enc.encOpFor(f.Type, seen)
+ engine.instr = append(engine.instr, encInstr{*op, wireFieldNum, indir, uintptr(f.Offset)})
+ wireFieldNum++
+ }
+ if srt.NumField() > 0 && len(engine.instr) == 0 {
+ errorf("type %s has no exported fields", rt)
+ }
+ engine.instr = append(engine.instr, encInstr{encStructTerminator, 0, 0, 0})
+ } else {
+ engine.instr = make([]encInstr, 1)
+ op, indir := enc.encOpFor(rt, seen)
+ engine.instr[0] = encInstr{*op, singletonField, indir, 0} // offset is zero
+ }
+ return engine
+}
+
+// getEncEngine returns the engine to compile the type.
+// typeLock must be held (or we're in initialization and guaranteed single-threaded).
+func (enc *Encoder) getEncEngine(ut *userTypeInfo) *encEngine {
+ info, err1 := getTypeInfo(ut)
+ if err1 != nil {
+ error_(err1)
+ }
+ if info.encoder == nil {
+ // mark this engine as underway before compiling to handle recursive types.
+ info.encoder = new(encEngine)
+ info.encoder = enc.compileEnc(ut)
+ }
+ return info.encoder
+}
+
+// lockAndGetEncEngine is a function that locks and compiles.
+// This lets us hold the lock only while compiling, not when encoding.
+func (enc *Encoder) lockAndGetEncEngine(ut *userTypeInfo) *encEngine {
+ typeLock.Lock()
+ defer typeLock.Unlock()
+ return enc.getEncEngine(ut)
+}
+
+func (enc *Encoder) encode(b *bytes.Buffer, value reflect.Value, ut *userTypeInfo) {
+ defer catchError(&enc.err)
+ engine := enc.lockAndGetEncEngine(ut)
+ indir := ut.indir
+ if ut.isGobEncoder {
+ indir = int(ut.encIndir)
+ }
+ for i := 0; i < indir; i++ {
+ value = reflect.Indirect(value)
+ }
+ if !ut.isGobEncoder && value.Type().Kind() == reflect.Struct {
+ enc.encodeStruct(b, engine, unsafeAddr(value))
+ } else {
+ enc.encodeSingle(b, engine, unsafeAddr(value))
+ }
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gob
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "reflect"
+ "sync"
+)
+
+// An Encoder manages the transmission of type and data information to the
+// other side of a connection.
+type Encoder struct {
+ mutex sync.Mutex // each item must be sent atomically
+ w []io.Writer // where to send the data
+ sent map[reflect.Type]typeId // which types we've already sent
+ countState *encoderState // stage for writing counts
+ freeList *encoderState // list of free encoderStates; avoids reallocation
+ byteBuf bytes.Buffer // buffer for top-level encoderState
+ err error
+}
+
+// Before we encode a message, we reserve space at the head of the
+// buffer in which to encode its length. This means we can use the
+// buffer to assemble the message without another allocation.
+const maxLength = 9 // Maximum size of an encoded length.
+var spaceForLength = make([]byte, maxLength)
+
+// NewEncoder returns a new encoder that will transmit on the io.Writer.
+func NewEncoder(w io.Writer) *Encoder {
+ enc := new(Encoder)
+ enc.w = []io.Writer{w}
+ enc.sent = make(map[reflect.Type]typeId)
+ enc.countState = enc.newEncoderState(new(bytes.Buffer))
+ return enc
+}
+
+// writer() returns the innermost writer the encoder is using
+func (enc *Encoder) writer() io.Writer {
+ return enc.w[len(enc.w)-1]
+}
+
+// pushWriter adds a writer to the encoder.
+func (enc *Encoder) pushWriter(w io.Writer) {
+ enc.w = append(enc.w, w)
+}
+
+// popWriter pops the innermost writer.
+func (enc *Encoder) popWriter() {
+ enc.w = enc.w[0 : len(enc.w)-1]
+}
+
+func (enc *Encoder) badType(rt reflect.Type) {
+ enc.setError(errors.New("gob: can't encode type " + rt.String()))
+}
+
+func (enc *Encoder) setError(err error) {
+ if enc.err == nil { // remember the first.
+ enc.err = err
+ }
+}
+
+// writeMessage sends the data item preceded by an unsigned count of its length.
+func (enc *Encoder) writeMessage(w io.Writer, b *bytes.Buffer) {
+ // Space has been reserved for the length at the head of the message.
+ // This is a little dirty: we grab the slice from the bytes.Buffer and massage
+ // it by hand.
+ message := b.Bytes()
+ messageLen := len(message) - maxLength
+ // Encode the length.
+ enc.countState.b.Reset()
+ enc.countState.encodeUint(uint64(messageLen))
+ // Copy the length to be a prefix of the message.
+ offset := maxLength - enc.countState.b.Len()
+ copy(message[offset:], enc.countState.b.Bytes())
+ // Write the data.
+ _, err := w.Write(message[offset:])
+ // Drain the buffer and restore the space at the front for the count of the next message.
+ b.Reset()
+ b.Write(spaceForLength)
+ if err != nil {
+ enc.setError(err)
+ }
+}
+
+// sendActualType sends the requested type, without further investigation, unless
+// it's been sent before.
+func (enc *Encoder) sendActualType(w io.Writer, state *encoderState, ut *userTypeInfo, actual reflect.Type) (sent bool) {
+ if _, alreadySent := enc.sent[actual]; alreadySent {
+ return false
+ }
+ typeLock.Lock()
+ info, err := getTypeInfo(ut)
+ typeLock.Unlock()
+ if err != nil {
+ enc.setError(err)
+ return
+ }
+ // Send the pair (-id, type)
+ // Id:
+ state.encodeInt(-int64(info.id))
+ // Type:
+ enc.encode(state.b, reflect.ValueOf(info.wire), wireTypeUserInfo)
+ enc.writeMessage(w, state.b)
+ if enc.err != nil {
+ return
+ }
+
+ // Remember we've sent this type, both what the user gave us and the base type.
+ enc.sent[ut.base] = info.id
+ if ut.user != ut.base {
+ enc.sent[ut.user] = info.id
+ }
+ // Now send the inner types
+ switch st := actual; st.Kind() {
+ case reflect.Struct:
+ for i := 0; i < st.NumField(); i++ {
+ enc.sendType(w, state, st.Field(i).Type)
+ }
+ case reflect.Array, reflect.Slice:
+ enc.sendType(w, state, st.Elem())
+ case reflect.Map:
+ enc.sendType(w, state, st.Key())
+ enc.sendType(w, state, st.Elem())
+ }
+ return true
+}
+
+// sendType sends the type info to the other side, if necessary.
+func (enc *Encoder) sendType(w io.Writer, state *encoderState, origt reflect.Type) (sent bool) {
+ ut := userType(origt)
+ if ut.isGobEncoder {
+ // The rules are different: regardless of the underlying type's representation,
+ // we need to tell the other side that this exact type is a GobEncoder.
+ return enc.sendActualType(w, state, ut, ut.user)
+ }
+
+ // It's a concrete value, so drill down to the base type.
+ switch rt := ut.base; rt.Kind() {
+ default:
+ // Basic types and interfaces do not need to be described.
+ return
+ case reflect.Slice:
+ // If it's []uint8, don't send; it's considered basic.
+ if rt.Elem().Kind() == reflect.Uint8 {
+ return
+ }
+ // Otherwise we do send.
+ break
+ case reflect.Array:
+ // arrays must be sent so we know their lengths and element types.
+ break
+ case reflect.Map:
+ // maps must be sent so we know their lengths and key/value types.
+ break
+ case reflect.Struct:
+ // structs must be sent so we know their fields.
+ break
+ case reflect.Chan, reflect.Func:
+ // Probably a bad field in a struct.
+ enc.badType(rt)
+ return
+ }
+
+ return enc.sendActualType(w, state, ut, ut.base)
+}
+
+// Encode transmits the data item represented by the empty interface value,
+// guaranteeing that all necessary type information has been transmitted first.
+func (enc *Encoder) Encode(e interface{}) error {
+ return enc.EncodeValue(reflect.ValueOf(e))
+}
+
+// sendTypeDescriptor makes sure the remote side knows about this type.
+// It will send a descriptor if this is the first time the type has been
+// sent.
+func (enc *Encoder) sendTypeDescriptor(w io.Writer, state *encoderState, ut *userTypeInfo) {
+ // Make sure the type is known to the other side.
+ // First, have we already sent this type?
+ rt := ut.base
+ if ut.isGobEncoder {
+ rt = ut.user
+ }
+ if _, alreadySent := enc.sent[rt]; !alreadySent {
+ // No, so send it.
+ sent := enc.sendType(w, state, rt)
+ if enc.err != nil {
+ return
+ }
+ // If the type info has still not been transmitted, it means we have
+ // a singleton basic type (int, []byte etc.) at top level. We don't
+ // need to send the type info but we do need to update enc.sent.
+ if !sent {
+ typeLock.Lock()
+ info, err := getTypeInfo(ut)
+ typeLock.Unlock()
+ if err != nil {
+ enc.setError(err)
+ return
+ }
+ enc.sent[rt] = info.id
+ }
+ }
+}
+
+// sendTypeId sends the id, which must have already been defined.
+func (enc *Encoder) sendTypeId(state *encoderState, ut *userTypeInfo) {
+ // Identify the type of this top-level value.
+ state.encodeInt(int64(enc.sent[ut.base]))
+}
+
+// EncodeValue transmits the data item represented by the reflection value,
+// guaranteeing that all necessary type information has been transmitted first.
+func (enc *Encoder) EncodeValue(value reflect.Value) error {
+ // Make sure we're single-threaded through here, so multiple
+ // goroutines can share an encoder.
+ enc.mutex.Lock()
+ defer enc.mutex.Unlock()
+
+ // Remove any nested writers remaining due to previous errors.
+ enc.w = enc.w[0:1]
+
+ ut, err := validUserType(value.Type())
+ if err != nil {
+ return err
+ }
+
+ enc.err = nil
+ enc.byteBuf.Reset()
+ enc.byteBuf.Write(spaceForLength)
+ state := enc.newEncoderState(&enc.byteBuf)
+
+ enc.sendTypeDescriptor(enc.writer(), state, ut)
+ enc.sendTypeId(state, ut)
+ if enc.err != nil {
+ return enc.err
+ }
+
+ // Encode the object.
+ enc.encode(state.b, value, ut)
+ if enc.err == nil {
+ enc.writeMessage(enc.writer(), state.b)
+ }
+
+ enc.freeEncoderState(state)
+ return enc.err
+}
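
EncodeValue above fixes the order of traffic on the wire: any needed type descriptors go out first, then the type id, then the value itself, each framed as a length-prefixed message. A minimal round-trip sketch of that flow from the caller's side, written against the public encoding/gob API with an illustrative Point type:

package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
	"log"
)

// Point is an illustrative type; its wireType descriptor travels once,
// ahead of the first Point value, and later values reuse it.
type Point struct{ X, Y int }

func main() {
	var buf bytes.Buffer
	enc := gob.NewEncoder(&buf)
	dec := gob.NewDecoder(&buf)

	if err := enc.Encode(Point{1, 2}); err != nil { // descriptor + value
		log.Fatal(err)
	}
	if err := enc.Encode(Point{3, 4}); err != nil { // value only
		log.Fatal(err)
	}

	var p Point
	for dec.Decode(&p) == nil {
		fmt.Println(p) // {1 2} then {3 4}
	}
}
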
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gob
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "reflect"
+ "strings"
+ "testing"
+)
+
+type ET2 struct {
+ X string
+}
+
+type ET1 struct {
+ A int
+ Et2 *ET2
+ Next *ET1
+}
+
+// Like ET1 but with a different name for a field
+type ET3 struct {
+ A int
+ Et2 *ET2
+ DifferentNext *ET1
+}
+
+// Like ET1 but with a different type for a field
+type ET4 struct {
+ A int
+ Et2 float64
+ Next int
+}
+
+func TestEncoderDecoder(t *testing.T) {
+ b := new(bytes.Buffer)
+ enc := NewEncoder(b)
+ et1 := new(ET1)
+ et1.A = 7
+ et1.Et2 = new(ET2)
+ err := enc.Encode(et1)
+ if err != nil {
+ t.Error("encoder fail:", err)
+ }
+ dec := NewDecoder(b)
+ newEt1 := new(ET1)
+ err = dec.Decode(newEt1)
+ if err != nil {
+ t.Fatal("error decoding ET1:", err)
+ }
+
+ if !reflect.DeepEqual(et1, newEt1) {
+ t.Fatalf("invalid data for et1: expected %+v; got %+v", *et1, *newEt1)
+ }
+ if b.Len() != 0 {
+ t.Error("not at eof;", b.Len(), "bytes left")
+ }
+
+ enc.Encode(et1)
+ newEt1 = new(ET1)
+ err = dec.Decode(newEt1)
+ if err != nil {
+ t.Fatal("round 2: error decoding ET1:", err)
+ }
+ if !reflect.DeepEqual(et1, newEt1) {
+ t.Fatalf("round 2: invalid data for et1: expected %+v; got %+v", *et1, *newEt1)
+ }
+ if b.Len() != 0 {
+ t.Error("round 2: not at eof;", b.Len(), "bytes left")
+ }
+
+ // Now test with a running encoder/decoder pair that we recognize a type mismatch.
+ err = enc.Encode(et1)
+ if err != nil {
+ t.Error("round 3: encoder fail:", err)
+ }
+ newEt2 := new(ET2)
+ err = dec.Decode(newEt2)
+ if err == nil {
+ t.Fatal("round 3: expected `bad type' error decoding ET2")
+ }
+}
+
+// Run one value through the encoder/decoder, but use the wrong type.
+// Input is always an ET1; we compare it to whatever is under 'e'.
+func badTypeCheck(e interface{}, shouldFail bool, msg string, t *testing.T) {
+ b := new(bytes.Buffer)
+ enc := NewEncoder(b)
+ et1 := new(ET1)
+ et1.A = 7
+ et1.Et2 = new(ET2)
+ err := enc.Encode(et1)
+ if err != nil {
+ t.Error("encoder fail:", err)
+ }
+ dec := NewDecoder(b)
+ err = dec.Decode(e)
+ if shouldFail && err == nil {
+ t.Error("expected error for", msg)
+ }
+ if !shouldFail && err != nil {
+ t.Error("unexpected error for", msg, err)
+ }
+}
+
+// Test that we recognize a bad type the first time.
+func TestWrongTypeDecoder(t *testing.T) {
+ badTypeCheck(new(ET2), true, "no fields in common", t)
+ badTypeCheck(new(ET3), false, "different name of field", t)
+ badTypeCheck(new(ET4), true, "different type of field", t)
+}
+
+func corruptDataCheck(s string, err error, t *testing.T) {
+ b := bytes.NewBufferString(s)
+ dec := NewDecoder(b)
+ err1 := dec.Decode(new(ET2))
+ if err1 != err {
+ t.Errorf("from %q expected error %s; got %s", s, err, err1)
+ }
+}
+
+// Check that we survive bad data.
+func TestBadData(t *testing.T) {
+ corruptDataCheck("", io.EOF, t)
+ corruptDataCheck("\x7Fhi", io.ErrUnexpectedEOF, t)
+ corruptDataCheck("\x03now is the time for all good men", errBadType, t)
+}
+
+// Types not supported by the Encoder.
+var unsupportedValues = []interface{}{
+ make(chan int),
+ func(a int) bool { return true },
+}
+
+func TestUnsupported(t *testing.T) {
+ var b bytes.Buffer
+ enc := NewEncoder(&b)
+ for _, v := range unsupportedValues {
+ err := enc.Encode(v)
+ if err == nil {
+ t.Errorf("expected error for %T; got none", v)
+ }
+ }
+}
+
+func encAndDec(in, out interface{}) error {
+ b := new(bytes.Buffer)
+ enc := NewEncoder(b)
+ err := enc.Encode(in)
+ if err != nil {
+ return err
+ }
+ dec := NewDecoder(b)
+ err = dec.Decode(out)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func TestTypeToPtrType(t *testing.T) {
+ // Encode a T, decode a *T
+ type Type0 struct {
+ A int
+ }
+ t0 := Type0{7}
+ t0p := new(Type0)
+ if err := encAndDec(t0, t0p); err != nil {
+ t.Error(err)
+ }
+}
+
+func TestPtrTypeToType(t *testing.T) {
+ // Encode a *T, decode a T
+ type Type1 struct {
+ A uint
+ }
+ t1p := &Type1{17}
+ var t1 Type1
+ if err := encAndDec(t1, t1p); err != nil {
+ t.Error(err)
+ }
+}
+
+func TestTypeToPtrPtrPtrPtrType(t *testing.T) {
+ type Type2 struct {
+ A ****float64
+ }
+ t2 := Type2{}
+ t2.A = new(***float64)
+ *t2.A = new(**float64)
+ **t2.A = new(*float64)
+ ***t2.A = new(float64)
+ ****t2.A = 27.4
+ t2pppp := new(***Type2)
+ if err := encAndDec(t2, t2pppp); err != nil {
+ t.Fatal(err)
+ }
+ if ****(****t2pppp).A != ****t2.A {
+ t.Errorf("wrong value after decode: %g not %g", ****(****t2pppp).A, ****t2.A)
+ }
+}
+
+func TestSlice(t *testing.T) {
+ type Type3 struct {
+ A []string
+ }
+ t3p := &Type3{[]string{"hello", "world"}}
+ var t3 Type3
+ if err := encAndDec(t3, t3p); err != nil {
+ t.Error(err)
+ }
+}
+
+func TestValueError(t *testing.T) {
+ // Encode a *T, decode a T
+ type Type4 struct {
+ A int
+ }
+ t4p := &Type4{3}
+ var t4 Type4 // note: not a pointer.
+ if err := encAndDec(t4p, t4); err == nil || strings.Index(err.Error(), "pointer") < 0 {
+ t.Error("expected error about pointer; got", err)
+ }
+}
+
+func TestArray(t *testing.T) {
+ type Type5 struct {
+ A [3]string
+ B [3]byte
+ }
+ type Type6 struct {
+ A [2]string // can't hold t5.a
+ }
+ t5 := Type5{[3]string{"hello", ",", "world"}, [3]byte{1, 2, 3}}
+ var t5p Type5
+ if err := encAndDec(t5, &t5p); err != nil {
+ t.Error(err)
+ }
+ var t6 Type6
+ if err := encAndDec(t5, &t6); err == nil {
+ t.Error("should fail with mismatched array sizes")
+ }
+}
+
+func TestRecursiveMapType(t *testing.T) {
+ type recursiveMap map[string]recursiveMap
+ r1 := recursiveMap{"A": recursiveMap{"B": nil, "C": nil}, "D": nil}
+ r2 := make(recursiveMap)
+ if err := encAndDec(r1, &r2); err != nil {
+ t.Error(err)
+ }
+}
+
+func TestRecursiveSliceType(t *testing.T) {
+ type recursiveSlice []recursiveSlice
+ r1 := recursiveSlice{0: recursiveSlice{0: nil}, 1: nil}
+ r2 := make(recursiveSlice, 0)
+ if err := encAndDec(r1, &r2); err != nil {
+ t.Error(err)
+ }
+}
+
+// Regression test for bug: must send zero values inside arrays
+func TestDefaultsInArray(t *testing.T) {
+ type Type7 struct {
+ B []bool
+ I []int
+ S []string
+ F []float64
+ }
+ t7 := Type7{
+ []bool{false, false, true},
+ []int{0, 0, 1},
+ []string{"hi", "", "there"},
+ []float64{0, 0, 1},
+ }
+ var t7p Type7
+ if err := encAndDec(t7, &t7p); err != nil {
+ t.Error(err)
+ }
+}
+
+var testInt int
+var testFloat32 float32
+var testString string
+var testSlice []string
+var testMap map[string]int
+var testArray [7]int
+
+type SingleTest struct {
+ in interface{}
+ out interface{}
+ err string
+}
+
+var singleTests = []SingleTest{
+ {17, &testInt, ""},
+ {float32(17.5), &testFloat32, ""},
+ {"bike shed", &testString, ""},
+ {[]string{"bike", "shed", "paint", "color"}, &testSlice, ""},
+ {map[string]int{"seven": 7, "twelve": 12}, &testMap, ""},
+ {[7]int{4, 55, 0, 0, 0, 0, 0}, &testArray, ""}, // case that once triggered a bug
+ {[7]int{4, 55, 1, 44, 22, 66, 1234}, &testArray, ""},
+
+ // Decode errors
+ {172, &testFloat32, "wrong type"},
+}
+
+func TestSingletons(t *testing.T) {
+ b := new(bytes.Buffer)
+ enc := NewEncoder(b)
+ dec := NewDecoder(b)
+ for _, test := range singleTests {
+ b.Reset()
+ err := enc.Encode(test.in)
+ if err != nil {
+ t.Errorf("error encoding %v: %s", test.in, err)
+ continue
+ }
+ err = dec.Decode(test.out)
+ switch {
+ case err != nil && test.err == "":
+ t.Errorf("error decoding %v: %s", test.in, err)
+ continue
+ case err == nil && test.err != "":
+ t.Errorf("expected error decoding %v: %s", test.in, test.err)
+ continue
+ case err != nil && test.err != "":
+ if strings.Index(err.Error(), test.err) < 0 {
+ t.Errorf("wrong error decoding %v: wanted %s, got %v", test.in, test.err, err)
+ }
+ continue
+ }
+ // Get rid of the pointer in the rhs
+ val := reflect.ValueOf(test.out).Elem().Interface()
+ if !reflect.DeepEqual(test.in, val) {
+ t.Errorf("decoding singleton: expected %v got %v", test.in, val)
+ }
+ }
+}
+
+func TestStructNonStruct(t *testing.T) {
+ type Struct struct {
+ A string
+ }
+ type NonStruct string
+ s := Struct{"hello"}
+ var sp Struct
+ if err := encAndDec(s, &sp); err != nil {
+ t.Error(err)
+ }
+ var ns NonStruct
+ if err := encAndDec(s, &ns); err == nil {
+ t.Error("should get error for struct/non-struct")
+ } else if strings.Index(err.Error(), "type") < 0 {
+ t.Error("for struct/non-struct expected type error; got", err)
+ }
+ // Now try the other way
+ var nsp NonStruct
+ if err := encAndDec(ns, &nsp); err != nil {
+ t.Error(err)
+ }
+ if err := encAndDec(ns, &s); err == nil {
+ t.Error("should get error for non-struct/struct")
+ } else if strings.Index(err.Error(), "type") < 0 {
+ t.Error("for non-struct/struct expected type error; got", err)
+ }
+}
+
+type interfaceIndirectTestI interface {
+ F() bool
+}
+
+type interfaceIndirectTestT struct{}
+
+func (this *interfaceIndirectTestT) F() bool {
+ return true
+}
+
+// A version of a bug reported on golang-nuts. Also tests top-level
+// slice of interfaces. The issue was that registering *T caused T to
+// be stored as the concrete type.
+func TestInterfaceIndirect(t *testing.T) {
+ Register(&interfaceIndirectTestT{})
+ b := new(bytes.Buffer)
+ w := []interfaceIndirectTestI{&interfaceIndirectTestT{}}
+ err := NewEncoder(b).Encode(w)
+ if err != nil {
+ t.Fatal("encode error:", err)
+ }
+
+ var r []interfaceIndirectTestI
+ err = NewDecoder(b).Decode(&r)
+ if err != nil {
+ t.Fatal("decode error:", err)
+ }
+}
+
+// Now follow various tests that decode into things that can't represent the
+// encoded value, all of which should be legal.
+
+// Also, when the ignored object contains an interface value, it may define
+// types. Make sure that skipping the value still defines the types by using
+// the encoder/decoder pair to send a value afterwards. If an interface
+// is sent, its type in the test is always NewType0, so this checks that the
+// encoder and decoder don't skew with respect to type definitions.
+
+type Struct0 struct {
+ I interface{}
+}
+
+type NewType0 struct {
+ S string
+}
+
+type ignoreTest struct {
+ in, out interface{}
+}
+
+var ignoreTests = []ignoreTest{
+ // Decode normal struct into an empty struct
+ {&struct{ A int }{23}, &struct{}{}},
+ // Decode normal struct into a nil.
+ {&struct{ A int }{23}, nil},
+ // Decode singleton string into a nil.
+ {"hello, world", nil},
+ // Decode singleton slice into a nil.
+ {[]int{1, 2, 3, 4}, nil},
+ // Decode struct containing an interface into a nil.
+ {&Struct0{&NewType0{"value0"}}, nil},
+ // Decode singleton slice of interfaces into a nil.
+ {[]interface{}{"hi", &NewType0{"value1"}, 23}, nil},
+}
+
+func TestDecodeIntoNothing(t *testing.T) {
+ Register(new(NewType0))
+ for i, test := range ignoreTests {
+ b := new(bytes.Buffer)
+ enc := NewEncoder(b)
+ err := enc.Encode(test.in)
+ if err != nil {
+ t.Errorf("%d: encode error: %s", i, err)
+ continue
+ }
+ dec := NewDecoder(b)
+ err = dec.Decode(test.out)
+ if err != nil {
+ t.Errorf("%d: decode error: %s", i, err)
+ continue
+ }
+ // Now see if the encoder and decoder are in a consistent state.
+ str := fmt.Sprintf("Value %d", i)
+ err = enc.Encode(&NewType0{str})
+ if err != nil {
+ t.Fatalf("%d: NewType0 encode error: %s", i, err)
+ }
+ ns := new(NewType0)
+ err = dec.Decode(ns)
+ if err != nil {
+ t.Fatalf("%d: NewType0 decode error: %s", i, err)
+ }
+ if ns.S != str {
+ t.Fatalf("%d: expected %q got %q", i, str, ns.S)
+ }
+ }
+}
+
+// Another bug from golang-nuts, involving nested interfaces.
+type Bug0Outer struct {
+ Bug0Field interface{}
+}
+
+type Bug0Inner struct {
+ A int
+}
+
+func TestNestedInterfaces(t *testing.T) {
+ var buf bytes.Buffer
+ e := NewEncoder(&buf)
+ d := NewDecoder(&buf)
+ Register(new(Bug0Outer))
+ Register(new(Bug0Inner))
+ f := &Bug0Outer{&Bug0Outer{&Bug0Inner{7}}}
+ var v interface{} = f
+ err := e.Encode(&v)
+ if err != nil {
+ t.Fatal("Encode:", err)
+ }
+ err = d.Decode(&v)
+ if err != nil {
+ t.Fatal("Decode:", err)
+ }
+ // Make sure it decoded correctly.
+ outer1, ok := v.(*Bug0Outer)
+ if !ok {
+ t.Fatalf("v not Bug0Outer: %T", v)
+ }
+ outer2, ok := outer1.Bug0Field.(*Bug0Outer)
+ if !ok {
+ t.Fatalf("v.Bug0Field not Bug0Outer: %T", outer1.Bug0Field)
+ }
+ inner, ok := outer2.Bug0Field.(*Bug0Inner)
+ if !ok {
+ t.Fatalf("v.Bug0Field.Bug0Field not Bug0Inner: %T", outer2.Bug0Field)
+ }
+ if inner.A != 7 {
+ t.Fatalf("final value %d; expected %d", inner.A, 7)
+ }
+}
+
+// The bugs keep coming. We forgot to send map subtypes before the map.
+
+type Bug1Elem struct {
+ Name string
+ Id int
+}
+
+type Bug1StructMap map[string]Bug1Elem
+
+func bug1EncDec(in Bug1StructMap, out *Bug1StructMap) error {
+ return nil
+}
+
+func TestMapBug1(t *testing.T) {
+ in := make(Bug1StructMap)
+ in["val1"] = Bug1Elem{"elem1", 1}
+ in["val2"] = Bug1Elem{"elem2", 2}
+
+ b := new(bytes.Buffer)
+ enc := NewEncoder(b)
+ err := enc.Encode(in)
+ if err != nil {
+ t.Fatal("encode:", err)
+ }
+ dec := NewDecoder(b)
+ out := make(Bug1StructMap)
+ err = dec.Decode(&out)
+ if err != nil {
+ t.Fatal("decode:", err)
+ }
+ if !reflect.DeepEqual(in, out) {
+ t.Errorf("mismatch: %v %v", in, out)
+ }
+}
+
+func TestGobMapInterfaceEncode(t *testing.T) {
+ m := map[string]interface{}{
+ "up": uintptr(0),
+ "i0": []int{-1},
+ "i1": []int8{-1},
+ "i2": []int16{-1},
+ "i3": []int32{-1},
+ "i4": []int64{-1},
+ "u0": []uint{1},
+ "u1": []uint8{1},
+ "u2": []uint16{1},
+ "u3": []uint32{1},
+ "u4": []uint64{1},
+ "f0": []float32{1},
+ "f1": []float64{1},
+ "c0": []complex64{complex(2, -2)},
+ "c1": []complex128{complex(2, float64(-2))},
+ "us": []uintptr{0},
+ "bo": []bool{false},
+ "st": []string{"s"},
+ }
+ buf := bytes.NewBuffer(nil)
+ enc := NewEncoder(buf)
+ err := enc.Encode(m)
+ if err != nil {
+ t.Errorf("encode map: %s", err)
+ }
+}
+
+func TestSliceReusesMemory(t *testing.T) {
+ buf := bytes.NewBuffer(nil)
+ // Bytes
+ {
+ x := []byte("abcd")
+ enc := NewEncoder(buf)
+ err := enc.Encode(x)
+ if err != nil {
+ t.Errorf("bytes: encode: %s", err)
+ }
+ // Decode into y, which is big enough.
+ y := []byte("ABCDE")
+ addr := &y[0]
+ dec := NewDecoder(buf)
+ err = dec.Decode(&y)
+ if err != nil {
+ t.Fatal("bytes: decode:", err)
+ }
+ if !bytes.Equal(x, y) {
+ t.Errorf("bytes: expected %q got %q\n", x, y)
+ }
+ if addr != &y[0] {
+ t.Errorf("bytes: unnecessary reallocation")
+ }
+ }
+ // general slice
+ {
+ x := []rune("abcd")
+ enc := NewEncoder(buf)
+ err := enc.Encode(x)
+ if err != nil {
+ t.Errorf("ints: encode: %s", err)
+ }
+ // Decode into y, which is big enough.
+ y := []rune("ABCDE")
+ addr := &y[0]
+ dec := NewDecoder(buf)
+ err = dec.Decode(&y)
+ if err != nil {
+ t.Fatal("ints: decode:", err)
+ }
+ if !reflect.DeepEqual(x, y) {
+ t.Errorf("ints: expected %q got %q\n", x, y)
+ }
+ if addr != &y[0] {
+ t.Errorf("ints: unnecessary reallocation")
+ }
+ }
+}
+
+// Used to crash: negative count in recvMessage.
+func TestBadCount(t *testing.T) {
+ b := []byte{0xfb, 0xa5, 0x82, 0x2f, 0xca, 0x1}
+ if err := NewDecoder(bytes.NewBuffer(b)).Decode(nil); err == nil {
+ t.Error("expected error from bad count")
+ } else if err.Error() != errBadCount.Error() {
+ t.Error("expected bad count error; got", err)
+ }
+}
+
+// Verify that sequential Decoders built on a single input will
+// succeed if the input implements ReadByte and there is no
+// type information in the stream.
+func TestSequentialDecoder(t *testing.T) {
+ b := new(bytes.Buffer)
+ enc := NewEncoder(b)
+ const count = 10
+ for i := 0; i < count; i++ {
+ s := fmt.Sprintf("%d", i)
+ if err := enc.Encode(s); err != nil {
+ t.Error("encoder fail:", err)
+ }
+ }
+ for i := 0; i < count; i++ {
+ dec := NewDecoder(b)
+ var s string
+ if err := dec.Decode(&s); err != nil {
+ t.Fatal("decoder fail:", err)
+ }
+ if s != fmt.Sprintf("%d", i) {
+ t.Fatalf("decode expected %d got %s", i, s)
+ }
+ }
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gob
+
+import "fmt"
+
+// Errors in decoding and encoding are handled using panic and recover.
+// Panics caused by user error (that is, everything except run-time panics
+// such as "index out of bounds" errors) do not leave the file that caused
+// them, but are instead turned into plain error returns. Encoding and
+// decoding functions and methods that do not return an error either use
+// panic to report an error or are guaranteed error-free.
+
+// A gobError is used to distinguish errors (panics) generated in this package.
+type gobError struct {
+ err error
+}
+
+// errorf is like error_ but takes Printf-style arguments to construct an error.
+// It always prefixes the message with "gob: ".
+func errorf(format string, args ...interface{}) {
+ error_(fmt.Errorf("gob: "+format, args...))
+}
+
+// error_ wraps the argument error and uses it as the argument to panic.
+func error_(err error) {
+ panic(gobError{err})
+}
+
+// catchError is meant to be used as a deferred function to turn a panic(gobError) into a
+// plain error. It overwrites the error return of the function that deferred its call.
+func catchError(err *error) {
+ if e := recover(); e != nil {
+ *err = e.(gobError).err // Will re-panic if not one of our errors, such as a runtime error.
+ }
+ return
+}
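
The recover-based discipline described at the top of this file keeps deep encoding helpers free of error plumbing; only exported entry points install the handler. A stripped-down sketch of the same pattern, with names invented for illustration rather than taken from the package:

package main

import "fmt"

// failure marks panics raised deliberately by this sketch, so catch can
// tell them apart from genuine runtime panics, which it re-raises.
type failure struct{ err error }

// fail reports a problem from deep inside the call tree by panicking.
func fail(format string, args ...interface{}) {
	panic(failure{fmt.Errorf(format, args...)})
}

// catch converts a failure panic into the caller's error return value.
func catch(err *error) {
	if e := recover(); e != nil {
		*err = e.(failure).err // re-panics on anything that is not a failure
	}
}

// encodeField stands in for a helper that never returns an error itself.
func encodeField(v interface{}) {
	if v == nil {
		fail("sketch: cannot encode nil field")
	}
}

// Encode is the exported entry point: one deferred catch serves every
// helper beneath it.
func Encode(v interface{}) (err error) {
	defer catch(&err)
	encodeField(v)
	return nil
}

func main() {
	fmt.Println(Encode(42))  // <nil>
	fmt.Println(Encode(nil)) // sketch: cannot encode nil field
}
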
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains tests of the GobEncoder/GobDecoder support.
+
+package gob
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "strings"
+ "testing"
+)
+
+// Types that implement the GobEncoder/Decoder interfaces.
+
+type ByteStruct struct {
+ a byte // not an exported field
+}
+
+type StringStruct struct {
+ s string // not an exported field
+}
+
+type ArrayStruct struct {
+ a [8192]byte // not an exported field
+}
+
+type Gobber int
+
+type ValueGobber string // encodes with a value, decodes with a pointer.
+
+// The relevant methods
+
+func (g *ByteStruct) GobEncode() ([]byte, error) {
+ b := make([]byte, 3)
+ b[0] = g.a
+ b[1] = g.a + 1
+ b[2] = g.a + 2
+ return b, nil
+}
+
+func (g *ByteStruct) GobDecode(data []byte) error {
+ if g == nil {
+ return errors.New("NIL RECEIVER")
+ }
+ // Expect N sequential-valued bytes.
+ if len(data) == 0 {
+ return io.EOF
+ }
+ g.a = data[0]
+ for i, c := range data {
+ if c != g.a+byte(i) {
+ return errors.New("invalid data sequence")
+ }
+ }
+ return nil
+}
+
+func (g *StringStruct) GobEncode() ([]byte, error) {
+ return []byte(g.s), nil
+}
+
+func (g *StringStruct) GobDecode(data []byte) error {
+ // Expect N sequential-valued bytes.
+ if len(data) == 0 {
+ return io.EOF
+ }
+ a := data[0]
+ for i, c := range data {
+ if c != a+byte(i) {
+ return errors.New("invalid data sequence")
+ }
+ }
+ g.s = string(data)
+ return nil
+}
+
+func (a *ArrayStruct) GobEncode() ([]byte, error) {
+ return a.a[:], nil
+}
+
+func (a *ArrayStruct) GobDecode(data []byte) error {
+ if len(data) != len(a.a) {
+ return errors.New("wrong length in array decode")
+ }
+ copy(a.a[:], data)
+ return nil
+}
+
+func (g *Gobber) GobEncode() ([]byte, error) {
+ return []byte(fmt.Sprintf("VALUE=%d", *g)), nil
+}
+
+func (g *Gobber) GobDecode(data []byte) error {
+ _, err := fmt.Sscanf(string(data), "VALUE=%d", (*int)(g))
+ return err
+}
+
+func (v ValueGobber) GobEncode() ([]byte, error) {
+ return []byte(fmt.Sprintf("VALUE=%s", v)), nil
+}
+
+func (v *ValueGobber) GobDecode(data []byte) error {
+ _, err := fmt.Sscanf(string(data), "VALUE=%s", (*string)(v))
+ return err
+}
+
+// Structs that include GobEncodable fields.
+
+type GobTest0 struct {
+ X int // guarantee we have something in common with GobTest*
+ G *ByteStruct
+}
+
+type GobTest1 struct {
+ X int // guarantee we have something in common with GobTest*
+ G *StringStruct
+}
+
+type GobTest2 struct {
+ X int // guarantee we have something in common with GobTest*
+ G string // not a GobEncoder - should give us errors
+}
+
+type GobTest3 struct {
+ X int // guarantee we have something in common with GobTest*
+ G *Gobber
+}
+
+type GobTest4 struct {
+ X int // guarantee we have something in common with GobTest*
+ V ValueGobber
+}
+
+type GobTest5 struct {
+ X int // guarantee we have something in common with GobTest*
+ V *ValueGobber
+}
+
+type GobTestIgnoreEncoder struct {
+ X int // guarantee we have something in common with GobTest*
+}
+
+type GobTestValueEncDec struct {
+ X int // guarantee we have something in common with GobTest*
+ G StringStruct // not a pointer.
+}
+
+type GobTestIndirectEncDec struct {
+ X int // guarantee we have something in common with GobTest*
+ G ***StringStruct // indirections to the receiver.
+}
+
+type GobTestArrayEncDec struct {
+ X int // guarantee we have something in common with GobTest*
+ A ArrayStruct // not a pointer.
+}
+
+type GobTestIndirectArrayEncDec struct {
+ X int // guarantee we have something in common with GobTest*
+ A ***ArrayStruct // indirections to a large receiver.
+}
+
+func TestGobEncoderField(t *testing.T) {
+ b := new(bytes.Buffer)
+ // First a field that's a structure.
+ enc := NewEncoder(b)
+ err := enc.Encode(GobTest0{17, &ByteStruct{'A'}})
+ if err != nil {
+ t.Fatal("encode error:", err)
+ }
+ dec := NewDecoder(b)
+ x := new(GobTest0)
+ err = dec.Decode(x)
+ if err != nil {
+ t.Fatal("decode error:", err)
+ }
+ if x.G.a != 'A' {
+ t.Errorf("expected 'A' got %c", x.G.a)
+ }
+ // Now a field that's not a structure.
+ b.Reset()
+ gobber := Gobber(23)
+ err = enc.Encode(GobTest3{17, &gobber})
+ if err != nil {
+ t.Fatal("encode error:", err)
+ }
+ y := new(GobTest3)
+ err = dec.Decode(y)
+ if err != nil {
+ t.Fatal("decode error:", err)
+ }
+ if *y.G != 23 {
+ t.Errorf("expected 23 got %d", *y.G)
+ }
+}
+
+// Even though the field is a value, we can still take its address
+// and should be able to call the methods.
+func TestGobEncoderValueField(t *testing.T) {
+ b := new(bytes.Buffer)
+ // First a field that's a structure.
+ enc := NewEncoder(b)
+ err := enc.Encode(GobTestValueEncDec{17, StringStruct{"HIJKL"}})
+ if err != nil {
+ t.Fatal("encode error:", err)
+ }
+ dec := NewDecoder(b)
+ x := new(GobTestValueEncDec)
+ err = dec.Decode(x)
+ if err != nil {
+ t.Fatal("decode error:", err)
+ }
+ if x.G.s != "HIJKL" {
+ t.Errorf("expected `HIJKL` got %s", x.G.s)
+ }
+}
+
+// GobEncode/Decode should work even if the value is
+// more indirect than the receiver.
+func TestGobEncoderIndirectField(t *testing.T) {
+ b := new(bytes.Buffer)
+ // First a field that's a structure.
+ enc := NewEncoder(b)
+ s := &StringStruct{"HIJKL"}
+ sp := &s
+ err := enc.Encode(GobTestIndirectEncDec{17, &sp})
+ if err != nil {
+ t.Fatal("encode error:", err)
+ }
+ dec := NewDecoder(b)
+ x := new(GobTestIndirectEncDec)
+ err = dec.Decode(x)
+ if err != nil {
+ t.Fatal("decode error:", err)
+ }
+ if (***x.G).s != "HIJKL" {
+ t.Errorf("expected `HIJKL` got %s", (***x.G).s)
+ }
+}
+
+// Test with a large field with methods.
+func TestGobEncoderArrayField(t *testing.T) {
+ b := new(bytes.Buffer)
+ enc := NewEncoder(b)
+ var a GobTestArrayEncDec
+ a.X = 17
+ for i := range a.A.a {
+ a.A.a[i] = byte(i)
+ }
+ err := enc.Encode(a)
+ if err != nil {
+ t.Fatal("encode error:", err)
+ }
+ dec := NewDecoder(b)
+ x := new(GobTestArrayEncDec)
+ err = dec.Decode(x)
+ if err != nil {
+ t.Fatal("decode error:", err)
+ }
+ for i, v := range x.A.a {
+ if v != byte(i) {
+ t.Errorf("expected %x got %x", byte(i), v)
+ break
+ }
+ }
+}
+
+// Test an indirection to a large field with methods.
+func TestGobEncoderIndirectArrayField(t *testing.T) {
+ b := new(bytes.Buffer)
+ enc := NewEncoder(b)
+ var a GobTestIndirectArrayEncDec
+ a.X = 17
+ var array ArrayStruct
+ ap := &array
+ app := &ap
+ a.A = &app
+ for i := range array.a {
+ array.a[i] = byte(i)
+ }
+ err := enc.Encode(a)
+ if err != nil {
+ t.Fatal("encode error:", err)
+ }
+ dec := NewDecoder(b)
+ x := new(GobTestIndirectArrayEncDec)
+ err = dec.Decode(x)
+ if err != nil {
+ t.Fatal("decode error:", err)
+ }
+ for i, v := range (***x.A).a {
+ if v != byte(i) {
+ t.Errorf("expected %x got %x", byte(i), v)
+ break
+ }
+ }
+}
+
+// As long as the fields have the same name and implement the
+// interface, we can cross-connect them. It's not clear this is
+// useful, and it may even be bad, but it works and is hard to
+// prevent without exposing the contents of the object, which
+// would defeat the purpose.
+func TestGobEncoderFieldsOfDifferentType(t *testing.T) {
+ // first, string in field to byte in field
+ b := new(bytes.Buffer)
+ enc := NewEncoder(b)
+ err := enc.Encode(GobTest1{17, &StringStruct{"ABC"}})
+ if err != nil {
+ t.Fatal("encode error:", err)
+ }
+ dec := NewDecoder(b)
+ x := new(GobTest0)
+ err = dec.Decode(x)
+ if err != nil {
+ t.Fatal("decode error:", err)
+ }
+ if x.G.a != 'A' {
+ t.Errorf("expected 'A' got %c", x.G.a)
+ }
+ // now the other direction, byte in field to string in field
+ b.Reset()
+ err = enc.Encode(GobTest0{17, &ByteStruct{'X'}})
+ if err != nil {
+ t.Fatal("encode error:", err)
+ }
+ y := new(GobTest1)
+ err = dec.Decode(y)
+ if err != nil {
+ t.Fatal("decode error:", err)
+ }
+ if y.G.s != "XYZ" {
+ t.Fatalf("expected `XYZ` got %s", y.G.s)
+ }
+}
+
+// Test that we can encode a value and decode into a pointer.
+func TestGobEncoderValueEncoder(t *testing.T) {
+ // first, string in field to byte in field
+ b := new(bytes.Buffer)
+ enc := NewEncoder(b)
+ err := enc.Encode(GobTest4{17, ValueGobber("hello")})
+ if err != nil {
+ t.Fatal("encode error:", err)
+ }
+ dec := NewDecoder(b)
+ x := new(GobTest5)
+ err = dec.Decode(x)
+ if err != nil {
+ t.Fatal("decode error:", err)
+ }
+ if *x.V != "hello" {
+ t.Errorf("expected `hello` got %s", *x.V)
+ }
+}
+
+func TestGobEncoderFieldTypeError(t *testing.T) {
+ // GobEncoder to non-decoder: error
+ b := new(bytes.Buffer)
+ enc := NewEncoder(b)
+ err := enc.Encode(GobTest1{17, &StringStruct{"ABC"}})
+ if err != nil {
+ t.Fatal("encode error:", err)
+ }
+ dec := NewDecoder(b)
+ x := &GobTest2{}
+ err = dec.Decode(x)
+ if err == nil {
+ t.Fatal("expected decode error for mismatched fields (encoder to non-decoder)")
+ }
+ if strings.Index(err.Error(), "type") < 0 {
+ t.Fatal("expected type error; got", err)
+ }
+ // Non-encoder to GobDecoder: error
+ b.Reset()
+ err = enc.Encode(GobTest2{17, "ABC"})
+ if err != nil {
+ t.Fatal("encode error:", err)
+ }
+ y := &GobTest1{}
+ err = dec.Decode(y)
+ if err == nil {
+ t.Fatal("expected decode error for mismatched fields (non-encoder to decoder)")
+ }
+ if strings.Index(err.Error(), "type") < 0 {
+ t.Fatal("expected type error; got", err)
+ }
+}
+
+// Even though ByteStruct is a struct, it's treated as a singleton at the top level.
+func TestGobEncoderStructSingleton(t *testing.T) {
+ b := new(bytes.Buffer)
+ enc := NewEncoder(b)
+ err := enc.Encode(&ByteStruct{'A'})
+ if err != nil {
+ t.Fatal("encode error:", err)
+ }
+ dec := NewDecoder(b)
+ x := new(ByteStruct)
+ err = dec.Decode(x)
+ if err != nil {
+ t.Fatal("decode error:", err)
+ }
+ if x.a != 'A' {
+ t.Errorf("expected 'A' got %c", x.a)
+ }
+}
+
+func TestGobEncoderNonStructSingleton(t *testing.T) {
+ b := new(bytes.Buffer)
+ enc := NewEncoder(b)
+ err := enc.Encode(Gobber(1234))
+ if err != nil {
+ t.Fatal("encode error:", err)
+ }
+ dec := NewDecoder(b)
+ var x Gobber
+ err = dec.Decode(&x)
+ if err != nil {
+ t.Fatal("decode error:", err)
+ }
+ if x != 1234 {
+ t.Errorf("expected 1234 got %d", x)
+ }
+}
+
+func TestGobEncoderIgnoreStructField(t *testing.T) {
+ b := new(bytes.Buffer)
+ // First a field that's a structure.
+ enc := NewEncoder(b)
+ err := enc.Encode(GobTest0{17, &ByteStruct{'A'}})
+ if err != nil {
+ t.Fatal("encode error:", err)
+ }
+ dec := NewDecoder(b)
+ x := new(GobTestIgnoreEncoder)
+ err = dec.Decode(x)
+ if err != nil {
+ t.Fatal("decode error:", err)
+ }
+ if x.X != 17 {
+ t.Errorf("expected 17 got %d", x.X)
+ }
+}
+
+func TestGobEncoderIgnoreNonStructField(t *testing.T) {
+ b := new(bytes.Buffer)
+ // A field that's not a structure.
+ enc := NewEncoder(b)
+ gobber := Gobber(23)
+ err := enc.Encode(GobTest3{17, &gobber})
+ if err != nil {
+ t.Fatal("encode error:", err)
+ }
+ dec := NewDecoder(b)
+ x := new(GobTestIgnoreEncoder)
+ err = dec.Decode(x)
+ if err != nil {
+ t.Fatal("decode error:", err)
+ }
+ if x.X != 17 {
+ t.Errorf("expected 17 got %d", x.X)
+ }
+}
+
+func TestGobEncoderIgnoreNilEncoder(t *testing.T) {
+ b := new(bytes.Buffer)
+ // First a field that's a structure.
+ enc := NewEncoder(b)
+ err := enc.Encode(GobTest0{X: 18}) // G is nil
+ if err != nil {
+ t.Fatal("encode error:", err)
+ }
+ dec := NewDecoder(b)
+ x := new(GobTest0)
+ err = dec.Decode(x)
+ if err != nil {
+ t.Fatal("decode error:", err)
+ }
+ if x.X != 18 {
+ t.Errorf("expected x.X = 18, got %v", x.X)
+ }
+ if x.G != nil {
+ t.Errorf("expected x.G = nil, got %v", x.G)
+ }
+}
+
+type gobDecoderBug0 struct {
+ foo, bar string
+}
+
+func (br *gobDecoderBug0) String() string {
+ return br.foo + "-" + br.bar
+}
+
+func (br *gobDecoderBug0) GobEncode() ([]byte, error) {
+ return []byte(br.String()), nil
+}
+
+func (br *gobDecoderBug0) GobDecode(b []byte) error {
+ br.foo = "foo"
+ br.bar = "bar"
+ return nil
+}
+
+// This was a bug: the receiver has a different indirection level
+// than the variable.
+func TestGobEncoderExtraIndirect(t *testing.T) {
+ gdb := &gobDecoderBug0{"foo", "bar"}
+ buf := new(bytes.Buffer)
+ e := NewEncoder(buf)
+ if err := e.Encode(gdb); err != nil {
+ t.Fatalf("encode: %v", err)
+ }
+ d := NewDecoder(buf)
+ var got *gobDecoderBug0
+ if err := d.Decode(&got); err != nil {
+ t.Fatalf("decode: %v", err)
+ }
+ if got.foo != gdb.foo || got.bar != gdb.bar {
+ t.Errorf("got = %q, want %q", got, gdb)
+ }
+}
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gob
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "runtime"
+ "testing"
+)
+
+type Bench struct {
+ A int
+ B float64
+ C string
+ D []byte
+}
+
+func benchmarkEndToEnd(r io.Reader, w io.Writer, b *testing.B) {
+ b.StopTimer()
+ enc := NewEncoder(w)
+ dec := NewDecoder(r)
+ bench := &Bench{7, 3.2, "now is the time", []byte("for all good men")}
+ b.StartTimer()
+ for i := 0; i < b.N; i++ {
+ if enc.Encode(bench) != nil {
+ panic("encode error")
+ }
+ if dec.Decode(bench) != nil {
+ panic("decode error")
+ }
+ }
+}
+
+func BenchmarkEndToEndPipe(b *testing.B) {
+ r, w, err := os.Pipe()
+ if err != nil {
+ panic("can't get pipe: " + err.Error())
+ }
+ benchmarkEndToEnd(r, w, b)
+}
+
+func BenchmarkEndToEndByteBuffer(b *testing.B) {
+ var buf bytes.Buffer
+ benchmarkEndToEnd(&buf, &buf, b)
+}
+
+func TestCountEncodeMallocs(t *testing.T) {
+ var buf bytes.Buffer
+ enc := NewEncoder(&buf)
+ bench := &Bench{7, 3.2, "now is the time", []byte("for all good men")}
+ runtime.UpdateMemStats()
+ mallocs := 0 - runtime.MemStats.Mallocs
+ const count = 1000
+ for i := 0; i < count; i++ {
+ err := enc.Encode(bench)
+ if err != nil {
+ t.Fatal("encode:", err)
+ }
+ }
+ runtime.UpdateMemStats()
+ mallocs += runtime.MemStats.Mallocs
+ fmt.Printf("mallocs per encode of type Bench: %d\n", mallocs/count)
+}
+
+func TestCountDecodeMallocs(t *testing.T) {
+ var buf bytes.Buffer
+ enc := NewEncoder(&buf)
+ bench := &Bench{7, 3.2, "now is the time", []byte("for all good men")}
+ const count = 1000
+ for i := 0; i < count; i++ {
+ err := enc.Encode(bench)
+ if err != nil {
+ t.Fatal("encode:", err)
+ }
+ }
+ dec := NewDecoder(&buf)
+ runtime.UpdateMemStats()
+ mallocs := 0 - runtime.MemStats.Mallocs
+ for i := 0; i < count; i++ {
+ *bench = Bench{}
+ err := dec.Decode(&bench)
+ if err != nil {
+ t.Fatal("decode:", err)
+ }
+ }
+ runtime.UpdateMemStats()
+ mallocs += runtime.MemStats.Mallocs
+ fmt.Printf("mallocs per decode of type Bench: %d\n", mallocs/count)
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gob
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "reflect"
+ "sync"
+ "unicode"
+ "unicode/utf8"
+)
+
+// userTypeInfo stores the information associated with a type the user has handed
+// to the package. It's computed once and stored in a map keyed by reflection
+// type.
+type userTypeInfo struct {
+ user reflect.Type // the type the user handed us
+ base reflect.Type // the base type after all indirections
+ indir int // number of indirections to reach the base type
+ isGobEncoder bool // does the type implement GobEncoder?
+ isGobDecoder bool // does the type implement GobDecoder?
+ encIndir int8 // number of indirections to reach the receiver type; may be negative
+ decIndir int8 // number of indirections to reach the receiver type; may be negative
+}
+
+var (
+ // Protected by an RWMutex because we read it a lot and write
+ // it only when we see a new type, typically when compiling.
+ userTypeLock sync.RWMutex
+ userTypeCache = make(map[reflect.Type]*userTypeInfo)
+)
+
+// validUserType returns, and saves, the information associated with user-provided type rt.
+// If the user type is not valid, err will be non-nil. To be used when the error handler
+// is not set up.
+func validUserType(rt reflect.Type) (ut *userTypeInfo, err error) {
+ userTypeLock.RLock()
+ ut = userTypeCache[rt]
+ userTypeLock.RUnlock()
+ if ut != nil {
+ return
+ }
+ // Now set the value under the write lock.
+ userTypeLock.Lock()
+ defer userTypeLock.Unlock()
+ if ut = userTypeCache[rt]; ut != nil {
+ // Lost the race; not a problem.
+ return
+ }
+ ut = new(userTypeInfo)
+ ut.base = rt
+ ut.user = rt
+ // A type that is just a cycle of pointers (such as type T *T) cannot
+ // be represented in gobs, which need some concrete data. We use a
+ // cycle detection algorithm from Knuth, Vol 2, Section 3.1, Ex 6,
+ // pp 539-540. As we step through indirections, run another type at
+ // half speed. If they meet up, there's a cycle.
+ slowpoke := ut.base // walks half as fast as ut.base
+ for {
+ pt := ut.base
+ if pt.Kind() != reflect.Ptr {
+ break
+ }
+ ut.base = pt.Elem()
+ if ut.base == slowpoke { // ut.base lapped slowpoke
+ // recursive pointer type.
+ return nil, errors.New("can't represent recursive pointer type " + ut.base.String())
+ }
+ if ut.indir%2 == 0 {
+ slowpoke = slowpoke.Elem()
+ }
+ ut.indir++
+ }
+ ut.isGobEncoder, ut.encIndir = implementsInterface(ut.user, gobEncoderInterfaceType)
+ ut.isGobDecoder, ut.decIndir = implementsInterface(ut.user, gobDecoderInterfaceType)
+ userTypeCache[rt] = ut
+ return
+}
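
The fast and slow walk above is the usual tortoise-and-hare cycle check: ut.base is unwrapped on every step while slowpoke advances only every other step, so a pure pointer cycle such as type T *T forces the two to meet. The same idea on an explicit linked list, as a standalone illustration rather than package code:

package main

import "fmt"

type node struct{ next *node }

// hasCycle reports whether the list starting at head loops back on itself.
// fast advances two links per iteration, slow one; they can only meet
// inside a cycle, exactly as the half-speed slowpoke does for pointer types.
func hasCycle(head *node) bool {
	slow, fast := head, head
	for fast != nil && fast.next != nil {
		slow = slow.next
		fast = fast.next.next
		if slow == fast {
			return true
		}
	}
	return false
}

func main() {
	a, b, c := &node{}, &node{}, &node{}
	a.next, b.next = b, c
	fmt.Println(hasCycle(a)) // false

	c.next = a // close the loop
	fmt.Println(hasCycle(a)) // true
}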
+
+var (
+ gobEncoderInterfaceType = reflect.TypeOf((*GobEncoder)(nil)).Elem()
+ gobDecoderInterfaceType = reflect.TypeOf((*GobDecoder)(nil)).Elem()
+)
+
+// implementsInterface reports whether the type implements the
+// gobEncoder/gobDecoder interface.
+// It also returns the number of indirections required to get to the
+// implementation.
+func implementsInterface(typ, gobEncDecType reflect.Type) (success bool, indir int8) {
+ if typ == nil {
+ return
+ }
+ rt := typ
+ // The type might be a pointer and we need to keep
+ // dereferencing to the base type until we find an implementation.
+ for {
+ if rt.Implements(gobEncDecType) {
+ return true, indir
+ }
+ if p := rt; p.Kind() == reflect.Ptr {
+ indir++
+ if indir > 100 { // insane number of indirections
+ return false, 0
+ }
+ rt = p.Elem()
+ continue
+ }
+ break
+ }
+ // No luck yet, but if this is a base type (non-pointer), the pointer might satisfy.
+ if typ.Kind() != reflect.Ptr {
+ // Not a pointer, but does the pointer work?
+ if reflect.PtrTo(typ).Implements(gobEncDecType) {
+ return true, -1
+ }
+ }
+ return false, 0
+}
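
The -1 return value above records the case where the value type itself lacks the method set but its pointer has it, so the encoder must take one address-of step before calling the method. A small reflect-based sketch of that distinction, using an invented Marker interface:

package main

import (
	"fmt"
	"reflect"
)

type Marker interface{ Mark() }

type Value struct{}

// Only *Value carries the method, mirroring the "-1 indirection" case.
func (*Value) Mark() {}

func main() {
	iface := reflect.TypeOf((*Marker)(nil)).Elem()
	vt := reflect.TypeOf(Value{})

	fmt.Println(vt.Implements(iface))                // false: value type lacks the method
	fmt.Println(reflect.PtrTo(vt).Implements(iface)) // true: the pointer satisfies it
}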
+
+// userType returns, and saves, the information associated with user-provided type rt.
+// If the user type is not valid, it calls error_.
+func userType(rt reflect.Type) *userTypeInfo {
+ ut, err := validUserType(rt)
+ if err != nil {
+ error_(err)
+ }
+ return ut
+}
+
+// A typeId represents a gob Type as an integer that can be passed on the wire.
+// Internally, typeIds are used as keys to a map to recover the underlying type info.
+type typeId int32
+
+var nextId typeId // incremented for each new type we build
+var typeLock sync.Mutex // set while building a type
+const firstUserId = 64 // lowest id number granted to user
+
+type gobType interface {
+ id() typeId
+ setId(id typeId)
+ name() string
+ string() string // not public; only for debugging
+ safeString(seen map[typeId]bool) string
+}
+
+var types = make(map[reflect.Type]gobType)
+var idToType = make(map[typeId]gobType)
+var builtinIdToType map[typeId]gobType // set in init() after builtins are established
+
+func setTypeId(typ gobType) {
+ nextId++
+ typ.setId(nextId)
+ idToType[nextId] = typ
+}
+
+func (t typeId) gobType() gobType {
+ if t == 0 {
+ return nil
+ }
+ return idToType[t]
+}
+
+// string returns the string representation of the type associated with the typeId.
+func (t typeId) string() string {
+ if t.gobType() == nil {
+ return "<nil>"
+ }
+ return t.gobType().string()
+}
+
+// Name returns the name of the type associated with the typeId.
+func (t typeId) name() string {
+ if t.gobType() == nil {
+ return "<nil>"
+ }
+ return t.gobType().name()
+}
+
+// Common elements of all types.
+type CommonType struct {
+ Name string
+ Id typeId
+}
+
+func (t *CommonType) id() typeId { return t.Id }
+
+func (t *CommonType) setId(id typeId) { t.Id = id }
+
+func (t *CommonType) string() string { return t.Name }
+
+func (t *CommonType) safeString(seen map[typeId]bool) string {
+ return t.Name
+}
+
+func (t *CommonType) name() string { return t.Name }
+
+// Create and check predefined types
+// The string for tBytes is "bytes" not "[]byte" to signify its specialness.
+
+var (
+ // Primordial types, needed during initialization.
+ // Always passed as pointers so the interface{} type
+ // goes through without losing its interfaceness.
+ tBool = bootstrapType("bool", (*bool)(nil), 1)
+ tInt = bootstrapType("int", (*int)(nil), 2)
+ tUint = bootstrapType("uint", (*uint)(nil), 3)
+ tFloat = bootstrapType("float", (*float64)(nil), 4)
+ tBytes = bootstrapType("bytes", (*[]byte)(nil), 5)
+ tString = bootstrapType("string", (*string)(nil), 6)
+ tComplex = bootstrapType("complex", (*complex128)(nil), 7)
+ tInterface = bootstrapType("interface", (*interface{})(nil), 8)
+ // Reserve some Ids for compatible expansion
+ tReserved7 = bootstrapType("_reserved1", (*struct{ r7 int })(nil), 9)
+ tReserved6 = bootstrapType("_reserved1", (*struct{ r6 int })(nil), 10)
+ tReserved5 = bootstrapType("_reserved1", (*struct{ r5 int })(nil), 11)
+ tReserved4 = bootstrapType("_reserved1", (*struct{ r4 int })(nil), 12)
+ tReserved3 = bootstrapType("_reserved1", (*struct{ r3 int })(nil), 13)
+ tReserved2 = bootstrapType("_reserved1", (*struct{ r2 int })(nil), 14)
+ tReserved1 = bootstrapType("_reserved1", (*struct{ r1 int })(nil), 15)
+)
+
+// Predefined because it's needed by the Decoder
+var tWireType = mustGetTypeInfo(reflect.TypeOf(wireType{})).id
+var wireTypeUserInfo *userTypeInfo // userTypeInfo of (*wireType)
+
+func init() {
+ // Some magic numbers to make sure there are no surprises.
+ checkId(16, tWireType)
+ checkId(17, mustGetTypeInfo(reflect.TypeOf(arrayType{})).id)
+ checkId(18, mustGetTypeInfo(reflect.TypeOf(CommonType{})).id)
+ checkId(19, mustGetTypeInfo(reflect.TypeOf(sliceType{})).id)
+ checkId(20, mustGetTypeInfo(reflect.TypeOf(structType{})).id)
+ checkId(21, mustGetTypeInfo(reflect.TypeOf(fieldType{})).id)
+ checkId(23, mustGetTypeInfo(reflect.TypeOf(mapType{})).id)
+
+ builtinIdToType = make(map[typeId]gobType)
+ for k, v := range idToType {
+ builtinIdToType[k] = v
+ }
+
+ // Move the id space upwards to allow for growth in the predefined world
+ // without breaking existing files.
+ if nextId > firstUserId {
+ panic(fmt.Sprintln("nextId too large:", nextId))
+ }
+ nextId = firstUserId
+ registerBasics()
+ wireTypeUserInfo = userType(reflect.TypeOf((*wireType)(nil)))
+}
+
+// Array type
+type arrayType struct {
+ CommonType
+ Elem typeId
+ Len int
+}
+
+func newArrayType(name string) *arrayType {
+ a := &arrayType{CommonType{Name: name}, 0, 0}
+ return a
+}
+
+func (a *arrayType) init(elem gobType, len int) {
+ // Set our type id before evaluating the element's, in case it's our own.
+ setTypeId(a)
+ a.Elem = elem.id()
+ a.Len = len
+}
+
+func (a *arrayType) safeString(seen map[typeId]bool) string {
+ if seen[a.Id] {
+ return a.Name
+ }
+ seen[a.Id] = true
+ return fmt.Sprintf("[%d]%s", a.Len, a.Elem.gobType().safeString(seen))
+}
+
+func (a *arrayType) string() string { return a.safeString(make(map[typeId]bool)) }
+
+// GobEncoder type (something that implements the GobEncoder interface)
+type gobEncoderType struct {
+ CommonType
+}
+
+func newGobEncoderType(name string) *gobEncoderType {
+ g := &gobEncoderType{CommonType{Name: name}}
+ setTypeId(g)
+ return g
+}
+
+func (g *gobEncoderType) safeString(seen map[typeId]bool) string {
+ return g.Name
+}
+
+func (g *gobEncoderType) string() string { return g.Name }
+
+// Map type
+type mapType struct {
+ CommonType
+ Key typeId
+ Elem typeId
+}
+
+func newMapType(name string) *mapType {
+ m := &mapType{CommonType{Name: name}, 0, 0}
+ return m
+}
+
+func (m *mapType) init(key, elem gobType) {
+ // Set our type id before evaluating the element's, in case it's our own.
+ setTypeId(m)
+ m.Key = key.id()
+ m.Elem = elem.id()
+}
+
+func (m *mapType) safeString(seen map[typeId]bool) string {
+ if seen[m.Id] {
+ return m.Name
+ }
+ seen[m.Id] = true
+ key := m.Key.gobType().safeString(seen)
+ elem := m.Elem.gobType().safeString(seen)
+ return fmt.Sprintf("map[%s]%s", key, elem)
+}
+
+func (m *mapType) string() string { return m.safeString(make(map[typeId]bool)) }
+
+// Slice type
+type sliceType struct {
+ CommonType
+ Elem typeId
+}
+
+func newSliceType(name string) *sliceType {
+ s := &sliceType{CommonType{Name: name}, 0}
+ return s
+}
+
+func (s *sliceType) init(elem gobType) {
+ // Set our type id before evaluating the element's, in case it's our own.
+ setTypeId(s)
+ s.Elem = elem.id()
+}
+
+func (s *sliceType) safeString(seen map[typeId]bool) string {
+ if seen[s.Id] {
+ return s.Name
+ }
+ seen[s.Id] = true
+ return fmt.Sprintf("[]%s", s.Elem.gobType().safeString(seen))
+}
+
+func (s *sliceType) string() string { return s.safeString(make(map[typeId]bool)) }
+
+// Struct type
+type fieldType struct {
+ Name string
+ Id typeId
+}
+
+type structType struct {
+ CommonType
+ Field []*fieldType
+}
+
+func (s *structType) safeString(seen map[typeId]bool) string {
+ if s == nil {
+ return "<nil>"
+ }
+ if _, ok := seen[s.Id]; ok {
+ return s.Name
+ }
+ seen[s.Id] = true
+ str := s.Name + " = struct { "
+ for _, f := range s.Field {
+ str += fmt.Sprintf("%s %s; ", f.Name, f.Id.gobType().safeString(seen))
+ }
+ str += "}"
+ return str
+}
+
+func (s *structType) string() string { return s.safeString(make(map[typeId]bool)) }
+
+func newStructType(name string) *structType {
+ s := &structType{CommonType{Name: name}, nil}
+ // For historical reasons we set the id here rather than init.
+ // See the comment in newTypeObject for details.
+ setTypeId(s)
+ return s
+}
+
+// newTypeObject allocates a gobType for the reflection type rt.
+// Unless ut represents a GobEncoder, rt should be the base type
+// of ut.
+// This is only called from the encoding side. The decoding side
+// works through typeIds and userTypeInfos alone.
+func newTypeObject(name string, ut *userTypeInfo, rt reflect.Type) (gobType, error) {
+ // Does this type implement GobEncoder?
+ if ut.isGobEncoder {
+ return newGobEncoderType(name), nil
+ }
+ var err error
+ var type0, type1 gobType
+ defer func() {
+ if err != nil {
+ delete(types, rt)
+ }
+ }()
+ // Install the top-level type before the subtypes (e.g. struct before
+ // fields) so recursive types can be constructed safely.
+ switch t := rt; t.Kind() {
+ // All basic types are easy: they are predefined.
+ case reflect.Bool:
+ return tBool.gobType(), nil
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return tInt.gobType(), nil
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return tUint.gobType(), nil
+
+ case reflect.Float32, reflect.Float64:
+ return tFloat.gobType(), nil
+
+ case reflect.Complex64, reflect.Complex128:
+ return tComplex.gobType(), nil
+
+ case reflect.String:
+ return tString.gobType(), nil
+
+ case reflect.Interface:
+ return tInterface.gobType(), nil
+
+ case reflect.Array:
+ at := newArrayType(name)
+ types[rt] = at
+ type0, err = getBaseType("", t.Elem())
+ if err != nil {
+ return nil, err
+ }
+ // Historical aside:
+ // For arrays, maps, and slices, we set the type id after the elements
+ // are constructed. This is to retain the order of type id allocation after
+ // a fix made to handle recursive types, which changed the order in
+ // which types are built. Delaying the setting in this way preserves
+ // type ids while allowing recursive types to be described. Structs,
+ // done below, were already handling recursion correctly so they
+ // assign the top-level id before those of the field.
+ at.init(type0, t.Len())
+ return at, nil
+
+ case reflect.Map:
+ mt := newMapType(name)
+ types[rt] = mt
+ type0, err = getBaseType("", t.Key())
+ if err != nil {
+ return nil, err
+ }
+ type1, err = getBaseType("", t.Elem())
+ if err != nil {
+ return nil, err
+ }
+ mt.init(type0, type1)
+ return mt, nil
+
+ case reflect.Slice:
+ // []byte == []uint8 is a special case
+ if t.Elem().Kind() == reflect.Uint8 {
+ return tBytes.gobType(), nil
+ }
+ st := newSliceType(name)
+ types[rt] = st
+ type0, err = getBaseType(t.Elem().Name(), t.Elem())
+ if err != nil {
+ return nil, err
+ }
+ st.init(type0)
+ return st, nil
+
+ case reflect.Struct:
+ st := newStructType(name)
+ types[rt] = st
+ idToType[st.id()] = st
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ if !isExported(f.Name) {
+ continue
+ }
+ typ := userType(f.Type).base
+ tname := typ.Name()
+ if tname == "" {
+ t := userType(f.Type).base
+ tname = t.String()
+ }
+ gt, err := getBaseType(tname, f.Type)
+ if err != nil {
+ return nil, err
+ }
+ st.Field = append(st.Field, &fieldType{f.Name, gt.id()})
+ }
+ return st, nil
+
+ default:
+ return nil, errors.New("gob NewTypeObject can't handle type: " + rt.String())
+ }
+ return nil, nil
+}
+
+// isExported reports whether this is an exported - upper case - name.
+func isExported(name string) bool {
+ r, _ := utf8.DecodeRuneInString(name)
+ return unicode.IsUpper(r)
+}
+
+// getBaseType returns the Gob type describing the given reflect.Type's base type.
+// typeLock must be held.
+func getBaseType(name string, rt reflect.Type) (gobType, error) {
+ ut := userType(rt)
+ return getType(name, ut, ut.base)
+}
+
+// getType returns the Gob type describing the given reflect.Type.
+// Should be called only when handling GobEncoders/Decoders,
+// which may be pointers. All other types are handled through the
+// base type, never a pointer.
+// typeLock must be held.
+func getType(name string, ut *userTypeInfo, rt reflect.Type) (gobType, error) {
+ typ, present := types[rt]
+ if present {
+ return typ, nil
+ }
+ typ, err := newTypeObject(name, ut, rt)
+ if err == nil {
+ types[rt] = typ
+ }
+ return typ, err
+}
+
+func checkId(want, got typeId) {
+ if want != got {
+ fmt.Fprintf(os.Stderr, "checkId: %d should be %d\n", int(got), int(want))
+ panic("bootstrap type wrong id: " + got.name() + " " + got.string() + " not " + want.string())
+ }
+}
+
+// used for building the basic types; called only from init(). the incoming
+// interface always refers to a pointer.
+func bootstrapType(name string, e interface{}, expect typeId) typeId {
+ rt := reflect.TypeOf(e).Elem()
+ _, present := types[rt]
+ if present {
+ panic("bootstrap type already present: " + name + ", " + rt.String())
+ }
+ typ := &CommonType{Name: name}
+ types[rt] = typ
+ setTypeId(typ)
+ checkId(expect, nextId)
+ userType(rt) // might as well cache it now
+ return nextId
+}
+
+// Representation of the information we send and receive about this type.
+// Each value we send is preceded by its type definition: an encoded int.
+// However, the very first time we send the value, we first send the pair
+// (-id, wireType).
+// For bootstrapping purposes, we assume that the recipient knows how
+// to decode a wireType; it is exactly the wireType struct here, interpreted
+// using the gob rules for sending a structure, except that we assume the
+// ids for wireType and structType etc. are known. The relevant pieces
+// are built in encode.go's init() function.
+// To maintain binary compatibility, if you extend this type, always put
+// the new fields last.
+type wireType struct {
+ ArrayT *arrayType
+ SliceT *sliceType
+ StructT *structType
+ MapT *mapType
+ GobEncoderT *gobEncoderType
+}
+
+func (w *wireType) string() string {
+ const unknown = "unknown type"
+ if w == nil {
+ return unknown
+ }
+ switch {
+ case w.ArrayT != nil:
+ return w.ArrayT.Name
+ case w.SliceT != nil:
+ return w.SliceT.Name
+ case w.StructT != nil:
+ return w.StructT.Name
+ case w.MapT != nil:
+ return w.MapT.Name
+ case w.GobEncoderT != nil:
+ return w.GobEncoderT.Name
+ }
+ return unknown
+}
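
The comment above wireType describes the bootstrap: the first appearance of a type is preceded by the pair (-id, wireType), and every later value of that type carries only the positive id. The asymmetry is easy to observe from outside the package, assuming the public encoding/gob API:

package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
	"log"
)

type Pair struct{ A, B int }

func main() {
	var buf bytes.Buffer
	enc := gob.NewEncoder(&buf)

	if err := enc.Encode(Pair{1, 2}); err != nil {
		log.Fatal(err)
	}
	withDescriptor := buf.Len() // (-id, wireType) message plus the value message

	if err := enc.Encode(Pair{3, 4}); err != nil {
		log.Fatal(err)
	}
	valueOnly := buf.Len() - withDescriptor // just the value message

	fmt.Println(withDescriptor > valueOnly) // true: the descriptor is sent only once
}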
+
+type typeInfo struct {
+ id typeId
+ encoder *encEngine
+ wire *wireType
+}
+
+var typeInfoMap = make(map[reflect.Type]*typeInfo) // protected by typeLock
+
+// typeLock must be held.
+func getTypeInfo(ut *userTypeInfo) (*typeInfo, error) {
+ rt := ut.base
+ if ut.isGobEncoder {
+ // We want the user type, not the base type.
+ rt = ut.user
+ }
+ info, ok := typeInfoMap[rt]
+ if ok {
+ return info, nil
+ }
+ info = new(typeInfo)
+ gt, err := getBaseType(rt.Name(), rt)
+ if err != nil {
+ return nil, err
+ }
+ info.id = gt.id()
+
+ if ut.isGobEncoder {
+ userType, err := getType(rt.Name(), ut, rt)
+ if err != nil {
+ return nil, err
+ }
+ info.wire = &wireType{GobEncoderT: userType.id().gobType().(*gobEncoderType)}
+ typeInfoMap[ut.user] = info
+ return info, nil
+ }
+
+ t := info.id.gobType()
+ switch typ := rt; typ.Kind() {
+ case reflect.Array:
+ info.wire = &wireType{ArrayT: t.(*arrayType)}
+ case reflect.Map:
+ info.wire = &wireType{MapT: t.(*mapType)}
+ case reflect.Slice:
+ // []byte == []uint8 is a special case handled separately
+ if typ.Elem().Kind() != reflect.Uint8 {
+ info.wire = &wireType{SliceT: t.(*sliceType)}
+ }
+ case reflect.Struct:
+ info.wire = &wireType{StructT: t.(*structType)}
+ }
+ typeInfoMap[rt] = info
+ return info, nil
+}
+
+// Called only when a panic is acceptable and unexpected.
+func mustGetTypeInfo(rt reflect.Type) *typeInfo {
+ t, err := getTypeInfo(userType(rt))
+ if err != nil {
+ panic("getTypeInfo: " + err.Error())
+ }
+ return t
+}
+
+// GobEncoder is the interface describing data that provides its own
+// representation for encoding values for transmission to a GobDecoder.
+// A type that implements GobEncoder and GobDecoder has complete
+// control over the representation of its data and may therefore
+// contain things such as private fields, channels, and functions,
+// which are not usually transmissible in gob streams.
+//
+// Note: Since gobs can be stored permanently, it is good design
+// to guarantee the encoding used by a GobEncoder is stable as the
+// software evolves. For instance, it might make sense for GobEncode
+// to include a version number in the encoding.
+type GobEncoder interface {
+ // GobEncode returns a byte slice representing the encoding of the
+ // receiver for transmission to a GobDecoder, usually of the same
+ // concrete type.
+ GobEncode() ([]byte, error)
+}
+
+// GobDecoder is the interface describing data that provides its own
+// routine for decoding transmitted values sent by a GobEncoder.
+type GobDecoder interface {
+ // GobDecode overwrites the receiver, which must be a pointer,
+ // with the value represented by the byte slice, which was written
+ // by GobEncode, usually for the same concrete type.
+ GobDecode([]byte) error
+}
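
Following the versioning advice in the GobEncoder comment, here is a hedged sketch of a type that owns its wire form and stamps it with a version field; the Celsius type and its format are illustrative, not part of this package:

package main

import (
	"bytes"
	"encoding/gob"
	"errors"
	"fmt"
)

// Celsius keeps full control of its encoding: a version number,
// a separator, then a decimal rendering of the temperature.
type Celsius float64

func (c Celsius) GobEncode() ([]byte, error) {
	return []byte(fmt.Sprintf("1:%g", float64(c))), nil
}

func (c *Celsius) GobDecode(data []byte) error {
	var version int
	var v float64
	if _, err := fmt.Sscanf(string(data), "%d:%g", &version, &v); err != nil {
		return err
	}
	if version != 1 {
		return errors.New("Celsius: unsupported encoding version")
	}
	*c = Celsius(v)
	return nil
}

func main() {
	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(Celsius(36.6)); err != nil {
		panic(err)
	}
	var c Celsius
	if err := gob.NewDecoder(&buf).Decode(&c); err != nil {
		panic(err)
	}
	fmt.Println(c) // 36.6
}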
+
+var (
+ nameToConcreteType = make(map[string]reflect.Type)
+ concreteTypeToName = make(map[reflect.Type]string)
+)
+
+// RegisterName is like Register but uses the provided name rather than the
+// type's default.
+func RegisterName(name string, value interface{}) {
+ if name == "" {
+ // reserved for nil
+ panic("attempt to register empty name")
+ }
+ ut := userType(reflect.TypeOf(value))
+ // Check for incompatible duplicates. The name must refer to the
+ // same user type, and vice versa.
+ if t, ok := nameToConcreteType[name]; ok && t != ut.user {
+ panic(fmt.Sprintf("gob: registering duplicate types for %q: %s != %s", name, t, ut.user))
+ }
+ if n, ok := concreteTypeToName[ut.base]; ok && n != name {
+ panic(fmt.Sprintf("gob: registering duplicate names for %s: %q != %q", ut.user, n, name))
+ }
+ // Store the name and type provided by the user....
+ nameToConcreteType[name] = reflect.TypeOf(value)
+ // but the flattened type in the type table, since that's what decode needs.
+ concreteTypeToName[ut.base] = name
+}
+
+// Register records a type, identified by a value for that type, under its
+// internal type name. That name will identify the concrete type of a value
+// sent or received as an interface variable. Only types that will be
+// transferred as implementations of interface values need to be registered.
+// Expecting to be used only during initialization, it panics if the mapping
+// between types and names is not a bijection.
+func Register(value interface{}) {
+ // Default to printed representation for unnamed types
+ rt := reflect.TypeOf(value)
+ name := rt.String()
+
+ // But for named types (or pointers to them), qualify with import path.
+ // Dereference one pointer looking for a named type.
+ star := ""
+ if rt.Name() == "" {
+ if pt := rt; pt.Kind() == reflect.Ptr {
+ star = "*"
+ rt = pt
+ }
+ }
+ if rt.Name() != "" {
+ if rt.PkgPath() == "" {
+ name = star + rt.Name()
+ } else {
+ name = star + rt.PkgPath() + "." + rt.Name()
+ }
+ }
+
+ RegisterName(name, value)
+}
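
Register and RegisterName exist so a decoder can map the name carried in the stream back to a concrete type when the static type is an interface. A short usage sketch against the public encoding/gob API, with made-up Shape and Square types:

package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
)

type Shape interface{ Area() float64 }

type Square struct{ Side float64 }

func (s Square) Area() float64 { return s.Side * s.Side }

func main() {
	// Without this call the encoder would refuse the interface value,
	// since it could not name the concrete type for the decoder.
	gob.Register(Square{})

	var buf bytes.Buffer
	var in Shape = Square{Side: 3}
	if err := gob.NewEncoder(&buf).Encode(&in); err != nil {
		panic(err)
	}

	var out Shape
	if err := gob.NewDecoder(&buf).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println(out.Area()) // 9
}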
+
+func registerBasics() {
+ Register(int(0))
+ Register(int8(0))
+ Register(int16(0))
+ Register(int32(0))
+ Register(int64(0))
+ Register(uint(0))
+ Register(uint8(0))
+ Register(uint16(0))
+ Register(uint32(0))
+ Register(uint64(0))
+ Register(float32(0))
+ Register(float64(0))
+ Register(complex64(0i))
+ Register(complex128(0i))
+ Register(uintptr(0))
+ Register(false)
+ Register("")
+ Register([]byte(nil))
+ Register([]int(nil))
+ Register([]int8(nil))
+ Register([]int16(nil))
+ Register([]int32(nil))
+ Register([]int64(nil))
+ Register([]uint(nil))
+ Register([]uint8(nil))
+ Register([]uint16(nil))
+ Register([]uint32(nil))
+ Register([]uint64(nil))
+ Register([]float32(nil))
+ Register([]float64(nil))
+ Register([]complex64(nil))
+ Register([]complex128(nil))
+ Register([]uintptr(nil))
+ Register([]bool(nil))
+ Register([]string(nil))
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gob
+
+import (
+ "reflect"
+ "testing"
+)
+
+type typeT struct {
+ id typeId
+ str string
+}
+
+var basicTypes = []typeT{
+ {tBool, "bool"},
+ {tInt, "int"},
+ {tUint, "uint"},
+ {tFloat, "float"},
+ {tBytes, "bytes"},
+ {tString, "string"},
+}
+
+func getTypeUnlocked(name string, rt reflect.Type) gobType {
+ typeLock.Lock()
+ defer typeLock.Unlock()
+ t, err := getBaseType(name, rt)
+ if err != nil {
+ panic("getTypeUnlocked: " + err.Error())
+ }
+ return t
+}
+
+// Sanity checks
+func TestBasic(t *testing.T) {
+ for _, tt := range basicTypes {
+ if tt.id.string() != tt.str {
+ t.Errorf("checkType: expected %q got %s", tt.str, tt.id.string())
+ }
+ if tt.id == 0 {
+ t.Errorf("id for %q is zero", tt.str)
+ }
+ }
+}
+
+// Reregister some basic types to check registration is idempotent.
+func TestReregistration(t *testing.T) {
+ newtyp := getTypeUnlocked("int", reflect.TypeOf(int(0)))
+ if newtyp != tInt.gobType() {
+ t.Errorf("reregistration of %s got new type", newtyp.string())
+ }
+ newtyp = getTypeUnlocked("uint", reflect.TypeOf(uint(0)))
+ if newtyp != tUint.gobType() {
+ t.Errorf("reregistration of %s got new type", newtyp.string())
+ }
+ newtyp = getTypeUnlocked("string", reflect.TypeOf("hello"))
+ if newtyp != tString.gobType() {
+ t.Errorf("reregistration of %s got new type", newtyp.string())
+ }
+}
+
+func TestArrayType(t *testing.T) {
+ var a3 [3]int
+ a3int := getTypeUnlocked("foo", reflect.TypeOf(a3))
+ newa3int := getTypeUnlocked("bar", reflect.TypeOf(a3))
+ if a3int != newa3int {
+ t.Errorf("second registration of [3]int creates new type")
+ }
+ var a4 [4]int
+ a4int := getTypeUnlocked("goo", reflect.TypeOf(a4))
+ if a3int == a4int {
+ t.Errorf("registration of [3]int creates same type as [4]int")
+ }
+ var b3 [3]bool
+ a3bool := getTypeUnlocked("", reflect.TypeOf(b3))
+ if a3int == a3bool {
+ t.Errorf("registration of [3]bool creates same type as [3]int")
+ }
+ str := a3bool.string()
+ expected := "[3]bool"
+ if str != expected {
+ t.Errorf("array printed as %q; expected %q", str, expected)
+ }
+}
+
+func TestSliceType(t *testing.T) {
+ var s []int
+ sint := getTypeUnlocked("slice", reflect.TypeOf(s))
+ var news []int
+ newsint := getTypeUnlocked("slice1", reflect.TypeOf(news))
+ if sint != newsint {
+ t.Errorf("second registration of []int creates new type")
+ }
+ var b []bool
+ sbool := getTypeUnlocked("", reflect.TypeOf(b))
+ if sbool == sint {
+ t.Errorf("registration of []bool creates same type as []int")
+ }
+ str := sbool.string()
+ expected := "[]bool"
+ if str != expected {
+ t.Errorf("slice printed as %q; expected %q", str, expected)
+ }
+}
+
+func TestMapType(t *testing.T) {
+ var m map[string]int
+ mapStringInt := getTypeUnlocked("map", reflect.TypeOf(m))
+ var newm map[string]int
+ newMapStringInt := getTypeUnlocked("map1", reflect.TypeOf(newm))
+ if mapStringInt != newMapStringInt {
+ t.Errorf("second registration of map[string]int creates new type")
+ }
+ var b map[string]bool
+ mapStringBool := getTypeUnlocked("", reflect.TypeOf(b))
+ if mapStringBool == mapStringInt {
+ t.Errorf("registration of map[string]bool creates same type as map[string]int")
+ }
+ str := mapStringBool.string()
+ expected := "map[string]bool"
+ if str != expected {
+ t.Errorf("map printed as %q; expected %q", str, expected)
+ }
+}
+
+type Bar struct {
+ X string
+}
+
+// This structure has pointers and refers to itself, making it a good test case.
+type Foo struct {
+ A int
+ B int32 // will become int
+ C string
+ D []byte
+ E *float64 // will become float64
+ F ****float64 // will become float64
+ G *Bar
+ H *Bar // should not interpolate the definition of Bar again
+ I *Foo // will not explode
+}
+
+func TestStructType(t *testing.T) {
+ sstruct := getTypeUnlocked("Foo", reflect.TypeOf(Foo{}))
+ str := sstruct.string()
+ // If we can print it correctly, we built it correctly.
+ expected := "Foo = struct { A int; B int; C string; D bytes; E float; F float; G Bar = struct { X string; }; H Bar; I Foo; }"
+ if str != expected {
+ t.Errorf("struct printed as %q; expected %q", str, expected)
+ }
+}
+
+// Should be OK to register the same type multiple times, as long as they're
+// at the same level of indirection.
+func TestRegistration(t *testing.T) {
+ type T struct{ a int }
+ Register(new(T))
+ Register(new(T))
+}
--- /dev/null
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Represents JSON data structures using native Go types: booleans, floats,
+// strings, arrays, and maps.
+
+package json
+
+import (
+ "encoding/base64"
+ "errors"
+ "reflect"
+ "runtime"
+ "strconv"
+ "strings"
+ "unicode"
+ "unicode/utf16"
+ "unicode/utf8"
+)
+
+// Unmarshal parses the JSON-encoded data and stores the result
+// in the value pointed to by v.
+//
+// Unmarshal uses the inverse of the encodings that
+// Marshal uses, allocating maps, slices, and pointers as necessary,
+// with the following additional rules:
+//
+// To unmarshal JSON into a pointer, Unmarshal first handles the case of
+// the JSON being the JSON literal null. In that case, Unmarshal sets
+// the pointer to nil. Otherwise, Unmarshal unmarshals the JSON into
+// the value pointed at by the pointer. If the pointer is nil, Unmarshal
+// allocates a new value for it to point to.
+//
+// To unmarshal JSON into an interface value, Unmarshal unmarshals
+// the JSON into the concrete value contained in the interface value.
+// If the interface value is nil, that is, has no concrete value stored in it,
+// Unmarshal stores one of these in the interface value:
+//
+// bool, for JSON booleans
+// float64, for JSON numbers
+// string, for JSON strings
+// []interface{}, for JSON arrays
+// map[string]interface{}, for JSON objects
+// nil for JSON null
+//
+// If a JSON value is not appropriate for a given target type,
+// or if a JSON number overflows the target type, Unmarshal
+// skips that field and completes the unmarshalling as best it can.
+// If no more serious errors are encountered, Unmarshal returns
+// an UnmarshalTypeError describing the earliest such error.
+//
+func Unmarshal(data []byte, v interface{}) error {
+ d := new(decodeState).init(data)
+
+ // Quick check for well-formedness.
+ // Avoids filling out half a data structure
+ // before discovering a JSON syntax error.
+ err := checkValid(data, &d.scan)
+ if err != nil {
+ return err
+ }
+
+ return d.unmarshal(v)
+}
+
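+// Illustrative sketch (not part of the original source): decoding into a
+// struct and into an empty interface. The Message type is hypothetical and
+// error handling is elided.
+//
+//	type Message struct {
+//		Name string
+//		Body string
+//	}
+//
+//	var m Message
+//	err := Unmarshal([]byte(`{"Name":"Alice","Body":"Hello"}`), &m)
+//	// m is Message{Name: "Alice", Body: "Hello"}
+//
+//	var v interface{}
+//	err = Unmarshal([]byte(`[1, "two", null]`), &v)
+//	// v is []interface{}{float64(1), "two", nil}
+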
+// Unmarshaler is the interface implemented by objects
+// that can unmarshal a JSON description of themselves.
+// The input can be assumed to be a valid JSON object
+// encoding. UnmarshalJSON must copy the JSON data
+// if it wishes to retain the data after returning.
+type Unmarshaler interface {
+ UnmarshalJSON([]byte) error
+}
+
+// An UnmarshalTypeError describes a JSON value that was
+// not appropriate for a value of a specific Go type.
+type UnmarshalTypeError struct {
+ Value string // description of JSON value - "bool", "array", "number -5"
+ Type reflect.Type // type of Go value it could not be assigned to
+}
+
+func (e *UnmarshalTypeError) Error() string {
+ return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String()
+}
+
+// An UnmarshalFieldError describes a JSON object key that
+// led to an unexported (and therefore unwritable) struct field.
+type UnmarshalFieldError struct {
+ Key string
+ Type reflect.Type
+ Field reflect.StructField
+}
+
+func (e *UnmarshalFieldError) Error() string {
+ return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String()
+}
+
+// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal.
+// (The argument to Unmarshal must be a non-nil pointer.)
+type InvalidUnmarshalError struct {
+ Type reflect.Type
+}
+
+func (e *InvalidUnmarshalError) Error() string {
+ if e.Type == nil {
+ return "json: Unmarshal(nil)"
+ }
+
+ if e.Type.Kind() != reflect.Ptr {
+ return "json: Unmarshal(non-pointer " + e.Type.String() + ")"
+ }
+ return "json: Unmarshal(nil " + e.Type.String() + ")"
+}
+
+func (d *decodeState) unmarshal(v interface{}) (err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ if _, ok := r.(runtime.Error); ok {
+ panic(r)
+ }
+ err = r.(error)
+ }
+ }()
+
+ rv := reflect.ValueOf(v)
+ pv := rv
+ if pv.Kind() != reflect.Ptr || pv.IsNil() {
+ return &InvalidUnmarshalError{reflect.TypeOf(v)}
+ }
+
+ d.scan.reset()
+ // We decode rv not pv.Elem because the Unmarshaler interface
+ // test must be applied at the top level of the value.
+ d.value(rv)
+ return d.savedError
+}
+
+// decodeState represents the state while decoding a JSON value.
+type decodeState struct {
+ data []byte
+ off int // read offset in data
+ scan scanner
+ nextscan scanner // for calls to nextValue
+ savedError error
+ tempstr string // scratch space to avoid some allocations
+}
+
+// errPhase is used for errors that should not happen unless
+// there is a bug in the JSON decoder or something is editing
+// the data slice while the decoder executes.
+var errPhase = errors.New("JSON decoder out of sync - data changing underfoot?")
+
+func (d *decodeState) init(data []byte) *decodeState {
+ d.data = data
+ d.off = 0
+ d.savedError = nil
+ return d
+}
+
+// error aborts the decoding by panicking with err.
+func (d *decodeState) error(err error) {
+ panic(err)
+}
+
+// saveError saves the first err it is called with,
+// for reporting at the end of the unmarshal.
+func (d *decodeState) saveError(err error) {
+ if d.savedError == nil {
+ d.savedError = err
+ }
+}
+
+// next cuts off and returns the next full JSON value in d.data[d.off:].
+// The next value is known to be an object or array, not a literal.
+func (d *decodeState) next() []byte {
+ c := d.data[d.off]
+ item, rest, err := nextValue(d.data[d.off:], &d.nextscan)
+ if err != nil {
+ d.error(err)
+ }
+ d.off = len(d.data) - len(rest)
+
+ // Our scanner has seen the opening brace/bracket
+ // and thinks we're still in the middle of the object.
+ // invent a closing brace/bracket to get it out.
+ if c == '{' {
+ d.scan.step(&d.scan, '}')
+ } else {
+ d.scan.step(&d.scan, ']')
+ }
+
+ return item
+}
+
+// scanWhile processes bytes in d.data[d.off:] until it
+// receives a scan code not equal to op.
+// It updates d.off and returns the new scan code.
+func (d *decodeState) scanWhile(op int) int {
+ var newOp int
+ for {
+ if d.off >= len(d.data) {
+ newOp = d.scan.eof()
+ d.off = len(d.data) + 1 // mark processed EOF with len+1
+ } else {
+ c := int(d.data[d.off])
+ d.off++
+ newOp = d.scan.step(&d.scan, c)
+ }
+ if newOp != op {
+ break
+ }
+ }
+ return newOp
+}
+
+// value decodes a JSON value from d.data[d.off:] into the value.
+// it updates d.off to point past the decoded value.
+func (d *decodeState) value(v reflect.Value) {
+ if !v.IsValid() {
+ _, rest, err := nextValue(d.data[d.off:], &d.nextscan)
+ if err != nil {
+ d.error(err)
+ }
+ d.off = len(d.data) - len(rest)
+
+ // d.scan thinks we're still at the beginning of the item.
+ // Feed in an empty string - the shortest, simplest value -
+ // so that it knows we got to the end of the value.
+ if d.scan.step == stateRedo {
+ panic("redo")
+ }
+ d.scan.step(&d.scan, '"')
+ d.scan.step(&d.scan, '"')
+ return
+ }
+
+ switch op := d.scanWhile(scanSkipSpace); op {
+ default:
+ d.error(errPhase)
+
+ case scanBeginArray:
+ d.array(v)
+
+ case scanBeginObject:
+ d.object(v)
+
+ case scanBeginLiteral:
+ d.literal(v)
+ }
+}
+
+// indirect walks down v allocating pointers as needed,
+// until it gets to a non-pointer.
+// if it encounters an Unmarshaler, indirect stops and returns that.
+// if decodingNull is true, indirect stops at the last pointer so it can be set to nil.
+func (d *decodeState) indirect(v reflect.Value, decodingNull bool) (Unmarshaler, reflect.Value) {
+ // If v is a named type and is addressable,
+ // start with its address, so that if the type has pointer methods,
+ // we find them.
+ if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() {
+ v = v.Addr()
+ }
+ for {
+ var isUnmarshaler bool
+ if v.Type().NumMethod() > 0 {
+ // Remember that this is an unmarshaler,
+ // but wait to return it until after allocating
+ // the pointer (if necessary).
+ _, isUnmarshaler = v.Interface().(Unmarshaler)
+ }
+
+ if iv := v; iv.Kind() == reflect.Interface && !iv.IsNil() {
+ v = iv.Elem()
+ continue
+ }
+
+ pv := v
+ if pv.Kind() != reflect.Ptr {
+ break
+ }
+
+ if pv.Elem().Kind() != reflect.Ptr && decodingNull && pv.CanSet() {
+ return nil, pv
+ }
+ if pv.IsNil() {
+ pv.Set(reflect.New(pv.Type().Elem()))
+ }
+ if isUnmarshaler {
+ // Using v.Interface().(Unmarshaler)
+ // here means that we have to use a pointer
+ // as the struct field. We cannot use a value inside
+ // a pointer to a struct, because in that case
+ // v.Interface() is the value (x.f) not the pointer (&x.f).
+ // This is an unfortunate consequence of reflect.
+ // An alternative would be to look up the
+ // UnmarshalJSON method and return a FuncValue.
+ return v.Interface().(Unmarshaler), reflect.Value{}
+ }
+ v = pv.Elem()
+ }
+ return nil, v
+}
+
+// array consumes an array from d.data[d.off-1:], decoding into the value v.
+// the first byte of the array ('[') has been read already.
+func (d *decodeState) array(v reflect.Value) {
+ // Check for unmarshaler.
+ unmarshaler, pv := d.indirect(v, false)
+ if unmarshaler != nil {
+ d.off--
+ err := unmarshaler.UnmarshalJSON(d.next())
+ if err != nil {
+ d.error(err)
+ }
+ return
+ }
+ v = pv
+
+ // Decoding into nil interface? Switch to non-reflect code.
+ iv := v
+ ok := iv.Kind() == reflect.Interface
+ if ok {
+ iv.Set(reflect.ValueOf(d.arrayInterface()))
+ return
+ }
+
+ // Check type of target.
+ av := v
+ if av.Kind() != reflect.Array && av.Kind() != reflect.Slice {
+ d.saveError(&UnmarshalTypeError{"array", v.Type()})
+ d.off--
+ d.next()
+ return
+ }
+
+ sv := v
+
+ i := 0
+ for {
+ // Look ahead for ] - can only happen on first iteration.
+ op := d.scanWhile(scanSkipSpace)
+ if op == scanEndArray {
+ break
+ }
+
+ // Back up so d.value can have the byte we just read.
+ d.off--
+ d.scan.undo(op)
+
+ // Get element of array, growing if necessary.
+ if i >= av.Cap() && sv.IsValid() {
+ newcap := sv.Cap() + sv.Cap()/2
+ if newcap < 4 {
+ newcap = 4
+ }
+ newv := reflect.MakeSlice(sv.Type(), sv.Len(), newcap)
+ reflect.Copy(newv, sv)
+ sv.Set(newv)
+ }
+ if i >= av.Len() && sv.IsValid() {
+ // Must be slice; gave up on array during i >= av.Cap().
+ sv.SetLen(i + 1)
+ }
+
+ // Decode into element.
+ if i < av.Len() {
+ d.value(av.Index(i))
+ } else {
+ // Ran out of fixed array: skip.
+ d.value(reflect.Value{})
+ }
+ i++
+
+ // Next token must be , or ].
+ op = d.scanWhile(scanSkipSpace)
+ if op == scanEndArray {
+ break
+ }
+ if op != scanArrayValue {
+ d.error(errPhase)
+ }
+ }
+ if i < av.Len() {
+ if !sv.IsValid() {
+ // Array. Zero the rest.
+ z := reflect.Zero(av.Type().Elem())
+ for ; i < av.Len(); i++ {
+ av.Index(i).Set(z)
+ }
+ } else {
+ sv.SetLen(i)
+ }
+ }
+}
+
+// object consumes an object from d.data[d.off-1:], decoding into the value v.
+// the first byte of the object ('{') has been read already.
+func (d *decodeState) object(v reflect.Value) {
+ // Check for unmarshaler.
+ unmarshaler, pv := d.indirect(v, false)
+ if unmarshaler != nil {
+ d.off--
+ err := unmarshaler.UnmarshalJSON(d.next())
+ if err != nil {
+ d.error(err)
+ }
+ return
+ }
+ v = pv
+
+ // Decoding into nil interface? Switch to non-reflect code.
+ iv := v
+ if iv.Kind() == reflect.Interface {
+ iv.Set(reflect.ValueOf(d.objectInterface()))
+ return
+ }
+
+ // Check type of target: struct or map[string]T
+ var (
+ mv reflect.Value
+ sv reflect.Value
+ )
+ switch v.Kind() {
+ case reflect.Map:
+ // map must have string type
+ t := v.Type()
+ if t.Key() != reflect.TypeOf("") {
+ d.saveError(&UnmarshalTypeError{"object", v.Type()})
+ break
+ }
+ mv = v
+ if mv.IsNil() {
+ mv.Set(reflect.MakeMap(t))
+ }
+ case reflect.Struct:
+ sv = v
+ default:
+ d.saveError(&UnmarshalTypeError{"object", v.Type()})
+ }
+
+ if !mv.IsValid() && !sv.IsValid() {
+ d.off--
+ d.next() // skip over { } in input
+ return
+ }
+
+ var mapElem reflect.Value
+
+ for {
+ // Read opening " of string key or closing }.
+ op := d.scanWhile(scanSkipSpace)
+ if op == scanEndObject {
+ // closing } - can only happen on first iteration.
+ break
+ }
+ if op != scanBeginLiteral {
+ d.error(errPhase)
+ }
+
+ // Read string key.
+ start := d.off - 1
+ op = d.scanWhile(scanContinue)
+ item := d.data[start : d.off-1]
+ key, ok := unquote(item)
+ if !ok {
+ d.error(errPhase)
+ }
+
+ // Figure out field corresponding to key.
+ var subv reflect.Value
+ destring := false // whether the value is wrapped in a string to be decoded first
+
+ if mv.IsValid() {
+ elemType := mv.Type().Elem()
+ if !mapElem.IsValid() {
+ mapElem = reflect.New(elemType).Elem()
+ } else {
+ mapElem.Set(reflect.Zero(elemType))
+ }
+ subv = mapElem
+ } else {
+ var f reflect.StructField
+ var ok bool
+ st := sv.Type()
+ for i := 0; i < sv.NumField(); i++ {
+ sf := st.Field(i)
+ tag := sf.Tag.Get("json")
+ if tag == "-" {
+ // Pretend this field doesn't exist.
+ continue
+ }
+ // First, tag match
+ tagName, _ := parseTag(tag)
+ if tagName == key {
+ f = sf
+ ok = true
+ break // no better match possible
+ }
+ // Second, exact field name match
+ if sf.Name == key {
+ f = sf
+ ok = true
+ }
+ // Third, case-insensitive field name match,
+ // but only if a better match hasn't already been seen
+ if !ok && strings.EqualFold(sf.Name, key) {
+ f = sf
+ ok = true
+ }
+ }
+
+ // Extract value; name must be exported.
+ if ok {
+ if f.PkgPath != "" {
+ d.saveError(&UnmarshalFieldError{key, st, f})
+ } else {
+ subv = sv.FieldByIndex(f.Index)
+ }
+ _, opts := parseTag(f.Tag.Get("json"))
+ destring = opts.Contains("string")
+ }
+ }
+
+ // Read : before value.
+ if op == scanSkipSpace {
+ op = d.scanWhile(scanSkipSpace)
+ }
+ if op != scanObjectKey {
+ d.error(errPhase)
+ }
+
+ // Read value.
+ if destring {
+ d.value(reflect.ValueOf(&d.tempstr))
+ d.literalStore([]byte(d.tempstr), subv)
+ } else {
+ d.value(subv)
+ }
+ // Write value back to map;
+ // if using struct, subv points into struct already.
+ if mv.IsValid() {
+ mv.SetMapIndex(reflect.ValueOf(key), subv)
+ }
+
+ // Next token must be , or }.
+ op = d.scanWhile(scanSkipSpace)
+ if op == scanEndObject {
+ break
+ }
+ if op != scanObjectValue {
+ d.error(errPhase)
+ }
+ }
+}
+
+// literal consumes a literal from d.data[d.off-1:], decoding into the value v.
+// The first byte of the literal has been read already
+// (that's how the caller knows it's a literal).
+func (d *decodeState) literal(v reflect.Value) {
+ // All bytes inside literal return scanContinue op code.
+ start := d.off - 1
+ op := d.scanWhile(scanContinue)
+
+ // Scan read one byte too far; back up.
+ d.off--
+ d.scan.undo(op)
+
+ d.literalStore(d.data[start:d.off], v)
+}
+
+// literalStore decodes a literal stored in item into v.
+func (d *decodeState) literalStore(item []byte, v reflect.Value) {
+ // Check for unmarshaler.
+ wantptr := item[0] == 'n' // null
+ unmarshaler, pv := d.indirect(v, wantptr)
+ if unmarshaler != nil {
+ err := unmarshaler.UnmarshalJSON(item)
+ if err != nil {
+ d.error(err)
+ }
+ return
+ }
+ v = pv
+
+ switch c := item[0]; c {
+ case 'n': // null
+ switch v.Kind() {
+ default:
+ d.saveError(&UnmarshalTypeError{"null", v.Type()})
+ case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice:
+ v.Set(reflect.Zero(v.Type()))
+ }
+
+ case 't', 'f': // true, false
+ value := c == 't'
+ switch v.Kind() {
+ default:
+ d.saveError(&UnmarshalTypeError{"bool", v.Type()})
+ case reflect.Bool:
+ v.SetBool(value)
+ case reflect.Interface:
+ v.Set(reflect.ValueOf(value))
+ }
+
+ case '"': // string
+ s, ok := unquoteBytes(item)
+ if !ok {
+ d.error(errPhase)
+ }
+ switch v.Kind() {
+ default:
+ d.saveError(&UnmarshalTypeError{"string", v.Type()})
+ case reflect.Slice:
+ if v.Type() != byteSliceType {
+ d.saveError(&UnmarshalTypeError{"string", v.Type()})
+ break
+ }
+ b := make([]byte, base64.StdEncoding.DecodedLen(len(s)))
+ n, err := base64.StdEncoding.Decode(b, s)
+ if err != nil {
+ d.saveError(err)
+ break
+ }
+ v.Set(reflect.ValueOf(b[0:n]))
+ case reflect.String:
+ v.SetString(string(s))
+ case reflect.Interface:
+ v.Set(reflect.ValueOf(string(s)))
+ }
+
+ default: // number
+ if c != '-' && (c < '0' || c > '9') {
+ d.error(errPhase)
+ }
+ s := string(item)
+ switch v.Kind() {
+ default:
+ d.error(&UnmarshalTypeError{"number", v.Type()})
+ case reflect.Interface:
+ n, err := strconv.Atof64(s)
+ if err != nil {
+ d.saveError(&UnmarshalTypeError{"number " + s, v.Type()})
+ break
+ }
+ v.Set(reflect.ValueOf(n))
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ n, err := strconv.Atoi64(s)
+ if err != nil || v.OverflowInt(n) {
+ d.saveError(&UnmarshalTypeError{"number " + s, v.Type()})
+ break
+ }
+ v.SetInt(n)
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ n, err := strconv.Atoui64(s)
+ if err != nil || v.OverflowUint(n) {
+ d.saveError(&UnmarshalTypeError{"number " + s, v.Type()})
+ break
+ }
+ v.SetUint(n)
+
+ case reflect.Float32, reflect.Float64:
+ n, err := strconv.AtofN(s, v.Type().Bits())
+ if err != nil || v.OverflowFloat(n) {
+ d.saveError(&UnmarshalTypeError{"number " + s, v.Type()})
+ break
+ }
+ v.SetFloat(n)
+ }
+ }
+}
+
+// The xxxInterface routines build up a value to be stored
+// in an empty interface. They are not strictly necessary,
+// but they avoid the weight of reflection in this common case.
+
+// valueInterface is like value but returns interface{}
+func (d *decodeState) valueInterface() interface{} {
+ switch d.scanWhile(scanSkipSpace) {
+ default:
+ d.error(errPhase)
+ case scanBeginArray:
+ return d.arrayInterface()
+ case scanBeginObject:
+ return d.objectInterface()
+ case scanBeginLiteral:
+ return d.literalInterface()
+ }
+ panic("unreachable")
+}
+
+// arrayInterface is like array but returns []interface{}.
+func (d *decodeState) arrayInterface() []interface{} {
+ var v []interface{}
+ for {
+ // Look ahead for ] - can only happen on first iteration.
+ op := d.scanWhile(scanSkipSpace)
+ if op == scanEndArray {
+ break
+ }
+
+ // Back up so d.value can have the byte we just read.
+ d.off--
+ d.scan.undo(op)
+
+ v = append(v, d.valueInterface())
+
+ // Next token must be , or ].
+ op = d.scanWhile(scanSkipSpace)
+ if op == scanEndArray {
+ break
+ }
+ if op != scanArrayValue {
+ d.error(errPhase)
+ }
+ }
+ return v
+}
+
+// objectInterface is like object but returns map[string]interface{}.
+func (d *decodeState) objectInterface() map[string]interface{} {
+ m := make(map[string]interface{})
+ for {
+ // Read opening " of string key or closing }.
+ op := d.scanWhile(scanSkipSpace)
+ if op == scanEndObject {
+ // closing } - can only happen on first iteration.
+ break
+ }
+ if op != scanBeginLiteral {
+ d.error(errPhase)
+ }
+
+ // Read string key.
+ start := d.off - 1
+ op = d.scanWhile(scanContinue)
+ item := d.data[start : d.off-1]
+ key, ok := unquote(item)
+ if !ok {
+ d.error(errPhase)
+ }
+
+ // Read : before value.
+ if op == scanSkipSpace {
+ op = d.scanWhile(scanSkipSpace)
+ }
+ if op != scanObjectKey {
+ d.error(errPhase)
+ }
+
+ // Read value.
+ m[key] = d.valueInterface()
+
+ // Next token must be , or }.
+ op = d.scanWhile(scanSkipSpace)
+ if op == scanEndObject {
+ break
+ }
+ if op != scanObjectValue {
+ d.error(errPhase)
+ }
+ }
+ return m
+}
+
+// literalInterface is like literal but returns an interface value.
+func (d *decodeState) literalInterface() interface{} {
+ // All bytes inside literal return scanContinue op code.
+ start := d.off - 1
+ op := d.scanWhile(scanContinue)
+
+ // Scan read one byte too far; back up.
+ d.off--
+ d.scan.undo(op)
+ item := d.data[start:d.off]
+
+ switch c := item[0]; c {
+ case 'n': // null
+ return nil
+
+ case 't', 'f': // true, false
+ return c == 't'
+
+ case '"': // string
+ s, ok := unquote(item)
+ if !ok {
+ d.error(errPhase)
+ }
+ return s
+
+ default: // number
+ if c != '-' && (c < '0' || c > '9') {
+ d.error(errPhase)
+ }
+ n, err := strconv.Atof64(string(item))
+ if err != nil {
+ d.saveError(&UnmarshalTypeError{"number " + string(item), reflect.TypeOf(0.0)})
+ }
+ return n
+ }
+ panic("unreachable")
+}
+
+// getu4 decodes \uXXXX from the beginning of s, returning the hex value,
+// or it returns -1.
+func getu4(s []byte) rune {
+ if len(s) < 6 || s[0] != '\\' || s[1] != 'u' {
+ return -1
+ }
+ r, err := strconv.Btoui64(string(s[2:6]), 16)
+ if err != nil {
+ return -1
+ }
+ return rune(r)
+}
+
+// unquote converts a quoted JSON string literal s into an actual string t.
+// The rules are different from Go's, so strconv.Unquote cannot be used.
+func unquote(s []byte) (t string, ok bool) {
+ s, ok = unquoteBytes(s)
+ t = string(s)
+ return
+}
+
+func unquoteBytes(s []byte) (t []byte, ok bool) {
+ if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' {
+ return
+ }
+ s = s[1 : len(s)-1]
+
+ // Check for unusual characters. If there are none,
+ // then no unquoting is needed, so return a slice of the
+ // original bytes.
+ r := 0
+ for r < len(s) {
+ c := s[r]
+ if c == '\\' || c == '"' || c < ' ' {
+ break
+ }
+ if c < utf8.RuneSelf {
+ r++
+ continue
+ }
+ rr, size := utf8.DecodeRune(s[r:])
+ if rr == utf8.RuneError && size == 1 {
+ break
+ }
+ r += size
+ }
+ if r == len(s) {
+ return s, true
+ }
+
+ b := make([]byte, len(s)+2*utf8.UTFMax)
+ w := copy(b, s[0:r])
+ for r < len(s) {
+ // Out of room? Can only happen if s is full of
+ // malformed UTF-8 and we're replacing each
+ // byte with RuneError.
+ if w >= len(b)-2*utf8.UTFMax {
+ nb := make([]byte, (len(b)+utf8.UTFMax)*2)
+ copy(nb, b[0:w])
+ b = nb
+ }
+ switch c := s[r]; {
+ case c == '\\':
+ r++
+ if r >= len(s) {
+ return
+ }
+ switch s[r] {
+ default:
+ return
+ case '"', '\\', '/', '\'':
+ b[w] = s[r]
+ r++
+ w++
+ case 'b':
+ b[w] = '\b'
+ r++
+ w++
+ case 'f':
+ b[w] = '\f'
+ r++
+ w++
+ case 'n':
+ b[w] = '\n'
+ r++
+ w++
+ case 'r':
+ b[w] = '\r'
+ r++
+ w++
+ case 't':
+ b[w] = '\t'
+ r++
+ w++
+ case 'u':
+ r--
+ rr := getu4(s[r:])
+ if rr < 0 {
+ return
+ }
+ r += 6
+ if utf16.IsSurrogate(rr) {
+ rr1 := getu4(s[r:])
+ if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar {
+ // A valid pair; consume.
+ r += 6
+ w += utf8.EncodeRune(b[w:], dec)
+ break
+ }
+ // Invalid surrogate; fall back to replacement rune.
+ rr = unicode.ReplacementChar
+ }
+ w += utf8.EncodeRune(b[w:], rr)
+ }
+
+ // Quote, control characters are invalid.
+ case c == '"', c < ' ':
+ return
+
+ // ASCII
+ case c < utf8.RuneSelf:
+ b[w] = c
+ r++
+ w++
+
+ // Coerce to well-formed UTF-8.
+ default:
+ rr, size := utf8.DecodeRune(s[r:])
+ r += size
+ w += utf8.EncodeRune(b[w:], rr)
+ }
+ }
+ return b[0:w], true
+}
--- /dev/null
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+ "bytes"
+ "reflect"
+ "strings"
+ "testing"
+)
+
+type T struct {
+ X string
+ Y int
+ Z int `json:"-"`
+}
+
+type tx struct {
+ x int
+}
+
+var txType = reflect.TypeOf((*tx)(nil)).Elem()
+
+// A type that can unmarshal itself.
+
+type unmarshaler struct {
+ T bool
+}
+
+func (u *unmarshaler) UnmarshalJSON(b []byte) error {
+	*u = unmarshaler{true} // All we need to see that UnmarshalJSON is called.
+ return nil
+}
+
+type ustruct struct {
+ M unmarshaler
+}
+
+var (
+	um0, um1 unmarshaler // targets of unmarshaling
+ ump = &um1
+ umtrue = unmarshaler{true}
+ umslice = []unmarshaler{{true}}
+ umslicep = new([]unmarshaler)
+ umstruct = ustruct{unmarshaler{true}}
+)
+
+type unmarshalTest struct {
+ in string
+ ptr interface{}
+ out interface{}
+ err error
+}
+
+var unmarshalTests = []unmarshalTest{
+ // basic types
+ {`true`, new(bool), true, nil},
+ {`1`, new(int), 1, nil},
+ {`1.2`, new(float64), 1.2, nil},
+ {`-5`, new(int16), int16(-5), nil},
+ {`"a\u1234"`, new(string), "a\u1234", nil},
+ {`"http:\/\/"`, new(string), "http://", nil},
+ {`"g-clef: \uD834\uDD1E"`, new(string), "g-clef: \U0001D11E", nil},
+ {`"invalid: \uD834x\uDD1E"`, new(string), "invalid: \uFFFDx\uFFFD", nil},
+ {"null", new(interface{}), nil, nil},
+ {`{"X": [1,2,3], "Y": 4}`, new(T), T{Y: 4}, &UnmarshalTypeError{"array", reflect.TypeOf("")}},
+ {`{"x": 1}`, new(tx), tx{}, &UnmarshalFieldError{"x", txType, txType.Field(0)}},
+
+ // Z has a "-" tag.
+ {`{"Y": 1, "Z": 2}`, new(T), T{Y: 1}, nil},
+
+ // syntax errors
+ {`{"X": "foo", "Y"}`, nil, nil, &SyntaxError{"invalid character '}' after object key", 17}},
+
+ // composite tests
+ {allValueIndent, new(All), allValue, nil},
+ {allValueCompact, new(All), allValue, nil},
+ {allValueIndent, new(*All), &allValue, nil},
+ {allValueCompact, new(*All), &allValue, nil},
+ {pallValueIndent, new(All), pallValue, nil},
+ {pallValueCompact, new(All), pallValue, nil},
+ {pallValueIndent, new(*All), &pallValue, nil},
+ {pallValueCompact, new(*All), &pallValue, nil},
+
+ // unmarshal interface test
+ {`{"T":false}`, &um0, umtrue, nil}, // use "false" so test will fail if custom unmarshaler is not called
+ {`{"T":false}`, &ump, &umtrue, nil},
+ {`[{"T":false}]`, &umslice, umslice, nil},
+ {`[{"T":false}]`, &umslicep, &umslice, nil},
+ {`{"M":{"T":false}}`, &umstruct, umstruct, nil},
+}
+
+func TestMarshal(t *testing.T) {
+ b, err := Marshal(allValue)
+ if err != nil {
+ t.Fatalf("Marshal allValue: %v", err)
+ }
+ if string(b) != allValueCompact {
+ t.Errorf("Marshal allValueCompact")
+ diff(t, b, []byte(allValueCompact))
+ return
+ }
+
+ b, err = Marshal(pallValue)
+ if err != nil {
+ t.Fatalf("Marshal pallValue: %v", err)
+ }
+ if string(b) != pallValueCompact {
+ t.Errorf("Marshal pallValueCompact")
+ diff(t, b, []byte(pallValueCompact))
+ return
+ }
+}
+
+func TestMarshalBadUTF8(t *testing.T) {
+ s := "hello\xffworld"
+ b, err := Marshal(s)
+ if err == nil {
+ t.Fatal("Marshal bad UTF8: no error")
+ }
+ if len(b) != 0 {
+ t.Fatal("Marshal returned data")
+ }
+ if _, ok := err.(*InvalidUTF8Error); !ok {
+ t.Fatalf("Marshal did not return InvalidUTF8Error: %T %v", err, err)
+ }
+}
+
+func TestUnmarshal(t *testing.T) {
+ for i, tt := range unmarshalTests {
+ var scan scanner
+ in := []byte(tt.in)
+ if err := checkValid(in, &scan); err != nil {
+ if !reflect.DeepEqual(err, tt.err) {
+ t.Errorf("#%d: checkValid: %#v", i, err)
+ continue
+ }
+ }
+ if tt.ptr == nil {
+ continue
+ }
+ // v = new(right-type)
+ v := reflect.New(reflect.TypeOf(tt.ptr).Elem())
+ if err := Unmarshal([]byte(in), v.Interface()); !reflect.DeepEqual(err, tt.err) {
+ t.Errorf("#%d: %v want %v", i, err, tt.err)
+ continue
+ }
+ if !reflect.DeepEqual(v.Elem().Interface(), tt.out) {
+ t.Errorf("#%d: mismatch\nhave: %#+v\nwant: %#+v", i, v.Elem().Interface(), tt.out)
+ data, _ := Marshal(v.Elem().Interface())
+ println(string(data))
+ data, _ = Marshal(tt.out)
+ println(string(data))
+ continue
+ }
+ }
+}
+
+func TestUnmarshalMarshal(t *testing.T) {
+ initBig()
+ var v interface{}
+ if err := Unmarshal(jsonBig, &v); err != nil {
+ t.Fatalf("Unmarshal: %v", err)
+ }
+ b, err := Marshal(v)
+ if err != nil {
+ t.Fatalf("Marshal: %v", err)
+ }
+ if bytes.Compare(jsonBig, b) != 0 {
+ t.Errorf("Marshal jsonBig")
+ diff(t, b, jsonBig)
+ return
+ }
+}
+
+func TestLargeByteSlice(t *testing.T) {
+ s0 := make([]byte, 2000)
+ for i := range s0 {
+ s0[i] = byte(i)
+ }
+ b, err := Marshal(s0)
+ if err != nil {
+ t.Fatalf("Marshal: %v", err)
+ }
+ var s1 []byte
+ if err := Unmarshal(b, &s1); err != nil {
+ t.Fatalf("Unmarshal: %v", err)
+ }
+ if bytes.Compare(s0, s1) != 0 {
+ t.Errorf("Marshal large byte slice")
+ diff(t, s0, s1)
+ }
+}
+
+type Xint struct {
+ X int
+}
+
+func TestUnmarshalInterface(t *testing.T) {
+ var xint Xint
+ var i interface{} = &xint
+ if err := Unmarshal([]byte(`{"X":1}`), &i); err != nil {
+ t.Fatalf("Unmarshal: %v", err)
+ }
+ if xint.X != 1 {
+ t.Fatalf("Did not write to xint")
+ }
+}
+
+func TestUnmarshalPtrPtr(t *testing.T) {
+ var xint Xint
+ pxint := &xint
+ if err := Unmarshal([]byte(`{"X":1}`), &pxint); err != nil {
+ t.Fatalf("Unmarshal: %v", err)
+ }
+ if xint.X != 1 {
+ t.Fatalf("Did not write to xint")
+ }
+}
+
+func TestEscape(t *testing.T) {
+ const input = `"foobar"<html>`
+ const expected = `"\"foobar\"\u003chtml\u003e"`
+ b, err := Marshal(input)
+ if err != nil {
+ t.Fatalf("Marshal error: %v", err)
+ }
+ if s := string(b); s != expected {
+ t.Errorf("Encoding of [%s] was [%s], want [%s]", input, s, expected)
+ }
+}
+
+func TestHTMLEscape(t *testing.T) {
+ b, err := MarshalForHTML("foobarbaz<>&quux")
+ if err != nil {
+ t.Fatalf("MarshalForHTML error: %v", err)
+ }
+ if !bytes.Equal(b, []byte(`"foobarbaz\u003c\u003e\u0026quux"`)) {
+ t.Fatalf("Unexpected encoding of \"<>&\": %s", b)
+ }
+}
+
+func noSpace(c rune) rune {
+ if isSpace(c) {
+ return -1
+ }
+ return c
+}
+
+type All struct {
+ Bool bool
+ Int int
+ Int8 int8
+ Int16 int16
+ Int32 int32
+ Int64 int64
+ Uint uint
+ Uint8 uint8
+ Uint16 uint16
+ Uint32 uint32
+ Uint64 uint64
+ Uintptr uintptr
+ Float32 float32
+ Float64 float64
+
+ Foo string `json:"bar"`
+ Foo2 string `json:"bar2,dummyopt"`
+
+ IntStr int64 `json:",string"`
+
+ PBool *bool
+ PInt *int
+ PInt8 *int8
+ PInt16 *int16
+ PInt32 *int32
+ PInt64 *int64
+ PUint *uint
+ PUint8 *uint8
+ PUint16 *uint16
+ PUint32 *uint32
+ PUint64 *uint64
+ PUintptr *uintptr
+ PFloat32 *float32
+ PFloat64 *float64
+
+ String string
+ PString *string
+
+ Map map[string]Small
+ MapP map[string]*Small
+ PMap *map[string]Small
+ PMapP *map[string]*Small
+
+ EmptyMap map[string]Small
+ NilMap map[string]Small
+
+ Slice []Small
+ SliceP []*Small
+ PSlice *[]Small
+ PSliceP *[]*Small
+
+ EmptySlice []Small
+ NilSlice []Small
+
+ StringSlice []string
+ ByteSlice []byte
+
+ Small Small
+ PSmall *Small
+ PPSmall **Small
+
+ Interface interface{}
+ PInterface *interface{}
+
+ unexported int
+}
+
+type Small struct {
+ Tag string
+}
+
+var allValue = All{
+ Bool: true,
+ Int: 2,
+ Int8: 3,
+ Int16: 4,
+ Int32: 5,
+ Int64: 6,
+ Uint: 7,
+ Uint8: 8,
+ Uint16: 9,
+ Uint32: 10,
+ Uint64: 11,
+ Uintptr: 12,
+ Float32: 14.1,
+ Float64: 15.1,
+ Foo: "foo",
+ Foo2: "foo2",
+ IntStr: 42,
+ String: "16",
+ Map: map[string]Small{
+ "17": {Tag: "tag17"},
+ "18": {Tag: "tag18"},
+ },
+ MapP: map[string]*Small{
+ "19": &Small{Tag: "tag19"},
+ "20": nil,
+ },
+ EmptyMap: map[string]Small{},
+ Slice: []Small{{Tag: "tag20"}, {Tag: "tag21"}},
+ SliceP: []*Small{&Small{Tag: "tag22"}, nil, &Small{Tag: "tag23"}},
+ EmptySlice: []Small{},
+ StringSlice: []string{"str24", "str25", "str26"},
+ ByteSlice: []byte{27, 28, 29},
+ Small: Small{Tag: "tag30"},
+ PSmall: &Small{Tag: "tag31"},
+ Interface: 5.2,
+}
+
+var pallValue = All{
+ PBool: &allValue.Bool,
+ PInt: &allValue.Int,
+ PInt8: &allValue.Int8,
+ PInt16: &allValue.Int16,
+ PInt32: &allValue.Int32,
+ PInt64: &allValue.Int64,
+ PUint: &allValue.Uint,
+ PUint8: &allValue.Uint8,
+ PUint16: &allValue.Uint16,
+ PUint32: &allValue.Uint32,
+ PUint64: &allValue.Uint64,
+ PUintptr: &allValue.Uintptr,
+ PFloat32: &allValue.Float32,
+ PFloat64: &allValue.Float64,
+ PString: &allValue.String,
+ PMap: &allValue.Map,
+ PMapP: &allValue.MapP,
+ PSlice: &allValue.Slice,
+ PSliceP: &allValue.SliceP,
+ PPSmall: &allValue.PSmall,
+ PInterface: &allValue.Interface,
+}
+
+var allValueIndent = `{
+ "Bool": true,
+ "Int": 2,
+ "Int8": 3,
+ "Int16": 4,
+ "Int32": 5,
+ "Int64": 6,
+ "Uint": 7,
+ "Uint8": 8,
+ "Uint16": 9,
+ "Uint32": 10,
+ "Uint64": 11,
+ "Uintptr": 12,
+ "Float32": 14.1,
+ "Float64": 15.1,
+ "bar": "foo",
+ "bar2": "foo2",
+ "IntStr": "42",
+ "PBool": null,
+ "PInt": null,
+ "PInt8": null,
+ "PInt16": null,
+ "PInt32": null,
+ "PInt64": null,
+ "PUint": null,
+ "PUint8": null,
+ "PUint16": null,
+ "PUint32": null,
+ "PUint64": null,
+ "PUintptr": null,
+ "PFloat32": null,
+ "PFloat64": null,
+ "String": "16",
+ "PString": null,
+ "Map": {
+ "17": {
+ "Tag": "tag17"
+ },
+ "18": {
+ "Tag": "tag18"
+ }
+ },
+ "MapP": {
+ "19": {
+ "Tag": "tag19"
+ },
+ "20": null
+ },
+ "PMap": null,
+ "PMapP": null,
+ "EmptyMap": {},
+ "NilMap": null,
+ "Slice": [
+ {
+ "Tag": "tag20"
+ },
+ {
+ "Tag": "tag21"
+ }
+ ],
+ "SliceP": [
+ {
+ "Tag": "tag22"
+ },
+ null,
+ {
+ "Tag": "tag23"
+ }
+ ],
+ "PSlice": null,
+ "PSliceP": null,
+ "EmptySlice": [],
+ "NilSlice": null,
+ "StringSlice": [
+ "str24",
+ "str25",
+ "str26"
+ ],
+ "ByteSlice": "Gxwd",
+ "Small": {
+ "Tag": "tag30"
+ },
+ "PSmall": {
+ "Tag": "tag31"
+ },
+ "PPSmall": null,
+ "Interface": 5.2,
+ "PInterface": null
+}`
+
+var allValueCompact = strings.Map(noSpace, allValueIndent)
+
+var pallValueIndent = `{
+ "Bool": false,
+ "Int": 0,
+ "Int8": 0,
+ "Int16": 0,
+ "Int32": 0,
+ "Int64": 0,
+ "Uint": 0,
+ "Uint8": 0,
+ "Uint16": 0,
+ "Uint32": 0,
+ "Uint64": 0,
+ "Uintptr": 0,
+ "Float32": 0,
+ "Float64": 0,
+ "bar": "",
+ "bar2": "",
+ "IntStr": "0",
+ "PBool": true,
+ "PInt": 2,
+ "PInt8": 3,
+ "PInt16": 4,
+ "PInt32": 5,
+ "PInt64": 6,
+ "PUint": 7,
+ "PUint8": 8,
+ "PUint16": 9,
+ "PUint32": 10,
+ "PUint64": 11,
+ "PUintptr": 12,
+ "PFloat32": 14.1,
+ "PFloat64": 15.1,
+ "String": "",
+ "PString": "16",
+ "Map": null,
+ "MapP": null,
+ "PMap": {
+ "17": {
+ "Tag": "tag17"
+ },
+ "18": {
+ "Tag": "tag18"
+ }
+ },
+ "PMapP": {
+ "19": {
+ "Tag": "tag19"
+ },
+ "20": null
+ },
+ "EmptyMap": null,
+ "NilMap": null,
+ "Slice": null,
+ "SliceP": null,
+ "PSlice": [
+ {
+ "Tag": "tag20"
+ },
+ {
+ "Tag": "tag21"
+ }
+ ],
+ "PSliceP": [
+ {
+ "Tag": "tag22"
+ },
+ null,
+ {
+ "Tag": "tag23"
+ }
+ ],
+ "EmptySlice": null,
+ "NilSlice": null,
+ "StringSlice": null,
+ "ByteSlice": null,
+ "Small": {
+ "Tag": ""
+ },
+ "PSmall": null,
+ "PPSmall": {
+ "Tag": "tag31"
+ },
+ "Interface": null,
+ "PInterface": 5.2
+}`
+
+var pallValueCompact = strings.Map(noSpace, pallValueIndent)
--- /dev/null
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package json implements encoding and decoding of JSON objects as defined in
+// RFC 4627.
+//
+// See "JSON and Go" for an introduction to this package:
+// http://blog.golang.org/2011/01/json-and-go.html
+package json
+
+import (
+ "bytes"
+ "encoding/base64"
+ "reflect"
+ "runtime"
+ "sort"
+ "strconv"
+ "unicode"
+ "unicode/utf8"
+)
+
+// Marshal returns the JSON encoding of v.
+//
+// Marshal traverses the value v recursively.
+// If an encountered value implements the Marshaler interface
+// and is not a nil pointer, Marshal calls its MarshalJSON method
+// to produce JSON. The nil pointer exception is not strictly necessary
+// but mimics a similar, necessary exception in the behavior of
+// UnmarshalJSON.
+//
+// Otherwise, Marshal uses the following type-dependent default encodings:
+//
+// Boolean values encode as JSON booleans.
+//
+// Floating point and integer values encode as JSON numbers.
+//
+// String values encode as JSON strings, with each invalid UTF-8 sequence
+// replaced by the encoding of the Unicode replacement character U+FFFD.
+//
+// Array and slice values encode as JSON arrays, except that
+// []byte encodes as a base64-encoded string.
+//
+// Struct values encode as JSON objects. Each exported struct field
+// becomes a member of the object unless
+// - the field's tag is "-", or
+// - the field is empty and its tag specifies the "omitempty" option.
+// The empty values are false, 0, any
+// nil pointer or interface value, and any array, slice, map, or string of
+// length zero. The object's default key string is the struct field name
+// but can be specified in the struct field's tag value. The "json" key in
+// struct field's tag value is the key name, followed by an optional comma
+// and options. Examples:
+//
+// // Field is ignored by this package.
+// Field int `json:"-"`
+//
+// // Field appears in JSON as key "myName".
+// Field int `json:"myName"`
+//
+// // Field appears in JSON as key "myName" and
+// // the field is omitted from the object if its value is empty,
+// // as defined above.
+// Field int `json:"myName,omitempty"`
+//
+// // Field appears in JSON as key "Field" (the default), but
+// // the field is skipped if empty.
+// // Note the leading comma.
+// Field int `json:",omitempty"`
+//
+// The "string" option signals that a field is stored as JSON inside a
+// JSON-encoded string. This extra level of encoding is sometimes
+// used when communicating with JavaScript programs:
+//
+// Int64String int64 `json:",string"`
+//
+// The key name will be used if it's a non-empty string consisting of
+// only Unicode letters, digits, dollar signs, hyphens, and underscores.
+//
+// Map values encode as JSON objects.
+// The map's key type must be string; the object keys are used directly
+// as map keys.
+//
+// Pointer values encode as the value pointed to.
+// A nil pointer encodes as the null JSON object.
+//
+// Interface values encode as the value contained in the interface.
+// A nil interface value encodes as the null JSON object.
+//
+// Channel, complex, and function values cannot be encoded in JSON.
+// Attempting to encode such a value causes Marshal to return
+// an UnsupportedTypeError.
+//
+// JSON cannot represent cyclic data structures and Marshal does not
+// handle them. Passing cyclic structures to Marshal will result in
+// an infinite recursion.
+//
+func Marshal(v interface{}) ([]byte, error) {
+ e := &encodeState{}
+ err := e.marshal(v)
+ if err != nil {
+ return nil, err
+ }
+ return e.Bytes(), nil
+}
+
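+// Illustrative sketch (not part of the original source): encoding a struct
+// using the field tags described above. The Item type is hypothetical.
+//
+//	type Item struct {
+//		ID    int    `json:"id"`
+//		Name  string `json:"name,omitempty"`
+//		Price int    `json:"-"`
+//	}
+//
+//	b, err := Marshal(Item{ID: 7})
+//	// b is []byte(`{"id":7}`): Name is dropped because it is empty and
+//	// tagged omitempty; Price is always skipped because of its "-" tag.
+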
+// MarshalIndent is like Marshal but applies Indent to format the output.
+func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
+ b, err := Marshal(v)
+ if err != nil {
+ return nil, err
+ }
+ var buf bytes.Buffer
+ err = Indent(&buf, b, prefix, indent)
+ if err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+// MarshalForHTML is like Marshal but applies HTMLEscape to the output.
+func MarshalForHTML(v interface{}) ([]byte, error) {
+ b, err := Marshal(v)
+ if err != nil {
+ return nil, err
+ }
+ var buf bytes.Buffer
+ HTMLEscape(&buf, b)
+ return buf.Bytes(), nil
+}
+
+// HTMLEscape appends to dst the JSON-encoded src with <, >, and &
+// characters inside string literals changed to \u003c, \u003e, \u0026
+// so that the JSON will be safe to embed inside HTML <script> tags.
+// For historical reasons, web browsers don't honor standard HTML
+// escaping within <script> tags, so an alternative JSON encoding must
+// be used.
+func HTMLEscape(dst *bytes.Buffer, src []byte) {
+ // < > & can only appear in string literals,
+ // so just scan the string one byte at a time.
+ start := 0
+ for i, c := range src {
+ if c == '<' || c == '>' || c == '&' {
+ if start < i {
+ dst.Write(src[start:i])
+ }
+ dst.WriteString(`\u00`)
+ dst.WriteByte(hex[c>>4])
+ dst.WriteByte(hex[c&0xF])
+ start = i + 1
+ }
+ }
+ if start < len(src) {
+ dst.Write(src[start:])
+ }
+}
+
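+// Illustrative sketch (not part of the original source): escaping
+// already-encoded JSON before embedding it in a <script> element.
+//
+//	var buf bytes.Buffer
+//	HTMLEscape(&buf, []byte(`{"title":"<b>hi</b>"}`))
+//	// buf now holds {"title":"\u003cb\u003ehi\u003c/b\u003e"}
+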
+// Marshaler is the interface implemented by objects that
+// can marshal themselves into valid JSON.
+type Marshaler interface {
+ MarshalJSON() ([]byte, error)
+}
+
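+// An UnsupportedTypeError is returned by Marshal when attempting
+// to encode an unsupported value type.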
+type UnsupportedTypeError struct {
+ Type reflect.Type
+}
+
+func (e *UnsupportedTypeError) Error() string {
+ return "json: unsupported type: " + e.Type.String()
+}
+
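+// An InvalidUTF8Error is returned by Marshal when attempting
+// to encode a string value that contains invalid UTF-8 sequences.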
+type InvalidUTF8Error struct {
+ S string
+}
+
+func (e *InvalidUTF8Error) Error() string {
+ return "json: invalid UTF-8 in string: " + strconv.Quote(e.S)
+}
+
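+// A MarshalerError is returned by Marshal when a type's MarshalJSON
+// method fails or produces invalid JSON.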
+type MarshalerError struct {
+ Type reflect.Type
+ Err error
+}
+
+func (e *MarshalerError) Error() string {
+ return "json: error calling MarshalJSON for type " + e.Type.String() + ": " + e.Err.Error()
+}
+
+type interfaceOrPtrValue interface {
+ IsNil() bool
+ Elem() reflect.Value
+}
+
+var hex = "0123456789abcdef"
+
+// An encodeState encodes JSON into a bytes.Buffer.
+type encodeState struct {
+ bytes.Buffer // accumulated output
+}
+
+func (e *encodeState) marshal(v interface{}) (err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ if _, ok := r.(runtime.Error); ok {
+ panic(r)
+ }
+ err = r.(error)
+ }
+ }()
+ e.reflectValue(reflect.ValueOf(v))
+ return nil
+}
+
+func (e *encodeState) error(err error) {
+ panic(err)
+}
+
+var byteSliceType = reflect.TypeOf([]byte(nil))
+
+func isEmptyValue(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+ return v.Len() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Interface, reflect.Ptr:
+ return v.IsNil()
+ }
+ return false
+}
+
+func (e *encodeState) reflectValue(v reflect.Value) {
+ e.reflectValueQuoted(v, false)
+}
+
+// reflectValueQuoted writes the value in v to the output.
+// If quoted is true, the serialization is wrapped in a JSON string.
+func (e *encodeState) reflectValueQuoted(v reflect.Value, quoted bool) {
+ if !v.IsValid() {
+ e.WriteString("null")
+ return
+ }
+
+ if j, ok := v.Interface().(Marshaler); ok && (v.Kind() != reflect.Ptr || !v.IsNil()) {
+ b, err := j.MarshalJSON()
+ if err == nil {
+ // copy JSON into buffer, checking validity.
+ err = Compact(&e.Buffer, b)
+ }
+ if err != nil {
+ e.error(&MarshalerError{v.Type(), err})
+ }
+ return
+ }
+
+ writeString := (*encodeState).WriteString
+ if quoted {
+ writeString = (*encodeState).string
+ }
+
+ switch v.Kind() {
+ case reflect.Bool:
+ x := v.Bool()
+ if x {
+ writeString(e, "true")
+ } else {
+ writeString(e, "false")
+ }
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ writeString(e, strconv.Itoa64(v.Int()))
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ writeString(e, strconv.Uitoa64(v.Uint()))
+
+ case reflect.Float32, reflect.Float64:
+ writeString(e, strconv.FtoaN(v.Float(), 'g', -1, v.Type().Bits()))
+
+ case reflect.String:
+ if quoted {
+ sb, err := Marshal(v.String())
+ if err != nil {
+ e.error(err)
+ }
+ e.string(string(sb))
+ } else {
+ e.string(v.String())
+ }
+
+ case reflect.Struct:
+ e.WriteByte('{')
+ t := v.Type()
+ n := v.NumField()
+ first := true
+ for i := 0; i < n; i++ {
+ f := t.Field(i)
+ if f.PkgPath != "" {
+ continue
+ }
+ tag, omitEmpty, quoted := f.Name, false, false
+ if tv := f.Tag.Get("json"); tv != "" {
+ if tv == "-" {
+ continue
+ }
+ name, opts := parseTag(tv)
+ if isValidTag(name) {
+ tag = name
+ }
+ omitEmpty = opts.Contains("omitempty")
+ quoted = opts.Contains("string")
+ }
+ fieldValue := v.Field(i)
+ if omitEmpty && isEmptyValue(fieldValue) {
+ continue
+ }
+ if first {
+ first = false
+ } else {
+ e.WriteByte(',')
+ }
+ e.string(tag)
+ e.WriteByte(':')
+ e.reflectValueQuoted(fieldValue, quoted)
+ }
+ e.WriteByte('}')
+
+ case reflect.Map:
+ if v.Type().Key().Kind() != reflect.String {
+ e.error(&UnsupportedTypeError{v.Type()})
+ }
+ if v.IsNil() {
+ e.WriteString("null")
+ break
+ }
+ e.WriteByte('{')
+ var sv stringValues = v.MapKeys()
+ sort.Sort(sv)
+ for i, k := range sv {
+ if i > 0 {
+ e.WriteByte(',')
+ }
+ e.string(k.String())
+ e.WriteByte(':')
+ e.reflectValue(v.MapIndex(k))
+ }
+ e.WriteByte('}')
+
+ case reflect.Slice:
+ if v.IsNil() {
+ e.WriteString("null")
+ break
+ }
+ // Slices can be marshalled as nil, but otherwise are handled
+ // as arrays.
+ fallthrough
+ case reflect.Array:
+ if v.Type() == byteSliceType {
+ e.WriteByte('"')
+ s := v.Interface().([]byte)
+ if len(s) < 1024 {
+ // for small buffers, using Encode directly is much faster.
+ dst := make([]byte, base64.StdEncoding.EncodedLen(len(s)))
+ base64.StdEncoding.Encode(dst, s)
+ e.Write(dst)
+ } else {
+ // for large buffers, avoid unnecessary extra temporary
+ // buffer space.
+ enc := base64.NewEncoder(base64.StdEncoding, e)
+ enc.Write(s)
+ enc.Close()
+ }
+ e.WriteByte('"')
+ break
+ }
+ e.WriteByte('[')
+ n := v.Len()
+ for i := 0; i < n; i++ {
+ if i > 0 {
+ e.WriteByte(',')
+ }
+ e.reflectValue(v.Index(i))
+ }
+ e.WriteByte(']')
+
+ case reflect.Interface, reflect.Ptr:
+ if v.IsNil() {
+ e.WriteString("null")
+ return
+ }
+ e.reflectValue(v.Elem())
+
+ default:
+ e.error(&UnsupportedTypeError{v.Type()})
+ }
+ return
+}
+
+func isValidTag(s string) bool {
+ if s == "" {
+ return false
+ }
+ for _, c := range s {
+ if c != '$' && c != '-' && c != '_' && !unicode.IsLetter(c) && !unicode.IsDigit(c) {
+ return false
+ }
+ }
+ return true
+}
+
+// stringValues is a slice of reflect.Value holding *reflect.StringValue.
+// It implements the methods to sort by string.
+type stringValues []reflect.Value
+
+func (sv stringValues) Len() int { return len(sv) }
+func (sv stringValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] }
+func (sv stringValues) Less(i, j int) bool { return sv.get(i) < sv.get(j) }
+func (sv stringValues) get(i int) string { return sv[i].String() }
+
+func (e *encodeState) string(s string) (int, error) {
+ len0 := e.Len()
+ e.WriteByte('"')
+ start := 0
+ for i := 0; i < len(s); {
+ if b := s[i]; b < utf8.RuneSelf {
+ if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' {
+ i++
+ continue
+ }
+ if start < i {
+ e.WriteString(s[start:i])
+ }
+ switch b {
+ case '\\', '"':
+ e.WriteByte('\\')
+ e.WriteByte(b)
+ case '\n':
+ e.WriteByte('\\')
+ e.WriteByte('n')
+ case '\r':
+ e.WriteByte('\\')
+ e.WriteByte('r')
+ default:
+ // This encodes bytes < 0x20 except for \n and \r,
+ // as well as < and >. The latter are escaped because they
+ // can lead to security holes when user-controlled strings
+ // are rendered into JSON and served to some browsers.
+ e.WriteString(`\u00`)
+ e.WriteByte(hex[b>>4])
+ e.WriteByte(hex[b&0xF])
+ }
+ i++
+ start = i
+ continue
+ }
+ c, size := utf8.DecodeRuneInString(s[i:])
+ if c == utf8.RuneError && size == 1 {
+ e.error(&InvalidUTF8Error{s})
+ }
+ i += size
+ }
+ if start < len(s) {
+ e.WriteString(s[start:])
+ }
+ e.WriteByte('"')
+ return e.Len() - len0, nil
+}
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+ "bytes"
+ "reflect"
+ "testing"
+)
+
+type Optionals struct {
+ Sr string `json:"sr"`
+ So string `json:"so,omitempty"`
+ Sw string `json:"-"`
+
+ Ir int `json:"omitempty"` // actually named omitempty, not an option
+ Io int `json:"io,omitempty"`
+
+ Slr []string `json:"slr,random"`
+ Slo []string `json:"slo,omitempty"`
+
+ Mr map[string]interface{} `json:"mr"`
+ Mo map[string]interface{} `json:",omitempty"`
+}
+
+var optionalsExpected = `{
+ "sr": "",
+ "omitempty": 0,
+ "slr": null,
+ "mr": {}
+}`
+
+func TestOmitEmpty(t *testing.T) {
+ var o Optionals
+ o.Sw = "something"
+ o.Mr = map[string]interface{}{}
+ o.Mo = map[string]interface{}{}
+
+ got, err := MarshalIndent(&o, "", " ")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got := string(got); got != optionalsExpected {
+ t.Errorf(" got: %s\nwant: %s\n", got, optionalsExpected)
+ }
+}
+
+type StringTag struct {
+ BoolStr bool `json:",string"`
+ IntStr int64 `json:",string"`
+ StrStr string `json:",string"`
+}
+
+var stringTagExpected = `{
+ "BoolStr": "true",
+ "IntStr": "42",
+ "StrStr": "\"xzbit\""
+}`
+
+func TestStringTag(t *testing.T) {
+ var s StringTag
+ s.BoolStr = true
+ s.IntStr = 42
+ s.StrStr = "xzbit"
+ got, err := MarshalIndent(&s, "", " ")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got := string(got); got != stringTagExpected {
+ t.Fatalf(" got: %s\nwant: %s\n", got, stringTagExpected)
+ }
+
+ // Verify that it round-trips.
+ var s2 StringTag
+ err = NewDecoder(bytes.NewBuffer(got)).Decode(&s2)
+ if err != nil {
+ t.Fatalf("Decode: %v", err)
+ }
+ if !reflect.DeepEqual(s, s2) {
+ t.Fatalf("decode didn't match.\nsource: %#v\nEncoded as:\n%s\ndecode: %#v", s, string(got), s2)
+ }
+}
--- /dev/null
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import "bytes"
+
+// Compact appends to dst the JSON-encoded src with
+// insignificant space characters elided.
+func Compact(dst *bytes.Buffer, src []byte) error {
+ origLen := dst.Len()
+ var scan scanner
+ scan.reset()
+ start := 0
+ for i, c := range src {
+ v := scan.step(&scan, int(c))
+ if v >= scanSkipSpace {
+ if v == scanError {
+ break
+ }
+ if start < i {
+ dst.Write(src[start:i])
+ }
+ start = i + 1
+ }
+ }
+ if scan.eof() == scanError {
+ dst.Truncate(origLen)
+ return scan.err
+ }
+ if start < len(src) {
+ dst.Write(src[start:])
+ }
+ return nil
+}
+
+func newline(dst *bytes.Buffer, prefix, indent string, depth int) {
+ dst.WriteByte('\n')
+ dst.WriteString(prefix)
+ for i := 0; i < depth; i++ {
+ dst.WriteString(indent)
+ }
+}
+
+// Indent appends to dst an indented form of the JSON-encoded src.
+// Each element in a JSON object or array begins on a new,
+// indented line beginning with prefix followed by one or more
+// copies of indent according to the indentation nesting.
+// The data appended to dst has no trailing newline, to make it easier
+// to embed inside other formatted JSON data.
+func Indent(dst *bytes.Buffer, src []byte, prefix, indent string) error {
+ origLen := dst.Len()
+ var scan scanner
+ scan.reset()
+ needIndent := false
+ depth := 0
+ for _, c := range src {
+ scan.bytes++
+ v := scan.step(&scan, int(c))
+ if v == scanSkipSpace {
+ continue
+ }
+ if v == scanError {
+ break
+ }
+ if needIndent && v != scanEndObject && v != scanEndArray {
+ needIndent = false
+ depth++
+ newline(dst, prefix, indent, depth)
+ }
+
+ // Emit semantically uninteresting bytes
+ // (in particular, punctuation in strings) unmodified.
+ if v == scanContinue {
+ dst.WriteByte(c)
+ continue
+ }
+
+ // Add spacing around real punctuation.
+ switch c {
+ case '{', '[':
+ // delay indent so that empty object and array are formatted as {} and [].
+ needIndent = true
+ dst.WriteByte(c)
+
+ case ',':
+ dst.WriteByte(c)
+ newline(dst, prefix, indent, depth)
+
+ case ':':
+ dst.WriteByte(c)
+ dst.WriteByte(' ')
+
+ case '}', ']':
+ if needIndent {
+ // suppress indent in empty object/array
+ needIndent = false
+ } else {
+ depth--
+ newline(dst, prefix, indent, depth)
+ }
+ dst.WriteByte(c)
+
+ default:
+ dst.WriteByte(c)
+ }
+ }
+ if scan.eof() == scanError {
+ dst.Truncate(origLen)
+ return scan.err
+ }
+ return nil
+}
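+
+// Illustrative sketch (not part of the original source): reformatting a
+// compact encoding with Indent, using a two-space indent and no prefix.
+//
+//	var out bytes.Buffer
+//	err := Indent(&out, []byte(`{"a":[1,2]}`), "", "  ")
+//	// out now holds:
+//	// {
+//	//   "a": [
+//	//     1,
+//	//     2
+//	//   ]
+//	// }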
--- /dev/null
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+// JSON value parser state machine.
+// Just about at the limit of what is reasonable to write by hand.
+// Some parts are a bit tedious, but overall it nicely factors out the
+// otherwise common code from the multiple scanning functions
+// in this package (Compact, Indent, checkValid, nextValue, etc).
+//
+// This file starts with two simple examples using the scanner
+// before diving into the scanner itself.
+
+import "strconv"
+
+// checkValid verifies that data is valid JSON-encoded data.
+// scan is passed in for use by checkValid to avoid an allocation.
+func checkValid(data []byte, scan *scanner) error {
+ scan.reset()
+ for _, c := range data {
+ scan.bytes++
+ if scan.step(scan, int(c)) == scanError {
+ return scan.err
+ }
+ }
+ if scan.eof() == scanError {
+ return scan.err
+ }
+ return nil
+}
+
+// nextValue splits data after the next whole JSON value,
+// returning that value and the bytes that follow it as separate slices.
+// scan is passed in for use by nextValue to avoid an allocation.
+func nextValue(data []byte, scan *scanner) (value, rest []byte, err error) {
+ scan.reset()
+ for i, c := range data {
+ v := scan.step(scan, int(c))
+ if v >= scanEnd {
+ switch v {
+ case scanError:
+ return nil, nil, scan.err
+ case scanEnd:
+ return data[0:i], data[i:], nil
+ }
+ }
+ }
+ if scan.eof() == scanError {
+ return nil, nil, scan.err
+ }
+ return data, nil, nil
+}
+
+// A SyntaxError is a description of a JSON syntax error.
+type SyntaxError struct {
+ msg string // description of error
+ Offset int64 // error occurred after reading Offset bytes
+}
+
+func (e *SyntaxError) Error() string { return e.msg }
+
+// A scanner is a JSON scanning state machine.
+// Callers call scan.reset() and then pass bytes in one at a time
+// by calling scan.step(&scan, c) for each byte.
+// The return value, referred to as an opcode, tells the
+// caller about significant parsing events like beginning
+// and ending literals, objects, and arrays, so that the
+// caller can follow along if it wishes.
+// The return value scanEnd indicates that a single top-level
+// JSON value has been completed, *before* the byte that
+// just got passed in. (The indication must be delayed in order
+// to recognize the end of numbers: is 123 a whole value or
+// the beginning of 12345e+6?).
+type scanner struct {
+ // The step is a func to be called to execute the next transition.
+ // Also tried using an integer constant and a single func
+ // with a switch, but using the func directly was 10% faster
+ // on a 64-bit Mac Mini, and it's nicer to read.
+ step func(*scanner, int) int
+
+ // Stack of what we're in the middle of - array values, object keys, object values.
+ parseState []int
+
+ // Error that happened, if any.
+ err error
+
+ // 1-byte redo (see undo method)
+ redoCode int
+ redoState func(*scanner, int) int
+
+ // total bytes consumed, updated by decoder.Decode
+ bytes int64
+}
+
+// These values are returned by the state transition functions
+// assigned to scanner.step and the method scanner.eof.
+// They give details about the current state of the scan that
+// callers might be interested to know about.
+// It is okay to ignore the return value of any particular
+// call to scanner.step: if one call returns scanError,
+// every subsequent call will return scanError too.
+const (
+ // Continue.
+ scanContinue = iota // uninteresting byte
+ scanBeginLiteral // end implied by next result != scanContinue
+ scanBeginObject // begin object
+ scanObjectKey // just finished object key (string)
+ scanObjectValue // just finished non-last object value
+ scanEndObject // end object (implies scanObjectValue if possible)
+ scanBeginArray // begin array
+ scanArrayValue // just finished array value
+ scanEndArray // end array (implies scanArrayValue if possible)
+ scanSkipSpace // space byte; can skip; known to be last "continue" result
+
+ // Stop.
+ scanEnd // top-level value ended *before* this byte; known to be first "stop" result
+ scanError // hit an error, scanner.err.
+)
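
The opcodes let a caller follow structure without building a parse tree. As a hedged illustration (maxDepth is a hypothetical in-package helper, not part of the patch), nesting depth can be tracked purely from the begin/end opcodes:

// maxDepth reports how deeply objects and arrays nest in data,
// driving the scanner one byte at a time.
func maxDepth(data []byte) (int, error) {
	var s scanner
	s.reset()
	depth, max := 0, 0
	for _, c := range data {
		switch s.step(&s, int(c)) {
		case scanBeginObject, scanBeginArray:
			if depth++; depth > max {
				max = depth
			}
		case scanEndObject, scanEndArray:
			depth--
		case scanError:
			return 0, s.err
		}
	}
	if s.eof() == scanError {
		return 0, s.err
	}
	return max, nil
}
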
+
+// These values are stored in the parseState stack.
+// They give the current state of a composite value
+// being scanned. If the parser is inside a nested value
+// the parseState describes the nested state, outermost at entry 0.
+const (
+ parseObjectKey = iota // parsing object key (before colon)
+ parseObjectValue // parsing object value (after colon)
+ parseArrayValue // parsing array value
+)
+
+// reset prepares the scanner for use.
+// It must be called before calling s.step.
+func (s *scanner) reset() {
+ s.step = stateBeginValue
+ s.parseState = s.parseState[0:0]
+ s.err = nil
+}
+
+// eof tells the scanner that the end of input has been reached.
+// It returns a scan status just as s.step does.
+func (s *scanner) eof() int {
+ if s.err != nil {
+ return scanError
+ }
+ if s.step == stateEndTop {
+ return scanEnd
+ }
+ s.step(s, ' ')
+ if s.step == stateEndTop {
+ return scanEnd
+ }
+ if s.err == nil {
+ s.err = &SyntaxError{"unexpected end of JSON input", s.bytes}
+ }
+ return scanError
+}
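
The delayed-end behaviour is easiest to see with a bare number: step never reports scanEnd while digits keep arriving, and only eof, by feeding a virtual space, settles the question. A hypothetical sketch:

// numberEndsOnlyAtEOF illustrates why scanEnd must be delayed.
func numberEndsOnlyAtEOF() bool {
	var s scanner
	s.reset()
	for _, c := range []byte("123") {
		if s.step(&s, int(c)) == scanEnd {
			return false // never reached: "123" could still continue as "12345e+6"
		}
	}
	return s.eof() == scanEnd // true: eof supplies the terminating space
}
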
+
+// pushParseState pushes a new parse state p onto the parse stack.
+func (s *scanner) pushParseState(p int) {
+ s.parseState = append(s.parseState, p)
+}
+
+// popParseState pops a parse state (already obtained) off the stack
+// and updates s.step accordingly.
+func (s *scanner) popParseState() {
+ n := len(s.parseState) - 1
+ s.parseState = s.parseState[0:n]
+ if n == 0 {
+ s.step = stateEndTop
+ } else {
+ s.step = stateEndValue
+ }
+}
+
+func isSpace(c rune) bool {
+ return c == ' ' || c == '\t' || c == '\r' || c == '\n'
+}
+
+// NOTE(rsc): The various instances of
+//
+// if c <= ' ' && (c == ' ' || c == '\t' || c == '\r' || c == '\n')
+//
+// below should all be if c <= ' ' && isSpace(c), but inlining
+// the checks makes a significant difference (>10%) in tight loops
+// such as nextValue. These should be rewritten with the clearer
+// function call once 6g knows to inline the call.
+
+// stateBeginValueOrEmpty is the state after reading `[`.
+func stateBeginValueOrEmpty(s *scanner, c int) int {
+ if c <= ' ' && (c == ' ' || c == '\t' || c == '\r' || c == '\n') {
+ return scanSkipSpace
+ }
+ if c == ']' {
+ return stateEndValue(s, c)
+ }
+ return stateBeginValue(s, c)
+}
+
+// stateBeginValue is the state at the beginning of the input.
+func stateBeginValue(s *scanner, c int) int {
+ if c <= ' ' && (c == ' ' || c == '\t' || c == '\r' || c == '\n') {
+ return scanSkipSpace
+ }
+ switch c {
+ case '{':
+ s.step = stateBeginStringOrEmpty
+ s.pushParseState(parseObjectKey)
+ return scanBeginObject
+ case '[':
+ s.step = stateBeginValueOrEmpty
+ s.pushParseState(parseArrayValue)
+ return scanBeginArray
+ case '"':
+ s.step = stateInString
+ return scanBeginLiteral
+ case '-':
+ s.step = stateNeg
+ return scanBeginLiteral
+ case '0': // beginning of 0.123
+ s.step = state0
+ return scanBeginLiteral
+ case 't': // beginning of true
+ s.step = stateT
+ return scanBeginLiteral
+ case 'f': // beginning of false
+ s.step = stateF
+ return scanBeginLiteral
+ case 'n': // beginning of null
+ s.step = stateN
+ return scanBeginLiteral
+ }
+ if '1' <= c && c <= '9' { // beginning of 1234.5
+ s.step = state1
+ return scanBeginLiteral
+ }
+ return s.error(c, "looking for beginning of value")
+}
+
+// stateBeginStringOrEmpty is the state after reading `{`.
+func stateBeginStringOrEmpty(s *scanner, c int) int {
+ if c <= ' ' && (c == ' ' || c == '\t' || c == '\r' || c == '\n') {
+ return scanSkipSpace
+ }
+ if c == '}' {
+ n := len(s.parseState)
+ s.parseState[n-1] = parseObjectValue
+ return stateEndValue(s, c)
+ }
+ return stateBeginString(s, c)
+}
+
+// stateBeginString is the state after reading `{"key": value,`.
+func stateBeginString(s *scanner, c int) int {
+ if c <= ' ' && (c == ' ' || c == '\t' || c == '\r' || c == '\n') {
+ return scanSkipSpace
+ }
+ if c == '"' {
+ s.step = stateInString
+ return scanBeginLiteral
+ }
+ return s.error(c, "looking for beginning of object key string")
+}
+
+// stateEndValue is the state after completing a value,
+// such as after reading `{}` or `true` or `["x"`.
+func stateEndValue(s *scanner, c int) int {
+ n := len(s.parseState)
+ if n == 0 {
+ // Completed top-level before the current byte.
+ s.step = stateEndTop
+ return stateEndTop(s, c)
+ }
+ if c <= ' ' && (c == ' ' || c == '\t' || c == '\r' || c == '\n') {
+ s.step = stateEndValue
+ return scanSkipSpace
+ }
+ ps := s.parseState[n-1]
+ switch ps {
+ case parseObjectKey:
+ if c == ':' {
+ s.parseState[n-1] = parseObjectValue
+ s.step = stateBeginValue
+ return scanObjectKey
+ }
+ return s.error(c, "after object key")
+ case parseObjectValue:
+ if c == ',' {
+ s.parseState[n-1] = parseObjectKey
+ s.step = stateBeginString
+ return scanObjectValue
+ }
+ if c == '}' {
+ s.popParseState()
+ return scanEndObject
+ }
+ return s.error(c, "after object key:value pair")
+ case parseArrayValue:
+ if c == ',' {
+ s.step = stateBeginValue
+ return scanArrayValue
+ }
+ if c == ']' {
+ s.popParseState()
+ return scanEndArray
+ }
+ return s.error(c, "after array element")
+ }
+ return s.error(c, "")
+}
+
+// stateEndTop is the state after finishing the top-level value,
+// such as after reading `{}` or `[1,2,3]`.
+// Only space characters should be seen now.
+func stateEndTop(s *scanner, c int) int {
+ if c != ' ' && c != '\t' && c != '\r' && c != '\n' {
+ // Complain about non-space byte on next call.
+ s.error(c, "after top-level value")
+ }
+ return scanEnd
+}
+
+// stateInString is the state after reading `"`.
+func stateInString(s *scanner, c int) int {
+ if c == '"' {
+ s.step = stateEndValue
+ return scanContinue
+ }
+ if c == '\\' {
+ s.step = stateInStringEsc
+ return scanContinue
+ }
+ if c < 0x20 {
+ return s.error(c, "in string literal")
+ }
+ return scanContinue
+}
+
+// stateInStringEsc is the state after reading `"\` during a quoted string.
+func stateInStringEsc(s *scanner, c int) int {
+ switch c {
+ case 'b', 'f', 'n', 'r', 't', '\\', '/', '"':
+ s.step = stateInString
+ return scanContinue
+ }
+ if c == 'u' {
+ s.step = stateInStringEscU
+ return scanContinue
+ }
+ return s.error(c, "in string escape code")
+}
+
+// stateInStringEscU is the state after reading `"\u` during a quoted string.
+func stateInStringEscU(s *scanner, c int) int {
+ if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
+ s.step = stateInStringEscU1
+ return scanContinue
+ }
+ // numbers
+ return s.error(c, "in \\u hexadecimal character escape")
+}
+
+// stateInStringEscU1 is the state after reading `"\u1` during a quoted string.
+func stateInStringEscU1(s *scanner, c int) int {
+ if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
+ s.step = stateInStringEscU12
+ return scanContinue
+ }
+ // numbers
+ return s.error(c, "in \\u hexadecimal character escape")
+}
+
+// stateInStringEscU12 is the state after reading `"\u12` during a quoted string.
+func stateInStringEscU12(s *scanner, c int) int {
+ if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
+ s.step = stateInStringEscU123
+ return scanContinue
+ }
+ // numbers
+ return s.error(c, "in \\u hexadecimal character escape")
+}
+
+// stateInStringEscU123 is the state after reading `"\u123` during a quoted string.
+func stateInStringEscU123(s *scanner, c int) int {
+ if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
+ s.step = stateInString
+ return scanContinue
+ }
+ // numbers
+ return s.error(c, "in \\u hexadecimal character escape")
+}
+
+// stateNeg is the state after reading `-` during a number.
+func stateNeg(s *scanner, c int) int {
+ if c == '0' {
+ s.step = state0
+ return scanContinue
+ }
+ if '1' <= c && c <= '9' {
+ s.step = state1
+ return scanContinue
+ }
+ return s.error(c, "in numeric literal")
+}
+
+// state1 is the state after reading a non-zero integer during a number,
+// such as after reading `1` or `100` but not `0`.
+func state1(s *scanner, c int) int {
+ if '0' <= c && c <= '9' {
+ s.step = state1
+ return scanContinue
+ }
+ return state0(s, c)
+}
+
+// state0 is the state after reading `0` during a number.
+func state0(s *scanner, c int) int {
+ if c == '.' {
+ s.step = stateDot
+ return scanContinue
+ }
+ if c == 'e' || c == 'E' {
+ s.step = stateE
+ return scanContinue
+ }
+ return stateEndValue(s, c)
+}
+
+// stateDot is the state after reading the integer and decimal point in a number,
+// such as after reading `1.`.
+func stateDot(s *scanner, c int) int {
+ if '0' <= c && c <= '9' {
+ s.step = stateDot0
+ return scanContinue
+ }
+ return s.error(c, "after decimal point in numeric literal")
+}
+
+// stateDot0 is the state after reading the integer, decimal point, and subsequent
+// digits of a number, such as after reading `3.14`.
+func stateDot0(s *scanner, c int) int {
+ if '0' <= c && c <= '9' {
+ s.step = stateDot0
+ return scanContinue
+ }
+ if c == 'e' || c == 'E' {
+ s.step = stateE
+ return scanContinue
+ }
+ return stateEndValue(s, c)
+}
+
+// stateE is the state after reading the mantissa and e in a number,
+// such as after reading `314e` or `0.314e`.
+func stateE(s *scanner, c int) int {
+ if c == '+' {
+ s.step = stateESign
+ return scanContinue
+ }
+ if c == '-' {
+ s.step = stateESign
+ return scanContinue
+ }
+ return stateESign(s, c)
+}
+
+// stateESign is the state after reading the mantissa, e, and sign in a number,
+// such as after reading `314e-` or `0.314e+`.
+func stateESign(s *scanner, c int) int {
+ if '0' <= c && c <= '9' {
+ s.step = stateE0
+ return scanContinue
+ }
+ return s.error(c, "in exponent of numeric literal")
+}
+
+// stateE0 is the state after reading the mantissa, e, optional sign,
+// and at least one digit of the exponent in a number,
+// such as after reading `314e-2` or `0.314e+1` or `3.14e0`.
+func stateE0(s *scanner, c int) int {
+ if '0' <= c && c <= '9' {
+ s.step = stateE0
+ return scanContinue
+ }
+ return stateEndValue(s, c)
+}
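
Taken together, stateNeg through stateE0 accept exactly the JSON number grammar: no leading zeros, and no bare trailing dot or exponent sign. A hypothetical in-package test sketching the boundary cases through checkValid:

func TestNumberStatesSketch(t *testing.T) {
	var s scanner
	cases := []struct {
		in string
		ok bool
	}{
		{"0", true},
		{"-12.5e+3", true},
		{"3.14e0", true},
		{"01", false},  // leading zero: the value ends at "0", so "1" is trailing garbage
		{"1.", false},  // a dot must be followed by a digit
		{"1e+", false}, // an exponent sign must be followed by a digit
	}
	for _, tt := range cases {
		err := checkValid([]byte(tt.in), &s)
		if (err == nil) != tt.ok {
			t.Errorf("checkValid(%q) = %v, want ok=%v", tt.in, err, tt.ok)
		}
	}
}
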
+
+// stateT is the state after reading `t`.
+func stateT(s *scanner, c int) int {
+ if c == 'r' {
+ s.step = stateTr
+ return scanContinue
+ }
+ return s.error(c, "in literal true (expecting 'r')")
+}
+
+// stateTr is the state after reading `tr`.
+func stateTr(s *scanner, c int) int {
+ if c == 'u' {
+ s.step = stateTru
+ return scanContinue
+ }
+ return s.error(c, "in literal true (expecting 'u')")
+}
+
+// stateTru is the state after reading `tru`.
+func stateTru(s *scanner, c int) int {
+ if c == 'e' {
+ s.step = stateEndValue
+ return scanContinue
+ }
+ return s.error(c, "in literal true (expecting 'e')")
+}
+
+// stateF is the state after reading `f`.
+func stateF(s *scanner, c int) int {
+ if c == 'a' {
+ s.step = stateFa
+ return scanContinue
+ }
+ return s.error(c, "in literal false (expecting 'a')")
+}
+
+// stateFa is the state after reading `fa`.
+func stateFa(s *scanner, c int) int {
+ if c == 'l' {
+ s.step = stateFal
+ return scanContinue
+ }
+ return s.error(c, "in literal false (expecting 'l')")
+}
+
+// stateFal is the state after reading `fal`.
+func stateFal(s *scanner, c int) int {
+ if c == 's' {
+ s.step = stateFals
+ return scanContinue
+ }
+ return s.error(c, "in literal false (expecting 's')")
+}
+
+// stateFals is the state after reading `fals`.
+func stateFals(s *scanner, c int) int {
+ if c == 'e' {
+ s.step = stateEndValue
+ return scanContinue
+ }
+ return s.error(c, "in literal false (expecting 'e')")
+}
+
+// stateN is the state after reading `n`.
+func stateN(s *scanner, c int) int {
+ if c == 'u' {
+ s.step = stateNu
+ return scanContinue
+ }
+ return s.error(c, "in literal null (expecting 'u')")
+}
+
+// stateNu is the state after reading `nu`.
+func stateNu(s *scanner, c int) int {
+ if c == 'l' {
+ s.step = stateNul
+ return scanContinue
+ }
+ return s.error(c, "in literal null (expecting 'l')")
+}
+
+// stateNul is the state after reading `nul`.
+func stateNul(s *scanner, c int) int {
+ if c == 'l' {
+ s.step = stateEndValue
+ return scanContinue
+ }
+ return s.error(c, "in literal null (expecting 'l')")
+}
+
+// stateError is the state after reaching a syntax error,
+// such as after reading `[1}` or `5.1.2`.
+func stateError(s *scanner, c int) int {
+ return scanError
+}
+
+// error records an error and switches to the error state.
+func (s *scanner) error(c int, context string) int {
+ s.step = stateError
+ s.err = &SyntaxError{"invalid character " + quoteChar(c) + " " + context, s.bytes}
+ return scanError
+}
+
+// quoteChar formats c as a quoted character literal
+func quoteChar(c int) string {
+ // special cases - different from quoted strings
+ if c == '\'' {
+ return `'\''`
+ }
+ if c == '"' {
+ return `'"'`
+ }
+
+ // use quoted string with different quotation marks
+ s := strconv.Quote(string(c))
+ return "'" + s[1:len(s)-1] + "'"
+}
+
+// undo causes the scanner to return scanCode from the next state transition.
+// This gives callers a simple 1-byte undo mechanism.
+func (s *scanner) undo(scanCode int) {
+ if s.step == stateRedo {
+ panic("invalid use of scanner")
+ }
+ s.redoCode = scanCode
+ s.redoState = s.step
+ s.step = stateRedo
+}
+
+// stateRedo helps implement the scanner's 1-byte undo.
+func stateRedo(s *scanner, c int) int {
+ s.step = s.redoState
+ return s.redoCode
+}
--- /dev/null
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+ "bytes"
+ "math"
+ "math/rand"
+ "reflect"
+ "testing"
+)
+
+// Tests of simple examples.
+
+type example struct {
+ compact string
+ indent string
+}
+
+var examples = []example{
+ {`1`, `1`},
+ {`{}`, `{}`},
+ {`[]`, `[]`},
+ {`{"":2}`, "{\n\t\"\": 2\n}"},
+ {`[3]`, "[\n\t3\n]"},
+ {`[1,2,3]`, "[\n\t1,\n\t2,\n\t3\n]"},
+ {`{"x":1}`, "{\n\t\"x\": 1\n}"},
+ {ex1, ex1i},
+}
+
+var ex1 = `[true,false,null,"x",1,1.5,0,-5e+2]`
+
+var ex1i = `[
+ true,
+ false,
+ null,
+ "x",
+ 1,
+ 1.5,
+ 0,
+ -5e+2
+]`
+
+func TestCompact(t *testing.T) {
+ var buf bytes.Buffer
+ for _, tt := range examples {
+ buf.Reset()
+ if err := Compact(&buf, []byte(tt.compact)); err != nil {
+ t.Errorf("Compact(%#q): %v", tt.compact, err)
+ } else if s := buf.String(); s != tt.compact {
+ t.Errorf("Compact(%#q) = %#q, want original", tt.compact, s)
+ }
+
+ buf.Reset()
+ if err := Compact(&buf, []byte(tt.indent)); err != nil {
+ t.Errorf("Compact(%#q): %v", tt.indent, err)
+ continue
+ } else if s := buf.String(); s != tt.compact {
+ t.Errorf("Compact(%#q) = %#q, want %#q", tt.indent, s, tt.compact)
+ }
+ }
+}
+
+func TestIndent(t *testing.T) {
+ var buf bytes.Buffer
+ for _, tt := range examples {
+ buf.Reset()
+ if err := Indent(&buf, []byte(tt.indent), "", "\t"); err != nil {
+ t.Errorf("Indent(%#q): %v", tt.indent, err)
+ } else if s := buf.String(); s != tt.indent {
+ t.Errorf("Indent(%#q) = %#q, want original", tt.indent, s)
+ }
+
+ buf.Reset()
+ if err := Indent(&buf, []byte(tt.compact), "", "\t"); err != nil {
+ t.Errorf("Indent(%#q): %v", tt.compact, err)
+ continue
+ } else if s := buf.String(); s != tt.indent {
+ t.Errorf("Indent(%#q) = %#q, want %#q", tt.compact, s, tt.indent)
+ }
+ }
+}
+
+// Tests of a large random structure.
+
+func TestCompactBig(t *testing.T) {
+ initBig()
+ var buf bytes.Buffer
+ if err := Compact(&buf, jsonBig); err != nil {
+ t.Fatalf("Compact: %v", err)
+ }
+ b := buf.Bytes()
+ if bytes.Compare(b, jsonBig) != 0 {
+ t.Error("Compact(jsonBig) != jsonBig")
+ diff(t, b, jsonBig)
+ return
+ }
+}
+
+func TestIndentBig(t *testing.T) {
+ initBig()
+ var buf bytes.Buffer
+ if err := Indent(&buf, jsonBig, "", "\t"); err != nil {
+ t.Fatalf("Indent1: %v", err)
+ }
+ b := buf.Bytes()
+ if len(b) == len(jsonBig) {
+ // jsonBig is compact (no unnecessary spaces);
+ // indenting should make it bigger
+ t.Fatalf("Indent(jsonBig) did not get bigger")
+ }
+
+ // should be idempotent
+ var buf1 bytes.Buffer
+ if err := Indent(&buf1, b, "", "\t"); err != nil {
+ t.Fatalf("Indent2: %v", err)
+ }
+ b1 := buf1.Bytes()
+ if bytes.Compare(b1, b) != 0 {
+ t.Error("Indent(Indent(jsonBig)) != Indent(jsonBig)")
+ diff(t, b1, b)
+ return
+ }
+
+ // should get back to original
+ buf1.Reset()
+ if err := Compact(&buf1, b); err != nil {
+ t.Fatalf("Compact: %v", err)
+ }
+ b1 = buf1.Bytes()
+ if bytes.Compare(b1, jsonBig) != 0 {
+ t.Error("Compact(Indent(jsonBig)) != jsonBig")
+ diff(t, b1, jsonBig)
+ return
+ }
+}
+
+type indentErrorTest struct {
+ in string
+ err error
+}
+
+var indentErrorTests = []indentErrorTest{
+ {`{"X": "foo", "Y"}`, &SyntaxError{"invalid character '}' after object key", 17}},
+ {`{"X": "foo" "Y": "bar"}`, &SyntaxError{"invalid character '\"' after object key:value pair", 13}},
+}
+
+func TestIndentErrors(t *testing.T) {
+ for i, tt := range indentErrorTests {
+ slice := make([]uint8, 0)
+ buf := bytes.NewBuffer(slice)
+ if err := Indent(buf, []uint8(tt.in), "", ""); err != nil {
+ if !reflect.DeepEqual(err, tt.err) {
+ t.Errorf("#%d: Indent: %#v", i, err)
+ continue
+ }
+ }
+ }
+}
+
+func TestNextValueBig(t *testing.T) {
+ initBig()
+ var scan scanner
+ item, rest, err := nextValue(jsonBig, &scan)
+ if err != nil {
+ t.Fatalf("nextValue: %s", err)
+ }
+ if len(item) != len(jsonBig) || &item[0] != &jsonBig[0] {
+ t.Errorf("invalid item: %d %d", len(item), len(jsonBig))
+ }
+ if len(rest) != 0 {
+ t.Errorf("invalid rest: %d", len(rest))
+ }
+
+ item, rest, err = nextValue(append(jsonBig, "HELLO WORLD"...), &scan)
+ if err != nil {
+ t.Fatalf("nextValue extra: %s", err)
+ }
+ if len(item) != len(jsonBig) {
+ t.Errorf("invalid item: %d %d", len(item), len(jsonBig))
+ }
+ if string(rest) != "HELLO WORLD" {
+ t.Errorf("invalid rest: %d", len(rest))
+ }
+}
+
+func BenchmarkSkipValue(b *testing.B) {
+ initBig()
+ var scan scanner
+ for i := 0; i < b.N; i++ {
+ nextValue(jsonBig, &scan)
+ }
+ b.SetBytes(int64(len(jsonBig)))
+}
+
+func diff(t *testing.T, a, b []byte) {
+ for i := 0; ; i++ {
+ if i >= len(a) || i >= len(b) || a[i] != b[i] {
+ j := i - 10
+ if j < 0 {
+ j = 0
+ }
+ t.Errorf("diverge at %d: «%s» vs «%s»", i, trim(a[j:]), trim(b[j:]))
+ return
+ }
+ }
+}
+
+func trim(b []byte) []byte {
+ if len(b) > 20 {
+ return b[0:20]
+ }
+ return b
+}
+
+// Generate a random JSON object.
+
+var jsonBig []byte
+
+const (
+ big = 10000
+ small = 100
+)
+
+func initBig() {
+ n := big
+ if testing.Short() {
+ n = small
+ }
+ if len(jsonBig) != n {
+ b, err := Marshal(genValue(n))
+ if err != nil {
+ panic(err)
+ }
+ jsonBig = b
+ }
+}
+
+func genValue(n int) interface{} {
+ if n > 1 {
+ switch rand.Intn(2) {
+ case 0:
+ return genArray(n)
+ case 1:
+ return genMap(n)
+ }
+ }
+ switch rand.Intn(3) {
+ case 0:
+ return rand.Intn(2) == 0
+ case 1:
+ return rand.NormFloat64()
+ case 2:
+ return genString(30)
+ }
+ panic("unreachable")
+}
+
+func genString(stddev float64) string {
+ n := int(math.Abs(rand.NormFloat64()*stddev + stddev/2))
+ c := make([]rune, n)
+ for i := range c {
+ f := math.Abs(rand.NormFloat64()*64 + 32)
+ if f > 0x10ffff {
+ f = 0x10ffff
+ }
+ c[i] = rune(f)
+ }
+ return string(c)
+}
+
+func genArray(n int) []interface{} {
+ f := int(math.Abs(rand.NormFloat64()) * math.Min(10, float64(n/2)))
+ if f > n {
+ f = n
+ }
+ if n > 0 && f == 0 {
+ f = 1
+ }
+ x := make([]interface{}, f)
+ for i := range x {
+ x[i] = genValue(((i+1)*n)/f - (i*n)/f)
+ }
+ return x
+}
+
+func genMap(n int) map[string]interface{} {
+ f := int(math.Abs(rand.NormFloat64()) * math.Min(10, float64(n/2)))
+ if f > n {
+ f = n
+ }
+ if n > 0 && f == 0 {
+ f = 1
+ }
+ x := make(map[string]interface{})
+ for i := 0; i < f; i++ {
+ x[genString(10)] = genValue(((i+1)*n)/f - (i*n)/f)
+ }
+ return x
+}
--- /dev/null
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+ "errors"
+ "io"
+)
+
+// A Decoder reads and decodes JSON objects from an input stream.
+type Decoder struct {
+ r io.Reader
+ buf []byte
+ d decodeState
+ scan scanner
+ err error
+}
+
+// NewDecoder returns a new decoder that reads from r.
+func NewDecoder(r io.Reader) *Decoder {
+ return &Decoder{r: r}
+}
+
+// Decode reads the next JSON-encoded value from its
+// input and stores it in the value pointed to by v.
+//
+// See the documentation for Unmarshal for details about
+// the conversion of JSON into a Go value.
+func (dec *Decoder) Decode(v interface{}) error {
+ if dec.err != nil {
+ return dec.err
+ }
+
+ n, err := dec.readValue()
+ if err != nil {
+ return err
+ }
+
+ // Don't save err from unmarshal into dec.err:
+ // the connection is still usable since we read a complete JSON
+ // object from it before the error happened.
+ dec.d.init(dec.buf[0:n])
+ err = dec.d.unmarshal(v)
+
+ // Slide rest of data down.
+ rest := copy(dec.buf, dec.buf[n:])
+ dec.buf = dec.buf[0:rest]
+
+ return err
+}
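
A rough usage sketch of the streaming decoder (hedged: assumes the package is importable as encoding/json). Values need no newline separators; the scanner finds each boundary on its own:

package main

import (
	"encoding/json"
	"fmt"
	"io"
	"strings"
)

func main() {
	dec := json.NewDecoder(strings.NewReader(`{"n":1} {"n":2} {"n":3}`))
	for {
		var v map[string]interface{}
		if err := dec.Decode(&v); err == io.EOF {
			break
		} else if err != nil {
			panic(err)
		}
		fmt.Println(v["n"])
	}
}
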
+
+// readValue reads a JSON value into dec.buf.
+// It returns the length of the encoding.
+func (dec *Decoder) readValue() (int, error) {
+ dec.scan.reset()
+
+ scanp := 0
+ var err error
+Input:
+ for {
+ // Look in the buffer for a new value.
+ for i, c := range dec.buf[scanp:] {
+ dec.scan.bytes++
+ v := dec.scan.step(&dec.scan, int(c))
+ if v == scanEnd {
+ scanp += i
+ break Input
+ }
+ // scanEnd is delayed one byte.
+ // We might block trying to get that byte from src,
+ // so instead invent a space byte.
+ if v == scanEndObject && dec.scan.step(&dec.scan, ' ') == scanEnd {
+ scanp += i + 1
+ break Input
+ }
+ if v == scanError {
+ dec.err = dec.scan.err
+ return 0, dec.scan.err
+ }
+ }
+ scanp = len(dec.buf)
+
+ // Did the last read have an error?
+ // Delayed until now to allow buffer scan.
+ if err != nil {
+ if err == io.EOF {
+ if dec.scan.step(&dec.scan, ' ') == scanEnd {
+ break Input
+ }
+ if nonSpace(dec.buf) {
+ err = io.ErrUnexpectedEOF
+ }
+ }
+ dec.err = err
+ return 0, err
+ }
+
+ // Make room to read more into the buffer.
+ const minRead = 512
+ if cap(dec.buf)-len(dec.buf) < minRead {
+ newBuf := make([]byte, len(dec.buf), 2*cap(dec.buf)+minRead)
+ copy(newBuf, dec.buf)
+ dec.buf = newBuf
+ }
+
+ // Read. Delay error for next iteration (after scan).
+ var n int
+ n, err = dec.r.Read(dec.buf[len(dec.buf):cap(dec.buf)])
+ dec.buf = dec.buf[0 : len(dec.buf)+n]
+ }
+ return scanp, nil
+}
+
+func nonSpace(b []byte) bool {
+ for _, c := range b {
+ if !isSpace(rune(c)) {
+ return true
+ }
+ }
+ return false
+}
+
+// An Encoder writes JSON objects to an output stream.
+type Encoder struct {
+ w io.Writer
+ e encodeState
+ err error
+}
+
+// NewEncoder returns a new encoder that writes to w.
+func NewEncoder(w io.Writer) *Encoder {
+ return &Encoder{w: w}
+}
+
+// Encode writes the JSON encoding of v to the connection.
+//
+// See the documentation for Marshal for details about the
+// conversion of Go values to JSON.
+func (enc *Encoder) Encode(v interface{}) error {
+ if enc.err != nil {
+ return enc.err
+ }
+ enc.e.Reset()
+ err := enc.e.marshal(v)
+ if err != nil {
+ return err
+ }
+
+ // Terminate each value with a newline.
+ // This makes the output look a little nicer
+ // when debugging, and some kind of space
+ // is required if the encoded value was a number,
+ // so that the reader knows there aren't more
+ // digits coming.
+ enc.e.WriteByte('\n')
+
+ if _, err = enc.w.Write(enc.e.Bytes()); err != nil {
+ enc.err = err
+ }
+ return err
+}
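
The matching encoder sketch (same assumptions): one value per line, mirroring the newline written above.

package main

import (
	"encoding/json"
	"os"
)

func main() {
	enc := json.NewEncoder(os.Stdout)
	for _, v := range []interface{}{1, "two", []int{3}} {
		if err := enc.Encode(v); err != nil {
			panic(err)
		}
	}
	// prints:
	// 1
	// "two"
	// [3]
}
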
+
+// RawMessage is a raw encoded JSON object.
+// It implements Marshaler and Unmarshaler and can
+// be used to delay JSON decoding or precompute a JSON encoding.
+type RawMessage []byte
+
+// MarshalJSON returns *m as the JSON encoding of m.
+func (m *RawMessage) MarshalJSON() ([]byte, error) {
+ return *m, nil
+}
+
+// UnmarshalJSON sets *m to a copy of data.
+func (m *RawMessage) UnmarshalJSON(data []byte) error {
+ if m == nil {
+ return errors.New("json.RawMessage: UnmarshalJSON on nil pointer")
+ }
+ *m = append((*m)[0:0], data...)
+ return nil
+}
+
+var _ Marshaler = (*RawMessage)(nil)
+var _ Unmarshaler = (*RawMessage)(nil)
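
RawMessage is typically used to postpone decoding part of a message until a discriminator field has been read. A hedged sketch; Envelope and Point are illustrative names only, and the pointer matches the *RawMessage requirement noted in the stream tests that follow:

package main

import (
	"encoding/json"
	"fmt"
)

type Envelope struct {
	Kind string
	Data *json.RawMessage
}

type Point struct {
	X, Y int
}

func main() {
	msg := []byte(`{"Kind":"point","Data":{"X":1,"Y":2}}`)
	var env Envelope
	if err := json.Unmarshal(msg, &env); err != nil {
		panic(err)
	}
	if env.Kind == "point" {
		var p Point
		if err := json.Unmarshal([]byte(*env.Data), &p); err != nil {
			panic(err)
		}
		fmt.Println(p.X, p.Y) // 1 2
	}
}
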
--- /dev/null
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+ "bytes"
+ "reflect"
+ "testing"
+)
+
+// Test values for the stream test.
+// One of each JSON kind.
+var streamTest = []interface{}{
+ 0.1,
+ "hello",
+ nil,
+ true,
+ false,
+ []interface{}{"a", "b", "c"},
+ map[string]interface{}{"K": "Kelvin", "ß": "long s"},
+ 3.14, // another value to make sure something can follow map
+}
+
+var streamEncoded = `0.1
+"hello"
+null
+true
+false
+["a","b","c"]
+{"ß":"long s","K":"Kelvin"}
+3.14
+`
+
+func TestEncoder(t *testing.T) {
+ for i := 0; i <= len(streamTest); i++ {
+ var buf bytes.Buffer
+ enc := NewEncoder(&buf)
+ for j, v := range streamTest[0:i] {
+ if err := enc.Encode(v); err != nil {
+ t.Fatalf("encode #%d: %v", j, err)
+ }
+ }
+ if have, want := buf.String(), nlines(streamEncoded, i); have != want {
+ t.Errorf("encoding %d items: mismatch", i)
+ diff(t, []byte(have), []byte(want))
+ break
+ }
+ }
+}
+
+func TestDecoder(t *testing.T) {
+ for i := 0; i <= len(streamTest); i++ {
+ // Use stream without newlines as input,
+ // just to stress the decoder even more.
+ // Our test input does not include back-to-back numbers.
+ // Otherwise stripping the newlines would
+ // merge two adjacent JSON values.
+ var buf bytes.Buffer
+ for _, c := range nlines(streamEncoded, i) {
+ if c != '\n' {
+ buf.WriteRune(c)
+ }
+ }
+ out := make([]interface{}, i)
+ dec := NewDecoder(&buf)
+ for j := range out {
+ if err := dec.Decode(&out[j]); err != nil {
+ t.Fatalf("decode #%d/%d: %v", j, i, err)
+ }
+ }
+ if !reflect.DeepEqual(out, streamTest[0:i]) {
+ t.Errorf("decoding %d items: mismatch", i)
+ for j := range out {
+ if !reflect.DeepEqual(out[j], streamTest[j]) {
+ t.Errorf("#%d: have %v want %v", j, out[j], streamTest[j])
+ }
+ }
+ break
+ }
+ }
+}
+
+func nlines(s string, n int) string {
+ if n <= 0 {
+ return ""
+ }
+ for i, c := range s {
+ if c == '\n' {
+ if n--; n == 0 {
+ return s[0 : i+1]
+ }
+ }
+ }
+ return s
+}
+
+func TestRawMessage(t *testing.T) {
+ // TODO(rsc): Should not need the * in *RawMessage
+ var data struct {
+ X float64
+ Id *RawMessage
+ Y float32
+ }
+ const raw = `["\u0056",null]`
+ const msg = `{"X":0.1,"Id":["\u0056",null],"Y":0.2}`
+ err := Unmarshal([]byte(msg), &data)
+ if err != nil {
+ t.Fatalf("Unmarshal: %v", err)
+ }
+ if string([]byte(*data.Id)) != raw {
+ t.Fatalf("Raw mismatch: have %#q want %#q", []byte(*data.Id), raw)
+ }
+ b, err := Marshal(&data)
+ if err != nil {
+ t.Fatalf("Marshal: %v", err)
+ }
+ if string(b) != msg {
+ t.Fatalf("Marshal: have %#q want %#q", b, msg)
+ }
+}
+
+func TestNullRawMessage(t *testing.T) {
+ // TODO(rsc): Should not need the * in *RawMessage
+ var data struct {
+ X float64
+ Id *RawMessage
+ Y float32
+ }
+ data.Id = new(RawMessage)
+ const msg = `{"X":0.1,"Id":null,"Y":0.2}`
+ err := Unmarshal([]byte(msg), &data)
+ if err != nil {
+ t.Fatalf("Unmarshal: %v", err)
+ }
+ if data.Id != nil {
+ t.Fatalf("Raw mismatch: have non-nil, want nil")
+ }
+ b, err := Marshal(&data)
+ if err != nil {
+ t.Fatalf("Marshal: %v", err)
+ }
+ if string(b) != msg {
+ t.Fatalf("Marshal: have %#q want %#q", b, msg)
+ }
+}
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+ "testing"
+)
+
+type basicLatin2xTag struct {
+ V string `json:"$-"`
+}
+
+type basicLatin3xTag struct {
+ V string `json:"0123456789"`
+}
+
+type basicLatin4xTag struct {
+ V string `json:"ABCDEFGHIJKLMO"`
+}
+
+type basicLatin5xTag struct {
+ V string `json:"PQRSTUVWXYZ_"`
+}
+
+type basicLatin6xTag struct {
+ V string `json:"abcdefghijklmno"`
+}
+
+type basicLatin7xTag struct {
+ V string `json:"pqrstuvwxyz"`
+}
+
+type miscPlaneTag struct {
+ V string `json:"色は匂へど"`
+}
+
+type emptyTag struct {
+ W string
+}
+
+type misnamedTag struct {
+ X string `jsom:"Misnamed"`
+}
+
+type badFormatTag struct {
+ Y string `:"BadFormat"`
+}
+
+type badCodeTag struct {
+ Z string `json:" !\"#%&'()*+,./"`
+}
+
+var structTagObjectKeyTests = []struct {
+ raw interface{}
+ value string
+ key string
+}{
+ {basicLatin2xTag{"2x"}, "2x", "$-"},
+ {basicLatin3xTag{"3x"}, "3x", "0123456789"},
+ {basicLatin4xTag{"4x"}, "4x", "ABCDEFGHIJKLMO"},
+ {basicLatin5xTag{"5x"}, "5x", "PQRSTUVWXYZ_"},
+ {basicLatin6xTag{"6x"}, "6x", "abcdefghijklmno"},
+ {basicLatin7xTag{"7x"}, "7x", "pqrstuvwxyz"},
+ {miscPlaneTag{"いろはにほへと"}, "いろはにほへと", "色は匂へど"},
+ {emptyTag{"Pour Moi"}, "Pour Moi", "W"},
+ {misnamedTag{"Animal Kingdom"}, "Animal Kingdom", "X"},
+ {badFormatTag{"Orfevre"}, "Orfevre", "Y"},
+ {badCodeTag{"Reliable Man"}, "Reliable Man", "Z"},
+}
+
+func TestStructTagObjectKey(t *testing.T) {
+ for _, tt := range structTagObjectKeyTests {
+ b, err := Marshal(tt.raw)
+ if err != nil {
+ t.Fatalf("Marshal(%#q) failed: %v", tt.raw, err)
+ }
+ var f interface{}
+ err = Unmarshal(b, &f)
+ if err != nil {
+ t.Fatalf("Unmarshal(%#q) failed: %v", b, err)
+ }
+ for i, v := range f.(map[string]interface{}) {
+ switch i {
+ case tt.key:
+ if s, ok := v.(string); !ok || s != tt.value {
+ t.Fatalf("Unexpected value: %#q, want %v", s, tt.value)
+ }
+ default:
+ t.Fatalf("Unexpected key: %#q", i)
+ }
+ }
+ }
+}
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+ "strings"
+)
+
+// tagOptions is the string following a comma in a struct field's "json"
+// tag, or the empty string. It does not include the leading comma.
+type tagOptions string
+
+// parseTag splits a struct field's json tag into its name and
+// comma-separated options.
+func parseTag(tag string) (string, tagOptions) {
+ if idx := strings.Index(tag, ","); idx != -1 {
+ return tag[:idx], tagOptions(tag[idx+1:])
+ }
+ return tag, tagOptions("")
+}
+
+// Contains reports whether a comma-separated list of options
+// contains a particular substr flag. substr must be surrounded by a
+// string boundary or commas.
+func (o tagOptions) Contains(optionName string) bool {
+ if len(o) == 0 {
+ return false
+ }
+ s := string(o)
+ for s != "" {
+ var next string
+ i := strings.Index(s, ",")
+ if i >= 0 {
+ s, next = s[:i], s[i+1:]
+ }
+ if s == optionName {
+ return true
+ }
+ s = next
+ }
+ return false
+}
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+ "testing"
+)
+
+func TestTagParsing(t *testing.T) {
+ name, opts := parseTag("field,foobar,foo")
+ if name != "field" {
+ t.Fatalf("name = %q, want field", name)
+ }
+ for _, tt := range []struct {
+ opt string
+ want bool
+ }{
+ {"foobar", true},
+ {"foo", true},
+ {"bar", false},
+ } {
+ if opts.Contains(tt.opt) != tt.want {
+ t.Errorf("Contains(%q) = %v", tt.opt, !tt.want)
+ }
+ }
+}
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xml
+
+var atomValue = &Feed{
+ Title: "Example Feed",
+ Link: []Link{{Href: "http://example.org/"}},
+ Updated: ParseTime("2003-12-13T18:30:02Z"),
+ Author: Person{Name: "John Doe"},
+ Id: "urn:uuid:60a76c80-d399-11d9-b93C-0003939e0af6",
+
+ Entry: []Entry{
+ {
+ Title: "Atom-Powered Robots Run Amok",
+ Link: []Link{{Href: "http://example.org/2003/12/13/atom03"}},
+ Id: "urn:uuid:1225c695-cfb8-4ebb-aaaa-80da344efa6a",
+ Updated: ParseTime("2003-12-13T18:30:02Z"),
+ Summary: NewText("Some text."),
+ },
+ },
+}
+
+var atomXml = `` +
+ `<feed xmlns="http://www.w3.org/2005/Atom">` +
+ `<Title>Example Feed</Title>` +
+ `<Id>urn:uuid:60a76c80-d399-11d9-b93C-0003939e0af6</Id>` +
+ `<Link href="http://example.org/"></Link>` +
+ `<Updated>2003-12-13T18:30:02Z</Updated>` +
+ `<Author><Name>John Doe</Name><URI></URI><Email></Email></Author>` +
+ `<Entry>` +
+ `<Title>Atom-Powered Robots Run Amok</Title>` +
+ `<Id>urn:uuid:1225c695-cfb8-4ebb-aaaa-80da344efa6a</Id>` +
+ `<Link href="http://example.org/2003/12/13/atom03"></Link>` +
+ `<Updated>2003-12-13T18:30:02Z</Updated>` +
+ `<Author><Name></Name><URI></URI><Email></Email></Author>` +
+ `<Summary>Some text.</Summary>` +
+ `</Entry>` +
+ `</feed>`
+
+func ParseTime(str string) Time {
+ return Time(str)
+}
+
+func NewText(text string) Text {
+ return Text{
+ Body: text,
+ }
+}
--- /dev/null
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xml
+
+import "testing"
+
+type C struct {
+ Name string
+ Open bool
+}
+
+type A struct {
+ XMLName Name `xml:"http://domain a"`
+ C
+ B B
+ FieldA string
+}
+
+type B struct {
+ XMLName Name `xml:"b"`
+ C
+ FieldB string
+}
+
+const _1a = `
+<?xml version="1.0" encoding="UTF-8"?>
+<a xmlns="http://domain">
+ <name>KmlFile</name>
+ <open>1</open>
+ <b>
+ <name>Absolute</name>
+ <open>0</open>
+ <fieldb>bar</fieldb>
+ </b>
+ <fielda>foo</fielda>
+</a>
+`
+
+// Tests that embedded structs are unmarshalled.
+func TestEmbedded1(t *testing.T) {
+ var a A
+ if e := Unmarshal(StringReader(_1a), &a); e != nil {
+ t.Fatalf("Unmarshal: %s", e)
+ }
+ if a.FieldA != "foo" {
+ t.Fatalf("Unmarshal: expected 'foo' but found '%s'", a.FieldA)
+ }
+ if a.Name != "KmlFile" {
+ t.Fatalf("Unmarshal: expected 'KmlFile' but found '%s'", a.Name)
+ }
+ if !a.Open {
+ t.Fatal("Unmarshal: expected 'true' but found otherwise")
+ }
+ if a.B.FieldB != "bar" {
+ t.Fatalf("Unmarshal: expected 'bar' but found '%s'", a.B.FieldB)
+ }
+ if a.B.Name != "Absolute" {
+ t.Fatalf("Unmarshal: expected 'Absolute' but found '%s'", a.B.Name)
+ }
+ if a.B.Open {
+ t.Fatal("Unmarshal: expected 'false' but found otherwise")
+ }
+}
+
+type A2 struct {
+ XMLName Name `xml:"http://domain a"`
+ XY string
+ Xy string
+}
+
+const _2a = `
+<?xml version="1.0" encoding="UTF-8"?>
+<a xmlns="http://domain">
+ <xy>foo</xy>
+</a>
+`
+
+// Tests that conflicting field names get excluded.
+func TestEmbedded2(t *testing.T) {
+ var a A2
+ if e := Unmarshal(StringReader(_2a), &a); e != nil {
+ t.Fatalf("Unmarshal: %s", e)
+ }
+ if a.XY != "" {
+ t.Fatalf("Unmarshal: expected empty string but found '%s'", a.XY)
+ }
+ if a.Xy != "" {
+ t.Fatalf("Unmarshal: expected empty string but found '%s'", a.Xy)
+ }
+}
+
+type A3 struct {
+ XMLName Name `xml:"http://domain a"`
+ xy string
+}
+
+// Tests that private fields are not set.
+func TestEmbedded3(t *testing.T) {
+ var a A3
+ if e := Unmarshal(StringReader(_2a), &a); e != nil {
+ t.Fatalf("Unmarshal: %s", e)
+ }
+ if a.xy != "" {
+ t.Fatalf("Unmarshal: expected empty string but found '%s'", a.xy)
+ }
+}
+
+type A4 struct {
+ XMLName Name `xml:"http://domain a"`
+ Any string
+}
+
+// Tests that unmatched sub-elements are mapped to the field named Any.
+func TestEmbedded4(t *testing.T) {
+ var a A4
+ if e := Unmarshal(StringReader(_2a), &a); e != nil {
+ t.Fatalf("Unmarshal: %s", e)
+ }
+ if a.Any != "foo" {
+ t.Fatalf("Unmarshal: expected 'foo' but found '%s'", a.Any)
+ }
+}
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xml
+
+import (
+ "bufio"
+ "io"
+ "reflect"
+ "strconv"
+ "strings"
+)
+
+const (
+ // A generic XML header suitable for use with the output of Marshal and
+ // MarshalIndent. This is not automatically added to any output of this
+	// package; it is provided as a convenience.
+ Header = `<?xml version="1.0" encoding="UTF-8"?>` + "\n"
+)
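
The package never writes Header itself, so a caller that wants the declaration prepends it. A hypothetical helper sketching that (marshalWithHeader is not part of this package):

// marshalWithHeader writes the generic Header followed by the
// marshalled form of v.
func marshalWithHeader(w io.Writer, v interface{}) error {
	if _, err := io.WriteString(w, Header); err != nil {
		return err
	}
	return Marshal(w, v)
}
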
+
+// A Marshaler can produce well-formatted XML representing its internal state.
+// It is used by both Marshal and MarshalIndent.
+type Marshaler interface {
+ MarshalXML() ([]byte, error)
+}
+
+type printer struct {
+ *bufio.Writer
+}
+
+// Marshal writes an XML-formatted representation of v to w.
+//
+// If v implements Marshaler, then Marshal calls its MarshalXML method.
+// Otherwise, Marshal uses the following procedure to create the XML.
+//
+// Marshal handles an array or slice by marshalling each of the elements.
+// Marshal handles a pointer by marshalling the value it points at or, if the
+// pointer is nil, by writing nothing. Marshal handles an interface value by
+// marshalling the value it contains or, if the interface value is nil, by
+// writing nothing. Marshal handles all other data by writing one or more XML
+// elements containing the data.
+//
+// The name for the XML elements is taken from, in order of preference:
+// - the tag on an XMLName field, if the data is a struct
+// - the value of an XMLName field of type xml.Name
+// - the tag of the struct field used to obtain the data
+// - the name of the struct field used to obtain the data
+// - the name '???'.
+//
+// The XML element for a struct contains marshalled elements for each of the
+// exported fields of the struct, with these exceptions:
+// - the XMLName field, described above, is omitted.
+// - a field with tag "attr" becomes an attribute in the XML element.
+// - a field with tag "chardata" is written as character data,
+// not as an XML element.
+// - a field with tag "innerxml" is written verbatim,
+// not subject to the usual marshalling procedure.
+//
+// If a field uses a tag "a>b>c", then the element c will be nested inside
+// parent elements a and b. Fields that appear next to each other that name
+// the same parent will be enclosed in one XML element. For example:
+//
+// type Result struct {
+// XMLName xml.Name `xml:"result"`
+// FirstName string `xml:"person>name>first"`
+// LastName string `xml:"person>name>last"`
+// Age int `xml:"person>age"`
+// }
+//
+// xml.Marshal(w, &Result{FirstName: "John", LastName: "Doe", Age: 42})
+//
+// would be marshalled as:
+//
+// <result>
+// <person>
+// <name>
+// <first>John</first>
+// <last>Doe</last>
+// </name>
+// <age>42</age>
+// </person>
+// </result>
+//
+// Marshal will return an error if asked to marshal a channel, function, or map.
+func Marshal(w io.Writer, v interface{}) (err error) {
+ p := &printer{bufio.NewWriter(w)}
+ err = p.marshalValue(reflect.ValueOf(v), "???")
+ p.Flush()
+ return err
+}
+
+func (p *printer) marshalValue(val reflect.Value, name string) error {
+ if !val.IsValid() {
+ return nil
+ }
+
+ kind := val.Kind()
+ typ := val.Type()
+
+ // Try Marshaler
+ if typ.NumMethod() > 0 {
+ if marshaler, ok := val.Interface().(Marshaler); ok {
+ bytes, err := marshaler.MarshalXML()
+ if err != nil {
+ return err
+ }
+ p.Write(bytes)
+ return nil
+ }
+ }
+
+ // Drill into pointers/interfaces
+ if kind == reflect.Ptr || kind == reflect.Interface {
+ if val.IsNil() {
+ return nil
+ }
+ return p.marshalValue(val.Elem(), name)
+ }
+
+ // Slices and arrays iterate over the elements. They do not have an enclosing tag.
+ if (kind == reflect.Slice || kind == reflect.Array) && typ.Elem().Kind() != reflect.Uint8 {
+ for i, n := 0, val.Len(); i < n; i++ {
+ if err := p.marshalValue(val.Index(i), name); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+
+ // Find XML name
+ xmlns := ""
+ if kind == reflect.Struct {
+ if f, ok := typ.FieldByName("XMLName"); ok {
+ if tag := f.Tag.Get("xml"); tag != "" {
+ if i := strings.Index(tag, " "); i >= 0 {
+ xmlns, name = tag[:i], tag[i+1:]
+ } else {
+ name = tag
+ }
+ } else if v, ok := val.FieldByIndex(f.Index).Interface().(Name); ok && v.Local != "" {
+ xmlns, name = v.Space, v.Local
+ }
+ }
+ }
+
+ p.WriteByte('<')
+ p.WriteString(name)
+
+ // Attributes
+ if kind == reflect.Struct {
+ if len(xmlns) > 0 {
+ p.WriteString(` xmlns="`)
+ Escape(p, []byte(xmlns))
+ p.WriteByte('"')
+ }
+
+ for i, n := 0, typ.NumField(); i < n; i++ {
+ if f := typ.Field(i); f.PkgPath == "" && f.Tag.Get("xml") == "attr" {
+ if f.Type.Kind() == reflect.String {
+ if str := val.Field(i).String(); str != "" {
+ p.WriteByte(' ')
+ p.WriteString(strings.ToLower(f.Name))
+ p.WriteString(`="`)
+ Escape(p, []byte(str))
+ p.WriteByte('"')
+ }
+ }
+ }
+ }
+ }
+ p.WriteByte('>')
+
+ switch k := val.Kind(); k {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ p.WriteString(strconv.Itoa64(val.Int()))
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ p.WriteString(strconv.Uitoa64(val.Uint()))
+ case reflect.Float32, reflect.Float64:
+ p.WriteString(strconv.Ftoa64(val.Float(), 'g', -1))
+ case reflect.String:
+ Escape(p, []byte(val.String()))
+ case reflect.Bool:
+ p.WriteString(strconv.Btoa(val.Bool()))
+ case reflect.Array:
+ // will be [...]byte
+ bytes := make([]byte, val.Len())
+ for i := range bytes {
+ bytes[i] = val.Index(i).Interface().(byte)
+ }
+ Escape(p, bytes)
+ case reflect.Slice:
+ // will be []byte
+ bytes := val.Interface().([]byte)
+ Escape(p, bytes)
+ case reflect.Struct:
+ s := parentStack{printer: p}
+ for i, n := 0, val.NumField(); i < n; i++ {
+ if f := typ.Field(i); f.Name != "XMLName" && f.PkgPath == "" {
+ name := f.Name
+ vf := val.Field(i)
+ switch tag := f.Tag.Get("xml"); tag {
+ case "":
+ s.trim(nil)
+ case "chardata":
+ if tk := f.Type.Kind(); tk == reflect.String {
+ Escape(p, []byte(vf.String()))
+ } else if tk == reflect.Slice {
+ if elem, ok := vf.Interface().([]byte); ok {
+ Escape(p, elem)
+ }
+ }
+ continue
+ case "innerxml":
+ iface := vf.Interface()
+ switch raw := iface.(type) {
+ case []byte:
+ p.Write(raw)
+ continue
+ case string:
+ p.WriteString(raw)
+ continue
+ }
+ case "attr":
+ continue
+ default:
+ parents := strings.Split(tag, ">")
+ if len(parents) == 1 {
+ parents, name = nil, tag
+ } else {
+ parents, name = parents[:len(parents)-1], parents[len(parents)-1]
+ if parents[0] == "" {
+ parents[0] = f.Name
+ }
+ }
+
+ s.trim(parents)
+ if !(vf.Kind() == reflect.Ptr || vf.Kind() == reflect.Interface) || !vf.IsNil() {
+ s.push(parents[len(s.stack):])
+ }
+ }
+
+ if err := p.marshalValue(vf, name); err != nil {
+ return err
+ }
+ }
+ }
+ s.trim(nil)
+ default:
+ return &UnsupportedTypeError{typ}
+ }
+
+ p.WriteByte('<')
+ p.WriteByte('/')
+ p.WriteString(name)
+ p.WriteByte('>')
+
+ return nil
+}
+
+type parentStack struct {
+ *printer
+ stack []string
+}
+
+// trim updates the XML context to match the longest common prefix of the stack
+// and the given parents. A closing tag will be written for every parent
+// popped. Passing a zero slice or nil will close all the elements.
+func (s *parentStack) trim(parents []string) {
+ split := 0
+ for ; split < len(parents) && split < len(s.stack); split++ {
+ if parents[split] != s.stack[split] {
+ break
+ }
+ }
+
+ for i := len(s.stack) - 1; i >= split; i-- {
+ s.WriteString("</")
+ s.WriteString(s.stack[i])
+ s.WriteByte('>')
+ }
+
+ s.stack = parents[:split]
+}
+
+// push adds parent elements to the stack and writes open tags.
+func (s *parentStack) push(parents []string) {
+ for i := 0; i < len(parents); i++ {
+ s.WriteString("<")
+ s.WriteString(parents[i])
+ s.WriteByte('>')
+ }
+ s.stack = append(s.stack, parents...)
+}
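
A hypothetical in-package test sketching the trim/push contract that marshalValue relies on for "a>b>c" tags: push opens parent elements, trim closes everything beyond the common prefix.

func TestParentStackSketch(t *testing.T) {
	var out bytes.Buffer
	s := parentStack{printer: &printer{bufio.NewWriter(&out)}}
	s.push([]string{"person", "name"}) // writes <person><name>
	s.trim([]string{"person"})         // closes </name>, keeps <person> open
	s.trim(nil)                        // closes </person>
	s.Flush()
	if got, want := out.String(), "<person><name></name></person>"; got != want {
		t.Fatalf("got %q, want %q", got, want)
	}
}
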
+
+// An UnsupportedTypeError is returned when Marshal or MarshalIndent encounter a type
+// that cannot be converted into XML.
+type UnsupportedTypeError struct {
+ Type reflect.Type
+}
+
+func (e *UnsupportedTypeError) Error() string {
+ return "xml: unsupported type: " + e.Type.String()
+}
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xml
+
+import (
+ "bytes"
+ "reflect"
+ "strconv"
+ "strings"
+ "testing"
+)
+
+type DriveType int
+
+const (
+ HyperDrive DriveType = iota
+ ImprobabilityDrive
+)
+
+type Passenger struct {
+ Name []string `xml:"name"`
+ Weight float32 `xml:"weight"`
+}
+
+type Ship struct {
+ XMLName Name `xml:"spaceship"`
+
+ Name string `xml:"attr"`
+ Pilot string `xml:"attr"`
+ Drive DriveType `xml:"drive"`
+ Age uint `xml:"age"`
+ Passenger []*Passenger `xml:"passenger"`
+ secret string
+}
+
+type RawXML string
+
+func (rx RawXML) MarshalXML() ([]byte, error) {
+ return []byte(rx), nil
+}
+
+type NamedType string
+
+type Port struct {
+ XMLName Name `xml:"port"`
+ Type string `xml:"attr"`
+ Number string `xml:"chardata"`
+}
+
+type Domain struct {
+ XMLName Name `xml:"domain"`
+ Country string `xml:"attr"`
+ Name []byte `xml:"chardata"`
+}
+
+type Book struct {
+ XMLName Name `xml:"book"`
+ Title string `xml:"chardata"`
+}
+
+type SecretAgent struct {
+ XMLName Name `xml:"agent"`
+ Handle string `xml:"attr"`
+ Identity string
+ Obfuscate string `xml:"innerxml"`
+}
+
+type NestedItems struct {
+ XMLName Name `xml:"result"`
+ Items []string `xml:">item"`
+ Item1 []string `xml:"Items>item1"`
+}
+
+type NestedOrder struct {
+ XMLName Name `xml:"result"`
+ Field1 string `xml:"parent>c"`
+ Field2 string `xml:"parent>b"`
+ Field3 string `xml:"parent>a"`
+}
+
+type MixedNested struct {
+ XMLName Name `xml:"result"`
+ A string `xml:"parent1>a"`
+ B string `xml:"b"`
+ C string `xml:"parent1>parent2>c"`
+ D string `xml:"parent1>d"`
+}
+
+type NilTest struct {
+ A interface{} `xml:"parent1>parent2>a"`
+ B interface{} `xml:"parent1>b"`
+ C interface{} `xml:"parent1>parent2>c"`
+}
+
+type Service struct {
+ XMLName Name `xml:"service"`
+ Domain *Domain `xml:"host>domain"`
+ Port *Port `xml:"host>port"`
+ Extra1 interface{}
+ Extra2 interface{} `xml:"host>extra2"`
+}
+
+var nilStruct *Ship
+
+var marshalTests = []struct {
+ Value interface{}
+ ExpectXML string
+}{
+ // Test nil marshals to nothing
+ {Value: nil, ExpectXML: ``},
+ {Value: nilStruct, ExpectXML: ``},
+
+ // Test value types (no tag name, so ???)
+ {Value: true, ExpectXML: `<???>true</???>`},
+ {Value: int(42), ExpectXML: `<???>42</???>`},
+ {Value: int8(42), ExpectXML: `<???>42</???>`},
+ {Value: int16(42), ExpectXML: `<???>42</???>`},
+ {Value: int32(42), ExpectXML: `<???>42</???>`},
+ {Value: uint(42), ExpectXML: `<???>42</???>`},
+ {Value: uint8(42), ExpectXML: `<???>42</???>`},
+ {Value: uint16(42), ExpectXML: `<???>42</???>`},
+ {Value: uint32(42), ExpectXML: `<???>42</???>`},
+ {Value: float32(1.25), ExpectXML: `<???>1.25</???>`},
+ {Value: float64(1.25), ExpectXML: `<???>1.25</???>`},
+ {Value: uintptr(0xFFDD), ExpectXML: `<???>65501</???>`},
+ {Value: "gopher", ExpectXML: `<???>gopher</???>`},
+ {Value: []byte("gopher"), ExpectXML: `<???>gopher</???>`},
+ {Value: "</>", ExpectXML: `<???></></???>`},
+ {Value: []byte("</>"), ExpectXML: `<???></></???>`},
+ {Value: [3]byte{'<', '/', '>'}, ExpectXML: `<???></></???>`},
+ {Value: NamedType("potato"), ExpectXML: `<???>potato</???>`},
+ {Value: []int{1, 2, 3}, ExpectXML: `<???>1</???><???>2</???><???>3</???>`},
+ {Value: [3]int{1, 2, 3}, ExpectXML: `<???>1</???><???>2</???><???>3</???>`},
+
+ // Test innerxml
+ {Value: RawXML("</>"), ExpectXML: `</>`},
+ {
+ Value: &SecretAgent{
+ Handle: "007",
+ Identity: "James Bond",
+ Obfuscate: "<redacted/>",
+ },
+ //ExpectXML: `<agent handle="007"><redacted/></agent>`,
+ ExpectXML: `<agent handle="007"><Identity>James Bond</Identity><redacted/></agent>`,
+ },
+
+ // Test structs
+ {Value: &Port{Type: "ssl", Number: "443"}, ExpectXML: `<port type="ssl">443</port>`},
+ {Value: &Port{Number: "443"}, ExpectXML: `<port>443</port>`},
+ {Value: &Port{Type: "<unix>"}, ExpectXML: `<port type="<unix>"></port>`},
+ {Value: &Domain{Name: []byte("google.com&friends")}, ExpectXML: `<domain>google.com&friends</domain>`},
+ {Value: &Book{Title: "Pride & Prejudice"}, ExpectXML: `<book>Pride & Prejudice</book>`},
+ {Value: atomValue, ExpectXML: atomXml},
+ {
+ Value: &Ship{
+ Name: "Heart of Gold",
+ Pilot: "Computer",
+ Age: 1,
+ Drive: ImprobabilityDrive,
+ Passenger: []*Passenger{
+ &Passenger{
+ Name: []string{"Zaphod", "Beeblebrox"},
+ Weight: 7.25,
+ },
+ &Passenger{
+ Name: []string{"Trisha", "McMillen"},
+ Weight: 5.5,
+ },
+ &Passenger{
+ Name: []string{"Ford", "Prefect"},
+ Weight: 7,
+ },
+ &Passenger{
+ Name: []string{"Arthur", "Dent"},
+ Weight: 6.75,
+ },
+ },
+ },
+ ExpectXML: `<spaceship name="Heart of Gold" pilot="Computer">` +
+ `<drive>` + strconv.Itoa(int(ImprobabilityDrive)) + `</drive>` +
+ `<age>1</age>` +
+ `<passenger>` +
+ `<name>Zaphod</name>` +
+ `<name>Beeblebrox</name>` +
+ `<weight>7.25</weight>` +
+ `</passenger>` +
+ `<passenger>` +
+ `<name>Trisha</name>` +
+ `<name>McMillen</name>` +
+ `<weight>5.5</weight>` +
+ `</passenger>` +
+ `<passenger>` +
+ `<name>Ford</name>` +
+ `<name>Prefect</name>` +
+ `<weight>7</weight>` +
+ `</passenger>` +
+ `<passenger>` +
+ `<name>Arthur</name>` +
+ `<name>Dent</name>` +
+ `<weight>6.75</weight>` +
+ `</passenger>` +
+ `</spaceship>`,
+ },
+ // Test a>b
+ {
+ Value: NestedItems{Items: []string{}, Item1: []string{}},
+ ExpectXML: `<result>` +
+ `<Items>` +
+ `</Items>` +
+ `</result>`,
+ },
+ {
+ Value: NestedItems{Items: []string{}, Item1: []string{"A"}},
+ ExpectXML: `<result>` +
+ `<Items>` +
+ `<item1>A</item1>` +
+ `</Items>` +
+ `</result>`,
+ },
+ {
+ Value: NestedItems{Items: []string{"A", "B"}, Item1: []string{}},
+ ExpectXML: `<result>` +
+ `<Items>` +
+ `<item>A</item>` +
+ `<item>B</item>` +
+ `</Items>` +
+ `</result>`,
+ },
+ {
+ Value: NestedItems{Items: []string{"A", "B"}, Item1: []string{"C"}},
+ ExpectXML: `<result>` +
+ `<Items>` +
+ `<item>A</item>` +
+ `<item>B</item>` +
+ `<item1>C</item1>` +
+ `</Items>` +
+ `</result>`,
+ },
+ {
+ Value: NestedOrder{Field1: "C", Field2: "B", Field3: "A"},
+ ExpectXML: `<result>` +
+ `<parent>` +
+ `<c>C</c>` +
+ `<b>B</b>` +
+ `<a>A</a>` +
+ `</parent>` +
+ `</result>`,
+ },
+ {
+ Value: NilTest{A: "A", B: nil, C: "C"},
+ ExpectXML: `<???>` +
+ `<parent1>` +
+ `<parent2><a>A</a></parent2>` +
+ `<parent2><c>C</c></parent2>` +
+ `</parent1>` +
+ `</???>`,
+ },
+ {
+ Value: MixedNested{A: "A", B: "B", C: "C", D: "D"},
+ ExpectXML: `<result>` +
+ `<parent1><a>A</a></parent1>` +
+ `<b>B</b>` +
+ `<parent1>` +
+ `<parent2><c>C</c></parent2>` +
+ `<d>D</d>` +
+ `</parent1>` +
+ `</result>`,
+ },
+ {
+ Value: Service{Port: &Port{Number: "80"}},
+ ExpectXML: `<service><host><port>80</port></host></service>`,
+ },
+ {
+ Value: Service{},
+ ExpectXML: `<service></service>`,
+ },
+ {
+ Value: Service{Port: &Port{Number: "80"}, Extra1: "A", Extra2: "B"},
+ ExpectXML: `<service>` +
+ `<host><port>80</port></host>` +
+ `<Extra1>A</Extra1>` +
+ `<host><extra2>B</extra2></host>` +
+ `</service>`,
+ },
+ {
+ Value: Service{Port: &Port{Number: "80"}, Extra2: "example"},
+ ExpectXML: `<service>` +
+ `<host><port>80</port></host>` +
+ `<host><extra2>example</extra2></host>` +
+ `</service>`,
+ },
+}
+
+func TestMarshal(t *testing.T) {
+ for idx, test := range marshalTests {
+ buf := bytes.NewBuffer(nil)
+ err := Marshal(buf, test.Value)
+ if err != nil {
+ t.Errorf("#%d: Error: %s", idx, err)
+ continue
+ }
+ if got, want := buf.String(), test.ExpectXML; got != want {
+ if strings.Contains(want, "\n") {
+ t.Errorf("#%d: marshal(%#v) - GOT:\n%s\nWANT:\n%s", idx, test.Value, got, want)
+ } else {
+ t.Errorf("#%d: marshal(%#v) = %#q want %#q", idx, test.Value, got, want)
+ }
+ }
+ }
+}
+
+var marshalErrorTests = []struct {
+ Value interface{}
+ Err string
+ Kind reflect.Kind
+}{
+ {
+ Value: make(chan bool),
+ Err: "xml: unsupported type: chan bool",
+ Kind: reflect.Chan,
+ },
+ {
+ Value: map[string]string{
+ "question": "What do you get when you multiply six by nine?",
+ "answer": "42",
+ },
+ Err: "xml: unsupported type: map[string] string",
+ Kind: reflect.Map,
+ },
+ {
+ Value: map[*Ship]bool{nil: false},
+ Err: "xml: unsupported type: map[*xml.Ship] bool",
+ Kind: reflect.Map,
+ },
+}
+
+func TestMarshalErrors(t *testing.T) {
+ for idx, test := range marshalErrorTests {
+ buf := bytes.NewBuffer(nil)
+ err := Marshal(buf, test.Value)
+ if err == nil || err.Error() != test.Err {
+ t.Errorf("#%d: marshal(%#v) = [error] %q, want %q", idx, test.Value, err, test.Err)
+ }
+ if kind := err.(*UnsupportedTypeError).Type.Kind(); kind != test.Kind {
+ t.Errorf("#%d: marshal(%#v) = [error kind] %s, want %s", idx, test.Value, kind, test.Kind)
+ }
+ }
+}
+
+// Do invertibility testing on the various structures that we test
+func TestUnmarshal(t *testing.T) {
+ for i, test := range marshalTests {
+ // Skip the nil pointers
+ if i <= 1 {
+ continue
+ }
+
+ var dest interface{}
+
+ switch test.Value.(type) {
+ case *Ship, Ship:
+ dest = &Ship{}
+ case *Port, Port:
+ dest = &Port{}
+ case *Domain, Domain:
+ dest = &Domain{}
+ case *Feed, Feed:
+ dest = &Feed{}
+ default:
+ continue
+ }
+
+ buffer := bytes.NewBufferString(test.ExpectXML)
+ err := Unmarshal(buffer, dest)
+
+ // Don't compare XMLNames
+ switch fix := dest.(type) {
+ case *Ship:
+ fix.XMLName = Name{}
+ case *Port:
+ fix.XMLName = Name{}
+ case *Domain:
+ fix.XMLName = Name{}
+ case *Feed:
+ fix.XMLName = Name{}
+ fix.Author.InnerXML = ""
+ for i := range fix.Entry {
+ fix.Entry[i].Author.InnerXML = ""
+ }
+ }
+
+ if err != nil {
+ t.Errorf("#%d: unexpected error: %#v", i, err)
+ } else if got, want := dest, test.Value; !reflect.DeepEqual(got, want) {
+ t.Errorf("#%d: unmarshal(%#s) = %#v, want %#v", i, test.ExpectXML, got, want)
+ }
+ }
+}
+
+func BenchmarkMarshal(b *testing.B) {
+ idx := len(marshalTests) - 1
+ test := marshalTests[idx]
+
+ buf := bytes.NewBuffer(nil)
+ for i := 0; i < b.N; i++ {
+ Marshal(buf, test.Value)
+ buf.Truncate(0)
+ }
+}
+
+func BenchmarkUnmarshal(b *testing.B) {
+ idx := len(marshalTests) - 1
+ test := marshalTests[idx]
+ sm := &Ship{}
+ xml := []byte(test.ExpectXML)
+
+ for i := 0; i < b.N; i++ {
+ buffer := bytes.NewBuffer(xml)
+ Unmarshal(buffer, sm)
+ }
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xml
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "reflect"
+ "strconv"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+// BUG(rsc): Mapping between XML elements and data structures is inherently flawed:
+// an XML element is an order-dependent collection of anonymous
+// values, while a data structure is an order-independent collection
+// of named values.
+// See package json for a textual representation more suitable
+// to data structures.
+
+// Unmarshal parses an XML element from r and uses the
+// reflect library to fill in an arbitrary struct, slice, or string
+// pointed at by val. Well-formed data that does not fit
+// into val is discarded.
+//
+// For example, given these definitions:
+//
+// type Email struct {
+// Where string `xml:"attr"`
+// Addr string
+// }
+//
+// type Result struct {
+// XMLName xml.Name `xml:"result"`
+// Name string
+// Phone string
+// Email []Email
+// Groups []string `xml:"group>value"`
+// }
+//
+// result := Result{Name: "name", Phone: "phone", Email: nil}
+//
+// unmarshalling the XML input
+//
+// <result>
+// <email where="home">
+// <addr>gre@example.com</addr>
+// </email>
+// <email where='work'>
+// <addr>gre@work.com</addr>
+// </email>
+// <name>Grace R. Emlin</name>
+// <group>
+// <value>Friends</value>
+// <value>Squash</value>
+// </group>
+// <address>123 Main Street</address>
+// </result>
+//
+// via Unmarshal(r, &result) is equivalent to assigning
+//
+// r = Result{xml.Name{"", "result"},
+// "Grace R. Emlin", // name
+// "phone", // no phone given
+// []Email{
+// Email{"home", "gre@example.com"},
+// Email{"work", "gre@work.com"},
+// },
+// []string{"Friends", "Squash"},
+// }
+//
+// Note that the field r.Phone has not been modified and
+// that the XML <address> element was discarded. Also, the field
+// Groups was assigned considering the element path provided in the
+// field tag.
+//
+// Because Unmarshal uses the reflect package, it can only assign
+// to exported (upper case) fields. Unmarshal uses a case-insensitive
+// comparison to match XML element names to struct field names.
+//
+// Unmarshal maps an XML element to a struct using the following rules.
+// In the rules, the tag of a field refers to the value associated with the
+// key 'xml' in the struct field's tag (see the example above).
+//
+// * If the struct has a field of type []byte or string with tag "innerxml",
+// Unmarshal accumulates the raw XML nested inside the element
+// in that field. The rest of the rules still apply.
+//
+// * If the struct has a field named XMLName of type xml.Name,
+// Unmarshal records the element name in that field.
+//
+// * If the XMLName field has an associated tag of the form
+// "name" or "namespace-URL name", the XML element must have
+// the given name (and, optionally, name space) or else Unmarshal
+// returns an error.
+//
+// * If the XML element has an attribute whose name matches a
+// struct field of type string with tag "attr", Unmarshal records
+// the attribute value in that field.
+//
+// * If the XML element contains character data, that data is
+// accumulated in the first struct field that has tag "chardata".
+// The struct field may have type []byte or string.
+// If there is no such field, the character data is discarded.
+//
+// * If the XML element contains comments, they are accumulated in
+// the first struct field that has tag "comments". The struct
+// field may have type []byte or string. If there is no such
+// field, the comments are discarded.
+//
+// * If the XML element contains a sub-element whose name matches
+// the prefix of a tag formatted as "a>b>c", unmarshal
+// will descend into the XML structure looking for elements with the
+// given names, and will map the innermost elements to that struct field.
+// A tag starting with ">" is equivalent to one starting
+// with the field name followed by ">".
+//
+// * If the XML element contains a sub-element whose name
+// matches a field whose tag is neither "attr" nor "chardata",
+// Unmarshal maps the sub-element to that struct field.
+// Otherwise, if the struct has a field named Any, unmarshal
+// maps the sub-element to that struct field.
+//
+// Unmarshal maps an XML element to a string or []byte by saving the
+// concatenation of that element's character data in the string or
+// []byte.
+//
+// Unmarshal maps an attribute value to a string or []byte by saving
+// the value in the string or slice.
+//
+// Unmarshal maps an XML element to a slice by extending the length of
+// the slice and mapping the element to the newly created value.
+//
+// Unmarshal maps an XML element or attribute value to a bool by
+// setting it to the boolean value represented by the string.
+//
+// Unmarshal maps an XML element or attribute value to an integer or
+// floating-point field by setting the field to the result of
+// interpreting the string value in decimal. There is no check for
+// overflow.
+//
+// Unmarshal maps an XML element to an xml.Name by recording the
+// element name.
+//
+// Unmarshal maps an XML element to a pointer by setting the pointer
+// to a freshly allocated value and then mapping the element to that value.
+//
+func Unmarshal(r io.Reader, val interface{}) error {
+ v := reflect.ValueOf(val)
+ if v.Kind() != reflect.Ptr {
+ return errors.New("non-pointer passed to Unmarshal")
+ }
+ p := NewParser(r)
+ elem := v.Elem()
+ err := p.unmarshal(elem, nil)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// An UnmarshalError represents an error in the unmarshalling process.
+type UnmarshalError string
+
+func (e UnmarshalError) Error() string { return string(e) }
+
+// A TagPathError represents an error in the unmarshalling process
+// caused by the use of field tags with conflicting paths.
+type TagPathError struct {
+ Struct reflect.Type
+ Field1, Tag1 string
+ Field2, Tag2 string
+}
+
+func (e *TagPathError) Error() string {
+ return fmt.Sprintf("%s field %q with tag %q conflicts with field %q with tag %q", e.Struct, e.Field1, e.Tag1, e.Field2, e.Tag2)
+}
+
+// The Parser's Unmarshal method is like xml.Unmarshal
+// except that it can be passed a pointer to the initial start element,
+// useful when a client reads some raw XML tokens itself
+// but also defers to Unmarshal for some elements.
+// Passing a nil start element indicates that Unmarshal should
+// read the token stream to find the start element.
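+//
+// For example, a client that walks the token stream itself might hand
+// selected elements to Unmarshal. A minimal sketch (Item is a
+// hypothetical caller-defined struct):
+//
+// for {
+// tok, err := p.Token()
+// if err != nil {
+// break
+// }
+// if se, ok := tok.(StartElement); ok && se.Name.Local == "item" {
+// var item Item
+// if err := p.Unmarshal(&item, &se); err != nil {
+// // handle the error
+// }
+// }
+// }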
+func (p *Parser) Unmarshal(val interface{}, start *StartElement) error {
+ v := reflect.ValueOf(val)
+ if v.Kind() != reflect.Ptr {
+ return errors.New("non-pointer passed to Unmarshal")
+ }
+ return p.unmarshal(v.Elem(), start)
+}
+
+// fieldName strips invalid characters from an XML name
+// to create a valid Go struct name. It also converts the
+// name to lower case letters.
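+// For example, "Profile-Image" becomes "profileimage" and
+// "_score" becomes "score".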
+func fieldName(original string) string {
+ var i int
+ // remove leading underscores, without exhausting all characters
+ for i = 0; i < len(original)-1 && original[i] == '_'; i++ {
+ }
+
+ return strings.Map(
+ func(x rune) rune {
+ if x == '_' || unicode.IsDigit(x) || unicode.IsLetter(x) {
+ return unicode.ToLower(x)
+ }
+ return -1
+ },
+ original[i:])
+}
+
+// Unmarshal a single XML element into val.
+func (p *Parser) unmarshal(val reflect.Value, start *StartElement) error {
+ // Find start element if we need it.
+ if start == nil {
+ for {
+ tok, err := p.Token()
+ if err != nil {
+ return err
+ }
+ if t, ok := tok.(StartElement); ok {
+ start = &t
+ break
+ }
+ }
+ }
+
+ if pv := val; pv.Kind() == reflect.Ptr {
+ if pv.IsNil() {
+ pv.Set(reflect.New(pv.Type().Elem()))
+ }
+ val = pv.Elem()
+ }
+
+ var (
+ data []byte
+ saveData reflect.Value
+ comment []byte
+ saveComment reflect.Value
+ saveXML reflect.Value
+ saveXMLIndex int
+ saveXMLData []byte
+ sv reflect.Value
+ styp reflect.Type
+ fieldPaths map[string]pathInfo
+ )
+
+ switch v := val; v.Kind() {
+ default:
+ return errors.New("unknown type " + v.Type().String())
+
+ case reflect.Slice:
+ typ := v.Type()
+ if typ.Elem().Kind() == reflect.Uint8 {
+ // []byte
+ saveData = v
+ break
+ }
+
+ // Slice of element values.
+ // Grow slice.
+ n := v.Len()
+ if n >= v.Cap() {
+ ncap := 2 * n
+ if ncap < 4 {
+ ncap = 4
+ }
+ new := reflect.MakeSlice(typ, n, ncap)
+ reflect.Copy(new, v)
+ v.Set(new)
+ }
+ v.SetLen(n + 1)
+
+ // Recur to read element into slice.
+ if err := p.unmarshal(v.Index(n), start); err != nil {
+ v.SetLen(n)
+ return err
+ }
+ return nil
+
+ case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, reflect.String:
+ saveData = v
+
+ case reflect.Struct:
+ if _, ok := v.Interface().(Name); ok {
+ v.Set(reflect.ValueOf(start.Name))
+ break
+ }
+
+ sv = v
+ typ := sv.Type()
+ styp = typ
+ // Assign name.
+ if f, ok := typ.FieldByName("XMLName"); ok {
+ // Validate element name.
+ if tag := f.Tag.Get("xml"); tag != "" {
+ ns := ""
+ i := strings.LastIndex(tag, " ")
+ if i >= 0 {
+ ns, tag = tag[0:i], tag[i+1:]
+ }
+ if tag != start.Name.Local {
+ return UnmarshalError("expected element type <" + tag + "> but have <" + start.Name.Local + ">")
+ }
+ if ns != "" && ns != start.Name.Space {
+ e := "expected element <" + tag + "> in name space " + ns + " but have "
+ if start.Name.Space == "" {
+ e += "no name space"
+ } else {
+ e += start.Name.Space
+ }
+ return UnmarshalError(e)
+ }
+ }
+
+ // Save
+ v := sv.FieldByIndex(f.Index)
+ if _, ok := v.Interface().(Name); ok {
+ v.Set(reflect.ValueOf(start.Name))
+ }
+ }
+
+ // Assign attributes.
+ // Also, determine whether we need to save character data or comments.
+ for i, n := 0, typ.NumField(); i < n; i++ {
+ f := typ.Field(i)
+ switch f.Tag.Get("xml") {
+ case "attr":
+ strv := sv.FieldByIndex(f.Index)
+ // Look for attribute.
+ val := ""
+ k := strings.ToLower(f.Name)
+ for _, a := range start.Attr {
+ if fieldName(a.Name.Local) == k {
+ val = a.Value
+ break
+ }
+ }
+ copyValue(strv, []byte(val))
+
+ case "comment":
+ if !saveComment.IsValid() {
+ saveComment = sv.FieldByIndex(f.Index)
+ }
+
+ case "chardata":
+ if !saveData.IsValid() {
+ saveData = sv.FieldByIndex(f.Index)
+ }
+
+ case "innerxml":
+ if !saveXML.IsValid() {
+ saveXML = sv.FieldByIndex(f.Index)
+ if p.saved == nil {
+ saveXMLIndex = 0
+ p.saved = new(bytes.Buffer)
+ } else {
+ saveXMLIndex = p.savedOffset()
+ }
+ }
+
+ default:
+ if tag := f.Tag.Get("xml"); strings.Contains(tag, ">") {
+ if fieldPaths == nil {
+ fieldPaths = make(map[string]pathInfo)
+ }
+ path := strings.ToLower(tag)
+ if strings.HasPrefix(tag, ">") {
+ path = strings.ToLower(f.Name) + path
+ }
+ if strings.HasSuffix(tag, ">") {
+ path = path[:len(path)-1]
+ }
+ err := addFieldPath(sv, fieldPaths, path, f.Index)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ }
+ }
+
+ // Find end element.
+ // Process sub-elements along the way.
+Loop:
+ for {
+ var savedOffset int
+ if saveXML.IsValid() {
+ savedOffset = p.savedOffset()
+ }
+ tok, err := p.Token()
+ if err != nil {
+ return err
+ }
+ switch t := tok.(type) {
+ case StartElement:
+ // Sub-element.
+ // Look up by tag name.
+ if sv.IsValid() {
+ k := fieldName(t.Name.Local)
+
+ if fieldPaths != nil {
+ if _, found := fieldPaths[k]; found {
+ if err := p.unmarshalPaths(sv, fieldPaths, k, &t); err != nil {
+ return err
+ }
+ continue Loop
+ }
+ }
+
+ match := func(s string) bool {
+ // check if the name matches ignoring case
+ if strings.ToLower(s) != k {
+ return false
+ }
+ // now check that it's public
+ c, _ := utf8.DecodeRuneInString(s)
+ return unicode.IsUpper(c)
+ }
+
+ f, found := styp.FieldByNameFunc(match)
+ if !found { // fall back to mop-up field named "Any"
+ f, found = styp.FieldByName("Any")
+ }
+ if found {
+ if err := p.unmarshal(sv.FieldByIndex(f.Index), &t); err != nil {
+ return err
+ }
+ continue Loop
+ }
+ }
+ // Not saving sub-element but still have to skip over it.
+ if err := p.Skip(); err != nil {
+ return err
+ }
+
+ case EndElement:
+ if saveXML.IsValid() {
+ saveXMLData = p.saved.Bytes()[saveXMLIndex:savedOffset]
+ if saveXMLIndex == 0 {
+ p.saved = nil
+ }
+ }
+ break Loop
+
+ case CharData:
+ if saveData.IsValid() {
+ data = append(data, t...)
+ }
+
+ case Comment:
+ if saveComment.IsValid() {
+ comment = append(comment, t...)
+ }
+ }
+ }
+
+ if err := copyValue(saveData, data); err != nil {
+ return err
+ }
+
+ switch t := saveComment; t.Kind() {
+ case reflect.String:
+ t.SetString(string(comment))
+ case reflect.Slice:
+ t.Set(reflect.ValueOf(comment))
+ }
+
+ switch t := saveXML; t.Kind() {
+ case reflect.String:
+ t.SetString(string(saveXMLData))
+ case reflect.Slice:
+ t.Set(reflect.ValueOf(saveXMLData))
+ }
+
+ return nil
+}
+
+func copyValue(dst reflect.Value, src []byte) (err error) {
+ // Helper functions for integer and unsigned integer conversions
+ var itmp int64
+ getInt64 := func() bool {
+ itmp, err = strconv.Atoi64(string(src))
+ // TODO: should check sizes
+ return err == nil
+ }
+ var utmp uint64
+ getUint64 := func() bool {
+ utmp, err = strconv.Atoui64(string(src))
+ // TODO: check for overflow?
+ return err == nil
+ }
+ var ftmp float64
+ getFloat64 := func() bool {
+ ftmp, err = strconv.Atof64(string(src))
+ // TODO: check for overflow?
+ return err == nil
+ }
+
+ // Save accumulated data and comments
+ switch t := dst; t.Kind() {
+ case reflect.Invalid:
+ // Probably a comment, handled below
+ default:
+ return errors.New("cannot happen: unknown type " + t.Type().String())
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ if !getInt64() {
+ return err
+ }
+ t.SetInt(itmp)
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ if !getUint64() {
+ return err
+ }
+ t.SetUint(utmp)
+ case reflect.Float32, reflect.Float64:
+ if !getFloat64() {
+ return err
+ }
+ t.SetFloat(ftmp)
+ case reflect.Bool:
+ value, err := strconv.Atob(strings.TrimSpace(string(src)))
+ if err != nil {
+ return err
+ }
+ t.SetBool(value)
+ case reflect.String:
+ t.SetString(string(src))
+ case reflect.Slice:
+ t.Set(reflect.ValueOf(src))
+ }
+ return nil
+}
+
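+// A pathInfo records where a registered field path leads: the index
+// sequence of the destination struct field, and whether this map entry
+// names that field itself (complete) or is only a prefix of a longer path.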
+type pathInfo struct {
+ fieldIdx []int
+ complete bool
+}
+
+// addFieldPath takes an element path such as "a>b>c" and fills the
+// paths map with all paths leading to it ("a", "a>b", and "a>b>c").
+// It is okay for paths to share a common, shorter prefix but not ok
+// for one path to itself be a prefix of another.
+func addFieldPath(sv reflect.Value, paths map[string]pathInfo, path string, fieldIdx []int) error {
+ if info, found := paths[path]; found {
+ return tagError(sv, info.fieldIdx, fieldIdx)
+ }
+ paths[path] = pathInfo{fieldIdx, true}
+ for {
+ i := strings.LastIndex(path, ">")
+ if i < 0 {
+ break
+ }
+ path = path[:i]
+ if info, found := paths[path]; found {
+ if info.complete {
+ return tagError(sv, info.fieldIdx, fieldIdx)
+ }
+ } else {
+ paths[path] = pathInfo{fieldIdx, false}
+ }
+ }
+ return nil
+}
+
+func tagError(sv reflect.Value, idx1 []int, idx2 []int) error {
+ t := sv.Type()
+ f1 := t.FieldByIndex(idx1)
+ f2 := t.FieldByIndex(idx2)
+ return &TagPathError{t, f1.Name, f1.Tag.Get("xml"), f2.Name, f2.Tag.Get("xml")}
+}
+
+// unmarshalPaths walks down an XML structure looking for
+// wanted paths, and calls unmarshal on them.
+func (p *Parser) unmarshalPaths(sv reflect.Value, paths map[string]pathInfo, path string, start *StartElement) error {
+ if info, _ := paths[path]; info.complete {
+ return p.unmarshal(sv.FieldByIndex(info.fieldIdx), start)
+ }
+ for {
+ tok, err := p.Token()
+ if err != nil {
+ return err
+ }
+ switch t := tok.(type) {
+ case StartElement:
+ k := path + ">" + fieldName(t.Name.Local)
+ if _, found := paths[k]; found {
+ if err := p.unmarshalPaths(sv, paths, k, &t); err != nil {
+ return err
+ }
+ continue
+ }
+ if err := p.Skip(); err != nil {
+ return err
+ }
+ case EndElement:
+ return nil
+ }
+ }
+ panic("unreachable")
+}
+
+// Skip assumes a start element has already been read.
+// It reads tokens until it finds the matching end element.
+// Token takes care of making sure the end element
+// matches the start element we saw.
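+// Skip is typically used to discard a sub-element that the
+// caller does not want to unmarshal.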
+func (p *Parser) Skip() error {
+ for {
+ tok, err := p.Token()
+ if err != nil {
+ return err
+ }
+ switch tok.(type) {
+ case StartElement:
+ if err := p.Skip(); err != nil {
+ return err
+ }
+ case EndElement:
+ return nil
+ }
+ }
+ panic("unreachable")
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xml
+
+import (
+ "reflect"
+ "testing"
+)
+
+// Stripped down Atom feed data structures.
+
+func TestUnmarshalFeed(t *testing.T) {
+ var f Feed
+ if err := Unmarshal(StringReader(atomFeedString), &f); err != nil {
+ t.Fatalf("Unmarshal: %s", err)
+ }
+ if !reflect.DeepEqual(f, atomFeed) {
+ t.Fatalf("have %#v\nwant %#v", f, atomFeed)
+ }
+}
+
+// hget http://codereview.appspot.com/rss/mine/rsc
+const atomFeedString = `
+<?xml version="1.0" encoding="utf-8"?>
+<feed xmlns="http://www.w3.org/2005/Atom" xml:lang="en-us"><title>Code Review - My issues</title><link href="http://codereview.appspot.com/" rel="alternate"></link><li-nk href="http://codereview.appspot.com/rss/mine/rsc" rel="self"></li-nk><id>http://codereview.appspot.com/</id><updated>2009-10-04T01:35:58+00:00</updated><author><name>rietveld<></name></author><entry><title>rietveld: an attempt at pubsubhubbub
+</title><link hre-f="http://codereview.appspot.com/126085" rel="alternate"></link><updated>2009-10-04T01:35:58+00:00</updated><author><name>email-address-removed</name></author><id>urn:md5:134d9179c41f806be79b3a5f7877d19a</id><summary type="html">
+ An attempt at adding pubsubhubbub support to Rietveld.
+http://code.google.com/p/pubsubhubbub
+http://code.google.com/p/rietveld/issues/detail?id=155
+
+The server side of the protocol is trivial:
+ 1. add a &lt;link rel=&quot;hub&quot; href=&quot;hub-server&quot;&gt; tag to all
+ feeds that will be pubsubhubbubbed.
+ 2. every time one of those feeds changes, tell the hub
+ with a simple POST request.
+
+I have tested this by adding debug prints to a local hub
+server and checking that the server got the right publish
+requests.
+
+I can&#39;t quite get the server to work, but I think the bug
+is not in my code. I think that the server expects to be
+able to grab the feed and see the feed&#39;s actual URL in
+the link rel=&quot;self&quot;, but the default value for that drops
+the :port from the URL, and I cannot for the life of me
+figure out how to get the Atom generator deep inside
+django not to do that, or even where it is doing that,
+or even what code is running to generate the Atom feed.
+(I thought I knew but I added some assert False statements
+and it kept running!)
+
+Ignoring that particular problem, I would appreciate
+feedback on the right way to get the two values at
+the top of feeds.py marked NOTE(rsc).
+
+
+</summary></entry><entry><title>rietveld: correct tab handling
+</title><link href="http://codereview.appspot.com/124106" rel="alternate"></link><updated>2009-10-03T23:02:17+00:00</updated><author><name>email-address-removed</name></author><id>urn:md5:0a2a4f19bb815101f0ba2904aed7c35a</id><summary type="html">
+ This fixes the buggy tab rendering that can be seen at
+http://codereview.appspot.com/116075/diff/1/2
+
+The fundamental problem was that the tab code was
+not being told what column the text began in, so it
+didn&#39;t know where to put the tab stops. Another problem
+was that some of the code assumed that string byte
+offsets were the same as column offsets, which is only
+true if there are no tabs.
+
+In the process of fixing this, I cleaned up the arguments
+to Fold and ExpandTabs and renamed them Break and
+_ExpandTabs so that I could be sure that I found all the
+call sites. I also wanted to verify that ExpandTabs was
+not being used from outside intra_region_diff.py.
+
+
+</summary></entry></feed> `
+
+type Feed struct {
+ XMLName Name `xml:"http://www.w3.org/2005/Atom feed"`
+ Title string
+ Id string
+ Link []Link
+ Updated Time
+ Author Person
+ Entry []Entry
+}
+
+type Entry struct {
+ Title string
+ Id string
+ Link []Link
+ Updated Time
+ Author Person
+ Summary Text
+}
+
+type Link struct {
+ Rel string `xml:"attr"`
+ Href string `xml:"attr"`
+}
+
+type Person struct {
+ Name string
+ URI string
+ Email string
+ InnerXML string `xml:"innerxml"`
+}
+
+type Text struct {
+ Type string `xml:"attr"`
+ Body string `xml:"chardata"`
+}
+
+type Time string
+
+var atomFeed = Feed{
+ XMLName: Name{"http://www.w3.org/2005/Atom", "feed"},
+ Title: "Code Review - My issues",
+ Link: []Link{
+ {Rel: "alternate", Href: "http://codereview.appspot.com/"},
+ {Rel: "self", Href: "http://codereview.appspot.com/rss/mine/rsc"},
+ },
+ Id: "http://codereview.appspot.com/",
+ Updated: "2009-10-04T01:35:58+00:00",
+ Author: Person{
+ Name: "rietveld<>",
+ InnerXML: "<name>rietveld<></name>",
+ },
+ Entry: []Entry{
+ {
+ Title: "rietveld: an attempt at pubsubhubbub\n",
+ Link: []Link{
+ {Rel: "alternate", Href: "http://codereview.appspot.com/126085"},
+ },
+ Updated: "2009-10-04T01:35:58+00:00",
+ Author: Person{
+ Name: "email-address-removed",
+ InnerXML: "<name>email-address-removed</name>",
+ },
+ Id: "urn:md5:134d9179c41f806be79b3a5f7877d19a",
+ Summary: Text{
+ Type: "html",
+ Body: `
+ An attempt at adding pubsubhubbub support to Rietveld.
+http://code.google.com/p/pubsubhubbub
+http://code.google.com/p/rietveld/issues/detail?id=155
+
+The server side of the protocol is trivial:
+ 1. add a <link rel="hub" href="hub-server"> tag to all
+ feeds that will be pubsubhubbubbed.
+ 2. every time one of those feeds changes, tell the hub
+ with a simple POST request.
+
+I have tested this by adding debug prints to a local hub
+server and checking that the server got the right publish
+requests.
+
+I can't quite get the server to work, but I think the bug
+is not in my code. I think that the server expects to be
+able to grab the feed and see the feed's actual URL in
+the link rel="self", but the default value for that drops
+the :port from the URL, and I cannot for the life of me
+figure out how to get the Atom generator deep inside
+django not to do that, or even where it is doing that,
+or even what code is running to generate the Atom feed.
+(I thought I knew but I added some assert False statements
+and it kept running!)
+
+Ignoring that particular problem, I would appreciate
+feedback on the right way to get the two values at
+the top of feeds.py marked NOTE(rsc).
+
+
+`,
+ },
+ },
+ {
+ Title: "rietveld: correct tab handling\n",
+ Link: []Link{
+ {Rel: "alternate", Href: "http://codereview.appspot.com/124106"},
+ },
+ Updated: "2009-10-03T23:02:17+00:00",
+ Author: Person{
+ Name: "email-address-removed",
+ InnerXML: "<name>email-address-removed</name>",
+ },
+ Id: "urn:md5:0a2a4f19bb815101f0ba2904aed7c35a",
+ Summary: Text{
+ Type: "html",
+ Body: `
+ This fixes the buggy tab rendering that can be seen at
+http://codereview.appspot.com/116075/diff/1/2
+
+The fundamental problem was that the tab code was
+not being told what column the text began in, so it
+didn't know where to put the tab stops. Another problem
+was that some of the code assumed that string byte
+offsets were the same as column offsets, which is only
+true if there are no tabs.
+
+In the process of fixing this, I cleaned up the arguments
+to Fold and ExpandTabs and renamed them Break and
+_ExpandTabs so that I could be sure that I found all the
+call sites. I also wanted to verify that ExpandTabs was
+not being used from outside intra_region_diff.py.
+
+
+`,
+ },
+ },
+ },
+}
+
+type FieldNameTest struct {
+ in, out string
+}
+
+var FieldNameTests = []FieldNameTest{
+ {"Profile-Image", "profileimage"},
+ {"_score", "score"},
+}
+
+func TestFieldName(t *testing.T) {
+ for _, tt := range FieldNameTests {
+ a := fieldName(tt.in)
+ if a != tt.out {
+ t.Fatalf("have %#v\nwant %#v\n\n", a, tt.out)
+ }
+ }
+}
+
+const pathTestString = `
+<result>
+ <before>1</before>
+ <items>
+ <item1>
+ <value>A</value>
+ </item1>
+ <item2>
+ <value>B</value>
+ </item2>
+ <Item1>
+ <Value>C</Value>
+ <Value>D</Value>
+ </Item1>
+ <_>
+ <value>E</value>
+ </_>
+ </items>
+ <after>2</after>
+</result>
+`
+
+type PathTestItem struct {
+ Value string
+}
+
+type PathTestA struct {
+ Items []PathTestItem `xml:">item1"`
+ Before, After string
+}
+
+type PathTestB struct {
+ Other []PathTestItem `xml:"items>Item1"`
+ Before, After string
+}
+
+type PathTestC struct {
+ Values1 []string `xml:"items>item1>value"`
+ Values2 []string `xml:"items>item2>value"`
+ Before, After string
+}
+
+type PathTestSet struct {
+ Item1 []PathTestItem
+}
+
+type PathTestD struct {
+ Other PathTestSet `xml:"items>"`
+ Before, After string
+}
+
+type PathTestE struct {
+ Underline string `xml:"items>_>value"`
+ Before, After string
+}
+
+var pathTests = []interface{}{
+ &PathTestA{Items: []PathTestItem{{"A"}, {"D"}}, Before: "1", After: "2"},
+ &PathTestB{Other: []PathTestItem{{"A"}, {"D"}}, Before: "1", After: "2"},
+ &PathTestC{Values1: []string{"A", "C", "D"}, Values2: []string{"B"}, Before: "1", After: "2"},
+ &PathTestD{Other: PathTestSet{Item1: []PathTestItem{{"A"}, {"D"}}}, Before: "1", After: "2"},
+ &PathTestE{Underline: "E", Before: "1", After: "2"},
+}
+
+func TestUnmarshalPaths(t *testing.T) {
+ for _, pt := range pathTests {
+ v := reflect.New(reflect.TypeOf(pt).Elem()).Interface()
+ if err := Unmarshal(StringReader(pathTestString), v); err != nil {
+ t.Fatalf("Unmarshal: %s", err)
+ }
+ if !reflect.DeepEqual(v, pt) {
+ t.Fatalf("have %#v\nwant %#v", v, pt)
+ }
+ }
+}
+
+type BadPathTestA struct {
+ First string `xml:"items>item1"`
+ Other string `xml:"items>item2"`
+ Second string `xml:"items>"`
+}
+
+type BadPathTestB struct {
+ Other string `xml:"items>item2>value"`
+ First string `xml:"items>item1"`
+ Second string `xml:"items>item1>value"`
+}
+
+var badPathTests = []struct {
+ v, e interface{}
+}{
+ {&BadPathTestA{}, &TagPathError{reflect.TypeOf(BadPathTestA{}), "First", "items>item1", "Second", "items>"}},
+ {&BadPathTestB{}, &TagPathError{reflect.TypeOf(BadPathTestB{}), "First", "items>item1", "Second", "items>item1>value"}},
+}
+
+func TestUnmarshalBadPaths(t *testing.T) {
+ for _, tt := range badPathTests {
+ err := Unmarshal(StringReader(pathTestString), tt.v)
+ if !reflect.DeepEqual(err, tt.e) {
+ t.Fatalf("Unmarshal with %#v didn't fail properly: %#v", tt.v, err)
+ }
+ }
+}
+
+func TestUnmarshalAttrs(t *testing.T) {
+ var f AttrTest
+ if err := Unmarshal(StringReader(attrString), &f); err != nil {
+ t.Fatalf("Unmarshal: %s", err)
+ }
+ if !reflect.DeepEqual(f, attrStruct) {
+ t.Fatalf("have %#v\nwant %#v", f, attrStruct)
+ }
+}
+
+type AttrTest struct {
+ Test1 Test1
+ Test2 Test2
+}
+
+type Test1 struct {
+ Int int `xml:"attr"`
+ Float float64 `xml:"attr"`
+ Uint8 uint8 `xml:"attr"`
+}
+
+type Test2 struct {
+ Bool bool `xml:"attr"`
+}
+
+const attrString = `
+<?xml version="1.0" charset="utf-8"?>
+<attrtest>
+ <test1 int="8" float="23.5" uint8="255"/>
+ <test2 bool="true"/>
+</attrtest>
+`
+
+var attrStruct = AttrTest{
+ Test1: Test1{
+ Int: 8,
+ Float: 23.5,
+ Uint8: 255,
+ },
+ Test2: Test2{
+ Bool: true,
+ },
+}
+
+// test data for TestUnmarshalWithoutNameType
+
+const OK = "OK"
+const withoutNameTypeData = `
+<?xml version="1.0" charset="utf-8"?>
+<Test3 attr="OK" />`
+
+type TestThree struct {
+ XMLName bool `xml:"Test3"` // XMLName field without an xml.Name type
+ Attr string `xml:"attr"`
+}
+
+func TestUnmarshalWithoutNameType(t *testing.T) {
+ var x TestThree
+ if err := Unmarshal(StringReader(withoutNameTypeData), &x); err != nil {
+ t.Fatalf("Unmarshal: %s", err)
+ }
+ if x.Attr != OK {
+ t.Fatalf("have %v\nwant %v", x.Attr, OK)
+ }
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package xml implements a simple XML 1.0 parser that
+// understands XML name spaces.
+package xml
+
+// References:
+// Annotated XML spec: http://www.xml.com/axml/testaxml.htm
+// XML name spaces: http://www.w3.org/TR/REC-xml-names/
+
+// TODO(rsc):
+// Test error handling.
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+// A SyntaxError represents a syntax error in the XML input stream.
+type SyntaxError struct {
+ Msg string
+ Line int
+}
+
+func (e *SyntaxError) Error() string {
+ return "XML syntax error on line " + strconv.Itoa(e.Line) + ": " + e.Msg
+}
+
+// A Name represents an XML name (Local) annotated
+// with a name space identifier (Space).
+// In tokens returned by Parser.Token, the Space identifier
+// is given as a canonical URL, not the short prefix used
+// in the document being parsed.
+type Name struct {
+ Space, Local string
+}
+
+// An Attr represents an attribute in an XML element (Name=Value).
+type Attr struct {
+ Name Name
+ Value string
+}
+
+// A Token is an interface holding one of the token types:
+// StartElement, EndElement, CharData, Comment, ProcInst, or Directive.
+type Token interface{}
+
+// A StartElement represents an XML start element.
+type StartElement struct {
+ Name Name
+ Attr []Attr
+}
+
+func (e StartElement) Copy() StartElement {
+ attrs := make([]Attr, len(e.Attr))
+ copy(attrs, e.Attr)
+ e.Attr = attrs
+ return e
+}
+
+// An EndElement represents an XML end element.
+type EndElement struct {
+ Name Name
+}
+
+// A CharData represents XML character data (raw text),
+// in which XML escape sequences have been replaced by
+// the characters they represent.
+type CharData []byte
+
+func makeCopy(b []byte) []byte {
+ b1 := make([]byte, len(b))
+ copy(b1, b)
+ return b1
+}
+
+func (c CharData) Copy() CharData { return CharData(makeCopy(c)) }
+
+// A Comment represents an XML comment of the form <!--comment-->.
+// The bytes do not include the <!-- and --> comment markers.
+type Comment []byte
+
+func (c Comment) Copy() Comment { return Comment(makeCopy(c)) }
+
+// A ProcInst represents an XML processing instruction of the form <?target inst?>
+type ProcInst struct {
+ Target string
+ Inst []byte
+}
+
+func (p ProcInst) Copy() ProcInst {
+ p.Inst = makeCopy(p.Inst)
+ return p
+}
+
+// A Directive represents an XML directive of the form <!text>.
+// The bytes do not include the <! and > markers.
+type Directive []byte
+
+func (d Directive) Copy() Directive { return Directive(makeCopy(d)) }
+
+// CopyToken returns a copy of a Token.
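+// It is useful when a token must be retained beyond the next call to
+// Token, since the byte slices inside CharData, Comment, Directive,
+// and ProcInst tokens refer to the parser's internal buffer.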
+func CopyToken(t Token) Token {
+ switch v := t.(type) {
+ case CharData:
+ return v.Copy()
+ case Comment:
+ return v.Copy()
+ case Directive:
+ return v.Copy()
+ case ProcInst:
+ return v.Copy()
+ case StartElement:
+ return v.Copy()
+ }
+ return t
+}
+
+// A Parser represents an XML parser reading a particular input stream.
+// The parser assumes that its input is encoded in UTF-8.
+type Parser struct {
+ // Strict defaults to true, enforcing the requirements
+ // of the XML specification.
+ // If set to false, the parser allows input containing common
+ // mistakes:
+ // * If an element is missing an end tag, the parser invents
+ // end tags as necessary to keep the return values from Token
+ // properly balanced.
+ // * In attribute values and character data, unknown or malformed
+ // character entities (sequences beginning with &) are left alone.
+ //
+ // Setting:
+ //
+ // p.Strict = false
+ // p.AutoClose = HTMLAutoClose
+ // p.Entity = HTMLEntity
+ //
+ // creates a parser that can handle typical HTML.
+ Strict bool
+
+ // When Strict == false, AutoClose indicates a set of elements to
+ // consider closed immediately after they are opened, regardless
+ // of whether an end element is present.
+ AutoClose []string
+
+ // Entity can be used to map non-standard entity names to string replacements.
+ // The parser behaves as if these standard mappings are present in the map,
+ // regardless of the actual map content:
+ //
+ // "lt": "<",
+ // "gt": ">",
+ // "amp": "&",
+ // "apos": "'",
+ // "quot": `"`,
+ Entity map[string]string
+
+ // CharsetReader, if non-nil, defines a function to generate
+ // charset-conversion readers, converting from the provided
+ // non-UTF-8 charset into UTF-8. If CharsetReader is nil or
+ // returns an error, parsing stops with an error. One of the
+ // CharsetReader's result values must be non-nil.
+ CharsetReader func(charset string, input io.Reader) (io.Reader, error)
+
+ r io.ByteReader // byte-at-a-time input source
+ buf bytes.Buffer // scratch buffer for the token being read
+ saved *bytes.Buffer // records raw input while an "innerxml" field is being captured
+ stk *stack // stack of open elements and name space bindings
+ free *stack // free list of reusable stack entries
+ needClose bool // an EndElement (toClose) is pending from the next RawToken
+ toClose Name // name of the pending EndElement
+ nextToken Token // token deferred while autoClose inserts an end element
+ nextByte int // single byte pushed back by ungetc, or -1 if none
+ ns map[string]string // current prefix -> name space URL bindings
+ err error // sticky error
+ line int // current input line, used for SyntaxError
+ tmp [32]byte // scratch space for entity names
+}
+
+// NewParser creates a new XML parser reading from r.
+func NewParser(r io.Reader) *Parser {
+ p := &Parser{
+ ns: make(map[string]string),
+ nextByte: -1,
+ line: 1,
+ Strict: true,
+ }
+ p.switchToReader(r)
+ return p
+}
+
+// Token returns the next XML token in the input stream.
+// At the end of the input stream, Token returns nil, io.EOF.
+//
+// Slices of bytes in the returned token data refer to the
+// parser's internal buffer and remain valid only until the next
+// call to Token. To acquire a copy of the bytes, call CopyToken
+// or the token's Copy method.
+//
+// Token expands self-closing elements such as <br/>
+// into separate start and end elements returned by successive calls.
+//
+// Token guarantees that the StartElement and EndElement
+// tokens it returns are properly nested and matched:
+// if Token encounters an unexpected end element,
+// it will return an error.
+//
+// Token implements XML name spaces as described by
+// http://www.w3.org/TR/REC-xml-names/. Each of the
+// Name structures contained in the Token has the Space
+// set to the URL identifying its name space when known.
+// If Token encounters an unrecognized name space prefix,
+// it uses the prefix as the Space rather than report an error.
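+//
+// A minimal reading loop (an illustrative sketch; data stands for any
+// string of XML input) drains the stream until EOF:
+//
+// p := NewParser(strings.NewReader(data))
+// for {
+// tok, err := p.Token()
+// if err == io.EOF {
+// break
+// }
+// if err != nil {
+// // handle the error
+// break
+// }
+// // switch on the concrete type of tok: StartElement, CharData, ...
+// }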
+func (p *Parser) Token() (t Token, err error) {
+ if p.nextToken != nil {
+ t = p.nextToken
+ p.nextToken = nil
+ } else if t, err = p.RawToken(); err != nil {
+ return
+ }
+
+ if !p.Strict {
+ if t1, ok := p.autoClose(t); ok {
+ p.nextToken = t
+ t = t1
+ }
+ }
+ switch t1 := t.(type) {
+ case StartElement:
+ // In XML name spaces, the translations listed in the
+ // attributes apply to the element name and
+ // to the other attribute names, so process
+ // the translations first.
+ for _, a := range t1.Attr {
+ if a.Name.Space == "xmlns" {
+ v, ok := p.ns[a.Name.Local]
+ p.pushNs(a.Name.Local, v, ok)
+ p.ns[a.Name.Local] = a.Value
+ }
+ if a.Name.Space == "" && a.Name.Local == "xmlns" {
+ // Default space for untagged names
+ v, ok := p.ns[""]
+ p.pushNs("", v, ok)
+ p.ns[""] = a.Value
+ }
+ }
+
+ p.translate(&t1.Name, true)
+ for i := range t1.Attr {
+ p.translate(&t1.Attr[i].Name, false)
+ }
+ p.pushElement(t1.Name)
+ t = t1
+
+ case EndElement:
+ p.translate(&t1.Name, true)
+ if !p.popElement(&t1) {
+ return nil, p.err
+ }
+ t = t1
+ }
+ return
+}
+
+// Apply name space translation to name n.
+// The default name space (for Space=="")
+// applies only to element names, not to attribute names.
+func (p *Parser) translate(n *Name, isElementName bool) {
+ switch {
+ case n.Space == "xmlns":
+ return
+ case n.Space == "" && !isElementName:
+ return
+ case n.Space == "" && n.Local == "xmlns":
+ return
+ }
+ if v, ok := p.ns[n.Space]; ok {
+ n.Space = v
+ }
+}
+
+func (p *Parser) switchToReader(r io.Reader) {
+ // Get efficient byte at a time reader.
+ // Assume that if reader has its own
+ // ReadByte, it's efficient enough.
+ // Otherwise, use bufio.
+ if rb, ok := r.(io.ByteReader); ok {
+ p.r = rb
+ } else {
+ p.r = bufio.NewReader(r)
+ }
+}
+
+// Parsing state - stack holds old name space translations
+// and the current set of open elements. The translations to pop when
+// ending a given tag are *below* it on the stack, which is
+// more work but forced on us by XML.
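+// Each entry is either a stkStart record (an open element) or a stkNs
+// record (a name space binding to restore when that element is closed).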
+type stack struct {
+ next *stack
+ kind int
+ name Name
+ ok bool
+}
+
+const (
+ stkStart = iota
+ stkNs
+)
+
+func (p *Parser) push(kind int) *stack {
+ s := p.free
+ if s != nil {
+ p.free = s.next
+ } else {
+ s = new(stack)
+ }
+ s.next = p.stk
+ s.kind = kind
+ p.stk = s
+ return s
+}
+
+func (p *Parser) pop() *stack {
+ s := p.stk
+ if s != nil {
+ p.stk = s.next
+ s.next = p.free
+ p.free = s
+ }
+ return s
+}
+
+// Record that we are starting an element with the given name.
+func (p *Parser) pushElement(name Name) {
+ s := p.push(stkStart)
+ s.name = name
+}
+
+// Record that we are changing the value of ns[local].
+// The old value is url, ok.
+func (p *Parser) pushNs(local string, url string, ok bool) {
+ s := p.push(stkNs)
+ s.name.Local = local
+ s.name.Space = url
+ s.ok = ok
+}
+
+// Creates a SyntaxError with the current line number.
+func (p *Parser) syntaxError(msg string) error {
+ return &SyntaxError{Msg: msg, Line: p.line}
+}
+
+// Record that we are ending an element with the given name.
+// The name must match the record at the top of the stack,
+// which must be a pushElement record.
+// After popping the element, apply any undo records from
+// the stack to restore the name translations that existed
+// before we saw this element.
+func (p *Parser) popElement(t *EndElement) bool {
+ s := p.pop()
+ name := t.Name
+ switch {
+ case s == nil || s.kind != stkStart:
+ p.err = p.syntaxError("unexpected end element </" + name.Local + ">")
+ return false
+ case s.name.Local != name.Local:
+ if !p.Strict {
+ p.needClose = true
+ p.toClose = t.Name
+ t.Name = s.name
+ return true
+ }
+ p.err = p.syntaxError("element <" + s.name.Local + "> closed by </" + name.Local + ">")
+ return false
+ case s.name.Space != name.Space:
+ p.err = p.syntaxError("element <" + s.name.Local + "> in space " + s.name.Space +
+ "closed by </" + name.Local + "> in space " + name.Space)
+ return false
+ }
+
+ // Pop stack until a Start is on the top, undoing the
+ // translations that were associated with the element we just closed.
+ for p.stk != nil && p.stk.kind != stkStart {
+ s := p.pop()
+ if s.ok {
+ p.ns[s.name.Local] = s.name.Space
+ } else {
+ delete(p.ns, s.name.Local)
+ }
+ }
+
+ return true
+}
+
+// If the top element on the stack is autoclosing and
+// t is not the end tag, invent the end tag.
+func (p *Parser) autoClose(t Token) (Token, bool) {
+ if p.stk == nil || p.stk.kind != stkStart {
+ return nil, false
+ }
+ name := strings.ToLower(p.stk.name.Local)
+ for _, s := range p.AutoClose {
+ if strings.ToLower(s) == name {
+ // This one should be auto closed if t doesn't close it.
+ et, ok := t.(EndElement)
+ if !ok || et.Name.Local != name {
+ return EndElement{p.stk.name}, true
+ }
+ break
+ }
+ }
+ return nil, false
+}
+
+// RawToken is like Token but does not verify that
+// start and end elements match and does not translate
+// name space prefixes to their corresponding URLs.
+func (p *Parser) RawToken() (Token, error) {
+ if p.err != nil {
+ return nil, p.err
+ }
+ if p.needClose {
+ // The last element we read was self-closing and
+ // we returned just the StartElement half.
+ // Return the EndElement half now.
+ p.needClose = false
+ return EndElement{p.toClose}, nil
+ }
+
+ b, ok := p.getc()
+ if !ok {
+ return nil, p.err
+ }
+
+ if b != '<' {
+ // Text section.
+ p.ungetc(b)
+ data := p.text(-1, false)
+ if data == nil {
+ return nil, p.err
+ }
+ return CharData(data), nil
+ }
+
+ if b, ok = p.mustgetc(); !ok {
+ return nil, p.err
+ }
+ switch b {
+ case '/':
+ // </: End element
+ var name Name
+ if name, ok = p.nsname(); !ok {
+ if p.err == nil {
+ p.err = p.syntaxError("expected element name after </")
+ }
+ return nil, p.err
+ }
+ p.space()
+ if b, ok = p.mustgetc(); !ok {
+ return nil, p.err
+ }
+ if b != '>' {
+ p.err = p.syntaxError("invalid characters between </" + name.Local + " and >")
+ return nil, p.err
+ }
+ return EndElement{name}, nil
+
+ case '?':
+ // <?: Processing instruction.
+ // TODO(rsc): Should parse the <?xml declaration to make sure
+ // the version is 1.0 and the encoding is UTF-8.
+ var target string
+ if target, ok = p.name(); !ok {
+ if p.err == nil {
+ p.err = p.syntaxError("expected target name after <?")
+ }
+ return nil, p.err
+ }
+ p.space()
+ p.buf.Reset()
+ var b0 byte
+ for {
+ if b, ok = p.mustgetc(); !ok {
+ return nil, p.err
+ }
+ p.buf.WriteByte(b)
+ if b0 == '?' && b == '>' {
+ break
+ }
+ b0 = b
+ }
+ data := p.buf.Bytes()
+ data = data[0 : len(data)-2] // chop ?>
+
+ if target == "xml" {
+ enc := procInstEncoding(string(data))
+ if enc != "" && enc != "utf-8" && enc != "UTF-8" {
+ if p.CharsetReader == nil {
+ p.err = fmt.Errorf("xml: encoding %q declared but Parser.CharsetReader is nil", enc)
+ return nil, p.err
+ }
+ newr, err := p.CharsetReader(enc, p.r.(io.Reader))
+ if err != nil {
+ p.err = fmt.Errorf("xml: opening charset %q: %v", enc, err)
+ return nil, p.err
+ }
+ if newr == nil {
+ panic("CharsetReader returned a nil Reader for charset " + enc)
+ }
+ p.switchToReader(newr)
+ }
+ }
+ return ProcInst{target, data}, nil
+
+ case '!':
+ // <!: Maybe comment, maybe CDATA.
+ if b, ok = p.mustgetc(); !ok {
+ return nil, p.err
+ }
+ switch b {
+ case '-': // <!-
+ // Probably <!-- for a comment.
+ if b, ok = p.mustgetc(); !ok {
+ return nil, p.err
+ }
+ if b != '-' {
+ p.err = p.syntaxError("invalid sequence <!- not part of <!--")
+ return nil, p.err
+ }
+ // Look for terminator.
+ p.buf.Reset()
+ var b0, b1 byte
+ for {
+ if b, ok = p.mustgetc(); !ok {
+ return nil, p.err
+ }
+ p.buf.WriteByte(b)
+ if b0 == '-' && b1 == '-' && b == '>' {
+ break
+ }
+ b0, b1 = b1, b
+ }
+ data := p.buf.Bytes()
+ data = data[0 : len(data)-3] // chop -->
+ return Comment(data), nil
+
+ case '[': // <![
+ // Probably <![CDATA[.
+ for i := 0; i < 6; i++ {
+ if b, ok = p.mustgetc(); !ok {
+ return nil, p.err
+ }
+ if b != "CDATA["[i] {
+ p.err = p.syntaxError("invalid <![ sequence")
+ return nil, p.err
+ }
+ }
+ // Have <![CDATA[. Read text until ]]>.
+ data := p.text(-1, true)
+ if data == nil {
+ return nil, p.err
+ }
+ return CharData(data), nil
+ }
+
+ // Probably a directive: <!DOCTYPE ...>, <!ENTITY ...>, etc.
+ // We don't care, but accumulate for caller. Quoted angle
+ // brackets do not count for nesting.
+ p.buf.Reset()
+ p.buf.WriteByte(b)
+ inquote := uint8(0)
+ depth := 0
+ for {
+ if b, ok = p.mustgetc(); !ok {
+ return nil, p.err
+ }
+ if inquote == 0 && b == '>' && depth == 0 {
+ break
+ }
+ p.buf.WriteByte(b)
+ switch {
+ case b == inquote:
+ inquote = 0
+
+ case inquote != 0:
+ // in quotes, no special action
+
+ case b == '\'' || b == '"':
+ inquote = b
+
+ case b == '>' && inquote == 0:
+ depth--
+
+ case b == '<' && inquote == 0:
+ depth++
+ }
+ }
+ return Directive(p.buf.Bytes()), nil
+ }
+
+ // Must be an open element like <a href="foo">
+ p.ungetc(b)
+
+ var (
+ name Name
+ empty bool
+ attr []Attr
+ )
+ if name, ok = p.nsname(); !ok {
+ if p.err == nil {
+ p.err = p.syntaxError("expected element name after <")
+ }
+ return nil, p.err
+ }
+
+ attr = make([]Attr, 0, 4)
+ for {
+ p.space()
+ if b, ok = p.mustgetc(); !ok {
+ return nil, p.err
+ }
+ if b == '/' {
+ empty = true
+ if b, ok = p.mustgetc(); !ok {
+ return nil, p.err
+ }
+ if b != '>' {
+ p.err = p.syntaxError("expected /> in element")
+ return nil, p.err
+ }
+ break
+ }
+ if b == '>' {
+ break
+ }
+ p.ungetc(b)
+
+ n := len(attr)
+ if n >= cap(attr) {
+ nattr := make([]Attr, n, 2*cap(attr))
+ copy(nattr, attr)
+ attr = nattr
+ }
+ attr = attr[0 : n+1]
+ a := &attr[n]
+ if a.Name, ok = p.nsname(); !ok {
+ if p.err == nil {
+ p.err = p.syntaxError("expected attribute name in element")
+ }
+ return nil, p.err
+ }
+ p.space()
+ if b, ok = p.mustgetc(); !ok {
+ return nil, p.err
+ }
+ if b != '=' {
+ if p.Strict {
+ p.err = p.syntaxError("attribute name without = in element")
+ return nil, p.err
+ } else {
+ p.ungetc(b)
+ a.Value = a.Name.Local
+ }
+ } else {
+ p.space()
+ data := p.attrval()
+ if data == nil {
+ return nil, p.err
+ }
+ a.Value = string(data)
+ }
+ }
+ if empty {
+ p.needClose = true
+ p.toClose = name
+ }
+ return StartElement{name, attr}, nil
+}
+
+func (p *Parser) attrval() []byte {
+ b, ok := p.mustgetc()
+ if !ok {
+ return nil
+ }
+ // Handle quoted attribute values
+ if b == '"' || b == '\'' {
+ return p.text(int(b), false)
+ }
+ // Handle unquoted attribute values for strict parsers
+ if p.Strict {
+ p.err = p.syntaxError("unquoted or missing attribute value in element")
+ return nil
+ }
+ // Handle unquoted attribute values for non-strict parsers
+ p.ungetc(b)
+ p.buf.Reset()
+ for {
+ b, ok = p.mustgetc()
+ if !ok {
+ return nil
+ }
+ // http://www.w3.org/TR/REC-html40/intro/sgmltut.html#h-3.2.2
+ if 'a' <= b && b <= 'z' || 'A' <= b && b <= 'Z' ||
+ '0' <= b && b <= '9' || b == '_' || b == ':' || b == '-' {
+ p.buf.WriteByte(b)
+ } else {
+ p.ungetc(b)
+ break
+ }
+ }
+ return p.buf.Bytes()
+}
+
+// Skip spaces if any
+func (p *Parser) space() {
+ for {
+ b, ok := p.getc()
+ if !ok {
+ return
+ }
+ switch b {
+ case ' ', '\r', '\n', '\t':
+ default:
+ p.ungetc(b)
+ return
+ }
+ }
+}
+
+// Read a single byte.
+// If there is no byte to read, return ok==false
+// and leave the error in p.err.
+// Maintain line number.
+func (p *Parser) getc() (b byte, ok bool) {
+ if p.err != nil {
+ return 0, false
+ }
+ if p.nextByte >= 0 {
+ b = byte(p.nextByte)
+ p.nextByte = -1
+ } else {
+ b, p.err = p.r.ReadByte()
+ if p.err != nil {
+ return 0, false
+ }
+ if p.saved != nil {
+ p.saved.WriteByte(b)
+ }
+ }
+ if b == '\n' {
+ p.line++
+ }
+ return b, true
+}
+
+// Return saved offset.
+// If we did ungetc (nextByte >= 0), have to back up one.
+func (p *Parser) savedOffset() int {
+ n := p.saved.Len()
+ if p.nextByte >= 0 {
+ n--
+ }
+ return n
+}
+
+// Must read a single byte.
+// If there is no byte to read,
+// set p.err to SyntaxError("unexpected EOF")
+// and return ok==false
+func (p *Parser) mustgetc() (b byte, ok bool) {
+ if b, ok = p.getc(); !ok {
+ if p.err == io.EOF {
+ p.err = p.syntaxError("unexpected EOF")
+ }
+ }
+ return
+}
+
+// Unread a single byte.
+func (p *Parser) ungetc(b byte) {
+ if b == '\n' {
+ p.line--
+ }
+ p.nextByte = int(b)
+}
+
+var entity = map[string]int{
+ "lt": '<',
+ "gt": '>',
+ "amp": '&',
+ "apos": '\'',
+ "quot": '"',
+}
+
+// Read plain text section (XML calls it character data).
+// If quote >= 0, we are in a quoted string and need to find the matching quote.
+// If cdata == true, we are in a <![CDATA[ section and need to find ]]>.
+// On failure return nil and leave the error in p.err.
+func (p *Parser) text(quote int, cdata bool) []byte {
+ var b0, b1 byte
+ var trunc int
+ p.buf.Reset()
+Input:
+ for {
+ b, ok := p.getc()
+ if !ok {
+ if cdata {
+ if p.err == io.EOF {
+ p.err = p.syntaxError("unexpected EOF in CDATA section")
+ }
+ return nil
+ }
+ break Input
+ }
+
+ // <![CDATA[ section ends with ]]>.
+ // It is an error for ]]> to appear in ordinary text.
+ if b0 == ']' && b1 == ']' && b == '>' {
+ if cdata {
+ trunc = 2
+ break Input
+ }
+ p.err = p.syntaxError("unescaped ]]> not in CDATA section")
+ return nil
+ }
+
+ // Stop reading text if we see a <.
+ if b == '<' && !cdata {
+ if quote >= 0 {
+ p.err = p.syntaxError("unescaped < inside quoted string")
+ return nil
+ }
+ p.ungetc('<')
+ break Input
+ }
+ if quote >= 0 && b == byte(quote) {
+ break Input
+ }
+ if b == '&' && !cdata {
+ // Read escaped character expression up to semicolon.
+ // XML in all its glory allows a document to define and use
+ // its own character names with <!ENTITY ...> directives.
+ // Parsers are required to recognize lt, gt, amp, apos, and quot
+ // even if they have not been declared. That's all we allow.
+ var i int
+ for i = 0; i < len(p.tmp); i++ {
+ var ok bool
+ p.tmp[i], ok = p.getc()
+ if !ok {
+ if p.err == io.EOF {
+ p.err = p.syntaxError("unexpected EOF")
+ }
+ return nil
+ }
+ c := p.tmp[i]
+ if c == ';' {
+ break
+ }
+ if 'a' <= c && c <= 'z' ||
+ 'A' <= c && c <= 'Z' ||
+ '0' <= c && c <= '9' ||
+ c == '_' || c == '#' {
+ continue
+ }
+ p.ungetc(c)
+ break
+ }
+ s := string(p.tmp[0:i])
+ if i >= len(p.tmp) {
+ if !p.Strict {
+ b0, b1 = 0, 0
+ p.buf.WriteByte('&')
+ p.buf.Write(p.tmp[0:i])
+ continue Input
+ }
+ p.err = p.syntaxError("character entity expression &" + s + "... too long")
+ return nil
+ }
+ var haveText bool
+ var text string
+ if i >= 2 && s[0] == '#' {
+ var n uint64
+ var err error
+ if i >= 3 && s[1] == 'x' {
+ n, err = strconv.Btoui64(s[2:], 16)
+ } else {
+ n, err = strconv.Btoui64(s[1:], 10)
+ }
+ if err == nil && n <= unicode.MaxRune {
+ text = string(n)
+ haveText = true
+ }
+ } else {
+ if r, ok := entity[s]; ok {
+ text = string(r)
+ haveText = true
+ } else if p.Entity != nil {
+ text, haveText = p.Entity[s]
+ }
+ }
+ if !haveText {
+ if !p.Strict {
+ b0, b1 = 0, 0
+ p.buf.WriteByte('&')
+ p.buf.Write(p.tmp[0:i])
+ continue Input
+ }
+ p.err = p.syntaxError("invalid character entity &" + s + ";")
+ return nil
+ }
+ p.buf.Write([]byte(text))
+ b0, b1 = 0, 0
+ continue Input
+ }
+ p.buf.WriteByte(b)
+ b0, b1 = b1, b
+ }
+ data := p.buf.Bytes()
+ data = data[0 : len(data)-trunc]
+
+ // Inspect each rune for being a disallowed character.
+ buf := data
+ for len(buf) > 0 {
+ r, size := utf8.DecodeRune(buf)
+ if r == utf8.RuneError && size == 1 {
+ p.err = p.syntaxError("invalid UTF-8")
+ return nil
+ }
+ buf = buf[size:]
+ if !isInCharacterRange(r) {
+ p.err = p.syntaxError(fmt.Sprintf("illegal character code %U", r))
+ return nil
+ }
+ }
+
+ // Must rewrite \r and \r\n into \n.
+ w := 0
+ for r := 0; r < len(data); r++ {
+ b := data[r]
+ if b == '\r' {
+ if r+1 < len(data) && data[r+1] == '\n' {
+ continue
+ }
+ b = '\n'
+ }
+ data[w] = b
+ w++
+ }
+ return data[0:w]
+}
+
+// Decide whether the given rune is in the XML Character Range, per
+// the Char production of http://www.xml.com/axml/testaxml.htm,
+// Section 2.2 Characters.
+func isInCharacterRange(r rune) (inrange bool) {
+ return r == 0x09 ||
+ r == 0x0A ||
+ r == 0x0D ||
+ r >= 0x20 && r <= 0xD7FF ||
+ r >= 0xE000 && r <= 0xFFFD ||
+ r >= 0x10000 && r <= 0x10FFFF
+}
+
+// Get name space name: name with a : stuck in the middle.
+// The part before the : is the name space identifier.
+func (p *Parser) nsname() (name Name, ok bool) {
+ s, ok := p.name()
+ if !ok {
+ return
+ }
+ i := strings.Index(s, ":")
+ if i < 0 {
+ name.Local = s
+ } else {
+ name.Space = s[0:i]
+ name.Local = s[i+1:]
+ }
+ return name, true
+}
+
+// Get name: /first(first|second)*/
+// Do not set p.err if the name is missing (unless unexpected EOF is received):
+// let the caller provide better context.
+func (p *Parser) name() (s string, ok bool) {
+ var b byte
+ if b, ok = p.mustgetc(); !ok {
+ return
+ }
+
+ // As a first approximation, we gather the bytes [A-Za-z0-9_:.-\x80-\xFF]*
+ if b < utf8.RuneSelf && !isNameByte(b) {
+ p.ungetc(b)
+ return "", false
+ }
+ p.buf.Reset()
+ p.buf.WriteByte(b)
+ for {
+ if b, ok = p.mustgetc(); !ok {
+ return
+ }
+ if b < utf8.RuneSelf && !isNameByte(b) {
+ p.ungetc(b)
+ break
+ }
+ p.buf.WriteByte(b)
+ }
+
+ // Then we check the characters.
+ s = p.buf.String()
+ for i, c := range s {
+ if !unicode.Is(first, c) && (i == 0 || !unicode.Is(second, c)) {
+ p.err = p.syntaxError("invalid XML name: " + s)
+ return "", false
+ }
+ }
+ return s, true
+}
+
+func isNameByte(c byte) bool {
+ return 'A' <= c && c <= 'Z' ||
+ 'a' <= c && c <= 'z' ||
+ '0' <= c && c <= '9' ||
+ c == '_' || c == ':' || c == '.' || c == '-'
+}
+
+// These tables were generated by cut and paste from Appendix B of
+// the XML spec at http://www.xml.com/axml/testaxml.htm
+// and then reformatting. First corresponds to (Letter | '_' | ':')
+// and second corresponds to NameChar.
+
+var first = &unicode.RangeTable{
+ R16: []unicode.Range16{
+ {0x003A, 0x003A, 1},
+ {0x0041, 0x005A, 1},
+ {0x005F, 0x005F, 1},
+ {0x0061, 0x007A, 1},
+ {0x00C0, 0x00D6, 1},
+ {0x00D8, 0x00F6, 1},
+ {0x00F8, 0x00FF, 1},
+ {0x0100, 0x0131, 1},
+ {0x0134, 0x013E, 1},
+ {0x0141, 0x0148, 1},
+ {0x014A, 0x017E, 1},
+ {0x0180, 0x01C3, 1},
+ {0x01CD, 0x01F0, 1},
+ {0x01F4, 0x01F5, 1},
+ {0x01FA, 0x0217, 1},
+ {0x0250, 0x02A8, 1},
+ {0x02BB, 0x02C1, 1},
+ {0x0386, 0x0386, 1},
+ {0x0388, 0x038A, 1},
+ {0x038C, 0x038C, 1},
+ {0x038E, 0x03A1, 1},
+ {0x03A3, 0x03CE, 1},
+ {0x03D0, 0x03D6, 1},
+ {0x03DA, 0x03E0, 2},
+ {0x03E2, 0x03F3, 1},
+ {0x0401, 0x040C, 1},
+ {0x040E, 0x044F, 1},
+ {0x0451, 0x045C, 1},
+ {0x045E, 0x0481, 1},
+ {0x0490, 0x04C4, 1},
+ {0x04C7, 0x04C8, 1},
+ {0x04CB, 0x04CC, 1},
+ {0x04D0, 0x04EB, 1},
+ {0x04EE, 0x04F5, 1},
+ {0x04F8, 0x04F9, 1},
+ {0x0531, 0x0556, 1},
+ {0x0559, 0x0559, 1},
+ {0x0561, 0x0586, 1},
+ {0x05D0, 0x05EA, 1},
+ {0x05F0, 0x05F2, 1},
+ {0x0621, 0x063A, 1},
+ {0x0641, 0x064A, 1},
+ {0x0671, 0x06B7, 1},
+ {0x06BA, 0x06BE, 1},
+ {0x06C0, 0x06CE, 1},
+ {0x06D0, 0x06D3, 1},
+ {0x06D5, 0x06D5, 1},
+ {0x06E5, 0x06E6, 1},
+ {0x0905, 0x0939, 1},
+ {0x093D, 0x093D, 1},
+ {0x0958, 0x0961, 1},
+ {0x0985, 0x098C, 1},
+ {0x098F, 0x0990, 1},
+ {0x0993, 0x09A8, 1},
+ {0x09AA, 0x09B0, 1},
+ {0x09B2, 0x09B2, 1},
+ {0x09B6, 0x09B9, 1},
+ {0x09DC, 0x09DD, 1},
+ {0x09DF, 0x09E1, 1},
+ {0x09F0, 0x09F1, 1},
+ {0x0A05, 0x0A0A, 1},
+ {0x0A0F, 0x0A10, 1},
+ {0x0A13, 0x0A28, 1},
+ {0x0A2A, 0x0A30, 1},
+ {0x0A32, 0x0A33, 1},
+ {0x0A35, 0x0A36, 1},
+ {0x0A38, 0x0A39, 1},
+ {0x0A59, 0x0A5C, 1},
+ {0x0A5E, 0x0A5E, 1},
+ {0x0A72, 0x0A74, 1},
+ {0x0A85, 0x0A8B, 1},
+ {0x0A8D, 0x0A8D, 1},
+ {0x0A8F, 0x0A91, 1},
+ {0x0A93, 0x0AA8, 1},
+ {0x0AAA, 0x0AB0, 1},
+ {0x0AB2, 0x0AB3, 1},
+ {0x0AB5, 0x0AB9, 1},
+ {0x0ABD, 0x0AE0, 0x23},
+ {0x0B05, 0x0B0C, 1},
+ {0x0B0F, 0x0B10, 1},
+ {0x0B13, 0x0B28, 1},
+ {0x0B2A, 0x0B30, 1},
+ {0x0B32, 0x0B33, 1},
+ {0x0B36, 0x0B39, 1},
+ {0x0B3D, 0x0B3D, 1},
+ {0x0B5C, 0x0B5D, 1},
+ {0x0B5F, 0x0B61, 1},
+ {0x0B85, 0x0B8A, 1},
+ {0x0B8E, 0x0B90, 1},
+ {0x0B92, 0x0B95, 1},
+ {0x0B99, 0x0B9A, 1},
+ {0x0B9C, 0x0B9C, 1},
+ {0x0B9E, 0x0B9F, 1},
+ {0x0BA3, 0x0BA4, 1},
+ {0x0BA8, 0x0BAA, 1},
+ {0x0BAE, 0x0BB5, 1},
+ {0x0BB7, 0x0BB9, 1},
+ {0x0C05, 0x0C0C, 1},
+ {0x0C0E, 0x0C10, 1},
+ {0x0C12, 0x0C28, 1},
+ {0x0C2A, 0x0C33, 1},
+ {0x0C35, 0x0C39, 1},
+ {0x0C60, 0x0C61, 1},
+ {0x0C85, 0x0C8C, 1},
+ {0x0C8E, 0x0C90, 1},
+ {0x0C92, 0x0CA8, 1},
+ {0x0CAA, 0x0CB3, 1},
+ {0x0CB5, 0x0CB9, 1},
+ {0x0CDE, 0x0CDE, 1},
+ {0x0CE0, 0x0CE1, 1},
+ {0x0D05, 0x0D0C, 1},
+ {0x0D0E, 0x0D10, 1},
+ {0x0D12, 0x0D28, 1},
+ {0x0D2A, 0x0D39, 1},
+ {0x0D60, 0x0D61, 1},
+ {0x0E01, 0x0E2E, 1},
+ {0x0E30, 0x0E30, 1},
+ {0x0E32, 0x0E33, 1},
+ {0x0E40, 0x0E45, 1},
+ {0x0E81, 0x0E82, 1},
+ {0x0E84, 0x0E84, 1},
+ {0x0E87, 0x0E88, 1},
+ {0x0E8A, 0x0E8D, 3},
+ {0x0E94, 0x0E97, 1},
+ {0x0E99, 0x0E9F, 1},
+ {0x0EA1, 0x0EA3, 1},
+ {0x0EA5, 0x0EA7, 2},
+ {0x0EAA, 0x0EAB, 1},
+ {0x0EAD, 0x0EAE, 1},
+ {0x0EB0, 0x0EB0, 1},
+ {0x0EB2, 0x0EB3, 1},
+ {0x0EBD, 0x0EBD, 1},
+ {0x0EC0, 0x0EC4, 1},
+ {0x0F40, 0x0F47, 1},
+ {0x0F49, 0x0F69, 1},
+ {0x10A0, 0x10C5, 1},
+ {0x10D0, 0x10F6, 1},
+ {0x1100, 0x1100, 1},
+ {0x1102, 0x1103, 1},
+ {0x1105, 0x1107, 1},
+ {0x1109, 0x1109, 1},
+ {0x110B, 0x110C, 1},
+ {0x110E, 0x1112, 1},
+ {0x113C, 0x1140, 2},
+ {0x114C, 0x1150, 2},
+ {0x1154, 0x1155, 1},
+ {0x1159, 0x1159, 1},
+ {0x115F, 0x1161, 1},
+ {0x1163, 0x1169, 2},
+ {0x116D, 0x116E, 1},
+ {0x1172, 0x1173, 1},
+ {0x1175, 0x119E, 0x119E - 0x1175},
+ {0x11A8, 0x11AB, 0x11AB - 0x11A8},
+ {0x11AE, 0x11AF, 1},
+ {0x11B7, 0x11B8, 1},
+ {0x11BA, 0x11BA, 1},
+ {0x11BC, 0x11C2, 1},
+ {0x11EB, 0x11F0, 0x11F0 - 0x11EB},
+ {0x11F9, 0x11F9, 1},
+ {0x1E00, 0x1E9B, 1},
+ {0x1EA0, 0x1EF9, 1},
+ {0x1F00, 0x1F15, 1},
+ {0x1F18, 0x1F1D, 1},
+ {0x1F20, 0x1F45, 1},
+ {0x1F48, 0x1F4D, 1},
+ {0x1F50, 0x1F57, 1},
+ {0x1F59, 0x1F5B, 0x1F5B - 0x1F59},
+ {0x1F5D, 0x1F5D, 1},
+ {0x1F5F, 0x1F7D, 1},
+ {0x1F80, 0x1FB4, 1},
+ {0x1FB6, 0x1FBC, 1},
+ {0x1FBE, 0x1FBE, 1},
+ {0x1FC2, 0x1FC4, 1},
+ {0x1FC6, 0x1FCC, 1},
+ {0x1FD0, 0x1FD3, 1},
+ {0x1FD6, 0x1FDB, 1},
+ {0x1FE0, 0x1FEC, 1},
+ {0x1FF2, 0x1FF4, 1},
+ {0x1FF6, 0x1FFC, 1},
+ {0x2126, 0x2126, 1},
+ {0x212A, 0x212B, 1},
+ {0x212E, 0x212E, 1},
+ {0x2180, 0x2182, 1},
+ {0x3007, 0x3007, 1},
+ {0x3021, 0x3029, 1},
+ {0x3041, 0x3094, 1},
+ {0x30A1, 0x30FA, 1},
+ {0x3105, 0x312C, 1},
+ {0x4E00, 0x9FA5, 1},
+ {0xAC00, 0xD7A3, 1},
+ },
+}
+
+var second = &unicode.RangeTable{
+ R16: []unicode.Range16{
+ {0x002D, 0x002E, 1},
+ {0x0030, 0x0039, 1},
+ {0x00B7, 0x00B7, 1},
+ {0x02D0, 0x02D1, 1},
+ {0x0300, 0x0345, 1},
+ {0x0360, 0x0361, 1},
+ {0x0387, 0x0387, 1},
+ {0x0483, 0x0486, 1},
+ {0x0591, 0x05A1, 1},
+ {0x05A3, 0x05B9, 1},
+ {0x05BB, 0x05BD, 1},
+ {0x05BF, 0x05BF, 1},
+ {0x05C1, 0x05C2, 1},
+ {0x05C4, 0x0640, 0x0640 - 0x05C4},
+ {0x064B, 0x0652, 1},
+ {0x0660, 0x0669, 1},
+ {0x0670, 0x0670, 1},
+ {0x06D6, 0x06DC, 1},
+ {0x06DD, 0x06DF, 1},
+ {0x06E0, 0x06E4, 1},
+ {0x06E7, 0x06E8, 1},
+ {0x06EA, 0x06ED, 1},
+ {0x06F0, 0x06F9, 1},
+ {0x0901, 0x0903, 1},
+ {0x093C, 0x093C, 1},
+ {0x093E, 0x094C, 1},
+ {0x094D, 0x094D, 1},
+ {0x0951, 0x0954, 1},
+ {0x0962, 0x0963, 1},
+ {0x0966, 0x096F, 1},
+ {0x0981, 0x0983, 1},
+ {0x09BC, 0x09BC, 1},
+ {0x09BE, 0x09BF, 1},
+ {0x09C0, 0x09C4, 1},
+ {0x09C7, 0x09C8, 1},
+ {0x09CB, 0x09CD, 1},
+ {0x09D7, 0x09D7, 1},
+ {0x09E2, 0x09E3, 1},
+ {0x09E6, 0x09EF, 1},
+ {0x0A02, 0x0A3C, 0x3A},
+ {0x0A3E, 0x0A3F, 1},
+ {0x0A40, 0x0A42, 1},
+ {0x0A47, 0x0A48, 1},
+ {0x0A4B, 0x0A4D, 1},
+ {0x0A66, 0x0A6F, 1},
+ {0x0A70, 0x0A71, 1},
+ {0x0A81, 0x0A83, 1},
+ {0x0ABC, 0x0ABC, 1},
+ {0x0ABE, 0x0AC5, 1},
+ {0x0AC7, 0x0AC9, 1},
+ {0x0ACB, 0x0ACD, 1},
+ {0x0AE6, 0x0AEF, 1},
+ {0x0B01, 0x0B03, 1},
+ {0x0B3C, 0x0B3C, 1},
+ {0x0B3E, 0x0B43, 1},
+ {0x0B47, 0x0B48, 1},
+ {0x0B4B, 0x0B4D, 1},
+ {0x0B56, 0x0B57, 1},
+ {0x0B66, 0x0B6F, 1},
+ {0x0B82, 0x0B83, 1},
+ {0x0BBE, 0x0BC2, 1},
+ {0x0BC6, 0x0BC8, 1},
+ {0x0BCA, 0x0BCD, 1},
+ {0x0BD7, 0x0BD7, 1},
+ {0x0BE7, 0x0BEF, 1},
+ {0x0C01, 0x0C03, 1},
+ {0x0C3E, 0x0C44, 1},
+ {0x0C46, 0x0C48, 1},
+ {0x0C4A, 0x0C4D, 1},
+ {0x0C55, 0x0C56, 1},
+ {0x0C66, 0x0C6F, 1},
+ {0x0C82, 0x0C83, 1},
+ {0x0CBE, 0x0CC4, 1},
+ {0x0CC6, 0x0CC8, 1},
+ {0x0CCA, 0x0CCD, 1},
+ {0x0CD5, 0x0CD6, 1},
+ {0x0CE6, 0x0CEF, 1},
+ {0x0D02, 0x0D03, 1},
+ {0x0D3E, 0x0D43, 1},
+ {0x0D46, 0x0D48, 1},
+ {0x0D4A, 0x0D4D, 1},
+ {0x0D57, 0x0D57, 1},
+ {0x0D66, 0x0D6F, 1},
+ {0x0E31, 0x0E31, 1},
+ {0x0E34, 0x0E3A, 1},
+ {0x0E46, 0x0E46, 1},
+ {0x0E47, 0x0E4E, 1},
+ {0x0E50, 0x0E59, 1},
+ {0x0EB1, 0x0EB1, 1},
+ {0x0EB4, 0x0EB9, 1},
+ {0x0EBB, 0x0EBC, 1},
+ {0x0EC6, 0x0EC6, 1},
+ {0x0EC8, 0x0ECD, 1},
+ {0x0ED0, 0x0ED9, 1},
+ {0x0F18, 0x0F19, 1},
+ {0x0F20, 0x0F29, 1},
+ {0x0F35, 0x0F39, 2},
+ {0x0F3E, 0x0F3F, 1},
+ {0x0F71, 0x0F84, 1},
+ {0x0F86, 0x0F8B, 1},
+ {0x0F90, 0x0F95, 1},
+ {0x0F97, 0x0F97, 1},
+ {0x0F99, 0x0FAD, 1},
+ {0x0FB1, 0x0FB7, 1},
+ {0x0FB9, 0x0FB9, 1},
+ {0x20D0, 0x20DC, 1},
+ {0x20E1, 0x3005, 0x3005 - 0x20E1},
+ {0x302A, 0x302F, 1},
+ {0x3031, 0x3035, 1},
+ {0x3099, 0x309A, 1},
+ {0x309D, 0x309E, 1},
+ {0x30FC, 0x30FE, 1},
+ },
+}
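
The two range tables above encode the XML 1.0 Name production: first lists the
characters that may begin a name, and second lists the extra characters allowed
after the first position. A minimal sketch of how such tables are consulted,
assuming the unicode package is imported; isInName is a hypothetical helper,
not the parser's own routine (that lives with the name-reading code in xml.go):

    // isInName reports whether r may appear in an XML name.
    // Leading characters must be in first; later characters may
    // also come from second.
    func isInName(r rune, initial bool) bool {
        if unicode.Is(first, r) {
            return true
        }
        return !initial && unicode.Is(second, r)
    }
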
+
+// HTMLEntity is an entity map containing translations for the
+// standard HTML entity characters.
+var HTMLEntity = htmlEntity
+
+var htmlEntity = map[string]string{
+ /*
+ hget http://www.w3.org/TR/html4/sgml/entities.html |
+ ssam '
+ ,y /\&gt;/ x/\&lt;(.|\n)+/ s/\n/ /g
+ ,x v/^\&lt;!ENTITY/d
+ ,s/\&lt;!ENTITY ([^ ]+) .*U\+([0-9A-F][0-9A-F][0-9A-F][0-9A-F]) .+/ "\1": "\\u\2",/g
+ '
+ */
+ "nbsp": "\u00A0",
+ "iexcl": "\u00A1",
+ "cent": "\u00A2",
+ "pound": "\u00A3",
+ "curren": "\u00A4",
+ "yen": "\u00A5",
+ "brvbar": "\u00A6",
+ "sect": "\u00A7",
+ "uml": "\u00A8",
+ "copy": "\u00A9",
+ "ordf": "\u00AA",
+ "laquo": "\u00AB",
+ "not": "\u00AC",
+ "shy": "\u00AD",
+ "reg": "\u00AE",
+ "macr": "\u00AF",
+ "deg": "\u00B0",
+ "plusmn": "\u00B1",
+ "sup2": "\u00B2",
+ "sup3": "\u00B3",
+ "acute": "\u00B4",
+ "micro": "\u00B5",
+ "para": "\u00B6",
+ "middot": "\u00B7",
+ "cedil": "\u00B8",
+ "sup1": "\u00B9",
+ "ordm": "\u00BA",
+ "raquo": "\u00BB",
+ "frac14": "\u00BC",
+ "frac12": "\u00BD",
+ "frac34": "\u00BE",
+ "iquest": "\u00BF",
+ "Agrave": "\u00C0",
+ "Aacute": "\u00C1",
+ "Acirc": "\u00C2",
+ "Atilde": "\u00C3",
+ "Auml": "\u00C4",
+ "Aring": "\u00C5",
+ "AElig": "\u00C6",
+ "Ccedil": "\u00C7",
+ "Egrave": "\u00C8",
+ "Eacute": "\u00C9",
+ "Ecirc": "\u00CA",
+ "Euml": "\u00CB",
+ "Igrave": "\u00CC",
+ "Iacute": "\u00CD",
+ "Icirc": "\u00CE",
+ "Iuml": "\u00CF",
+ "ETH": "\u00D0",
+ "Ntilde": "\u00D1",
+ "Ograve": "\u00D2",
+ "Oacute": "\u00D3",
+ "Ocirc": "\u00D4",
+ "Otilde": "\u00D5",
+ "Ouml": "\u00D6",
+ "times": "\u00D7",
+ "Oslash": "\u00D8",
+ "Ugrave": "\u00D9",
+ "Uacute": "\u00DA",
+ "Ucirc": "\u00DB",
+ "Uuml": "\u00DC",
+ "Yacute": "\u00DD",
+ "THORN": "\u00DE",
+ "szlig": "\u00DF",
+ "agrave": "\u00E0",
+ "aacute": "\u00E1",
+ "acirc": "\u00E2",
+ "atilde": "\u00E3",
+ "auml": "\u00E4",
+ "aring": "\u00E5",
+ "aelig": "\u00E6",
+ "ccedil": "\u00E7",
+ "egrave": "\u00E8",
+ "eacute": "\u00E9",
+ "ecirc": "\u00EA",
+ "euml": "\u00EB",
+ "igrave": "\u00EC",
+ "iacute": "\u00ED",
+ "icirc": "\u00EE",
+ "iuml": "\u00EF",
+ "eth": "\u00F0",
+ "ntilde": "\u00F1",
+ "ograve": "\u00F2",
+ "oacute": "\u00F3",
+ "ocirc": "\u00F4",
+ "otilde": "\u00F5",
+ "ouml": "\u00F6",
+ "divide": "\u00F7",
+ "oslash": "\u00F8",
+ "ugrave": "\u00F9",
+ "uacute": "\u00FA",
+ "ucirc": "\u00FB",
+ "uuml": "\u00FC",
+ "yacute": "\u00FD",
+ "thorn": "\u00FE",
+ "yuml": "\u00FF",
+ "fnof": "\u0192",
+ "Alpha": "\u0391",
+ "Beta": "\u0392",
+ "Gamma": "\u0393",
+ "Delta": "\u0394",
+ "Epsilon": "\u0395",
+ "Zeta": "\u0396",
+ "Eta": "\u0397",
+ "Theta": "\u0398",
+ "Iota": "\u0399",
+ "Kappa": "\u039A",
+ "Lambda": "\u039B",
+ "Mu": "\u039C",
+ "Nu": "\u039D",
+ "Xi": "\u039E",
+ "Omicron": "\u039F",
+ "Pi": "\u03A0",
+ "Rho": "\u03A1",
+ "Sigma": "\u03A3",
+ "Tau": "\u03A4",
+ "Upsilon": "\u03A5",
+ "Phi": "\u03A6",
+ "Chi": "\u03A7",
+ "Psi": "\u03A8",
+ "Omega": "\u03A9",
+ "alpha": "\u03B1",
+ "beta": "\u03B2",
+ "gamma": "\u03B3",
+ "delta": "\u03B4",
+ "epsilon": "\u03B5",
+ "zeta": "\u03B6",
+ "eta": "\u03B7",
+ "theta": "\u03B8",
+ "iota": "\u03B9",
+ "kappa": "\u03BA",
+ "lambda": "\u03BB",
+ "mu": "\u03BC",
+ "nu": "\u03BD",
+ "xi": "\u03BE",
+ "omicron": "\u03BF",
+ "pi": "\u03C0",
+ "rho": "\u03C1",
+ "sigmaf": "\u03C2",
+ "sigma": "\u03C3",
+ "tau": "\u03C4",
+ "upsilon": "\u03C5",
+ "phi": "\u03C6",
+ "chi": "\u03C7",
+ "psi": "\u03C8",
+ "omega": "\u03C9",
+ "thetasym": "\u03D1",
+ "upsih": "\u03D2",
+ "piv": "\u03D6",
+ "bull": "\u2022",
+ "hellip": "\u2026",
+ "prime": "\u2032",
+ "Prime": "\u2033",
+ "oline": "\u203E",
+ "frasl": "\u2044",
+ "weierp": "\u2118",
+ "image": "\u2111",
+ "real": "\u211C",
+ "trade": "\u2122",
+ "alefsym": "\u2135",
+ "larr": "\u2190",
+ "uarr": "\u2191",
+ "rarr": "\u2192",
+ "darr": "\u2193",
+ "harr": "\u2194",
+ "crarr": "\u21B5",
+ "lArr": "\u21D0",
+ "uArr": "\u21D1",
+ "rArr": "\u21D2",
+ "dArr": "\u21D3",
+ "hArr": "\u21D4",
+ "forall": "\u2200",
+ "part": "\u2202",
+ "exist": "\u2203",
+ "empty": "\u2205",
+ "nabla": "\u2207",
+ "isin": "\u2208",
+ "notin": "\u2209",
+ "ni": "\u220B",
+ "prod": "\u220F",
+ "sum": "\u2211",
+ "minus": "\u2212",
+ "lowast": "\u2217",
+ "radic": "\u221A",
+ "prop": "\u221D",
+ "infin": "\u221E",
+ "ang": "\u2220",
+ "and": "\u2227",
+ "or": "\u2228",
+ "cap": "\u2229",
+ "cup": "\u222A",
+ "int": "\u222B",
+ "there4": "\u2234",
+ "sim": "\u223C",
+ "cong": "\u2245",
+ "asymp": "\u2248",
+ "ne": "\u2260",
+ "equiv": "\u2261",
+ "le": "\u2264",
+ "ge": "\u2265",
+ "sub": "\u2282",
+ "sup": "\u2283",
+ "nsub": "\u2284",
+ "sube": "\u2286",
+ "supe": "\u2287",
+ "oplus": "\u2295",
+ "otimes": "\u2297",
+ "perp": "\u22A5",
+ "sdot": "\u22C5",
+ "lceil": "\u2308",
+ "rceil": "\u2309",
+ "lfloor": "\u230A",
+ "rfloor": "\u230B",
+ "lang": "\u2329",
+ "rang": "\u232A",
+ "loz": "\u25CA",
+ "spades": "\u2660",
+ "clubs": "\u2663",
+ "hearts": "\u2665",
+ "diams": "\u2666",
+ "quot": "\u0022",
+ "amp": "\u0026",
+ "lt": "\u003C",
+ "gt": "\u003E",
+ "OElig": "\u0152",
+ "oelig": "\u0153",
+ "Scaron": "\u0160",
+ "scaron": "\u0161",
+ "Yuml": "\u0178",
+ "circ": "\u02C6",
+ "tilde": "\u02DC",
+ "ensp": "\u2002",
+ "emsp": "\u2003",
+ "thinsp": "\u2009",
+ "zwnj": "\u200C",
+ "zwj": "\u200D",
+ "lrm": "\u200E",
+ "rlm": "\u200F",
+ "ndash": "\u2013",
+ "mdash": "\u2014",
+ "lsquo": "\u2018",
+ "rsquo": "\u2019",
+ "sbquo": "\u201A",
+ "ldquo": "\u201C",
+ "rdquo": "\u201D",
+ "bdquo": "\u201E",
+ "dagger": "\u2020",
+ "Dagger": "\u2021",
+ "permil": "\u2030",
+ "lsaquo": "\u2039",
+ "rsaquo": "\u203A",
+ "euro": "\u20AC",
+}
+
+// HTMLAutoClose is the set of HTML elements that
+// should be considered to close automatically.
+var HTMLAutoClose = htmlAutoClose
+
+var htmlAutoClose = []string{
+ /*
+ hget http://www.w3.org/TR/html4/loose.dtd |
+ 9 sed -n 's/<!ELEMENT (.*) - O EMPTY.+/ "\1",/p' | tr A-Z a-z
+ */
+ "basefont",
+ "br",
+ "area",
+ "link",
+ "img",
+ "param",
+ "hr",
+ "input",
+ "col ",
+ "frame",
+ "isindex",
+ "base",
+ "meta",
+}
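
HTMLEntity and HTMLAutoClose are meant to be plugged into a Parser when reading
HTML-flavored input. A minimal sketch, assuming the Parser fields Strict,
AutoClose, and Entity as exposed by this package, plus fmt and strings imports:

    // Tokenize loosely-formed HTML: disable strict mode, auto-close the
    // void elements listed above, and translate the standard entities.
    p := xml.NewParser(strings.NewReader("<p>caf&eacute;<br>fin"))
    p.Strict = false
    p.AutoClose = xml.HTMLAutoClose
    p.Entity = xml.HTMLEntity
    for {
        tok, err := p.Token()
        if err != nil {
            break // io.EOF at end of input
        }
        fmt.Printf("%#v\n", tok)
    }
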
+
+var (
+ esc_quot = []byte("&#34;") // shorter than "&quot;"
+ esc_apos = []byte("&#39;") // shorter than "&apos;"
+ esc_amp = []byte("&amp;")
+ esc_lt = []byte("&lt;")
+ esc_gt = []byte("&gt;")
+)
+
+// Escape writes to w the properly escaped XML equivalent
+// of the plain text data s.
+func Escape(w io.Writer, s []byte) {
+ var esc []byte
+ last := 0
+ for i, c := range s {
+ switch c {
+ case '"':
+ esc = esc_quot
+ case '\'':
+ esc = esc_apos
+ case '&':
+ esc = esc_amp
+ case '<':
+ esc = esc_lt
+ case '>':
+ esc = esc_gt
+ default:
+ continue
+ }
+ w.Write(s[last:i])
+ w.Write(esc)
+ last = i + 1
+ }
+ w.Write(s[last:])
+}
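
As a quick illustration of the replacements listed above, a sketch of calling
Escape on input containing all five special characters (bytes and fmt imports
assumed):

    var buf bytes.Buffer
    xml.Escape(&buf, []byte(`fish & chips <"fresh">`))
    fmt.Println(buf.String())
    // Output: fish &amp; chips &lt;&#34;fresh&#34;&gt;
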
+
+// procInstEncoding parses the `encoding="..."` or `encoding='...'`
+// value out of the provided string, returning "" if not found.
+func procInstEncoding(s string) string {
+ // TODO: this parsing is somewhat lame and not exact.
+ // It works for all actual cases, though.
+ idx := strings.Index(s, "encoding=")
+ if idx == -1 {
+ return ""
+ }
+ v := s[idx+len("encoding="):]
+ if v == "" {
+ return ""
+ }
+ if v[0] != '\'' && v[0] != '"' {
+ return ""
+ }
+ idx = strings.IndexRune(v[1:], rune(v[0]))
+ if idx == -1 {
+ return ""
+ }
+ return v[1 : idx+1]
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xml
+
+import (
+ "bytes"
+ "io"
+ "os"
+ "reflect"
+ "strings"
+ "testing"
+)
+
+const testInput = `
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+ "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<body xmlns:foo="ns1" xmlns="ns2" xmlns:tag="ns3" ` +
+ "\r\n\t" + ` >
+ <hello lang="en">World <>'" 白鵬翔</hello>
+ <goodbye />
+ <outer foo:attr="value" xmlns:tag="ns4">
+ <inner/>
+ </outer>
+ <tag:name>
+ <![CDATA[Some text here.]]>
+ </tag:name>
+</body><!-- missing final newline -->`
+
+var rawTokens = []Token{
+ CharData([]byte("\n")),
+ ProcInst{"xml", []byte(`version="1.0" encoding="UTF-8"`)},
+ CharData([]byte("\n")),
+ Directive([]byte(`DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+ "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"`),
+ ),
+ CharData([]byte("\n")),
+ StartElement{Name{"", "body"}, []Attr{{Name{"xmlns", "foo"}, "ns1"}, {Name{"", "xmlns"}, "ns2"}, {Name{"xmlns", "tag"}, "ns3"}}},
+ CharData([]byte("\n ")),
+ StartElement{Name{"", "hello"}, []Attr{{Name{"", "lang"}, "en"}}},
+ CharData([]byte("World <>'\" 白鵬翔")),
+ EndElement{Name{"", "hello"}},
+ CharData([]byte("\n ")),
+ StartElement{Name{"", "goodbye"}, nil},
+ EndElement{Name{"", "goodbye"}},
+ CharData([]byte("\n ")),
+ StartElement{Name{"", "outer"}, []Attr{{Name{"foo", "attr"}, "value"}, {Name{"xmlns", "tag"}, "ns4"}}},
+ CharData([]byte("\n ")),
+ StartElement{Name{"", "inner"}, nil},
+ EndElement{Name{"", "inner"}},
+ CharData([]byte("\n ")),
+ EndElement{Name{"", "outer"}},
+ CharData([]byte("\n ")),
+ StartElement{Name{"tag", "name"}, nil},
+ CharData([]byte("\n ")),
+ CharData([]byte("Some text here.")),
+ CharData([]byte("\n ")),
+ EndElement{Name{"tag", "name"}},
+ CharData([]byte("\n")),
+ EndElement{Name{"", "body"}},
+ Comment([]byte(" missing final newline ")),
+}
+
+var cookedTokens = []Token{
+ CharData([]byte("\n")),
+ ProcInst{"xml", []byte(`version="1.0" encoding="UTF-8"`)},
+ CharData([]byte("\n")),
+ Directive([]byte(`DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+ "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"`),
+ ),
+ CharData([]byte("\n")),
+ StartElement{Name{"ns2", "body"}, []Attr{{Name{"xmlns", "foo"}, "ns1"}, {Name{"", "xmlns"}, "ns2"}, {Name{"xmlns", "tag"}, "ns3"}}},
+ CharData([]byte("\n ")),
+ StartElement{Name{"ns2", "hello"}, []Attr{{Name{"", "lang"}, "en"}}},
+ CharData([]byte("World <>'\" 白鵬翔")),
+ EndElement{Name{"ns2", "hello"}},
+ CharData([]byte("\n ")),
+ StartElement{Name{"ns2", "goodbye"}, nil},
+ EndElement{Name{"ns2", "goodbye"}},
+ CharData([]byte("\n ")),
+ StartElement{Name{"ns2", "outer"}, []Attr{{Name{"ns1", "attr"}, "value"}, {Name{"xmlns", "tag"}, "ns4"}}},
+ CharData([]byte("\n ")),
+ StartElement{Name{"ns2", "inner"}, nil},
+ EndElement{Name{"ns2", "inner"}},
+ CharData([]byte("\n ")),
+ EndElement{Name{"ns2", "outer"}},
+ CharData([]byte("\n ")),
+ StartElement{Name{"ns3", "name"}, nil},
+ CharData([]byte("\n ")),
+ CharData([]byte("Some text here.")),
+ CharData([]byte("\n ")),
+ EndElement{Name{"ns3", "name"}},
+ CharData([]byte("\n")),
+ EndElement{Name{"ns2", "body"}},
+ Comment([]byte(" missing final newline ")),
+}
+
+const testInputAltEncoding = `
+<?xml version="1.0" encoding="x-testing-uppercase"?>
+<TAG>VALUE</TAG>`
+
+var rawTokensAltEncoding = []Token{
+ CharData([]byte("\n")),
+ ProcInst{"xml", []byte(`version="1.0" encoding="x-testing-uppercase"`)},
+ CharData([]byte("\n")),
+ StartElement{Name{"", "tag"}, nil},
+ CharData([]byte("value")),
+ EndElement{Name{"", "tag"}},
+}
+
+var xmlInput = []string{
+ // unexpected EOF cases
+ "<",
+ "<t",
+ "<t ",
+ "<t/",
+ "<!",
+ "<!-",
+ "<!--",
+ "<!--c-",
+ "<!--c--",
+ "<!d",
+ "<t></",
+ "<t></t",
+ "<?",
+ "<?p",
+ "<t a",
+ "<t a=",
+ "<t a='",
+ "<t a=''",
+ "<t/><![",
+ "<t/><![C",
+ "<t/><![CDATA[d",
+ "<t/><![CDATA[d]",
+ "<t/><![CDATA[d]]",
+
+ // other Syntax errors
+ "<>",
+ "<t/a",
+ "<0 />",
+ "<?0 >",
+ // "<!0 >", // let the Token() caller handle
+ "</0>",
+ "<t 0=''>",
+ "<t a='&'>",
+ "<t a='<'>",
+ "<t> c;</t>",
+ "<t a>",
+ "<t a=>",
+ "<t a=v>",
+ // "<![CDATA[d]]>", // let the Token() caller handle
+ "<t></e>",
+ "<t></>",
+ "<t></t!",
+ "<t>cdata]]></t>",
+}
+
+type stringReader struct {
+ s string
+ off int
+}
+
+func (r *stringReader) Read(b []byte) (n int, err error) {
+ if r.off >= len(r.s) {
+ return 0, io.EOF
+ }
+ for r.off < len(r.s) && n < len(b) {
+ b[n] = r.s[r.off]
+ n++
+ r.off++
+ }
+ return
+}
+
+func (r *stringReader) ReadByte() (b byte, err error) {
+ if r.off >= len(r.s) {
+ return 0, io.EOF
+ }
+ b = r.s[r.off]
+ r.off++
+ return
+}
+
+func StringReader(s string) io.Reader { return &stringReader{s, 0} }
+
+func TestRawToken(t *testing.T) {
+ p := NewParser(StringReader(testInput))
+ testRawToken(t, p, rawTokens)
+}
+
+type downCaser struct {
+ t *testing.T
+ r io.ByteReader
+}
+
+func (d *downCaser) ReadByte() (c byte, err error) {
+ c, err = d.r.ReadByte()
+ if c >= 'A' && c <= 'Z' {
+ c += 'a' - 'A'
+ }
+ return
+}
+
+func (d *downCaser) Read(p []byte) (int, error) {
+ d.t.Fatalf("unexpected Read call on downCaser reader")
+ return 0, os.EINVAL
+}
+
+func TestRawTokenAltEncoding(t *testing.T) {
+ sawEncoding := ""
+ p := NewParser(StringReader(testInputAltEncoding))
+ p.CharsetReader = func(charset string, input io.Reader) (io.Reader, error) {
+ sawEncoding = charset
+ if charset != "x-testing-uppercase" {
+ t.Fatalf("unexpected charset %q", charset)
+ }
+ return &downCaser{t, input.(io.ByteReader)}, nil
+ }
+ testRawToken(t, p, rawTokensAltEncoding)
+}
+
+func TestRawTokenAltEncodingNoConverter(t *testing.T) {
+ p := NewParser(StringReader(testInputAltEncoding))
+ token, err := p.RawToken()
+ if token == nil {
+ t.Fatalf("expected a token on first RawToken call")
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+ token, err = p.RawToken()
+ if token != nil {
+ t.Errorf("expected a nil token; got %#v", token)
+ }
+ if err == nil {
+ t.Fatalf("expected an error on second RawToken call")
+ }
+ const encoding = "x-testing-uppercase"
+ if !strings.Contains(err.Error(), encoding) {
+ t.Errorf("expected error to contain %q; got error: %v",
+ encoding, err)
+ }
+}
+
+func testRawToken(t *testing.T, p *Parser, rawTokens []Token) {
+ for i, want := range rawTokens {
+ have, err := p.RawToken()
+ if err != nil {
+ t.Fatalf("token %d: unexpected error: %s", i, err)
+ }
+ if !reflect.DeepEqual(have, want) {
+ t.Errorf("token %d = %#v want %#v", i, have, want)
+ }
+ }
+}
+
+// Ensure that directives (specifically !DOCTYPE) include the complete
+// text of any nested directives, noting that < and > do not change
+// nesting depth if they are in single or double quotes.
+
+var nestedDirectivesInput = `
+<!DOCTYPE [<!ENTITY rdf "http://www.w3.org/1999/02/22-rdf-syntax-ns#">]>
+<!DOCTYPE [<!ENTITY xlt ">">]>
+<!DOCTYPE [<!ENTITY xlt "<">]>
+<!DOCTYPE [<!ENTITY xlt '>'>]>
+<!DOCTYPE [<!ENTITY xlt '<'>]>
+<!DOCTYPE [<!ENTITY xlt '">'>]>
+<!DOCTYPE [<!ENTITY xlt "'<">]>
+`
+
+var nestedDirectivesTokens = []Token{
+ CharData([]byte("\n")),
+ Directive([]byte(`DOCTYPE [<!ENTITY rdf "http://www.w3.org/1999/02/22-rdf-syntax-ns#">]`)),
+ CharData([]byte("\n")),
+ Directive([]byte(`DOCTYPE [<!ENTITY xlt ">">]`)),
+ CharData([]byte("\n")),
+ Directive([]byte(`DOCTYPE [<!ENTITY xlt "<">]`)),
+ CharData([]byte("\n")),
+ Directive([]byte(`DOCTYPE [<!ENTITY xlt '>'>]`)),
+ CharData([]byte("\n")),
+ Directive([]byte(`DOCTYPE [<!ENTITY xlt '<'>]`)),
+ CharData([]byte("\n")),
+ Directive([]byte(`DOCTYPE [<!ENTITY xlt '">'>]`)),
+ CharData([]byte("\n")),
+ Directive([]byte(`DOCTYPE [<!ENTITY xlt "'<">]`)),
+ CharData([]byte("\n")),
+}
+
+func TestNestedDirectives(t *testing.T) {
+ p := NewParser(StringReader(nestedDirectivesInput))
+
+ for i, want := range nestedDirectivesTokens {
+ have, err := p.Token()
+ if err != nil {
+ t.Fatalf("token %d: unexpected error: %s", i, err)
+ }
+ if !reflect.DeepEqual(have, want) {
+ t.Errorf("token %d = %#v want %#v", i, have, want)
+ }
+ }
+}
+
+func TestToken(t *testing.T) {
+ p := NewParser(StringReader(testInput))
+
+ for i, want := range cookedTokens {
+ have, err := p.Token()
+ if err != nil {
+ t.Fatalf("token %d: unexpected error: %s", i, err)
+ }
+ if !reflect.DeepEqual(have, want) {
+ t.Errorf("token %d = %#v want %#v", i, have, want)
+ }
+ }
+}
+
+func TestSyntax(t *testing.T) {
+ for i := range xmlInput {
+ p := NewParser(StringReader(xmlInput[i]))
+ var err error
+ for _, err = p.Token(); err == nil; _, err = p.Token() {
+ }
+ if _, ok := err.(*SyntaxError); !ok {
+ t.Fatalf(`xmlInput "%s": expected SyntaxError not received`, xmlInput[i])
+ }
+ }
+}
+
+type allScalars struct {
+ True1 bool
+ True2 bool
+ False1 bool
+ False2 bool
+ Int int
+ Int8 int8
+ Int16 int16
+ Int32 int32
+ Int64 int64
+ Uint int
+ Uint8 uint8
+ Uint16 uint16
+ Uint32 uint32
+ Uint64 uint64
+ Uintptr uintptr
+ Float32 float32
+ Float64 float64
+ String string
+ PtrString *string
+}
+
+var all = allScalars{
+ True1: true,
+ True2: true,
+ False1: false,
+ False2: false,
+ Int: 1,
+ Int8: -2,
+ Int16: 3,
+ Int32: -4,
+ Int64: 5,
+ Uint: 6,
+ Uint8: 7,
+ Uint16: 8,
+ Uint32: 9,
+ Uint64: 10,
+ Uintptr: 11,
+ Float32: 13.0,
+ Float64: 14.0,
+ String: "15",
+ PtrString: &sixteen,
+}
+
+var sixteen = "16"
+
+const testScalarsInput = `<allscalars>
+ <true1>true</true1>
+ <true2>1</true2>
+ <false1>false</false1>
+ <false2>0</false2>
+ <int>1</int>
+ <int8>-2</int8>
+ <int16>3</int16>
+ <int32>-4</int32>
+ <int64>5</int64>
+ <uint>6</uint>
+ <uint8>7</uint8>
+ <uint16>8</uint16>
+ <uint32>9</uint32>
+ <uint64>10</uint64>
+ <uintptr>11</uintptr>
+ <float>12.0</float>
+ <float32>13.0</float32>
+ <float64>14.0</float64>
+ <string>15</string>
+ <ptrstring>16</ptrstring>
+</allscalars>`
+
+func TestAllScalars(t *testing.T) {
+ var a allScalars
+ buf := bytes.NewBufferString(testScalarsInput)
+ err := Unmarshal(buf, &a)
+
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !reflect.DeepEqual(a, all) {
+ t.Errorf("have %+v want %+v", a, all)
+ }
+}
+
+type item struct {
+ Field_a string
+}
+
+func TestIssue569(t *testing.T) {
+ data := `<item><field_a>abcd</field_a></item>`
+ var i item
+ buf := bytes.NewBufferString(data)
+ err := Unmarshal(buf, &i)
+
+ if err != nil || i.Field_a != "abcd" {
+ t.Fatal("Expecting abcd")
+ }
+}
+
+func TestUnquotedAttrs(t *testing.T) {
+ data := "<tag attr=azAZ09:-_\t>"
+ p := NewParser(StringReader(data))
+ p.Strict = false
+ token, err := p.Token()
+ if _, ok := err.(*SyntaxError); ok {
+ t.Errorf("Unexpected error: %v", err)
+ }
+ if token.(StartElement).Name.Local != "tag" {
+ t.Errorf("Unexpected tag name: %v", token.(StartElement).Name.Local)
+ }
+ attr := token.(StartElement).Attr[0]
+ if attr.Value != "azAZ09:-_" {
+ t.Errorf("Unexpected attribute value: %v", attr.Value)
+ }
+ if attr.Name.Local != "attr" {
+ t.Errorf("Unexpected attribute name: %v", attr.Name.Local)
+ }
+}
+
+func TestValuelessAttrs(t *testing.T) {
+ tests := [][3]string{
+ {"<p nowrap>", "p", "nowrap"},
+ {"<p nowrap >", "p", "nowrap"},
+ {"<input checked/>", "input", "checked"},
+ {"<input checked />", "input", "checked"},
+ }
+ for _, test := range tests {
+ p := NewParser(StringReader(test[0]))
+ p.Strict = false
+ token, err := p.Token()
+ if _, ok := err.(*SyntaxError); ok {
+ t.Errorf("Unexpected error: %v", err)
+ }
+ if token.(StartElement).Name.Local != test[1] {
+ t.Errorf("Unexpected tag name: %v", token.(StartElement).Name.Local)
+ }
+ attr := token.(StartElement).Attr[0]
+ if attr.Value != test[2] {
+ t.Errorf("Unexpected attribute value: %v", attr.Value)
+ }
+ if attr.Name.Local != test[2] {
+ t.Errorf("Unexpected attribute name: %v", attr.Name.Local)
+ }
+ }
+}
+
+func TestCopyTokenCharData(t *testing.T) {
+ data := []byte("same data")
+ var tok1 Token = CharData(data)
+ tok2 := CopyToken(tok1)
+ if !reflect.DeepEqual(tok1, tok2) {
+ t.Error("CopyToken(CharData) != CharData")
+ }
+ data[1] = 'o'
+ if reflect.DeepEqual(tok1, tok2) {
+ t.Error("CopyToken(CharData) uses same buffer.")
+ }
+}
+
+func TestCopyTokenStartElement(t *testing.T) {
+ elt := StartElement{Name{"", "hello"}, []Attr{{Name{"", "lang"}, "en"}}}
+ var tok1 Token = elt
+ tok2 := CopyToken(tok1)
+ if !reflect.DeepEqual(tok1, tok2) {
+ t.Error("CopyToken(StartElement) != StartElement")
+ }
+ elt.Attr[0] = Attr{Name{"", "lang"}, "de"}
+ if reflect.DeepEqual(tok1, tok2) {
+ t.Error("CopyToken(CharData) uses same buffer.")
+ }
+}
+
+func TestSyntaxErrorLineNum(t *testing.T) {
+ testInput := "<P>Foo<P>\n\n<P>Bar</>\n"
+ p := NewParser(StringReader(testInput))
+ var err error
+ for _, err = p.Token(); err == nil; _, err = p.Token() {
+ }
+ synerr, ok := err.(*SyntaxError)
+ if !ok {
+ t.Error("Expected SyntaxError.")
+ }
+ if synerr.Line != 3 {
+ t.Error("SyntaxError didn't have correct line number.")
+ }
+}
+
+func TestTrailingRawToken(t *testing.T) {
+ input := `<FOO></FOO> `
+ p := NewParser(StringReader(input))
+ var err error
+ for _, err = p.RawToken(); err == nil; _, err = p.RawToken() {
+ }
+ if err != io.EOF {
+ t.Fatalf("p.RawToken() = _, %v, want _, io.EOF", err)
+ }
+}
+
+func TestTrailingToken(t *testing.T) {
+ input := `<FOO></FOO> `
+ p := NewParser(StringReader(input))
+ var err error
+ for _, err = p.Token(); err == nil; _, err = p.Token() {
+ }
+ if err != io.EOF {
+ t.Fatalf("p.Token() = _, %v, want _, io.EOF", err)
+ }
+}
+
+func TestEntityInsideCDATA(t *testing.T) {
+ input := `<test><![CDATA[ &val=foo ]]></test>`
+ p := NewParser(StringReader(input))
+ var err error
+ for _, err = p.Token(); err == nil; _, err = p.Token() {
+ }
+ if err != io.EOF {
+ t.Fatalf("p.Token() = _, %v, want _, io.EOF", err)
+ }
+}
+
+// The last three tests (respectively one for characters in attribute
+// names and two for character entities) pass not because of code
+// changed for issue 1259, but instead pass with the given messages
+// from other parts of xml.Parser. I provide these to note the
+// current behavior of situations where one might think that character
+// range checking would detect the error, but it does not in fact.
+
+var characterTests = []struct {
+ in string
+ err string
+}{
+ {"\x12<doc/>", "illegal character code U+0012"},
+ {"<?xml version=\"1.0\"?>\x0b<doc/>", "illegal character code U+000B"},
+ {"\xef\xbf\xbe<doc/>", "illegal character code U+FFFE"},
+ {"<?xml version=\"1.0\"?><doc>\r\n<hiya/>\x07<toots/></doc>", "illegal character code U+0007"},
+ {"<?xml version=\"1.0\"?><doc \x12='value'>what's up</doc>", "expected attribute name in element"},
+ {"<doc>&\x01;</doc>", "invalid character entity &;"},
+ {"<doc>&\xef\xbf\xbe;</doc>", "invalid character entity &;"},
+}
+
+func TestDisallowedCharacters(t *testing.T) {
+
+ for i, tt := range characterTests {
+ p := NewParser(StringReader(tt.in))
+ var err error
+
+ for err == nil {
+ _, err = p.Token()
+ }
+ synerr, ok := err.(*SyntaxError)
+ if !ok {
+ t.Fatalf("input %d p.Token() = _, %v, want _, *SyntaxError", i, err)
+ }
+ if synerr.Msg != tt.err {
+ t.Fatalf("input %d synerr.Msg wrong: want '%s', got '%s'", i, tt.err, synerr.Msg)
+ }
+ }
+}
+
+type procInstEncodingTest struct {
+ expect, got string
+}
+
+var procInstTests = []struct {
+ input, expect string
+}{
+ {`version="1.0" encoding="utf-8"`, "utf-8"},
+ {`version="1.0" encoding='utf-8'`, "utf-8"},
+ {`version="1.0" encoding='utf-8' `, "utf-8"},
+ {`version="1.0" encoding=utf-8`, ""},
+ {`encoding="FOO" `, "FOO"},
+}
+
+func TestProcInstEncoding(t *testing.T) {
+ for _, test := range procInstTests {
+ got := procInstEncoding(test.input)
+ if got != test.expect {
+ t.Errorf("procInstEncoding(%q) = %q; want %q", test.input, got, test.expect)
+ }
+ }
+}
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package exec runs external commands. It wraps os.StartProcess to make it
-// easier to remap stdin and stdout, connect I/O with pipes, and do other
-// adjustments.
-package exec
-
-import (
- "bytes"
- "errors"
- "io"
- "os"
- "strconv"
- "syscall"
-)
-
-// Error records the name of a binary that failed to be executed
-// and the reason it failed.
-type Error struct {
- Name string
- Err error
-}
-
-func (e *Error) Error() string {
- return "exec: " + strconv.Quote(e.Name) + ": " + e.Err.Error()
-}
-
-// Cmd represents an external command being prepared or run.
-type Cmd struct {
- // Path is the path of the command to run.
- //
- // This is the only field that must be set to a non-zero
- // value.
- Path string
-
- // Args holds command line arguments, including the command as Args[0].
- // If the Args field is empty or nil, Run uses {Path}.
- //
- // In typical use, both Path and Args are set by calling Command.
- Args []string
-
- // Env specifies the environment of the process.
- // If Env is nil, Run uses the current process's environment.
- Env []string
-
- // Dir specifies the working directory of the command.
- // If Dir is the empty string, Run runs the command in the
- // calling process's current directory.
- Dir string
-
- // Stdin specifies the process's standard input.
- // If Stdin is nil, the process reads from DevNull.
- Stdin io.Reader
-
- // Stdout and Stderr specify the process's standard output and error.
- //
- // If either is nil, Run connects the
- // corresponding file descriptor to /dev/null.
- //
- // If Stdout and Stderr are the same writer, at most one
- // goroutine at a time will call Write.
- Stdout io.Writer
- Stderr io.Writer
-
- // ExtraFiles specifies additional open files to be inherited by the
- // new process. It does not include standard input, standard output, or
- // standard error. If non-nil, entry i becomes file descriptor 3+i.
- ExtraFiles []*os.File
-
- // SysProcAttr holds optional, operating system-specific attributes.
- // Run passes it to os.StartProcess as the os.ProcAttr's Sys field.
- SysProcAttr *syscall.SysProcAttr
-
- // Process is the underlying process, once started.
- Process *os.Process
-
- err error // last error (from LookPath, stdin, stdout, stderr)
- finished bool // when Wait was called
- childFiles []*os.File
- closeAfterStart []io.Closer
- closeAfterWait []io.Closer
- goroutine []func() error
- errch chan error // one send per goroutine
-}
-
-// Command returns the Cmd struct to execute the named program with
-// the given arguments.
-//
-// It sets Path and Args in the returned structure and zeroes the
-// other fields.
-//
-// If name contains no path separators, Command uses LookPath to
-// resolve the path to a complete name if possible. Otherwise it uses
-// name directly.
-//
-// The returned Cmd's Args field is constructed from the command name
-// followed by the elements of arg, so arg should not include the
-// command name itself. For example, Command("echo", "hello")
-func Command(name string, arg ...string) *Cmd {
- aname, err := LookPath(name)
- if err != nil {
- aname = name
- }
- return &Cmd{
- Path: aname,
- Args: append([]string{name}, arg...),
- err: err,
- }
-}
-
-// interfaceEqual protects against panics from doing equality tests on
-// two interfaces with non-comparable underlying types
-func interfaceEqual(a, b interface{}) bool {
- defer func() {
- recover()
- }()
- return a == b
-}
-
-func (c *Cmd) envv() []string {
- if c.Env != nil {
- return c.Env
- }
- return os.Environ()
-}
-
-func (c *Cmd) argv() []string {
- if len(c.Args) > 0 {
- return c.Args
- }
- return []string{c.Path}
-}
-
-func (c *Cmd) stdin() (f *os.File, err error) {
- if c.Stdin == nil {
- f, err = os.Open(os.DevNull)
- c.closeAfterStart = append(c.closeAfterStart, f)
- return
- }
-
- if f, ok := c.Stdin.(*os.File); ok {
- return f, nil
- }
-
- pr, pw, err := os.Pipe()
- if err != nil {
- return
- }
-
- c.closeAfterStart = append(c.closeAfterStart, pr)
- c.closeAfterWait = append(c.closeAfterWait, pw)
- c.goroutine = append(c.goroutine, func() error {
- _, err := io.Copy(pw, c.Stdin)
- if err1 := pw.Close(); err == nil {
- err = err1
- }
- return err
- })
- return pr, nil
-}
-
-func (c *Cmd) stdout() (f *os.File, err error) {
- return c.writerDescriptor(c.Stdout)
-}
-
-func (c *Cmd) stderr() (f *os.File, err error) {
- if c.Stderr != nil && interfaceEqual(c.Stderr, c.Stdout) {
- return c.childFiles[1], nil
- }
- return c.writerDescriptor(c.Stderr)
-}
-
-func (c *Cmd) writerDescriptor(w io.Writer) (f *os.File, err error) {
- if w == nil {
- f, err = os.OpenFile(os.DevNull, os.O_WRONLY, 0)
- c.closeAfterStart = append(c.closeAfterStart, f)
- return
- }
-
- if f, ok := w.(*os.File); ok {
- return f, nil
- }
-
- pr, pw, err := os.Pipe()
- if err != nil {
- return
- }
-
- c.closeAfterStart = append(c.closeAfterStart, pw)
- c.closeAfterWait = append(c.closeAfterWait, pr)
- c.goroutine = append(c.goroutine, func() error {
- _, err := io.Copy(w, pr)
- return err
- })
- return pw, nil
-}
-
-// Run starts the specified command and waits for it to complete.
-//
-// The returned error is nil if the command runs, has no problems
-// copying stdin, stdout, and stderr, and exits with a zero exit
-// status.
-//
-// If the command fails to run or doesn't complete successfully, the
-// error is of type *ExitError. Other error types may be
-// returned for I/O problems.
-func (c *Cmd) Run() error {
- if err := c.Start(); err != nil {
- return err
- }
- return c.Wait()
-}
-
-// Start starts the specified command but does not wait for it to complete.
-func (c *Cmd) Start() error {
- if c.err != nil {
- return c.err
- }
- if c.Process != nil {
- return errors.New("exec: already started")
- }
-
- type F func(*Cmd) (*os.File, error)
- for _, setupFd := range []F{(*Cmd).stdin, (*Cmd).stdout, (*Cmd).stderr} {
- fd, err := setupFd(c)
- if err != nil {
- return err
- }
- c.childFiles = append(c.childFiles, fd)
- }
- c.childFiles = append(c.childFiles, c.ExtraFiles...)
-
- var err error
- c.Process, err = os.StartProcess(c.Path, c.argv(), &os.ProcAttr{
- Dir: c.Dir,
- Files: c.childFiles,
- Env: c.envv(),
- Sys: c.SysProcAttr,
- })
- if err != nil {
- return err
- }
-
- for _, fd := range c.closeAfterStart {
- fd.Close()
- }
-
- c.errch = make(chan error, len(c.goroutine))
- for _, fn := range c.goroutine {
- go func(fn func() error) {
- c.errch <- fn()
- }(fn)
- }
-
- return nil
-}
-
-// An ExitError reports an unsuccessful exit by a command.
-type ExitError struct {
- *os.Waitmsg
-}
-
-func (e *ExitError) Error() string {
- return e.Waitmsg.String()
-}
-
-// Wait waits for the command to exit.
-// It must have been started by Start.
-//
-// The returned error is nil if the command runs, has no problems
-// copying stdin, stdout, and stderr, and exits with a zero exit
-// status.
-//
-// If the command fails to run or doesn't complete successfully, the
-// error is of type *ExitError. Other error types may be
-// returned for I/O problems.
-func (c *Cmd) Wait() error {
- if c.Process == nil {
- return errors.New("exec: not started")
- }
- if c.finished {
- return errors.New("exec: Wait was already called")
- }
- c.finished = true
- msg, err := c.Process.Wait(0)
-
- var copyError error
- for _ = range c.goroutine {
- if err := <-c.errch; err != nil && copyError == nil {
- copyError = err
- }
- }
-
- for _, fd := range c.closeAfterWait {
- fd.Close()
- }
-
- if err != nil {
- return err
- } else if !msg.Exited() || msg.ExitStatus() != 0 {
- return &ExitError{msg}
- }
-
- return copyError
-}
-
-// Output runs the command and returns its standard output.
-func (c *Cmd) Output() ([]byte, error) {
- if c.Stdout != nil {
- return nil, errors.New("exec: Stdout already set")
- }
- var b bytes.Buffer
- c.Stdout = &b
- err := c.Run()
- return b.Bytes(), err
-}
-
-// CombinedOutput runs the command and returns its combined standard
-// output and standard error.
-func (c *Cmd) CombinedOutput() ([]byte, error) {
- if c.Stdout != nil {
- return nil, errors.New("exec: Stdout already set")
- }
- if c.Stderr != nil {
- return nil, errors.New("exec: Stderr already set")
- }
- var b bytes.Buffer
- c.Stdout = &b
- c.Stderr = &b
- err := c.Run()
- return b.Bytes(), err
-}
-
-// StdinPipe returns a pipe that will be connected to the command's
-// standard input when the command starts.
-func (c *Cmd) StdinPipe() (io.WriteCloser, error) {
- if c.Stdin != nil {
- return nil, errors.New("exec: Stdin already set")
- }
- if c.Process != nil {
- return nil, errors.New("exec: StdinPipe after process started")
- }
- pr, pw, err := os.Pipe()
- if err != nil {
- return nil, err
- }
- c.Stdin = pr
- c.closeAfterStart = append(c.closeAfterStart, pr)
- c.closeAfterWait = append(c.closeAfterWait, pw)
- return pw, nil
-}
-
-// StdoutPipe returns a pipe that will be connected to the command's
-// standard output when the command starts.
-// The pipe will be closed automatically after Wait sees the command exit.
-func (c *Cmd) StdoutPipe() (io.ReadCloser, error) {
- if c.Stdout != nil {
- return nil, errors.New("exec: Stdout already set")
- }
- if c.Process != nil {
- return nil, errors.New("exec: StdoutPipe after process started")
- }
- pr, pw, err := os.Pipe()
- if err != nil {
- return nil, err
- }
- c.Stdout = pw
- c.closeAfterStart = append(c.closeAfterStart, pw)
- c.closeAfterWait = append(c.closeAfterWait, pr)
- return pr, nil
-}
-
-// StderrPipe returns a pipe that will be connected to the command's
-// standard error when the command starts.
-// The pipe will be closed automatically after Wait sees the command exit.
-func (c *Cmd) StderrPipe() (io.ReadCloser, error) {
- if c.Stderr != nil {
- return nil, errors.New("exec: Stderr already set")
- }
- if c.Process != nil {
- return nil, errors.New("exec: StderrPipe after process started")
- }
- pr, pw, err := os.Pipe()
- if err != nil {
- return nil, err
- }
- c.Stderr = pw
- c.closeAfterStart = append(c.closeAfterStart, pw)
- c.closeAfterWait = append(c.closeAfterWait, pr)
- return pr, nil
-}
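
For reference, the shape of the Cmd API being deleted here (the same API
continues upstream under the os/exec import path after the package
reorganization): a minimal sketch that captures the output of an external
command, with the command name chosen arbitrarily and bytes, fmt, and log
imports assumed:

    cmd := exec.Command("gofmt", "-l", ".")
    var out bytes.Buffer
    cmd.Stdout = &out // any io.Writer may be attached
    if err := cmd.Run(); err != nil {
        log.Fatal(err)
    }
    fmt.Print(out.String())
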
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package exec
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "io"
- "io/ioutil"
- "testing"
- "os"
- "runtime"
- "strconv"
- "strings"
-)
-
-func helperCommand(s ...string) *Cmd {
- cs := []string{"-test.run=exec.TestHelperProcess", "--"}
- cs = append(cs, s...)
- cmd := Command(os.Args[0], cs...)
- cmd.Env = append([]string{"GO_WANT_HELPER_PROCESS=1"}, os.Environ()...)
- return cmd
-}
-
-func TestEcho(t *testing.T) {
- bs, err := helperCommand("echo", "foo bar", "baz").Output()
- if err != nil {
- t.Errorf("echo: %v", err)
- }
- if g, e := string(bs), "foo bar baz\n"; g != e {
- t.Errorf("echo: want %q, got %q", e, g)
- }
-}
-
-func TestCatStdin(t *testing.T) {
- // Cat, testing stdin and stdout.
- input := "Input string\nLine 2"
- p := helperCommand("cat")
- p.Stdin = strings.NewReader(input)
- bs, err := p.Output()
- if err != nil {
- t.Errorf("cat: %v", err)
- }
- s := string(bs)
- if s != input {
- t.Errorf("cat: want %q, got %q", input, s)
- }
-}
-
-func TestCatGoodAndBadFile(t *testing.T) {
- // Testing combined output and error values.
- bs, err := helperCommand("cat", "/bogus/file.foo", "exec_test.go").CombinedOutput()
- if _, ok := err.(*ExitError); !ok {
- t.Errorf("expected *ExitError from cat combined; got %T: %v", err, err)
- }
- s := string(bs)
- sp := strings.SplitN(s, "\n", 2)
- if len(sp) != 2 {
- t.Fatalf("expected two lines from cat; got %q", s)
- }
- errLine, body := sp[0], sp[1]
- if !strings.HasPrefix(errLine, "Error: open /bogus/file.foo") {
- t.Errorf("expected stderr to complain about file; got %q", errLine)
- }
- if !strings.Contains(body, "func TestHelperProcess(t *testing.T)") {
- t.Errorf("expected test code; got %q (len %d)", body, len(body))
- }
-}
-
-func TestNoExistBinary(t *testing.T) {
- // Can't run a non-existent binary
- err := Command("/no-exist-binary").Run()
- if err == nil {
- t.Error("expected error from /no-exist-binary")
- }
-}
-
-func TestExitStatus(t *testing.T) {
- // Test that exit values are returned correctly
- err := helperCommand("exit", "42").Run()
- if werr, ok := err.(*ExitError); ok {
- if s, e := werr.Error(), "exit status 42"; s != e {
- t.Errorf("from exit 42 got exit %q, want %q", s, e)
- }
- } else {
- t.Fatalf("expected *ExitError from exit 42; got %T: %v", err, err)
- }
-}
-
-func TestPipes(t *testing.T) {
- check := func(what string, err error) {
- if err != nil {
- t.Fatalf("%s: %v", what, err)
- }
- }
- // Cat, testing stdin and stdout.
- c := helperCommand("pipetest")
- stdin, err := c.StdinPipe()
- check("StdinPipe", err)
- stdout, err := c.StdoutPipe()
- check("StdoutPipe", err)
- stderr, err := c.StderrPipe()
- check("StderrPipe", err)
-
- outbr := bufio.NewReader(stdout)
- errbr := bufio.NewReader(stderr)
- line := func(what string, br *bufio.Reader) string {
- line, _, err := br.ReadLine()
- if err != nil {
- t.Fatalf("%s: %v", what, err)
- }
- return string(line)
- }
-
- err = c.Start()
- check("Start", err)
-
- _, err = stdin.Write([]byte("O:I am output\n"))
- check("first stdin Write", err)
- if g, e := line("first output line", outbr), "O:I am output"; g != e {
- t.Errorf("got %q, want %q", g, e)
- }
-
- _, err = stdin.Write([]byte("E:I am error\n"))
- check("second stdin Write", err)
- if g, e := line("first error line", errbr), "E:I am error"; g != e {
- t.Errorf("got %q, want %q", g, e)
- }
-
- _, err = stdin.Write([]byte("O:I am output2\n"))
- check("third stdin Write 3", err)
- if g, e := line("second output line", outbr), "O:I am output2"; g != e {
- t.Errorf("got %q, want %q", g, e)
- }
-
- stdin.Close()
- err = c.Wait()
- check("Wait", err)
-}
-
-func TestExtraFiles(t *testing.T) {
- if runtime.GOOS == "windows" {
- t.Logf("no operating system support; skipping")
- return
- }
- tf, err := ioutil.TempFile("", "")
- if err != nil {
- t.Fatalf("TempFile: %v", err)
- }
- defer os.Remove(tf.Name())
- defer tf.Close()
-
- const text = "Hello, fd 3!"
- _, err = tf.Write([]byte(text))
- if err != nil {
- t.Fatalf("Write: %v", err)
- }
- _, err = tf.Seek(0, os.SEEK_SET)
- if err != nil {
- t.Fatalf("Seek: %v", err)
- }
-
- c := helperCommand("read3")
- c.ExtraFiles = []*os.File{tf}
- bs, err := c.CombinedOutput()
- if err != nil {
- t.Fatalf("CombinedOutput: %v", err)
- }
- if string(bs) != text {
- t.Errorf("got %q; want %q", string(bs), text)
- }
-}
-
-// TestHelperProcess isn't a real test. It's used as a helper process
-// for TestParameterRun.
-func TestHelperProcess(*testing.T) {
- if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
- return
- }
- defer os.Exit(0)
-
- args := os.Args
- for len(args) > 0 {
- if args[0] == "--" {
- args = args[1:]
- break
- }
- args = args[1:]
- }
- if len(args) == 0 {
- fmt.Fprintf(os.Stderr, "No command\n")
- os.Exit(2)
- }
-
- cmd, args := args[0], args[1:]
- switch cmd {
- case "echo":
- iargs := []interface{}{}
- for _, s := range args {
- iargs = append(iargs, s)
- }
- fmt.Println(iargs...)
- case "cat":
- if len(args) == 0 {
- io.Copy(os.Stdout, os.Stdin)
- return
- }
- exit := 0
- for _, fn := range args {
- f, err := os.Open(fn)
- if err != nil {
- fmt.Fprintf(os.Stderr, "Error: %v\n", err)
- exit = 2
- } else {
- defer f.Close()
- io.Copy(os.Stdout, f)
- }
- }
- os.Exit(exit)
- case "pipetest":
- bufr := bufio.NewReader(os.Stdin)
- for {
- line, _, err := bufr.ReadLine()
- if err == io.EOF {
- break
- } else if err != nil {
- os.Exit(1)
- }
- if bytes.HasPrefix(line, []byte("O:")) {
- os.Stdout.Write(line)
- os.Stdout.Write([]byte{'\n'})
- } else if bytes.HasPrefix(line, []byte("E:")) {
- os.Stderr.Write(line)
- os.Stderr.Write([]byte{'\n'})
- } else {
- os.Exit(1)
- }
- }
- case "read3": // read fd 3
- fd3 := os.NewFile(3, "fd3")
- bs, err := ioutil.ReadAll(fd3)
- if err != nil {
- fmt.Printf("ReadAll from fd 3: %v", err)
- os.Exit(1)
- }
- os.Stderr.Write(bs)
- case "exit":
- n, _ := strconv.Atoi(args[0])
- os.Exit(n)
- default:
- fmt.Fprintf(os.Stderr, "Unknown command %q\n", cmd)
- os.Exit(2)
- }
-}
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package exec
-
-import (
- "errors"
- "os"
- "strings"
-)
-
-// ErrNotFound is the error resulting if a path search failed to find an executable file.
-var ErrNotFound = errors.New("executable file not found in $path")
-
-func findExecutable(file string) error {
- d, err := os.Stat(file)
- if err != nil {
- return err
- }
- if d.IsRegular() && d.Permission()&0111 != 0 {
- return nil
- }
- return os.EPERM
-}
-
-// LookPath searches for an executable binary named file
-// in the directories named by the path environment variable.
-// If file begins with "/", "#", "./", or "../", it is tried
-// directly and the path is not consulted.
-func LookPath(file string) (string, error) {
- // skip the path lookup for these prefixes
- skip := []string{"/", "#", "./", "../"}
-
- for _, p := range skip {
- if strings.HasPrefix(file, p) {
- err := findExecutable(file)
- if err == nil {
- return file, nil
- }
- return "", &Error{file, err}
- }
- }
-
- path := os.Getenv("path")
- for _, dir := range strings.Split(path, "\000") {
- if err := findExecutable(dir + "/" + file); err == nil {
- return dir + "/" + file, nil
- }
- }
- return "", &Error{file, ErrNotFound}
-}
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package exec
-
-import (
- "testing"
-)
-
-var nonExistentPaths = []string{
- "some-non-existent-path",
- "non-existent-path/slashed",
-}
-
-func TestLookPathNotFound(t *testing.T) {
- for _, name := range nonExistentPaths {
- path, err := LookPath(name)
- if err == nil {
- t.Fatalf("LookPath found %q in $PATH", name)
- }
- if path != "" {
- t.Fatalf("LookPath path == %q when err != nil", path)
- }
- perr, ok := err.(*Error)
- if !ok {
- t.Fatal("LookPath error is not an exec.Error")
- }
- if perr.Name != name {
- t.Fatalf("want Error name %q, got %q", name, perr.Name)
- }
- }
-}
+++ /dev/null
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build darwin freebsd linux openbsd
-
-package exec
-
-import (
- "errors"
- "os"
- "strings"
-)
-
-// ErrNotFound is the error resulting if a path search failed to find an executable file.
-var ErrNotFound = errors.New("executable file not found in $PATH")
-
-func findExecutable(file string) error {
- d, err := os.Stat(file)
- if err != nil {
- return err
- }
- if d.IsRegular() && d.Permission()&0111 != 0 {
- return nil
- }
- return os.EPERM
-}
-
-// LookPath searches for an executable binary named file
-// in the directories named by the PATH environment variable.
-// If file contains a slash, it is tried directly and the PATH is not consulted.
-func LookPath(file string) (string, error) {
- // NOTE(rsc): I wish we could use the Plan 9 behavior here
- // (only bypass the path if file begins with / or ./ or ../)
- // but that would not match all the Unix shells.
-
- if strings.Contains(file, "/") {
- err := findExecutable(file)
- if err == nil {
- return file, nil
- }
- return "", &Error{file, err}
- }
- pathenv := os.Getenv("PATH")
- for _, dir := range strings.Split(pathenv, ":") {
- if dir == "" {
- // Unix shell semantics: path element "" means "."
- dir = "."
- }
- if err := findExecutable(dir + "/" + file); err == nil {
- return dir + "/" + file, nil
- }
- }
- return "", &Error{file, ErrNotFound}
-}
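
A sketch of LookPath in use (gofmt is an arbitrary example of a command
expected to be on $PATH; fmt and log imports assumed):

    path, err := exec.LookPath("gofmt")
    if err != nil {
        log.Fatal("gofmt is not in $PATH")
    }
    fmt.Println("using", path)
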
+++ /dev/null
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package exec
-
-import (
- "errors"
- "os"
- "strings"
-)
-
-// ErrNotFound is the error resulting if a path search failed to find an executable file.
-var ErrNotFound = errors.New("executable file not found in %PATH%")
-
-func chkStat(file string) error {
- d, err := os.Stat(file)
- if err != nil {
- return err
- }
- if d.IsRegular() {
- return nil
- }
- return os.EPERM
-}
-
-func findExecutable(file string, exts []string) (string, error) {
- if len(exts) == 0 {
- return file, chkStat(file)
- }
- f := strings.ToLower(file)
- for _, e := range exts {
- if strings.HasSuffix(f, e) {
- return file, chkStat(file)
- }
- }
- for _, e := range exts {
- if f := file + e; chkStat(f) == nil {
- return f, nil
- }
- }
- return ``, os.ENOENT
-}
-
-func LookPath(file string) (f string, err error) {
- x := os.Getenv(`PATHEXT`)
- if x == `` {
- x = `.COM;.EXE;.BAT;.CMD`
- }
- exts := []string{}
- for _, e := range strings.Split(strings.ToLower(x), `;`) {
- if e == "" {
- continue
- }
- if e[0] != '.' {
- e = "." + e
- }
- exts = append(exts, e)
- }
- if strings.IndexAny(file, `:\/`) != -1 {
- if f, err = findExecutable(file, exts); err == nil {
- return
- }
- return ``, &Error{file, err}
- }
- if pathenv := os.Getenv(`PATH`); pathenv == `` {
- if f, err = findExecutable(`.\`+file, exts); err == nil {
- return
- }
- } else {
- for _, dir := range strings.Split(pathenv, `;`) {
- if f, err = findExecutable(dir+`\`+file, exts); err == nil {
- return
- }
- }
- }
- return ``, &Error{file, ErrNotFound}
-}
import (
"errors"
"fmt"
- "scanner"
+ "text/scanner"
"unicode"
- "utf8"
+ "unicode/utf8"
)
// ----------------------------------------------------------------------------
import (
"io"
- "scanner"
"strconv"
+ "text/scanner"
)
type parser struct {
package norm
-import "utf8"
+import "unicode/utf8"
const (
maxCombiningChars = 30
package norm
-import "utf8"
+import "unicode/utf8"
type input interface {
skipASCII(p int) int
"bytes"
"flag"
"fmt"
- "http"
"io"
"log"
+ "net/http"
"os"
"regexp"
"strconv"
// Package norm contains types and functions for normalizing Unicode strings.
package norm
-import "utf8"
+import "unicode/utf8"
// A Form denotes a canonical representation of Unicode code points.
// The Unicode-defined normalization and equivalence forms are:
"exp/norm"
"flag"
"fmt"
- "http"
"io"
"log"
+ "net/http"
"os"
"path"
"regexp"
"strconv"
"strings"
"time"
- "utf8"
+ "unicode/utf8"
)
func main() {
import (
"testing"
- "utf8"
+ "unicode/utf8"
)
// Test data is located in triedata_test.go; generated by maketesttables.
"fmt"
"hash/crc32"
"log"
- "utf8"
+ "unicode/utf8"
)
const blockSize = 64
import (
"compress/zlib"
"encoding/binary"
- "http"
"io"
+ "net/http"
"strings"
)
import (
"bytes"
- "http"
"io"
+ "net/http"
"reflect"
"testing"
)
import (
"bytes"
"compress/zlib"
- "http"
"io"
+ "net/http"
)
// Data Frame Format
import (
"encoding/binary"
- "http"
"io"
+ "net/http"
"strings"
)
import (
"errors"
+ "exp/sql/driver"
"fmt"
"reflect"
"strconv"
}
}
- sv := reflect.ValueOf(src)
+ var sv reflect.Value
switch d := dest.(type) {
case *string:
+ sv = reflect.ValueOf(src)
switch sv.Kind() {
case reflect.Bool,
reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
*d = fmt.Sprintf("%v", src)
return nil
}
+ case *bool:
+ bv, err := driver.Bool.ConvertValue(src)
+ if err == nil {
+ *d = bv.(bool)
+ }
+ return err
}
if scanner, ok := dest.(ScannerInto); ok {
return errors.New("destination not a pointer")
}
+ if !sv.IsValid() {
+ sv = reflect.ValueOf(src)
+ }
+
dv := reflect.Indirect(dpv)
if dv.Kind() == sv.Kind() {
dv.Set(sv)
switch dv.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- if s, ok := asString(src); ok {
- i64, err := strconv.Atoi64(s)
- if err != nil {
- return fmt.Errorf("converting string %q to a %s: %v", s, dv.Kind(), err)
- }
- if dv.OverflowInt(i64) {
- return fmt.Errorf("string %q overflows %s", s, dv.Kind())
- }
- dv.SetInt(i64)
- return nil
+ s := asString(src)
+ i64, err := strconv.Atoi64(s)
+ if err != nil {
+ return fmt.Errorf("converting string %q to a %s: %v", s, dv.Kind(), err)
+ }
+ if dv.OverflowInt(i64) {
+ return fmt.Errorf("string %q overflows %s", s, dv.Kind())
}
+ dv.SetInt(i64)
+ return nil
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
- if s, ok := asString(src); ok {
- u64, err := strconv.Atoui64(s)
- if err != nil {
- return fmt.Errorf("converting string %q to a %s: %v", s, dv.Kind(), err)
- }
- if dv.OverflowUint(u64) {
- return fmt.Errorf("string %q overflows %s", s, dv.Kind())
- }
- dv.SetUint(u64)
- return nil
+ s := asString(src)
+ u64, err := strconv.Atoui64(s)
+ if err != nil {
+ return fmt.Errorf("converting string %q to a %s: %v", s, dv.Kind(), err)
+ }
+ if dv.OverflowUint(u64) {
+ return fmt.Errorf("string %q overflows %s", s, dv.Kind())
+ }
+ dv.SetUint(u64)
+ return nil
+ case reflect.Float32, reflect.Float64:
+ s := asString(src)
+ f64, err := strconv.Atof64(s)
+ if err != nil {
+ return fmt.Errorf("converting string %q to a %s: %v", s, dv.Kind(), err)
+ }
+ if dv.OverflowFloat(f64) {
+ return fmt.Errorf("value %q overflows %s", s, dv.Kind())
}
+ dv.SetFloat(f64)
+ return nil
}
return fmt.Errorf("unsupported driver -> Scan pair: %T -> %T", src, dest)
}
-func asString(src interface{}) (s string, ok bool) {
+func asString(src interface{}) string {
switch v := src.(type) {
case string:
- return v, true
+ return v
case []byte:
- return string(v), true
+ return string(v)
}
- return "", false
+ return fmt.Sprintf("%v", src)
}
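
Inside the package, convertAssign is what Rows.Scan ultimately calls; the
changes above make the string-to-number cases go through strconv and route
*bool destinations through driver.Bool. A small sketch of the resulting
behaviour (not a test; error handling omitted):

    var f float64
    _ = convertAssign(&f, "1.5") // f == 1.5, parsed via strconv.Atof64

    var ok bool
    _ = convertAssign(&ok, int64(1)) // ok == true, via driver.Bool.ConvertValue
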
wantint int64
wantuint uint64
wantstr string
+ wantf32 float32
+ wantf64 float64
+ wantbool bool // used if d is of type *bool
wanterr string
}
scanint32 int32
scanuint8 uint8
scanuint16 uint16
+ scanbool bool
+ scanf32 float32
+ scanf64 float64
)
var conversionTests = []conversionTest{
{s: "256", d: &scanuint16, wantuint: 256},
{s: "-1", d: &scanint, wantint: -1},
{s: "foo", d: &scanint, wanterr: `converting string "foo" to a int: parsing "foo": invalid syntax`},
+
+ // True bools
+ {s: true, d: &scanbool, wantbool: true},
+ {s: "True", d: &scanbool, wantbool: true},
+ {s: "TRUE", d: &scanbool, wantbool: true},
+ {s: "1", d: &scanbool, wantbool: true},
+ {s: 1, d: &scanbool, wantbool: true},
+ {s: int64(1), d: &scanbool, wantbool: true},
+ {s: uint16(1), d: &scanbool, wantbool: true},
+
+ // False bools
+ {s: false, d: &scanbool, wantbool: false},
+ {s: "false", d: &scanbool, wantbool: false},
+ {s: "FALSE", d: &scanbool, wantbool: false},
+ {s: "0", d: &scanbool, wantbool: false},
+ {s: 0, d: &scanbool, wantbool: false},
+ {s: int64(0), d: &scanbool, wantbool: false},
+ {s: uint16(0), d: &scanbool, wantbool: false},
+
+ // Not bools
+ {s: "yup", d: &scanbool, wanterr: `sql/driver: couldn't convert "yup" into type bool`},
+ {s: 2, d: &scanbool, wanterr: `sql/driver: couldn't convert 2 into type bool`},
+
+ // Floats
+ {s: float64(1.5), d: &scanf64, wantf64: float64(1.5)},
+ {s: int64(1), d: &scanf64, wantf64: float64(1)},
+ {s: float64(1.5), d: &scanf32, wantf32: float32(1.5)},
+ {s: "1.5", d: &scanf32, wantf32: float32(1.5)},
+ {s: "1.5", d: &scanf64, wantf64: float64(1.5)},
}
func intValue(intptr interface{}) int64 {
return reflect.Indirect(reflect.ValueOf(intptr)).Uint()
}
+func float64Value(ptr interface{}) float64 {
+ return *(ptr.(*float64))
+}
+
+func float32Value(ptr interface{}) float32 {
+ return *(ptr.(*float32))
+}
+
func TestConversions(t *testing.T) {
for n, ct := range conversionTests {
err := convertAssign(ct.d, ct.s)
if ct.wantuint != 0 && ct.wantuint != uintValue(ct.d) {
errf("want uint %d, got %d", ct.wantuint, uintValue(ct.d))
}
+ if ct.wantf32 != 0 && ct.wantf32 != float32Value(ct.d) {
+ errf("want float32 %v, got %v", ct.wantf32, float32Value(ct.d))
+ }
+ if ct.wantf64 != 0 && ct.wantf64 != float64Value(ct.d) {
+ errf("want float32 %v, got %v", ct.wantf64, float64Value(ct.d))
+ }
+ if bp, boolTest := ct.d.(*bool); boolTest && *bp != ct.wantbool && ct.wanterr == "" {
+ errf("want bool %v, got %v", ct.wantbool, *bp)
+ }
}
}
// Driver is the interface that must be implemented by a database
// driver.
type Driver interface {
- // Open returns a new or cached connection to the database.
+ // Open returns a new connection to the database.
// The name is a string in a driver-specific format.
//
+ // Open may return a cached connection (one previously
+ // closed), but doing so is unnecessary; the sql package
+ // maintains a pool of idle connections for efficient re-use.
+ //
// The returned connection is only used by one goroutine at a
// time.
Open(name string) (Conn, error)
// Close invalidates and potentially stops any current
// prepared statements and transactions, marking this
- // connection as no longer in use. The driver may cache or
- // close its underlying connection to its database.
+ // connection as no longer in use.
+ //
+ // Because the sql package maintains a free pool of
+ // connections and only calls Close when there's a surplus of
+ // idle connections, it shouldn't be necessary for drivers to
+ // do their own connection caching.
Close() error
// Begin starts and returns a new transaction.
)
// ValueConverter is the interface providing the ConvertValue method.
+//
+// Various implementations of ValueConverter are provided by the
+// driver package to provide consistent implementations of conversions
+// between drivers. The ValueConverters have several uses:
+//
+// * converting from the subset types as provided by the sql package
+// into a database table's specific column type and making sure it
+// fits, such as making sure a particular int64 fits in a
+// table's uint16 column.
+//
+// * converting a value as given from the database into one of the
+// subset types.
+//
+// * by the sql package, for converting from a driver's subset type
+// to a user's type in a scan.
type ValueConverter interface {
// ConvertValue converts a value to a restricted subset type.
ConvertValue(v interface{}) (interface{}, error)
// Bool is a ValueConverter that converts input values to bools.
//
// The conversion rules are:
-// - .... TODO(bradfitz): TBD
+// - booleans are returned unchanged
+// - for integer types,
+// 1 is true
+// 0 is false,
+// other integers are an error
+// - for strings and []byte, same rules as strconv.Atob
+// - all other types are an error
var Bool boolType
type boolType struct{}
var _ ValueConverter = boolType{}
-func (boolType) ConvertValue(v interface{}) (interface{}, error) {
- return nil, fmt.Errorf("TODO(bradfitz): bool conversions")
+func (boolType) String() string { return "Bool" }
+
+func (boolType) ConvertValue(src interface{}) (interface{}, error) {
+ switch s := src.(type) {
+ case bool:
+ return s, nil
+ case string:
+ b, err := strconv.Atob(s)
+ if err != nil {
+ return nil, fmt.Errorf("sql/driver: couldn't convert %q into type bool", s)
+ }
+ return b, nil
+ case []byte:
+ b, err := strconv.Atob(string(s))
+ if err != nil {
+ return nil, fmt.Errorf("sql/driver: couldn't convert %q into type bool", s)
+ }
+ return b, nil
+ }
+
+ sv := reflect.ValueOf(src)
+ switch sv.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ iv := sv.Int()
+ if iv == 1 || iv == 0 {
+ return iv == 1, nil
+ }
+ return nil, fmt.Errorf("sql/driver: couldn't convert %d into type bool", iv)
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ uv := sv.Uint()
+ if uv == 1 || uv == 0 {
+ return uv == 1, nil
+ }
+ return nil, fmt.Errorf("sql/driver: couldn't convert %d into type bool", uv)
+ }
+
+ return nil, fmt.Errorf("sql/driver: couldn't convert %v (%T) into type bool", src, src)
}
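
Exercising the converter directly, as a client of the driver package might
when validating a value bound for a boolean column (a sketch; fmt import
assumed):

    v, err := driver.Bool.ConvertValue("1")
    fmt.Println(v, err) // true <nil>

    _, err = driver.Bool.ConvertValue(2)
    fmt.Println(err) // sql/driver: couldn't convert 2 into type bool
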
// Int32 is a ValueConverter that converts input values to int64,
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package driver
+
+import (
+ "reflect"
+ "testing"
+)
+
+type valueConverterTest struct {
+ c ValueConverter
+ in interface{}
+ out interface{}
+ err string
+}
+
+var valueConverterTests = []valueConverterTest{
+ {Bool, "true", true, ""},
+ {Bool, "True", true, ""},
+ {Bool, []byte("t"), true, ""},
+ {Bool, true, true, ""},
+ {Bool, "1", true, ""},
+ {Bool, 1, true, ""},
+ {Bool, int64(1), true, ""},
+ {Bool, uint16(1), true, ""},
+ {Bool, "false", false, ""},
+ {Bool, false, false, ""},
+ {Bool, "0", false, ""},
+ {Bool, 0, false, ""},
+ {Bool, int64(0), false, ""},
+ {Bool, uint16(0), false, ""},
+ {c: Bool, in: "foo", err: "sql/driver: couldn't convert \"foo\" into type bool"},
+ {c: Bool, in: 2, err: "sql/driver: couldn't convert 2 into type bool"},
+}
+
+func TestValueConverters(t *testing.T) {
+ for i, tt := range valueConverterTests {
+ out, err := tt.c.ConvertValue(tt.in)
+ goterr := ""
+ if err != nil {
+ goterr = err.Error()
+ }
+ if goterr != tt.err {
+ t.Errorf("test %d: %s(%T(%v)) error = %q; want error = %q",
+ i, tt.c, tt.in, tt.in, goterr, tt.err)
+ }
+ if tt.err != "" {
+ continue
+ }
+ if !reflect.DeepEqual(out, tt.out) {
+ t.Errorf("test %d: %s(%T(%v)) = %v (%T); want %v (%T)",
+ i, tt.c, tt.in, tt.in, out, out, tt.out, tt.out)
+ }
+ }
+}
for i, v := range rc.rows[rc.pos].cols {
// TODO(bradfitz): convert to subset types? naah, I
// think the subset types should only be input to
- // driver, but the db package should be able to handle
+ // driver, but the sql package should be able to handle
// a wider range of types coming out of drivers. all
// for ease of drivers, and to prevent drivers from
// messing up conversions or doing them differently.
"errors"
"fmt"
"io"
- "runtime"
"sync"
"exp/sql/driver"
// If the driver does not implement driver.Execer, we need
// a connection.
- conn, err := db.conn()
+ ci, err := db.conn()
if err != nil {
return nil, err
}
- defer db.putConn(conn)
+ defer db.putConn(ci)
- if execer, ok := conn.(driver.Execer); ok {
+ if execer, ok := ci.(driver.Execer); ok {
resi, err := execer.Exec(query, args)
if err != nil {
return nil, err
return result{resi}, nil
}
- sti, err := conn.Prepare(query)
+ sti, err := ci.Prepare(query)
if err != nil {
return nil, err
}
// Row's Scan method is called.
func (db *DB) QueryRow(query string, args ...interface{}) *Row {
rows, err := db.Query(query, args...)
- if err != nil {
- return &Row{err: err}
- }
- return &Row{rows: rows}
+ return &Row{rows: rows, err: err}
}
-// Begin starts a transaction. The isolation level is dependent on
+// Begin starts a transaction. The isolation level is dependent on
// the driver.
func (db *DB) Begin() (*Tx, error) {
- // TODO(bradfitz): add another method for beginning a transaction
- // at a specific isolation level.
- panic(todo())
+ ci, err := db.conn()
+ if err != nil {
+ return nil, err
+ }
+ txi, err := ci.Begin()
+ if err != nil {
+ db.putConn(ci)
+ return nil, fmt.Errorf("sql: failed to Begin transaction: %v", err)
+ }
+ return &Tx{
+ db: db,
+ ci: ci,
+ txi: txi,
+ }, nil
}
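A caller-side sketch of the Commit/Rollback discipline that Begin now enables. Illustrative only: it assumes the package is imported via its exp/sql path, and the driver, data source, and table name are made up.

package example

import "exp/sql"

// updateCounter increments a counter inside a transaction. The table
// and query are assumptions for the sake of the sketch.
func updateCounter(db *sql.DB) error {
	tx, err := db.Begin()
	if err != nil {
		return err
	}
	if _, err := tx.Exec("UPDATE t SET n = n + 1"); err != nil {
		tx.Rollback()
		return err
	}
	if err := tx.Commit(); err != nil {
		return err
	}
	// The transaction has ended: any further call on tx, such as
	// another tx.Exec, now fails with sql.ErrTransactionFinished.
	return nil
}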
// DriverDatabase returns the database's underlying driver.
}
// Tx is an in-progress database transaction.
+//
+// A transaction must end with a call to Commit or Rollback.
+//
+// After a call to Commit or Rollback, all operations on the
+// transaction fail with ErrTransactionFinished.
type Tx struct {
+ db *DB
+
+ // ci is owned exclusively until Commit or Rollback, at which point
+ // it's returned with putConn.
+ ci driver.Conn
+ txi driver.Tx
+
+ // cimu is held while somebody is using ci (between grabConn
+ // and releaseConn)
+ cimu sync.Mutex
+ // done transitions from false to true exactly once, on Commit
+ // or Rollback. once done, all operations fail with
+ // ErrTransactionFinished.
+ done bool
+}
+
+var ErrTransactionFinished = errors.New("sql: Transaction has already been committed or rolled back")
+
+func (tx *Tx) close() {
+ if tx.done {
+ panic("double close") // internal error
+ }
+ tx.done = true
+ tx.db.putConn(tx.ci)
+ tx.ci = nil
+ tx.txi = nil
+}
+
+func (tx *Tx) grabConn() (driver.Conn, error) {
+ if tx.done {
+ return nil, ErrTransactionFinished
+ }
+ tx.cimu.Lock()
+ return tx.ci, nil
+}
+
+func (tx *Tx) releaseConn() {
+ tx.cimu.Unlock()
}
// Commit commits the transaction.
func (tx *Tx) Commit() error {
- panic(todo())
+ if tx.done {
+ return ErrTransactionFinished
+ }
+ defer tx.close()
+ return tx.txi.Commit()
}
// Rollback aborts the transaction.
func (tx *Tx) Rollback() error {
- panic(todo())
+ if tx.done {
+ return ErrTransactionFinished
+ }
+ defer tx.close()
+ return tx.txi.Rollback()
}
// Prepare creates a prepared statement.
+//
+// The statement is only valid within the scope of this transaction.
func (tx *Tx) Prepare(query string) (*Stmt, error) {
- panic(todo())
+ // TODO(bradfitz): the restriction that the returned statement
+ // is only valid for this Transaction is lame and negates a
+ // lot of the benefit of prepared statements. We could be
+ // more efficient here and either provide a method to take an
+ // existing Stmt (created on perhaps a different Conn), and
+ // re-create it on this Conn if necessary. Or, better: keep a
+ // map in DB of query string to Stmts, and have Stmt.Execute
+ // do the right thing and re-prepare if the Conn in use
+ // doesn't have that prepared statement. But we'll want to
+ // avoid caching the statement in the case where we only call
+ // conn.Prepare implicitly (such as in db.Exec or tx.Exec),
+ // but the caller package can't be holding a reference to the
+ // returned statement. Perhaps just looking at the reference
+ // count (by noting Stmt.Close) would be enough. We might also
+ // want a finalizer on Stmt to drop the reference count.
+ ci, err := tx.grabConn()
+ if err != nil {
+ return nil, err
+ }
+ defer tx.releaseConn()
+
+ si, err := ci.Prepare(query)
+ if err != nil {
+ return nil, err
+ }
+
+ stmt := &Stmt{
+ db: tx.db,
+ tx: tx,
+ txsi: si,
+ query: query,
+ }
+ return stmt, nil
}
// Exec executes a query that doesn't return rows.
// For example: an INSERT and UPDATE.
-func (tx *Tx) Exec(query string, args ...interface{}) {
- panic(todo())
+func (tx *Tx) Exec(query string, args ...interface{}) (Result, error) {
+ ci, err := tx.grabConn()
+ if err != nil {
+ return nil, err
+ }
+ defer tx.releaseConn()
+
+ if execer, ok := ci.(driver.Execer); ok {
+ resi, err := execer.Exec(query, args)
+ if err != nil {
+ return nil, err
+ }
+ return result{resi}, nil
+ }
+
+ sti, err := ci.Prepare(query)
+ if err != nil {
+ return nil, err
+ }
+ defer sti.Close()
+ resi, err := sti.Exec(args)
+ if err != nil {
+ return nil, err
+ }
+ return result{resi}, nil
}
// Query executes a query that returns rows, typically a SELECT.
func (tx *Tx) Query(query string, args ...interface{}) (*Rows, error) {
- panic(todo())
+ if tx.done {
+ return nil, ErrTransactionFinished
+ }
+ stmt, err := tx.Prepare(query)
+ if err != nil {
+ return nil, err
+ }
+ defer stmt.Close()
+ return stmt.Query(args...)
}
// QueryRow executes a query that is expected to return at most one row.
// QueryRow always returns a non-nil value. Errors are deferred until
// Row's Scan method is called.
func (tx *Tx) QueryRow(query string, args ...interface{}) *Row {
- panic(todo())
+ rows, err := tx.Query(query, args...)
+ return &Row{rows: rows, err: err}
}
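Continuing the previous sketch (same assumed exp/sql import and hypothetical table): a statement prepared with tx.Prepare is bound to the transaction's connection and is only valid until Commit or Rollback; the placeholder syntax is driver-dependent.

// insertNames prepares one statement and reuses it for several rows,
// all inside a single transaction.
func insertNames(db *sql.DB, names []string) (count int, err error) {
	tx, err := db.Begin()
	if err != nil {
		return 0, err
	}
	// Only valid within this transaction; closed before Commit.
	stmt, err := tx.Prepare("INSERT INTO t (name) VALUES (?)")
	if err != nil {
		tx.Rollback()
		return 0, err
	}
	for _, name := range names {
		if _, err := stmt.Exec(name); err != nil {
			stmt.Close()
			tx.Rollback()
			return 0, err
		}
	}
	stmt.Close()
	if err := tx.QueryRow("SELECT COUNT(*) FROM t").Scan(&count); err != nil {
		tx.Rollback()
		return 0, err
	}
	return count, tx.Commit()
}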
// connStmt is a prepared statement on a particular connection.
db *DB // where we came from
query string // that created the Stmt
- mu sync.Mutex
+ // Set if the statement is in a transaction; otherwise both are nil:
+ tx *Tx
+ txsi driver.Stmt
+
+ mu sync.Mutex // protects the rest of the fields
closed bool
- css []connStmt // can use any that have idle connections
-}
-func todo() string {
- _, file, line, _ := runtime.Caller(1)
- return fmt.Sprintf("%s:%d: TODO: implement", file, line)
+ // css is a list of underlying driver statement interfaces
+ // that are valid on particular connections. This is only
+ // used if tx == nil and one is found that has idle
+ // connections. If tx != nil, txsi is always used.
+ css []connStmt
}
// Exec executes a prepared statement with the given arguments and
// returns a Result summarizing the effect of the statement.
func (s *Stmt) Exec(args ...interface{}) (Result, error) {
- ci, si, err := s.connStmt()
+ _, releaseConn, si, err := s.connStmt()
if err != nil {
return nil, err
}
- defer s.db.putConn(ci)
+ defer releaseConn()
if want := si.NumInput(); len(args) != want {
return nil, fmt.Errorf("db: expected %d arguments, got %d", want, len(args))
return result{resi}, nil
}
-func (s *Stmt) connStmt(args ...interface{}) (driver.Conn, driver.Stmt, error) {
+// connStmt returns a free driver connection on which to execute the
+// statement, a function to call to release the connection, and a
+// statement bound to that connection.
+func (s *Stmt) connStmt() (ci driver.Conn, releaseConn func(), si driver.Stmt, err error) {
s.mu.Lock()
if s.closed {
- return nil, nil, errors.New("db: statement is closed")
+ s.mu.Unlock()
+ err = errors.New("db: statement is closed")
+ return
}
+
+ // In a transaction, we always use the connection that the
+ // transaction was created on.
+ if s.tx != nil {
+ s.mu.Unlock()
+ ci, err = s.tx.grabConn() // blocks, waiting for the connection.
+ if err != nil {
+ return
+ }
+ releaseConn = func() { s.tx.releaseConn() }
+ return ci, releaseConn, s.txsi, nil
+ }
+
var cs connStmt
match := false
for _, v := range s.css {
if !match {
ci, err := s.db.conn()
if err != nil {
- return nil, nil, err
+ return nil, nil, nil, err
}
si, err := ci.Prepare(s.query)
if err != nil {
- return nil, nil, err
+ return nil, nil, nil, err
}
s.mu.Lock()
cs = connStmt{ci, si}
s.mu.Unlock()
}
- return cs.ci, cs.si, nil
+ conn := cs.ci
+ releaseConn = func() { s.db.putConn(conn) }
+ return conn, releaseConn, cs.si, nil
}
// Query executes a prepared query statement with the given arguments
// and returns the query results as a *Rows.
func (s *Stmt) Query(args ...interface{}) (*Rows, error) {
- ci, si, err := s.connStmt(args...)
+ ci, releaseConn, si, err := s.connStmt()
if err != nil {
return nil, err
}
s.db.putConn(ci)
return nil, err
}
- // Note: ownership of ci passes to the *Rows
+ // Note: ownership of ci passes to the *Rows, to be freed
+ // with releaseConn.
rows := &Rows{
- db: s.db,
- ci: ci,
- rowsi: rowsi,
+ db: s.db,
+ ci: ci,
+ releaseConn: releaseConn,
+ rowsi: rowsi,
}
return rows, nil
}
// Close closes the statement.
func (s *Stmt) Close() error {
s.mu.Lock()
- defer s.mu.Unlock() // TODO(bradfitz): move this unlock after 'closed = true'?
+ defer s.mu.Unlock()
if s.closed {
return nil
}
s.closed = true
- for _, v := range s.css {
- if ci, match := s.db.connIfFree(v.ci); match {
- v.si.Close()
- s.db.putConn(ci)
- } else {
- // TODO(bradfitz): care that we can't close
- // this statement because the statement's
- // connection is in use?
+
+ if s.tx != nil {
+ s.txsi.Close()
+ } else {
+ for _, v := range s.css {
+ if ci, match := s.db.connIfFree(v.ci); match {
+ v.si.Close()
+ s.db.putConn(ci)
+ } else {
+ // TODO(bradfitz): care that we can't close
+ // this statement because the statement's
+ // connection is in use?
+ }
}
}
return nil
// err = rows.Scan(&id, &name)
// ...
// }
-// err = rows.Error() // get any Error encountered during iteration
+// err = rows.Err() // get any error encountered during iteration
// ...
type Rows struct {
- db *DB
- ci driver.Conn // owned; must be returned when Rows is closed
- rowsi driver.Rows
+ db *DB
+ ci driver.Conn // owned; must call releaseConn when closed to release
+ releaseConn func()
+ rowsi driver.Rows
closed bool
lastcols []interface{}
return rs.lasterr == nil
}
-// Error returns the error, if any, that was encountered during iteration.
-func (rs *Rows) Error() error {
+// Err returns the error, if any, that was encountered during iteration.
+func (rs *Rows) Err() error {
if rs.lasterr == io.EOF {
return nil
}
}
rs.closed = true
err := rs.rowsi.Close()
- rs.db.putConn(rs.ci)
+ rs.releaseConn()
return err
}
package ssh
import (
- "big"
"crypto"
"crypto/rand"
"errors"
"fmt"
"io"
+ "math/big"
"net"
"sync"
)
return c.transport.reader.setupKeys(serverKeys, K, H, H, hashFunc)
}
-// authenticate authenticates with the remote server. See RFC 4252.
-// Only "password" authentication is supported.
-func (c *ClientConn) authenticate() error {
- if err := c.writePacket(marshal(msgServiceRequest, serviceRequestMsg{serviceUserAuth})); err != nil {
- return err
- }
- packet, err := c.readPacket()
- if err != nil {
- return err
- }
-
- var serviceAccept serviceAcceptMsg
- if err = unmarshal(&serviceAccept, packet, msgServiceAccept); err != nil {
- return err
- }
-
- // TODO(dfc) support proper authentication method negotation
- method := "none"
- if c.config.Password != "" {
- method = "password"
- }
- if err := c.sendUserAuthReq(method); err != nil {
- return err
- }
-
- if packet, err = c.readPacket(); err != nil {
- return err
- }
-
- if packet[0] != msgUserAuthSuccess {
- return UnexpectedMessageError{msgUserAuthSuccess, packet[0]}
- }
- return nil
-}
-
-func (c *ClientConn) sendUserAuthReq(method string) error {
- length := stringLength([]byte(c.config.Password)) + 1
- payload := make([]byte, length)
- // always false for password auth, see RFC 4252 Section 8.
- payload[0] = 0
- marshalString(payload[1:], []byte(c.config.Password))
-
- return c.writePacket(marshal(msgUserAuthRequest, userAuthRequestMsg{
- User: c.config.User,
- Service: serviceSSH,
- Method: method,
- Payload: payload,
- }))
-}
-
// kexDH performs Diffie-Hellman key agreement on a ClientConn. The
// returned values are given the same names as in RFC 4253, section 8.
func (c *ClientConn) kexDH(group *dhGroup, hashFunc crypto.Hash, magics *handshakeMagics, hostKeyAlgo string) ([]byte, []byte, error) {
// The username to authenticate.
User string
- // Used for "password" method authentication.
- Password string
+ // A slice of ClientAuth methods. Only the first instance
+ // of a particular RFC 4252 method will be used during authentication.
+ Auth []ClientAuth
}
func (c *ClientConfig) rand() io.Reader {
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+ "errors"
+)
+
+// authenticate authenticates with the remote server. See RFC 4252.
+func (c *ClientConn) authenticate() error {
+ // initiate user auth session
+ if err := c.writePacket(marshal(msgServiceRequest, serviceRequestMsg{serviceUserAuth})); err != nil {
+ return err
+ }
+ packet, err := c.readPacket()
+ if err != nil {
+ return err
+ }
+ var serviceAccept serviceAcceptMsg
+ if err := unmarshal(&serviceAccept, packet, msgServiceAccept); err != nil {
+ return err
+ }
+ // during the authentication phase the client first attempts the "none" method
+ // then any untried methods suggested by the server.
+ tried, remain := make(map[string]bool), make(map[string]bool)
+ for auth := ClientAuth(new(noneAuth)); auth != nil; {
+ ok, methods, err := auth.auth(c.config.User, c.transport)
+ if err != nil {
+ return err
+ }
+ if ok {
+ // success
+ return nil
+ }
+ tried[auth.method()] = true
+ delete(remain, auth.method())
+ for _, meth := range methods {
+ if tried[meth] {
+ // if we've tried meth already, skip it.
+ continue
+ }
+ remain[meth] = true
+ }
+ auth = nil
+ for _, a := range c.config.Auth {
+ if remain[a.method()] {
+ auth = a
+ break
+ }
+ }
+ }
+ return errors.New("ssh: unable to authenticate, no supported methods remain")
+}
+
+// A ClientAuth represents an instance of an RFC 4252 authentication method.
+type ClientAuth interface {
+ // auth authenticates user over transport t.
+ // Returns true if authentication is successful.
+ // If authentication is not successful, a []string of alternative
+ // method names is returned.
+ auth(user string, t *transport) (bool, []string, error)
+
+ // method returns the RFC 4252 method name.
+ method() string
+}
+
+// "none" authentication, RFC 4252 section 5.2.
+type noneAuth int
+
+func (n *noneAuth) auth(user string, t *transport) (bool, []string, error) {
+ if err := t.writePacket(marshal(msgUserAuthRequest, userAuthRequestMsg{
+ User: user,
+ Service: serviceSSH,
+ Method: "none",
+ })); err != nil {
+ return false, nil, err
+ }
+
+ packet, err := t.readPacket()
+ if err != nil {
+ return false, nil, err
+ }
+
+ switch packet[0] {
+ case msgUserAuthSuccess:
+ return true, nil, nil
+ case msgUserAuthFailure:
+ msg := decode(packet).(*userAuthFailureMsg)
+ return false, msg.Methods, nil
+ }
+ return false, nil, UnexpectedMessageError{msgUserAuthSuccess, packet[0]}
+}
+
+func (n *noneAuth) method() string {
+ return "none"
+}
+
+// "password" authentication, RFC 4252 Section 8.
+type passwordAuth struct {
+ ClientPassword
+}
+
+func (p *passwordAuth) auth(user string, t *transport) (bool, []string, error) {
+ type passwordAuthMsg struct {
+ User string
+ Service string
+ Method string
+ Reply bool
+ Password string
+ }
+
+ pw, err := p.Password(user)
+ if err != nil {
+ return false, nil, err
+ }
+
+ if err := t.writePacket(marshal(msgUserAuthRequest, passwordAuthMsg{
+ User: user,
+ Service: serviceSSH,
+ Method: "password",
+ Reply: false,
+ Password: pw,
+ })); err != nil {
+ return false, nil, err
+ }
+
+ packet, err := t.readPacket()
+ if err != nil {
+ return false, nil, err
+ }
+
+ switch packet[0] {
+ case msgUserAuthSuccess:
+ return true, nil, nil
+ case msgUserAuthFailure:
+ msg := decode(packet).(*userAuthFailureMsg)
+ return false, msg.Methods, nil
+ }
+ return false, nil, UnexpectedMessageError{msgUserAuthSuccess, packet[0]}
+}
+
+func (p *passwordAuth) method() string {
+ return "password"
+}
+
+// A ClientPassword implements access to a client's passwords.
+type ClientPassword interface {
+ // Password returns the password to use for user.
+ Password(user string) (password string, err error)
+}
+
+// ClientAuthPassword returns a ClientAuth using password authentication.
+func ClientAuthPassword(impl ClientPassword) ClientAuth {
+ return &passwordAuth{impl}
+}
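A hedged sketch of how a caller wires this up, in the spirit of the ClientConfig doc example elsewhere in this change; the exp/ssh import path, password value, and server address are assumptions, not part of the change itself.

package example

import "exp/ssh"

// secret is a trivial ssh.ClientPassword implementation that always
// returns the same password; real code would prompt or look it up.
type secret string

func (s secret) Password(user string) (string, error) { return string(s), nil }

func dialWithPassword() error {
	config := &ssh.ClientConfig{
		User: "username",
		Auth: []ssh.ClientAuth{
			// Tried after the implicit "none" attempt, when the server
			// advertises the "password" method.
			ssh.ClientAuthPassword(secret("123456")),
		},
	}
	_, err := ssh.Dial("yourserver.com:22", config)
	return err
}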
package ssh
import (
- "big"
+ "math/big"
"strconv"
"sync"
)
config := &ClientConfig{
User: "username",
- Password: "123456",
+ Auth: []ClientAuth{ ... },
}
client, err := Dial("yourserver.com:22", config)
package ssh
import (
- "big"
"bytes"
"io"
+ "math/big"
"reflect"
)
package ssh
import (
- "big"
- "rand"
+ "math/big"
+ "math/rand"
"reflect"
"testing"
"testing/quick"
package ssh
import (
- "big"
"bytes"
"crypto"
"crypto/rand"
"encoding/pem"
"errors"
"io"
+ "math/big"
"net"
"sync"
)
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package html
-
-import (
- "strings"
-)
-
-// attrTypeMap[n] describes the value of the given attribute.
-// If an attribute affects (or can mask) the encoding or interpretation of
-// other content, or affects the contents, idempotency, or credentials of a
-// network message, then the value in this map is contentTypeUnsafe.
-// This map is derived from HTML5, specifically
-// http://www.w3.org/TR/html5/Overview.html#attributes-1
-// as well as "%URI"-typed attributes from
-// http://www.w3.org/TR/html4/index/attributes.html
-var attrTypeMap = map[string]contentType{
- "accept": contentTypePlain,
- "accept-charset": contentTypeUnsafe,
- "action": contentTypeURL,
- "alt": contentTypePlain,
- "archive": contentTypeURL,
- "async": contentTypeUnsafe,
- "autocomplete": contentTypePlain,
- "autofocus": contentTypePlain,
- "autoplay": contentTypePlain,
- "background": contentTypeURL,
- "border": contentTypePlain,
- "checked": contentTypePlain,
- "cite": contentTypeURL,
- "challenge": contentTypeUnsafe,
- "charset": contentTypeUnsafe,
- "class": contentTypePlain,
- "classid": contentTypeURL,
- "codebase": contentTypeURL,
- "cols": contentTypePlain,
- "colspan": contentTypePlain,
- "content": contentTypeUnsafe,
- "contenteditable": contentTypePlain,
- "contextmenu": contentTypePlain,
- "controls": contentTypePlain,
- "coords": contentTypePlain,
- "crossorigin": contentTypeUnsafe,
- "data": contentTypeURL,
- "datetime": contentTypePlain,
- "default": contentTypePlain,
- "defer": contentTypeUnsafe,
- "dir": contentTypePlain,
- "dirname": contentTypePlain,
- "disabled": contentTypePlain,
- "draggable": contentTypePlain,
- "dropzone": contentTypePlain,
- "enctype": contentTypeUnsafe,
- "for": contentTypePlain,
- "form": contentTypeUnsafe,
- "formaction": contentTypeURL,
- "formenctype": contentTypeUnsafe,
- "formmethod": contentTypeUnsafe,
- "formnovalidate": contentTypeUnsafe,
- "formtarget": contentTypePlain,
- "headers": contentTypePlain,
- "height": contentTypePlain,
- "hidden": contentTypePlain,
- "high": contentTypePlain,
- "href": contentTypeURL,
- "hreflang": contentTypePlain,
- "http-equiv": contentTypeUnsafe,
- "icon": contentTypeURL,
- "id": contentTypePlain,
- "ismap": contentTypePlain,
- "keytype": contentTypeUnsafe,
- "kind": contentTypePlain,
- "label": contentTypePlain,
- "lang": contentTypePlain,
- "language": contentTypeUnsafe,
- "list": contentTypePlain,
- "longdesc": contentTypeURL,
- "loop": contentTypePlain,
- "low": contentTypePlain,
- "manifest": contentTypeURL,
- "max": contentTypePlain,
- "maxlength": contentTypePlain,
- "media": contentTypePlain,
- "mediagroup": contentTypePlain,
- "method": contentTypeUnsafe,
- "min": contentTypePlain,
- "multiple": contentTypePlain,
- "name": contentTypePlain,
- "novalidate": contentTypeUnsafe,
- // Skip handler names from
- // http://www.w3.org/TR/html5/Overview.html#event-handlers-on-elements-document-objects-and-window-objects
- // since we have special handling in attrType.
- "open": contentTypePlain,
- "optimum": contentTypePlain,
- "pattern": contentTypeUnsafe,
- "placeholder": contentTypePlain,
- "poster": contentTypeURL,
- "profile": contentTypeURL,
- "preload": contentTypePlain,
- "pubdate": contentTypePlain,
- "radiogroup": contentTypePlain,
- "readonly": contentTypePlain,
- "rel": contentTypeUnsafe,
- "required": contentTypePlain,
- "reversed": contentTypePlain,
- "rows": contentTypePlain,
- "rowspan": contentTypePlain,
- "sandbox": contentTypeUnsafe,
- "spellcheck": contentTypePlain,
- "scope": contentTypePlain,
- "scoped": contentTypePlain,
- "seamless": contentTypePlain,
- "selected": contentTypePlain,
- "shape": contentTypePlain,
- "size": contentTypePlain,
- "sizes": contentTypePlain,
- "span": contentTypePlain,
- "src": contentTypeURL,
- "srcdoc": contentTypeHTML,
- "srclang": contentTypePlain,
- "start": contentTypePlain,
- "step": contentTypePlain,
- "style": contentTypeCSS,
- "tabindex": contentTypePlain,
- "target": contentTypePlain,
- "title": contentTypePlain,
- "type": contentTypeUnsafe,
- "usemap": contentTypeURL,
- "value": contentTypeUnsafe,
- "width": contentTypePlain,
- "wrap": contentTypePlain,
- "xmlns": contentTypeURL,
-}
-
-// attrType returns a conservative (upper-bound on authority) guess at the
-// type of the named attribute.
-func attrType(name string) contentType {
- name = strings.ToLower(name)
- if strings.HasPrefix(name, "data-") {
- // Strip data- so that custom attribute heuristics below are
- // widely applied.
- // Treat data-action as URL below.
- name = name[5:]
- } else if colon := strings.IndexRune(name, ':'); colon != -1 {
- if name[:colon] == "xmlns" {
- return contentTypeURL
- }
- // Treat svg:href and xlink:href as href below.
- name = name[colon+1:]
- }
- if t, ok := attrTypeMap[name]; ok {
- return t
- }
- // Treat partial event handler names as script.
- if strings.HasPrefix(name, "on") {
- return contentTypeJS
- }
-
- // Heuristics to prevent "javascript:..." injection in custom
- // data attributes and custom attributes like g:tweetUrl.
- // http://www.w3.org/TR/html5/elements.html#embedding-custom-non-visible-data-with-the-data-attributes:
- // "Custom data attributes are intended to store custom data
- // private to the page or application, for which there are no
- // more appropriate attributes or elements."
- // Developers seem to store URL content in data URLs that start
- // or end with "URI" or "URL".
- if strings.Contains(name, "src") ||
- strings.Contains(name, "uri") ||
- strings.Contains(name, "url") {
- return contentTypeURL
- }
- return contentTypePlain
-}
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package html
-
-import (
- "template/parse"
-)
-
-// clone clones a template Node.
-func clone(n parse.Node) parse.Node {
- switch t := n.(type) {
- case *parse.ActionNode:
- return cloneAction(t)
- case *parse.IfNode:
- b := new(parse.IfNode)
- copyBranch(&b.BranchNode, &t.BranchNode)
- return b
- case *parse.ListNode:
- return cloneList(t)
- case *parse.RangeNode:
- b := new(parse.RangeNode)
- copyBranch(&b.BranchNode, &t.BranchNode)
- return b
- case *parse.TemplateNode:
- return cloneTemplate(t)
- case *parse.TextNode:
- return cloneText(t)
- case *parse.WithNode:
- b := new(parse.WithNode)
- copyBranch(&b.BranchNode, &t.BranchNode)
- return b
- }
- panic("cloning " + n.String() + " is unimplemented")
-}
-
-// cloneAction returns a deep clone of n.
-func cloneAction(n *parse.ActionNode) *parse.ActionNode {
- // We use keyless fields because they won't compile if a field is added.
- return &parse.ActionNode{n.NodeType, n.Line, clonePipe(n.Pipe)}
-}
-
-// cloneList returns a deep clone of n.
-func cloneList(n *parse.ListNode) *parse.ListNode {
- if n == nil {
- return nil
- }
- // We use keyless fields because they won't compile if a field is added.
- c := parse.ListNode{n.NodeType, make([]parse.Node, len(n.Nodes))}
- for i, child := range n.Nodes {
- c.Nodes[i] = clone(child)
- }
- return &c
-}
-
-// clonePipe returns a shallow clone of n.
-// The escaper does not modify pipe descendants in place so there's no need to
-// clone deeply.
-func clonePipe(n *parse.PipeNode) *parse.PipeNode {
- if n == nil {
- return nil
- }
- // We use keyless fields because they won't compile if a field is added.
- return &parse.PipeNode{n.NodeType, n.Line, n.Decl, n.Cmds}
-}
-
-// cloneTemplate returns a deep clone of n.
-func cloneTemplate(n *parse.TemplateNode) *parse.TemplateNode {
- // We use keyless fields because they won't compile if a field is added.
- return &parse.TemplateNode{n.NodeType, n.Line, n.Name, clonePipe(n.Pipe)}
-}
-
-// cloneText clones the given node sharing its []byte.
-func cloneText(n *parse.TextNode) *parse.TextNode {
- // We use keyless fields because they won't compile if a field is added.
- return &parse.TextNode{n.NodeType, n.Text}
-}
-
-// copyBranch clones src into dst.
-func copyBranch(dst, src *parse.BranchNode) {
- // We use keyless fields because they won't compile if a field is added.
- *dst = parse.BranchNode{
- src.NodeType,
- src.Line,
- clonePipe(src.Pipe),
- cloneList(src.List),
- cloneList(src.ElseList),
- }
-}
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package html
-
-import (
- "bytes"
- "template"
- "template/parse"
- "testing"
-)
-
-func TestClone(t *testing.T) {
- tests := []struct {
- input, want, wantClone string
- }{
- {
- `Hello, {{if true}}{{"<World>"}}{{end}}!`,
- "Hello, <World>!",
- "Hello, <World>!",
- },
- {
- `Hello, {{if false}}{{.X}}{{else}}{{"<World>"}}{{end}}!`,
- "Hello, <World>!",
- "Hello, <World>!",
- },
- {
- `Hello, {{with "<World>"}}{{.}}{{end}}!`,
- "Hello, <World>!",
- "Hello, <World>!",
- },
- {
- `{{range .}}<p>{{.}}</p>{{end}}`,
- "<p>foo</p><p><bar></p><p>baz</p>",
- "<p>foo</p><p><bar></p><p>baz</p>",
- },
- {
- `Hello, {{"<World>" | html}}!`,
- "Hello, <World>!",
- "Hello, <World>!",
- },
- {
- `Hello{{if 1}}, World{{else}}{{template "d"}}{{end}}!`,
- "Hello, World!",
- "Hello, World!",
- },
- }
-
- for _, test := range tests {
- s := template.Must(template.New("s").Parse(test.input))
- d := template.New("d")
- d.Tree = &parse.Tree{Name: d.Name(), Root: cloneList(s.Root)}
-
- if want, got := s.Root.String(), d.Root.String(); want != got {
- t.Errorf("want %q, got %q", want, got)
- }
-
- d, err := Escape(d)
- if err != nil {
- t.Errorf("%q: failed to escape: %s", test.input, err)
- continue
- }
-
- if want, got := "s", s.Name(); want != got {
- t.Errorf("want %q, got %q", want, got)
- continue
- }
- if want, got := "d", d.Name(); want != got {
- t.Errorf("want %q, got %q", want, got)
- continue
- }
-
- data := []string{"foo", "<bar>", "baz"}
-
- // Make sure escaping d did not affect s.
- var b bytes.Buffer
- s.Execute(&b, data)
- if got := b.String(); got != test.want {
- t.Errorf("%q: want %q, got %q", test.input, test.want, got)
- continue
- }
-
- b.Reset()
- d.Execute(&b, data)
- if got := b.String(); got != test.wantClone {
- t.Errorf("%q: want %q, got %q", test.input, test.wantClone, got)
- }
- }
-}
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package html
-
-import (
- "fmt"
-)
-
-// Strings of content from a trusted source.
-type (
- // CSS encapsulates known safe content that matches any of:
- // (1) The CSS3 stylesheet production, such as `p { color: purple }`.
- // (2) The CSS3 rule production, such as `a[href=~"https:"].foo#bar`.
- // (3) CSS3 declaration productions, such as `color: red; margin: 2px`.
- // (4) The CSS3 value production, such as `rgba(0, 0, 255, 127)`.
- // See http://www.w3.org/TR/css3-syntax/#style
- CSS string
-
- // HTML encapsulates a known safe HTML document fragment.
- // It should not be used for HTML from a third-party, or HTML with
- // unclosed tags or comments. The outputs of a sound HTML sanitizer
- // and a template escaped by this package are fine for use with HTML.
- HTML string
-
- // HTMLAttr encapsulates an HTML attribute from a trusted source,
- // for example: ` dir="ltr"`.
- HTMLAttr string
-
- // JS encapsulates a known safe EcmaScript5 Expression, or example,
- // `(x + y * z())`.
- // Template authors are responsible for ensuring that typed expressions
- // do not break the intended precedence and that there is no
- // statement/expression ambiguity as when passing an expression like
- // "{ foo: bar() }\n['foo']()", which is both a valid Expression and a
- // valid Program with a very different meaning.
- JS string
-
- // JSStr encapsulates a sequence of characters meant to be embedded
- // between quotes in a JavaScript expression.
- // The string must match a series of StringCharacters:
- // StringCharacter :: SourceCharacter but not `\` or LineTerminator
- // | EscapeSequence
- // Note that LineContinuations are not allowed.
- // JSStr("foo\\nbar") is fine, but JSStr("foo\\\nbar") is not.
- JSStr string
-
- // URL encapsulates a known safe URL as defined in RFC 3896.
- // A URL like `javascript:checkThatFormNotEditedBeforeLeavingPage()`
- // from a trusted source should go in the page, but by default dynamic
- // `javascript:` URLs are filtered out since they are a frequently
- // exploited injection vector.
- URL string
-)
-
-type contentType uint8
-
-const (
- contentTypePlain contentType = iota
- contentTypeCSS
- contentTypeHTML
- contentTypeHTMLAttr
- contentTypeJS
- contentTypeJSStr
- contentTypeURL
- // contentTypeUnsafe is used in attr.go for values that affect how
- // embedded content and network messages are formed, vetted,
- // or interpreted; or which credentials network messages carry.
- contentTypeUnsafe
-)
-
-// stringify converts its arguments to a string and the type of the content.
-func stringify(args ...interface{}) (string, contentType) {
- if len(args) == 1 {
- switch s := args[0].(type) {
- case string:
- return s, contentTypePlain
- case CSS:
- return string(s), contentTypeCSS
- case HTML:
- return string(s), contentTypeHTML
- case HTMLAttr:
- return string(s), contentTypeHTMLAttr
- case JS:
- return string(s), contentTypeJS
- case JSStr:
- return string(s), contentTypeJSStr
- case URL:
- return string(s), contentTypeURL
- }
- }
- return fmt.Sprint(args...), contentTypePlain
-}
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package html
-
-import (
- "bytes"
- "strings"
- "template"
- "testing"
-)
-
-func TestTypedContent(t *testing.T) {
- data := []interface{}{
- `<b> "foo%" O'Reilly &bar;`,
- CSS(`a[href =~ "//example.com"]#foo`),
- HTML(`Hello, <b>World</b> &tc!`),
- HTMLAttr(` dir="ltr"`),
- JS(`c && alert("Hello, World!");`),
- JSStr(`Hello, World & O'Reilly\x21`),
- URL(`greeting=H%69&addressee=(World)`),
- }
-
- // For each content sensitive escaper, see how it does on
- // each of the typed strings above.
- tests := []struct {
- // A template containing a single {{.}}.
- input string
- want []string
- }{
- {
- `<style>{{.}} { color: blue }</style>`,
- []string{
- `ZgotmplZ`,
- // Allowed but not escaped.
- `a[href =~ "//example.com"]#foo`,
- `ZgotmplZ`,
- `ZgotmplZ`,
- `ZgotmplZ`,
- `ZgotmplZ`,
- `ZgotmplZ`,
- },
- },
- {
- `<div style="{{.}}">`,
- []string{
- `ZgotmplZ`,
- // Allowed and HTML escaped.
- `a[href =~ "//example.com"]#foo`,
- `ZgotmplZ`,
- `ZgotmplZ`,
- `ZgotmplZ`,
- `ZgotmplZ`,
- `ZgotmplZ`,
- },
- },
- {
- `{{.}}`,
- []string{
- `<b> "foo%" O'Reilly &bar;`,
- `a[href =~ "//example.com"]#foo`,
- // Not escaped.
- `Hello, <b>World</b> &tc!`,
- ` dir="ltr"`,
- `c && alert("Hello, World!");`,
- `Hello, World & O'Reilly\x21`,
- `greeting=H%69&addressee=(World)`,
- },
- },
- {
- `<a{{.}}>`,
- []string{
- `ZgotmplZ`,
- `ZgotmplZ`,
- `ZgotmplZ`,
- // Allowed and HTML escaped.
- ` dir="ltr"`,
- `ZgotmplZ`,
- `ZgotmplZ`,
- `ZgotmplZ`,
- },
- },
- {
- `<a title={{.}}>`,
- []string{
- `<b> "foo%" O'Reilly &bar;`,
- `a[href =~ "//example.com"]#foo`,
- // Tags stripped, spaces escaped, entity not re-escaped.
- `Hello, World &tc!`,
- ` dir="ltr"`,
- `c && alert("Hello, World!");`,
- `Hello, World & O'Reilly\x21`,
- `greeting=H%69&addressee=(World)`,
- },
- },
- {
- `<a title='{{.}}'>`,
- []string{
- `<b> "foo%" O'Reilly &bar;`,
- `a[href =~ "//example.com"]#foo`,
- // Tags stripped, entity not re-escaped.
- `Hello, World &tc!`,
- ` dir="ltr"`,
- `c && alert("Hello, World!");`,
- `Hello, World & O'Reilly\x21`,
- `greeting=H%69&addressee=(World)`,
- },
- },
- {
- `<textarea>{{.}}</textarea>`,
- []string{
- `<b> "foo%" O'Reilly &bar;`,
- `a[href =~ "//example.com"]#foo`,
- // Angle brackets escaped to prevent injection of close tags, entity not re-escaped.
- `Hello, <b>World</b> &tc!`,
- ` dir="ltr"`,
- `c && alert("Hello, World!");`,
- `Hello, World & O'Reilly\x21`,
- `greeting=H%69&addressee=(World)`,
- },
- },
- {
- `<script>alert({{.}})</script>`,
- []string{
- `"\u003cb\u003e \"foo%\" O'Reilly &bar;"`,
- `"a[href =~ \"//example.com\"]#foo"`,
- `"Hello, \u003cb\u003eWorld\u003c/b\u003e &tc!"`,
- `" dir=\"ltr\""`,
- // Not escaped.
- `c && alert("Hello, World!");`,
- // Escape sequence not over-escaped.
- `"Hello, World & O'Reilly\x21"`,
- `"greeting=H%69&addressee=(World)"`,
- },
- },
- {
- `<button onclick="alert({{.}})">`,
- []string{
- `"\u003cb\u003e \"foo%\" O'Reilly &bar;"`,
- `"a[href =~ \"//example.com\"]#foo"`,
- `"Hello, \u003cb\u003eWorld\u003c/b\u003e &amp;tc!"`,
- `" dir=\"ltr\""`,
- // Not JS escaped but HTML escaped.
- `c && alert("Hello, World!");`,
- // Escape sequence not over-escaped.
- `"Hello, World & O'Reilly\x21"`,
- `"greeting=H%69&addressee=(World)"`,
- },
- },
- {
- `<script>alert("{{.}}")</script>`,
- []string{
- `\x3cb\x3e \x22foo%\x22 O\x27Reilly \x26bar;`,
- `a[href =~ \x22\/\/example.com\x22]#foo`,
- `Hello, \x3cb\x3eWorld\x3c\/b\x3e \x26amp;tc!`,
- ` dir=\x22ltr\x22`,
- `c \x26\x26 alert(\x22Hello, World!\x22);`,
- // Escape sequence not over-escaped.
- `Hello, World \x26 O\x27Reilly\x21`,
- `greeting=H%69\x26addressee=(World)`,
- },
- },
- {
- `<button onclick='alert("{{.}}")'>`,
- []string{
- `\x3cb\x3e \x22foo%\x22 O\x27Reilly \x26bar;`,
- `a[href =~ \x22\/\/example.com\x22]#foo`,
- `Hello, \x3cb\x3eWorld\x3c\/b\x3e \x26amp;tc!`,
- ` dir=\x22ltr\x22`,
- `c \x26\x26 alert(\x22Hello, World!\x22);`,
- // Escape sequence not over-escaped.
- `Hello, World \x26 O\x27Reilly\x21`,
- `greeting=H%69\x26addressee=(World)`,
- },
- },
- {
- `<a href="?q={{.}}">`,
- []string{
- `%3cb%3e%20%22foo%25%22%20O%27Reilly%20%26bar%3b`,
- `a%5bhref%20%3d~%20%22%2f%2fexample.com%22%5d%23foo`,
- `Hello%2c%20%3cb%3eWorld%3c%2fb%3e%20%26amp%3btc%21`,
- `%20dir%3d%22ltr%22`,
- `c%20%26%26%20alert%28%22Hello%2c%20World%21%22%29%3b`,
- `Hello%2c%20World%20%26%20O%27Reilly%5cx21`,
- // Quotes and parens are escaped but %69 is not over-escaped. HTML escaping is done.
- `greeting=H%69&addressee=%28World%29`,
- },
- },
- {
- `<style>body { background: url('?img={{.}}') }</style>`,
- []string{
- `%3cb%3e%20%22foo%25%22%20O%27Reilly%20%26bar%3b`,
- `a%5bhref%20%3d~%20%22%2f%2fexample.com%22%5d%23foo`,
- `Hello%2c%20%3cb%3eWorld%3c%2fb%3e%20%26amp%3btc%21`,
- `%20dir%3d%22ltr%22`,
- `c%20%26%26%20alert%28%22Hello%2c%20World%21%22%29%3b`,
- `Hello%2c%20World%20%26%20O%27Reilly%5cx21`,
- // Quotes and parens are escaped but %69 is not over-escaped. HTML escaping is not done.
- `greeting=H%69&addressee=%28World%29`,
- },
- },
- }
-
- for _, test := range tests {
- tmpl := template.Must(Escape(template.Must(template.New("x").Parse(test.input))))
- pre := strings.Index(test.input, "{{.}}")
- post := len(test.input) - (pre + 5)
- var b bytes.Buffer
- for i, x := range data {
- b.Reset()
- if err := tmpl.Execute(&b, x); err != nil {
- t.Errorf("%q with %v: %s", test.input, x, err)
- continue
- }
- if want, got := test.want[i], b.String()[pre:b.Len()-post]; want != got {
- t.Errorf("%q with %v:\nwant\n\t%q,\ngot\n\t%q\n", test.input, x, want, got)
- continue
- }
- }
- }
-}
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package html
-
-import (
- "fmt"
-)
-
-// context describes the state an HTML parser must be in when it reaches the
-// portion of HTML produced by evaluating a particular template node.
-//
-// The zero value of type context is the start context for a template that
-// produces an HTML fragment as defined at
-// http://www.w3.org/TR/html5/the-end.html#parsing-html-fragments
-// where the context element is null.
-type context struct {
- state state
- delim delim
- urlPart urlPart
- jsCtx jsCtx
- attr attr
- element element
- err *Error
-}
-
-func (c context) String() string {
- return fmt.Sprintf("{%v %v %v %v %v %v %v}", c.state, c.delim, c.urlPart, c.jsCtx, c.attr, c.element, c.err)
-}
-
-// eq returns whether two contexts are equal.
-func (c context) eq(d context) bool {
- return c.state == d.state &&
- c.delim == d.delim &&
- c.urlPart == d.urlPart &&
- c.jsCtx == d.jsCtx &&
- c.attr == d.attr &&
- c.element == d.element &&
- c.err == d.err
-}
-
-// mangle produces an identifier that includes a suffix that distinguishes it
-// from template names mangled with different contexts.
-func (c context) mangle(templateName string) string {
- // The mangled name for the default context is the input templateName.
- if c.state == stateText {
- return templateName
- }
- s := templateName + "$htmltemplate_" + c.state.String()
- if c.delim != 0 {
- s += "_" + c.delim.String()
- }
- if c.urlPart != 0 {
- s += "_" + c.urlPart.String()
- }
- if c.jsCtx != 0 {
- s += "_" + c.jsCtx.String()
- }
- if c.attr != 0 {
- s += "_" + c.attr.String()
- }
- if c.element != 0 {
- s += "_" + c.element.String()
- }
- return s
-}
-
-// state describes a high-level HTML parser state.
-//
-// It bounds the top of the element stack, and by extension the HTML insertion
-// mode, but also contains state that does not correspond to anything in the
-// HTML5 parsing algorithm because a single token production in the HTML
-// grammar may contain embedded actions in a template. For instance, the quoted
-// HTML attribute produced by
-// <div title="Hello {{.World}}">
-// is a single token in HTML's grammar but in a template spans several nodes.
-type state uint8
-
-const (
- // stateText is parsed character data. An HTML parser is in
- // this state when its parse position is outside an HTML tag,
- // directive, comment, and special element body.
- stateText state = iota
- // stateTag occurs before an HTML attribute or the end of a tag.
- stateTag
- // stateAttrName occurs inside an attribute name.
- // It occurs between the ^'s in ` ^name^ = value`.
- stateAttrName
- // stateAfterName occurs after an attr name has ended but before any
- // equals sign. It occurs between the ^'s in ` name^ ^= value`.
- stateAfterName
- // stateBeforeValue occurs after the equals sign but before the value.
- // It occurs between the ^'s in ` name =^ ^value`.
- stateBeforeValue
- // stateHTMLCmt occurs inside an <!-- HTML comment -->.
- stateHTMLCmt
- // stateRCDATA occurs inside an RCDATA element (<textarea> or <title>)
- // as described at http://dev.w3.org/html5/spec/syntax.html#elements-0
- stateRCDATA
- // stateAttr occurs inside an HTML attribute whose content is text.
- stateAttr
- // stateURL occurs inside an HTML attribute whose content is a URL.
- stateURL
- // stateJS occurs inside an event handler or script element.
- stateJS
- // stateJSDqStr occurs inside a JavaScript double quoted string.
- stateJSDqStr
- // stateJSSqStr occurs inside a JavaScript single quoted string.
- stateJSSqStr
- // stateJSRegexp occurs inside a JavaScript regexp literal.
- stateJSRegexp
- // stateJSBlockCmt occurs inside a JavaScript /* block comment */.
- stateJSBlockCmt
- // stateJSLineCmt occurs inside a JavaScript // line comment.
- stateJSLineCmt
- // stateCSS occurs inside a <style> element or style attribute.
- stateCSS
- // stateCSSDqStr occurs inside a CSS double quoted string.
- stateCSSDqStr
- // stateCSSSqStr occurs inside a CSS single quoted string.
- stateCSSSqStr
- // stateCSSDqURL occurs inside a CSS double quoted url("...").
- stateCSSDqURL
- // stateCSSSqURL occurs inside a CSS single quoted url('...').
- stateCSSSqURL
- // stateCSSURL occurs inside a CSS unquoted url(...).
- stateCSSURL
- // stateCSSBlockCmt occurs inside a CSS /* block comment */.
- stateCSSBlockCmt
- // stateCSSLineCmt occurs inside a CSS // line comment.
- stateCSSLineCmt
- // stateError is an infectious error state outside any valid
- // HTML/CSS/JS construct.
- stateError
-)
-
-var stateNames = [...]string{
- stateText: "stateText",
- stateTag: "stateTag",
- stateAttrName: "stateAttrName",
- stateAfterName: "stateAfterName",
- stateBeforeValue: "stateBeforeValue",
- stateHTMLCmt: "stateHTMLCmt",
- stateRCDATA: "stateRCDATA",
- stateAttr: "stateAttr",
- stateURL: "stateURL",
- stateJS: "stateJS",
- stateJSDqStr: "stateJSDqStr",
- stateJSSqStr: "stateJSSqStr",
- stateJSRegexp: "stateJSRegexp",
- stateJSBlockCmt: "stateJSBlockCmt",
- stateJSLineCmt: "stateJSLineCmt",
- stateCSS: "stateCSS",
- stateCSSDqStr: "stateCSSDqStr",
- stateCSSSqStr: "stateCSSSqStr",
- stateCSSDqURL: "stateCSSDqURL",
- stateCSSSqURL: "stateCSSSqURL",
- stateCSSURL: "stateCSSURL",
- stateCSSBlockCmt: "stateCSSBlockCmt",
- stateCSSLineCmt: "stateCSSLineCmt",
- stateError: "stateError",
-}
-
-func (s state) String() string {
- if int(s) < len(stateNames) {
- return stateNames[s]
- }
- return fmt.Sprintf("illegal state %d", int(s))
-}
-
-// isComment is true for any state that contains content meant for template
-// authors & maintainers, not for end-users or machines.
-func isComment(s state) bool {
- switch s {
- case stateHTMLCmt, stateJSBlockCmt, stateJSLineCmt, stateCSSBlockCmt, stateCSSLineCmt:
- return true
- }
- return false
-}
-
-// isInTag return whether s occurs solely inside an HTML tag.
-func isInTag(s state) bool {
- switch s {
- case stateTag, stateAttrName, stateAfterName, stateBeforeValue, stateAttr:
- return true
- }
- return false
-}
-
-// delim is the delimiter that will end the current HTML attribute.
-type delim uint8
-
-const (
- // delimNone occurs outside any attribute.
- delimNone delim = iota
- // delimDoubleQuote occurs when a double quote (") closes the attribute.
- delimDoubleQuote
- // delimSingleQuote occurs when a single quote (') closes the attribute.
- delimSingleQuote
- // delimSpaceOrTagEnd occurs when a space or right angle bracket (>)
- // closes the attribute.
- delimSpaceOrTagEnd
-)
-
-var delimNames = [...]string{
- delimNone: "delimNone",
- delimDoubleQuote: "delimDoubleQuote",
- delimSingleQuote: "delimSingleQuote",
- delimSpaceOrTagEnd: "delimSpaceOrTagEnd",
-}
-
-func (d delim) String() string {
- if int(d) < len(delimNames) {
- return delimNames[d]
- }
- return fmt.Sprintf("illegal delim %d", int(d))
-}
-
-// urlPart identifies a part in an RFC 3986 hierarchical URL to allow different
-// encoding strategies.
-type urlPart uint8
-
-const (
- // urlPartNone occurs when not in a URL, or possibly at the start:
- // ^ in "^http://auth/path?k=v#frag".
- urlPartNone urlPart = iota
- // urlPartPreQuery occurs in the scheme, authority, or path; between the
- // ^s in "h^ttp://auth/path^?k=v#frag".
- urlPartPreQuery
- // urlPartQueryOrFrag occurs in the query portion between the ^s in
- // "http://auth/path?^k=v#frag^".
- urlPartQueryOrFrag
- // urlPartUnknown occurs due to joining of contexts both before and
- // after the query separator.
- urlPartUnknown
-)
-
-var urlPartNames = [...]string{
- urlPartNone: "urlPartNone",
- urlPartPreQuery: "urlPartPreQuery",
- urlPartQueryOrFrag: "urlPartQueryOrFrag",
- urlPartUnknown: "urlPartUnknown",
-}
-
-func (u urlPart) String() string {
- if int(u) < len(urlPartNames) {
- return urlPartNames[u]
- }
- return fmt.Sprintf("illegal urlPart %d", int(u))
-}
-
-// jsCtx determines whether a '/' starts a regular expression literal or a
-// division operator.
-type jsCtx uint8
-
-const (
- // jsCtxRegexp occurs where a '/' would start a regexp literal.
- jsCtxRegexp jsCtx = iota
- // jsCtxDivOp occurs where a '/' would start a division operator.
- jsCtxDivOp
- // jsCtxUnknown occurs where a '/' is ambiguous due to context joining.
- jsCtxUnknown
-)
-
-func (c jsCtx) String() string {
- switch c {
- case jsCtxRegexp:
- return "jsCtxRegexp"
- case jsCtxDivOp:
- return "jsCtxDivOp"
- case jsCtxUnknown:
- return "jsCtxUnknown"
- }
- return fmt.Sprintf("illegal jsCtx %d", int(c))
-}
-
-// element identifies the HTML element when inside a start tag or special body.
-// Certain HTML element (for example <script> and <style>) have bodies that are
-// treated differently from stateText so the element type is necessary to
-// transition into the correct context at the end of a tag and to identify the
-// end delimiter for the body.
-type element uint8
-
-const (
- // elementNone occurs outside a special tag or special element body.
- elementNone element = iota
- // elementScript corresponds to the raw text <script> element.
- elementScript
- // elementStyle corresponds to the raw text <style> element.
- elementStyle
- // elementTextarea corresponds to the RCDATA <textarea> element.
- elementTextarea
- // elementTitle corresponds to the RCDATA <title> element.
- elementTitle
-)
-
-var elementNames = [...]string{
- elementNone: "elementNone",
- elementScript: "elementScript",
- elementStyle: "elementStyle",
- elementTextarea: "elementTextarea",
- elementTitle: "elementTitle",
-}
-
-func (e element) String() string {
- if int(e) < len(elementNames) {
- return elementNames[e]
- }
- return fmt.Sprintf("illegal element %d", int(e))
-}
-
-// attr identifies the most recent HTML attribute when inside a start tag.
-type attr uint8
-
-const (
- // attrNone corresponds to a normal attribute or no attribute.
- attrNone attr = iota
- // attrScript corresponds to an event handler attribute.
- attrScript
- // attrStyle corresponds to the style attribute whose value is CSS.
- attrStyle
- // attrURL corresponds to an attribute whose value is a URL.
- attrURL
-)
-
-var attrNames = [...]string{
- attrNone: "attrNone",
- attrScript: "attrScript",
- attrStyle: "attrStyle",
- attrURL: "attrURL",
-}
-
-func (a attr) String() string {
- if int(a) < len(attrNames) {
- return attrNames[a]
- }
- return fmt.Sprintf("illegal attr %d", int(a))
-}
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package html
-
-import (
- "bytes"
- "fmt"
- "unicode"
- "utf8"
-)
-
-// endsWithCSSKeyword returns whether b ends with an ident that
-// case-insensitively matches the lower-case kw.
-func endsWithCSSKeyword(b []byte, kw string) bool {
- i := len(b) - len(kw)
- if i < 0 {
- // Too short.
- return false
- }
- if i != 0 {
- r, _ := utf8.DecodeLastRune(b[:i])
- if isCSSNmchar(r) {
- // Too long.
- return false
- }
- }
- // Many CSS keywords, such as "!important" can have characters encoded,
- // but the URI production does not allow that according to
- // http://www.w3.org/TR/css3-syntax/#TOK-URI
- // This does not attempt to recognize encoded keywords. For example,
- // given "\75\72\6c" and "url" this return false.
- return string(bytes.ToLower(b[i:])) == kw
-}
-
-// isCSSNmchar returns whether rune is allowed anywhere in a CSS identifier.
-func isCSSNmchar(r rune) bool {
- // Based on the CSS3 nmchar production but ignores multi-rune escape
- // sequences.
- // http://www.w3.org/TR/css3-syntax/#SUBTOK-nmchar
- return 'a' <= r && r <= 'z' ||
- 'A' <= r && r <= 'Z' ||
- '0' <= r && r <= '9' ||
- r == '-' ||
- r == '_' ||
- // Non-ASCII cases below.
- 0x80 <= r && r <= 0xd7ff ||
- 0xe000 <= r && r <= 0xfffd ||
- 0x10000 <= r && r <= 0x10ffff
-}
-
-// decodeCSS decodes CSS3 escapes given a sequence of stringchars.
-// If there is no change, it returns the input, otherwise it returns a slice
-// backed by a new array.
-// http://www.w3.org/TR/css3-syntax/#SUBTOK-stringchar defines stringchar.
-func decodeCSS(s []byte) []byte {
- i := bytes.IndexByte(s, '\\')
- if i == -1 {
- return s
- }
- // The UTF-8 sequence for a codepoint is never longer than 1 + the
- // number hex digits need to represent that codepoint, so len(s) is an
- // upper bound on the output length.
- b := make([]byte, 0, len(s))
- for len(s) != 0 {
- i := bytes.IndexByte(s, '\\')
- if i == -1 {
- i = len(s)
- }
- b, s = append(b, s[:i]...), s[i:]
- if len(s) < 2 {
- break
- }
- // http://www.w3.org/TR/css3-syntax/#SUBTOK-escape
- // escape ::= unicode | '\' [#x20-#x7E#x80-#xD7FF#xE000-#xFFFD#x10000-#x10FFFF]
- if isHex(s[1]) {
- // http://www.w3.org/TR/css3-syntax/#SUBTOK-unicode
- // unicode ::= '\' [0-9a-fA-F]{1,6} wc?
- j := 2
- for j < len(s) && j < 7 && isHex(s[j]) {
- j++
- }
- r := hexDecode(s[1:j])
- if r > unicode.MaxRune {
- r, j = r/16, j-1
- }
- n := utf8.EncodeRune(b[len(b):cap(b)], r)
- // The optional space at the end allows a hex
- // sequence to be followed by a literal hex.
- // string(decodeCSS([]byte(`\A B`))) == "\nB"
- b, s = b[:len(b)+n], skipCSSSpace(s[j:])
- } else {
- // `\\` decodes to `\` and `\"` to `"`.
- _, n := utf8.DecodeRune(s[1:])
- b, s = append(b, s[1:1+n]...), s[1+n:]
- }
- }
- return b
-}
-
-// isHex returns whether the given character is a hex digit.
-func isHex(c byte) bool {
- return '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F'
-}
-
-// hexDecode decodes a short hex digit sequence: "10" -> 16.
-func hexDecode(s []byte) rune {
- n := rune(0)
- for _, c := range s {
- n <<= 4
- switch {
- case '0' <= c && c <= '9':
- n |= rune(c - '0')
- case 'a' <= c && c <= 'f':
- n |= rune(c-'a') + 10
- case 'A' <= c && c <= 'F':
- n |= rune(c-'A') + 10
- default:
- panic(fmt.Sprintf("Bad hex digit in %q", s))
- }
- }
- return n
-}
-
-// skipCSSSpace returns a suffix of c, skipping over a single space.
-func skipCSSSpace(c []byte) []byte {
- if len(c) == 0 {
- return c
- }
- // wc ::= #x9 | #xA | #xC | #xD | #x20
- switch c[0] {
- case '\t', '\n', '\f', ' ':
- return c[1:]
- case '\r':
- // This differs from CSS3's wc production because it contains a
- // probable spec error whereby wc contains all the single byte
- // sequences in nl (newline) but not CRLF.
- if len(c) >= 2 && c[1] == '\n' {
- return c[2:]
- }
- return c[1:]
- }
- return c
-}
-
-// isCSSSpace returns whether b is a CSS space char as defined in wc.
-func isCSSSpace(b byte) bool {
- switch b {
- case '\t', '\n', '\f', '\r', ' ':
- return true
- }
- return false
-}
-
-// cssEscaper escapes HTML and CSS special characters using \<hex>+ escapes.
-func cssEscaper(args ...interface{}) string {
- s, _ := stringify(args...)
- var b bytes.Buffer
- written := 0
- for i, r := range s {
- var repl string
- switch r {
- case 0:
- repl = `\0`
- case '\t':
- repl = `\9`
- case '\n':
- repl = `\a`
- case '\f':
- repl = `\c`
- case '\r':
- repl = `\d`
- // Encode HTML specials as hex so the output can be embedded
- // in HTML attributes without further encoding.
- case '"':
- repl = `\22`
- case '&':
- repl = `\26`
- case '\'':
- repl = `\27`
- case '(':
- repl = `\28`
- case ')':
- repl = `\29`
- case '+':
- repl = `\2b`
- case '/':
- repl = `\2f`
- case ':':
- repl = `\3a`
- case ';':
- repl = `\3b`
- case '<':
- repl = `\3c`
- case '>':
- repl = `\3e`
- case '\\':
- repl = `\\`
- case '{':
- repl = `\7b`
- case '}':
- repl = `\7d`
- default:
- continue
- }
- b.WriteString(s[written:i])
- b.WriteString(repl)
- written = i + utf8.RuneLen(r)
- if repl != `\\` && (written == len(s) || isHex(s[written]) || isCSSSpace(s[written])) {
- b.WriteByte(' ')
- }
- }
- if written == 0 {
- return s
- }
- b.WriteString(s[written:])
- return b.String()
-}
-
-var expressionBytes = []byte("expression")
-var mozBindingBytes = []byte("mozbinding")
-
-// cssValueFilter allows innocuous CSS values in the output including CSS
-// quantities (10px or 25%), ID or class literals (#foo, .bar), keyword values
-// (inherit, blue), and colors (#888).
-// It filters out unsafe values, such as those that affect token boundaries,
-// and anything that might execute scripts.
-func cssValueFilter(args ...interface{}) string {
- s, t := stringify(args...)
- if t == contentTypeCSS {
- return s
- }
- b, id := decodeCSS([]byte(s)), make([]byte, 0, 64)
-
- // CSS3 error handling is specified as honoring string boundaries per
- // http://www.w3.org/TR/css3-syntax/#error-handling :
- // Malformed declarations. User agents must handle unexpected
- // tokens encountered while parsing a declaration by reading until
- // the end of the declaration, while observing the rules for
- // matching pairs of (), [], {}, "", and '', and correctly handling
- // escapes. For example, a malformed declaration may be missing a
- // property, colon (:) or value.
- // So we need to make sure that values do not have mismatched bracket
- // or quote characters to prevent the browser from restarting parsing
- // inside a string that might embed JavaScript source.
- for i, c := range b {
- switch c {
- case 0, '"', '\'', '(', ')', '/', ';', '@', '[', '\\', ']', '`', '{', '}':
- return filterFailsafe
- case '-':
- // Disallow <!-- or -->.
- // -- should not appear in valid identifiers.
- if i != 0 && b[i-1] == '-' {
- return filterFailsafe
- }
- default:
- if c < 0x80 && isCSSNmchar(rune(c)) {
- id = append(id, c)
- }
- }
- }
- id = bytes.ToLower(id)
- if bytes.Index(id, expressionBytes) != -1 || bytes.Index(id, mozBindingBytes) != -1 {
- return filterFailsafe
- }
- return string(b)
-}
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package html
-
-import (
- "strconv"
- "strings"
- "testing"
-)
-
-func TestEndsWithCSSKeyword(t *testing.T) {
- tests := []struct {
- css, kw string
- want bool
- }{
- {"", "url", false},
- {"url", "url", true},
- {"URL", "url", true},
- {"Url", "url", true},
- {"url", "important", false},
- {"important", "important", true},
- {"image-url", "url", false},
- {"imageurl", "url", false},
- {"image url", "url", true},
- }
- for _, test := range tests {
- got := endsWithCSSKeyword([]byte(test.css), test.kw)
- if got != test.want {
- t.Errorf("want %t but got %t for css=%v, kw=%v", test.want, got, test.css, test.kw)
- }
- }
-}
-
-func TestIsCSSNmchar(t *testing.T) {
- tests := []struct {
- rune rune
- want bool
- }{
- {0, false},
- {'0', true},
- {'9', true},
- {'A', true},
- {'Z', true},
- {'a', true},
- {'z', true},
- {'_', true},
- {'-', true},
- {':', false},
- {';', false},
- {' ', false},
- {0x7f, false},
- {0x80, true},
- {0x1234, true},
- {0xd800, false},
- {0xdc00, false},
- {0xfffe, false},
- {0x10000, true},
- {0x110000, false},
- }
- for _, test := range tests {
- got := isCSSNmchar(test.rune)
- if got != test.want {
- t.Errorf("%q: want %t but got %t", string(test.rune), test.want, got)
- }
- }
-}
-
-func TestDecodeCSS(t *testing.T) {
- tests := []struct {
- css, want string
- }{
- {``, ``},
- {`foo`, `foo`},
- {`foo\`, `foo`},
- {`foo\\`, `foo\`},
- {`\`, ``},
- {`\A`, "\n"},
- {`\a`, "\n"},
- {`\0a`, "\n"},
- {`\00000a`, "\n"},
- {`\000000a`, "\u0000a"},
- {`\1234 5`, "\u1234" + "5"},
- {`\1234\20 5`, "\u1234" + " 5"},
- {`\1234\A 5`, "\u1234" + "\n5"},
- {"\\1234\t5", "\u1234" + "5"},
- {"\\1234\n5", "\u1234" + "5"},
- {"\\1234\r\n5", "\u1234" + "5"},
- {`\12345`, "\U00012345"},
- {`\\`, `\`},
- {`\\ `, `\ `},
- {`\"`, `"`},
- {`\'`, `'`},
- {`\.`, `.`},
- {`\. .`, `. .`},
- {
- `The \3c i\3equick\3c/i\3e,\d\A\3cspan style=\27 color:brown\27\3e brown\3c/span\3e fox jumps\2028over the \3c canine class=\22lazy\22 \3e dog\3c/canine\3e`,
- "The <i>quick</i>,\r\n<span style='color:brown'>brown</span> fox jumps\u2028over the <canine class=\"lazy\">dog</canine>",
- },
- }
- for _, test := range tests {
- got1 := string(decodeCSS([]byte(test.css)))
- if got1 != test.want {
- t.Errorf("%q: want\n\t%q\nbut got\n\t%q", test.css, test.want, got1)
- }
- recoded := cssEscaper(got1)
- if got2 := string(decodeCSS([]byte(recoded))); got2 != test.want {
- t.Errorf("%q: escape & decode not dual for %q", test.css, recoded)
- }
- }
-}
-
-func TestHexDecode(t *testing.T) {
- for i := 0; i < 0x200000; i += 101 /* coprime with 16 */ {
- s := strconv.Itob(i, 16)
- if got := int(hexDecode([]byte(s))); got != i {
- t.Errorf("%s: want %d but got %d", s, i, got)
- }
- s = strings.ToUpper(s)
- if got := int(hexDecode([]byte(s))); got != i {
- t.Errorf("%s: want %d but got %d", s, i, got)
- }
- }
-}
-
-func TestSkipCSSSpace(t *testing.T) {
- tests := []struct {
- css, want string
- }{
- {"", ""},
- {"foo", "foo"},
- {"\n", ""},
- {"\r\n", ""},
- {"\r", ""},
- {"\t", ""},
- {" ", ""},
- {"\f", ""},
- {" foo", "foo"},
- {" foo", " foo"},
- {`\20`, `\20`},
- }
- for _, test := range tests {
- got := string(skipCSSSpace([]byte(test.css)))
- if got != test.want {
- t.Errorf("%q: want %q but got %q", test.css, test.want, got)
- }
- }
-}
-
-func TestCSSEscaper(t *testing.T) {
- input := ("\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f" +
- "\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" +
- ` !"#$%&'()*+,-./` +
- `0123456789:;<=>?` +
- `@ABCDEFGHIJKLMNO` +
- `PQRSTUVWXYZ[\]^_` +
- "`abcdefghijklmno" +
- "pqrstuvwxyz{|}~\x7f" +
- "\u00A0\u0100\u2028\u2029\ufeff\U0001D11E")
-
- want := ("\\0\x01\x02\x03\x04\x05\x06\x07" +
- "\x08\\9 \\a\x0b\\c \\d\x0E\x0F" +
- "\x10\x11\x12\x13\x14\x15\x16\x17" +
- "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" +
- ` !\22#$%\26\27\28\29*\2b,-.\2f ` +
- `0123456789\3a\3b\3c=\3e?` +
- `@ABCDEFGHIJKLMNO` +
- `PQRSTUVWXYZ[\\]^_` +
- "`abcdefghijklmno" +
- `pqrstuvwxyz\7b|\7d~` + "\u007f" +
- "\u00A0\u0100\u2028\u2029\ufeff\U0001D11E")
-
- got := cssEscaper(input)
- if got != want {
- t.Errorf("encode: want\n\t%q\nbut got\n\t%q", want, got)
- }
-
- got = string(decodeCSS([]byte(got)))
- if input != got {
- t.Errorf("decode: want\n\t%q\nbut got\n\t%q", input, got)
- }
-}
-
-func TestCSSValueFilter(t *testing.T) {
- tests := []struct {
- css, want string
- }{
- {"", ""},
- {"foo", "foo"},
- {"0", "0"},
- {"0px", "0px"},
- {"-5px", "-5px"},
- {"1.25in", "1.25in"},
- {"+.33em", "+.33em"},
- {"100%", "100%"},
- {"12.5%", "12.5%"},
- {".foo", ".foo"},
- {"#bar", "#bar"},
- {"corner-radius", "corner-radius"},
- {"-moz-corner-radius", "-moz-corner-radius"},
- {"#000", "#000"},
- {"#48f", "#48f"},
- {"#123456", "#123456"},
- {"U+00-FF, U+980-9FF", "U+00-FF, U+980-9FF"},
- {"color: red", "color: red"},
- {"<!--", "ZgotmplZ"},
- {"-->", "ZgotmplZ"},
- {"<![CDATA[", "ZgotmplZ"},
- {"]]>", "ZgotmplZ"},
- {"</style", "ZgotmplZ"},
- {`"`, "ZgotmplZ"},
- {`'`, "ZgotmplZ"},
- {"`", "ZgotmplZ"},
- {"\x00", "ZgotmplZ"},
- {"/* foo */", "ZgotmplZ"},
- {"//", "ZgotmplZ"},
- {"[href=~", "ZgotmplZ"},
- {"expression(alert(1337))", "ZgotmplZ"},
- {"-expression(alert(1337))", "ZgotmplZ"},
- {"expression", "ZgotmplZ"},
- {"Expression", "ZgotmplZ"},
- {"EXPRESSION", "ZgotmplZ"},
- {"-moz-binding", "ZgotmplZ"},
- {"-expr\x00ession(alert(1337))", "ZgotmplZ"},
- {`-expr\0ession(alert(1337))`, "ZgotmplZ"},
- {`-express\69on(alert(1337))`, "ZgotmplZ"},
- {`-express\69 on(alert(1337))`, "ZgotmplZ"},
- {`-exp\72 ession(alert(1337))`, "ZgotmplZ"},
- {`-exp\52 ession(alert(1337))`, "ZgotmplZ"},
- {`-exp\000052 ession(alert(1337))`, "ZgotmplZ"},
- {`-expre\0000073sion`, "-expre\x073sion"},
- {`@import url evil.css`, "ZgotmplZ"},
- }
- for _, test := range tests {
- got := cssValueFilter(test.css)
- if got != test.want {
- t.Errorf("%q: want %q but got %q", test.css, test.want, got)
- }
- }
-}
-
-func BenchmarkCSSEscaper(b *testing.B) {
- for i := 0; i < b.N; i++ {
- cssEscaper("The <i>quick</i>,\r\n<span style='color:brown'>brown</span> fox jumps\u2028over the <canine class=\"lazy\">dog</canine>")
- }
-}
-
-func BenchmarkCSSEscaperNoSpecials(b *testing.B) {
- for i := 0; i < b.N; i++ {
- cssEscaper("The quick, brown fox jumps over the lazy dog.")
- }
-}
-
-func BenchmarkDecodeCSS(b *testing.B) {
- s := []byte(`The \3c i\3equick\3c/i\3e,\d\A\3cspan style=\27 color:brown\27\3e brown\3c/span\3e fox jumps\2028over the \3c canine class=\22lazy\22 \3edog\3c/canine\3e`)
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- decodeCSS(s)
- }
-}
-
-func BenchmarkDecodeCSSNoSpecials(b *testing.B) {
- s := []byte("The quick, brown fox jumps over the lazy dog.")
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- decodeCSS(s)
- }
-}
-
-func BenchmarkCSSValueFilter(b *testing.B) {
- for i := 0; i < b.N; i++ {
- cssValueFilter(` e\78preS\0Sio/**/n(alert(1337))`)
- }
-}
-
-func BenchmarkCSSValueFilterOk(b *testing.B) {
- for i := 0; i < b.N; i++ {
- cssValueFilter(`Times New Roman`)
- }
-}
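
A side note for readers building this historically: TestHexDecode above relies on strconv.Itob, which predates Go 1; the Go 1 spellings are strconv.FormatInt and strconv.ParseInt. A sketch of the same round trip using only the Go 1 API (it exercises the standard library rather than the removed hexDecode helper):

    package main

    import (
        "fmt"
        "strconv"
        "strings"
    )

    func main() {
        for i := int64(0); i < 0x200000; i += 101 { // 101 is coprime with 16
            s := strconv.FormatInt(i, 16) // Go 1 replacement for strconv.Itob(i, 16)
            got, err := strconv.ParseInt(strings.ToUpper(s), 16, 64)
            if err != nil || got != i {
                fmt.Printf("%s: want %d but got %d (%v)\n", s, i, got, err)
            }
        }
    }
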
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-/*
-Package html is a specialization of package template that automates the
-construction of HTML output that is safe against code injection.
-
-
-Introduction
-
-To use this package, invoke the standard template package to parse a template
-set, and then use this package’s EscapeSet function to secure the set.
-The arguments to EscapeSet are the template set and the names of all templates
-that will be passed to Execute.
-
- set, err := new(template.Set).Parse(...)
- set, err = EscapeSet(set, "templateName0", ...)
-
-If successful, set will now be injection-safe. Otherwise, the returned set will
-be nil and an error, described below, will explain the problem.
-
-The template names do not need to include helper templates but should include
-all names x used thus:
-
- set.Execute(out, x, ...)
-
-EscapeSet modifies the named templates in place to treat data values as plain
-text safe for embedding in an HTML document. The escaping is contextual, so
-actions can appear within JavaScript, CSS, and URI contexts without introducing hazards.
-
-The security model used by this package assumes that template authors are
-trusted, while Execute's data parameter is not. More details are provided below.
-
-Example
-
- tmpls, err := new(template.Set).Parse(`{{define "t"}}Hello, {{.}}!{{end}}`)
-
-when used by itself
-
- tmpls.Execute(out, "t", "<script>alert('you have been pwned')</script>")
-
-produces
-
- Hello, <script>alert('you have been pwned')</script>!
-
-but after securing with EscapeSet like this,
-
- tmpls, err := EscapeSet(tmpls, "t")
- tmpls.Execute(out, "t", ...)
-
-produces the safe, escaped HTML output
-
- Hello, &lt;script&gt;alert(&#39;you have been pwned&#39;)&lt;/script&gt;!
-
-
-Contexts
-
-EscapeSet understands HTML, CSS, JavaScript, and URIs. It adds sanitizing
-functions to each simple action pipeline, so given the excerpt
-
- <a href="/search?q={{.}}">{{.}}</a>
-
-EscapeSet will rewrite each {{.}} to add escaping functions where necessary,
-in this case,
-
- <a href="/search?q={{. | urlquery}}">{{. | html}}</a>
-
-
-Errors
-
-See the documentation of ErrorCode for details.
-
-
-A fuller picture
-
-The rest of this package comment may be skipped on first reading; it includes
-details necessary to understand escaping contexts and error messages. Most users
-will not need to understand these details.
-
-
-Contexts
-
-Assuming {{.}} is `O'Reilly: How are <i>you</i>?`, the table below shows
-how {{.}} appears when used in the context to the left.
-
-Context                          {{.}} After
-{{.}}                            O'Reilly: How are &lt;i&gt;you&lt;/i&gt;?
-<a title='{{.}}'>                O'Reilly: How are you?
-<a href="/{{.}}">                O'Reilly: How are %3ci%3eyou%3c/i%3e?
-<a href="?q={{.}}">              O'Reilly%3a%20How%20are%3ci%3e...%3f
-<a onx='f("{{.}}")'>             O\x27Reilly: How are \x3ci\x3eyou...?
-<a onx='f({{.}})'>               "O\x27Reilly: How are \x3ci\x3eyou...?"
-<a onx='pattern = /{{.}}/;'>     O\x27Reilly: How are \x3ci\x3eyou...\x3f
-
-If used in an unsafe context, then the value might be filtered out:
-
-Context              {{.}} After
-<a href="{{.}}">     #ZgotmplZ
-
-since "O'Reilly:" is not an allowed protocol like "http:".
-
-
-If {{.}} is the innocuous word, `left`, then it can appear more widely,
-
-Context                               {{.}} After
-{{.}}                                 left
-<a title='{{.}}'>                     left
-<a href='{{.}}'>                      left
-<a href='/{{.}}'>                     left
-<a href='?dir={{.}}'>                 left
-<a style="border-{{.}}: 4px">         left
-<a style="align: {{.}}">              left
-<a style="background: '{{.}}'>        left
-<a style="background: url('{{.}}')>   left
-<style>p.{{.}} {color:red}</style>    left
-
-Non-string values can be used in JavaScript contexts.
-If {{.}} is
-
- struct{A,B string}{ "foo", "bar" }
-
-in the escaped template
-
- <script>var pair = {{.}};</script>
-
-then the template output is
-
- <script>var pair = {"A": "foo", "B": "bar"};</script>
-
-See package json to understand how non-string content is marshalled for
-embedding in JavaScript contexts.
-
-
-Typed Strings
-
-By default, EscapeSet assumes all pipelines produce a plain text string. It
-adds escaping pipeline stages necessary to correctly and safely embed that
-plain text string in the appropriate context.
-
-When a data value is not plain text, you can make sure it is not over-escaped
-by marking it with its type.
-
-Types HTML, JS, URL, and others from content.go can carry safe content that is
-exempted from escaping.
-
-The template
-
- Hello, {{.}}!
-
-can be invoked with
-
- tmpl.Execute(out, HTML(`<b>World</b>`))
-
-to produce
-
- Hello, <b>World</b>!
-
-instead of the
-
- Hello, &lt;b&gt;World&lt;b&gt;!
-
-that would have been produced if {{.}} was a regular string.
-
-
-Security Model
-
-http://js-quasis-libraries-and-repl.googlecode.com/svn/trunk/safetemplate.html#problem_definition defines "safe" as used by this package.
-
-This package assumes that template authors are trusted, that Execute's data
-parameter is not, and seeks to preserve the properties below in the face
-of untrusted data:
-
-Structure Preservation Property
-"... when a template author writes an HTML tag in a safe templating language,
-the browser will interpret the corresponding portion of the output as a tag
-regardless of the values of untrusted data, and similarly for other structures
-such as attribute boundaries and JS and CSS string boundaries."
-
-Code Effect Property
-"... only code specified by the template author should run as a result of
-injecting the template output into a page and all code specified by the
-template author should run as a result of the same."
-
-Least Surprise Property
-"A developer (or code reviewer) familiar with HTML, CSS, and JavaScript;
-who knows that EscapeSet is applied should be able to look at a {{.}}
-and correctly infer what sanitization happens."
-*/
-package html
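
For readers following this removal: the template.Set plus EscapeSet API documented above did not survive into Go 1; the same contextual autoescaping now lives in the standard html/template package, where it is applied automatically the first time a template is executed. A rough modern equivalent of the Example section, offered only as a sketch against html/template rather than the removed API:

    package main

    import (
        "html/template"
        "os"
    )

    func main() {
        t := template.Must(template.New("t").Parse("Hello, {{.}}!\n"))

        // Untrusted data is escaped for the surrounding HTML context:
        // Hello, &lt;script&gt;alert(&#39;you have been pwned&#39;)&lt;/script&gt;!
        t.Execute(os.Stdout, "<script>alert('you have been pwned')</script>")

        // Values carrying a content type (the typed strings above) are exempt:
        // Hello, <b>World</b>!
        t.Execute(os.Stdout, template.HTML("<b>World</b>"))
    }
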
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package html
-
-import (
- "fmt"
-)
-
-// Error describes a problem encountered during template Escaping.
-type Error struct {
- // ErrorCode describes the kind of error.
- ErrorCode ErrorCode
- // Name is the name of the template in which the error was encountered.
- Name string
- // Line is the line number of the error in the template source or 0.
- Line int
- // Description is a human-readable description of the problem.
- Description string
-}
-
-// ErrorCode is a code for a kind of error.
-type ErrorCode int
-
-// We define codes for each error that manifests while escaping templates, but
-// escaped templates may also fail at runtime.
-//
-// Output: "ZgotmplZ"
-// Example:
-// <img src="{{.X}}">
-// where {{.X}} evaluates to `javascript:...`
-// Discussion:
-// "ZgotmplZ" is a special value that indicates that unsafe content reached a
-// CSS or URL context at runtime. The output of the example will be
-// <img src="#ZgotmplZ">
-// If the data comes from a trusted source, use content types to exempt it
-// from filtering: URL(`javascript:...`).
-const (
- // OK indicates the lack of an error.
- OK ErrorCode = iota
-
- // ErrAmbigContext: "... appears in an ambiguous URL context"
- // Example:
- // <a href="
- // {{if .C}}
- // /path/
- // {{else}}
- // /search?q=
- // {{end}}
- // {{.X}}
- // ">
- // Discussion:
- // {{.X}} is in an ambiguous URL context since, depending on {{.C}},
- // it may be either a URL suffix or a query parameter.
- // Moving {{.X}} into the condition removes the ambiguity:
- // <a href="{{if .C}}/path/{{.X}}{{else}}/search?q={{.X}}">
- ErrAmbigContext
-
- // ErrBadHTML: "expected space, attr name, or end of tag, but got ...",
- // "... in unquoted attr", "... in attribute name"
- // Example:
- // <a href = /search?q=foo>
- // <href=foo>
- // <form na<e=...>
- // <option selected<
- // Discussion:
- // This is often due to a typo in an HTML element, but some runes
- // are banned in tag names, attribute names, and unquoted attribute
- // values because they can tickle parser ambiguities.
- // Quoting all attributes is the best policy.
- ErrBadHTML
-
- // ErrBranchEnd: "{{if}} branches end in different contexts"
- // Example:
- // {{if .C}}<a href="{{end}}{{.X}}
- // Discussion:
- // EscapeSet statically examines each possible path when it encounters
- // a {{if}}, {{range}}, or {{with}} to escape any following pipelines.
- // The example is ambiguous since {{.X}} might be an HTML text node,
- // or a URL prefix in an HTML attribute. EscapeSet needs to understand
- // the context of {{.X}} to escape it, but that depends on the
- // run-time value of {{.C}}.
- //
- // The problem is usually something like missing quotes or angle
- // brackets, or can be avoided by refactoring to put the two contexts
- // into different branches of an if, range or with. If the problem
- // is in a {{range}} over a collection that should never be empty,
- // adding a dummy {{else}} can help.
- ErrBranchEnd
-
- // ErrEndContext: "... ends in a non-text context: ..."
- // Examples:
- // <div
- // <div title="no close quote>
- // <script>f()
- // Discussion:
- // EscapeSet assumes the output is a DocumentFragment of HTML.
- // Templates that end without closing tags will trigger this error.
- // Templates that produce incomplete Fragments should not be named
- // in the call to EscapeSet.
- //
- // If you have a helper template in your set that is not meant to
- // produce a document fragment, then do not pass its name to
- // EscapeSet(set, ...names).
- //
- // {{define "main"}} <script>{{template "helper"}}</script> {{end}}
- // {{define "helper"}} document.write(' <div title=" ') {{end}}
- //
- // "helper" does not produce a valid document fragment, though it does
- // produce a valid JavaScript Program.
- ErrEndContext
-
- // ErrNoNames: "must specify names of top level templates"
- //
- // EscapeSet does not assume that all templates in a set produce HTML.
- // Some may be helpers that produce snippets of other languages.
- // Passing in no template names is most likely an error,
- // so EscapeSet(set) will panic.
- // If you call EscapeSet with a slice of names, guard it with len:
- //
- // if len(names) != 0 {
- // set, err := EscapeSet(set, ...names)
- // }
- ErrNoNames
-
- // ErrNoSuchTemplate: "no such template ..."
- // Examples:
- // {{define "main"}}<div {{template "attrs"}}>{{end}}
- // {{define "attrs"}}href="{{.URL}}"{{end}}
- // Discussion:
- // EscapeSet looks through template calls to compute the context.
- // Here the {{.URL}} in "attrs" must be treated as a URL when called
- // from "main", but if "attrs" is not in set when
- // EscapeSet(&set, "main") is called, this error will arise.
- ErrNoSuchTemplate
-
- // ErrOutputContext: "cannot compute output context for template ..."
- // Examples:
- // {{define "t"}}{{if .T}}{{template "t" .T}}{{end}}{{.H}}",{{end}}
- // Discussion:
- // A recursive template does not end in the same context in which it
- // starts, and a reliable output context cannot be computed.
- // Look for typos in the named template.
- // If the template should not be called in the named start context,
- // look for calls to that template in unexpected contexts.
- // Maybe refactor recursive templates to not be recursive.
- ErrOutputContext
-
- // ErrPartialCharset: "unfinished JS regexp charset in ..."
- // Example:
- // <script>var pattern = /foo[{{.Chars}}]/</script>
- // Discussion:
- // EscapeSet does not support interpolation into regular expression
- // literal character sets.
- ErrPartialCharset
-
- // ErrPartialEscape: "unfinished escape sequence in ..."
- // Example:
- // <script>alert("\{{.X}}")</script>
- // Discussion:
- // EscapeSet does not support actions following a backslash.
- // This is usually an error and there are better solutions; for
- // our example
- // <script>alert("{{.X}}")</script>
- // should work, and if {{.X}} is a partial escape sequence such as
- // "xA0", mark the whole sequence as safe content: JSStr(`\xA0`)
- ErrPartialEscape
-
- // ErrRangeLoopReentry: "on range loop re-entry: ..."
- // Example:
- // {{range .}}<p class={{.}}{{end}}
- // Discussion:
- // If an iteration through a range would cause it to end in a
- // different context than an earlier pass, there is no single context.
- // In the example, the <p> tag is missing a '>'.
- // EscapeSet cannot tell whether {{.}} is meant to be an HTML class or
- // the content of a broken <p> element and complains because the
- // second iteration would produce something like
- //
- // <p class=foo<p class=bar
- ErrRangeLoopReentry
-
- // ErrSlashAmbig: '/' could start a division or regexp.
- // Example:
- // <script>
- // {{if .C}}var x = 1{{end}}
- // /-{{.N}}/i.test(x) ? doThis : doThat();
- // </script>
- // Discussion:
- // The example above could produce `var x = 1/-2/i.test(s)...`
- // in which the first '/' is a mathematical division operator or it
- // could produce `/-2/i.test(s)` in which the first '/' starts a
- // regexp literal.
- // Look for missing semicolons inside branches, and maybe add
- // parentheses to make it clear which interpretation you intend.
- ErrSlashAmbig
-)
-
-func (e *Error) Error() string {
- if e.Line != 0 {
- return fmt.Sprintf("exp/template/html:%s:%d: %s", e.Name, e.Line, e.Description)
- } else if e.Name != "" {
- return fmt.Sprintf("exp/template/html:%s: %s", e.Name, e.Description)
- }
- return "exp/template/html: " + e.Description
-}
-
-// errorf creates an error given a format string f and args.
-// The template Name still needs to be supplied.
-func errorf(k ErrorCode, line int, f string, args ...interface{}) *Error {
- return &Error{k, "", line, fmt.Sprintf(f, args...)}
-}
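
Most of the codes above still exist under the same names in html/template. As a quick illustration of the ErrBranchEnd case documented above, the following sketch (html/template and Go 1.16+ for io.Discard, not the removed exp API) parses the example template and surfaces the error at execution time:

    package main

    import (
        "fmt"
        "html/template"
        "io"
    )

    func main() {
        // {{.X}} may be a URL prefix or ordinary HTML text depending on whether
        // the {{if}} branch emitted the opening quote, so no single escaping
        // context can be computed for it.
        t := template.Must(template.New("t").Parse(`{{if .C}}<a href="{{end}}{{.X}}`))

        err := t.Execute(io.Discard, struct {
            C bool
            X string
        }{true, "x"})
        fmt.Println(err) // reports that the {{if}} branches end in different contexts
    }
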
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package html
-
-import (
- "bytes"
- "fmt"
- "html"
- "template"
- "template/parse"
-)
-
-// Escape rewrites each action in the template to guarantee that the output is
-// properly escaped.
-func Escape(t *template.Template) (*template.Template, error) {
- var s template.Set
- s.Add(t)
- if _, err := EscapeSet(&s, t.Name()); err != nil {
- return nil, err
- }
- // TODO: if s contains cloned dependencies due to self-recursion
- // cross-context, error out.
- return t, nil
-}
-
-// EscapeSet rewrites the template set to guarantee that the output of any of
-// the named templates is properly escaped.
-// Names should include the names of all templates that might be Executed but
-// need not include helper templates.
-// If no error is returned, then the named templates have been modified.
-// Otherwise the named templates have been rendered unusable.
-func EscapeSet(s *template.Set, names ...string) (*template.Set, error) {
- if len(names) == 0 {
- // TODO: Maybe add a method to Set to enumerate template names
- // and use those instead.
- return nil, &Error{ErrNoNames, "", 0, "must specify names of top level templates"}
- }
- e := newEscaper(s)
- for _, name := range names {
- c, _ := e.escapeTree(context{}, name, 0)
- var err error
- if c.err != nil {
- err, c.err.Name = c.err, name
- } else if c.state != stateText {
- err = &Error{ErrEndContext, name, 0, fmt.Sprintf("ends in a non-text context: %v", c)}
- }
- if err != nil {
- // Prevent execution of unsafe templates.
- for _, name := range names {
- if t := s.Template(name); t != nil {
- t.Tree = nil
- }
- }
- return nil, err
- }
- }
- e.commit()
- return s, nil
-}
-
-// funcMap maps command names to functions that render their inputs safe.
-var funcMap = template.FuncMap{
- "exp_template_html_attrescaper": attrEscaper,
- "exp_template_html_commentescaper": commentEscaper,
- "exp_template_html_cssescaper": cssEscaper,
- "exp_template_html_cssvaluefilter": cssValueFilter,
- "exp_template_html_htmlnamefilter": htmlNameFilter,
- "exp_template_html_htmlescaper": htmlEscaper,
- "exp_template_html_jsregexpescaper": jsRegexpEscaper,
- "exp_template_html_jsstrescaper": jsStrEscaper,
- "exp_template_html_jsvalescaper": jsValEscaper,
- "exp_template_html_nospaceescaper": htmlNospaceEscaper,
- "exp_template_html_rcdataescaper": rcdataEscaper,
- "exp_template_html_urlescaper": urlEscaper,
- "exp_template_html_urlfilter": urlFilter,
- "exp_template_html_urlnormalizer": urlNormalizer,
-}
-
-// equivEscapers matches contextual escapers to equivalent template builtins.
-var equivEscapers = map[string]string{
- "exp_template_html_attrescaper": "html",
- "exp_template_html_htmlescaper": "html",
- "exp_template_html_nospaceescaper": "html",
- "exp_template_html_rcdataescaper": "html",
- "exp_template_html_urlescaper": "urlquery",
- "exp_template_html_urlnormalizer": "urlquery",
-}
-
-// escaper collects type inferences about templates and changes needed to make
-// templates injection safe.
-type escaper struct {
- // set is the template set being escaped.
- set *template.Set
- // output[templateName] is the output context for a templateName that
- // has been mangled to include its input context.
- output map[string]context
- // derived[c.mangle(name)] maps to a template derived from the template
- // named name templateName for the start context c.
- derived map[string]*template.Template
- // called[templateName] is a set of called mangled template names.
- called map[string]bool
- // xxxNodeEdits are the accumulated edits to apply during commit.
- // Such edits are not applied immediately in case a template set
- // executes a given template in different escaping contexts.
- actionNodeEdits map[*parse.ActionNode][]string
- templateNodeEdits map[*parse.TemplateNode]string
- textNodeEdits map[*parse.TextNode][]byte
-}
-
-// newEscaper creates a blank escaper for the given set.
-func newEscaper(s *template.Set) *escaper {
- return &escaper{
- s,
- map[string]context{},
- map[string]*template.Template{},
- map[string]bool{},
- map[*parse.ActionNode][]string{},
- map[*parse.TemplateNode]string{},
- map[*parse.TextNode][]byte{},
- }
-}
-
-// filterFailsafe is an innocuous word that is emitted in place of unsafe values
-// by sanitizer functions. It is not a keyword in any programming language,
-// contains no special characters, is not empty, and when it appears in output
-// it is distinct enough that a developer can find the source of the problem
-// via a search engine.
-const filterFailsafe = "ZgotmplZ"
-
-// escape escapes a template node.
-func (e *escaper) escape(c context, n parse.Node) context {
- switch n := n.(type) {
- case *parse.ActionNode:
- return e.escapeAction(c, n)
- case *parse.IfNode:
- return e.escapeBranch(c, &n.BranchNode, "if")
- case *parse.ListNode:
- return e.escapeList(c, n)
- case *parse.RangeNode:
- return e.escapeBranch(c, &n.BranchNode, "range")
- case *parse.TemplateNode:
- return e.escapeTemplate(c, n)
- case *parse.TextNode:
- return e.escapeText(c, n)
- case *parse.WithNode:
- return e.escapeBranch(c, &n.BranchNode, "with")
- }
- panic("escaping " + n.String() + " is unimplemented")
-}
-
-// escapeAction escapes an action template node.
-func (e *escaper) escapeAction(c context, n *parse.ActionNode) context {
- if len(n.Pipe.Decl) != 0 {
- // A local variable assignment, not an interpolation.
- return c
- }
- c = nudge(c)
- s := make([]string, 0, 3)
- switch c.state {
- case stateError:
- return c
- case stateURL, stateCSSDqStr, stateCSSSqStr, stateCSSDqURL, stateCSSSqURL, stateCSSURL:
- switch c.urlPart {
- case urlPartNone:
- s = append(s, "exp_template_html_urlfilter")
- fallthrough
- case urlPartPreQuery:
- switch c.state {
- case stateCSSDqStr, stateCSSSqStr:
- s = append(s, "exp_template_html_cssescaper")
- default:
- s = append(s, "exp_template_html_urlnormalizer")
- }
- case urlPartQueryOrFrag:
- s = append(s, "exp_template_html_urlescaper")
- case urlPartUnknown:
- return context{
- state: stateError,
- err: errorf(ErrAmbigContext, n.Line, "%s appears in an ambiguous URL context", n),
- }
- default:
- panic(c.urlPart.String())
- }
- case stateJS:
- s = append(s, "exp_template_html_jsvalescaper")
- // A slash after a value starts a div operator.
- c.jsCtx = jsCtxDivOp
- case stateJSDqStr, stateJSSqStr:
- s = append(s, "exp_template_html_jsstrescaper")
- case stateJSRegexp:
- s = append(s, "exp_template_html_jsregexpescaper")
- case stateCSS:
- s = append(s, "exp_template_html_cssvaluefilter")
- case stateText:
- s = append(s, "exp_template_html_htmlescaper")
- case stateRCDATA:
- s = append(s, "exp_template_html_rcdataescaper")
- case stateAttr:
- // Handled below in delim check.
- case stateAttrName, stateTag:
- c.state = stateAttrName
- s = append(s, "exp_template_html_htmlnamefilter")
- default:
- if isComment(c.state) {
- s = append(s, "exp_template_html_commentescaper")
- } else {
- panic("unexpected state " + c.state.String())
- }
- }
- switch c.delim {
- case delimNone:
- // No extra-escaping needed for raw text content.
- case delimSpaceOrTagEnd:
- s = append(s, "exp_template_html_nospaceescaper")
- default:
- s = append(s, "exp_template_html_attrescaper")
- }
- e.editActionNode(n, s)
- return c
-}
-
-// ensurePipelineContains ensures that the pipeline has commands with
-// the identifiers in s in order.
-// If the pipeline already has some of the sanitizers, do not interfere.
-// For example, if p is (.X | html) and s is ["escapeJSVal", "html"] then it
-// has one matching, "html", and one to insert, "escapeJSVal", to produce
-// (.X | escapeJSVal | html).
-func ensurePipelineContains(p *parse.PipeNode, s []string) {
- if len(s) == 0 {
- return
- }
- n := len(p.Cmds)
- // Find the identifiers at the end of the command chain.
- idents := p.Cmds
- for i := n - 1; i >= 0; i-- {
- if cmd := p.Cmds[i]; len(cmd.Args) != 0 {
- if id, ok := cmd.Args[0].(*parse.IdentifierNode); ok {
- if id.Ident == "noescape" {
- return
- }
- continue
- }
- }
- idents = p.Cmds[i+1:]
- }
- dups := 0
- for _, id := range idents {
- if escFnsEq(s[dups], (id.Args[0].(*parse.IdentifierNode)).Ident) {
- dups++
- if dups == len(s) {
- return
- }
- }
- }
- newCmds := make([]*parse.CommandNode, n-len(idents), n+len(s)-dups)
- copy(newCmds, p.Cmds)
- // Merge existing identifier commands with the sanitizers needed.
- for _, id := range idents {
- i := indexOfStr((id.Args[0].(*parse.IdentifierNode)).Ident, s, escFnsEq)
- if i != -1 {
- for _, name := range s[:i] {
- newCmds = appendCmd(newCmds, newIdentCmd(name))
- }
- s = s[i+1:]
- }
- newCmds = appendCmd(newCmds, id)
- }
- // Create any remaining sanitizers.
- for _, name := range s {
- newCmds = appendCmd(newCmds, newIdentCmd(name))
- }
- p.Cmds = newCmds
-}
-
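
The merge performed by ensurePipelineContains can be hard to see through the parse-tree plumbing. The toy function below (a hypothetical helper operating on plain identifier strings, ignoring the noescape and equivalence-class handling above) shows the core idea: keep sanitizers the author already wrote and insert the missing ones in the required order, so (.X | html) with required [escapeJSVal html] becomes (.X | escapeJSVal | html).

    package main

    import "fmt"

    // ensureContains is a simplified model of ensurePipelineContains: existing
    // commands are assumed to be sanitizer names that appear in want in order.
    func ensureContains(pipeline, want []string) []string {
        out := make([]string, 0, len(pipeline)+len(want))
        for _, cmd := range pipeline {
            // Emit required sanitizers that must precede the existing command.
            for len(want) > 0 && want[0] != cmd {
                out = append(out, want[0])
                want = want[1:]
            }
            if len(want) > 0 && want[0] == cmd {
                want = want[1:]
            }
            out = append(out, cmd)
        }
        // Append any required sanitizers not yet present.
        return append(out, want...)
    }

    func main() {
        fmt.Println(ensureContains([]string{"html"}, []string{"escapeJSVal", "html"}))
        // Output: [escapeJSVal html]
    }
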
-// redundantFuncs[a][b] implies that funcMap[b](funcMap[a](x)) == funcMap[a](x)
-// for all x.
-var redundantFuncs = map[string]map[string]bool{
- "exp_template_html_commentescaper": {
- "exp_template_html_attrescaper": true,
- "exp_template_html_nospaceescaper": true,
- "exp_template_html_htmlescaper": true,
- },
- "exp_template_html_cssescaper": {
- "exp_template_html_attrescaper": true,
- },
- "exp_template_html_jsregexpescaper": {
- "exp_template_html_attrescaper": true,
- },
- "exp_template_html_jsstrescaper": {
- "exp_template_html_attrescaper": true,
- },
- "exp_template_html_urlescaper": {
- "exp_template_html_urlnormalizer": true,
- },
-}
-
-// appendCmd appends the given command to the end of the command pipeline
-// unless it is redundant with the last command.
-func appendCmd(cmds []*parse.CommandNode, cmd *parse.CommandNode) []*parse.CommandNode {
- if n := len(cmds); n != 0 {
- last, ok := cmds[n-1].Args[0].(*parse.IdentifierNode)
- next, _ := cmd.Args[0].(*parse.IdentifierNode)
- if ok && redundantFuncs[last.Ident][next.Ident] {
- return cmds
- }
- }
- return append(cmds, cmd)
-}
-
-// indexOfStr is the first i such that eq(s, strs[i]) or -1 if s was not found.
-func indexOfStr(s string, strs []string, eq func(a, b string) bool) int {
- for i, t := range strs {
- if eq(s, t) {
- return i
- }
- }
- return -1
-}
-
-// escFnsEq returns whether the two escaping functions are equivalent.
-func escFnsEq(a, b string) bool {
- if e := equivEscapers[a]; e != "" {
- a = e
- }
- if e := equivEscapers[b]; e != "" {
- b = e
- }
- return a == b
-}
-
-// newIdentCmd produces a command containing a single identifier node.
-func newIdentCmd(identifier string) *parse.CommandNode {
- return &parse.CommandNode{
- NodeType: parse.NodeCommand,
- Args: []parse.Node{parse.NewIdentifier(identifier)},
- }
-}
-
-// nudge returns the context that would result from following empty string
-// transitions from the input context.
-// For example, parsing:
-// `<a href=`
-// will end in context{stateBeforeValue, attrURL}, but parsing one extra rune:
-// `<a href=x`
-// will end in context{stateURL, delimSpaceOrTagEnd, ...}.
-// There are two transitions that happen when the 'x' is seen:
-// (1) Transition from a before-value state to a start-of-value state without
-// consuming any character.
-// (2) Consume 'x' and transition past the first value character.
-// In this case, nudging produces the context after (1) happens.
-func nudge(c context) context {
- switch c.state {
- case stateTag:
- // In `<foo {{.}}`, the action should emit an attribute.
- c.state = stateAttrName
- case stateBeforeValue:
- // In `<foo bar={{.}}`, the action is an undelimited value.
- c.state, c.delim, c.attr = attrStartStates[c.attr], delimSpaceOrTagEnd, attrNone
- case stateAfterName:
- // In `<foo bar {{.}}`, the action is an attribute name.
- c.state, c.attr = stateAttrName, attrNone
- }
- return c
-}
-
-// join joins the two contexts of a branch template node. The result is an
-// error context if either of the input contexts are error contexts, or if the
-// the input contexts differ.
-func join(a, b context, line int, nodeName string) context {
- if a.state == stateError {
- return a
- }
- if b.state == stateError {
- return b
- }
- if a.eq(b) {
- return a
- }
-
- c := a
- c.urlPart = b.urlPart
- if c.eq(b) {
- // The contexts differ only by urlPart.
- c.urlPart = urlPartUnknown
- return c
- }
-
- c = a
- c.jsCtx = b.jsCtx
- if c.eq(b) {
- // The contexts differ only by jsCtx.
- c.jsCtx = jsCtxUnknown
- return c
- }
-
- // Allow a nudged context to join with an unnudged one.
- // This means that
- // <p title={{if .C}}{{.}}{{end}}
- // ends in an unquoted value state even though the else branch
- // ends in stateBeforeValue.
- if c, d := nudge(a), nudge(b); !(c.eq(a) && d.eq(b)) {
- if e := join(c, d, line, nodeName); e.state != stateError {
- return e
- }
- }
-
- return context{
- state: stateError,
- err: errorf(ErrBranchEnd, line, "{{%s}} branches end in different contexts: %v, %v", nodeName, a, b),
- }
-}
-
-// escapeBranch escapes a branch template node: "if", "range" and "with".
-func (e *escaper) escapeBranch(c context, n *parse.BranchNode, nodeName string) context {
- c0 := e.escapeList(c, n.List)
- if nodeName == "range" && c0.state != stateError {
- // The "true" branch of a "range" node can execute multiple times.
- // We check that executing n.List once results in the same context
- // as executing n.List twice.
- c1, _ := e.escapeListConditionally(c0, n.List, nil)
- c0 = join(c0, c1, n.Line, nodeName)
- if c0.state == stateError {
- // Make clear that this is a problem on loop re-entry
- // since developers tend to overlook that branch when
- // debugging templates.
- c0.err.Line = n.Line
- c0.err.Description = "on range loop re-entry: " + c0.err.Description
- return c0
- }
- }
- c1 := e.escapeList(c, n.ElseList)
- return join(c0, c1, n.Line, nodeName)
-}
-
-// escapeList escapes a list template node.
-func (e *escaper) escapeList(c context, n *parse.ListNode) context {
- if n == nil {
- return c
- }
- for _, m := range n.Nodes {
- c = e.escape(c, m)
- }
- return c
-}
-
-// escapeListConditionally escapes a list node but only preserves edits and
-// inferences in e if the inferences and output context satisfy filter.
-// It returns the best guess at an output context, and the result of the filter
-// which is the same as whether e was updated.
-func (e *escaper) escapeListConditionally(c context, n *parse.ListNode, filter func(*escaper, context) bool) (context, bool) {
- e1 := newEscaper(e.set)
- // Make type inferences available to f.
- for k, v := range e.output {
- e1.output[k] = v
- }
- c = e1.escapeList(c, n)
- ok := filter != nil && filter(e1, c)
- if ok {
- // Copy inferences and edits from e1 back into e.
- for k, v := range e1.output {
- e.output[k] = v
- }
- for k, v := range e1.derived {
- e.derived[k] = v
- }
- for k, v := range e1.called {
- e.called[k] = v
- }
- for k, v := range e1.actionNodeEdits {
- e.editActionNode(k, v)
- }
- for k, v := range e1.templateNodeEdits {
- e.editTemplateNode(k, v)
- }
- for k, v := range e1.textNodeEdits {
- e.editTextNode(k, v)
- }
- }
- return c, ok
-}
-
-// escapeTemplate escapes a {{template}} call node.
-func (e *escaper) escapeTemplate(c context, n *parse.TemplateNode) context {
- c, name := e.escapeTree(c, n.Name, n.Line)
- if name != n.Name {
- e.editTemplateNode(n, name)
- }
- return c
-}
-
-// escapeTree escapes the named template starting in the given context as
-// necessary and returns its output context.
-func (e *escaper) escapeTree(c context, name string, line int) (context, string) {
- // Mangle the template name with the input context to produce a reliable
- // identifier.
- dname := c.mangle(name)
- e.called[dname] = true
- if out, ok := e.output[dname]; ok {
- // Already escaped.
- return out, dname
- }
- t := e.template(name)
- if t == nil {
- return context{
- state: stateError,
- err: errorf(ErrNoSuchTemplate, line, "no such template %s", name),
- }, dname
- }
- if dname != name {
- // Use any template derived during an earlier call to EscapeSet
- // with different top level templates, or clone if necessary.
- dt := e.template(dname)
- if dt == nil {
- dt = template.New(dname)
- dt.Tree = &parse.Tree{Name: dname, Root: cloneList(t.Root)}
- e.derived[dname] = dt
- }
- t = dt
- }
- return e.computeOutCtx(c, t), dname
-}
-
-// computeOutCtx takes a template and its start context and computes the output
-// context while storing any inferences in e.
-func (e *escaper) computeOutCtx(c context, t *template.Template) context {
- // Propagate context over the body.
- c1, ok := e.escapeTemplateBody(c, t)
- if !ok {
- // Look for a fixed point by assuming c1 as the output context.
- if c2, ok2 := e.escapeTemplateBody(c1, t); ok2 {
- c1, ok = c2, true
- }
- // Use c1 as the error context if neither assumption worked.
- }
- if !ok && c1.state != stateError {
- return context{
- state: stateError,
- // TODO: Find the first node with a line in t.Tree.Root
- err: errorf(ErrOutputContext, 0, "cannot compute output context for template %s", t.Name()),
- }
- }
- return c1
-}
-
-// escapeTemplateBody escapes the given template assuming the given output
-// context, and returns the best guess at the output context and whether the
-// assumption was correct.
-func (e *escaper) escapeTemplateBody(c context, t *template.Template) (context, bool) {
- filter := func(e1 *escaper, c1 context) bool {
- if c1.state == stateError {
- // Do not update the input escaper, e.
- return false
- }
- if !e1.called[t.Name()] {
- // If t is not recursively called, then c1 is an
- // accurate output context.
- return true
- }
- // c1 is accurate if it matches our assumed output context.
- return c.eq(c1)
- }
- // We need to assume an output context so that recursive template calls
- // take the fast path out of escapeTree instead of infinitely recursing.
- // Naively assuming that the input context is the same as the output
- // works >90% of the time.
- e.output[t.Name()] = c
- return e.escapeListConditionally(c, t.Tree.Root, filter)
-}
-
-// delimEnds maps each delim to a string of characters that terminate it.
-var delimEnds = [...]string{
- delimDoubleQuote: `"`,
- delimSingleQuote: "'",
- // Determined empirically by running the below in various browsers.
- // var div = document.createElement("DIV");
- // for (var i = 0; i < 0x10000; ++i) {
- // div.innerHTML = "<span title=x" + String.fromCharCode(i) + "-bar>";
- // if (div.getElementsByTagName("SPAN")[0].title.indexOf("bar") < 0)
- // document.write("<p>U+" + i.toString(16));
- // }
- delimSpaceOrTagEnd: " \t\n\f\r>",
-}
-
-var doctypeBytes = []byte("<!DOCTYPE")
-
-// escapeText escapes a text template node.
-func (e *escaper) escapeText(c context, n *parse.TextNode) context {
- s, written, i, b := n.Text, 0, 0, new(bytes.Buffer)
- for i != len(s) {
- c1, nread := contextAfterText(c, s[i:])
- i1 := i + nread
- if c.state == stateText || c.state == stateRCDATA {
- end := i1
- if c1.state != c.state {
- for j := end - 1; j >= i; j-- {
- if s[j] == '<' {
- end = j
- break
- }
- }
- }
- for j := i; j < end; j++ {
- if s[j] == '<' && !bytes.HasPrefix(s[j:], doctypeBytes) {
- b.Write(s[written:j])
- b.WriteString("&lt;")
- written = j + 1
- }
- }
- } else if isComment(c.state) && c.delim == delimNone {
- switch c.state {
- case stateJSBlockCmt:
- // http://es5.github.com/#x7.4:
- // "Comments behave like white space and are
- // discarded except that, if a MultiLineComment
- // contains a line terminator character, then
- // the entire comment is considered to be a
- // LineTerminator for purposes of parsing by
- // the syntactic grammar."
- if bytes.IndexAny(s[written:i1], "\n\r\u2028\u2029") != -1 {
- b.WriteByte('\n')
- } else {
- b.WriteByte(' ')
- }
- case stateCSSBlockCmt:
- b.WriteByte(' ')
- }
- written = i1
- }
- if c.state != c1.state && isComment(c1.state) && c1.delim == delimNone {
- // Preserve the portion between written and the comment start.
- cs := i1 - 2
- if c1.state == stateHTMLCmt {
- // "<!--" instead of "/*" or "//"
- cs -= 2
- }
- b.Write(s[written:cs])
- written = i1
- }
- if i == i1 && c.state == c1.state {
- panic(fmt.Sprintf("infinite loop from %v to %v on %q..%q", c, c1, s[:i], s[i:]))
- }
- c, i = c1, i1
- }
-
- if written != 0 && c.state != stateError {
- if !isComment(c.state) || c.delim != delimNone {
- b.Write(n.Text[written:])
- }
- e.editTextNode(n, b.Bytes())
- }
- return c
-}
-
-// contextAfterText starts in context c, consumes some tokens from the front of
-// s, then returns the context after those tokens and the unprocessed suffix.
-func contextAfterText(c context, s []byte) (context, int) {
- if c.delim == delimNone {
- c1, i := tSpecialTagEnd(c, s)
- if i == 0 {
- // A special end tag (`</script>`) has been seen and
- // all content preceding it has been consumed.
- return c1, 0
- }
- // Consider all content up to any end tag.
- return transitionFunc[c.state](c, s[:i])
- }
-
- i := bytes.IndexAny(s, delimEnds[c.delim])
- if i == -1 {
- i = len(s)
- }
- if c.delim == delimSpaceOrTagEnd {
- // http://www.w3.org/TR/html5/tokenization.html#attribute-value-unquoted-state
- // lists the runes below as error characters.
- // Error out because HTML parsers may differ on whether
- // "<a id= onclick=f(" ends inside id's or onchange's value,
- // "<a class=`foo " ends inside a value,
- // "<a style=font:'Arial'" needs open-quote fixup.
- // IE treats '`' as a quotation character.
- if j := bytes.IndexAny(s[:i], "\"'<=`"); j >= 0 {
- return context{
- state: stateError,
- err: errorf(ErrBadHTML, 0, "%q in unquoted attr: %q", s[j:j+1], s[:i]),
- }, len(s)
- }
- }
- if i == len(s) {
- // Remain inside the attribute.
- // Decode the value so non-HTML rules can easily handle
- // <button onclick="alert(&quot;Hi!&quot;)">
- // without having to entity decode token boundaries.
- for u := []byte(html.UnescapeString(string(s))); len(u) != 0; {
- c1, i1 := transitionFunc[c.state](c, u)
- c, u = c1, u[i1:]
- }
- return c, len(s)
- }
- if c.delim != delimSpaceOrTagEnd {
- // Consume any quote.
- i++
- }
- // On exiting an attribute, we discard all state information
- // except the state and element.
- return context{state: stateTag, element: c.element}, i
-}
-
-// editActionNode records a change to an action pipeline for later commit.
-func (e *escaper) editActionNode(n *parse.ActionNode, cmds []string) {
- if _, ok := e.actionNodeEdits[n]; ok {
- panic(fmt.Sprintf("node %s shared between templates", n))
- }
- e.actionNodeEdits[n] = cmds
-}
-
-// editTemplateNode records a change to a {{template}} callee for later commit.
-func (e *escaper) editTemplateNode(n *parse.TemplateNode, callee string) {
- if _, ok := e.templateNodeEdits[n]; ok {
- panic(fmt.Sprintf("node %s shared between templates", n))
- }
- e.templateNodeEdits[n] = callee
-}
-
-// editTextNode records a change to a text node for later commit.
-func (e *escaper) editTextNode(n *parse.TextNode, text []byte) {
- if _, ok := e.textNodeEdits[n]; ok {
- panic(fmt.Sprintf("node %s shared between templates", n))
- }
- e.textNodeEdits[n] = text
-}
-
-// commit applies changes to actions and template calls needed to contextually
-// autoescape content and adds any derived templates to the set.
-func (e *escaper) commit() {
- for name, _ := range e.output {
- e.template(name).Funcs(funcMap)
- }
- for _, t := range e.derived {
- e.set.Add(t)
- }
- for n, s := range e.actionNodeEdits {
- ensurePipelineContains(n.Pipe, s)
- }
- for n, name := range e.templateNodeEdits {
- n.Name = name
- }
- for n, s := range e.textNodeEdits {
- n.Text = s
- }
-}
-
-// template returns the named template given a mangled template name.
-func (e *escaper) template(name string) *template.Template {
- t := e.set.Template(name)
- if t == nil {
- t = e.derived[name]
- }
- return t
-}
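
The filterFailsafe constant above is also what shows up at run time when unsafe data reaches a URL or CSS sink, as described in the ErrorCode comments. A small sketch with html/template (the Go 1 successor to this code) showing the documented <img src="#ZgotmplZ"> behavior:

    package main

    import (
        "html/template"
        "os"
    )

    func main() {
        // The URL filter runs when the template executes, so a dangerous
        // scheme in the data is replaced rather than rejected at escape time.
        t := template.Must(template.New("t").Parse("<img src=\"{{.X}}\">\n"))
        t.Execute(os.Stdout, map[string]string{"X": "javascript:alert(1337)"})
        // Output: <img src="#ZgotmplZ">
    }
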
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package html
-
-import (
- "bytes"
- "fmt"
- "json"
- "strings"
- "template"
- "template/parse"
- "testing"
-)
-
-type badMarshaler struct{}
-
-func (x *badMarshaler) MarshalJSON() ([]byte, error) {
- // Keys in valid JSON must be double quoted as must all strings.
- return []byte("{ foo: 'not quite valid JSON' }"), nil
-}
-
-type goodMarshaler struct{}
-
-func (x *goodMarshaler) MarshalJSON() ([]byte, error) {
- return []byte(`{ "<foo>": "O'Reilly" }`), nil
-}
-
-func TestEscape(t *testing.T) {
- var data = struct {
- F, T bool
- C, G, H string
- A, E []string
- B, M json.Marshaler
- N int
- Z *int
- W HTML
- }{
- F: false,
- T: true,
- C: "<Cincinatti>",
- G: "<Goodbye>",
- H: "<Hello>",
- A: []string{"<a>", "<b>"},
- E: []string{},
- N: 42,
- B: &badMarshaler{},
- M: &goodMarshaler{},
- Z: nil,
- W: HTML(`¡<b class="foo">Hello</b>, <textarea>O'World</textarea>!`),
- }
-
- tests := []struct {
- name string
- input string
- output string
- }{
- {
- "if",
- "{{if .T}}Hello{{end}}, {{.C}}!",
- "Hello, <Cincinatti>!",
- },
- {
- "else",
- "{{if .F}}{{.H}}{{else}}{{.G}}{{end}}!",
- "<Goodbye>!",
- },
- {
- "overescaping1",
- "Hello, {{.C | html}}!",
- "Hello, <Cincinatti>!",
- },
- {
- "overescaping2",
- "Hello, {{html .C}}!",
- "Hello, <Cincinatti>!",
- },
- {
- "overescaping3",
- "{{with .C}}{{$msg := .}}Hello, {{$msg}}!{{end}}",
- "Hello, <Cincinatti>!",
- },
- {
- "assignment",
- "{{if $x := .H}}{{$x}}{{end}}",
- "<Hello>",
- },
- {
- "withBody",
- "{{with .H}}{{.}}{{end}}",
- "<Hello>",
- },
- {
- "withElse",
- "{{with .E}}{{.}}{{else}}{{.H}}{{end}}",
- "<Hello>",
- },
- {
- "rangeBody",
- "{{range .A}}{{.}}{{end}}",
- "<a><b>",
- },
- {
- "rangeElse",
- "{{range .E}}{{.}}{{else}}{{.H}}{{end}}",
- "<Hello>",
- },
- {
- "nonStringValue",
- "{{.T}}",
- "true",
- },
- {
- "constant",
- `<a href="/search?q={{"'a<b'"}}">`,
- `<a href="/search?q=%27a%3cb%27">`,
- },
- {
- "multipleAttrs",
- "<a b=1 c={{.H}}>",
- "<a b=1 c=<Hello>>",
- },
- {
- "urlStartRel",
- `<a href='{{"/foo/bar?a=b&c=d"}}'>`,
- `<a href='/foo/bar?a=b&amp;c=d'>`,
- },
- {
- "urlStartAbsOk",
- `<a href='{{"http://example.com/foo/bar?a=b&c=d"}}'>`,
- `<a href='http://example.com/foo/bar?a=b&amp;c=d'>`,
- },
- {
- "protocolRelativeURLStart",
- `<a href='{{"//example.com:8000/foo/bar?a=b&c=d"}}'>`,
- `<a href='//example.com:8000/foo/bar?a=b&amp;c=d'>`,
- },
- {
- "pathRelativeURLStart",
- `<a href="{{"/javascript:80/foo/bar"}}">`,
- `<a href="/javascript:80/foo/bar">`,
- },
- {
- "dangerousURLStart",
- `<a href='{{"javascript:alert(%22pwned%22)"}}'>`,
- `<a href='#ZgotmplZ'>`,
- },
- {
- "dangerousURLStart2",
- `<a href=' {{"javascript:alert(%22pwned%22)"}}'>`,
- `<a href=' #ZgotmplZ'>`,
- },
- {
- "nonHierURL",
- `<a href={{"mailto:Muhammed \"The Greatest\" Ali <m.ali@example.com>"}}>`,
- `<a href=mailto:Muhammed%20%22The%20Greatest%22%20Ali%20%3cm.ali@example.com%3e>`,
- },
- {
- "urlPath",
- `<a href='http://{{"javascript:80"}}/foo'>`,
- `<a href='http://javascript:80/foo'>`,
- },
- {
- "urlQuery",
- `<a href='/search?q={{.H}}'>`,
- `<a href='/search?q=%3cHello%3e'>`,
- },
- {
- "urlFragment",
- `<a href='/faq#{{.H}}'>`,
- `<a href='/faq#%3cHello%3e'>`,
- },
- {
- "urlBranch",
- `<a href="{{if .F}}/foo?a=b{{else}}/bar{{end}}">`,
- `<a href="/bar">`,
- },
- {
- "urlBranchConflictMoot",
- `<a href="{{if .T}}/foo?a={{else}}/bar#{{end}}{{.C}}">`,
- `<a href="/foo?a=%3cCincinatti%3e">`,
- },
- {
- "jsStrValue",
- "<button onclick='alert({{.H}})'>",
- `<button onclick='alert(&#34;\u003cHello\u003e&#34;)'>`,
- },
- {
- "jsNumericValue",
- "<button onclick='alert({{.N}})'>",
- `<button onclick='alert( 42 )'>`,
- },
- {
- "jsBoolValue",
- "<button onclick='alert({{.T}})'>",
- `<button onclick='alert( true )'>`,
- },
- {
- "jsNilValue",
- "<button onclick='alert(typeof{{.Z}})'>",
- `<button onclick='alert(typeof null )'>`,
- },
- {
- "jsObjValue",
- "<button onclick='alert({{.A}})'>",
- `<button onclick='alert([&#34;\u003ca\u003e&#34;,&#34;\u003cb\u003e&#34;])'>`,
- },
- {
- "jsObjValueScript",
- "<script>alert({{.A}})</script>",
- `<script>alert(["\u003ca\u003e","\u003cb\u003e"])</script>`,
- },
- {
- "jsObjValueNotOverEscaped",
- "<button onclick='alert({{.A | html}})'>",
- `<button onclick='alert([&#34;\u003ca\u003e&#34;,&#34;\u003cb\u003e&#34;])'>`,
- },
- {
- "jsStr",
- "<button onclick='alert("{{.H}}")'>",
- `<button onclick='alert("\x3cHello\x3e")'>`,
- },
- {
- "badMarshaller",
- `<button onclick='alert(1/{{.B}}in numbers)'>`,
- `<button onclick='alert(1/ /* json: error calling MarshalJSON for type *html.badMarshaler: invalid character 'f' looking for beginning of object key string */null in numbers)'>`,
- },
- {
- "jsMarshaller",
- `<button onclick='alert({{.M}})'>`,
- `<button onclick='alert({"<foo>":"O'Reilly"})'>`,
- },
- {
- "jsStrNotUnderEscaped",
- "<button onclick='alert({{.C | urlquery}})'>",
- // URL escaped, then quoted for JS.
- `<button onclick='alert(&#34;%3CCincinatti%3E&#34;)'>`,
- },
- {
- "jsRe",
- `<button onclick='alert(/{{"foo+bar"}}/.test(""))'>`,
- `<button onclick='alert(/foo\x2bbar/.test(""))'>`,
- },
- {
- "jsReBlank",
- `<script>alert(/{{""}}/.test(""));</script>`,
- `<script>alert(/(?:)/.test(""));</script>`,
- },
- {
- "jsReAmbigOk",
- `<script>{{if true}}var x = 1{{end}}</script>`,
- // The {if} ends in an ambiguous jsCtx but there is
- // no slash following so we shouldn't care.
- `<script>var x = 1</script>`,
- },
- {
- "styleBidiKeywordPassed",
- `<p style="dir: {{"ltr"}}">`,
- `<p style="dir: ltr">`,
- },
- {
- "styleBidiPropNamePassed",
- `<p style="border-{{"left"}}: 0; border-{{"right"}}: 1in">`,
- `<p style="border-left: 0; border-right: 1in">`,
- },
- {
- "styleExpressionBlocked",
- `<p style="width: {{"expression(alert(1337))"}}">`,
- `<p style="width: ZgotmplZ">`,
- },
- {
- "styleTagSelectorPassed",
- `<style>{{"p"}} { color: pink }</style>`,
- `<style>p { color: pink }</style>`,
- },
- {
- "styleIDPassed",
- `<style>p{{"#my-ID"}} { font: Arial }</style>`,
- `<style>p#my-ID { font: Arial }</style>`,
- },
- {
- "styleClassPassed",
- `<style>p{{".my_class"}} { font: Arial }</style>`,
- `<style>p.my_class { font: Arial }</style>`,
- },
- {
- "styleQuantityPassed",
- `<a style="left: {{"2em"}}; top: {{0}}">`,
- `<a style="left: 2em; top: 0">`,
- },
- {
- "stylePctPassed",
- `<table style=width:{{"100%"}}>`,
- `<table style=width:100%>`,
- },
- {
- "styleColorPassed",
- `<p style="color: {{"#8ff"}}; background: {{"#000"}}">`,
- `<p style="color: #8ff; background: #000">`,
- },
- {
- "styleObfuscatedExpressionBlocked",
- `<p style="width: {{" e\78preS\0Sio/**/n(alert(1337))"}}">`,
- `<p style="width: ZgotmplZ">`,
- },
- {
- "styleMozBindingBlocked",
- `<p style="{{"-moz-binding(alert(1337))"}}: ...">`,
- `<p style="ZgotmplZ: ...">`,
- },
- {
- "styleObfuscatedMozBindingBlocked",
- `<p style="{{" -mo\7a-B\0I/**/nding(alert(1337))"}}: ...">`,
- `<p style="ZgotmplZ: ...">`,
- },
- {
- "styleFontNameString",
- `<p style='font-family: "{{"Times New Roman"}}"'>`,
- `<p style='font-family: "Times New Roman"'>`,
- },
- {
- "styleFontNameString",
- `<p style='font-family: "{{"Times New Roman"}}", "{{"sans-serif"}}"'>`,
- `<p style='font-family: "Times New Roman", "sans-serif"'>`,
- },
- {
- "styleFontNameUnquoted",
- `<p style='font-family: {{"Times New Roman"}}'>`,
- `<p style='font-family: Times New Roman'>`,
- },
- {
- "styleURLQueryEncoded",
- `<p style="background: url(/img?name={{"O'Reilly Animal(1)<2>.png"}})">`,
- `<p style="background: url(/img?name=O%27Reilly%20Animal%281%29%3c2%3e.png)">`,
- },
- {
- "styleQuotedURLQueryEncoded",
- `<p style="background: url('/img?name={{"O'Reilly Animal(1)<2>.png"}}')">`,
- `<p style="background: url('/img?name=O%27Reilly%20Animal%281%29%3c2%3e.png')">`,
- },
- {
- "styleStrQueryEncoded",
- `<p style="background: '/img?name={{"O'Reilly Animal(1)<2>.png"}}'">`,
- `<p style="background: '/img?name=O%27Reilly%20Animal%281%29%3c2%3e.png'">`,
- },
- {
- "styleURLBadProtocolBlocked",
- `<a style="background: url('{{"javascript:alert(1337)"}}')">`,
- `<a style="background: url('#ZgotmplZ')">`,
- },
- {
- "styleStrBadProtocolBlocked",
- `<a style="background: '{{"vbscript:alert(1337)"}}'">`,
- `<a style="background: '#ZgotmplZ'">`,
- },
- {
- "styleStrEncodedProtocolEncoded",
- `<a style="background: '{{"javascript\\3a alert(1337)"}}'">`,
- // The CSS string 'javascript\\3a alert(1337)' does not contains a colon.
- `<a style="background: 'javascript\\3a alert\28 1337\29 '">`,
- },
- {
- "styleURLGoodProtocolPassed",
- `<a style="background: url('{{"http://oreilly.com/O'Reilly Animals(1)<2>;{}.html"}}')">`,
- `<a style="background: url('http://oreilly.com/O%27Reilly%20Animals%281%29%3c2%3e;%7b%7d.html')">`,
- },
- {
- "styleStrGoodProtocolPassed",
- `<a style="background: '{{"http://oreilly.com/O'Reilly Animals(1)<2>;{}.html"}}'">`,
- `<a style="background: 'http\3a\2f\2foreilly.com\2fO\27Reilly Animals\28 1\29\3c 2\3e\3b\7b\7d.html'">`,
- },
- {
- "styleURLEncodedForHTMLInAttr",
- `<a style="background: url('{{"/search?img=foo&size=icon"}}')">`,
- `<a style="background: url('/search?img=foo&size=icon')">`,
- },
- {
- "styleURLNotEncodedForHTMLInCdata",
- `<style>body { background: url('{{"/search?img=foo&size=icon"}}') }</style>`,
- `<style>body { background: url('/search?img=foo&size=icon') }</style>`,
- },
- {
- "styleURLMixedCase",
- `<p style="background: URL(#{{.H}})">`,
- `<p style="background: URL(#%3cHello%3e)">`,
- },
- {
- "stylePropertyPairPassed",
- `<a style='{{"color: red"}}'>`,
- `<a style='color: red'>`,
- },
- {
- "styleStrSpecialsEncoded",
- `<a style="font-family: '{{"/**/'\";:// \\"}}', "{{"/**/'\";:// \\"}}"">`,
- `<a style="font-family: '\2f**\2f\27\22\3b\3a\2f\2f \\', "\2f**\2f\27\22\3b\3a\2f\2f \\"">`,
- },
- {
- "styleURLSpecialsEncoded",
- `<a style="border-image: url({{"/**/'\";:// \\"}}), url("{{"/**/'\";:// \\"}}"), url('{{"/**/'\";:// \\"}}'), 'http://www.example.com/?q={{"/**/'\";:// \\"}}''">`,
- `<a style="border-image: url(/**/%27%22;://%20%5c), url("/**/%27%22;://%20%5c"), url('/**/%27%22;://%20%5c'), 'http://www.example.com/?q=%2f%2a%2a%2f%27%22%3b%3a%2f%2f%20%5c''">`,
- },
- {
- "HTML comment",
- "<b>Hello, <!-- name of world -->{{.C}}</b>",
- "<b>Hello, <Cincinatti></b>",
- },
- {
- "HTML comment not first < in text node.",
- "<<!-- -->!--",
- "<!--",
- },
- {
- "HTML normalization 1",
- "a < b",
- "a < b",
- },
- {
- "HTML normalization 2",
- "a << b",
- "a << b",
- },
- {
- "HTML normalization 3",
- "a<<!-- --><!-- -->b",
- "a<b",
- },
- {
- "HTML doctype not normalized",
- "<!DOCTYPE html>Hello, World!",
- "<!DOCTYPE html>Hello, World!",
- },
- {
- "No doctype injection",
- `<!{{"DOCTYPE"}}`,
- "<!DOCTYPE",
- },
- {
- "Split HTML comment",
- "<b>Hello, <!-- name of {{if .T}}city -->{{.C}}{{else}}world -->{{.W}}{{end}}</b>",
- "<b>Hello, <Cincinatti></b>",
- },
- {
- "JS line comment",
- "<script>for (;;) { if (c()) break// foo not a label\n" +
- "foo({{.T}});}</script>",
- "<script>for (;;) { if (c()) break\n" +
- "foo( true );}</script>",
- },
- {
- "JS multiline block comment",
- "<script>for (;;) { if (c()) break/* foo not a label\n" +
- " */foo({{.T}});}</script>",
- // Newline separates break from call. If newline
- // removed, then break will consume label leaving
- // code invalid.
- "<script>for (;;) { if (c()) break\n" +
- "foo( true );}</script>",
- },
- {
- "JS single-line block comment",
- "<script>for (;;) {\n" +
- "if (c()) break/* foo a label */foo;" +
- "x({{.T}});}</script>",
- // Newline separates break from call. If newline
- // removed, then break will consume label leaving
- // code invalid.
- "<script>for (;;) {\n" +
- "if (c()) break foo;" +
- "x( true );}</script>",
- },
- {
- "JS block comment flush with mathematical division",
- "<script>var a/*b*//c\nd</script>",
- "<script>var a /c\nd</script>",
- },
- {
- "JS mixed comments",
- "<script>var a/*b*///c\nd</script>",
- "<script>var a \nd</script>",
- },
- {
- "CSS comments",
- "<style>p// paragraph\n" +
- `{border: 1px/* color */{{"#00f"}}}</style>`,
- "<style>p\n" +
- "{border: 1px #00f}</style>",
- },
- {
- "JS attr block comment",
- `<a onclick="f(""); /* alert({{.H}}) */">`,
- // Attribute comment tests should pass if the comments
- // are successfully elided.
- `<a onclick="f(""); /* alert() */">`,
- },
- {
- "JS attr line comment",
- `<a onclick="// alert({{.G}})">`,
- `<a onclick="// alert()">`,
- },
- {
- "CSS attr block comment",
- `<a style="/* color: {{.H}} */">`,
- `<a style="/* color: */">`,
- },
- {
- "CSS attr line comment",
- `<a style="// color: {{.G}}">`,
- `<a style="// color: ">`,
- },
- {
- "HTML substitution commented out",
- "<p><!-- {{.H}} --></p>",
- "<p></p>",
- },
- {
- "Comment ends flush with start",
- "<!--{{.}}--><script>/*{{.}}*///{{.}}\n</script><style>/*{{.}}*///{{.}}\n</style><a onclick='/*{{.}}*///{{.}}' style='/*{{.}}*///{{.}}'>",
- "<script> \n</script><style> \n</style><a onclick='/**///' style='/**///'>",
- },
- {
- "typed HTML in text",
- `{{.W}}`,
- `¡<b class="foo">Hello</b>, <textarea>O'World</textarea>!`,
- },
- {
- "typed HTML in attribute",
- `<div title="{{.W}}">`,
- `<div title="¡Hello, O'World!">`,
- },
- {
- "typed HTML in script",
- `<button onclick="alert({{.W}})">`,
- `<button onclick="alert("&iexcl;\u003cb class=\"foo\"\u003eHello\u003c/b\u003e, \u003ctextarea\u003eO'World\u003c/textarea\u003e!")">`,
- },
- {
- "typed HTML in RCDATA",
- `<textarea>{{.W}}</textarea>`,
- `<textarea>¡<b class="foo">Hello</b>, <textarea>O'World</textarea>!</textarea>`,
- },
- {
- "range in textarea",
- "<textarea>{{range .A}}{{.}}{{end}}</textarea>",
- "<textarea><a><b></textarea>",
- },
- {
- "auditable exemption from escaping",
- "{{range .A}}{{. | noescape}}{{end}}",
- "<a><b>",
- },
- {
- "No tag injection",
- `{{"10$"}}<{{"script src,evil.org/pwnd.js"}}...`,
- `10$&lt;script src,evil.org/pwnd.js...`,
- },
- {
- "No comment injection",
- `<{{"!--"}}`,
- `&lt;!--`,
- },
- {
- "No RCDATA end tag injection",
- `<textarea><{{"/textarea "}}...</textarea>`,
- `<textarea>&lt;/textarea ...</textarea>`,
- },
- {
- "optional attrs",
- `<img class="{{"iconClass"}}"` +
- `{{if .T}} id="{{"<iconId>"}}"{{end}}` +
- // Double quotes inside if/else.
- ` src=` +
- `{{if .T}}"?{{"<iconPath>"}}"` +
- `{{else}}"images/cleardot.gif"{{end}}` +
- // Missing space before title, but it is not a
- // part of the src attribute.
- `{{if .T}}title="{{"<title>"}}"{{end}}` +
- // Quotes outside if/else.
- ` alt="` +
- `{{if .T}}{{"<alt>"}}` +
- `{{else}}{{if .F}}{{"<title>"}}{{end}}` +
- `{{end}}"` +
- `>`,
- `<img class="iconClass" id="<iconId>" src="?%3ciconPath%3e"title="<title>" alt="<alt>">`,
- },
- {
- "conditional valueless attr name",
- `<input{{if .T}} checked{{end}} name=n>`,
- `<input checked name=n>`,
- },
- {
- "conditional dynamic valueless attr name 1",
- `<input{{if .T}} {{"checked"}}{{end}} name=n>`,
- `<input checked name=n>`,
- },
- {
- "conditional dynamic valueless attr name 2",
- `<input {{if .T}}{{"checked"}} {{end}}name=n>`,
- `<input checked name=n>`,
- },
- {
- "dynamic attribute name",
- `<img on{{"load"}}="alert({{"loaded"}})">`,
- // Treated as JS since quotes are inserted.
- `<img onload="alert("loaded")">`,
- },
- {
- "bad dynamic attribute name 1",
- // Allow checked, selected, disabled, but not JS or
- // CSS attributes.
- `<input {{"onchange"}}="{{"doEvil()"}}">`,
- `<input ZgotmplZ="doEvil()">`,
- },
- {
- "bad dynamic attribute name 2",
- `<div {{"sTyle"}}="{{"color: expression(alert(1337))"}}">`,
- `<div ZgotmplZ="color: expression(alert(1337))">`,
- },
- {
- "bad dynamic attribute name 3",
- // Allow title or alt, but not a URL.
- `<img {{"src"}}="{{"javascript:doEvil()"}}">`,
- `<img ZgotmplZ="javascript:doEvil()">`,
- },
- {
- "bad dynamic attribute name 4",
- // Structure preservation requires values to associate
- // with a consistent attribute.
- `<input checked {{""}}="Whose value am I?">`,
- `<input checked ZgotmplZ="Whose value am I?">`,
- },
- {
- "dynamic element name",
- `<h{{3}}><table><t{{"head"}}>...</h{{3}}>`,
- `<h3><table><thead>...</h3>`,
- },
- {
- "bad dynamic element name",
- // Dynamic element names are typically used to switch
- // between (thead, tfoot, tbody), (ul, ol), (th, td),
- // and other replaceable sets.
- // We do not currently easily support (ul, ol).
- // If we do change to support that, this test should
- // catch failures to filter out special tag names which
- // would violate the structure preservation property --
- // if any special tag name could be substituted, then
- // the content could be raw text/RCDATA for some inputs
- // and regular HTML content for others.
- `<{{"script"}}>{{"doEvil()"}}</{{"script"}}>`,
- `<script>doEvil()</script>`,
- },
- }
-
- for _, test := range tests {
- tmpl := template.New(test.name)
- // TODO: Move noescape into template/func.go
- tmpl.Funcs(template.FuncMap{
- "noescape": func(a ...interface{}) string {
- return fmt.Sprint(a...)
- },
- })
- tmpl = template.Must(Escape(template.Must(tmpl.Parse(test.input))))
- b := new(bytes.Buffer)
- if err := tmpl.Execute(b, data); err != nil {
- t.Errorf("%s: template execution failed: %s", test.name, err)
- continue
- }
- if w, g := test.output, b.String(); w != g {
- t.Errorf("%s: escaped output: want\n\t%q\ngot\n\t%q", test.name, w, g)
- continue
- }
- }
-}
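For orientation while reading this removal, the calling pattern the table-driven test above exercises is, in outline, the one below. This is a hedged sketch, not part of the deleted file; the `exp/template/html` import path is taken from the error strings later in this diff, and the pre-rename `"template"` import is an assumption based on the renames elsewhere in this change.

package main

import (
    "bytes"
    "fmt"
    "template" // or "text/template" after the renames in this change

    htmltemplate "exp/template/html" // path of the package being removed here
)

func main() {
    // Parse a template, run the contextual autoescaping pass, then execute.
    t := template.Must(template.New("t").Parse(`<a onclick="alert({{.}})">{{.}}</a>`))
    t = template.Must(htmltemplate.Escape(t)) // inserts escaping funcs per context
    var b bytes.Buffer
    if err := t.Execute(&b, "foo & 'bar'"); err != nil {
        fmt.Println("execute:", err)
        return
    }
    fmt.Println(b.String()) // the value is escaped differently in the JS and HTML contexts
}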
-
-func TestEscapeSet(t *testing.T) {
- type dataItem struct {
- Children []*dataItem
- X string
- }
-
- data := dataItem{
- Children: []*dataItem{
- &dataItem{X: "foo"},
- &dataItem{X: "<bar>"},
- &dataItem{
- Children: []*dataItem{
- &dataItem{X: "baz"},
- },
- },
- },
- }
-
- tests := []struct {
- inputs map[string]string
- want string
- }{
- // The trivial set.
- {
- map[string]string{
- "main": ``,
- },
- ``,
- },
- // A template called in the start context.
- {
- map[string]string{
- "main": `Hello, {{template "helper"}}!`,
- // Not a valid top level HTML template.
- // "<b" is not a full tag.
- "helper": `{{"<World>"}}`,
- },
- `Hello, &lt;World&gt;!`,
- },
- // A template called in a context other than the start.
- {
- map[string]string{
- "main": `<a onclick='a = {{template "helper"}};'>`,
- // Not a valid top level HTML template.
- // "<b" is not a full tag.
- "helper": `{{"<a>"}}<b`,
- },
- `<a onclick='a = "\u003ca\u003e"<b;'>`,
- },
- // A recursive template that ends in its start context.
- {
- map[string]string{
- "main": `{{range .Children}}{{template "main" .}}{{else}}{{.X}} {{end}}`,
- },
- `foo &lt;bar&gt; baz `,
- },
- // A recursive helper template that ends in its start context.
- {
- map[string]string{
- "main": `{{template "helper" .}}`,
- "helper": `{{if .Children}}<ul>{{range .Children}}<li>{{template "main" .}}</li>{{end}}</ul>{{else}}{{.X}}{{end}}`,
- },
- `<ul><li>foo</li><li>&lt;bar&gt;</li><li><ul><li>baz</li></ul></li></ul>`,
- },
- // Co-recursive templates that end in its start context.
- {
- map[string]string{
- "main": `<blockquote>{{range .Children}}{{template "helper" .}}{{end}}</blockquote>`,
- "helper": `{{if .Children}}{{template "main" .}}{{else}}{{.X}}<br>{{end}}`,
- },
- `<blockquote>foo<br>&lt;bar&gt;<br><blockquote>baz<br></blockquote></blockquote>`,
- },
- // A template that is called in two different contexts.
- {
- map[string]string{
- "main": `<button onclick="title='{{template "helper"}}'; ...">{{template "helper"}}</button>`,
- "helper": `{{11}} of {{"<100>"}}`,
- },
- `<button onclick="title='11 of \x3c100\x3e'; ...">11 of <100></button>`,
- },
- // A non-recursive template that ends in a different context.
- // helper starts in jsCtxRegexp and ends in jsCtxDivOp.
- {
- map[string]string{
- "main": `<script>var x={{template "helper"}}/{{"42"}};</script>`,
- "helper": "{{126}}",
- },
- `<script>var x= 126 /"42";</script>`,
- },
- // A recursive template that ends in a similar context.
- {
- map[string]string{
- "main": `<script>var x=[{{template "countdown" 4}}];</script>`,
- "countdown": `{{.}}{{if .}},{{template "countdown" . | pred}}{{end}}`,
- },
- `<script>var x=[ 4 , 3 , 2 , 1 , 0 ];</script>`,
- },
- // A recursive template that ends in a different context.
- /*
- {
- map[string]string{
- "main": `<a href="/foo{{template "helper" .}}">`,
- "helper": `{{if .Children}}{{range .Children}}{{template "helper" .}}{{end}}{{else}}?x={{.X}}{{end}}`,
- },
- `<a href="/foo?x=foo?x=%3cbar%3e?x=baz">`,
- },
- */
- }
-
- // pred is a template function that returns the predecessor of a
- // natural number for testing recursive templates.
- fns := template.FuncMap{"pred": func(a ...interface{}) (interface{}, error) {
- if len(a) == 1 {
- if i, _ := a[0].(int); i > 0 {
- return i - 1, nil
- }
- }
- return nil, fmt.Errorf("undefined pred(%v)", a)
- }}
-
- for _, test := range tests {
- var s template.Set
- for name, src := range test.inputs {
- t := template.New(name)
- t.Funcs(fns)
- s.Add(template.Must(t.Parse(src)))
- }
- s.Funcs(fns)
- if _, err := EscapeSet(&s, "main"); err != nil {
- t.Errorf("%s for input:\n%v", err, test.inputs)
- continue
- }
- var b bytes.Buffer
-
- if err := s.Execute(&b, "main", data); err != nil {
- t.Errorf("%q executing %v", err.Error(), s.Template("main"))
- continue
- }
- if got := b.String(); test.want != got {
- t.Errorf("want\n\t%q\ngot\n\t%q", test.want, got)
- }
- }
-
-}
-
-func TestErrors(t *testing.T) {
- tests := []struct {
- input string
- err string
- }{
- // Non-error cases.
- {
- "{{if .Cond}}<a>{{else}}<b>{{end}}",
- "",
- },
- {
- "{{if .Cond}}<a>{{end}}",
- "",
- },
- {
- "{{if .Cond}}{{else}}<b>{{end}}",
- "",
- },
- {
- "{{with .Cond}}<div>{{end}}",
- "",
- },
- {
- "{{range .Items}}<a>{{end}}",
- "",
- },
- {
- "<a href='/foo?{{range .Items}}&{{.K}}={{.V}}{{end}}'>",
- "",
- },
- // Error cases.
- {
- "{{if .Cond}}<a{{end}}",
- "z:1: {{if}} branches",
- },
- {
- "{{if .Cond}}\n{{else}}\n<a{{end}}",
- "z:1: {{if}} branches",
- },
- {
- // Missing quote in the else branch.
- `{{if .Cond}}<a href="foo">{{else}}<a href="bar>{{end}}`,
- "z:1: {{if}} branches",
- },
- {
- // Different kind of attribute: href implies a URL.
- "<a {{if .Cond}}href='{{else}}title='{{end}}{{.X}}'>",
- "z:1: {{if}} branches",
- },
- {
- "\n{{with .X}}<a{{end}}",
- "z:2: {{with}} branches",
- },
- {
- "\n{{with .X}}<a>{{else}}<a{{end}}",
- "z:2: {{with}} branches",
- },
- {
- "{{range .Items}}<a{{end}}",
- `z:1: on range loop re-entry: "<" in attribute name: "<a"`,
- },
- {
- "\n{{range .Items}} x='<a{{end}}",
- "z:2: on range loop re-entry: {{range}} branches",
- },
- {
- "<a b=1 c={{.H}}",
- "z: ends in a non-text context: {stateAttr delimSpaceOrTagEnd",
- },
- {
- "<script>foo();",
- "z: ends in a non-text context: {stateJS",
- },
- {
- `<a href="{{if .F}}/foo?a={{else}}/bar/{{end}}{{.H}}">`,
- "z:1: (action: [(command: [F=[H]])]) appears in an ambiguous URL context",
- },
- {
- `<a onclick="alert('Hello \`,
- `unfinished escape sequence in JS string: "Hello \\"`,
- },
- {
- `<a onclick='alert("Hello\, World\`,
- `unfinished escape sequence in JS string: "Hello\\, World\\"`,
- },
- {
- `<a onclick='alert(/x+\`,
- `unfinished escape sequence in JS string: "x+\\"`,
- },
- {
- `<a onclick="/foo[\]/`,
- `unfinished JS regexp charset: "foo[\\]/"`,
- },
- {
- // It is ambiguous whether 1.5 should be 1\.5 or 1.5.
- // Either `var x = 1/- 1.5 /i.test(x)`
- // where `i.test(x)` is a method call of reference i,
- // or `/-1\.5/i.test(x)` which is a method call on a
- // case insensitive regular expression.
- `<script>{{if false}}var x = 1{{end}}/-{{"1.5"}}/i.test(x)</script>`,
- `'/' could start a division or regexp: "/-"`,
- },
- {
- `{{template "foo"}}`,
- "z:1: no such template foo",
- },
- {
- `{{define "z"}}<div{{template "y"}}>{{end}}` +
- // Illegal starting in stateTag but not in stateText.
- `{{define "y"}} foo<b{{end}}`,
- `"<" in attribute name: " foo<b"`,
- },
- {
- `{{define "z"}}<script>reverseList = [{{template "t"}}]</script>{{end}}` +
- // Missing " after recursive call.
- `{{define "t"}}{{if .Tail}}{{template "t" .Tail}}{{end}}{{.Head}}",{{end}}`,
- `: cannot compute output context for template t$htmltemplate_stateJS_elementScript`,
- },
- {
- `<input type=button value=onclick=>`,
- `exp/template/html:z: "=" in unquoted attr: "onclick="`,
- },
- {
- `<input type=button value= onclick=>`,
- `exp/template/html:z: "=" in unquoted attr: "onclick="`,
- },
- {
- `<input type=button value= 1+1=2>`,
- `exp/template/html:z: "=" in unquoted attr: "1+1=2"`,
- },
- {
- "<a class=`foo>",
- "exp/template/html:z: \"`\" in unquoted attr: \"`foo\"",
- },
- {
- `<a style=font:'Arial'>`,
- `exp/template/html:z: "'" in unquoted attr: "font:'Arial'"`,
- },
- {
- `<a=foo>`,
- `: expected space, attr name, or end of tag, but got "=foo>"`,
- },
- }
-
- for _, test := range tests {
- var err error
- if strings.HasPrefix(test.input, "{{define") {
- var s template.Set
- _, err = s.Parse(test.input)
- if err != nil {
- t.Errorf("Failed to parse %q: %s", test.input, err)
- continue
- }
- _, err = EscapeSet(&s, "z")
- } else {
- tmpl := template.Must(template.New("z").Parse(test.input))
- _, err = Escape(tmpl)
- }
- var got string
- if err != nil {
- got = err.Error()
- }
- if test.err == "" {
- if got != "" {
- t.Errorf("input=%q: unexpected error %q", test.input, got)
- }
- continue
- }
- if strings.Index(got, test.err) == -1 {
- t.Errorf("input=%q: error\n\t%q\ndoes not contain expected string\n\t%q", test.input, got, test.err)
- continue
- }
- }
-}
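The error cases above are reported by Escape itself rather than at execution time. A small sketch of how a caller would observe one of them, assuming it sits in this package next to the test (which already imports the template package):

// checkBranchError parses the first error case from the table above and
// returns the error Escape reports for it (expected to mention
// "z:1: {{if}} branches").
func checkBranchError() error {
    tmpl := template.Must(template.New("z").Parse("{{if .Cond}}<a{{end}}"))
    _, err := Escape(tmpl)
    return err
}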
-
-func TestEscapeText(t *testing.T) {
- tests := []struct {
- input string
- output context
- }{
- {
- ``,
- context{},
- },
- {
- `Hello, World!`,
- context{},
- },
- {
- // An orphaned "<" is OK.
- `I <3 Ponies!`,
- context{},
- },
- {
- `<a`,
- context{state: stateTag},
- },
- {
- `<a `,
- context{state: stateTag},
- },
- {
- `<a>`,
- context{state: stateText},
- },
- {
- `<a href`,
- context{state: stateAttrName, attr: attrURL},
- },
- {
- `<a on`,
- context{state: stateAttrName, attr: attrScript},
- },
- {
- `<a href `,
- context{state: stateAfterName, attr: attrURL},
- },
- {
- `<a style = `,
- context{state: stateBeforeValue, attr: attrStyle},
- },
- {
- `<a href=`,
- context{state: stateBeforeValue, attr: attrURL},
- },
- {
- `<a href=x`,
- context{state: stateURL, delim: delimSpaceOrTagEnd, urlPart: urlPartPreQuery},
- },
- {
- `<a href=x `,
- context{state: stateTag},
- },
- {
- `<a href=>`,
- context{state: stateText},
- },
- {
- `<a href=x>`,
- context{state: stateText},
- },
- {
- `<a href ='`,
- context{state: stateURL, delim: delimSingleQuote},
- },
- {
- `<a href=''`,
- context{state: stateTag},
- },
- {
- `<a href= "`,
- context{state: stateURL, delim: delimDoubleQuote},
- },
- {
- `<a href=""`,
- context{state: stateTag},
- },
- {
- `<a title="`,
- context{state: stateAttr, delim: delimDoubleQuote},
- },
- {
- `<a HREF='http:`,
- context{state: stateURL, delim: delimSingleQuote, urlPart: urlPartPreQuery},
- },
- {
- `<a Href='/`,
- context{state: stateURL, delim: delimSingleQuote, urlPart: urlPartPreQuery},
- },
- {
- `<a href='"`,
- context{state: stateURL, delim: delimSingleQuote, urlPart: urlPartPreQuery},
- },
- {
- `<a href="'`,
- context{state: stateURL, delim: delimDoubleQuote, urlPart: urlPartPreQuery},
- },
- {
- `<a href=''`,
- context{state: stateURL, delim: delimSingleQuote, urlPart: urlPartPreQuery},
- },
- {
- `<a href=""`,
- context{state: stateURL, delim: delimDoubleQuote, urlPart: urlPartPreQuery},
- },
- {
- `<a href=""`,
- context{state: stateURL, delim: delimDoubleQuote, urlPart: urlPartPreQuery},
- },
- {
- `<a href="`,
- context{state: stateURL, delim: delimSpaceOrTagEnd, urlPart: urlPartPreQuery},
- },
- {
- `<img alt="1">`,
- context{state: stateText},
- },
- {
- `<img alt="1>"`,
- context{state: stateTag},
- },
- {
- `<img alt="1>">`,
- context{state: stateText},
- },
- {
- `<input checked type="checkbox"`,
- context{state: stateTag},
- },
- {
- `<a onclick="`,
- context{state: stateJS, delim: delimDoubleQuote},
- },
- {
- `<a onclick="//foo`,
- context{state: stateJSLineCmt, delim: delimDoubleQuote},
- },
- {
- "<a onclick='//\n",
- context{state: stateJS, delim: delimSingleQuote},
- },
- {
- "<a onclick='//\r\n",
- context{state: stateJS, delim: delimSingleQuote},
- },
- {
- "<a onclick='//\u2028",
- context{state: stateJS, delim: delimSingleQuote},
- },
- {
- `<a onclick="/*`,
- context{state: stateJSBlockCmt, delim: delimDoubleQuote},
- },
- {
- `<a onclick="/*/`,
- context{state: stateJSBlockCmt, delim: delimDoubleQuote},
- },
- {
- `<a onclick="/**/`,
- context{state: stateJS, delim: delimDoubleQuote},
- },
- {
- `<a onkeypress=""`,
- context{state: stateJSDqStr, delim: delimDoubleQuote},
- },
- {
- `<a onclick='"foo"`,
- context{state: stateJS, delim: delimSingleQuote, jsCtx: jsCtxDivOp},
- },
- {
- `<a onclick='foo'`,
- context{state: stateJS, delim: delimSpaceOrTagEnd, jsCtx: jsCtxDivOp},
- },
- {
- `<a onclick='foo`,
- context{state: stateJSSqStr, delim: delimSpaceOrTagEnd},
- },
- {
- `<a onclick=""foo'`,
- context{state: stateJSDqStr, delim: delimDoubleQuote},
- },
- {
- `<a onclick="'foo"`,
- context{state: stateJSSqStr, delim: delimDoubleQuote},
- },
- {
- `<A ONCLICK="'`,
- context{state: stateJSSqStr, delim: delimDoubleQuote},
- },
- {
- `<a onclick="/`,
- context{state: stateJSRegexp, delim: delimDoubleQuote},
- },
- {
- `<a onclick="'foo'`,
- context{state: stateJS, delim: delimDoubleQuote, jsCtx: jsCtxDivOp},
- },
- {
- `<a onclick="'foo\'`,
- context{state: stateJSSqStr, delim: delimDoubleQuote},
- },
- {
- `<a onclick="'foo\'`,
- context{state: stateJSSqStr, delim: delimDoubleQuote},
- },
- {
- `<a onclick="/foo/`,
- context{state: stateJS, delim: delimDoubleQuote, jsCtx: jsCtxDivOp},
- },
- {
- `<script>/foo/ /=`,
- context{state: stateJS, element: elementScript},
- },
- {
- `<a onclick="1 /foo`,
- context{state: stateJS, delim: delimDoubleQuote, jsCtx: jsCtxDivOp},
- },
- {
- `<a onclick="1 /*c*/ /foo`,
- context{state: stateJS, delim: delimDoubleQuote, jsCtx: jsCtxDivOp},
- },
- {
- `<a onclick="/foo[/]`,
- context{state: stateJSRegexp, delim: delimDoubleQuote},
- },
- {
- `<a onclick="/foo\/`,
- context{state: stateJSRegexp, delim: delimDoubleQuote},
- },
- {
- `<a onclick="/foo/`,
- context{state: stateJS, delim: delimDoubleQuote, jsCtx: jsCtxDivOp},
- },
- {
- `<input checked style="`,
- context{state: stateCSS, delim: delimDoubleQuote},
- },
- {
- `<a style="//`,
- context{state: stateCSSLineCmt, delim: delimDoubleQuote},
- },
- {
- `<a style="//</script>`,
- context{state: stateCSSLineCmt, delim: delimDoubleQuote},
- },
- {
- "<a style='//\n",
- context{state: stateCSS, delim: delimSingleQuote},
- },
- {
- "<a style='//\r",
- context{state: stateCSS, delim: delimSingleQuote},
- },
- {
- `<a style="/*`,
- context{state: stateCSSBlockCmt, delim: delimDoubleQuote},
- },
- {
- `<a style="/*/`,
- context{state: stateCSSBlockCmt, delim: delimDoubleQuote},
- },
- {
- `<a style="/**/`,
- context{state: stateCSS, delim: delimDoubleQuote},
- },
- {
- `<a style="background: '`,
- context{state: stateCSSSqStr, delim: delimDoubleQuote},
- },
- {
- `<a style="background: "`,
- context{state: stateCSSDqStr, delim: delimDoubleQuote},
- },
- {
- `<a style="background: '/foo?img=`,
- context{state: stateCSSSqStr, delim: delimDoubleQuote, urlPart: urlPartQueryOrFrag},
- },
- {
- `<a style="background: '/`,
- context{state: stateCSSSqStr, delim: delimDoubleQuote, urlPart: urlPartPreQuery},
- },
- {
- `<a style="background: url("/`,
- context{state: stateCSSDqURL, delim: delimDoubleQuote, urlPart: urlPartPreQuery},
- },
- {
- `<a style="background: url('/`,
- context{state: stateCSSSqURL, delim: delimDoubleQuote, urlPart: urlPartPreQuery},
- },
- {
- `<a style="background: url('/)`,
- context{state: stateCSSSqURL, delim: delimDoubleQuote, urlPart: urlPartPreQuery},
- },
- {
- `<a style="background: url('/ `,
- context{state: stateCSSSqURL, delim: delimDoubleQuote, urlPart: urlPartPreQuery},
- },
- {
- `<a style="background: url(/`,
- context{state: stateCSSURL, delim: delimDoubleQuote, urlPart: urlPartPreQuery},
- },
- {
- `<a style="background: url( `,
- context{state: stateCSSURL, delim: delimDoubleQuote},
- },
- {
- `<a style="background: url( /image?name=`,
- context{state: stateCSSURL, delim: delimDoubleQuote, urlPart: urlPartQueryOrFrag},
- },
- {
- `<a style="background: url(x)`,
- context{state: stateCSS, delim: delimDoubleQuote},
- },
- {
- `<a style="background: url('x'`,
- context{state: stateCSS, delim: delimDoubleQuote},
- },
- {
- `<a style="background: url( x `,
- context{state: stateCSS, delim: delimDoubleQuote},
- },
- {
- `<!-- foo`,
- context{state: stateHTMLCmt},
- },
- {
- `<!-->`,
- context{state: stateHTMLCmt},
- },
- {
- `<!--->`,
- context{state: stateHTMLCmt},
- },
- {
- `<!-- foo -->`,
- context{state: stateText},
- },
- {
- `<script`,
- context{state: stateTag, element: elementScript},
- },
- {
- `<script `,
- context{state: stateTag, element: elementScript},
- },
- {
- `<script src="foo.js" `,
- context{state: stateTag, element: elementScript},
- },
- {
- `<script src='foo.js' `,
- context{state: stateTag, element: elementScript},
- },
- {
- `<script type=text/javascript `,
- context{state: stateTag, element: elementScript},
- },
- {
- `<script>foo`,
- context{state: stateJS, jsCtx: jsCtxDivOp, element: elementScript},
- },
- {
- `<script>foo</script>`,
- context{state: stateText},
- },
- {
- `<script>foo</script><!--`,
- context{state: stateHTMLCmt},
- },
- {
- `<script>document.write("<p>foo</p>");`,
- context{state: stateJS, element: elementScript},
- },
- {
- `<script>document.write("<p>foo<\/script>");`,
- context{state: stateJS, element: elementScript},
- },
- {
- `<script>document.write("<script>alert(1)</script>");`,
- context{state: stateText},
- },
- {
- `<Script>`,
- context{state: stateJS, element: elementScript},
- },
- {
- `<SCRIPT>foo`,
- context{state: stateJS, jsCtx: jsCtxDivOp, element: elementScript},
- },
- {
- `<textarea>value`,
- context{state: stateRCDATA, element: elementTextarea},
- },
- {
- `<textarea>value</TEXTAREA>`,
- context{state: stateText},
- },
- {
- `<textarea name=html><b`,
- context{state: stateRCDATA, element: elementTextarea},
- },
- {
- `<title>value`,
- context{state: stateRCDATA, element: elementTitle},
- },
- {
- `<style>value`,
- context{state: stateCSS, element: elementStyle},
- },
- {
- `<a xlink:href`,
- context{state: stateAttrName, attr: attrURL},
- },
- {
- `<a xmlns`,
- context{state: stateAttrName, attr: attrURL},
- },
- {
- `<a xmlns:foo`,
- context{state: stateAttrName, attr: attrURL},
- },
- {
- `<a xmlnsxyz`,
- context{state: stateAttrName},
- },
- {
- `<a data-url`,
- context{state: stateAttrName, attr: attrURL},
- },
- {
- `<a data-iconUri`,
- context{state: stateAttrName, attr: attrURL},
- },
- {
- `<a data-urlItem`,
- context{state: stateAttrName, attr: attrURL},
- },
- {
- `<a g:`,
- context{state: stateAttrName},
- },
- {
- `<a g:url`,
- context{state: stateAttrName, attr: attrURL},
- },
- {
- `<a g:iconUri`,
- context{state: stateAttrName, attr: attrURL},
- },
- {
- `<a g:urlItem`,
- context{state: stateAttrName, attr: attrURL},
- },
- {
- `<a g:value`,
- context{state: stateAttrName},
- },
- {
- `<a svg:style='`,
- context{state: stateCSS, delim: delimSingleQuote},
- },
- {
- `<svg:font-face`,
- context{state: stateTag},
- },
- {
- `<svg:a svg:onclick="`,
- context{state: stateJS, delim: delimDoubleQuote},
- },
- }
-
- for _, test := range tests {
- b, e := []byte(test.input), newEscaper(nil)
- c := e.escapeText(context{}, &parse.TextNode{parse.NodeText, b})
- if !test.output.eq(c) {
- t.Errorf("input %q: want context\n\t%v\ngot\n\t%v", test.input, test.output, c)
- continue
- }
- if test.input != string(b) {
- t.Errorf("input %q: text node was modified: want %q got %q", test.input, test.input, b)
- continue
- }
- }
-}
-
-func TestEnsurePipelineContains(t *testing.T) {
- tests := []struct {
- input, output string
- ids []string
- }{
- {
- "{{.X}}",
- "[(command: [F=[X]])]",
- []string{},
- },
- {
- "{{.X | html}}",
- "[(command: [F=[X]]) (command: [I=html])]",
- []string{},
- },
- {
- "{{.X}}",
- "[(command: [F=[X]]) (command: [I=html])]",
- []string{"html"},
- },
- {
- "{{.X | html}}",
- "[(command: [F=[X]]) (command: [I=html]) (command: [I=urlquery])]",
- []string{"urlquery"},
- },
- {
- "{{.X | html | urlquery}}",
- "[(command: [F=[X]]) (command: [I=html]) (command: [I=urlquery])]",
- []string{"urlquery"},
- },
- {
- "{{.X | html | urlquery}}",
- "[(command: [F=[X]]) (command: [I=html]) (command: [I=urlquery])]",
- []string{"html", "urlquery"},
- },
- {
- "{{.X | html | urlquery}}",
- "[(command: [F=[X]]) (command: [I=html]) (command: [I=urlquery])]",
- []string{"html"},
- },
- {
- "{{.X | urlquery}}",
- "[(command: [F=[X]]) (command: [I=html]) (command: [I=urlquery])]",
- []string{"html", "urlquery"},
- },
- {
- "{{.X | html | print}}",
- "[(command: [F=[X]]) (command: [I=urlquery]) (command: [I=html]) (command: [I=print])]",
- []string{"urlquery", "html"},
- },
- }
- for _, test := range tests {
- tmpl := template.Must(template.New("test").Parse(test.input))
- action, ok := (tmpl.Tree.Root.Nodes[0].(*parse.ActionNode))
- if !ok {
- t.Errorf("First node is not an action: %s", test.input)
- continue
- }
- pipe := action.Pipe
- ensurePipelineContains(pipe, test.ids)
- got := pipe.String()
- if got != test.output {
- t.Errorf("%s, %v: want\n\t%s\ngot\n\t%s", test.input, test.ids, test.output, got)
- }
- }
-}
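As a small in-package illustration of what the test above checks: ensurePipelineContains rewrites an action's pipeline so that the required escaper identifiers appear, reusing any that are already present. The sketch below assumes it sits beside the test, which already imports the template and parse packages.

// pipelineAfterHTML shows the rewrite ensurePipelineContains performs on a
// bare field reference (values taken from the first rows of the table above).
func pipelineAfterHTML() string {
    tmpl := template.Must(template.New("x").Parse("{{.X}}"))
    pipe := tmpl.Tree.Root.Nodes[0].(*parse.ActionNode).Pipe
    ensurePipelineContains(pipe, []string{"html"})
    return pipe.String() // "[(command: [F=[X]]) (command: [I=html])]"
}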
-
-func expectExecuteFailure(t *testing.T, b *bytes.Buffer, err error) {
- if err != nil {
- if b.Len() != 0 {
- t.Errorf("output on buffer: %q", b.String())
- }
- } else {
- t.Errorf("unescaped template executed")
- }
-}
-
-func TestEscapeErrorsNotIgnorable(t *testing.T) {
- var b bytes.Buffer
- tmpl := template.Must(template.New("dangerous").Parse("<a"))
- Escape(tmpl)
- err := tmpl.Execute(&b, nil)
- expectExecuteFailure(t, &b, err)
-}
-
-func TestEscapeSetErrorsNotIgnorable(t *testing.T) {
- s, err := (&template.Set{}).Parse(`{{define "t"}}<a{{end}}`)
- if err != nil {
- t.Errorf("failed to parse set: %q", err)
- }
- EscapeSet(s, "t")
- var b bytes.Buffer
- err = s.Execute(&b, "t", nil)
- expectExecuteFailure(t, &b, err)
-}
-
-func TestRedundantFuncs(t *testing.T) {
- inputs := []interface{}{
- "\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f" +
- "\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" +
- ` !"#$%&'()*+,-./` +
- `0123456789:;<=>?` +
- `@ABCDEFGHIJKLMNO` +
- `PQRSTUVWXYZ[\]^_` +
- "`abcdefghijklmno" +
- "pqrstuvwxyz{|}~\x7f" +
- "\u00A0\u0100\u2028\u2029\ufeff\ufdec\ufffd\uffff\U0001D11E" +
- "&%22\\",
- CSS(`a[href =~ "//example.com"]#foo`),
- HTML(`Hello, <b>World</b> &amp;tc!`),
- HTMLAttr(` dir="ltr"`),
- JS(`c && alert("Hello, World!");`),
- JSStr(`Hello, World & O'Reilly\x21`),
- URL(`greeting=H%69&addressee=(World)`),
- }
-
- for n0, m := range redundantFuncs {
- f0 := funcMap[n0].(func(...interface{}) string)
- for n1, _ := range m {
- f1 := funcMap[n1].(func(...interface{}) string)
- for _, input := range inputs {
- want := f0(input)
- if got := f1(want); want != got {
- t.Errorf("%s %s with %T %q: want\n\t%q,\ngot\n\t%q", n0, n1, input, input, want, got)
- }
- }
- }
- }
-}
-
-func BenchmarkEscapedExecute(b *testing.B) {
- tmpl := template.Must(Escape(template.Must(template.New("t").Parse(`<a onclick="alert('{{.}}')">{{.}}</a>`))))
- var buf bytes.Buffer
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- tmpl.Execute(&buf, "foo & 'bar' & baz")
- buf.Reset()
- }
-}
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package html
-
-import (
- "bytes"
- "fmt"
- "strings"
- "utf8"
-)
-
-// htmlNospaceEscaper escapes for inclusion in unquoted attribute values.
-func htmlNospaceEscaper(args ...interface{}) string {
- s, t := stringify(args...)
- if t == contentTypeHTML {
- return htmlReplacer(stripTags(s), htmlNospaceNormReplacementTable, false)
- }
- return htmlReplacer(s, htmlNospaceReplacementTable, false)
-}
-
-// attrEscaper escapes for inclusion in quoted attribute values.
-func attrEscaper(args ...interface{}) string {
- s, t := stringify(args...)
- if t == contentTypeHTML {
- return htmlReplacer(stripTags(s), htmlNormReplacementTable, true)
- }
- return htmlReplacer(s, htmlReplacementTable, true)
-}
-
-// rcdataEscaper escapes for inclusion in an RCDATA element body.
-func rcdataEscaper(args ...interface{}) string {
- s, t := stringify(args...)
- if t == contentTypeHTML {
- return htmlReplacer(s, htmlNormReplacementTable, true)
- }
- return htmlReplacer(s, htmlReplacementTable, true)
-}
-
-// htmlEscaper escapes for inclusion in HTML text.
-func htmlEscaper(args ...interface{}) string {
- s, t := stringify(args...)
- if t == contentTypeHTML {
- return s
- }
- return htmlReplacer(s, htmlReplacementTable, true)
-}
-
-// htmlReplacementTable contains the runes that need to be escaped
-// inside a quoted attribute value or in a text node.
-var htmlReplacementTable = []string{
- // http://www.w3.org/TR/html5/tokenization.html#attribute-value-unquoted-state: "
- // U+0000 NULL Parse error. Append a U+FFFD REPLACEMENT
- // CHARACTER character to the current attribute's value.
- // "
- // and similarly
- // http://www.w3.org/TR/html5/tokenization.html#before-attribute-value-state
- 0: "\uFFFD",
- '"': """,
- '&': "&",
- '\'': "'",
- '+': "+",
- '<': "<",
- '>': ">",
-}
-
-// htmlNormReplacementTable is like htmlReplacementTable but without '&' to
-// avoid over-encoding existing entities.
-var htmlNormReplacementTable = []string{
- 0: "\uFFFD",
- '"': """,
- '\'': "'",
- '+': "+",
- '<': "<",
- '>': ">",
-}
-
-// htmlNospaceReplacementTable contains the runes that need to be escaped
-// inside an unquoted attribute value.
-// The set of runes escaped is the union of the HTML specials and
-// those determined by running the JS below in browsers:
-// <div id=d></div>
-// <script>(function () {
-// var a = [], d = document.getElementById("d"), i, c, s;
-// for (i = 0; i < 0x10000; ++i) {
-// c = String.fromCharCode(i);
-// d.innerHTML = "<span title=" + c + "lt" + c + "></span>"
-// s = d.getElementsByTagName("SPAN")[0];
-// if (!s || s.title !== c + "lt" + c) { a.push(i.toString(16)); }
-// }
-// document.write(a.join(", "));
-// })()</script>
-var htmlNospaceReplacementTable = []string{
- 0: "�",
- '\t': "	",
- '\n': " ",
- '\v': "",
- '\f': "",
- '\r': " ",
- ' ': " ",
- '"': """,
- '&': "&",
- '\'': "'",
- '+': "+",
- '<': "<",
- '=': "=",
- '>': ">",
- // A parse error in the attribute value (unquoted) and
- // before attribute value states.
- // Treated as a quoting character by IE.
- '`': "&#96;",
-}
-
-// htmlNospaceNormReplacementTable is like htmlNospaceReplacementTable but
-// without '&' to avoid over-encoding existing entities.
-var htmlNospaceNormReplacementTable = []string{
- 0: "�",
- '\t': "	",
- '\n': " ",
- '\v': "",
- '\f': "",
- '\r': " ",
- ' ': " ",
- '"': """,
- '\'': "'",
- '+': "+",
- '<': "<",
- '=': "=",
- '>': ">",
- // A parse error in the attribute value (unquoted) and
- // before attribute value states.
- // Treated as a quoting character by IE.
- '`': "&#96;",
-}
-
- // htmlReplacer returns s with runes replaced according to replacementTable
-// and when badRunes is true, certain bad runes are allowed through unescaped.
-func htmlReplacer(s string, replacementTable []string, badRunes bool) string {
- written, b := 0, new(bytes.Buffer)
- for i, r := range s {
- if int(r) < len(replacementTable) {
- if repl := replacementTable[r]; len(repl) != 0 {
- b.WriteString(s[written:i])
- b.WriteString(repl)
- // Valid as long as replacementTable doesn't
- // include anything above 0x7f.
- written = i + utf8.RuneLen(r)
- }
- } else if badRunes {
- // No-op.
- // IE does not allow these ranges in unquoted attrs.
- } else if 0xfdd0 <= r && r <= 0xfdef || 0xfff0 <= r && r <= 0xffff {
- fmt.Fprintf(b, "%s&#x%x;", s[written:i], r)
- written = i + utf8.RuneLen(r)
- }
- }
- if written == 0 {
- return s
- }
- b.WriteString(s[written:])
- return b.String()
-}
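The escapers above are thin wrappers over htmlReplacer and the replacement tables. As an in-package sketch (not part of the deleted file), the table entries imply outputs like the following.

// replacementExamples lists a few outputs that follow directly from the tables
// above ('<' -> "&lt;", '&' -> "&amp;", '"' -> "&#34;", ' ' -> "&#32;").
func replacementExamples() []string {
    return []string{
        htmlEscaper("a < b & c"),        // "a &lt; b &amp; c"
        attrEscaper(`say "hi"`),         // "say &#34;hi&#34;"
        htmlNospaceEscaper("two words"), // "two&#32;words"
    }
}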
-
-// stripTags takes a snippet of HTML and returns only the text content.
-// For example, `<b>¡Hi!</b> <script>...</script>` -> `¡Hi! `.
-func stripTags(html string) string {
- var b bytes.Buffer
- s, c, i, allText := []byte(html), context{}, 0, true
- // Using the transition funcs helps us avoid mangling
- // `<div title="1>2">` or `I <3 Ponies!`.
- for i != len(s) {
- if c.delim == delimNone {
- st := c.state
- // Use RCDATA instead of parsing into JS or CSS styles.
- if c.element != elementNone && !isInTag(st) {
- st = stateRCDATA
- }
- d, nread := transitionFunc[st](c, s[i:])
- i1 := i + nread
- if c.state == stateText || c.state == stateRCDATA {
- // Emit text up to the start of the tag or comment.
- j := i1
- if d.state != c.state {
- for j1 := j - 1; j1 >= i; j1-- {
- if s[j1] == '<' {
- j = j1
- break
- }
- }
- }
- b.Write(s[i:j])
- } else {
- allText = false
- }
- c, i = d, i1
- continue
- }
- i1 := i + bytes.IndexAny(s[i:], delimEnds[c.delim])
- if i1 < i {
- break
- }
- if c.delim != delimSpaceOrTagEnd {
- // Consume any quote.
- i1++
- }
- c, i = context{state: stateTag, element: c.element}, i1
- }
- if allText {
- return html
- } else if c.state == stateText || c.state == stateRCDATA {
- b.Write(s[i:])
- }
- return b.String()
-}
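A short in-package sketch of stripTags on the cases its transition-function approach is meant to handle; the expected values are taken from TestStripTags in the test file removed below.

// stripTagsExamples shows that attribute values and comments do not confuse
// the tag stripper, while plain text and orphaned '<' are kept.
func stripTagsExamples() []string {
    return []string{
        stripTags(`Foo<div title="1>2">Bar`),          // "FooBar"
        stripTags("Foo <textarea>Bar</textarea> Baz"), // "Foo Bar Baz"
        stripTags(`I <3 Ponies!`),                     // "I <3 Ponies!"
    }
}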
-
-// htmlNameFilter accepts valid parts of an HTML attribute or tag name or
-// a known-safe HTML attribute.
-func htmlNameFilter(args ...interface{}) string {
- s, t := stringify(args...)
- if t == contentTypeHTMLAttr {
- return s
- }
- if len(s) == 0 {
- // Avoid violation of structure preservation.
- // <input checked {{.K}}={{.V}}>.
- // Without this, if .K is empty then .V is the value of
- // checked, but otherwise .V is the value of the attribute
- // named .K.
- return filterFailsafe
- }
- s = strings.ToLower(s)
- if t := attrType(s); t != contentTypePlain {
- // TODO: Split attr and element name part filters so we can whitelist
- // attributes.
- return filterFailsafe
- }
- for _, r := range s {
- switch {
- case '0' <= r && r <= '9':
- case 'a' <= r && r <= 'z':
- default:
- return filterFailsafe
- }
- }
- return s
-}
-
-// commentEscaper returns the empty string regardless of input.
-// Comment content does not correspond to any parsed structure or
-// human-readable content, so the simplest and most secure policy is to drop
-// content interpolated into comments.
-// This approach is equally valid whether or not static comment content is
-// removed from the template.
-func commentEscaper(args ...interface{}) string {
- return ""
-}
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package html
-
-import (
- "html"
- "strings"
- "testing"
-)
-
-func TestHTMLNospaceEscaper(t *testing.T) {
- input := ("\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f" +
- "\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" +
- ` !"#$%&'()*+,-./` +
- `0123456789:;<=>?` +
- `@ABCDEFGHIJKLMNO` +
- `PQRSTUVWXYZ[\]^_` +
- "`abcdefghijklmno" +
- "pqrstuvwxyz{|}~\x7f" +
- "\u00A0\u0100\u2028\u2029\ufeff\ufdec\U0001D11E")
-
- want := ("�\x01\x02\x03\x04\x05\x06\x07" +
- "\x08	  \x0E\x0F" +
- "\x10\x11\x12\x13\x14\x15\x16\x17" +
- "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" +
- ` !"#$%&'()*+,-./` +
- `0123456789:;<=>?` +
- `@ABCDEFGHIJKLMNO` +
- `PQRSTUVWXYZ[\]^_` +
- `&#96;abcdefghijklmno` +
- `pqrstuvwxyz{|}~` + "\u007f" +
- "\u00A0\u0100\u2028\u2029\ufeff\U0001D11E")
-
- got := htmlNospaceEscaper(input)
- if got != want {
- t.Errorf("encode: want\n\t%q\nbut got\n\t%q", want, got)
- }
-
- got, want = html.UnescapeString(got), strings.Replace(input, "\x00", "\ufffd", 1)
- if want != got {
- t.Errorf("decode: want\n\t%q\nbut got\n\t%q", want, got)
- }
-}
-
-func TestStripTags(t *testing.T) {
- tests := []struct {
- input, want string
- }{
- {"", ""},
- {"Hello, World!", "Hello, World!"},
- {"foo&bar", "foo&bar"},
- {`Hello <a href="www.example.com/">World</a>!`, "Hello World!"},
- {"Foo <textarea>Bar</textarea> Baz", "Foo Bar Baz"},
- {"Foo <!-- Bar --> Baz", "Foo Baz"},
- {"<", "<"},
- {"foo < bar", "foo < bar"},
- {`Foo<script type="text/javascript">alert(1337)</script>Bar`, "FooBar"},
- {`Foo<div title="1>2">Bar`, "FooBar"},
- {`I <3 Ponies!`, `I <3 Ponies!`},
- {`<script>foo()</script>`, ``},
- }
-
- for _, test := range tests {
- if got := stripTags(test.input); got != test.want {
- t.Errorf("%q: want %q, got %q", test.input, test.want, got)
- }
- }
-}
-
-func BenchmarkHTMLNospaceEscaper(b *testing.B) {
- for i := 0; i < b.N; i++ {
- htmlNospaceEscaper("The <i>quick</i>,\r\n<span style='color:brown'>brown</span> fox jumps\u2028over the <canine class=\"lazy\">dog</canine>")
- }
-}
-
-func BenchmarkHTMLNospaceEscaperNoSpecials(b *testing.B) {
- for i := 0; i < b.N; i++ {
- htmlNospaceEscaper("The_quick,_brown_fox_jumps_over_the_lazy_dog.")
- }
-}
-
-func BenchmarkStripTags(b *testing.B) {
- for i := 0; i < b.N; i++ {
- stripTags("The <i>quick</i>,\r\n<span style='color:brown'>brown</span> fox jumps\u2028over the <canine class=\"lazy\">dog</canine>")
- }
-}
-
-func BenchmarkStripTagsNoSpecials(b *testing.B) {
- for i := 0; i < b.N; i++ {
- stripTags("The quick, brown fox jumps over the lazy dog.")
- }
-}
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package html
-
-import (
- "bytes"
- "fmt"
- "json"
- "strings"
- "utf8"
-)
-
-// nextJSCtx returns the context that determines whether a slash after the
- // given run of tokens starts a regular expression instead of a division
-// operator: / or /=.
-//
-// This assumes that the token run does not include any string tokens, comment
-// tokens, regular expression literal tokens, or division operators.
-//
-// This fails on some valid but nonsensical JavaScript programs like
-// "x = ++/foo/i" which is quite different than "x++/foo/i", but is not known to
-// fail on any known useful programs. It is based on the draft
-// JavaScript 2.0 lexical grammar and requires one token of lookbehind:
-// http://www.mozilla.org/js/language/js20-2000-07/rationale/syntax.html
-func nextJSCtx(s []byte, preceding jsCtx) jsCtx {
- s = bytes.TrimRight(s, "\t\n\f\r \u2028\u2029")
- if len(s) == 0 {
- return preceding
- }
-
- // All cases below are in the single-byte UTF-8 group.
- switch c, n := s[len(s)-1], len(s); c {
- case '+', '-':
- // ++ and -- are not regexp preceders, but + and - are whether
- // they are used as infix or prefix operators.
- start := n - 1
- // Count the number of adjacent dashes or pluses.
- for start > 0 && s[start-1] == c {
- start--
- }
- if (n-start)&1 == 1 {
- // Reached for trailing minus signs since "---" is the
- // same as "-- -".
- return jsCtxRegexp
- }
- return jsCtxDivOp
- case '.':
- // Handle "42."
- if n != 1 && '0' <= s[n-2] && s[n-2] <= '9' {
- return jsCtxDivOp
- }
- return jsCtxRegexp
- // Suffixes for all punctuators from section 7.7 of the language spec
- // that only end binary operators not handled above.
- case ',', '<', '>', '=', '*', '%', '&', '|', '^', '?':
- return jsCtxRegexp
- // Suffixes for all punctuators from section 7.7 of the language spec
- // that are prefix operators not handled above.
- case '!', '~':
- return jsCtxRegexp
- // Matches all the punctuators from section 7.7 of the language spec
- // that are open brackets not handled above.
- case '(', '[':
- return jsCtxRegexp
- // Matches all the punctuators from section 7.7 of the language spec
- // that precede expression starts.
- case ':', ';', '{':
- return jsCtxRegexp
- // CAVEAT: the close punctuators ('}', ']', ')') precede div ops and
- // are handled in the default except for '}' which can precede a
- // division op as in
- // ({ valueOf: function () { return 42 } } / 2
- // which is valid, but, in practice, developers don't divide object
- // literals, so our heuristic works well for code like
- // function () { ... } /foo/.test(x) && sideEffect();
- // The ')' punctuator can precede a regular expression as in
- // if (b) /foo/.test(x) && ...
- // but this is much less likely than
- // (a + b) / c
- case '}':
- return jsCtxRegexp
- default:
- // Look for an IdentifierName and see if it is a keyword that
- // can precede a regular expression.
- j := n
- for j > 0 && isJSIdentPart(rune(s[j-1])) {
- j--
- }
- if regexpPrecederKeywords[string(s[j:])] {
- return jsCtxRegexp
- }
- }
- // Otherwise is a punctuator not listed above, or
- // a string which precedes a div op, or an identifier
- // which precedes a div op.
- return jsCtxDivOp
-}
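An in-package sketch of the lookbehind heuristic above; the results match the TestNextJsCtx cases in the test file removed below.

// nextJSCtxExamples: a '/' after an identifier or number is division; after an
// operator or a regexp-preceding keyword it starts a regexp literal; a run of
// blank tokens preserves the preceding context.
func nextJSCtxExamples() []jsCtx {
    return []jsCtx{
        nextJSCtx([]byte("x"), jsCtxRegexp),     // jsCtxDivOp
        nextJSCtx([]byte("return"), jsCtxDivOp), // jsCtxRegexp
        nextJSCtx([]byte("  "), jsCtxDivOp),     // jsCtxDivOp (unchanged)
    }
}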
-
- // regexpPrecederKeywords is a set of reserved JS keywords that can precede a
-// regular expression in JS source.
-var regexpPrecederKeywords = map[string]bool{
- "break": true,
- "case": true,
- "continue": true,
- "delete": true,
- "do": true,
- "else": true,
- "finally": true,
- "in": true,
- "instanceof": true,
- "return": true,
- "throw": true,
- "try": true,
- "typeof": true,
- "void": true,
-}
-
-// jsValEscaper escapes its inputs to a JS Expression (section 11.14) that has
- // neither side-effects nor free variables outside (NaN, Infinity).
-func jsValEscaper(args ...interface{}) string {
- var a interface{}
- if len(args) == 1 {
- a = args[0]
- switch t := a.(type) {
- case JS:
- return string(t)
- case JSStr:
- // TODO: normalize quotes.
- return `"` + string(t) + `"`
- case json.Marshaler:
- // Do not treat as a Stringer.
- case fmt.Stringer:
- a = t.String()
- }
- } else {
- a = fmt.Sprint(args...)
- }
- // TODO: detect cycles before calling Marshal which loops infinitely on
- // cyclic data. This may be an unacceptable DoS risk.
-
- b, err := json.Marshal(a)
- if err != nil {
- // Put a space before comment so that if it is flush against
- // a division operator it is not turned into a line comment:
- // x/{{y}}
- // turning into
- // x//* error marshalling y:
- // second line of error message */null
- return fmt.Sprintf(" /* %s */null ", strings.Replace(err.Error(), "*/", "* /", -1))
- }
-
- // TODO: maybe post-process output to prevent it from containing
- // "<!--", "-->", "<![CDATA[", "]]>", or "</script"
- // in case custom marshallers produce output containing those.
-
- // TODO: Maybe abbreviate \u00ab to \xab to produce more compact output.
- if len(b) == 0 {
- // In `x=y/{{.}}*z`, a json.Marshaler that produces "" should
- // not cause the output `x=y/*z`.
- return " null "
- }
- first, _ := utf8.DecodeRune(b)
- last, _ := utf8.DecodeLastRune(b)
- var buf bytes.Buffer
- // Prevent IdentifierNames and NumericLiterals from running into
- // keywords: in, instanceof, typeof, void
- pad := isJSIdentPart(first) || isJSIdentPart(last)
- if pad {
- buf.WriteByte(' ')
- }
- written := 0
- // Make sure that json.Marshal escapes codepoints U+2028 & U+2029
- // so it falls within the subset of JSON which is valid JS.
- for i := 0; i < len(b); {
- rune, n := utf8.DecodeRune(b[i:])
- repl := ""
- if rune == 0x2028 {
- repl = `\u2028`
- } else if rune == 0x2029 {
- repl = `\u2029`
- }
- if repl != "" {
- buf.Write(b[written:i])
- buf.WriteString(repl)
- written = i + n
- }
- i += n
- }
- if buf.Len() != 0 {
- buf.Write(b[written:])
- if pad {
- buf.WriteByte(' ')
- }
- b = buf.Bytes()
- }
- return string(b)
-}
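An in-package sketch of jsValEscaper's output shape; the values match TestJSValEscaper in the test file removed below. Identifier-like output is padded with spaces so it cannot fuse with adjacent tokens, and other values become JSON with HTML-significant characters hex-escaped.

// jsValEscaperExamples illustrates padding and JSON encoding of values.
func jsValEscaperExamples() []string {
    return []string{
        jsValEscaper(42),                            // " 42 "
        jsValEscaper("</script"),                    // `"\u003c/script"`
        jsValEscaper([]interface{}{42, "foo", nil}), // `[42,"foo",null]`
    }
}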
-
-// jsStrEscaper produces a string that can be included between quotes in
-// JavaScript source, in JavaScript embedded in an HTML5 <script> element,
-// or in an HTML5 event handler attribute such as onclick.
-func jsStrEscaper(args ...interface{}) string {
- s, t := stringify(args...)
- if t == contentTypeJSStr {
- return replace(s, jsStrNormReplacementTable)
- }
- return replace(s, jsStrReplacementTable)
-}
-
-// jsRegexpEscaper behaves like jsStrEscaper but escapes regular expression
-// specials so the result is treated literally when included in a regular
-// expression literal. /foo{{.X}}bar/ matches the string "foo" followed by
-// the literal text of {{.X}} followed by the string "bar".
-func jsRegexpEscaper(args ...interface{}) string {
- s, _ := stringify(args...)
- s = replace(s, jsRegexpReplacementTable)
- if s == "" {
- // /{{.X}}/ should not produce a line comment when .X == "".
- return "(?:)"
- }
- return s
-}
-
-// replace replaces each rune r of s with replacementTable[r], provided that
-// r < len(replacementTable). If replacementTable[r] is the empty string then
-// no replacement is made.
-// It also replaces runes U+2028 and U+2029 with the raw strings `\u2028` and
-// `\u2029`.
-func replace(s string, replacementTable []string) string {
- var b bytes.Buffer
- written := 0
- for i, r := range s {
- var repl string
- switch {
- case int(r) < len(replacementTable) && replacementTable[r] != "":
- repl = replacementTable[r]
- case r == '\u2028':
- repl = `\u2028`
- case r == '\u2029':
- repl = `\u2029`
- default:
- continue
- }
- b.WriteString(s[written:i])
- b.WriteString(repl)
- written = i + utf8.RuneLen(r)
- }
- if written == 0 {
- return s
- }
- b.WriteString(s[written:])
- return b.String()
-}
-
-var jsStrReplacementTable = []string{
- 0: `\0`,
- '\t': `\t`,
- '\n': `\n`,
- '\v': `\x0b`, // "\v" == "v" on IE 6.
- '\f': `\f`,
- '\r': `\r`,
- // Encode HTML specials as hex so the output can be embedded
- // in HTML attributes without further encoding.
- '"': `\x22`,
- '&': `\x26`,
- '\'': `\x27`,
- '+': `\x2b`,
- '/': `\/`,
- '<': `\x3c`,
- '>': `\x3e`,
- '\\': `\\`,
-}
-
-// jsStrNormReplacementTable is like jsStrReplacementTable but does not
-// overencode existing escapes since this table has no entry for `\`.
-var jsStrNormReplacementTable = []string{
- 0: `\0`,
- '\t': `\t`,
- '\n': `\n`,
- '\v': `\x0b`, // "\v" == "v" on IE 6.
- '\f': `\f`,
- '\r': `\r`,
- // Encode HTML specials as hex so the output can be embedded
- // in HTML attributes without further encoding.
- '"': `\x22`,
- '&': `\x26`,
- '\'': `\x27`,
- '+': `\x2b`,
- '/': `\/`,
- '<': `\x3c`,
- '>': `\x3e`,
-}
-
-var jsRegexpReplacementTable = []string{
- 0: `\0`,
- '\t': `\t`,
- '\n': `\n`,
- '\v': `\x0b`, // "\v" == "v" on IE 6.
- '\f': `\f`,
- '\r': `\r`,
- // Encode HTML specials as hex so the output can be embedded
- // in HTML attributes without further encoding.
- '"': `\x22`,
- '$': `\$`,
- '&': `\x26`,
- '\'': `\x27`,
- '(': `\(`,
- ')': `\)`,
- '*': `\*`,
- '+': `\x2b`,
- '-': `\-`,
- '.': `\.`,
- '/': `\/`,
- '<': `\x3c`,
- '>': `\x3e`,
- '?': `\?`,
- '[': `\[`,
- '\\': `\\`,
- ']': `\]`,
- '^': `\^`,
- '{': `\{`,
- '|': `\|`,
- '}': `\}`,
-}
-
-// isJSIdentPart returns whether the given rune is a JS identifier part.
-// It does not handle all the non-Latin letters, joiners, and combining marks,
-// but it does handle every codepoint that can occur in a numeric literal or
-// a keyword.
-func isJSIdentPart(r rune) bool {
- switch {
- case r == '$':
- return true
- case '0' <= r && r <= '9':
- return true
- case 'A' <= r && r <= 'Z':
- return true
- case r == '_':
- return true
- case 'a' <= r && r <= 'z':
- return true
- }
- return false
-}
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package html
-
-import (
- "bytes"
- "math"
- "strings"
- "testing"
-)
-
-func TestNextJsCtx(t *testing.T) {
- tests := []struct {
- jsCtx jsCtx
- s string
- }{
- // Statement terminators precede regexps.
- {jsCtxRegexp, ";"},
- // This is not airtight.
- // ({ valueOf: function () { return 1 } } / 2)
- // is valid JavaScript but in practice, devs do not do this.
- // A block followed by a statement starting with a RegExp is
- // much more common:
- // while (x) {...} /foo/.test(x) || panic()
- {jsCtxRegexp, "}"},
- // But member, call, grouping, and array expression terminators
- // precede div ops.
- {jsCtxDivOp, ")"},
- {jsCtxDivOp, "]"},
- // At the start of a primary expression, array, or expression
- // statement, expect a regexp.
- {jsCtxRegexp, "("},
- {jsCtxRegexp, "["},
- {jsCtxRegexp, "{"},
- // Assignment operators precede regexps as do all exclusively
- // prefix and binary operators.
- {jsCtxRegexp, "="},
- {jsCtxRegexp, "+="},
- {jsCtxRegexp, "*="},
- {jsCtxRegexp, "*"},
- {jsCtxRegexp, "!"},
- // Whether the + or - is infix or prefix, it cannot precede a
- // div op.
- {jsCtxRegexp, "+"},
- {jsCtxRegexp, "-"},
- // An incr/decr op precedes a div operator.
- // This is not airtight. In (g = ++/h/i) a regexp follows a
- // pre-increment operator, but in practice devs do not try to
- // increment or decrement regular expressions.
- // (g++/h/i) where ++ is a postfix operator on g is much more
- // common.
- {jsCtxDivOp, "--"},
- {jsCtxDivOp, "++"},
- {jsCtxDivOp, "x--"},
- // When we have many dashes or pluses, then they are grouped
- // left to right.
- {jsCtxRegexp, "x---"}, // A postfix -- then a -.
- // return followed by a slash returns the regexp literal or the
- // slash starts a regexp literal in an expression statement that
- // is dead code.
- {jsCtxRegexp, "return"},
- {jsCtxRegexp, "return "},
- {jsCtxRegexp, "return\t"},
- {jsCtxRegexp, "return\n"},
- {jsCtxRegexp, "return\u2028"},
- // Identifiers can be divided and cannot validly be preceded by
- // a regular expression. Semicolon insertion cannot happen
- // between an identifier and a regular expression on a new line
- // because the one token lookahead for semicolon insertion has
- // to conclude that it could be a div binary op and treat it as
- // such.
- {jsCtxDivOp, "x"},
- {jsCtxDivOp, "x "},
- {jsCtxDivOp, "x\t"},
- {jsCtxDivOp, "x\n"},
- {jsCtxDivOp, "x\u2028"},
- {jsCtxDivOp, "preturn"},
- // Numbers precede div ops.
- {jsCtxDivOp, "0"},
- // Dots that are part of a number are div preceders.
- {jsCtxDivOp, "0."},
- }
-
- for _, test := range tests {
- if nextJSCtx([]byte(test.s), jsCtxRegexp) != test.jsCtx {
- t.Errorf("want %s got %q", test.jsCtx, test.s)
- }
- if nextJSCtx([]byte(test.s), jsCtxDivOp) != test.jsCtx {
- t.Errorf("want %s got %q", test.jsCtx, test.s)
- }
- }
-
- if nextJSCtx([]byte(" "), jsCtxRegexp) != jsCtxRegexp {
- t.Error("Blank tokens")
- }
-
- if nextJSCtx([]byte(" "), jsCtxDivOp) != jsCtxDivOp {
- t.Error("Blank tokens")
- }
-}
-
-func TestJSValEscaper(t *testing.T) {
- tests := []struct {
- x interface{}
- js string
- }{
- {int(42), " 42 "},
- {uint(42), " 42 "},
- {int16(42), " 42 "},
- {uint16(42), " 42 "},
- {int32(-42), " -42 "},
- {uint32(42), " 42 "},
- {int16(-42), " -42 "},
- {uint16(42), " 42 "},
- {int64(-42), " -42 "},
- {uint64(42), " 42 "},
- {uint64(1) << 53, " 9007199254740992 "},
- // ulp(1 << 53) > 1 so this loses precision in JS
- // but it is still a representable integer literal.
- {uint64(1)<<53 + 1, " 9007199254740993 "},
- {float32(1.0), " 1 "},
- {float32(-1.0), " -1 "},
- {float32(0.5), " 0.5 "},
- {float32(-0.5), " -0.5 "},
- {float32(1.0) / float32(256), " 0.00390625 "},
- {float32(0), " 0 "},
- {math.Copysign(0, -1), " -0 "},
- {float64(1.0), " 1 "},
- {float64(-1.0), " -1 "},
- {float64(0.5), " 0.5 "},
- {float64(-0.5), " -0.5 "},
- {float64(0), " 0 "},
- {math.Copysign(0, -1), " -0 "},
- {"", `""`},
- {"foo", `"foo"`},
- // Newlines.
- {"\r\n\u2028\u2029", `"\r\n\u2028\u2029"`},
- // "\v" == "v" on IE 6 so use "\x0b" instead.
- {"\t\x0b", `"\u0009\u000b"`},
- {struct{ X, Y int }{1, 2}, `{"X":1,"Y":2}`},
- {[]interface{}{}, "[]"},
- {[]interface{}{42, "foo", nil}, `[42,"foo",null]`},
- {[]string{"<!--", "</script>", "-->"}, `["\u003c!--","\u003c/script\u003e","--\u003e"]`},
- {"<!--", `"\u003c!--"`},
- {"-->", `"--\u003e"`},
- {"<![CDATA[", `"\u003c![CDATA["`},
- {"]]>", `"]]\u003e"`},
- {"</script", `"\u003c/script"`},
- {"\U0001D11E", "\"\U0001D11E\""}, // or "\uD834\uDD1E"
- }
-
- for _, test := range tests {
- if js := jsValEscaper(test.x); js != test.js {
- t.Errorf("%+v: want\n\t%q\ngot\n\t%q", test.x, test.js, js)
- }
- // Make sure that escaping corner cases are not broken
- // by nesting.
- a := []interface{}{test.x}
- want := "[" + strings.TrimSpace(test.js) + "]"
- if js := jsValEscaper(a); js != want {
- t.Errorf("%+v: want\n\t%q\ngot\n\t%q", a, want, js)
- }
- }
-}
-
-func TestJSStrEscaper(t *testing.T) {
- tests := []struct {
- x interface{}
- esc string
- }{
- {"", ``},
- {"foo", `foo`},
- {"\u0000", `\0`},
- {"\t", `\t`},
- {"\n", `\n`},
- {"\r", `\r`},
- {"\u2028", `\u2028`},
- {"\u2029", `\u2029`},
- {"\\", `\\`},
- {"\\n", `\\n`},
- {"foo\r\nbar", `foo\r\nbar`},
- // Preserve attribute boundaries.
- {`"`, `\x22`},
- {`'`, `\x27`},
- // Allow embedding in HTML without further escaping.
- {`&amp;`, `\x26amp;`},
- // Prevent breaking out of text node and element boundaries.
- {"</script>", `\x3c\/script\x3e`},
- {"<![CDATA[", `\x3c![CDATA[`},
- {"]]>", `]]\x3e`},
- // http://dev.w3.org/html5/markup/aria/syntax.html#escaping-text-span
- // "The text in style, script, title, and textarea elements
- // must not have an escaping text span start that is not
- // followed by an escaping text span end."
- // Furthermore, spoofing an escaping text span end could lead
- // to different interpretation of a </script> sequence otherwise
- // masked by the escaping text span, and spoofing a start could
- // allow regular text content to be interpreted as script
- // allowing script execution via a combination of a JS string
- // injection followed by an HTML text injection.
- {"<!--", `\x3c!--`},
- {"-->", `--\x3e`},
- // From http://code.google.com/p/doctype/wiki/ArticleUtf7
- {"+ADw-script+AD4-alert(1)+ADw-/script+AD4-",
- `\x2bADw-script\x2bAD4-alert(1)\x2bADw-\/script\x2bAD4-`,
- },
- // Invalid UTF-8 sequence
- {"foo\xA0bar", "foo\xA0bar"},
- // Invalid unicode scalar value.
- {"foo\xed\xa0\x80bar", "foo\xed\xa0\x80bar"},
- }
-
- for _, test := range tests {
- esc := jsStrEscaper(test.x)
- if esc != test.esc {
- t.Errorf("%q: want %q got %q", test.x, test.esc, esc)
- }
- }
-}
-
-func TestJSRegexpEscaper(t *testing.T) {
- tests := []struct {
- x interface{}
- esc string
- }{
- {"", `(?:)`},
- {"foo", `foo`},
- {"\u0000", `\0`},
- {"\t", `\t`},
- {"\n", `\n`},
- {"\r", `\r`},
- {"\u2028", `\u2028`},
- {"\u2029", `\u2029`},
- {"\\", `\\`},
- {"\\n", `\\n`},
- {"foo\r\nbar", `foo\r\nbar`},
- // Preserve attribute boundaries.
- {`"`, `\x22`},
- {`'`, `\x27`},
- // Allow embedding in HTML without further escaping.
- {`&amp;`, `\x26amp;`},
- // Prevent breaking out of text node and element boundaries.
- {"</script>", `\x3c\/script\x3e`},
- {"<![CDATA[", `\x3c!\[CDATA\[`},
- {"]]>", `\]\]\x3e`},
- // Escaping text spans.
- {"<!--", `\x3c!\-\-`},
- {"-->", `\-\-\x3e`},
- {"*", `\*`},
- {"+", `\x2b`},
- {"?", `\?`},
- {"[](){}", `\[\]\(\)\{\}`},
- {"$foo|x.y", `\$foo\|x\.y`},
- {"x^y", `x\^y`},
- }
-
- for _, test := range tests {
- esc := jsRegexpEscaper(test.x)
- if esc != test.esc {
- t.Errorf("%q: want %q got %q", test.x, test.esc, esc)
- }
- }
-}
-
-func TestEscapersOnLower7AndSelectHighCodepoints(t *testing.T) {
- input := ("\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f" +
- "\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" +
- ` !"#$%&'()*+,-./` +
- `0123456789:;<=>?` +
- `@ABCDEFGHIJKLMNO` +
- `PQRSTUVWXYZ[\]^_` +
- "`abcdefghijklmno" +
- "pqrstuvwxyz{|}~\x7f" +
- "\u00A0\u0100\u2028\u2029\ufeff\U0001D11E")
-
- tests := []struct {
- name string
- escaper func(...interface{}) string
- escaped string
- }{
- {
- "jsStrEscaper",
- jsStrEscaper,
- "\\0\x01\x02\x03\x04\x05\x06\x07" +
- "\x08\\t\\n\\x0b\\f\\r\x0E\x0F" +
- "\x10\x11\x12\x13\x14\x15\x16\x17" +
- "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" +
- ` !\x22#$%\x26\x27()*\x2b,-.\/` +
- `0123456789:;\x3c=\x3e?` +
- `@ABCDEFGHIJKLMNO` +
- `PQRSTUVWXYZ[\\]^_` +
- "`abcdefghijklmno" +
- "pqrstuvwxyz{|}~\x7f" +
- "\u00A0\u0100\\u2028\\u2029\ufeff\U0001D11E",
- },
- {
- "jsRegexpEscaper",
- jsRegexpEscaper,
- "\\0\x01\x02\x03\x04\x05\x06\x07" +
- "\x08\\t\\n\\x0b\\f\\r\x0E\x0F" +
- "\x10\x11\x12\x13\x14\x15\x16\x17" +
- "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" +
- ` !\x22#\$%\x26\x27\(\)\*\x2b,\-\.\/` +
- `0123456789:;\x3c=\x3e\?` +
- `@ABCDEFGHIJKLMNO` +
- `PQRSTUVWXYZ\[\\\]\^_` +
- "`abcdefghijklmno" +
- `pqrstuvwxyz\{\|\}~` + "\u007f" +
- "\u00A0\u0100\\u2028\\u2029\ufeff\U0001D11E",
- },
- }
-
- for _, test := range tests {
- if s := test.escaper(input); s != test.escaped {
- t.Errorf("%s once: want\n\t%q\ngot\n\t%q", test.name, test.escaped, s)
- continue
- }
-
- // Escape it rune by rune to make sure that any
- // fast-path checking does not break escaping.
- var buf bytes.Buffer
- for _, c := range input {
- buf.WriteString(test.escaper(string(c)))
- }
-
- if s := buf.String(); s != test.escaped {
- t.Errorf("%s rune-wise: want\n\t%q\ngot\n\t%q", test.name, test.escaped, s)
- continue
- }
- }
-}
-
-func BenchmarkJSValEscaperWithNum(b *testing.B) {
- for i := 0; i < b.N; i++ {
- jsValEscaper(3.141592654)
- }
-}
-
-func BenchmarkJSValEscaperWithStr(b *testing.B) {
- for i := 0; i < b.N; i++ {
- jsValEscaper("The <i>quick</i>,\r\n<span style='color:brown'>brown</span> fox jumps\u2028over the <canine class=\"lazy\">dog</canine>")
- }
-}
-
-func BenchmarkJSValEscaperWithStrNoSpecials(b *testing.B) {
- for i := 0; i < b.N; i++ {
- jsValEscaper("The quick, brown fox jumps over the lazy dog")
- }
-}
-
-func BenchmarkJSValEscaperWithObj(b *testing.B) {
- o := struct {
- S string
- N int
- }{
- "The <i>quick</i>,\r\n<span style='color:brown'>brown</span> fox jumps\u2028over the <canine class=\"lazy\">dog</canine>\u2028",
- 42,
- }
- for i := 0; i < b.N; i++ {
- jsValEscaper(o)
- }
-}
-
-func BenchmarkJSValEscaperWithObjNoSpecials(b *testing.B) {
- o := struct {
- S string
- N int
- }{
- "The quick, brown fox jumps over the lazy dog",
- 42,
- }
- for i := 0; i < b.N; i++ {
- jsValEscaper(o)
- }
-}
-
-func BenchmarkJSStrEscaperNoSpecials(b *testing.B) {
- for i := 0; i < b.N; i++ {
- jsStrEscaper("The quick, brown fox jumps over the lazy dog.")
- }
-}
-
-func BenchmarkJSStrEscaper(b *testing.B) {
- for i := 0; i < b.N; i++ {
- jsStrEscaper("The <i>quick</i>,\r\n<span style='color:brown'>brown</span> fox jumps\u2028over the <canine class=\"lazy\">dog</canine>")
- }
-}
-
-func BenchmarkJSRegexpEscaperNoSpecials(b *testing.B) {
- for i := 0; i < b.N; i++ {
- jsRegexpEscaper("The quick, brown fox jumps over the lazy dog")
- }
-}
-
-func BenchmarkJSRegexpEscaper(b *testing.B) {
- for i := 0; i < b.N; i++ {
- jsRegexpEscaper("The <i>quick</i>,\r\n<span style='color:brown'>brown</span> fox jumps\u2028over the <canine class=\"lazy\">dog</canine>")
- }
-}
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package html
-
-import (
- "bytes"
- "strings"
-)
-
-// transitionFunc is the array of context transition functions for text nodes.
-// A transition function takes a context and template text input, and returns
-// the updated context and the number of bytes consumed from the front of the
-// input.
-var transitionFunc = [...]func(context, []byte) (context, int){
- stateText: tText,
- stateTag: tTag,
- stateAttrName: tAttrName,
- stateAfterName: tAfterName,
- stateBeforeValue: tBeforeValue,
- stateHTMLCmt: tHTMLCmt,
- stateRCDATA: tSpecialTagEnd,
- stateAttr: tAttr,
- stateURL: tURL,
- stateJS: tJS,
- stateJSDqStr: tJSDelimited,
- stateJSSqStr: tJSDelimited,
- stateJSRegexp: tJSDelimited,
- stateJSBlockCmt: tBlockCmt,
- stateJSLineCmt: tLineCmt,
- stateCSS: tCSS,
- stateCSSDqStr: tCSSStr,
- stateCSSSqStr: tCSSStr,
- stateCSSDqURL: tCSSStr,
- stateCSSSqURL: tCSSStr,
- stateCSSURL: tCSSStr,
- stateCSSBlockCmt: tBlockCmt,
- stateCSSLineCmt: tLineCmt,
- stateError: tError,
-}
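A hedged in-package sketch of how this table is driven. The real escaper (escapeText, referenced by the tests removed above) also scans for the delimiter that ends an attribute value, as stripTags does; ignoring that, classifying a run of template text is just repeated application of the transition function for the current state.

// contextAfter is a simplified classifier: it starts in the empty context and
// repeatedly applies the transition function for the current state. It omits
// the attribute-delimiter handling that escapeText and stripTags layer on top,
// so it is only a sketch of how transitionFunc is meant to be used.
func contextAfter(s []byte) context {
    c := context{}
    for i := 0; i < len(s); {
        var n int
        c, n = transitionFunc[c.state](c, s[i:])
        if n == 0 {
            break // defensive: stop if nothing was consumed
        }
        i += n
    }
    return c
}

// contextAfter([]byte(`<a onclick="`)) ends in stateJS with delimDoubleQuote,
// matching the corresponding TestEscapeText case.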
-
-var commentStart = []byte("<!--")
-var commentEnd = []byte("-->")
-
-// tText is the context transition function for the text state.
-func tText(c context, s []byte) (context, int) {
- k := 0
- for {
- i := k + bytes.IndexByte(s[k:], '<')
- if i < k || i+1 == len(s) {
- return c, len(s)
- } else if i+4 <= len(s) && bytes.Equal(commentStart, s[i:i+4]) {
- return context{state: stateHTMLCmt}, i + 4
- }
- i++
- end := false
- if s[i] == '/' {
- if i+1 == len(s) {
- return c, len(s)
- }
- end, i = true, i+1
- }
- j, e := eatTagName(s, i)
- if j != i {
- if end {
- e = elementNone
- }
- // We've found an HTML tag.
- return context{state: stateTag, element: e}, j
- }
- k = j
- }
- panic("unreachable")
-}
-
-var elementContentType = [...]state{
- elementNone: stateText,
- elementScript: stateJS,
- elementStyle: stateCSS,
- elementTextarea: stateRCDATA,
- elementTitle: stateRCDATA,
-}
-
-// tTag is the context transition function for the tag state.
-func tTag(c context, s []byte) (context, int) {
- // Find the attribute name.
- i := eatWhiteSpace(s, 0)
- if i == len(s) {
- return c, len(s)
- }
- if s[i] == '>' {
- return context{
- state: elementContentType[c.element],
- element: c.element,
- }, i + 1
- }
- j, err := eatAttrName(s, i)
- if err != nil {
- return context{state: stateError, err: err}, len(s)
- }
- state, attr := stateTag, attrNone
- if i == j {
- return context{
- state: stateError,
- err: errorf(ErrBadHTML, 0, "expected space, attr name, or end of tag, but got %q", s[i:]),
- }, len(s)
- }
- switch attrType(string(s[i:j])) {
- case contentTypeURL:
- attr = attrURL
- case contentTypeCSS:
- attr = attrStyle
- case contentTypeJS:
- attr = attrScript
- }
- if j == len(s) {
- state = stateAttrName
- } else {
- state = stateAfterName
- }
- return context{state: state, element: c.element, attr: attr}, j
-}
-
-// tAttrName is the context transition function for stateAttrName.
-func tAttrName(c context, s []byte) (context, int) {
- i, err := eatAttrName(s, 0)
- if err != nil {
- return context{state: stateError, err: err}, len(s)
- } else if i != len(s) {
- c.state = stateAfterName
- }
- return c, i
-}
-
-// tAfterName is the context transition function for stateAfterName.
-func tAfterName(c context, s []byte) (context, int) {
- // Look for the start of the value.
- i := eatWhiteSpace(s, 0)
- if i == len(s) {
- return c, len(s)
- } else if s[i] != '=' {
- // Occurs due to tag ending '>', and valueless attribute.
- c.state = stateTag
- return c, i
- }
- c.state = stateBeforeValue
- // Consume the "=".
- return c, i + 1
-}
-
-var attrStartStates = [...]state{
- attrNone: stateAttr,
- attrScript: stateJS,
- attrStyle: stateCSS,
- attrURL: stateURL,
-}
-
-// tBeforeValue is the context transition function for stateBeforeValue.
-func tBeforeValue(c context, s []byte) (context, int) {
- i := eatWhiteSpace(s, 0)
- if i == len(s) {
- return c, len(s)
- }
- // Find the attribute delimiter.
- delim := delimSpaceOrTagEnd
- switch s[i] {
- case '\'':
- delim, i = delimSingleQuote, i+1
- case '"':
- delim, i = delimDoubleQuote, i+1
- }
- c.state, c.delim, c.attr = attrStartStates[c.attr], delim, attrNone
- return c, i
-}
-
-// tHTMLCmt is the context transition function for stateHTMLCmt.
-func tHTMLCmt(c context, s []byte) (context, int) {
- if i := bytes.Index(s, commentEnd); i != -1 {
- return context{}, i + 3
- }
- return c, len(s)
-}
-
-// specialTagEndMarkers maps element types to the character sequence that
-// case-insensitively signals the end of the special tag body.
-var specialTagEndMarkers = [...]string{
- elementScript: "</script",
- elementStyle: "</style",
- elementTextarea: "</textarea",
- elementTitle: "</title",
-}
-
-// tSpecialTagEnd is the context transition function for raw text and RCDATA
-// element states.
-func tSpecialTagEnd(c context, s []byte) (context, int) {
- if c.element != elementNone {
- if i := strings.Index(strings.ToLower(string(s)), specialTagEndMarkers[c.element]); i != -1 {
- return context{}, i
- }
- }
- return c, len(s)
-}
-
-// tAttr is the context transition function for the attribute state.
-func tAttr(c context, s []byte) (context, int) {
- return c, len(s)
-}
-
-// tURL is the context transition function for the URL state.
-func tURL(c context, s []byte) (context, int) {
- if bytes.IndexAny(s, "#?") >= 0 {
- c.urlPart = urlPartQueryOrFrag
- } else if len(s) != eatWhiteSpace(s, 0) && c.urlPart == urlPartNone {
- // HTML5 uses "Valid URL potentially surrounded by spaces" for
- // attrs: http://www.w3.org/TR/html5/index.html#attributes-1
- c.urlPart = urlPartPreQuery
- }
- return c, len(s)
-}
-
-// tJS is the context transition function for the JS state.
-func tJS(c context, s []byte) (context, int) {
- i := bytes.IndexAny(s, `"'/`)
- if i == -1 {
- // The entire input contains no string, comment, or regexp tokens.
- c.jsCtx = nextJSCtx(s, c.jsCtx)
- return c, len(s)
- }
- c.jsCtx = nextJSCtx(s[:i], c.jsCtx)
- switch s[i] {
- case '"':
- c.state, c.jsCtx = stateJSDqStr, jsCtxRegexp
- case '\'':
- c.state, c.jsCtx = stateJSSqStr, jsCtxRegexp
- case '/':
- switch {
- case i+1 < len(s) && s[i+1] == '/':
- c.state, i = stateJSLineCmt, i+1
- case i+1 < len(s) && s[i+1] == '*':
- c.state, i = stateJSBlockCmt, i+1
- case c.jsCtx == jsCtxRegexp:
- c.state = stateJSRegexp
- case c.jsCtx == jsCtxDivOp:
- c.jsCtx = jsCtxRegexp
- default:
- return context{
- state: stateError,
- err: errorf(ErrSlashAmbig, 0, "'/' could start a division or regexp: %.32q", s[i:]),
- }, len(s)
- }
- default:
- panic("unreachable")
- }
- return c, i + 1
-}
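Illustrative reading of the '/' disambiguation above (hand-written examples, not taken from the package's tests):

	x = a / b        // after a value, jsCtx is jsCtxDivOp: '/' is division
	x = /re/.test(y) // after '=', jsCtx is jsCtxRegexp: '/' opens a regexp literal
	{{.}} / 2        // after a template action the context is unknown: ErrSlashAmbig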
-
-// tJSDelimited is the context transition function for the JS string and regexp
-// states.
-func tJSDelimited(c context, s []byte) (context, int) {
- specials := `\"`
- switch c.state {
- case stateJSSqStr:
- specials = `\'`
- case stateJSRegexp:
- specials = `\/[]`
- }
-
- k, inCharset := 0, false
- for {
- i := k + bytes.IndexAny(s[k:], specials)
- if i < k {
- break
- }
- switch s[i] {
- case '\\':
- i++
- if i == len(s) {
- return context{
- state: stateError,
- err: errorf(ErrPartialEscape, 0, "unfinished escape sequence in JS string: %q", s),
- }, len(s)
- }
- case '[':
- inCharset = true
- case ']':
- inCharset = false
- default:
- // end delimiter
- if !inCharset {
- c.state, c.jsCtx = stateJS, jsCtxDivOp
- return c, i + 1
- }
- }
- k = i + 1
- }
-
- if inCharset {
- // This can be fixed by making context richer if interpolation
- // into charsets is desired.
- return context{
- state: stateError,
- err: errorf(ErrPartialCharset, 0, "unfinished JS regexp charset: %q", s),
- }, len(s)
- }
-
- return c, len(s)
-}
-
-var blockCommentEnd = []byte("*/")
-
-// tBlockCmt is the context transition function for /*comment*/ states.
-func tBlockCmt(c context, s []byte) (context, int) {
- i := bytes.Index(s, blockCommentEnd)
- if i == -1 {
- return c, len(s)
- }
- switch c.state {
- case stateJSBlockCmt:
- c.state = stateJS
- case stateCSSBlockCmt:
- c.state = stateCSS
- default:
- panic(c.state.String())
- }
- return c, i + 2
-}
-
-// tLineCmt is the context transition function for //comment states.
-func tLineCmt(c context, s []byte) (context, int) {
- var lineTerminators string
- var endState state
- switch c.state {
- case stateJSLineCmt:
- lineTerminators, endState = "\n\r\u2028\u2029", stateJS
- case stateCSSLineCmt:
- lineTerminators, endState = "\n\f\r", stateCSS
- // Line comments are not part of any published CSS standard but
- // are supported by the 4 major browsers.
- // This defines line comments as
- // LINECOMMENT ::= "//" [^\n\f\r]*
- // since http://www.w3.org/TR/css3-syntax/#SUBTOK-nl defines
- // newlines:
- // nl ::= #xA | #xD #xA | #xD | #xC
- default:
- panic(c.state.String())
- }
-
- i := bytes.IndexAny(s, lineTerminators)
- if i == -1 {
- return c, len(s)
- }
- c.state = endState
- // Per section 7.4 of EcmaScript 5 : http://es5.github.com/#x7.4
- // "However, the LineTerminator at the end of the line is not
- // considered to be part of the single-line comment; it is
- // recognized separately by the lexical grammar and becomes part
- // of the stream of input elements for the syntactic grammar."
- return c, i
-}
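Concretely (illustrative example): for the JS text "// note\nx = 1" the function above returns the index of the '\n', so the newline itself is scanned again in stateJS instead of being consumed with the comment, matching the EcmaScript rule quoted above.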
-
-// tCSS is the context transition function for the CSS state.
-func tCSS(c context, s []byte) (context, int) {
- // CSS quoted strings are almost never used except for:
- // (1) URLs as in background: "/foo.png"
- // (2) Multiword font-names as in font-family: "Times New Roman"
- // (3) List separators in content values as in inline-lists:
- // <style>
- // ul.inlineList { list-style: none; padding:0 }
- // ul.inlineList > li { display: inline }
- // ul.inlineList > li:before { content: ", " }
- // ul.inlineList > li:first-child:before { content: "" }
- // </style>
- // <ul class=inlineList><li>One<li>Two<li>Three</ul>
- // (4) Attribute value selectors as in a[href="http://example.com/"]
- //
- // We conservatively treat all strings as URLs, but make some
- // allowances to avoid confusion.
- //
- // In (1), our conservative assumption is justified.
- // In (2), valid font names do not contain ':', '?', or '#', so our
- // conservative assumption is fine since we will never transition past
- // urlPartPreQuery.
- // In (3), our protocol heuristic should not be tripped, and there
- // should not be non-space content after a '?' or '#', so as long as
- // we only %-encode RFC 3986 reserved characters we are ok.
- // In (4), we should URL escape for URL attributes, and for others we
- // have the attribute name available if our conservative assumption
- // proves problematic for real code.
-
- k := 0
- for {
- i := k + bytes.IndexAny(s[k:], `("'/`)
- if i < k {
- return c, len(s)
- }
- switch s[i] {
- case '(':
- // Look for url to the left.
- p := bytes.TrimRight(s[:i], "\t\n\f\r ")
- if endsWithCSSKeyword(p, "url") {
- j := len(s) - len(bytes.TrimLeft(s[i+1:], "\t\n\f\r "))
- switch {
- case j != len(s) && s[j] == '"':
- c.state, j = stateCSSDqURL, j+1
- case j != len(s) && s[j] == '\'':
- c.state, j = stateCSSSqURL, j+1
- default:
- c.state = stateCSSURL
- }
- return c, j
- }
- case '/':
- if i+1 < len(s) {
- switch s[i+1] {
- case '/':
- c.state = stateCSSLineCmt
- return c, i + 2
- case '*':
- c.state = stateCSSBlockCmt
- return c, i + 2
- }
- }
- case '"':
- c.state = stateCSSDqStr
- return c, i + 1
- case '\'':
- c.state = stateCSSSqStr
- return c, i + 1
- }
- k = i + 1
- }
- panic("unreachable")
-}
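A walk-through of the conservative treatment described at the top of tCSS (illustrative only, not taken from the package's tests):

	background: url("/x.png")
	  tCSS sees '(' preceded by "url", then the '"'        -> stateCSSDqURL
	  tCSSStr reads the quoted body as a URL; closing '"'  -> back to stateCSS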
-
-// tCSSStr is the context transition function for the CSS string and URL states.
-func tCSSStr(c context, s []byte) (context, int) {
- var endAndEsc string
- switch c.state {
- case stateCSSDqStr, stateCSSDqURL:
- endAndEsc = `\"`
- case stateCSSSqStr, stateCSSSqURL:
- endAndEsc = `\'`
- case stateCSSURL:
- // Unquoted URLs end with a newline or close parenthesis.
- // The below includes the wc (whitespace character) and nl.
- endAndEsc = "\\\t\n\f\r )"
- default:
- panic(c.state.String())
- }
-
- k := 0
- for {
- i := k + bytes.IndexAny(s[k:], endAndEsc)
- if i < k {
- c, nread := tURL(c, decodeCSS(s[k:]))
- return c, k + nread
- }
- if s[i] == '\\' {
- i++
- if i == len(s) {
- return context{
- state: stateError,
- err: errorf(ErrPartialEscape, 0, "unfinished escape sequence in CSS string: %q", s),
- }, len(s)
- }
- } else {
- c.state = stateCSS
- return c, i + 1
- }
- c, _ = tURL(c, decodeCSS(s[:i+1]))
- k = i + 1
- }
- panic("unreachable")
-}
-
-// tError is the context transition function for the error state.
-func tError(c context, s []byte) (context, int) {
- return c, len(s)
-}
-
-// eatAttrName returns the largest j such that s[i:j] is an attribute name.
-// It returns an error if s[i:] does not look like it begins with an
-// attribute name, such as encountering a quote mark without a preceding
-// equals sign.
-func eatAttrName(s []byte, i int) (int, *Error) {
- for j := i; j < len(s); j++ {
- switch s[j] {
- case ' ', '\t', '\n', '\f', '\r', '=', '>':
- return j, nil
- case '\'', '"', '<':
- // These result in a parse warning in HTML5 and are
- // indicative of serious problems if seen in an attr
- // name in a template.
- return -1, errorf(ErrBadHTML, 0, "%q in attribute name: %.32q", s[j:j+1], s)
- default:
- // No-op.
- }
- }
- return len(s), nil
-}
-
-var elementNameMap = map[string]element{
- "script": elementScript,
- "style": elementStyle,
- "textarea": elementTextarea,
- "title": elementTitle,
-}
-
-// asciiAlpha returns whether c is an ASCII letter.
-func asciiAlpha(c byte) bool {
- return 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z'
-}
-
-// asciiAlphaNum returns whether c is an ASCII letter or digit.
-func asciiAlphaNum(c byte) bool {
- return asciiAlpha(c) || '0' <= c && c <= '9'
-}
-
-// eatTagName returns the largest j such that s[i:j] is a tag name and the tag type.
-func eatTagName(s []byte, i int) (int, element) {
- if i == len(s) || !asciiAlpha(s[i]) {
- return i, elementNone
- }
- j := i + 1
- for j < len(s) {
- x := s[j]
- if asciiAlphaNum(x) {
- j++
- continue
- }
- // Allow "x-y" or "x:y" but not "x-", "-y", or "x--y".
- if (x == ':' || x == '-') && j+1 < len(s) && asciiAlphaNum(s[j+1]) {
- j += 2
- continue
- }
- break
- }
- return j, elementNameMap[strings.ToLower(string(s[i:j]))]
-}
-
-// eatWhiteSpace returns the largest j such that s[i:j] is white space.
-func eatWhiteSpace(s []byte, i int) int {
- for j := i; j < len(s); j++ {
- switch s[j] {
- case ' ', '\t', '\n', '\f', '\r':
- // No-op.
- default:
- return j
- }
- }
- return len(s)
-}
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package html
-
-import (
- "bytes"
- "fmt"
- "strings"
-)
-
-// urlFilter returns its input unless it contains an unsafe protocol in which
-// case it defangs the entire URL.
-func urlFilter(args ...interface{}) string {
- s, t := stringify(args...)
- if t == contentTypeURL {
- return s
- }
- if i := strings.IndexRune(s, ':'); i >= 0 && strings.IndexRune(s[:i], '/') < 0 {
- protocol := strings.ToLower(s[:i])
- if protocol != "http" && protocol != "https" && protocol != "mailto" {
- return "#" + filterFailsafe
- }
- }
- return s
-}
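A sense of the filtering rule above (hedged example: filterFailsafe is assumed to be the package's usual "ZgotmplZ" sentinel):

	urlFilter("http://example.com/x")  // safe protocol, returned unchanged
	urlFilter("javascript:alert(1)")   // not http/https/mailto -> "#ZgotmplZ"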
-
-// urlEscaper produces an output that can be embedded in a URL query.
-// The output can be embedded in an HTML attribute without further escaping.
-func urlEscaper(args ...interface{}) string {
- return urlProcessor(false, args...)
-}
-
-// urlNormalizer normalizes URL content so it can be embedded in a quote-delimited
-// string or parenthesis delimited url(...).
-// The normalizer does not encode all HTML specials. Specifically, it does not
-// encode '&' so correct embedding in an HTML attribute requires escaping of
-// '&' to '&amp;'.
-func urlNormalizer(args ...interface{}) string {
- return urlProcessor(true, args...)
-}
-
-// urlProcessor normalizes (when norm is true) or escapes its input to produce
-// a valid hierarchical or opaque URL part.
-func urlProcessor(norm bool, args ...interface{}) string {
- s, t := stringify(args...)
- if t == contentTypeURL {
- norm = true
- }
- var b bytes.Buffer
- written := 0
- // The byte loop below assumes that all URLs use UTF-8 as the
- // content-encoding. This is similar to the URI to IRI encoding scheme
- // defined in section 3.1 of RFC 3987, and behaves the same as the
- // EcmaScript builtin encodeURIComponent.
- // It should not cause any misencoding of URLs in pages with
- // Content-type: text/html;charset=UTF-8.
- for i, n := 0, len(s); i < n; i++ {
- c := s[i]
- switch c {
- // Single quote and parens are sub-delims in RFC 3986, but we
- // escape them so the output can be embedded in single
- // quoted attributes and unquoted CSS url(...) constructs.
- // Single quotes are reserved in URLs, but are only used in
- // the obsolete "mark" rule in an appendix in RFC 3986
- // so can be safely encoded.
- case '!', '#', '$', '&', '*', '+', ',', '/', ':', ';', '=', '?', '@', '[', ']':
- if norm {
- continue
- }
- // Unreserved according to RFC 3986 sec 2.3
- // "For consistency, percent-encoded octets in the ranges of
- // ALPHA (%41-%5A and %61-%7A), DIGIT (%30-%39), hyphen (%2D),
- // period (%2E), underscore (%5F), or tilde (%7E) should not be
- // created by URI producers
- case '-', '.', '_', '~':
- continue
- case '%':
- // When normalizing do not re-encode valid escapes.
- if norm && i+2 < len(s) && isHex(s[i+1]) && isHex(s[i+2]) {
- continue
- }
- default:
- // Unreserved according to RFC 3986 sec 2.3
- if 'a' <= c && c <= 'z' {
- continue
- }
- if 'A' <= c && c <= 'Z' {
- continue
- }
- if '0' <= c && c <= '9' {
- continue
- }
- }
- b.WriteString(s[written:i])
- fmt.Fprintf(&b, "%%%02x", c)
- written = i + 1
- }
- if written == 0 {
- return s
- }
- b.WriteString(s[written:])
- return b.String()
-}
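A worked example of the escape/normalize split implemented by the loop above (computed by hand from the cases shown, not copied from the tests):

	urlEscaper("a&b=c d")    == "a%26b%3dc%20d" // sub-delims such as '&' and '=' are encoded
	urlNormalizer("a&b=c d") == "a&b=c%20d"     // sub-delims kept; the space is still encoded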
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package html
-
-import (
- "testing"
-)
-
-func TestURLNormalizer(t *testing.T) {
- tests := []struct {
- url, want string
- }{
- {"", ""},
- {
- "http://example.com:80/foo/bar?q=foo%20&bar=x+y#frag",
- "http://example.com:80/foo/bar?q=foo%20&bar=x+y#frag",
- },
- {" ", "%20"},
- {"%7c", "%7c"},
- {"%7C", "%7C"},
- {"%2", "%252"},
- {"%", "%25"},
- {"%z", "%25z"},
- {"/foo|bar/%5c\u1234", "/foo%7cbar/%5c%e1%88%b4"},
- }
- for _, test := range tests {
- if got := urlNormalizer(test.url); test.want != got {
- t.Errorf("%q: want\n\t%q\nbut got\n\t%q", test.url, test.want, got)
- }
- if test.want != urlNormalizer(test.want) {
- t.Errorf("not idempotent: %q", test.want)
- }
- }
-}
-
-func TestURLFilters(t *testing.T) {
- input := ("\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f" +
- "\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" +
- ` !"#$%&'()*+,-./` +
- `0123456789:;<=>?` +
- `@ABCDEFGHIJKLMNO` +
- `PQRSTUVWXYZ[\]^_` +
- "`abcdefghijklmno" +
- "pqrstuvwxyz{|}~\x7f" +
- "\u00A0\u0100\u2028\u2029\ufeff\U0001D11E")
-
- tests := []struct {
- name string
- escaper func(...interface{}) string
- escaped string
- }{
- {
- "urlEscaper",
- urlEscaper,
- "%00%01%02%03%04%05%06%07%08%09%0a%0b%0c%0d%0e%0f" +
- "%10%11%12%13%14%15%16%17%18%19%1a%1b%1c%1d%1e%1f" +
- "%20%21%22%23%24%25%26%27%28%29%2a%2b%2c-.%2f" +
- "0123456789%3a%3b%3c%3d%3e%3f" +
- "%40ABCDEFGHIJKLMNO" +
- "PQRSTUVWXYZ%5b%5c%5d%5e_" +
- "%60abcdefghijklmno" +
- "pqrstuvwxyz%7b%7c%7d~%7f" +
- "%c2%a0%c4%80%e2%80%a8%e2%80%a9%ef%bb%bf%f0%9d%84%9e",
- },
- {
- "urlNormalizer",
- urlNormalizer,
- "%00%01%02%03%04%05%06%07%08%09%0a%0b%0c%0d%0e%0f" +
- "%10%11%12%13%14%15%16%17%18%19%1a%1b%1c%1d%1e%1f" +
- "%20!%22#$%25&%27%28%29*+,-./" +
- "0123456789:;%3c=%3e?" +
- "@ABCDEFGHIJKLMNO" +
- "PQRSTUVWXYZ[%5c]%5e_" +
- "%60abcdefghijklmno" +
- "pqrstuvwxyz%7b%7c%7d~%7f" +
- "%c2%a0%c4%80%e2%80%a8%e2%80%a9%ef%bb%bf%f0%9d%84%9e",
- },
- }
-
- for _, test := range tests {
- if s := test.escaper(input); s != test.escaped {
- t.Errorf("%s: want\n\t%q\ngot\n\t%q", test.name, test.escaped, s)
- continue
- }
- }
-}
-
-func BenchmarkURLEscaper(b *testing.B) {
- for i := 0; i < b.N; i++ {
- urlEscaper("http://example.com:80/foo?q=bar%20&baz=x+y#frag")
- }
-}
-
-func BenchmarkURLEscaperNoSpecials(b *testing.B) {
- for i := 0; i < b.N; i++ {
- urlEscaper("TheQuickBrownFoxJumpsOverTheLazyDog.")
- }
-}
-
-func BenchmarkURLNormalizer(b *testing.B) {
- for i := 0; i < b.N; i++ {
- urlNormalizer("The quick brown fox jumps over the lazy dog.\n")
- }
-}
-
-func BenchmarkURLNormalizerNoSpecials(b *testing.B) {
- for i := 0; i < b.N; i++ {
- urlNormalizer("http://example.com:80/foo?q=bar%20&baz=x+y#frag")
- }
-}
package types
import (
- "big"
"go/token"
+ "math/big"
"strconv"
)
// otherwise the result is invalid.
func (x Const) Convert(typ *Type) Const {
// TODO(gri) implement this
- switch x := x.val.(type) {
+ switch x.val.(type) {
case bool:
case *big.Int:
case *big.Rat:
package types
import (
- "big"
"errors"
"fmt"
"go/ast"
"go/token"
"io"
+ "math/big"
"os"
"path/filepath"
"runtime"
- "scanner"
"strconv"
+ "text/scanner"
)
const trace = false // set to true for debugging
package types
import (
- "exec"
"go/ast"
"io/ioutil"
+ "os/exec"
"path/filepath"
"runtime"
"strings"
import (
"fmt"
- "syscall"
"os"
+ "syscall"
"unsafe"
)
package main
import (
- "unsafe"
"syscall"
+ "unsafe"
)
type Wndclassex struct {
import (
"bytes"
+ "encoding/json"
"fmt"
- "http"
- "json"
"log"
+ "net/http"
"os"
"runtime"
"strconv"
package expvar
import (
- "json"
+ "encoding/json"
"testing"
)
"bytes"
"strconv"
"unicode"
- "utf8"
+ "unicode/utf8"
)
const (
"reflect"
"sync"
"unicode"
- "utf8"
+ "unicode/utf8"
)
// Some constants in the form of bytes, to avoid string overhead.
"strconv"
"strings"
"unicode"
- "utf8"
+ "unicode/utf8"
)
// runeUnreader is the interface to something that can unread runes.
return
}
-// mustReadRune turns os.EOF into a panic(io.ErrUnexpectedEOF).
+// mustReadRune turns io.EOF into a panic(io.ErrUnexpectedEOF).
// It is called in cases such as string scanning where an EOF is a
// syntax error.
func (s *ss) mustReadRune() (r rune) {
"regexp"
"strings"
"testing"
- "utf8"
+ "unicode/utf8"
)
type ScanTest struct {
import (
"go/token"
"unicode"
- "utf8"
+ "unicode/utf8"
)
// ----------------------------------------------------------------------------
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ast
+
+import (
+ "go/token"
+ "sort"
+ "strconv"
+)
+
+// SortImports sorts runs of consecutive import lines in import blocks in f.
+func SortImports(fset *token.FileSet, f *File) {
+ for _, d := range f.Decls {
+ d, ok := d.(*GenDecl)
+ if !ok || d.Tok != token.IMPORT {
+ // Not an import declaration, so we're done.
+ // Imports are always first.
+ break
+ }
+
+ if d.Lparen == token.NoPos {
+ // Not a block: sorted by default.
+ continue
+ }
+
+ // Identify and sort runs of specs on successive lines.
+ i := 0
+ for j, s := range d.Specs {
+ if j > i && fset.Position(s.Pos()).Line > 1+fset.Position(d.Specs[j-1].End()).Line {
+ // j begins a new run. End this one.
+ sortSpecs(fset, f, d.Specs[i:j])
+ i = j
+ }
+ }
+ sortSpecs(fset, f, d.Specs[i:])
+ }
+}
+
+func importPath(s Spec) string {
+ t, err := strconv.Unquote(s.(*ImportSpec).Path.Value)
+ if err == nil {
+ return t
+ }
+ return ""
+}
+
+type posSpan struct {
+ Start token.Pos
+ End token.Pos
+}
+
+func sortSpecs(fset *token.FileSet, f *File, specs []Spec) {
+ // Avoid work if already sorted (also catches < 2 entries).
+ sorted := true
+ for i, s := range specs {
+ if i > 0 && importPath(specs[i-1]) > importPath(s) {
+ sorted = false
+ break
+ }
+ }
+ if sorted {
+ return
+ }
+
+ // Record positions for specs.
+ pos := make([]posSpan, len(specs))
+ for i, s := range specs {
+ // Cannot use s.End(), because it looks at len(s.Path.Value),
+ // and that string might have gotten longer or shorter.
+ // Instead, use s.Pos()+1, which is guaranteed to be > s.Pos()
+ // and still before the original end of the string, since any
+ // string literal must be at least 2 characters ("" or ``).
+ pos[i] = posSpan{s.Pos(), s.Pos() + 1}
+ }
+
+ // Identify comments in this range.
+ // Any comment from pos[0].Start to the final line counts.
+ lastLine := fset.Position(pos[len(pos)-1].End).Line
+ cstart := len(f.Comments)
+ cend := len(f.Comments)
+ for i, g := range f.Comments {
+ if g.Pos() < pos[0].Start {
+ continue
+ }
+ if i < cstart {
+ cstart = i
+ }
+ if fset.Position(g.End()).Line > lastLine {
+ cend = i
+ break
+ }
+ }
+ comments := f.Comments[cstart:cend]
+
+ // Assign each comment to the import spec preceding it.
+ importComment := map[*ImportSpec][]*CommentGroup{}
+ specIndex := 0
+ for _, g := range comments {
+ for specIndex+1 < len(specs) && pos[specIndex+1].Start <= g.Pos() {
+ specIndex++
+ }
+ s := specs[specIndex].(*ImportSpec)
+ importComment[s] = append(importComment[s], g)
+ }
+
+ // Sort the import specs by import path.
+ // Reassign the import paths to have the same position sequence.
+ // Reassign each comment to abut the end of its spec.
+ // Sort the comments by new position.
+ sort.Sort(byImportPath(specs))
+ for i, s := range specs {
+ s := s.(*ImportSpec)
+ if s.Name != nil {
+ s.Name.NamePos = pos[i].Start
+ }
+ s.Path.ValuePos = pos[i].Start
+ s.EndPos = pos[i].End
+ for _, g := range importComment[s] {
+ for _, c := range g.List {
+ c.Slash = pos[i].End
+ }
+ }
+ }
+ sort.Sort(byCommentPos(comments))
+}
+
+type byImportPath []Spec // slice of *ImportSpec
+
+func (x byImportPath) Len() int { return len(x) }
+func (x byImportPath) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+func (x byImportPath) Less(i, j int) bool { return importPath(x[i]) < importPath(x[j]) }
+
+type byCommentPos []*CommentGroup
+
+func (x byCommentPos) Len() int { return len(x) }
+func (x byCommentPos) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+func (x byCommentPos) Less(i, j int) bool { return x[i].Pos() < x[j].Pos() }
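A minimal, self-contained sketch of how the new entry point can be used once this file lands (the file name and sample source below are made up for illustration):

package main

import (
	"go/ast"
	"go/parser"
	"go/printer"
	"go/token"
	"log"
	"os"
)

func main() {
	src := `package p

import (
	"strconv"
	"bytes"
	"fmt"
)
`
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, parser.ParseComments)
	if err != nil {
		log.Fatal(err)
	}
	ast.SortImports(fset, f) // reorders the block to bytes, fmt, strconv
	printer.Fprint(os.Stdout, fset, f)
}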
import (
"bytes"
"errors"
- "exec"
"fmt"
"os"
+ "os/exec"
"path/filepath"
"regexp"
"runtime"
package build
import (
- "exec"
+ "os/exec"
"path/filepath"
"reflect"
"runtime"
"io"
"regexp"
"strings"
- "template" // for HTMLEscape
+ "text/template" // for HTMLEscape
)
func isWhitespace(ch byte) bool { return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r' }
"go/ast"
"strings"
"unicode"
- "utf8"
+ "unicode/utf8"
)
type Example struct {
// checkExpr checks that x is an expression (and not a type).
func (p *parser) checkExpr(x ast.Expr) ast.Expr {
- switch t := unparen(x).(type) {
+ switch unparen(x).(type) {
case *ast.BadExpr:
case *ast.Ident:
case *ast.BasicLit:
"io"
"os"
"path/filepath"
- "tabwriter"
+ "text/tabwriter"
)
const debug = false // enable for debugging
"path/filepath"
"strconv"
"unicode"
- "utf8"
+ "unicode/utf8"
)
// A Scanner holds the scanner's internal state while processing
package token
import (
- "gob"
+ "encoding/gob"
"io"
)
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gob
-
-import (
- "bytes"
- "errors"
- "math"
- "reflect"
- "strings"
- "testing"
- "unsafe"
-)
-
-// Guarantee encoding format by comparing some encodings to hand-written values
-type EncodeT struct {
- x uint64
- b []byte
-}
-
-var encodeT = []EncodeT{
- {0x00, []byte{0x00}},
- {0x0F, []byte{0x0F}},
- {0xFF, []byte{0xFF, 0xFF}},
- {0xFFFF, []byte{0xFE, 0xFF, 0xFF}},
- {0xFFFFFF, []byte{0xFD, 0xFF, 0xFF, 0xFF}},
- {0xFFFFFFFF, []byte{0xFC, 0xFF, 0xFF, 0xFF, 0xFF}},
- {0xFFFFFFFFFF, []byte{0xFB, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}},
- {0xFFFFFFFFFFFF, []byte{0xFA, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}},
- {0xFFFFFFFFFFFFFF, []byte{0xF9, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}},
- {0xFFFFFFFFFFFFFFFF, []byte{0xF8, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}},
- {0x1111, []byte{0xFE, 0x11, 0x11}},
- {0x1111111111111111, []byte{0xF8, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11}},
- {0x8888888888888888, []byte{0xF8, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88}},
- {1 << 63, []byte{0xF8, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}},
-}
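The table above pins down the unsigned-integer wire format: values below 128 occupy a single byte, and anything larger is preceded by a byte holding the negated count of big-endian payload bytes (0xFF for one byte, 0xFE for two, down to 0xF8 for eight). A hand-rolled sketch of that rule, written only to cross-check the table and not the package's actual encoder:

func encodeUintSketch(x uint64) []byte {
	if x < 0x80 {
		return []byte{byte(x)} // small values fit in one byte
	}
	n := 0
	for v := x; v > 0; v >>= 8 {
		n++ // count payload bytes
	}
	out := make([]byte, 1+n)
	out[0] = byte(-int8(n)) // negated byte count: 0xFF, 0xFE, ..., 0xF8
	for i := 0; i < n; i++ {
		out[1+i] = byte(x >> (8 * uint(n-1-i))) // big-endian payload
	}
	return out
}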
-
-// testError is meant to be used as a deferred function to turn a panic(gobError) into a
-// plain test.Error call.
-func testError(t *testing.T) {
- if e := recover(); e != nil {
- t.Error(e.(gobError).err) // Will re-panic if not one of our errors, such as a runtime error.
- }
- return
-}
-
-// Test basic encode/decode routines for unsigned integers
-func TestUintCodec(t *testing.T) {
- defer testError(t)
- b := new(bytes.Buffer)
- encState := newEncoderState(b)
- for _, tt := range encodeT {
- b.Reset()
- encState.encodeUint(tt.x)
- if !bytes.Equal(tt.b, b.Bytes()) {
- t.Errorf("encodeUint: %#x encode: expected % x got % x", tt.x, tt.b, b.Bytes())
- }
- }
- decState := newDecodeState(b)
- for u := uint64(0); ; u = (u + 1) * 7 {
- b.Reset()
- encState.encodeUint(u)
- v := decState.decodeUint()
- if u != v {
- t.Errorf("Encode/Decode: sent %#x received %#x", u, v)
- }
- if u&(1<<63) != 0 {
- break
- }
- }
-}
-
-func verifyInt(i int64, t *testing.T) {
- defer testError(t)
- var b = new(bytes.Buffer)
- encState := newEncoderState(b)
- encState.encodeInt(i)
- decState := newDecodeState(b)
- decState.buf = make([]byte, 8)
- j := decState.decodeInt()
- if i != j {
- t.Errorf("Encode/Decode: sent %#x received %#x", uint64(i), uint64(j))
- }
-}
-
-// Test basic encode/decode routines for signed integers
-func TestIntCodec(t *testing.T) {
- for u := uint64(0); ; u = (u + 1) * 7 {
- // Do positive and negative values
- i := int64(u)
- verifyInt(i, t)
- verifyInt(-i, t)
- verifyInt(^i, t)
- if u&(1<<63) != 0 {
- break
- }
- }
- verifyInt(-1<<63, t) // a tricky case
-}
-
-// The result of encoding a true boolean with field number 7
-var boolResult = []byte{0x07, 0x01}
-// The result of encoding a number 17 with field number 7
-var signedResult = []byte{0x07, 2 * 17}
-var unsignedResult = []byte{0x07, 17}
-var floatResult = []byte{0x07, 0xFE, 0x31, 0x40}
-// The result of encoding a number 17+19i with field number 7
-var complexResult = []byte{0x07, 0xFE, 0x31, 0x40, 0xFE, 0x33, 0x40}
-// The result of encoding "hello" with field number 7
-var bytesResult = []byte{0x07, 0x05, 'h', 'e', 'l', 'l', 'o'}
-
-func newDecodeState(buf *bytes.Buffer) *decoderState {
- d := new(decoderState)
- d.b = buf
- d.buf = make([]byte, uint64Size)
- return d
-}
-
-func newEncoderState(b *bytes.Buffer) *encoderState {
- b.Reset()
- state := &encoderState{enc: nil, b: b}
- state.fieldnum = -1
- return state
-}
-
-// Test instruction execution for encoding.
-// Do not run the machine yet; instead do individual instructions crafted by hand.
-func TestScalarEncInstructions(t *testing.T) {
- var b = new(bytes.Buffer)
-
- // bool
- {
- data := struct{ a bool }{true}
- instr := &encInstr{encBool, 6, 0, 0}
- state := newEncoderState(b)
- instr.op(instr, state, unsafe.Pointer(&data))
- if !bytes.Equal(boolResult, b.Bytes()) {
- t.Errorf("bool enc instructions: expected % x got % x", boolResult, b.Bytes())
- }
- }
-
- // int
- {
- b.Reset()
- data := struct{ a int }{17}
- instr := &encInstr{encInt, 6, 0, 0}
- state := newEncoderState(b)
- instr.op(instr, state, unsafe.Pointer(&data))
- if !bytes.Equal(signedResult, b.Bytes()) {
- t.Errorf("int enc instructions: expected % x got % x", signedResult, b.Bytes())
- }
- }
-
- // uint
- {
- b.Reset()
- data := struct{ a uint }{17}
- instr := &encInstr{encUint, 6, 0, 0}
- state := newEncoderState(b)
- instr.op(instr, state, unsafe.Pointer(&data))
- if !bytes.Equal(unsignedResult, b.Bytes()) {
- t.Errorf("uint enc instructions: expected % x got % x", unsignedResult, b.Bytes())
- }
- }
-
- // int8
- {
- b.Reset()
- data := struct{ a int8 }{17}
- instr := &encInstr{encInt8, 6, 0, 0}
- state := newEncoderState(b)
- instr.op(instr, state, unsafe.Pointer(&data))
- if !bytes.Equal(signedResult, b.Bytes()) {
- t.Errorf("int8 enc instructions: expected % x got % x", signedResult, b.Bytes())
- }
- }
-
- // uint8
- {
- b.Reset()
- data := struct{ a uint8 }{17}
- instr := &encInstr{encUint8, 6, 0, 0}
- state := newEncoderState(b)
- instr.op(instr, state, unsafe.Pointer(&data))
- if !bytes.Equal(unsignedResult, b.Bytes()) {
- t.Errorf("uint8 enc instructions: expected % x got % x", unsignedResult, b.Bytes())
- }
- }
-
- // int16
- {
- b.Reset()
- data := struct{ a int16 }{17}
- instr := &encInstr{encInt16, 6, 0, 0}
- state := newEncoderState(b)
- instr.op(instr, state, unsafe.Pointer(&data))
- if !bytes.Equal(signedResult, b.Bytes()) {
- t.Errorf("int16 enc instructions: expected % x got % x", signedResult, b.Bytes())
- }
- }
-
- // uint16
- {
- b.Reset()
- data := struct{ a uint16 }{17}
- instr := &encInstr{encUint16, 6, 0, 0}
- state := newEncoderState(b)
- instr.op(instr, state, unsafe.Pointer(&data))
- if !bytes.Equal(unsignedResult, b.Bytes()) {
- t.Errorf("uint16 enc instructions: expected % x got % x", unsignedResult, b.Bytes())
- }
- }
-
- // int32
- {
- b.Reset()
- data := struct{ a int32 }{17}
- instr := &encInstr{encInt32, 6, 0, 0}
- state := newEncoderState(b)
- instr.op(instr, state, unsafe.Pointer(&data))
- if !bytes.Equal(signedResult, b.Bytes()) {
- t.Errorf("int32 enc instructions: expected % x got % x", signedResult, b.Bytes())
- }
- }
-
- // uint32
- {
- b.Reset()
- data := struct{ a uint32 }{17}
- instr := &encInstr{encUint32, 6, 0, 0}
- state := newEncoderState(b)
- instr.op(instr, state, unsafe.Pointer(&data))
- if !bytes.Equal(unsignedResult, b.Bytes()) {
- t.Errorf("uint32 enc instructions: expected % x got % x", unsignedResult, b.Bytes())
- }
- }
-
- // int64
- {
- b.Reset()
- data := struct{ a int64 }{17}
- instr := &encInstr{encInt64, 6, 0, 0}
- state := newEncoderState(b)
- instr.op(instr, state, unsafe.Pointer(&data))
- if !bytes.Equal(signedResult, b.Bytes()) {
- t.Errorf("int64 enc instructions: expected % x got % x", signedResult, b.Bytes())
- }
- }
-
- // uint64
- {
- b.Reset()
- data := struct{ a uint64 }{17}
- instr := &encInstr{encUint64, 6, 0, 0}
- state := newEncoderState(b)
- instr.op(instr, state, unsafe.Pointer(&data))
- if !bytes.Equal(unsignedResult, b.Bytes()) {
- t.Errorf("uint64 enc instructions: expected % x got % x", unsignedResult, b.Bytes())
- }
- }
-
- // float32
- {
- b.Reset()
- data := struct{ a float32 }{17}
- instr := &encInstr{encFloat32, 6, 0, 0}
- state := newEncoderState(b)
- instr.op(instr, state, unsafe.Pointer(&data))
- if !bytes.Equal(floatResult, b.Bytes()) {
- t.Errorf("float32 enc instructions: expected % x got % x", floatResult, b.Bytes())
- }
- }
-
- // float64
- {
- b.Reset()
- data := struct{ a float64 }{17}
- instr := &encInstr{encFloat64, 6, 0, 0}
- state := newEncoderState(b)
- instr.op(instr, state, unsafe.Pointer(&data))
- if !bytes.Equal(floatResult, b.Bytes()) {
- t.Errorf("float64 enc instructions: expected % x got % x", floatResult, b.Bytes())
- }
- }
-
- // bytes == []uint8
- {
- b.Reset()
- data := struct{ a []byte }{[]byte("hello")}
- instr := &encInstr{encUint8Array, 6, 0, 0}
- state := newEncoderState(b)
- instr.op(instr, state, unsafe.Pointer(&data))
- if !bytes.Equal(bytesResult, b.Bytes()) {
- t.Errorf("bytes enc instructions: expected % x got % x", bytesResult, b.Bytes())
- }
- }
-
- // string
- {
- b.Reset()
- data := struct{ a string }{"hello"}
- instr := &encInstr{encString, 6, 0, 0}
- state := newEncoderState(b)
- instr.op(instr, state, unsafe.Pointer(&data))
- if !bytes.Equal(bytesResult, b.Bytes()) {
- t.Errorf("string enc instructions: expected % x got % x", bytesResult, b.Bytes())
- }
- }
-}
-
-func execDec(typ string, instr *decInstr, state *decoderState, t *testing.T, p unsafe.Pointer) {
- defer testError(t)
- v := int(state.decodeUint())
- if v+state.fieldnum != 6 {
- t.Fatalf("decoding field number %d, got %d", 6, v+state.fieldnum)
- }
- instr.op(instr, state, decIndirect(p, instr.indir))
- state.fieldnum = 6
-}
-
-func newDecodeStateFromData(data []byte) *decoderState {
- b := bytes.NewBuffer(data)
- state := newDecodeState(b)
- state.fieldnum = -1
- return state
-}
-
-// Test instruction execution for decoding.
-// Do not run the machine yet; instead do individual instructions crafted by hand.
-func TestScalarDecInstructions(t *testing.T) {
- ovfl := errors.New("overflow")
-
- // bool
- {
- var data struct {
- a bool
- }
- instr := &decInstr{decBool, 6, 0, 0, ovfl}
- state := newDecodeStateFromData(boolResult)
- execDec("bool", instr, state, t, unsafe.Pointer(&data))
- if data.a != true {
- t.Errorf("bool a = %v not true", data.a)
- }
- }
- // int
- {
- var data struct {
- a int
- }
- instr := &decInstr{decOpTable[reflect.Int], 6, 0, 0, ovfl}
- state := newDecodeStateFromData(signedResult)
- execDec("int", instr, state, t, unsafe.Pointer(&data))
- if data.a != 17 {
- t.Errorf("int a = %v not 17", data.a)
- }
- }
-
- // uint
- {
- var data struct {
- a uint
- }
- instr := &decInstr{decOpTable[reflect.Uint], 6, 0, 0, ovfl}
- state := newDecodeStateFromData(unsignedResult)
- execDec("uint", instr, state, t, unsafe.Pointer(&data))
- if data.a != 17 {
- t.Errorf("uint a = %v not 17", data.a)
- }
- }
-
- // int8
- {
- var data struct {
- a int8
- }
- instr := &decInstr{decInt8, 6, 0, 0, ovfl}
- state := newDecodeStateFromData(signedResult)
- execDec("int8", instr, state, t, unsafe.Pointer(&data))
- if data.a != 17 {
- t.Errorf("int8 a = %v not 17", data.a)
- }
- }
-
- // uint8
- {
- var data struct {
- a uint8
- }
- instr := &decInstr{decUint8, 6, 0, 0, ovfl}
- state := newDecodeStateFromData(unsignedResult)
- execDec("uint8", instr, state, t, unsafe.Pointer(&data))
- if data.a != 17 {
- t.Errorf("uint8 a = %v not 17", data.a)
- }
- }
-
- // int16
- {
- var data struct {
- a int16
- }
- instr := &decInstr{decInt16, 6, 0, 0, ovfl}
- state := newDecodeStateFromData(signedResult)
- execDec("int16", instr, state, t, unsafe.Pointer(&data))
- if data.a != 17 {
- t.Errorf("int16 a = %v not 17", data.a)
- }
- }
-
- // uint16
- {
- var data struct {
- a uint16
- }
- instr := &decInstr{decUint16, 6, 0, 0, ovfl}
- state := newDecodeStateFromData(unsignedResult)
- execDec("uint16", instr, state, t, unsafe.Pointer(&data))
- if data.a != 17 {
- t.Errorf("uint16 a = %v not 17", data.a)
- }
- }
-
- // int32
- {
- var data struct {
- a int32
- }
- instr := &decInstr{decInt32, 6, 0, 0, ovfl}
- state := newDecodeStateFromData(signedResult)
- execDec("int32", instr, state, t, unsafe.Pointer(&data))
- if data.a != 17 {
- t.Errorf("int32 a = %v not 17", data.a)
- }
- }
-
- // uint32
- {
- var data struct {
- a uint32
- }
- instr := &decInstr{decUint32, 6, 0, 0, ovfl}
- state := newDecodeStateFromData(unsignedResult)
- execDec("uint32", instr, state, t, unsafe.Pointer(&data))
- if data.a != 17 {
- t.Errorf("uint32 a = %v not 17", data.a)
- }
- }
-
- // uintptr
- {
- var data struct {
- a uintptr
- }
- instr := &decInstr{decOpTable[reflect.Uintptr], 6, 0, 0, ovfl}
- state := newDecodeStateFromData(unsignedResult)
- execDec("uintptr", instr, state, t, unsafe.Pointer(&data))
- if data.a != 17 {
- t.Errorf("uintptr a = %v not 17", data.a)
- }
- }
-
- // int64
- {
- var data struct {
- a int64
- }
- instr := &decInstr{decInt64, 6, 0, 0, ovfl}
- state := newDecodeStateFromData(signedResult)
- execDec("int64", instr, state, t, unsafe.Pointer(&data))
- if data.a != 17 {
- t.Errorf("int64 a = %v not 17", data.a)
- }
- }
-
- // uint64
- {
- var data struct {
- a uint64
- }
- instr := &decInstr{decUint64, 6, 0, 0, ovfl}
- state := newDecodeStateFromData(unsignedResult)
- execDec("uint64", instr, state, t, unsafe.Pointer(&data))
- if data.a != 17 {
- t.Errorf("uint64 a = %v not 17", data.a)
- }
- }
-
- // float32
- {
- var data struct {
- a float32
- }
- instr := &decInstr{decFloat32, 6, 0, 0, ovfl}
- state := newDecodeStateFromData(floatResult)
- execDec("float32", instr, state, t, unsafe.Pointer(&data))
- if data.a != 17 {
- t.Errorf("float32 a = %v not 17", data.a)
- }
- }
-
- // float64
- {
- var data struct {
- a float64
- }
- instr := &decInstr{decFloat64, 6, 0, 0, ovfl}
- state := newDecodeStateFromData(floatResult)
- execDec("float64", instr, state, t, unsafe.Pointer(&data))
- if data.a != 17 {
- t.Errorf("float64 a = %v not 17", data.a)
- }
- }
-
- // complex64
- {
- var data struct {
- a complex64
- }
- instr := &decInstr{decOpTable[reflect.Complex64], 6, 0, 0, ovfl}
- state := newDecodeStateFromData(complexResult)
- execDec("complex", instr, state, t, unsafe.Pointer(&data))
- if data.a != 17+19i {
- t.Errorf("complex a = %v not 17+19i", data.a)
- }
- }
-
- // complex128
- {
- var data struct {
- a complex128
- }
- instr := &decInstr{decOpTable[reflect.Complex128], 6, 0, 0, ovfl}
- state := newDecodeStateFromData(complexResult)
- execDec("complex", instr, state, t, unsafe.Pointer(&data))
- if data.a != 17+19i {
- t.Errorf("complex a = %v not 17+19i", data.a)
- }
- }
-
- // bytes == []uint8
- {
- var data struct {
- a []byte
- }
- instr := &decInstr{decUint8Slice, 6, 0, 0, ovfl}
- state := newDecodeStateFromData(bytesResult)
- execDec("bytes", instr, state, t, unsafe.Pointer(&data))
- if string(data.a) != "hello" {
- t.Errorf(`bytes a = %q not "hello"`, string(data.a))
- }
- }
-
- // string
- {
- var data struct {
- a string
- }
- instr := &decInstr{decString, 6, 0, 0, ovfl}
- state := newDecodeStateFromData(bytesResult)
- execDec("bytes", instr, state, t, unsafe.Pointer(&data))
- if data.a != "hello" {
- t.Errorf(`bytes a = %q not "hello"`, data.a)
- }
- }
-}
-
-func TestEndToEnd(t *testing.T) {
- type T2 struct {
- T string
- }
- s1 := "string1"
- s2 := "string2"
- type T1 struct {
- A, B, C int
- M map[string]*float64
- EmptyMap map[string]int // to check that we receive a non-nil map.
- N *[3]float64
- Strs *[2]string
- Int64s *[]int64
- RI complex64
- S string
- Y []byte
- T *T2
- }
- pi := 3.14159
- e := 2.71828
- t1 := &T1{
- A: 17,
- B: 18,
- C: -5,
- M: map[string]*float64{"pi": &pi, "e": &e},
- EmptyMap: make(map[string]int),
- N: &[3]float64{1.5, 2.5, 3.5},
- Strs: &[2]string{s1, s2},
- Int64s: &[]int64{77, 89, 123412342134},
- RI: 17 - 23i,
- S: "Now is the time",
- Y: []byte("hello, sailor"),
- T: &T2{"this is T2"},
- }
- b := new(bytes.Buffer)
- err := NewEncoder(b).Encode(t1)
- if err != nil {
- t.Error("encode:", err)
- }
- var _t1 T1
- err = NewDecoder(b).Decode(&_t1)
- if err != nil {
- t.Fatal("decode:", err)
- }
- if !reflect.DeepEqual(t1, &_t1) {
- t.Errorf("encode expected %v got %v", *t1, _t1)
- }
- // Be absolutely sure the received map is non-nil.
- if t1.EmptyMap == nil {
- t.Errorf("nil map sent")
- }
- if _t1.EmptyMap == nil {
- t.Errorf("nil map received")
- }
-}
-
-func TestOverflow(t *testing.T) {
- type inputT struct {
- Maxi int64
- Mini int64
- Maxu uint64
- Maxf float64
- Minf float64
- Maxc complex128
- Minc complex128
- }
- var it inputT
- var err error
- b := new(bytes.Buffer)
- enc := NewEncoder(b)
- dec := NewDecoder(b)
-
- // int8
- b.Reset()
- it = inputT{
- Maxi: math.MaxInt8 + 1,
- }
- type outi8 struct {
- Maxi int8
- Mini int8
- }
- var o1 outi8
- enc.Encode(it)
- err = dec.Decode(&o1)
- if err == nil || err.Error() != `value for "Maxi" out of range` {
- t.Error("wrong overflow error for int8:", err)
- }
- it = inputT{
- Mini: math.MinInt8 - 1,
- }
- b.Reset()
- enc.Encode(it)
- err = dec.Decode(&o1)
- if err == nil || err.Error() != `value for "Mini" out of range` {
- t.Error("wrong underflow error for int8:", err)
- }
-
- // int16
- b.Reset()
- it = inputT{
- Maxi: math.MaxInt16 + 1,
- }
- type outi16 struct {
- Maxi int16
- Mini int16
- }
- var o2 outi16
- enc.Encode(it)
- err = dec.Decode(&o2)
- if err == nil || err.Error() != `value for "Maxi" out of range` {
- t.Error("wrong overflow error for int16:", err)
- }
- it = inputT{
- Mini: math.MinInt16 - 1,
- }
- b.Reset()
- enc.Encode(it)
- err = dec.Decode(&o2)
- if err == nil || err.Error() != `value for "Mini" out of range` {
- t.Error("wrong underflow error for int16:", err)
- }
-
- // int32
- b.Reset()
- it = inputT{
- Maxi: math.MaxInt32 + 1,
- }
- type outi32 struct {
- Maxi int32
- Mini int32
- }
- var o3 outi32
- enc.Encode(it)
- err = dec.Decode(&o3)
- if err == nil || err.Error() != `value for "Maxi" out of range` {
- t.Error("wrong overflow error for int32:", err)
- }
- it = inputT{
- Mini: math.MinInt32 - 1,
- }
- b.Reset()
- enc.Encode(it)
- err = dec.Decode(&o3)
- if err == nil || err.Error() != `value for "Mini" out of range` {
- t.Error("wrong underflow error for int32:", err)
- }
-
- // uint8
- b.Reset()
- it = inputT{
- Maxu: math.MaxUint8 + 1,
- }
- type outu8 struct {
- Maxu uint8
- }
- var o4 outu8
- enc.Encode(it)
- err = dec.Decode(&o4)
- if err == nil || err.Error() != `value for "Maxu" out of range` {
- t.Error("wrong overflow error for uint8:", err)
- }
-
- // uint16
- b.Reset()
- it = inputT{
- Maxu: math.MaxUint16 + 1,
- }
- type outu16 struct {
- Maxu uint16
- }
- var o5 outu16
- enc.Encode(it)
- err = dec.Decode(&o5)
- if err == nil || err.Error() != `value for "Maxu" out of range` {
- t.Error("wrong overflow error for uint16:", err)
- }
-
- // uint32
- b.Reset()
- it = inputT{
- Maxu: math.MaxUint32 + 1,
- }
- type outu32 struct {
- Maxu uint32
- }
- var o6 outu32
- enc.Encode(it)
- err = dec.Decode(&o6)
- if err == nil || err.Error() != `value for "Maxu" out of range` {
- t.Error("wrong overflow error for uint32:", err)
- }
-
- // float32
- b.Reset()
- it = inputT{
- Maxf: math.MaxFloat32 * 2,
- }
- type outf32 struct {
- Maxf float32
- Minf float32
- }
- var o7 outf32
- enc.Encode(it)
- err = dec.Decode(&o7)
- if err == nil || err.Error() != `value for "Maxf" out of range` {
- t.Error("wrong overflow error for float32:", err)
- }
-
- // complex64
- b.Reset()
- it = inputT{
- Maxc: complex(math.MaxFloat32*2, math.MaxFloat32*2),
- }
- type outc64 struct {
- Maxc complex64
- Minc complex64
- }
- var o8 outc64
- enc.Encode(it)
- err = dec.Decode(&o8)
- if err == nil || err.Error() != `value for "Maxc" out of range` {
- t.Error("wrong overflow error for complex64:", err)
- }
-}
-
-func TestNesting(t *testing.T) {
- type RT struct {
- A string
- Next *RT
- }
- rt := new(RT)
- rt.A = "level1"
- rt.Next = new(RT)
- rt.Next.A = "level2"
- b := new(bytes.Buffer)
- NewEncoder(b).Encode(rt)
- var drt RT
- dec := NewDecoder(b)
- err := dec.Decode(&drt)
- if err != nil {
- t.Fatal("decoder error:", err)
- }
- if drt.A != rt.A {
- t.Errorf("nesting: encode expected %v got %v", *rt, drt)
- }
- if drt.Next == nil {
- t.Errorf("nesting: recursion failed")
- }
- if drt.Next.A != rt.Next.A {
- t.Errorf("nesting: encode expected %v got %v", *rt.Next, *drt.Next)
- }
-}
-
-// These three structures have the same data with different indirections
-type T0 struct {
- A int
- B int
- C int
- D int
-}
-type T1 struct {
- A int
- B *int
- C **int
- D ***int
-}
-type T2 struct {
- A ***int
- B **int
- C *int
- D int
-}
-
-func TestAutoIndirection(t *testing.T) {
- // First transfer t1 into t0
- var t1 T1
- t1.A = 17
- t1.B = new(int)
- *t1.B = 177
- t1.C = new(*int)
- *t1.C = new(int)
- **t1.C = 1777
- t1.D = new(**int)
- *t1.D = new(*int)
- **t1.D = new(int)
- ***t1.D = 17777
- b := new(bytes.Buffer)
- enc := NewEncoder(b)
- enc.Encode(t1)
- dec := NewDecoder(b)
- var t0 T0
- dec.Decode(&t0)
- if t0.A != 17 || t0.B != 177 || t0.C != 1777 || t0.D != 17777 {
- t.Errorf("t1->t0: expected {17 177 1777 17777}; got %v", t0)
- }
-
- // Now transfer t2 into t0
- var t2 T2
- t2.D = 17777
- t2.C = new(int)
- *t2.C = 1777
- t2.B = new(*int)
- *t2.B = new(int)
- **t2.B = 177
- t2.A = new(**int)
- *t2.A = new(*int)
- **t2.A = new(int)
- ***t2.A = 17
- b.Reset()
- enc.Encode(t2)
- t0 = T0{}
- dec.Decode(&t0)
- if t0.A != 17 || t0.B != 177 || t0.C != 1777 || t0.D != 17777 {
- t.Errorf("t2->t0 expected {17 177 1777 17777}; got %v", t0)
- }
-
- // Now transfer t0 into t1
- t0 = T0{17, 177, 1777, 17777}
- b.Reset()
- enc.Encode(t0)
- t1 = T1{}
- dec.Decode(&t1)
- if t1.A != 17 || *t1.B != 177 || **t1.C != 1777 || ***t1.D != 17777 {
- t.Errorf("t0->t1 expected {17 177 1777 17777}; got {%d %d %d %d}", t1.A, *t1.B, **t1.C, ***t1.D)
- }
-
- // Now transfer t0 into t2
- b.Reset()
- enc.Encode(t0)
- t2 = T2{}
- dec.Decode(&t2)
- if ***t2.A != 17 || **t2.B != 177 || *t2.C != 1777 || t2.D != 17777 {
- t.Errorf("t0->t2 expected {17 177 1777 17777}; got {%d %d %d %d}", ***t2.A, **t2.B, *t2.C, t2.D)
- }
-
- // Now do t2 again but without pre-allocated pointers.
- b.Reset()
- enc.Encode(t0)
- ***t2.A = 0
- **t2.B = 0
- *t2.C = 0
- t2.D = 0
- dec.Decode(&t2)
- if ***t2.A != 17 || **t2.B != 177 || *t2.C != 1777 || t2.D != 17777 {
- t.Errorf("t0->t2 expected {17 177 1777 17777}; got {%d %d %d %d}", ***t2.A, **t2.B, *t2.C, t2.D)
- }
-}
-
-type RT0 struct {
- A int
- B string
- C float64
-}
-type RT1 struct {
- C float64
- B string
- A int
- NotSet string
-}
-
-func TestReorderedFields(t *testing.T) {
- var rt0 RT0
- rt0.A = 17
- rt0.B = "hello"
- rt0.C = 3.14159
- b := new(bytes.Buffer)
- NewEncoder(b).Encode(rt0)
- dec := NewDecoder(b)
- var rt1 RT1
- // Wire type is RT0, local type is RT1.
- err := dec.Decode(&rt1)
- if err != nil {
- t.Fatal("decode error:", err)
- }
- if rt0.A != rt1.A || rt0.B != rt1.B || rt0.C != rt1.C {
- t.Errorf("rt1->rt0: expected %v; got %v", rt0, rt1)
- }
-}
-
-// Like an RT0 but with fields we'll ignore on the decode side.
-type IT0 struct {
- A int64
- B string
- Ignore_d []int
- Ignore_e [3]float64
- Ignore_f bool
- Ignore_g string
- Ignore_h []byte
- Ignore_i *RT1
- Ignore_m map[string]int
- C float64
-}
-
-func TestIgnoredFields(t *testing.T) {
- var it0 IT0
- it0.A = 17
- it0.B = "hello"
- it0.C = 3.14159
- it0.Ignore_d = []int{1, 2, 3}
- it0.Ignore_e[0] = 1.0
- it0.Ignore_e[1] = 2.0
- it0.Ignore_e[2] = 3.0
- it0.Ignore_f = true
- it0.Ignore_g = "pay no attention"
- it0.Ignore_h = []byte("to the curtain")
- it0.Ignore_i = &RT1{3.1, "hi", 7, "hello"}
- it0.Ignore_m = map[string]int{"one": 1, "two": 2}
-
- b := new(bytes.Buffer)
- NewEncoder(b).Encode(it0)
- dec := NewDecoder(b)
- var rt1 RT1
- // Wire type is IT0, local type is RT1.
- err := dec.Decode(&rt1)
- if err != nil {
- t.Error("error: ", err)
- }
- if int(it0.A) != rt1.A || it0.B != rt1.B || it0.C != rt1.C {
- t.Errorf("rt0->rt1: expected %v; got %v", it0, rt1)
- }
-}
-
-func TestBadRecursiveType(t *testing.T) {
- type Rec ***Rec
- var rec Rec
- b := new(bytes.Buffer)
- err := NewEncoder(b).Encode(&rec)
- if err == nil {
- t.Error("expected error; got none")
- } else if strings.Index(err.Error(), "recursive") < 0 {
- t.Error("expected recursive type error; got", err)
- }
- // Can't test decode easily because we can't encode one, so we can't pass one to a Decoder.
-}
-
-type Bad0 struct {
- CH chan int
- C float64
-}
-
-func TestInvalidField(t *testing.T) {
- var bad0 Bad0
- bad0.CH = make(chan int)
- b := new(bytes.Buffer)
- dummyEncoder := new(Encoder) // sufficient for this purpose.
- dummyEncoder.encode(b, reflect.ValueOf(&bad0), userType(reflect.TypeOf(&bad0)))
- if err := dummyEncoder.err; err == nil {
- t.Error("expected error; got none")
- } else if strings.Index(err.Error(), "type") < 0 {
- t.Error("expected type error; got", err)
- }
-}
-
-type Indirect struct {
- A ***[3]int
- S ***[]int
- M ****map[string]int
-}
-
-type Direct struct {
- A [3]int
- S []int
- M map[string]int
-}
-
-func TestIndirectSliceMapArray(t *testing.T) {
- // Marshal indirect, unmarshal to direct.
- i := new(Indirect)
- i.A = new(**[3]int)
- *i.A = new(*[3]int)
- **i.A = new([3]int)
- ***i.A = [3]int{1, 2, 3}
- i.S = new(**[]int)
- *i.S = new(*[]int)
- **i.S = new([]int)
- ***i.S = []int{4, 5, 6}
- i.M = new(***map[string]int)
- *i.M = new(**map[string]int)
- **i.M = new(*map[string]int)
- ***i.M = new(map[string]int)
- ****i.M = map[string]int{"one": 1, "two": 2, "three": 3}
- b := new(bytes.Buffer)
- NewEncoder(b).Encode(i)
- dec := NewDecoder(b)
- var d Direct
- err := dec.Decode(&d)
- if err != nil {
- t.Error("error: ", err)
- }
- if len(d.A) != 3 || d.A[0] != 1 || d.A[1] != 2 || d.A[2] != 3 {
- t.Errorf("indirect to direct: d.A is %v not %v", d.A, ***i.A)
- }
- if len(d.S) != 3 || d.S[0] != 4 || d.S[1] != 5 || d.S[2] != 6 {
- t.Errorf("indirect to direct: d.S is %v not %v", d.S, ***i.S)
- }
- if len(d.M) != 3 || d.M["one"] != 1 || d.M["two"] != 2 || d.M["three"] != 3 {
- t.Errorf("indirect to direct: d.M is %v not %v", d.M, ***i.M)
- }
- // Marshal direct, unmarshal to indirect.
- d.A = [3]int{11, 22, 33}
- d.S = []int{44, 55, 66}
- d.M = map[string]int{"four": 4, "five": 5, "six": 6}
- i = new(Indirect)
- b.Reset()
- NewEncoder(b).Encode(d)
- dec = NewDecoder(b)
- err = dec.Decode(&i)
- if err != nil {
- t.Fatal("error: ", err)
- }
- if len(***i.A) != 3 || (***i.A)[0] != 11 || (***i.A)[1] != 22 || (***i.A)[2] != 33 {
- t.Errorf("direct to indirect: ***i.A is %v not %v", ***i.A, d.A)
- }
- if len(***i.S) != 3 || (***i.S)[0] != 44 || (***i.S)[1] != 55 || (***i.S)[2] != 66 {
- t.Errorf("direct to indirect: ***i.S is %v not %v", ***i.S, ***i.S)
- }
- if len(****i.M) != 3 || (****i.M)["four"] != 4 || (****i.M)["five"] != 5 || (****i.M)["six"] != 6 {
- t.Errorf("direct to indirect: ****i.M is %v not %v", ****i.M, d.M)
- }
-}
-
-// An interface with several implementations
-type Squarer interface {
- Square() int
-}
-
-type Int int
-
-func (i Int) Square() int {
- return int(i * i)
-}
-
-type Float float64
-
-func (f Float) Square() int {
- return int(f * f)
-}
-
-type Vector []int
-
-func (v Vector) Square() int {
- sum := 0
- for _, x := range v {
- sum += x * x
- }
- return sum
-}
-
-type Point struct {
- X, Y int
-}
-
-func (p Point) Square() int {
- return p.X*p.X + p.Y*p.Y
-}
-
-// A struct with interfaces in it.
-type InterfaceItem struct {
- I int
- Sq1, Sq2, Sq3 Squarer
- F float64
- Sq []Squarer
-}
-
-// The same struct without interfaces
-type NoInterfaceItem struct {
- I int
- F float64
-}
-
-func TestInterface(t *testing.T) {
- iVal := Int(3)
- fVal := Float(5)
- // Sending a Vector will require that the receiver define a type in the middle of
- // receiving the value for item2.
- vVal := Vector{1, 2, 3}
- b := new(bytes.Buffer)
- item1 := &InterfaceItem{1, iVal, fVal, vVal, 11.5, []Squarer{iVal, fVal, nil, vVal}}
- // Register the types.
- Register(Int(0))
- Register(Float(0))
- Register(Vector{})
- err := NewEncoder(b).Encode(item1)
- if err != nil {
- t.Error("expected no encode error; got", err)
- }
-
- item2 := InterfaceItem{}
- err = NewDecoder(b).Decode(&item2)
- if err != nil {
- t.Fatal("decode:", err)
- }
- if item2.I != item1.I {
- t.Error("normal int did not decode correctly")
- }
- if item2.Sq1 == nil || item2.Sq1.Square() != iVal.Square() {
- t.Error("Int did not decode correctly")
- }
- if item2.Sq2 == nil || item2.Sq2.Square() != fVal.Square() {
- t.Error("Float did not decode correctly")
- }
- if item2.Sq3 == nil || item2.Sq3.Square() != vVal.Square() {
- t.Error("Vector did not decode correctly")
- }
- if item2.F != item1.F {
- t.Error("normal float did not decode correctly")
- }
- // Now check that we received a slice of Squarers correctly, including a nil element
- if len(item1.Sq) != len(item2.Sq) {
- t.Fatalf("[]Squarer length wrong: got %d; expected %d", len(item2.Sq), len(item1.Sq))
- }
- for i, v1 := range item1.Sq {
- v2 := item2.Sq[i]
- if v1 == nil || v2 == nil {
- if v1 != nil || v2 != nil {
- t.Errorf("item %d inconsistent nils", i)
- }
- continue
- }
- if v1.Square() != v2.Square() {
- t.Errorf("item %d inconsistent values: %v %v", i, v1, v2)
- }
- }
-}
-
-// A struct with all basic types, stored in interfaces.
-type BasicInterfaceItem struct {
- Int, Int8, Int16, Int32, Int64 interface{}
- Uint, Uint8, Uint16, Uint32, Uint64 interface{}
- Float32, Float64 interface{}
- Complex64, Complex128 interface{}
- Bool interface{}
- String interface{}
- Bytes interface{}
-}
-
-func TestInterfaceBasic(t *testing.T) {
- b := new(bytes.Buffer)
- item1 := &BasicInterfaceItem{
- int(1), int8(1), int16(1), int32(1), int64(1),
- uint(1), uint8(1), uint16(1), uint32(1), uint64(1),
- float32(1), 1.0,
- complex64(1i), complex128(1i),
- true,
- "hello",
- []byte("sailor"),
- }
- err := NewEncoder(b).Encode(item1)
- if err != nil {
- t.Error("expected no encode error; got", err)
- }
-
- item2 := &BasicInterfaceItem{}
- err = NewDecoder(b).Decode(&item2)
- if err != nil {
- t.Fatal("decode:", err)
- }
- if !reflect.DeepEqual(item1, item2) {
- t.Errorf("encode expected %v got %v", item1, item2)
- }
- // Hand check a couple for correct types.
- if v, ok := item2.Bool.(bool); !ok || !v {
- t.Error("boolean should be true")
- }
- if v, ok := item2.String.(string); !ok || v != item1.String.(string) {
- t.Errorf("string should be %v is %v", item1.String, v)
- }
-}
-
-type String string
-
-type PtrInterfaceItem struct {
- Str1 interface{} // basic
- Str2 interface{} // derived
-}
-
-// We'll send pointers; should receive values.
-// Also check that we can register T but send *T.
-func TestInterfacePointer(t *testing.T) {
- b := new(bytes.Buffer)
- str1 := "howdy"
- str2 := String("kiddo")
- item1 := &PtrInterfaceItem{
- &str1,
- &str2,
- }
- // Register the type.
- Register(str2)
- err := NewEncoder(b).Encode(item1)
- if err != nil {
- t.Error("expected no encode error; got", err)
- }
-
- item2 := &PtrInterfaceItem{}
- err = NewDecoder(b).Decode(&item2)
- if err != nil {
- t.Fatal("decode:", err)
- }
- // Hand test for correct types and values.
- if v, ok := item2.Str1.(string); !ok || v != str1 {
- t.Errorf("basic string failed: %q should be %q", v, str1)
- }
- if v, ok := item2.Str2.(String); !ok || v != str2 {
- t.Errorf("derived type String failed: %q should be %q", v, str2)
- }
-}
-
-func TestIgnoreInterface(t *testing.T) {
- iVal := Int(3)
- fVal := Float(5)
- // Sending a Point will require that the receiver define a type in the middle of
- // receiving the value for item2.
- pVal := Point{2, 3}
- b := new(bytes.Buffer)
- item1 := &InterfaceItem{1, iVal, fVal, pVal, 11.5, nil}
- // Register the types.
- Register(Int(0))
- Register(Float(0))
- Register(Point{})
- err := NewEncoder(b).Encode(item1)
- if err != nil {
- t.Error("expected no encode error; got", err)
- }
-
- item2 := NoInterfaceItem{}
- err = NewDecoder(b).Decode(&item2)
- if err != nil {
- t.Fatal("decode:", err)
- }
- if item2.I != item1.I {
- t.Error("normal int did not decode correctly")
- }
- if item2.F != item1.F {
- t.Error("normal float did not decode correctly")
- }
-}
-
-type U struct {
- A int
- B string
- c float64
- D uint
-}
-
-func TestUnexportedFields(t *testing.T) {
- var u0 U
- u0.A = 17
- u0.B = "hello"
- u0.c = 3.14159
- u0.D = 23
- b := new(bytes.Buffer)
- NewEncoder(b).Encode(u0)
- dec := NewDecoder(b)
- var u1 U
- u1.c = 1234.
- err := dec.Decode(&u1)
- if err != nil {
- t.Fatal("decode error:", err)
- }
- if u0.A != u1.A || u0.B != u1.B || u0.D != u1.D {
- t.Errorf("u1->u0: expected %v; got %v", u0, u1)
- }
- if u1.c != 1234. {
- t.Error("u1.c modified")
- }
-}
-
-var singletons = []interface{}{
- true,
- 7,
- 3.2,
- "hello",
- [3]int{11, 22, 33},
- []float32{0.5, 0.25, 0.125},
- map[string]int{"one": 1, "two": 2},
-}
-
-func TestDebugSingleton(t *testing.T) {
- if debugFunc == nil {
- return
- }
- b := new(bytes.Buffer)
- // Accumulate a number of values and print them out all at once.
- for _, x := range singletons {
- err := NewEncoder(b).Encode(x)
- if err != nil {
- t.Fatal("encode:", err)
- }
- }
- debugFunc(b)
-}
-
-// A type that won't be defined in the gob until we send it in an interface value.
-type OnTheFly struct {
- A int
-}
-
-type DT struct {
- // X OnTheFly
- A int
- B string
- C float64
- I interface{}
- J interface{}
- I_nil interface{}
- M map[string]int
- T [3]int
- S []string
-}
-
-func TestDebugStruct(t *testing.T) {
- if debugFunc == nil {
- return
- }
- Register(OnTheFly{})
- var dt DT
- dt.A = 17
- dt.B = "hello"
- dt.C = 3.14159
- dt.I = 271828
- dt.J = OnTheFly{3}
- dt.I_nil = nil
- dt.M = map[string]int{"one": 1, "two": 2}
- dt.T = [3]int{11, 22, 33}
- dt.S = []string{"hi", "joe"}
- b := new(bytes.Buffer)
- err := NewEncoder(b).Encode(dt)
- if err != nil {
- t.Fatal("encode:", err)
- }
- debugBuffer := bytes.NewBuffer(b.Bytes())
- dt2 := &DT{}
- err = NewDecoder(b).Decode(&dt2)
- if err != nil {
- t.Error("decode:", err)
- }
- debugFunc(debugBuffer)
-}
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gob
-
-// TODO(rsc): When garbage collector changes, revisit
-// the allocations in this file that use unsafe.Pointer.
-
-import (
- "bytes"
- "errors"
- "io"
- "math"
- "reflect"
- "unsafe"
-)
-
-var (
- errBadUint = errors.New("gob: encoded unsigned integer out of range")
- errBadType = errors.New("gob: unknown type id or corrupted data")
- errRange = errors.New("gob: bad data: field numbers out of bounds")
-)
-
-// decoderState is the execution state of an instance of the decoder. A new state
-// is created for nested objects.
-type decoderState struct {
- dec *Decoder
- // The buffer is stored with an extra indirection because it may be replaced
- // if we load a type during decode (when reading an interface value).
- b *bytes.Buffer
- fieldnum int // the last field number read.
- buf []byte
- next *decoderState // for free list
-}
-
-// We pass the bytes.Buffer separately for easier testing of the infrastructure
-// without requiring a full Decoder.
-func (dec *Decoder) newDecoderState(buf *bytes.Buffer) *decoderState {
- d := dec.freeList
- if d == nil {
- d = new(decoderState)
- d.dec = dec
- d.buf = make([]byte, uint64Size)
- } else {
- dec.freeList = d.next
- }
- d.b = buf
- return d
-}
-
-func (dec *Decoder) freeDecoderState(d *decoderState) {
- d.next = dec.freeList
- dec.freeList = d
-}
-
-func overflow(name string) error {
- return errors.New(`value for "` + name + `" out of range`)
-}
-
-// decodeUintReader reads an encoded unsigned integer from an io.Reader.
-// Used only by the Decoder to read the message length.
-func decodeUintReader(r io.Reader, buf []byte) (x uint64, width int, err error) {
- width = 1
- _, err = r.Read(buf[0:width])
- if err != nil {
- return
- }
- b := buf[0]
- if b <= 0x7f {
- return uint64(b), width, nil
- }
- n := -int(int8(b))
- if n > uint64Size {
- err = errBadUint
- return
- }
- width, err = io.ReadFull(r, buf[0:n])
- if err != nil {
- if err == io.EOF {
- err = io.ErrUnexpectedEOF
- }
- return
- }
- // Could check that the high byte is zero but it's not worth it.
- for _, b := range buf[0:width] {
- x = x<<8 | uint64(b)
- }
- width++ // +1 for length byte
- return
-}
-
-// decodeUint reads an encoded unsigned integer from state.r.
-// Does not check for overflow.
-func (state *decoderState) decodeUint() (x uint64) {
- b, err := state.b.ReadByte()
- if err != nil {
- error_(err)
- }
- if b <= 0x7f {
- return uint64(b)
- }
- n := -int(int8(b))
- if n > uint64Size {
- error_(errBadUint)
- }
- width, err := state.b.Read(state.buf[0:n])
- if err != nil {
- error_(err)
- }
- // Don't need to check error; it's safe to loop regardless.
- // Could check that the high byte is zero but it's not worth it.
- for _, b := range state.buf[0:width] {
- x = x<<8 | uint64(b)
- }
- return x
-}
-
-// decodeInt reads an encoded signed integer from state.r.
-// Does not check for overflow.
-func (state *decoderState) decodeInt() int64 {
- x := state.decodeUint()
- if x&1 != 0 {
- return ^int64(x >> 1)
- }
- return int64(x >> 1)
-}
-
-// decOp is the signature of a decoding operator for a given type.
-type decOp func(i *decInstr, state *decoderState, p unsafe.Pointer)
-
-// The 'instructions' of the decoding machine
-type decInstr struct {
- op decOp
- field int // field number of the wire type
- indir int // how many pointer indirections to reach the value in the struct
- offset uintptr // offset in the structure of the field to encode
- ovfl error // error message for overflow/underflow (for arrays, of the elements)
-}
-
-// Since the encoder writes no zeros, if we arrive at a decoder we have
-// a value to extract and store. The field number has already been read
-// (it's how we knew to call this decoder).
-// Each decoder is responsible for handling any indirections associated
-// with the data structure. If any pointer so reached is nil, allocation must
-// be done.
-
-// Walk the pointer hierarchy, allocating if we find a nil. Stop one before the end.
-func decIndirect(p unsafe.Pointer, indir int) unsafe.Pointer {
- for ; indir > 1; indir-- {
- if *(*unsafe.Pointer)(p) == nil {
- // Allocation required
- *(*unsafe.Pointer)(p) = unsafe.Pointer(new(unsafe.Pointer))
- }
- p = *(*unsafe.Pointer)(p)
- }
- return p
-}
-
-// ignoreUint discards a uint value with no destination.
-func ignoreUint(i *decInstr, state *decoderState, p unsafe.Pointer) {
- state.decodeUint()
-}
-
-// ignoreTwoUints discards a uint value with no destination. It's used to skip
-// complex values.
-func ignoreTwoUints(i *decInstr, state *decoderState, p unsafe.Pointer) {
- state.decodeUint()
- state.decodeUint()
-}
-
-// decBool decodes a uint and stores it as a boolean through p.
-func decBool(i *decInstr, state *decoderState, p unsafe.Pointer) {
- if i.indir > 0 {
- if *(*unsafe.Pointer)(p) == nil {
- *(*unsafe.Pointer)(p) = unsafe.Pointer(new(bool))
- }
- p = *(*unsafe.Pointer)(p)
- }
- *(*bool)(p) = state.decodeUint() != 0
-}
-
-// decInt8 decodes an integer and stores it as an int8 through p.
-func decInt8(i *decInstr, state *decoderState, p unsafe.Pointer) {
- if i.indir > 0 {
- if *(*unsafe.Pointer)(p) == nil {
- *(*unsafe.Pointer)(p) = unsafe.Pointer(new(int8))
- }
- p = *(*unsafe.Pointer)(p)
- }
- v := state.decodeInt()
- if v < math.MinInt8 || math.MaxInt8 < v {
- error_(i.ovfl)
- } else {
- *(*int8)(p) = int8(v)
- }
-}
-
-// decUint8 decodes an unsigned integer and stores it as a uint8 through p.
-func decUint8(i *decInstr, state *decoderState, p unsafe.Pointer) {
- if i.indir > 0 {
- if *(*unsafe.Pointer)(p) == nil {
- *(*unsafe.Pointer)(p) = unsafe.Pointer(new(uint8))
- }
- p = *(*unsafe.Pointer)(p)
- }
- v := state.decodeUint()
- if math.MaxUint8 < v {
- error_(i.ovfl)
- } else {
- *(*uint8)(p) = uint8(v)
- }
-}
-
-// decInt16 decodes an integer and stores it as an int16 through p.
-func decInt16(i *decInstr, state *decoderState, p unsafe.Pointer) {
- if i.indir > 0 {
- if *(*unsafe.Pointer)(p) == nil {
- *(*unsafe.Pointer)(p) = unsafe.Pointer(new(int16))
- }
- p = *(*unsafe.Pointer)(p)
- }
- v := state.decodeInt()
- if v < math.MinInt16 || math.MaxInt16 < v {
- error_(i.ovfl)
- } else {
- *(*int16)(p) = int16(v)
- }
-}
-
-// decUint16 decodes an unsigned integer and stores it as a uint16 through p.
-func decUint16(i *decInstr, state *decoderState, p unsafe.Pointer) {
- if i.indir > 0 {
- if *(*unsafe.Pointer)(p) == nil {
- *(*unsafe.Pointer)(p) = unsafe.Pointer(new(uint16))
- }
- p = *(*unsafe.Pointer)(p)
- }
- v := state.decodeUint()
- if math.MaxUint16 < v {
- error_(i.ovfl)
- } else {
- *(*uint16)(p) = uint16(v)
- }
-}
-
-// decInt32 decodes an integer and stores it as an int32 through p.
-func decInt32(i *decInstr, state *decoderState, p unsafe.Pointer) {
- if i.indir > 0 {
- if *(*unsafe.Pointer)(p) == nil {
- *(*unsafe.Pointer)(p) = unsafe.Pointer(new(int32))
- }
- p = *(*unsafe.Pointer)(p)
- }
- v := state.decodeInt()
- if v < math.MinInt32 || math.MaxInt32 < v {
- error_(i.ovfl)
- } else {
- *(*int32)(p) = int32(v)
- }
-}
-
-// decUint32 decodes an unsigned integer and stores it as a uint32 through p.
-func decUint32(i *decInstr, state *decoderState, p unsafe.Pointer) {
- if i.indir > 0 {
- if *(*unsafe.Pointer)(p) == nil {
- *(*unsafe.Pointer)(p) = unsafe.Pointer(new(uint32))
- }
- p = *(*unsafe.Pointer)(p)
- }
- v := state.decodeUint()
- if math.MaxUint32 < v {
- error_(i.ovfl)
- } else {
- *(*uint32)(p) = uint32(v)
- }
-}
-
-// decInt64 decodes an integer and stores it as an int64 through p.
-func decInt64(i *decInstr, state *decoderState, p unsafe.Pointer) {
- if i.indir > 0 {
- if *(*unsafe.Pointer)(p) == nil {
- *(*unsafe.Pointer)(p) = unsafe.Pointer(new(int64))
- }
- p = *(*unsafe.Pointer)(p)
- }
- *(*int64)(p) = int64(state.decodeInt())
-}
-
-// decUint64 decodes an unsigned integer and stores it as a uint64 through p.
-func decUint64(i *decInstr, state *decoderState, p unsafe.Pointer) {
- if i.indir > 0 {
- if *(*unsafe.Pointer)(p) == nil {
- *(*unsafe.Pointer)(p) = unsafe.Pointer(new(uint64))
- }
- p = *(*unsafe.Pointer)(p)
- }
- *(*uint64)(p) = uint64(state.decodeUint())
-}
-
-// Floating-point numbers are transmitted as uint64s holding the bits
-// of the underlying representation. They are sent byte-reversed, with
-// the exponent end coming out first, so integer floating point numbers
-// (for example) transmit more compactly. This routine does the
-// unswizzling.
-func floatFromBits(u uint64) float64 {
- var v uint64
- for i := 0; i < 8; i++ {
- v <<= 8
- v |= u & 0xFF
- u >>= 8
- }
- return math.Float64frombits(v)
-}
-
-// storeFloat32 decodes an unsigned integer, treats it as a 32-bit floating-point
-// number, and stores it through p. It's a helper function for float32 and complex64.
-func storeFloat32(i *decInstr, state *decoderState, p unsafe.Pointer) {
- v := floatFromBits(state.decodeUint())
- av := v
- if av < 0 {
- av = -av
- }
- // +Inf is OK in both 32- and 64-bit floats. Underflow is always OK.
- if math.MaxFloat32 < av && av <= math.MaxFloat64 {
- error_(i.ovfl)
- } else {
- *(*float32)(p) = float32(v)
- }
-}
-
-// decFloat32 decodes an unsigned integer, treats it as a 32-bit floating-point
-// number, and stores it through p.
-func decFloat32(i *decInstr, state *decoderState, p unsafe.Pointer) {
- if i.indir > 0 {
- if *(*unsafe.Pointer)(p) == nil {
- *(*unsafe.Pointer)(p) = unsafe.Pointer(new(float32))
- }
- p = *(*unsafe.Pointer)(p)
- }
- storeFloat32(i, state, p)
-}
-
-// decFloat64 decodes an unsigned integer, treats it as a 64-bit floating-point
-// number, and stores it through p.
-func decFloat64(i *decInstr, state *decoderState, p unsafe.Pointer) {
- if i.indir > 0 {
- if *(*unsafe.Pointer)(p) == nil {
- *(*unsafe.Pointer)(p) = unsafe.Pointer(new(float64))
- }
- p = *(*unsafe.Pointer)(p)
- }
- *(*float64)(p) = floatFromBits(uint64(state.decodeUint()))
-}
-
-// decComplex64 decodes a pair of unsigned integers, treats them as a
-// pair of floating point numbers, and stores them as a complex64 through p.
-// The real part comes first.
-func decComplex64(i *decInstr, state *decoderState, p unsafe.Pointer) {
- if i.indir > 0 {
- if *(*unsafe.Pointer)(p) == nil {
- *(*unsafe.Pointer)(p) = unsafe.Pointer(new(complex64))
- }
- p = *(*unsafe.Pointer)(p)
- }
- storeFloat32(i, state, p)
- storeFloat32(i, state, unsafe.Pointer(uintptr(p)+unsafe.Sizeof(float32(0))))
-}
-
-// decComplex128 decodes a pair of unsigned integers, treats them as a
-// pair of floating point numbers, and stores them as a complex128 through p.
-// The real part comes first.
-func decComplex128(i *decInstr, state *decoderState, p unsafe.Pointer) {
- if i.indir > 0 {
- if *(*unsafe.Pointer)(p) == nil {
- *(*unsafe.Pointer)(p) = unsafe.Pointer(new(complex128))
- }
- p = *(*unsafe.Pointer)(p)
- }
- real := floatFromBits(uint64(state.decodeUint()))
- imag := floatFromBits(uint64(state.decodeUint()))
- *(*complex128)(p) = complex(real, imag)
-}
-
-// decUint8Slice decodes a byte slice and stores through p a slice header
-// describing the data.
-// uint8 slices are encoded as an unsigned count followed by the raw bytes.
-func decUint8Slice(i *decInstr, state *decoderState, p unsafe.Pointer) {
- if i.indir > 0 {
- if *(*unsafe.Pointer)(p) == nil {
- *(*unsafe.Pointer)(p) = unsafe.Pointer(new([]uint8))
- }
- p = *(*unsafe.Pointer)(p)
- }
- n := int(state.decodeUint())
- if n < 0 {
- errorf("negative length decoding []byte")
- }
- slice := (*[]uint8)(p)
- if cap(*slice) < n {
- *slice = make([]uint8, n)
- } else {
- *slice = (*slice)[0:n]
- }
- if _, err := state.b.Read(*slice); err != nil {
- errorf("error decoding []byte: %s", err)
- }
-}
-
- // decString decodes a byte array and stores through p a string header
-// describing the data.
-// Strings are encoded as an unsigned count followed by the raw bytes.
-func decString(i *decInstr, state *decoderState, p unsafe.Pointer) {
- if i.indir > 0 {
- if *(*unsafe.Pointer)(p) == nil {
- *(*unsafe.Pointer)(p) = unsafe.Pointer(new(string))
- }
- p = *(*unsafe.Pointer)(p)
- }
- b := make([]byte, state.decodeUint())
- state.b.Read(b)
- // It would be a shame to do the obvious thing here,
- // *(*string)(p) = string(b)
- // because we've already allocated the storage and this would
- // allocate again and copy. So we do this ugly hack, which is even
- // more unsafe than it looks, as it depends on the memory
- // representation of a string matching the beginning of the memory
- // representation of a byte slice (a byte slice is longer).
- *(*string)(p) = *(*string)(unsafe.Pointer(&b))
-}
-
-// ignoreUint8Array skips over the data for a byte slice value with no destination.
-func ignoreUint8Array(i *decInstr, state *decoderState, p unsafe.Pointer) {
- b := make([]byte, state.decodeUint())
- state.b.Read(b)
-}
-
-// Execution engine
-
- // The decoder engine is an array of instructions indexed by the field number of the
- // incoming data. It is executed with random access according to field number.
-type decEngine struct {
- instr []decInstr
- numInstr int // the number of active instructions
-}
-
-// allocate makes sure storage is available for an object of underlying type rtyp
-// that is indir levels of indirection through p.
-func allocate(rtyp reflect.Type, p uintptr, indir int) uintptr {
- if indir == 0 {
- return p
- }
- up := unsafe.Pointer(p)
- if indir > 1 {
- up = decIndirect(up, indir)
- }
- if *(*unsafe.Pointer)(up) == nil {
- // Allocate object.
- *(*unsafe.Pointer)(up) = unsafe.New(rtyp)
- }
- return *(*uintptr)(up)
-}
-
-// decodeSingle decodes a top-level value that is not a struct and stores it through p.
-// Such values are preceded by a zero, making them have the memory layout of a
-// struct field (although with an illegal field number).
-func (dec *Decoder) decodeSingle(engine *decEngine, ut *userTypeInfo, basep uintptr) (err error) {
- state := dec.newDecoderState(&dec.buf)
- state.fieldnum = singletonField
- delta := int(state.decodeUint())
- if delta != 0 {
- errorf("decode: corrupted data: non-zero delta for singleton")
- }
- instr := &engine.instr[singletonField]
- if instr.indir != ut.indir {
- return errors.New("gob: internal error: inconsistent indirection")
- }
- ptr := unsafe.Pointer(basep) // offset will be zero
- if instr.indir > 1 {
- ptr = decIndirect(ptr, instr.indir)
- }
- instr.op(instr, state, ptr)
- dec.freeDecoderState(state)
- return nil
-}
-
- // decodeStruct decodes a top-level struct and stores it through p.
-// Indir is for the value, not the type. At the time of the call it may
-// differ from ut.indir, which was computed when the engine was built.
-// This state cannot arise for decodeSingle, which is called directly
-// from the user's value, not from the innards of an engine.
-func (dec *Decoder) decodeStruct(engine *decEngine, ut *userTypeInfo, p uintptr, indir int) {
- p = allocate(ut.base, p, indir)
- state := dec.newDecoderState(&dec.buf)
- state.fieldnum = -1
- basep := p
- for state.b.Len() > 0 {
- delta := int(state.decodeUint())
- if delta < 0 {
- errorf("decode: corrupted data: negative delta")
- }
- if delta == 0 { // struct terminator is zero delta fieldnum
- break
- }
- fieldnum := state.fieldnum + delta
- if fieldnum >= len(engine.instr) {
- error_(errRange)
- break
- }
- instr := &engine.instr[fieldnum]
- p := unsafe.Pointer(basep + instr.offset)
- if instr.indir > 1 {
- p = decIndirect(p, instr.indir)
- }
- instr.op(instr, state, p)
- state.fieldnum = fieldnum
- }
- dec.freeDecoderState(state)
-}
-
-// ignoreStruct discards the data for a struct with no destination.
-func (dec *Decoder) ignoreStruct(engine *decEngine) {
- state := dec.newDecoderState(&dec.buf)
- state.fieldnum = -1
- for state.b.Len() > 0 {
- delta := int(state.decodeUint())
- if delta < 0 {
- errorf("ignore decode: corrupted data: negative delta")
- }
- if delta == 0 { // struct terminator is zero delta fieldnum
- break
- }
- fieldnum := state.fieldnum + delta
- if fieldnum >= len(engine.instr) {
- error_(errRange)
- }
- instr := &engine.instr[fieldnum]
- instr.op(instr, state, unsafe.Pointer(nil))
- state.fieldnum = fieldnum
- }
- dec.freeDecoderState(state)
-}
-
-// ignoreSingle discards the data for a top-level non-struct value with no
-// destination. It's used when calling Decode with a nil value.
-func (dec *Decoder) ignoreSingle(engine *decEngine) {
- state := dec.newDecoderState(&dec.buf)
- state.fieldnum = singletonField
- delta := int(state.decodeUint())
- if delta != 0 {
- errorf("decode: corrupted data: non-zero delta for singleton")
- }
- instr := &engine.instr[singletonField]
- instr.op(instr, state, unsafe.Pointer(nil))
- dec.freeDecoderState(state)
-}
-
-// decodeArrayHelper does the work for decoding arrays and slices.
-func (dec *Decoder) decodeArrayHelper(state *decoderState, p uintptr, elemOp decOp, elemWid uintptr, length, elemIndir int, ovfl error) {
- instr := &decInstr{elemOp, 0, elemIndir, 0, ovfl}
- for i := 0; i < length; i++ {
- up := unsafe.Pointer(p)
- if elemIndir > 1 {
- up = decIndirect(up, elemIndir)
- }
- elemOp(instr, state, up)
- p += uintptr(elemWid)
- }
-}
-
-// decodeArray decodes an array and stores it through p, that is, p points to the zeroth element.
-// The length is an unsigned integer preceding the elements. Even though the length is redundant
-// (it's part of the type), it's a useful check and is included in the encoding.
-func (dec *Decoder) decodeArray(atyp reflect.Type, state *decoderState, p uintptr, elemOp decOp, elemWid uintptr, length, indir, elemIndir int, ovfl error) {
- if indir > 0 {
- p = allocate(atyp, p, 1) // All but the last level has been allocated by dec.Indirect
- }
- if n := state.decodeUint(); n != uint64(length) {
- errorf("length mismatch in decodeArray")
- }
- dec.decodeArrayHelper(state, p, elemOp, elemWid, length, elemIndir, ovfl)
-}
-
-// decodeIntoValue is a helper for map decoding. Since maps are decoded using reflection,
-// unlike the other items we can't use a pointer directly.
-func decodeIntoValue(state *decoderState, op decOp, indir int, v reflect.Value, ovfl error) reflect.Value {
- instr := &decInstr{op, 0, indir, 0, ovfl}
- up := unsafe.Pointer(unsafeAddr(v))
- if indir > 1 {
- up = decIndirect(up, indir)
- }
- op(instr, state, up)
- return v
-}
-
-// decodeMap decodes a map and stores its header through p.
-// Maps are encoded as a length followed by key:value pairs.
-// Because the internals of maps are not visible to us, we must
-// use reflection rather than pointer magic.
-func (dec *Decoder) decodeMap(mtyp reflect.Type, state *decoderState, p uintptr, keyOp, elemOp decOp, indir, keyIndir, elemIndir int, ovfl error) {
- if indir > 0 {
- p = allocate(mtyp, p, 1) // All but the last level has been allocated by dec.Indirect
- }
- up := unsafe.Pointer(p)
- if *(*unsafe.Pointer)(up) == nil { // maps are represented as a pointer in the runtime
- // Allocate map.
- *(*unsafe.Pointer)(up) = unsafe.Pointer(reflect.MakeMap(mtyp).Pointer())
- }
- // Maps cannot be accessed by moving addresses around the way
- // that slices etc. can. We must recover a full reflection value for
- // the iteration.
- v := reflect.ValueOf(unsafe.Unreflect(mtyp, unsafe.Pointer(p)))
- n := int(state.decodeUint())
- for i := 0; i < n; i++ {
- key := decodeIntoValue(state, keyOp, keyIndir, allocValue(mtyp.Key()), ovfl)
- elem := decodeIntoValue(state, elemOp, elemIndir, allocValue(mtyp.Elem()), ovfl)
- v.SetMapIndex(key, elem)
- }
-}
-
-// ignoreArrayHelper does the work for discarding arrays and slices.
-func (dec *Decoder) ignoreArrayHelper(state *decoderState, elemOp decOp, length int) {
- instr := &decInstr{elemOp, 0, 0, 0, errors.New("no error")}
- for i := 0; i < length; i++ {
- elemOp(instr, state, nil)
- }
-}
-
-// ignoreArray discards the data for an array value with no destination.
-func (dec *Decoder) ignoreArray(state *decoderState, elemOp decOp, length int) {
- if n := state.decodeUint(); n != uint64(length) {
- errorf("length mismatch in ignoreArray")
- }
- dec.ignoreArrayHelper(state, elemOp, length)
-}
-
-// ignoreMap discards the data for a map value with no destination.
-func (dec *Decoder) ignoreMap(state *decoderState, keyOp, elemOp decOp) {
- n := int(state.decodeUint())
- keyInstr := &decInstr{keyOp, 0, 0, 0, errors.New("no error")}
- elemInstr := &decInstr{elemOp, 0, 0, 0, errors.New("no error")}
- for i := 0; i < n; i++ {
- keyOp(keyInstr, state, nil)
- elemOp(elemInstr, state, nil)
- }
-}
-
-// decodeSlice decodes a slice and stores the slice header through p.
-// Slices are encoded as an unsigned length followed by the elements.
-func (dec *Decoder) decodeSlice(atyp reflect.Type, state *decoderState, p uintptr, elemOp decOp, elemWid uintptr, indir, elemIndir int, ovfl error) {
- n := int(uintptr(state.decodeUint()))
- if indir > 0 {
- up := unsafe.Pointer(p)
- if *(*unsafe.Pointer)(up) == nil {
- // Allocate the slice header.
- *(*unsafe.Pointer)(up) = unsafe.Pointer(new([]unsafe.Pointer))
- }
- p = *(*uintptr)(up)
- }
- // Allocate storage for the slice elements, that is, the underlying array,
- // if the existing slice does not have the capacity.
- // Always write a header at p.
- hdrp := (*reflect.SliceHeader)(unsafe.Pointer(p))
- if hdrp.Cap < n {
- hdrp.Data = uintptr(unsafe.NewArray(atyp.Elem(), n))
- hdrp.Cap = n
- }
- hdrp.Len = n
- dec.decodeArrayHelper(state, hdrp.Data, elemOp, elemWid, n, elemIndir, ovfl)
-}
-
-// ignoreSlice skips over the data for a slice value with no destination.
-func (dec *Decoder) ignoreSlice(state *decoderState, elemOp decOp) {
- dec.ignoreArrayHelper(state, elemOp, int(state.decodeUint()))
-}
-
-// setInterfaceValue sets an interface value to a concrete value,
-// but first it checks that the assignment will succeed.
-func setInterfaceValue(ivalue reflect.Value, value reflect.Value) {
- if !value.Type().AssignableTo(ivalue.Type()) {
- errorf("cannot assign value of type %s to %s", value.Type(), ivalue.Type())
- }
- ivalue.Set(value)
-}
-
-// decodeInterface decodes an interface value and stores it through p.
-// Interfaces are encoded as the name of a concrete type followed by a value.
-// If the name is empty, the value is nil and no value is sent.
-func (dec *Decoder) decodeInterface(ityp reflect.Type, state *decoderState, p uintptr, indir int) {
- // Create a writable interface reflect.Value. We need one even for the nil case.
- ivalue := allocValue(ityp)
- // Read the name of the concrete type.
- b := make([]byte, state.decodeUint())
- state.b.Read(b)
- name := string(b)
- if name == "" {
- // Copy the representation of the nil interface value to the target.
- // This is horribly unsafe and special.
- *(*[2]uintptr)(unsafe.Pointer(p)) = ivalue.InterfaceData()
- return
- }
- // The concrete type must be registered.
- typ, ok := nameToConcreteType[name]
- if !ok {
- errorf("name not registered for interface: %q", name)
- }
- // Read the type id of the concrete value.
- concreteId := dec.decodeTypeSequence(true)
- if concreteId < 0 {
- error_(dec.err)
- }
- // Byte count of value is next; we don't care what it is (it's there
- // in case we want to ignore the value by skipping it completely).
- state.decodeUint()
- // Read the concrete value.
- value := allocValue(typ)
- dec.decodeValue(concreteId, value)
- if dec.err != nil {
- error_(dec.err)
- }
- // Allocate the destination interface value.
- if indir > 0 {
- p = allocate(ityp, p, 1) // All but the last level has been allocated by dec.Indirect
- }
- // Assign the concrete value to the interface.
- // Tread carefully; it might not satisfy the interface.
- setInterfaceValue(ivalue, value)
- // Copy the representation of the interface value to the target.
- // This is horribly unsafe and special.
- *(*[2]uintptr)(unsafe.Pointer(p)) = ivalue.InterfaceData()
-}
-
-// ignoreInterface discards the data for an interface value with no destination.
-func (dec *Decoder) ignoreInterface(state *decoderState) {
- // Read the name of the concrete type.
- b := make([]byte, state.decodeUint())
- _, err := state.b.Read(b)
- if err != nil {
- error_(err)
- }
- id := dec.decodeTypeSequence(true)
- if id < 0 {
- error_(dec.err)
- }
- // At this point, the decoder buffer contains a delimited value. Just toss it.
- state.b.Next(int(state.decodeUint()))
-}
-
-// decodeGobDecoder decodes something implementing the GobDecoder interface.
-// The data is encoded as a byte slice.
-func (dec *Decoder) decodeGobDecoder(state *decoderState, v reflect.Value) {
- // Read the bytes for the value.
- b := make([]byte, state.decodeUint())
- _, err := state.b.Read(b)
- if err != nil {
- error_(err)
- }
- // We know it's a GobDecoder, so just call the method directly.
- err = v.Interface().(GobDecoder).GobDecode(b)
- if err != nil {
- error_(err)
- }
-}
-
-// ignoreGobDecoder discards the data for a GobDecoder value with no destination.
-func (dec *Decoder) ignoreGobDecoder(state *decoderState) {
- // Read the bytes for the value.
- b := make([]byte, state.decodeUint())
- _, err := state.b.Read(b)
- if err != nil {
- error_(err)
- }
-}
-
-// Index by Go types.
-var decOpTable = [...]decOp{
- reflect.Bool: decBool,
- reflect.Int8: decInt8,
- reflect.Int16: decInt16,
- reflect.Int32: decInt32,
- reflect.Int64: decInt64,
- reflect.Uint8: decUint8,
- reflect.Uint16: decUint16,
- reflect.Uint32: decUint32,
- reflect.Uint64: decUint64,
- reflect.Float32: decFloat32,
- reflect.Float64: decFloat64,
- reflect.Complex64: decComplex64,
- reflect.Complex128: decComplex128,
- reflect.String: decString,
-}
-
-// Indexed by gob types. tComplex will be added during type.init().
-var decIgnoreOpMap = map[typeId]decOp{
- tBool: ignoreUint,
- tInt: ignoreUint,
- tUint: ignoreUint,
- tFloat: ignoreUint,
- tBytes: ignoreUint8Array,
- tString: ignoreUint8Array,
- tComplex: ignoreTwoUints,
-}
-
-// decOpFor returns the decoding op for the base type under rt and
-// the indirection count to reach it.
-func (dec *Decoder) decOpFor(wireId typeId, rt reflect.Type, name string, inProgress map[reflect.Type]*decOp) (*decOp, int) {
- ut := userType(rt)
- // If the type implements GobDecoder, we handle it without further processing.
- if ut.isGobDecoder {
- return dec.gobDecodeOpFor(ut)
- }
- // If this type is already in progress, it's a recursive type (e.g. map[string]*T).
- // Return the pointer to the op we're already building.
- if opPtr := inProgress[rt]; opPtr != nil {
- return opPtr, ut.indir
- }
- typ := ut.base
- indir := ut.indir
- var op decOp
- k := typ.Kind()
- if int(k) < len(decOpTable) {
- op = decOpTable[k]
- }
- if op == nil {
- inProgress[rt] = &op
- // Special cases
- switch t := typ; t.Kind() {
- case reflect.Array:
- name = "element of " + name
- elemId := dec.wireType[wireId].ArrayT.Elem
- elemOp, elemIndir := dec.decOpFor(elemId, t.Elem(), name, inProgress)
- ovfl := overflow(name)
- op = func(i *decInstr, state *decoderState, p unsafe.Pointer) {
- state.dec.decodeArray(t, state, uintptr(p), *elemOp, t.Elem().Size(), t.Len(), i.indir, elemIndir, ovfl)
- }
-
- case reflect.Map:
- name = "element of " + name
- keyId := dec.wireType[wireId].MapT.Key
- elemId := dec.wireType[wireId].MapT.Elem
- keyOp, keyIndir := dec.decOpFor(keyId, t.Key(), name, inProgress)
- elemOp, elemIndir := dec.decOpFor(elemId, t.Elem(), name, inProgress)
- ovfl := overflow(name)
- op = func(i *decInstr, state *decoderState, p unsafe.Pointer) {
- up := unsafe.Pointer(p)
- state.dec.decodeMap(t, state, uintptr(up), *keyOp, *elemOp, i.indir, keyIndir, elemIndir, ovfl)
- }
-
- case reflect.Slice:
- name = "element of " + name
- if t.Elem().Kind() == reflect.Uint8 {
- op = decUint8Slice
- break
- }
- var elemId typeId
- if tt, ok := builtinIdToType[wireId]; ok {
- elemId = tt.(*sliceType).Elem
- } else {
- elemId = dec.wireType[wireId].SliceT.Elem
- }
- elemOp, elemIndir := dec.decOpFor(elemId, t.Elem(), name, inProgress)
- ovfl := overflow(name)
- op = func(i *decInstr, state *decoderState, p unsafe.Pointer) {
- state.dec.decodeSlice(t, state, uintptr(p), *elemOp, t.Elem().Size(), i.indir, elemIndir, ovfl)
- }
-
- case reflect.Struct:
- // Generate a closure that calls out to the engine for the nested type.
- enginePtr, err := dec.getDecEnginePtr(wireId, userType(typ))
- if err != nil {
- error_(err)
- }
- op = func(i *decInstr, state *decoderState, p unsafe.Pointer) {
- // indirect through enginePtr to delay evaluation for recursive structs.
- dec.decodeStruct(*enginePtr, userType(typ), uintptr(p), i.indir)
- }
- case reflect.Interface:
- op = func(i *decInstr, state *decoderState, p unsafe.Pointer) {
- state.dec.decodeInterface(t, state, uintptr(p), i.indir)
- }
- }
- }
- if op == nil {
- errorf("decode can't handle type %s", rt)
- }
- return &op, indir
-}
-
-// decIgnoreOpFor returns the decoding op for a field that has no destination.
-func (dec *Decoder) decIgnoreOpFor(wireId typeId) decOp {
- op, ok := decIgnoreOpMap[wireId]
- if !ok {
- if wireId == tInterface {
- // Special case because it's a method: the ignored item might
- // define types and we need to record their state in the decoder.
- op = func(i *decInstr, state *decoderState, p unsafe.Pointer) {
- state.dec.ignoreInterface(state)
- }
- return op
- }
- // Special cases
- wire := dec.wireType[wireId]
- switch {
- case wire == nil:
- errorf("bad data: undefined type %s", wireId.string())
- case wire.ArrayT != nil:
- elemId := wire.ArrayT.Elem
- elemOp := dec.decIgnoreOpFor(elemId)
- op = func(i *decInstr, state *decoderState, p unsafe.Pointer) {
- state.dec.ignoreArray(state, elemOp, wire.ArrayT.Len)
- }
-
- case wire.MapT != nil:
- keyId := dec.wireType[wireId].MapT.Key
- elemId := dec.wireType[wireId].MapT.Elem
- keyOp := dec.decIgnoreOpFor(keyId)
- elemOp := dec.decIgnoreOpFor(elemId)
- op = func(i *decInstr, state *decoderState, p unsafe.Pointer) {
- state.dec.ignoreMap(state, keyOp, elemOp)
- }
-
- case wire.SliceT != nil:
- elemId := wire.SliceT.Elem
- elemOp := dec.decIgnoreOpFor(elemId)
- op = func(i *decInstr, state *decoderState, p unsafe.Pointer) {
- state.dec.ignoreSlice(state, elemOp)
- }
-
- case wire.StructT != nil:
- // Generate a closure that calls out to the engine for the nested type.
- enginePtr, err := dec.getIgnoreEnginePtr(wireId)
- if err != nil {
- error_(err)
- }
- op = func(i *decInstr, state *decoderState, p unsafe.Pointer) {
- // indirect through enginePtr to delay evaluation for recursive structs
- state.dec.ignoreStruct(*enginePtr)
- }
-
- case wire.GobEncoderT != nil:
- op = func(i *decInstr, state *decoderState, p unsafe.Pointer) {
- state.dec.ignoreGobDecoder(state)
- }
- }
- }
- if op == nil {
- errorf("bad data: ignore can't handle type %s", wireId.string())
- }
- return op
-}
-
-// gobDecodeOpFor returns the op for a type that is known to implement
-// GobDecoder.
-func (dec *Decoder) gobDecodeOpFor(ut *userTypeInfo) (*decOp, int) {
- rcvrType := ut.user
- if ut.decIndir == -1 {
- rcvrType = reflect.PtrTo(rcvrType)
- } else if ut.decIndir > 0 {
- for i := int8(0); i < ut.decIndir; i++ {
- rcvrType = rcvrType.Elem()
- }
- }
- var op decOp
- op = func(i *decInstr, state *decoderState, p unsafe.Pointer) {
- // Caller has gotten us to within one indirection of our value.
- if i.indir > 0 {
- if *(*unsafe.Pointer)(p) == nil {
- *(*unsafe.Pointer)(p) = unsafe.New(ut.base)
- }
- }
- // Now p is a pointer to the base type. Do we need to climb out to
- // get to the receiver type?
- var v reflect.Value
- if ut.decIndir == -1 {
- v = reflect.ValueOf(unsafe.Unreflect(rcvrType, unsafe.Pointer(&p)))
- } else {
- v = reflect.ValueOf(unsafe.Unreflect(rcvrType, p))
- }
- state.dec.decodeGobDecoder(state, v)
- }
- return &op, int(ut.indir)
-}
-
-// compatibleType asks: Are these two gob Types compatible?
-// Answers the question for basic types, arrays, maps and slices, plus
-// GobEncoder/Decoder pairs.
-// Structs are considered ok; fields will be checked later.
-func (dec *Decoder) compatibleType(fr reflect.Type, fw typeId, inProgress map[reflect.Type]typeId) bool {
- if rhs, ok := inProgress[fr]; ok {
- return rhs == fw
- }
- inProgress[fr] = fw
- ut := userType(fr)
- wire, ok := dec.wireType[fw]
- // If fr is a GobDecoder, the wire type must be GobEncoder.
- // And if fr is not a GobDecoder, the wire type must not be either.
- if ut.isGobDecoder != (ok && wire.GobEncoderT != nil) { // the parentheses look odd but are correct.
- return false
- }
- if ut.isGobDecoder { // This test trumps all others.
- return true
- }
- switch t := ut.base; t.Kind() {
- default:
- // chan, etc: cannot handle.
- return false
- case reflect.Bool:
- return fw == tBool
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return fw == tInt
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- return fw == tUint
- case reflect.Float32, reflect.Float64:
- return fw == tFloat
- case reflect.Complex64, reflect.Complex128:
- return fw == tComplex
- case reflect.String:
- return fw == tString
- case reflect.Interface:
- return fw == tInterface
- case reflect.Array:
- if !ok || wire.ArrayT == nil {
- return false
- }
- array := wire.ArrayT
- return t.Len() == array.Len && dec.compatibleType(t.Elem(), array.Elem, inProgress)
- case reflect.Map:
- if !ok || wire.MapT == nil {
- return false
- }
- MapType := wire.MapT
- return dec.compatibleType(t.Key(), MapType.Key, inProgress) && dec.compatibleType(t.Elem(), MapType.Elem, inProgress)
- case reflect.Slice:
- // Is it an array of bytes?
- if t.Elem().Kind() == reflect.Uint8 {
- return fw == tBytes
- }
- // Extract and compare element types.
- var sw *sliceType
- if tt, ok := builtinIdToType[fw]; ok {
- sw = tt.(*sliceType)
- } else {
- sw = dec.wireType[fw].SliceT
- }
- elem := userType(t.Elem()).base
- return sw != nil && dec.compatibleType(elem, sw.Elem, inProgress)
- case reflect.Struct:
- return true
- }
- return true
-}
-
-// typeString returns a human-readable description of the type identified by remoteId.
-func (dec *Decoder) typeString(remoteId typeId) string {
- if t := idToType[remoteId]; t != nil {
- // globally known type.
- return t.string()
- }
- return dec.wireType[remoteId].string()
-}
-
-// compileSingle compiles the decoder engine for a non-struct top-level value, including
-// GobDecoders.
-func (dec *Decoder) compileSingle(remoteId typeId, ut *userTypeInfo) (engine *decEngine, err error) {
- rt := ut.user
- engine = new(decEngine)
- engine.instr = make([]decInstr, 1) // one item
- name := rt.String() // best we can do
- if !dec.compatibleType(rt, remoteId, make(map[reflect.Type]typeId)) {
- return nil, errors.New("gob: wrong type received for local value " + name + ": " + dec.typeString(remoteId))
- }
- op, indir := dec.decOpFor(remoteId, rt, name, make(map[reflect.Type]*decOp))
- ovfl := errors.New(`value for "` + name + `" out of range`)
- engine.instr[singletonField] = decInstr{*op, singletonField, indir, 0, ovfl}
- engine.numInstr = 1
- return
-}
-
-// compileIgnoreSingle compiles the decoder engine for a non-struct top-level value that will be discarded.
-func (dec *Decoder) compileIgnoreSingle(remoteId typeId) (engine *decEngine, err error) {
- engine = new(decEngine)
- engine.instr = make([]decInstr, 1) // one item
- op := dec.decIgnoreOpFor(remoteId)
- ovfl := overflow(dec.typeString(remoteId))
- engine.instr[0] = decInstr{op, 0, 0, 0, ovfl}
- engine.numInstr = 1
- return
-}
-
-// compileDec compiles the decoder engine for a value. If the value is not a struct,
-// it calls out to compileSingle.
-func (dec *Decoder) compileDec(remoteId typeId, ut *userTypeInfo) (engine *decEngine, err error) {
- rt := ut.base
- srt := rt
- if srt.Kind() != reflect.Struct ||
- ut.isGobDecoder {
- return dec.compileSingle(remoteId, ut)
- }
- var wireStruct *structType
- // Builtin types can come from the global pool; the rest must be defined by the decoder.
- // Also we know we're decoding a struct now, so the client must have sent one.
- if t, ok := builtinIdToType[remoteId]; ok {
- wireStruct, _ = t.(*structType)
- } else {
- wire := dec.wireType[remoteId]
- if wire == nil {
- error_(errBadType)
- }
- wireStruct = wire.StructT
- }
- if wireStruct == nil {
- errorf("type mismatch in decoder: want struct type %s; got non-struct", rt)
- }
- engine = new(decEngine)
- engine.instr = make([]decInstr, len(wireStruct.Field))
- seen := make(map[reflect.Type]*decOp)
- // Loop over the fields of the wire type.
- for fieldnum := 0; fieldnum < len(wireStruct.Field); fieldnum++ {
- wireField := wireStruct.Field[fieldnum]
- if wireField.Name == "" {
- errorf("empty name for remote field of type %s", wireStruct.Name)
- }
- ovfl := overflow(wireField.Name)
- // Find the field of the local type with the same name.
- localField, present := srt.FieldByName(wireField.Name)
- // TODO(r): anonymous names
- if !present || !isExported(wireField.Name) {
- op := dec.decIgnoreOpFor(wireField.Id)
- engine.instr[fieldnum] = decInstr{op, fieldnum, 0, 0, ovfl}
- continue
- }
- if !dec.compatibleType(localField.Type, wireField.Id, make(map[reflect.Type]typeId)) {
- errorf("wrong type (%s) for received field %s.%s", localField.Type, wireStruct.Name, wireField.Name)
- }
- op, indir := dec.decOpFor(wireField.Id, localField.Type, localField.Name, seen)
- engine.instr[fieldnum] = decInstr{*op, fieldnum, indir, uintptr(localField.Offset), ovfl}
- engine.numInstr++
- }
- return
-}
-
-// getDecEnginePtr returns the engine for the specified type.
-func (dec *Decoder) getDecEnginePtr(remoteId typeId, ut *userTypeInfo) (enginePtr **decEngine, err error) {
- rt := ut.base
- decoderMap, ok := dec.decoderCache[rt]
- if !ok {
- decoderMap = make(map[typeId]**decEngine)
- dec.decoderCache[rt] = decoderMap
- }
- if enginePtr, ok = decoderMap[remoteId]; !ok {
- // To handle recursive types, mark this engine as underway before compiling.
- enginePtr = new(*decEngine)
- decoderMap[remoteId] = enginePtr
- *enginePtr, err = dec.compileDec(remoteId, ut)
- if err != nil {
- delete(decoderMap, remoteId)
- }
- }
- return
-}
-
-// emptyStruct is the type we compile into when ignoring a struct value.
-type emptyStruct struct{}
-
-var emptyStructType = reflect.TypeOf(emptyStruct{})
-
- // getIgnoreEnginePtr returns the engine for the specified type when the value is to be discarded.
-func (dec *Decoder) getIgnoreEnginePtr(wireId typeId) (enginePtr **decEngine, err error) {
- var ok bool
- if enginePtr, ok = dec.ignorerCache[wireId]; !ok {
- // To handle recursive types, mark this engine as underway before compiling.
- enginePtr = new(*decEngine)
- dec.ignorerCache[wireId] = enginePtr
- wire := dec.wireType[wireId]
- if wire != nil && wire.StructT != nil {
- *enginePtr, err = dec.compileDec(wireId, userType(emptyStructType))
- } else {
- *enginePtr, err = dec.compileIgnoreSingle(wireId)
- }
- if err != nil {
- delete(dec.ignorerCache, wireId)
- }
- }
- return
-}
-
-// decodeValue decodes the data stream representing a value and stores it in val.
-func (dec *Decoder) decodeValue(wireId typeId, val reflect.Value) {
- defer catchError(&dec.err)
- // If the value is nil, it means we should just ignore this item.
- if !val.IsValid() {
- dec.decodeIgnoredValue(wireId)
- return
- }
- // Dereference down to the underlying type.
- ut := userType(val.Type())
- base := ut.base
- var enginePtr **decEngine
- enginePtr, dec.err = dec.getDecEnginePtr(wireId, ut)
- if dec.err != nil {
- return
- }
- engine := *enginePtr
- if st := base; st.Kind() == reflect.Struct && !ut.isGobDecoder {
- if engine.numInstr == 0 && st.NumField() > 0 && len(dec.wireType[wireId].StructT.Field) > 0 {
- name := base.Name()
- errorf("type mismatch: no fields matched compiling decoder for %s", name)
- }
- dec.decodeStruct(engine, ut, uintptr(unsafeAddr(val)), ut.indir)
- } else {
- dec.decodeSingle(engine, ut, uintptr(unsafeAddr(val)))
- }
-}
-
-// decodeIgnoredValue decodes the data stream representing a value of the specified type and discards it.
-func (dec *Decoder) decodeIgnoredValue(wireId typeId) {
- var enginePtr **decEngine
- enginePtr, dec.err = dec.getIgnoreEnginePtr(wireId)
- if dec.err != nil {
- return
- }
- wire := dec.wireType[wireId]
- if wire != nil && wire.StructT != nil {
- dec.ignoreStruct(*enginePtr)
- } else {
- dec.ignoreSingle(*enginePtr)
- }
-}
-
-func init() {
- var iop, uop decOp
- switch reflect.TypeOf(int(0)).Bits() {
- case 32:
- iop = decInt32
- uop = decUint32
- case 64:
- iop = decInt64
- uop = decUint64
- default:
- panic("gob: unknown size of int/uint")
- }
- decOpTable[reflect.Int] = iop
- decOpTable[reflect.Uint] = uop
-
- // Finally uintptr
- switch reflect.TypeOf(uintptr(0)).Bits() {
- case 32:
- uop = decUint32
- case 64:
- uop = decUint64
- default:
- panic("gob: unknown size of uintptr")
- }
- decOpTable[reflect.Uintptr] = uop
-}
-
-// Gob assumes it can call UnsafeAddr on any Value
-// in order to get a pointer it can copy data from.
-// Values that have just been created and do not point
-// into existing structs or slices cannot be addressed,
-// so simulate it by returning a pointer to a copy.
-// Each call allocates once.
-func unsafeAddr(v reflect.Value) uintptr {
- if v.CanAddr() {
- return v.UnsafeAddr()
- }
- x := reflect.New(v.Type()).Elem()
- x.Set(v)
- return x.UnsafeAddr()
-}
-
-// Gob depends on being able to take the address
-// of zeroed Values it creates, so use this wrapper instead
-// of the standard reflect.Zero.
-// Each call allocates once.
-func allocValue(t reflect.Type) reflect.Value {
- return reflect.New(t).Elem()
-}
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gob
-
-import (
- "bufio"
- "bytes"
- "errors"
- "io"
- "reflect"
- "sync"
-)
-
-// A Decoder manages the receipt of type and data information read from the
-// remote side of a connection.
-type Decoder struct {
- mutex sync.Mutex // each item must be received atomically
- r io.Reader // source of the data
- buf bytes.Buffer // buffer for more efficient i/o from r
- wireType map[typeId]*wireType // map from remote ID to local description
- decoderCache map[reflect.Type]map[typeId]**decEngine // cache of compiled engines
- ignorerCache map[typeId]**decEngine // ditto for ignored objects
- freeList *decoderState // list of free decoderStates; avoids reallocation
- countBuf []byte // used for decoding integers while parsing messages
- tmp []byte // temporary storage for i/o; saves reallocating
- err error
-}
-
-// NewDecoder returns a new decoder that reads from the io.Reader.
-// If r does not also implement io.ByteReader, it will be wrapped in a
-// bufio.Reader.
-func NewDecoder(r io.Reader) *Decoder {
- dec := new(Decoder)
- // We use the ability to read bytes as a plausible surrogate for buffering.
- if _, ok := r.(io.ByteReader); !ok {
- r = bufio.NewReader(r)
- }
- dec.r = r
- dec.wireType = make(map[typeId]*wireType)
- dec.decoderCache = make(map[reflect.Type]map[typeId]**decEngine)
- dec.ignorerCache = make(map[typeId]**decEngine)
- dec.countBuf = make([]byte, 9) // counts may be uint64s (unlikely!), require 9 bytes
-
- return dec
-}
-
-// recvType loads the definition of a type.
-func (dec *Decoder) recvType(id typeId) {
- // Have we already seen this type? That's an error.
- if id < firstUserId || dec.wireType[id] != nil {
- dec.err = errors.New("gob: duplicate type received")
- return
- }
-
- // Type:
- wire := new(wireType)
- dec.decodeValue(tWireType, reflect.ValueOf(wire))
- if dec.err != nil {
- return
- }
- // Remember we've seen this type.
- dec.wireType[id] = wire
-}
-
-var errBadCount = errors.New("invalid message length")
-
-// recvMessage reads the next count-delimited item from the input. It is the converse
-// of Encoder.writeMessage. It returns false on EOF or other error reading the message.
-func (dec *Decoder) recvMessage() bool {
- // Read a count.
- nbytes, _, err := decodeUintReader(dec.r, dec.countBuf)
- if err != nil {
- dec.err = err
- return false
- }
- if nbytes >= 1<<31 {
- dec.err = errBadCount
- return false
- }
- dec.readMessage(int(nbytes))
- return dec.err == nil
-}
-
-// readMessage reads the next nbytes bytes from the input.
-func (dec *Decoder) readMessage(nbytes int) {
- // Allocate the buffer.
- if cap(dec.tmp) < nbytes {
- dec.tmp = make([]byte, nbytes+100) // room to grow
- }
- dec.tmp = dec.tmp[:nbytes]
-
- // Read the data
- _, dec.err = io.ReadFull(dec.r, dec.tmp)
- if dec.err != nil {
- if dec.err == io.EOF {
- dec.err = io.ErrUnexpectedEOF
- }
- return
- }
- dec.buf.Write(dec.tmp)
-}
-
- // toInt turns an encoded uint64 into a signed int64, according to the marshaling rules.
-func toInt(x uint64) int64 {
- i := int64(x >> 1)
- if x&1 != 0 {
- i = ^i
- }
- return i
-}
-
-func (dec *Decoder) nextInt() int64 {
- n, _, err := decodeUintReader(&dec.buf, dec.countBuf)
- if err != nil {
- dec.err = err
- }
- return toInt(n)
-}
-
-func (dec *Decoder) nextUint() uint64 {
- n, _, err := decodeUintReader(&dec.buf, dec.countBuf)
- if err != nil {
- dec.err = err
- }
- return n
-}
-
-// decodeTypeSequence parses:
-// TypeSequence
-// (TypeDefinition DelimitedTypeDefinition*)?
-// and returns the type id of the next value. It returns -1 at
-// EOF. Upon return, the remainder of dec.buf is the value to be
-// decoded. If this is an interface value, it can be ignored by
-// simply resetting that buffer.
-func (dec *Decoder) decodeTypeSequence(isInterface bool) typeId {
- for dec.err == nil {
- if dec.buf.Len() == 0 {
- if !dec.recvMessage() {
- break
- }
- }
- // Receive a type id.
- id := typeId(dec.nextInt())
- if id >= 0 {
- // Value follows.
- return id
- }
- // Type definition for (-id) follows.
- dec.recvType(-id)
- // When decoding an interface, after a type there may be a
- // DelimitedValue still in the buffer. Skip its count.
- // (Alternatively, the buffer is empty and the byte count
- // will be absorbed by recvMessage.)
- if dec.buf.Len() > 0 {
- if !isInterface {
- dec.err = errors.New("extra data in buffer")
- break
- }
- dec.nextUint()
- }
- }
- return -1
-}
-
-// Decode reads the next value from the connection and stores
-// it in the data represented by the empty interface value.
-// If e is nil, the value will be discarded. Otherwise,
-// the value underlying e must be a pointer to the
-// correct type for the next data item received.
-func (dec *Decoder) Decode(e interface{}) error {
- if e == nil {
- return dec.DecodeValue(reflect.Value{})
- }
- value := reflect.ValueOf(e)
- // If e represents a value as opposed to a pointer, the answer won't
- // get back to the caller. Make sure it's a pointer.
- if value.Type().Kind() != reflect.Ptr {
- dec.err = errors.New("gob: attempt to decode into a non-pointer")
- return dec.err
- }
- return dec.DecodeValue(value)
-}
-
-// DecodeValue reads the next value from the connection.
-// If v is the zero reflect.Value (v.Kind() == Invalid), DecodeValue discards the value.
-// Otherwise, it stores the value into v. In that case, v must represent
- // a non-nil pointer to data or be an assignable reflect.Value (v.CanSet()).
-func (dec *Decoder) DecodeValue(v reflect.Value) error {
- if v.IsValid() {
- if v.Kind() == reflect.Ptr && !v.IsNil() {
- // That's okay, we'll store through the pointer.
- } else if !v.CanSet() {
- return errors.New("gob: DecodeValue of unassignable value")
- }
- }
- // Make sure we're single-threaded through here.
- dec.mutex.Lock()
- defer dec.mutex.Unlock()
-
- dec.buf.Reset() // In case data lingers from previous invocation.
- dec.err = nil
- id := dec.decodeTypeSequence(false)
- if dec.err == nil {
- dec.decodeValue(id, v)
- }
- return dec.err
-}
-
- // If debug.go is compiled into the program, debugFunc prints a human-readable
-// representation of the gob data read from r by calling that file's Debug function.
-// Otherwise it is nil.
-var debugFunc func(io.Reader)
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-/*
-Package gob manages streams of gobs - binary values exchanged between an
-Encoder (transmitter) and a Decoder (receiver). A typical use is transporting
-arguments and results of remote procedure calls (RPCs) such as those provided by
-package "rpc".
-
-A stream of gobs is self-describing. Each data item in the stream is preceded by
-a specification of its type, expressed in terms of a small set of predefined
-types. Pointers are not transmitted, but the things they point to are
-transmitted; that is, the values are flattened. Recursive types work fine, but
-recursive values (data with cycles) are problematic. This may change.
-
-To use gobs, create an Encoder and present it with a series of data items as
-values or addresses that can be dereferenced to values. The Encoder makes sure
-all type information is sent before it is needed. At the receive side, a
-Decoder retrieves values from the encoded stream and unpacks them into local
-variables.
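-
-Schematically, and ignoring error handling, a round trip looks like this (the
-struct P, the buffer and the variable names below are purely illustrative):
-
- type P struct{ X, Y int }
- var network bytes.Buffer
- enc := NewEncoder(&network) // the Encoder writes into the buffer
- dec := NewDecoder(&network) // the Decoder reads from it
- enc.Encode(P{3, 4})
- var q P
- dec.Decode(&q) // q is now P{3, 4}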
-
-The source and destination values/types need not correspond exactly. For structs,
-fields (identified by name) that are in the source but absent from the receiving
-variable will be ignored. Fields that are in the receiving variable but missing
-from the transmitted type or value will be ignored in the destination. If a field
-with the same name is present in both, their types must be compatible. Both the
-receiver and transmitter will do all necessary indirection and dereferencing to
-convert between gobs and actual Go values. For instance, a gob type that is
-schematically,
-
- struct { A, B int }
-
-can be sent from or received into any of these Go types:
-
- struct { A, B int } // the same
- *struct { A, B int } // extra indirection of the struct
- struct { *A, **B int } // extra indirection of the fields
- struct { A, B int64 } // different concrete value type; see below
-
-It may also be received into any of these:
-
- struct { A, B int } // the same
- struct { B, A int } // ordering doesn't matter; matching is by name
- struct { A, B, C int } // extra field (C) ignored
- struct { B int } // missing field (A) ignored; data will be dropped
- struct { B, C int } // missing field (A) ignored; extra field (C) ignored.
-
-Attempting to receive into these types will draw a decode error:
-
- struct { A int; B uint } // change of signedness for B
- struct { A int; B float } // change of type for B
- struct { } // no field names in common
- struct { C, D int } // no field names in common
-
-Integers are transmitted two ways: arbitrary precision signed integers or
-arbitrary precision unsigned integers. There is no int8, int16 etc.
-discrimination in the gob format; there are only signed and unsigned integers. As
-described below, the transmitter sends the value in a variable-length encoding;
-the receiver accepts the value and stores it in the destination variable.
-Floating-point numbers are always sent using IEEE-754 64-bit precision (see
-below).
-
-Signed integers may be received into any signed integer variable: int, int16, etc.;
-unsigned integers may be received into any unsigned integer variable; and floating
-point values may be received into any floating point variable. However,
-the destination variable must be able to represent the value or the decode
-operation will fail.
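-
-For example (a worked illustration of the rule above, not an additional rule),
-a value transmitted as int64(300) can be received into an int16 or an int32,
-but receiving it into an int8 fails with an "out of range" decode error,
-because 300 does not fit in an int8.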
-
-Structs, arrays and slices are also supported. Strings and arrays of bytes are
-supported with a special, efficient representation (see below). When a slice is
-decoded, if the existing slice has capacity the slice will be extended in place;
-if not, a new array is allocated. Regardless, the length of the resulting slice
-reports the number of elements decoded.
-
-Functions and channels cannot be sent in a gob. Attempting
-to encode a value that contains one will fail.
-
-The rest of this comment documents the encoding, details that are not important
-for most users. Details are presented bottom-up.
-
-An unsigned integer is sent one of two ways. If it is less than 128, it is sent
-as a byte with that value. Otherwise it is sent as a minimal-length big-endian
-(high byte first) byte stream holding the value, preceded by one byte holding the
-byte count, negated. Thus 0 is transmitted as (00), 7 is transmitted as (07) and
-256 is transmitted as (FE 01 00).
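-
-As a sketch only (the real encoder manages its buffers differently), this rule
-can be written as:
-
- func encodeUint(b *bytes.Buffer, x uint64) {
- if x < 0x80 {
- b.WriteByte(byte(x)) // small values are a single byte
- return
- }
- var buf [8]byte
- n := 0
- for ; x > 0; x >>= 8 {
- n++
- buf[8-n] = byte(x) // big-endian, minimal length
- }
- b.WriteByte(byte(-int8(n))) // byte count, negated: two bytes -> FE
- b.Write(buf[8-n:])
- }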
-
-A boolean is encoded within an unsigned integer: 0 for false, 1 for true.
-
-A signed integer, i, is encoded within an unsigned integer, u. Within u, bits 1
-upward contain the value; bit 0 says whether they should be complemented upon
-receipt. The encode algorithm looks like this:
-
- var u uint64
- if i < 0 {
- u = (^uint64(i) << 1) | 1 // complement i, bit 0 is 1
- } else {
- u = uint64(i) << 1 // do not complement i, bit 0 is 0
- }
- encodeUnsigned(u)
-
-The low bit is therefore analogous to a sign bit, but making it the complement bit
-instead guarantees that the largest negative integer is not a special case. For
-example, -129=^128=(^256>>1) encodes as (FE 01 01).
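-
-The receiving side inverts this mapping (compare decodeInt in decode.go); as a
-sketch:
-
- func toSigned(u uint64) int64 {
- if u&1 != 0 {
- return ^int64(u >> 1) // bit 0 set: the value was complemented
- }
- return int64(u >> 1)
- }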
-
-Floating-point numbers are always sent as a representation of a float64 value.
-That value is converted to a uint64 using math.Float64bits. The uint64 is then
-byte-reversed and sent as a regular unsigned integer. The byte-reversal means the
-exponent and high-precision part of the mantissa go first. Since the low bits are
-often zero, this can save encoding bytes. For instance, 17.0 is encoded in only
-three bytes (FE 31 40).
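-
-A sketch of the byte reversal (the resulting uint64 is then sent using the
-unsigned-integer rule above; floatBits here is illustrative, not part of the API):
-
- func floatBits(f float64) uint64 {
- u := math.Float64bits(f)
- var v uint64
- for i := 0; i < 8; i++ {
- v = v<<8 | u&0xFF // reverse the eight bytes
- u >>= 8
- }
- return v // for 17.0 this is 0x3140, transmitted as FE 31 40
- }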
-
-Strings and slices of bytes are sent as an unsigned count followed by that many
-uninterpreted bytes of the value.
-
-All other slices and arrays are sent as an unsigned count followed by that many
-elements using the standard gob encoding for their type, recursively.
-
-Maps are sent as an unsigned count followed by that many key, element
-pairs. Empty but non-nil maps are sent, so if the sender has allocated
-a map, the receiver will allocate a map even if no elements are
-transmitted.
-
-Structs are sent as a sequence of (field number, field value) pairs. The field
-value is sent using the standard gob encoding for its type, recursively. If a
-field has the zero value for its type, it is omitted from the transmission. The
-field number is defined by the type of the encoded struct: the first field of the
-encoded type is field 0, the second is field 1, etc. When encoding a value, the
-field numbers are delta encoded for efficiency and the fields are always sent in
-order of increasing field number; the deltas are therefore unsigned. The
-initialization for the delta encoding sets the field number to -1, so an unsigned
-integer field 0 with value 7 is transmitted as unsigned delta = 1, unsigned value
-= 7 or (01 07). Finally, after all the fields have been sent a terminating mark
-denotes the end of the struct. That mark is a delta=0 value, which has
-representation (00).
-
-Interface types are not checked for compatibility; all interface types are
-treated, for transmission, as members of a single "interface" type, analogous to
-int or []byte - in effect they're all treated as interface{}. Interface values
-are transmitted as a string identifying the concrete type being sent (a name
-that must be pre-defined by calling Register), followed by a byte count of the
-length of the following data (so the value can be skipped if it cannot be
-stored), followed by the usual encoding of the concrete (dynamic) value stored in
-the interface value. (A nil interface value is identified by the empty string
-and transmits no value.) Upon receipt, the decoder verifies that the unpacked
-concrete item satisfies the interface of the receiving variable.
-
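-For example, the following sketch (assuming the usual "bytes" and "encoding/gob"
-imports; the Square type is invented for illustration) registers a concrete type
-and sends it through an interface value:
-
-	type Square struct{ Side float64 }
-
-	func roundTrip() (interface{}, error) {
-		gob.Register(Square{}) // the concrete type must be registered by name
-		var buf bytes.Buffer
-		var v interface{} = Square{Side: 2}
-		if err := gob.NewEncoder(&buf).Encode(&v); err != nil {
-			return nil, err
-		}
-		var out interface{}
-		if err := gob.NewDecoder(&buf).Decode(&out); err != nil {
-			return nil, err
-		}
-		return out, nil // out should hold a Square with Side == 2
-	}
-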
-The representation of types is described below. When a type is defined on a given
-connection between an Encoder and Decoder, it is assigned a signed integer type
-id. When Encoder.Encode(v) is called, it makes sure there is an id assigned for
-the type of v and all its elements and then it sends the pair (typeid, encoded-v)
-where typeid is the type id of the encoded type of v and encoded-v is the gob
-encoding of the value v.
-
-To define a type, the encoder chooses an unused, positive type id and sends the
-pair (-type id, encoded-type) where encoded-type is the gob encoding of a wireType
-description, constructed from these types:
-
- type wireType struct {
- ArrayT *ArrayType
- SliceT *SliceType
- StructT *StructType
- MapT *MapType
- }
- type ArrayType struct {
- CommonType
- Elem typeId
- Len int
- }
- type CommonType struct {
- Name string // the name of the struct type
- Id int // the id of the type, repeated so it's inside the type
- }
- type SliceType struct {
- CommonType
- Elem typeId
- }
- type StructType struct {
- CommonType
- Field []*FieldType // the fields of the struct.
- }
- type FieldType struct {
- Name string // the name of the field.
- Id int // the type id of the field, which must be already defined
- }
- type MapType struct {
- CommonType
- Key typeId
- Elem typeId
- }
-
-If there are nested type ids, the types for all inner type ids must be defined
-before the top-level type id is used to describe an encoded-v.
-
-For simplicity in setup, the connection is defined to understand these types a
-priori, as well as the basic gob types int, uint, etc. Their ids are:
-
- bool 1
- int 2
- uint 3
- float 4
- []byte 5
- string 6
- complex 7
- interface 8
- // gap for reserved ids.
- WireType 16
- ArrayType 17
- CommonType 18
- SliceType 19
- StructType 20
- FieldType 21
- // 22 is slice of FieldType.
- MapType 23
-
-Finally, each message created by a call to Encode is preceded by an encoded
-unsigned integer count of the number of bytes remaining in the message. After
-the initial type name, interface values are wrapped the same way; in effect, the
-interface value acts like a recursive invocation of Encode.
-
-In summary, a gob stream looks like
-
- (byteCount (-type id, encoding of a wireType)* (type id, encoding of a value))*
-
-where * signifies zero or more repetitions and the type id of a value must
-be predefined or be defined before the value in the stream.
-
-See "Gobs of data" for a design discussion of the gob wire format:
-http://blog.golang.org/2011/03/gobs-of-data.html
-*/
-package gob
-
-/*
-Grammar:
-
-Tokens starting with a lower case letter are terminals; int(n)
-and uint(n) represent the signed/unsigned encodings of the value n.
-
-GobStream:
- DelimitedMessage*
-DelimitedMessage:
- uint(lengthOfMessage) Message
-Message:
- TypeSequence TypedValue
-TypeSequence:
- (TypeDefinition DelimitedTypeDefinition*)?
-DelimitedTypeDefinition:
- uint(lengthOfTypeDefinition) TypeDefinition
-TypedValue:
- int(typeId) Value
-TypeDefinition:
- int(-typeId) encodingOfWireType
-Value:
- SingletonValue | StructValue
-SingletonValue:
- uint(0) FieldValue
-FieldValue:
- builtinValue | ArrayValue | MapValue | SliceValue | StructValue | InterfaceValue
-InterfaceValue:
- NilInterfaceValue | NonNilInterfaceValue
-NilInterfaceValue:
- uint(0)
-NonNilInterfaceValue:
- ConcreteTypeName TypeSequence InterfaceContents
-ConcreteTypeName:
- uint(lengthOfName) [already read=n] name
-InterfaceContents:
- int(concreteTypeId) DelimitedValue
-DelimitedValue:
- uint(length) Value
-ArrayValue:
- uint(n) FieldValue*n [n elements]
-MapValue:
- uint(n) (FieldValue FieldValue)*n [n (key, value) pairs]
-SliceValue:
- uint(n) FieldValue*n [n elements]
-StructValue:
- (uint(fieldDelta) FieldValue)*
-*/
-
-/*
-For implementers and the curious, here is an encoded example. Given
- type Point struct {X, Y int}
-and the value
- p := Point{22, 33}
-the bytes transmitted that encode p will be:
- 1f ff 81 03 01 01 05 50 6f 69 6e 74 01 ff 82 00
- 01 02 01 01 58 01 04 00 01 01 59 01 04 00 00 00
- 07 ff 82 01 2c 01 42 00
-They are determined as follows.
-
-Since this is the first transmission of type Point, the type descriptor
-for Point itself must be sent before the value. This is the first type
-we've sent on this Encoder, so it has type id 65 (0 through 64 are
-reserved).
-
- 1f // This item (a type descriptor) is 31 bytes long.
- ff 81 // The negative of the id for the type we're defining, -65.
- // This is one byte (indicated by FF = -1) followed by
- // ^-65<<1 | 1. The low 1 bit signals to complement the
- // rest upon receipt.
-
- // Now we send a type descriptor, which is itself a struct (wireType).
- // The type of wireType itself is known (it's built in, as is the type of
- // all its components), so we just need to send a *value* of type wireType
- // that represents type "Point".
- // Here starts the encoding of that value.
- // Set the field number implicitly to -1; this is done at the beginning
- // of every struct, including nested structs.
- 03 // Add 3 to field number; now 2 (wireType.structType; this is a struct).
- // structType starts with an embedded commonType, which appears
- // as a regular structure here too.
- 01 // add 1 to field number (now 0); start of embedded commonType.
- 01 // add 1 to field number (now 0, the name of the type)
- 05 // string is (unsigned) 5 bytes long
- 50 6f 69 6e 74 // wireType.structType.commonType.name = "Point"
- 01 // add 1 to field number (now 1, the id of the type)
- ff 82 // wireType.structType.commonType.Id = 65
- 00 // end of embedded wireType.structType.commonType struct
- 01 // add 1 to field number (now 1, the field array in wireType.structType)
- 02 // There are two fields in the type (len(structType.field))
- 01 // Start of first field structure; add 1 to get field number 0: field[0].name
- 01 // 1 byte
- 58 // structType.field[0].name = "X"
- 01 // Add 1 to get field number 1: field[0].id
- 04 // structType.field[0].typeId is 2 (signed int).
- 00 // End of structType.field[0]; start structType.field[1]; set field number to -1.
- 01 // Add 1 to get field number 0: field[1].name
- 01 // 1 byte
- 59 // structType.field[1].name = "Y"
- 01 // Add 1 to get field number 1: field[1].id
- 04 // structType.field[1].typeId is 2 (signed int).
- 00 // End of structType.field[1]; end of structType.field.
- 00 // end of wireType.structType structure
- 00 // end of wireType structure
-
-Now we can send the Point value. Again the field number resets to -1:
-
- 07 // this value is 7 bytes long
- ff 82 // the type number, 65 (1 byte (-FF) followed by 65<<1)
- 01 // add one to field number, yielding field 0
- 2c // encoding of signed "22" (0x2c = 44 = 22<<1); Point.X = 22
- 01 // add one to field number, yielding field 1
- 42 // encoding of signed "33" (0x42 = 66 = 33<<1); Point.Y = 33
- 00 // end of structure
-
-The type encoding is long and fairly intricate but we send it only once.
-If p is transmitted a second time, the type is already known so the
-output will be just:
-
- 07 ff 82 01 2c 01 42 00
-
-A single non-struct value at top level is transmitted like a field with
-delta tag 0. For instance, a signed integer with value 3 presented as
-the argument to Encode will emit:
-
- 03 04 00 06
-
-Which represents:
-
- 03 // this value is 3 bytes long
- 04 // the type number, 2, represents an integer
- 00 // tag delta 0
- 06 // value 3
-
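-To see these bytes for yourself, a small throwaway program (assuming the import
-path "encoding/gob") can hex-dump what an Encoder emits for the examples above:
-
-	package main
-
-	import (
-		"bytes"
-		"encoding/gob"
-		"fmt"
-	)
-
-	type Point struct{ X, Y int }
-
-	func main() {
-		var buf bytes.Buffer
-		enc := gob.NewEncoder(&buf)
-		enc.Encode(Point{22, 33}) // first send: type descriptor, then the value
-		enc.Encode(Point{22, 33}) // second send: just 07 ff 82 01 2c 01 42 00
-		enc.Encode(3)             // singleton int: 03 04 00 06; errors elided
-		fmt.Printf("% x\n", buf.Bytes())
-	}
-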
-*/
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gob
-
-import (
- "bytes"
- "math"
- "reflect"
- "unsafe"
-)
-
-const uint64Size = int(unsafe.Sizeof(uint64(0)))
-
-// encoderState is the global execution state of an instance of the encoder.
-// Field numbers are delta encoded and always increase. The field
-// number is initialized to -1 so 0 comes out as delta(1). A delta of
-// 0 terminates the structure.
-type encoderState struct {
- enc *Encoder
- b *bytes.Buffer
- sendZero bool // encoding an array element or map key/value pair; send zero values
- fieldnum int // the last field number written.
- buf [1 + uint64Size]byte // buffer used by the encoder; here to avoid allocation.
- next *encoderState // for free list
-}
-
-func (enc *Encoder) newEncoderState(b *bytes.Buffer) *encoderState {
- e := enc.freeList
- if e == nil {
- e = new(encoderState)
- e.enc = enc
- } else {
- enc.freeList = e.next
- }
- e.sendZero = false
- e.fieldnum = 0
- e.b = b
- return e
-}
-
-func (enc *Encoder) freeEncoderState(e *encoderState) {
- e.next = enc.freeList
- enc.freeList = e
-}
-
-// Unsigned integers have a two-state encoding. If the number is less
-// than 128 (0 through 0x7F), its value is written directly.
-// Otherwise the value is written in big-endian byte order preceded
-// by the byte length, negated.
-
-// encodeUint writes an encoded unsigned integer to state.b.
-func (state *encoderState) encodeUint(x uint64) {
- if x <= 0x7F {
- err := state.b.WriteByte(uint8(x))
- if err != nil {
- error_(err)
- }
- return
- }
- i := uint64Size
- for x > 0 {
- state.buf[i] = uint8(x)
- x >>= 8
- i--
- }
- state.buf[i] = uint8(i - uint64Size) // = loop count, negated
- _, err := state.b.Write(state.buf[i : uint64Size+1])
- if err != nil {
- error_(err)
- }
-}
-
-// encodeInt writes an encoded signed integer to state.b.
-// The low bit of the encoding says whether to bit complement the (other bits of the)
-// uint to recover the int.
-func (state *encoderState) encodeInt(i int64) {
- var x uint64
- if i < 0 {
- x = uint64(^i<<1) | 1
- } else {
- x = uint64(i << 1)
- }
- state.encodeUint(uint64(x))
-}
-
-// encOp is the signature of an encoding operator for a given type.
-type encOp func(i *encInstr, state *encoderState, p unsafe.Pointer)
-
-// The 'instructions' of the encoding machine
-type encInstr struct {
- op encOp
- field int // field number
- indir int // how many pointer indirections to reach the value in the struct
- offset uintptr // offset in the structure of the field to encode
-}
-
-// update emits a field number and updates the state to record its value for delta encoding.
-// If the instruction pointer is nil, it does nothing
-func (state *encoderState) update(instr *encInstr) {
- if instr != nil {
- state.encodeUint(uint64(instr.field - state.fieldnum))
- state.fieldnum = instr.field
- }
-}
-
-// Each encoder for a composite is responsible for handling any
-// indirections associated with the elements of the data structure.
-// If any pointer so reached is nil, no bytes are written. If the
-// data item is zero, no bytes are written. Single values - ints,
-// strings etc. - are indirected before calling their encoders.
-// Otherwise, the output (for a scalar) is the field number, as an
-// encoded integer, followed by the field data in its appropriate
-// format.
-
-// encIndirect dereferences p indir times and returns the result.
-func encIndirect(p unsafe.Pointer, indir int) unsafe.Pointer {
- for ; indir > 0; indir-- {
- p = *(*unsafe.Pointer)(p)
- if p == nil {
- return unsafe.Pointer(nil)
- }
- }
- return p
-}
-
-// encBool encodes the bool with address p as an unsigned 0 or 1.
-func encBool(i *encInstr, state *encoderState, p unsafe.Pointer) {
- b := *(*bool)(p)
- if b || state.sendZero {
- state.update(i)
- if b {
- state.encodeUint(1)
- } else {
- state.encodeUint(0)
- }
- }
-}
-
-// encInt encodes the int with address p.
-func encInt(i *encInstr, state *encoderState, p unsafe.Pointer) {
- v := int64(*(*int)(p))
- if v != 0 || state.sendZero {
- state.update(i)
- state.encodeInt(v)
- }
-}
-
-// encUint encodes the uint with address p.
-func encUint(i *encInstr, state *encoderState, p unsafe.Pointer) {
- v := uint64(*(*uint)(p))
- if v != 0 || state.sendZero {
- state.update(i)
- state.encodeUint(v)
- }
-}
-
-// encInt8 encodes the int8 with address p.
-func encInt8(i *encInstr, state *encoderState, p unsafe.Pointer) {
- v := int64(*(*int8)(p))
- if v != 0 || state.sendZero {
- state.update(i)
- state.encodeInt(v)
- }
-}
-
-// encUint8 encodes the uint8 with address p.
-func encUint8(i *encInstr, state *encoderState, p unsafe.Pointer) {
- v := uint64(*(*uint8)(p))
- if v != 0 || state.sendZero {
- state.update(i)
- state.encodeUint(v)
- }
-}
-
-// encInt16 encodes the int16 with address p.
-func encInt16(i *encInstr, state *encoderState, p unsafe.Pointer) {
- v := int64(*(*int16)(p))
- if v != 0 || state.sendZero {
- state.update(i)
- state.encodeInt(v)
- }
-}
-
-// encUint16 encodes the uint16 with address p.
-func encUint16(i *encInstr, state *encoderState, p unsafe.Pointer) {
- v := uint64(*(*uint16)(p))
- if v != 0 || state.sendZero {
- state.update(i)
- state.encodeUint(v)
- }
-}
-
-// encInt32 encodes the int32 with address p.
-func encInt32(i *encInstr, state *encoderState, p unsafe.Pointer) {
- v := int64(*(*int32)(p))
- if v != 0 || state.sendZero {
- state.update(i)
- state.encodeInt(v)
- }
-}
-
-// encUint32 encodes the uint32 with address p.
-func encUint32(i *encInstr, state *encoderState, p unsafe.Pointer) {
- v := uint64(*(*uint32)(p))
- if v != 0 || state.sendZero {
- state.update(i)
- state.encodeUint(v)
- }
-}
-
-// encInt64 encodes the int64 with address p.
-func encInt64(i *encInstr, state *encoderState, p unsafe.Pointer) {
- v := *(*int64)(p)
- if v != 0 || state.sendZero {
- state.update(i)
- state.encodeInt(v)
- }
-}
-
-// encUint64 encodes the uint64 with address p.
-func encUint64(i *encInstr, state *encoderState, p unsafe.Pointer) {
- v := *(*uint64)(p)
- if v != 0 || state.sendZero {
- state.update(i)
- state.encodeUint(v)
- }
-}
-
-// encUintptr encodes the uintptr with address p.
-func encUintptr(i *encInstr, state *encoderState, p unsafe.Pointer) {
- v := uint64(*(*uintptr)(p))
- if v != 0 || state.sendZero {
- state.update(i)
- state.encodeUint(v)
- }
-}
-
-// floatBits returns a uint64 holding the bits of a floating-point number.
-// Floating-point numbers are transmitted as uint64s holding the bits
-// of the underlying representation. They are sent byte-reversed, with
-// the exponent end coming out first, so integer floating point numbers
-// (for example) transmit more compactly. This routine does the
-// swizzling.
-func floatBits(f float64) uint64 {
- u := math.Float64bits(f)
- var v uint64
- for i := 0; i < 8; i++ {
- v <<= 8
- v |= u & 0xFF
- u >>= 8
- }
- return v
-}
-
-// encFloat32 encodes the float32 with address p.
-func encFloat32(i *encInstr, state *encoderState, p unsafe.Pointer) {
- f := *(*float32)(p)
- if f != 0 || state.sendZero {
- v := floatBits(float64(f))
- state.update(i)
- state.encodeUint(v)
- }
-}
-
-// encFloat64 encodes the float64 with address p.
-func encFloat64(i *encInstr, state *encoderState, p unsafe.Pointer) {
- f := *(*float64)(p)
- if f != 0 || state.sendZero {
- state.update(i)
- v := floatBits(f)
- state.encodeUint(v)
- }
-}
-
-// encComplex64 encodes the complex64 with address p.
-// Complex numbers are just a pair of floating-point numbers, real part first.
-func encComplex64(i *encInstr, state *encoderState, p unsafe.Pointer) {
- c := *(*complex64)(p)
- if c != 0+0i || state.sendZero {
- rpart := floatBits(float64(real(c)))
- ipart := floatBits(float64(imag(c)))
- state.update(i)
- state.encodeUint(rpart)
- state.encodeUint(ipart)
- }
-}
-
-// encComplex128 encodes the complex128 with address p.
-func encComplex128(i *encInstr, state *encoderState, p unsafe.Pointer) {
- c := *(*complex128)(p)
- if c != 0+0i || state.sendZero {
- rpart := floatBits(real(c))
- ipart := floatBits(imag(c))
- state.update(i)
- state.encodeUint(rpart)
- state.encodeUint(ipart)
- }
-}
-
-// encUint8Array encodes the byte slice whose header has address p.
-// Byte arrays are encoded as an unsigned count followed by the raw bytes.
-func encUint8Array(i *encInstr, state *encoderState, p unsafe.Pointer) {
- b := *(*[]byte)(p)
- if len(b) > 0 || state.sendZero {
- state.update(i)
- state.encodeUint(uint64(len(b)))
- state.b.Write(b)
- }
-}
-
-// encString encodes the string whose header has address p.
-// Strings are encoded as an unsigned count followed by the raw bytes.
-func encString(i *encInstr, state *encoderState, p unsafe.Pointer) {
- s := *(*string)(p)
- if len(s) > 0 || state.sendZero {
- state.update(i)
- state.encodeUint(uint64(len(s)))
- state.b.WriteString(s)
- }
-}
-
-// encStructTerminator encodes the end of an encoded struct
-// as delta field number of 0.
-func encStructTerminator(i *encInstr, state *encoderState, p unsafe.Pointer) {
- state.encodeUint(0)
-}
-
-// Execution engine
-
-// encEngine is an array of instructions indexed by field number of the encoding
-// data, typically a struct. It is executed top to bottom, walking the struct.
-type encEngine struct {
- instr []encInstr
-}
-
-const singletonField = 0
-
-// encodeSingle encodes a single top-level non-struct value.
-func (enc *Encoder) encodeSingle(b *bytes.Buffer, engine *encEngine, basep uintptr) {
- state := enc.newEncoderState(b)
- state.fieldnum = singletonField
- // There is no surrounding struct to frame the transmission, so we must
- // generate data even if the item is zero. To do this, set sendZero.
- state.sendZero = true
- instr := &engine.instr[singletonField]
- p := unsafe.Pointer(basep) // offset will be zero
- if instr.indir > 0 {
- if p = encIndirect(p, instr.indir); p == nil {
- return
- }
- }
- instr.op(instr, state, p)
- enc.freeEncoderState(state)
-}
-
-// encodeStruct encodes a single struct value.
-func (enc *Encoder) encodeStruct(b *bytes.Buffer, engine *encEngine, basep uintptr) {
- state := enc.newEncoderState(b)
- state.fieldnum = -1
- for i := 0; i < len(engine.instr); i++ {
- instr := &engine.instr[i]
- p := unsafe.Pointer(basep + instr.offset)
- if instr.indir > 0 {
- if p = encIndirect(p, instr.indir); p == nil {
- continue
- }
- }
- instr.op(instr, state, p)
- }
- enc.freeEncoderState(state)
-}
-
-// encodeArray encodes the array whose 0th element is at p.
-func (enc *Encoder) encodeArray(b *bytes.Buffer, p uintptr, op encOp, elemWid uintptr, elemIndir int, length int) {
- state := enc.newEncoderState(b)
- state.fieldnum = -1
- state.sendZero = true
- state.encodeUint(uint64(length))
- for i := 0; i < length; i++ {
- elemp := p
- up := unsafe.Pointer(elemp)
- if elemIndir > 0 {
- if up = encIndirect(up, elemIndir); up == nil {
- errorf("encodeArray: nil element")
- }
- elemp = uintptr(up)
- }
- op(nil, state, unsafe.Pointer(elemp))
- p += uintptr(elemWid)
- }
- enc.freeEncoderState(state)
-}
-
-// encodeReflectValue is a helper for maps. It encodes the value v.
-func encodeReflectValue(state *encoderState, v reflect.Value, op encOp, indir int) {
- for i := 0; i < indir && v.IsValid(); i++ {
- v = reflect.Indirect(v)
- }
- if !v.IsValid() {
- errorf("encodeReflectValue: nil element")
- }
- op(nil, state, unsafe.Pointer(unsafeAddr(v)))
-}
-
-// encodeMap encodes a map as unsigned count followed by key:value pairs.
-// Because map internals are not exposed, we must use reflection rather than
-// addresses.
-func (enc *Encoder) encodeMap(b *bytes.Buffer, mv reflect.Value, keyOp, elemOp encOp, keyIndir, elemIndir int) {
- state := enc.newEncoderState(b)
- state.fieldnum = -1
- state.sendZero = true
- keys := mv.MapKeys()
- state.encodeUint(uint64(len(keys)))
- for _, key := range keys {
- encodeReflectValue(state, key, keyOp, keyIndir)
- encodeReflectValue(state, mv.MapIndex(key), elemOp, elemIndir)
- }
- enc.freeEncoderState(state)
-}
-
-// encodeInterface encodes the interface value iv.
-// To send an interface, we send a string identifying the concrete type, followed
-// by the type identifier (which might require defining that type right now), followed
-// by the concrete value. A nil value gets sent as the empty string for the name,
-// followed by no value.
-func (enc *Encoder) encodeInterface(b *bytes.Buffer, iv reflect.Value) {
- state := enc.newEncoderState(b)
- state.fieldnum = -1
- state.sendZero = true
- if iv.IsNil() {
- state.encodeUint(0)
- return
- }
-
- ut := userType(iv.Elem().Type())
- name, ok := concreteTypeToName[ut.base]
- if !ok {
- errorf("type not registered for interface: %s", ut.base)
- }
- // Send the name.
- state.encodeUint(uint64(len(name)))
- _, err := state.b.WriteString(name)
- if err != nil {
- error_(err)
- }
- // Define the type id if necessary.
- enc.sendTypeDescriptor(enc.writer(), state, ut)
- // Send the type id.
- enc.sendTypeId(state, ut)
- // Encode the value into a new buffer. Any nested type definitions
- // should be written to b, before the encoded value.
- enc.pushWriter(b)
- data := new(bytes.Buffer)
- data.Write(spaceForLength)
- enc.encode(data, iv.Elem(), ut)
- if enc.err != nil {
- error_(enc.err)
- }
- enc.popWriter()
- enc.writeMessage(b, data)
- if enc.err != nil {
- error_(enc.err)
- }
- enc.freeEncoderState(state)
-}
-
-// isZero returns whether the value is the zero of its type.
-func isZero(val reflect.Value) bool {
- switch val.Kind() {
- case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
- return val.Len() == 0
- case reflect.Bool:
- return !val.Bool()
- case reflect.Complex64, reflect.Complex128:
- return val.Complex() == 0
- case reflect.Chan, reflect.Func, reflect.Ptr:
- return val.IsNil()
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return val.Int() == 0
- case reflect.Float32, reflect.Float64:
- return val.Float() == 0
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- return val.Uint() == 0
- }
- panic("unknown type in isZero " + val.Type().String())
-}
-
-// encodeGobEncoder encodes a value that implements the GobEncoder interface.
-// The data is sent as a byte array.
-func (enc *Encoder) encodeGobEncoder(b *bytes.Buffer, v reflect.Value) {
- // TODO: should we catch panics from the called method?
- // We know it's a GobEncoder, so just call the method directly.
- data, err := v.Interface().(GobEncoder).GobEncode()
- if err != nil {
- error_(err)
- }
- state := enc.newEncoderState(b)
- state.fieldnum = -1
- state.encodeUint(uint64(len(data)))
- state.b.Write(data)
- enc.freeEncoderState(state)
-}
-
-var encOpTable = [...]encOp{
- reflect.Bool: encBool,
- reflect.Int: encInt,
- reflect.Int8: encInt8,
- reflect.Int16: encInt16,
- reflect.Int32: encInt32,
- reflect.Int64: encInt64,
- reflect.Uint: encUint,
- reflect.Uint8: encUint8,
- reflect.Uint16: encUint16,
- reflect.Uint32: encUint32,
- reflect.Uint64: encUint64,
- reflect.Uintptr: encUintptr,
- reflect.Float32: encFloat32,
- reflect.Float64: encFloat64,
- reflect.Complex64: encComplex64,
- reflect.Complex128: encComplex128,
- reflect.String: encString,
-}
-
-// encOpFor returns (a pointer to) the encoding op for the base type under rt and
-// the indirection count to reach it.
-func (enc *Encoder) encOpFor(rt reflect.Type, inProgress map[reflect.Type]*encOp) (*encOp, int) {
- ut := userType(rt)
- // If the type implements GobEncoder, we handle it without further processing.
- if ut.isGobEncoder {
- return enc.gobEncodeOpFor(ut)
- }
- // If this type is already in progress, it's a recursive type (e.g. map[string]*T).
- // Return the pointer to the op we're already building.
- if opPtr := inProgress[rt]; opPtr != nil {
- return opPtr, ut.indir
- }
- typ := ut.base
- indir := ut.indir
- k := typ.Kind()
- var op encOp
- if int(k) < len(encOpTable) {
- op = encOpTable[k]
- }
- if op == nil {
- inProgress[rt] = &op
- // Special cases
- switch t := typ; t.Kind() {
- case reflect.Slice:
- if t.Elem().Kind() == reflect.Uint8 {
- op = encUint8Array
- break
- }
- // Slices have a header; we decode it to find the underlying array.
- elemOp, indir := enc.encOpFor(t.Elem(), inProgress)
- op = func(i *encInstr, state *encoderState, p unsafe.Pointer) {
- slice := (*reflect.SliceHeader)(p)
- if !state.sendZero && slice.Len == 0 {
- return
- }
- state.update(i)
- state.enc.encodeArray(state.b, slice.Data, *elemOp, t.Elem().Size(), indir, int(slice.Len))
- }
- case reflect.Array:
- // True arrays have size in the type.
- elemOp, indir := enc.encOpFor(t.Elem(), inProgress)
- op = func(i *encInstr, state *encoderState, p unsafe.Pointer) {
- state.update(i)
- state.enc.encodeArray(state.b, uintptr(p), *elemOp, t.Elem().Size(), indir, t.Len())
- }
- case reflect.Map:
- keyOp, keyIndir := enc.encOpFor(t.Key(), inProgress)
- elemOp, elemIndir := enc.encOpFor(t.Elem(), inProgress)
- op = func(i *encInstr, state *encoderState, p unsafe.Pointer) {
- // Maps cannot be accessed by moving addresses around the way
- // that slices etc. can. We must recover a full reflection value for
- // the iteration.
- v := reflect.ValueOf(unsafe.Unreflect(t, unsafe.Pointer(p)))
- mv := reflect.Indirect(v)
- // We send zero-length (but non-nil) maps because the
- // receiver might want to use the map. (Maps don't use append.)
- if !state.sendZero && mv.IsNil() {
- return
- }
- state.update(i)
- state.enc.encodeMap(state.b, mv, *keyOp, *elemOp, keyIndir, elemIndir)
- }
- case reflect.Struct:
- // Generate a closure that calls out to the engine for the nested type.
- enc.getEncEngine(userType(typ))
- info := mustGetTypeInfo(typ)
- op = func(i *encInstr, state *encoderState, p unsafe.Pointer) {
- state.update(i)
- // indirect through info to delay evaluation for recursive structs
- state.enc.encodeStruct(state.b, info.encoder, uintptr(p))
- }
- case reflect.Interface:
- op = func(i *encInstr, state *encoderState, p unsafe.Pointer) {
- // Interfaces transmit the name and contents of the concrete
- // value they contain.
- v := reflect.ValueOf(unsafe.Unreflect(t, unsafe.Pointer(p)))
- iv := reflect.Indirect(v)
- if !state.sendZero && (!iv.IsValid() || iv.IsNil()) {
- return
- }
- state.update(i)
- state.enc.encodeInterface(state.b, iv)
- }
- }
- }
- if op == nil {
- errorf("can't happen: encode type %s", rt)
- }
- return &op, indir
-}
-
-// gobEncodeOpFor returns the op for a type that is known to implement
-// GobEncoder.
-func (enc *Encoder) gobEncodeOpFor(ut *userTypeInfo) (*encOp, int) {
- rt := ut.user
- if ut.encIndir == -1 {
- rt = reflect.PtrTo(rt)
- } else if ut.encIndir > 0 {
- for i := int8(0); i < ut.encIndir; i++ {
- rt = rt.Elem()
- }
- }
- var op encOp
- op = func(i *encInstr, state *encoderState, p unsafe.Pointer) {
- var v reflect.Value
- if ut.encIndir == -1 {
- // Need to climb up one level to turn value into pointer.
- v = reflect.ValueOf(unsafe.Unreflect(rt, unsafe.Pointer(&p)))
- } else {
- v = reflect.ValueOf(unsafe.Unreflect(rt, p))
- }
- if !state.sendZero && isZero(v) {
- return
- }
- state.update(i)
- state.enc.encodeGobEncoder(state.b, v)
- }
- return &op, int(ut.encIndir) // encIndir: op will get called with p == address of receiver.
-}
-
-// compileEnc builds and returns the encoding engine for the type.
-func (enc *Encoder) compileEnc(ut *userTypeInfo) *encEngine {
- srt := ut.base
- engine := new(encEngine)
- seen := make(map[reflect.Type]*encOp)
- rt := ut.base
- if ut.isGobEncoder {
- rt = ut.user
- }
- if !ut.isGobEncoder &&
- srt.Kind() == reflect.Struct {
- for fieldNum, wireFieldNum := 0, 0; fieldNum < srt.NumField(); fieldNum++ {
- f := srt.Field(fieldNum)
- if !isExported(f.Name) {
- continue
- }
- op, indir := enc.encOpFor(f.Type, seen)
- engine.instr = append(engine.instr, encInstr{*op, wireFieldNum, indir, uintptr(f.Offset)})
- wireFieldNum++
- }
- if srt.NumField() > 0 && len(engine.instr) == 0 {
- errorf("type %s has no exported fields", rt)
- }
- engine.instr = append(engine.instr, encInstr{encStructTerminator, 0, 0, 0})
- } else {
- engine.instr = make([]encInstr, 1)
- op, indir := enc.encOpFor(rt, seen)
- engine.instr[0] = encInstr{*op, singletonField, indir, 0} // offset is zero
- }
- return engine
-}
-
-// getEncEngine returns the cached encoding engine for the type, compiling it first if necessary.
-// typeLock must be held (or we're in initialization and guaranteed single-threaded).
-func (enc *Encoder) getEncEngine(ut *userTypeInfo) *encEngine {
- info, err1 := getTypeInfo(ut)
- if err1 != nil {
- error_(err1)
- }
- if info.encoder == nil {
- // mark this engine as underway before compiling to handle recursive types.
- info.encoder = new(encEngine)
- info.encoder = enc.compileEnc(ut)
- }
- return info.encoder
-}
-
-// lockAndGetEncEngine takes the type lock and returns the encoding engine, compiling it if necessary.
-// This lets us hold the lock only while compiling, not while encoding.
-func (enc *Encoder) lockAndGetEncEngine(ut *userTypeInfo) *encEngine {
- typeLock.Lock()
- defer typeLock.Unlock()
- return enc.getEncEngine(ut)
-}
-
-func (enc *Encoder) encode(b *bytes.Buffer, value reflect.Value, ut *userTypeInfo) {
- defer catchError(&enc.err)
- engine := enc.lockAndGetEncEngine(ut)
- indir := ut.indir
- if ut.isGobEncoder {
- indir = int(ut.encIndir)
- }
- for i := 0; i < indir; i++ {
- value = reflect.Indirect(value)
- }
- if !ut.isGobEncoder && value.Type().Kind() == reflect.Struct {
- enc.encodeStruct(b, engine, unsafeAddr(value))
- } else {
- enc.encodeSingle(b, engine, unsafeAddr(value))
- }
-}
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gob
-
-import (
- "bytes"
- "errors"
- "io"
- "reflect"
- "sync"
-)
-
-// An Encoder manages the transmission of type and data information to the
-// other side of a connection.
-type Encoder struct {
- mutex sync.Mutex // each item must be sent atomically
- w []io.Writer // where to send the data
- sent map[reflect.Type]typeId // which types we've already sent
- countState *encoderState // stage for writing counts
- freeList *encoderState // list of free encoderStates; avoids reallocation
- byteBuf bytes.Buffer // buffer for top-level encoderState
- err error
-}
-
-// Before we encode a message, we reserve space at the head of the
-// buffer in which to encode its length. This means we can use the
-// buffer to assemble the message without another allocation.
-const maxLength = 9 // Maximum size of an encoded length.
-var spaceForLength = make([]byte, maxLength)
-
-// NewEncoder returns a new encoder that will transmit on the io.Writer.
-func NewEncoder(w io.Writer) *Encoder {
- enc := new(Encoder)
- enc.w = []io.Writer{w}
- enc.sent = make(map[reflect.Type]typeId)
- enc.countState = enc.newEncoderState(new(bytes.Buffer))
- return enc
-}
-
-// writer returns the innermost writer the encoder is using.
-func (enc *Encoder) writer() io.Writer {
- return enc.w[len(enc.w)-1]
-}
-
-// pushWriter adds a writer to the encoder.
-func (enc *Encoder) pushWriter(w io.Writer) {
- enc.w = append(enc.w, w)
-}
-
-// popWriter pops the innermost writer.
-func (enc *Encoder) popWriter() {
- enc.w = enc.w[0 : len(enc.w)-1]
-}
-
-func (enc *Encoder) badType(rt reflect.Type) {
- enc.setError(errors.New("gob: can't encode type " + rt.String()))
-}
-
-func (enc *Encoder) setError(err error) {
- if enc.err == nil { // remember the first.
- enc.err = err
- }
-}
-
-// writeMessage sends the data item preceded by an unsigned count of its length.
-func (enc *Encoder) writeMessage(w io.Writer, b *bytes.Buffer) {
- // Space has been reserved for the length at the head of the message.
- // This is a little dirty: we grab the slice from the bytes.Buffer and massage
- // it by hand.
- message := b.Bytes()
- messageLen := len(message) - maxLength
- // Encode the length.
- enc.countState.b.Reset()
- enc.countState.encodeUint(uint64(messageLen))
- // Copy the length to be a prefix of the message.
- offset := maxLength - enc.countState.b.Len()
- copy(message[offset:], enc.countState.b.Bytes())
- // Write the data.
- _, err := w.Write(message[offset:])
- // Drain the buffer and restore the space at the front for the count of the next message.
- b.Reset()
- b.Write(spaceForLength)
- if err != nil {
- enc.setError(err)
- }
-}
-
-// sendActualType sends the requested type, without further investigation, unless
-// it's been sent before.
-func (enc *Encoder) sendActualType(w io.Writer, state *encoderState, ut *userTypeInfo, actual reflect.Type) (sent bool) {
- if _, alreadySent := enc.sent[actual]; alreadySent {
- return false
- }
- typeLock.Lock()
- info, err := getTypeInfo(ut)
- typeLock.Unlock()
- if err != nil {
- enc.setError(err)
- return
- }
- // Send the pair (-id, type)
- // Id:
- state.encodeInt(-int64(info.id))
- // Type:
- enc.encode(state.b, reflect.ValueOf(info.wire), wireTypeUserInfo)
- enc.writeMessage(w, state.b)
- if enc.err != nil {
- return
- }
-
- // Remember we've sent this type, both what the user gave us and the base type.
- enc.sent[ut.base] = info.id
- if ut.user != ut.base {
- enc.sent[ut.user] = info.id
- }
- // Now send the inner types
- switch st := actual; st.Kind() {
- case reflect.Struct:
- for i := 0; i < st.NumField(); i++ {
- enc.sendType(w, state, st.Field(i).Type)
- }
- case reflect.Array, reflect.Slice:
- enc.sendType(w, state, st.Elem())
- case reflect.Map:
- enc.sendType(w, state, st.Key())
- enc.sendType(w, state, st.Elem())
- }
- return true
-}
-
-// sendType sends the type info to the other side, if necessary.
-func (enc *Encoder) sendType(w io.Writer, state *encoderState, origt reflect.Type) (sent bool) {
- ut := userType(origt)
- if ut.isGobEncoder {
- // The rules are different: regardless of the underlying type's representation,
- // we need to tell the other side that this exact type is a GobEncoder.
- return enc.sendActualType(w, state, ut, ut.user)
- }
-
- // It's a concrete value, so drill down to the base type.
- switch rt := ut.base; rt.Kind() {
- default:
- // Basic types and interfaces do not need to be described.
- return
- case reflect.Slice:
- // If it's []uint8, don't send; it's considered basic.
- if rt.Elem().Kind() == reflect.Uint8 {
- return
- }
- // Otherwise we do send.
- break
- case reflect.Array:
- // arrays must be sent so we know their lengths and element types.
- break
- case reflect.Map:
- // maps must be sent so we know their lengths and key/value types.
- break
- case reflect.Struct:
- // structs must be sent so we know their fields.
- break
- case reflect.Chan, reflect.Func:
- // Probably a bad field in a struct.
- enc.badType(rt)
- return
- }
-
- return enc.sendActualType(w, state, ut, ut.base)
-}
-
-// Encode transmits the data item represented by the empty interface value,
-// guaranteeing that all necessary type information has been transmitted first.
-func (enc *Encoder) Encode(e interface{}) error {
- return enc.EncodeValue(reflect.ValueOf(e))
-}
-
-// sendTypeDescriptor makes sure the remote side knows about this type.
-// It will send a descriptor if this is the first time the type has been
-// sent.
-func (enc *Encoder) sendTypeDescriptor(w io.Writer, state *encoderState, ut *userTypeInfo) {
- // Make sure the type is known to the other side.
- // First, have we already sent this type?
- rt := ut.base
- if ut.isGobEncoder {
- rt = ut.user
- }
- if _, alreadySent := enc.sent[rt]; !alreadySent {
- // No, so send it.
- sent := enc.sendType(w, state, rt)
- if enc.err != nil {
- return
- }
- // If the type info has still not been transmitted, it means we have
- // a singleton basic type (int, []byte etc.) at top level. We don't
- // need to send the type info but we do need to update enc.sent.
- if !sent {
- typeLock.Lock()
- info, err := getTypeInfo(ut)
- typeLock.Unlock()
- if err != nil {
- enc.setError(err)
- return
- }
- enc.sent[rt] = info.id
- }
- }
-}
-
-// sendTypeId sends the id, which must have already been defined.
-func (enc *Encoder) sendTypeId(state *encoderState, ut *userTypeInfo) {
- // Identify the type of this top-level value.
- state.encodeInt(int64(enc.sent[ut.base]))
-}
-
-// EncodeValue transmits the data item represented by the reflection value,
-// guaranteeing that all necessary type information has been transmitted first.
-func (enc *Encoder) EncodeValue(value reflect.Value) error {
- // Make sure we're single-threaded through here, so multiple
- // goroutines can share an encoder.
- enc.mutex.Lock()
- defer enc.mutex.Unlock()
-
- // Remove any nested writers remaining due to previous errors.
- enc.w = enc.w[0:1]
-
- ut, err := validUserType(value.Type())
- if err != nil {
- return err
- }
-
- enc.err = nil
- enc.byteBuf.Reset()
- enc.byteBuf.Write(spaceForLength)
- state := enc.newEncoderState(&enc.byteBuf)
-
- enc.sendTypeDescriptor(enc.writer(), state, ut)
- enc.sendTypeId(state, ut)
- if enc.err != nil {
- return enc.err
- }
-
- // Encode the object.
- enc.encode(state.b, value, ut)
- if enc.err == nil {
- enc.writeMessage(enc.writer(), state.b)
- }
-
- enc.freeEncoderState(state)
- return enc.err
-}
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gob
-
-import (
- "bytes"
- "fmt"
- "io"
- "reflect"
- "strings"
- "testing"
-)
-
-type ET2 struct {
- X string
-}
-
-type ET1 struct {
- A int
- Et2 *ET2
- Next *ET1
-}
-
-// Like ET1 but with a different name for a field
-type ET3 struct {
- A int
- Et2 *ET2
- DifferentNext *ET1
-}
-
-// Like ET1 but with a different type for a field
-type ET4 struct {
- A int
- Et2 float64
- Next int
-}
-
-func TestEncoderDecoder(t *testing.T) {
- b := new(bytes.Buffer)
- enc := NewEncoder(b)
- et1 := new(ET1)
- et1.A = 7
- et1.Et2 = new(ET2)
- err := enc.Encode(et1)
- if err != nil {
- t.Error("encoder fail:", err)
- }
- dec := NewDecoder(b)
- newEt1 := new(ET1)
- err = dec.Decode(newEt1)
- if err != nil {
- t.Fatal("error decoding ET1:", err)
- }
-
- if !reflect.DeepEqual(et1, newEt1) {
- t.Fatalf("invalid data for et1: expected %+v; got %+v", *et1, *newEt1)
- }
- if b.Len() != 0 {
- t.Error("not at eof;", b.Len(), "bytes left")
- }
-
- enc.Encode(et1)
- newEt1 = new(ET1)
- err = dec.Decode(newEt1)
- if err != nil {
- t.Fatal("round 2: error decoding ET1:", err)
- }
- if !reflect.DeepEqual(et1, newEt1) {
- t.Fatalf("round 2: invalid data for et1: expected %+v; got %+v", *et1, *newEt1)
- }
- if b.Len() != 0 {
- t.Error("round 2: not at eof;", b.Len(), "bytes left")
- }
-
- // Now test, with a running encoder/decoder pair, that we recognize a type mismatch.
- err = enc.Encode(et1)
- if err != nil {
- t.Error("round 3: encoder fail:", err)
- }
- newEt2 := new(ET2)
- err = dec.Decode(newEt2)
- if err == nil {
- t.Fatal("round 3: expected `bad type' error decoding ET2")
- }
-}
-
-// Run one value through the encoder/decoder, but use the wrong type.
-// Input is always an ET1; we compare it to whatever is under 'e'.
-func badTypeCheck(e interface{}, shouldFail bool, msg string, t *testing.T) {
- b := new(bytes.Buffer)
- enc := NewEncoder(b)
- et1 := new(ET1)
- et1.A = 7
- et1.Et2 = new(ET2)
- err := enc.Encode(et1)
- if err != nil {
- t.Error("encoder fail:", err)
- }
- dec := NewDecoder(b)
- err = dec.Decode(e)
- if shouldFail && err == nil {
- t.Error("expected error for", msg)
- }
- if !shouldFail && err != nil {
- t.Error("unexpected error for", msg, err)
- }
-}
-
-// Test that we recognize a bad type the first time.
-func TestWrongTypeDecoder(t *testing.T) {
- badTypeCheck(new(ET2), true, "no fields in common", t)
- badTypeCheck(new(ET3), false, "different name of field", t)
- badTypeCheck(new(ET4), true, "different type of field", t)
-}
-
-func corruptDataCheck(s string, err error, t *testing.T) {
- b := bytes.NewBufferString(s)
- dec := NewDecoder(b)
- err1 := dec.Decode(new(ET2))
- if err1 != err {
- t.Errorf("from %q expected error %s; got %s", s, err, err1)
- }
-}
-
-// Check that we survive bad data.
-func TestBadData(t *testing.T) {
- corruptDataCheck("", io.EOF, t)
- corruptDataCheck("\x7Fhi", io.ErrUnexpectedEOF, t)
- corruptDataCheck("\x03now is the time for all good men", errBadType, t)
-}
-
-// Types not supported by the Encoder.
-var unsupportedValues = []interface{}{
- make(chan int),
- func(a int) bool { return true },
-}
-
-func TestUnsupported(t *testing.T) {
- var b bytes.Buffer
- enc := NewEncoder(&b)
- for _, v := range unsupportedValues {
- err := enc.Encode(v)
- if err == nil {
- t.Errorf("expected error for %T; got none", v)
- }
- }
-}
-
-func encAndDec(in, out interface{}) error {
- b := new(bytes.Buffer)
- enc := NewEncoder(b)
- err := enc.Encode(in)
- if err != nil {
- return err
- }
- dec := NewDecoder(b)
- err = dec.Decode(out)
- if err != nil {
- return err
- }
- return nil
-}
-
-func TestTypeToPtrType(t *testing.T) {
- // Encode a T, decode a *T
- type Type0 struct {
- A int
- }
- t0 := Type0{7}
- t0p := new(Type0)
- if err := encAndDec(t0, t0p); err != nil {
- t.Error(err)
- }
-}
-
-func TestPtrTypeToType(t *testing.T) {
- // Encode a *T, decode a T
- type Type1 struct {
- A uint
- }
- t1p := &Type1{17}
- var t1 Type1
- if err := encAndDec(t1p, &t1); err != nil {
- t.Error(err)
- }
-}
-
-func TestTypeToPtrPtrPtrPtrType(t *testing.T) {
- type Type2 struct {
- A ****float64
- }
- t2 := Type2{}
- t2.A = new(***float64)
- *t2.A = new(**float64)
- **t2.A = new(*float64)
- ***t2.A = new(float64)
- ****t2.A = 27.4
- t2pppp := new(***Type2)
- if err := encAndDec(t2, t2pppp); err != nil {
- t.Fatal(err)
- }
- if ****(****t2pppp).A != ****t2.A {
- t.Errorf("wrong value after decode: %g not %g", ****(****t2pppp).A, ****t2.A)
- }
-}
-
-func TestSlice(t *testing.T) {
- type Type3 struct {
- A []string
- }
- t3p := &Type3{[]string{"hello", "world"}}
- var t3 Type3
- if err := encAndDec(t3, t3p); err != nil {
- t.Error(err)
- }
-}
-
-func TestValueError(t *testing.T) {
- // Encode a *T, decode a T
- type Type4 struct {
- A int
- }
- t4p := &Type4{3}
- var t4 Type4 // note: not a pointer.
- if err := encAndDec(t4p, t4); err == nil || strings.Index(err.Error(), "pointer") < 0 {
- t.Error("expected error about pointer; got", err)
- }
-}
-
-func TestArray(t *testing.T) {
- type Type5 struct {
- A [3]string
- B [3]byte
- }
- type Type6 struct {
- A [2]string // can't hold t5.a
- }
- t5 := Type5{[3]string{"hello", ",", "world"}, [3]byte{1, 2, 3}}
- var t5p Type5
- if err := encAndDec(t5, &t5p); err != nil {
- t.Error(err)
- }
- var t6 Type6
- if err := encAndDec(t5, &t6); err == nil {
- t.Error("should fail with mismatched array sizes")
- }
-}
-
-func TestRecursiveMapType(t *testing.T) {
- type recursiveMap map[string]recursiveMap
- r1 := recursiveMap{"A": recursiveMap{"B": nil, "C": nil}, "D": nil}
- r2 := make(recursiveMap)
- if err := encAndDec(r1, &r2); err != nil {
- t.Error(err)
- }
-}
-
-func TestRecursiveSliceType(t *testing.T) {
- type recursiveSlice []recursiveSlice
- r1 := recursiveSlice{0: recursiveSlice{0: nil}, 1: nil}
- r2 := make(recursiveSlice, 0)
- if err := encAndDec(r1, &r2); err != nil {
- t.Error(err)
- }
-}
-
-// Regression test for bug: must send zero values inside arrays
-func TestDefaultsInArray(t *testing.T) {
- type Type7 struct {
- B []bool
- I []int
- S []string
- F []float64
- }
- t7 := Type7{
- []bool{false, false, true},
- []int{0, 0, 1},
- []string{"hi", "", "there"},
- []float64{0, 0, 1},
- }
- var t7p Type7
- if err := encAndDec(t7, &t7p); err != nil {
- t.Error(err)
- }
-}
-
-var testInt int
-var testFloat32 float32
-var testString string
-var testSlice []string
-var testMap map[string]int
-var testArray [7]int
-
-type SingleTest struct {
- in interface{}
- out interface{}
- err string
-}
-
-var singleTests = []SingleTest{
- {17, &testInt, ""},
- {float32(17.5), &testFloat32, ""},
- {"bike shed", &testString, ""},
- {[]string{"bike", "shed", "paint", "color"}, &testSlice, ""},
- {map[string]int{"seven": 7, "twelve": 12}, &testMap, ""},
- {[7]int{4, 55, 0, 0, 0, 0, 0}, &testArray, ""}, // case that once triggered a bug
- {[7]int{4, 55, 1, 44, 22, 66, 1234}, &testArray, ""},
-
- // Decode errors
- {172, &testFloat32, "wrong type"},
-}
-
-func TestSingletons(t *testing.T) {
- b := new(bytes.Buffer)
- enc := NewEncoder(b)
- dec := NewDecoder(b)
- for _, test := range singleTests {
- b.Reset()
- err := enc.Encode(test.in)
- if err != nil {
- t.Errorf("error encoding %v: %s", test.in, err)
- continue
- }
- err = dec.Decode(test.out)
- switch {
- case err != nil && test.err == "":
- t.Errorf("error decoding %v: %s", test.in, err)
- continue
- case err == nil && test.err != "":
- t.Errorf("expected error decoding %v: %s", test.in, test.err)
- continue
- case err != nil && test.err != "":
- if strings.Index(err.Error(), test.err) < 0 {
- t.Errorf("wrong error decoding %v: wanted %s, got %v", test.in, test.err, err)
- }
- continue
- }
- // Get rid of the pointer in the rhs
- val := reflect.ValueOf(test.out).Elem().Interface()
- if !reflect.DeepEqual(test.in, val) {
- t.Errorf("decoding singleton: expected %v got %v", test.in, val)
- }
- }
-}
-
-func TestStructNonStruct(t *testing.T) {
- type Struct struct {
- A string
- }
- type NonStruct string
- s := Struct{"hello"}
- var sp Struct
- if err := encAndDec(s, &sp); err != nil {
- t.Error(err)
- }
- var ns NonStruct
- if err := encAndDec(s, &ns); err == nil {
- t.Error("should get error for struct/non-struct")
- } else if strings.Index(err.Error(), "type") < 0 {
- t.Error("for struct/non-struct expected type error; got", err)
- }
- // Now try the other way
- var nsp NonStruct
- if err := encAndDec(ns, &nsp); err != nil {
- t.Error(err)
- }
- if err := encAndDec(ns, &s); err == nil {
- t.Error("should get error for non-struct/struct")
- } else if strings.Index(err.Error(), "type") < 0 {
- t.Error("for non-struct/struct expected type error; got", err)
- }
-}
-
-type interfaceIndirectTestI interface {
- F() bool
-}
-
-type interfaceIndirectTestT struct{}
-
-func (this *interfaceIndirectTestT) F() bool {
- return true
-}
-
-// A version of a bug reported on golang-nuts. Also tests top-level
-// slice of interfaces. The issue was registering *T caused T to be
-// stored as the concrete type.
-func TestInterfaceIndirect(t *testing.T) {
- Register(&interfaceIndirectTestT{})
- b := new(bytes.Buffer)
- w := []interfaceIndirectTestI{&interfaceIndirectTestT{}}
- err := NewEncoder(b).Encode(w)
- if err != nil {
- t.Fatal("encode error:", err)
- }
-
- var r []interfaceIndirectTestI
- err = NewDecoder(b).Decode(&r)
- if err != nil {
- t.Fatal("decode error:", err)
- }
-}
-
-// Now follow various tests that decode into things that can't represent the
-// encoded value, all of which should be legal.
-
-// Also, when the ignored object contains an interface value, it may define
-// types. Make sure that skipping the value still defines the types by using
-// the encoder/decoder pair to send a value afterwards. If an interface
-// is sent, its type in the test is always NewType0, so this checks that the
-// encoder and decoder don't skew with respect to type definitions.
-
-type Struct0 struct {
- I interface{}
-}
-
-type NewType0 struct {
- S string
-}
-
-type ignoreTest struct {
- in, out interface{}
-}
-
-var ignoreTests = []ignoreTest{
- // Decode normal struct into an empty struct
- {&struct{ A int }{23}, &struct{}{}},
- // Decode normal struct into a nil.
- {&struct{ A int }{23}, nil},
- // Decode singleton string into a nil.
- {"hello, world", nil},
- // Decode singleton slice into a nil.
- {[]int{1, 2, 3, 4}, nil},
- // Decode struct containing an interface into a nil.
- {&Struct0{&NewType0{"value0"}}, nil},
- // Decode singleton slice of interfaces into a nil.
- {[]interface{}{"hi", &NewType0{"value1"}, 23}, nil},
-}
-
-func TestDecodeIntoNothing(t *testing.T) {
- Register(new(NewType0))
- for i, test := range ignoreTests {
- b := new(bytes.Buffer)
- enc := NewEncoder(b)
- err := enc.Encode(test.in)
- if err != nil {
- t.Errorf("%d: encode error %s:", i, err)
- continue
- }
- dec := NewDecoder(b)
- err = dec.Decode(test.out)
- if err != nil {
- t.Errorf("%d: decode error: %s", i, err)
- continue
- }
- // Now see if the encoder and decoder are in a consistent state.
- str := fmt.Sprintf("Value %d", i)
- err = enc.Encode(&NewType0{str})
- if err != nil {
- t.Fatalf("%d: NewType0 encode error: %s", i, err)
- }
- ns := new(NewType0)
- err = dec.Decode(ns)
- if err != nil {
- t.Fatalf("%d: NewType0 decode error: %s", i, err)
- }
- if ns.S != str {
- t.Fatalf("%d: expected %q got %q", i, str, ns.S)
- }
- }
-}
-
-// Another bug from golang-nuts, involving nested interfaces.
-type Bug0Outer struct {
- Bug0Field interface{}
-}
-
-type Bug0Inner struct {
- A int
-}
-
-func TestNestedInterfaces(t *testing.T) {
- var buf bytes.Buffer
- e := NewEncoder(&buf)
- d := NewDecoder(&buf)
- Register(new(Bug0Outer))
- Register(new(Bug0Inner))
- f := &Bug0Outer{&Bug0Outer{&Bug0Inner{7}}}
- var v interface{} = f
- err := e.Encode(&v)
- if err != nil {
- t.Fatal("Encode:", err)
- }
- err = d.Decode(&v)
- if err != nil {
- t.Fatal("Decode:", err)
- }
- // Make sure it decoded correctly.
- outer1, ok := v.(*Bug0Outer)
- if !ok {
- t.Fatalf("v not Bug0Outer: %T", v)
- }
- outer2, ok := outer1.Bug0Field.(*Bug0Outer)
- if !ok {
- t.Fatalf("v.Bug0Field not Bug0Outer: %T", outer1.Bug0Field)
- }
- inner, ok := outer2.Bug0Field.(*Bug0Inner)
- if !ok {
- t.Fatalf("v.Bug0Field.Bug0Field not Bug0Inner: %T", outer2.Bug0Field)
- }
- if inner.A != 7 {
- t.Fatalf("final value %d; expected %d", inner.A, 7)
- }
-}
-
-// The bugs keep coming. We forgot to send map subtypes before the map.
-
-type Bug1Elem struct {
- Name string
- Id int
-}
-
-type Bug1StructMap map[string]Bug1Elem
-
-func bug1EncDec(in Bug1StructMap, out *Bug1StructMap) error {
- return nil
-}
-
-func TestMapBug1(t *testing.T) {
- in := make(Bug1StructMap)
- in["val1"] = Bug1Elem{"elem1", 1}
- in["val2"] = Bug1Elem{"elem2", 2}
-
- b := new(bytes.Buffer)
- enc := NewEncoder(b)
- err := enc.Encode(in)
- if err != nil {
- t.Fatal("encode:", err)
- }
- dec := NewDecoder(b)
- out := make(Bug1StructMap)
- err = dec.Decode(&out)
- if err != nil {
- t.Fatal("decode:", err)
- }
- if !reflect.DeepEqual(in, out) {
- t.Errorf("mismatch: %v %v", in, out)
- }
-}
-
-func TestGobMapInterfaceEncode(t *testing.T) {
- m := map[string]interface{}{
- "up": uintptr(0),
- "i0": []int{-1},
- "i1": []int8{-1},
- "i2": []int16{-1},
- "i3": []int32{-1},
- "i4": []int64{-1},
- "u0": []uint{1},
- "u1": []uint8{1},
- "u2": []uint16{1},
- "u3": []uint32{1},
- "u4": []uint64{1},
- "f0": []float32{1},
- "f1": []float64{1},
- "c0": []complex64{complex(2, -2)},
- "c1": []complex128{complex(2, float64(-2))},
- "us": []uintptr{0},
- "bo": []bool{false},
- "st": []string{"s"},
- }
- buf := bytes.NewBuffer(nil)
- enc := NewEncoder(buf)
- err := enc.Encode(m)
- if err != nil {
- t.Errorf("encode map: %s", err)
- }
-}
-
-func TestSliceReusesMemory(t *testing.T) {
- buf := bytes.NewBuffer(nil)
- // Bytes
- {
- x := []byte("abcd")
- enc := NewEncoder(buf)
- err := enc.Encode(x)
- if err != nil {
- t.Errorf("bytes: encode: %s", err)
- }
- // Decode into y, which is big enough.
- y := []byte("ABCDE")
- addr := &y[0]
- dec := NewDecoder(buf)
- err = dec.Decode(&y)
- if err != nil {
- t.Fatal("bytes: decode:", err)
- }
- if !bytes.Equal(x, y) {
- t.Errorf("bytes: expected %q got %q\n", x, y)
- }
- if addr != &y[0] {
- t.Errorf("bytes: unnecessary reallocation")
- }
- }
- // general slice
- {
- x := []rune("abcd")
- enc := NewEncoder(buf)
- err := enc.Encode(x)
- if err != nil {
- t.Errorf("ints: encode: %s", err)
- }
- // Decode into y, which is big enough.
- y := []rune("ABCDE")
- addr := &y[0]
- dec := NewDecoder(buf)
- err = dec.Decode(&y)
- if err != nil {
- t.Fatal("ints: decode:", err)
- }
- if !reflect.DeepEqual(x, y) {
- t.Errorf("ints: expected %q got %q\n", x, y)
- }
- if addr != &y[0] {
- t.Errorf("ints: unnecessary reallocation")
- }
- }
-}
-
-// Used to crash: negative count in recvMessage.
-func TestBadCount(t *testing.T) {
- b := []byte{0xfb, 0xa5, 0x82, 0x2f, 0xca, 0x1}
- if err := NewDecoder(bytes.NewBuffer(b)).Decode(nil); err == nil {
- t.Error("expected error from bad count")
- } else if err.Error() != errBadCount.Error() {
- t.Error("expected bad count error; got", err)
- }
-}
-
-// Verify that sequential Decoders built on a single input will
-// succeed if the input implements ReadByte and there is no
-// type information in the stream.
-func TestSequentialDecoder(t *testing.T) {
- b := new(bytes.Buffer)
- enc := NewEncoder(b)
- const count = 10
- for i := 0; i < count; i++ {
- s := fmt.Sprintf("%d", i)
- if err := enc.Encode(s); err != nil {
- t.Error("encoder fail:", err)
- }
- }
- for i := 0; i < count; i++ {
- dec := NewDecoder(b)
- var s string
- if err := dec.Decode(&s); err != nil {
- t.Fatal("decoder fail:", err)
- }
- if s != fmt.Sprintf("%d", i) {
- t.Fatalf("decode expected %d got %s", i, s)
- }
- }
-}
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gob
-
-import "fmt"
-
-// Errors in decoding and encoding are handled using panic and recover.
-// Panics caused by user error (that is, everything except run-time panics
-// such as "index out of bounds" errors) do not leave the file that caused
-// them, but are instead turned into plain error returns. Encoding and
-// decoding functions and methods that do not return an error either use
-// panic to report an error or are guaranteed error-free.
-
-// A gobError is used to distinguish errors (panics) generated in this package.
-type gobError struct {
- err error
-}
-
-// errorf is like error_ but takes Printf-style arguments to construct an error.
-// It always prefixes the message with "gob: ".
-func errorf(format string, args ...interface{}) {
- error_(fmt.Errorf("gob: "+format, args...))
-}
-
-// error wraps the argument error and uses it as the argument to panic.
-func error_(err error) {
- panic(gobError{err})
-}
-
-// catchError is meant to be used as a deferred function to turn a panic(gobError) into a
-// plain error. It overwrites the error return of the function that deferred its call.
-func catchError(err *error) {
- if e := recover(); e != nil {
- *err = e.(gobError).err // Will re-panic if not one of our errors, such as a runtime error.
- }
- return
-}
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file contains tests of the GobEncoder/GobDecoder support.
-
-package gob
-
-import (
- "bytes"
- "errors"
- "fmt"
- "io"
- "strings"
- "testing"
-)
-
-// Types that implement the GobEncoder/Decoder interfaces.
-
-type ByteStruct struct {
- a byte // not an exported field
-}
-
-type StringStruct struct {
- s string // not an exported field
-}
-
-type ArrayStruct struct {
- a [8192]byte // not an exported field
-}
-
-type Gobber int
-
-type ValueGobber string // encodes with a value, decodes with a pointer.
-
-// The relevant methods
-
-func (g *ByteStruct) GobEncode() ([]byte, error) {
- b := make([]byte, 3)
- b[0] = g.a
- b[1] = g.a + 1
- b[2] = g.a + 2
- return b, nil
-}
-
-func (g *ByteStruct) GobDecode(data []byte) error {
- if g == nil {
- return errors.New("NIL RECEIVER")
- }
- // Expect N sequential-valued bytes.
- if len(data) == 0 {
- return io.EOF
- }
- g.a = data[0]
- for i, c := range data {
- if c != g.a+byte(i) {
- return errors.New("invalid data sequence")
- }
- }
- return nil
-}
-
-func (g *StringStruct) GobEncode() ([]byte, error) {
- return []byte(g.s), nil
-}
-
-func (g *StringStruct) GobDecode(data []byte) error {
- // Expect N sequential-valued bytes.
- if len(data) == 0 {
- return io.EOF
- }
- a := data[0]
- for i, c := range data {
- if c != a+byte(i) {
- return errors.New("invalid data sequence")
- }
- }
- g.s = string(data)
- return nil
-}
-
-func (a *ArrayStruct) GobEncode() ([]byte, error) {
- return a.a[:], nil
-}
-
-func (a *ArrayStruct) GobDecode(data []byte) error {
- if len(data) != len(a.a) {
- return errors.New("wrong length in array decode")
- }
- copy(a.a[:], data)
- return nil
-}
-
-func (g *Gobber) GobEncode() ([]byte, error) {
- return []byte(fmt.Sprintf("VALUE=%d", *g)), nil
-}
-
-func (g *Gobber) GobDecode(data []byte) error {
- _, err := fmt.Sscanf(string(data), "VALUE=%d", (*int)(g))
- return err
-}
-
-func (v ValueGobber) GobEncode() ([]byte, error) {
- return []byte(fmt.Sprintf("VALUE=%s", v)), nil
-}
-
-func (v *ValueGobber) GobDecode(data []byte) error {
- _, err := fmt.Sscanf(string(data), "VALUE=%s", (*string)(v))
- return err
-}
-
-// Structs that include GobEncodable fields.
-
-type GobTest0 struct {
- X int // guarantee we have something in common with GobTest*
- G *ByteStruct
-}
-
-type GobTest1 struct {
- X int // guarantee we have something in common with GobTest*
- G *StringStruct
-}
-
-type GobTest2 struct {
- X int // guarantee we have something in common with GobTest*
- G string // not a GobEncoder - should give us errors
-}
-
-type GobTest3 struct {
- X int // guarantee we have something in common with GobTest*
- G *Gobber
-}
-
-type GobTest4 struct {
- X int // guarantee we have something in common with GobTest*
- V ValueGobber
-}
-
-type GobTest5 struct {
- X int // guarantee we have something in common with GobTest*
- V *ValueGobber
-}
-
-type GobTestIgnoreEncoder struct {
- X int // guarantee we have something in common with GobTest*
-}
-
-type GobTestValueEncDec struct {
- X int // guarantee we have something in common with GobTest*
- G StringStruct // not a pointer.
-}
-
-type GobTestIndirectEncDec struct {
- X int // guarantee we have something in common with GobTest*
- G ***StringStruct // indirections to the receiver.
-}
-
-type GobTestArrayEncDec struct {
- X int // guarantee we have something in common with GobTest*
- A ArrayStruct // not a pointer.
-}
-
-type GobTestIndirectArrayEncDec struct {
- X int // guarantee we have something in common with GobTest*
- A ***ArrayStruct // indirections to a large receiver.
-}
-
-func TestGobEncoderField(t *testing.T) {
- b := new(bytes.Buffer)
- // First a field that's a structure.
- enc := NewEncoder(b)
- err := enc.Encode(GobTest0{17, &ByteStruct{'A'}})
- if err != nil {
- t.Fatal("encode error:", err)
- }
- dec := NewDecoder(b)
- x := new(GobTest0)
- err = dec.Decode(x)
- if err != nil {
- t.Fatal("decode error:", err)
- }
- if x.G.a != 'A' {
- t.Errorf("expected 'A' got %c", x.G.a)
- }
- // Now a field that's not a structure.
- b.Reset()
- gobber := Gobber(23)
- err = enc.Encode(GobTest3{17, &gobber})
- if err != nil {
- t.Fatal("encode error:", err)
- }
- y := new(GobTest3)
- err = dec.Decode(y)
- if err != nil {
- t.Fatal("decode error:", err)
- }
- if *y.G != 23 {
- t.Errorf("expected '23 got %d", *y.G)
- }
-}
-
-// Even though the field is a value, we can still take its address
-// and should be able to call the methods.
-func TestGobEncoderValueField(t *testing.T) {
- b := new(bytes.Buffer)
- // First a field that's a structure.
- enc := NewEncoder(b)
- err := enc.Encode(GobTestValueEncDec{17, StringStruct{"HIJKL"}})
- if err != nil {
- t.Fatal("encode error:", err)
- }
- dec := NewDecoder(b)
- x := new(GobTestValueEncDec)
- err = dec.Decode(x)
- if err != nil {
- t.Fatal("decode error:", err)
- }
- if x.G.s != "HIJKL" {
- t.Errorf("expected `HIJKL` got %s", x.G.s)
- }
-}
-
-// GobEncode/Decode should work even if the value is
-// more indirect than the receiver.
-func TestGobEncoderIndirectField(t *testing.T) {
- b := new(bytes.Buffer)
- // First a field that's a structure.
- enc := NewEncoder(b)
- s := &StringStruct{"HIJKL"}
- sp := &s
- err := enc.Encode(GobTestIndirectEncDec{17, &sp})
- if err != nil {
- t.Fatal("encode error:", err)
- }
- dec := NewDecoder(b)
- x := new(GobTestIndirectEncDec)
- err = dec.Decode(x)
- if err != nil {
- t.Fatal("decode error:", err)
- }
- if (***x.G).s != "HIJKL" {
- t.Errorf("expected `HIJKL` got %s", (***x.G).s)
- }
-}
-
-// Test with a large field with methods.
-func TestGobEncoderArrayField(t *testing.T) {
- b := new(bytes.Buffer)
- enc := NewEncoder(b)
- var a GobTestArrayEncDec
- a.X = 17
- for i := range a.A.a {
- a.A.a[i] = byte(i)
- }
- err := enc.Encode(a)
- if err != nil {
- t.Fatal("encode error:", err)
- }
- dec := NewDecoder(b)
- x := new(GobTestArrayEncDec)
- err = dec.Decode(x)
- if err != nil {
- t.Fatal("decode error:", err)
- }
- for i, v := range x.A.a {
- if v != byte(i) {
- t.Errorf("expected %x got %x", byte(i), v)
- break
- }
- }
-}
-
-// Test an indirection to a large field with methods.
-func TestGobEncoderIndirectArrayField(t *testing.T) {
- b := new(bytes.Buffer)
- enc := NewEncoder(b)
- var a GobTestIndirectArrayEncDec
- a.X = 17
- var array ArrayStruct
- ap := &array
- app := &ap
- a.A = &app
- for i := range array.a {
- array.a[i] = byte(i)
- }
- err := enc.Encode(a)
- if err != nil {
- t.Fatal("encode error:", err)
- }
- dec := NewDecoder(b)
- x := new(GobTestIndirectArrayEncDec)
- err = dec.Decode(x)
- if err != nil {
- t.Fatal("decode error:", err)
- }
- for i, v := range (***x.A).a {
- if v != byte(i) {
- t.Errorf("expected %x got %x", byte(i), v)
- break
- }
- }
-}
-
-// As long as the fields have the same name and implement the
-// interface, we can cross-connect them. Not sure it's useful
-// and may even be bad but it works and it's hard to prevent
-// without exposing the contents of the object, which would
-// defeat the purpose.
-func TestGobEncoderFieldsOfDifferentType(t *testing.T) {
- // first, string in field to byte in field
- b := new(bytes.Buffer)
- enc := NewEncoder(b)
- err := enc.Encode(GobTest1{17, &StringStruct{"ABC"}})
- if err != nil {
- t.Fatal("encode error:", err)
- }
- dec := NewDecoder(b)
- x := new(GobTest0)
- err = dec.Decode(x)
- if err != nil {
- t.Fatal("decode error:", err)
- }
- if x.G.a != 'A' {
- t.Errorf("expected 'A' got %c", x.G.a)
- }
- // now the other direction, byte in field to string in field
- b.Reset()
- err = enc.Encode(GobTest0{17, &ByteStruct{'X'}})
- if err != nil {
- t.Fatal("encode error:", err)
- }
- y := new(GobTest1)
- err = dec.Decode(y)
- if err != nil {
- t.Fatal("decode error:", err)
- }
- if y.G.s != "XYZ" {
- t.Fatalf("expected `XYZ` got %c", y.G.s)
- }
-}
-
-// Test that we can encode a value and decode into a pointer.
-func TestGobEncoderValueEncoder(t *testing.T) {
- // first, string in field to byte in field
- b := new(bytes.Buffer)
- enc := NewEncoder(b)
- err := enc.Encode(GobTest4{17, ValueGobber("hello")})
- if err != nil {
- t.Fatal("encode error:", err)
- }
- dec := NewDecoder(b)
- x := new(GobTest5)
- err = dec.Decode(x)
- if err != nil {
- t.Fatal("decode error:", err)
- }
- if *x.V != "hello" {
- t.Errorf("expected `hello` got %s", x.V)
- }
-}
-
-func TestGobEncoderFieldTypeError(t *testing.T) {
- // GobEncoder to non-decoder: error
- b := new(bytes.Buffer)
- enc := NewEncoder(b)
- err := enc.Encode(GobTest1{17, &StringStruct{"ABC"}})
- if err != nil {
- t.Fatal("encode error:", err)
- }
- dec := NewDecoder(b)
- x := &GobTest2{}
- err = dec.Decode(x)
- if err == nil {
- t.Fatal("expected decode error for mismatched fields (encoder to non-decoder)")
- }
- if strings.Index(err.Error(), "type") < 0 {
- t.Fatal("expected type error; got", err)
- }
- // Non-encoder to GobDecoder: error
- b.Reset()
- err = enc.Encode(GobTest2{17, "ABC"})
- if err != nil {
- t.Fatal("encode error:", err)
- }
- y := &GobTest1{}
- err = dec.Decode(y)
- if err == nil {
- t.Fatal("expected decode error for mismatched fields (non-encoder to decoder)")
- }
- if strings.Index(err.Error(), "type") < 0 {
- t.Fatal("expected type error; got", err)
- }
-}
-
-// Even though ByteStruct is a struct, it's treated as a singleton at the top level.
-func TestGobEncoderStructSingleton(t *testing.T) {
- b := new(bytes.Buffer)
- enc := NewEncoder(b)
- err := enc.Encode(&ByteStruct{'A'})
- if err != nil {
- t.Fatal("encode error:", err)
- }
- dec := NewDecoder(b)
- x := new(ByteStruct)
- err = dec.Decode(x)
- if err != nil {
- t.Fatal("decode error:", err)
- }
- if x.a != 'A' {
- t.Errorf("expected 'A' got %c", x.a)
- }
-}
-
-func TestGobEncoderNonStructSingleton(t *testing.T) {
- b := new(bytes.Buffer)
- enc := NewEncoder(b)
- err := enc.Encode(Gobber(1234))
- if err != nil {
- t.Fatal("encode error:", err)
- }
- dec := NewDecoder(b)
- var x Gobber
- err = dec.Decode(&x)
- if err != nil {
- t.Fatal("decode error:", err)
- }
- if x != 1234 {
- t.Errorf("expected 1234 got %d", x)
- }
-}
-
-func TestGobEncoderIgnoreStructField(t *testing.T) {
- b := new(bytes.Buffer)
- // First a field that's a structure.
- enc := NewEncoder(b)
- err := enc.Encode(GobTest0{17, &ByteStruct{'A'}})
- if err != nil {
- t.Fatal("encode error:", err)
- }
- dec := NewDecoder(b)
- x := new(GobTestIgnoreEncoder)
- err = dec.Decode(x)
- if err != nil {
- t.Fatal("decode error:", err)
- }
- if x.X != 17 {
- t.Errorf("expected 17 got %c", x.X)
- }
-}
-
-func TestGobEncoderIgnoreNonStructField(t *testing.T) {
- b := new(bytes.Buffer)
- // First a field that's a structure.
- enc := NewEncoder(b)
- gobber := Gobber(23)
- err := enc.Encode(GobTest3{17, &gobber})
- if err != nil {
- t.Fatal("encode error:", err)
- }
- dec := NewDecoder(b)
- x := new(GobTestIgnoreEncoder)
- err = dec.Decode(x)
- if err != nil {
- t.Fatal("decode error:", err)
- }
- if x.X != 17 {
- t.Errorf("expected 17 got %c", x.X)
- }
-}
-
-func TestGobEncoderIgnoreNilEncoder(t *testing.T) {
- b := new(bytes.Buffer)
- // First a field that's a structure.
- enc := NewEncoder(b)
- err := enc.Encode(GobTest0{X: 18}) // G is nil
- if err != nil {
- t.Fatal("encode error:", err)
- }
- dec := NewDecoder(b)
- x := new(GobTest0)
- err = dec.Decode(x)
- if err != nil {
- t.Fatal("decode error:", err)
- }
- if x.X != 18 {
- t.Errorf("expected x.X = 18, got %v", x.X)
- }
- if x.G != nil {
- t.Errorf("expected x.G = nil, got %v", x.G)
- }
-}
-
-type gobDecoderBug0 struct {
- foo, bar string
-}
-
-func (br *gobDecoderBug0) String() string {
- return br.foo + "-" + br.bar
-}
-
-func (br *gobDecoderBug0) GobEncode() ([]byte, error) {
- return []byte(br.String()), nil
-}
-
-func (br *gobDecoderBug0) GobDecode(b []byte) error {
- br.foo = "foo"
- br.bar = "bar"
- return nil
-}
-
-// This was a bug: the receiver has a different indirection level
-// than the variable.
-func TestGobEncoderExtraIndirect(t *testing.T) {
- gdb := &gobDecoderBug0{"foo", "bar"}
- buf := new(bytes.Buffer)
- e := NewEncoder(buf)
- if err := e.Encode(gdb); err != nil {
- t.Fatalf("encode: %v", err)
- }
- d := NewDecoder(buf)
- var got *gobDecoderBug0
- if err := d.Decode(&got); err != nil {
- t.Fatalf("decode: %v", err)
- }
- if got.foo != gdb.foo || got.bar != gdb.bar {
- t.Errorf("got = %q, want %q", got, gdb)
- }
-}
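The test types above exercise the GobEncoder/GobDecoder pattern; the interface documentation further down also suggests keeping a custom encoding stable as the software evolves, for instance by versioning it. A compact, hypothetical implementation in that spirit (it assumes the bytes, encoding/binary, errors and io imports; it is not part of the change):

// Hypothetical type: a Point that controls its own wire format and
// prefixes it with a version byte so the format can evolve later.
type Point struct{ x, y int32 }

func (p Point) GobEncode() ([]byte, error) {
	buf := new(bytes.Buffer)
	buf.WriteByte(1) // encoding version
	err := binary.Write(buf, binary.BigEndian, []int32{p.x, p.y})
	return buf.Bytes(), err
}

func (p *Point) GobDecode(data []byte) error {
	if len(data) == 0 {
		return io.EOF
	}
	if data[0] != 1 {
		return errors.New("Point: unknown encoding version")
	}
	coords := make([]int32, 2)
	if err := binary.Read(bytes.NewBuffer(data[1:]), binary.BigEndian, coords); err != nil {
		return err
	}
	p.x, p.y = coords[0], coords[1]
	return nil
}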
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gob
-
-import (
- "bytes"
- "fmt"
- "io"
- "os"
- "runtime"
- "testing"
-)
-
-type Bench struct {
- A int
- B float64
- C string
- D []byte
-}
-
-func benchmarkEndToEnd(r io.Reader, w io.Writer, b *testing.B) {
- b.StopTimer()
- enc := NewEncoder(w)
- dec := NewDecoder(r)
- bench := &Bench{7, 3.2, "now is the time", []byte("for all good men")}
- b.StartTimer()
- for i := 0; i < b.N; i++ {
- if enc.Encode(bench) != nil {
- panic("encode error")
- }
- if dec.Decode(bench) != nil {
- panic("decode error")
- }
- }
-}
-
-func BenchmarkEndToEndPipe(b *testing.B) {
- r, w, err := os.Pipe()
- if err != nil {
- panic("can't get pipe:" + err.Error())
- }
- benchmarkEndToEnd(r, w, b)
-}
-
-func BenchmarkEndToEndByteBuffer(b *testing.B) {
- var buf bytes.Buffer
- benchmarkEndToEnd(&buf, &buf, b)
-}
-
-func TestCountEncodeMallocs(t *testing.T) {
- var buf bytes.Buffer
- enc := NewEncoder(&buf)
- bench := &Bench{7, 3.2, "now is the time", []byte("for all good men")}
- runtime.UpdateMemStats()
- mallocs := 0 - runtime.MemStats.Mallocs
- const count = 1000
- for i := 0; i < count; i++ {
- err := enc.Encode(bench)
- if err != nil {
- t.Fatal("encode:", err)
- }
- }
- runtime.UpdateMemStats()
- mallocs += runtime.MemStats.Mallocs
- fmt.Printf("mallocs per encode of type Bench: %d\n", mallocs/count)
-}
-
-func TestCountDecodeMallocs(t *testing.T) {
- var buf bytes.Buffer
- enc := NewEncoder(&buf)
- bench := &Bench{7, 3.2, "now is the time", []byte("for all good men")}
- const count = 1000
- for i := 0; i < count; i++ {
- err := enc.Encode(bench)
- if err != nil {
- t.Fatal("encode:", err)
- }
- }
- dec := NewDecoder(&buf)
- runtime.UpdateMemStats()
- mallocs := 0 - runtime.MemStats.Mallocs
- for i := 0; i < count; i++ {
- *bench = Bench{}
- err := dec.Decode(&bench)
- if err != nil {
- t.Fatal("decode:", err)
- }
- }
- runtime.UpdateMemStats()
- mallocs += runtime.MemStats.Mallocs
- fmt.Printf("mallocs per decode of type Bench: %d\n", mallocs/count)
-}
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gob
-
-import (
- "errors"
- "fmt"
- "os"
- "reflect"
- "sync"
- "unicode"
- "utf8"
-)
-
-// userTypeInfo stores the information associated with a type the user has handed
-// to the package. It's computed once and stored in a map keyed by reflection
-// type.
-type userTypeInfo struct {
- user reflect.Type // the type the user handed us
- base reflect.Type // the base type after all indirections
- indir int // number of indirections to reach the base type
- isGobEncoder bool // does the type implement GobEncoder?
- isGobDecoder bool // does the type implement GobDecoder?
- encIndir int8 // number of indirections to reach the receiver type; may be negative
- decIndir int8 // number of indirections to reach the receiver type; may be negative
-}
-
-var (
- // Protected by an RWMutex because we read it a lot and write
- // it only when we see a new type, typically when compiling.
- userTypeLock sync.RWMutex
- userTypeCache = make(map[reflect.Type]*userTypeInfo)
-)
-
-// validType returns, and saves, the information associated with user-provided type rt.
-// If the user type is not valid, err will be non-nil. To be used when the error handler
-// is not set up.
-func validUserType(rt reflect.Type) (ut *userTypeInfo, err error) {
- userTypeLock.RLock()
- ut = userTypeCache[rt]
- userTypeLock.RUnlock()
- if ut != nil {
- return
- }
- // Now set the value under the write lock.
- userTypeLock.Lock()
- defer userTypeLock.Unlock()
- if ut = userTypeCache[rt]; ut != nil {
- // Lost the race; not a problem.
- return
- }
- ut = new(userTypeInfo)
- ut.base = rt
- ut.user = rt
- // A type that is just a cycle of pointers (such as type T *T) cannot
- // be represented in gobs, which need some concrete data. We use a
- // cycle detection algorithm from Knuth, Vol 2, Section 3.1, Ex 6,
- // pp 539-540. As we step through indirections, run another type at
- // half speed. If they meet up, there's a cycle.
- slowpoke := ut.base // walks half as fast as ut.base
- for {
- pt := ut.base
- if pt.Kind() != reflect.Ptr {
- break
- }
- ut.base = pt.Elem()
- if ut.base == slowpoke { // ut.base lapped slowpoke
- // recursive pointer type.
- return nil, errors.New("can't represent recursive pointer type " + ut.base.String())
- }
- if ut.indir%2 == 0 {
- slowpoke = slowpoke.Elem()
- }
- ut.indir++
- }
- ut.isGobEncoder, ut.encIndir = implementsInterface(ut.user, gobEncoderInterfaceType)
- ut.isGobDecoder, ut.decIndir = implementsInterface(ut.user, gobDecoderInterfaceType)
- userTypeCache[rt] = ut
- return
-}
-
-var (
- gobEncoderInterfaceType = reflect.TypeOf((*GobEncoder)(nil)).Elem()
- gobDecoderInterfaceType = reflect.TypeOf((*GobDecoder)(nil)).Elem()
-)
-
-// implementsInterface reports whether the type implements the
-// gobEncoder/gobDecoder interface.
-// It also returns the number of indirections required to get to the
-// implementation.
-func implementsInterface(typ, gobEncDecType reflect.Type) (success bool, indir int8) {
- if typ == nil {
- return
- }
- rt := typ
- // The type might be a pointer and we need to keep
- // dereferencing to the base type until we find an implementation.
- for {
- if rt.Implements(gobEncDecType) {
- return true, indir
- }
- if p := rt; p.Kind() == reflect.Ptr {
- indir++
- if indir > 100 { // insane number of indirections
- return false, 0
- }
- rt = p.Elem()
- continue
- }
- break
- }
- // No luck yet, but if this is a base type (non-pointer), the pointer might satisfy.
- if typ.Kind() != reflect.Ptr {
- // Not a pointer, but does the pointer work?
- if reflect.PtrTo(typ).Implements(gobEncDecType) {
- return true, -1
- }
- }
- return false, 0
-}
-
-// userType returns, and saves, the information associated with user-provided type rt.
-// If the user type is not valid, it calls error.
-func userType(rt reflect.Type) *userTypeInfo {
- ut, err := validUserType(rt)
- if err != nil {
- error_(err)
- }
- return ut
-}
-
-// A typeId represents a gob Type as an integer that can be passed on the wire.
-// Internally, typeIds are used as keys to a map to recover the underlying type info.
-type typeId int32
-
-var nextId typeId // incremented for each new type we build
-var typeLock sync.Mutex // set while building a type
-const firstUserId = 64 // lowest id number granted to user
-
-type gobType interface {
- id() typeId
- setId(id typeId)
- name() string
- string() string // not public; only for debugging
- safeString(seen map[typeId]bool) string
-}
-
-var types = make(map[reflect.Type]gobType)
-var idToType = make(map[typeId]gobType)
-var builtinIdToType map[typeId]gobType // set in init() after builtins are established
-
-func setTypeId(typ gobType) {
- nextId++
- typ.setId(nextId)
- idToType[nextId] = typ
-}
-
-func (t typeId) gobType() gobType {
- if t == 0 {
- return nil
- }
- return idToType[t]
-}
-
-// string returns the string representation of the type associated with the typeId.
-func (t typeId) string() string {
- if t.gobType() == nil {
- return "<nil>"
- }
- return t.gobType().string()
-}
-
-// Name returns the name of the type associated with the typeId.
-func (t typeId) name() string {
- if t.gobType() == nil {
- return "<nil>"
- }
- return t.gobType().name()
-}
-
-// Common elements of all types.
-type CommonType struct {
- Name string
- Id typeId
-}
-
-func (t *CommonType) id() typeId { return t.Id }
-
-func (t *CommonType) setId(id typeId) { t.Id = id }
-
-func (t *CommonType) string() string { return t.Name }
-
-func (t *CommonType) safeString(seen map[typeId]bool) string {
- return t.Name
-}
-
-func (t *CommonType) name() string { return t.Name }
-
-// Create and check predefined types
-// The string for tBytes is "bytes" not "[]byte" to signify its specialness.
-
-var (
- // Primordial types, needed during initialization.
- // Always passed as pointers so the interface{} type
- // goes through without losing its interfaceness.
- tBool = bootstrapType("bool", (*bool)(nil), 1)
- tInt = bootstrapType("int", (*int)(nil), 2)
- tUint = bootstrapType("uint", (*uint)(nil), 3)
- tFloat = bootstrapType("float", (*float64)(nil), 4)
- tBytes = bootstrapType("bytes", (*[]byte)(nil), 5)
- tString = bootstrapType("string", (*string)(nil), 6)
- tComplex = bootstrapType("complex", (*complex128)(nil), 7)
- tInterface = bootstrapType("interface", (*interface{})(nil), 8)
- // Reserve some Ids for compatible expansion
- tReserved7 = bootstrapType("_reserved1", (*struct{ r7 int })(nil), 9)
- tReserved6 = bootstrapType("_reserved1", (*struct{ r6 int })(nil), 10)
- tReserved5 = bootstrapType("_reserved1", (*struct{ r5 int })(nil), 11)
- tReserved4 = bootstrapType("_reserved1", (*struct{ r4 int })(nil), 12)
- tReserved3 = bootstrapType("_reserved1", (*struct{ r3 int })(nil), 13)
- tReserved2 = bootstrapType("_reserved1", (*struct{ r2 int })(nil), 14)
- tReserved1 = bootstrapType("_reserved1", (*struct{ r1 int })(nil), 15)
-)
-
-// Predefined because it's needed by the Decoder
-var tWireType = mustGetTypeInfo(reflect.TypeOf(wireType{})).id
-var wireTypeUserInfo *userTypeInfo // userTypeInfo of (*wireType)
-
-func init() {
- // Some magic numbers to make sure there are no surprises.
- checkId(16, tWireType)
- checkId(17, mustGetTypeInfo(reflect.TypeOf(arrayType{})).id)
- checkId(18, mustGetTypeInfo(reflect.TypeOf(CommonType{})).id)
- checkId(19, mustGetTypeInfo(reflect.TypeOf(sliceType{})).id)
- checkId(20, mustGetTypeInfo(reflect.TypeOf(structType{})).id)
- checkId(21, mustGetTypeInfo(reflect.TypeOf(fieldType{})).id)
- checkId(23, mustGetTypeInfo(reflect.TypeOf(mapType{})).id)
-
- builtinIdToType = make(map[typeId]gobType)
- for k, v := range idToType {
- builtinIdToType[k] = v
- }
-
- // Move the id space upwards to allow for growth in the predefined world
- // without breaking existing files.
- if nextId > firstUserId {
- panic(fmt.Sprintln("nextId too large:", nextId))
- }
- nextId = firstUserId
- registerBasics()
- wireTypeUserInfo = userType(reflect.TypeOf((*wireType)(nil)))
-}
-
-// Array type
-type arrayType struct {
- CommonType
- Elem typeId
- Len int
-}
-
-func newArrayType(name string) *arrayType {
- a := &arrayType{CommonType{Name: name}, 0, 0}
- return a
-}
-
-func (a *arrayType) init(elem gobType, len int) {
- // Set our type id before evaluating the element's, in case it's our own.
- setTypeId(a)
- a.Elem = elem.id()
- a.Len = len
-}
-
-func (a *arrayType) safeString(seen map[typeId]bool) string {
- if seen[a.Id] {
- return a.Name
- }
- seen[a.Id] = true
- return fmt.Sprintf("[%d]%s", a.Len, a.Elem.gobType().safeString(seen))
-}
-
-func (a *arrayType) string() string { return a.safeString(make(map[typeId]bool)) }
-
-// GobEncoder type (something that implements the GobEncoder interface)
-type gobEncoderType struct {
- CommonType
-}
-
-func newGobEncoderType(name string) *gobEncoderType {
- g := &gobEncoderType{CommonType{Name: name}}
- setTypeId(g)
- return g
-}
-
-func (g *gobEncoderType) safeString(seen map[typeId]bool) string {
- return g.Name
-}
-
-func (g *gobEncoderType) string() string { return g.Name }
-
-// Map type
-type mapType struct {
- CommonType
- Key typeId
- Elem typeId
-}
-
-func newMapType(name string) *mapType {
- m := &mapType{CommonType{Name: name}, 0, 0}
- return m
-}
-
-func (m *mapType) init(key, elem gobType) {
- // Set our type id before evaluating the element's, in case it's our own.
- setTypeId(m)
- m.Key = key.id()
- m.Elem = elem.id()
-}
-
-func (m *mapType) safeString(seen map[typeId]bool) string {
- if seen[m.Id] {
- return m.Name
- }
- seen[m.Id] = true
- key := m.Key.gobType().safeString(seen)
- elem := m.Elem.gobType().safeString(seen)
- return fmt.Sprintf("map[%s]%s", key, elem)
-}
-
-func (m *mapType) string() string { return m.safeString(make(map[typeId]bool)) }
-
-// Slice type
-type sliceType struct {
- CommonType
- Elem typeId
-}
-
-func newSliceType(name string) *sliceType {
- s := &sliceType{CommonType{Name: name}, 0}
- return s
-}
-
-func (s *sliceType) init(elem gobType) {
- // Set our type id before evaluating the element's, in case it's our own.
- setTypeId(s)
- s.Elem = elem.id()
-}
-
-func (s *sliceType) safeString(seen map[typeId]bool) string {
- if seen[s.Id] {
- return s.Name
- }
- seen[s.Id] = true
- return fmt.Sprintf("[]%s", s.Elem.gobType().safeString(seen))
-}
-
-func (s *sliceType) string() string { return s.safeString(make(map[typeId]bool)) }
-
-// Struct type
-type fieldType struct {
- Name string
- Id typeId
-}
-
-type structType struct {
- CommonType
- Field []*fieldType
-}
-
-func (s *structType) safeString(seen map[typeId]bool) string {
- if s == nil {
- return "<nil>"
- }
- if _, ok := seen[s.Id]; ok {
- return s.Name
- }
- seen[s.Id] = true
- str := s.Name + " = struct { "
- for _, f := range s.Field {
- str += fmt.Sprintf("%s %s; ", f.Name, f.Id.gobType().safeString(seen))
- }
- str += "}"
- return str
-}
-
-func (s *structType) string() string { return s.safeString(make(map[typeId]bool)) }
-
-func newStructType(name string) *structType {
- s := &structType{CommonType{Name: name}, nil}
- // For historical reasons we set the id here rather than init.
- // See the comment in newTypeObject for details.
- setTypeId(s)
- return s
-}
-
-// newTypeObject allocates a gobType for the reflection type rt.
-// Unless ut represents a GobEncoder, rt should be the base type
-// of ut.
-// This is only called from the encoding side. The decoding side
-// works through typeIds and userTypeInfos alone.
-func newTypeObject(name string, ut *userTypeInfo, rt reflect.Type) (gobType, error) {
- // Does this type implement GobEncoder?
- if ut.isGobEncoder {
- return newGobEncoderType(name), nil
- }
- var err error
- var type0, type1 gobType
- defer func() {
- if err != nil {
- delete(types, rt)
- }
- }()
- // Install the top-level type before the subtypes (e.g. struct before
- // fields) so recursive types can be constructed safely.
- switch t := rt; t.Kind() {
- // All basic types are easy: they are predefined.
- case reflect.Bool:
- return tBool.gobType(), nil
-
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return tInt.gobType(), nil
-
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- return tUint.gobType(), nil
-
- case reflect.Float32, reflect.Float64:
- return tFloat.gobType(), nil
-
- case reflect.Complex64, reflect.Complex128:
- return tComplex.gobType(), nil
-
- case reflect.String:
- return tString.gobType(), nil
-
- case reflect.Interface:
- return tInterface.gobType(), nil
-
- case reflect.Array:
- at := newArrayType(name)
- types[rt] = at
- type0, err = getBaseType("", t.Elem())
- if err != nil {
- return nil, err
- }
- // Historical aside:
- // For arrays, maps, and slices, we set the type id after the elements
- // are constructed. This is to retain the order of type id allocation after
- // a fix made to handle recursive types, which changed the order in
- // which types are built. Delaying the setting in this way preserves
- // type ids while allowing recursive types to be described. Structs,
- // done below, were already handling recursion correctly so they
- // assign the top-level id before those of the field.
- at.init(type0, t.Len())
- return at, nil
-
- case reflect.Map:
- mt := newMapType(name)
- types[rt] = mt
- type0, err = getBaseType("", t.Key())
- if err != nil {
- return nil, err
- }
- type1, err = getBaseType("", t.Elem())
- if err != nil {
- return nil, err
- }
- mt.init(type0, type1)
- return mt, nil
-
- case reflect.Slice:
- // []byte == []uint8 is a special case
- if t.Elem().Kind() == reflect.Uint8 {
- return tBytes.gobType(), nil
- }
- st := newSliceType(name)
- types[rt] = st
- type0, err = getBaseType(t.Elem().Name(), t.Elem())
- if err != nil {
- return nil, err
- }
- st.init(type0)
- return st, nil
-
- case reflect.Struct:
- st := newStructType(name)
- types[rt] = st
- idToType[st.id()] = st
- for i := 0; i < t.NumField(); i++ {
- f := t.Field(i)
- if !isExported(f.Name) {
- continue
- }
- typ := userType(f.Type).base
- tname := typ.Name()
- if tname == "" {
- t := userType(f.Type).base
- tname = t.String()
- }
- gt, err := getBaseType(tname, f.Type)
- if err != nil {
- return nil, err
- }
- st.Field = append(st.Field, &fieldType{f.Name, gt.id()})
- }
- return st, nil
-
- default:
- return nil, errors.New("gob NewTypeObject can't handle type: " + rt.String())
- }
- return nil, nil
-}
-
-// isExported reports whether this is an exported - upper case - name.
-func isExported(name string) bool {
- rune, _ := utf8.DecodeRuneInString(name)
- return unicode.IsUpper(rune)
-}
-
-// getBaseType returns the Gob type describing the given reflect.Type's base type.
-// typeLock must be held.
-func getBaseType(name string, rt reflect.Type) (gobType, error) {
- ut := userType(rt)
- return getType(name, ut, ut.base)
-}
-
-// getType returns the Gob type describing the given reflect.Type.
-// Should be called only when handling GobEncoders/Decoders,
-// which may be pointers. All other types are handled through the
-// base type, never a pointer.
-// typeLock must be held.
-func getType(name string, ut *userTypeInfo, rt reflect.Type) (gobType, error) {
- typ, present := types[rt]
- if present {
- return typ, nil
- }
- typ, err := newTypeObject(name, ut, rt)
- if err == nil {
- types[rt] = typ
- }
- return typ, err
-}
-
-func checkId(want, got typeId) {
- if want != got {
- fmt.Fprintf(os.Stderr, "checkId: %d should be %d\n", int(got), int(want))
- panic("bootstrap type wrong id: " + got.name() + " " + got.string() + " not " + want.string())
- }
-}
-
-// used for building the basic types; called only from init(). the incoming
-// interface always refers to a pointer.
-func bootstrapType(name string, e interface{}, expect typeId) typeId {
- rt := reflect.TypeOf(e).Elem()
- _, present := types[rt]
- if present {
- panic("bootstrap type already present: " + name + ", " + rt.String())
- }
- typ := &CommonType{Name: name}
- types[rt] = typ
- setTypeId(typ)
- checkId(expect, nextId)
- userType(rt) // might as well cache it now
- return nextId
-}
-
-// Representation of the information we send and receive about this type.
-// Each value we send is preceded by its type definition: an encoded int.
-// However, the very first time we send the value, we first send the pair
-// (-id, wireType).
-// For bootstrapping purposes, we assume that the recipient knows how
-// to decode a wireType; it is exactly the wireType struct here, interpreted
-// using the gob rules for sending a structure, except that we assume the
-// ids for wireType and structType etc. are known. The relevant pieces
-// are built in encode.go's init() function.
-// To maintain binary compatibility, if you extend this type, always put
-// the new fields last.
-type wireType struct {
- ArrayT *arrayType
- SliceT *sliceType
- StructT *structType
- MapT *mapType
- GobEncoderT *gobEncoderType
-}
-
-func (w *wireType) string() string {
- const unknown = "unknown type"
- if w == nil {
- return unknown
- }
- switch {
- case w.ArrayT != nil:
- return w.ArrayT.Name
- case w.SliceT != nil:
- return w.SliceT.Name
- case w.StructT != nil:
- return w.StructT.Name
- case w.MapT != nil:
- return w.MapT.Name
- case w.GobEncoderT != nil:
- return w.GobEncoderT.Name
- }
- return unknown
-}
-
-type typeInfo struct {
- id typeId
- encoder *encEngine
- wire *wireType
-}
-
-var typeInfoMap = make(map[reflect.Type]*typeInfo) // protected by typeLock
-
-// typeLock must be held.
-func getTypeInfo(ut *userTypeInfo) (*typeInfo, error) {
- rt := ut.base
- if ut.isGobEncoder {
- // We want the user type, not the base type.
- rt = ut.user
- }
- info, ok := typeInfoMap[rt]
- if ok {
- return info, nil
- }
- info = new(typeInfo)
- gt, err := getBaseType(rt.Name(), rt)
- if err != nil {
- return nil, err
- }
- info.id = gt.id()
-
- if ut.isGobEncoder {
- userType, err := getType(rt.Name(), ut, rt)
- if err != nil {
- return nil, err
- }
- info.wire = &wireType{GobEncoderT: userType.id().gobType().(*gobEncoderType)}
- typeInfoMap[ut.user] = info
- return info, nil
- }
-
- t := info.id.gobType()
- switch typ := rt; typ.Kind() {
- case reflect.Array:
- info.wire = &wireType{ArrayT: t.(*arrayType)}
- case reflect.Map:
- info.wire = &wireType{MapT: t.(*mapType)}
- case reflect.Slice:
- // []byte == []uint8 is a special case handled separately
- if typ.Elem().Kind() != reflect.Uint8 {
- info.wire = &wireType{SliceT: t.(*sliceType)}
- }
- case reflect.Struct:
- info.wire = &wireType{StructT: t.(*structType)}
- }
- typeInfoMap[rt] = info
- return info, nil
-}
-
-// Called only when a panic is acceptable and unexpected.
-func mustGetTypeInfo(rt reflect.Type) *typeInfo {
- t, err := getTypeInfo(userType(rt))
- if err != nil {
- panic("getTypeInfo: " + err.Error())
- }
- return t
-}
-
-// GobEncoder is the interface describing data that provides its own
-// representation for encoding values for transmission to a GobDecoder.
-// A type that implements GobEncoder and GobDecoder has complete
-// control over the representation of its data and may therefore
-// contain things such as private fields, channels, and functions,
-// which are not usually transmissible in gob streams.
-//
-// Note: Since gobs can be stored permanently, it is good design
-// to guarantee the encoding used by a GobEncoder is stable as the
-// software evolves. For instance, it might make sense for GobEncode
-// to include a version number in the encoding.
-type GobEncoder interface {
- // GobEncode returns a byte slice representing the encoding of the
- // receiver for transmission to a GobDecoder, usually of the same
- // concrete type.
- GobEncode() ([]byte, error)
-}
-
-// GobDecoder is the interface describing data that provides its own
-// routine for decoding transmitted values sent by a GobEncoder.
-type GobDecoder interface {
- // GobDecode overwrites the receiver, which must be a pointer,
- // with the value represented by the byte slice, which was written
- // by GobEncode, usually for the same concrete type.
- GobDecode([]byte) error
-}
-
-var (
- nameToConcreteType = make(map[string]reflect.Type)
- concreteTypeToName = make(map[reflect.Type]string)
-)
-
-// RegisterName is like Register but uses the provided name rather than the
-// type's default.
-func RegisterName(name string, value interface{}) {
- if name == "" {
- // reserved for nil
- panic("attempt to register empty name")
- }
- base := userType(reflect.TypeOf(value)).base
- // Check for incompatible duplicates.
- if t, ok := nameToConcreteType[name]; ok && t != base {
- panic("gob: registering duplicate types for " + name)
- }
- if n, ok := concreteTypeToName[base]; ok && n != name {
- panic("gob: registering duplicate names for " + base.String())
- }
- // Store the name and type provided by the user....
- nameToConcreteType[name] = reflect.TypeOf(value)
- // but the flattened type in the type table, since that's what decode needs.
- concreteTypeToName[base] = name
-}
-
-// Register records a type, identified by a value for that type, under its
-// internal type name. That name will identify the concrete type of a value
-// sent or received as an interface variable. Only types that will be
-// transferred as implementations of interface values need to be registered.
-// Expecting to be used only during initialization, it panics if the mapping
-// between types and names is not a bijection.
-func Register(value interface{}) {
- // Default to printed representation for unnamed types
- rt := reflect.TypeOf(value)
- name := rt.String()
-
- // But for named types (or pointers to them), qualify with import path.
- // Dereference one pointer looking for a named type.
- star := ""
- if rt.Name() == "" {
- if pt := rt; pt.Kind() == reflect.Ptr {
- star = "*"
- rt = pt
- }
- }
- if rt.Name() != "" {
- if rt.PkgPath() == "" {
- name = star + rt.Name()
- } else {
- name = star + rt.PkgPath() + "." + rt.Name()
- }
- }
-
- RegisterName(name, value)
-}
-
-func registerBasics() {
- Register(int(0))
- Register(int8(0))
- Register(int16(0))
- Register(int32(0))
- Register(int64(0))
- Register(uint(0))
- Register(uint8(0))
- Register(uint16(0))
- Register(uint32(0))
- Register(uint64(0))
- Register(float32(0))
- Register(float64(0))
- Register(complex64(0i))
- Register(complex128(0i))
- Register(uintptr(0))
- Register(false)
- Register("")
- Register([]byte(nil))
- Register([]int(nil))
- Register([]int8(nil))
- Register([]int16(nil))
- Register([]int32(nil))
- Register([]int64(nil))
- Register([]uint(nil))
- Register([]uint8(nil))
- Register([]uint16(nil))
- Register([]uint32(nil))
- Register([]uint64(nil))
- Register([]float32(nil))
- Register([]float64(nil))
- Register([]complex64(nil))
- Register([]complex128(nil))
- Register([]uintptr(nil))
- Register([]bool(nil))
- Register([]string(nil))
-}
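Register and RegisterName above matter only for concrete values sent through interface-typed fields; a sketch with hypothetical caller types:

// Hypothetical types, not part of the change: only concrete types that
// travel inside interface values need to be registered.
type Shape interface{ Area() float64 }

type Circle struct{ Radius float64 }

func (c Circle) Area() float64 { return 3.14159 * c.Radius * c.Radius }

func init() {
	// From client code this would be gob.Register(Circle{}). Registration
	// panics if the name<->type mapping is not a bijection.
	Register(Circle{})
}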
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gob
-
-import (
- "reflect"
- "testing"
-)
-
-type typeT struct {
- id typeId
- str string
-}
-
-var basicTypes = []typeT{
- {tBool, "bool"},
- {tInt, "int"},
- {tUint, "uint"},
- {tFloat, "float"},
- {tBytes, "bytes"},
- {tString, "string"},
-}
-
-func getTypeUnlocked(name string, rt reflect.Type) gobType {
- typeLock.Lock()
- defer typeLock.Unlock()
- t, err := getBaseType(name, rt)
- if err != nil {
- panic("getTypeUnlocked: " + err.Error())
- }
- return t
-}
-
-// Sanity checks
-func TestBasic(t *testing.T) {
- for _, tt := range basicTypes {
- if tt.id.string() != tt.str {
- t.Errorf("checkType: expected %q got %s", tt.str, tt.id.string())
- }
- if tt.id == 0 {
- t.Errorf("id for %q is zero", tt.str)
- }
- }
-}
-
-// Reregister some basic types to check registration is idempotent.
-func TestReregistration(t *testing.T) {
- newtyp := getTypeUnlocked("int", reflect.TypeOf(int(0)))
- if newtyp != tInt.gobType() {
- t.Errorf("reregistration of %s got new type", newtyp.string())
- }
- newtyp = getTypeUnlocked("uint", reflect.TypeOf(uint(0)))
- if newtyp != tUint.gobType() {
- t.Errorf("reregistration of %s got new type", newtyp.string())
- }
- newtyp = getTypeUnlocked("string", reflect.TypeOf("hello"))
- if newtyp != tString.gobType() {
- t.Errorf("reregistration of %s got new type", newtyp.string())
- }
-}
-
-func TestArrayType(t *testing.T) {
- var a3 [3]int
- a3int := getTypeUnlocked("foo", reflect.TypeOf(a3))
- newa3int := getTypeUnlocked("bar", reflect.TypeOf(a3))
- if a3int != newa3int {
- t.Errorf("second registration of [3]int creates new type")
- }
- var a4 [4]int
- a4int := getTypeUnlocked("goo", reflect.TypeOf(a4))
- if a3int == a4int {
- t.Errorf("registration of [3]int creates same type as [4]int")
- }
- var b3 [3]bool
- a3bool := getTypeUnlocked("", reflect.TypeOf(b3))
- if a3int == a3bool {
- t.Errorf("registration of [3]bool creates same type as [3]int")
- }
- str := a3bool.string()
- expected := "[3]bool"
- if str != expected {
- t.Errorf("array printed as %q; expected %q", str, expected)
- }
-}
-
-func TestSliceType(t *testing.T) {
- var s []int
- sint := getTypeUnlocked("slice", reflect.TypeOf(s))
- var news []int
- newsint := getTypeUnlocked("slice1", reflect.TypeOf(news))
- if sint != newsint {
- t.Errorf("second registration of []int creates new type")
- }
- var b []bool
- sbool := getTypeUnlocked("", reflect.TypeOf(b))
- if sbool == sint {
- t.Errorf("registration of []bool creates same type as []int")
- }
- str := sbool.string()
- expected := "[]bool"
- if str != expected {
- t.Errorf("slice printed as %q; expected %q", str, expected)
- }
-}
-
-func TestMapType(t *testing.T) {
- var m map[string]int
- mapStringInt := getTypeUnlocked("map", reflect.TypeOf(m))
- var newm map[string]int
- newMapStringInt := getTypeUnlocked("map1", reflect.TypeOf(newm))
- if mapStringInt != newMapStringInt {
- t.Errorf("second registration of map[string]int creates new type")
- }
- var b map[string]bool
- mapStringBool := getTypeUnlocked("", reflect.TypeOf(b))
- if mapStringBool == mapStringInt {
- t.Errorf("registration of map[string]bool creates same type as map[string]int")
- }
- str := mapStringBool.string()
- expected := "map[string]bool"
- if str != expected {
- t.Errorf("map printed as %q; expected %q", str, expected)
- }
-}
-
-type Bar struct {
- X string
-}
-
-// This structure has pointers and refers to itself, making it a good test case.
-type Foo struct {
- A int
- B int32 // will become int
- C string
- D []byte
- E *float64 // will become float64
- F ****float64 // will become float64
- G *Bar
- H *Bar // should not interpolate the definition of Bar again
- I *Foo // will not explode
-}
-
-func TestStructType(t *testing.T) {
- sstruct := getTypeUnlocked("Foo", reflect.TypeOf(Foo{}))
- str := sstruct.string()
- // If we can print it correctly, we built it correctly.
- expected := "Foo = struct { A int; B int; C string; D bytes; E float; F float; G Bar = struct { X string; }; H Bar; I Foo; }"
- if str != expected {
- t.Errorf("struct printed as %q; expected %q", str, expected)
- }
-}
for {
if z.Next() == html.ErrorToken {
- // Returning os.EOF indicates success.
+ // Returning io.EOF indicates success.
return z.Error()
}
emitToken(z.Token())
case StartTagToken, EndTagToken:
tn, _ := z.TagName()
if len(tn) == 1 && tn[0] == 'a' {
- if tt == StartTag {
+ if tt == StartTagToken {
depth++
} else {
depth--
import (
"testing"
- "utf8"
+ "unicode/utf8"
)
func TestEntityLength(t *testing.T) {
import (
"bytes"
"strings"
- "utf8"
+ "unicode/utf8"
)
// These replacements permit compatibility with old numeric entities that
p.tok = p.tokenizer.Token()
switch p.tok.Type {
case ErrorToken:
- return p.tokenizer.Error()
+ return p.tokenizer.Err()
case SelfClosingTagToken:
p.hasSelfClosingToken = true
p.tok.Type = StartTagToken
// Section 11.2.3.1, "using the rules for".
func useTheRulesFor(p *parser, actual, delegate insertionMode) (insertionMode, bool) {
im, consumed := delegate(p)
- // TODO: do we need to update p.originalMode if it equals delegate?
+ if p.originalIM == delegate {
+ p.originalIM = actual
+ }
if im != delegate {
return im, consumed
}
}
if add || implied {
p.addElement("head", attr)
+ p.head = p.top()
}
return inHeadIM, !implied
}
implied = true
case StartTagToken:
switch p.tok.Data {
- case "meta":
- // TODO.
+ case "base", "basefont", "bgsound", "command", "link", "meta":
+ p.addElement(p.tok.Data, p.tok.Attr)
+ p.oe.pop()
+ p.acknowledgeSelfClosingTag()
case "script", "title", "noscript", "noframes", "style":
p.addElement(p.tok.Data, p.tok.Attr)
p.setOriginalIM(inHeadIM)
case "frameset":
// TODO.
case "base", "basefont", "bgsound", "link", "meta", "noframes", "script", "style", "title":
- // TODO.
+ p.oe = append(p.oe, p.head)
+ defer p.oe.pop()
+ return useTheRulesFor(p, afterHeadIM, inHeadIM)
case "head":
// TODO.
default:
return inBodyIM, !implied
}
+// copyAttributes copies attributes of src not found on dst to dst.
+func copyAttributes(dst *Node, src Token) {
+ if len(src.Attr) == 0 {
+ return
+ }
+ attr := map[string]string{}
+ for _, a := range dst.Attr {
+ attr[a.Key] = a.Val
+ }
+ for _, a := range src.Attr {
+ if _, ok := attr[a.Key]; !ok {
+ dst.Attr = append(dst.Attr, a)
+ attr[a.Key] = a.Val
+ }
+ }
+}
+
// Section 11.2.5.4.7.
func inBodyIM(p *parser) (insertionMode, bool) {
switch p.tok.Type {
}
p.reconstructActiveFormattingElements()
p.addElement(p.tok.Data, p.tok.Attr)
+ case "body":
+ if len(p.oe) >= 2 {
+ body := p.oe[1]
+ if body.Type == ElementNode && body.Data == "body" {
+ p.framesetOK = false
+ copyAttributes(body, p.tok)
+ }
+ }
+ case "base", "basefont", "bgsound", "command", "link", "meta", "noframes", "script", "style", "title":
+ return useTheRulesFor(p, inBodyIM, inHeadIM)
+ case "image":
+ p.tok.Data = "img"
+ return inBodyIM, false
default:
// TODO.
p.addElement(p.tok.Data, p.tok.Attr)
p.inBodyEndTagFormatting(p.tok.Data)
case "address", "article", "aside", "blockquote", "button", "center", "details", "dir", "div", "dl", "fieldset", "figcaption", "figure", "footer", "header", "hgroup", "listing", "menu", "nav", "ol", "pre", "section", "summary", "ul":
p.popUntil(defaultScopeStopTags, p.tok.Data)
+ case "applet", "marquee", "object":
+ if p.popUntil(defaultScopeStopTags, p.tok.Data) {
+ p.clearActiveFormattingElements()
+ }
default:
p.inBodyEndTagOther(p.tok.Data)
}
case StartTagToken:
switch p.tok.Data {
case "td", "th":
- // TODO: clear the stack back to a table row context.
+ p.clearStackToContext(tableRowContextStopTags)
p.addElement(p.tok.Data, p.tok.Attr)
p.afe = append(p.afe, &scopeMarker)
return inCellIM, true
+ case "caption", "col", "colgroup", "tbody", "tfoot", "thead", "tr":
+ if p.popUntil(tableScopeStopTags, "tr") {
+ return inTableBodyIM, false
+ }
+ // Ignore the token.
+ return inRowIM, true
default:
// TODO.
}
case EndTagToken:
switch p.tok.Data {
case "tr":
- if !p.elementInScope(tableScopeStopTags, "tr") {
- return inRowIM, true
+ if p.popUntil(tableScopeStopTags, "tr") {
+ return inTableBodyIM, true
}
- p.clearStackToContext(tableRowContextStopTags)
- p.oe.pop()
- return inTableBodyIM, true
+ // Ignore the token.
+ return inRowIM, true
case "table":
if p.popUntil(tableScopeStopTags, "tr") {
return inTableBodyIM, false
}
func readDat(filename string, c chan io.Reader) {
+ defer close(c)
f, err := os.Open("testdata/webkit/" + filename)
if err != nil {
c <- pipeErr(err)
}
func TestParser(t *testing.T) {
- // TODO(nigeltao): Process all the .dat files, not just the first one.
- filenames := []string{
- "tests1.dat",
+ testFiles := []struct {
+ filename string
+ // n is the number of test cases to run from that file.
+ // -1 means all test cases.
+ n int
+ }{
+ // TODO(nigeltao): Process all the test cases from all the .dat files.
+ {"tests1.dat", 92},
+ {"tests2.dat", 0},
+ {"tests3.dat", 0},
}
- for _, filename := range filenames {
+ for _, tf := range testFiles {
rc := make(chan io.Reader)
- go readDat(filename, rc)
- // TODO(nigeltao): Process all test cases, not just a subset.
- for i := 0; i < 80; i++ {
+ go readDat(tf.filename, rc)
+ for i := 0; i != tf.n; i++ {
// Parse the #data section.
- b, err := ioutil.ReadAll(<-rc)
+ dataReader := <-rc
+ if dataReader == nil {
+ break
+ }
+ b, err := ioutil.ReadAll(dataReader)
if err != nil {
t.Fatal(err)
}
t.Fatal(err)
}
if want := string(b); got != want {
- t.Errorf("%s test #%d %q, got vs want:\n----\n%s----\n%s----", filename, i, text, got, want)
+ t.Errorf("%s test #%d %q, got vs want:\n----\n%s----\n%s----", tf.filename, i, text, got, want)
continue
}
if renderTestBlacklist[text] {
t.Fatal(err)
}
if got != got1 {
- t.Errorf("%s test #%d %q, got vs got1:\n----\n%s----\n%s----", filename, i, text, got, got1)
+ t.Errorf("%s test #%d %q, got vs got1:\n----\n%s----\n%s----", tf.filename, i, text, got, got1)
continue
}
}
+ // Drain any untested cases for the test file.
+ for r := range rc {
+ if _, err := ioutil.ReadAll(r); err != nil {
+ t.Fatal(err)
+ }
+ }
}
}
// The second <a> will be reparented to the first <table>'s parent. This
// results in an <a> whose parent is an <a>, which is not 'well-formed'.
`<a><table><td><a><table></table><a></tr><a></table><b>X</b>C<a>Y`: true,
- // The second <a> will be reparented, similar to the case above.
+ // More cases of <a> being reparented:
`<a href="blah">aba<table><a href="foo">br<tr><td></td></tr>x</table>aoe`: true,
+ `<a><table><a></table><p><a><div><a>`: true,
}
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+ "strings"
+)
+
+// attrTypeMap[n] describes the value of the given attribute.
+// If an attribute affects (or can mask) the encoding or interpretation of
+// other content, or affects the contents, idempotency, or credentials of a
+// network message, then the value in this map is contentTypeUnsafe.
+// This map is derived from HTML5, specifically
+// http://www.w3.org/TR/html5/Overview.html#attributes-1
+// as well as "%URI"-typed attributes from
+// http://www.w3.org/TR/html4/index/attributes.html
+var attrTypeMap = map[string]contentType{
+ "accept": contentTypePlain,
+ "accept-charset": contentTypeUnsafe,
+ "action": contentTypeURL,
+ "alt": contentTypePlain,
+ "archive": contentTypeURL,
+ "async": contentTypeUnsafe,
+ "autocomplete": contentTypePlain,
+ "autofocus": contentTypePlain,
+ "autoplay": contentTypePlain,
+ "background": contentTypeURL,
+ "border": contentTypePlain,
+ "checked": contentTypePlain,
+ "cite": contentTypeURL,
+ "challenge": contentTypeUnsafe,
+ "charset": contentTypeUnsafe,
+ "class": contentTypePlain,
+ "classid": contentTypeURL,
+ "codebase": contentTypeURL,
+ "cols": contentTypePlain,
+ "colspan": contentTypePlain,
+ "content": contentTypeUnsafe,
+ "contenteditable": contentTypePlain,
+ "contextmenu": contentTypePlain,
+ "controls": contentTypePlain,
+ "coords": contentTypePlain,
+ "crossorigin": contentTypeUnsafe,
+ "data": contentTypeURL,
+ "datetime": contentTypePlain,
+ "default": contentTypePlain,
+ "defer": contentTypeUnsafe,
+ "dir": contentTypePlain,
+ "dirname": contentTypePlain,
+ "disabled": contentTypePlain,
+ "draggable": contentTypePlain,
+ "dropzone": contentTypePlain,
+ "enctype": contentTypeUnsafe,
+ "for": contentTypePlain,
+ "form": contentTypeUnsafe,
+ "formaction": contentTypeURL,
+ "formenctype": contentTypeUnsafe,
+ "formmethod": contentTypeUnsafe,
+ "formnovalidate": contentTypeUnsafe,
+ "formtarget": contentTypePlain,
+ "headers": contentTypePlain,
+ "height": contentTypePlain,
+ "hidden": contentTypePlain,
+ "high": contentTypePlain,
+ "href": contentTypeURL,
+ "hreflang": contentTypePlain,
+ "http-equiv": contentTypeUnsafe,
+ "icon": contentTypeURL,
+ "id": contentTypePlain,
+ "ismap": contentTypePlain,
+ "keytype": contentTypeUnsafe,
+ "kind": contentTypePlain,
+ "label": contentTypePlain,
+ "lang": contentTypePlain,
+ "language": contentTypeUnsafe,
+ "list": contentTypePlain,
+ "longdesc": contentTypeURL,
+ "loop": contentTypePlain,
+ "low": contentTypePlain,
+ "manifest": contentTypeURL,
+ "max": contentTypePlain,
+ "maxlength": contentTypePlain,
+ "media": contentTypePlain,
+ "mediagroup": contentTypePlain,
+ "method": contentTypeUnsafe,
+ "min": contentTypePlain,
+ "multiple": contentTypePlain,
+ "name": contentTypePlain,
+ "novalidate": contentTypeUnsafe,
+ // Skip handler names from
+ // http://www.w3.org/TR/html5/Overview.html#event-handlers-on-elements-document-objects-and-window-objects
+ // since we have special handling in attrType.
+ "open": contentTypePlain,
+ "optimum": contentTypePlain,
+ "pattern": contentTypeUnsafe,
+ "placeholder": contentTypePlain,
+ "poster": contentTypeURL,
+ "profile": contentTypeURL,
+ "preload": contentTypePlain,
+ "pubdate": contentTypePlain,
+ "radiogroup": contentTypePlain,
+ "readonly": contentTypePlain,
+ "rel": contentTypeUnsafe,
+ "required": contentTypePlain,
+ "reversed": contentTypePlain,
+ "rows": contentTypePlain,
+ "rowspan": contentTypePlain,
+ "sandbox": contentTypeUnsafe,
+ "spellcheck": contentTypePlain,
+ "scope": contentTypePlain,
+ "scoped": contentTypePlain,
+ "seamless": contentTypePlain,
+ "selected": contentTypePlain,
+ "shape": contentTypePlain,
+ "size": contentTypePlain,
+ "sizes": contentTypePlain,
+ "span": contentTypePlain,
+ "src": contentTypeURL,
+ "srcdoc": contentTypeHTML,
+ "srclang": contentTypePlain,
+ "start": contentTypePlain,
+ "step": contentTypePlain,
+ "style": contentTypeCSS,
+ "tabindex": contentTypePlain,
+ "target": contentTypePlain,
+ "title": contentTypePlain,
+ "type": contentTypeUnsafe,
+ "usemap": contentTypeURL,
+ "value": contentTypeUnsafe,
+ "width": contentTypePlain,
+ "wrap": contentTypePlain,
+ "xmlns": contentTypeURL,
+}
+
+// attrType returns a conservative (upper-bound on authority) guess at the
+// type of the named attribute.
+func attrType(name string) contentType {
+ name = strings.ToLower(name)
+ if strings.HasPrefix(name, "data-") {
+ // Strip data- so that custom attribute heuristics below are
+ // widely applied.
+ // Treat data-action as URL below.
+ name = name[5:]
+ } else if colon := strings.IndexRune(name, ':'); colon != -1 {
+ if name[:colon] == "xmlns" {
+ return contentTypeURL
+ }
+ // Treat svg:href and xlink:href as href below.
+ name = name[colon+1:]
+ }
+ if t, ok := attrTypeMap[name]; ok {
+ return t
+ }
+ // Treat partial event handler names as script.
+ if strings.HasPrefix(name, "on") {
+ return contentTypeJS
+ }
+
+ // Heuristics to prevent "javascript:..." injection in custom
+ // data attributes and custom attributes like g:tweetUrl.
+ // http://www.w3.org/TR/html5/elements.html#embedding-custom-non-visible-data-with-the-data-attributes:
+ // "Custom data attributes are intended to store custom data
+ // private to the page or application, for which there are no
+ // more appropriate attributes or elements."
+ // Developers seem to store URL content in data URLs that start
+ // or end with "URI" or "URL".
+ if strings.Contains(name, "src") ||
+ strings.Contains(name, "uri") ||
+ strings.Contains(name, "url") {
+ return contentTypeURL
+ }
+ return contentTypePlain
+}
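A few lookups that follow from the attrTypeMap table and the heuristics in attrType above (illustrative expectations only):

// Illustrative expectations, not part of the change.
var _ = []struct {
	name string
	want contentType
}{
	{"href", contentTypeURL},       // listed directly in attrTypeMap
	{"ONCLICK", contentTypeJS},     // lower-cased, then the "on" prefix rule applies
	{"data-url", contentTypeURL},   // "data-" is stripped, then the "url" substring rule applies
	{"xlink:href", contentTypeURL}, // the part after the colon is looked up as "href"
	{"title", contentTypePlain},    // listed directly in attrTypeMap
}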
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+ "text/template/parse"
+)
+
+// clone clones a template Node.
+func clone(n parse.Node) parse.Node {
+ switch t := n.(type) {
+ case *parse.ActionNode:
+ return cloneAction(t)
+ case *parse.IfNode:
+ b := new(parse.IfNode)
+ copyBranch(&b.BranchNode, &t.BranchNode)
+ return b
+ case *parse.ListNode:
+ return cloneList(t)
+ case *parse.RangeNode:
+ b := new(parse.RangeNode)
+ copyBranch(&b.BranchNode, &t.BranchNode)
+ return b
+ case *parse.TemplateNode:
+ return cloneTemplate(t)
+ case *parse.TextNode:
+ return cloneText(t)
+ case *parse.WithNode:
+ b := new(parse.WithNode)
+ copyBranch(&b.BranchNode, &t.BranchNode)
+ return b
+ }
+ panic("cloning " + n.String() + " is unimplemented")
+}
+
+// cloneAction returns a deep clone of n.
+func cloneAction(n *parse.ActionNode) *parse.ActionNode {
+ // We use keyless fields because they won't compile if a field is added.
+ return &parse.ActionNode{n.NodeType, n.Line, clonePipe(n.Pipe)}
+}
+
+// cloneList returns a deep clone of n.
+func cloneList(n *parse.ListNode) *parse.ListNode {
+ if n == nil {
+ return nil
+ }
+ // We use keyless fields because they won't compile if a field is added.
+ c := parse.ListNode{n.NodeType, make([]parse.Node, len(n.Nodes))}
+ for i, child := range n.Nodes {
+ c.Nodes[i] = clone(child)
+ }
+ return &c
+}
+
+// clonePipe returns a shallow clone of n.
+// The escaper does not modify pipe descendants in place so there's no need to
+// clone deeply.
+func clonePipe(n *parse.PipeNode) *parse.PipeNode {
+ if n == nil {
+ return nil
+ }
+ // We use keyless fields because they won't compile if a field is added.
+ return &parse.PipeNode{n.NodeType, n.Line, n.Decl, n.Cmds}
+}
+
+// cloneTemplate returns a deep clone of n.
+func cloneTemplate(n *parse.TemplateNode) *parse.TemplateNode {
+ // We use keyless fields because they won't compile if a field is added.
+ return &parse.TemplateNode{n.NodeType, n.Line, n.Name, clonePipe(n.Pipe)}
+}
+
+// cloneText clones the given node sharing its []byte.
+func cloneText(n *parse.TextNode) *parse.TextNode {
+ // We use keyless fields because they won't compile if a field is added.
+ return &parse.TextNode{n.NodeType, n.Text}
+}
+
+// copyBranch clones src into dst.
+func copyBranch(dst, src *parse.BranchNode) {
+ // We use keyless fields because they won't compile if a field is added.
+ *dst = parse.BranchNode{
+ src.NodeType,
+ src.Line,
+ clonePipe(src.Pipe),
+ cloneList(src.List),
+ cloneList(src.ElseList),
+ }
+}
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+ "bytes"
+ "testing"
+ "text/template"
+ "text/template/parse"
+)
+
+func TestClone(t *testing.T) {
+ tests := []struct {
+ input, want, wantClone string
+ }{
+ {
+ `Hello, {{if true}}{{"<World>"}}{{end}}!`,
+ "Hello, <World>!",
+ "Hello, <World>!",
+ },
+ {
+ `Hello, {{if false}}{{.X}}{{else}}{{"<World>"}}{{end}}!`,
+ "Hello, <World>!",
+ "Hello, <World>!",
+ },
+ {
+ `Hello, {{with "<World>"}}{{.}}{{end}}!`,
+ "Hello, <World>!",
+ "Hello, <World>!",
+ },
+ {
+ `{{range .}}<p>{{.}}</p>{{end}}`,
+ "<p>foo</p><p><bar></p><p>baz</p>",
+ "<p>foo</p><p><bar></p><p>baz</p>",
+ },
+ {
+ `Hello, {{"<World>" | html}}!`,
+ "Hello, <World>!",
+ "Hello, <World>!",
+ },
+ {
+ `Hello{{if 1}}, World{{else}}{{template "d"}}{{end}}!`,
+ "Hello, World!",
+ "Hello, World!",
+ },
+ }
+
+ for _, test := range tests {
+ s := template.Must(template.New("s").Parse(test.input))
+ d := template.New("d")
+ d.Tree = &parse.Tree{Name: d.Name(), Root: cloneList(s.Root)}
+
+ if want, got := s.Root.String(), d.Root.String(); want != got {
+ t.Errorf("want %q, got %q", want, got)
+ }
+
+ err := escape(d)
+ if err != nil {
+ t.Errorf("%q: failed to escape: %s", test.input, err)
+ continue
+ }
+
+ if want, got := "s", s.Name(); want != got {
+ t.Errorf("want %q, got %q", want, got)
+ continue
+ }
+ if want, got := "d", d.Name(); want != got {
+ t.Errorf("want %q, got %q", want, got)
+ continue
+ }
+
+ data := []string{"foo", "<bar>", "baz"}
+
+ // Make sure escaping d did not affect s.
+ var b bytes.Buffer
+ s.Execute(&b, data)
+ if got := b.String(); got != test.want {
+ t.Errorf("%q: want %q, got %q", test.input, test.want, got)
+ continue
+ }
+
+ b.Reset()
+ d.Execute(&b, data)
+ if got := b.String(); got != test.wantClone {
+ t.Errorf("%q: want %q, got %q", test.input, test.wantClone, got)
+ }
+ }
+}
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+ "fmt"
+)
+
+// Strings of content from a trusted source.
+type (
+ // CSS encapsulates known safe content that matches any of:
+ // (1) The CSS3 stylesheet production, such as `p { color: purple }`.
+ // (2) The CSS3 rule production, such as `a[href=~"https:"].foo#bar`.
+ // (3) CSS3 declaration productions, such as `color: red; margin: 2px`.
+ // (4) The CSS3 value production, such as `rgba(0, 0, 255, 127)`.
+ // See http://www.w3.org/TR/css3-syntax/#style
+ CSS string
+
+ // HTML encapsulates a known safe HTML document fragment.
+ // It should not be used for HTML from a third-party, or HTML with
+ // unclosed tags or comments. The outputs of a sound HTML sanitizer
+ // and a template escaped by this package are fine for use with HTML.
+ HTML string
+
+ // HTMLAttr encapsulates an HTML attribute from a trusted source,
+ // for example: ` dir="ltr"`.
+ HTMLAttr string
+
+ // JS encapsulates a known safe EcmaScript5 Expression, for example,
+ // `(x + y * z())`.
+ // Template authors are responsible for ensuring that typed expressions
+ // do not break the intended precedence and that there is no
+ // statement/expression ambiguity as when passing an expression like
+ // "{ foo: bar() }\n['foo']()", which is both a valid Expression and a
+ // valid Program with a very different meaning.
+ JS string
+
+ // JSStr encapsulates a sequence of characters meant to be embedded
+ // between quotes in a JavaScript expression.
+ // The string must match a series of StringCharacters:
+ // StringCharacter :: SourceCharacter but not `\` or LineTerminator
+ // | EscapeSequence
+ // Note that LineContinuations are not allowed.
+ // JSStr("foo\\nbar") is fine, but JSStr("foo\\\nbar") is not.
+ JSStr string
+
+ // URL encapsulates a known safe URL as defined in RFC 3986.
+ // A URL like `javascript:checkThatFormNotEditedBeforeLeavingPage()`
+ // from a trusted source should go in the page, but by default dynamic
+ // `javascript:` URLs are filtered out since they are a frequently
+ // exploited injection vector.
+ URL string
+)
+
+type contentType uint8
+
+const (
+ contentTypePlain contentType = iota
+ contentTypeCSS
+ contentTypeHTML
+ contentTypeHTMLAttr
+ contentTypeJS
+ contentTypeJSStr
+ contentTypeURL
+ // contentTypeUnsafe is used in attr.go for values that affect how
+ // embedded content and network messages are formed, vetted,
+ // or interpreted; or which credentials network messages carry.
+ contentTypeUnsafe
+)
+
+// stringify converts its arguments to a string and returns the type of the content.
+func stringify(args ...interface{}) (string, contentType) {
+ if len(args) == 1 {
+ switch s := args[0].(type) {
+ case string:
+ return s, contentTypePlain
+ case CSS:
+ return string(s), contentTypeCSS
+ case HTML:
+ return string(s), contentTypeHTML
+ case HTMLAttr:
+ return string(s), contentTypeHTMLAttr
+ case JS:
+ return string(s), contentTypeJS
+ case JSStr:
+ return string(s), contentTypeJSStr
+ case URL:
+ return string(s), contentTypeURL
+ }
+ }
+ return fmt.Sprint(args...), contentTypePlain
+}
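+
+// Illustrative calls (the concrete values follow directly from the switch above):
+//	stringify(HTML("<b>Hi</b>")) // "<b>Hi</b>", contentTypeHTML
+//	stringify("<b>Hi</b>")       // "<b>Hi</b>", contentTypePlain
+//	stringify("x", 42)           // "x42", contentTypePlain via fmt.Sprint
+// The contentType result is what lets the escapers in this package leave
+// already-safe typed content alone while encoding plain text.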
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+ "bytes"
+ "strings"
+ "testing"
+)
+
+func TestTypedContent(t *testing.T) {
+ data := []interface{}{
+ `<b> "foo%" O'Reilly &bar;`,
+ CSS(`a[href =~ "//example.com"]#foo`),
+ HTML(`Hello, <b>World</b> &tc!`),
+ HTMLAttr(` dir="ltr"`),
+ JS(`c && alert("Hello, World!");`),
+ JSStr(`Hello, World & O'Reilly\x21`),
+ URL(`greeting=H%69&addressee=(World)`),
+ }
+
+ // For each content sensitive escaper, see how it does on
+ // each of the typed strings above.
+ tests := []struct {
+ // A template containing a single {{.}}.
+ input string
+ want []string
+ }{
+ {
+ `<style>{{.}} { color: blue }</style>`,
+ []string{
+ `ZgotmplZ`,
+ // Allowed but not escaped.
+ `a[href =~ "//example.com"]#foo`,
+ `ZgotmplZ`,
+ `ZgotmplZ`,
+ `ZgotmplZ`,
+ `ZgotmplZ`,
+ `ZgotmplZ`,
+ },
+ },
+ {
+ `<div style="{{.}}">`,
+ []string{
+ `ZgotmplZ`,
+ // Allowed and HTML escaped.
+ `a[href =~ "//example.com"]#foo`,
+ `ZgotmplZ`,
+ `ZgotmplZ`,
+ `ZgotmplZ`,
+ `ZgotmplZ`,
+ `ZgotmplZ`,
+ },
+ },
+ {
+ `{{.}}`,
+ []string{
+ `<b> "foo%" O'Reilly &bar;`,
+ `a[href =~ "//example.com"]#foo`,
+ // Not escaped.
+ `Hello, <b>World</b> &tc!`,
+ ` dir="ltr"`,
+ `c && alert("Hello, World!");`,
+ `Hello, World & O'Reilly\x21`,
+ `greeting=H%69&addressee=(World)`,
+ },
+ },
+ {
+ `<a{{.}}>`,
+ []string{
+ `ZgotmplZ`,
+ `ZgotmplZ`,
+ `ZgotmplZ`,
+ // Allowed and HTML escaped.
+ ` dir="ltr"`,
+ `ZgotmplZ`,
+ `ZgotmplZ`,
+ `ZgotmplZ`,
+ },
+ },
+ {
+ `<a title={{.}}>`,
+ []string{
+ `<b> "foo%" O'Reilly &bar;`,
+ `a[href =~ "//example.com"]#foo`,
+ // Tags stripped, spaces escaped, entity not re-escaped.
+ `Hello, World &tc!`,
+ ` dir="ltr"`,
+ `c && alert("Hello, World!");`,
+ `Hello, World & O'Reilly\x21`,
+ `greeting=H%69&addressee=(World)`,
+ },
+ },
+ {
+ `<a title='{{.}}'>`,
+ []string{
+ `<b> "foo%" O'Reilly &bar;`,
+ `a[href =~ "//example.com"]#foo`,
+ // Tags stripped, entity not re-escaped.
+ `Hello, World &tc!`,
+ ` dir="ltr"`,
+ `c && alert("Hello, World!");`,
+ `Hello, World & O'Reilly\x21`,
+ `greeting=H%69&addressee=(World)`,
+ },
+ },
+ {
+ `<textarea>{{.}}</textarea>`,
+ []string{
+ `<b> "foo%" O'Reilly &bar;`,
+ `a[href =~ "//example.com"]#foo`,
+ // Angle brackets escaped to prevent injection of close tags, entity not re-escaped.
+ `Hello, <b>World</b> &tc!`,
+ ` dir="ltr"`,
+ `c && alert("Hello, World!");`,
+ `Hello, World & O'Reilly\x21`,
+ `greeting=H%69&addressee=(World)`,
+ },
+ },
+ {
+ `<script>alert({{.}})</script>`,
+ []string{
+ `"\u003cb\u003e \"foo%\" O'Reilly &bar;"`,
+ `"a[href =~ \"//example.com\"]#foo"`,
+ `"Hello, \u003cb\u003eWorld\u003c/b\u003e &tc!"`,
+ `" dir=\"ltr\""`,
+ // Not escaped.
+ `c && alert("Hello, World!");`,
+ // Escape sequence not over-escaped.
+ `"Hello, World & O'Reilly\x21"`,
+ `"greeting=H%69&addressee=(World)"`,
+ },
+ },
+ {
+ `<button onclick="alert({{.}})">`,
+ []string{
+ `"\u003cb\u003e \"foo%\" O'Reilly &bar;"`,
+ `"a[href =~ \"//example.com\"]#foo"`,
+ `"Hello, \u003cb\u003eWorld\u003c/b\u003e &amp;tc!"`,
+ `" dir=\"ltr\""`,
+ // Not JS escaped but HTML escaped.
+ `c && alert("Hello, World!");`,
+ // Escape sequence not over-escaped.
+ `"Hello, World & O'Reilly\x21"`,
+ `"greeting=H%69&addressee=(World)"`,
+ },
+ },
+ {
+ `<script>alert("{{.}}")</script>`,
+ []string{
+ `\x3cb\x3e \x22foo%\x22 O\x27Reilly \x26bar;`,
+ `a[href =~ \x22\/\/example.com\x22]#foo`,
+ `Hello, \x3cb\x3eWorld\x3c\/b\x3e \x26amp;tc!`,
+ ` dir=\x22ltr\x22`,
+ `c \x26\x26 alert(\x22Hello, World!\x22);`,
+ // Escape sequence not over-escaped.
+ `Hello, World \x26 O\x27Reilly\x21`,
+ `greeting=H%69\x26addressee=(World)`,
+ },
+ },
+ {
+ `<button onclick='alert("{{.}}")'>`,
+ []string{
+ `\x3cb\x3e \x22foo%\x22 O\x27Reilly \x26bar;`,
+ `a[href =~ \x22\/\/example.com\x22]#foo`,
+ `Hello, \x3cb\x3eWorld\x3c\/b\x3e \x26amp;tc!`,
+ ` dir=\x22ltr\x22`,
+ `c \x26\x26 alert(\x22Hello, World!\x22);`,
+ // Escape sequence not over-escaped.
+ `Hello, World \x26 O\x27Reilly\x21`,
+ `greeting=H%69\x26addressee=(World)`,
+ },
+ },
+ {
+ `<a href="?q={{.}}">`,
+ []string{
+ `%3cb%3e%20%22foo%25%22%20O%27Reilly%20%26bar%3b`,
+ `a%5bhref%20%3d~%20%22%2f%2fexample.com%22%5d%23foo`,
+ `Hello%2c%20%3cb%3eWorld%3c%2fb%3e%20%26amp%3btc%21`,
+ `%20dir%3d%22ltr%22`,
+ `c%20%26%26%20alert%28%22Hello%2c%20World%21%22%29%3b`,
+ `Hello%2c%20World%20%26%20O%27Reilly%5cx21`,
+ // Quotes and parens are escaped but %69 is not over-escaped. HTML escaping is done.
+ `greeting=H%69&addressee=%28World%29`,
+ },
+ },
+ {
+ `<style>body { background: url('?img={{.}}') }</style>`,
+ []string{
+ `%3cb%3e%20%22foo%25%22%20O%27Reilly%20%26bar%3b`,
+ `a%5bhref%20%3d~%20%22%2f%2fexample.com%22%5d%23foo`,
+ `Hello%2c%20%3cb%3eWorld%3c%2fb%3e%20%26amp%3btc%21`,
+ `%20dir%3d%22ltr%22`,
+ `c%20%26%26%20alert%28%22Hello%2c%20World%21%22%29%3b`,
+ `Hello%2c%20World%20%26%20O%27Reilly%5cx21`,
+ // Quotes and parens are escaped but %69 is not over-escaped. HTML escaping is not done.
+ `greeting=H%69&addressee=%28World%29`,
+ },
+ },
+ }
+
+ for _, test := range tests {
+ tmpl := Must(New("x").Parse(test.input))
+ pre := strings.Index(test.input, "{{.}}")
+ post := len(test.input) - (pre + 5)
+ var b bytes.Buffer
+ for i, x := range data {
+ b.Reset()
+ if err := tmpl.Execute(&b, x); err != nil {
+ t.Errorf("%q with %v: %s", test.input, x, err)
+ continue
+ }
+ if want, got := test.want[i], b.String()[pre:b.Len()-post]; want != got {
+ t.Errorf("%q with %v:\nwant\n\t%q,\ngot\n\t%q\n", test.input, x, want, got)
+ continue
+ }
+ }
+ }
+}
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+ "fmt"
+)
+
+// context describes the state an HTML parser must be in when it reaches the
+// portion of HTML produced by evaluating a particular template node.
+//
+// The zero value of type context is the start context for a template that
+// produces an HTML fragment as defined at
+// http://www.w3.org/TR/html5/the-end.html#parsing-html-fragments
+// where the context element is null.
+type context struct {
+ state state
+ delim delim
+ urlPart urlPart
+ jsCtx jsCtx
+ attr attr
+ element element
+ err *Error
+}
+
+func (c context) String() string {
+ return fmt.Sprintf("{%v %v %v %v %v %v %v}", c.state, c.delim, c.urlPart, c.jsCtx, c.attr, c.element, c.err)
+}
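+
+// For example, a context inside a double-quoted URL attribute value prints as
+//	{stateURL delimDoubleQuote urlPartNone jsCtxRegexp attrNone elementNone <nil>}
+// which is the form that appears in "ends in a non-text context" and
+// "branches end in different contexts" error messages.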
+
+// eq returns whether two contexts are equal.
+func (c context) eq(d context) bool {
+ return c.state == d.state &&
+ c.delim == d.delim &&
+ c.urlPart == d.urlPart &&
+ c.jsCtx == d.jsCtx &&
+ c.attr == d.attr &&
+ c.element == d.element &&
+ c.err == d.err
+}
+
+// mangle produces an identifier that includes a suffix that distinguishes it
+// from template names mangled with different contexts.
+func (c context) mangle(templateName string) string {
+ // The mangled name for the default context is the input templateName.
+ if c.state == stateText {
+ return templateName
+ }
+ s := templateName + "$htmltemplate_" + c.state.String()
+ if c.delim != 0 {
+ s += "_" + c.delim.String()
+ }
+ if c.urlPart != 0 {
+ s += "_" + c.urlPart.String()
+ }
+ if c.jsCtx != 0 {
+ s += "_" + c.jsCtx.String()
+ }
+ if c.attr != 0 {
+ s += "_" + c.attr.String()
+ }
+ if c.element != 0 {
+ s += "_" + c.element.String()
+ }
+ return s
+}
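+
+// For instance, a template "foo" reached in stateURL inside a double-quoted
+// attribute might be mangled to
+//	foo$htmltemplate_stateURL_delimDoubleQuote
+// while a template whose start context is the default stateText keeps its
+// original name.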
+
+// state describes a high-level HTML parser state.
+//
+// It bounds the top of the element stack, and by extension the HTML insertion
+// mode, but also contains state that does not correspond to anything in the
+// HTML5 parsing algorithm because a single token production in the HTML
+// grammar may contain embedded actions in a template. For instance, the quoted
+// HTML attribute produced by
+// <div title="Hello {{.World}}">
+// is a single token in HTML's grammar but in a template spans several nodes.
+type state uint8
+
+const (
+ // stateText is parsed character data. An HTML parser is in
+ // this state when its parse position is outside an HTML tag,
+ // directive, comment, and special element body.
+ stateText state = iota
+ // stateTag occurs before an HTML attribute or the end of a tag.
+ stateTag
+ // stateAttrName occurs inside an attribute name.
+ // It occurs between the ^'s in ` ^name^ = value`.
+ stateAttrName
+ // stateAfterName occurs after an attr name has ended but before any
+ // equals sign. It occurs between the ^'s in ` name^ ^= value`.
+ stateAfterName
+ // stateBeforeValue occurs after the equals sign but before the value.
+ // It occurs between the ^'s in ` name =^ ^value`.
+ stateBeforeValue
+ // stateHTMLCmt occurs inside an <!-- HTML comment -->.
+ stateHTMLCmt
+ // stateRCDATA occurs inside an RCDATA element (<textarea> or <title>)
+ // as described at http://dev.w3.org/html5/spec/syntax.html#elements-0
+ stateRCDATA
+ // stateAttr occurs inside an HTML attribute whose content is text.
+ stateAttr
+ // stateURL occurs inside an HTML attribute whose content is a URL.
+ stateURL
+ // stateJS occurs inside an event handler or script element.
+ stateJS
+ // stateJSDqStr occurs inside a JavaScript double quoted string.
+ stateJSDqStr
+ // stateJSSqStr occurs inside a JavaScript single quoted string.
+ stateJSSqStr
+ // stateJSRegexp occurs inside a JavaScript regexp literal.
+ stateJSRegexp
+ // stateJSBlockCmt occurs inside a JavaScript /* block comment */.
+ stateJSBlockCmt
+ // stateJSLineCmt occurs inside a JavaScript // line comment.
+ stateJSLineCmt
+ // stateCSS occurs inside a <style> element or style attribute.
+ stateCSS
+ // stateCSSDqStr occurs inside a CSS double quoted string.
+ stateCSSDqStr
+ // stateCSSSqStr occurs inside a CSS single quoted string.
+ stateCSSSqStr
+ // stateCSSDqURL occurs inside a CSS double quoted url("...").
+ stateCSSDqURL
+ // stateCSSSqURL occurs inside a CSS single quoted url('...').
+ stateCSSSqURL
+ // stateCSSURL occurs inside a CSS unquoted url(...).
+ stateCSSURL
+ // stateCSSBlockCmt occurs inside a CSS /* block comment */.
+ stateCSSBlockCmt
+ // stateCSSLineCmt occurs inside a CSS // line comment.
+ stateCSSLineCmt
+ // stateError is an infectious error state outside any valid
+ // HTML/CSS/JS construct.
+ stateError
+)
+
+var stateNames = [...]string{
+ stateText: "stateText",
+ stateTag: "stateTag",
+ stateAttrName: "stateAttrName",
+ stateAfterName: "stateAfterName",
+ stateBeforeValue: "stateBeforeValue",
+ stateHTMLCmt: "stateHTMLCmt",
+ stateRCDATA: "stateRCDATA",
+ stateAttr: "stateAttr",
+ stateURL: "stateURL",
+ stateJS: "stateJS",
+ stateJSDqStr: "stateJSDqStr",
+ stateJSSqStr: "stateJSSqStr",
+ stateJSRegexp: "stateJSRegexp",
+ stateJSBlockCmt: "stateJSBlockCmt",
+ stateJSLineCmt: "stateJSLineCmt",
+ stateCSS: "stateCSS",
+ stateCSSDqStr: "stateCSSDqStr",
+ stateCSSSqStr: "stateCSSSqStr",
+ stateCSSDqURL: "stateCSSDqURL",
+ stateCSSSqURL: "stateCSSSqURL",
+ stateCSSURL: "stateCSSURL",
+ stateCSSBlockCmt: "stateCSSBlockCmt",
+ stateCSSLineCmt: "stateCSSLineCmt",
+ stateError: "stateError",
+}
+
+func (s state) String() string {
+ if int(s) < len(stateNames) {
+ return stateNames[s]
+ }
+ return fmt.Sprintf("illegal state %d", int(s))
+}
+
+// isComment is true for any state that contains content meant for template
+// authors & maintainers, not for end-users or machines.
+func isComment(s state) bool {
+ switch s {
+ case stateHTMLCmt, stateJSBlockCmt, stateJSLineCmt, stateCSSBlockCmt, stateCSSLineCmt:
+ return true
+ }
+ return false
+}
+
+// isInTag returns whether s occurs solely inside an HTML tag.
+func isInTag(s state) bool {
+ switch s {
+ case stateTag, stateAttrName, stateAfterName, stateBeforeValue, stateAttr:
+ return true
+ }
+ return false
+}
+
+// delim is the delimiter that will end the current HTML attribute.
+type delim uint8
+
+const (
+ // delimNone occurs outside any attribute.
+ delimNone delim = iota
+ // delimDoubleQuote occurs when a double quote (") closes the attribute.
+ delimDoubleQuote
+ // delimSingleQuote occurs when a single quote (') closes the attribute.
+ delimSingleQuote
+ // delimSpaceOrTagEnd occurs when a space or right angle bracket (>)
+ // closes the attribute.
+ delimSpaceOrTagEnd
+)
+
+var delimNames = [...]string{
+ delimNone: "delimNone",
+ delimDoubleQuote: "delimDoubleQuote",
+ delimSingleQuote: "delimSingleQuote",
+ delimSpaceOrTagEnd: "delimSpaceOrTagEnd",
+}
+
+func (d delim) String() string {
+ if int(d) < len(delimNames) {
+ return delimNames[d]
+ }
+ return fmt.Sprintf("illegal delim %d", int(d))
+}
+
+// urlPart identifies a part in an RFC 3986 hierarchical URL to allow different
+// encoding strategies.
+type urlPart uint8
+
+const (
+ // urlPartNone occurs when not in a URL, or possibly at the start:
+ // ^ in "^http://auth/path?k=v#frag".
+ urlPartNone urlPart = iota
+ // urlPartPreQuery occurs in the scheme, authority, or path; between the
+ // ^s in "h^ttp://auth/path^?k=v#frag".
+ urlPartPreQuery
+ // urlPartQueryOrFrag occurs in the query portion between the ^s in
+ // "http://auth/path?^k=v#frag^".
+ urlPartQueryOrFrag
+ // urlPartUnknown occurs due to joining of contexts both before and
+ // after the query separator.
+ urlPartUnknown
+)
+
+var urlPartNames = [...]string{
+ urlPartNone: "urlPartNone",
+ urlPartPreQuery: "urlPartPreQuery",
+ urlPartQueryOrFrag: "urlPartQueryOrFrag",
+ urlPartUnknown: "urlPartUnknown",
+}
+
+func (u urlPart) String() string {
+ if int(u) < len(urlPartNames) {
+ return urlPartNames[u]
+ }
+ return fmt.Sprintf("illegal urlPart %d", int(u))
+}
+
+// jsCtx determines whether a '/' starts a regular expression literal or a
+// division operator.
+type jsCtx uint8
+
+const (
+ // jsCtxRegexp occurs where a '/' would start a regexp literal.
+ jsCtxRegexp jsCtx = iota
+ // jsCtxDivOp occurs where a '/' would start a division operator.
+ jsCtxDivOp
+ // jsCtxUnknown occurs where a '/' is ambiguous due to context joining.
+ jsCtxUnknown
+)
+
+func (c jsCtx) String() string {
+ switch c {
+ case jsCtxRegexp:
+ return "jsCtxRegexp"
+ case jsCtxDivOp:
+ return "jsCtxDivOp"
+ case jsCtxUnknown:
+ return "jsCtxUnknown"
+ }
+ return fmt.Sprintf("illegal jsCtx %d", int(c))
+}
+
+// element identifies the HTML element when inside a start tag or special body.
+// Certain HTML elements (for example <script> and <style>) have bodies that are
+// treated differently from stateText so the element type is necessary to
+// transition into the correct context at the end of a tag and to identify the
+// end delimiter for the body.
+type element uint8
+
+const (
+ // elementNone occurs outside a special tag or special element body.
+ elementNone element = iota
+ // elementScript corresponds to the raw text <script> element.
+ elementScript
+ // elementStyle corresponds to the raw text <style> element.
+ elementStyle
+ // elementTextarea corresponds to the RCDATA <textarea> element.
+ elementTextarea
+ // elementTitle corresponds to the RCDATA <title> element.
+ elementTitle
+)
+
+var elementNames = [...]string{
+ elementNone: "elementNone",
+ elementScript: "elementScript",
+ elementStyle: "elementStyle",
+ elementTextarea: "elementTextarea",
+ elementTitle: "elementTitle",
+}
+
+func (e element) String() string {
+ if int(e) < len(elementNames) {
+ return elementNames[e]
+ }
+ return fmt.Sprintf("illegal element %d", int(e))
+}
+
+// attr identifies the most recent HTML attribute when inside a start tag.
+type attr uint8
+
+const (
+ // attrNone corresponds to a normal attribute or no attribute.
+ attrNone attr = iota
+ // attrScript corresponds to an event handler attribute.
+ attrScript
+ // attrStyle corresponds to the style attribute whose value is CSS.
+ attrStyle
+ // attrURL corresponds to an attribute whose value is a URL.
+ attrURL
+)
+
+var attrNames = [...]string{
+ attrNone: "attrNone",
+ attrScript: "attrScript",
+ attrStyle: "attrStyle",
+ attrURL: "attrURL",
+}
+
+func (a attr) String() string {
+ if int(a) < len(attrNames) {
+ return attrNames[a]
+ }
+ return fmt.Sprintf("illegal attr %d", int(a))
+}
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+ "bytes"
+ "fmt"
+ "unicode"
+ "unicode/utf8"
+)
+
+// endsWithCSSKeyword returns whether b ends with an ident that
+// case-insensitively matches the lower-case kw.
+func endsWithCSSKeyword(b []byte, kw string) bool {
+ i := len(b) - len(kw)
+ if i < 0 {
+ // Too short.
+ return false
+ }
+ if i != 0 {
+ r, _ := utf8.DecodeLastRune(b[:i])
+ if isCSSNmchar(r) {
+ // Too long.
+ return false
+ }
+ }
+ // Many CSS keywords, such as "!important", can have characters encoded,
+ // but the URI production does not allow that according to
+ // http://www.w3.org/TR/css3-syntax/#TOK-URI
+ // This does not attempt to recognize encoded keywords. For example,
+ // given "\75\72\6c" and "url" this returns false.
+ return string(bytes.ToLower(b[i:])) == kw
+}
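+
+// For example (cases mirrored in css_test.go):
+//	endsWithCSSKeyword([]byte("image url"), "url") == true
+//	endsWithCSSKeyword([]byte("imageurl"), "url")  == false
+// since in the second case the preceding 'e' is an nmchar, so "url" is just
+// the tail of a longer identifier.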
+
+// isCSSNmchar returns whether r is allowed anywhere in a CSS identifier.
+func isCSSNmchar(r rune) bool {
+ // Based on the CSS3 nmchar production but ignores multi-rune escape
+ // sequences.
+ // http://www.w3.org/TR/css3-syntax/#SUBTOK-nmchar
+ return 'a' <= r && r <= 'z' ||
+ 'A' <= r && r <= 'Z' ||
+ '0' <= r && r <= '9' ||
+ r == '-' ||
+ r == '_' ||
+ // Non-ASCII cases below.
+ 0x80 <= r && r <= 0xd7ff ||
+ 0xe000 <= r && r <= 0xfffd ||
+ 0x10000 <= r && r <= 0x10ffff
+}
+
+// decodeCSS decodes CSS3 escapes given a sequence of stringchars.
+// If there is no change, it returns the input, otherwise it returns a slice
+// backed by a new array.
+// http://www.w3.org/TR/css3-syntax/#SUBTOK-stringchar defines stringchar.
+func decodeCSS(s []byte) []byte {
+ i := bytes.IndexByte(s, '\\')
+ if i == -1 {
+ return s
+ }
+ // The UTF-8 sequence for a codepoint is never longer than 1 + the
+ // number of hex digits needed to represent that codepoint, so len(s) is an
+ // upper bound on the output length.
+ b := make([]byte, 0, len(s))
+ for len(s) != 0 {
+ i := bytes.IndexByte(s, '\\')
+ if i == -1 {
+ i = len(s)
+ }
+ b, s = append(b, s[:i]...), s[i:]
+ if len(s) < 2 {
+ break
+ }
+ // http://www.w3.org/TR/css3-syntax/#SUBTOK-escape
+ // escape ::= unicode | '\' [#x20-#x7E#x80-#xD7FF#xE000-#xFFFD#x10000-#x10FFFF]
+ if isHex(s[1]) {
+ // http://www.w3.org/TR/css3-syntax/#SUBTOK-unicode
+ // unicode ::= '\' [0-9a-fA-F]{1,6} wc?
+ j := 2
+ for j < len(s) && j < 7 && isHex(s[j]) {
+ j++
+ }
+ r := hexDecode(s[1:j])
+ if r > unicode.MaxRune {
+ r, j = r/16, j-1
+ }
+ n := utf8.EncodeRune(b[len(b):cap(b)], r)
+ // The optional space at the end allows a hex
+ // sequence to be followed by a literal hex.
+ // string(decodeCSS([]byte(`\A B`))) == "\nB"
+ b, s = b[:len(b)+n], skipCSSSpace(s[j:])
+ } else {
+ // `\\` decodes to `\` and `\"` to `"`.
+ _, n := utf8.DecodeRune(s[1:])
+ b, s = append(b, s[1:1+n]...), s[1+n:]
+ }
+ }
+ return b
+}
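+
+// For example,
+//	string(decodeCSS([]byte(`\3c b\3e`))) == "<b>"
+// where the space after `\3c` is the optional trailing wc that keeps the
+// literal 'b' from being consumed as another hex digit.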
+
+// isHex returns whether the given character is a hex digit.
+func isHex(c byte) bool {
+ return '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F'
+}
+
+// hexDecode decodes a short hex digit sequence: "10" -> 16.
+func hexDecode(s []byte) rune {
+ n := rune(0)
+ for _, c := range s {
+ n <<= 4
+ switch {
+ case '0' <= c && c <= '9':
+ n |= rune(c - '0')
+ case 'a' <= c && c <= 'f':
+ n |= rune(c-'a') + 10
+ case 'A' <= c && c <= 'F':
+ n |= rune(c-'A') + 10
+ default:
+ panic(fmt.Sprintf("Bad hex digit in %q", s))
+ }
+ }
+ return n
+}
+
+// skipCSSSpace returns a suffix of c, skipping over a single space.
+func skipCSSSpace(c []byte) []byte {
+ if len(c) == 0 {
+ return c
+ }
+ // wc ::= #x9 | #xA | #xC | #xD | #x20
+ switch c[0] {
+ case '\t', '\n', '\f', ' ':
+ return c[1:]
+ case '\r':
+ // This differs from CSS3's wc production because it contains a
+ // probable spec error whereby wc contains all the single byte
+ // sequences in nl (newline) but not CRLF.
+ if len(c) >= 2 && c[1] == '\n' {
+ return c[2:]
+ }
+ return c[1:]
+ }
+ return c
+}
+
+// isCSSSpace returns whether b is a CSS space char as defined in wc.
+func isCSSSpace(b byte) bool {
+ switch b {
+ case '\t', '\n', '\f', '\r', ' ':
+ return true
+ }
+ return false
+}
+
+// cssEscaper escapes HTML and CSS special characters using \<hex>+ escapes.
+func cssEscaper(args ...interface{}) string {
+ s, _ := stringify(args...)
+ var b bytes.Buffer
+ written := 0
+ for i, r := range s {
+ var repl string
+ switch r {
+ case 0:
+ repl = `\0`
+ case '\t':
+ repl = `\9`
+ case '\n':
+ repl = `\a`
+ case '\f':
+ repl = `\c`
+ case '\r':
+ repl = `\d`
+ // Encode HTML specials as hex so the output can be embedded
+ // in HTML attributes without further encoding.
+ case '"':
+ repl = `\22`
+ case '&':
+ repl = `\26`
+ case '\'':
+ repl = `\27`
+ case '(':
+ repl = `\28`
+ case ')':
+ repl = `\29`
+ case '+':
+ repl = `\2b`
+ case '/':
+ repl = `\2f`
+ case ':':
+ repl = `\3a`
+ case ';':
+ repl = `\3b`
+ case '<':
+ repl = `\3c`
+ case '>':
+ repl = `\3e`
+ case '\\':
+ repl = `\\`
+ case '{':
+ repl = `\7b`
+ case '}':
+ repl = `\7d`
+ default:
+ continue
+ }
+ b.WriteString(s[written:i])
+ b.WriteString(repl)
+ written = i + utf8.RuneLen(r)
+ if repl != `\\` && (written == len(s) || isHex(s[written]) || isCSSSpace(s[written])) {
+ b.WriteByte(' ')
+ }
+ }
+ if written == 0 {
+ return s
+ }
+ b.WriteString(s[written:])
+ return b.String()
+}
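+
+// For example,
+//	cssEscaper("a<b") == `a\3c b`
+// where the space emitted after the hex escape prevents the following literal
+// 'b' from being parsed as part of the escape sequence.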
+
+var expressionBytes = []byte("expression")
+var mozBindingBytes = []byte("mozbinding")
+
+// cssValueFilter allows innocuous CSS values in the output including CSS
+// quantities (10px or 25%), ID or class literals (#foo, .bar), keyword values
+// (inherit, blue), and colors (#888).
+// It filters out unsafe values, such as those that affect token boundaries,
+// and anything that might execute scripts.
+func cssValueFilter(args ...interface{}) string {
+ s, t := stringify(args...)
+ if t == contentTypeCSS {
+ return s
+ }
+ b, id := decodeCSS([]byte(s)), make([]byte, 0, 64)
+
+ // CSS3 error handling is specified as honoring string boundaries per
+ // http://www.w3.org/TR/css3-syntax/#error-handling :
+ // Malformed declarations. User agents must handle unexpected
+ // tokens encountered while parsing a declaration by reading until
+ // the end of the declaration, while observing the rules for
+ // matching pairs of (), [], {}, "", and '', and correctly handling
+ // escapes. For example, a malformed declaration may be missing a
+ // property, colon (:) or value.
+ // So we need to make sure that values do not have mismatched bracket
+ // or quote characters to prevent the browser from restarting parsing
+ // inside a string that might embed JavaScript source.
+ for i, c := range b {
+ switch c {
+ case 0, '"', '\'', '(', ')', '/', ';', '@', '[', '\\', ']', '`', '{', '}':
+ return filterFailsafe
+ case '-':
+ // Disallow <!-- or -->.
+ // -- should not appear in valid identifiers.
+ if i != 0 && b[i-1] == '-' {
+ return filterFailsafe
+ }
+ default:
+ if c < 0x80 && isCSSNmchar(rune(c)) {
+ id = append(id, c)
+ }
+ }
+ }
+ id = bytes.ToLower(id)
+ if bytes.Index(id, expressionBytes) != -1 || bytes.Index(id, mozBindingBytes) != -1 {
+ return filterFailsafe
+ }
+ return string(b)
+}
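+
+// For example (values taken from css_test.go):
+//	cssValueFilter("color: red") == "color: red"
+//	cssValueFilter("expression") == "ZgotmplZ" // filterFailsafe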
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+ "strconv"
+ "strings"
+ "testing"
+)
+
+func TestEndsWithCSSKeyword(t *testing.T) {
+ tests := []struct {
+ css, kw string
+ want bool
+ }{
+ {"", "url", false},
+ {"url", "url", true},
+ {"URL", "url", true},
+ {"Url", "url", true},
+ {"url", "important", false},
+ {"important", "important", true},
+ {"image-url", "url", false},
+ {"imageurl", "url", false},
+ {"image url", "url", true},
+ }
+ for _, test := range tests {
+ got := endsWithCSSKeyword([]byte(test.css), test.kw)
+ if got != test.want {
+ t.Errorf("want %t but got %t for css=%v, kw=%v", test.want, got, test.css, test.kw)
+ }
+ }
+}
+
+func TestIsCSSNmchar(t *testing.T) {
+ tests := []struct {
+ rune rune
+ want bool
+ }{
+ {0, false},
+ {'0', true},
+ {'9', true},
+ {'A', true},
+ {'Z', true},
+ {'a', true},
+ {'z', true},
+ {'_', true},
+ {'-', true},
+ {':', false},
+ {';', false},
+ {' ', false},
+ {0x7f, false},
+ {0x80, true},
+ {0x1234, true},
+ {0xd800, false},
+ {0xdc00, false},
+ {0xfffe, false},
+ {0x10000, true},
+ {0x110000, false},
+ }
+ for _, test := range tests {
+ got := isCSSNmchar(test.rune)
+ if got != test.want {
+ t.Errorf("%q: want %t but got %t", string(test.rune), test.want, got)
+ }
+ }
+}
+
+func TestDecodeCSS(t *testing.T) {
+ tests := []struct {
+ css, want string
+ }{
+ {``, ``},
+ {`foo`, `foo`},
+ {`foo\`, `foo`},
+ {`foo\\`, `foo\`},
+ {`\`, ``},
+ {`\A`, "\n"},
+ {`\a`, "\n"},
+ {`\0a`, "\n"},
+ {`\00000a`, "\n"},
+ {`\000000a`, "\u0000a"},
+ {`\1234 5`, "\u1234" + "5"},
+ {`\1234\20 5`, "\u1234" + " 5"},
+ {`\1234\A 5`, "\u1234" + "\n5"},
+ {"\\1234\t5", "\u1234" + "5"},
+ {"\\1234\n5", "\u1234" + "5"},
+ {"\\1234\r\n5", "\u1234" + "5"},
+ {`\12345`, "\U00012345"},
+ {`\\`, `\`},
+ {`\\ `, `\ `},
+ {`\"`, `"`},
+ {`\'`, `'`},
+ {`\.`, `.`},
+ {`\. .`, `. .`},
+ {
+ `The \3c i\3equick\3c/i\3e,\d\A\3cspan style=\27 color:brown\27\3e brown\3c/span\3e fox jumps\2028over the \3c canine class=\22lazy\22 \3e dog\3c/canine\3e`,
+ "The <i>quick</i>,\r\n<span style='color:brown'>brown</span> fox jumps\u2028over the <canine class=\"lazy\">dog</canine>",
+ },
+ }
+ for _, test := range tests {
+ got1 := string(decodeCSS([]byte(test.css)))
+ if got1 != test.want {
+ t.Errorf("%q: want\n\t%q\nbut got\n\t%q", test.css, test.want, got1)
+ }
+ recoded := cssEscaper(got1)
+ if got2 := string(decodeCSS([]byte(recoded))); got2 != test.want {
+ t.Errorf("%q: escape & decode not dual for %q", test.css, recoded)
+ }
+ }
+}
+
+func TestHexDecode(t *testing.T) {
+ for i := 0; i < 0x200000; i += 101 /* coprime with 16 */ {
+ s := strconv.Itob(i, 16)
+ if got := int(hexDecode([]byte(s))); got != i {
+ t.Errorf("%s: want %d but got %d", s, i, got)
+ }
+ s = strings.ToUpper(s)
+ if got := int(hexDecode([]byte(s))); got != i {
+ t.Errorf("%s: want %d but got %d", s, i, got)
+ }
+ }
+}
+
+func TestSkipCSSSpace(t *testing.T) {
+ tests := []struct {
+ css, want string
+ }{
+ {"", ""},
+ {"foo", "foo"},
+ {"\n", ""},
+ {"\r\n", ""},
+ {"\r", ""},
+ {"\t", ""},
+ {" ", ""},
+ {"\f", ""},
+ {" foo", "foo"},
+ {" foo", " foo"},
+ {`\20`, `\20`},
+ }
+ for _, test := range tests {
+ got := string(skipCSSSpace([]byte(test.css)))
+ if got != test.want {
+ t.Errorf("%q: want %q but got %q", test.css, test.want, got)
+ }
+ }
+}
+
+func TestCSSEscaper(t *testing.T) {
+ input := ("\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f" +
+ "\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" +
+ ` !"#$%&'()*+,-./` +
+ `0123456789:;<=>?` +
+ `@ABCDEFGHIJKLMNO` +
+ `PQRSTUVWXYZ[\]^_` +
+ "`abcdefghijklmno" +
+ "pqrstuvwxyz{|}~\x7f" +
+ "\u00A0\u0100\u2028\u2029\ufeff\U0001D11E")
+
+ want := ("\\0\x01\x02\x03\x04\x05\x06\x07" +
+ "\x08\\9 \\a\x0b\\c \\d\x0E\x0F" +
+ "\x10\x11\x12\x13\x14\x15\x16\x17" +
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" +
+ ` !\22#$%\26\27\28\29*\2b,-.\2f ` +
+ `0123456789\3a\3b\3c=\3e?` +
+ `@ABCDEFGHIJKLMNO` +
+ `PQRSTUVWXYZ[\\]^_` +
+ "`abcdefghijklmno" +
+ `pqrstuvwxyz\7b|\7d~` + "\u007f" +
+ "\u00A0\u0100\u2028\u2029\ufeff\U0001D11E")
+
+ got := cssEscaper(input)
+ if got != want {
+ t.Errorf("encode: want\n\t%q\nbut got\n\t%q", want, got)
+ }
+
+ got = string(decodeCSS([]byte(got)))
+ if input != got {
+ t.Errorf("decode: want\n\t%q\nbut got\n\t%q", input, got)
+ }
+}
+
+func TestCSSValueFilter(t *testing.T) {
+ tests := []struct {
+ css, want string
+ }{
+ {"", ""},
+ {"foo", "foo"},
+ {"0", "0"},
+ {"0px", "0px"},
+ {"-5px", "-5px"},
+ {"1.25in", "1.25in"},
+ {"+.33em", "+.33em"},
+ {"100%", "100%"},
+ {"12.5%", "12.5%"},
+ {".foo", ".foo"},
+ {"#bar", "#bar"},
+ {"corner-radius", "corner-radius"},
+ {"-moz-corner-radius", "-moz-corner-radius"},
+ {"#000", "#000"},
+ {"#48f", "#48f"},
+ {"#123456", "#123456"},
+ {"U+00-FF, U+980-9FF", "U+00-FF, U+980-9FF"},
+ {"color: red", "color: red"},
+ {"<!--", "ZgotmplZ"},
+ {"-->", "ZgotmplZ"},
+ {"<![CDATA[", "ZgotmplZ"},
+ {"]]>", "ZgotmplZ"},
+ {"</style", "ZgotmplZ"},
+ {`"`, "ZgotmplZ"},
+ {`'`, "ZgotmplZ"},
+ {"`", "ZgotmplZ"},
+ {"\x00", "ZgotmplZ"},
+ {"/* foo */", "ZgotmplZ"},
+ {"//", "ZgotmplZ"},
+ {"[href=~", "ZgotmplZ"},
+ {"expression(alert(1337))", "ZgotmplZ"},
+ {"-expression(alert(1337))", "ZgotmplZ"},
+ {"expression", "ZgotmplZ"},
+ {"Expression", "ZgotmplZ"},
+ {"EXPRESSION", "ZgotmplZ"},
+ {"-moz-binding", "ZgotmplZ"},
+ {"-expr\x00ession(alert(1337))", "ZgotmplZ"},
+ {`-expr\0ession(alert(1337))`, "ZgotmplZ"},
+ {`-express\69on(alert(1337))`, "ZgotmplZ"},
+ {`-express\69 on(alert(1337))`, "ZgotmplZ"},
+ {`-exp\72 ession(alert(1337))`, "ZgotmplZ"},
+ {`-exp\52 ession(alert(1337))`, "ZgotmplZ"},
+ {`-exp\000052 ession(alert(1337))`, "ZgotmplZ"},
+ {`-expre\0000073sion`, "-expre\x073sion"},
+ {`@import url evil.css`, "ZgotmplZ"},
+ }
+ for _, test := range tests {
+ got := cssValueFilter(test.css)
+ if got != test.want {
+ t.Errorf("%q: want %q but got %q", test.css, test.want, got)
+ }
+ }
+}
+
+func BenchmarkCSSEscaper(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ cssEscaper("The <i>quick</i>,\r\n<span style='color:brown'>brown</span> fox jumps\u2028over the <canine class=\"lazy\">dog</canine>")
+ }
+}
+
+func BenchmarkCSSEscaperNoSpecials(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ cssEscaper("The quick, brown fox jumps over the lazy dog.")
+ }
+}
+
+func BenchmarkDecodeCSS(b *testing.B) {
+ s := []byte(`The \3c i\3equick\3c/i\3e,\d\A\3cspan style=\27 color:brown\27\3e brown\3c/span\3e fox jumps\2028over the \3c canine class=\22lazy\22 \3edog\3c/canine\3e`)
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ decodeCSS(s)
+ }
+}
+
+func BenchmarkDecodeCSSNoSpecials(b *testing.B) {
+ s := []byte("The quick, brown fox jumps over the lazy dog.")
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ decodeCSS(s)
+ }
+}
+
+func BenchmarkCSSValueFilter(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ cssValueFilter(` e\78preS\0Sio/**/n(alert(1337))`)
+ }
+}
+
+func BenchmarkCSSValueFilterOk(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ cssValueFilter(`Times New Roman`)
+ }
+}
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package template (html/template) is a specialization of package text/template
+that automates the construction of HTML output that is safe against code
+injection.
+
+
+Introduction
+
+This package wraps package template so you can use the standard template API
+to parse and execute templates.
+
+ set, err := new(template.Set).Parse(...)
+ // Error checking elided
+ err = set.Execute(out, "Foo", data)
+
+If successful, set will now be injection-safe. Otherwise, err is an error
+defined in the docs for ErrorCode.
+
+HTML templates treat data values as plain text which should be encoded so they
+can be safely embedded in an HTML document. The escaping is contextual, so
+actions can appear within JavaScript, CSS, and URI contexts.
+
+The security model used by this package assumes that template authors are
+trusted, while Execute's data parameter is not. More details are provided below.
+
+Example
+
+ import "template"
+ ...
+ t, err := (&template.Set{}).Parse(`{{define "T"}}Hello, {{.}}!{{end}}`)
+ err = t.Execute(out, "T", "<script>alert('you have been pwned')</script>")
+
+produces
+
+ Hello, <script>alert('you have been pwned')</script>!
+
+but with contextual autoescaping,
+
+ import "html/template"
+ ...
+ t, err := (&template.Set{}).Parse(`{{define "T"}}Hello, {{.}}!{{end}}`)
+ err = t.Execute(out, "T", "<script>alert('you have been pwned')</script>")
+
+produces safe, escaped HTML output
+
+ Hello, &lt;script&gt;alert(&#39;you have been pwned&#39;)&lt;/script&gt;!
+
+
+Contexts
+
+This package understands HTML, CSS, JavaScript, and URIs. It adds sanitizing
+functions to each simple action pipeline, so given the excerpt
+
+ <a href="/search?q={{.}}">{{.}}</a>
+
+At parse time each {{.}} is overwritten to add escaping functions as necessary,
+in this case,
+
+ <a href="/search?q={{. | urlquery}}">{{. | html}}</a>
+
+
+Errors
+
+See the documentation of ErrorCode for details.
+
+
+A fuller picture
+
+The rest of this package comment may be skipped on first reading; it includes
+details necessary to understand escaping contexts and error messages. Most users
+will not need to understand these details.
+
+
+Contexts
+
+Assuming {{.}} is `O'Reilly: How are <i>you</i>?`, the table below shows
+how {{.}} appears when used in the context to the left.
+
+Context {{.}} After
+{{.}} O'Reilly: How are &lt;i&gt;you&lt;/i&gt;?
+<a title='{{.}}'> O&#39;Reilly: How are you?
+<a href="/{{.}}"> O&#39;Reilly: How are %3ci%3eyou%3c/i%3e?
+<a href="?q={{.}}"> O&#39;Reilly%3a%20How%20are%3ci%3e...%3f
+<a onx='f("{{.}}")'> O\x27Reilly: How are \x3ci\x3eyou...?
+<a onx='f({{.}})'> "O\x27Reilly: How are \x3ci\x3eyou...?"
+<a onx='pattern = /{{.}}/;'> O\x27Reilly: How are \x3ci\x3eyou...\x3f
+
+If used in an unsafe context, then the value might be filtered out:
+
+Context {{.}} After
+<a href="{{.}}"> #ZgotmplZ
+
+since "O'Reilly:" is not an allowed protocol like "http:".
+
+
+If {{.}} is the innocuous word, `left`, then it can appear more widely,
+
+Context {{.}} After
+{{.}} left
+<a title='{{.}}'> left
+<a href='{{.}}'> left
+<a href='/{{.}}'> left
+<a href='?dir={{.}}'> left
+<a style="border-{{.}}: 4px"> left
+<a style="align: {{.}}"> left
+<a style="background: '{{.}}'> left
+<a style="background: url('{{.}}')> left
+<style>p.{{.}} {color:red}</style> left
+
+Non-string values can be used in JavaScript contexts.
+If {{.}} is
+
+ struct{A,B string}{ "foo", "bar" }
+
+in the escaped template
+
+ <script>var pair = {{.}};</script>
+
+then the template output is
+
+ <script>var pair = {"A": "foo", "B": "bar"};</script>
+
+See package json to understand how non-string content is marshalled for
+embedding in JavaScript contexts.
+
+
+Typed Strings
+
+By default, this package assumes that all pipelines produce a plain text string.
+It adds escaping pipeline stages necessary to correctly and safely embed that
+plain text string in the appropriate context.
+
+When a data value is not plain text, you can make sure it is not over-escaped
+by marking it with its type.
+
+Types HTML, JS, URL, and others from content.go can carry safe content that is
+exempted from escaping.
+
+The template
+
+ Hello, {{.}}!
+
+can be invoked with
+
+ tmpl.Execute(out, HTML(`<b>World</b>`))
+
+to produce
+
+ Hello, <b>World</b>!
+
+instead of the
+
+ Hello, &lt;b&gt;World&lt;/b&gt;!
+
+that would have been produced if {{.}} was a regular string.
+
+
+Security Model
+
+http://js-quasis-libraries-and-repl.googlecode.com/svn/trunk/safetemplate.html#problem_definition defines "safe" as used by this package.
+
+This package assumes that template authors are trusted, that Execute's data
+parameter is not, and seeks to preserve the properties below in the face
+of untrusted data:
+
+Structure Preservation Property
+"... when a template author writes an HTML tag in a safe templating language,
+the browser will interpret the corresponding portion of the output as a tag
+regardless of the values of untrusted data, and similarly for other structures
+such as attribute boundaries and JS and CSS string boundaries."
+
+Code Effect Property
+"... only code specified by the template author should run as a result of
+injecting the template output into a page and all code specified by the
+template author should run as a result of the same."
+
+Least Surprise Property
+"A developer (or code reviewer) familiar with HTML, CSS, and JavaScript, who
+knows that contextual autoescaping happens should be able to look at a {{.}}
+and correctly infer what sanitization happens."
+*/
+package template
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+ "fmt"
+)
+
+// Error describes a problem encountered during template Escaping.
+type Error struct {
+ // ErrorCode describes the kind of error.
+ ErrorCode ErrorCode
+ // Name is the name of the template in which the error was encountered.
+ Name string
+ // Line is the line number of the error in the template source or 0.
+ Line int
+ // Description is a human-readable description of the problem.
+ Description string
+}
+
+// ErrorCode is a code for a kind of error.
+type ErrorCode int
+
+// We define codes for each error that manifests while escaping templates, but
+// escaped templates may also fail at runtime.
+//
+// Output: "ZgotmplZ"
+// Example:
+// <img src="{{.X}}">
+// where {{.X}} evaluates to `javascript:...`
+// Discussion:
+// "ZgotmplZ" is a special value that indicates that unsafe content reached a
+// CSS or URL context at runtime. The output of the example will be
+// <img src="#ZgotmplZ">
+// If the data comes from a trusted source, use content types to exempt it
+// from filtering: URL(`javascript:...`).
+const (
+ // OK indicates the lack of an error.
+ OK ErrorCode = iota
+
+ // ErrAmbigContext: "... appears in an ambiguous URL context"
+ // Example:
+ // <a href="
+ // {{if .C}}
+ // /path/
+ // {{else}}
+ // /search?q=
+ // {{end}}
+ // {{.X}}
+ // ">
+ // Discussion:
+ // {{.X}} is in an ambiguous URL context since, depending on {{.C}},
+ // it may be either a URL suffix or a query parameter.
+ // Moving {{.X}} into the condition removes the ambiguity:
+ // <a href="{{if .C}}/path/{{.X}}{{else}}/search?q={{.X}}">
+ ErrAmbigContext
+
+ // ErrBadHTML: "expected space, attr name, or end of tag, but got ...",
+ // "... in unquoted attr", "... in attribute name"
+ // Example:
+ // <a href = /search?q=foo>
+ // <href=foo>
+ // <form na<e=...>
+ // <option selected<
+ // Discussion:
+ // This is often due to a typo in an HTML element, but some runes
+ // are banned in tag names, attribute names, and unquoted attribute
+ // values because they can tickle parser ambiguities.
+ // Quoting all attributes is the best policy.
+ ErrBadHTML
+
+ // ErrBranchEnd: "{{if}} branches end in different contexts"
+ // Example:
+ // {{if .C}}<a href="{{end}}{{.X}}
+ // Discussion:
+ // Package html/template statically examines each path through an
+ // {{if}}, {{range}}, or {{with}} to escape any following pipelines.
+ // The example is ambiguous since {{.X}} might be an HTML text node,
+ // or a URL prefix in an HTML attribute. The context of {{.X}} is
+ // used to figure out how to escape it, but that context depends on
+ // the run-time value of {{.C}} which is not statically known.
+ //
+ // The problem is usually something like missing quotes or angle
+ // brackets, or can be avoided by refactoring to put the two contexts
+ // into different branches of an if, range or with. If the problem
+ // is in a {{range}} over a collection that should never be empty,
+ // adding a dummy {{else}} can help.
+ ErrBranchEnd
+
+ // ErrEndContext: "... ends in a non-text context: ..."
+ // Examples:
+ // <div
+ // <div title="no close quote>
+ // <script>f()
+ // Discussion:
+ // Executed templates should produce a DocumentFragment of HTML.
+ // Templates that end without closing tags will trigger this error.
+ // Templates that should not be used in an HTML context or that
+ // produce incomplete Fragments should not be executed directly.
+ //
+ // {{define "main"}} <script>{{template "helper"}}</script> {{end}}
+ // {{define "helper"}} document.write(' <div title=" ') {{end}}
+ //
+ // "helper" does not produce a valid document fragment, so should
+ // not be Executed directly.
+ ErrEndContext
+
+ // ErrNoSuchTemplate: "no such template ..."
+ // Examples:
+ // {{define "main"}}<div {{template "attrs"}}>{{end}}
+ // {{define "attrs"}}href="{{.URL}}"{{end}}
+ // Discussion:
+ // Package html/template looks through template calls to compute the
+ // context.
+ // Here the {{.URL}} in "attrs" must be treated as a URL when called
+ // from "main", but you will get this error if "attrs" is not defined
+ // when "main" is parsed.
+ ErrNoSuchTemplate
+
+ // ErrOutputContext: "cannot compute output context for template ..."
+ // Examples:
+ // {{define "t"}}{{if .T}}{{template "t" .T}}{{end}}{{.H}}",{{end}}
+ // Discussion:
+ // A recursive template does not end in the same context in which it
+ // starts, and a reliable output context cannot be computed.
+ // Look for typos in the named template.
+ // If the template should not be called in the named start context,
+ // look for calls to that template in unexpected contexts.
+ // Maybe refactor recursive templates to not be recursive.
+ ErrOutputContext
+
+ // ErrPartialCharset: "unfinished JS regexp charset in ..."
+ // Example:
+ // <script>var pattern = /foo[{{.Chars}}]/</script>
+ // Discussion:
+ // Package html/template does not support interpolation into regular
+ // expression literal character sets.
+ ErrPartialCharset
+
+ // ErrPartialEscape: "unfinished escape sequence in ..."
+ // Example:
+ // <script>alert("\{{.X}}")</script>
+ // Discussion:
+ // Package html/template does not support actions following a
+ // backslash.
+ // This is usually an error and there are better solutions; for
+ // example
+ // <script>alert("{{.X}}")</script>
+ // should work, and if {{.X}} is a partial escape sequence such as
+ // "xA0", mark the whole sequence as safe content: JSStr(`\xA0`)
+ ErrPartialEscape
+
+ // ErrRangeLoopReentry: "on range loop re-entry: ..."
+ // Example:
+ // <script>var x = [{{range .}}'{{.}},{{end}}]</script>
+ // Discussion:
+ // If an iteration through a range would cause it to end in a
+ // different context than an earlier pass, there is no single context.
+ // In the example, a quote is missing, so it is not clear
+ // whether {{.}} is meant to be inside a JS string or in a JS value
+ // context. The second iteration would produce something like
+ //
+ // <script>var x = ['firstValue,'secondValue]</script>
+ ErrRangeLoopReentry
+
+ // ErrSlashAmbig: '/' could start a division or regexp.
+ // Example:
+ // <script>
+ // {{if .C}}var x = 1{{end}}
+ // /-{{.N}}/i.test(x) ? doThis : doThat();
+ // </script>
+ // Discussion:
+ // The example above could produce `var x = 1/-2/i.test(s)...`
+ // in which the first '/' is a mathematical division operator or it
+ // could produce `/-2/i.test(s)` in which the first '/' starts a
+ // regexp literal.
+ // Look for missing semicolons inside branches, and maybe add
+ // parentheses to make it clear which interpretation you intend.
+ ErrSlashAmbig
+)
+
+func (e *Error) Error() string {
+ if e.Line != 0 {
+ return fmt.Sprintf("exp/template/html:%s:%d: %s", e.Name, e.Line, e.Description)
+ } else if e.Name != "" {
+ return fmt.Sprintf("exp/template/html:%s: %s", e.Name, e.Description)
+ }
+ return "exp/template/html: " + e.Description
+}
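+
+// For example, an *Error with Name "T", Line 3, and an illustrative
+// Description "bad stuff" formats as
+//	exp/template/html:T:3: bad stuff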
+
+// errorf creates an error given a format string f and args.
+// The template Name still needs to be supplied.
+func errorf(k ErrorCode, line int, f string, args ...interface{}) *Error {
+ return &Error{k, "", line, fmt.Sprintf(f, args...)}
+}
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+ "bytes"
+ "fmt"
+ "html"
+ "text/template"
+ "text/template/parse"
+)
+
+// escape rewrites each action in the template to guarantee that the output is
+// properly escaped.
+func escape(t *template.Template) error {
+ var s template.Set
+ s.Add(t)
+ return escapeSet(&s, t.Name())
+ // TODO: if s contains cloned dependencies due to self-recursion
+ // cross-context, error out.
+}
+
+// escapeSet rewrites the template set to guarantee that the output of any of
+// the named templates is properly escaped.
+// Names should include the names of all templates that might be Executed but
+// need not include helper templates.
+// If no error is returned, then the named templates have been modified.
+// Otherwise the named templates have been rendered unusable.
+func escapeSet(s *template.Set, names ...string) error {
+ e := newEscaper(s)
+ for _, name := range names {
+ c, _ := e.escapeTree(context{}, name, 0)
+ var err error
+ if c.err != nil {
+ err, c.err.Name = c.err, name
+ } else if c.state != stateText {
+ err = &Error{ErrEndContext, name, 0, fmt.Sprintf("ends in a non-text context: %v", c)}
+ }
+ if err != nil {
+ // Prevent execution of unsafe templates.
+ for _, name := range names {
+ if t := s.Template(name); t != nil {
+ t.Tree = nil
+ }
+ }
+ return err
+ }
+ }
+ e.commit()
+ return nil
+}
+
+// funcMap maps command names to functions that render their inputs safe.
+var funcMap = template.FuncMap{
+ "exp_template_html_attrescaper": attrEscaper,
+ "exp_template_html_commentescaper": commentEscaper,
+ "exp_template_html_cssescaper": cssEscaper,
+ "exp_template_html_cssvaluefilter": cssValueFilter,
+ "exp_template_html_htmlnamefilter": htmlNameFilter,
+ "exp_template_html_htmlescaper": htmlEscaper,
+ "exp_template_html_jsregexpescaper": jsRegexpEscaper,
+ "exp_template_html_jsstrescaper": jsStrEscaper,
+ "exp_template_html_jsvalescaper": jsValEscaper,
+ "exp_template_html_nospaceescaper": htmlNospaceEscaper,
+ "exp_template_html_rcdataescaper": rcdataEscaper,
+ "exp_template_html_urlescaper": urlEscaper,
+ "exp_template_html_urlfilter": urlFilter,
+ "exp_template_html_urlnormalizer": urlNormalizer,
+}
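+
+// For instance, an action {{.X}} in ordinary HTML text (stateText) is
+// rewritten by escapeAction and ensurePipelineContains below to behave like
+//	{{.X | exp_template_html_htmlescaper}}
+// while the same action at the start of a double-quoted href value instead
+// picks up exp_template_html_urlfilter, exp_template_html_urlnormalizer, and
+// exp_template_html_attrescaper.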
+
+// equivEscapers matches contextual escapers to equivalent template builtins.
+var equivEscapers = map[string]string{
+ "exp_template_html_attrescaper": "html",
+ "exp_template_html_htmlescaper": "html",
+ "exp_template_html_nospaceescaper": "html",
+ "exp_template_html_rcdataescaper": "html",
+ "exp_template_html_urlescaper": "urlquery",
+ "exp_template_html_urlnormalizer": "urlquery",
+}
+
+// escaper collects type inferences about templates and changes needed to make
+// templates injection safe.
+type escaper struct {
+ // set is the template set being escaped.
+ set *template.Set
+ // output[templateName] is the output context for a templateName that
+ // has been mangled to include its input context.
+ output map[string]context
+ // derived[c.mangle(name)] maps to a template derived from the template
+ // named name for the start context c.
+ derived map[string]*template.Template
+ // called[templateName] is a set of called mangled template names.
+ called map[string]bool
+ // xxxNodeEdits are the accumulated edits to apply during commit.
+ // Such edits are not applied immediately in case a template set
+ // executes a given template in different escaping contexts.
+ actionNodeEdits map[*parse.ActionNode][]string
+ templateNodeEdits map[*parse.TemplateNode]string
+ textNodeEdits map[*parse.TextNode][]byte
+}
+
+// newEscaper creates a blank escaper for the given set.
+func newEscaper(s *template.Set) *escaper {
+ return &escaper{
+ s,
+ map[string]context{},
+ map[string]*template.Template{},
+ map[string]bool{},
+ map[*parse.ActionNode][]string{},
+ map[*parse.TemplateNode]string{},
+ map[*parse.TextNode][]byte{},
+ }
+}
+
+// filterFailsafe is an innocuous word that is emitted in place of unsafe values
+// by sanitizer functions. It is not a keyword in any programming language,
+// contains no special characters, is not empty, and when it appears in output
+// it is distinct enough that a developer can find the source of the problem
+// via a search engine.
+const filterFailsafe = "ZgotmplZ"
+
+// escape escapes a template node.
+func (e *escaper) escape(c context, n parse.Node) context {
+ switch n := n.(type) {
+ case *parse.ActionNode:
+ return e.escapeAction(c, n)
+ case *parse.IfNode:
+ return e.escapeBranch(c, &n.BranchNode, "if")
+ case *parse.ListNode:
+ return e.escapeList(c, n)
+ case *parse.RangeNode:
+ return e.escapeBranch(c, &n.BranchNode, "range")
+ case *parse.TemplateNode:
+ return e.escapeTemplate(c, n)
+ case *parse.TextNode:
+ return e.escapeText(c, n)
+ case *parse.WithNode:
+ return e.escapeBranch(c, &n.BranchNode, "with")
+ }
+ panic("escaping " + n.String() + " is unimplemented")
+}
+
+// escapeAction escapes an action template node.
+func (e *escaper) escapeAction(c context, n *parse.ActionNode) context {
+ if len(n.Pipe.Decl) != 0 {
+ // A local variable assignment, not an interpolation.
+ return c
+ }
+ c = nudge(c)
+ s := make([]string, 0, 3)
+ switch c.state {
+ case stateError:
+ return c
+ case stateURL, stateCSSDqStr, stateCSSSqStr, stateCSSDqURL, stateCSSSqURL, stateCSSURL:
+ switch c.urlPart {
+ case urlPartNone:
+ s = append(s, "exp_template_html_urlfilter")
+ fallthrough
+ case urlPartPreQuery:
+ switch c.state {
+ case stateCSSDqStr, stateCSSSqStr:
+ s = append(s, "exp_template_html_cssescaper")
+ default:
+ s = append(s, "exp_template_html_urlnormalizer")
+ }
+ case urlPartQueryOrFrag:
+ s = append(s, "exp_template_html_urlescaper")
+ case urlPartUnknown:
+ return context{
+ state: stateError,
+ err: errorf(ErrAmbigContext, n.Line, "%s appears in an ambiguous URL context", n),
+ }
+ default:
+ panic(c.urlPart.String())
+ }
+ case stateJS:
+ s = append(s, "exp_template_html_jsvalescaper")
+ // A slash after a value starts a div operator.
+ c.jsCtx = jsCtxDivOp
+ case stateJSDqStr, stateJSSqStr:
+ s = append(s, "exp_template_html_jsstrescaper")
+ case stateJSRegexp:
+ s = append(s, "exp_template_html_jsregexpescaper")
+ case stateCSS:
+ s = append(s, "exp_template_html_cssvaluefilter")
+ case stateText:
+ s = append(s, "exp_template_html_htmlescaper")
+ case stateRCDATA:
+ s = append(s, "exp_template_html_rcdataescaper")
+ case stateAttr:
+ // Handled below in delim check.
+ case stateAttrName, stateTag:
+ c.state = stateAttrName
+ s = append(s, "exp_template_html_htmlnamefilter")
+ default:
+ if isComment(c.state) {
+ s = append(s, "exp_template_html_commentescaper")
+ } else {
+ panic("unexpected state " + c.state.String())
+ }
+ }
+ switch c.delim {
+ case delimNone:
+ // No extra-escaping needed for raw text content.
+ case delimSpaceOrTagEnd:
+ s = append(s, "exp_template_html_nospaceescaper")
+ default:
+ s = append(s, "exp_template_html_attrescaper")
+ }
+ e.editActionNode(n, s)
+ return c
+}
+
+// ensurePipelineContains ensures that the pipeline has commands with
+// the identifiers in s in order.
+// If the pipeline already has some of the sanitizers, do not interfere.
+// For example, if p is (.X | html) and s is ["escapeJSVal", "html"] then it
+// has one matching, "html", and one to insert, "escapeJSVal", to produce
+// (.X | escapeJSVal | html).
+func ensurePipelineContains(p *parse.PipeNode, s []string) {
+ if len(s) == 0 {
+ return
+ }
+ n := len(p.Cmds)
+ // Find the identifiers at the end of the command chain.
+ idents := p.Cmds
+ for i := n - 1; i >= 0; i-- {
+ if cmd := p.Cmds[i]; len(cmd.Args) != 0 {
+ if id, ok := cmd.Args[0].(*parse.IdentifierNode); ok {
+ if id.Ident == "noescape" {
+ return
+ }
+ continue
+ }
+ }
+ idents = p.Cmds[i+1:]
+ }
+ dups := 0
+ for _, id := range idents {
+ if escFnsEq(s[dups], (id.Args[0].(*parse.IdentifierNode)).Ident) {
+ dups++
+ if dups == len(s) {
+ return
+ }
+ }
+ }
+ newCmds := make([]*parse.CommandNode, n-len(idents), n+len(s)-dups)
+ copy(newCmds, p.Cmds)
+ // Merge existing identifier commands with the sanitizers needed.
+ for _, id := range idents {
+ i := indexOfStr((id.Args[0].(*parse.IdentifierNode)).Ident, s, escFnsEq)
+ if i != -1 {
+ for _, name := range s[:i] {
+ newCmds = appendCmd(newCmds, newIdentCmd(name))
+ }
+ s = s[i+1:]
+ }
+ newCmds = appendCmd(newCmds, id)
+ }
+ // Create any remaining sanitizers.
+ for _, name := range s {
+ newCmds = appendCmd(newCmds, newIdentCmd(name))
+ }
+ p.Cmds = newCmds
+}
+
+// redundantFuncs[a][b] implies that funcMap[b](funcMap[a](x)) == funcMap[a](x)
+// for all x.
+var redundantFuncs = map[string]map[string]bool{
+ "exp_template_html_commentescaper": {
+ "exp_template_html_attrescaper": true,
+ "exp_template_html_nospaceescaper": true,
+ "exp_template_html_htmlescaper": true,
+ },
+ "exp_template_html_cssescaper": {
+ "exp_template_html_attrescaper": true,
+ },
+ "exp_template_html_jsregexpescaper": {
+ "exp_template_html_attrescaper": true,
+ },
+ "exp_template_html_jsstrescaper": {
+ "exp_template_html_attrescaper": true,
+ },
+ "exp_template_html_urlescaper": {
+ "exp_template_html_urlnormalizer": true,
+ },
+}
+
+// appendCmd appends the given command to the end of the command pipeline
+// unless it is redundant with the last command.
+func appendCmd(cmds []*parse.CommandNode, cmd *parse.CommandNode) []*parse.CommandNode {
+ if n := len(cmds); n != 0 {
+ last, ok := cmds[n-1].Args[0].(*parse.IdentifierNode)
+ next, _ := cmd.Args[0].(*parse.IdentifierNode)
+ if ok && redundantFuncs[last.Ident][next.Ident] {
+ return cmds
+ }
+ }
+ return append(cmds, cmd)
+}
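+
+// For example, appending "exp_template_html_attrescaper" directly after
+// "exp_template_html_jsstrescaper" is a no-op, since redundantFuncs records
+// that attribute-escaping the output of the JS string escaper changes nothing.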
+
+// indexOfStr is the first i such that eq(s, strs[i]) or -1 if s was not found.
+func indexOfStr(s string, strs []string, eq func(a, b string) bool) int {
+ for i, t := range strs {
+ if eq(s, t) {
+ return i
+ }
+ }
+ return -1
+}
+
+// escFnsEq returns whether the two escaping functions are equivalent.
+func escFnsEq(a, b string) bool {
+ if e := equivEscapers[a]; e != "" {
+ a = e
+ }
+ if e := equivEscapers[b]; e != "" {
+ b = e
+ }
+ return a == b
+}
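+
+// For example,
+//	escFnsEq("exp_template_html_htmlescaper", "html") == true
+// because equivEscapers maps the former to the builtin "html", so
+// ensurePipelineContains will not add htmlescaper to a pipeline that already
+// ends in "| html".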
+
+// newIdentCmd produces a command containing a single identifier node.
+func newIdentCmd(identifier string) *parse.CommandNode {
+ return &parse.CommandNode{
+ NodeType: parse.NodeCommand,
+ Args: []parse.Node{parse.NewIdentifier(identifier)},
+ }
+}
+
+// nudge returns the context that would result from following empty string
+// transitions from the input context.
+// For example, parsing:
+// `<a href=`
+// will end in context{stateBeforeValue, attrURL}, but parsing one extra rune:
+// `<a href=x`
+// will end in context{stateURL, delimSpaceOrTagEnd, ...}.
+// There are two transitions that happen when the 'x' is seen:
+// (1) Transition from a before-value state to a start-of-value state without
+// consuming any character.
+// (2) Consume 'x' and transition past the first value character.
+// In this case, nudging produces the context after (1) happens.
+func nudge(c context) context {
+ switch c.state {
+ case stateTag:
+ // In `<foo {{.}}`, the action should emit an attribute.
+ c.state = stateAttrName
+ case stateBeforeValue:
+ // In `<foo bar={{.}}`, the action is an undelimited value.
+ c.state, c.delim, c.attr = attrStartStates[c.attr], delimSpaceOrTagEnd, attrNone
+ case stateAfterName:
+ // In `<foo bar {{.}}`, the action is an attribute name.
+ c.state, c.attr = stateAttrName, attrNone
+ }
+ return c
+}
+
+// join joins the two contexts of a branch template node. The result is an
+// error context if either of the input contexts is an error context, or if
+// the input contexts differ.
+func join(a, b context, line int, nodeName string) context {
+ if a.state == stateError {
+ return a
+ }
+ if b.state == stateError {
+ return b
+ }
+ if a.eq(b) {
+ return a
+ }
+
+ c := a
+ c.urlPart = b.urlPart
+ if c.eq(b) {
+ // The contexts differ only by urlPart.
+ c.urlPart = urlPartUnknown
+ return c
+ }
+
+ c = a
+ c.jsCtx = b.jsCtx
+ if c.eq(b) {
+ // The contexts differ only by jsCtx.
+ c.jsCtx = jsCtxUnknown
+ return c
+ }
+
+ // Allow a nudged context to join with an unnudged one.
+ // This means that
+ // <p title={{if .C}}{{.}}{{end}}
+ // ends in an unquoted value state even though the else branch
+ // ends in stateBeforeValue.
+ if c, d := nudge(a), nudge(b); !(c.eq(a) && d.eq(b)) {
+ if e := join(c, d, line, nodeName); e.state != stateError {
+ return e
+ }
+ }
+
+ return context{
+ state: stateError,
+ err: errorf(ErrBranchEnd, line, "{{%s}} branches end in different contexts: %v, %v", nodeName, a, b),
+ }
+}
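+
+// For example, joining two non-error contexts that differ only in urlPart,
+// say {stateURL, urlPartPreQuery} and {stateURL, urlPartQueryOrFrag}, yields
+// {stateURL, urlPartUnknown}; joining contexts whose states differ, such as
+// stateJS and stateCSS, produces an ErrBranchEnd error context instead.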
+
+// escapeBranch escapes a branch template node: "if", "range" and "with".
+func (e *escaper) escapeBranch(c context, n *parse.BranchNode, nodeName string) context {
+ c0 := e.escapeList(c, n.List)
+ if nodeName == "range" && c0.state != stateError {
+ // The "true" branch of a "range" node can execute multiple times.
+ // We check that executing n.List once results in the same context
+ // as executing n.List twice.
+ c1, _ := e.escapeListConditionally(c0, n.List, nil)
+ c0 = join(c0, c1, n.Line, nodeName)
+ if c0.state == stateError {
+ // Make clear that this is a problem on loop re-entry
+ // since developers tend to overlook that branch when
+ // debugging templates.
+ c0.err.Line = n.Line
+ c0.err.Description = "on range loop re-entry: " + c0.err.Description
+ return c0
+ }
+ }
+ c1 := e.escapeList(c, n.ElseList)
+ return join(c0, c1, n.Line, nodeName)
+}
+
+// escapeList escapes a list template node.
+func (e *escaper) escapeList(c context, n *parse.ListNode) context {
+ if n == nil {
+ return c
+ }
+ for _, m := range n.Nodes {
+ c = e.escape(c, m)
+ }
+ return c
+}
+
+// escapeListConditionally escapes a list node but only preserves edits and
+// inferences in e if the inferences and output context satisfy filter.
+// It returns the best guess at an output context, and the result of the
+// filter, which is also whether e was updated.
+func (e *escaper) escapeListConditionally(c context, n *parse.ListNode, filter func(*escaper, context) bool) (context, bool) {
+ e1 := newEscaper(e.set)
+ // Make type inferences available to f.
+ for k, v := range e.output {
+ e1.output[k] = v
+ }
+ c = e1.escapeList(c, n)
+ ok := filter != nil && filter(e1, c)
+ if ok {
+ // Copy inferences and edits from e1 back into e.
+ for k, v := range e1.output {
+ e.output[k] = v
+ }
+ for k, v := range e1.derived {
+ e.derived[k] = v
+ }
+ for k, v := range e1.called {
+ e.called[k] = v
+ }
+ for k, v := range e1.actionNodeEdits {
+ e.editActionNode(k, v)
+ }
+ for k, v := range e1.templateNodeEdits {
+ e.editTemplateNode(k, v)
+ }
+ for k, v := range e1.textNodeEdits {
+ e.editTextNode(k, v)
+ }
+ }
+ return c, ok
+}
+
+// escapeTemplate escapes a {{template}} call node.
+func (e *escaper) escapeTemplate(c context, n *parse.TemplateNode) context {
+ c, name := e.escapeTree(c, n.Name, n.Line)
+ if name != n.Name {
+ e.editTemplateNode(n, name)
+ }
+ return c
+}
+
+// escapeTree escapes the named template starting in the given context as
+// necessary and returns its output context.
+func (e *escaper) escapeTree(c context, name string, line int) (context, string) {
+ // Mangle the template name with the input context to produce a reliable
+ // identifier.
+ dname := c.mangle(name)
+ e.called[dname] = true
+ if out, ok := e.output[dname]; ok {
+ // Already escaped.
+ return out, dname
+ }
+ t := e.template(name)
+ if t == nil {
+ return context{
+ state: stateError,
+ err: errorf(ErrNoSuchTemplate, line, "no such template %s", name),
+ }, dname
+ }
+ if dname != name {
+ // Use any template derived during an earlier call to escapeSet
+ // with different top level templates, or clone if necessary.
+ dt := e.template(dname)
+ if dt == nil {
+ dt = template.New(dname)
+ dt.Tree = &parse.Tree{Name: dname, Root: cloneList(t.Root)}
+ e.derived[dname] = dt
+ }
+ t = dt
+ }
+ return e.computeOutCtx(c, t), dname
+}
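+
+// For example, a template "t" first reached inside a <script> element is
+// escaped under a mangled name such as "t$htmltemplate_stateJS_elementScript",
+// the same name that shows up in the ErrOutputContext message exercised by
+// TestErrors in escape_test.go.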
+
+// computeOutCtx takes a template and its start context and computes the output
+// context while storing any inferences in e.
+func (e *escaper) computeOutCtx(c context, t *template.Template) context {
+ // Propagate context over the body.
+ c1, ok := e.escapeTemplateBody(c, t)
+ if !ok {
+ // Look for a fixed point by assuming c1 as the output context.
+ if c2, ok2 := e.escapeTemplateBody(c1, t); ok2 {
+ c1, ok = c2, true
+ }
+ // Use c1 as the error context if neither assumption worked.
+ }
+ if !ok && c1.state != stateError {
+ return context{
+ state: stateError,
+ // TODO: Find the first node with a line in t.Tree.Root
+ err: errorf(ErrOutputContext, 0, "cannot compute output context for template %s", t.Name()),
+ }
+ }
+ return c1
+}
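+
+// Sketch of the fixed-point search above: for a self-recursive template like
+// the "countdown" helper in TestEscapeSet, the first escapeTemplateBody pass
+// assumes the output context equals the input context; if that guess is
+// rejected, one more pass is tried with the first result as the assumed
+// output before giving up with ErrOutputContext.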
+
+// escapeTemplateBody escapes the given template assuming the given output
+// context, and returns the best guess at the output context and whether the
+// assumption was correct.
+func (e *escaper) escapeTemplateBody(c context, t *template.Template) (context, bool) {
+ filter := func(e1 *escaper, c1 context) bool {
+ if c1.state == stateError {
+ // Do not update the input escaper, e.
+ return false
+ }
+ if !e1.called[t.Name()] {
+ // If t is not recursively called, then c1 is an
+ // accurate output context.
+ return true
+ }
+ // c1 is accurate if it matches our assumed output context.
+ return c.eq(c1)
+ }
+ // We need to assume an output context so that recursive template calls
+ // take the fast path out of escapeTree instead of infinitely recursing.
+ // Naively assuming that the input context is the same as the output
+ // works >90% of the time.
+ e.output[t.Name()] = c
+ return e.escapeListConditionally(c, t.Tree.Root, filter)
+}
+
+// delimEnds maps each delim to a string of characters that terminate it.
+var delimEnds = [...]string{
+ delimDoubleQuote: `"`,
+ delimSingleQuote: "'",
+ // Determined empirically by running the below in various browsers.
+ // var div = document.createElement("DIV");
+ // for (var i = 0; i < 0x10000; ++i) {
+ // div.innerHTML = "<span title=x" + String.fromCharCode(i) + "-bar>";
+ // if (div.getElementsByTagName("SPAN")[0].title.indexOf("bar") < 0)
+ // document.write("<p>U+" + i.toString(16));
+ // }
+ delimSpaceOrTagEnd: " \t\n\f\r>",
+}
+
+var doctypeBytes = []byte("<!DOCTYPE")
+
+// escapeText escapes a text template node.
+func (e *escaper) escapeText(c context, n *parse.TextNode) context {
+ s, written, i, b := n.Text, 0, 0, new(bytes.Buffer)
+ for i != len(s) {
+ c1, nread := contextAfterText(c, s[i:])
+ i1 := i + nread
+ if c.state == stateText || c.state == stateRCDATA {
+ end := i1
+ if c1.state != c.state {
+ for j := end - 1; j >= i; j-- {
+ if s[j] == '<' {
+ end = j
+ break
+ }
+ }
+ }
+ for j := i; j < end; j++ {
+ if s[j] == '<' && !bytes.HasPrefix(s[j:], doctypeBytes) {
+ b.Write(s[written:j])
+ b.WriteString("&lt;")
+ written = j + 1
+ }
+ }
+ } else if isComment(c.state) && c.delim == delimNone {
+ switch c.state {
+ case stateJSBlockCmt:
+ // http://es5.github.com/#x7.4:
+ // "Comments behave like white space and are
+ // discarded except that, if a MultiLineComment
+ // contains a line terminator character, then
+ // the entire comment is considered to be a
+ // LineTerminator for purposes of parsing by
+ // the syntactic grammar."
+ if bytes.IndexAny(s[written:i1], "\n\r\u2028\u2029") != -1 {
+ b.WriteByte('\n')
+ } else {
+ b.WriteByte(' ')
+ }
+ case stateCSSBlockCmt:
+ b.WriteByte(' ')
+ }
+ written = i1
+ }
+ if c.state != c1.state && isComment(c1.state) && c1.delim == delimNone {
+ // Preserve the portion between written and the comment start.
+ cs := i1 - 2
+ if c1.state == stateHTMLCmt {
+ // "<!--" instead of "/*" or "//"
+ cs -= 2
+ }
+ b.Write(s[written:cs])
+ written = i1
+ }
+ if i == i1 && c.state == c1.state {
+ panic(fmt.Sprintf("infinite loop from %v to %v on %q..%q", c, c1, s[:i], s[i:]))
+ }
+ c, i = c1, i1
+ }
+
+ if written != 0 && c.state != stateError {
+ if !isComment(c.state) || c.delim != delimNone {
+ b.Write(n.Text[written:])
+ }
+ e.editTextNode(n, b.Bytes())
+ }
+ return c
+}
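+
+// For instance, escaping the text node "a < b" in stateText leaves the
+// context unchanged but rewrites the stray '<' to "&lt;", which is the
+// "HTML normalization 1" case in TestEscape.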
+
+// contextAfterText starts in context c, consumes some tokens from the front of
+// s, then returns the context after those tokens and the number of bytes of s
+// consumed.
+func contextAfterText(c context, s []byte) (context, int) {
+ if c.delim == delimNone {
+ c1, i := tSpecialTagEnd(c, s)
+ if i == 0 {
+ // A special end tag (`</script>`) has been seen and
+ // all content preceding it has been consumed.
+ return c1, 0
+ }
+ // Consider all content up to any end tag.
+ return transitionFunc[c.state](c, s[:i])
+ }
+
+ i := bytes.IndexAny(s, delimEnds[c.delim])
+ if i == -1 {
+ i = len(s)
+ }
+ if c.delim == delimSpaceOrTagEnd {
+ // http://www.w3.org/TR/html5/tokenization.html#attribute-value-unquoted-state
+ // lists the runes below as error characters.
+ // Error out because HTML parsers may differ on whether
+ // "<a id= onclick=f(" ends inside id's or onclick's value,
+ // "<a class=`foo " ends inside a value,
+ // "<a style=font:'Arial'" needs open-quote fixup.
+ // IE treats '`' as a quotation character.
+ if j := bytes.IndexAny(s[:i], "\"'<=`"); j >= 0 {
+ return context{
+ state: stateError,
+ err: errorf(ErrBadHTML, 0, "%q in unquoted attr: %q", s[j:j+1], s[:i]),
+ }, len(s)
+ }
+ }
+ if i == len(s) {
+ // Remain inside the attribute.
+ // Decode the value so non-HTML rules can easily handle
+ // <button onclick="alert(&quot;Hi!&quot;)">
+ // without having to entity decode token boundaries.
+ for u := []byte(html.UnescapeString(string(s))); len(u) != 0; {
+ c1, i1 := transitionFunc[c.state](c, u)
+ c, u = c1, u[i1:]
+ }
+ return c, len(s)
+ }
+ if c.delim != delimSpaceOrTagEnd {
+ // Consume any quote.
+ i++
+ }
+ // On exiting an attribute, we discard all state information except
+ // the element; the state is reset to stateTag.
+ return context{state: stateTag, element: c.element}, i
+}
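+
+// For example, starting in context{state: stateURL, delim: delimDoubleQuote}
+// with input `foo" bar`, the closing quote is found at index 3 and consumed,
+// and the function returns (context{state: stateTag, element: c.element}, 4).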
+
+// editActionNode records a change to an action pipeline for later commit.
+func (e *escaper) editActionNode(n *parse.ActionNode, cmds []string) {
+ if _, ok := e.actionNodeEdits[n]; ok {
+ panic(fmt.Sprintf("node %s shared between templates", n))
+ }
+ e.actionNodeEdits[n] = cmds
+}
+
+// editTemplateNode records a change to a {{template}} callee for later commit.
+func (e *escaper) editTemplateNode(n *parse.TemplateNode, callee string) {
+ if _, ok := e.templateNodeEdits[n]; ok {
+ panic(fmt.Sprintf("node %s shared between templates", n))
+ }
+ e.templateNodeEdits[n] = callee
+}
+
+// editTextNode records a change to a text node for later commit.
+func (e *escaper) editTextNode(n *parse.TextNode, text []byte) {
+ if _, ok := e.textNodeEdits[n]; ok {
+ panic(fmt.Sprintf("node %s shared between templates", n))
+ }
+ e.textNodeEdits[n] = text
+}
+
+// commit applies changes to actions and template calls needed to contextually
+// autoescape content and adds any derived templates to the set.
+func (e *escaper) commit() {
+ for name, _ := range e.output {
+ e.template(name).Funcs(funcMap)
+ }
+ for _, t := range e.derived {
+ e.set.Add(t)
+ }
+ for n, s := range e.actionNodeEdits {
+ ensurePipelineContains(n.Pipe, s)
+ }
+ for n, name := range e.templateNodeEdits {
+ n.Name = name
+ }
+ for n, s := range e.textNodeEdits {
+ n.Text = s
+ }
+}
+
+// template returns the named template given a mangled template name.
+func (e *escaper) template(name string) *template.Template {
+ t := e.set.Template(name)
+ if t == nil {
+ t = e.derived[name]
+ }
+ return t
+}
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "strings"
+ "testing"
+ "text/template"
+ "text/template/parse"
+)
+
+type badMarshaler struct{}
+
+func (x *badMarshaler) MarshalJSON() ([]byte, error) {
+ // Keys in valid JSON must be double quoted, as must all strings.
+ return []byte("{ foo: 'not quite valid JSON' }"), nil
+}
+
+type goodMarshaler struct{}
+
+func (x *goodMarshaler) MarshalJSON() ([]byte, error) {
+ return []byte(`{ "<foo>": "O'Reilly" }`), nil
+}
+
+func TestEscape(t *testing.T) {
+ var data = struct {
+ F, T bool
+ C, G, H string
+ A, E []string
+ B, M json.Marshaler
+ N int
+ Z *int
+ W HTML
+ }{
+ F: false,
+ T: true,
+ C: "<Cincinatti>",
+ G: "<Goodbye>",
+ H: "<Hello>",
+ A: []string{"<a>", "<b>"},
+ E: []string{},
+ N: 42,
+ B: &badMarshaler{},
+ M: &goodMarshaler{},
+ Z: nil,
+ W: HTML(`¡<b class="foo">Hello</b>, <textarea>O'World</textarea>!`),
+ }
+
+ tests := []struct {
+ name string
+ input string
+ output string
+ }{
+ {
+ "if",
+ "{{if .T}}Hello{{end}}, {{.C}}!",
+ "Hello, &lt;Cincinatti&gt;!",
+ },
+ {
+ "else",
+ "{{if .F}}{{.H}}{{else}}{{.G}}{{end}}!",
+ "&lt;Goodbye&gt;!",
+ },
+ {
+ "overescaping1",
+ "Hello, {{.C | html}}!",
+ "Hello, &lt;Cincinatti&gt;!",
+ },
+ {
+ "overescaping2",
+ "Hello, {{html .C}}!",
+ "Hello, &lt;Cincinatti&gt;!",
+ },
+ {
+ "overescaping3",
+ "{{with .C}}{{$msg := .}}Hello, {{$msg}}!{{end}}",
+ "Hello, &lt;Cincinatti&gt;!",
+ },
+ {
+ "assignment",
+ "{{if $x := .H}}{{$x}}{{end}}",
+ "&lt;Hello&gt;",
+ },
+ {
+ "withBody",
+ "{{with .H}}{{.}}{{end}}",
+ "&lt;Hello&gt;",
+ },
+ {
+ "withElse",
+ "{{with .E}}{{.}}{{else}}{{.H}}{{end}}",
+ "&lt;Hello&gt;",
+ },
+ {
+ "rangeBody",
+ "{{range .A}}{{.}}{{end}}",
+ "&lt;a&gt;&lt;b&gt;",
+ },
+ {
+ "rangeElse",
+ "{{range .E}}{{.}}{{else}}{{.H}}{{end}}",
+ "&lt;Hello&gt;",
+ },
+ {
+ "nonStringValue",
+ "{{.T}}",
+ "true",
+ },
+ {
+ "constant",
+ `<a href="/search?q={{"'a<b'"}}">`,
+ `<a href="/search?q=%27a%3cb%27">`,
+ },
+ {
+ "multipleAttrs",
+ "<a b=1 c={{.H}}>",
+ "<a b=1 c=&lt;Hello&gt;>",
+ },
+ {
+ "urlStartRel",
+ `<a href='{{"/foo/bar?a=b&c=d"}}'>`,
+ `<a href='/foo/bar?a=b&amp;c=d'>`,
+ },
+ {
+ "urlStartAbsOk",
+ `<a href='{{"http://example.com/foo/bar?a=b&c=d"}}'>`,
+ `<a href='http://example.com/foo/bar?a=b&amp;c=d'>`,
+ },
+ {
+ "protocolRelativeURLStart",
+ `<a href='{{"//example.com:8000/foo/bar?a=b&c=d"}}'>`,
+ `<a href='//example.com:8000/foo/bar?a=b&amp;c=d'>`,
+ },
+ {
+ "pathRelativeURLStart",
+ `<a href="{{"/javascript:80/foo/bar"}}">`,
+ `<a href="/javascript:80/foo/bar">`,
+ },
+ {
+ "dangerousURLStart",
+ `<a href='{{"javascript:alert(%22pwned%22)"}}'>`,
+ `<a href='#ZgotmplZ'>`,
+ },
+ {
+ "dangerousURLStart2",
+ `<a href=' {{"javascript:alert(%22pwned%22)"}}'>`,
+ `<a href=' #ZgotmplZ'>`,
+ },
+ {
+ "nonHierURL",
+ `<a href={{"mailto:Muhammed \"The Greatest\" Ali <m.ali@example.com>"}}>`,
+ `<a href=mailto:Muhammed%20%22The%20Greatest%22%20Ali%20%3cm.ali@example.com%3e>`,
+ },
+ {
+ "urlPath",
+ `<a href='http://{{"javascript:80"}}/foo'>`,
+ `<a href='http://javascript:80/foo'>`,
+ },
+ {
+ "urlQuery",
+ `<a href='/search?q={{.H}}'>`,
+ `<a href='/search?q=%3cHello%3e'>`,
+ },
+ {
+ "urlFragment",
+ `<a href='/faq#{{.H}}'>`,
+ `<a href='/faq#%3cHello%3e'>`,
+ },
+ {
+ "urlBranch",
+ `<a href="{{if .F}}/foo?a=b{{else}}/bar{{end}}">`,
+ `<a href="/bar">`,
+ },
+ {
+ "urlBranchConflictMoot",
+ `<a href="{{if .T}}/foo?a={{else}}/bar#{{end}}{{.C}}">`,
+ `<a href="/foo?a=%3cCincinatti%3e">`,
+ },
+ {
+ "jsStrValue",
+ "<button onclick='alert({{.H}})'>",
+ `<button onclick='alert(&#34;\u003cHello\u003e&#34;)'>`,
+ },
+ {
+ "jsNumericValue",
+ "<button onclick='alert({{.N}})'>",
+ `<button onclick='alert( 42 )'>`,
+ },
+ {
+ "jsBoolValue",
+ "<button onclick='alert({{.T}})'>",
+ `<button onclick='alert( true )'>`,
+ },
+ {
+ "jsNilValue",
+ "<button onclick='alert(typeof{{.Z}})'>",
+ `<button onclick='alert(typeof null )'>`,
+ },
+ {
+ "jsObjValue",
+ "<button onclick='alert({{.A}})'>",
+ `<button onclick='alert([&#34;\u003ca\u003e&#34;,&#34;\u003cb\u003e&#34;])'>`,
+ },
+ {
+ "jsObjValueScript",
+ "<script>alert({{.A}})</script>",
+ `<script>alert(["\u003ca\u003e","\u003cb\u003e"])</script>`,
+ },
+ {
+ "jsObjValueNotOverEscaped",
+ "<button onclick='alert({{.A | html}})'>",
+ `<button onclick='alert([&#34;\u003ca\u003e&#34;,&#34;\u003cb\u003e&#34;])'>`,
+ },
+ {
+ "jsStr",
+ "<button onclick='alert(&quot;{{.H}}&quot;)'>",
+ `<button onclick='alert(&quot;\x3cHello\x3e&quot;)'>`,
+ },
+ {
+ "badMarshaller",
+ `<button onclick='alert(1/{{.B}}in numbers)'>`,
+ `<button onclick='alert(1/ /* json: error calling MarshalJSON for type *template.badMarshaler: invalid character &#39;f&#39; looking for beginning of object key string */null in numbers)'>`,
+ },
+ {
+ "jsMarshaller",
+ `<button onclick='alert({{.M}})'>`,
+ `<button onclick='alert({&#34;&lt;foo&gt;&#34;:&#34;O&#39;Reilly&#34;})'>`,
+ },
+ {
+ "jsStrNotUnderEscaped",
+ "<button onclick='alert({{.C | urlquery}})'>",
+ // URL escaped, then quoted for JS.
+ `<button onclick='alert(&#34;%3CCincinatti%3E&#34;)'>`,
+ },
+ {
+ "jsRe",
+ `<button onclick='alert(/{{"foo+bar"}}/.test(""))'>`,
+ `<button onclick='alert(/foo\x2bbar/.test(""))'>`,
+ },
+ {
+ "jsReBlank",
+ `<script>alert(/{{""}}/.test(""));</script>`,
+ `<script>alert(/(?:)/.test(""));</script>`,
+ },
+ {
+ "jsReAmbigOk",
+ `<script>{{if true}}var x = 1{{end}}</script>`,
+ // The {if} ends in an ambiguous jsCtx but there is
+ // no slash following so we shouldn't care.
+ `<script>var x = 1</script>`,
+ },
+ {
+ "styleBidiKeywordPassed",
+ `<p style="dir: {{"ltr"}}">`,
+ `<p style="dir: ltr">`,
+ },
+ {
+ "styleBidiPropNamePassed",
+ `<p style="border-{{"left"}}: 0; border-{{"right"}}: 1in">`,
+ `<p style="border-left: 0; border-right: 1in">`,
+ },
+ {
+ "styleExpressionBlocked",
+ `<p style="width: {{"expression(alert(1337))"}}">`,
+ `<p style="width: ZgotmplZ">`,
+ },
+ {
+ "styleTagSelectorPassed",
+ `<style>{{"p"}} { color: pink }</style>`,
+ `<style>p { color: pink }</style>`,
+ },
+ {
+ "styleIDPassed",
+ `<style>p{{"#my-ID"}} { font: Arial }</style>`,
+ `<style>p#my-ID { font: Arial }</style>`,
+ },
+ {
+ "styleClassPassed",
+ `<style>p{{".my_class"}} { font: Arial }</style>`,
+ `<style>p.my_class { font: Arial }</style>`,
+ },
+ {
+ "styleQuantityPassed",
+ `<a style="left: {{"2em"}}; top: {{0}}">`,
+ `<a style="left: 2em; top: 0">`,
+ },
+ {
+ "stylePctPassed",
+ `<table style=width:{{"100%"}}>`,
+ `<table style=width:100%>`,
+ },
+ {
+ "styleColorPassed",
+ `<p style="color: {{"#8ff"}}; background: {{"#000"}}">`,
+ `<p style="color: #8ff; background: #000">`,
+ },
+ {
+ "styleObfuscatedExpressionBlocked",
+ `<p style="width: {{" e\78preS\0Sio/**/n(alert(1337))"}}">`,
+ `<p style="width: ZgotmplZ">`,
+ },
+ {
+ "styleMozBindingBlocked",
+ `<p style="{{"-moz-binding(alert(1337))"}}: ...">`,
+ `<p style="ZgotmplZ: ...">`,
+ },
+ {
+ "styleObfuscatedMozBindingBlocked",
+ `<p style="{{" -mo\7a-B\0I/**/nding(alert(1337))"}}: ...">`,
+ `<p style="ZgotmplZ: ...">`,
+ },
+ {
+ "styleFontNameString",
+ `<p style='font-family: "{{"Times New Roman"}}"'>`,
+ `<p style='font-family: "Times New Roman"'>`,
+ },
+ {
+ "styleFontNameString",
+ `<p style='font-family: "{{"Times New Roman"}}", "{{"sans-serif"}}"'>`,
+ `<p style='font-family: "Times New Roman", "sans-serif"'>`,
+ },
+ {
+ "styleFontNameUnquoted",
+ `<p style='font-family: {{"Times New Roman"}}'>`,
+ `<p style='font-family: Times New Roman'>`,
+ },
+ {
+ "styleURLQueryEncoded",
+ `<p style="background: url(/img?name={{"O'Reilly Animal(1)<2>.png"}})">`,
+ `<p style="background: url(/img?name=O%27Reilly%20Animal%281%29%3c2%3e.png)">`,
+ },
+ {
+ "styleQuotedURLQueryEncoded",
+ `<p style="background: url('/img?name={{"O'Reilly Animal(1)<2>.png"}}')">`,
+ `<p style="background: url('/img?name=O%27Reilly%20Animal%281%29%3c2%3e.png')">`,
+ },
+ {
+ "styleStrQueryEncoded",
+ `<p style="background: '/img?name={{"O'Reilly Animal(1)<2>.png"}}'">`,
+ `<p style="background: '/img?name=O%27Reilly%20Animal%281%29%3c2%3e.png'">`,
+ },
+ {
+ "styleURLBadProtocolBlocked",
+ `<a style="background: url('{{"javascript:alert(1337)"}}')">`,
+ `<a style="background: url('#ZgotmplZ')">`,
+ },
+ {
+ "styleStrBadProtocolBlocked",
+ `<a style="background: '{{"vbscript:alert(1337)"}}'">`,
+ `<a style="background: '#ZgotmplZ'">`,
+ },
+ {
+ "styleStrEncodedProtocolEncoded",
+ `<a style="background: '{{"javascript\\3a alert(1337)"}}'">`,
+ // The CSS string 'javascript\\3a alert(1337)' does not contain a colon.
+ `<a style="background: 'javascript\\3a alert\28 1337\29 '">`,
+ },
+ {
+ "styleURLGoodProtocolPassed",
+ `<a style="background: url('{{"http://oreilly.com/O'Reilly Animals(1)<2>;{}.html"}}')">`,
+ `<a style="background: url('http://oreilly.com/O%27Reilly%20Animals%281%29%3c2%3e;%7b%7d.html')">`,
+ },
+ {
+ "styleStrGoodProtocolPassed",
+ `<a style="background: '{{"http://oreilly.com/O'Reilly Animals(1)<2>;{}.html"}}'">`,
+ `<a style="background: 'http\3a\2f\2foreilly.com\2fO\27Reilly Animals\28 1\29\3c 2\3e\3b\7b\7d.html'">`,
+ },
+ {
+ "styleURLEncodedForHTMLInAttr",
+ `<a style="background: url('{{"/search?img=foo&size=icon"}}')">`,
+ `<a style="background: url('/search?img=foo&amp;size=icon')">`,
+ },
+ {
+ "styleURLNotEncodedForHTMLInCdata",
+ `<style>body { background: url('{{"/search?img=foo&size=icon"}}') }</style>`,
+ `<style>body { background: url('/search?img=foo&size=icon') }</style>`,
+ },
+ {
+ "styleURLMixedCase",
+ `<p style="background: URL(#{{.H}})">`,
+ `<p style="background: URL(#%3cHello%3e)">`,
+ },
+ {
+ "stylePropertyPairPassed",
+ `<a style='{{"color: red"}}'>`,
+ `<a style='color: red'>`,
+ },
+ {
+ "styleStrSpecialsEncoded",
+ `<a style="font-family: '{{"/**/'\";:// \\"}}', "{{"/**/'\";:// \\"}}"">`,
+ `<a style="font-family: '\2f**\2f\27\22\3b\3a\2f\2f \\', "\2f**\2f\27\22\3b\3a\2f\2f \\"">`,
+ },
+ {
+ "styleURLSpecialsEncoded",
+ `<a style="border-image: url({{"/**/'\";:// \\"}}), url("{{"/**/'\";:// \\"}}"), url('{{"/**/'\";:// \\"}}'), 'http://www.example.com/?q={{"/**/'\";:// \\"}}''">`,
+ `<a style="border-image: url(/**/%27%22;://%20%5c), url("/**/%27%22;://%20%5c"), url('/**/%27%22;://%20%5c'), 'http://www.example.com/?q=%2f%2a%2a%2f%27%22%3b%3a%2f%2f%20%5c''">`,
+ },
+ {
+ "HTML comment",
+ "<b>Hello, <!-- name of world -->{{.C}}</b>",
+ "<b>Hello, &lt;Cincinatti&gt;</b>",
+ },
+ {
+ "HTML comment not first < in text node.",
+ "<<!-- -->!--",
+ "&lt;!--",
+ },
+ {
+ "HTML normalization 1",
+ "a < b",
+ "a &lt; b",
+ },
+ {
+ "HTML normalization 2",
+ "a << b",
+ "a &lt;&lt; b",
+ },
+ {
+ "HTML normalization 3",
+ "a<<!-- --><!-- -->b",
+ "a&lt;b",
+ },
+ {
+ "HTML doctype not normalized",
+ "<!DOCTYPE html>Hello, World!",
+ "<!DOCTYPE html>Hello, World!",
+ },
+ {
+ "No doctype injection",
+ `<!{{"DOCTYPE"}}`,
+ "&lt;!DOCTYPE",
+ },
+ {
+ "Split HTML comment",
+ "<b>Hello, <!-- name of {{if .T}}city -->{{.C}}{{else}}world -->{{.W}}{{end}}</b>",
+ "<b>Hello, &lt;Cincinatti&gt;</b>",
+ },
+ {
+ "JS line comment",
+ "<script>for (;;) { if (c()) break// foo not a label\n" +
+ "foo({{.T}});}</script>",
+ "<script>for (;;) { if (c()) break\n" +
+ "foo( true );}</script>",
+ },
+ {
+ "JS multiline block comment",
+ "<script>for (;;) { if (c()) break/* foo not a label\n" +
+ " */foo({{.T}});}</script>",
+ // Newline separates break from call. If newline
+ // removed, then break will consume label leaving
+ // code invalid.
+ "<script>for (;;) { if (c()) break\n" +
+ "foo( true );}</script>",
+ },
+ {
+ "JS single-line block comment",
+ "<script>for (;;) {\n" +
+ "if (c()) break/* foo a label */foo;" +
+ "x({{.T}});}</script>",
+ // Newline separates break from call. If newline
+ // removed, then break will consume label leaving
+ // code invalid.
+ "<script>for (;;) {\n" +
+ "if (c()) break foo;" +
+ "x( true );}</script>",
+ },
+ {
+ "JS block comment flush with mathematical division",
+ "<script>var a/*b*//c\nd</script>",
+ "<script>var a /c\nd</script>",
+ },
+ {
+ "JS mixed comments",
+ "<script>var a/*b*///c\nd</script>",
+ "<script>var a \nd</script>",
+ },
+ {
+ "CSS comments",
+ "<style>p// paragraph\n" +
+ `{border: 1px/* color */{{"#00f"}}}</style>`,
+ "<style>p\n" +
+ "{border: 1px #00f}</style>",
+ },
+ {
+ "JS attr block comment",
+ `<a onclick="f(&quot;&quot;); /* alert({{.H}}) */">`,
+ // Attribute comment tests should pass if the comments
+ // are successfully elided.
+ `<a onclick="f(&quot;&quot;); /* alert() */">`,
+ },
+ {
+ "JS attr line comment",
+ `<a onclick="// alert({{.G}})">`,
+ `<a onclick="// alert()">`,
+ },
+ {
+ "CSS attr block comment",
+ `<a style="/* color: {{.H}} */">`,
+ `<a style="/* color: */">`,
+ },
+ {
+ "CSS attr line comment",
+ `<a style="// color: {{.G}}">`,
+ `<a style="// color: ">`,
+ },
+ {
+ "HTML substitution commented out",
+ "<p><!-- {{.H}} --></p>",
+ "<p></p>",
+ },
+ {
+ "Comment ends flush with start",
+ "<!--{{.}}--><script>/*{{.}}*///{{.}}\n</script><style>/*{{.}}*///{{.}}\n</style><a onclick='/*{{.}}*///{{.}}' style='/*{{.}}*///{{.}}'>",
+ "<script> \n</script><style> \n</style><a onclick='/**///' style='/**///'>",
+ },
+ {
+ "typed HTML in text",
+ `{{.W}}`,
+ `¡<b class="foo">Hello</b>, <textarea>O'World</textarea>!`,
+ },
+ {
+ "typed HTML in attribute",
+ `<div title="{{.W}}">`,
+ `<div title="¡Hello, O&#39;World!">`,
+ },
+ {
+ "typed HTML in script",
+ `<button onclick="alert({{.W}})">`,
+ `<button onclick="alert("&iexcl;\u003cb class=\"foo\"\u003eHello\u003c/b\u003e, \u003ctextarea\u003eO'World\u003c/textarea\u003e!")">`,
+ },
+ {
+ "typed HTML in RCDATA",
+ `<textarea>{{.W}}</textarea>`,
+ `<textarea>¡&lt;b class=&#34;foo&#34;&gt;Hello&lt;/b&gt;, &lt;textarea&gt;O&#39;World&lt;/textarea&gt;!</textarea>`,
+ },
+ {
+ "range in textarea",
+ "<textarea>{{range .A}}{{.}}{{end}}</textarea>",
+ "<textarea>&lt;a&gt;&lt;b&gt;</textarea>",
+ },
+ {
+ "auditable exemption from escaping",
+ "{{range .A}}{{. | noescape}}{{end}}",
+ "<a><b>",
+ },
+ {
+ "No tag injection",
+ `{{"10$"}}<{{"script src,evil.org/pwnd.js"}}...`,
+ `10$&lt;script src,evil.org/pwnd.js...`,
+ },
+ {
+ "No comment injection",
+ `<{{"!--"}}`,
+ `&lt;!--`,
+ },
+ {
+ "No RCDATA end tag injection",
+ `<textarea><{{"/textarea "}}...</textarea>`,
+ `<textarea>&lt;/textarea ...</textarea>`,
+ },
+ {
+ "optional attrs",
+ `<img class="{{"iconClass"}}"` +
+ `{{if .T}} id="{{"<iconId>"}}"{{end}}` +
+ // Double quotes inside if/else.
+ ` src=` +
+ `{{if .T}}"?{{"<iconPath>"}}"` +
+ `{{else}}"images/cleardot.gif"{{end}}` +
+ // Missing space before title, but it is not a
+ // part of the src attribute.
+ `{{if .T}}title="{{"<title>"}}"{{end}}` +
+ // Quotes outside if/else.
+ ` alt="` +
+ `{{if .T}}{{"<alt>"}}` +
+ `{{else}}{{if .F}}{{"<title>"}}{{end}}` +
+ `{{end}}"` +
+ `>`,
+ `<img class="iconClass" id="&lt;iconId&gt;" src="?%3ciconPath%3e"title="&lt;title&gt;" alt="&lt;alt&gt;">`,
+ },
+ {
+ "conditional valueless attr name",
+ `<input{{if .T}} checked{{end}} name=n>`,
+ `<input checked name=n>`,
+ },
+ {
+ "conditional dynamic valueless attr name 1",
+ `<input{{if .T}} {{"checked"}}{{end}} name=n>`,
+ `<input checked name=n>`,
+ },
+ {
+ "conditional dynamic valueless attr name 2",
+ `<input {{if .T}}{{"checked"}} {{end}}name=n>`,
+ `<input checked name=n>`,
+ },
+ {
+ "dynamic attribute name",
+ `<img on{{"load"}}="alert({{"loaded"}})">`,
+ // Treated as JS since quotes are inserted.
+ `<img onload="alert(&#34;loaded&#34;)">`,
+ },
+ {
+ "bad dynamic attribute name 1",
+ // Allow checked, selected, disabled, but not JS or
+ // CSS attributes.
+ `<input {{"onchange"}}="{{"doEvil()"}}">`,
+ `<input ZgotmplZ="doEvil()">`,
+ },
+ {
+ "bad dynamic attribute name 2",
+ `<div {{"sTyle"}}="{{"color: expression(alert(1337))"}}">`,
+ `<div ZgotmplZ="color: expression(alert(1337))">`,
+ },
+ {
+ "bad dynamic attribute name 3",
+ // Allow title or alt, but not a URL.
+ `<img {{"src"}}="{{"javascript:doEvil()"}}">`,
+ `<img ZgotmplZ="javascript:doEvil()">`,
+ },
+ {
+ "bad dynamic attribute name 4",
+ // Structure preservation requires values to associate
+ // with a consistent attribute.
+ `<input checked {{""}}="Whose value am I?">`,
+ `<input checked ZgotmplZ="Whose value am I?">`,
+ },
+ {
+ "dynamic element name",
+ `<h{{3}}><table><t{{"head"}}>...</h{{3}}>`,
+ `<h3><table><thead>...</h3>`,
+ },
+ {
+ "bad dynamic element name",
+ // Dynamic element names are typically used to switch
+ // between (thead, tfoot, tbody), (ul, ol), (th, td),
+ // and other replaceable sets.
+ // We do not currently easily support (ul, ol).
+ // If we do change to support that, this test should
+ // catch failures to filter out special tag names which
+ // would violate the structure preservation property --
+ // if any special tag name could be substituted, then
+ // the content could be raw text/RCDATA for some inputs
+ // and regular HTML content for others.
+ `<{{"script"}}>{{"doEvil()"}}</{{"script"}}>`,
+ `&lt;script>doEvil()&lt;/script>`,
+ },
+ }
+
+ for _, test := range tests {
+ tmpl := New(test.name)
+ // TODO: Move noescape into template/func.go
+ tmpl.Funcs(template.FuncMap{
+ "noescape": func(a ...interface{}) string {
+ return fmt.Sprint(a...)
+ },
+ })
+ tmpl = Must(tmpl.Parse(test.input))
+ b := new(bytes.Buffer)
+ if err := tmpl.Execute(b, data); err != nil {
+ t.Errorf("%s: template execution failed: %s", test.name, err)
+ continue
+ }
+ if w, g := test.output, b.String(); w != g {
+ t.Errorf("%s: escaped output: want\n\t%q\ngot\n\t%q", test.name, w, g)
+ continue
+ }
+ }
+}
+
+func TestEscapeSet(t *testing.T) {
+ type dataItem struct {
+ Children []*dataItem
+ X string
+ }
+
+ data := dataItem{
+ Children: []*dataItem{
+ &dataItem{X: "foo"},
+ &dataItem{X: "<bar>"},
+ &dataItem{
+ Children: []*dataItem{
+ &dataItem{X: "baz"},
+ },
+ },
+ },
+ }
+
+ tests := []struct {
+ inputs map[string]string
+ want string
+ }{
+ // The trivial set.
+ {
+ map[string]string{
+ "main": ``,
+ },
+ ``,
+ },
+ // A template called in the start context.
+ {
+ map[string]string{
+ "main": `Hello, {{template "helper"}}!`,
+ // Not a valid top level HTML template.
+ // "<b" is not a full tag.
+ "helper": `{{"<World>"}}`,
+ },
+ `Hello, &lt;World&gt;!`,
+ },
+ // A template called in a context other than the start.
+ {
+ map[string]string{
+ "main": `<a onclick='a = {{template "helper"}};'>`,
+ // Not a valid top level HTML template.
+ // "<b" is not a full tag.
+ "helper": `{{"<a>"}}<b`,
+ },
+ `<a onclick='a = &#34;\u003ca\u003e&#34;<b;'>`,
+ },
+ // A recursive template that ends in its start context.
+ {
+ map[string]string{
+ "main": `{{range .Children}}{{template "main" .}}{{else}}{{.X}} {{end}}`,
+ },
+ `foo &lt;bar&gt; baz `,
+ },
+ // A recursive helper template that ends in its start context.
+ {
+ map[string]string{
+ "main": `{{template "helper" .}}`,
+ "helper": `{{if .Children}}<ul>{{range .Children}}<li>{{template "main" .}}</li>{{end}}</ul>{{else}}{{.X}}{{end}}`,
+ },
+ `<ul><li>foo</li><li>&lt;bar&gt;</li><li><ul><li>baz</li></ul></li></ul>`,
+ },
+ // Co-recursive templates that end in its start context.
+ {
+ map[string]string{
+ "main": `<blockquote>{{range .Children}}{{template "helper" .}}{{end}}</blockquote>`,
+ "helper": `{{if .Children}}{{template "main" .}}{{else}}{{.X}}<br>{{end}}`,
+ },
+ `<blockquote>foo<br>&lt;bar&gt;<br><blockquote>baz<br></blockquote></blockquote>`,
+ },
+ // A template that is called in two different contexts.
+ {
+ map[string]string{
+ "main": `<button onclick="title='{{template "helper"}}'; ...">{{template "helper"}}</button>`,
+ "helper": `{{11}} of {{"<100>"}}`,
+ },
+ `<button onclick="title='11 of \x3c100\x3e'; ...">11 of &lt;100&gt;</button>`,
+ },
+ // A non-recursive template that ends in a different context.
+ // helper starts in jsCtxRegexp and ends in jsCtxDivOp.
+ {
+ map[string]string{
+ "main": `<script>var x={{template "helper"}}/{{"42"}};</script>`,
+ "helper": "{{126}}",
+ },
+ `<script>var x= 126 /"42";</script>`,
+ },
+ // A recursive template that ends in a similar context.
+ {
+ map[string]string{
+ "main": `<script>var x=[{{template "countdown" 4}}];</script>`,
+ "countdown": `{{.}}{{if .}},{{template "countdown" . | pred}}{{end}}`,
+ },
+ `<script>var x=[ 4 , 3 , 2 , 1 , 0 ];</script>`,
+ },
+ // A recursive template that ends in a different context.
+ /*
+ {
+ map[string]string{
+ "main": `<a href="/foo{{template "helper" .}}">`,
+ "helper": `{{if .Children}}{{range .Children}}{{template "helper" .}}{{end}}{{else}}?x={{.X}}{{end}}`,
+ },
+ `<a href="/foo?x=foo?x=%3cbar%3e?x=baz">`,
+ },
+ */
+ }
+
+ // pred is a template function that returns the predecessor of a
+ // natural number for testing recursive templates.
+ fns := template.FuncMap{"pred": func(a ...interface{}) (interface{}, error) {
+ if len(a) == 1 {
+ if i, _ := a[0].(int); i > 0 {
+ return i - 1, nil
+ }
+ }
+ return nil, fmt.Errorf("undefined pred(%v)", a)
+ }}
+
+ for _, test := range tests {
+ source := ""
+ for name, body := range test.inputs {
+ source += fmt.Sprintf("{{define %q}}%s{{end}} ", name, body)
+ }
+ s := &Set{}
+ s.Funcs(fns)
+ s.Parse(source)
+ var b bytes.Buffer
+
+ if err := s.Execute(&b, "main", data); err != nil {
+ t.Errorf("%q executing %v", err.Error(), s.Template("main"))
+ continue
+ }
+ if got := b.String(); test.want != got {
+ t.Errorf("want\n\t%q\ngot\n\t%q", test.want, got)
+ }
+ }
+
+}
+
+func TestErrors(t *testing.T) {
+ tests := []struct {
+ input string
+ err string
+ }{
+ // Non-error cases.
+ {
+ "{{if .Cond}}<a>{{else}}<b>{{end}}",
+ "",
+ },
+ {
+ "{{if .Cond}}<a>{{end}}",
+ "",
+ },
+ {
+ "{{if .Cond}}{{else}}<b>{{end}}",
+ "",
+ },
+ {
+ "{{with .Cond}}<div>{{end}}",
+ "",
+ },
+ {
+ "{{range .Items}}<a>{{end}}",
+ "",
+ },
+ {
+ "<a href='/foo?{{range .Items}}&{{.K}}={{.V}}{{end}}'>",
+ "",
+ },
+ // Error cases.
+ {
+ "{{if .Cond}}<a{{end}}",
+ "z:1: {{if}} branches",
+ },
+ {
+ "{{if .Cond}}\n{{else}}\n<a{{end}}",
+ "z:1: {{if}} branches",
+ },
+ {
+ // Missing quote in the else branch.
+ `{{if .Cond}}<a href="foo">{{else}}<a href="bar>{{end}}`,
+ "z:1: {{if}} branches",
+ },
+ {
+ // Different kind of attribute: href implies a URL.
+ "<a {{if .Cond}}href='{{else}}title='{{end}}{{.X}}'>",
+ "z:1: {{if}} branches",
+ },
+ {
+ "\n{{with .X}}<a{{end}}",
+ "z:2: {{with}} branches",
+ },
+ {
+ "\n{{with .X}}<a>{{else}}<a{{end}}",
+ "z:2: {{with}} branches",
+ },
+ {
+ "{{range .Items}}<a{{end}}",
+ `z:1: on range loop re-entry: "<" in attribute name: "<a"`,
+ },
+ {
+ "\n{{range .Items}} x='<a{{end}}",
+ "z:2: on range loop re-entry: {{range}} branches",
+ },
+ {
+ "<a b=1 c={{.H}}",
+ "z: ends in a non-text context: {stateAttr delimSpaceOrTagEnd",
+ },
+ {
+ "<script>foo();",
+ "z: ends in a non-text context: {stateJS",
+ },
+ {
+ `<a href="{{if .F}}/foo?a={{else}}/bar/{{end}}{{.H}}">`,
+ "z:1: (action: [(command: [F=[H]])]) appears in an ambiguous URL context",
+ },
+ {
+ `<a onclick="alert('Hello \`,
+ `unfinished escape sequence in JS string: "Hello \\"`,
+ },
+ {
+ `<a onclick='alert("Hello\, World\`,
+ `unfinished escape sequence in JS string: "Hello\\, World\\"`,
+ },
+ {
+ `<a onclick='alert(/x+\`,
+ `unfinished escape sequence in JS string: "x+\\"`,
+ },
+ {
+ `<a onclick="/foo[\]/`,
+ `unfinished JS regexp charset: "foo[\\]/"`,
+ },
+ {
+ // It is ambiguous whether 1.5 should be 1\.5 or 1.5.
+ // Either `var x = 1/- 1.5 /i.test(x)`
+ // where `i.test(x)` is a method call of reference i,
+ // or `/-1\.5/i.test(x)` which is a method call on a
+ // case insensitive regular expression.
+ `<script>{{if false}}var x = 1{{end}}/-{{"1.5"}}/i.test(x)</script>`,
+ `'/' could start a division or regexp: "/-"`,
+ },
+ {
+ `{{template "foo"}}`,
+ "z:1: no such template foo",
+ },
+ {
+ `{{define "z"}}<div{{template "y"}}>{{end}}` +
+ // Illegal starting in stateTag but not in stateText.
+ `{{define "y"}} foo<b{{end}}`,
+ `"<" in attribute name: " foo<b"`,
+ },
+ {
+ `{{define "z"}}<script>reverseList = [{{template "t"}}]</script>{{end}}` +
+ // Missing " after recursive call.
+ `{{define "t"}}{{if .Tail}}{{template "t" .Tail}}{{end}}{{.Head}}",{{end}}`,
+ `: cannot compute output context for template t$htmltemplate_stateJS_elementScript`,
+ },
+ {
+ `<input type=button value=onclick=>`,
+ `exp/template/html:z: "=" in unquoted attr: "onclick="`,
+ },
+ {
+ `<input type=button value= onclick=>`,
+ `exp/template/html:z: "=" in unquoted attr: "onclick="`,
+ },
+ {
+ `<input type=button value= 1+1=2>`,
+ `exp/template/html:z: "=" in unquoted attr: "1+1=2"`,
+ },
+ {
+ "<a class=`foo>",
+ "exp/template/html:z: \"`\" in unquoted attr: \"`foo\"",
+ },
+ {
+ `<a style=font:'Arial'>`,
+ `exp/template/html:z: "'" in unquoted attr: "font:'Arial'"`,
+ },
+ {
+ `<a=foo>`,
+ `: expected space, attr name, or end of tag, but got "=foo>"`,
+ },
+ }
+
+ for _, test := range tests {
+ var err error
+ buf := new(bytes.Buffer)
+ if strings.HasPrefix(test.input, "{{define") {
+ var s *Set
+ s, err = (&Set{}).Parse(test.input)
+ if err == nil {
+ err = s.Execute(buf, "z", nil)
+ }
+ } else {
+ var t *Template
+ t, err = New("z").Parse(test.input)
+ if err == nil {
+ err = t.Execute(buf, nil)
+ }
+ }
+ var got string
+ if err != nil {
+ got = err.Error()
+ }
+ if test.err == "" {
+ if got != "" {
+ t.Errorf("input=%q: unexpected error %q", test.input, got)
+ }
+ continue
+ }
+ if strings.Index(got, test.err) == -1 {
+ t.Errorf("input=%q: error\n\t%q\ndoes not contain expected string\n\t%q", test.input, got, test.err)
+ continue
+ }
+ }
+}
+
+func TestEscapeText(t *testing.T) {
+ tests := []struct {
+ input string
+ output context
+ }{
+ {
+ ``,
+ context{},
+ },
+ {
+ `Hello, World!`,
+ context{},
+ },
+ {
+ // An orphaned "<" is OK.
+ `I <3 Ponies!`,
+ context{},
+ },
+ {
+ `<a`,
+ context{state: stateTag},
+ },
+ {
+ `<a `,
+ context{state: stateTag},
+ },
+ {
+ `<a>`,
+ context{state: stateText},
+ },
+ {
+ `<a href`,
+ context{state: stateAttrName, attr: attrURL},
+ },
+ {
+ `<a on`,
+ context{state: stateAttrName, attr: attrScript},
+ },
+ {
+ `<a href `,
+ context{state: stateAfterName, attr: attrURL},
+ },
+ {
+ `<a style = `,
+ context{state: stateBeforeValue, attr: attrStyle},
+ },
+ {
+ `<a href=`,
+ context{state: stateBeforeValue, attr: attrURL},
+ },
+ {
+ `<a href=x`,
+ context{state: stateURL, delim: delimSpaceOrTagEnd, urlPart: urlPartPreQuery},
+ },
+ {
+ `<a href=x `,
+ context{state: stateTag},
+ },
+ {
+ `<a href=>`,
+ context{state: stateText},
+ },
+ {
+ `<a href=x>`,
+ context{state: stateText},
+ },
+ {
+ `<a href ='`,
+ context{state: stateURL, delim: delimSingleQuote},
+ },
+ {
+ `<a href=''`,
+ context{state: stateTag},
+ },
+ {
+ `<a href= "`,
+ context{state: stateURL, delim: delimDoubleQuote},
+ },
+ {
+ `<a href=""`,
+ context{state: stateTag},
+ },
+ {
+ `<a title="`,
+ context{state: stateAttr, delim: delimDoubleQuote},
+ },
+ {
+ `<a HREF='http:`,
+ context{state: stateURL, delim: delimSingleQuote, urlPart: urlPartPreQuery},
+ },
+ {
+ `<a Href='/`,
+ context{state: stateURL, delim: delimSingleQuote, urlPart: urlPartPreQuery},
+ },
+ {
+ `<a href='"`,
+ context{state: stateURL, delim: delimSingleQuote, urlPart: urlPartPreQuery},
+ },
+ {
+ `<a href="'`,
+ context{state: stateURL, delim: delimDoubleQuote, urlPart: urlPartPreQuery},
+ },
+ {
+ `<a href='&apos;`,
+ context{state: stateURL, delim: delimSingleQuote, urlPart: urlPartPreQuery},
+ },
+ {
+ `<a href="&quot;`,
+ context{state: stateURL, delim: delimDoubleQuote, urlPart: urlPartPreQuery},
+ },
+ {
+ `<a href="&#34;`,
+ context{state: stateURL, delim: delimDoubleQuote, urlPart: urlPartPreQuery},
+ },
+ {
+ `<a href=&quot;`,
+ context{state: stateURL, delim: delimSpaceOrTagEnd, urlPart: urlPartPreQuery},
+ },
+ {
+ `<img alt="1">`,
+ context{state: stateText},
+ },
+ {
+ `<img alt="1>"`,
+ context{state: stateTag},
+ },
+ {
+ `<img alt="1>">`,
+ context{state: stateText},
+ },
+ {
+ `<input checked type="checkbox"`,
+ context{state: stateTag},
+ },
+ {
+ `<a onclick="`,
+ context{state: stateJS, delim: delimDoubleQuote},
+ },
+ {
+ `<a onclick="//foo`,
+ context{state: stateJSLineCmt, delim: delimDoubleQuote},
+ },
+ {
+ "<a onclick='//\n",
+ context{state: stateJS, delim: delimSingleQuote},
+ },
+ {
+ "<a onclick='//\r\n",
+ context{state: stateJS, delim: delimSingleQuote},
+ },
+ {
+ "<a onclick='//\u2028",
+ context{state: stateJS, delim: delimSingleQuote},
+ },
+ {
+ `<a onclick="/*`,
+ context{state: stateJSBlockCmt, delim: delimDoubleQuote},
+ },
+ {
+ `<a onclick="/*/`,
+ context{state: stateJSBlockCmt, delim: delimDoubleQuote},
+ },
+ {
+ `<a onclick="/**/`,
+ context{state: stateJS, delim: delimDoubleQuote},
+ },
+ {
+ `<a onkeypress="&quot;`,
+ context{state: stateJSDqStr, delim: delimDoubleQuote},
+ },
+ {
+ `<a onclick='"foo"`,
+ context{state: stateJS, delim: delimSingleQuote, jsCtx: jsCtxDivOp},
+ },
+ {
+ `<a onclick='foo'`,
+ context{state: stateJS, delim: delimSpaceOrTagEnd, jsCtx: jsCtxDivOp},
+ },
+ {
+ `<a onclick='foo`,
+ context{state: stateJSSqStr, delim: delimSpaceOrTagEnd},
+ },
+ {
+ `<a onclick="&quot;foo'`,
+ context{state: stateJSDqStr, delim: delimDoubleQuote},
+ },
+ {
+ `<a onclick="'foo&quot;`,
+ context{state: stateJSSqStr, delim: delimDoubleQuote},
+ },
+ {
+ `<A ONCLICK="'`,
+ context{state: stateJSSqStr, delim: delimDoubleQuote},
+ },
+ {
+ `<a onclick="/`,
+ context{state: stateJSRegexp, delim: delimDoubleQuote},
+ },
+ {
+ `<a onclick="'foo'`,
+ context{state: stateJS, delim: delimDoubleQuote, jsCtx: jsCtxDivOp},
+ },
+ {
+ `<a onclick="'foo\'`,
+ context{state: stateJSSqStr, delim: delimDoubleQuote},
+ },
+ {
+ `<a onclick="'foo\'`,
+ context{state: stateJSSqStr, delim: delimDoubleQuote},
+ },
+ {
+ `<a onclick="/foo/`,
+ context{state: stateJS, delim: delimDoubleQuote, jsCtx: jsCtxDivOp},
+ },
+ {
+ `<script>/foo/ /=`,
+ context{state: stateJS, element: elementScript},
+ },
+ {
+ `<a onclick="1 /foo`,
+ context{state: stateJS, delim: delimDoubleQuote, jsCtx: jsCtxDivOp},
+ },
+ {
+ `<a onclick="1 /*c*/ /foo`,
+ context{state: stateJS, delim: delimDoubleQuote, jsCtx: jsCtxDivOp},
+ },
+ {
+ `<a onclick="/foo[/]`,
+ context{state: stateJSRegexp, delim: delimDoubleQuote},
+ },
+ {
+ `<a onclick="/foo\/`,
+ context{state: stateJSRegexp, delim: delimDoubleQuote},
+ },
+ {
+ `<a onclick="/foo/`,
+ context{state: stateJS, delim: delimDoubleQuote, jsCtx: jsCtxDivOp},
+ },
+ {
+ `<input checked style="`,
+ context{state: stateCSS, delim: delimDoubleQuote},
+ },
+ {
+ `<a style="//`,
+ context{state: stateCSSLineCmt, delim: delimDoubleQuote},
+ },
+ {
+ `<a style="//</script>`,
+ context{state: stateCSSLineCmt, delim: delimDoubleQuote},
+ },
+ {
+ "<a style='//\n",
+ context{state: stateCSS, delim: delimSingleQuote},
+ },
+ {
+ "<a style='//\r",
+ context{state: stateCSS, delim: delimSingleQuote},
+ },
+ {
+ `<a style="/*`,
+ context{state: stateCSSBlockCmt, delim: delimDoubleQuote},
+ },
+ {
+ `<a style="/*/`,
+ context{state: stateCSSBlockCmt, delim: delimDoubleQuote},
+ },
+ {
+ `<a style="/**/`,
+ context{state: stateCSS, delim: delimDoubleQuote},
+ },
+ {
+ `<a style="background: '`,
+ context{state: stateCSSSqStr, delim: delimDoubleQuote},
+ },
+ {
+ `<a style="background: &quot;`,
+ context{state: stateCSSDqStr, delim: delimDoubleQuote},
+ },
+ {
+ `<a style="background: '/foo?img=`,
+ context{state: stateCSSSqStr, delim: delimDoubleQuote, urlPart: urlPartQueryOrFrag},
+ },
+ {
+ `<a style="background: '/`,
+ context{state: stateCSSSqStr, delim: delimDoubleQuote, urlPart: urlPartPreQuery},
+ },
+ {
+ `<a style="background: url(&quot;/`,
+ context{state: stateCSSDqURL, delim: delimDoubleQuote, urlPart: urlPartPreQuery},
+ },
+ {
+ `<a style="background: url('/`,
+ context{state: stateCSSSqURL, delim: delimDoubleQuote, urlPart: urlPartPreQuery},
+ },
+ {
+ `<a style="background: url('/)`,
+ context{state: stateCSSSqURL, delim: delimDoubleQuote, urlPart: urlPartPreQuery},
+ },
+ {
+ `<a style="background: url('/ `,
+ context{state: stateCSSSqURL, delim: delimDoubleQuote, urlPart: urlPartPreQuery},
+ },
+ {
+ `<a style="background: url(/`,
+ context{state: stateCSSURL, delim: delimDoubleQuote, urlPart: urlPartPreQuery},
+ },
+ {
+ `<a style="background: url( `,
+ context{state: stateCSSURL, delim: delimDoubleQuote},
+ },
+ {
+ `<a style="background: url( /image?name=`,
+ context{state: stateCSSURL, delim: delimDoubleQuote, urlPart: urlPartQueryOrFrag},
+ },
+ {
+ `<a style="background: url(x)`,
+ context{state: stateCSS, delim: delimDoubleQuote},
+ },
+ {
+ `<a style="background: url('x'`,
+ context{state: stateCSS, delim: delimDoubleQuote},
+ },
+ {
+ `<a style="background: url( x `,
+ context{state: stateCSS, delim: delimDoubleQuote},
+ },
+ {
+ `<!-- foo`,
+ context{state: stateHTMLCmt},
+ },
+ {
+ `<!-->`,
+ context{state: stateHTMLCmt},
+ },
+ {
+ `<!--->`,
+ context{state: stateHTMLCmt},
+ },
+ {
+ `<!-- foo -->`,
+ context{state: stateText},
+ },
+ {
+ `<script`,
+ context{state: stateTag, element: elementScript},
+ },
+ {
+ `<script `,
+ context{state: stateTag, element: elementScript},
+ },
+ {
+ `<script src="foo.js" `,
+ context{state: stateTag, element: elementScript},
+ },
+ {
+ `<script src='foo.js' `,
+ context{state: stateTag, element: elementScript},
+ },
+ {
+ `<script type=text/javascript `,
+ context{state: stateTag, element: elementScript},
+ },
+ {
+ `<script>foo`,
+ context{state: stateJS, jsCtx: jsCtxDivOp, element: elementScript},
+ },
+ {
+ `<script>foo</script>`,
+ context{state: stateText},
+ },
+ {
+ `<script>foo</script><!--`,
+ context{state: stateHTMLCmt},
+ },
+ {
+ `<script>document.write("<p>foo</p>");`,
+ context{state: stateJS, element: elementScript},
+ },
+ {
+ `<script>document.write("<p>foo<\/script>");`,
+ context{state: stateJS, element: elementScript},
+ },
+ {
+ `<script>document.write("<script>alert(1)</script>");`,
+ context{state: stateText},
+ },
+ {
+ `<Script>`,
+ context{state: stateJS, element: elementScript},
+ },
+ {
+ `<SCRIPT>foo`,
+ context{state: stateJS, jsCtx: jsCtxDivOp, element: elementScript},
+ },
+ {
+ `<textarea>value`,
+ context{state: stateRCDATA, element: elementTextarea},
+ },
+ {
+ `<textarea>value</TEXTAREA>`,
+ context{state: stateText},
+ },
+ {
+ `<textarea name=html><b`,
+ context{state: stateRCDATA, element: elementTextarea},
+ },
+ {
+ `<title>value`,
+ context{state: stateRCDATA, element: elementTitle},
+ },
+ {
+ `<style>value`,
+ context{state: stateCSS, element: elementStyle},
+ },
+ {
+ `<a xlink:href`,
+ context{state: stateAttrName, attr: attrURL},
+ },
+ {
+ `<a xmlns`,
+ context{state: stateAttrName, attr: attrURL},
+ },
+ {
+ `<a xmlns:foo`,
+ context{state: stateAttrName, attr: attrURL},
+ },
+ {
+ `<a xmlnsxyz`,
+ context{state: stateAttrName},
+ },
+ {
+ `<a data-url`,
+ context{state: stateAttrName, attr: attrURL},
+ },
+ {
+ `<a data-iconUri`,
+ context{state: stateAttrName, attr: attrURL},
+ },
+ {
+ `<a data-urlItem`,
+ context{state: stateAttrName, attr: attrURL},
+ },
+ {
+ `<a g:`,
+ context{state: stateAttrName},
+ },
+ {
+ `<a g:url`,
+ context{state: stateAttrName, attr: attrURL},
+ },
+ {
+ `<a g:iconUri`,
+ context{state: stateAttrName, attr: attrURL},
+ },
+ {
+ `<a g:urlItem`,
+ context{state: stateAttrName, attr: attrURL},
+ },
+ {
+ `<a g:value`,
+ context{state: stateAttrName},
+ },
+ {
+ `<a svg:style='`,
+ context{state: stateCSS, delim: delimSingleQuote},
+ },
+ {
+ `<svg:font-face`,
+ context{state: stateTag},
+ },
+ {
+ `<svg:a svg:onclick="`,
+ context{state: stateJS, delim: delimDoubleQuote},
+ },
+ }
+
+ for _, test := range tests {
+ b, e := []byte(test.input), newEscaper(nil)
+ c := e.escapeText(context{}, &parse.TextNode{parse.NodeText, b})
+ if !test.output.eq(c) {
+ t.Errorf("input %q: want context\n\t%v\ngot\n\t%v", test.input, test.output, c)
+ continue
+ }
+ if test.input != string(b) {
+ t.Errorf("input %q: text node was modified: want %q got %q", test.input, test.input, b)
+ continue
+ }
+ }
+}
+
+func TestEnsurePipelineContains(t *testing.T) {
+ tests := []struct {
+ input, output string
+ ids []string
+ }{
+ {
+ "{{.X}}",
+ "[(command: [F=[X]])]",
+ []string{},
+ },
+ {
+ "{{.X | html}}",
+ "[(command: [F=[X]]) (command: [I=html])]",
+ []string{},
+ },
+ {
+ "{{.X}}",
+ "[(command: [F=[X]]) (command: [I=html])]",
+ []string{"html"},
+ },
+ {
+ "{{.X | html}}",
+ "[(command: [F=[X]]) (command: [I=html]) (command: [I=urlquery])]",
+ []string{"urlquery"},
+ },
+ {
+ "{{.X | html | urlquery}}",
+ "[(command: [F=[X]]) (command: [I=html]) (command: [I=urlquery])]",
+ []string{"urlquery"},
+ },
+ {
+ "{{.X | html | urlquery}}",
+ "[(command: [F=[X]]) (command: [I=html]) (command: [I=urlquery])]",
+ []string{"html", "urlquery"},
+ },
+ {
+ "{{.X | html | urlquery}}",
+ "[(command: [F=[X]]) (command: [I=html]) (command: [I=urlquery])]",
+ []string{"html"},
+ },
+ {
+ "{{.X | urlquery}}",
+ "[(command: [F=[X]]) (command: [I=html]) (command: [I=urlquery])]",
+ []string{"html", "urlquery"},
+ },
+ {
+ "{{.X | html | print}}",
+ "[(command: [F=[X]]) (command: [I=urlquery]) (command: [I=html]) (command: [I=print])]",
+ []string{"urlquery", "html"},
+ },
+ }
+ for _, test := range tests {
+ tmpl := template.Must(template.New("test").Parse(test.input))
+ action, ok := (tmpl.Tree.Root.Nodes[0].(*parse.ActionNode))
+ if !ok {
+ t.Errorf("First node is not an action: %s", test.input)
+ continue
+ }
+ pipe := action.Pipe
+ ensurePipelineContains(pipe, test.ids)
+ got := pipe.String()
+ if got != test.output {
+ t.Errorf("%s, %v: want\n\t%s\ngot\n\t%s", test.input, test.ids, test.output, got)
+ }
+ }
+}
+
+func TestEscapeErrorsNotIgnorable(t *testing.T) {
+ var b bytes.Buffer
+ tmpl, _ := New("dangerous").Parse("<a")
+ err := tmpl.Execute(&b, nil)
+ if err == nil {
+ t.Errorf("Expected error")
+ } else if b.Len() != 0 {
+ t.Errorf("Emitted output despite escaping failure")
+ }
+}
+
+func TestEscapeSetErrorsNotIgnorable(t *testing.T) {
+ var b bytes.Buffer
+ s, err := (&Set{}).Parse(`{{define "t"}}<a{{end}}`)
+ if err != nil {
+ t.Errorf("failed to parse set: %q", err)
+ }
+ err = s.Execute(&b, "t", nil)
+ if err == nil {
+ t.Errorf("Expected error")
+ } else if b.Len() != 0 {
+ t.Errorf("Emitted output despite escaping failure")
+ }
+}
+
+func TestRedundantFuncs(t *testing.T) {
+ inputs := []interface{}{
+ "\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f" +
+ "\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" +
+ ` !"#$%&'()*+,-./` +
+ `0123456789:;<=>?` +
+ `@ABCDEFGHIJKLMNO` +
+ `PQRSTUVWXYZ[\]^_` +
+ "`abcdefghijklmno" +
+ "pqrstuvwxyz{|}~\x7f" +
+ "\u00A0\u0100\u2028\u2029\ufeff\ufdec\ufffd\uffff\U0001D11E" +
+ "&%22\\",
+ CSS(`a[href =~ "//example.com"]#foo`),
+ HTML(`Hello, <b>World</b> &tc!`),
+ HTMLAttr(` dir="ltr"`),
+ JS(`c && alert("Hello, World!");`),
+ JSStr(`Hello, World & O'Reilly\x21`),
+ URL(`greeting=H%69&addressee=(World)`),
+ }
+
+ for n0, m := range redundantFuncs {
+ f0 := funcMap[n0].(func(...interface{}) string)
+ for n1, _ := range m {
+ f1 := funcMap[n1].(func(...interface{}) string)
+ for _, input := range inputs {
+ want := f0(input)
+ if got := f1(want); want != got {
+ t.Errorf("%s %s with %T %q: want\n\t%q,\ngot\n\t%q", n0, n1, input, input, want, got)
+ }
+ }
+ }
+ }
+}
+
+func BenchmarkEscapedExecute(b *testing.B) {
+ tmpl := Must(New("t").Parse(`<a onclick="alert('{{.}}')">{{.}}</a>`))
+ var buf bytes.Buffer
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ tmpl.Execute(&buf, "foo & 'bar' & baz")
+ buf.Reset()
+ }
+}
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+ "bytes"
+ "fmt"
+ "strings"
+ "unicode/utf8"
+)
+
+// htmlNospaceEscaper escapes for inclusion in unquoted attribute values.
+func htmlNospaceEscaper(args ...interface{}) string {
+ s, t := stringify(args...)
+ if t == contentTypeHTML {
+ return htmlReplacer(stripTags(s), htmlNospaceNormReplacementTable, false)
+ }
+ return htmlReplacer(s, htmlNospaceReplacementTable, false)
+}
+
+// attrEscaper escapes for inclusion in quoted attribute values.
+func attrEscaper(args ...interface{}) string {
+ s, t := stringify(args...)
+ if t == contentTypeHTML {
+ return htmlReplacer(stripTags(s), htmlNormReplacementTable, true)
+ }
+ return htmlReplacer(s, htmlReplacementTable, true)
+}
+
+// rcdataEscaper escapes for inclusion in an RCDATA element body.
+func rcdataEscaper(args ...interface{}) string {
+ s, t := stringify(args...)
+ if t == contentTypeHTML {
+ return htmlReplacer(s, htmlNormReplacementTable, true)
+ }
+ return htmlReplacer(s, htmlReplacementTable, true)
+}
+
+// htmlEscaper escapes for inclusion in HTML text.
+func htmlEscaper(args ...interface{}) string {
+ s, t := stringify(args...)
+ if t == contentTypeHTML {
+ return s
+ }
+ return htmlReplacer(s, htmlReplacementTable, true)
+}
+
+// htmlReplacementTable contains the runes that need to be escaped
+// inside a quoted attribute value or in a text node.
+var htmlReplacementTable = []string{
+ // http://www.w3.org/TR/html5/tokenization.html#attribute-value-unquoted-state: "
+ // U+0000 NULL Parse error. Append a U+FFFD REPLACEMENT
+ // CHARACTER character to the current attribute's value.
+ // "
+ // and similarly
+ // http://www.w3.org/TR/html5/tokenization.html#before-attribute-value-state
+ 0: "\uFFFD",
+ '"': "&#34;",
+ '&': "&amp;",
+ '\'': "&#39;",
+ '+': "&#43;",
+ '<': "&lt;",
+ '>': "&gt;",
+}
+
+// htmlNormReplacementTable is like htmlReplacementTable but without '&' to
+// avoid over-encoding existing entities.
+var htmlNormReplacementTable = []string{
+ 0: "\uFFFD",
+ '"': "&#34;",
+ '\'': "&#39;",
+ '+': "&#43;",
+ '<': "&lt;",
+ '>': "&gt;",
+}
+
+// htmlNospaceReplacementTable contains the runes that need to be escaped
+// inside an unquoted attribute value.
+// The set of runes escaped is the union of the HTML specials and
+// those determined by running the JS below in browsers:
+// <div id=d></div>
+// <script>(function () {
+// var a = [], d = document.getElementById("d"), i, c, s;
+// for (i = 0; i < 0x10000; ++i) {
+// c = String.fromCharCode(i);
+// d.innerHTML = "<span title=" + c + "lt" + c + "></span>"
+// s = d.getElementsByTagName("SPAN")[0];
+// if (!s || s.title !== c + "lt" + c) { a.push(i.toString(16)); }
+// }
+// document.write(a.join(", "));
+// })()</script>
+var htmlNospaceReplacementTable = []string{
+ 0: "&#xfffd;",
+ '\t': "&#9;",
+ '\n': "&#10;",
+ '\v': "&#11;",
+ '\f': "&#12;",
+ '\r': "&#13;",
+ ' ': "&#32;",
+ '"': "&#34;",
+ '&': "&amp;",
+ '\'': "&#39;",
+ '+': "&#43;",
+ '<': "&lt;",
+ '=': "&#61;",
+ '>': "&gt;",
+ // A parse error in the attribute value (unquoted) and
+ // before attribute value states.
+ // Treated as a quoting character by IE.
+ '`': "&#96;",
+}
+
+// htmlNospaceNormReplacementTable is like htmlNospaceReplacementTable but
+// without '&' to avoid over-encoding existing entities.
+var htmlNospaceNormReplacementTable = []string{
+ 0: "&#xfffd;",
+ '\t': "&#9;",
+ '\n': "&#10;",
+ '\v': "&#11;",
+ '\f': "&#12;",
+ '\r': "&#13;",
+ ' ': "&#32;",
+ '"': "&#34;",
+ '\'': "&#39;",
+ '+': "&#43;",
+ '<': "&lt;",
+ '=': "&#61;",
+ '>': "&gt;",
+ // A parse error in the attribute value (unquoted) and
+ // before attribute value states.
+ // Treated as a quoting character by IE.
+ '`': "&#96;",
+}
+
+// htmlReplacer returns s with runes replaced according to replacementTable
+// and when badRunes is true, certain bad runes are allowed through unescaped.
+func htmlReplacer(s string, replacementTable []string, badRunes bool) string {
+ written, b := 0, new(bytes.Buffer)
+ for i, r := range s {
+ if int(r) < len(replacementTable) {
+ if repl := replacementTable[r]; len(repl) != 0 {
+ b.WriteString(s[written:i])
+ b.WriteString(repl)
+ // Valid as long as replacementTable doesn't
+ // include anything above 0x7f.
+ written = i + utf8.RuneLen(r)
+ }
+ } else if badRunes {
+ // No-op.
+ // IE does not allow these ranges in unquoted attrs.
+ } else if 0xfdd0 <= r && r <= 0xfdef || 0xfff0 <= r && r <= 0xffff {
+ fmt.Fprintf(b, "%s&#x%x;", s[written:i], r)
+ written = i + utf8.RuneLen(r)
+ }
+ }
+ if written == 0 {
+ return s
+ }
+ b.WriteString(s[written:])
+ return b.String()
+}
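+
+// For example, htmlReplacer(`O'Reilly & <b>`, htmlReplacementTable, true)
+// returns "O&#39;Reilly &amp; &lt;b&gt;", while htmlNormReplacementTable
+// leaves the '&' alone to avoid over-encoding existing entities.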
+
+// stripTags takes a snippet of HTML and returns only the text content.
+// For example, `<b>¡Hi!</b> <script>...</script>` -> `¡Hi! `.
+func stripTags(html string) string {
+ var b bytes.Buffer
+ s, c, i, allText := []byte(html), context{}, 0, true
+ // Using the transition funcs helps us avoid mangling
+ // `<div title="1>2">` or `I <3 Ponies!`.
+ for i != len(s) {
+ if c.delim == delimNone {
+ st := c.state
+ // Use RCDATA instead of parsing into JS or CSS styles.
+ if c.element != elementNone && !isInTag(st) {
+ st = stateRCDATA
+ }
+ d, nread := transitionFunc[st](c, s[i:])
+ i1 := i + nread
+ if c.state == stateText || c.state == stateRCDATA {
+ // Emit text up to the start of the tag or comment.
+ j := i1
+ if d.state != c.state {
+ for j1 := j - 1; j1 >= i; j1-- {
+ if s[j1] == '<' {
+ j = j1
+ break
+ }
+ }
+ }
+ b.Write(s[i:j])
+ } else {
+ allText = false
+ }
+ c, i = d, i1
+ continue
+ }
+ i1 := i + bytes.IndexAny(s[i:], delimEnds[c.delim])
+ if i1 < i {
+ break
+ }
+ if c.delim != delimSpaceOrTagEnd {
+ // Consume any quote.
+ i1++
+ }
+ c, i = context{state: stateTag, element: c.element}, i1
+ }
+ if allText {
+ return html
+ } else if c.state == stateText || c.state == stateRCDATA {
+ b.Write(s[i:])
+ }
+ return b.String()
+}
+
+// htmlNameFilter accepts valid parts of an HTML attribute or tag name or
+// a known-safe HTML attribute.
+func htmlNameFilter(args ...interface{}) string {
+ s, t := stringify(args...)
+ if t == contentTypeHTMLAttr {
+ return s
+ }
+ if len(s) == 0 {
+ // Avoid violation of structure preservation.
+ // <input checked {{.K}}={{.V}}>.
+ // Without this, if .K is empty then .V is the value of
+ // checked, but otherwise .V is the value of the attribute
+ // named .K.
+ return filterFailsafe
+ }
+ s = strings.ToLower(s)
+ if t := attrType(s); t != contentTypePlain {
+ // TODO: Split attr and element name part filters so we can whitelist
+ // attributes.
+ return filterFailsafe
+ }
+ for _, r := range s {
+ switch {
+ case '0' <= r && r <= '9':
+ case 'a' <= r && r <= 'z':
+ default:
+ return filterFailsafe
+ }
+ }
+ return s
+}
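+
+// For example, htmlNameFilter("title") passes through as "title" while
+// htmlNameFilter("") yields filterFailsafe; a name like "onclick" is also
+// rejected, assuming attrType classifies "on*" attributes as script content.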
+
+// commentEscaper returns the empty string regardless of input.
+// Comment content does not correspond to any parsed structure or
+// human-readable content, so the simplest and most secure policy is to drop
+// content interpolated into comments.
+// This approach is equally valid whether or not static comment content is
+// removed from the template.
+func commentEscaper(args ...interface{}) string {
+ return ""
+}
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+ "html"
+ "strings"
+ "testing"
+)
+
+func TestHTMLNospaceEscaper(t *testing.T) {
+ input := ("\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f" +
+ "\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" +
+ ` !"#$%&'()*+,-./` +
+ `0123456789:;<=>?` +
+ `@ABCDEFGHIJKLMNO` +
+ `PQRSTUVWXYZ[\]^_` +
+ "`abcdefghijklmno" +
+ "pqrstuvwxyz{|}~\x7f" +
+ "\u00A0\u0100\u2028\u2029\ufeff\ufdec\U0001D11E")
+
+ want := ("�\x01\x02\x03\x04\x05\x06\x07" +
+ "\x08	  \x0E\x0F" +
+ "\x10\x11\x12\x13\x14\x15\x16\x17" +
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" +
+ ` !"#$%&'()*+,-./` +
+ `0123456789:;<=>?` +
+ `@ABCDEFGHIJKLMNO` +
+ `PQRSTUVWXYZ[\]^_` +
+ ``abcdefghijklmno` +
+ `pqrstuvwxyz{|}~` + "\u007f" +
+ "\u00A0\u0100\u2028\u2029\ufeff\U0001D11E")
+
+ got := htmlNospaceEscaper(input)
+ if got != want {
+ t.Errorf("encode: want\n\t%q\nbut got\n\t%q", want, got)
+ }
+
+ got, want = html.UnescapeString(got), strings.Replace(input, "\x00", "\ufffd", 1)
+ if want != got {
+ t.Errorf("decode: want\n\t%q\nbut got\n\t%q", want, got)
+ }
+}
+
+func TestStripTags(t *testing.T) {
+ tests := []struct {
+ input, want string
+ }{
+ {"", ""},
+ {"Hello, World!", "Hello, World!"},
+ {"foo&bar", "foo&bar"},
+ {`Hello <a href="www.example.com/">World</a>!`, "Hello World!"},
+ {"Foo <textarea>Bar</textarea> Baz", "Foo Bar Baz"},
+ {"Foo <!-- Bar --> Baz", "Foo Baz"},
+ {"<", "<"},
+ {"foo < bar", "foo < bar"},
+ {`Foo<script type="text/javascript">alert(1337)</script>Bar`, "FooBar"},
+ {`Foo<div title="1>2">Bar`, "FooBar"},
+ {`I <3 Ponies!`, `I <3 Ponies!`},
+ {`<script>foo()</script>`, ``},
+ }
+
+ for _, test := range tests {
+ if got := stripTags(test.input); got != test.want {
+ t.Errorf("%q: want %q, got %q", test.input, test.want, got)
+ }
+ }
+}
+
+func BenchmarkHTMLNospaceEscaper(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ htmlNospaceEscaper("The <i>quick</i>,\r\n<span style='color:brown'>brown</span> fox jumps\u2028over the <canine class=\"lazy\">dog</canine>")
+ }
+}
+
+func BenchmarkHTMLNospaceEscaperNoSpecials(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ htmlNospaceEscaper("The_quick,_brown_fox_jumps_over_the_lazy_dog.")
+ }
+}
+
+func BenchmarkStripTags(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ stripTags("The <i>quick</i>,\r\n<span style='color:brown'>brown</span> fox jumps\u2028over the <canine class=\"lazy\">dog</canine>")
+ }
+}
+
+func BenchmarkStripTagsNoSpecials(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ stripTags("The quick, brown fox jumps over the lazy dog.")
+ }
+}
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "strings"
+ "unicode/utf8"
+)
+
+// nextJSCtx returns the context that determines whether a slash after the
+// given run of tokens starts a regular expression instead of a division
+// operator: / or /=.
+//
+// This assumes that the token run does not include any string tokens, comment
+// tokens, regular expression literal tokens, or division operators.
+//
+// This fails on some valid but nonsensical JavaScript programs like
+// "x = ++/foo/i" which is quite different than "x++/foo/i", but is not known to
+// fail on any known useful programs. It is based on the draft
+// JavaScript 2.0 lexical grammar and requires one token of lookbehind:
+// http://www.mozilla.org/js/language/js20-2000-07/rationale/syntax.html
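+// For example, a '/' after "return" begins a regular expression literal,
+// while a '/' after an identifier such as "x" is a division operator, so
+// nextJSCtx([]byte("return"), jsCtxDivOp) is jsCtxRegexp and
+// nextJSCtx([]byte("x"), jsCtxRegexp) is jsCtxDivOp.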
+func nextJSCtx(s []byte, preceding jsCtx) jsCtx {
+ s = bytes.TrimRight(s, "\t\n\f\r \u2028\u2029")
+ if len(s) == 0 {
+ return preceding
+ }
+
+ // All cases below are in the single-byte UTF-8 group.
+ switch c, n := s[len(s)-1], len(s); c {
+ case '+', '-':
+ // ++ and -- are not regexp preceders, but + and - are whether
+ // they are used as infix or prefix operators.
+ start := n - 1
+ // Count the number of adjacent dashes or pluses.
+ for start > 0 && s[start-1] == c {
+ start--
+ }
+ if (n-start)&1 == 1 {
+ // Reached for trailing minus signs since "---" is the
+ // same as "-- -".
+ return jsCtxRegexp
+ }
+ return jsCtxDivOp
+ case '.':
+ // Handle "42."
+ if n != 1 && '0' <= s[n-2] && s[n-2] <= '9' {
+ return jsCtxDivOp
+ }
+ return jsCtxRegexp
+ // Suffixes for all punctuators from section 7.7 of the language spec
+ // that only end binary operators not handled above.
+ case ',', '<', '>', '=', '*', '%', '&', '|', '^', '?':
+ return jsCtxRegexp
+ // Suffixes for all punctuators from section 7.7 of the language spec
+ // that are prefix operators not handled above.
+ case '!', '~':
+ return jsCtxRegexp
+ // Matches all the punctuators from section 7.7 of the language spec
+ // that are open brackets not handled above.
+ case '(', '[':
+ return jsCtxRegexp
+ // Matches all the punctuators from section 7.7 of the language spec
+ // that precede expression starts.
+ case ':', ';', '{':
+ return jsCtxRegexp
+ // CAVEAT: the close punctuators ('}', ']', ')') precede div ops and
+ // are handled in the default except for '}' which can precede a
+ // division op as in
+ // ({ valueOf: function () { return 42 } } / 2
+ // which is valid, but, in practice, developers don't divide object
+ // literals, so our heuristic works well for code like
+ // function () { ... } /foo/.test(x) && sideEffect();
+ // The ')' punctuator can precede a regular expression as in
+ // if (b) /foo/.test(x) && ...
+ // but this is much less likely than
+ // (a + b) / c
+ case '}':
+ return jsCtxRegexp
+ default:
+ // Look for an IdentifierName and see if it is a keyword that
+ // can precede a regular expression.
+ j := n
+ for j > 0 && isJSIdentPart(rune(s[j-1])) {
+ j--
+ }
+ if regexpPrecederKeywords[string(s[j:])] {
+ return jsCtxRegexp
+ }
+ }
+ // Otherwise is a punctuator not listed above, or
+ // a string which precedes a div op, or an identifier
+ // which precedes a div op.
+ return jsCtxDivOp
+}
+
+// regexpPrecederKeywords is a set of reserved JS keywords that can precede a
+// regular expression in JS source.
+var regexpPrecederKeywords = map[string]bool{
+ "break": true,
+ "case": true,
+ "continue": true,
+ "delete": true,
+ "do": true,
+ "else": true,
+ "finally": true,
+ "in": true,
+ "instanceof": true,
+ "return": true,
+ "throw": true,
+ "try": true,
+ "typeof": true,
+ "void": true,
+}
+
+// jsValEscaper escapes its inputs to a JS Expression (section 11.14) that has
+// neither side-effects nor free variables outside (NaN, Infinity).
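+// For example, jsValEscaper(42) is " 42 " (padded so the number cannot run
+// into an adjacent identifier or keyword) and jsValEscaper("<!--") is
+// `"\u003c!--"`.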
+func jsValEscaper(args ...interface{}) string {
+ var a interface{}
+ if len(args) == 1 {
+ a = args[0]
+ switch t := a.(type) {
+ case JS:
+ return string(t)
+ case JSStr:
+ // TODO: normalize quotes.
+ return `"` + string(t) + `"`
+ case json.Marshaler:
+ // Do not treat as a Stringer.
+ case fmt.Stringer:
+ a = t.String()
+ }
+ } else {
+ a = fmt.Sprint(args...)
+ }
+ // TODO: detect cycles before calling Marshal which loops infinitely on
+ // cyclic data. This may be an unacceptable DoS risk.
+
+ b, err := json.Marshal(a)
+ if err != nil {
+ // Put a space before comment so that if it is flush against
+ // a division operator it is not turned into a line comment:
+ // x/{{y}}
+ // turning into
+ // x//* error marshalling y:
+ // second line of error message */null
+ return fmt.Sprintf(" /* %s */null ", strings.Replace(err.Error(), "*/", "* /", -1))
+ }
+
+ // TODO: maybe post-process output to prevent it from containing
+ // "<!--", "-->", "<![CDATA[", "]]>", or "</script"
+ // in case custom marshallers produce output containing those.
+
+ // TODO: Maybe abbreviate \u00ab to \xab to produce more compact output.
+ if len(b) == 0 {
+ // In `x=y/{{.}}*z`, a json.Marshaler that produces "" should
+ // not cause the output `x=y/*z`.
+ return " null "
+ }
+ first, _ := utf8.DecodeRune(b)
+ last, _ := utf8.DecodeLastRune(b)
+ var buf bytes.Buffer
+ // Prevent IdentifierNames and NumericLiterals from running into
+ // keywords: in, instanceof, typeof, void
+ pad := isJSIdentPart(first) || isJSIdentPart(last)
+ if pad {
+ buf.WriteByte(' ')
+ }
+ written := 0
+ // Make sure that json.Marshal escapes codepoints U+2028 & U+2029
+ // so it falls within the subset of JSON which is valid JS.
+ for i := 0; i < len(b); {
+ rune, n := utf8.DecodeRune(b[i:])
+ repl := ""
+ if rune == 0x2028 {
+ repl = `\u2028`
+ } else if rune == 0x2029 {
+ repl = `\u2029`
+ }
+ if repl != "" {
+ buf.Write(b[written:i])
+ buf.WriteString(repl)
+ written = i + n
+ }
+ i += n
+ }
+ if buf.Len() != 0 {
+ buf.Write(b[written:])
+ if pad {
+ buf.WriteByte(' ')
+ }
+ b = buf.Bytes()
+ }
+ return string(b)
+}
+
+// jsStrEscaper produces a string that can be included between quotes in
+// JavaScript source, in JavaScript embedded in an HTML5 <script> element,
+// or in an HTML5 event handler attribute such as onclick.
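+// For example, jsStrEscaper("</script>") is `\x3c\/script\x3e`, which cannot
+// prematurely close the enclosing script element or attribute.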
+func jsStrEscaper(args ...interface{}) string {
+ s, t := stringify(args...)
+ if t == contentTypeJSStr {
+ return replace(s, jsStrNormReplacementTable)
+ }
+ return replace(s, jsStrReplacementTable)
+}
+
+// jsRegexpEscaper behaves like jsStrEscaper but escapes regular expression
+// specials so the result is treated literally when included in a regular
+// expression literal. /foo{{.X}}bar/ matches the string "foo" followed by
+// the literal text of {{.X}} followed by the string "bar".
+func jsRegexpEscaper(args ...interface{}) string {
+ s, _ := stringify(args...)
+ s = replace(s, jsRegexpReplacementTable)
+ if s == "" {
+ // /{{.X}}/ should not produce a line comment when .X == "".
+ return "(?:)"
+ }
+ return s
+}
+
+// replace replaces each rune r of s with replacementTable[r], provided that
+// r < len(replacementTable). If replacementTable[r] is the empty string then
+// no replacement is made.
+// It also replaces runes U+2028 and U+2029 with the raw strings `\u2028` and
+// `\u2029`.
+func replace(s string, replacementTable []string) string {
+ var b bytes.Buffer
+ written := 0
+ for i, r := range s {
+ var repl string
+ switch {
+ case int(r) < len(replacementTable) && replacementTable[r] != "":
+ repl = replacementTable[r]
+ case r == '\u2028':
+ repl = `\u2028`
+ case r == '\u2029':
+ repl = `\u2029`
+ default:
+ continue
+ }
+ b.WriteString(s[written:i])
+ b.WriteString(repl)
+ written = i + utf8.RuneLen(r)
+ }
+ if written == 0 {
+ return s
+ }
+ b.WriteString(s[written:])
+ return b.String()
+}
+
+var jsStrReplacementTable = []string{
+ 0: `\0`,
+ '\t': `\t`,
+ '\n': `\n`,
+ '\v': `\x0b`, // "\v" == "v" on IE 6.
+ '\f': `\f`,
+ '\r': `\r`,
+ // Encode HTML specials as hex so the output can be embedded
+ // in HTML attributes without further encoding.
+ '"': `\x22`,
+ '&': `\x26`,
+ '\'': `\x27`,
+ '+': `\x2b`,
+ '/': `\/`,
+ '<': `\x3c`,
+ '>': `\x3e`,
+ '\\': `\\`,
+}
+
+// jsStrNormReplacementTable is like jsStrReplacementTable but does not
+// overencode existing escapes since this table has no entry for `\`.
+var jsStrNormReplacementTable = []string{
+ 0: `\0`,
+ '\t': `\t`,
+ '\n': `\n`,
+ '\v': `\x0b`, // "\v" == "v" on IE 6.
+ '\f': `\f`,
+ '\r': `\r`,
+ // Encode HTML specials as hex so the output can be embedded
+ // in HTML attributes without further encoding.
+ '"': `\x22`,
+ '&': `\x26`,
+ '\'': `\x27`,
+ '+': `\x2b`,
+ '/': `\/`,
+ '<': `\x3c`,
+ '>': `\x3e`,
+}
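+
+// For example, a JSStr value containing the two-character escape `\n` is left
+// as `\n` by the norm table instead of being doubled to `\\n`.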
+
+var jsRegexpReplacementTable = []string{
+ 0: `\0`,
+ '\t': `\t`,
+ '\n': `\n`,
+ '\v': `\x0b`, // "\v" == "v" on IE 6.
+ '\f': `\f`,
+ '\r': `\r`,
+ // Encode HTML specials as hex so the output can be embedded
+ // in HTML attributes without further encoding.
+ '"': `\x22`,
+ '$': `\$`,
+ '&': `\x26`,
+ '\'': `\x27`,
+ '(': `\(`,
+ ')': `\)`,
+ '*': `\*`,
+ '+': `\x2b`,
+ '-': `\-`,
+ '.': `\.`,
+ '/': `\/`,
+ '<': `\x3c`,
+ '>': `\x3e`,
+ '?': `\?`,
+ '[': `\[`,
+ '\\': `\\`,
+ ']': `\]`,
+ '^': `\^`,
+ '{': `\{`,
+ '|': `\|`,
+ '}': `\}`,
+}
+
+// isJSIdentPart returns whether the given rune is a JS identifier part.
+// It does not handle all the non-Latin letters, joiners, and combining marks,
+// but it does handle every codepoint that can occur in a numeric literal or
+// a keyword.
+func isJSIdentPart(r rune) bool {
+ switch {
+ case r == '$':
+ return true
+ case '0' <= r && r <= '9':
+ return true
+ case 'A' <= r && r <= 'Z':
+ return true
+ case r == '_':
+ return true
+ case 'a' <= r && r <= 'z':
+ return true
+ }
+ return false
+}
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+ "bytes"
+ "math"
+ "strings"
+ "testing"
+)
+
+func TestNextJsCtx(t *testing.T) {
+ tests := []struct {
+ jsCtx jsCtx
+ s string
+ }{
+ // Statement terminators precede regexps.
+ {jsCtxRegexp, ";"},
+ // This is not airtight.
+ // ({ valueOf: function () { return 1 } } / 2)
+ // is valid JavaScript but in practice, devs do not do this.
+ // A block followed by a statement starting with a RegExp is
+ // much more common:
+ // while (x) {...} /foo/.test(x) || panic()
+ {jsCtxRegexp, "}"},
+ // But member, call, grouping, and array expression terminators
+ // precede div ops.
+ {jsCtxDivOp, ")"},
+ {jsCtxDivOp, "]"},
+ // At the start of a primary expression, array, or expression
+ // statement, expect a regexp.
+ {jsCtxRegexp, "("},
+ {jsCtxRegexp, "["},
+ {jsCtxRegexp, "{"},
+ // Assignment operators precede regexps as do all exclusively
+ // prefix and binary operators.
+ {jsCtxRegexp, "="},
+ {jsCtxRegexp, "+="},
+ {jsCtxRegexp, "*="},
+ {jsCtxRegexp, "*"},
+ {jsCtxRegexp, "!"},
+ // Whether the + or - is infix or prefix, it cannot precede a
+ // div op.
+ {jsCtxRegexp, "+"},
+ {jsCtxRegexp, "-"},
+ // An incr/decr op precedes a div operator.
+ // This is not airtight. In (g = ++/h/i) a regexp follows a
+ // pre-increment operator, but in practice devs do not try to
+ // increment or decrement regular expressions.
+ // (g++/h/i) where ++ is a postfix operator on g is much more
+ // common.
+ {jsCtxDivOp, "--"},
+ {jsCtxDivOp, "++"},
+ {jsCtxDivOp, "x--"},
+ // When we have many dashes or pluses, then they are grouped
+ // left to right.
+ {jsCtxRegexp, "x---"}, // A postfix -- then a -.
+ // return followed by a slash returns the regexp literal or the
+ // slash starts a regexp literal in an expression statement that
+ // is dead code.
+ {jsCtxRegexp, "return"},
+ {jsCtxRegexp, "return "},
+ {jsCtxRegexp, "return\t"},
+ {jsCtxRegexp, "return\n"},
+ {jsCtxRegexp, "return\u2028"},
+ // Identifiers can be divided and cannot validly be preceded by
+ // a regular expressions. Semicolon insertion cannot happen
+ // between an identifier and a regular expression on a new line
+ // because the one token lookahead for semicolon insertion has
+ // to conclude that it could be a div binary op and treat it as
+ // such.
+ {jsCtxDivOp, "x"},
+ {jsCtxDivOp, "x "},
+ {jsCtxDivOp, "x\t"},
+ {jsCtxDivOp, "x\n"},
+ {jsCtxDivOp, "x\u2028"},
+ {jsCtxDivOp, "preturn"},
+ // Numbers precede div ops.
+ {jsCtxDivOp, "0"},
+ // Dots that are part of a number are div preceders.
+ {jsCtxDivOp, "0."},
+ }
+
+ for _, test := range tests {
+ if nextJSCtx([]byte(test.s), jsCtxRegexp) != test.jsCtx {
+ t.Errorf("want %s got %q", test.jsCtx, test.s)
+ }
+ if nextJSCtx([]byte(test.s), jsCtxDivOp) != test.jsCtx {
+ t.Errorf("want %s got %q", test.jsCtx, test.s)
+ }
+ }
+
+ if nextJSCtx([]byte(" "), jsCtxRegexp) != jsCtxRegexp {
+ t.Error("Blank tokens")
+ }
+
+ if nextJSCtx([]byte(" "), jsCtxDivOp) != jsCtxDivOp {
+ t.Error("Blank tokens")
+ }
+}
+
+func TestJSValEscaper(t *testing.T) {
+ tests := []struct {
+ x interface{}
+ js string
+ }{
+ {int(42), " 42 "},
+ {uint(42), " 42 "},
+ {int16(42), " 42 "},
+ {uint16(42), " 42 "},
+ {int32(-42), " -42 "},
+ {uint32(42), " 42 "},
+ {int16(-42), " -42 "},
+ {uint16(42), " 42 "},
+ {int64(-42), " -42 "},
+ {uint64(42), " 42 "},
+ {uint64(1) << 53, " 9007199254740992 "},
+ // ulp(1 << 53) > 1 so this loses precision in JS
+ // but it is still a representable integer literal.
+ {uint64(1)<<53 + 1, " 9007199254740993 "},
+ {float32(1.0), " 1 "},
+ {float32(-1.0), " -1 "},
+ {float32(0.5), " 0.5 "},
+ {float32(-0.5), " -0.5 "},
+ {float32(1.0) / float32(256), " 0.00390625 "},
+ {float32(0), " 0 "},
+ {math.Copysign(0, -1), " -0 "},
+ {float64(1.0), " 1 "},
+ {float64(-1.0), " -1 "},
+ {float64(0.5), " 0.5 "},
+ {float64(-0.5), " -0.5 "},
+ {float64(0), " 0 "},
+ {math.Copysign(0, -1), " -0 "},
+ {"", `""`},
+ {"foo", `"foo"`},
+ // Newlines.
+ {"\r\n\u2028\u2029", `"\r\n\u2028\u2029"`},
+ // "\v" == "v" on IE 6 so use "\x0b" instead.
+ {"\t\x0b", `"\u0009\u000b"`},
+ {struct{ X, Y int }{1, 2}, `{"X":1,"Y":2}`},
+ {[]interface{}{}, "[]"},
+ {[]interface{}{42, "foo", nil}, `[42,"foo",null]`},
+ {[]string{"<!--", "</script>", "-->"}, `["\u003c!--","\u003c/script\u003e","--\u003e"]`},
+ {"<!--", `"\u003c!--"`},
+ {"-->", `"--\u003e"`},
+ {"<![CDATA[", `"\u003c![CDATA["`},
+ {"]]>", `"]]\u003e"`},
+ {"</script", `"\u003c/script"`},
+ {"\U0001D11E", "\"\U0001D11E\""}, // or "\uD834\uDD1E"
+ }
+
+ for _, test := range tests {
+ if js := jsValEscaper(test.x); js != test.js {
+ t.Errorf("%+v: want\n\t%q\ngot\n\t%q", test.x, test.js, js)
+ }
+ // Make sure that escaping corner cases are not broken
+ // by nesting.
+ a := []interface{}{test.x}
+ want := "[" + strings.TrimSpace(test.js) + "]"
+ if js := jsValEscaper(a); js != want {
+ t.Errorf("%+v: want\n\t%q\ngot\n\t%q", a, want, js)
+ }
+ }
+}
+
+func TestJSStrEscaper(t *testing.T) {
+ tests := []struct {
+ x interface{}
+ esc string
+ }{
+ {"", ``},
+ {"foo", `foo`},
+ {"\u0000", `\0`},
+ {"\t", `\t`},
+ {"\n", `\n`},
+ {"\r", `\r`},
+ {"\u2028", `\u2028`},
+ {"\u2029", `\u2029`},
+ {"\\", `\\`},
+ {"\\n", `\\n`},
+ {"foo\r\nbar", `foo\r\nbar`},
+ // Preserve attribute boundaries.
+ {`"`, `\x22`},
+ {`'`, `\x27`},
+ // Allow embedding in HTML without further escaping.
+ {`&amp;`, `\x26amp;`},
+ // Prevent breaking out of text node and element boundaries.
+ {"</script>", `\x3c\/script\x3e`},
+ {"<![CDATA[", `\x3c![CDATA[`},
+ {"]]>", `]]\x3e`},
+ // http://dev.w3.org/html5/markup/aria/syntax.html#escaping-text-span
+ // "The text in style, script, title, and textarea elements
+ // must not have an escaping text span start that is not
+ // followed by an escaping text span end."
+ // Furthermore, spoofing an escaping text span end could lead
+ // to different interpretation of a </script> sequence otherwise
+ // masked by the escaping text span, and spoofing a start could
+ // allow regular text content to be interpreted as script
+ // allowing script execution via a combination of a JS string
+ // injection followed by an HTML text injection.
+ {"<!--", `\x3c!--`},
+ {"-->", `--\x3e`},
+ // From http://code.google.com/p/doctype/wiki/ArticleUtf7
+ {"+ADw-script+AD4-alert(1)+ADw-/script+AD4-",
+ `\x2bADw-script\x2bAD4-alert(1)\x2bADw-\/script\x2bAD4-`,
+ },
+ // Invalid UTF-8 sequence
+ {"foo\xA0bar", "foo\xA0bar"},
+ // Invalid unicode scalar value.
+ {"foo\xed\xa0\x80bar", "foo\xed\xa0\x80bar"},
+ }
+
+ for _, test := range tests {
+ esc := jsStrEscaper(test.x)
+ if esc != test.esc {
+ t.Errorf("%q: want %q got %q", test.x, test.esc, esc)
+ }
+ }
+}
+
+func TestJSRegexpEscaper(t *testing.T) {
+ tests := []struct {
+ x interface{}
+ esc string
+ }{
+ {"", `(?:)`},
+ {"foo", `foo`},
+ {"\u0000", `\0`},
+ {"\t", `\t`},
+ {"\n", `\n`},
+ {"\r", `\r`},
+ {"\u2028", `\u2028`},
+ {"\u2029", `\u2029`},
+ {"\\", `\\`},
+ {"\\n", `\\n`},
+ {"foo\r\nbar", `foo\r\nbar`},
+ // Preserve attribute boundaries.
+ {`"`, `\x22`},
+ {`'`, `\x27`},
+ // Allow embedding in HTML without further escaping.
+ {`&amp;`, `\x26amp;`},
+ // Prevent breaking out of text node and element boundaries.
+ {"</script>", `\x3c\/script\x3e`},
+ {"<![CDATA[", `\x3c!\[CDATA\[`},
+ {"]]>", `\]\]\x3e`},
+ // Escaping text spans.
+ {"<!--", `\x3c!\-\-`},
+ {"-->", `\-\-\x3e`},
+ {"*", `\*`},
+ {"+", `\x2b`},
+ {"?", `\?`},
+ {"[](){}", `\[\]\(\)\{\}`},
+ {"$foo|x.y", `\$foo\|x\.y`},
+ {"x^y", `x\^y`},
+ }
+
+ for _, test := range tests {
+ esc := jsRegexpEscaper(test.x)
+ if esc != test.esc {
+ t.Errorf("%q: want %q got %q", test.x, test.esc, esc)
+ }
+ }
+}
+
+func TestEscapersOnLower7AndSelectHighCodepoints(t *testing.T) {
+ input := ("\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f" +
+ "\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" +
+ ` !"#$%&'()*+,-./` +
+ `0123456789:;<=>?` +
+ `@ABCDEFGHIJKLMNO` +
+ `PQRSTUVWXYZ[\]^_` +
+ "`abcdefghijklmno" +
+ "pqrstuvwxyz{|}~\x7f" +
+ "\u00A0\u0100\u2028\u2029\ufeff\U0001D11E")
+
+ tests := []struct {
+ name string
+ escaper func(...interface{}) string
+ escaped string
+ }{
+ {
+ "jsStrEscaper",
+ jsStrEscaper,
+ "\\0\x01\x02\x03\x04\x05\x06\x07" +
+ "\x08\\t\\n\\x0b\\f\\r\x0E\x0F" +
+ "\x10\x11\x12\x13\x14\x15\x16\x17" +
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" +
+ ` !\x22#$%\x26\x27()*\x2b,-.\/` +
+ `0123456789:;\x3c=\x3e?` +
+ `@ABCDEFGHIJKLMNO` +
+ `PQRSTUVWXYZ[\\]^_` +
+ "`abcdefghijklmno" +
+ "pqrstuvwxyz{|}~\x7f" +
+ "\u00A0\u0100\\u2028\\u2029\ufeff\U0001D11E",
+ },
+ {
+ "jsRegexpEscaper",
+ jsRegexpEscaper,
+ "\\0\x01\x02\x03\x04\x05\x06\x07" +
+ "\x08\\t\\n\\x0b\\f\\r\x0E\x0F" +
+ "\x10\x11\x12\x13\x14\x15\x16\x17" +
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" +
+ ` !\x22#\$%\x26\x27\(\)\*\x2b,\-\.\/` +
+ `0123456789:;\x3c=\x3e\?` +
+ `@ABCDEFGHIJKLMNO` +
+ `PQRSTUVWXYZ\[\\\]\^_` +
+ "`abcdefghijklmno" +
+ `pqrstuvwxyz\{\|\}~` + "\u007f" +
+ "\u00A0\u0100\\u2028\\u2029\ufeff\U0001D11E",
+ },
+ }
+
+ for _, test := range tests {
+ if s := test.escaper(input); s != test.escaped {
+ t.Errorf("%s once: want\n\t%q\ngot\n\t%q", test.name, test.escaped, s)
+ continue
+ }
+
+ // Escape it rune by rune to make sure that any
+ // fast-path checking does not break escaping.
+ var buf bytes.Buffer
+ for _, c := range input {
+ buf.WriteString(test.escaper(string(c)))
+ }
+
+ if s := buf.String(); s != test.escaped {
+ t.Errorf("%s rune-wise: want\n\t%q\ngot\n\t%q", test.name, test.escaped, s)
+ continue
+ }
+ }
+}
+
+func BenchmarkJSValEscaperWithNum(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ jsValEscaper(3.141592654)
+ }
+}
+
+func BenchmarkJSValEscaperWithStr(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ jsValEscaper("The <i>quick</i>,\r\n<span style='color:brown'>brown</span> fox jumps\u2028over the <canine class=\"lazy\">dog</canine>")
+ }
+}
+
+func BenchmarkJSValEscaperWithStrNoSpecials(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ jsValEscaper("The quick, brown fox jumps over the lazy dog")
+ }
+}
+
+func BenchmarkJSValEscaperWithObj(b *testing.B) {
+ o := struct {
+ S string
+ N int
+ }{
+ "The <i>quick</i>,\r\n<span style='color:brown'>brown</span> fox jumps\u2028over the <canine class=\"lazy\">dog</canine>\u2028",
+ 42,
+ }
+ for i := 0; i < b.N; i++ {
+ jsValEscaper(o)
+ }
+}
+
+func BenchmarkJSValEscaperWithObjNoSpecials(b *testing.B) {
+ o := struct {
+ S string
+ N int
+ }{
+ "The quick, brown fox jumps over the lazy dog",
+ 42,
+ }
+ for i := 0; i < b.N; i++ {
+ jsValEscaper(o)
+ }
+}
+
+func BenchmarkJSStrEscaperNoSpecials(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ jsStrEscaper("The quick, brown fox jumps over the lazy dog.")
+ }
+}
+
+func BenchmarkJSStrEscaper(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ jsStrEscaper("The <i>quick</i>,\r\n<span style='color:brown'>brown</span> fox jumps\u2028over the <canine class=\"lazy\">dog</canine>")
+ }
+}
+
+func BenchmarkJSRegexpEscaperNoSpecials(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ jsRegexpEscaper("The quick, brown fox jumps over the lazy dog")
+ }
+}
+
+func BenchmarkJSRegexpEscaper(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ jsRegexpEscaper("The <i>quick</i>,\r\n<span style='color:brown'>brown</span> fox jumps\u2028over the <canine class=\"lazy\">dog</canine>")
+ }
+}
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+ "fmt"
+ "io"
+ "path/filepath"
+ "text/template"
+)
+
+// Set is a specialized template.Set that produces a safe HTML document
+// fragment.
+type Set struct {
+ escaped map[string]bool
+ template.Set
+}
+
+// Template is a specialized template.Template that produces a safe HTML
+// document fragment.
+type Template struct {
+ escaped bool
+ *template.Template
+}
+
+// Execute applies the named template to the specified data object, writing
+// the output to wr.
+func (s *Set) Execute(wr io.Writer, name string, data interface{}) error {
+ if !s.escaped[name] {
+ if err := escapeSet(&s.Set, name); err != nil {
+ return err
+ }
+ if s.escaped == nil {
+ s.escaped = make(map[string]bool)
+ }
+ s.escaped[name] = true
+ }
+ return s.Set.Execute(wr, name, data)
+}
+
+// Parse parses a string into a set of named templates. Parse may be called
+// multiple times for a given set, adding the templates defined in the string
+// to the set. If a template is redefined, the element in the set is
+// overwritten with the new definition.
+func (set *Set) Parse(src string) (*Set, error) {
+ set.escaped = nil
+ s, err := set.Set.Parse(src)
+ if err != nil {
+ return nil, err
+ }
+ if s != &(set.Set) {
+ panic("allocated new set")
+ }
+ return set, nil
+}
+
+// Parse parses the template definition string to construct an internal
+// representation of the template for execution.
+func (tmpl *Template) Parse(src string) (*Template, error) {
+ tmpl.escaped = false
+ t, err := tmpl.Template.Parse(src)
+ if err != nil {
+ return nil, err
+ }
+ tmpl.Template = t
+ return tmpl, nil
+}
+
+// Execute applies a parsed template to the specified data object,
+// writing the output to wr.
+func (t *Template) Execute(wr io.Writer, data interface{}) error {
+ if !t.escaped {
+ if err := escape(t.Template); err != nil {
+ return err
+ }
+ t.escaped = true
+ }
+ return t.Template.Execute(wr, data)
+}
+
+// New allocates a new HTML template with the given name.
+func New(name string) *Template {
+ return &Template{false, template.New(name)}
+}
+
+// Must panics if err is non-nil in the same way as template.Must.
+func Must(t *Template, err error) *Template {
+ t.Template = template.Must(t.Template, err)
+ return t
+}
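+
+// A minimal usage sketch (the template body is illustrative only):
+//   t := Must(New("page").Parse(`<p>{{.}}</p>`))
+//   t.Execute(os.Stdout, "a < b") // emits <p>a &lt; b</p>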
+
+// ParseFile creates a new Template and parses the template definition from
+// the named file. The template name is the base name of the file.
+func ParseFile(filename string) (*Template, error) {
+ t, err := template.ParseFile(filename)
+ if err != nil {
+ return nil, err
+ }
+ return &Template{false, t}, nil
+}
+
+// ParseFile reads the template definition from a file and parses it to
+// construct an internal representation of the template for execution.
+// The returned template will be nil if an error occurs.
+func (tmpl *Template) ParseFile(filename string) (*Template, error) {
+ t, err := tmpl.Template.ParseFile(filename)
+ if err != nil {
+ return nil, err
+ }
+ tmpl.Template = t
+ return tmpl, nil
+}
+
+// SetMust panics if the error is non-nil just like template.SetMust.
+func SetMust(s *Set, err error) *Set {
+ if err != nil {
+ template.SetMust(&(s.Set), err)
+ }
+ return s
+}
+
+// ParseFiles parses the named files into a set of named templates.
+// Each file must be parseable by itself.
+// If an error occurs, parsing stops and the returned set is nil.
+func (set *Set) ParseFiles(filenames ...string) (*Set, error) {
+ s, err := set.Set.ParseFiles(filenames...)
+ if err != nil {
+ return nil, err
+ }
+ if s != &(set.Set) {
+ panic("allocated new set")
+ }
+ return set, nil
+}
+
+// ParseSetFiles creates a new Set and parses the set definition from the
+// named files. Each file must be individually parseable.
+func ParseSetFiles(filenames ...string) (*Set, error) {
+ set := new(Set)
+ s, err := set.Set.ParseFiles(filenames...)
+ if err != nil {
+ return nil, err
+ }
+ if s != &(set.Set) {
+ panic("allocated new set")
+ }
+ return set, nil
+}
+
+// ParseGlob parses the set definition from the files identified by the
+// pattern. The pattern is processed by filepath.Glob and must match at
+// least one file.
+// If an error occurs, parsing stops and the returned set is nil.
+func (s *Set) ParseGlob(pattern string) (*Set, error) {
+ filenames, err := filepath.Glob(pattern)
+ if err != nil {
+ return nil, err
+ }
+ if len(filenames) == 0 {
+ return nil, fmt.Errorf("pattern matches no files: %#q", pattern)
+ }
+ return s.ParseFiles(filenames...)
+}
+
+// ParseSetGlob creates a new Set and parses the set definition from the
+// files identified by the pattern. The pattern is processed by filepath.Glob
+// and must match at least one file.
+func ParseSetGlob(pattern string) (*Set, error) {
+ set, err := new(Set).ParseGlob(pattern)
+ if err != nil {
+ return nil, err
+ }
+ return set, nil
+}
+
+// Functions and methods to parse stand-alone template files into a set.
+
+// ParseTemplateFiles parses the named template files and adds them to the set
+// in the same way as template.ParseTemplateFiles but ensures that templates
+// with upper-case names are contextually-autoescaped.
+func (set *Set) ParseTemplateFiles(filenames ...string) (*Set, error) {
+ s, err := set.Set.ParseTemplateFiles(filenames...)
+ if err != nil {
+ return nil, err
+ }
+ if s != &(set.Set) {
+ panic("new set allocated")
+ }
+ return set, nil
+}
+
+// ParseTemplateGlob parses the template files matched by the
+// pattern and adds them to the set. Each template will be named
+// the base name of its file.
+// Unlike with ParseGlob, each file should be a stand-alone template
+// definition suitable for Template.Parse (not Set.Parse); that is, the
+// file does not contain {{define}} clauses. ParseTemplateGlob is
+// therefore equivalent to calling the ParseFile function to create
+// individual templates, which are then added to the set.
+// Each file must be parseable by itself.
+// If an error occurs, parsing stops and the returned set is nil.
+func (s *Set) ParseTemplateGlob(pattern string) (*Set, error) {
+ filenames, err := filepath.Glob(pattern)
+ if err != nil {
+ return nil, err
+ }
+ return s.ParseTemplateFiles(filenames...)
+}
+
+// ParseTemplateFiles creates a set by parsing the named files,
+// each of which defines a single template. Each template will be
+// named the base name of its file.
+// Unlike with ParseFiles, each file should be a stand-alone template
+// definition suitable for Template.Parse (not Set.Parse); that is, the
+// file does not contain {{define}} clauses. ParseTemplateFiles is
+// therefore equivalent to calling the ParseFile function to create
+// individual templates, which are then added to the set.
+// Each file must be parseable by itself. Parsing stops if an error is
+// encountered.
+func ParseTemplateFiles(filenames ...string) (*Set, error) {
+ return new(Set).ParseTemplateFiles(filenames...)
+}
+
+// ParseTemplateGlob creates a set by parsing the files matched
+// by the pattern, each of which defines a single template. The pattern
+// is processed by filepath.Glob and must match at least one file. Each
+// template will be named the base name of its file.
+// Unlike with ParseGlob, each file should be a stand-alone template
+// definition suitable for Template.Parse (not Set.Parse); that is, the
+// file does not contain {{define}} clauses. ParseTemplateGlob is
+// therefore equivalent to calling the ParseFile function to create
+// individual templates, which are then added to the set.
+// Each file must be parseable by itself. Parsing stops if an error is
+// encountered.
+func ParseTemplateGlob(pattern string) (*Set, error) {
+ return new(Set).ParseTemplateGlob(pattern)
+}
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+ "bytes"
+ "strings"
+)
+
+// transitionFunc is the array of context transition functions for text nodes.
+// A transition function takes a context and template text input, and returns
+// the updated context and the number of bytes consumed from the front of the
+// input.
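+// For example, tText applied to []byte("Hello, <b>World") consumes
+// "Hello, <b" and returns a context in stateTag.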
+var transitionFunc = [...]func(context, []byte) (context, int){
+ stateText: tText,
+ stateTag: tTag,
+ stateAttrName: tAttrName,
+ stateAfterName: tAfterName,
+ stateBeforeValue: tBeforeValue,
+ stateHTMLCmt: tHTMLCmt,
+ stateRCDATA: tSpecialTagEnd,
+ stateAttr: tAttr,
+ stateURL: tURL,
+ stateJS: tJS,
+ stateJSDqStr: tJSDelimited,
+ stateJSSqStr: tJSDelimited,
+ stateJSRegexp: tJSDelimited,
+ stateJSBlockCmt: tBlockCmt,
+ stateJSLineCmt: tLineCmt,
+ stateCSS: tCSS,
+ stateCSSDqStr: tCSSStr,
+ stateCSSSqStr: tCSSStr,
+ stateCSSDqURL: tCSSStr,
+ stateCSSSqURL: tCSSStr,
+ stateCSSURL: tCSSStr,
+ stateCSSBlockCmt: tBlockCmt,
+ stateCSSLineCmt: tLineCmt,
+ stateError: tError,
+}
+
+var commentStart = []byte("<!--")
+var commentEnd = []byte("-->")
+
+// tText is the context transition function for the text state.
+func tText(c context, s []byte) (context, int) {
+ k := 0
+ for {
+ i := k + bytes.IndexByte(s[k:], '<')
+ if i < k || i+1 == len(s) {
+ return c, len(s)
+ } else if i+4 <= len(s) && bytes.Equal(commentStart, s[i:i+4]) {
+ return context{state: stateHTMLCmt}, i + 4
+ }
+ i++
+ end := false
+ if s[i] == '/' {
+ if i+1 == len(s) {
+ return c, len(s)
+ }
+ end, i = true, i+1
+ }
+ j, e := eatTagName(s, i)
+ if j != i {
+ if end {
+ e = elementNone
+ }
+ // We've found an HTML tag.
+ return context{state: stateTag, element: e}, j
+ }
+ k = j
+ }
+ panic("unreachable")
+}
+
+var elementContentType = [...]state{
+ elementNone: stateText,
+ elementScript: stateJS,
+ elementStyle: stateCSS,
+ elementTextarea: stateRCDATA,
+ elementTitle: stateRCDATA,
+}
+
+// tTag is the context transition function for the tag state.
+func tTag(c context, s []byte) (context, int) {
+ // Find the attribute name.
+ i := eatWhiteSpace(s, 0)
+ if i == len(s) {
+ return c, len(s)
+ }
+ if s[i] == '>' {
+ return context{
+ state: elementContentType[c.element],
+ element: c.element,
+ }, i + 1
+ }
+ j, err := eatAttrName(s, i)
+ if err != nil {
+ return context{state: stateError, err: err}, len(s)
+ }
+ state, attr := stateTag, attrNone
+ if i == j {
+ return context{
+ state: stateError,
+ err: errorf(ErrBadHTML, 0, "expected space, attr name, or end of tag, but got %q", s[i:]),
+ }, len(s)
+ }
+ switch attrType(string(s[i:j])) {
+ case contentTypeURL:
+ attr = attrURL
+ case contentTypeCSS:
+ attr = attrStyle
+ case contentTypeJS:
+ attr = attrScript
+ }
+ if j == len(s) {
+ state = stateAttrName
+ } else {
+ state = stateAfterName
+ }
+ return context{state: state, element: c.element, attr: attr}, j
+}
+
+// tAttrName is the context transition function for stateAttrName.
+func tAttrName(c context, s []byte) (context, int) {
+ i, err := eatAttrName(s, 0)
+ if err != nil {
+ return context{state: stateError, err: err}, len(s)
+ } else if i != len(s) {
+ c.state = stateAfterName
+ }
+ return c, i
+}
+
+// tAfterName is the context transition function for stateAfterName.
+func tAfterName(c context, s []byte) (context, int) {
+ // Look for the start of the value.
+ i := eatWhiteSpace(s, 0)
+ if i == len(s) {
+ return c, len(s)
+ } else if s[i] != '=' {
+ // Occurs due to tag ending '>', and valueless attribute.
+ c.state = stateTag
+ return c, i
+ }
+ c.state = stateBeforeValue
+ // Consume the "=".
+ return c, i + 1
+}
+
+var attrStartStates = [...]state{
+ attrNone: stateAttr,
+ attrScript: stateJS,
+ attrStyle: stateCSS,
+ attrURL: stateURL,
+}
+
+// tBeforeValue is the context transition function for stateBeforeValue.
+func tBeforeValue(c context, s []byte) (context, int) {
+ i := eatWhiteSpace(s, 0)
+ if i == len(s) {
+ return c, len(s)
+ }
+ // Find the attribute delimiter.
+ delim := delimSpaceOrTagEnd
+ switch s[i] {
+ case '\'':
+ delim, i = delimSingleQuote, i+1
+ case '"':
+ delim, i = delimDoubleQuote, i+1
+ }
+ c.state, c.delim, c.attr = attrStartStates[c.attr], delim, attrNone
+ return c, i
+}
+
+// tHTMLCmt is the context transition function for stateHTMLCmt.
+func tHTMLCmt(c context, s []byte) (context, int) {
+ if i := bytes.Index(s, commentEnd); i != -1 {
+ return context{}, i + 3
+ }
+ return c, len(s)
+}
+
+// specialTagEndMarkers maps element types to the character sequence that
+// case-insensitively signals the end of the special tag body.
+var specialTagEndMarkers = [...]string{
+ elementScript: "</script",
+ elementStyle: "</style",
+ elementTextarea: "</textarea",
+ elementTitle: "</title",
+}
+
+// tSpecialTagEnd is the context transition function for raw text and RCDATA
+// element states.
+func tSpecialTagEnd(c context, s []byte) (context, int) {
+ if c.element != elementNone {
+ if i := strings.Index(strings.ToLower(string(s)), specialTagEndMarkers[c.element]); i != -1 {
+ return context{}, i
+ }
+ }
+ return c, len(s)
+}
+
+// tAttr is the context transition function for the attribute state.
+func tAttr(c context, s []byte) (context, int) {
+ return c, len(s)
+}
+
+// tURL is the context transition function for the URL state.
+func tURL(c context, s []byte) (context, int) {
+ if bytes.IndexAny(s, "#?") >= 0 {
+ c.urlPart = urlPartQueryOrFrag
+ } else if len(s) != eatWhiteSpace(s, 0) && c.urlPart == urlPartNone {
+ // HTML5 uses "Valid URL potentially surrounded by spaces" for
+ // attrs: http://www.w3.org/TR/html5/index.html#attributes-1
+ c.urlPart = urlPartPreQuery
+ }
+ return c, len(s)
+}
+
+// tJS is the context transition function for the JS state.
+func tJS(c context, s []byte) (context, int) {
+ i := bytes.IndexAny(s, `"'/`)
+ if i == -1 {
+ // Entire input is non string, comment, regexp tokens.
+ c.jsCtx = nextJSCtx(s, c.jsCtx)
+ return c, len(s)
+ }
+ c.jsCtx = nextJSCtx(s[:i], c.jsCtx)
+ switch s[i] {
+ case '"':
+ c.state, c.jsCtx = stateJSDqStr, jsCtxRegexp
+ case '\'':
+ c.state, c.jsCtx = stateJSSqStr, jsCtxRegexp
+ case '/':
+ switch {
+ case i+1 < len(s) && s[i+1] == '/':
+ c.state, i = stateJSLineCmt, i+1
+ case i+1 < len(s) && s[i+1] == '*':
+ c.state, i = stateJSBlockCmt, i+1
+ case c.jsCtx == jsCtxRegexp:
+ c.state = stateJSRegexp
+ case c.jsCtx == jsCtxDivOp:
+ c.jsCtx = jsCtxRegexp
+ default:
+ return context{
+ state: stateError,
+ err: errorf(ErrSlashAmbig, 0, "'/' could start a division or regexp: %.32q", s[i:]),
+ }, len(s)
+ }
+ default:
+ panic("unreachable")
+ }
+ return c, i + 1
+}
+
+// tJSDelimited is the context transition function for the JS string and regexp
+// states.
+func tJSDelimited(c context, s []byte) (context, int) {
+ specials := `\"`
+ switch c.state {
+ case stateJSSqStr:
+ specials = `\'`
+ case stateJSRegexp:
+ specials = `\/[]`
+ }
+
+ k, inCharset := 0, false
+ for {
+ i := k + bytes.IndexAny(s[k:], specials)
+ if i < k {
+ break
+ }
+ switch s[i] {
+ case '\\':
+ i++
+ if i == len(s) {
+ return context{
+ state: stateError,
+ err: errorf(ErrPartialEscape, 0, "unfinished escape sequence in JS string: %q", s),
+ }, len(s)
+ }
+ case '[':
+ inCharset = true
+ case ']':
+ inCharset = false
+ default:
+ // end delimiter
+ if !inCharset {
+ c.state, c.jsCtx = stateJS, jsCtxDivOp
+ return c, i + 1
+ }
+ }
+ k = i + 1
+ }
+
+ if inCharset {
+ // This can be fixed by making context richer if interpolation
+ // into charsets is desired.
+ return context{
+ state: stateError,
+ err: errorf(ErrPartialCharset, 0, "unfinished JS regexp charset: %q", s),
+ }, len(s)
+ }
+
+ return c, len(s)
+}
+
+var blockCommentEnd = []byte("*/")
+
+// tBlockCmt is the context transition function for /*comment*/ states.
+func tBlockCmt(c context, s []byte) (context, int) {
+ i := bytes.Index(s, blockCommentEnd)
+ if i == -1 {
+ return c, len(s)
+ }
+ switch c.state {
+ case stateJSBlockCmt:
+ c.state = stateJS
+ case stateCSSBlockCmt:
+ c.state = stateCSS
+ default:
+ panic(c.state.String())
+ }
+ return c, i + 2
+}
+
+// tLineCmt is the context transition function for //comment states.
+func tLineCmt(c context, s []byte) (context, int) {
+ var lineTerminators string
+ var endState state
+ switch c.state {
+ case stateJSLineCmt:
+ lineTerminators, endState = "\n\r\u2028\u2029", stateJS
+ case stateCSSLineCmt:
+ lineTerminators, endState = "\n\f\r", stateCSS
+ // Line comments are not part of any published CSS standard but
+ // are supported by the 4 major browsers.
+ // This defines line comments as
+ // LINECOMMENT ::= "//" [^\n\f\r]*
+ // since http://www.w3.org/TR/css3-syntax/#SUBTOK-nl defines
+ // newlines:
+ // nl ::= #xA | #xD #xA | #xD | #xC
+ default:
+ panic(c.state.String())
+ }
+
+ i := bytes.IndexAny(s, lineTerminators)
+ if i == -1 {
+ return c, len(s)
+ }
+ c.state = endState
+ // Per section 7.4 of EcmaScript 5 : http://es5.github.com/#x7.4
+ // "However, the LineTerminator at the end of the line is not
+ // considered to be part of the single-line comment; it is
+ // recognized separately by the lexical grammar and becomes part
+ // of the stream of input elements for the syntactic grammar."
+ return c, i
+}
+
+// tCSS is the context transition function for the CSS state.
+func tCSS(c context, s []byte) (context, int) {
+ // CSS quoted strings are almost never used except for:
+ // (1) URLs as in background: "/foo.png"
+ // (2) Multiword font-names as in font-family: "Times New Roman"
+ // (3) List separators in content values as in inline-lists:
+ // <style>
+ // ul.inlineList { list-style: none; padding:0 }
+ // ul.inlineList > li { display: inline }
+ // ul.inlineList > li:before { content: ", " }
+ // ul.inlineList > li:first-child:before { content: "" }
+ // </style>
+ // <ul class=inlineList><li>One<li>Two<li>Three</ul>
+ // (4) Attribute value selectors as in a[href="http://example.com/"]
+ //
+ // We conservatively treat all strings as URLs, but make some
+ // allowances to avoid confusion.
+ //
+ // In (1), our conservative assumption is justified.
+ // In (2), valid font names do not contain ':', '?', or '#', so our
+ // conservative assumption is fine since we will never transition past
+ // urlPartPreQuery.
+ // In (3), our protocol heuristic should not be tripped, and there
+ // should not be non-space content after a '?' or '#', so as long as
+ // we only %-encode RFC 3986 reserved characters we are ok.
+ // In (4), we should URL escape for URL attributes, and for others we
+ // have the attribute name available if our conservative assumption
+ // proves problematic for real code.
+
+ k := 0
+ for {
+ i := k + bytes.IndexAny(s[k:], `("'/`)
+ if i < k {
+ return c, len(s)
+ }
+ switch s[i] {
+ case '(':
+ // Look for url to the left.
+ p := bytes.TrimRight(s[:i], "\t\n\f\r ")
+ if endsWithCSSKeyword(p, "url") {
+ j := len(s) - len(bytes.TrimLeft(s[i+1:], "\t\n\f\r "))
+ switch {
+ case j != len(s) && s[j] == '"':
+ c.state, j = stateCSSDqURL, j+1
+ case j != len(s) && s[j] == '\'':
+ c.state, j = stateCSSSqURL, j+1
+ default:
+ c.state = stateCSSURL
+ }
+ return c, j
+ }
+ case '/':
+ if i+1 < len(s) {
+ switch s[i+1] {
+ case '/':
+ c.state = stateCSSLineCmt
+ return c, i + 2
+ case '*':
+ c.state = stateCSSBlockCmt
+ return c, i + 2
+ }
+ }
+ case '"':
+ c.state = stateCSSDqStr
+ return c, i + 1
+ case '\'':
+ c.state = stateCSSSqStr
+ return c, i + 1
+ }
+ k = i + 1
+ }
+ panic("unreachable")
+}
+
+// tCSSStr is the context transition function for the CSS string and URL states.
+func tCSSStr(c context, s []byte) (context, int) {
+ var endAndEsc string
+ switch c.state {
+ case stateCSSDqStr, stateCSSDqURL:
+ endAndEsc = `\"`
+ case stateCSSSqStr, stateCSSSqURL:
+ endAndEsc = `\'`
+ case stateCSSURL:
+ // Unquoted URLs end with a newline or close parenthesis.
+ // The below includes the wc (whitespace character) and nl.
+ endAndEsc = "\\\t\n\f\r )"
+ default:
+ panic(c.state.String())
+ }
+
+ k := 0
+ for {
+ i := k + bytes.IndexAny(s[k:], endAndEsc)
+ if i < k {
+ c, nread := tURL(c, decodeCSS(s[k:]))
+ return c, k + nread
+ }
+ if s[i] == '\\' {
+ i++
+ if i == len(s) {
+ return context{
+ state: stateError,
+ err: errorf(ErrPartialEscape, 0, "unfinished escape sequence in CSS string: %q", s),
+ }, len(s)
+ }
+ } else {
+ c.state = stateCSS
+ return c, i + 1
+ }
+ c, _ = tURL(c, decodeCSS(s[:i+1]))
+ k = i + 1
+ }
+ panic("unreachable")
+}
+
+// tError is the context transition function for the error state.
+func tError(c context, s []byte) (context, int) {
+ return c, len(s)
+}
+
+// eatAttrName returns the largest j such that s[i:j] is an attribute name.
+// It returns an error if s[i:] does not look like it begins with an
+// attribute name, such as encountering a quote mark without a preceding
+// equals sign.
+func eatAttrName(s []byte, i int) (int, *Error) {
+ for j := i; j < len(s); j++ {
+ switch s[j] {
+ case ' ', '\t', '\n', '\f', '\r', '=', '>':
+ return j, nil
+ case '\'', '"', '<':
+ // These result in a parse warning in HTML5 and are
+ // indicative of serious problems if seen in an attr
+ // name in a template.
+ return -1, errorf(ErrBadHTML, 0, "%q in attribute name: %.32q", s[j:j+1], s)
+ default:
+ // No-op.
+ }
+ }
+ return len(s), nil
+}
+
+var elementNameMap = map[string]element{
+ "script": elementScript,
+ "style": elementStyle,
+ "textarea": elementTextarea,
+ "title": elementTitle,
+}
+
+// asciiAlpha returns whether c is an ASCII letter.
+func asciiAlpha(c byte) bool {
+ return 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z'
+}
+
+// asciiAlphaNum returns whether c is an ASCII letter or digit.
+func asciiAlphaNum(c byte) bool {
+ return asciiAlpha(c) || '0' <= c && c <= '9'
+}
+
+// eatTagName returns the largest j such that s[i:j] is a tag name and the tag type.
+func eatTagName(s []byte, i int) (int, element) {
+ if i == len(s) || !asciiAlpha(s[i]) {
+ return i, elementNone
+ }
+ j := i + 1
+ for j < len(s) {
+ x := s[j]
+ if asciiAlphaNum(x) {
+ j++
+ continue
+ }
+ // Allow "x-y" or "x:y" but not "x-", "-y", or "x--y".
+ if (x == ':' || x == '-') && j+1 < len(s) && asciiAlphaNum(s[j+1]) {
+ j += 2
+ continue
+ }
+ break
+ }
+ return j, elementNameMap[strings.ToLower(string(s[i:j]))]
+}
+
+// eatWhiteSpace returns the largest j such that s[i:j] is white space.
+func eatWhiteSpace(s []byte, i int) int {
+ for j := i; j < len(s); j++ {
+ switch s[j] {
+ case ' ', '\t', '\n', '\f', '\r':
+ // No-op.
+ default:
+ return j
+ }
+ }
+ return len(s)
+}
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+ "bytes"
+ "fmt"
+ "strings"
+)
+
+// urlFilter returns its input unless it contains an unsafe protocol in which
+// case it defangs the entire URL.
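+// For example, "javascript:alert(1)" becomes "#"+filterFailsafe, while
+// "http://example.com/" and scheme-relative or path-relative URLs pass
+// through unchanged.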
+func urlFilter(args ...interface{}) string {
+ s, t := stringify(args...)
+ if t == contentTypeURL {
+ return s
+ }
+ if i := strings.IndexRune(s, ':'); i >= 0 && strings.IndexRune(s[:i], '/') < 0 {
+ protocol := strings.ToLower(s[:i])
+ if protocol != "http" && protocol != "https" && protocol != "mailto" {
+ return "#" + filterFailsafe
+ }
+ }
+ return s
+}
+
+// urlEscaper produces an output that can be embedded in a URL query.
+// The output can be embedded in an HTML attribute without further escaping.
+func urlEscaper(args ...interface{}) string {
+ return urlProcessor(false, args...)
+}
+
+// urlNormalizer normalizes URL content so it can be embedded in a quote-delimited
+// string or parenthesis delimited url(...).
+// The normalizer does not encode all HTML specials. Specifically, it does not
+// encode '&' so correct embedding in an HTML attribute requires escaping of
+// '&' to '&amp;'.
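+// For example, urlNormalizer("%7c") and urlNormalizer("a%20b") are returned
+// unchanged, while urlEscaper("a b") yields "a%20b" and urlEscaper("%7c")
+// yields "%257c".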
+func urlNormalizer(args ...interface{}) string {
+ return urlProcessor(true, args...)
+}
+
+// urlProcessor normalizes (when norm is true) or escapes its input to produce
+// a valid hierarchical or opaque URL part.
+func urlProcessor(norm bool, args ...interface{}) string {
+ s, t := stringify(args...)
+ if t == contentTypeURL {
+ norm = true
+ }
+ var b bytes.Buffer
+ written := 0
+ // The byte loop below assumes that all URLs use UTF-8 as the
+ // content-encoding. This is similar to the URI to IRI encoding scheme
+ // defined in section 3.1 of RFC 3987, and behaves the same as the
+ // EcmaScript builtin encodeURIComponent.
+ // It should not cause any misencoding of URLs in pages with
+ // Content-type: text/html;charset=UTF-8.
+ for i, n := 0, len(s); i < n; i++ {
+ c := s[i]
+ switch c {
+ // Single quote and parens are sub-delims in RFC 3986, but we
+ // escape them so the output can be embedded in single
+ // quoted attributes and unquoted CSS url(...) constructs.
+ // Single quotes are reserved in URLs, but are only used in
+ // the obsolete "mark" rule in an appendix in RFC 3986
+ // so can be safely encoded.
+ case '!', '#', '$', '&', '*', '+', ',', '/', ':', ';', '=', '?', '@', '[', ']':
+ if norm {
+ continue
+ }
+ // Unreserved according to RFC 3986 sec 2.3
+ // "For consistency, percent-encoded octets in the ranges of
+ // ALPHA (%41-%5A and %61-%7A), DIGIT (%30-%39), hyphen (%2D),
+ // period (%2E), underscore (%5F), or tilde (%7E) should not be
+ // created by URI producers
+ case '-', '.', '_', '~':
+ continue
+ case '%':
+ // When normalizing do not re-encode valid escapes.
+ if norm && i+2 < len(s) && isHex(s[i+1]) && isHex(s[i+2]) {
+ continue
+ }
+ default:
+ // Unreserved according to RFC 3986 sec 2.3
+ if 'a' <= c && c <= 'z' {
+ continue
+ }
+ if 'A' <= c && c <= 'Z' {
+ continue
+ }
+ if '0' <= c && c <= '9' {
+ continue
+ }
+ }
+ b.WriteString(s[written:i])
+ fmt.Fprintf(&b, "%%%02x", c)
+ written = i + 1
+ }
+ if written == 0 {
+ return s
+ }
+ b.WriteString(s[written:])
+ return b.String()
+}
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+ "testing"
+)
+
+func TestURLNormalizer(t *testing.T) {
+ tests := []struct {
+ url, want string
+ }{
+ {"", ""},
+ {
+ "http://example.com:80/foo/bar?q=foo%20&bar=x+y#frag",
+ "http://example.com:80/foo/bar?q=foo%20&bar=x+y#frag",
+ },
+ {" ", "%20"},
+ {"%7c", "%7c"},
+ {"%7C", "%7C"},
+ {"%2", "%252"},
+ {"%", "%25"},
+ {"%z", "%25z"},
+ {"/foo|bar/%5c\u1234", "/foo%7cbar/%5c%e1%88%b4"},
+ }
+ for _, test := range tests {
+ if got := urlNormalizer(test.url); test.want != got {
+ t.Errorf("%q: want\n\t%q\nbut got\n\t%q", test.url, test.want, got)
+ }
+ if test.want != urlNormalizer(test.want) {
+ t.Errorf("not idempotent: %q", test.want)
+ }
+ }
+}
+
+func TestURLFilters(t *testing.T) {
+ input := ("\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f" +
+ "\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" +
+ ` !"#$%&'()*+,-./` +
+ `0123456789:;<=>?` +
+ `@ABCDEFGHIJKLMNO` +
+ `PQRSTUVWXYZ[\]^_` +
+ "`abcdefghijklmno" +
+ "pqrstuvwxyz{|}~\x7f" +
+ "\u00A0\u0100\u2028\u2029\ufeff\U0001D11E")
+
+ tests := []struct {
+ name string
+ escaper func(...interface{}) string
+ escaped string
+ }{
+ {
+ "urlEscaper",
+ urlEscaper,
+ "%00%01%02%03%04%05%06%07%08%09%0a%0b%0c%0d%0e%0f" +
+ "%10%11%12%13%14%15%16%17%18%19%1a%1b%1c%1d%1e%1f" +
+ "%20%21%22%23%24%25%26%27%28%29%2a%2b%2c-.%2f" +
+ "0123456789%3a%3b%3c%3d%3e%3f" +
+ "%40ABCDEFGHIJKLMNO" +
+ "PQRSTUVWXYZ%5b%5c%5d%5e_" +
+ "%60abcdefghijklmno" +
+ "pqrstuvwxyz%7b%7c%7d~%7f" +
+ "%c2%a0%c4%80%e2%80%a8%e2%80%a9%ef%bb%bf%f0%9d%84%9e",
+ },
+ {
+ "urlNormalizer",
+ urlNormalizer,
+ "%00%01%02%03%04%05%06%07%08%09%0a%0b%0c%0d%0e%0f" +
+ "%10%11%12%13%14%15%16%17%18%19%1a%1b%1c%1d%1e%1f" +
+ "%20!%22#$%25&%27%28%29*+,-./" +
+ "0123456789:;%3c=%3e?" +
+ "@ABCDEFGHIJKLMNO" +
+ "PQRSTUVWXYZ[%5c]%5e_" +
+ "%60abcdefghijklmno" +
+ "pqrstuvwxyz%7b%7c%7d~%7f" +
+ "%c2%a0%c4%80%e2%80%a8%e2%80%a9%ef%bb%bf%f0%9d%84%9e",
+ },
+ }
+
+ for _, test := range tests {
+ if s := test.escaper(input); s != test.escaped {
+ t.Errorf("%s: want\n\t%q\ngot\n\t%q", test.name, test.escaped, s)
+ continue
+ }
+ }
+}
+
+func BenchmarkURLEscaper(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ urlEscaper("http://example.com:80/foo?q=bar%20&baz=x+y#frag")
+ }
+}
+
+func BenchmarkURLEscaperNoSpecials(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ urlEscaper("TheQuickBrownFoxJumpsOverTheLazyDog.")
+ }
+}
+
+func BenchmarkURLNormalizer(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ urlNormalizer("The quick brown fox jumps over the lazy dog.\n")
+ }
+}
+
+func BenchmarkURLNormalizerNoSpecials(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ urlNormalizer("http://example.com:80/foo?q=bar%20&baz=x+y#frag")
+ }
+}
// for tt != Error && err != nil to hold: this means that Next returned a
// valid token but the subsequent Next call will return an error token.
// For example, if the HTML text input was just "plain", then the first
- // Next call would set z.err to os.EOF but return a TextToken, and all
+ // Next call would set z.err to io.EOF but return a TextToken, and all
// subsequent Next calls would return an ErrorToken.
// err is never reset. Once it becomes non-nil, it stays non-nil.
err error
textIsRaw bool
}
-// Error returns the error associated with the most recent ErrorToken token.
-// This is typically os.EOF, meaning the end of tokenization.
-func (z *Tokenizer) Error() error {
+// Err returns the error associated with the most recent ErrorToken token.
+// This is typically io.EOF, meaning the end of tokenization.
+func (z *Tokenizer) Err() error {
if z.tt != ErrorToken {
return nil
}
if tt.golden != "" {
for i, s := range strings.Split(tt.golden, "$") {
if z.Next() == ErrorToken {
- t.Errorf("%s token %d: want %q got error %v", tt.desc, i, s, z.Error())
+ t.Errorf("%s token %d: want %q got error %v", tt.desc, i, s, z.Err())
continue loop
}
actual := z.Token().String()
}
}
z.Next()
- if z.Error() != io.EOF {
- t.Errorf("%s: want EOF got %q", tt.desc, z.Error())
+ if z.Err() != io.EOF {
+ t.Errorf("%s: want EOF got %q", tt.desc, z.Err())
}
}
}
tt := z.Next()
switch tt {
case ErrorToken:
- if z.Error() != io.EOF {
- t.Error(z.Error())
+ if z.Err() != io.EOF {
+ t.Error(z.Err())
}
break loop
case TextToken:
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file implements CGI from the perspective of a child
-// process.
-
-package cgi
-
-import (
- "bufio"
- "crypto/tls"
- "errors"
- "fmt"
- "http"
- "io"
- "io/ioutil"
- "net"
- "os"
- "strconv"
- "strings"
- "url"
-)
-
-// Request returns the HTTP request as represented in the current
-// environment. This assumes the current program is being run
-// by a web server in a CGI environment.
-// The returned Request's Body is populated, if applicable.
-func Request() (*http.Request, error) {
- r, err := RequestFromMap(envMap(os.Environ()))
- if err != nil {
- return nil, err
- }
- if r.ContentLength > 0 {
- r.Body = ioutil.NopCloser(io.LimitReader(os.Stdin, r.ContentLength))
- }
- return r, nil
-}
-
-func envMap(env []string) map[string]string {
- m := make(map[string]string)
- for _, kv := range env {
- if idx := strings.Index(kv, "="); idx != -1 {
- m[kv[:idx]] = kv[idx+1:]
- }
- }
- return m
-}
-
-// RequestFromMap creates an http.Request from CGI variables.
-// The returned Request's Body field is not populated.
-func RequestFromMap(params map[string]string) (*http.Request, error) {
- r := new(http.Request)
- r.Method = params["REQUEST_METHOD"]
- if r.Method == "" {
- return nil, errors.New("cgi: no REQUEST_METHOD in environment")
- }
-
- r.Proto = params["SERVER_PROTOCOL"]
- var ok bool
- r.ProtoMajor, r.ProtoMinor, ok = http.ParseHTTPVersion(r.Proto)
- if !ok {
- return nil, errors.New("cgi: invalid SERVER_PROTOCOL version")
- }
-
- r.Close = true
- r.Trailer = http.Header{}
- r.Header = http.Header{}
-
- r.Host = params["HTTP_HOST"]
-
- if lenstr := params["CONTENT_LENGTH"]; lenstr != "" {
- clen, err := strconv.Atoi64(lenstr)
- if err != nil {
- return nil, errors.New("cgi: bad CONTENT_LENGTH in environment: " + lenstr)
- }
- r.ContentLength = clen
- }
-
- if ct := params["CONTENT_TYPE"]; ct != "" {
- r.Header.Set("Content-Type", ct)
- }
-
- // Copy "HTTP_FOO_BAR" variables to "Foo-Bar" Headers
- for k, v := range params {
- if !strings.HasPrefix(k, "HTTP_") || k == "HTTP_HOST" {
- continue
- }
- r.Header.Add(strings.Replace(k[5:], "_", "-", -1), v)
- }
-
- // TODO: cookies. parsing them isn't exported, though.
-
- if r.Host != "" {
- // Hostname is provided, so we can reasonably construct a URL,
- // even if we have to assume 'http' for the scheme.
- rawurl := "http://" + r.Host + params["REQUEST_URI"]
- url, err := url.Parse(rawurl)
- if err != nil {
- return nil, errors.New("cgi: failed to parse host and REQUEST_URI into a URL: " + rawurl)
- }
- r.URL = url
- }
- // Fallback logic if we don't have a Host header or the URL
- // failed to parse
- if r.URL == nil {
- uriStr := params["REQUEST_URI"]
- url, err := url.Parse(uriStr)
- if err != nil {
- return nil, errors.New("cgi: failed to parse REQUEST_URI into a URL: " + uriStr)
- }
- r.URL = url
- }
-
- // There's apparently a de-facto standard for this.
- // http://docstore.mik.ua/orelly/linux/cgi/ch03_02.htm#ch03-35636
- if s := params["HTTPS"]; s == "on" || s == "ON" || s == "1" {
- r.TLS = &tls.ConnectionState{HandshakeComplete: true}
- }
-
- // Request.RemoteAddr has its port set by Go's standard http
- // server, so we do here too. We don't have one, though, so we
- // use a dummy one.
- r.RemoteAddr = net.JoinHostPort(params["REMOTE_ADDR"], "0")
-
- return r, nil
-}
-
-// Serve executes the provided Handler on the currently active CGI
-// request, if any. If there's no current CGI environment
-// an error is returned. The provided handler may be nil to use
-// http.DefaultServeMux.
-func Serve(handler http.Handler) error {
- req, err := Request()
- if err != nil {
- return err
- }
- if handler == nil {
- handler = http.DefaultServeMux
- }
- rw := &response{
- req: req,
- header: make(http.Header),
- bufw: bufio.NewWriter(os.Stdout),
- }
- handler.ServeHTTP(rw, req)
- if err = rw.bufw.Flush(); err != nil {
- return err
- }
- return nil
-}
-
-type response struct {
- req *http.Request
- header http.Header
- bufw *bufio.Writer
- headerSent bool
-}
-
-func (r *response) Flush() {
- r.bufw.Flush()
-}
-
-func (r *response) Header() http.Header {
- return r.header
-}
-
-func (r *response) Write(p []byte) (n int, err error) {
- if !r.headerSent {
- r.WriteHeader(http.StatusOK)
- }
- return r.bufw.Write(p)
-}
-
-func (r *response) WriteHeader(code int) {
- if r.headerSent {
- // Note: explicitly using Stderr, as Stdout is our HTTP output.
- fmt.Fprintf(os.Stderr, "CGI attempted to write header twice on request for %s", r.req.URL)
- return
- }
- r.headerSent = true
- fmt.Fprintf(r.bufw, "Status: %d %s\r\n", code, http.StatusText(code))
-
- // Set a default Content-Type
- if _, hasType := r.header["Content-Type"]; !hasType {
- r.header.Add("Content-Type", "text/html; charset=utf-8")
- }
-
- r.header.Write(r.bufw)
- r.bufw.WriteString("\r\n")
- r.bufw.Flush()
-}
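
The child-side code above is removed here as the package moves under net/http
in this reorganization. A minimal CGI child program written against that final
layout (net/http/cgi is assumed; it exposes the same Request/Serve API as the
file removed above):

package main

import (
	"fmt"
	"net/http"
	"net/http/cgi"
	"os"
)

func main() {
	// Serve answers the single request described by the CGI environment
	// variables, writing the response (headers first) to stdout.
	err := cgi.Serve(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "text/plain; charset=utf-8")
		fmt.Fprintf(w, "method=%s path=%s\n", r.Method, r.URL.Path)
	}))
	if err != nil {
		// Stdout is the HTTP output, so report problems on stderr.
		fmt.Fprintln(os.Stderr, err)
	}
}
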
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Tests for CGI (the child process perspective)
-
-package cgi
-
-import (
- "testing"
-)
-
-func TestRequest(t *testing.T) {
- env := map[string]string{
- "SERVER_PROTOCOL": "HTTP/1.1",
- "REQUEST_METHOD": "GET",
- "HTTP_HOST": "example.com",
- "HTTP_REFERER": "elsewhere",
- "HTTP_USER_AGENT": "goclient",
- "HTTP_FOO_BAR": "baz",
- "REQUEST_URI": "/path?a=b",
- "CONTENT_LENGTH": "123",
- "CONTENT_TYPE": "text/xml",
- "HTTPS": "1",
- "REMOTE_ADDR": "5.6.7.8",
- }
- req, err := RequestFromMap(env)
- if err != nil {
- t.Fatalf("RequestFromMap: %v", err)
- }
- if g, e := req.UserAgent(), "goclient"; e != g {
- t.Errorf("expected UserAgent %q; got %q", e, g)
- }
- if g, e := req.Method, "GET"; e != g {
- t.Errorf("expected Method %q; got %q", e, g)
- }
- if g, e := req.Header.Get("Content-Type"), "text/xml"; e != g {
- t.Errorf("expected Content-Type %q; got %q", e, g)
- }
- if g, e := req.ContentLength, int64(123); e != g {
- t.Errorf("expected ContentLength %d; got %d", e, g)
- }
- if g, e := req.Referer(), "elsewhere"; e != g {
- t.Errorf("expected Referer %q; got %q", e, g)
- }
- if req.Header == nil {
- t.Fatalf("unexpected nil Header")
- }
- if g, e := req.Header.Get("Foo-Bar"), "baz"; e != g {
- t.Errorf("expected Foo-Bar %q; got %q", e, g)
- }
- if g, e := req.URL.String(), "http://example.com/path?a=b"; e != g {
- t.Errorf("expected URL %q; got %q", e, g)
- }
- if g, e := req.FormValue("a"), "b"; e != g {
- t.Errorf("expected FormValue(a) %q; got %q", e, g)
- }
- if req.Trailer == nil {
- t.Errorf("unexpected nil Trailer")
- }
- if req.TLS == nil {
- t.Errorf("expected non-nil TLS")
- }
- if e, g := "5.6.7.8:0", req.RemoteAddr; e != g {
- t.Errorf("RemoteAddr: got %q; want %q", g, e)
- }
-}
-
-func TestRequestWithoutHost(t *testing.T) {
- env := map[string]string{
- "SERVER_PROTOCOL": "HTTP/1.1",
- "HTTP_HOST": "",
- "REQUEST_METHOD": "GET",
- "REQUEST_URI": "/path?a=b",
- "CONTENT_LENGTH": "123",
- }
- req, err := RequestFromMap(env)
- if err != nil {
- t.Fatalf("RequestFromMap: %v", err)
- }
- if req.URL == nil {
- t.Fatalf("unexpected nil URL")
- }
- if g, e := req.URL.String(), "/path?a=b"; e != g {
- t.Errorf("expected URL %q; got %q", e, g)
- }
-}
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file implements the host side of CGI (being the webserver
-// parent process).
-
-// Package cgi implements CGI (Common Gateway Interface) as specified
-// in RFC 3875.
-//
-// Note that using CGI means starting a new process to handle each
-// request, which is typically less efficient than using a
-// long-running server. This package is intended primarily for
-// compatibility with existing systems.
-package cgi
-
-import (
- "bufio"
- "exec"
- "fmt"
- "http"
- "io"
- "log"
- "os"
- "path/filepath"
- "regexp"
- "runtime"
- "strconv"
- "strings"
-)
-
-var trailingPort = regexp.MustCompile(`:([0-9]+)$`)
-
-var osDefaultInheritEnv = map[string][]string{
- "darwin": {"DYLD_LIBRARY_PATH"},
- "freebsd": {"LD_LIBRARY_PATH"},
- "hpux": {"LD_LIBRARY_PATH", "SHLIB_PATH"},
- "irix": {"LD_LIBRARY_PATH", "LD_LIBRARYN32_PATH", "LD_LIBRARY64_PATH"},
- "linux": {"LD_LIBRARY_PATH"},
- "openbsd": {"LD_LIBRARY_PATH"},
- "solaris": {"LD_LIBRARY_PATH", "LD_LIBRARY_PATH_32", "LD_LIBRARY_PATH_64"},
- "windows": {"SystemRoot", "COMSPEC", "PATHEXT", "WINDIR"},
-}
-
-// Handler runs an executable in a subprocess with a CGI environment.
-type Handler struct {
- Path string // path to the CGI executable
- Root string // root URI prefix of handler or empty for "/"
-
- // Dir specifies the CGI executable's working directory.
- // If Dir is empty, the base directory of Path is used.
- // If Path has no base directory, the current working
- // directory is used.
- Dir string
-
- Env []string // extra environment variables to set, if any, as "key=value"
- InheritEnv []string // environment variables to inherit from host, as "key"
- Logger *log.Logger // optional log for errors or nil to use log.Print
- Args []string // optional arguments to pass to child process
-
- // PathLocationHandler specifies the root http Handler that
- // should handle internal redirects when the CGI process
- // returns a Location header value starting with a "/", as
- // specified in RFC 3875 § 6.3.2. This will likely be
- // http.DefaultServeMux.
- //
- // If nil, a CGI response with a local URI path is instead sent
- // back to the client and not redirected internally.
- PathLocationHandler http.Handler
-}
-
-// removeLeadingDuplicates removes leading duplicates from the environment.
-// This makes it possible to override environment variables, for example:
-// cgi.Handler{
-// ...
-// Env: []string{"SCRIPT_FILENAME=foo.php"},
-// }
-func removeLeadingDuplicates(env []string) (ret []string) {
- n := len(env)
- for i := 0; i < n; i++ {
- e := env[i]
- s := strings.SplitN(e, "=", 2)[0]
- found := false
- for j := i + 1; j < n; j++ {
- if s == strings.SplitN(env[j], "=", 2)[0] {
- found = true
- break
- }
- }
- if !found {
- ret = append(ret, e)
- }
- }
- return
-}
-
-func (h *Handler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
- root := h.Root
- if root == "" {
- root = "/"
- }
-
- if len(req.TransferEncoding) > 0 && req.TransferEncoding[0] == "chunked" {
- rw.WriteHeader(http.StatusBadRequest)
- rw.Write([]byte("Chunked request bodies are not supported by CGI."))
- return
- }
-
- pathInfo := req.URL.Path
- if root != "/" && strings.HasPrefix(pathInfo, root) {
- pathInfo = pathInfo[len(root):]
- }
-
- port := "80"
- if matches := trailingPort.FindStringSubmatch(req.Host); len(matches) != 0 {
- port = matches[1]
- }
-
- env := []string{
- "SERVER_SOFTWARE=go",
- "SERVER_NAME=" + req.Host,
- "SERVER_PROTOCOL=HTTP/1.1",
- "HTTP_HOST=" + req.Host,
- "GATEWAY_INTERFACE=CGI/1.1",
- "REQUEST_METHOD=" + req.Method,
- "QUERY_STRING=" + req.URL.RawQuery,
- "REQUEST_URI=" + req.URL.RawPath,
- "PATH_INFO=" + pathInfo,
- "SCRIPT_NAME=" + root,
- "SCRIPT_FILENAME=" + h.Path,
- "REMOTE_ADDR=" + req.RemoteAddr,
- "REMOTE_HOST=" + req.RemoteAddr,
- "SERVER_PORT=" + port,
- }
-
- if req.TLS != nil {
- env = append(env, "HTTPS=on")
- }
-
- for k, v := range req.Header {
- k = strings.Map(upperCaseAndUnderscore, k)
- joinStr := ", "
- if k == "COOKIE" {
- joinStr = "; "
- }
- env = append(env, "HTTP_"+k+"="+strings.Join(v, joinStr))
- }
-
- if req.ContentLength > 0 {
- env = append(env, fmt.Sprintf("CONTENT_LENGTH=%d", req.ContentLength))
- }
- if ctype := req.Header.Get("Content-Type"); ctype != "" {
- env = append(env, "CONTENT_TYPE="+ctype)
- }
-
- if h.Env != nil {
- env = append(env, h.Env...)
- }
-
- envPath := os.Getenv("PATH")
- if envPath == "" {
- envPath = "/bin:/usr/bin:/usr/ucb:/usr/bsd:/usr/local/bin"
- }
- env = append(env, "PATH="+envPath)
-
- for _, e := range h.InheritEnv {
- if v := os.Getenv(e); v != "" {
- env = append(env, e+"="+v)
- }
- }
-
- for _, e := range osDefaultInheritEnv[runtime.GOOS] {
- if v := os.Getenv(e); v != "" {
- env = append(env, e+"="+v)
- }
- }
-
- env = removeLeadingDuplicates(env)
-
- var cwd, path string
- if h.Dir != "" {
- path = h.Path
- cwd = h.Dir
- } else {
- cwd, path = filepath.Split(h.Path)
- }
- if cwd == "" {
- cwd = "."
- }
-
- internalError := func(err error) {
- rw.WriteHeader(http.StatusInternalServerError)
- h.printf("CGI error: %v", err)
- }
-
- cmd := &exec.Cmd{
- Path: path,
- Args: append([]string{h.Path}, h.Args...),
- Dir: cwd,
- Env: env,
- Stderr: os.Stderr, // for now
- }
- if req.ContentLength != 0 {
- cmd.Stdin = req.Body
- }
- stdoutRead, err := cmd.StdoutPipe()
- if err != nil {
- internalError(err)
- return
- }
-
- err = cmd.Start()
- if err != nil {
- internalError(err)
- return
- }
- defer cmd.Wait()
- defer stdoutRead.Close()
-
- linebody, _ := bufio.NewReaderSize(stdoutRead, 1024)
- headers := make(http.Header)
- statusCode := 0
- for {
- line, isPrefix, err := linebody.ReadLine()
- if isPrefix {
- rw.WriteHeader(http.StatusInternalServerError)
- h.printf("cgi: long header line from subprocess.")
- return
- }
- if err == io.EOF {
- break
- }
- if err != nil {
- rw.WriteHeader(http.StatusInternalServerError)
- h.printf("cgi: error reading headers: %v", err)
- return
- }
- if len(line) == 0 {
- break
- }
- parts := strings.SplitN(string(line), ":", 2)
- if len(parts) < 2 {
- h.printf("cgi: bogus header line: %s", string(line))
- continue
- }
- header, val := parts[0], parts[1]
- header = strings.TrimSpace(header)
- val = strings.TrimSpace(val)
- switch {
- case header == "Status":
- if len(val) < 3 {
- h.printf("cgi: bogus status (short): %q", val)
- return
- }
- code, err := strconv.Atoi(val[0:3])
- if err != nil {
- h.printf("cgi: bogus status: %q", val)
- h.printf("cgi: line was %q", line)
- return
- }
- statusCode = code
- default:
- headers.Add(header, val)
- }
- }
-
- if loc := headers.Get("Location"); loc != "" {
- if strings.HasPrefix(loc, "/") && h.PathLocationHandler != nil {
- h.handleInternalRedirect(rw, req, loc)
- return
- }
- if statusCode == 0 {
- statusCode = http.StatusFound
- }
- }
-
- if statusCode == 0 {
- statusCode = http.StatusOK
- }
-
- // Copy headers to rw's headers, after we've decided not to
- // go into handleInternalRedirect, which won't want its rw
- // headers to have been touched.
- for k, vv := range headers {
- for _, v := range vv {
- rw.Header().Add(k, v)
- }
- }
-
- rw.WriteHeader(statusCode)
-
- _, err = io.Copy(rw, linebody)
- if err != nil {
- h.printf("cgi: copy error: %v", err)
- }
-}
-
-func (h *Handler) printf(format string, v ...interface{}) {
- if h.Logger != nil {
- h.Logger.Printf(format, v...)
- } else {
- log.Printf(format, v...)
- }
-}
-
-func (h *Handler) handleInternalRedirect(rw http.ResponseWriter, req *http.Request, path string) {
- url, err := req.URL.Parse(path)
- if err != nil {
- rw.WriteHeader(http.StatusInternalServerError)
- h.printf("cgi: error resolving local URI path %q: %v", path, err)
- return
- }
- // TODO: RFC 3875 isn't clear if only GET is supported, but it
- // suggests so: "Note that any message-body attached to the
- // request (such as for a POST request) may not be available
- // to the resource that is the target of the redirect." We
- // should do some tests against Apache to see how it handles
- // POST, HEAD, etc. Does the internal redirect get the same
- // method or just GET? What about incoming headers?
- // (e.g. Cookies) Which headers, if any, are copied into the
- // second request?
- newReq := &http.Request{
- Method: "GET",
- URL: url,
- Proto: "HTTP/1.1",
- ProtoMajor: 1,
- ProtoMinor: 1,
- Header: make(http.Header),
- Host: url.Host,
- RemoteAddr: req.RemoteAddr,
- TLS: req.TLS,
- }
- h.PathLocationHandler.ServeHTTP(rw, newReq)
-}
-
-func upperCaseAndUnderscore(r rune) rune {
- switch {
- case r >= 'a' && r <= 'z':
- return r - ('a' - 'A')
- case r == '-':
- return '_'
- case r == '=':
- // Maybe not part of the CGI 'spec' but would mess up
- // the environment in any case, as Go represents the
- // environment as a slice of "key=value" strings.
- return '_'
- }
- // TODO: other transformations in spec or practice?
- return r
-}
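
The host side moves under net/http as well. A sketch of wiring the Handler
into a server, assuming the net/http/cgi layout of the final tree; the script
path below is hypothetical:

package main

import (
	"log"
	"net/http"
	"net/http/cgi"
)

func main() {
	h := &cgi.Handler{
		Path: "/var/www/cgi-bin/hello.cgi", // hypothetical executable
		Root: "/cgi-bin/hello",
		// Entries in Env win over inherited variables of the same name,
		// which is what removeLeadingDuplicates above arranges.
		Env: []string{"SCRIPT_FILENAME=/var/www/cgi-bin/hello.cgi"},
	}
	http.Handle("/cgi-bin/hello", h)
	log.Fatal(http.ListenAndServe(":8080", nil))
}
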
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Tests for package cgi
-
-package cgi
-
-import (
- "bufio"
- "exec"
- "fmt"
- "http"
- "http/httptest"
- "io"
- "os"
- "net"
- "path/filepath"
- "strconv"
- "strings"
- "testing"
- "time"
- "runtime"
-)
-
-func newRequest(httpreq string) *http.Request {
- buf := bufio.NewReader(strings.NewReader(httpreq))
- req, err := http.ReadRequest(buf)
- if err != nil {
- panic("cgi: bogus http request in test: " + httpreq)
- }
- req.RemoteAddr = "1.2.3.4"
- return req
-}
-
-func runCgiTest(t *testing.T, h *Handler, httpreq string, expectedMap map[string]string) *httptest.ResponseRecorder {
- rw := httptest.NewRecorder()
- req := newRequest(httpreq)
- h.ServeHTTP(rw, req)
-
- // Make a map to hold the test map that the CGI returns.
- m := make(map[string]string)
- linesRead := 0
-readlines:
- for {
- line, err := rw.Body.ReadString('\n')
- switch {
- case err == io.EOF:
- break readlines
- case err != nil:
- t.Fatalf("unexpected error reading from CGI: %v", err)
- }
- linesRead++
- trimmedLine := strings.TrimRight(line, "\r\n")
- split := strings.SplitN(trimmedLine, "=", 2)
- if len(split) != 2 {
- t.Fatalf("Unexpected %d parts from invalid line number %v: %q; existing map=%v",
- len(split), linesRead, line, m)
- }
- m[split[0]] = split[1]
- }
-
- for key, expected := range expectedMap {
- if got := m[key]; got != expected {
- t.Errorf("for key %q got %q; expected %q", key, got, expected)
- }
- }
- return rw
-}
-
-var cgiTested = false
-var cgiWorks bool
-
-func skipTest(t *testing.T) bool {
- if !cgiTested {
- cgiTested = true
- cgiWorks = exec.Command("./testdata/test.cgi").Run() == nil
- }
- if !cgiWorks {
- // No Perl on Windows, needed by test.cgi
- // TODO: make the child process be Go, not Perl.
- t.Logf("Skipping test: test.cgi failed.")
- return true
- }
- return false
-}
-
-func TestCGIBasicGet(t *testing.T) {
- if skipTest(t) {
- return
- }
- h := &Handler{
- Path: "testdata/test.cgi",
- Root: "/test.cgi",
- }
- expectedMap := map[string]string{
- "test": "Hello CGI",
- "param-a": "b",
- "param-foo": "bar",
- "env-GATEWAY_INTERFACE": "CGI/1.1",
- "env-HTTP_HOST": "example.com",
- "env-PATH_INFO": "",
- "env-QUERY_STRING": "foo=bar&a=b",
- "env-REMOTE_ADDR": "1.2.3.4",
- "env-REMOTE_HOST": "1.2.3.4",
- "env-REQUEST_METHOD": "GET",
- "env-REQUEST_URI": "/test.cgi?foo=bar&a=b",
- "env-SCRIPT_FILENAME": "testdata/test.cgi",
- "env-SCRIPT_NAME": "/test.cgi",
- "env-SERVER_NAME": "example.com",
- "env-SERVER_PORT": "80",
- "env-SERVER_SOFTWARE": "go",
- }
- replay := runCgiTest(t, h, "GET /test.cgi?foo=bar&a=b HTTP/1.0\nHost: example.com\n\n", expectedMap)
-
- if expected, got := "text/html", replay.Header().Get("Content-Type"); got != expected {
- t.Errorf("got a Content-Type of %q; expected %q", got, expected)
- }
- if expected, got := "X-Test-Value", replay.Header().Get("X-Test-Header"); got != expected {
- t.Errorf("got a X-Test-Header of %q; expected %q", got, expected)
- }
-}
-
-func TestCGIBasicGetAbsPath(t *testing.T) {
- if skipTest(t) {
- return
- }
- pwd, err := os.Getwd()
- if err != nil {
- t.Fatalf("getwd error: %v", err)
- }
- h := &Handler{
- Path: pwd + "/testdata/test.cgi",
- Root: "/test.cgi",
- }
- expectedMap := map[string]string{
- "env-REQUEST_URI": "/test.cgi?foo=bar&a=b",
- "env-SCRIPT_FILENAME": pwd + "/testdata/test.cgi",
- "env-SCRIPT_NAME": "/test.cgi",
- }
- runCgiTest(t, h, "GET /test.cgi?foo=bar&a=b HTTP/1.0\nHost: example.com\n\n", expectedMap)
-}
-
-func TestPathInfo(t *testing.T) {
- if skipTest(t) {
- return
- }
- h := &Handler{
- Path: "testdata/test.cgi",
- Root: "/test.cgi",
- }
- expectedMap := map[string]string{
- "param-a": "b",
- "env-PATH_INFO": "/extrapath",
- "env-QUERY_STRING": "a=b",
- "env-REQUEST_URI": "/test.cgi/extrapath?a=b",
- "env-SCRIPT_FILENAME": "testdata/test.cgi",
- "env-SCRIPT_NAME": "/test.cgi",
- }
- runCgiTest(t, h, "GET /test.cgi/extrapath?a=b HTTP/1.0\nHost: example.com\n\n", expectedMap)
-}
-
-func TestPathInfoDirRoot(t *testing.T) {
- if skipTest(t) {
- return
- }
- h := &Handler{
- Path: "testdata/test.cgi",
- Root: "/myscript/",
- }
- expectedMap := map[string]string{
- "env-PATH_INFO": "bar",
- "env-QUERY_STRING": "a=b",
- "env-REQUEST_URI": "/myscript/bar?a=b",
- "env-SCRIPT_FILENAME": "testdata/test.cgi",
- "env-SCRIPT_NAME": "/myscript/",
- }
- runCgiTest(t, h, "GET /myscript/bar?a=b HTTP/1.0\nHost: example.com\n\n", expectedMap)
-}
-
-func TestDupHeaders(t *testing.T) {
- if skipTest(t) {
- return
- }
- h := &Handler{
- Path: "testdata/test.cgi",
- }
- expectedMap := map[string]string{
- "env-REQUEST_URI": "/myscript/bar?a=b",
- "env-SCRIPT_FILENAME": "testdata/test.cgi",
- "env-HTTP_COOKIE": "nom=NOM; yum=YUM",
- "env-HTTP_X_FOO": "val1, val2",
- }
- runCgiTest(t, h, "GET /myscript/bar?a=b HTTP/1.0\n"+
- "Cookie: nom=NOM\n"+
- "Cookie: yum=YUM\n"+
- "X-Foo: val1\n"+
- "X-Foo: val2\n"+
- "Host: example.com\n\n",
- expectedMap)
-}
-
-func TestPathInfoNoRoot(t *testing.T) {
- if skipTest(t) {
- return
- }
- h := &Handler{
- Path: "testdata/test.cgi",
- Root: "",
- }
- expectedMap := map[string]string{
- "env-PATH_INFO": "/bar",
- "env-QUERY_STRING": "a=b",
- "env-REQUEST_URI": "/bar?a=b",
- "env-SCRIPT_FILENAME": "testdata/test.cgi",
- "env-SCRIPT_NAME": "/",
- }
- runCgiTest(t, h, "GET /bar?a=b HTTP/1.0\nHost: example.com\n\n", expectedMap)
-}
-
-func TestCGIBasicPost(t *testing.T) {
- if skipTest(t) {
- return
- }
- postReq := `POST /test.cgi?a=b HTTP/1.0
-Host: example.com
-Content-Type: application/x-www-form-urlencoded
-Content-Length: 15
-
-postfoo=postbar`
- h := &Handler{
- Path: "testdata/test.cgi",
- Root: "/test.cgi",
- }
- expectedMap := map[string]string{
- "test": "Hello CGI",
- "param-postfoo": "postbar",
- "env-REQUEST_METHOD": "POST",
- "env-CONTENT_LENGTH": "15",
- "env-REQUEST_URI": "/test.cgi?a=b",
- }
- runCgiTest(t, h, postReq, expectedMap)
-}
-
-func chunk(s string) string {
- return fmt.Sprintf("%x\r\n%s\r\n", len(s), s)
-}
-
-// The CGI spec doesn't allow chunked requests.
-func TestCGIPostChunked(t *testing.T) {
- if skipTest(t) {
- return
- }
- postReq := `POST /test.cgi?a=b HTTP/1.1
-Host: example.com
-Content-Type: application/x-www-form-urlencoded
-Transfer-Encoding: chunked
-
-` + chunk("postfoo") + chunk("=") + chunk("postbar") + chunk("")
-
- h := &Handler{
- Path: "testdata/test.cgi",
- Root: "/test.cgi",
- }
- expectedMap := map[string]string{}
- resp := runCgiTest(t, h, postReq, expectedMap)
- if got, expected := resp.Code, http.StatusBadRequest; got != expected {
- t.Fatalf("Expected %v response code from chunked request body; got %d",
- expected, got)
- }
-}
-
-func TestRedirect(t *testing.T) {
- if skipTest(t) {
- return
- }
- h := &Handler{
- Path: "testdata/test.cgi",
- Root: "/test.cgi",
- }
- rec := runCgiTest(t, h, "GET /test.cgi?loc=http://foo.com/ HTTP/1.0\nHost: example.com\n\n", nil)
- if e, g := 302, rec.Code; e != g {
- t.Errorf("expected status code %d; got %d", e, g)
- }
- if e, g := "http://foo.com/", rec.Header().Get("Location"); e != g {
- t.Errorf("expected Location header of %q; got %q", e, g)
- }
-}
-
-func TestInternalRedirect(t *testing.T) {
- if skipTest(t) {
- return
- }
- baseHandler := http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
- fmt.Fprintf(rw, "basepath=%s\n", req.URL.Path)
- fmt.Fprintf(rw, "remoteaddr=%s\n", req.RemoteAddr)
- })
- h := &Handler{
- Path: "testdata/test.cgi",
- Root: "/test.cgi",
- PathLocationHandler: baseHandler,
- }
- expectedMap := map[string]string{
- "basepath": "/foo",
- "remoteaddr": "1.2.3.4",
- }
- runCgiTest(t, h, "GET /test.cgi?loc=/foo HTTP/1.0\nHost: example.com\n\n", expectedMap)
-}
-
-// TestCopyError tests that we kill the process if there's an error copying
-// its output. (for example, from the client having gone away)
-func TestCopyError(t *testing.T) {
- if skipTest(t) || runtime.GOOS == "windows" {
- return
- }
- h := &Handler{
- Path: "testdata/test.cgi",
- Root: "/test.cgi",
- }
- ts := httptest.NewServer(h)
- defer ts.Close()
-
- conn, err := net.Dial("tcp", ts.Listener.Addr().String())
- if err != nil {
- t.Fatal(err)
- }
- req, _ := http.NewRequest("GET", "http://example.com/test.cgi?bigresponse=1", nil)
- err = req.Write(conn)
- if err != nil {
- t.Fatalf("Write: %v", err)
- }
-
- res, err := http.ReadResponse(bufio.NewReader(conn), req)
- if err != nil {
- t.Fatalf("ReadResponse: %v", err)
- }
-
- pidstr := res.Header.Get("X-CGI-Pid")
- if pidstr == "" {
- t.Fatalf("expected an X-CGI-Pid header in response")
- }
- pid, err := strconv.Atoi(pidstr)
- if err != nil {
- t.Fatalf("invalid X-CGI-Pid value")
- }
-
- var buf [5000]byte
- n, err := io.ReadFull(res.Body, buf[:])
- if err != nil {
- t.Fatalf("ReadFull: %d bytes, %v", n, err)
- }
-
- childRunning := func() bool {
- p, err := os.FindProcess(pid)
- if err != nil {
- return false
- }
- return p.Signal(os.UnixSignal(0)) == nil
- }
-
- if !childRunning() {
- t.Fatalf("pre-conn.Close, expected child to be running")
- }
- conn.Close()
-
- if tries := 0; childRunning() {
- for tries < 15 && childRunning() {
- time.Sleep(50e6 * int64(tries))
- tries++
- }
- if childRunning() {
- t.Fatalf("post-conn.Close, expected child to be gone")
- }
- }
-}
-
-/* This test doesn't work in gccgo's testing environment.
-
-func TestDirUnix(t *testing.T) {
- if skipTest(t) || runtime.GOOS == "windows" {
- return
- }
-
- cwd, _ := os.Getwd()
- h := &Handler{
- Path: "testdata/test.cgi",
- Root: "/test.cgi",
- Dir: cwd,
- }
- expectedMap := map[string]string{
- "cwd": cwd,
- }
- runCgiTest(t, h, "GET /test.cgi HTTP/1.0\nHost: example.com\n\n", expectedMap)
-
- cwd, _ = os.Getwd()
- cwd = filepath.Join(cwd, "testdata")
- h = &Handler{
- Path: "testdata/test.cgi",
- Root: "/test.cgi",
- }
- expectedMap = map[string]string{
- "cwd": cwd,
- }
- runCgiTest(t, h, "GET /test.cgi HTTP/1.0\nHost: example.com\n\n", expectedMap)
-}
-
-*/
-
-func TestDirWindows(t *testing.T) {
- if skipTest(t) || runtime.GOOS != "windows" {
- return
- }
-
- cgifile, _ := filepath.Abs("testdata/test.cgi")
-
- var perl string
- var err error
- perl, err = exec.LookPath("perl")
- if err != nil {
- return
- }
- perl, _ = filepath.Abs(perl)
-
- cwd, _ := os.Getwd()
- h := &Handler{
- Path: perl,
- Root: "/test.cgi",
- Dir: cwd,
- Args: []string{cgifile},
- Env: []string{"SCRIPT_FILENAME=" + cgifile},
- }
- expectedMap := map[string]string{
- "cwd": cwd,
- }
- runCgiTest(t, h, "GET /test.cgi HTTP/1.0\nHost: example.com\n\n", expectedMap)
-
- // If Dir is not specified on Windows, the working directory should be
- // the base directory of perl.
- cwd, _ = filepath.Split(perl)
- if cwd != "" && cwd[len(cwd)-1] == filepath.Separator {
- cwd = cwd[:len(cwd)-1]
- }
- h = &Handler{
- Path: perl,
- Root: "/test.cgi",
- Args: []string{cgifile},
- Env: []string{"SCRIPT_FILENAME=" + cgifile},
- }
- expectedMap = map[string]string{
- "cwd": cwd,
- }
- runCgiTest(t, h, "GET /test.cgi HTTP/1.0\nHost: example.com\n\n", expectedMap)
-}
-
-func TestEnvOverride(t *testing.T) {
- cgifile, _ := filepath.Abs("testdata/test.cgi")
-
- var perl string
- var err error
- perl, err = exec.LookPath("perl")
- if err != nil {
- return
- }
- perl, _ = filepath.Abs(perl)
-
- cwd, _ := os.Getwd()
- h := &Handler{
- Path: perl,
- Root: "/test.cgi",
- Dir: cwd,
- Args: []string{cgifile},
- Env: []string{
- "SCRIPT_FILENAME=" + cgifile,
- "REQUEST_URI=/foo/bar"},
- }
- expectedMap := map[string]string{
- "cwd": cwd,
- "env-SCRIPT_FILENAME": cgifile,
- "env-REQUEST_URI": "/foo/bar",
- }
- runCgiTest(t, h, "GET /test.cgi HTTP/1.0\nHost: example.com\n\n", expectedMap)
-}
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Tests a Go CGI program running under a Go CGI host process.
-// Further, the two programs are the same binary, just checking
-// their environment to figure out what mode to run in.
-
-package cgi
-
-import (
- "fmt"
- "http"
- "os"
- "testing"
-)
-
-// This test is a CGI host (testing host.go) that runs its own binary
-// as a child process testing the other half of CGI (child.go).
-func TestHostingOurselves(t *testing.T) {
- h := &Handler{
- Path: os.Args[0],
- Root: "/test.go",
- Args: []string{"-test.run=TestBeChildCGIProcess"},
- }
- expectedMap := map[string]string{
- "test": "Hello CGI-in-CGI",
- "param-a": "b",
- "param-foo": "bar",
- "env-GATEWAY_INTERFACE": "CGI/1.1",
- "env-HTTP_HOST": "example.com",
- "env-PATH_INFO": "",
- "env-QUERY_STRING": "foo=bar&a=b",
- "env-REMOTE_ADDR": "1.2.3.4",
- "env-REMOTE_HOST": "1.2.3.4",
- "env-REQUEST_METHOD": "GET",
- "env-REQUEST_URI": "/test.go?foo=bar&a=b",
- "env-SCRIPT_FILENAME": os.Args[0],
- "env-SCRIPT_NAME": "/test.go",
- "env-SERVER_NAME": "example.com",
- "env-SERVER_PORT": "80",
- "env-SERVER_SOFTWARE": "go",
- }
- replay := runCgiTest(t, h, "GET /test.go?foo=bar&a=b HTTP/1.0\nHost: example.com\n\n", expectedMap)
-
- if expected, got := "text/html; charset=utf-8", replay.Header().Get("Content-Type"); got != expected {
- t.Errorf("got a Content-Type of %q; expected %q", got, expected)
- }
- if expected, got := "X-Test-Value", replay.Header().Get("X-Test-Header"); got != expected {
- t.Errorf("got a X-Test-Header of %q; expected %q", got, expected)
- }
-}
-
-// Note: not actually a test.
-func TestBeChildCGIProcess(t *testing.T) {
- if os.Getenv("REQUEST_METHOD") == "" {
- // Not in a CGI environment; skipping test.
- return
- }
- Serve(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
- rw.Header().Set("X-Test-Header", "X-Test-Value")
- fmt.Fprintf(rw, "test=Hello CGI-in-CGI\n")
- req.ParseForm()
- for k, vv := range req.Form {
- for _, v := range vv {
- fmt.Fprintf(rw, "param-%s=%s\n", k, v)
- }
- }
- for _, kv := range os.Environ() {
- fmt.Fprintf(rw, "env-%s\n", kv)
- }
- }))
- os.Exit(0)
-}
+++ /dev/null
-#!/usr/bin/perl
-# Copyright 2011 The Go Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style
-# license that can be found in the LICENSE file.
-#
-# Test script run as a child process under cgi_test.go
-
-use strict;
-use Cwd;
-
-my $q = MiniCGI->new;
-my $params = $q->Vars;
-
-if ($params->{"loc"}) {
- print "Location: $params->{loc}\r\n\r\n";
- exit(0);
-}
-
-my $NL = "\r\n";
-$NL = "\n" if $params->{mode} eq "NL";
-
-my $p = sub {
- print "$_[0]$NL";
-};
-
-# With carriage returns
-$p->("Content-Type: text/html");
-$p->("X-CGI-Pid: $$");
-$p->("X-Test-Header: X-Test-Value");
-$p->("");
-
-if ($params->{"bigresponse"}) {
- for (1..1024) {
- print "A" x 1024, "\n";
- }
- exit 0;
-}
-
-print "test=Hello CGI\n";
-
-foreach my $k (sort keys %$params) {
- print "param-$k=$params->{$k}\n";
-}
-
-foreach my $k (sort keys %ENV) {
- my $clean_env = $ENV{$k};
- $clean_env =~ s/[\n\r]//g;
- print "env-$k=$clean_env\n";
-}
-
-# NOTE: don't call getcwd() for windows.
-# msys return /c/go/src/... not C:\go\...
-my $dir;
-if ($^O eq 'MSWin32' || $^O eq 'msys') {
- my $cmd = $ENV{'COMSPEC'} || 'c:\\windows\\system32\\cmd.exe';
- $cmd =~ s!\\!/!g;
- $dir = `$cmd /c cd`;
- chomp $dir;
-} else {
- $dir = getcwd();
-}
-print "cwd=$dir\n";
-
-
-# A minimal version of CGI.pm, for people without the perl-modules
-# package installed. (CGI.pm used to be part of the Perl core, but
-# some distros now bundle perl-base and perl-modules separately...)
-package MiniCGI;
-
-sub new {
- my $class = shift;
- return bless {}, $class;
-}
-
-sub Vars {
- my $self = shift;
- my $pairs;
- if ($ENV{CONTENT_LENGTH}) {
- $pairs = do { local $/; <STDIN> };
- } else {
- $pairs = $ENV{QUERY_STRING};
- }
- my $vars = {};
- foreach my $kv (split(/&/, $pairs)) {
- my ($k, $v) = split(/=/, $kv, 2);
- $vars->{_urldecode($k)} = _urldecode($v);
- }
- return $vars;
-}
-
-sub _urldecode {
- my $v = shift;
- $v =~ tr/+/ /;
- $v =~ s/%([a-fA-F0-9][a-fA-F0-9])/pack("C", hex($1))/eg;
- return $v;
-}
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http
-
-import (
- "bufio"
- "io"
- "log"
- "strconv"
-)
-
-// NewChunkedWriter returns a new writer that translates writes into HTTP
-// "chunked" format before writing them to w. Closing the returned writer
-// sends the final 0-length chunk that marks the end of the stream.
-//
-// NewChunkedWriter is not needed by normal applications. The http
-// package adds chunking automatically if handlers don't set a
-// Content-Length header. Using NewChunkedWriter inside a handler
-// would result in double chunking or chunking with a Content-Length
-// length, both of which are wrong.
-func NewChunkedWriter(w io.Writer) io.WriteCloser {
- if _, bad := w.(*response); bad {
- log.Printf("warning: using NewChunkedWriter in an http.Handler; expect corrupt output")
- }
- return &chunkedWriter{w}
-}
-
-// Writing to ChunkedWriter translates to writing in HTTP chunked Transfer
-// Encoding wire format to the underlying Wire writer.
-type chunkedWriter struct {
- Wire io.Writer
-}
-
-// Write the contents of data as one chunk to Wire.
-// NOTE: the corresponding chunk-writing procedure in Conn.Write has a bug,
-// since it does not check for success of io.WriteString.
-func (cw *chunkedWriter) Write(data []byte) (n int, err error) {
-
- // Don't send 0-length data. It looks like EOF for chunked encoding.
- if len(data) == 0 {
- return 0, nil
- }
-
- head := strconv.Itob(len(data), 16) + "\r\n"
-
- if _, err = io.WriteString(cw.Wire, head); err != nil {
- return 0, err
- }
- if n, err = cw.Wire.Write(data); err != nil {
- return
- }
- if n != len(data) {
- err = io.ErrShortWrite
- return
- }
- _, err = io.WriteString(cw.Wire, "\r\n")
-
- return
-}
-
-func (cw *chunkedWriter) Close() error {
- _, err := io.WriteString(cw.Wire, "0\r\n")
- return err
-}
-
-// NewChunkedReader returns a new reader that translates the data read from r
-// out of HTTP "chunked" format before returning it.
-// The reader returns os.EOF when the final 0-length chunk is read.
-//
-// NewChunkedReader is not needed by normal applications. The http package
-// automatically decodes chunking when reading response bodies.
-func NewChunkedReader(r *bufio.Reader) io.Reader {
- return &chunkedReader{r: r}
-}
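
This chunked writer ends up exposed as net/http/httputil.NewChunkedWriter in
the final tree (that location is assumed here). A sketch of the wire format it
produces:

package main

import (
	"bytes"
	"fmt"
	"net/http/httputil"
)

func main() {
	var buf bytes.Buffer
	cw := httputil.NewChunkedWriter(&buf)
	cw.Write([]byte("hello"))   // "5\r\nhello\r\n"
	cw.Write([]byte(", world")) // "7\r\n, world\r\n"
	cw.Close()                  // terminating zero-length chunk: "0\r\n"
	fmt.Printf("%q\n", buf.String())
}
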
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// HTTP client. See RFC 2616.
-//
-// This is the high-level Client interface.
-// The low-level implementation is in transport.go.
-
-package http
-
-import (
- "encoding/base64"
- "errors"
- "fmt"
- "io"
- "strings"
- "url"
-)
-
-// A Client is an HTTP client. Its zero value (DefaultClient) is a usable client
-// that uses DefaultTransport.
-//
-// The Client's Transport typically has internal state (cached
-// TCP connections), so Clients should be reused instead of created as
-// needed. Clients are safe for concurrent use by multiple goroutines.
-//
-// Client is not yet very configurable.
-type Client struct {
- Transport RoundTripper // if nil, DefaultTransport is used
-
- // If CheckRedirect is not nil, the client calls it before
- // following an HTTP redirect. The arguments req and via
- // are the upcoming request and the requests made already,
- // oldest first. If CheckRedirect returns an error, the client
- // returns that error instead of issuing the Request req.
- //
- // If CheckRedirect is nil, the Client uses its default policy,
- // which is to stop after 10 consecutive redirects.
- CheckRedirect func(req *Request, via []*Request) error
-}
-
-// DefaultClient is the default Client and is used by Get, Head, and Post.
-var DefaultClient = &Client{}
-
-// RoundTripper is an interface representing the ability to execute a
-// single HTTP transaction, obtaining the Response for a given Request.
-//
-// A RoundTripper must be safe for concurrent use by multiple
-// goroutines.
-type RoundTripper interface {
- // RoundTrip executes a single HTTP transaction, returning
- // the Response for the request req. RoundTrip should not
- // attempt to interpret the response. In particular,
- // RoundTrip must return err == nil if it obtained a response,
- // regardless of the response's HTTP status code. A non-nil
- // err should be reserved for failure to obtain a response.
- // Similarly, RoundTrip should not attempt to handle
- // higher-level protocol details such as redirects,
- // authentication, or cookies.
- //
- // RoundTrip should not modify the request, except for
- // consuming the Body. The request's URL and Header fields
- // are guaranteed to be initialized.
- RoundTrip(*Request) (*Response, error)
-}
-
-// Given a string of the form "host", "host:port", or "[ipv6::address]:port",
-// return true if the string includes a port.
-func hasPort(s string) bool { return strings.LastIndex(s, ":") > strings.LastIndex(s, "]") }
-
-// Used in Send to implement io.ReadCloser by bundling together the
-// bufio.Reader through which we read the response, and the underlying
-// network connection.
-type readClose struct {
- io.Reader
- io.Closer
-}
-
-// Do sends an HTTP request and returns an HTTP response, following
-// policy (e.g. redirects, cookies, auth) as configured on the client.
-//
-// A non-nil response always contains a non-nil resp.Body.
-//
-// Callers should close resp.Body when done reading from it. If
-// resp.Body is not closed, the Client's underlying RoundTripper
-// (typically Transport) may not be able to re-use a persistent TCP
-// connection to the server for a subsequent "keep-alive" request.
-//
-// Generally Get, Post, or PostForm will be used instead of Do.
-func (c *Client) Do(req *Request) (resp *Response, err error) {
- if req.Method == "GET" || req.Method == "HEAD" {
- return c.doFollowingRedirects(req)
- }
- return send(req, c.Transport)
-}
-
-// send issues an HTTP request. Caller should close resp.Body when done reading from it.
-func send(req *Request, t RoundTripper) (resp *Response, err error) {
- if t == nil {
- t = DefaultTransport
- if t == nil {
- err = errors.New("http: no Client.Transport or DefaultTransport")
- return
- }
- }
-
- if req.URL == nil {
- return nil, errors.New("http: nil Request.URL")
- }
-
- // Most of the callers of send (Get, Post, et al) don't need
- // Headers, leaving it uninitialized. We guarantee to the
- // Transport that this has been initialized, though.
- if req.Header == nil {
- req.Header = make(Header)
- }
-
- info := req.URL.RawUserinfo
- if len(info) > 0 {
- req.Header.Set("Authorization", "Basic "+base64.URLEncoding.EncodeToString([]byte(info)))
- }
- return t.RoundTrip(req)
-}
-
-// True if the specified HTTP status code is one for which the Get utility should
-// automatically redirect.
-func shouldRedirect(statusCode int) bool {
- switch statusCode {
- case StatusMovedPermanently, StatusFound, StatusSeeOther, StatusTemporaryRedirect:
- return true
- }
- return false
-}
-
-// Get issues a GET to the specified URL. If the response is one of the following
-// redirect codes, Get follows the redirect, up to a maximum of 10 redirects:
-//
-// 301 (Moved Permanently)
-// 302 (Found)
-// 303 (See Other)
-// 307 (Temporary Redirect)
-//
-// Caller should close r.Body when done reading from it.
-//
-// Get is a convenience wrapper around DefaultClient.Get.
-func Get(url string) (r *Response, err error) {
- return DefaultClient.Get(url)
-}
-
-// Get issues a GET to the specified URL. If the response is one of the
-// following redirect codes, Get follows the redirect after calling the
-// Client's CheckRedirect function.
-//
-// 301 (Moved Permanently)
-// 302 (Found)
-// 303 (See Other)
-// 307 (Temporary Redirect)
-//
-// Caller should close r.Body when done reading from it.
-func (c *Client) Get(url string) (r *Response, err error) {
- req, err := NewRequest("GET", url, nil)
- if err != nil {
- return nil, err
- }
- return c.doFollowingRedirects(req)
-}
-
-func (c *Client) doFollowingRedirects(ireq *Request) (r *Response, err error) {
- // TODO: if/when we add cookie support, the redirected request shouldn't
- // necessarily supply the same cookies as the original.
- var base *url.URL
- redirectChecker := c.CheckRedirect
- if redirectChecker == nil {
- redirectChecker = defaultCheckRedirect
- }
- var via []*Request
-
- if ireq.URL == nil {
- return nil, errors.New("http: nil Request.URL")
- }
-
- req := ireq
- urlStr := "" // next relative or absolute URL to fetch (after first request)
- for redirect := 0; ; redirect++ {
- if redirect != 0 {
- req = new(Request)
- req.Method = ireq.Method
- req.Header = make(Header)
- req.URL, err = base.Parse(urlStr)
- if err != nil {
- break
- }
- if len(via) > 0 {
- // Add the Referer header.
- lastReq := via[len(via)-1]
- if lastReq.URL.Scheme != "https" {
- req.Header.Set("Referer", lastReq.URL.String())
- }
-
- err = redirectChecker(req, via)
- if err != nil {
- break
- }
- }
- }
-
- urlStr = req.URL.String()
- if r, err = send(req, c.Transport); err != nil {
- break
- }
- if shouldRedirect(r.StatusCode) {
- r.Body.Close()
- if urlStr = r.Header.Get("Location"); urlStr == "" {
- err = errors.New(fmt.Sprintf("%d response missing Location header", r.StatusCode))
- break
- }
- base = req.URL
- via = append(via, req)
- continue
- }
- return
- }
-
- method := ireq.Method
- err = &url.Error{method[0:1] + strings.ToLower(method[1:]), urlStr, err}
- return
-}
-
-func defaultCheckRedirect(req *Request, via []*Request) error {
- if len(via) >= 10 {
- return errors.New("stopped after 10 redirects")
- }
- return nil
-}
-
-// Post issues a POST to the specified URL.
-//
-// Caller should close r.Body when done reading from it.
-//
-// Post is a wrapper around DefaultClient.Post
-func Post(url string, bodyType string, body io.Reader) (r *Response, err error) {
- return DefaultClient.Post(url, bodyType, body)
-}
-
-// Post issues a POST to the specified URL.
-//
-// Caller should close r.Body when done reading from it.
-func (c *Client) Post(url string, bodyType string, body io.Reader) (r *Response, err error) {
- req, err := NewRequest("POST", url, body)
- if err != nil {
- return nil, err
- }
- req.Header.Set("Content-Type", bodyType)
- return send(req, c.Transport)
-}
-
-// PostForm issues a POST to the specified URL,
-// with data's keys and values urlencoded as the request body.
-//
-// Caller should close r.Body when done reading from it.
-//
-// PostForm is a wrapper around DefaultClient.PostForm
-func PostForm(url string, data url.Values) (r *Response, err error) {
- return DefaultClient.PostForm(url, data)
-}
-
-// PostForm issues a POST to the specified URL,
-// with data's keys and values urlencoded as the request body.
-//
-// Caller should close r.Body when done reading from it.
-func (c *Client) PostForm(url string, data url.Values) (r *Response, err error) {
- return c.Post(url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
-}
-
-// Head issues a HEAD to the specified URL. If the response is one of the
-// following redirect codes, Head follows the redirect after calling the
-// Client's CheckRedirect function.
-//
-// 301 (Moved Permanently)
-// 302 (Found)
-// 303 (See Other)
-// 307 (Temporary Redirect)
-//
-// Head is a wrapper around DefaultClient.Head
-func Head(url string) (r *Response, err error) {
- return DefaultClient.Head(url)
-}
-
-// Head issues a HEAD to the specified URL. If the response is one of the
-// following redirect codes, Head follows the redirect after calling the
-// Client's CheckRedirect function.
-//
-// 301 (Moved Permanently)
-// 302 (Found)
-// 303 (See Other)
-// 307 (Temporary Redirect)
-func (c *Client) Head(url string) (r *Response, err error) {
- req, err := NewRequest("HEAD", url, nil)
- if err != nil {
- return nil, err
- }
- return c.doFollowingRedirects(req)
-}
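
A sketch of the CheckRedirect hook documented above, written against net/http
(where this client code ends up); the URL is a placeholder:

package main

import (
	"errors"
	"fmt"
	"net/http"
)

func main() {
	// Refuse to follow more than three redirects instead of the default ten.
	c := &http.Client{
		CheckRedirect: func(req *http.Request, via []*http.Request) error {
			if len(via) >= 3 {
				return errors.New("stopped after 3 redirects")
			}
			return nil
		},
	}
	resp, err := c.Get("http://example.com/") // placeholder URL
	if err != nil {
		fmt.Println(err)
		return
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
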
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Tests for client.go
-
-package http_test
-
-import (
- "crypto/tls"
- "errors"
- "fmt"
- . "http"
- "http/httptest"
- "io"
- "io/ioutil"
- "net"
- "strconv"
- "strings"
- "testing"
- "url"
-)
-
-var robotsTxtHandler = HandlerFunc(func(w ResponseWriter, r *Request) {
- w.Header().Set("Last-Modified", "sometime")
- fmt.Fprintf(w, "User-agent: go\nDisallow: /something/")
-})
-
-func TestClient(t *testing.T) {
- ts := httptest.NewServer(robotsTxtHandler)
- defer ts.Close()
-
- r, err := Get(ts.URL)
- var b []byte
- if err == nil {
- b, err = ioutil.ReadAll(r.Body)
- r.Body.Close()
- }
- if err != nil {
- t.Error(err)
- } else if s := string(b); !strings.HasPrefix(s, "User-agent:") {
- t.Errorf("Incorrect page body (did not begin with User-agent): %q", s)
- }
-}
-
-func TestClientHead(t *testing.T) {
- ts := httptest.NewServer(robotsTxtHandler)
- defer ts.Close()
-
- r, err := Head(ts.URL)
- if err != nil {
- t.Fatal(err)
- }
- if _, ok := r.Header["Last-Modified"]; !ok {
- t.Error("Last-Modified header not found.")
- }
-}
-
-type recordingTransport struct {
- req *Request
-}
-
-func (t *recordingTransport) RoundTrip(req *Request) (resp *Response, err error) {
- t.req = req
- return nil, errors.New("dummy impl")
-}
-
-func TestGetRequestFormat(t *testing.T) {
- tr := &recordingTransport{}
- client := &Client{Transport: tr}
- url := "http://dummy.faketld/"
- client.Get(url) // Note: doesn't hit network
- if tr.req.Method != "GET" {
- t.Errorf("expected method %q; got %q", "GET", tr.req.Method)
- }
- if tr.req.URL.String() != url {
- t.Errorf("expected URL %q; got %q", url, tr.req.URL.String())
- }
- if tr.req.Header == nil {
- t.Errorf("expected non-nil request Header")
- }
-}
-
-func TestPostRequestFormat(t *testing.T) {
- tr := &recordingTransport{}
- client := &Client{Transport: tr}
-
- url := "http://dummy.faketld/"
- json := `{"key":"value"}`
- b := strings.NewReader(json)
- client.Post(url, "application/json", b) // Note: doesn't hit network
-
- if tr.req.Method != "POST" {
- t.Errorf("got method %q, want %q", tr.req.Method, "POST")
- }
- if tr.req.URL.String() != url {
- t.Errorf("got URL %q, want %q", tr.req.URL.String(), url)
- }
- if tr.req.Header == nil {
- t.Fatalf("expected non-nil request Header")
- }
- if tr.req.Close {
- t.Error("got Close true, want false")
- }
- if g, e := tr.req.ContentLength, int64(len(json)); g != e {
- t.Errorf("got ContentLength %d, want %d", g, e)
- }
-}
-
-func TestPostFormRequestFormat(t *testing.T) {
- tr := &recordingTransport{}
- client := &Client{Transport: tr}
-
- urlStr := "http://dummy.faketld/"
- form := make(url.Values)
- form.Set("foo", "bar")
- form.Add("foo", "bar2")
- form.Set("bar", "baz")
- client.PostForm(urlStr, form) // Note: doesn't hit network
-
- if tr.req.Method != "POST" {
- t.Errorf("got method %q, want %q", tr.req.Method, "POST")
- }
- if tr.req.URL.String() != urlStr {
- t.Errorf("got URL %q, want %q", tr.req.URL.String(), urlStr)
- }
- if tr.req.Header == nil {
- t.Fatalf("expected non-nil request Header")
- }
- if g, e := tr.req.Header.Get("Content-Type"), "application/x-www-form-urlencoded"; g != e {
- t.Errorf("got Content-Type %q, want %q", g, e)
- }
- if tr.req.Close {
- t.Error("got Close true, want false")
- }
- // Depending on map iteration, body can be either of these.
- expectedBody := "foo=bar&foo=bar2&bar=baz"
- expectedBody1 := "bar=baz&foo=bar&foo=bar2"
- if g, e := tr.req.ContentLength, int64(len(expectedBody)); g != e {
- t.Errorf("got ContentLength %d, want %d", g, e)
- }
- bodyb, err := ioutil.ReadAll(tr.req.Body)
- if err != nil {
- t.Fatalf("ReadAll on req.Body: %v", err)
- }
- if g := string(bodyb); g != expectedBody && g != expectedBody1 {
- t.Errorf("got body %q, want %q or %q", g, expectedBody, expectedBody1)
- }
-}
-
-func TestRedirects(t *testing.T) {
- var ts *httptest.Server
- ts = httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
- n, _ := strconv.Atoi(r.FormValue("n"))
- // Test Referer header. (7 is arbitrary position to test at)
- if n == 7 {
- if g, e := r.Referer(), ts.URL+"/?n=6"; e != g {
- t.Errorf("on request ?n=7, expected referer of %q; got %q", e, g)
- }
- }
- if n < 15 {
- Redirect(w, r, fmt.Sprintf("/?n=%d", n+1), StatusFound)
- return
- }
- fmt.Fprintf(w, "n=%d", n)
- }))
- defer ts.Close()
-
- c := &Client{}
- _, err := c.Get(ts.URL)
- if e, g := "Get /?n=10: stopped after 10 redirects", fmt.Sprintf("%v", err); e != g {
- t.Errorf("with default client Get, expected error %q, got %q", e, g)
- }
-
- // HEAD request should also have the ability to follow redirects.
- _, err = c.Head(ts.URL)
- if e, g := "Head /?n=10: stopped after 10 redirects", fmt.Sprintf("%v", err); e != g {
- t.Errorf("with default client Head, expected error %q, got %q", e, g)
- }
-
- // Do should also follow redirects.
- greq, _ := NewRequest("GET", ts.URL, nil)
- _, err = c.Do(greq)
- if e, g := "Get /?n=10: stopped after 10 redirects", fmt.Sprintf("%v", err); e != g {
- t.Errorf("with default client Do, expected error %q, got %q", e, g)
- }
-
- var checkErr error
- var lastVia []*Request
- c = &Client{CheckRedirect: func(_ *Request, via []*Request) error {
- lastVia = via
- return checkErr
- }}
- res, err := c.Get(ts.URL)
- finalUrl := res.Request.URL.String()
- if e, g := "<nil>", fmt.Sprintf("%v", err); e != g {
- t.Errorf("with custom client, expected error %q, got %q", e, g)
- }
- if !strings.HasSuffix(finalUrl, "/?n=15") {
- t.Errorf("expected final url to end in /?n=15; got url %q", finalUrl)
- }
- if e, g := 15, len(lastVia); e != g {
- t.Errorf("expected lastVia to have contained %d elements; got %d", e, g)
- }
-
- checkErr = errors.New("no redirects allowed")
- res, err = c.Get(ts.URL)
- finalUrl = res.Request.URL.String()
- if e, g := "Get /?n=1: no redirects allowed", fmt.Sprintf("%v", err); e != g {
- t.Errorf("with redirects forbidden, expected error %q, got %q", e, g)
- }
-}
-
-func TestStreamingGet(t *testing.T) {
- say := make(chan string)
- ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
- w.(Flusher).Flush()
- for str := range say {
- w.Write([]byte(str))
- w.(Flusher).Flush()
- }
- }))
- defer ts.Close()
-
- c := &Client{}
- res, err := c.Get(ts.URL)
- if err != nil {
- t.Fatal(err)
- }
- var buf [10]byte
- for _, str := range []string{"i", "am", "also", "known", "as", "comet"} {
- say <- str
- n, err := io.ReadFull(res.Body, buf[0:len(str)])
- if err != nil {
- t.Fatalf("ReadFull on %q: %v", str, err)
- }
- if n != len(str) {
- t.Fatalf("Receiving %q, only read %d bytes", str, n)
- }
- got := string(buf[0:n])
- if got != str {
- t.Fatalf("Expected %q, got %q", str, got)
- }
- }
- close(say)
- _, err = io.ReadFull(res.Body, buf[0:1])
- if err != io.EOF {
- t.Fatalf("at end expected EOF, got %v", err)
- }
-}
-
-type writeCountingConn struct {
- net.Conn
- count *int
-}
-
-func (c *writeCountingConn) Write(p []byte) (int, error) {
- *c.count++
- return c.Conn.Write(p)
-}
-
-// TestClientWrites verifies that client requests are buffered and we
-// don't send a TCP packet per line of the http request + body.
-func TestClientWrites(t *testing.T) {
- ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
- }))
- defer ts.Close()
-
- writes := 0
- dialer := func(netz string, addr string) (net.Conn, error) {
- c, err := net.Dial(netz, addr)
- if err == nil {
- c = &writeCountingConn{c, &writes}
- }
- return c, err
- }
- c := &Client{Transport: &Transport{Dial: dialer}}
-
- _, err := c.Get(ts.URL)
- if err != nil {
- t.Fatal(err)
- }
- if writes != 1 {
- t.Errorf("Get request did %d Write calls, want 1", writes)
- }
-
- writes = 0
- _, err = c.PostForm(ts.URL, url.Values{"foo": {"bar"}})
- if err != nil {
- t.Fatal(err)
- }
- if writes != 1 {
- t.Errorf("Post request did %d Write calls, want 1", writes)
- }
-}
-
-func TestClientInsecureTransport(t *testing.T) {
- ts := httptest.NewTLSServer(HandlerFunc(func(w ResponseWriter, r *Request) {
- w.Write([]byte("Hello"))
- }))
- defer ts.Close()
-
- // TODO(bradfitz): add tests for skipping hostname checks too?
- // would require a new cert for testing, and probably
- // redundant with these tests.
- for _, insecure := range []bool{true, false} {
- tr := &Transport{
- TLSClientConfig: &tls.Config{
- InsecureSkipVerify: insecure,
- },
- }
- c := &Client{Transport: tr}
- _, err := c.Get(ts.URL)
- if (err == nil) != insecure {
- t.Errorf("insecure=%v: got unexpected err=%v", insecure, err)
- }
- }
-}
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http
-
-import (
- "bytes"
- "fmt"
- "strconv"
- "strings"
- "time"
-)
-
-// This implementation is done according to RFC 6265:
-//
-// http://tools.ietf.org/html/rfc6265
-
-// A Cookie represents an HTTP cookie as sent in the Set-Cookie header of an
-// HTTP response or the Cookie header of an HTTP request.
-type Cookie struct {
- Name string
- Value string
- Path string
- Domain string
- Expires time.Time
- RawExpires string
-
- // MaxAge=0 means no 'Max-Age' attribute specified.
- // MaxAge<0 means delete cookie now, equivalently 'Max-Age: 0'
- // MaxAge>0 means Max-Age attribute present and given in seconds
- MaxAge int
- Secure bool
- HttpOnly bool
- Raw string
- Unparsed []string // Raw text of unparsed attribute-value pairs
-}
-
-// readSetCookies parses all "Set-Cookie" values from
-// the header h and returns the successfully parsed Cookies.
-func readSetCookies(h Header) []*Cookie {
- cookies := []*Cookie{}
- for _, line := range h["Set-Cookie"] {
- parts := strings.Split(strings.TrimSpace(line), ";")
- if len(parts) == 1 && parts[0] == "" {
- continue
- }
- parts[0] = strings.TrimSpace(parts[0])
- j := strings.Index(parts[0], "=")
- if j < 0 {
- continue
- }
- name, value := parts[0][:j], parts[0][j+1:]
- if !isCookieNameValid(name) {
- continue
- }
- value, success := parseCookieValue(value)
- if !success {
- continue
- }
- c := &Cookie{
- Name: name,
- Value: value,
- Raw: line,
- }
- for i := 1; i < len(parts); i++ {
- parts[i] = strings.TrimSpace(parts[i])
- if len(parts[i]) == 0 {
- continue
- }
-
- attr, val := parts[i], ""
- if j := strings.Index(attr, "="); j >= 0 {
- attr, val = attr[:j], attr[j+1:]
- }
- lowerAttr := strings.ToLower(attr)
- parseCookieValueFn := parseCookieValue
- if lowerAttr == "expires" {
- parseCookieValueFn = parseCookieExpiresValue
- }
- val, success = parseCookieValueFn(val)
- if !success {
- c.Unparsed = append(c.Unparsed, parts[i])
- continue
- }
- switch lowerAttr {
- case "secure":
- c.Secure = true
- continue
- case "httponly":
- c.HttpOnly = true
- continue
- case "domain":
- c.Domain = val
- // TODO: Add domain parsing
- continue
- case "max-age":
- secs, err := strconv.Atoi(val)
- if err != nil || secs < 0 || secs != 0 && val[0] == '0' {
- break
- }
- if secs <= 0 {
- c.MaxAge = -1
- } else {
- c.MaxAge = secs
- }
- continue
- case "expires":
- c.RawExpires = val
- exptime, err := time.Parse(time.RFC1123, val)
- if err != nil {
- exptime, err = time.Parse("Mon, 02-Jan-2006 15:04:05 MST", val)
- if err != nil {
- c.Expires = time.Time{}
- break
- }
- }
- c.Expires = *exptime
- continue
- case "path":
- c.Path = val
- // TODO: Add path parsing
- continue
- }
- c.Unparsed = append(c.Unparsed, parts[i])
- }
- cookies = append(cookies, c)
- }
- return cookies
-}
-
-// SetCookie adds a Set-Cookie header to the provided ResponseWriter's headers.
-func SetCookie(w ResponseWriter, cookie *Cookie) {
- w.Header().Add("Set-Cookie", cookie.String())
-}
-
-// String returns the serialization of the cookie for use in a Cookie
-// header (if only Name and Value are set) or a Set-Cookie response
-// header (if other fields are set).
-func (c *Cookie) String() string {
- var b bytes.Buffer
- fmt.Fprintf(&b, "%s=%s", sanitizeName(c.Name), sanitizeValue(c.Value))
- if len(c.Path) > 0 {
- fmt.Fprintf(&b, "; Path=%s", sanitizeValue(c.Path))
- }
- if len(c.Domain) > 0 {
- fmt.Fprintf(&b, "; Domain=%s", sanitizeValue(c.Domain))
- }
- if len(c.Expires.Zone) > 0 {
- fmt.Fprintf(&b, "; Expires=%s", c.Expires.Format(time.RFC1123))
- }
- if c.MaxAge > 0 {
- fmt.Fprintf(&b, "; Max-Age=%d", c.MaxAge)
- } else if c.MaxAge < 0 {
- fmt.Fprintf(&b, "; Max-Age=0")
- }
- if c.HttpOnly {
- fmt.Fprintf(&b, "; HttpOnly")
- }
- if c.Secure {
- fmt.Fprintf(&b, "; Secure")
- }
- return b.String()
-}
-
-// readCookies parses all "Cookie" values from the header h and
-// returns the successfully parsed Cookies.
-//
-// if filter isn't empty, only cookies of that name are returned
-func readCookies(h Header, filter string) []*Cookie {
- cookies := []*Cookie{}
- lines, ok := h["Cookie"]
- if !ok {
- return cookies
- }
-
- for _, line := range lines {
- parts := strings.Split(strings.TrimSpace(line), ";")
- if len(parts) == 1 && parts[0] == "" {
- continue
- }
- // Per-line attributes
- parsedPairs := 0
- for i := 0; i < len(parts); i++ {
- parts[i] = strings.TrimSpace(parts[i])
- if len(parts[i]) == 0 {
- continue
- }
- name, val := parts[i], ""
- if j := strings.Index(name, "="); j >= 0 {
- name, val = name[:j], name[j+1:]
- }
- if !isCookieNameValid(name) {
- continue
- }
- if filter != "" && filter != name {
- continue
- }
- val, success := parseCookieValue(val)
- if !success {
- continue
- }
- cookies = append(cookies, &Cookie{Name: name, Value: val})
- parsedPairs++
- }
- }
- return cookies
-}
-
-var cookieNameSanitizer = strings.NewReplacer("\n", "-", "\r", "-")
-
-func sanitizeName(n string) string {
- return cookieNameSanitizer.Replace(n)
-}
-
-var cookieValueSanitizer = strings.NewReplacer("\n", " ", "\r", " ", ";", " ")
-
-func sanitizeValue(v string) string {
- return cookieValueSanitizer.Replace(v)
-}
-
-func unquoteCookieValue(v string) string {
- if len(v) > 1 && v[0] == '"' && v[len(v)-1] == '"' {
- return v[1 : len(v)-1]
- }
- return v
-}
-
-func isCookieByte(c byte) bool {
- switch {
- case c == 0x21, 0x23 <= c && c <= 0x2b, 0x2d <= c && c <= 0x3a,
- 0x3c <= c && c <= 0x5b, 0x5d <= c && c <= 0x7e:
- return true
- }
- return false
-}
-
-func isCookieExpiresByte(c byte) (ok bool) {
- return isCookieByte(c) || c == ',' || c == ' '
-}
-
-func parseCookieValue(raw string) (string, bool) {
- return parseCookieValueUsing(raw, isCookieByte)
-}
-
-func parseCookieExpiresValue(raw string) (string, bool) {
- return parseCookieValueUsing(raw, isCookieExpiresByte)
-}
-
-func parseCookieValueUsing(raw string, validByte func(byte) bool) (string, bool) {
- raw = unquoteCookieValue(raw)
- for i := 0; i < len(raw); i++ {
- if !validByte(raw[i]) {
- return "", false
- }
- }
- return raw, true
-}
-
-func isCookieNameValid(raw string) bool {
- for _, c := range raw {
- if !isToken(byte(c)) {
- return false
- }
- }
- return true
-}
+++ /dev/null
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http
-
-import (
- "fmt"
- "json"
- "reflect"
- "testing"
- "time"
-)
-
-var writeSetCookiesTests = []struct {
- Cookie *Cookie
- Raw string
-}{
- {
- &Cookie{Name: "cookie-1", Value: "v$1"},
- "cookie-1=v$1",
- },
- {
- &Cookie{Name: "cookie-2", Value: "two", MaxAge: 3600},
- "cookie-2=two; Max-Age=3600",
- },
- {
- &Cookie{Name: "cookie-3", Value: "three", Domain: ".example.com"},
- "cookie-3=three; Domain=.example.com",
- },
- {
- &Cookie{Name: "cookie-4", Value: "four", Path: "/restricted/"},
- "cookie-4=four; Path=/restricted/",
- },
-}
-
-func TestWriteSetCookies(t *testing.T) {
- for i, tt := range writeSetCookiesTests {
- if g, e := tt.Cookie.String(), tt.Raw; g != e {
- t.Errorf("Test %d, expecting:\n%s\nGot:\n%s\n", i, e, g)
- continue
- }
- }
-}
-
-type headerOnlyResponseWriter Header
-
-func (ho headerOnlyResponseWriter) Header() Header {
- return Header(ho)
-}
-
-func (ho headerOnlyResponseWriter) Write([]byte) (int, error) {
- panic("NOIMPL")
-}
-
-func (ho headerOnlyResponseWriter) WriteHeader(int) {
- panic("NOIMPL")
-}
-
-func TestSetCookie(t *testing.T) {
- m := make(Header)
- SetCookie(headerOnlyResponseWriter(m), &Cookie{Name: "cookie-1", Value: "one", Path: "/restricted/"})
- SetCookie(headerOnlyResponseWriter(m), &Cookie{Name: "cookie-2", Value: "two", MaxAge: 3600})
- if l := len(m["Set-Cookie"]); l != 2 {
- t.Fatalf("expected %d cookies, got %d", 2, l)
- }
- if g, e := m["Set-Cookie"][0], "cookie-1=one; Path=/restricted/"; g != e {
- t.Errorf("cookie #1: want %q, got %q", e, g)
- }
- if g, e := m["Set-Cookie"][1], "cookie-2=two; Max-Age=3600"; g != e {
- t.Errorf("cookie #2: want %q, got %q", e, g)
- }
-}
-
-var addCookieTests = []struct {
- Cookies []*Cookie
- Raw string
-}{
- {
- []*Cookie{},
- "",
- },
- {
- []*Cookie{&Cookie{Name: "cookie-1", Value: "v$1"}},
- "cookie-1=v$1",
- },
- {
- []*Cookie{
- &Cookie{Name: "cookie-1", Value: "v$1"},
- &Cookie{Name: "cookie-2", Value: "v$2"},
- &Cookie{Name: "cookie-3", Value: "v$3"},
- },
- "cookie-1=v$1; cookie-2=v$2; cookie-3=v$3",
- },
-}
-
-func TestAddCookie(t *testing.T) {
- for i, tt := range addCookieTests {
- req, _ := NewRequest("GET", "http://example.com/", nil)
- for _, c := range tt.Cookies {
- req.AddCookie(c)
- }
- if g := req.Header.Get("Cookie"); g != tt.Raw {
- t.Errorf("Test %d:\nwant: %s\n got: %s\n", i, tt.Raw, g)
- continue
- }
- }
-}
-
-var readSetCookiesTests = []struct {
- Header Header
- Cookies []*Cookie
-}{
- {
- Header{"Set-Cookie": {"Cookie-1=v$1"}},
- []*Cookie{&Cookie{Name: "Cookie-1", Value: "v$1", Raw: "Cookie-1=v$1"}},
- },
- {
- Header{"Set-Cookie": {"NID=99=YsDT5i3E-CXax-; expires=Wed, 23-Nov-2011 01:05:03 GMT; path=/; domain=.google.ch; HttpOnly"}},
- []*Cookie{&Cookie{
- Name: "NID",
- Value: "99=YsDT5i3E-CXax-",
- Path: "/",
- Domain: ".google.ch",
- HttpOnly: true,
- Expires: time.Time{Year: 2011, Month: 11, Day: 23, Hour: 1, Minute: 5, Second: 3, ZoneOffset: 0, Zone: "GMT"},
- RawExpires: "Wed, 23-Nov-2011 01:05:03 GMT",
- Raw: "NID=99=YsDT5i3E-CXax-; expires=Wed, 23-Nov-2011 01:05:03 GMT; path=/; domain=.google.ch; HttpOnly",
- }},
- },
-}
-
-func toJSON(v interface{}) string {
- b, err := json.Marshal(v)
- if err != nil {
- return fmt.Sprintf("%#v", v)
- }
- return string(b)
-}
-
-func TestReadSetCookies(t *testing.T) {
- for i, tt := range readSetCookiesTests {
- for n := 0; n < 2; n++ { // to verify readSetCookies doesn't mutate its input
- c := readSetCookies(tt.Header)
- if !reflect.DeepEqual(c, tt.Cookies) {
- t.Errorf("#%d readSetCookies: have\n%s\nwant\n%s\n", i, toJSON(c), toJSON(tt.Cookies))
- continue
- }
- }
- }
-}
-
-var readCookiesTests = []struct {
- Header Header
- Filter string
- Cookies []*Cookie
-}{
- {
- Header{"Cookie": {"Cookie-1=v$1", "c2=v2"}},
- "",
- []*Cookie{
- &Cookie{Name: "Cookie-1", Value: "v$1"},
- &Cookie{Name: "c2", Value: "v2"},
- },
- },
- {
- Header{"Cookie": {"Cookie-1=v$1", "c2=v2"}},
- "c2",
- []*Cookie{
- &Cookie{Name: "c2", Value: "v2"},
- },
- },
- {
- Header{"Cookie": {"Cookie-1=v$1; c2=v2"}},
- "",
- []*Cookie{
- &Cookie{Name: "Cookie-1", Value: "v$1"},
- &Cookie{Name: "c2", Value: "v2"},
- },
- },
- {
- Header{"Cookie": {"Cookie-1=v$1; c2=v2"}},
- "c2",
- []*Cookie{
- &Cookie{Name: "c2", Value: "v2"},
- },
- },
-}
-
-func TestReadCookies(t *testing.T) {
- for i, tt := range readCookiesTests {
- for n := 0; n < 2; n++ { // to verify readCookies doesn't mutate its input
- c := readCookies(tt.Header, tt.Filter)
- if !reflect.DeepEqual(c, tt.Cookies) {
- t.Errorf("#%d readCookies:\nhave: %s\nwant: %s\n", i, toJSON(c), toJSON(tt.Cookies))
- continue
- }
- }
- }
-}
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-/*
-Package http provides HTTP client and server implementations.
-
-Get, Head, Post, and PostForm make HTTP requests:
-
- resp, err := http.Get("http://example.com/")
- ...
- resp, err := http.Post("http://example.com/upload", "image/jpeg", &buf)
- ...
- resp, err := http.PostForm("http://example.com/form",
- url.Values{"key": {"Value"}, "id": {"123"}})
-
-The client must close the response body when finished with it:
-
- resp, err := http.Get("http://example.com/")
- if err != nil {
- // handle error
- }
- defer resp.Body.Close()
- body, err := ioutil.ReadAll(resp.Body)
- // ...
-
-For control over HTTP client headers, redirect policy, and other
-settings, create a Client:
-
- client := &http.Client{
- CheckRedirect: redirectPolicyFunc,
- }
-
- resp, err := client.Get("http://example.com")
- // ...
-
- req, err := http.NewRequest("GET", "http://example.com", nil)
- req.Header.Add("If-None-Match", `W/"wyzzy"`)
- resp, err := client.Do(req)
- // ...
-
-For control over proxies, TLS configuration, keep-alives,
-compression, and other settings, create a Transport:
-
- tr := &http.Transport{
- TLSClientConfig: &tls.Config{RootCAs: pool},
- DisableCompression: true,
- }
- client := &http.Client{Transport: tr}
- resp, err := client.Get("https://example.com")
-
-Clients and Transports are safe for concurrent use by multiple
-goroutines and for efficiency should only be created once and re-used.
-
-ListenAndServe starts an HTTP server with a given address and handler.
-The handler is usually nil, which means to use DefaultServeMux.
-Handle and HandleFunc add handlers to DefaultServeMux:
-
- http.Handle("/foo", fooHandler)
-
- http.HandleFunc("/bar", func(w http.ResponseWriter, r *http.Request) {
- fmt.Fprintf(w, "Hello, %q", html.EscapeString(r.URL.RawPath))
- })
-
- log.Fatal(http.ListenAndServe(":8080", nil))
-
-More control over the server's behavior is available by creating a
-custom Server:
-
- s := &http.Server{
- Addr: ":8080",
- Handler: myHandler,
- ReadTimeout: 10e9,
- WriteTimeout: 10e9,
- MaxHeaderBytes: 1 << 20,
- }
- log.Fatal(s.ListenAndServe())
-*/
-package http
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http
-
-import (
- "bytes"
- "io"
- "io/ioutil"
-)
-
-// One of the copies, say from b to r2, could be avoided by using a more
-// elaborate trick where the other copy is made during Request/Response.Write.
-// This would complicate things too much, given that these functions are for
-// debugging only.
-func drainBody(b io.ReadCloser) (r1, r2 io.ReadCloser, err error) {
- var buf bytes.Buffer
- if _, err = buf.ReadFrom(b); err != nil {
- return nil, nil, err
- }
- if err = b.Close(); err != nil {
- return nil, nil, err
- }
- return ioutil.NopCloser(&buf), ioutil.NopCloser(bytes.NewBuffer(buf.Bytes())), nil
-}
-
-// DumpRequest returns the wire representation of req,
-// optionally including the request body, for debugging.
-// DumpRequest is semantically a no-op, but in order to
-// dump the body, it reads the body data into memory and
-// changes req.Body to refer to the in-memory copy.
-// The documentation for Request.Write details which fields
-// of req are used.
-func DumpRequest(req *Request, body bool) (dump []byte, err error) {
- var b bytes.Buffer
- save := req.Body
- if !body || req.Body == nil {
- req.Body = nil
- } else {
- save, req.Body, err = drainBody(req.Body)
- if err != nil {
- return
- }
- }
- err = req.dumpWrite(&b)
- req.Body = save
- if err != nil {
- return
- }
- dump = b.Bytes()
- return
-}
-
-// DumpResponse is like DumpRequest but dumps a response.
-func DumpResponse(resp *Response, body bool) (dump []byte, err error) {
- var b bytes.Buffer
- save := resp.Body
- savecl := resp.ContentLength
- if !body || resp.Body == nil {
- resp.Body = nil
- resp.ContentLength = 0
- } else {
- save, resp.Body, err = drainBody(resp.Body)
- if err != nil {
- return
- }
- }
- err = resp.Write(&b)
- resp.Body = save
- resp.ContentLength = savecl
- if err != nil {
- return
- }
- dump = b.Bytes()
- return
-}
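
For orientation, a small hedged sketch of how DumpRequest above is typically driven; the request contents are made up, and the "net/http/httputil" import path assumes the post-move package layout rather than the in-package name used here.

    package main

    import (
        "fmt"
        "net/http"
        "net/http/httputil"
        "strings"
    )

    func main() {
        req, err := http.NewRequest("POST", "http://example.com/upload", strings.NewReader("payload"))
        if err != nil {
            panic(err)
        }
        // body=true drains req.Body into memory and restores it afterwards,
        // so the request can still be sent after dumping.
        dump, err := httputil.DumpRequest(req, true)
        if err != nil {
            panic(err)
        }
        fmt.Printf("%s", dump)
    }
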
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Bridge package to expose http internals to tests in the http_test
-// package.
-
-package http
-
-func (t *Transport) IdleConnKeysForTesting() (keys []string) {
- keys = make([]string, 0)
- t.lk.Lock()
- defer t.lk.Unlock()
- if t.idleConn == nil {
- return
- }
- for key := range t.idleConn {
- keys = append(keys, key)
- }
- return
-}
-
-func (t *Transport) IdleConnCountForTesting(cacheKey string) int {
- t.lk.Lock()
- defer t.lk.Unlock()
- if t.idleConn == nil {
- return 0
- }
- conns, ok := t.idleConn[cacheKey]
- if !ok {
- return 0
- }
- return len(conns)
-}
-
-func NewTestTimeoutHandler(handler Handler, ch <-chan int64) Handler {
- f := func() <-chan int64 {
- return ch
- }
- return &timeoutHandler{handler, f, ""}
-}
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package fcgi
-
-// This file implements FastCGI from the perspective of a child process.
-
-import (
- "fmt"
- "http"
- "http/cgi"
- "io"
- "net"
- "os"
- "time"
-)
-
-// request holds the state for an in-progress request. As soon as it's complete,
-// it's converted to an http.Request.
-type request struct {
- pw *io.PipeWriter
- reqId uint16
- params map[string]string
- buf [1024]byte
- rawParams []byte
- keepConn bool
-}
-
-func newRequest(reqId uint16, flags uint8) *request {
- r := &request{
- reqId: reqId,
- params: map[string]string{},
- keepConn: flags&flagKeepConn != 0,
- }
- r.rawParams = r.buf[:0]
- return r
-}
-
-// parseParams reads an encoded []byte into Params.
-func (r *request) parseParams() {
- text := r.rawParams
- r.rawParams = nil
- for len(text) > 0 {
- keyLen, n := readSize(text)
- if n == 0 {
- return
- }
- text = text[n:]
- valLen, n := readSize(text)
- if n == 0 {
- return
- }
- text = text[n:]
- key := readString(text, keyLen)
- text = text[keyLen:]
- val := readString(text, valLen)
- text = text[valLen:]
- r.params[key] = val
- }
-}
-
-// response implements http.ResponseWriter.
-type response struct {
- req *request
- header http.Header
- w *bufWriter
- wroteHeader bool
-}
-
-func newResponse(c *child, req *request) *response {
- return &response{
- req: req,
- header: http.Header{},
- w: newWriter(c.conn, typeStdout, req.reqId),
- }
-}
-
-func (r *response) Header() http.Header {
- return r.header
-}
-
-func (r *response) Write(data []byte) (int, error) {
- if !r.wroteHeader {
- r.WriteHeader(http.StatusOK)
- }
- return r.w.Write(data)
-}
-
-func (r *response) WriteHeader(code int) {
- if r.wroteHeader {
- return
- }
- r.wroteHeader = true
- if code == http.StatusNotModified {
- // Must not have body.
- r.header.Del("Content-Type")
- r.header.Del("Content-Length")
- r.header.Del("Transfer-Encoding")
- } else if r.header.Get("Content-Type") == "" {
- r.header.Set("Content-Type", "text/html; charset=utf-8")
- }
-
- if r.header.Get("Date") == "" {
- r.header.Set("Date", time.UTC().Format(http.TimeFormat))
- }
-
- fmt.Fprintf(r.w, "Status: %d %s\r\n", code, http.StatusText(code))
- r.header.Write(r.w)
- r.w.WriteString("\r\n")
-}
-
-func (r *response) Flush() {
- if !r.wroteHeader {
- r.WriteHeader(http.StatusOK)
- }
- r.w.Flush()
-}
-
-func (r *response) Close() error {
- r.Flush()
- return r.w.Close()
-}
-
-type child struct {
- conn *conn
- handler http.Handler
-}
-
-func newChild(rwc net.Conn, handler http.Handler) *child {
- return &child{newConn(rwc), handler}
-}
-
-func (c *child) serve() {
- requests := map[uint16]*request{}
- defer c.conn.Close()
- var rec record
- var br beginRequest
- for {
- if err := rec.read(c.conn.rwc); err != nil {
- return
- }
-
- req, ok := requests[rec.h.Id]
- if !ok && rec.h.Type != typeBeginRequest && rec.h.Type != typeGetValues {
- // The spec says to ignore unknown request IDs.
- continue
- }
- if ok && rec.h.Type == typeBeginRequest {
- // The server is trying to begin a request with the same ID
- // as an in-progress request. This is an error.
- return
- }
-
- switch rec.h.Type {
- case typeBeginRequest:
- if err := br.read(rec.content()); err != nil {
- return
- }
- if br.role != roleResponder {
- c.conn.writeEndRequest(rec.h.Id, 0, statusUnknownRole)
- break
- }
- requests[rec.h.Id] = newRequest(rec.h.Id, br.flags)
- case typeParams:
- // NOTE(eds): Technically a key-value pair can straddle the boundary
- // between two packets. We buffer until we've received all parameters.
- if len(rec.content()) > 0 {
- req.rawParams = append(req.rawParams, rec.content()...)
- break
- }
- req.parseParams()
- case typeStdin:
- content := rec.content()
- if req.pw == nil {
- var body io.ReadCloser
- if len(content) > 0 {
- // body could be an io.LimitReader, but it shouldn't matter
- // as long as both sides are behaving.
- body, req.pw = io.Pipe()
- }
- go c.serveRequest(req, body)
- }
- if len(content) > 0 {
- // TODO(eds): This blocks until the handler reads from the pipe.
- // If the handler takes a long time, it might be a problem.
- req.pw.Write(content)
- } else if req.pw != nil {
- req.pw.Close()
- }
- case typeGetValues:
- values := map[string]string{"FCGI_MPXS_CONNS": "1"}
- c.conn.writePairs(0, typeGetValuesResult, values)
- case typeData:
- // If the filter role is implemented, read the data stream here.
- case typeAbortRequest:
- delete(requests, rec.h.Id)
- c.conn.writeEndRequest(rec.h.Id, 0, statusRequestComplete)
- if !req.keepConn {
- // connection will close upon return
- return
- }
- default:
- b := make([]byte, 8)
- b[0] = rec.h.Type
- c.conn.writeRecord(typeUnknownType, 0, b)
- }
- }
-}
-
-func (c *child) serveRequest(req *request, body io.ReadCloser) {
- r := newResponse(c, req)
- httpReq, err := cgi.RequestFromMap(req.params)
- if err != nil {
- // there was an error reading the request
- r.WriteHeader(http.StatusInternalServerError)
- c.conn.writeRecord(typeStderr, req.reqId, []byte(err.Error()))
- } else {
- httpReq.Body = body
- c.handler.ServeHTTP(r, httpReq)
- }
- if body != nil {
- body.Close()
- }
- r.Close()
- c.conn.writeEndRequest(req.reqId, 0, statusRequestComplete)
- if !req.keepConn {
- c.conn.Close()
- }
-}
-
- // Serve accepts incoming FastCGI connections on the listener l, creating a new
- // service goroutine for each. The service goroutines read requests and then call
- // handler to reply to them.
-// If l is nil, Serve accepts connections on stdin.
-// If handler is nil, http.DefaultServeMux is used.
-func Serve(l net.Listener, handler http.Handler) error {
- if l == nil {
- var err error
- l, err = net.FileListener(os.Stdin)
- if err != nil {
- return err
- }
- defer l.Close()
- }
- if handler == nil {
- handler = http.DefaultServeMux
- }
- for {
- rw, err := l.Accept()
- if err != nil {
- return err
- }
- c := newChild(rw, handler)
- go c.serve()
- }
- panic("unreachable")
-}
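
A hedged usage sketch for Serve above: the listener address and handler are illustrative, and the "net/http/fcgi" import path assumes the post-move package layout. A front-end web server would be configured to forward FastCGI requests to this address.

    package main

    import (
        "fmt"
        "net"
        "net/http"
        "net/http/fcgi"
    )

    func main() {
        l, err := net.Listen("tcp", "127.0.0.1:9000")
        if err != nil {
            panic(err)
        }
        h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            fmt.Fprintf(w, "Hello from FastCGI: %s\n", r.URL.Path)
        })
        // Serve blocks, accepting FastCGI connections until Accept fails.
        if err := fcgi.Serve(l, h); err != nil {
            panic(err)
        }
    }
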
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package fcgi implements the FastCGI protocol.
-// Currently only the responder role is supported.
-// The protocol is defined at http://www.fastcgi.com/drupal/node/6?q=node/22
-package fcgi
-
-// This file defines the raw protocol and some utilities used by the child and
-// the host.
-
-import (
- "bufio"
- "bytes"
- "encoding/binary"
- "errors"
- "io"
- "sync"
-)
-
-const (
- // Packet Types
- typeBeginRequest = iota + 1
- typeAbortRequest
- typeEndRequest
- typeParams
- typeStdin
- typeStdout
- typeStderr
- typeData
- typeGetValues
- typeGetValuesResult
- typeUnknownType
-)
-
-// keep the connection between web-server and responder open after request
-const flagKeepConn = 1
-
-const (
- maxWrite = 65535 // maximum record body
- maxPad = 255
-)
-
-const (
- roleResponder = iota + 1 // only Responders are implemented.
- roleAuthorizer
- roleFilter
-)
-
-const (
- statusRequestComplete = iota
- statusCantMultiplex
- statusOverloaded
- statusUnknownRole
-)
-
-const headerLen = 8
-
-type header struct {
- Version uint8
- Type uint8
- Id uint16
- ContentLength uint16
- PaddingLength uint8
- Reserved uint8
-}
-
-type beginRequest struct {
- role uint16
- flags uint8
- reserved [5]uint8
-}
-
-func (br *beginRequest) read(content []byte) error {
- if len(content) != 8 {
- return errors.New("fcgi: invalid begin request record")
- }
- br.role = binary.BigEndian.Uint16(content)
- br.flags = content[2]
- return nil
-}
-
-// for padding so we don't have to allocate all the time
-// not synchronized because we don't care what the contents are
-var pad [maxPad]byte
-
-func (h *header) init(recType uint8, reqId uint16, contentLength int) {
- h.Version = 1
- h.Type = recType
- h.Id = reqId
- h.ContentLength = uint16(contentLength)
- h.PaddingLength = uint8(-contentLength & 7) // pad the record body to a multiple of 8 bytes
-}
-
-// conn sends records over rwc
-type conn struct {
- mutex sync.Mutex
- rwc io.ReadWriteCloser
-
- // to avoid allocations
- buf bytes.Buffer
- h header
-}
-
-func newConn(rwc io.ReadWriteCloser) *conn {
- return &conn{rwc: rwc}
-}
-
-func (c *conn) Close() error {
- c.mutex.Lock()
- defer c.mutex.Unlock()
- return c.rwc.Close()
-}
-
-type record struct {
- h header
- buf [maxWrite + maxPad]byte
-}
-
-func (rec *record) read(r io.Reader) (err error) {
- if err = binary.Read(r, binary.BigEndian, &rec.h); err != nil {
- return err
- }
- if rec.h.Version != 1 {
- return errors.New("fcgi: invalid header version")
- }
- n := int(rec.h.ContentLength) + int(rec.h.PaddingLength)
- if _, err = io.ReadFull(r, rec.buf[:n]); err != nil {
- return err
- }
- return nil
-}
-
-func (r *record) content() []byte {
- return r.buf[:r.h.ContentLength]
-}
-
-// writeRecord writes and sends a single record.
-func (c *conn) writeRecord(recType uint8, reqId uint16, b []byte) error {
- c.mutex.Lock()
- defer c.mutex.Unlock()
- c.buf.Reset()
- c.h.init(recType, reqId, len(b))
- if err := binary.Write(&c.buf, binary.BigEndian, c.h); err != nil {
- return err
- }
- if _, err := c.buf.Write(b); err != nil {
- return err
- }
- if _, err := c.buf.Write(pad[:c.h.PaddingLength]); err != nil {
- return err
- }
- _, err := c.rwc.Write(c.buf.Bytes())
- return err
-}
-
-func (c *conn) writeBeginRequest(reqId uint16, role uint16, flags uint8) error {
- b := [8]byte{byte(role >> 8), byte(role), flags}
- return c.writeRecord(typeBeginRequest, reqId, b[:])
-}
-
-func (c *conn) writeEndRequest(reqId uint16, appStatus int, protocolStatus uint8) error {
- b := make([]byte, 8)
- binary.BigEndian.PutUint32(b, uint32(appStatus))
- b[4] = protocolStatus
- return c.writeRecord(typeEndRequest, reqId, b)
-}
-
-func (c *conn) writePairs(recType uint8, reqId uint16, pairs map[string]string) error {
- w := newWriter(c, recType, reqId)
- b := make([]byte, 8)
- for k, v := range pairs {
- n := encodeSize(b, uint32(len(k)))
- n += encodeSize(b[n:], uint32(len(v))) // encode the value length, not the key length again
- if _, err := w.Write(b[:n]); err != nil {
- return err
- }
- if _, err := w.WriteString(k); err != nil {
- return err
- }
- if _, err := w.WriteString(v); err != nil {
- return err
- }
- }
- w.Close()
- return nil
-}
-
-func readSize(s []byte) (uint32, int) {
- if len(s) == 0 {
- return 0, 0
- }
- size, n := uint32(s[0]), 1
- if size&(1<<7) != 0 {
- if len(s) < 4 {
- return 0, 0
- }
- n = 4
- size = binary.BigEndian.Uint32(s)
- size &^= 1 << 31
- }
- return size, n
-}
-
-func readString(s []byte, size uint32) string {
- if size > uint32(len(s)) {
- return ""
- }
- return string(s[:size])
-}
-
-func encodeSize(b []byte, size uint32) int {
- if size > 127 {
- size |= 1 << 31
- binary.BigEndian.PutUint32(b, size)
- return 4
- }
- b[0] = byte(size)
- return 1
-}
-
- // bufWriter encapsulates bufio.Writer but also closes the underlying stream
- // when it is closed.
-type bufWriter struct {
- closer io.Closer
- *bufio.Writer
-}
-
-func (w *bufWriter) Close() error {
- if err := w.Writer.Flush(); err != nil {
- w.closer.Close()
- return err
- }
- return w.closer.Close()
-}
-
-func newWriter(c *conn, recType uint8, reqId uint16) *bufWriter {
- s := &streamWriter{c: c, recType: recType, reqId: reqId}
- w, _ := bufio.NewWriterSize(s, maxWrite)
- return &bufWriter{s, w}
-}
-
-// streamWriter abstracts out the separation of a stream into discrete records.
-// It only writes maxWrite bytes at a time.
-type streamWriter struct {
- c *conn
- recType uint8
- reqId uint16
-}
-
-func (w *streamWriter) Write(p []byte) (int, error) {
- nn := 0
- for len(p) > 0 {
- n := len(p)
- if n > maxWrite {
- n = maxWrite
- }
- if err := w.c.writeRecord(w.recType, w.reqId, p[:n]); err != nil {
- return nn, err
- }
- nn += n
- p = p[n:]
- }
- return nn, nil
-}
-
-func (w *streamWriter) Close() error {
- // send empty record to close the stream
- return w.c.writeRecord(w.recType, w.reqId, nil)
-}
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package fcgi
-
-import (
- "bytes"
- "io"
- "testing"
-)
-
-var sizeTests = []struct {
- size uint32
- bytes []byte
-}{
- {0, []byte{0x00}},
- {127, []byte{0x7F}},
- {128, []byte{0x80, 0x00, 0x00, 0x80}},
- {1000, []byte{0x80, 0x00, 0x03, 0xE8}},
- {33554431, []byte{0x81, 0xFF, 0xFF, 0xFF}},
-}
-
-func TestSize(t *testing.T) {
- b := make([]byte, 4)
- for i, test := range sizeTests {
- n := encodeSize(b, test.size)
- if !bytes.Equal(b[:n], test.bytes) {
- t.Errorf("%d expected %x, encoded %x", i, test.bytes, b)
- }
- size, n := readSize(test.bytes)
- if size != test.size {
- t.Errorf("%d expected %d, read %d", i, test.size, size)
- }
- if len(test.bytes) != n {
- t.Errorf("%d did not consume all the bytes", i)
- }
- }
-}
-
-var streamTests = []struct {
- desc string
- recType uint8
- reqId uint16
- content []byte
- raw []byte
-}{
- {"single record", typeStdout, 1, nil,
- []byte{1, typeStdout, 0, 1, 0, 0, 0, 0},
- },
- // this data will have to be split into two records
- {"two records", typeStdin, 300, make([]byte, 66000),
- bytes.Join([][]byte{
- // header for the first record
- {1, typeStdin, 0x01, 0x2C, 0xFF, 0xFF, 1, 0},
- make([]byte, 65536),
- // header for the second
- {1, typeStdin, 0x01, 0x2C, 0x01, 0xD1, 7, 0},
- make([]byte, 472),
- // header for the empty record
- {1, typeStdin, 0x01, 0x2C, 0, 0, 0, 0},
- },
- nil),
- },
-}
-
-type nilCloser struct {
- io.ReadWriter
-}
-
-func (c *nilCloser) Close() error { return nil }
-
-func TestStreams(t *testing.T) {
- var rec record
-outer:
- for _, test := range streamTests {
- buf := bytes.NewBuffer(test.raw)
- var content []byte
- for buf.Len() > 0 {
- if err := rec.read(buf); err != nil {
- t.Errorf("%s: error reading record: %v", test.desc, err)
- continue outer
- }
- content = append(content, rec.content()...)
- }
- if rec.h.Type != test.recType {
- t.Errorf("%s: got type %d expected %d", test.desc, rec.h.Type, test.recType)
- continue
- }
- if rec.h.Id != test.reqId {
- t.Errorf("%s: got request ID %d expected %d", test.desc, rec.h.Id, test.reqId)
- continue
- }
- if !bytes.Equal(content, test.content) {
- t.Errorf("%s: read wrong content", test.desc)
- continue
- }
- buf.Reset()
- c := newConn(&nilCloser{buf})
- w := newWriter(c, test.recType, test.reqId)
- if _, err := w.Write(test.content); err != nil {
- t.Errorf("%s: error writing record: %v", test.desc, err)
- continue
- }
- if err := w.Close(); err != nil {
- t.Errorf("%s: error closing stream: %v", test.desc, err)
- continue
- }
- if !bytes.Equal(buf.Bytes(), test.raw) {
- t.Errorf("%s: wrote wrong content", test.desc)
- }
- }
-}
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http
-
-import (
- "fmt"
- "io"
-)
-
-// fileTransport implements RoundTripper for the 'file' protocol.
-type fileTransport struct {
- fh fileHandler
-}
-
-// NewFileTransport returns a new RoundTripper, serving the provided
-// FileSystem. The returned RoundTripper ignores the URL host in its
-// incoming requests, as well as most other properties of the
-// request.
-//
-// The typical use case for NewFileTransport is to register the "file"
-// protocol with a Transport, as in:
-//
-// t := &http.Transport{}
-// t.RegisterProtocol("file", http.NewFileTransport(http.Dir("/")))
-// c := &http.Client{Transport: t}
-// res, err := c.Get("file:///etc/passwd")
-// ...
-func NewFileTransport(fs FileSystem) RoundTripper {
- return fileTransport{fileHandler{fs}}
-}
-
-func (t fileTransport) RoundTrip(req *Request) (resp *Response, err error) {
- // We start ServeHTTP in a goroutine, which may take a long
- // time if the file is large. The newPopulateResponseWriter
- // call returns a channel on which either ServeHTTP or finish()
- // sends our *Response once the *Response itself has been
- // populated (even if the body is still being written to
- // res.Body, a pipe).
- rw, resc := newPopulateResponseWriter()
- go func() {
- t.fh.ServeHTTP(rw, req)
- rw.finish()
- }()
- return <-resc, nil
-}
-
-func newPopulateResponseWriter() (*populateResponse, <-chan *Response) {
- pr, pw := io.Pipe()
- rw := &populateResponse{
- ch: make(chan *Response),
- pw: pw,
- res: &Response{
- Proto: "HTTP/1.0",
- ProtoMajor: 1,
- Header: make(Header),
- Close: true,
- Body: pr,
- },
- }
- return rw, rw.ch
-}
-
-// populateResponse is a ResponseWriter that populates the *Response
-// in res, and writes its body to a pipe connected to the response
-// body. Once writes begin or finish() is called, the response is sent
-// on ch.
-type populateResponse struct {
- res *Response
- ch chan *Response
- wroteHeader bool
- hasContent bool
- sentResponse bool
- pw *io.PipeWriter
-}
-
-func (pr *populateResponse) finish() {
- if !pr.wroteHeader {
- pr.WriteHeader(500)
- }
- if !pr.sentResponse {
- pr.sendResponse()
- }
- pr.pw.Close()
-}
-
-func (pr *populateResponse) sendResponse() {
- if pr.sentResponse {
- return
- }
- pr.sentResponse = true
-
- if pr.hasContent {
- pr.res.ContentLength = -1
- }
- pr.ch <- pr.res
-}
-
-func (pr *populateResponse) Header() Header {
- return pr.res.Header
-}
-
-func (pr *populateResponse) WriteHeader(code int) {
- if pr.wroteHeader {
- return
- }
- pr.wroteHeader = true
-
- pr.res.StatusCode = code
- pr.res.Status = fmt.Sprintf("%d %s", code, StatusText(code))
-}
-
-func (pr *populateResponse) Write(p []byte) (n int, err error) {
- if !pr.wroteHeader {
- pr.WriteHeader(StatusOK)
- }
- pr.hasContent = true
- if !pr.sentResponse {
- pr.sendResponse()
- }
- return pr.pw.Write(p)
-}
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http_test
-
-import (
- "http"
- "io/ioutil"
- "path/filepath"
- "testing"
-)
-
-func checker(t *testing.T) func(string, error) {
- return func(call string, err error) {
- if err == nil {
- return
- }
- t.Fatalf("%s: %v", call, err)
- }
-}
-
-func TestFileTransport(t *testing.T) {
- check := checker(t)
-
- dname, err := ioutil.TempDir("", "")
- check("TempDir", err)
- fname := filepath.Join(dname, "foo.txt")
- err = ioutil.WriteFile(fname, []byte("Bar"), 0644)
- check("WriteFile", err)
-
- tr := &http.Transport{}
- tr.RegisterProtocol("file", http.NewFileTransport(http.Dir(dname)))
- c := &http.Client{Transport: tr}
-
- fooURLs := []string{"file:///foo.txt", "file://../foo.txt"}
- for _, urlstr := range fooURLs {
- res, err := c.Get(urlstr)
- check("Get "+urlstr, err)
- if res.StatusCode != 200 {
- t.Errorf("for %s, StatusCode = %d, want 200", urlstr, res.StatusCode)
- }
- if res.ContentLength != -1 {
- t.Errorf("for %s, ContentLength = %d, want -1", urlstr, res.ContentLength)
- }
- if res.Body == nil {
- t.Fatalf("for %s, nil Body", urlstr)
- }
- slurp, err := ioutil.ReadAll(res.Body)
- check("ReadAll "+urlstr, err)
- if string(slurp) != "Bar" {
- t.Errorf("for %s, got content %q, want %q", urlstr, string(slurp), "Bar")
- }
- }
-
- const badURL = "file://../no-exist.txt"
- res, err := c.Get(badURL)
- check("Get "+badURL, err)
- if res.StatusCode != 404 {
- t.Errorf("for %s, StatusCode = %d, want 404", badURL, res.StatusCode)
- }
-}
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// HTTP file system request handler
-
-package http
-
-import (
- "errors"
- "fmt"
- "io"
- "mime"
- "os"
- "path"
- "path/filepath"
- "strconv"
- "strings"
- "time"
- "utf8"
-)
-
-// A Dir implements http.FileSystem using the native file
-// system restricted to a specific directory tree.
-type Dir string
-
-func (d Dir) Open(name string) (File, error) {
- if filepath.Separator != '/' && strings.IndexRune(name, filepath.Separator) >= 0 {
- return nil, errors.New("http: invalid character in file path")
- }
- f, err := os.Open(filepath.Join(string(d), filepath.FromSlash(path.Clean("/"+name))))
- if err != nil {
- return nil, err
- }
- return f, nil
-}
-
-// A FileSystem implements access to a collection of named files.
-// The elements in a file path are separated by slash ('/', U+002F)
-// characters, regardless of host operating system convention.
-type FileSystem interface {
- Open(name string) (File, error)
-}
-
-// A File is returned by a FileSystem's Open method and can be
-// served by the FileServer implementation.
-type File interface {
- Close() error
- Stat() (*os.FileInfo, error)
- Readdir(count int) ([]os.FileInfo, error)
- Read([]byte) (int, error)
- Seek(offset int64, whence int) (int64, error)
-}
-
-// Heuristic: b is text if it is valid UTF-8 and doesn't
-// contain any unprintable ASCII or Unicode characters.
-func isText(b []byte) bool {
- for len(b) > 0 && utf8.FullRune(b) {
- rune, size := utf8.DecodeRune(b)
- if size == 1 && rune == utf8.RuneError {
- // decoding error
- return false
- }
- if 0x7F <= rune && rune <= 0x9F {
- return false
- }
- if rune < ' ' {
- switch rune {
- case '\n', '\r', '\t':
- // okay
- default:
- // binary garbage
- return false
- }
- }
- b = b[size:]
- }
- return true
-}
-
-func dirList(w ResponseWriter, f File) {
- w.Header().Set("Content-Type", "text/html; charset=utf-8")
- fmt.Fprintf(w, "<pre>\n")
- for {
- dirs, err := f.Readdir(100)
- if err != nil || len(dirs) == 0 {
- break
- }
- for _, d := range dirs {
- name := d.Name
- if d.IsDirectory() {
- name += "/"
- }
- // TODO htmlescape
- fmt.Fprintf(w, "<a href=\"%s\">%s</a>\n", name, name)
- }
- }
- fmt.Fprintf(w, "</pre>\n")
-}
-
-// name is '/'-separated, not filepath.Separator.
-func serveFile(w ResponseWriter, r *Request, fs FileSystem, name string, redirect bool) {
- const indexPage = "/index.html"
-
- // redirect .../index.html to .../
- // can't use Redirect() because that would make the path absolute,
- // which would be a problem running under StripPrefix
- if strings.HasSuffix(r.URL.Path, indexPage) {
- localRedirect(w, r, "./")
- return
- }
-
- f, err := fs.Open(name)
- if err != nil {
- // TODO expose actual error?
- NotFound(w, r)
- return
- }
- defer f.Close()
-
- d, err1 := f.Stat()
- if err1 != nil {
- // TODO expose actual error?
- NotFound(w, r)
- return
- }
-
- if redirect {
- // redirect to canonical path: / at end of directory url
- // r.URL.Path always begins with /
- url := r.URL.Path
- if d.IsDirectory() {
- if url[len(url)-1] != '/' {
- localRedirect(w, r, path.Base(url)+"/")
- return
- }
- } else {
- if url[len(url)-1] == '/' {
- localRedirect(w, r, "../"+path.Base(url))
- return
- }
- }
- }
-
- if t, _ := time.Parse(TimeFormat, r.Header.Get("If-Modified-Since")); t != nil && d.Mtime_ns/1e9 <= t.Seconds() {
- w.WriteHeader(StatusNotModified)
- return
- }
- w.Header().Set("Last-Modified", time.SecondsToUTC(d.Mtime_ns/1e9).Format(TimeFormat))
-
- // use contents of index.html for directory, if present
- if d.IsDirectory() {
- index := name + indexPage
- ff, err := fs.Open(index)
- if err == nil {
- defer ff.Close()
- dd, err := ff.Stat()
- if err == nil {
- name = index
- d = dd
- f = ff
- }
- }
- }
-
- if d.IsDirectory() {
- dirList(w, f)
- return
- }
-
- // serve file
- size := d.Size
- code := StatusOK
-
- // If Content-Type isn't set, use the file's extension to find it.
- if w.Header().Get("Content-Type") == "" {
- ctype := mime.TypeByExtension(filepath.Ext(name))
- if ctype == "" {
- // read a chunk to decide between utf-8 text and binary
- var buf [1024]byte
- n, _ := io.ReadFull(f, buf[:])
- b := buf[:n]
- if isText(b) {
- ctype = "text/plain; charset=utf-8"
- } else {
- // generic binary
- ctype = "application/octet-stream"
- }
- f.Seek(0, os.SEEK_SET) // rewind to output whole file
- }
- w.Header().Set("Content-Type", ctype)
- }
-
- // handle Content-Range header.
- // TODO(adg): handle multiple ranges
- ranges, err := parseRange(r.Header.Get("Range"), size)
- if err == nil && len(ranges) > 1 {
- err = errors.New("multiple ranges not supported")
- }
- if err != nil {
- Error(w, err.Error(), StatusRequestedRangeNotSatisfiable)
- return
- }
- if len(ranges) == 1 {
- ra := ranges[0]
- if _, err := f.Seek(ra.start, os.SEEK_SET); err != nil {
- Error(w, err.Error(), StatusRequestedRangeNotSatisfiable)
- return
- }
- size = ra.length
- code = StatusPartialContent
- w.Header().Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", ra.start, ra.start+ra.length-1, d.Size))
- }
-
- w.Header().Set("Accept-Ranges", "bytes")
- if w.Header().Get("Content-Encoding") == "" {
- w.Header().Set("Content-Length", strconv.Itoa64(size))
- }
-
- w.WriteHeader(code)
-
- if r.Method != "HEAD" {
- io.CopyN(w, f, size)
- }
-}
-
-// localRedirect gives a Moved Permanently response.
-// It does not convert relative paths to absolute paths like Redirect does.
-func localRedirect(w ResponseWriter, r *Request, newPath string) {
- if q := r.URL.RawQuery; q != "" {
- newPath += "?" + q
- }
- w.Header().Set("Location", newPath)
- w.WriteHeader(StatusMovedPermanently)
-}
-
-// ServeFile replies to the request with the contents of the named file or directory.
-func ServeFile(w ResponseWriter, r *Request, name string) {
- dir, file := filepath.Split(name)
- serveFile(w, r, Dir(dir), file, false)
-}
-
-type fileHandler struct {
- root FileSystem
-}
-
-// FileServer returns a handler that serves HTTP requests
-// with the contents of the file system rooted at root.
-//
-// To use the operating system's file system implementation,
-// use http.Dir:
-//
-// http.Handle("/", http.FileServer(http.Dir("/tmp")))
-func FileServer(root FileSystem) Handler {
- return &fileHandler{root}
-}
-
-func (f *fileHandler) ServeHTTP(w ResponseWriter, r *Request) {
- upath := r.URL.Path
- if !strings.HasPrefix(upath, "/") {
- upath = "/" + upath
- r.URL.Path = upath
- }
- serveFile(w, r, f.root, path.Clean(upath), true)
-}
-
-// httpRange specifies the byte range to be sent to the client.
-type httpRange struct {
- start, length int64
-}
-
-// parseRange parses a Range header string as per RFC 2616.
-func parseRange(s string, size int64) ([]httpRange, error) {
- if s == "" {
- return nil, nil // header not present
- }
- const b = "bytes="
- if !strings.HasPrefix(s, b) {
- return nil, errors.New("invalid range")
- }
- var ranges []httpRange
- for _, ra := range strings.Split(s[len(b):], ",") {
- i := strings.Index(ra, "-")
- if i < 0 {
- return nil, errors.New("invalid range")
- }
- start, end := ra[:i], ra[i+1:]
- var r httpRange
- if start == "" {
- // If no start is specified, end specifies the
- // range start relative to the end of the file.
- i, err := strconv.Atoi64(end)
- if err != nil {
- return nil, errors.New("invalid range")
- }
- if i > size {
- i = size
- }
- r.start = size - i
- r.length = size - r.start
- } else {
- i, err := strconv.Atoi64(start)
- if err != nil || i > size || i < 0 {
- return nil, errors.New("invalid range")
- }
- r.start = i
- if end == "" {
- // If no end is specified, range extends to end of the file.
- r.length = size - r.start
- } else {
- i, err := strconv.Atoi64(end)
- if err != nil || r.start > i {
- return nil, errors.New("invalid range")
- }
- if i >= size {
- i = size - 1
- }
- r.length = i - r.start + 1
- }
- }
- ranges = append(ranges, r)
- }
- return ranges, nil
-}
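
For concreteness, here is how the arithmetic above resolves the three forms of a byte-range spec against a hypothetical 1000-byte file (values worked by hand from the code above):

    "bytes=0-4"   -> httpRange{start: 0,   length: 5}    // explicit start and end
    "bytes=500-"  -> httpRange{start: 500, length: 500}  // open-ended: to end of file
    "bytes=-200"  -> httpRange{start: 800, length: 200}  // suffix: last 200 bytes
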
+++ /dev/null
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http_test
-
-import (
- "fmt"
- . "http"
- "http/httptest"
- "io/ioutil"
- "os"
- "path/filepath"
- "strings"
- "testing"
- "url"
-)
-
-const (
- testFile = "testdata/file"
- testFileLength = 11
-)
-
-var ServeFileRangeTests = []struct {
- start, end int
- r string
- code int
-}{
- {0, testFileLength, "", StatusOK},
- {0, 5, "0-4", StatusPartialContent},
- {2, testFileLength, "2-", StatusPartialContent},
- {testFileLength - 5, testFileLength, "-5", StatusPartialContent},
- {3, 8, "3-7", StatusPartialContent},
- {0, 0, "20-", StatusRequestedRangeNotSatisfiable},
-}
-
-func TestServeFile(t *testing.T) {
- ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
- ServeFile(w, r, "testdata/file")
- }))
- defer ts.Close()
-
- var err error
-
- file, err := ioutil.ReadFile(testFile)
- if err != nil {
- t.Fatal("reading file:", err)
- }
-
- // set up the Request (re-used for all tests)
- var req Request
- req.Header = make(Header)
- if req.URL, err = url.Parse(ts.URL); err != nil {
- t.Fatal("ParseURL:", err)
- }
- req.Method = "GET"
-
- // straight GET
- _, body := getBody(t, req)
- if !equal(body, file) {
- t.Fatalf("body mismatch: got %q, want %q", body, file)
- }
-
- // Range tests
- for _, rt := range ServeFileRangeTests {
- req.Header.Set("Range", "bytes="+rt.r)
- if rt.r == "" {
- req.Header["Range"] = nil
- }
- r, body := getBody(t, req)
- if r.StatusCode != rt.code {
- t.Errorf("range=%q: StatusCode=%d, want %d", rt.r, r.StatusCode, rt.code)
- }
- if rt.code == StatusRequestedRangeNotSatisfiable {
- continue
- }
- h := fmt.Sprintf("bytes %d-%d/%d", rt.start, rt.end-1, testFileLength)
- if rt.r == "" {
- h = ""
- }
- cr := r.Header.Get("Content-Range")
- if cr != h {
- t.Errorf("header mismatch: range=%q: got %q, want %q", rt.r, cr, h)
- }
- if !equal(body, file[rt.start:rt.end]) {
- t.Errorf("body mismatch: range=%q: got %q, want %q", rt.r, body, file[rt.start:rt.end])
- }
- }
-}
-
-var fsRedirectTestData = []struct {
- original, redirect string
-}{
- {"/test/index.html", "/test/"},
- {"/test/testdata", "/test/testdata/"},
- {"/test/testdata/file/", "/test/testdata/file"},
-}
-
-func TestFSRedirect(t *testing.T) {
- ts := httptest.NewServer(StripPrefix("/test", FileServer(Dir("."))))
- defer ts.Close()
-
- for _, data := range fsRedirectTestData {
- res, err := Get(ts.URL + data.original)
- if err != nil {
- t.Fatal(err)
- }
- res.Body.Close()
- if g, e := res.Request.URL.Path, data.redirect; g != e {
- t.Errorf("redirect from %s: got %s, want %s", data.original, g, e)
- }
- }
-}
-
-type testFileSystem struct {
- open func(name string) (File, error)
-}
-
-func (fs *testFileSystem) Open(name string) (File, error) {
- return fs.open(name)
-}
-
-func TestFileServerCleans(t *testing.T) {
- ch := make(chan string, 1)
- fs := FileServer(&testFileSystem{func(name string) (File, error) {
- ch <- name
- return nil, os.ENOENT
- }})
- tests := []struct {
- reqPath, openArg string
- }{
- {"/foo.txt", "/foo.txt"},
- {"//foo.txt", "/foo.txt"},
- {"/../foo.txt", "/foo.txt"},
- }
- req, _ := NewRequest("GET", "http://example.com", nil)
- for n, test := range tests {
- rec := httptest.NewRecorder()
- req.URL.Path = test.reqPath
- fs.ServeHTTP(rec, req)
- if got := <-ch; got != test.openArg {
- t.Errorf("test %d: got %q, want %q", n, got, test.openArg)
- }
- }
-}
-
-func TestFileServerImplicitLeadingSlash(t *testing.T) {
- tempDir, err := ioutil.TempDir("", "")
- if err != nil {
- t.Fatalf("TempDir: %v", err)
- }
- defer os.RemoveAll(tempDir)
- if err := ioutil.WriteFile(filepath.Join(tempDir, "foo.txt"), []byte("Hello world"), 0644); err != nil {
- t.Fatalf("WriteFile: %v", err)
- }
- ts := httptest.NewServer(StripPrefix("/bar/", FileServer(Dir(tempDir))))
- defer ts.Close()
- get := func(suffix string) string {
- res, err := Get(ts.URL + suffix)
- if err != nil {
- t.Fatalf("Get %s: %v", suffix, err)
- }
- b, err := ioutil.ReadAll(res.Body)
- if err != nil {
- t.Fatalf("ReadAll %s: %v", suffix, err)
- }
- return string(b)
- }
- if s := get("/bar/"); !strings.Contains(s, ">foo.txt<") {
- t.Logf("expected a directory listing with foo.txt, got %q", s)
- }
- if s := get("/bar/foo.txt"); s != "Hello world" {
- t.Logf("expected %q, got %q", "Hello world", s)
- }
-}
-
-func TestDirJoin(t *testing.T) {
- wfi, err := os.Stat("/etc/hosts")
- if err != nil {
- t.Logf("skipping test; no /etc/hosts file")
- return
- }
- test := func(d Dir, name string) {
- f, err := d.Open(name)
- if err != nil {
- t.Fatalf("open of %s: %v", name, err)
- }
- defer f.Close()
- gfi, err := f.Stat()
- if err != nil {
- t.Fatalf("stat of %s: %v", name, err)
- }
- if gfi.Ino != wfi.Ino {
- t.Errorf("%s got different inode", name)
- }
- }
- test(Dir("/etc/"), "/hosts")
- test(Dir("/etc/"), "hosts")
- test(Dir("/etc/"), "../../../../hosts")
- test(Dir("/etc"), "/hosts")
- test(Dir("/etc"), "hosts")
- test(Dir("/etc"), "../../../../hosts")
-
- // Not really directories, but since we use this trick in
- // ServeFile, test it:
- test(Dir("/etc/hosts"), "")
- test(Dir("/etc/hosts"), "/")
- test(Dir("/etc/hosts"), "../")
-}
-
-func TestServeFileContentType(t *testing.T) {
- const ctype = "icecream/chocolate"
- override := false
- ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
- if override {
- w.Header().Set("Content-Type", ctype)
- }
- ServeFile(w, r, "testdata/file")
- }))
- defer ts.Close()
- get := func(want string) {
- resp, err := Get(ts.URL)
- if err != nil {
- t.Fatal(err)
- }
- if h := resp.Header.Get("Content-Type"); h != want {
- t.Errorf("Content-Type mismatch: got %q, want %q", h, want)
- }
- }
- get("text/plain; charset=utf-8")
- override = true
- get(ctype)
-}
-
-func TestServeFileMimeType(t *testing.T) {
- ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
- ServeFile(w, r, "testdata/style.css")
- }))
- defer ts.Close()
- resp, err := Get(ts.URL)
- if err != nil {
- t.Fatal(err)
- }
- want := "text/css; charset=utf-8"
- if h := resp.Header.Get("Content-Type"); h != want {
- t.Errorf("Content-Type mismatch: got %q, want %q", h, want)
- }
-}
-
-func TestServeFileWithContentEncoding(t *testing.T) {
- ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
- w.Header().Set("Content-Encoding", "foo")
- ServeFile(w, r, "testdata/file")
- }))
- defer ts.Close()
- resp, err := Get(ts.URL)
- if err != nil {
- t.Fatal(err)
- }
- if g, e := resp.ContentLength, int64(-1); g != e {
- t.Errorf("Content-Length mismatch: got %d, want %d", g, e)
- }
-}
-
-func TestServeIndexHtml(t *testing.T) {
- const want = "index.html says hello\n"
- ts := httptest.NewServer(FileServer(Dir(".")))
- defer ts.Close()
-
- for _, path := range []string{"/testdata/", "/testdata/index.html"} {
- res, err := Get(ts.URL + path)
- if err != nil {
- t.Fatal(err)
- }
- defer res.Body.Close()
- b, err := ioutil.ReadAll(res.Body)
- if err != nil {
- t.Fatal("reading Body:", err)
- }
- if s := string(b); s != want {
- t.Errorf("for path %q got %q, want %q", path, s, want)
- }
- }
-}
-
-func getBody(t *testing.T, req Request) (*Response, []byte) {
- r, err := DefaultClient.Do(&req)
- if err != nil {
- t.Fatal(req.URL.String(), "send:", err)
- }
- b, err := ioutil.ReadAll(r.Body)
- if err != nil {
- t.Fatal("reading Body:", err)
- }
- return r, b
-}
-
-func equal(a, b []byte) bool {
- if len(a) != len(b) {
- return false
- }
- for i := range a {
- if a[i] != b[i] {
- return false
- }
- }
- return true
-}
+++ /dev/null
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http
-
-import (
- "fmt"
- "io"
- "net/textproto"
- "sort"
- "strings"
-)
-
-// A Header represents the key-value pairs in an HTTP header.
-type Header map[string][]string
-
-// Add adds the key, value pair to the header.
-// It appends to any existing values associated with key.
-func (h Header) Add(key, value string) {
- textproto.MIMEHeader(h).Add(key, value)
-}
-
-// Set sets the header entries associated with key to
-// the single element value. It replaces any existing
-// values associated with key.
-func (h Header) Set(key, value string) {
- textproto.MIMEHeader(h).Set(key, value)
-}
-
-// Get gets the first value associated with the given key.
-// If there are no values associated with the key, Get returns "".
-// Get is a convenience method. For more complex queries,
-// access the map directly.
-func (h Header) Get(key string) string {
- return textproto.MIMEHeader(h).Get(key)
-}
-
-// Del deletes the values associated with key.
-func (h Header) Del(key string) {
- textproto.MIMEHeader(h).Del(key)
-}
-
-// Write writes a header in wire format.
-func (h Header) Write(w io.Writer) error {
- return h.WriteSubset(w, nil)
-}
-
-var headerNewlineToSpace = strings.NewReplacer("\n", " ", "\r", " ")
-
-// WriteSubset writes a header in wire format.
-// If exclude is not nil, keys where exclude[key] == true are not written.
-func (h Header) WriteSubset(w io.Writer, exclude map[string]bool) error {
- keys := make([]string, 0, len(h))
- for k := range h {
- if exclude == nil || !exclude[k] {
- keys = append(keys, k)
- }
- }
- sort.Strings(keys)
- for _, k := range keys {
- for _, v := range h[k] {
- v = headerNewlineToSpace.Replace(v)
- v = strings.TrimSpace(v)
- if _, err := fmt.Fprintf(w, "%s: %s\r\n", k, v); err != nil {
- return err
- }
- }
- }
- return nil
-}
-
-// CanonicalHeaderKey returns the canonical format of the
-// header key s. The canonicalization converts the first
-// letter and any letter following a hyphen to upper case;
-// the rest are converted to lowercase. For example, the
-// canonical key for "accept-encoding" is "Accept-Encoding".
-func CanonicalHeaderKey(s string) string { return textproto.CanonicalMIMEHeaderKey(s) }
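
A short hedged sketch of how Add, Get, Write, and CanonicalHeaderKey above fit together; the header values and the post-move "net/http" import path are assumptions for illustration.

    package main

    import (
        "fmt"
        "net/http"
        "os"
    )

    func main() {
        h := make(http.Header)
        h.Add("accept-encoding", "gzip") // keys are canonicalized on Add/Set/Get
        h.Add("Accept-Encoding", "br")   // appends under the same canonical key
        fmt.Println(h.Get("Accept-Encoding"))                // gzip (first value)
        fmt.Println(http.CanonicalHeaderKey("content-type")) // Content-Type
        h.Write(os.Stdout) // Accept-Encoding: gzip\r\nAccept-Encoding: br\r\n
    }
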
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http
-
-import (
- "bytes"
- "testing"
-)
-
-var headerWriteTests = []struct {
- h Header
- exclude map[string]bool
- expected string
-}{
- {Header{}, nil, ""},
- {
- Header{
- "Content-Type": {"text/html; charset=UTF-8"},
- "Content-Length": {"0"},
- },
- nil,
- "Content-Length: 0\r\nContent-Type: text/html; charset=UTF-8\r\n",
- },
- {
- Header{
- "Content-Length": {"0", "1", "2"},
- },
- nil,
- "Content-Length: 0\r\nContent-Length: 1\r\nContent-Length: 2\r\n",
- },
- {
- Header{
- "Expires": {"-1"},
- "Content-Length": {"0"},
- "Content-Encoding": {"gzip"},
- },
- map[string]bool{"Content-Length": true},
- "Content-Encoding: gzip\r\nExpires: -1\r\n",
- },
- {
- Header{
- "Expires": {"-1"},
- "Content-Length": {"0", "1", "2"},
- "Content-Encoding": {"gzip"},
- },
- map[string]bool{"Content-Length": true},
- "Content-Encoding: gzip\r\nExpires: -1\r\n",
- },
- {
- Header{
- "Expires": {"-1"},
- "Content-Length": {"0"},
- "Content-Encoding": {"gzip"},
- },
- map[string]bool{"Content-Length": true, "Expires": true, "Content-Encoding": true},
- "",
- },
- {
- Header{
- "Nil": nil,
- "Empty": {},
- "Blank": {""},
- "Double-Blank": {"", ""},
- },
- nil,
- "Blank: \r\nDouble-Blank: \r\nDouble-Blank: \r\n",
- },
-}
-
-func TestHeaderWrite(t *testing.T) {
- var buf bytes.Buffer
- for i, test := range headerWriteTests {
- test.h.WriteSubset(&buf, test.exclude)
- if buf.String() != test.expected {
- t.Errorf("#%d:\n got: %q\nwant: %q", i, buf.String(), test.expected)
- }
- buf.Reset()
- }
-}
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package httptest provides utilities for HTTP testing.
-package httptest
-
-import (
- "bytes"
- "http"
-)
-
-// ResponseRecorder is an implementation of http.ResponseWriter that
-// records its mutations for later inspection in tests.
-type ResponseRecorder struct {
- Code int // the HTTP response code from WriteHeader
- HeaderMap http.Header // the HTTP response headers
- Body *bytes.Buffer // if non-nil, the bytes.Buffer to append written data to
- Flushed bool
-}
-
-// NewRecorder returns an initialized ResponseRecorder.
-func NewRecorder() *ResponseRecorder {
- return &ResponseRecorder{
- HeaderMap: make(http.Header),
- Body: new(bytes.Buffer),
- }
-}
-
-// DefaultRemoteAddr is the default remote address to return in RemoteAddr if
-// an explicit DefaultRemoteAddr isn't set on ResponseRecorder.
-const DefaultRemoteAddr = "1.2.3.4"
-
-// Header returns the response headers.
-func (rw *ResponseRecorder) Header() http.Header {
- return rw.HeaderMap
-}
-
-// Write always succeeds and writes to rw.Body, if not nil.
-func (rw *ResponseRecorder) Write(buf []byte) (int, error) {
- if rw.Body != nil {
- rw.Body.Write(buf)
- }
- if rw.Code == 0 {
- rw.Code = http.StatusOK
- }
- return len(buf), nil
-}
-
-// WriteHeader sets rw.Code.
-func (rw *ResponseRecorder) WriteHeader(code int) {
- rw.Code = code
-}
-
-// Flush sets rw.Flushed to true.
-func (rw *ResponseRecorder) Flush() {
- rw.Flushed = true
-}
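
As a usage sketch of ResponseRecorder above in a test: the handler and expected values are hypothetical, and the "net/http/httptest" import path assumes the post-move package layout.

    package mypkg

    import (
        "net/http"
        "net/http/httptest"
        "testing"
    )

    func TestNotFound(t *testing.T) {
        rec := httptest.NewRecorder()
        req, _ := http.NewRequest("GET", "http://example.com/missing", nil)
        handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            http.Error(w, "missing", http.StatusNotFound)
        })
        handler.ServeHTTP(rec, req)
        if rec.Code != http.StatusNotFound {
            t.Errorf("Code = %d, want %d", rec.Code, http.StatusNotFound)
        }
        if got := rec.Body.String(); got == "" {
            t.Errorf("expected a non-empty body, got %q", got)
        }
    }
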
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Implementation of Server
-
-package httptest
-
-import (
- "crypto/rand"
- "crypto/tls"
- "flag"
- "fmt"
- "http"
- "net"
- "os"
- "time"
-)
-
-// A Server is an HTTP server listening on a system-chosen port on the
-// local loopback interface, for use in end-to-end HTTP tests.
-type Server struct {
- URL string // base URL of form http://ipaddr:port with no trailing slash
- Listener net.Listener
- TLS *tls.Config // nil if not using TLS
-
- // Config may be changed after calling NewUnstartedServer and
- // before Start or StartTLS.
- Config *http.Server
-}
-
-// historyListener keeps track of all connections that it's ever
-// accepted.
-type historyListener struct {
- net.Listener
- history []net.Conn
-}
-
-func (hs *historyListener) Accept() (c net.Conn, err error) {
- c, err = hs.Listener.Accept()
- if err == nil {
- hs.history = append(hs.history, c)
- }
- return
-}
-
-func newLocalListener() net.Listener {
- if *serve != "" {
- l, err := net.Listen("tcp", *serve)
- if err != nil {
- panic(fmt.Sprintf("httptest: failed to listen on %v: %v", *serve, err))
- }
- return l
- }
- l, err := net.Listen("tcp", "127.0.0.1:0")
- if err != nil {
- if l, err = net.Listen("tcp6", "[::1]:0"); err != nil {
- panic(fmt.Sprintf("httptest: failed to listen on a port: %v", err))
- }
- }
- return l
-}
-
-// When debugging a particular http server-based test,
-// this flag lets you run
-// gotest -run=BrokenTest -httptest.serve=127.0.0.1:8000
-// to start the broken server so you can interact with it manually.
-var serve = flag.String("httptest.serve", "", "if non-empty, httptest.NewServer serves on this address and blocks")
-
-// NewServer starts and returns a new Server.
-// The caller should call Close when finished, to shut it down.
-func NewServer(handler http.Handler) *Server {
- ts := NewUnstartedServer(handler)
- ts.Start()
- return ts
-}
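
And a hedged end-to-end sketch of NewServer above; the handler and expectations are made up, and the import paths assume the post-move layout.

    package mypkg

    import (
        "io/ioutil"
        "net/http"
        "net/http/httptest"
        "testing"
    )

    func TestHello(t *testing.T) {
        ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            w.Write([]byte("hello"))
        }))
        defer ts.Close() // shut the test server down when done

        res, err := http.Get(ts.URL)
        if err != nil {
            t.Fatal(err)
        }
        body, err := ioutil.ReadAll(res.Body)
        res.Body.Close()
        if err != nil {
            t.Fatal(err)
        }
        if string(body) != "hello" {
            t.Errorf("got %q, want %q", body, "hello")
        }
    }
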
-
-// NewUnstartedServer returns a new Server but doesn't start it.
-//
-// After changing its configuration, the caller should call Start or
-// StartTLS.
-//
-// The caller should call Close when finished, to shut it down.
-func NewUnstartedServer(handler http.Handler) *Server {
- return &Server{
- Listener: newLocalListener(),
- Config: &http.Server{Handler: handler},
- }
-}
-
-// Start starts a server from NewUnstartedServer.
-func (s *Server) Start() {
- if s.URL != "" {
- panic("Server already started")
- }
- s.Listener = &historyListener{s.Listener, make([]net.Conn, 0)}
- s.URL = "http://" + s.Listener.Addr().String()
- go s.Config.Serve(s.Listener)
- if *serve != "" {
- fmt.Fprintln(os.Stderr, "httptest: serving on", s.URL)
- select {}
- }
-}
-
-// StartTLS starts TLS on a server from NewUnstartedServer.
-func (s *Server) StartTLS() {
- if s.URL != "" {
- panic("Server already started")
- }
- cert, err := tls.X509KeyPair(localhostCert, localhostKey)
- if err != nil {
- panic(fmt.Sprintf("httptest: NewTLSServer: %v", err))
- }
-
- s.TLS = &tls.Config{
- Rand: rand.Reader,
- Time: time.Seconds,
- NextProtos: []string{"http/1.1"},
- Certificates: []tls.Certificate{cert},
- }
- tlsListener := tls.NewListener(s.Listener, s.TLS)
-
- s.Listener = &historyListener{tlsListener, make([]net.Conn, 0)}
- s.URL = "https://" + s.Listener.Addr().String()
- go s.Config.Serve(s.Listener)
-}
-
-// NewTLSServer starts and returns a new Server using TLS.
-// The caller should call Close when finished, to shut it down.
-func NewTLSServer(handler http.Handler) *Server {
- ts := NewUnstartedServer(handler)
- ts.StartTLS()
- return ts
-}
-
-// Close shuts down the server.
-func (s *Server) Close() {
- s.Listener.Close()
-}
-
-// CloseClientConnections closes any currently open HTTP connections
-// to the test Server.
-func (s *Server) CloseClientConnections() {
- hl, ok := s.Listener.(*historyListener)
- if !ok {
- return
- }
- for _, conn := range hl.history {
- conn.Close()
- }
-}
-
-// localhostCert is a PEM-encoded TLS cert with SAN DNS names
-// "127.0.0.1" and "[::1]", expiring at the last second of 2049 (the end
-// of ASN.1 time).
-var localhostCert = []byte(`-----BEGIN CERTIFICATE-----
-MIIBOTCB5qADAgECAgEAMAsGCSqGSIb3DQEBBTAAMB4XDTcwMDEwMTAwMDAwMFoX
-DTQ5MTIzMTIzNTk1OVowADBaMAsGCSqGSIb3DQEBAQNLADBIAkEAsuA5mAFMj6Q7
-qoBzcvKzIq4kzuT5epSp2AkcQfyBHm7K13Ws7u+0b5Vb9gqTf5cAiIKcrtrXVqkL
-8i1UQF6AzwIDAQABo08wTTAOBgNVHQ8BAf8EBAMCACQwDQYDVR0OBAYEBAECAwQw
-DwYDVR0jBAgwBoAEAQIDBDAbBgNVHREEFDASggkxMjcuMC4wLjGCBVs6OjFdMAsG
-CSqGSIb3DQEBBQNBAJH30zjLWRztrWpOCgJL8RQWLaKzhK79pVhAx6q/3NrF16C7
-+l1BRZstTwIGdoGId8BRpErK1TXkniFb95ZMynM=
------END CERTIFICATE-----
-`)
-
-// localhostKey is the private key for localhostCert.
-var localhostKey = []byte(`-----BEGIN RSA PRIVATE KEY-----
-MIIBPQIBAAJBALLgOZgBTI+kO6qAc3LysyKuJM7k+XqUqdgJHEH8gR5uytd1rO7v
-tG+VW/YKk3+XAIiCnK7a11apC/ItVEBegM8CAwEAAQJBAI5sxq7naeR9ahyqRkJi
-SIv2iMxLuPEHaezf5CYOPWjSjBPyVhyRevkhtqEjF/WkgL7C2nWpYHsUcBDBQVF0
-3KECIQDtEGB2ulnkZAahl3WuJziXGLB+p8Wgx7wzSM6bHu1c6QIhAMEp++CaS+SJ
-/TrU0zwY/fW4SvQeb49BPZUF3oqR8Xz3AiEA1rAJHBzBgdOQKdE3ksMUPcnvNJSN
-poCcELmz2clVXtkCIQCLytuLV38XHToTipR4yMl6O+6arzAjZ56uq7m7ZRV0TwIh
-AM65XAOw8Dsg9Kq78aYXiOEDc5DL0sbFUu/SlmRcCg93
------END RSA PRIVATE KEY-----
-`)
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http
-
-// This file deals with lexical matters of HTTP
-
-func isSeparator(c byte) bool {
- switch c {
- case '(', ')', '<', '>', '@', ',', ';', ':', '\\', '"', '/', '[', ']', '?', '=', '{', '}', ' ', '\t':
- return true
- }
- return false
-}
-
-func isSpace(c byte) bool {
- switch c {
- case ' ', '\t', '\r', '\n':
- return true
- }
- return false
-}
-
-func isCtl(c byte) bool { return (0 <= c && c <= 31) || c == 127 }
-
-func isChar(c byte) bool { return 0 <= c && c <= 127 }
-
-func isAnyText(c byte) bool { return !isCtl(c) }
-
-func isQdText(c byte) bool { return isAnyText(c) && c != '"' }
-
-func isToken(c byte) bool { return isChar(c) && !isCtl(c) && !isSeparator(c) }
-
-// Valid escaped sequences are not specified in RFC 2616, so for now, we assume
-// that they coincide with the common-sense ones used by Go. Malformed
-// characters should probably not be treated as errors by a robust (forgiving)
-// parser, so we replace them with the '?' character.
-func httpUnquotePair(b byte) byte {
- // skip the first byte, which should always be '\'
- switch b {
- case 'a':
- return '\a'
- case 'b':
- return '\b'
- case 'f':
- return '\f'
- case 'n':
- return '\n'
- case 'r':
- return '\r'
- case 't':
- return '\t'
- case 'v':
- return '\v'
- case '\\':
- return '\\'
- case '\'':
- return '\''
- case '"':
- return '"'
- }
- return '?'
-}
-
-// raw must begin with a valid quoted string. Only the first quoted string is
-// parsed and is unquoted in result. eaten is the number of bytes parsed, or -1
-// upon failure.
-func httpUnquote(raw []byte) (eaten int, result string) {
- buf := make([]byte, len(raw))
- if raw[0] != '"' {
- return -1, ""
- }
- eaten = 1
- j := 0 // # of bytes written in buf
- for i := 1; i < len(raw); i++ {
- switch b := raw[i]; b {
- case '"':
- eaten++
- buf = buf[0:j]
- return i + 1, string(buf)
- case '\\':
- if len(raw) < i+2 {
- return -1, ""
- }
- buf[j] = httpUnquotePair(raw[i+1])
- eaten += 2
- j++
- i++
- default:
- if isQdText(b) {
- buf[j] = b
- } else {
- buf[j] = '?'
- }
- eaten++
- j++
- }
- }
- return -1, ""
-}
-
-// This is a best-effort parse, so errors are not returned; instead, not all of
-// the input string may be parsed. result is always non-nil.
-func httpSplitFieldValue(fv string) (eaten int, result []string) {
- result = make([]string, 0, len(fv))
- raw := []byte(fv)
- i := 0
- chunk := ""
- for i < len(raw) {
- b := raw[i]
- switch {
- case b == '"':
- eaten, unq := httpUnquote(raw[i:len(raw)])
- if eaten < 0 {
- return i, result
- } else {
- i += eaten
- chunk += unq
- }
- case isSeparator(b):
- if chunk != "" {
- result = result[0 : len(result)+1]
- result[len(result)-1] = chunk
- chunk = ""
- }
- i++
- case isToken(b):
- chunk += string(b)
- i++
- case b == '\n' || b == '\r':
- i++
- default:
- chunk += "?"
- i++
- }
- }
- if chunk != "" {
- result = result[0 : len(result)+1]
- result[len(result)-1] = chunk
- chunk = ""
- }
- return i, result
-}
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http
-
-import (
- "testing"
-)
-
-type lexTest struct {
- Raw string
- Parsed int // # of parsed characters
- Result []string
-}
-
-var lexTests = []lexTest{
- {
- Raw: `"abc"def,:ghi`,
- Parsed: 13,
- Result: []string{"abcdef", "ghi"},
- },
- // My understanding of the RFC is that escape sequences outside of
- // quotes are not interpreted?
- {
- Raw: `"\t"\t"\t"`,
- Parsed: 10,
- Result: []string{"\t", "t\t"},
- },
- {
- Raw: `"\yab"\r\n`,
- Parsed: 10,
- Result: []string{"?ab", "r", "n"},
- },
- {
- Raw: "ab\f",
- Parsed: 3,
- Result: []string{"ab?"},
- },
- {
- Raw: "\"ab \" c,de f, gh, ij\n\t\r",
- Parsed: 23,
- Result: []string{"ab ", "c", "de", "f", "gh", "ij"},
- },
-}
-
-func min(x, y int) int {
- if x <= y {
- return x
- }
- return y
-}
-
-func TestSplitFieldValue(t *testing.T) {
- for k, l := range lexTests {
- parsed, result := httpSplitFieldValue(l.Raw)
- if parsed != l.Parsed {
- t.Errorf("#%d: Parsed %d, expected %d", k, parsed, l.Parsed)
- }
- if len(result) != len(l.Result) {
- t.Errorf("#%d: Result len %d, expected %d", k, len(result), len(l.Result))
- }
- for i := 0; i < min(len(result), len(l.Result)); i++ {
- if result[i] != l.Result[i] {
- t.Errorf("#%d: %d-th entry mismatch. Have {%s}, expect {%s}",
- k, i, result[i], l.Result[i])
- }
- }
- }
-}
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http
-
-import (
- "bufio"
- "errors"
- "io"
- "net"
- "net/textproto"
- "os"
- "sync"
-)
-
-var (
- ErrPersistEOF = &ProtocolError{"persistent connection closed"}
- ErrPipeline = &ProtocolError{"pipeline error"}
-)
-
-// A ServerConn reads requests and sends responses over an underlying
-// connection, until the HTTP keepalive logic commands an end. ServerConn
-// also allows hijacking the underlying connection by calling Hijack
-// to regain control over the connection. ServerConn supports pipelining,
-// i.e. requests can be read out of sync (but in the same order) while the
-// respective responses are sent.
-//
-// ServerConn is low-level and should not be needed by most applications.
-// See Server.
-type ServerConn struct {
- lk sync.Mutex // read-write protects the following fields
- c net.Conn
- r *bufio.Reader
- re, we error // read/write errors
- lastbody io.ReadCloser
- nread, nwritten int
- pipereq map[*Request]uint
-
- pipe textproto.Pipeline
-}
-
-// NewServerConn returns a new ServerConn reading and writing c. If r is not
-// nil, it is the buffer to use when reading c.
-func NewServerConn(c net.Conn, r *bufio.Reader) *ServerConn {
- if r == nil {
- r = bufio.NewReader(c)
- }
- return &ServerConn{c: c, r: r, pipereq: make(map[*Request]uint)}
-}
-
-// Hijack detaches the ServerConn and returns the underlying connection as well
-// as the read-side bufio which may have some left over data. Hijack may be
-// called before Read has signaled the end of the keep-alive logic. The user
-// should not call Hijack while Read or Write is in progress.
-func (sc *ServerConn) Hijack() (c net.Conn, r *bufio.Reader) {
- sc.lk.Lock()
- defer sc.lk.Unlock()
- c = sc.c
- r = sc.r
- sc.c = nil
- sc.r = nil
- return
-}
-
-// Close calls Hijack and then also closes the underlying connection
-func (sc *ServerConn) Close() error {
- c, _ := sc.Hijack()
- if c != nil {
- return c.Close()
- }
- return nil
-}
-
-// Read returns the next request on the wire. An ErrPersistEOF is returned if
-// it is gracefully determined that there are no more requests (e.g. after the
-// first request on an HTTP/1.0 connection, or after a Connection: close on an
-// HTTP/1.1 connection).
-func (sc *ServerConn) Read() (req *Request, err error) {
-
- // Ensure ordered execution of Reads and Writes
- id := sc.pipe.Next()
- sc.pipe.StartRequest(id)
- defer func() {
- sc.pipe.EndRequest(id)
- if req == nil {
- sc.pipe.StartResponse(id)
- sc.pipe.EndResponse(id)
- } else {
- // Remember the pipeline id of this request
- sc.lk.Lock()
- sc.pipereq[req] = id
- sc.lk.Unlock()
- }
- }()
-
- sc.lk.Lock()
- if sc.we != nil { // no point receiving if write-side broken or closed
- defer sc.lk.Unlock()
- return nil, sc.we
- }
- if sc.re != nil {
- defer sc.lk.Unlock()
- return nil, sc.re
- }
- if sc.r == nil { // connection closed by user in the meantime
- defer sc.lk.Unlock()
- return nil, os.EBADF
- }
- r := sc.r
- lastbody := sc.lastbody
- sc.lastbody = nil
- sc.lk.Unlock()
-
- // Make sure body is fully consumed, even if user does not call body.Close
- if lastbody != nil {
- // body.Close is assumed to be idempotent and multiple calls to
- // it should return the error that its first invocation
- // returned.
- err = lastbody.Close()
- if err != nil {
- sc.lk.Lock()
- defer sc.lk.Unlock()
- sc.re = err
- return nil, err
- }
- }
-
- req, err = ReadRequest(r)
- sc.lk.Lock()
- defer sc.lk.Unlock()
- if err != nil {
- if err == io.ErrUnexpectedEOF {
- // A close from the opposing client is treated as a
- // graceful close, even if there was some unparse-able
- // data before the close.
- sc.re = ErrPersistEOF
- return nil, sc.re
- } else {
- sc.re = err
- return req, err
- }
- }
- sc.lastbody = req.Body
- sc.nread++
- if req.Close {
- sc.re = ErrPersistEOF
- return req, sc.re
- }
- return req, err
-}
-
-// Pending returns the number of unanswered requests
-// that have been received on the connection.
-func (sc *ServerConn) Pending() int {
- sc.lk.Lock()
- defer sc.lk.Unlock()
- return sc.nread - sc.nwritten
-}
-
-// Write writes resp in response to req. To close the connection gracefully, set the
-// Response.Close field to true. Write should be considered operational until
-// it returns an error, regardless of any errors returned on the Read side.
-func (sc *ServerConn) Write(req *Request, resp *Response) error {
-
- // Retrieve the pipeline ID of this request/response pair
- sc.lk.Lock()
- id, ok := sc.pipereq[req]
- delete(sc.pipereq, req)
- if !ok {
- sc.lk.Unlock()
- return ErrPipeline
- }
- sc.lk.Unlock()
-
- // Ensure pipeline order
- sc.pipe.StartResponse(id)
- defer sc.pipe.EndResponse(id)
-
- sc.lk.Lock()
- if sc.we != nil {
- defer sc.lk.Unlock()
- return sc.we
- }
- if sc.c == nil { // connection closed by user in the meantime
- defer sc.lk.Unlock()
- return os.EBADF
- }
- c := sc.c
- if sc.nread <= sc.nwritten {
- defer sc.lk.Unlock()
- return errors.New("persist server pipe count")
- }
- if resp.Close {
- // After signaling a keep-alive close, any pipelined unread
- // requests will be lost. It is up to the user to drain them
- // before signaling.
- sc.re = ErrPersistEOF
- }
- sc.lk.Unlock()
-
- err := resp.Write(c)
- sc.lk.Lock()
- defer sc.lk.Unlock()
- if err != nil {
- sc.we = err
- return err
- }
- sc.nwritten++
-
- return nil
-}
-
-// A ClientConn sends request and receives headers over an underlying
-// connection, while respecting the HTTP keepalive logic. ClientConn
-// supports hijacking the connection by calling Hijack to
-// regain control of the underlying net.Conn and deal with it as desired.
-//
-// ClientConn is low-level and should not be needed by most applications.
-// See Client.
-type ClientConn struct {
- lk sync.Mutex // read-write protects the following fields
- c net.Conn
- r *bufio.Reader
- re, we error // read/write errors
- lastbody io.ReadCloser
- nread, nwritten int
- pipereq map[*Request]uint
-
- pipe textproto.Pipeline
- writeReq func(*Request, io.Writer) error
-}
-
-// NewClientConn returns a new ClientConn reading and writing c. If r is not
-// nil, it is the buffer to use when reading c.
-func NewClientConn(c net.Conn, r *bufio.Reader) *ClientConn {
- if r == nil {
- r = bufio.NewReader(c)
- }
- return &ClientConn{
- c: c,
- r: r,
- pipereq: make(map[*Request]uint),
- writeReq: (*Request).Write,
- }
-}
-
-// NewProxyClientConn works like NewClientConn but writes Requests
-// using Request's WriteProxy method.
-func NewProxyClientConn(c net.Conn, r *bufio.Reader) *ClientConn {
- cc := NewClientConn(c, r)
- cc.writeReq = (*Request).WriteProxy
- return cc
-}
-
-// Hijack detaches the ClientConn and returns the underlying connection as well
-// as the read-side bufio which may have some left over data. Hijack may be
-// called before the user or Read has signaled the end of the keep-alive
-// logic. The user should not call Hijack while Read or Write is in progress.
-func (cc *ClientConn) Hijack() (c net.Conn, r *bufio.Reader) {
- cc.lk.Lock()
- defer cc.lk.Unlock()
- c = cc.c
- r = cc.r
- cc.c = nil
- cc.r = nil
- return
-}
-
-// Close calls Hijack and then also closes the underlying connection
-func (cc *ClientConn) Close() error {
- c, _ := cc.Hijack()
- if c != nil {
- return c.Close()
- }
- return nil
-}
-
-// Write writes a request. An ErrPersistEOF error is returned if the connection
-// has been closed in an HTTP keepalive sense. If req.Close equals true, the
-// keepalive connection is logically closed after this request and the opposing
-// server is informed. An ErrUnexpectedEOF indicates the remote closed the
-// underlying TCP connection, which is usually considered a graceful close.
-func (cc *ClientConn) Write(req *Request) (err error) {
-
- // Ensure ordered execution of Writes
- id := cc.pipe.Next()
- cc.pipe.StartRequest(id)
- defer func() {
- cc.pipe.EndRequest(id)
- if err != nil {
- cc.pipe.StartResponse(id)
- cc.pipe.EndResponse(id)
- } else {
- // Remember the pipeline id of this request
- cc.lk.Lock()
- cc.pipereq[req] = id
- cc.lk.Unlock()
- }
- }()
-
- cc.lk.Lock()
- if cc.re != nil { // no point sending if read-side closed or broken
- defer cc.lk.Unlock()
- return cc.re
- }
- if cc.we != nil {
- defer cc.lk.Unlock()
- return cc.we
- }
- if cc.c == nil { // connection closed by user in the meantime
- defer cc.lk.Unlock()
- return os.EBADF
- }
- c := cc.c
- if req.Close {
- // We write the EOF to the write-side error, because there
- // still might be some pipelined reads
- cc.we = ErrPersistEOF
- }
- cc.lk.Unlock()
-
- err = cc.writeReq(req, c)
- cc.lk.Lock()
- defer cc.lk.Unlock()
- if err != nil {
- cc.we = err
- return err
- }
- cc.nwritten++
-
- return nil
-}
-
-// Pending returns the number of unanswered requests
-// that have been sent on the connection.
-func (cc *ClientConn) Pending() int {
- cc.lk.Lock()
- defer cc.lk.Unlock()
- return cc.nwritten - cc.nread
-}
-
-// Read reads the next response from the wire. A valid response might be
-// returned together with an ErrPersistEOF, which means that the remote
-// requested that this be the last request serviced. Read can be called
-// concurrently with Write, but not with another Read.
-func (cc *ClientConn) Read(req *Request) (*Response, error) {
- return cc.readUsing(req, ReadResponse)
-}
-
-// readUsing is the implementation of Read with a replaceable
-// ReadResponse-like function, used by the Transport.
-func (cc *ClientConn) readUsing(req *Request, readRes func(*bufio.Reader, *Request) (*Response, error)) (resp *Response, err error) {
- // Retrieve the pipeline ID of this request/response pair
- cc.lk.Lock()
- id, ok := cc.pipereq[req]
- delete(cc.pipereq, req)
- if !ok {
- cc.lk.Unlock()
- return nil, ErrPipeline
- }
- cc.lk.Unlock()
-
- // Ensure pipeline order
- cc.pipe.StartResponse(id)
- defer cc.pipe.EndResponse(id)
-
- cc.lk.Lock()
- if cc.re != nil {
- defer cc.lk.Unlock()
- return nil, cc.re
- }
- if cc.r == nil { // connection closed by user in the meantime
- defer cc.lk.Unlock()
- return nil, os.EBADF
- }
- r := cc.r
- lastbody := cc.lastbody
- cc.lastbody = nil
- cc.lk.Unlock()
-
- // Make sure body is fully consumed, even if user does not call body.Close
- if lastbody != nil {
- // body.Close is assumed to be idempotent and multiple calls to
-		// it should return the error that its first invocation
- // returned.
- err = lastbody.Close()
- if err != nil {
- cc.lk.Lock()
- defer cc.lk.Unlock()
- cc.re = err
- return nil, err
- }
- }
-
- resp, err = readRes(r, req)
- cc.lk.Lock()
- defer cc.lk.Unlock()
- if err != nil {
- cc.re = err
- return resp, err
- }
- cc.lastbody = resp.Body
-
- cc.nread++
-
- if resp.Close {
- cc.re = ErrPersistEOF // don't send any more requests
- return resp, cc.re
- }
- return resp, err
-}
-
-// Do is a convenience method that writes a request and reads a response.
-func (cc *ClientConn) Do(req *Request) (resp *Response, err error) {
- err = cc.Write(req)
- if err != nil {
- return
- }
- return cc.Read(req)
-}
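
For reference, a rough sketch of using the ClientConn type above for a single keep-alive round trip; the dialed host is a placeholder and the import paths follow this tree:

	package main

	import (
		"fmt"
		"http"
		"net"
	)

	func main() {
		conn, err := net.Dial("tcp", "example.com:80") // placeholder address
		if err != nil {
			fmt.Println("dial:", err)
			return
		}
		cc := http.NewClientConn(conn, nil)
		defer cc.Close()

		req, err := http.NewRequest("GET", "http://example.com/", nil)
		if err != nil {
			fmt.Println("request:", err)
			return
		}
		// Do writes the request and reads the response in pipeline order.
		resp, err := cc.Do(req)
		if err != nil && err != http.ErrPersistEOF {
			fmt.Println("round trip:", err)
			return
		}
		fmt.Println("status:", resp.Status)
	}
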
+++ /dev/null
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package pprof serves via its HTTP server runtime profiling data
-// in the format expected by the pprof visualization tool.
-// For more information about pprof, see
-// http://code.google.com/p/google-perftools/.
-//
-// The package is typically only imported for the side effect of
-// registering its HTTP handlers.
-// The handled paths all begin with /debug/pprof/.
-//
-// To use pprof, link this package into your program:
-// import _ "http/pprof"
-//
-// Then use the pprof tool to look at the heap profile:
-//
-// pprof http://localhost:6060/debug/pprof/heap
-//
-// Or to look at a 30-second CPU profile:
-//
-// pprof http://localhost:6060/debug/pprof/profile
-//
-package pprof
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "http"
- "io"
- "os"
- "runtime"
- "runtime/pprof"
- "strconv"
- "strings"
- "time"
-)
-
-func init() {
- http.Handle("/debug/pprof/cmdline", http.HandlerFunc(Cmdline))
- http.Handle("/debug/pprof/profile", http.HandlerFunc(Profile))
- http.Handle("/debug/pprof/heap", http.HandlerFunc(Heap))
- http.Handle("/debug/pprof/symbol", http.HandlerFunc(Symbol))
-}
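
For reference, a minimal sketch of exposing these handlers from a program, assuming this tree's "http" and "http/pprof" import paths; the listen address is arbitrary:

	package main

	import (
		"http"
		_ "http/pprof" // init registers the /debug/pprof/* handlers above
	)

	func main() {
		// DefaultServeMux now includes the profiling endpoints.
		http.ListenAndServe("localhost:6060", nil)
	}
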
-
-// Cmdline responds with the running program's
-// command line, with arguments separated by NUL bytes.
-// The package initialization registers it as /debug/pprof/cmdline.
-func Cmdline(w http.ResponseWriter, r *http.Request) {
- w.Header().Set("Content-Type", "text/plain; charset=utf-8")
-	fmt.Fprint(w, strings.Join(os.Args, "\x00"))
-}
-
-// Heap responds with the pprof-formatted heap profile.
-// The package initialization registers it as /debug/pprof/heap.
-func Heap(w http.ResponseWriter, r *http.Request) {
- w.Header().Set("Content-Type", "text/plain; charset=utf-8")
- pprof.WriteHeapProfile(w)
-}
-
-// Profile responds with the pprof-formatted cpu profile.
-// The package initialization registers it as /debug/pprof/profile.
-func Profile(w http.ResponseWriter, r *http.Request) {
- sec, _ := strconv.Atoi64(r.FormValue("seconds"))
- if sec == 0 {
- sec = 30
- }
-
- // Set Content Type assuming StartCPUProfile will work,
- // because if it does it starts writing.
- w.Header().Set("Content-Type", "application/octet-stream")
- if err := pprof.StartCPUProfile(w); err != nil {
- // StartCPUProfile failed, so no writes yet.
- // Can change header back to text content
- // and send error code.
- w.Header().Set("Content-Type", "text/plain; charset=utf-8")
- w.WriteHeader(http.StatusInternalServerError)
- fmt.Fprintf(w, "Could not enable CPU profiling: %s\n", err)
- return
- }
- time.Sleep(sec * 1e9)
- pprof.StopCPUProfile()
-}
-
-// Symbol looks up the program counters listed in the request,
-// responding with a table mapping program counters to function names.
-// The package initialization registers it as /debug/pprof/symbol.
-func Symbol(w http.ResponseWriter, r *http.Request) {
- w.Header().Set("Content-Type", "text/plain; charset=utf-8")
-
- // We have to read the whole POST body before
- // writing any output. Buffer the output here.
- var buf bytes.Buffer
-
- // We don't know how many symbols we have, but we
- // do have symbol information. Pprof only cares whether
- // this number is 0 (no symbols available) or > 0.
- fmt.Fprintf(&buf, "num_symbols: 1\n")
-
- var b *bufio.Reader
- if r.Method == "POST" {
- b = bufio.NewReader(r.Body)
- } else {
- b = bufio.NewReader(strings.NewReader(r.URL.RawQuery))
- }
-
- for {
- word, err := b.ReadSlice('+')
- if err == nil {
- word = word[0 : len(word)-1] // trim +
- }
- pc, _ := strconv.Btoui64(string(word), 0)
- if pc != 0 {
- f := runtime.FuncForPC(uintptr(pc))
- if f != nil {
- fmt.Fprintf(&buf, "%#x %s\n", pc, f.Name())
- }
- }
-
- // Wait until here to check for err; the last
- // symbol will have an err because it doesn't end in +.
- if err != nil {
- if err != io.EOF {
- fmt.Fprintf(&buf, "reading request: %v\n", err)
- }
- break
- }
- }
-
- w.Write(buf.Bytes())
-}
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http
-
-import (
- "os"
- "testing"
-)
-
-// TODO(mattn):
-// test ProxyAuth
-
-var UseProxyTests = []struct {
- host string
- match bool
-}{
- // Never proxy localhost:
- {"localhost:80", false},
- {"127.0.0.1", false},
- {"127.0.0.2", false},
- {"[::1]", false},
- {"[::2]", true}, // not a loopback address
-
-	{"barbaz.net", false},     // matches as .barbaz.net
-	{"foobar.com", false},     // has a port appended but still matches
-	{"foofoobar.com", true},   // not matched as part of foobar.com
-	{"baz.com", true},         // not matched as part of barbaz.com
-	{"localhost.net", true},   // not matched as a suffix of an address
-	{"local.localhost", true}, // not matched as a prefix of an address
-	{"barbarbaz.net", true},   // not matched because NO_PROXY has a '.'
-	{"www.foobar.com", true},  // not matched because NO_PROXY is not .foobar.com
-}
-
-func TestUseProxy(t *testing.T) {
- oldenv := os.Getenv("NO_PROXY")
- defer os.Setenv("NO_PROXY", oldenv)
-
- no_proxy := "foobar.com, .barbaz.net"
- os.Setenv("NO_PROXY", no_proxy)
-
- for _, test := range UseProxyTests {
- if useProxy(test.host+":80") != test.match {
- t.Errorf("useProxy(%v) = %v, want %v", test.host, !test.match, test.match)
- }
- }
-}
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http
-
-import (
- "testing"
-)
-
-var ParseRangeTests = []struct {
- s string
- length int64
- r []httpRange
-}{
- {"", 0, nil},
- {"foo", 0, nil},
- {"bytes=", 0, nil},
- {"bytes=5-4", 10, nil},
- {"bytes=0-2,5-4", 10, nil},
- {"bytes=0-9", 10, []httpRange{{0, 10}}},
- {"bytes=0-", 10, []httpRange{{0, 10}}},
- {"bytes=5-", 10, []httpRange{{5, 5}}},
- {"bytes=0-20", 10, []httpRange{{0, 10}}},
- {"bytes=15-,0-5", 10, nil},
- {"bytes=-5", 10, []httpRange{{5, 5}}},
- {"bytes=-15", 10, []httpRange{{0, 10}}},
- {"bytes=0-499", 10000, []httpRange{{0, 500}}},
- {"bytes=500-999", 10000, []httpRange{{500, 500}}},
- {"bytes=-500", 10000, []httpRange{{9500, 500}}},
- {"bytes=9500-", 10000, []httpRange{{9500, 500}}},
- {"bytes=0-0,-1", 10000, []httpRange{{0, 1}, {9999, 1}}},
- {"bytes=500-600,601-999", 10000, []httpRange{{500, 101}, {601, 399}}},
- {"bytes=500-700,601-999", 10000, []httpRange{{500, 201}, {601, 399}}},
-}
-
-func TestParseRange(t *testing.T) {
- for _, test := range ParseRangeTests {
- r := test.r
- ranges, err := parseRange(test.s, test.length)
- if err != nil && r != nil {
- t.Errorf("parseRange(%q) returned error %q", test.s, err)
- }
- if len(ranges) != len(r) {
- t.Errorf("len(parseRange(%q)) = %d, want %d", test.s, len(ranges), len(r))
- continue
- }
- for i := range r {
- if ranges[i].start != r[i].start {
- t.Errorf("parseRange(%q)[%d].start = %d, want %d", test.s, i, ranges[i].start, r[i].start)
- }
- if ranges[i].length != r[i].length {
- t.Errorf("parseRange(%q)[%d].length = %d, want %d", test.s, i, ranges[i].length, r[i].length)
- }
- }
- }
-}
+++ /dev/null
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "io"
- "testing"
- "url"
-)
-
-type reqTest struct {
- Raw string
- Req *Request
- Body string
- Error string
-}
-
-var noError = ""
-var noBody = ""
-
-var reqTests = []reqTest{
- // Baseline test; All Request fields included for template use
- {
- "GET http://www.techcrunch.com/ HTTP/1.1\r\n" +
- "Host: www.techcrunch.com\r\n" +
- "User-Agent: Fake\r\n" +
- "Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\n" +
- "Accept-Language: en-us,en;q=0.5\r\n" +
- "Accept-Encoding: gzip,deflate\r\n" +
- "Accept-Charset: ISO-8859-1,utf-8;q=0.7,*;q=0.7\r\n" +
- "Keep-Alive: 300\r\n" +
- "Content-Length: 7\r\n" +
- "Proxy-Connection: keep-alive\r\n\r\n" +
- "abcdef\n???",
-
- &Request{
- Method: "GET",
- URL: &url.URL{
- Raw: "http://www.techcrunch.com/",
- Scheme: "http",
- RawPath: "/",
- RawAuthority: "www.techcrunch.com",
- RawUserinfo: "",
- Host: "www.techcrunch.com",
- Path: "/",
- RawQuery: "",
- Fragment: "",
- },
- Proto: "HTTP/1.1",
- ProtoMajor: 1,
- ProtoMinor: 1,
- Header: Header{
- "Accept": {"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"},
- "Accept-Language": {"en-us,en;q=0.5"},
- "Accept-Encoding": {"gzip,deflate"},
- "Accept-Charset": {"ISO-8859-1,utf-8;q=0.7,*;q=0.7"},
- "Keep-Alive": {"300"},
- "Proxy-Connection": {"keep-alive"},
- "Content-Length": {"7"},
- "User-Agent": {"Fake"},
- },
- Close: false,
- ContentLength: 7,
- Host: "www.techcrunch.com",
- Form: url.Values{},
- },
-
- "abcdef\n",
-
- noError,
- },
-
- // GET request with no body (the normal case)
- {
- "GET / HTTP/1.1\r\n" +
- "Host: foo.com\r\n\r\n",
-
- &Request{
- Method: "GET",
- URL: &url.URL{
- Raw: "/",
- Path: "/",
- RawPath: "/",
- },
- Proto: "HTTP/1.1",
- ProtoMajor: 1,
- ProtoMinor: 1,
- Close: false,
- ContentLength: 0,
- Host: "foo.com",
- Form: url.Values{},
- },
-
- noBody,
- noError,
- },
-
- // Tests that we don't parse a path that looks like a
- // scheme-relative URI as a scheme-relative URI.
- {
- "GET //user@host/is/actually/a/path/ HTTP/1.1\r\n" +
- "Host: test\r\n\r\n",
-
- &Request{
- Method: "GET",
- URL: &url.URL{
- Raw: "//user@host/is/actually/a/path/",
- Scheme: "",
- RawPath: "//user@host/is/actually/a/path/",
- RawAuthority: "",
- RawUserinfo: "",
- Host: "",
- Path: "//user@host/is/actually/a/path/",
- RawQuery: "",
- Fragment: "",
- },
- Proto: "HTTP/1.1",
- ProtoMajor: 1,
- ProtoMinor: 1,
- Header: Header{},
- Close: false,
- ContentLength: 0,
- Host: "test",
- Form: url.Values{},
- },
-
- noBody,
- noError,
- },
-
- // Tests a bogus abs_path on the Request-Line (RFC 2616 section 5.1.2)
- {
- "GET ../../../../etc/passwd HTTP/1.1\r\n" +
- "Host: test\r\n\r\n",
- nil,
- noBody,
- "parse ../../../../etc/passwd: invalid URI for request",
- },
-
- // Tests missing URL:
- {
- "GET HTTP/1.1\r\n" +
- "Host: test\r\n\r\n",
- nil,
- noBody,
- "parse : empty url",
- },
-}
-
-func TestReadRequest(t *testing.T) {
- for i := range reqTests {
- tt := &reqTests[i]
- var braw bytes.Buffer
- braw.WriteString(tt.Raw)
- req, err := ReadRequest(bufio.NewReader(&braw))
- if err != nil {
- if err.Error() != tt.Error {
- t.Errorf("#%d: error %q, want error %q", i, err.Error(), tt.Error)
- }
- continue
- }
- rbody := req.Body
- req.Body = nil
- diff(t, fmt.Sprintf("#%d Request", i), req, tt.Req)
- var bout bytes.Buffer
- if rbody != nil {
- io.Copy(&bout, rbody)
- rbody.Close()
- }
- body := bout.String()
- if body != tt.Body {
- t.Errorf("#%d: Body = %q want %q", i, body, tt.Body)
- }
- }
-}
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// HTTP Request reading and parsing.
-
-package http
-
-import (
- "bufio"
- "bytes"
- "crypto/tls"
- "encoding/base64"
- "errors"
- "fmt"
- "io"
- "io/ioutil"
- "mime"
- "mime/multipart"
- "net/textproto"
- "strconv"
- "strings"
- "url"
-)
-
-const (
- maxLineLength = 4096 // assumed <= bufio.defaultBufSize
- maxValueLength = 4096
- maxHeaderLines = 1024
- chunkSize = 4 << 10 // 4 KB chunks
- defaultMaxMemory = 32 << 20 // 32 MB
-)
-
-// ErrMissingFile is returned by FormFile when the provided file field name
-// is either not present in the request or not a file field.
-var ErrMissingFile = errors.New("http: no such file")
-
-// HTTP request parsing errors.
-type ProtocolError struct {
- ErrorString string
-}
-
-func (err *ProtocolError) Error() string { return err.ErrorString }
-
-var (
- ErrLineTooLong = &ProtocolError{"header line too long"}
- ErrHeaderTooLong = &ProtocolError{"header too long"}
- ErrShortBody = &ProtocolError{"entity body too short"}
- ErrNotSupported = &ProtocolError{"feature not supported"}
- ErrUnexpectedTrailer = &ProtocolError{"trailer header without chunked transfer encoding"}
- ErrMissingContentLength = &ProtocolError{"missing ContentLength in HEAD response"}
- ErrNotMultipart = &ProtocolError{"request Content-Type isn't multipart/form-data"}
-	ErrMissingBoundary      = &ProtocolError{"no multipart boundary param in Content-Type"}
-)
-
-type badStringError struct {
- what string
- str string
-}
-
-func (e *badStringError) Error() string { return fmt.Sprintf("%s %q", e.what, e.str) }
-
-// Headers that Request.Write handles itself and should be skipped.
-var reqWriteExcludeHeader = map[string]bool{
- "Host": true, // not in Header map anyway
- "User-Agent": true,
- "Content-Length": true,
- "Transfer-Encoding": true,
- "Trailer": true,
-}
-
-var reqWriteExcludeHeaderDump = map[string]bool{
- "Host": true, // not in Header map anyway
- "Content-Length": true,
- "Transfer-Encoding": true,
- "Trailer": true,
-}
-
-// A Request represents a parsed HTTP request header.
-type Request struct {
- Method string // GET, POST, PUT, etc.
- URL *url.URL
-
- // The protocol version for incoming requests.
- // Outgoing requests always use HTTP/1.1.
- Proto string // "HTTP/1.0"
- ProtoMajor int // 1
- ProtoMinor int // 0
-
- // A header maps request lines to their values.
- // If the header says
- //
- // accept-encoding: gzip, deflate
- // Accept-Language: en-us
- // Connection: keep-alive
- //
- // then
- //
- // Header = map[string][]string{
- // "Accept-Encoding": {"gzip, deflate"},
- // "Accept-Language": {"en-us"},
- // "Connection": {"keep-alive"},
- // }
- //
- // HTTP defines that header names are case-insensitive.
- // The request parser implements this by canonicalizing the
- // name, making the first character and any characters
- // following a hyphen uppercase and the rest lowercase.
- Header Header
-
- // The message body.
- Body io.ReadCloser
-
- // ContentLength records the length of the associated content.
- // The value -1 indicates that the length is unknown.
- // Values >= 0 indicate that the given number of bytes may be read from Body.
- ContentLength int64
-
- // TransferEncoding lists the transfer encodings from outermost to innermost.
- // An empty list denotes the "identity" encoding.
- TransferEncoding []string
-
- // Whether to close the connection after replying to this request.
- Close bool
-
- // The host on which the URL is sought.
- // Per RFC 2616, this is either the value of the Host: header
- // or the host name given in the URL itself.
- Host string
-
- // The parsed form. Only available after ParseForm is called.
- Form url.Values
-
- // The parsed multipart form, including file uploads.
- // Only available after ParseMultipartForm is called.
- MultipartForm *multipart.Form
-
- // Trailer maps trailer keys to values. Like for Header, if the
- // response has multiple trailer lines with the same key, they will be
- // concatenated, delimited by commas.
- Trailer Header
-
- // RemoteAddr allows HTTP servers and other software to record
- // the network address that sent the request, usually for
- // logging. This field is not filled in by ReadRequest and
- // has no defined format. The HTTP server in this package
- // sets RemoteAddr to an "IP:port" address before invoking a
- // handler.
- RemoteAddr string
-
- // TLS allows HTTP servers and other software to record
- // information about the TLS connection on which the request
- // was received. This field is not filled in by ReadRequest.
- // The HTTP server in this package sets the field for
- // TLS-enabled connections before invoking a handler;
- // otherwise it leaves the field nil.
- TLS *tls.ConnectionState
-}
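
To make the Header canonicalization rule above concrete, a small hypothetical helper (written as if inside this package, so Header is unqualified):

	func exampleHeaderCanonicalization() string {
		h := make(Header)
		h.Set("accept-encoding", "gzip, deflate") // stored under "Accept-Encoding"
		h.Set("connection", "keep-alive")         // stored under "Connection"
		// Get canonicalizes its argument too, so any capitalization matches.
		return h.Get("ACCEPT-ENCODING") // "gzip, deflate"
	}
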
-
-// ProtoAtLeast returns whether the HTTP protocol used
-// in the request is at least major.minor.
-func (r *Request) ProtoAtLeast(major, minor int) bool {
- return r.ProtoMajor > major ||
- r.ProtoMajor == major && r.ProtoMinor >= minor
-}
-
-// UserAgent returns the client's User-Agent, if sent in the request.
-func (r *Request) UserAgent() string {
- return r.Header.Get("User-Agent")
-}
-
-// Cookies parses and returns the HTTP cookies sent with the request.
-func (r *Request) Cookies() []*Cookie {
- return readCookies(r.Header, "")
-}
-
-var ErrNoCookie = errors.New("http: named cookie not present")
-
-// Cookie returns the named cookie provided in the request or
-// ErrNoCookie if not found.
-func (r *Request) Cookie(name string) (*Cookie, error) {
- for _, c := range readCookies(r.Header, name) {
- return c, nil
- }
- return nil, ErrNoCookie
-}
-
-// AddCookie adds a cookie to the request. Per RFC 6265 section 5.4,
-// AddCookie does not attach more than one Cookie header field. That
-// means all cookies, if any, are written into the same line,
-// separated by semicolon.
-func (r *Request) AddCookie(c *Cookie) {
- s := fmt.Sprintf("%s=%s", sanitizeName(c.Name), sanitizeValue(c.Value))
- if c := r.Header.Get("Cookie"); c != "" {
- r.Header.Set("Cookie", c+"; "+s)
- } else {
- r.Header.Set("Cookie", s)
- }
-}
-
-// Referer returns the referring URL, if sent in the request.
-//
-// Referer is misspelled as in the request itself, a mistake from the
-// earliest days of HTTP. This value can also be fetched from the
-// Header map as Header["Referer"]; the benefit of making it available
-// as a method is that the compiler can diagnose programs that use the
-// alternate (correct English) spelling req.Referrer() but cannot
-// diagnose programs that use Header["Referrer"].
-func (r *Request) Referer() string {
- return r.Header.Get("Referer")
-}
-
-// multipartByReader is a sentinel value.
-// Its presence in Request.MultipartForm indicates that parsing of the request
-// body has been handed off to a MultipartReader instead of ParseMultipartForm.
-var multipartByReader = &multipart.Form{
- Value: make(map[string][]string),
- File: make(map[string][]*multipart.FileHeader),
-}
-
-// MultipartReader returns a MIME multipart reader if this is a
-// multipart/form-data POST request, else returns nil and an error.
-// Use this function instead of ParseMultipartForm to
-// process the request body as a stream.
-func (r *Request) MultipartReader() (*multipart.Reader, error) {
- if r.MultipartForm == multipartByReader {
- return nil, errors.New("http: MultipartReader called twice")
- }
- if r.MultipartForm != nil {
- return nil, errors.New("http: multipart handled by ParseMultipartForm")
- }
- r.MultipartForm = multipartByReader
- return r.multipartReader()
-}
-
-func (r *Request) multipartReader() (*multipart.Reader, error) {
- v := r.Header.Get("Content-Type")
- if v == "" {
- return nil, ErrNotMultipart
- }
- d, params, err := mime.ParseMediaType(v)
- if err != nil || d != "multipart/form-data" {
- return nil, ErrNotMultipart
- }
- boundary, ok := params["boundary"]
- if !ok {
- return nil, ErrMissingBoundary
- }
- return multipart.NewReader(r.Body, boundary), nil
-}
-
-// Return value if nonempty, def otherwise.
-func valueOrDefault(value, def string) string {
- if value != "" {
- return value
- }
- return def
-}
-
-const defaultUserAgent = "Go http package"
-
-// Write writes an HTTP/1.1 request -- header and body -- in wire format.
-// This method consults the following fields of req:
-// Host
-// URL
-// Method (defaults to "GET")
-// Header
-// ContentLength
-// TransferEncoding
-// Body
-//
-// If Body is present, Content-Length is <= 0 and TransferEncoding
-// hasn't been set to "identity", Write adds "Transfer-Encoding:
-// chunked" to the header. Body is closed after it is sent.
-func (req *Request) Write(w io.Writer) error {
- return req.write(w, false, nil)
-}
-
-// WriteProxy is like Write but writes the request in the form
-// expected by an HTTP proxy. In particular, WriteProxy writes the
-// initial Request-URI line of the request with an absolute URI, per
-// section 5.1.2 of RFC 2616, including the scheme and host. In
-// either case, WriteProxy also writes a Host header, using either
-// req.Host or req.URL.Host.
-func (req *Request) WriteProxy(w io.Writer) error {
- return req.write(w, true, nil)
-}
-
-func (req *Request) dumpWrite(w io.Writer) error {
- // TODO(bradfitz): RawPath here?
- urlStr := valueOrDefault(req.URL.EncodedPath(), "/")
- if req.URL.RawQuery != "" {
- urlStr += "?" + req.URL.RawQuery
- }
-
- bw := bufio.NewWriter(w)
- fmt.Fprintf(bw, "%s %s HTTP/%d.%d\r\n", valueOrDefault(req.Method, "GET"), urlStr,
- req.ProtoMajor, req.ProtoMinor)
-
- host := req.Host
- if host == "" && req.URL != nil {
- host = req.URL.Host
- }
- if host != "" {
- fmt.Fprintf(bw, "Host: %s\r\n", host)
- }
-
-	// Process Body, ContentLength, Close, and Trailer
- tw, err := newTransferWriter(req)
- if err != nil {
- return err
- }
- err = tw.WriteHeader(bw)
- if err != nil {
- return err
- }
-
- err = req.Header.WriteSubset(bw, reqWriteExcludeHeaderDump)
- if err != nil {
- return err
- }
-
- io.WriteString(bw, "\r\n")
-
- // Write body and trailer
- err = tw.WriteBody(bw)
- if err != nil {
- return err
- }
- bw.Flush()
- return nil
-}
-
-// extraHeaders may be nil
-func (req *Request) write(w io.Writer, usingProxy bool, extraHeaders Header) error {
- host := req.Host
- if host == "" {
- if req.URL == nil {
- return errors.New("http: Request.Write on Request with no Host or URL set")
- }
- host = req.URL.Host
- }
-
- urlStr := req.URL.RawPath
- if strings.HasPrefix(urlStr, "?") {
- urlStr = "/" + urlStr // Issue 2344
- }
- if urlStr == "" {
- urlStr = valueOrDefault(req.URL.RawPath, valueOrDefault(req.URL.EncodedPath(), "/"))
- if req.URL.RawQuery != "" {
- urlStr += "?" + req.URL.RawQuery
- }
- if usingProxy {
- if urlStr == "" || urlStr[0] != '/' {
- urlStr = "/" + urlStr
- }
- urlStr = req.URL.Scheme + "://" + host + urlStr
- }
- }
- // TODO(bradfitz): escape at least newlines in urlStr?
-
- bw := bufio.NewWriter(w)
- fmt.Fprintf(bw, "%s %s HTTP/1.1\r\n", valueOrDefault(req.Method, "GET"), urlStr)
-
- // Header lines
- fmt.Fprintf(bw, "Host: %s\r\n", host)
-
- // Use the defaultUserAgent unless the Header contains one, which
- // may be blank to not send the header.
- userAgent := defaultUserAgent
- if req.Header != nil {
- if ua := req.Header["User-Agent"]; len(ua) > 0 {
- userAgent = ua[0]
- }
- }
- if userAgent != "" {
- fmt.Fprintf(bw, "User-Agent: %s\r\n", userAgent)
- }
-
-	// Process Body, ContentLength, Close, and Trailer
- tw, err := newTransferWriter(req)
- if err != nil {
- return err
- }
- err = tw.WriteHeader(bw)
- if err != nil {
- return err
- }
-
- // TODO: split long values? (If so, should share code with Conn.Write)
- err = req.Header.WriteSubset(bw, reqWriteExcludeHeader)
- if err != nil {
- return err
- }
-
- if extraHeaders != nil {
- err = extraHeaders.Write(bw)
- if err != nil {
- return err
- }
- }
-
- io.WriteString(bw, "\r\n")
-
- // Write body and trailer
- err = tw.WriteBody(bw)
- if err != nil {
- return err
- }
- bw.Flush()
- return nil
-}
-
-// Read a line of bytes (up to \n) from b.
-// Give up if the line exceeds maxLineLength.
-// The returned bytes are a pointer into storage in
-// the bufio, so they are only valid until the next bufio read.
-func readLineBytes(b *bufio.Reader) (p []byte, err error) {
- if p, err = b.ReadSlice('\n'); err != nil {
- // We always know when EOF is coming.
- // If the caller asked for a line, there should be a line.
- if err == io.EOF {
- err = io.ErrUnexpectedEOF
- } else if err == bufio.ErrBufferFull {
- err = ErrLineTooLong
- }
- return nil, err
- }
- if len(p) >= maxLineLength {
- return nil, ErrLineTooLong
- }
-
- // Chop off trailing white space.
- var i int
- for i = len(p); i > 0; i-- {
- if c := p[i-1]; c != ' ' && c != '\r' && c != '\t' && c != '\n' {
- break
- }
- }
- return p[0:i], nil
-}
-
-// readLineBytes, but convert the bytes into a string.
-func readLine(b *bufio.Reader) (s string, err error) {
- p, e := readLineBytes(b)
- if e != nil {
- return "", e
- }
- return string(p), nil
-}
-
-// Convert decimal at s[i:len(s)] to integer,
-// returning value, string position where the digits stopped,
-// and whether there was a valid number (digits, not too big).
-func atoi(s string, i int) (n, i1 int, ok bool) {
- const Big = 1000000
- if i >= len(s) || s[i] < '0' || s[i] > '9' {
- return 0, 0, false
- }
- n = 0
- for ; i < len(s) && '0' <= s[i] && s[i] <= '9'; i++ {
- n = n*10 + int(s[i]-'0')
- if n > Big {
- return 0, 0, false
- }
- }
- return n, i, true
-}
-
-// ParseHTTPVersion parses an HTTP version string.
-// "HTTP/1.0" returns (1, 0, true).
-func ParseHTTPVersion(vers string) (major, minor int, ok bool) {
- if len(vers) < 5 || vers[0:5] != "HTTP/" {
- return 0, 0, false
- }
- major, i, ok := atoi(vers, 5)
- if !ok || i >= len(vers) || vers[i] != '.' {
- return 0, 0, false
- }
- minor, i, ok = atoi(vers, i+1)
- if !ok || i != len(vers) {
- return 0, 0, false
- }
- return major, minor, true
-}
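
A tiny hypothetical example of the expected results (written as if inside this package):

	func exampleParseHTTPVersion() (major, minor int, ok bool) {
		// "HTTP/1.1" yields (1, 1, true); malformed inputs such as "HTTP/x.y"
		// yield (0, 0, false).
		return ParseHTTPVersion("HTTP/1.1")
	}
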
-
-type chunkedReader struct {
- r *bufio.Reader
- n uint64 // unread bytes in chunk
- err error
-}
-
-func (cr *chunkedReader) beginChunk() {
- // chunk-size CRLF
- var line string
- line, cr.err = readLine(cr.r)
- if cr.err != nil {
- return
- }
- cr.n, cr.err = strconv.Btoui64(line, 16)
- if cr.err != nil {
- return
- }
- if cr.n == 0 {
- // trailer CRLF
- for {
- line, cr.err = readLine(cr.r)
- if cr.err != nil {
- return
- }
- if line == "" {
- break
- }
- }
- cr.err = io.EOF
- }
-}
-
-func (cr *chunkedReader) Read(b []uint8) (n int, err error) {
- if cr.err != nil {
- return 0, cr.err
- }
- if cr.n == 0 {
- cr.beginChunk()
- if cr.err != nil {
- return 0, cr.err
- }
- }
- if uint64(len(b)) > cr.n {
- b = b[0:cr.n]
- }
- n, cr.err = cr.r.Read(b)
- cr.n -= uint64(n)
- if cr.n == 0 && cr.err == nil {
- // end of chunk (CRLF)
- b := make([]byte, 2)
- if _, cr.err = io.ReadFull(cr.r, b); cr.err == nil {
- if b[0] != '\r' || b[1] != '\n' {
- cr.err = errors.New("malformed chunked encoding")
- }
- }
- }
- return n, cr.err
-}
-
-// NewRequest returns a new Request given a method, URL, and optional body.
-func NewRequest(method, urlStr string, body io.Reader) (*Request, error) {
- u, err := url.Parse(urlStr)
- if err != nil {
- return nil, err
- }
- rc, ok := body.(io.ReadCloser)
- if !ok && body != nil {
- rc = ioutil.NopCloser(body)
- }
- req := &Request{
- Method: method,
- URL: u,
- Proto: "HTTP/1.1",
- ProtoMajor: 1,
- ProtoMinor: 1,
- Header: make(Header),
- Body: rc,
- Host: u.Host,
- }
- if body != nil {
- switch v := body.(type) {
- case *strings.Reader:
- req.ContentLength = int64(v.Len())
- case *bytes.Buffer:
- req.ContentLength = int64(v.Len())
- }
- }
-
- return req, nil
-}
-
-// SetBasicAuth sets the request's Authorization header to use HTTP
-// Basic Authentication with the provided username and password.
-//
-// With HTTP Basic Authentication the provided username and password
-// are not encrypted.
-func (r *Request) SetBasicAuth(username, password string) {
- s := username + ":" + password
- r.Header.Set("Authorization", "Basic "+base64.StdEncoding.EncodeToString([]byte(s)))
-}
-
-// ReadRequest reads and parses a request from b.
-func ReadRequest(b *bufio.Reader) (req *Request, err error) {
-
- tp := textproto.NewReader(b)
- req = new(Request)
-
- // First line: GET /index.html HTTP/1.0
- var s string
- if s, err = tp.ReadLine(); err != nil {
- if err == io.EOF {
- err = io.ErrUnexpectedEOF
- }
- return nil, err
- }
-
- var f []string
- if f = strings.SplitN(s, " ", 3); len(f) < 3 {
- return nil, &badStringError{"malformed HTTP request", s}
- }
- var rawurl string
- req.Method, rawurl, req.Proto = f[0], f[1], f[2]
- var ok bool
- if req.ProtoMajor, req.ProtoMinor, ok = ParseHTTPVersion(req.Proto); !ok {
- return nil, &badStringError{"malformed HTTP version", req.Proto}
- }
-
- if req.URL, err = url.ParseRequest(rawurl); err != nil {
- return nil, err
- }
-
- // Subsequent lines: Key: value.
- mimeHeader, err := tp.ReadMIMEHeader()
- if err != nil {
- return nil, err
- }
- req.Header = Header(mimeHeader)
-
- // RFC2616: Must treat
- // GET /index.html HTTP/1.1
- // Host: www.google.com
- // and
- // GET http://www.google.com/index.html HTTP/1.1
- // Host: doesntmatter
- // the same. In the second case, any Host line is ignored.
- req.Host = req.URL.Host
- if req.Host == "" {
- req.Host = req.Header.Get("Host")
- }
- req.Header.Del("Host")
-
- fixPragmaCacheControl(req.Header)
-
- // TODO: Parse specific header values:
- // Accept
- // Accept-Encoding
- // Accept-Language
- // Authorization
- // Cache-Control
- // Connection
- // Date
- // Expect
- // From
- // If-Match
- // If-Modified-Since
- // If-None-Match
- // If-Range
- // If-Unmodified-Since
- // Max-Forwards
- // Proxy-Authorization
- // Referer [sic]
- // TE (transfer-codings)
- // Trailer
- // Transfer-Encoding
- // Upgrade
- // User-Agent
- // Via
- // Warning
-
- err = readTransfer(req, b)
- if err != nil {
- return nil, err
- }
-
- return req, nil
-}
-
-// MaxBytesReader is similar to io.LimitReader but is intended for
-// limiting the size of incoming request bodies. In contrast to
-// io.LimitReader, MaxBytesReader's result is a ReadCloser, returns a
-// non-EOF error for a Read beyond the limit, and Closes the
-// underlying reader when its Close method is called.
-//
-// MaxBytesReader prevents clients from accidentally or maliciously
-// sending a large request and wasting server resources.
-func MaxBytesReader(w ResponseWriter, r io.ReadCloser, n int64) io.ReadCloser {
- return &maxBytesReader{w: w, r: r, n: n}
-}
-
-type maxBytesReader struct {
- w ResponseWriter
- r io.ReadCloser // underlying reader
- n int64 // max bytes remaining
- stopped bool
-}
-
-func (l *maxBytesReader) Read(p []byte) (n int, err error) {
- if l.n <= 0 {
- if !l.stopped {
- l.stopped = true
- if res, ok := l.w.(*response); ok {
- res.requestTooLarge()
- }
- }
- return 0, errors.New("http: request body too large")
- }
- if int64(len(p)) > l.n {
- p = p[:l.n]
- }
- n, err = l.r.Read(p)
- l.n -= int64(n)
- return
-}
-
-func (l *maxBytesReader) Close() error {
- return l.r.Close()
-}
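
For reference, a hypothetical handler showing how MaxBytesReader is meant to wrap a request body before form parsing; the 1 MB limit is an arbitrary example, and the sketch assumes this tree's "http" import path:

	func limitedHandler(w http.ResponseWriter, r *http.Request) {
		// Cap the body; reads beyond the limit fail with a non-EOF error.
		r.Body = http.MaxBytesReader(w, r.Body, 1<<20)
		if err := r.ParseForm(); err != nil {
			http.Error(w, "request body too large or malformed", http.StatusBadRequest)
			return
		}
		// ... use r.Form ...
	}
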
-
-// ParseForm parses the raw query from the URL.
-//
-// For POST or PUT requests, it also parses the request body as a form.
-// If the request Body's size has not already been limited by MaxBytesReader,
-// the size is capped at 10MB.
-//
-// ParseMultipartForm calls ParseForm automatically.
-// It is idempotent.
-func (r *Request) ParseForm() (err error) {
- if r.Form != nil {
- return
- }
- if r.URL != nil {
- r.Form, err = url.ParseQuery(r.URL.RawQuery)
- }
- if r.Method == "POST" || r.Method == "PUT" {
- if r.Body == nil {
- return errors.New("missing form body")
- }
- ct := r.Header.Get("Content-Type")
- ct, _, err := mime.ParseMediaType(ct)
- switch {
- case ct == "text/plain" || ct == "application/x-www-form-urlencoded" || ct == "":
- var reader io.Reader = r.Body
- maxFormSize := int64(1<<63 - 1)
- if _, ok := r.Body.(*maxBytesReader); !ok {
- maxFormSize = int64(10 << 20) // 10 MB is a lot of text.
- reader = io.LimitReader(r.Body, maxFormSize+1)
- }
- b, e := ioutil.ReadAll(reader)
- if e != nil {
- if err == nil {
- err = e
- }
- break
- }
- if int64(len(b)) > maxFormSize {
- return errors.New("http: POST too large")
- }
- var newValues url.Values
- newValues, e = url.ParseQuery(string(b))
- if err == nil {
- err = e
- }
- if r.Form == nil {
- r.Form = make(url.Values)
- }
- // Copy values into r.Form. TODO: make this smoother.
- for k, vs := range newValues {
- for _, value := range vs {
- r.Form.Add(k, value)
- }
- }
- case ct == "multipart/form-data":
- // handled by ParseMultipartForm (which is calling us, or should be)
- // TODO(bradfitz): there are too many possible
- // orders to call too many functions here.
- // Clean this up and write more tests.
- // request_test.go contains the start of this,
- // in TestRequestMultipartCallOrder.
- default:
- return &badStringError{"unknown Content-Type", ct}
- }
- }
- return err
-}
-
-// ParseMultipartForm parses a request body as multipart/form-data.
-// The whole request body is parsed and up to a total of maxMemory bytes of
-// its file parts are stored in memory, with the remainder stored on
-// disk in temporary files.
-// ParseMultipartForm calls ParseForm if necessary.
-// After one call to ParseMultipartForm, subsequent calls have no effect.
-func (r *Request) ParseMultipartForm(maxMemory int64) error {
- if r.MultipartForm == multipartByReader {
- return errors.New("http: multipart handled by MultipartReader")
- }
- if r.Form == nil {
- err := r.ParseForm()
- if err != nil {
- return err
- }
- }
- if r.MultipartForm != nil {
- return nil
- }
-
- mr, err := r.multipartReader()
- if err == ErrNotMultipart {
- return nil
- } else if err != nil {
- return err
- }
-
- f, err := mr.ReadForm(maxMemory)
- if err != nil {
- return err
- }
- for k, v := range f.Value {
- r.Form[k] = append(r.Form[k], v...)
- }
- r.MultipartForm = f
-
- return nil
-}
-
-// FormValue returns the first value for the named component of the query.
-// FormValue calls ParseMultipartForm and ParseForm if necessary.
-func (r *Request) FormValue(key string) string {
- if r.Form == nil {
- r.ParseMultipartForm(defaultMaxMemory)
- }
- if vs := r.Form[key]; len(vs) > 0 {
- return vs[0]
- }
- return ""
-}
-
-// FormFile returns the first file for the provided form key.
-// FormFile calls ParseMultipartForm and ParseForm if necessary.
-func (r *Request) FormFile(key string) (multipart.File, *multipart.FileHeader, error) {
- if r.MultipartForm == multipartByReader {
- return nil, nil, errors.New("http: multipart handled by MultipartReader")
- }
- if r.MultipartForm == nil {
- err := r.ParseMultipartForm(defaultMaxMemory)
- if err != nil {
- return nil, nil, err
- }
- }
- if r.MultipartForm != nil && r.MultipartForm.File != nil {
- if fhs := r.MultipartForm.File[key]; len(fhs) > 0 {
- f, err := fhs[0].Open()
- return f, fhs[0], err
- }
- }
- return nil, nil, ErrMissingFile
-}
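
For reference, a hypothetical handler illustrating the FormFile flow above; the field name "upload" is a placeholder, and the sketch assumes imports of "bytes", "fmt", "io", and this tree's "http" package:

	func uploadHandler(w http.ResponseWriter, r *http.Request) {
		file, header, err := r.FormFile("upload") // parses the multipart form if needed
		if err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		defer file.Close()

		var buf bytes.Buffer
		if _, err := io.Copy(&buf, file); err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		fmt.Fprintf(w, "received %q (%d bytes)\n", header.Filename, buf.Len())
	}
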
-
-func (r *Request) expectsContinue() bool {
- return strings.ToLower(r.Header.Get("Expect")) == "100-continue"
-}
-
-func (r *Request) wantsHttp10KeepAlive() bool {
- if r.ProtoMajor != 1 || r.ProtoMinor != 0 {
- return false
- }
- return strings.Contains(strings.ToLower(r.Header.Get("Connection")), "keep-alive")
-}
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http_test
-
-import (
- "bytes"
- "fmt"
- . "http"
- "http/httptest"
- "io"
- "io/ioutil"
- "mime/multipart"
- "os"
- "reflect"
- "regexp"
- "strings"
- "testing"
- "url"
-)
-
-func TestQuery(t *testing.T) {
- req := &Request{Method: "GET"}
- req.URL, _ = url.Parse("http://www.google.com/search?q=foo&q=bar")
- if q := req.FormValue("q"); q != "foo" {
- t.Errorf(`req.FormValue("q") = %q, want "foo"`, q)
- }
-}
-
-func TestPostQuery(t *testing.T) {
- req := &Request{Method: "POST"}
- req.URL, _ = url.Parse("http://www.google.com/search?q=foo&q=bar&both=x")
- req.Header = Header{
- "Content-Type": {"application/x-www-form-urlencoded; boo!"},
- }
- req.Body = ioutil.NopCloser(strings.NewReader("z=post&both=y"))
- if q := req.FormValue("q"); q != "foo" {
- t.Errorf(`req.FormValue("q") = %q, want "foo"`, q)
- }
- if z := req.FormValue("z"); z != "post" {
- t.Errorf(`req.FormValue("z") = %q, want "post"`, z)
- }
- if both := req.Form["both"]; !reflect.DeepEqual(both, []string{"x", "y"}) {
- t.Errorf(`req.FormValue("both") = %q, want ["x", "y"]`, both)
- }
-}
-
-type stringMap map[string][]string
-type parseContentTypeTest struct {
- contentType stringMap
- err bool
-}
-
-var parseContentTypeTests = []parseContentTypeTest{
- {contentType: stringMap{"Content-Type": {"text/plain"}}},
- {contentType: stringMap{}}, // Non-existent keys are not placed. The value nil is illegal.
- {contentType: stringMap{"Content-Type": {"text/plain; boundary="}}},
- {
- contentType: stringMap{"Content-Type": {"application/unknown"}},
- err: true,
- },
-}
-
-func TestPostContentTypeParsing(t *testing.T) {
- for i, test := range parseContentTypeTests {
- req := &Request{
- Method: "POST",
- Header: Header(test.contentType),
- Body: ioutil.NopCloser(bytes.NewBufferString("body")),
- }
- err := req.ParseForm()
- if !test.err && err != nil {
- t.Errorf("test %d: Unexpected error: %v", i, err)
- }
- if test.err && err == nil {
- t.Errorf("test %d should have returned error", i)
- }
- }
-}
-
-func TestMultipartReader(t *testing.T) {
- req := &Request{
- Method: "POST",
- Header: Header{"Content-Type": {`multipart/form-data; boundary="foo123"`}},
- Body: ioutil.NopCloser(new(bytes.Buffer)),
- }
- multipart, err := req.MultipartReader()
- if multipart == nil {
- t.Errorf("expected multipart; error: %v", err)
- }
-
- req.Header = Header{"Content-Type": {"text/plain"}}
- multipart, err = req.MultipartReader()
- if multipart != nil {
- t.Errorf("unexpected multipart for text/plain")
- }
-}
-
-func TestRedirect(t *testing.T) {
- ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
- switch r.URL.Path {
- case "/":
- w.Header().Set("Location", "/foo/")
- w.WriteHeader(StatusSeeOther)
- case "/foo/":
- fmt.Fprintf(w, "foo")
- default:
- w.WriteHeader(StatusBadRequest)
- }
- }))
- defer ts.Close()
-
- var end = regexp.MustCompile("/foo/$")
- r, err := Get(ts.URL)
- if err != nil {
- t.Fatal(err)
- }
- r.Body.Close()
- url := r.Request.URL.String()
- if r.StatusCode != 200 || !end.MatchString(url) {
- t.Fatalf("Get got status %d at %q, want 200 matching /foo/$", r.StatusCode, url)
- }
-}
-
-func TestSetBasicAuth(t *testing.T) {
- r, _ := NewRequest("GET", "http://example.com/", nil)
- r.SetBasicAuth("Aladdin", "open sesame")
- if g, e := r.Header.Get("Authorization"), "Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ=="; g != e {
- t.Errorf("got header %q, want %q", g, e)
- }
-}
-
-func TestMultipartRequest(t *testing.T) {
- // Test that we can read the values and files of a
- // multipart request with FormValue and FormFile,
- // and that ParseMultipartForm can be called multiple times.
- req := newTestMultipartRequest(t)
- if err := req.ParseMultipartForm(25); err != nil {
- t.Fatal("ParseMultipartForm first call:", err)
- }
- defer req.MultipartForm.RemoveAll()
- validateTestMultipartContents(t, req, false)
- if err := req.ParseMultipartForm(25); err != nil {
- t.Fatal("ParseMultipartForm second call:", err)
- }
- validateTestMultipartContents(t, req, false)
-}
-
-func TestMultipartRequestAuto(t *testing.T) {
- // Test that FormValue and FormFile automatically invoke
- // ParseMultipartForm and return the right values.
- req := newTestMultipartRequest(t)
- defer func() {
- if req.MultipartForm != nil {
- req.MultipartForm.RemoveAll()
- }
- }()
- validateTestMultipartContents(t, req, true)
-}
-
-func TestEmptyMultipartRequest(t *testing.T) {
-	// Test that FormFile on a request with no multipart body
-	// reports ErrMissingFile rather than succeeding.
- req, err := NewRequest("GET", "/", nil)
- if err != nil {
- t.Errorf("NewRequest err = %q", err)
- }
- testMissingFile(t, req)
-}
-
-func TestRequestMultipartCallOrder(t *testing.T) {
- req := newTestMultipartRequest(t)
- _, err := req.MultipartReader()
- if err != nil {
- t.Fatalf("MultipartReader: %v", err)
- }
- err = req.ParseMultipartForm(1024)
- if err == nil {
- t.Errorf("expected an error from ParseMultipartForm after call to MultipartReader")
- }
-}
-
-func testMissingFile(t *testing.T, req *Request) {
- f, fh, err := req.FormFile("missing")
- if f != nil {
- t.Errorf("FormFile file = %q, want nil", f)
- }
- if fh != nil {
- t.Errorf("FormFile file header = %q, want nil", fh)
- }
- if err != ErrMissingFile {
- t.Errorf("FormFile err = %q, want ErrMissingFile", err)
- }
-}
-
-func newTestMultipartRequest(t *testing.T) *Request {
- b := bytes.NewBufferString(strings.Replace(message, "\n", "\r\n", -1))
- req, err := NewRequest("POST", "/", b)
- if err != nil {
- t.Fatal("NewRequest:", err)
- }
- ctype := fmt.Sprintf(`multipart/form-data; boundary="%s"`, boundary)
- req.Header.Set("Content-type", ctype)
- return req
-}
-
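-// validateTestMultipartContents checks the parsed form values and files
-// of req; allMem reports whether every file part is expected to be held
-// in memory rather than spilled to an *os.File.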
-func validateTestMultipartContents(t *testing.T, req *Request, allMem bool) {
- if g, e := req.FormValue("texta"), textaValue; g != e {
- t.Errorf("texta value = %q, want %q", g, e)
- }
-	if g, e := req.FormValue("textb"), textbValue; g != e {
-		t.Errorf("textb value = %q, want %q", g, e)
-	}
- if g := req.FormValue("missing"); g != "" {
- t.Errorf("missing value = %q, want empty string", g)
- }
-
- assertMem := func(n string, fd multipart.File) {
- if _, ok := fd.(*os.File); ok {
- t.Error(n, " is *os.File, should not be")
- }
- }
- fd := testMultipartFile(t, req, "filea", "filea.txt", fileaContents)
- assertMem("filea", fd)
- fd = testMultipartFile(t, req, "fileb", "fileb.txt", filebContents)
- if allMem {
- assertMem("fileb", fd)
- } else {
- if _, ok := fd.(*os.File); !ok {
- t.Errorf("fileb has unexpected underlying type %T", fd)
- }
- }
-
- testMissingFile(t, req)
-}
-
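-// testMultipartFile looks up one uploaded file via FormFile, checks its
-// filename and contents, and returns the open multipart.File.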
-func testMultipartFile(t *testing.T, req *Request, key, expectFilename, expectContent string) multipart.File {
- f, fh, err := req.FormFile(key)
- if err != nil {
- t.Fatalf("FormFile(%q): %q", key, err)
- }
- if fh.Filename != expectFilename {
- t.Errorf("filename = %q, want %q", fh.Filename, expectFilename)
- }
- var b bytes.Buffer
- _, err = io.Copy(&b, f)
- if err != nil {
- t.Fatal("copying contents:", err)
- }
- if g := b.String(); g != expectContent {
- t.Errorf("contents = %q, want %q", g, expectContent)
- }
- return f
-}
-
-const (
- fileaContents = "This is a test file."
- filebContents = "Another test file."
- textaValue = "foo"
- textbValue = "bar"
- boundary = `MyBoundary`
-)
-
-const message = `
---MyBoundary
-Content-Disposition: form-data; name="filea"; filename="filea.txt"
-Content-Type: text/plain
-
-` + fileaContents + `
---MyBoundary
-Content-Disposition: form-data; name="fileb"; filename="fileb.txt"
-Content-Type: text/plain
-
-` + filebContents + `
---MyBoundary
-Content-Disposition: form-data; name="texta"
-
-` + textaValue + `
---MyBoundary
-Content-Disposition: form-data; name="textb"
-
-` + textbValue + `
---MyBoundary--
-`
+++ /dev/null
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http
-
-import (
- "bytes"
- "errors"
- "fmt"
- "io"
- "io/ioutil"
- "strings"
- "testing"
- "url"
-)
-
-type reqWriteTest struct {
- Req Request
- Body interface{} // optional []byte or func() io.ReadCloser to populate Req.Body
-
- // Any of these three may be empty to skip that test.
- WantWrite string // Request.Write
- WantProxy string // Request.WriteProxy
- WantDump string // DumpRequest
-
- WantError error // wanted error from Request.Write
-}
-
-var reqWriteTests = []reqWriteTest{
- // HTTP/1.1 => chunked coding; no body; no trailer
- {
- Req: Request{
- Method: "GET",
- URL: &url.URL{
- Raw: "http://www.techcrunch.com/",
- Scheme: "http",
- RawPath: "http://www.techcrunch.com/",
- RawAuthority: "www.techcrunch.com",
- RawUserinfo: "",
- Host: "www.techcrunch.com",
- Path: "/",
- RawQuery: "",
- Fragment: "",
- },
- Proto: "HTTP/1.1",
- ProtoMajor: 1,
- ProtoMinor: 1,
- Header: Header{
- "Accept": {"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"},
- "Accept-Charset": {"ISO-8859-1,utf-8;q=0.7,*;q=0.7"},
- "Accept-Encoding": {"gzip,deflate"},
- "Accept-Language": {"en-us,en;q=0.5"},
- "Keep-Alive": {"300"},
- "Proxy-Connection": {"keep-alive"},
- "User-Agent": {"Fake"},
- },
- Body: nil,
- Close: false,
- Host: "www.techcrunch.com",
- Form: map[string][]string{},
- },
-
- WantWrite: "GET http://www.techcrunch.com/ HTTP/1.1\r\n" +
- "Host: www.techcrunch.com\r\n" +
- "User-Agent: Fake\r\n" +
- "Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\n" +
- "Accept-Charset: ISO-8859-1,utf-8;q=0.7,*;q=0.7\r\n" +
- "Accept-Encoding: gzip,deflate\r\n" +
- "Accept-Language: en-us,en;q=0.5\r\n" +
- "Keep-Alive: 300\r\n" +
- "Proxy-Connection: keep-alive\r\n\r\n",
-
- WantProxy: "GET http://www.techcrunch.com/ HTTP/1.1\r\n" +
- "Host: www.techcrunch.com\r\n" +
- "User-Agent: Fake\r\n" +
- "Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\n" +
- "Accept-Charset: ISO-8859-1,utf-8;q=0.7,*;q=0.7\r\n" +
- "Accept-Encoding: gzip,deflate\r\n" +
- "Accept-Language: en-us,en;q=0.5\r\n" +
- "Keep-Alive: 300\r\n" +
- "Proxy-Connection: keep-alive\r\n\r\n",
- },
- // HTTP/1.1 => chunked coding; body; empty trailer
- {
- Req: Request{
- Method: "GET",
- URL: &url.URL{
- Scheme: "http",
- Host: "www.google.com",
- Path: "/search",
- },
- ProtoMajor: 1,
- ProtoMinor: 1,
- Header: Header{},
- TransferEncoding: []string{"chunked"},
- },
-
- Body: []byte("abcdef"),
-
- WantWrite: "GET /search HTTP/1.1\r\n" +
- "Host: www.google.com\r\n" +
- "User-Agent: Go http package\r\n" +
- "Transfer-Encoding: chunked\r\n\r\n" +
- chunk("abcdef") + chunk(""),
-
- WantProxy: "GET http://www.google.com/search HTTP/1.1\r\n" +
- "Host: www.google.com\r\n" +
- "User-Agent: Go http package\r\n" +
- "Transfer-Encoding: chunked\r\n\r\n" +
- chunk("abcdef") + chunk(""),
-
- WantDump: "GET /search HTTP/1.1\r\n" +
- "Host: www.google.com\r\n" +
- "Transfer-Encoding: chunked\r\n\r\n" +
- chunk("abcdef") + chunk(""),
- },
- // HTTP/1.1 POST => chunked coding; body; empty trailer
- {
- Req: Request{
- Method: "POST",
- URL: &url.URL{
- Scheme: "http",
- Host: "www.google.com",
- Path: "/search",
- },
- ProtoMajor: 1,
- ProtoMinor: 1,
- Header: Header{},
- Close: true,
- TransferEncoding: []string{"chunked"},
- },
-
- Body: []byte("abcdef"),
-
- WantWrite: "POST /search HTTP/1.1\r\n" +
- "Host: www.google.com\r\n" +
- "User-Agent: Go http package\r\n" +
- "Connection: close\r\n" +
- "Transfer-Encoding: chunked\r\n\r\n" +
- chunk("abcdef") + chunk(""),
-
- WantProxy: "POST http://www.google.com/search HTTP/1.1\r\n" +
- "Host: www.google.com\r\n" +
- "User-Agent: Go http package\r\n" +
- "Connection: close\r\n" +
- "Transfer-Encoding: chunked\r\n\r\n" +
- chunk("abcdef") + chunk(""),
- },
-
- // HTTP/1.1 POST with Content-Length, no chunking
- {
- Req: Request{
- Method: "POST",
- URL: &url.URL{
- Scheme: "http",
- Host: "www.google.com",
- Path: "/search",
- },
- ProtoMajor: 1,
- ProtoMinor: 1,
- Header: Header{},
- Close: true,
- ContentLength: 6,
- },
-
- Body: []byte("abcdef"),
-
- WantWrite: "POST /search HTTP/1.1\r\n" +
- "Host: www.google.com\r\n" +
- "User-Agent: Go http package\r\n" +
- "Connection: close\r\n" +
- "Content-Length: 6\r\n" +
- "\r\n" +
- "abcdef",
-
- WantProxy: "POST http://www.google.com/search HTTP/1.1\r\n" +
- "Host: www.google.com\r\n" +
- "User-Agent: Go http package\r\n" +
- "Connection: close\r\n" +
- "Content-Length: 6\r\n" +
- "\r\n" +
- "abcdef",
- },
-
- // HTTP/1.1 POST with Content-Length in headers
- {
- Req: Request{
- Method: "POST",
- URL: mustParseURL("http://example.com/"),
- Host: "example.com",
- Header: Header{
- "Content-Length": []string{"10"}, // ignored
- },
- ContentLength: 6,
- },
-
- Body: []byte("abcdef"),
-
- WantWrite: "POST / HTTP/1.1\r\n" +
- "Host: example.com\r\n" +
- "User-Agent: Go http package\r\n" +
- "Content-Length: 6\r\n" +
- "\r\n" +
- "abcdef",
-
- WantProxy: "POST / HTTP/1.1\r\n" +
- "Host: example.com\r\n" +
- "User-Agent: Go http package\r\n" +
- "Content-Length: 6\r\n" +
- "\r\n" +
- "abcdef",
- },
-
- // default to HTTP/1.1
- {
- Req: Request{
- Method: "GET",
- URL: mustParseURL("/search"),
- Host: "www.google.com",
- },
-
- WantWrite: "GET /search HTTP/1.1\r\n" +
- "Host: www.google.com\r\n" +
- "User-Agent: Go http package\r\n" +
- "\r\n",
- },
-
- // Request with a 0 ContentLength and a 0 byte body.
- {
- Req: Request{
- Method: "POST",
- URL: mustParseURL("/"),
- Host: "example.com",
- ProtoMajor: 1,
- ProtoMinor: 1,
- ContentLength: 0, // as if unset by user
- },
-
- Body: func() io.ReadCloser { return ioutil.NopCloser(io.LimitReader(strings.NewReader("xx"), 0)) },
-
- // RFC 2616 Section 14.13 says Content-Length should be specified
- // unless body is prohibited by the request method.
- // Also, nginx expects it for POST and PUT.
- WantWrite: "POST / HTTP/1.1\r\n" +
- "Host: example.com\r\n" +
- "User-Agent: Go http package\r\n" +
- "Content-Length: 0\r\n" +
- "\r\n",
-
- WantProxy: "POST / HTTP/1.1\r\n" +
- "Host: example.com\r\n" +
- "User-Agent: Go http package\r\n" +
- "Content-Length: 0\r\n" +
- "\r\n",
- },
-
- // Request with a 0 ContentLength and a 1 byte body.
- {
- Req: Request{
- Method: "POST",
- URL: mustParseURL("/"),
- Host: "example.com",
- ProtoMajor: 1,
- ProtoMinor: 1,
- ContentLength: 0, // as if unset by user
- },
-
- Body: func() io.ReadCloser { return ioutil.NopCloser(io.LimitReader(strings.NewReader("xx"), 1)) },
-
- WantWrite: "POST / HTTP/1.1\r\n" +
- "Host: example.com\r\n" +
- "User-Agent: Go http package\r\n" +
- "Transfer-Encoding: chunked\r\n\r\n" +
- chunk("x") + chunk(""),
-
- WantProxy: "POST / HTTP/1.1\r\n" +
- "Host: example.com\r\n" +
- "User-Agent: Go http package\r\n" +
- "Transfer-Encoding: chunked\r\n\r\n" +
- chunk("x") + chunk(""),
- },
-
- // Request with a ContentLength of 10 but a 5 byte body.
- {
- Req: Request{
- Method: "POST",
- URL: mustParseURL("/"),
- Host: "example.com",
- ProtoMajor: 1,
- ProtoMinor: 1,
- ContentLength: 10, // but we're going to send only 5 bytes
- },
- Body: []byte("12345"),
- WantError: errors.New("http: Request.ContentLength=10 with Body length 5"),
- },
-
- // Request with a ContentLength of 4 but an 8 byte body.
- {
- Req: Request{
- Method: "POST",
- URL: mustParseURL("/"),
- Host: "example.com",
- ProtoMajor: 1,
- ProtoMinor: 1,
- ContentLength: 4, // but we're going to try to send 8 bytes
- },
- Body: []byte("12345678"),
- WantError: errors.New("http: Request.ContentLength=4 with Body length 8"),
- },
-
- // Request with a 5 ContentLength and nil body.
- {
- Req: Request{
- Method: "POST",
- URL: mustParseURL("/"),
- Host: "example.com",
- ProtoMajor: 1,
- ProtoMinor: 1,
- ContentLength: 5, // but we'll omit the body
- },
- WantError: errors.New("http: Request.ContentLength=5 with nil Body"),
- },
-
- // Verify that DumpRequest preserves the HTTP version number, doesn't add a Host,
- // and doesn't add a User-Agent.
- {
- Req: Request{
- Method: "GET",
- URL: mustParseURL("/foo"),
- ProtoMajor: 1,
- ProtoMinor: 0,
- Header: Header{
- "X-Foo": []string{"X-Bar"},
- },
- },
-
- // We can dump it:
- WantDump: "GET /foo HTTP/1.0\r\n" +
- "X-Foo: X-Bar\r\n\r\n",
-
- // .. but we can't call Request.Write on it, due to its lack of Host header.
- // TODO(bradfitz): there might be an argument to allow this, but for now I'd
- // rather let HTTP/1.0 continue to die.
- WantWrite: "GET /foo HTTP/1.1\r\n" +
- "Host: \r\n" +
- "User-Agent: Go http package\r\n" +
- "X-Foo: X-Bar\r\n\r\n",
- },
-}
-
-func TestRequestWrite(t *testing.T) {
- for i := range reqWriteTests {
- tt := &reqWriteTests[i]
-
- setBody := func() {
- if tt.Body == nil {
- return
- }
- switch b := tt.Body.(type) {
- case []byte:
- tt.Req.Body = ioutil.NopCloser(bytes.NewBuffer(b))
- case func() io.ReadCloser:
- tt.Req.Body = b()
- }
- }
- setBody()
- if tt.Req.Header == nil {
- tt.Req.Header = make(Header)
- }
-
- var braw bytes.Buffer
- err := tt.Req.Write(&braw)
- if g, e := fmt.Sprintf("%v", err), fmt.Sprintf("%v", tt.WantError); g != e {
- t.Errorf("writing #%d, err = %q, want %q", i, g, e)
- continue
- }
- if err != nil {
- continue
- }
-
- if tt.WantWrite != "" {
- sraw := braw.String()
- if sraw != tt.WantWrite {
- t.Errorf("Test %d, expecting:\n%s\nGot:\n%s\n", i, tt.WantWrite, sraw)
- continue
- }
- }
-
- if tt.WantProxy != "" {
- setBody()
- var praw bytes.Buffer
- err = tt.Req.WriteProxy(&praw)
- if err != nil {
- t.Errorf("WriteProxy #%d: %s", i, err)
- continue
- }
- sraw := praw.String()
- if sraw != tt.WantProxy {
- t.Errorf("Test Proxy %d, expecting:\n%s\nGot:\n%s\n", i, tt.WantProxy, sraw)
- continue
- }
- }
-
- if tt.WantDump != "" {
- setBody()
- dump, err := DumpRequest(&tt.Req, true)
- if err != nil {
- t.Errorf("DumpRequest #%d: %s", i, err)
- continue
- }
- if string(dump) != tt.WantDump {
- t.Errorf("DumpRequest %d, expecting:\n%s\nGot:\n%s\n", i, tt.WantDump, string(dump))
- continue
- }
- }
- }
-}
-
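-// closeChecker wraps a Reader and records whether Close was called.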
-type closeChecker struct {
- io.Reader
- closed bool
-}
-
-func (rc *closeChecker) Close() error {
- rc.closed = true
- return nil
-}
-
-// TestRequestWriteClosesBody tests that Request.Write does close its request.Body.
-// It also indirectly tests NewRequest and that it doesn't wrap an existing Closer
-// inside a NopCloser, and that it serializes it correctly.
-func TestRequestWriteClosesBody(t *testing.T) {
- rc := &closeChecker{Reader: strings.NewReader("my body")}
- req, _ := NewRequest("POST", "http://foo.com/", rc)
- if req.ContentLength != 0 {
- t.Errorf("got req.ContentLength %d, want 0", req.ContentLength)
- }
- buf := new(bytes.Buffer)
- req.Write(buf)
- if !rc.closed {
- t.Error("body not closed after write")
- }
- expected := "POST / HTTP/1.1\r\n" +
- "Host: foo.com\r\n" +
- "User-Agent: Go http package\r\n" +
- "Transfer-Encoding: chunked\r\n\r\n" +
- // TODO: currently we don't buffer before chunking, so we get a
- // single "m" chunk before the other chunks, as this was the 1-byte
-		// read from our MultiReader where we stitched the Body back together
- // after sniffing whether the Body was 0 bytes or not.
- chunk("m") +
- chunk("y body") +
- chunk("")
- if buf.String() != expected {
- t.Errorf("write:\n got: %s\nwant: %s", buf.String(), expected)
- }
-}
-
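-// chunk encodes s as a single HTTP/1.1 chunk: hex length, CRLF, data, CRLF.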
-func chunk(s string) string {
- return fmt.Sprintf("%x\r\n%s\r\n", len(s), s)
-}
-
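-// mustParseURL parses s or panics; it keeps the test tables above terse.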
-func mustParseURL(s string) *url.URL {
- u, err := url.Parse(s)
- if err != nil {
- panic(fmt.Sprintf("Error parsing URL %q: %v", s, err))
- }
- return u
-}
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// HTTP Response reading and parsing.
-
-package http
-
-import (
- "bufio"
- "errors"
- "io"
- "net/textproto"
- "strconv"
- "strings"
- "url"
-)
-
-var respExcludeHeader = map[string]bool{
- "Content-Length": true,
- "Transfer-Encoding": true,
- "Trailer": true,
-}
-
-// Response represents the response from an HTTP request.
-type Response struct {
- Status string // e.g. "200 OK"
- StatusCode int // e.g. 200
- Proto string // e.g. "HTTP/1.0"
- ProtoMajor int // e.g. 1
- ProtoMinor int // e.g. 0
-
- // Header maps header keys to values. If the response had multiple
- // headers with the same key, they will be concatenated, with comma
- // delimiters. (Section 4.2 of RFC 2616 requires that multiple headers
- // be semantically equivalent to a comma-delimited sequence.) Values
- // duplicated by other fields in this struct (e.g., ContentLength) are
- // omitted from Header.
- //
- // Keys in the map are canonicalized (see CanonicalHeaderKey).
- Header Header
-
- // Body represents the response body.
- //
- // The http Client and Transport guarantee that Body is always
- // non-nil, even on responses without a body or responses with
-	// a zero-length body.
- Body io.ReadCloser
-
- // ContentLength records the length of the associated content. The
- // value -1 indicates that the length is unknown. Unless RequestMethod
- // is "HEAD", values >= 0 indicate that the given number of bytes may
- // be read from Body.
- ContentLength int64
-
-	// TransferEncoding lists the transfer encodings from outer-most to
-	// inner-most. A nil value means the "identity" encoding was used.
- TransferEncoding []string
-
- // Close records whether the header directed that the connection be
- // closed after reading Body. The value is advice for clients: neither
- // ReadResponse nor Response.Write ever closes a connection.
- Close bool
-
- // Trailer maps trailer keys to values, in the same
- // format as the header.
- Trailer Header
-
- // The Request that was sent to obtain this Response.
- // Request's Body is nil (having already been consumed).
- // This is only populated for Client requests.
- Request *Request
-}
-
-// Cookies parses and returns the cookies set in the Set-Cookie headers.
-func (r *Response) Cookies() []*Cookie {
- return readSetCookies(r.Header)
-}
-
-var ErrNoLocation = errors.New("http: no Location header in response")
-
-// Location returns the URL of the response's "Location" header,
-// if present. Relative redirects are resolved relative to
-// the Response's Request. ErrNoLocation is returned if no
-// Location header is present.
-func (r *Response) Location() (*url.URL, error) {
- lv := r.Header.Get("Location")
- if lv == "" {
- return nil, ErrNoLocation
- }
- if r.Request != nil && r.Request.URL != nil {
- return r.Request.URL.Parse(lv)
- }
- return url.Parse(lv)
-}
-
-// ReadResponse reads and returns an HTTP response from r. The
-// req parameter specifies the Request that corresponds to
-// this Response. Clients must call resp.Body.Close when finished
-// reading resp.Body. After that call, clients can inspect
-// resp.Trailer to find key/value pairs included in the response
-// trailer.
-func ReadResponse(r *bufio.Reader, req *Request) (resp *Response, err error) {
-
- tp := textproto.NewReader(r)
- resp = new(Response)
-
- resp.Request = req
- resp.Request.Method = strings.ToUpper(resp.Request.Method)
-
- // Parse the first line of the response.
- line, err := tp.ReadLine()
- if err != nil {
- if err == io.EOF {
- err = io.ErrUnexpectedEOF
- }
- return nil, err
- }
- f := strings.SplitN(line, " ", 3)
- if len(f) < 2 {
- return nil, &badStringError{"malformed HTTP response", line}
- }
- reasonPhrase := ""
- if len(f) > 2 {
- reasonPhrase = f[2]
- }
- resp.Status = f[1] + " " + reasonPhrase
- resp.StatusCode, err = strconv.Atoi(f[1])
- if err != nil {
- return nil, &badStringError{"malformed HTTP status code", f[1]}
- }
-
- resp.Proto = f[0]
- var ok bool
- if resp.ProtoMajor, resp.ProtoMinor, ok = ParseHTTPVersion(resp.Proto); !ok {
- return nil, &badStringError{"malformed HTTP version", resp.Proto}
- }
-
- // Parse the response headers.
- mimeHeader, err := tp.ReadMIMEHeader()
- if err != nil {
- return nil, err
- }
- resp.Header = Header(mimeHeader)
-
- fixPragmaCacheControl(resp.Header)
-
- err = readTransfer(resp, r)
- if err != nil {
- return nil, err
- }
-
- return resp, nil
-}
-
-// RFC2616: Should treat
-// Pragma: no-cache
-// like
-// Cache-Control: no-cache
-func fixPragmaCacheControl(header Header) {
- if hp, ok := header["Pragma"]; ok && len(hp) > 0 && hp[0] == "no-cache" {
- if _, presentcc := header["Cache-Control"]; !presentcc {
- header["Cache-Control"] = []string{"no-cache"}
- }
- }
-}
-
-// ProtoAtLeast returns whether the HTTP protocol used
-// in the response is at least major.minor.
-func (r *Response) ProtoAtLeast(major, minor int) bool {
- return r.ProtoMajor > major ||
- r.ProtoMajor == major && r.ProtoMinor >= minor
-}
-
-// Write writes the response (header, body and trailer) in wire format.
-// This method consults the following fields of resp:
-//
-// StatusCode
-// ProtoMajor
-// ProtoMinor
-// RequestMethod
-// TransferEncoding
-// Trailer
-// Body
-// ContentLength
-//	Header (values for non-canonical keys will have unpredictable behavior)
-//
-func (resp *Response) Write(w io.Writer) error {
-
- // RequestMethod should be upper-case
- if resp.Request != nil {
- resp.Request.Method = strings.ToUpper(resp.Request.Method)
- }
-
- // Status line
- text := resp.Status
- if text == "" {
- var ok bool
- text, ok = statusText[resp.StatusCode]
- if !ok {
- text = "status code " + strconv.Itoa(resp.StatusCode)
- }
- }
- io.WriteString(w, "HTTP/"+strconv.Itoa(resp.ProtoMajor)+".")
- io.WriteString(w, strconv.Itoa(resp.ProtoMinor)+" ")
- io.WriteString(w, strconv.Itoa(resp.StatusCode)+" "+text+"\r\n")
-
- // Process Body,ContentLength,Close,Trailer
- tw, err := newTransferWriter(resp)
- if err != nil {
- return err
- }
- err = tw.WriteHeader(w)
- if err != nil {
- return err
- }
-
- // Rest of header
- err = resp.Header.WriteSubset(w, respExcludeHeader)
- if err != nil {
- return err
- }
-
- // End-of-header
- io.WriteString(w, "\r\n")
-
- // Write body and trailer
- err = tw.WriteBody(w)
- if err != nil {
- return err
- }
-
- // Success
- return nil
-}
+++ /dev/null
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http
-
-import (
- "bufio"
- "bytes"
- "compress/gzip"
- "crypto/rand"
- "fmt"
- "io"
- "io/ioutil"
- "reflect"
- "testing"
- "url"
-)
-
-type respTest struct {
- Raw string
- Resp Response
- Body string
-}
-
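-// dummyReq returns a minimal Request carrying only the method, standing in
-// for the request that produced the response under test.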
-func dummyReq(method string) *Request {
- return &Request{Method: method}
-}
-
-var respTests = []respTest{
- // Unchunked response without Content-Length.
- {
- "HTTP/1.0 200 OK\r\n" +
- "Connection: close\r\n" +
- "\r\n" +
- "Body here\n",
-
- Response{
- Status: "200 OK",
- StatusCode: 200,
- Proto: "HTTP/1.0",
- ProtoMajor: 1,
- ProtoMinor: 0,
- Request: dummyReq("GET"),
- Header: Header{
- "Connection": {"close"}, // TODO(rsc): Delete?
- },
- Close: true,
- ContentLength: -1,
- },
-
- "Body here\n",
- },
-
- // Unchunked HTTP/1.1 response without Content-Length or
- // Connection headers.
- {
- "HTTP/1.1 200 OK\r\n" +
- "\r\n" +
- "Body here\n",
-
- Response{
- Status: "200 OK",
- StatusCode: 200,
- Proto: "HTTP/1.1",
- ProtoMajor: 1,
- ProtoMinor: 1,
- Request: dummyReq("GET"),
- Close: true,
- ContentLength: -1,
- },
-
- "Body here\n",
- },
-
- // Unchunked HTTP/1.1 204 response without Content-Length.
- {
- "HTTP/1.1 204 No Content\r\n" +
- "\r\n" +
- "Body should not be read!\n",
-
- Response{
- Status: "204 No Content",
- StatusCode: 204,
- Proto: "HTTP/1.1",
- ProtoMajor: 1,
- ProtoMinor: 1,
- Request: dummyReq("GET"),
- Close: false,
- ContentLength: 0,
- },
-
- "",
- },
-
- // Unchunked response with Content-Length.
- {
- "HTTP/1.0 200 OK\r\n" +
- "Content-Length: 10\r\n" +
- "Connection: close\r\n" +
- "\r\n" +
- "Body here\n",
-
- Response{
- Status: "200 OK",
- StatusCode: 200,
- Proto: "HTTP/1.0",
- ProtoMajor: 1,
- ProtoMinor: 0,
- Request: dummyReq("GET"),
- Header: Header{
- "Connection": {"close"}, // TODO(rsc): Delete?
- "Content-Length": {"10"}, // TODO(rsc): Delete?
- },
- Close: true,
- ContentLength: 10,
- },
-
- "Body here\n",
- },
-
- // Chunked response without Content-Length.
- {
- "HTTP/1.0 200 OK\r\n" +
- "Transfer-Encoding: chunked\r\n" +
- "\r\n" +
- "0a\r\n" +
- "Body here\n\r\n" +
- "09\r\n" +
- "continued\r\n" +
- "0\r\n" +
- "\r\n",
-
- Response{
- Status: "200 OK",
- StatusCode: 200,
- Proto: "HTTP/1.0",
- ProtoMajor: 1,
- ProtoMinor: 0,
- Request: dummyReq("GET"),
- Header: Header{},
- Close: true,
- ContentLength: -1,
- TransferEncoding: []string{"chunked"},
- },
-
- "Body here\ncontinued",
- },
-
- // Chunked response with Content-Length.
- {
- "HTTP/1.0 200 OK\r\n" +
- "Transfer-Encoding: chunked\r\n" +
- "Content-Length: 10\r\n" +
- "\r\n" +
- "0a\r\n" +
- "Body here\n" +
- "0\r\n" +
- "\r\n",
-
- Response{
- Status: "200 OK",
- StatusCode: 200,
- Proto: "HTTP/1.0",
- ProtoMajor: 1,
- ProtoMinor: 0,
- Request: dummyReq("GET"),
- Header: Header{},
- Close: true,
- ContentLength: -1, // TODO(rsc): Fix?
- TransferEncoding: []string{"chunked"},
- },
-
- "Body here\n",
- },
-
- // Chunked response in response to a HEAD request (the "chunked" should
- // be ignored, as HEAD responses never have bodies)
- {
- "HTTP/1.0 200 OK\r\n" +
- "Transfer-Encoding: chunked\r\n" +
- "\r\n",
-
- Response{
- Status: "200 OK",
- StatusCode: 200,
- Proto: "HTTP/1.0",
- ProtoMajor: 1,
- ProtoMinor: 0,
- Request: dummyReq("HEAD"),
- Header: Header{},
- Close: true,
- ContentLength: 0,
- },
-
- "",
- },
-
- // explicit Content-Length of 0.
- {
- "HTTP/1.1 200 OK\r\n" +
- "Content-Length: 0\r\n" +
- "\r\n",
-
- Response{
- Status: "200 OK",
- StatusCode: 200,
- Proto: "HTTP/1.1",
- ProtoMajor: 1,
- ProtoMinor: 1,
- Request: dummyReq("GET"),
- Header: Header{
- "Content-Length": {"0"},
- },
- Close: false,
- ContentLength: 0,
- },
-
- "",
- },
-
- // Status line without a Reason-Phrase, but trailing space.
- // (permitted by RFC 2616)
- {
- "HTTP/1.0 303 \r\n\r\n",
- Response{
- Status: "303 ",
- StatusCode: 303,
- Proto: "HTTP/1.0",
- ProtoMajor: 1,
- ProtoMinor: 0,
- Request: dummyReq("GET"),
- Header: Header{},
- Close: true,
- ContentLength: -1,
- },
-
- "",
- },
-
- // Status line without a Reason-Phrase, and no trailing space.
- // (not permitted by RFC 2616, but we'll accept it anyway)
- {
- "HTTP/1.0 303\r\n\r\n",
- Response{
- Status: "303 ",
- StatusCode: 303,
- Proto: "HTTP/1.0",
- ProtoMajor: 1,
- ProtoMinor: 0,
- Request: dummyReq("GET"),
- Header: Header{},
- Close: true,
- ContentLength: -1,
- },
-
- "",
- },
-}
-
-func TestReadResponse(t *testing.T) {
- for i := range respTests {
- tt := &respTests[i]
- var braw bytes.Buffer
- braw.WriteString(tt.Raw)
- resp, err := ReadResponse(bufio.NewReader(&braw), tt.Resp.Request)
- if err != nil {
- t.Errorf("#%d: %s", i, err)
- continue
- }
- rbody := resp.Body
- resp.Body = nil
- diff(t, fmt.Sprintf("#%d Response", i), resp, &tt.Resp)
- var bout bytes.Buffer
- if rbody != nil {
- io.Copy(&bout, rbody)
- rbody.Close()
- }
- body := bout.String()
- if body != tt.Body {
- t.Errorf("#%d: Body = %q want %q", i, body, tt.Body)
- }
- }
-}
-
-var readResponseCloseInMiddleTests = []struct {
- chunked, compressed bool
-}{
- {false, false},
- {true, false},
- {true, true},
-}
-
-// TestReadResponseCloseInMiddle tests that closing a body after
-// reading only part of its contents advances the read to the end of
-// the response, leaving the reader positioned at the start of the
-// next message on the connection.
-func TestReadResponseCloseInMiddle(t *testing.T) {
- for _, test := range readResponseCloseInMiddleTests {
- fatalf := func(format string, args ...interface{}) {
- args = append([]interface{}{test.chunked, test.compressed}, args...)
- t.Fatalf("on test chunked=%v, compressed=%v: "+format, args...)
- }
- checkErr := func(err error, msg string) {
- if err == nil {
- return
- }
- fatalf(msg+": %v", err)
- }
- var buf bytes.Buffer
- buf.WriteString("HTTP/1.1 200 OK\r\n")
- if test.chunked {
- buf.WriteString("Transfer-Encoding: chunked\r\n")
- } else {
- buf.WriteString("Content-Length: 1000000\r\n")
- }
- var wr io.Writer = &buf
- if test.chunked {
- wr = &chunkedWriter{wr}
- }
- if test.compressed {
- buf.WriteString("Content-Encoding: gzip\r\n")
- var err error
- wr, err = gzip.NewWriter(wr)
- checkErr(err, "gzip.NewWriter")
- }
- buf.WriteString("\r\n")
-
- chunk := bytes.Repeat([]byte{'x'}, 1000)
- for i := 0; i < 1000; i++ {
- if test.compressed {
- // Otherwise this compresses too well.
- _, err := io.ReadFull(rand.Reader, chunk)
- checkErr(err, "rand.Reader ReadFull")
- }
- wr.Write(chunk)
- }
- if test.compressed {
- err := wr.(*gzip.Compressor).Close()
- checkErr(err, "compressor close")
- }
- if test.chunked {
- buf.WriteString("0\r\n\r\n")
- }
- buf.WriteString("Next Request Here")
-
- bufr := bufio.NewReader(&buf)
- resp, err := ReadResponse(bufr, dummyReq("GET"))
- checkErr(err, "ReadResponse")
- expectedLength := int64(-1)
- if !test.chunked {
- expectedLength = 1000000
- }
- if resp.ContentLength != expectedLength {
- fatalf("expected response length %d, got %d", expectedLength, resp.ContentLength)
- }
- if resp.Body == nil {
- fatalf("nil body")
- }
- if test.compressed {
- gzReader, err := gzip.NewReader(resp.Body)
- checkErr(err, "gzip.NewReader")
- resp.Body = &readFirstCloseBoth{gzReader, resp.Body}
- }
-
- rbuf := make([]byte, 2500)
- n, err := io.ReadFull(resp.Body, rbuf)
- checkErr(err, "2500 byte ReadFull")
- if n != 2500 {
- fatalf("ReadFull only read %d bytes", n)
- }
- if test.compressed == false && !bytes.Equal(bytes.Repeat([]byte{'x'}, 2500), rbuf) {
- fatalf("ReadFull didn't read 2500 'x'; got %q", string(rbuf))
- }
- resp.Body.Close()
-
- rest, err := ioutil.ReadAll(bufr)
- checkErr(err, "ReadAll on remainder")
- if e, g := "Next Request Here", string(rest); e != g {
- fatalf("remainder = %q, expected %q", g, e)
- }
- }
-}
-
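-// diff compares have and want field by field with reflect.DeepEqual and
-// reports each mismatched field through t.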
-func diff(t *testing.T, prefix string, have, want interface{}) {
- hv := reflect.ValueOf(have).Elem()
- wv := reflect.ValueOf(want).Elem()
- if hv.Type() != wv.Type() {
- t.Errorf("%s: type mismatch %v want %v", prefix, hv.Type(), wv.Type())
- }
- for i := 0; i < hv.NumField(); i++ {
- hf := hv.Field(i).Interface()
- wf := wv.Field(i).Interface()
- if !reflect.DeepEqual(hf, wf) {
- t.Errorf("%s: %s = %v want %v", prefix, hv.Type().Field(i).Name, hf, wf)
- }
- }
-}
-
-type responseLocationTest struct {
- location string // Response's Location header or ""
- requrl string // Response.Request.URL or ""
- want string
- wantErr error
-}
-
-var responseLocationTests = []responseLocationTest{
- {"/foo", "http://bar.com/baz", "http://bar.com/foo", nil},
- {"http://foo.com/", "http://bar.com/baz", "http://foo.com/", nil},
- {"", "http://bar.com/baz", "", ErrNoLocation},
-}
-
-func TestLocationResponse(t *testing.T) {
- for i, tt := range responseLocationTests {
- res := new(Response)
- res.Header = make(Header)
- res.Header.Set("Location", tt.location)
- if tt.requrl != "" {
- res.Request = &Request{}
- var err error
- res.Request.URL, err = url.Parse(tt.requrl)
- if err != nil {
- t.Fatalf("bad test URL %q: %v", tt.requrl, err)
- }
- }
-
- got, err := res.Location()
- if tt.wantErr != nil {
- if err == nil {
- t.Errorf("%d. err=nil; want %q", i, tt.wantErr)
- continue
- }
- if g, e := err.Error(), tt.wantErr.Error(); g != e {
- t.Errorf("%d. err=%q; want %q", i, g, e)
- continue
- }
- continue
- }
- if err != nil {
- t.Errorf("%d. err=%q", i, err)
- continue
- }
- if g, e := got.String(), tt.want; g != e {
- t.Errorf("%d. Location=%q; want %q", i, g, e)
- }
- }
-}
+++ /dev/null
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http
-
-import (
- "bytes"
- "io/ioutil"
- "testing"
-)
-
-type respWriteTest struct {
- Resp Response
- Raw string
-}
-
-var respWriteTests = []respWriteTest{
- // HTTP/1.0, identity coding; no trailer
- {
- Response{
- StatusCode: 503,
- ProtoMajor: 1,
- ProtoMinor: 0,
- Request: dummyReq("GET"),
- Header: Header{},
- Body: ioutil.NopCloser(bytes.NewBufferString("abcdef")),
- ContentLength: 6,
- },
-
- "HTTP/1.0 503 Service Unavailable\r\n" +
- "Content-Length: 6\r\n\r\n" +
- "abcdef",
- },
- // Unchunked response without Content-Length.
- {
- Response{
- StatusCode: 200,
- ProtoMajor: 1,
- ProtoMinor: 0,
- Request: dummyReq("GET"),
- Header: Header{},
- Body: ioutil.NopCloser(bytes.NewBufferString("abcdef")),
- ContentLength: -1,
- },
- "HTTP/1.0 200 OK\r\n" +
- "\r\n" +
- "abcdef",
- },
- // HTTP/1.1, chunked coding; empty trailer; close
- {
- Response{
- StatusCode: 200,
- ProtoMajor: 1,
- ProtoMinor: 1,
- Request: dummyReq("GET"),
- Header: Header{},
- Body: ioutil.NopCloser(bytes.NewBufferString("abcdef")),
- ContentLength: 6,
- TransferEncoding: []string{"chunked"},
- Close: true,
- },
-
- "HTTP/1.1 200 OK\r\n" +
- "Connection: close\r\n" +
- "Transfer-Encoding: chunked\r\n\r\n" +
- "6\r\nabcdef\r\n0\r\n\r\n",
- },
-
- // Header value with a newline character (Issue 914).
- // Also tests removal of leading and trailing whitespace.
- {
- Response{
- StatusCode: 204,
- ProtoMajor: 1,
- ProtoMinor: 1,
- Request: dummyReq("GET"),
- Header: Header{
- "Foo": []string{" Bar\nBaz "},
- },
- Body: nil,
- ContentLength: 0,
- TransferEncoding: []string{"chunked"},
- Close: true,
- },
-
- "HTTP/1.1 204 No Content\r\n" +
- "Connection: close\r\n" +
- "Foo: Bar Baz\r\n" +
- "\r\n",
- },
-}
-
-func TestResponseWrite(t *testing.T) {
- for i := range respWriteTests {
- tt := &respWriteTests[i]
- var braw bytes.Buffer
- err := tt.Resp.Write(&braw)
- if err != nil {
- t.Errorf("error writing #%d: %s", i, err)
- continue
- }
- sraw := braw.String()
- if sraw != tt.Raw {
- t.Errorf("Test %d, expecting:\n%q\nGot:\n%q\n", i, tt.Raw, sraw)
- continue
- }
- }
-}
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// HTTP reverse proxy handler
-
-package http
-
-import (
- "io"
- "log"
- "net"
- "strings"
- "sync"
- "time"
- "url"
-)
-
-// ReverseProxy is an HTTP Handler that takes an incoming request and
-// sends it to another server, proxying the response back to the
-// client.
-type ReverseProxy struct {
- // Director must be a function which modifies
- // the request into a new request to be sent
- // using Transport. Its response is then copied
- // back to the original client unmodified.
- Director func(*Request)
-
- // The Transport used to perform proxy requests.
- // If nil, DefaultTransport is used.
- Transport RoundTripper
-
-	// FlushInterval specifies, in nanoseconds, how often to flush
-	// to the client while copying the response body.
-	// If zero, no periodic flushing is done.
- FlushInterval int64
-}
-
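-// singleJoiningSlash joins a and b with exactly one slash between them.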
-func singleJoiningSlash(a, b string) string {
- aslash := strings.HasSuffix(a, "/")
- bslash := strings.HasPrefix(b, "/")
- switch {
- case aslash && bslash:
- return a + b[1:]
- case !aslash && !bslash:
- return a + "/" + b
- }
- return a + b
-}
-
-// NewSingleHostReverseProxy returns a new ReverseProxy that rewrites
-// URLs to the scheme, host, and base path provided in target. If the
-// target's path is "/base" and the incoming request was for "/dir",
-// the target request will be for /base/dir.
-func NewSingleHostReverseProxy(target *url.URL) *ReverseProxy {
- director := func(req *Request) {
- req.URL.Scheme = target.Scheme
- req.URL.Host = target.Host
- req.URL.Path = singleJoiningSlash(target.Path, req.URL.Path)
- if q := req.URL.RawQuery; q != "" {
- req.URL.RawPath = req.URL.Path + "?" + q
- } else {
- req.URL.RawPath = req.URL.Path
- }
- req.URL.RawQuery = target.RawQuery
- }
- return &ReverseProxy{Director: director}
-}
-
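-// copyHeader adds every value in src to dst, preserving multiple values
-// per key.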
-func copyHeader(dst, src Header) {
- for k, vv := range src {
- for _, v := range vv {
- dst.Add(k, v)
- }
- }
-}
-
-func (p *ReverseProxy) ServeHTTP(rw ResponseWriter, req *Request) {
- transport := p.Transport
- if transport == nil {
- transport = DefaultTransport
- }
-
- outreq := new(Request)
- *outreq = *req // includes shallow copies of maps, but okay
-
- p.Director(outreq)
- outreq.Proto = "HTTP/1.1"
- outreq.ProtoMajor = 1
- outreq.ProtoMinor = 1
- outreq.Close = false
-
-	// Remove the Connection header before forwarding to the backend. We
-	// want a persistent connection, regardless of what the client sent
-	// to us. Header is the same underlying map as req's (shallow copied
-	// above), so we only make a copy when we actually need to modify it.
- if outreq.Header.Get("Connection") != "" {
- outreq.Header = make(Header)
- copyHeader(outreq.Header, req.Header)
- outreq.Header.Del("Connection")
- }
-
- if clientIp, _, err := net.SplitHostPort(req.RemoteAddr); err == nil {
- outreq.Header.Set("X-Forwarded-For", clientIp)
- }
-
- res, err := transport.RoundTrip(outreq)
- if err != nil {
- log.Printf("http: proxy error: %v", err)
- rw.WriteHeader(StatusInternalServerError)
- return
- }
-
- copyHeader(rw.Header(), res.Header)
-
- rw.WriteHeader(res.StatusCode)
-
- if res.Body != nil {
- var dst io.Writer = rw
- if p.FlushInterval != 0 {
- if wf, ok := rw.(writeFlusher); ok {
- dst = &maxLatencyWriter{dst: wf, latency: p.FlushInterval}
- }
- }
- io.Copy(dst, res.Body)
- }
-}
-
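-// writeFlusher is the subset of ResponseWriter the proxy needs for
-// periodic flushing: an io.Writer that can also Flush.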
-type writeFlusher interface {
- io.Writer
- Flusher
-}
-
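-// maxLatencyWriter forwards writes to dst and flushes it at least every
-// 'latency' nanoseconds while writes are in progress.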
-type maxLatencyWriter struct {
- dst writeFlusher
- latency int64 // nanos
-
- lk sync.Mutex // protects init of done, as well Write + Flush
- done chan bool
-}
-
-func (m *maxLatencyWriter) Write(p []byte) (n int, err error) {
- m.lk.Lock()
- defer m.lk.Unlock()
- if m.done == nil {
- m.done = make(chan bool)
- go m.flushLoop()
- }
- n, err = m.dst.Write(p)
- if err != nil {
- m.done <- true
- }
- return
-}
-
-func (m *maxLatencyWriter) flushLoop() {
- t := time.NewTicker(m.latency)
- defer t.Stop()
- for {
- select {
- case <-t.C:
- m.lk.Lock()
- m.dst.Flush()
- m.lk.Unlock()
- case <-m.done:
- return
- }
- }
- panic("unreached")
-}
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Reverse proxy tests.
-
-package http_test
-
-import (
- . "http"
- "http/httptest"
- "io/ioutil"
- "testing"
- "url"
-)
-
-func TestReverseProxy(t *testing.T) {
- const backendResponse = "I am the backend"
- const backendStatus = 404
- backend := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
- if len(r.TransferEncoding) > 0 {
- t.Errorf("backend got unexpected TransferEncoding: %v", r.TransferEncoding)
- }
- if r.Header.Get("X-Forwarded-For") == "" {
- t.Errorf("didn't get X-Forwarded-For header")
- }
- if c := r.Header.Get("Connection"); c != "" {
- t.Errorf("handler got Connection header value %q", c)
- }
- if g, e := r.Host, "some-name"; g != e {
- t.Errorf("backend got Host header %q, want %q", g, e)
- }
- w.Header().Set("X-Foo", "bar")
- SetCookie(w, &Cookie{Name: "flavor", Value: "chocolateChip"})
- w.WriteHeader(backendStatus)
- w.Write([]byte(backendResponse))
- }))
- defer backend.Close()
- backendURL, err := url.Parse(backend.URL)
- if err != nil {
- t.Fatal(err)
- }
- proxyHandler := NewSingleHostReverseProxy(backendURL)
- frontend := httptest.NewServer(proxyHandler)
- defer frontend.Close()
-
- getReq, _ := NewRequest("GET", frontend.URL, nil)
- getReq.Host = "some-name"
- getReq.Header.Set("Connection", "close")
- getReq.Close = true
- res, err := DefaultClient.Do(getReq)
- if err != nil {
- t.Fatalf("Get: %v", err)
- }
- if g, e := res.StatusCode, backendStatus; g != e {
- t.Errorf("got res.StatusCode %d; expected %d", g, e)
- }
- if g, e := res.Header.Get("X-Foo"), "bar"; g != e {
- t.Errorf("got X-Foo %q; expected %q", g, e)
- }
- if g, e := len(res.Header["Set-Cookie"]), 1; g != e {
- t.Fatalf("got %d SetCookies, want %d", g, e)
- }
- if cookie := res.Cookies()[0]; cookie.Name != "flavor" {
- t.Errorf("unexpected cookie %q", cookie.Name)
- }
- bodyBytes, _ := ioutil.ReadAll(res.Body)
- if g, e := string(bodyBytes), backendResponse; g != e {
- t.Errorf("got body %q; expected %q", g, e)
- }
-}
+++ /dev/null
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// End-to-end serving tests
-
-package http_test
-
-import (
- "bufio"
- "bytes"
- "crypto/tls"
- "fmt"
- . "http"
- "http/httptest"
- "io"
- "io/ioutil"
- "log"
- "net"
- "os"
- "reflect"
- "strings"
- "syscall"
- "testing"
- "time"
- "url"
-)
-
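-// dummyAddr, oneConnListener and testConn let these tests drive Serve
-// over an in-memory connection instead of a real network socket.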
-type dummyAddr string
-type oneConnListener struct {
- conn net.Conn
-}
-
-func (l *oneConnListener) Accept() (c net.Conn, err error) {
- c = l.conn
- if c == nil {
- err = io.EOF
- return
- }
- err = nil
- l.conn = nil
- return
-}
-
-func (l *oneConnListener) Close() error {
- return nil
-}
-
-func (l *oneConnListener) Addr() net.Addr {
- return dummyAddr("test-address")
-}
-
-func (a dummyAddr) Network() string {
- return string(a)
-}
-
-func (a dummyAddr) String() string {
- return string(a)
-}
-
-type testConn struct {
- readBuf bytes.Buffer
- writeBuf bytes.Buffer
-}
-
-func (c *testConn) Read(b []byte) (int, error) {
- return c.readBuf.Read(b)
-}
-
-func (c *testConn) Write(b []byte) (int, error) {
- return c.writeBuf.Write(b)
-}
-
-func (c *testConn) Close() error {
- return nil
-}
-
-func (c *testConn) LocalAddr() net.Addr {
- return dummyAddr("local-addr")
-}
-
-func (c *testConn) RemoteAddr() net.Addr {
- return dummyAddr("remote-addr")
-}
-
-func (c *testConn) SetTimeout(nsec int64) error {
- return nil
-}
-
-func (c *testConn) SetReadTimeout(nsec int64) error {
- return nil
-}
-
-func (c *testConn) SetWriteTimeout(nsec int64) error {
- return nil
-}
-
-func TestConsumingBodyOnNextConn(t *testing.T) {
- conn := new(testConn)
- for i := 0; i < 2; i++ {
- conn.readBuf.Write([]byte(
- "POST / HTTP/1.1\r\n" +
- "Host: test\r\n" +
- "Content-Length: 11\r\n" +
- "\r\n" +
- "foo=1&bar=1"))
- }
-
- reqNum := 0
- ch := make(chan *Request)
- servech := make(chan error)
- listener := &oneConnListener{conn}
- handler := func(res ResponseWriter, req *Request) {
- reqNum++
- ch <- req
- }
-
- go func() {
- servech <- Serve(listener, HandlerFunc(handler))
- }()
-
- var req *Request
- req = <-ch
- if req == nil {
- t.Fatal("Got nil first request.")
- }
- if req.Method != "POST" {
- t.Errorf("For request #1's method, got %q; expected %q",
- req.Method, "POST")
- }
-
- req = <-ch
- if req == nil {
-		t.Fatal("Got nil second request.")
- }
- if req.Method != "POST" {
- t.Errorf("For request #2's method, got %q; expected %q",
- req.Method, "POST")
- }
-
- if serveerr := <-servech; serveerr != io.EOF {
- t.Errorf("Serve returned %q; expected EOF", serveerr)
- }
-}
-
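-// stringHandler replies to every request by echoing itself in the
-// Result response header.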
-type stringHandler string
-
-func (s stringHandler) ServeHTTP(w ResponseWriter, r *Request) {
- w.Header().Set("Result", string(s))
-}
-
-var handlers = []struct {
- pattern string
- msg string
-}{
- {"/", "Default"},
- {"/someDir/", "someDir"},
- {"someHost.com/someDir/", "someHost.com/someDir"},
-}
-
-var vtests = []struct {
- url string
- expected string
-}{
- {"http://localhost/someDir/apage", "someDir"},
- {"http://localhost/otherDir/apage", "Default"},
- {"http://someHost.com/someDir/apage", "someHost.com/someDir"},
- {"http://otherHost.com/someDir/apage", "someDir"},
- {"http://otherHost.com/aDir/apage", "Default"},
-}
-
-func TestHostHandlers(t *testing.T) {
- for _, h := range handlers {
- Handle(h.pattern, stringHandler(h.msg))
- }
- ts := httptest.NewServer(nil)
- defer ts.Close()
-
- conn, err := net.Dial("tcp", ts.Listener.Addr().String())
- if err != nil {
- t.Fatal(err)
- }
- defer conn.Close()
- cc := NewClientConn(conn, nil)
- for _, vt := range vtests {
- var r *Response
- var req Request
- if req.URL, err = url.Parse(vt.url); err != nil {
- t.Errorf("cannot parse url: %v", err)
- continue
- }
- if err := cc.Write(&req); err != nil {
- t.Errorf("writing request: %v", err)
- continue
- }
- r, err := cc.Read(&req)
- if err != nil {
- t.Errorf("reading response: %v", err)
- continue
- }
- s := r.Header.Get("Result")
- if s != vt.expected {
- t.Errorf("Get(%q) = %q, want %q", vt.url, s, vt.expected)
- }
- }
-}
-
-// Tests for http://code.google.com/p/go/issues/detail?id=900
-func TestMuxRedirectLeadingSlashes(t *testing.T) {
- paths := []string{"//foo.txt", "///foo.txt", "/../../foo.txt"}
- for _, path := range paths {
- req, err := ReadRequest(bufio.NewReader(bytes.NewBufferString("GET " + path + " HTTP/1.1\r\nHost: test\r\n\r\n")))
- if err != nil {
- t.Errorf("%s", err)
- }
- mux := NewServeMux()
- resp := httptest.NewRecorder()
-
- mux.ServeHTTP(resp, req)
-
- if loc, expected := resp.Header().Get("Location"), "/foo.txt"; loc != expected {
- t.Errorf("Expected Location header set to %q; got %q", expected, loc)
- return
- }
-
- if code, expected := resp.Code, StatusMovedPermanently; code != expected {
- t.Errorf("Expected response code of StatusMovedPermanently; got %d", code)
- return
- }
- }
-}
-
-func TestServerTimeouts(t *testing.T) {
- // TODO(bradfitz): convert this to use httptest.Server
- l, err := net.Listen("tcp", "127.0.0.1:0")
- if err != nil {
- t.Fatalf("listen error: %v", err)
- }
- addr, _ := l.Addr().(*net.TCPAddr)
-
- reqNum := 0
- handler := HandlerFunc(func(res ResponseWriter, req *Request) {
- reqNum++
- fmt.Fprintf(res, "req=%d", reqNum)
- })
-
- const second = 1000000000 /* nanos */
- server := &Server{Handler: handler, ReadTimeout: 0.25 * second, WriteTimeout: 0.25 * second}
- go server.Serve(l)
-
- url := fmt.Sprintf("http://%s/", addr)
-
- // Hit the HTTP server successfully.
- tr := &Transport{DisableKeepAlives: true} // they interfere with this test
- c := &Client{Transport: tr}
- r, err := c.Get(url)
- if err != nil {
- t.Fatalf("http Get #1: %v", err)
- }
- got, _ := ioutil.ReadAll(r.Body)
- expected := "req=1"
- if string(got) != expected {
- t.Errorf("Unexpected response for request #1; got %q; expected %q",
- string(got), expected)
- }
-
- // Slow client that should timeout.
- t1 := time.Nanoseconds()
- conn, err := net.Dial("tcp", addr.String())
- if err != nil {
- t.Fatalf("Dial: %v", err)
- }
- buf := make([]byte, 1)
- n, err := conn.Read(buf)
- latency := time.Nanoseconds() - t1
- if n != 0 || err != io.EOF {
- t.Errorf("Read = %v, %v, wanted %v, %v", n, err, 0, io.EOF)
- }
- if latency < second*0.20 /* fudge from 0.25 above */ {
- t.Errorf("got EOF after %d ns, want >= %d", latency, second*0.20)
- }
-
- // Hit the HTTP server successfully again, verifying that the
- // previous slow connection didn't run our handler. (that we
- // get "req=2", not "req=3")
- r, err = Get(url)
- if err != nil {
- t.Fatalf("http Get #2: %v", err)
- }
- got, _ = ioutil.ReadAll(r.Body)
- expected = "req=2"
- if string(got) != expected {
- t.Errorf("Get #2 got %q, want %q", string(got), expected)
- }
-
- l.Close()
-}
-
-// TestIdentityResponse verifies that a handler can unset chunking by
-// declaring an explicit Content-Length, and that writing more or fewer
-// bytes than that length is handled.
-func TestIdentityResponse(t *testing.T) {
- handler := HandlerFunc(func(rw ResponseWriter, req *Request) {
- rw.Header().Set("Content-Length", "3")
- rw.Header().Set("Transfer-Encoding", req.FormValue("te"))
- switch {
- case req.FormValue("overwrite") == "1":
- _, err := rw.Write([]byte("foo TOO LONG"))
- if err != ErrContentLength {
- t.Errorf("expected ErrContentLength; got %v", err)
- }
- case req.FormValue("underwrite") == "1":
- rw.Header().Set("Content-Length", "500")
- rw.Write([]byte("too short"))
- default:
- rw.Write([]byte("foo"))
- }
- })
-
- ts := httptest.NewServer(handler)
- defer ts.Close()
-
- // Note: this relies on the assumption (which is true) that
- // Get sends HTTP/1.1 or greater requests. Otherwise the
- // server wouldn't have the choice to send back chunked
- // responses.
- for _, te := range []string{"", "identity"} {
- url := ts.URL + "/?te=" + te
- res, err := Get(url)
- if err != nil {
- t.Fatalf("error with Get of %s: %v", url, err)
- }
- if cl, expected := res.ContentLength, int64(3); cl != expected {
- t.Errorf("for %s expected res.ContentLength of %d; got %d", url, expected, cl)
- }
- if cl, expected := res.Header.Get("Content-Length"), "3"; cl != expected {
- t.Errorf("for %s expected Content-Length header of %q; got %q", url, expected, cl)
- }
- if tl, expected := len(res.TransferEncoding), 0; tl != expected {
- t.Errorf("for %s expected len(res.TransferEncoding) of %d; got %d (%v)",
- url, expected, tl, res.TransferEncoding)
- }
- res.Body.Close()
- }
-
- // Verify that ErrContentLength is returned
- url := ts.URL + "/?overwrite=1"
- _, err := Get(url)
- if err != nil {
- t.Fatalf("error with Get of %s: %v", url, err)
- }
- // Verify that the connection is closed when the declared Content-Length
- // is larger than what the handler wrote.
- conn, err := net.Dial("tcp", ts.Listener.Addr().String())
- if err != nil {
- t.Fatalf("error dialing: %v", err)
- }
- _, err = conn.Write([]byte("GET /?underwrite=1 HTTP/1.1\r\nHost: foo\r\n\r\n"))
- if err != nil {
- t.Fatalf("error writing: %v", err)
- }
-
- // The ReadAll will hang for a failing test, so use a Timer to
- // fail explicitly.
- goTimeout(t, 2e9, func() {
- got, _ := ioutil.ReadAll(conn)
- expectedSuffix := "\r\n\r\ntoo short"
- if !strings.HasSuffix(string(got), expectedSuffix) {
- t.Errorf("Expected output to end with %q; got response body %q",
- expectedSuffix, string(got))
- }
- })
-}
-
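-// testTcpConnectionCloses sends the raw request req to a server running h
-// and fails the test if the server does not close the connection once the
-// response has been read.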
-func testTcpConnectionCloses(t *testing.T, req string, h Handler) {
- s := httptest.NewServer(h)
- defer s.Close()
-
- conn, err := net.Dial("tcp", s.Listener.Addr().String())
- if err != nil {
- t.Fatal("dial error:", err)
- }
- defer conn.Close()
-
- _, err = fmt.Fprint(conn, req)
- if err != nil {
- t.Fatal("print error:", err)
- }
-
- r := bufio.NewReader(conn)
- _, err = ReadResponse(r, &Request{Method: "GET"})
- if err != nil {
- t.Fatal("ReadResponse error:", err)
- }
-
- success := make(chan bool)
- go func() {
- select {
- case <-time.After(5e9):
- t.Fatal("body not closed after 5s")
- case <-success:
- }
- }()
-
- _, err = ioutil.ReadAll(r)
- if err != nil {
- t.Fatal("read error:", err)
- }
-
- success <- true
-}
-
-// TestServeHTTP10Close verifies that HTTP/1.0 requests won't be kept alive.
-func TestServeHTTP10Close(t *testing.T) {
- testTcpConnectionCloses(t, "GET / HTTP/1.0\r\n\r\n", HandlerFunc(func(w ResponseWriter, r *Request) {
- ServeFile(w, r, "testdata/file")
- }))
-}
-
-// TestHandlersCanSetConnectionClose verifies that handlers can force a connection to close,
-// even for HTTP/1.1 requests.
-func TestHandlersCanSetConnectionClose11(t *testing.T) {
- testTcpConnectionCloses(t, "GET / HTTP/1.1\r\n\r\n", HandlerFunc(func(w ResponseWriter, r *Request) {
- w.Header().Set("Connection", "close")
- }))
-}
-
-func TestHandlersCanSetConnectionClose10(t *testing.T) {
- testTcpConnectionCloses(t, "GET / HTTP/1.0\r\nConnection: keep-alive\r\n\r\n", HandlerFunc(func(w ResponseWriter, r *Request) {
- w.Header().Set("Connection", "close")
- }))
-}
-
-func TestSetsRemoteAddr(t *testing.T) {
- ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
- fmt.Fprintf(w, "%s", r.RemoteAddr)
- }))
- defer ts.Close()
-
- res, err := Get(ts.URL)
- if err != nil {
- t.Fatalf("Get error: %v", err)
- }
- body, err := ioutil.ReadAll(res.Body)
- if err != nil {
- t.Fatalf("ReadAll error: %v", err)
- }
- ip := string(body)
- if !strings.HasPrefix(ip, "127.0.0.1:") && !strings.HasPrefix(ip, "[::1]:") {
- t.Fatalf("Expected local addr; got %q", ip)
- }
-}
-
-func TestChunkedResponseHeaders(t *testing.T) {
- log.SetOutput(ioutil.Discard) // is noisy otherwise
- defer log.SetOutput(os.Stderr)
-
- ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
- w.Header().Set("Content-Length", "intentional gibberish") // we check that this is deleted
- fmt.Fprintf(w, "I am a chunked response.")
- }))
- defer ts.Close()
-
- res, err := Get(ts.URL)
- if err != nil {
- t.Fatalf("Get error: %v", err)
- }
- if g, e := res.ContentLength, int64(-1); g != e {
- t.Errorf("expected ContentLength of %d; got %d", e, g)
- }
- if g, e := res.TransferEncoding, []string{"chunked"}; !reflect.DeepEqual(g, e) {
- t.Errorf("expected TransferEncoding of %v; got %v", e, g)
- }
- if _, haveCL := res.Header["Content-Length"]; haveCL {
- t.Errorf("Unexpected Content-Length")
- }
-}
-
-// Test304Responses verifies that 304s don't declare that they're
-// chunking in their response headers and aren't allowed to produce
-// output.
-func Test304Responses(t *testing.T) {
- ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
- w.WriteHeader(StatusNotModified)
- _, err := w.Write([]byte("illegal body"))
- if err != ErrBodyNotAllowed {
- t.Errorf("on Write, expected ErrBodyNotAllowed, got %v", err)
- }
- }))
- defer ts.Close()
- res, err := Get(ts.URL)
- if err != nil {
- t.Error(err)
- }
- if len(res.TransferEncoding) > 0 {
- t.Errorf("expected no TransferEncoding; got %v", res.TransferEncoding)
- }
- body, err := ioutil.ReadAll(res.Body)
- if err != nil {
- t.Error(err)
- }
- if len(body) > 0 {
- t.Errorf("got unexpected body %q", string(body))
- }
-}
-
-// TestHeadResponses verifies that responses to HEAD requests don't
-// declare that they're chunking in their response headers and aren't
-// allowed to produce output.
-func TestHeadResponses(t *testing.T) {
- ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
- _, err := w.Write([]byte("Ignored body"))
- if err != ErrBodyNotAllowed {
- t.Errorf("on Write, expected ErrBodyNotAllowed, got %v", err)
- }
-
- // Also exercise the ReaderFrom path
- _, err = io.Copy(w, strings.NewReader("Ignored body"))
- if err != ErrBodyNotAllowed {
- t.Errorf("on Copy, expected ErrBodyNotAllowed, got %v", err)
- }
- }))
- defer ts.Close()
- res, err := Head(ts.URL)
- if err != nil {
- t.Error(err)
- }
- if len(res.TransferEncoding) > 0 {
- t.Errorf("expected no TransferEncoding; got %v", res.TransferEncoding)
- }
- body, err := ioutil.ReadAll(res.Body)
- if err != nil {
- t.Error(err)
- }
- if len(body) > 0 {
- t.Errorf("got unexpected body %q", string(body))
- }
-}
-
-func TestTLSHandshakeTimeout(t *testing.T) {
- ts := httptest.NewUnstartedServer(HandlerFunc(func(w ResponseWriter, r *Request) {}))
- ts.Config.ReadTimeout = 250e6
- ts.StartTLS()
- defer ts.Close()
- conn, err := net.Dial("tcp", ts.Listener.Addr().String())
- if err != nil {
- t.Fatalf("Dial: %v", err)
- }
- defer conn.Close()
- goTimeout(t, 10e9, func() {
- var buf [1]byte
- n, err := conn.Read(buf[:])
- if err == nil || n != 0 {
- t.Errorf("Read = %d, %v; want an error and no bytes", n, err)
- }
- })
-}
-
-func TestTLSServer(t *testing.T) {
- ts := httptest.NewTLSServer(HandlerFunc(func(w ResponseWriter, r *Request) {
- if r.TLS != nil {
- w.Header().Set("X-TLS-Set", "true")
- if r.TLS.HandshakeComplete {
- w.Header().Set("X-TLS-HandshakeComplete", "true")
- }
- }
- }))
- defer ts.Close()
-
- // Connect an idle TCP connection to this server before we run
- // our real tests. This idle connection used to block forever
- // in the TLS handshake, preventing future connections from
- // being accepted. It may prevent future accidental blocking
- // in newConn.
- idleConn, err := net.Dial("tcp", ts.Listener.Addr().String())
- if err != nil {
- t.Fatalf("Dial: %v", err)
- }
- defer idleConn.Close()
- goTimeout(t, 10e9, func() {
- if !strings.HasPrefix(ts.URL, "https://") {
- t.Errorf("expected test TLS server to start with https://, got %q", ts.URL)
- return
- }
- noVerifyTransport := &Transport{
- TLSClientConfig: &tls.Config{
- InsecureSkipVerify: true,
- },
- }
- client := &Client{Transport: noVerifyTransport}
- res, err := client.Get(ts.URL)
- if err != nil {
- t.Error(err)
- return
- }
- if res == nil {
- t.Errorf("got nil Response")
- return
- }
- defer res.Body.Close()
- if res.Header.Get("X-TLS-Set") != "true" {
- t.Errorf("expected X-TLS-Set response header")
- return
- }
- if res.Header.Get("X-TLS-HandshakeComplete") != "true" {
- t.Errorf("expected X-TLS-HandshakeComplete header")
- }
- })
-}
-
-type serverExpectTest struct {
- contentLength int // of request body
- expectation string // e.g. "100-continue"
- readBody bool // whether handler should read the body (if false, sends StatusUnauthorized)
- expectedResponse string // expected substring in first line of http response
-}
-
-var serverExpectTests = []serverExpectTest{
- // Normal 100-continues, case-insensitive.
- {100, "100-continue", true, "100 Continue"},
- {100, "100-cOntInUE", true, "100 Continue"},
-
- // No 100-continue.
- {100, "", true, "200 OK"},
-
- // 100-continue but requesting client to deny us,
- // so it never reads the body.
- {100, "100-continue", false, "401 Unauthorized"},
- // Likewise without 100-continue:
- {100, "", false, "401 Unauthorized"},
-
- // Non-standard expectations are failures
- {0, "a-pony", false, "417 Expectation Failed"},
-
- // Expect-100 requested but no body
- {0, "100-continue", true, "400 Bad Request"},
-}
-
-// Tests that the server responds to the "Expect" request header
-// correctly.
-func TestServerExpect(t *testing.T) {
- ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
-		// Note: we avoid r.FormValue("readbody") here because for POST
-		// requests it would read from r.Body, which we only
-		// conditionally want to do.
- if strings.Contains(r.URL.RawPath, "readbody=true") {
- ioutil.ReadAll(r.Body)
- w.Write([]byte("Hi"))
- } else {
- w.WriteHeader(StatusUnauthorized)
- }
- }))
- defer ts.Close()
-
- runTest := func(test serverExpectTest) {
- conn, err := net.Dial("tcp", ts.Listener.Addr().String())
- if err != nil {
- t.Fatalf("Dial: %v", err)
- }
- defer conn.Close()
- sendf := func(format string, args ...interface{}) {
- _, err := fmt.Fprintf(conn, format, args...)
- if err != nil {
- t.Fatalf("On test %#v, error writing %q: %v", test, format, err)
- }
- }
- go func() {
- sendf("POST /?readbody=%v HTTP/1.1\r\n"+
- "Connection: close\r\n"+
- "Content-Length: %d\r\n"+
- "Expect: %s\r\nHost: foo\r\n\r\n",
- test.readBody, test.contentLength, test.expectation)
- if test.contentLength > 0 && strings.ToLower(test.expectation) != "100-continue" {
- body := strings.Repeat("A", test.contentLength)
- sendf(body)
- }
- }()
- bufr := bufio.NewReader(conn)
- line, err := bufr.ReadString('\n')
- if err != nil {
- t.Fatalf("ReadString: %v", err)
- }
- if !strings.Contains(line, test.expectedResponse) {
- t.Errorf("for test %#v got first line=%q", test, line)
- }
- }
-
- for _, test := range serverExpectTests {
- runTest(test)
- }
-}
-
-// Under a ~256KB (maxPostHandlerReadBytes) threshold, the server
-// should consume client request bodies that a handler didn't read.
-func TestServerUnreadRequestBodyLittle(t *testing.T) {
- conn := new(testConn)
- body := strings.Repeat("x", 100<<10)
- conn.readBuf.Write([]byte(fmt.Sprintf(
- "POST / HTTP/1.1\r\n"+
- "Host: test\r\n"+
- "Content-Length: %d\r\n"+
- "\r\n", len(body))))
- conn.readBuf.Write([]byte(body))
-
- done := make(chan bool)
-
- ls := &oneConnListener{conn}
- go Serve(ls, HandlerFunc(func(rw ResponseWriter, req *Request) {
- defer close(done)
- if conn.readBuf.Len() < len(body)/2 {
- t.Errorf("on request, read buffer length is %d; expected about 100 KB", conn.readBuf.Len())
- }
- rw.WriteHeader(200)
- if g, e := conn.readBuf.Len(), 0; g != e {
- t.Errorf("after WriteHeader, read buffer length is %d; want %d", g, e)
- }
- if c := rw.Header().Get("Connection"); c != "" {
- t.Errorf(`Connection header = %q; want ""`, c)
- }
- }))
- <-done
-}
-
-// Over a ~256KB (maxPostHandlerReadBytes) threshold, the server
-// should ignore client request bodies that a handler didn't read
-// and close the connection.
-func TestServerUnreadRequestBodyLarge(t *testing.T) {
- conn := new(testConn)
- body := strings.Repeat("x", 1<<20)
- conn.readBuf.Write([]byte(fmt.Sprintf(
- "POST / HTTP/1.1\r\n"+
- "Host: test\r\n"+
- "Content-Length: %d\r\n"+
- "\r\n", len(body))))
- conn.readBuf.Write([]byte(body))
-
- done := make(chan bool)
-
- ls := &oneConnListener{conn}
- go Serve(ls, HandlerFunc(func(rw ResponseWriter, req *Request) {
- defer close(done)
- if conn.readBuf.Len() < len(body)/2 {
- t.Errorf("on request, read buffer length is %d; expected about 1MB", conn.readBuf.Len())
- }
- rw.WriteHeader(200)
- if conn.readBuf.Len() < len(body)/2 {
- t.Errorf("post-WriteHeader, read buffer length is %d; expected about 1MB", conn.readBuf.Len())
- }
- if c := rw.Header().Get("Connection"); c != "close" {
- t.Errorf(`Connection header = %q; want "close"`, c)
- }
- }))
- <-done
-}
-
-func TestTimeoutHandler(t *testing.T) {
- sendHi := make(chan bool, 1)
- writeErrors := make(chan error, 1)
- sayHi := HandlerFunc(func(w ResponseWriter, r *Request) {
- <-sendHi
- _, werr := w.Write([]byte("hi"))
- writeErrors <- werr
- })
- timeout := make(chan int64, 1) // write to this to force timeouts
- ts := httptest.NewServer(NewTestTimeoutHandler(sayHi, timeout))
- defer ts.Close()
-
- // Succeed without timing out:
- sendHi <- true
- res, err := Get(ts.URL)
- if err != nil {
- t.Error(err)
- }
- if g, e := res.StatusCode, StatusOK; g != e {
- t.Errorf("got res.StatusCode %d; expected %d", g, e)
- }
- body, _ := ioutil.ReadAll(res.Body)
- if g, e := string(body), "hi"; g != e {
- t.Errorf("got body %q; expected %q", g, e)
- }
- if g := <-writeErrors; g != nil {
- t.Errorf("got unexpected Write error on first request: %v", g)
- }
-
- // Times out:
- timeout <- 1
- res, err = Get(ts.URL)
- if err != nil {
- t.Error(err)
- }
- if g, e := res.StatusCode, StatusServiceUnavailable; g != e {
- t.Errorf("got res.StatusCode %d; expected %d", g, e)
- }
- body, _ = ioutil.ReadAll(res.Body)
- if !strings.Contains(string(body), "<title>Timeout</title>") {
- t.Errorf("expected timeout body; got %q", string(body))
- }
-
- // Now make the previously-timed out handler speak again,
- // which verifies the panic is handled:
- sendHi <- true
- if g, e := <-writeErrors, ErrHandlerTimeout; g != e {
- t.Errorf("expected Write error of %v; got %v", e, g)
- }
-}
-
-// Verifies we don't path.Clean() on the wrong parts in redirects.
-func TestRedirectMunging(t *testing.T) {
- req, _ := NewRequest("GET", "http://example.com/", nil)
-
- resp := httptest.NewRecorder()
- Redirect(resp, req, "/foo?next=http://bar.com/", 302)
- if g, e := resp.Header().Get("Location"), "/foo?next=http://bar.com/"; g != e {
- t.Errorf("Location header was %q; want %q", g, e)
- }
-
- resp = httptest.NewRecorder()
- Redirect(resp, req, "http://localhost:8080/_ah/login?continue=http://localhost:8080/", 302)
- if g, e := resp.Header().Get("Location"), "http://localhost:8080/_ah/login?continue=http://localhost:8080/"; g != e {
- t.Errorf("Location header was %q; want %q", g, e)
- }
-}
-
-// TestZeroLengthPostAndResponse exercises an optimization done by the Transport:
-// when there is no body (either because the method doesn't permit a body, or an
-// explicit Content-Length of zero is present), then the transport can re-use the
-// connection immediately. But when it re-uses the connection, it typically closes
-// the previous request's body, which is not optimal for zero-length bodies,
-// as the client would then see http.ErrBodyReadAfterClose and not 0, io.EOF.
-func TestZeroLengthPostAndResponse(t *testing.T) {
- ts := httptest.NewServer(HandlerFunc(func(rw ResponseWriter, r *Request) {
- all, err := ioutil.ReadAll(r.Body)
- if err != nil {
- t.Fatalf("handler ReadAll: %v", err)
- }
- if len(all) != 0 {
- t.Errorf("handler got %d bytes; expected 0", len(all))
- }
- rw.Header().Set("Content-Length", "0")
- }))
- defer ts.Close()
-
- req, err := NewRequest("POST", ts.URL, strings.NewReader(""))
- if err != nil {
- t.Fatal(err)
- }
- req.ContentLength = 0
-
- var resp [5]*Response
- for i := range resp {
- resp[i], err = DefaultClient.Do(req)
- if err != nil {
- t.Fatalf("client post #%d: %v", i, err)
- }
- }
-
- for i := range resp {
- all, err := ioutil.ReadAll(resp[i].Body)
- if err != nil {
- t.Fatalf("req #%d: client ReadAll: %v", i, err)
- }
- if len(all) != 0 {
- t.Errorf("req #%d: client got %d bytes; expected 0", i, len(all))
- }
- }
-}
-
-func TestHandlerPanic(t *testing.T) {
- testHandlerPanic(t, false)
-}
-
-func TestHandlerPanicWithHijack(t *testing.T) {
- testHandlerPanic(t, true)
-}
-
-func testHandlerPanic(t *testing.T, withHijack bool) {
- // Unlike the other tests that set the log output to ioutil.Discard
- // to quiet the output, this test uses a pipe. The pipe serves three
- // purposes:
- //
- // 1) The log.Print from the http server (generated by the caught
- // panic) will go to the pipe instead of stderr, making the
- // output quiet.
- //
- // 2) We read from the pipe to verify that the handler
- // actually caught the panic and logged something.
- //
- // 3) The blocking Read call prevents this TestHandlerPanic
- // function from exiting before the HTTP server handler
-	//    finishes crashing. If this test function exited too
- // early (and its defer log.SetOutput(os.Stderr) ran),
- // then the crash output could spill into the next test.
- pr, pw := io.Pipe()
- log.SetOutput(pw)
- defer log.SetOutput(os.Stderr)
-
- ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
- if withHijack {
- rwc, _, err := w.(Hijacker).Hijack()
- if err != nil {
- t.Logf("unexpected error: %v", err)
- }
- defer rwc.Close()
- }
- panic("intentional death for testing")
- }))
- defer ts.Close()
- _, err := Get(ts.URL)
- if err == nil {
- t.Logf("expected an error")
- }
-
- // Do a blocking read on the log output pipe so its logging
- // doesn't bleed into the next test. But wait only 5 seconds
- // for it.
- done := make(chan bool)
- go func() {
- buf := make([]byte, 1024)
- _, err := pr.Read(buf)
- pr.Close()
- if err != nil {
- t.Fatal(err)
- }
- done <- true
- }()
- select {
- case <-done:
- return
- case <-time.After(5e9):
- t.Fatal("expected server handler to log an error")
- }
-}
-
-func TestNoDate(t *testing.T) {
- ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
- w.Header()["Date"] = nil
- }))
- defer ts.Close()
- res, err := Get(ts.URL)
- if err != nil {
- t.Fatal(err)
- }
- _, present := res.Header["Date"]
- if present {
- t.Fatalf("Expected no Date header; got %v", res.Header["Date"])
- }
-}
-
-func TestStripPrefix(t *testing.T) {
- h := HandlerFunc(func(w ResponseWriter, r *Request) {
- w.Header().Set("X-Path", r.URL.Path)
- })
- ts := httptest.NewServer(StripPrefix("/foo", h))
- defer ts.Close()
-
- res, err := Get(ts.URL + "/foo/bar")
- if err != nil {
- t.Fatal(err)
- }
- if g, e := res.Header.Get("X-Path"), "/bar"; g != e {
- t.Errorf("test 1: got %s, want %s", g, e)
- }
-
- res, err = Get(ts.URL + "/bar")
- if err != nil {
- t.Fatal(err)
- }
- if g, e := res.StatusCode, 404; g != e {
- t.Errorf("test 2: got status %v, want %v", g, e)
- }
-}
-
-func TestRequestLimit(t *testing.T) {
- ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
- t.Fatalf("didn't expect to get request in Handler")
- }))
- defer ts.Close()
- req, _ := NewRequest("GET", ts.URL, nil)
- var bytesPerHeader = len("header12345: val12345\r\n")
- for i := 0; i < ((DefaultMaxHeaderBytes+4096)/bytesPerHeader)+1; i++ {
- req.Header.Set(fmt.Sprintf("header%05d", i), fmt.Sprintf("val%05d", i))
- }
- res, err := DefaultClient.Do(req)
- if err != nil {
- // Some HTTP clients may fail on this undefined behavior (server replying and
- // closing the connection while the request is still being written), but
- // we do support it (at least currently), so we expect a response below.
- t.Fatalf("Do: %v", err)
- }
- if res.StatusCode != 413 {
- t.Fatalf("expected 413 response status; got: %d %s", res.StatusCode, res.Status)
- }
-}
-
-type neverEnding byte
-
-func (b neverEnding) Read(p []byte) (n int, err error) {
- for i := range p {
- p[i] = byte(b)
- }
- return len(p), nil
-}
-
-type countReader struct {
- r io.Reader
- n *int64
-}
-
-func (cr countReader) Read(p []byte) (n int, err error) {
- n, err = cr.r.Read(p)
- *cr.n += int64(n)
- return
-}
-
-func TestRequestBodyLimit(t *testing.T) {
- const limit = 1 << 20
- ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
- r.Body = MaxBytesReader(w, r.Body, limit)
- n, err := io.Copy(ioutil.Discard, r.Body)
- if err == nil {
- t.Errorf("expected error from io.Copy")
- }
- if n != limit {
- t.Errorf("io.Copy = %d, want %d", n, limit)
- }
- }))
- defer ts.Close()
-
- nWritten := int64(0)
- req, _ := NewRequest("POST", ts.URL, io.LimitReader(countReader{neverEnding('a'), &nWritten}, limit*200))
-
- // Send the POST, but don't care it succeeds or not. The
- // remote side is going to reply and then close the TCP
- // connection, and HTTP doesn't really define if that's
- // allowed or not. Some HTTP clients will get the response
- // and some (like ours, currently) will complain that the
- // request write failed, without reading the response.
- //
- // But that's okay, since what we're really testing is that
- // the remote side hung up on us before we wrote too much.
- _, _ = DefaultClient.Do(req)
-
- if nWritten > limit*100 {
- t.Errorf("handler restricted the request body to %d bytes, but client managed to write %d",
- limit, nWritten)
- }
-}
-
-// TestClientWriteShutdown tests that if the client shuts down the write
-// side of their TCP connection, the server doesn't send a 400 Bad Request.
-func TestClientWriteShutdown(t *testing.T) {
- ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {}))
- defer ts.Close()
- conn, err := net.Dial("tcp", ts.Listener.Addr().String())
- if err != nil {
- t.Fatalf("Dial: %v", err)
- }
- err = conn.(*net.TCPConn).CloseWrite()
- if err != nil {
- t.Fatalf("Dial: %v", err)
- }
- donec := make(chan bool)
- go func() {
- defer close(donec)
- bs, err := ioutil.ReadAll(conn)
- if err != nil {
- t.Fatalf("ReadAll: %v", err)
- }
- got := string(bs)
- if got != "" {
- t.Errorf("read %q from server; want nothing", got)
- }
- }()
- select {
- case <-donec:
- case <-time.After(10e9):
- t.Fatalf("timeout")
- }
-}
-
-// goTimeout runs f, failing t if f takes more than ns to complete.
-func goTimeout(t *testing.T, ns int64, f func()) {
- ch := make(chan bool, 2)
- timer := time.AfterFunc(ns, func() {
- t.Errorf("Timeout expired after %d ns", ns)
- ch <- true
- })
- defer timer.Stop()
- go func() {
- defer func() { ch <- true }()
- f()
- }()
- <-ch
-}
-
-type errorListener struct {
- errs []error
-}
-
-func (l *errorListener) Accept() (c net.Conn, err error) {
- if len(l.errs) == 0 {
- return nil, io.EOF
- }
- err = l.errs[0]
- l.errs = l.errs[1:]
- return
-}
-
-func (l *errorListener) Close() error {
- return nil
-}
-
-func (l *errorListener) Addr() net.Addr {
- return dummyAddr("test-address")
-}
-
-func TestAcceptMaxFds(t *testing.T) {
- log.SetOutput(ioutil.Discard) // is noisy otherwise
- defer log.SetOutput(os.Stderr)
-
- ln := &errorListener{[]error{
- &net.OpError{
- Op: "accept",
- Err: os.Errno(syscall.EMFILE),
- }}}
-	err := Serve(ln, HandlerFunc(func(ResponseWriter, *Request) {}))
- if err != io.EOF {
- t.Errorf("got error %v, want EOF", err)
- }
-}
-
-func BenchmarkClientServer(b *testing.B) {
- b.StopTimer()
- ts := httptest.NewServer(HandlerFunc(func(rw ResponseWriter, r *Request) {
- fmt.Fprintf(rw, "Hello world.\n")
- }))
- defer ts.Close()
- b.StartTimer()
-
- for i := 0; i < b.N; i++ {
- res, err := Get(ts.URL)
- if err != nil {
- panic("Get: " + err.Error())
- }
- all, err := ioutil.ReadAll(res.Body)
- if err != nil {
- panic("ReadAll: " + err.Error())
- }
- body := string(all)
- if body != "Hello world.\n" {
- panic("Got body: " + body)
- }
- }
-
- b.StopTimer()
-}
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// HTTP server. See RFC 2616.
-
-// TODO(rsc):
-// logging
-
-package http
-
-import (
- "bufio"
- "bytes"
- "crypto/rand"
- "crypto/tls"
- "errors"
- "fmt"
- "io"
- "io/ioutil"
- "log"
- "net"
- "path"
- "runtime/debug"
- "strconv"
- "strings"
- "sync"
- "time"
- "url"
-)
-
-// Errors introduced by the HTTP server.
-var (
- ErrWriteAfterFlush = errors.New("Conn.Write called after Flush")
- ErrBodyNotAllowed = errors.New("http: response status code does not allow body")
- ErrHijacked = errors.New("Conn has been hijacked")
- ErrContentLength = errors.New("Conn.Write wrote more than the declared Content-Length")
-)
-
-// Objects implementing the Handler interface can be
-// registered to serve a particular path or subtree
-// in the HTTP server.
-//
-// ServeHTTP should write reply headers and data to the ResponseWriter
-// and then return. Returning signals that the request is finished
-// and that the HTTP server can move on to the next request on
-// the connection.
-type Handler interface {
- ServeHTTP(ResponseWriter, *Request)
-}
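As a quick illustration of the Handler interface removed here, the following standalone sketch (not part of the patch, and assuming the post-Go1 "net/http" import path rather than the old "http" path this tree still uses) shows a user-defined type satisfying ServeHTTP and being registered with the package:

package main

import (
	"fmt"
	"log"
	"net/http"
)

// greeter satisfies the Handler interface: any type with a
// ServeHTTP(ResponseWriter, *Request) method can serve requests.
type greeter struct {
	name string
}

func (g greeter) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	fmt.Fprintf(w, "hello from %s\n", g.name)
}

func main() {
	// Register the handler on the default mux; address is illustrative.
	http.Handle("/greet", greeter{name: "example"})
	log.Fatal(http.ListenAndServe(":8080", nil))
}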
-
-// A ResponseWriter interface is used by an HTTP handler to
-// construct an HTTP response.
-type ResponseWriter interface {
- // Header returns the header map that will be sent by WriteHeader.
- // Changing the header after a call to WriteHeader (or Write) has
- // no effect.
- Header() Header
-
- // Write writes the data to the connection as part of an HTTP reply.
- // If WriteHeader has not yet been called, Write calls WriteHeader(http.StatusOK)
- // before writing the data.
- Write([]byte) (int, error)
-
- // WriteHeader sends an HTTP response header with status code.
- // If WriteHeader is not called explicitly, the first call to Write
- // will trigger an implicit WriteHeader(http.StatusOK).
- // Thus explicit calls to WriteHeader are mainly used to
- // send error codes.
- WriteHeader(int)
-}
-
-// The Flusher interface is implemented by ResponseWriters that allow
-// an HTTP handler to flush buffered data to the client.
-//
-// Note that even for ResponseWriters that support Flush,
-// if the client is connected through an HTTP proxy,
-// the buffered data may not reach the client until the response
-// completes.
-type Flusher interface {
- // Flush sends any buffered data to the client.
- Flush()
-}
-
-// The Hijacker interface is implemented by ResponseWriters that allow
-// an HTTP handler to take over the connection.
-type Hijacker interface {
- // Hijack lets the caller take over the connection.
- // After a call to Hijack(), the HTTP server library
- // will not do anything else with the connection.
- // It becomes the caller's responsibility to manage
- // and close the connection.
- Hijack() (net.Conn, *bufio.ReadWriter, error)
-}
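For the Hijacker interface, a minimal handler-side sketch (again not part of the patch, and assuming the "net/http" import path) mirrors the w.(Hijacker).Hijack() pattern used in the test file earlier in this patch:

package main

import (
	"log"
	"net/http"
)

// hijackingHandler takes over the raw connection via the Hijacker
// interface, after which the HTTP server stops touching it.
func hijackingHandler(w http.ResponseWriter, r *http.Request) {
	hj, ok := w.(http.Hijacker)
	if !ok {
		http.Error(w, "hijacking not supported", http.StatusInternalServerError)
		return
	}
	conn, bufrw, err := hj.Hijack()
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	defer conn.Close() // the caller's responsibility once hijacked
	bufrw.WriteString("hello over the raw connection\r\n")
	bufrw.Flush()
}

func main() {
	http.HandleFunc("/hijack", hijackingHandler)
	log.Fatal(http.ListenAndServe(":8080", nil))
}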
-
-// A conn represents the server side of an HTTP connection.
-type conn struct {
- remoteAddr string // network address of remote side
- server *Server // the Server on which the connection arrived
- rwc net.Conn // i/o connection
- lr *io.LimitedReader // io.LimitReader(rwc)
- buf *bufio.ReadWriter // buffered(lr,rwc), reading from bufio->limitReader->rwc
- hijacked bool // connection has been hijacked by handler
- tlsState *tls.ConnectionState // or nil when not using TLS
-	body       []byte               // initial reply body bytes, buffered for Content-Type sniffing
-}
-
-// A response represents the server side of an HTTP response.
-type response struct {
- conn *conn
- req *Request // request for this response
- chunking bool // using chunked transfer encoding for reply body
- wroteHeader bool // reply header has been written
- wroteContinue bool // 100 Continue response was written
- header Header // reply header parameters
- written int64 // number of bytes written in body
- contentLength int64 // explicitly-declared Content-Length; or -1
- status int // status code passed to WriteHeader
- needSniff bool // need to sniff to find Content-Type
-
- // close connection after this reply. set on request and
- // updated after response from handler if there's a
- // "Connection: keep-alive" response header and a
- // Content-Length.
- closeAfterReply bool
-
- // requestBodyLimitHit is set by requestTooLarge when
- // maxBytesReader hits its max size. It is checked in
-	// WriteHeader, to make sure we don't consume the
-	// remaining request body to try to advance to the next HTTP
- // request. Instead, when this is set, we stop doing
- // subsequent requests on this connection and stop reading
- // input from it.
- requestBodyLimitHit bool
-}
-
-// requestTooLarge is called by maxBytesReader when too much input has
-// been read from the client.
-func (w *response) requestTooLarge() {
- w.closeAfterReply = true
- w.requestBodyLimitHit = true
- if !w.wroteHeader {
- w.Header().Set("Connection", "close")
- }
-}
-
-type writerOnly struct {
- io.Writer
-}
-
-func (w *response) ReadFrom(src io.Reader) (n int64, err error) {
- // Flush before checking w.chunking, as Flush will call
- // WriteHeader if it hasn't been called yet, and WriteHeader
- // is what sets w.chunking.
- w.Flush()
- if !w.chunking && w.bodyAllowed() && !w.needSniff {
- if rf, ok := w.conn.rwc.(io.ReaderFrom); ok {
- n, err = rf.ReadFrom(src)
- w.written += n
- return
- }
- }
- // Fall back to default io.Copy implementation.
- // Use wrapper to hide w.ReadFrom from io.Copy.
- return io.Copy(writerOnly{w}, src)
-}
-
-// noLimit is an effective infinite upper bound for io.LimitedReader
-const noLimit int64 = (1 << 63) - 1
-
-// Create new connection from rwc.
-func (srv *Server) newConn(rwc net.Conn) (c *conn, err error) {
- c = new(conn)
- c.remoteAddr = rwc.RemoteAddr().String()
- c.server = srv
- c.rwc = rwc
- c.body = make([]byte, sniffLen)
- c.lr = io.LimitReader(rwc, noLimit).(*io.LimitedReader)
- br := bufio.NewReader(c.lr)
- bw := bufio.NewWriter(rwc)
- c.buf = bufio.NewReadWriter(br, bw)
- return c, nil
-}
-
-// DefaultMaxHeaderBytes is the maximum permitted size of the headers
-// in an HTTP request.
-// This can be overridden by setting Server.MaxHeaderBytes.
-const DefaultMaxHeaderBytes = 1 << 20 // 1 MB
-
-func (srv *Server) maxHeaderBytes() int {
- if srv.MaxHeaderBytes > 0 {
- return srv.MaxHeaderBytes
- }
- return DefaultMaxHeaderBytes
-}
-
-// wrapper around io.ReaderCloser which on first read, sends an
-// HTTP/1.1 100 Continue header
-type expectContinueReader struct {
- resp *response
- readCloser io.ReadCloser
- closed bool
-}
-
-func (ecr *expectContinueReader) Read(p []byte) (n int, err error) {
- if ecr.closed {
- return 0, errors.New("http: Read after Close on request Body")
- }
- if !ecr.resp.wroteContinue && !ecr.resp.conn.hijacked {
- ecr.resp.wroteContinue = true
- io.WriteString(ecr.resp.conn.buf, "HTTP/1.1 100 Continue\r\n\r\n")
- ecr.resp.conn.buf.Flush()
- }
- return ecr.readCloser.Read(p)
-}
-
-func (ecr *expectContinueReader) Close() error {
- ecr.closed = true
- return ecr.readCloser.Close()
-}
-
-// TimeFormat is the time format to use with
-// time.Parse and time.Time.Format when parsing
-// or generating times in HTTP headers.
-// It is like time.RFC1123 but hard codes GMT as the time zone.
-const TimeFormat = "Mon, 02 Jan 2006 15:04:05 GMT"
-
-var errTooLarge = errors.New("http: request too large")
-
-// Read next request from connection.
-func (c *conn) readRequest() (w *response, err error) {
- if c.hijacked {
- return nil, ErrHijacked
- }
- c.lr.N = int64(c.server.maxHeaderBytes()) + 4096 /* bufio slop */
- var req *Request
- if req, err = ReadRequest(c.buf.Reader); err != nil {
- if c.lr.N == 0 {
- return nil, errTooLarge
- }
- return nil, err
- }
- c.lr.N = noLimit
-
- req.RemoteAddr = c.remoteAddr
- req.TLS = c.tlsState
-
- w = new(response)
- w.conn = c
- w.req = req
- w.header = make(Header)
- w.contentLength = -1
- c.body = c.body[:0]
- return w, nil
-}
-
-func (w *response) Header() Header {
- return w.header
-}
-
-// maxPostHandlerReadBytes is the max number of Request.Body bytes not
-// consumed by a handler that the server will read from a client
-// in order to keep a connection alive. If there are more bytes
-// than this, the server, to be paranoid, instead sends a
-// "Connection: close" response.
-//
-// This number is approximately what a typical machine's TCP buffer
-// size is anyway. (if we have the bytes on the machine, we might as
-// well read them)
-const maxPostHandlerReadBytes = 256 << 10
-
-func (w *response) WriteHeader(code int) {
- if w.conn.hijacked {
- log.Print("http: response.WriteHeader on hijacked connection")
- return
- }
- if w.wroteHeader {
- log.Print("http: multiple response.WriteHeader calls")
- return
- }
- w.wroteHeader = true
- w.status = code
-
-	// Check for an explicit (and valid) Content-Length header.
- var hasCL bool
- var contentLength int64
- if clenStr := w.header.Get("Content-Length"); clenStr != "" {
- var err error
- contentLength, err = strconv.Atoi64(clenStr)
- if err == nil {
- hasCL = true
- } else {
- log.Printf("http: invalid Content-Length of %q sent", clenStr)
- w.header.Del("Content-Length")
- }
- }
-
- if w.req.wantsHttp10KeepAlive() && (w.req.Method == "HEAD" || hasCL) {
- _, connectionHeaderSet := w.header["Connection"]
- if !connectionHeaderSet {
- w.header.Set("Connection", "keep-alive")
- }
- } else if !w.req.ProtoAtLeast(1, 1) {
- // Client did not ask to keep connection alive.
- w.closeAfterReply = true
- }
-
- if w.header.Get("Connection") == "close" {
- w.closeAfterReply = true
- }
-
- // Per RFC 2616, we should consume the request body before
- // replying, if the handler hasn't already done so. But we
- // don't want to do an unbounded amount of reading here for
- // DoS reasons, so we only try up to a threshold.
- if w.req.ContentLength != 0 && !w.closeAfterReply {
- ecr, isExpecter := w.req.Body.(*expectContinueReader)
- if !isExpecter || ecr.resp.wroteContinue {
- n, _ := io.CopyN(ioutil.Discard, w.req.Body, maxPostHandlerReadBytes+1)
- if n >= maxPostHandlerReadBytes {
- w.requestTooLarge()
- w.header.Set("Connection", "close")
- } else {
- w.req.Body.Close()
- }
- }
- }
-
- if code == StatusNotModified {
- // Must not have body.
- for _, header := range []string{"Content-Type", "Content-Length", "Transfer-Encoding"} {
- if w.header.Get(header) != "" {
- // TODO: return an error if WriteHeader gets a return parameter
- // or set a flag on w to make future Writes() write an error page?
- // for now just log and drop the header.
- log.Printf("http: StatusNotModified response with header %q defined", header)
- w.header.Del(header)
- }
- }
- } else {
- // If no content type, apply sniffing algorithm to body.
- if w.header.Get("Content-Type") == "" {
- w.needSniff = true
- }
- }
-
- if _, ok := w.header["Date"]; !ok {
- w.Header().Set("Date", time.UTC().Format(TimeFormat))
- }
-
- te := w.header.Get("Transfer-Encoding")
- hasTE := te != ""
- if hasCL && hasTE && te != "identity" {
- // TODO: return an error if WriteHeader gets a return parameter
- // For now just ignore the Content-Length.
- log.Printf("http: WriteHeader called with both Transfer-Encoding of %q and a Content-Length of %d",
- te, contentLength)
- w.header.Del("Content-Length")
- hasCL = false
- }
-
- if w.req.Method == "HEAD" || code == StatusNotModified {
- // do nothing
- } else if hasCL {
- w.contentLength = contentLength
- w.header.Del("Transfer-Encoding")
- } else if w.req.ProtoAtLeast(1, 1) {
- // HTTP/1.1 or greater: use chunked transfer encoding
- // to avoid closing the connection at EOF.
- // TODO: this blows away any custom or stacked Transfer-Encoding they
- // might have set. Deal with that as need arises once we have a valid
- // use case.
- w.chunking = true
- w.header.Set("Transfer-Encoding", "chunked")
- } else {
- // HTTP version < 1.1: cannot do chunked transfer
- // encoding and we don't know the Content-Length so
- // signal EOF by closing connection.
- w.closeAfterReply = true
- w.header.Del("Transfer-Encoding") // in case already set
- }
-
- // Cannot use Content-Length with non-identity Transfer-Encoding.
- if w.chunking {
- w.header.Del("Content-Length")
- }
- if !w.req.ProtoAtLeast(1, 0) {
- return
- }
- proto := "HTTP/1.0"
- if w.req.ProtoAtLeast(1, 1) {
- proto = "HTTP/1.1"
- }
- codestring := strconv.Itoa(code)
- text, ok := statusText[code]
- if !ok {
- text = "status code " + codestring
- }
- io.WriteString(w.conn.buf, proto+" "+codestring+" "+text+"\r\n")
- w.header.Write(w.conn.buf)
-
- // If we need to sniff the body, leave the header open.
- // Otherwise, end it here.
- if !w.needSniff {
- io.WriteString(w.conn.buf, "\r\n")
- }
-}
-
-// sniff uses the first block of written data,
-// stored in w.conn.body, to decide the Content-Type
-// for the HTTP body.
-func (w *response) sniff() {
- if !w.needSniff {
- return
- }
- w.needSniff = false
-
- data := w.conn.body
- fmt.Fprintf(w.conn.buf, "Content-Type: %s\r\n\r\n", DetectContentType(data))
-
- if len(data) == 0 {
- return
- }
- if w.chunking {
- fmt.Fprintf(w.conn.buf, "%x\r\n", len(data))
- }
- _, err := w.conn.buf.Write(data)
- if w.chunking && err == nil {
- io.WriteString(w.conn.buf, "\r\n")
- }
-}
-
-// bodyAllowed returns true if a Write is allowed for this response type.
-// It's illegal to call this before the header has been flushed.
-func (w *response) bodyAllowed() bool {
- if !w.wroteHeader {
- panic("")
- }
- return w.status != StatusNotModified && w.req.Method != "HEAD"
-}
-
-func (w *response) Write(data []byte) (n int, err error) {
- if w.conn.hijacked {
- log.Print("http: response.Write on hijacked connection")
- return 0, ErrHijacked
- }
- if !w.wroteHeader {
- w.WriteHeader(StatusOK)
- }
- if len(data) == 0 {
- return 0, nil
- }
- if !w.bodyAllowed() {
- return 0, ErrBodyNotAllowed
- }
-
- w.written += int64(len(data)) // ignoring errors, for errorKludge
- if w.contentLength != -1 && w.written > w.contentLength {
- return 0, ErrContentLength
- }
-
- var m int
- if w.needSniff {
- // We need to sniff the beginning of the output to
- // determine the content type. Accumulate the
- // initial writes in w.conn.body.
- // Cap m so that append won't allocate.
-		m = cap(w.conn.body) - len(w.conn.body)
- if m > len(data) {
- m = len(data)
- }
- w.conn.body = append(w.conn.body, data[:m]...)
- data = data[m:]
- if len(data) == 0 {
- // Copied everything into the buffer.
- // Wait for next write.
- return m, nil
- }
-
- // Filled the buffer; more data remains.
- // Sniff the content (flushes the buffer)
- // and then proceed with the remainder
- // of the data as a normal Write.
- // Calling sniff clears needSniff.
- w.sniff()
- }
-
- // TODO(rsc): if chunking happened after the buffering,
- // then there would be fewer chunk headers.
- // On the other hand, it would make hijacking more difficult.
- if w.chunking {
- fmt.Fprintf(w.conn.buf, "%x\r\n", len(data)) // TODO(rsc): use strconv not fmt
- }
- n, err = w.conn.buf.Write(data)
- if err == nil && w.chunking {
- if n != len(data) {
- err = io.ErrShortWrite
- }
- if err == nil {
- io.WriteString(w.conn.buf, "\r\n")
- }
- }
-
- return m + n, err
-}
-
-func (w *response) finishRequest() {
- // If this was an HTTP/1.0 request with keep-alive and we sent a Content-Length
- // back, we can make this a keep-alive response ...
- if w.req.wantsHttp10KeepAlive() {
- sentLength := w.header.Get("Content-Length") != ""
- if sentLength && w.header.Get("Connection") == "keep-alive" {
- w.closeAfterReply = false
- }
- }
- if !w.wroteHeader {
- w.WriteHeader(StatusOK)
- }
- if w.needSniff {
- w.sniff()
- }
- if w.chunking {
- io.WriteString(w.conn.buf, "0\r\n")
- // trailer key/value pairs, followed by blank line
- io.WriteString(w.conn.buf, "\r\n")
- }
- w.conn.buf.Flush()
- // Close the body, unless we're about to close the whole TCP connection
- // anyway.
- if !w.closeAfterReply {
- w.req.Body.Close()
- }
- if w.req.MultipartForm != nil {
- w.req.MultipartForm.RemoveAll()
- }
-
- if w.contentLength != -1 && w.contentLength != w.written {
- // Did not write enough. Avoid getting out of sync.
- w.closeAfterReply = true
- }
-}
-
-func (w *response) Flush() {
- if !w.wroteHeader {
- w.WriteHeader(StatusOK)
- }
- w.sniff()
- w.conn.buf.Flush()
-}
-
-// Close the connection.
-func (c *conn) close() {
- if c.buf != nil {
- c.buf.Flush()
- c.buf = nil
- }
- if c.rwc != nil {
- c.rwc.Close()
- c.rwc = nil
- }
-}
-
-// Serve a new connection.
-func (c *conn) serve() {
- defer func() {
- err := recover()
- if err == nil {
- return
- }
- if c.rwc != nil { // may be nil if connection hijacked
- c.rwc.Close()
- }
-
- var buf bytes.Buffer
- fmt.Fprintf(&buf, "http: panic serving %v: %v\n", c.remoteAddr, err)
- buf.Write(debug.Stack())
- log.Print(buf.String())
- }()
-
- if tlsConn, ok := c.rwc.(*tls.Conn); ok {
- if err := tlsConn.Handshake(); err != nil {
- c.close()
- return
- }
- c.tlsState = new(tls.ConnectionState)
- *c.tlsState = tlsConn.ConnectionState()
- }
-
- for {
- w, err := c.readRequest()
- if err != nil {
- msg := "400 Bad Request"
- if err == errTooLarge {
- // Their HTTP client may or may not be
- // able to read this if we're
- // responding to them and hanging up
- // while they're still writing their
- // request. Undefined behavior.
- msg = "413 Request Entity Too Large"
- } else if err == io.ErrUnexpectedEOF {
- break // Don't reply
- } else if neterr, ok := err.(net.Error); ok && neterr.Timeout() {
- break // Don't reply
- }
- fmt.Fprintf(c.rwc, "HTTP/1.1 %s\r\n\r\n", msg)
- break
- }
-
- // Expect 100 Continue support
- req := w.req
- if req.expectsContinue() {
- if req.ProtoAtLeast(1, 1) {
- // Wrap the Body reader with one that replies on the connection
- req.Body = &expectContinueReader{readCloser: req.Body, resp: w}
- }
- if req.ContentLength == 0 {
- w.Header().Set("Connection", "close")
- w.WriteHeader(StatusBadRequest)
- w.finishRequest()
- break
- }
- req.Header.Del("Expect")
- } else if req.Header.Get("Expect") != "" {
- // TODO(bradfitz): let ServeHTTP handlers handle
- // requests with non-standard expectation[s]? Seems
- // theoretical at best, and doesn't fit into the
- // current ServeHTTP model anyway. We'd need to
- // make the ResponseWriter an optional
- // "ExpectReplier" interface or something.
- //
- // For now we'll just obey RFC 2616 14.20 which says
- // "If a server receives a request containing an
- // Expect field that includes an expectation-
- // extension that it does not support, it MUST
- // respond with a 417 (Expectation Failed) status."
- w.Header().Set("Connection", "close")
- w.WriteHeader(StatusExpectationFailed)
- w.finishRequest()
- break
- }
-
- handler := c.server.Handler
- if handler == nil {
- handler = DefaultServeMux
- }
-
- // HTTP cannot have multiple simultaneous active requests.[*]
- // Until the server replies to this request, it can't read another,
- // so we might as well run the handler in this goroutine.
- // [*] Not strictly true: HTTP pipelining. We could let them all process
- // in parallel even if their responses need to be serialized.
- handler.ServeHTTP(w, w.req)
- if c.hijacked {
- return
- }
- w.finishRequest()
- if w.closeAfterReply {
- break
- }
- }
- c.close()
-}
-
-// Hijack implements the Hijacker.Hijack method. Our response is both a ResponseWriter
-// and a Hijacker.
-func (w *response) Hijack() (rwc net.Conn, buf *bufio.ReadWriter, err error) {
- if w.conn.hijacked {
- return nil, nil, ErrHijacked
- }
- w.conn.hijacked = true
- rwc = w.conn.rwc
- buf = w.conn.buf
- w.conn.rwc = nil
- w.conn.buf = nil
- return
-}
-
-// The HandlerFunc type is an adapter to allow the use of
-// ordinary functions as HTTP handlers. If f is a function
-// with the appropriate signature, HandlerFunc(f) is a
-// Handler object that calls f.
-type HandlerFunc func(ResponseWriter, *Request)
-
-// ServeHTTP calls f(w, r).
-func (f HandlerFunc) ServeHTTP(w ResponseWriter, r *Request) {
- f(w, r)
-}
-
-// Helper handlers
-
-// Error replies to the request with the specified error message and HTTP code.
-func Error(w ResponseWriter, error string, code int) {
- w.Header().Set("Content-Type", "text/plain; charset=utf-8")
- w.WriteHeader(code)
- fmt.Fprintln(w, error)
-}
-
-// NotFound replies to the request with an HTTP 404 not found error.
-func NotFound(w ResponseWriter, r *Request) { Error(w, "404 page not found", StatusNotFound) }
-
-// NotFoundHandler returns a simple request handler
-// that replies to each request with a ``404 page not found'' reply.
-func NotFoundHandler() Handler { return HandlerFunc(NotFound) }
-
-// StripPrefix returns a handler that serves HTTP requests
-// by removing the given prefix from the request URL's Path
-// and invoking the handler h. StripPrefix handles a
-// request for a path that doesn't begin with prefix by
-// replying with an HTTP 404 not found error.
-func StripPrefix(prefix string, h Handler) Handler {
- return HandlerFunc(func(w ResponseWriter, r *Request) {
- if !strings.HasPrefix(r.URL.Path, prefix) {
- NotFound(w, r)
- return
- }
- r.URL.Path = r.URL.Path[len(prefix):]
- h.ServeHTTP(w, r)
- })
-}
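A typical use of StripPrefix is mounting a file server under a URL prefix. A hedged sketch follows (directory name and port are illustrative, and it assumes the "net/http" import path):

package main

import (
	"log"
	"net/http"
)

func main() {
	// Requests for /static/css/site.css reach the file server as
	// /css/site.css; paths not under /static/ get a 404 from StripPrefix.
	fs := http.FileServer(http.Dir("./public"))
	http.Handle("/static/", http.StripPrefix("/static", fs))
	log.Fatal(http.ListenAndServe(":8080", nil))
}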
-
-// Redirect replies to the request with a redirect to url,
-// which may be a path relative to the request path.
-func Redirect(w ResponseWriter, r *Request, urlStr string, code int) {
- if u, err := url.Parse(urlStr); err == nil {
- // If url was relative, make absolute by
- // combining with request path.
- // The browser would probably do this for us,
- // but doing it ourselves is more reliable.
-
- // NOTE(rsc): RFC 2616 says that the Location
- // line must be an absolute URI, like
- // "http://www.google.com/redirect/",
- // not a path like "/redirect/".
- // Unfortunately, we don't know what to
- // put in the host name section to get the
- // client to connect to us again, so we can't
- // know the right absolute URI to send back.
- // Because of this problem, no one pays attention
- // to the RFC; they all send back just a new path.
- // So do we.
- oldpath := r.URL.Path
- if oldpath == "" { // should not happen, but avoid a crash if it does
- oldpath = "/"
- }
- if u.Scheme == "" {
- // no leading http://server
- if urlStr == "" || urlStr[0] != '/' {
- // make relative path absolute
- olddir, _ := path.Split(oldpath)
- urlStr = olddir + urlStr
- }
-
- var query string
- if i := strings.Index(urlStr, "?"); i != -1 {
- urlStr, query = urlStr[:i], urlStr[i:]
- }
-
- // clean up but preserve trailing slash
- trailing := urlStr[len(urlStr)-1] == '/'
- urlStr = path.Clean(urlStr)
- if trailing && urlStr[len(urlStr)-1] != '/' {
- urlStr += "/"
- }
- urlStr += query
- }
- }
-
- w.Header().Set("Location", urlStr)
- w.WriteHeader(code)
-
- // RFC2616 recommends that a short note "SHOULD" be included in the
- // response because older user agents may not understand 301/307.
- // Shouldn't send the response for POST or HEAD; that leaves GET.
- if r.Method == "GET" {
- note := "<a href=\"" + htmlEscape(urlStr) + "\">" + statusText[code] + "</a>.\n"
- fmt.Fprintln(w, note)
- }
-}
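A small sketch of Redirect in an ordinary handler, showing the relative-path form the comment above describes (paths and port are illustrative, "net/http" import path assumed):

package main

import (
	"log"
	"net/http"
)

func main() {
	http.HandleFunc("/old", func(w http.ResponseWriter, r *http.Request) {
		// Relative target: resolved against the request path as described above.
		http.Redirect(w, r, "/new", http.StatusMovedPermanently)
	})
	http.HandleFunc("/new", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("moved here\n"))
	})
	log.Fatal(http.ListenAndServe(":8080", nil))
}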
-
-var htmlReplacer = strings.NewReplacer(
-	"&", "&amp;",
-	"<", "&lt;",
-	">", "&gt;",
-	`"`, "&#34;",
-	"'", "&#39;",
-)
-
-func htmlEscape(s string) string {
- return htmlReplacer.Replace(s)
-}
-
-// Redirect to a fixed URL
-type redirectHandler struct {
- url string
- code int
-}
-
-func (rh *redirectHandler) ServeHTTP(w ResponseWriter, r *Request) {
- Redirect(w, r, rh.url, rh.code)
-}
-
-// RedirectHandler returns a request handler that redirects
-// each request it receives to the given url using the given
-// status code.
-func RedirectHandler(url string, code int) Handler {
- return &redirectHandler{url, code}
-}
-
-// ServeMux is an HTTP request multiplexer.
-// It matches the URL of each incoming request against a list of registered
-// patterns and calls the handler for the pattern that
-// most closely matches the URL.
-//
-// Patterns name fixed, rooted paths, like "/favicon.ico",
-// or rooted subtrees, like "/images/" (note the trailing slash).
-// Longer patterns take precedence over shorter ones, so that
-// if there are handlers registered for both "/images/"
-// and "/images/thumbnails/", the latter handler will be
-// called for paths beginning "/images/thumbnails/" and the
-// former will receive requests for any other paths in the
-// "/images/" subtree.
-//
-// Patterns may optionally begin with a host name, restricting matches to
-// URLs on that host only. Host-specific patterns take precedence over
-// general patterns, so that a handler might register for the two patterns
-// "/codesearch" and "codesearch.google.com/" without also taking over
-// requests for "http://www.google.com/".
-//
-// ServeMux also takes care of sanitizing the URL request path,
-// redirecting any request containing . or .. elements to an
-// equivalent .- and ..-free URL.
-type ServeMux struct {
- m map[string]Handler
-}
-
-// NewServeMux allocates and returns a new ServeMux.
-func NewServeMux() *ServeMux { return &ServeMux{make(map[string]Handler)} }
-
-// DefaultServeMux is the default ServeMux used by Serve.
-var DefaultServeMux = NewServeMux()
-
-// Does path match pattern?
-func pathMatch(pattern, path string) bool {
- if len(pattern) == 0 {
- // should not happen
- return false
- }
- n := len(pattern)
- if pattern[n-1] != '/' {
- return pattern == path
- }
- return len(path) >= n && path[0:n] == pattern
-}
-
-// Return the canonical path for p, eliminating . and .. elements.
-func cleanPath(p string) string {
- if p == "" {
- return "/"
- }
- if p[0] != '/' {
- p = "/" + p
- }
- np := path.Clean(p)
- // path.Clean removes trailing slash except for root;
- // put the trailing slash back if necessary.
- if p[len(p)-1] == '/' && np != "/" {
- np += "/"
- }
- return np
-}
-
-// Find a handler on a handler map given a path string
-// Most-specific (longest) pattern wins
-func (mux *ServeMux) match(path string) Handler {
- var h Handler
- var n = 0
- for k, v := range mux.m {
- if !pathMatch(k, path) {
- continue
- }
- if h == nil || len(k) > n {
- n = len(k)
- h = v
- }
- }
- return h
-}
-
-// ServeHTTP dispatches the request to the handler whose
-// pattern most closely matches the request URL.
-func (mux *ServeMux) ServeHTTP(w ResponseWriter, r *Request) {
- // Clean path to canonical form and redirect.
- if p := cleanPath(r.URL.Path); p != r.URL.Path {
- w.Header().Set("Location", p)
- w.WriteHeader(StatusMovedPermanently)
- return
- }
- // Host-specific pattern takes precedence over generic ones
- h := mux.match(r.Host + r.URL.Path)
- if h == nil {
- h = mux.match(r.URL.Path)
- }
- if h == nil {
- h = NotFoundHandler()
- }
- h.ServeHTTP(w, r)
-}
-
-// Handle registers the handler for the given pattern.
-func (mux *ServeMux) Handle(pattern string, handler Handler) {
- if pattern == "" {
- panic("http: invalid pattern " + pattern)
- }
-
- mux.m[pattern] = handler
-
- // Helpful behavior:
- // If pattern is /tree/, insert permanent redirect for /tree.
- n := len(pattern)
- if n > 0 && pattern[n-1] == '/' {
- mux.m[pattern[0:n-1]] = RedirectHandler(pattern, StatusMovedPermanently)
- }
-}
-
-// HandleFunc registers the handler function for the given pattern.
-func (mux *ServeMux) HandleFunc(pattern string, handler func(ResponseWriter, *Request)) {
- mux.Handle(pattern, HandlerFunc(handler))
-}
-
-// Handle registers the handler for the given pattern
-// in the DefaultServeMux.
-// The documentation for ServeMux explains how patterns are matched.
-func Handle(pattern string, handler Handler) { DefaultServeMux.Handle(pattern, handler) }
-
-// HandleFunc registers the handler function for the given pattern
-// in the DefaultServeMux.
-// The documentation for ServeMux explains how patterns are matched.
-func HandleFunc(pattern string, handler func(ResponseWriter, *Request)) {
- DefaultServeMux.HandleFunc(pattern, handler)
-}
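To make the ServeMux matching rules above concrete, here is a hedged sketch (illustrative paths and port, "net/http" import path assumed) registering a rooted subtree, a longer and therefore more specific subtree, and a fixed path on a fresh mux; longer patterns win, exactly as the ServeMux comment describes:

package main

import (
	"fmt"
	"log"
	"net/http"
)

func main() {
	mux := http.NewServeMux()

	// Rooted subtree: matches /images/ and everything below it.
	mux.HandleFunc("/images/", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintf(w, "image path: %s\n", r.URL.Path)
	})
	// Longer pattern takes precedence for paths under /images/thumbnails/.
	mux.HandleFunc("/images/thumbnails/", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "thumbnail")
	})
	// Fixed path: matches /favicon.ico only.
	mux.HandleFunc("/favicon.ico", func(w http.ResponseWriter, r *http.Request) {
		http.NotFound(w, r)
	})

	log.Fatal(http.ListenAndServe(":8080", mux))
}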
-
-// Serve accepts incoming HTTP connections on the listener l,
-// creating a new service thread for each. The service threads
-// read requests and then call handler to reply to them.
-// Handler is typically nil, in which case the DefaultServeMux is used.
-func Serve(l net.Listener, handler Handler) error {
- srv := &Server{Handler: handler}
- return srv.Serve(l)
-}
-
-// A Server defines parameters for running an HTTP server.
-type Server struct {
- Addr string // TCP address to listen on, ":http" if empty
- Handler Handler // handler to invoke, http.DefaultServeMux if nil
- ReadTimeout int64 // the net.Conn.SetReadTimeout value for new connections
- WriteTimeout int64 // the net.Conn.SetWriteTimeout value for new connections
- MaxHeaderBytes int // maximum size of request headers, DefaultMaxHeaderBytes if 0
-}
-
-// ListenAndServe listens on the TCP network address srv.Addr and then
-// calls Serve to handle requests on incoming connections. If
-// srv.Addr is blank, ":http" is used.
-func (srv *Server) ListenAndServe() error {
- addr := srv.Addr
- if addr == "" {
- addr = ":http"
- }
- l, e := net.Listen("tcp", addr)
- if e != nil {
- return e
- }
- return srv.Serve(l)
-}
-
-// Serve accepts incoming connections on the Listener l, creating a
-// new service thread for each. The service threads read requests and
-// then call srv.Handler to reply to them.
-func (srv *Server) Serve(l net.Listener) error {
- defer l.Close()
- for {
- rw, e := l.Accept()
- if e != nil {
- if ne, ok := e.(net.Error); ok && ne.Temporary() {
- log.Printf("http: Accept error: %v", e)
- continue
- }
- return e
- }
- if srv.ReadTimeout != 0 {
- rw.SetReadTimeout(srv.ReadTimeout)
- }
- if srv.WriteTimeout != 0 {
- rw.SetWriteTimeout(srv.WriteTimeout)
- }
- c, err := srv.newConn(rw)
- if err != nil {
- continue
- }
- go c.serve()
- }
- panic("not reached")
-}
-
-// ListenAndServe listens on the TCP network address addr
-// and then calls Serve with handler to handle requests
-// on incoming connections. Handler is typically nil,
-// in which case the DefaultServeMux is used.
-//
-// A trivial example server is:
-//
-// package main
-//
-// import (
-// "http"
-// "io"
-// "log"
-// )
-//
-// // hello world, the web server
-// func HelloServer(w http.ResponseWriter, req *http.Request) {
-// io.WriteString(w, "hello, world!\n")
-// }
-//
-// func main() {
-// http.HandleFunc("/hello", HelloServer)
-// err := http.ListenAndServe(":12345", nil)
-// if err != nil {
-//		log.Fatal("ListenAndServe: ", err)
-// }
-// }
-func ListenAndServe(addr string, handler Handler) error {
- server := &Server{Addr: addr, Handler: handler}
- return server.ListenAndServe()
-}
-
-// ListenAndServeTLS acts identically to ListenAndServe, except that it
-// expects HTTPS connections. Additionally, files containing a certificate and
-// matching private key for the server must be provided. If the certificate
-// is signed by a certificate authority, the certFile should be the concatenation
-// of the server's certificate followed by the CA's certificate.
-//
-// A trivial example server is:
-//
-// import (
-// "http"
-// "log"
-// )
-//
-// func handler(w http.ResponseWriter, req *http.Request) {
-// w.Header().Set("Content-Type", "text/plain")
-// w.Write([]byte("This is an example server.\n"))
-// }
-//
-// func main() {
-// http.HandleFunc("/", handler)
-// log.Printf("About to listen on 10443. Go to https://127.0.0.1:10443/")
-// err := http.ListenAndServeTLS(":10443", "cert.pem", "key.pem", nil)
-// if err != nil {
-// log.Fatal(err)
-// }
-// }
-//
-// One can use generate_cert.go in crypto/tls to generate cert.pem and key.pem.
-func ListenAndServeTLS(addr string, certFile string, keyFile string, handler Handler) error {
- server := &Server{Addr: addr, Handler: handler}
- return server.ListenAndServeTLS(certFile, keyFile)
-}
-
-// ListenAndServeTLS listens on the TCP network address srv.Addr and
-// then calls Serve to handle requests on incoming TLS connections.
-//
-// Filenames containing a certificate and matching private key for
-// the server must be provided. If the certificate is signed by a
-// certificate authority, the certFile should be the concatenation
-// of the server's certificate followed by the CA's certificate.
-//
-// If srv.Addr is blank, ":https" is used.
-func (s *Server) ListenAndServeTLS(certFile, keyFile string) error {
- addr := s.Addr
- if addr == "" {
- addr = ":https"
- }
- config := &tls.Config{
- Rand: rand.Reader,
- Time: time.Seconds,
- NextProtos: []string{"http/1.1"},
- }
-
- var err error
- config.Certificates = make([]tls.Certificate, 1)
- config.Certificates[0], err = tls.LoadX509KeyPair(certFile, keyFile)
- if err != nil {
- return err
- }
-
- conn, err := net.Listen("tcp", addr)
- if err != nil {
- return err
- }
-
- tlsListener := tls.NewListener(conn, config)
- return s.Serve(tlsListener)
-}
-
-// TimeoutHandler returns a Handler that runs h with the given time limit.
-//
-// The new Handler calls h.ServeHTTP to handle each request, but if a
-// call runs for more than ns nanoseconds, the handler responds with
-// a 503 Service Unavailable error and the given message in its body.
-// (If msg is empty, a suitable default message will be sent.)
-// After such a timeout, writes by h to its ResponseWriter will return
-// ErrHandlerTimeout.
-func TimeoutHandler(h Handler, ns int64, msg string) Handler {
- f := func() <-chan int64 {
- return time.After(ns)
- }
- return &timeoutHandler{h, f, msg}
-}
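A sketch of wrapping a slow handler with TimeoutHandler follows. Note that the version in this patch takes a nanosecond count as an int64, while later versions of the package take a time.Duration; the example below assumes the later, duration-based signature and says so in a comment:

package main

import (
	"log"
	"net/http"
	"time"
)

func main() {
	slow := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		time.Sleep(2 * time.Second) // deliberately slower than the limit
		w.Write([]byte("finished"))
	})
	// Requests taking longer than one second get a 503 with the given body.
	// The TimeoutHandler in this patch takes nanoseconds (int64) instead of
	// a time.Duration, so the literal would be 1e9 in that form.
	h := http.TimeoutHandler(slow, 1*time.Second, "request timed out")
	log.Fatal(http.ListenAndServe(":8080", h))
}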
-
-// ErrHandlerTimeout is returned on ResponseWriter Write calls
-// in handlers which have timed out.
-var ErrHandlerTimeout = errors.New("http: Handler timeout")
-
-type timeoutHandler struct {
- handler Handler
- timeout func() <-chan int64 // returns channel producing a timeout
- body string
-}
-
-func (h *timeoutHandler) errorBody() string {
- if h.body != "" {
- return h.body
- }
- return "<html><head><title>Timeout</title></head><body><h1>Timeout</h1></body></html>"
-}
-
-func (h *timeoutHandler) ServeHTTP(w ResponseWriter, r *Request) {
- done := make(chan bool)
- tw := &timeoutWriter{w: w}
- go func() {
- h.handler.ServeHTTP(tw, r)
- done <- true
- }()
- select {
- case <-done:
- return
- case <-h.timeout():
- tw.mu.Lock()
- defer tw.mu.Unlock()
- if !tw.wroteHeader {
- tw.w.WriteHeader(StatusServiceUnavailable)
- tw.w.Write([]byte(h.errorBody()))
- }
- tw.timedOut = true
- }
-}
-
-type timeoutWriter struct {
- w ResponseWriter
-
- mu sync.Mutex
- timedOut bool
- wroteHeader bool
-}
-
-func (tw *timeoutWriter) Header() Header {
- return tw.w.Header()
-}
-
-func (tw *timeoutWriter) Write(p []byte) (int, error) {
- tw.mu.Lock()
- timedOut := tw.timedOut
- tw.mu.Unlock()
- if timedOut {
- return 0, ErrHandlerTimeout
- }
- return tw.w.Write(p)
-}
-
-func (tw *timeoutWriter) WriteHeader(code int) {
- tw.mu.Lock()
- if tw.timedOut || tw.wroteHeader {
- tw.mu.Unlock()
- return
- }
- tw.wroteHeader = true
- tw.mu.Unlock()
- tw.w.WriteHeader(code)
-}
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http
-
-import (
- "bytes"
- "encoding/binary"
-)
-
-// Content-type sniffing algorithm.
-// References in this file refer to this draft specification:
-// http://tools.ietf.org/html/draft-ietf-websec-mime-sniff-03
-
-// The algorithm prefers to use sniffLen bytes to make its decision.
-const sniffLen = 512
-
-// DetectContentType returns the sniffed Content-Type string
-// for the given data. This function always returns a valid MIME type.
-func DetectContentType(data []byte) string {
- if len(data) > sniffLen {
- data = data[:sniffLen]
- }
-
- // Index of the first non-whitespace byte in data.
- firstNonWS := 0
- for ; firstNonWS < len(data) && isWS(data[firstNonWS]); firstNonWS++ {
- }
-
- for _, sig := range sniffSignatures {
- if ct := sig.match(data, firstNonWS); ct != "" {
- return ct
- }
- }
-
- return "application/octet-stream" // fallback
-}
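DetectContentType is easy to exercise directly; a short sketch (assuming the "net/http" import path), whose expected results follow the signature table below, covers HTML, GIF, PNG, and the octet-stream fallback:

package main

import (
	"fmt"
	"net/http"
)

func main() {
	samples := [][]byte{
		[]byte("<!DOCTYPE html><html><body>hi</body></html>"), // text/html; charset=utf-8
		[]byte("GIF89a..."),                                   // image/gif
		{0x89, 0x50, 0x4E, 0x47, 0x0D, 0x0A, 0x1A, 0x0A},      // image/png
		{1, 2, 3},                                             // application/octet-stream
	}
	for _, b := range samples {
		// Only the first 512 bytes (sniffLen) are ever examined.
		fmt.Println(http.DetectContentType(b))
	}
}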
-
-func isWS(b byte) bool {
-	return bytes.IndexByte([]byte("\t\n\x0C\r "), b) != -1
-}
-
-type sniffSig interface {
- // match returns the MIME type of the data, or "" if unknown.
- match(data []byte, firstNonWS int) string
-}
-
-// Data matching the table in section 6.
-var sniffSignatures = []sniffSig{
- htmlSig([]byte("<!DOCTYPE HTML")),
- htmlSig([]byte("<HTML")),
- htmlSig([]byte("<HEAD")),
- htmlSig([]byte("<SCRIPT")),
- htmlSig([]byte("<IFRAME")),
- htmlSig([]byte("<H1")),
- htmlSig([]byte("<DIV")),
- htmlSig([]byte("<FONT")),
- htmlSig([]byte("<TABLE")),
- htmlSig([]byte("<A")),
- htmlSig([]byte("<STYLE")),
- htmlSig([]byte("<TITLE")),
- htmlSig([]byte("<B")),
- htmlSig([]byte("<BODY")),
- htmlSig([]byte("<BR")),
- htmlSig([]byte("<P")),
- htmlSig([]byte("<!--")),
-
- &maskedSig{mask: []byte("\xFF\xFF\xFF\xFF\xFF"), pat: []byte("<?xml"), skipWS: true, ct: "text/xml; charset=utf-8"},
-
- &exactSig{[]byte("%PDF-"), "application/pdf"},
- &exactSig{[]byte("%!PS-Adobe-"), "application/postscript"},
-
- // UTF BOMs.
- &maskedSig{mask: []byte("\xFF\xFF\x00\x00"), pat: []byte("\xFE\xFF\x00\x00"), ct: "text/plain; charset=utf-16be"},
- &maskedSig{mask: []byte("\xFF\xFF\x00\x00"), pat: []byte("\xFF\xFE\x00\x00"), ct: "text/plain; charset=utf-16le"},
- &maskedSig{mask: []byte("\xFF\xFF\xFF\x00"), pat: []byte("\xEF\xBB\xBF\x00"), ct: "text/plain; charset=utf-8"},
-
- &exactSig{[]byte("GIF87a"), "image/gif"},
- &exactSig{[]byte("GIF89a"), "image/gif"},
- &exactSig{[]byte("\x89\x50\x4E\x47\x0D\x0A\x1A\x0A"), "image/png"},
- &exactSig{[]byte("\xFF\xD8\xFF"), "image/jpeg"},
- &exactSig{[]byte("BM"), "image/bmp"},
- &maskedSig{
- mask: []byte("\xFF\xFF\xFF\xFF\x00\x00\x00\x00\xFF\xFF\xFF\xFF\xFF\xFF"),
- pat: []byte("RIFF\x00\x00\x00\x00WEBPVP"),
- ct: "image/webp",
- },
- &exactSig{[]byte("\x00\x00\x01\x00"), "image/vnd.microsoft.icon"},
- &exactSig{[]byte("\x4F\x67\x67\x53\x00"), "application/ogg"},
- &maskedSig{
- mask: []byte("\xFF\xFF\xFF\xFF\x00\x00\x00\x00\xFF\xFF\xFF\xFF"),
- pat: []byte("RIFF\x00\x00\x00\x00WAVE"),
- ct: "audio/wave",
- },
- &exactSig{[]byte("\x1A\x45\xDF\xA3"), "video/webm"},
- &exactSig{[]byte("\x52\x61\x72\x20\x1A\x07\x00"), "application/x-rar-compressed"},
- &exactSig{[]byte("\x50\x4B\x03\x04"), "application/zip"},
- &exactSig{[]byte("\x1F\x8B\x08"), "application/x-gzip"},
-
- // TODO(dsymonds): Re-enable this when the spec is sorted w.r.t. MP4.
- //mp4Sig(0),
-
- textSig(0), // should be last
-}
-
-type exactSig struct {
- sig []byte
- ct string
-}
-
-func (e *exactSig) match(data []byte, firstNonWS int) string {
- if bytes.HasPrefix(data, e.sig) {
- return e.ct
- }
- return ""
-}
-
-type maskedSig struct {
- mask, pat []byte
- skipWS bool
- ct string
-}
-
-func (m *maskedSig) match(data []byte, firstNonWS int) string {
- if m.skipWS {
- data = data[firstNonWS:]
- }
- if len(data) < len(m.mask) {
- return ""
- }
- for i, mask := range m.mask {
- db := data[i] & mask
- if db != m.pat[i] {
- return ""
- }
- }
- return m.ct
-}
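The mask-and-compare step above can be seen in isolation with a small standalone sketch (matchMasked is an illustrative name, not part of the package); it reproduces the WAVE entry from the signature table above, where the mask zeroes out the four RIFF chunk-size bytes:

package main

import "fmt"

// matchMasked reports whether data, byte-wise ANDed with mask, equals pat.
// This mirrors the comparison performed by maskedSig.match above.
func matchMasked(data, mask, pat []byte) bool {
	if len(data) < len(mask) {
		return false
	}
	for i, m := range mask {
		if data[i]&m != pat[i] {
			return false
		}
	}
	return true
}

func main() {
	mask := []byte("\xFF\xFF\xFF\xFF\x00\x00\x00\x00\xFF\xFF\xFF\xFF")
	pat := []byte("RIFF\x00\x00\x00\x00WAVE")
	data := []byte("RIFF\x24\x08\x00\x00WAVEfmt ")
	fmt.Println(matchMasked(data, mask, pat)) // true
}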
-
-type htmlSig []byte
-
-func (h htmlSig) match(data []byte, firstNonWS int) string {
- data = data[firstNonWS:]
- if len(data) < len(h)+1 {
- return ""
- }
- for i, b := range h {
- db := data[i]
- if 'A' <= b && b <= 'Z' {
- db &= 0xDF
- }
- if b != db {
- return ""
- }
- }
- // Next byte must be space or right angle bracket.
- if db := data[len(h)]; db != ' ' && db != '>' {
- return ""
- }
- return "text/html; charset=utf-8"
-}
-
-type mp4Sig int
-
-func (mp4Sig) match(data []byte, firstNonWS int) string {
- // c.f. section 6.1.
- if len(data) < 8 {
- return ""
- }
- boxSize := int(binary.BigEndian.Uint32(data[:4]))
- if boxSize%4 != 0 || len(data) < boxSize {
- return ""
- }
- if !bytes.Equal(data[4:8], []byte("ftyp")) {
- return ""
- }
- for st := 8; st < boxSize; st += 4 {
- if st == 12 {
- // minor version number
- continue
- }
- seg := string(data[st : st+3])
- switch seg {
- case "mp4", "iso", "M4V", "M4P", "M4B":
- return "video/mp4"
- /* The remainder are not in the spec.
- case "M4A":
- return "audio/mp4"
- case "3gp":
- return "video/3gpp"
- case "jp2":
- return "image/jp2" // JPEG 2000
- */
- }
- }
- return ""
-}
-
-type textSig int
-
-func (textSig) match(data []byte, firstNonWS int) string {
- // c.f. section 5, step 4.
- for _, b := range data[firstNonWS:] {
- switch {
- case 0x00 <= b && b <= 0x08,
- b == 0x0B,
- 0x0E <= b && b <= 0x1A,
- 0x1C <= b && b <= 0x1F:
- return ""
- }
- }
- return "text/plain; charset=utf-8"
-}
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http_test
-
-import (
- "bytes"
- . "http"
- "http/httptest"
- "io/ioutil"
- "log"
- "strconv"
- "testing"
-)
-
-var sniffTests = []struct {
- desc string
- data []byte
- contentType string
-}{
- // Some nonsense.
- {"Empty", []byte{}, "text/plain; charset=utf-8"},
- {"Binary", []byte{1, 2, 3}, "application/octet-stream"},
-
- {"HTML document #1", []byte(`<HtMl><bOdY>blah blah blah</body></html>`), "text/html; charset=utf-8"},
- {"HTML document #2", []byte(`<HTML></HTML>`), "text/html; charset=utf-8"},
- {"HTML document #3 (leading whitespace)", []byte(` <!DOCTYPE HTML>...`), "text/html; charset=utf-8"},
-
- {"Plain text", []byte(`This is not HTML. It has ☃ though.`), "text/plain; charset=utf-8"},
-
- {"XML", []byte("\n<?xml!"), "text/xml; charset=utf-8"},
-
- // Image types.
- {"GIF 87a", []byte(`GIF87a`), "image/gif"},
- {"GIF 89a", []byte(`GIF89a...`), "image/gif"},
-
- // TODO(dsymonds): Re-enable this when the spec is sorted w.r.t. MP4.
- //{"MP4 video", []byte("\x00\x00\x00\x18ftypmp42\x00\x00\x00\x00mp42isom<\x06t\xbfmdat"), "video/mp4"},
- //{"MP4 audio", []byte("\x00\x00\x00\x20ftypM4A \x00\x00\x00\x00M4A mp42isom\x00\x00\x00\x00"), "audio/mp4"},
-}
-
-func TestDetectContentType(t *testing.T) {
- for _, tt := range sniffTests {
- ct := DetectContentType(tt.data)
- if ct != tt.contentType {
- t.Errorf("%v: DetectContentType = %q, want %q", tt.desc, ct, tt.contentType)
- }
- }
-}
-
-func TestServerContentType(t *testing.T) {
- ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
- i, _ := strconv.Atoi(r.FormValue("i"))
- tt := sniffTests[i]
- n, err := w.Write(tt.data)
- if n != len(tt.data) || err != nil {
- log.Fatalf("%v: Write(%q) = %v, %v want %d, nil", tt.desc, tt.data, n, err, len(tt.data))
- }
- }))
- defer ts.Close()
-
- for i, tt := range sniffTests {
- resp, err := Get(ts.URL + "/?i=" + strconv.Itoa(i))
- if err != nil {
- t.Errorf("%v: %v", tt.desc, err)
- continue
- }
- if ct := resp.Header.Get("Content-Type"); ct != tt.contentType {
- t.Errorf("%v: Content-Type = %q, want %q", tt.desc, ct, tt.contentType)
- }
- data, err := ioutil.ReadAll(resp.Body)
- if err != nil {
- t.Errorf("%v: reading body: %v", tt.desc, err)
- } else if !bytes.Equal(data, tt.data) {
- t.Errorf("%v: data is %q, want %q", tt.desc, data, tt.data)
- }
- resp.Body.Close()
- }
-}
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http
-
-// HTTP status codes, defined in RFC 2616.
-const (
- StatusContinue = 100
- StatusSwitchingProtocols = 101
-
- StatusOK = 200
- StatusCreated = 201
- StatusAccepted = 202
- StatusNonAuthoritativeInfo = 203
- StatusNoContent = 204
- StatusResetContent = 205
- StatusPartialContent = 206
-
- StatusMultipleChoices = 300
- StatusMovedPermanently = 301
- StatusFound = 302
- StatusSeeOther = 303
- StatusNotModified = 304
- StatusUseProxy = 305
- StatusTemporaryRedirect = 307
-
- StatusBadRequest = 400
- StatusUnauthorized = 401
- StatusPaymentRequired = 402
- StatusForbidden = 403
- StatusNotFound = 404
- StatusMethodNotAllowed = 405
- StatusNotAcceptable = 406
- StatusProxyAuthRequired = 407
- StatusRequestTimeout = 408
- StatusConflict = 409
- StatusGone = 410
- StatusLengthRequired = 411
- StatusPreconditionFailed = 412
- StatusRequestEntityTooLarge = 413
- StatusRequestURITooLong = 414
- StatusUnsupportedMediaType = 415
- StatusRequestedRangeNotSatisfiable = 416
- StatusExpectationFailed = 417
-
- StatusInternalServerError = 500
- StatusNotImplemented = 501
- StatusBadGateway = 502
- StatusServiceUnavailable = 503
- StatusGatewayTimeout = 504
- StatusHTTPVersionNotSupported = 505
-)
-
-var statusText = map[int]string{
- StatusContinue: "Continue",
- StatusSwitchingProtocols: "Switching Protocols",
-
- StatusOK: "OK",
- StatusCreated: "Created",
- StatusAccepted: "Accepted",
- StatusNonAuthoritativeInfo: "Non-Authoritative Information",
- StatusNoContent: "No Content",
- StatusResetContent: "Reset Content",
- StatusPartialContent: "Partial Content",
-
- StatusMultipleChoices: "Multiple Choices",
- StatusMovedPermanently: "Moved Permanently",
- StatusFound: "Found",
- StatusSeeOther: "See Other",
- StatusNotModified: "Not Modified",
- StatusUseProxy: "Use Proxy",
- StatusTemporaryRedirect: "Temporary Redirect",
-
- StatusBadRequest: "Bad Request",
- StatusUnauthorized: "Unauthorized",
- StatusPaymentRequired: "Payment Required",
- StatusForbidden: "Forbidden",
- StatusNotFound: "Not Found",
- StatusMethodNotAllowed: "Method Not Allowed",
- StatusNotAcceptable: "Not Acceptable",
- StatusProxyAuthRequired: "Proxy Authentication Required",
- StatusRequestTimeout: "Request Timeout",
- StatusConflict: "Conflict",
- StatusGone: "Gone",
- StatusLengthRequired: "Length Required",
- StatusPreconditionFailed: "Precondition Failed",
- StatusRequestEntityTooLarge: "Request Entity Too Large",
- StatusRequestURITooLong: "Request URI Too Long",
- StatusUnsupportedMediaType: "Unsupported Media Type",
- StatusRequestedRangeNotSatisfiable: "Requested Range Not Satisfiable",
- StatusExpectationFailed: "Expectation Failed",
-
- StatusInternalServerError: "Internal Server Error",
- StatusNotImplemented: "Not Implemented",
- StatusBadGateway: "Bad Gateway",
- StatusServiceUnavailable: "Service Unavailable",
- StatusGatewayTimeout: "Gateway Timeout",
- StatusHTTPVersionNotSupported: "HTTP Version Not Supported",
-}
-
-// StatusText returns a text for the HTTP status code. It returns the empty
-// string if the code is unknown.
-func StatusText(code int) string {
- return statusText[code]
-}
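A minimal usage sketch for the StatusText helper above, again assuming the net/http import path used after this reorganization.

package main

import (
	"fmt"
	"net/http"
)

func main() {
	// Known codes map to their reason phrase; unknown codes map to "".
	fmt.Println(http.StatusText(http.StatusNotFound)) // "Not Found"
	fmt.Println(http.StatusText(999) == "")           // true
}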
+++ /dev/null
-0123456789
+++ /dev/null
-index.html says hello
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http
-
-import (
- "bytes"
- "bufio"
- "errors"
- "fmt"
- "io"
- "io/ioutil"
- "strconv"
- "strings"
-)
-
-// transferWriter inspects the fields of a user-supplied Request or Response,
-// sanitizes them without changing the user object and provides methods for
-// writing the respective header, body and trailer in wire format.
-type transferWriter struct {
- Method string
- Body io.Reader
- BodyCloser io.Closer
- ResponseToHEAD bool
- ContentLength int64 // -1 means unknown, 0 means exactly none
- Close bool
- TransferEncoding []string
- Trailer Header
-}
-
-func newTransferWriter(r interface{}) (t *transferWriter, err error) {
- t = &transferWriter{}
-
- // Extract relevant fields
- atLeastHTTP11 := false
- switch rr := r.(type) {
- case *Request:
- if rr.ContentLength != 0 && rr.Body == nil {
- return nil, fmt.Errorf("http: Request.ContentLength=%d with nil Body", rr.ContentLength)
- }
- t.Method = rr.Method
- t.Body = rr.Body
- t.BodyCloser = rr.Body
- t.ContentLength = rr.ContentLength
- t.Close = rr.Close
- t.TransferEncoding = rr.TransferEncoding
- t.Trailer = rr.Trailer
- atLeastHTTP11 = rr.ProtoAtLeast(1, 1)
- if t.Body != nil && len(t.TransferEncoding) == 0 && atLeastHTTP11 {
- if t.ContentLength == 0 {
- // Test to see if it's actually zero or just unset.
- var buf [1]byte
- n, _ := io.ReadFull(t.Body, buf[:])
- if n == 1 {
- // Oh, guess there is data in this Body Reader after all.
- // The ContentLength field just wasn't set.
- // Stitch the Body back together again, re-attaching our

- // consumed byte.
- t.ContentLength = -1
- t.Body = io.MultiReader(bytes.NewBuffer(buf[:]), t.Body)
- } else {
- // Body is actually empty.
- t.Body = nil
- t.BodyCloser = nil
- }
- }
- if t.ContentLength < 0 {
- t.TransferEncoding = []string{"chunked"}
- }
- }
- case *Response:
- t.Method = rr.Request.Method
- t.Body = rr.Body
- t.BodyCloser = rr.Body
- t.ContentLength = rr.ContentLength
- t.Close = rr.Close
- t.TransferEncoding = rr.TransferEncoding
- t.Trailer = rr.Trailer
- atLeastHTTP11 = rr.ProtoAtLeast(1, 1)
- t.ResponseToHEAD = noBodyExpected(rr.Request.Method)
- }
-
- // Sanitize Body, ContentLength, TransferEncoding
- if t.ResponseToHEAD {
- t.Body = nil
- t.TransferEncoding = nil
- // ContentLength is expected to hold Content-Length
- if t.ContentLength < 0 {
- return nil, ErrMissingContentLength
- }
- } else {
- if !atLeastHTTP11 || t.Body == nil {
- t.TransferEncoding = nil
- }
- if chunked(t.TransferEncoding) {
- t.ContentLength = -1
- } else if t.Body == nil { // no chunking, no body
- t.ContentLength = 0
- }
- }
-
- // Sanitize Trailer
- if !chunked(t.TransferEncoding) {
- t.Trailer = nil
- }
-
- return t, nil
-}
-
-func noBodyExpected(requestMethod string) bool {
- return requestMethod == "HEAD"
-}
-
-func (t *transferWriter) shouldSendContentLength() bool {
- if chunked(t.TransferEncoding) {
- return false
- }
- if t.ContentLength > 0 {
- return true
- }
- if t.ResponseToHEAD {
- return true
- }
- // Many servers expect a Content-Length for these methods
- if t.Method == "POST" || t.Method == "PUT" {
- return true
- }
- if t.ContentLength == 0 && isIdentity(t.TransferEncoding) {
- return true
- }
-
- return false
-}
-
-func (t *transferWriter) WriteHeader(w io.Writer) (err error) {
- if t.Close {
- _, err = io.WriteString(w, "Connection: close\r\n")
- if err != nil {
- return
- }
- }
-
- // Write Content-Length and/or Transfer-Encoding whose values are a
- // function of the sanitized field triple (Body, ContentLength,
- // TransferEncoding)
- if t.shouldSendContentLength() {
- io.WriteString(w, "Content-Length: ")
- _, err = io.WriteString(w, strconv.Itoa64(t.ContentLength)+"\r\n")
- if err != nil {
- return
- }
- } else if chunked(t.TransferEncoding) {
- _, err = io.WriteString(w, "Transfer-Encoding: chunked\r\n")
- if err != nil {
- return
- }
- }
-
- // Write Trailer header
- if t.Trailer != nil {
- // TODO: At some point, there should be a generic mechanism for
- // writing long headers, using HTTP line splitting
- io.WriteString(w, "Trailer: ")
- needComma := false
- for k := range t.Trailer {
- k = CanonicalHeaderKey(k)
- switch k {
- case "Transfer-Encoding", "Trailer", "Content-Length":
- return &badStringError{"invalid Trailer key", k}
- }
- if needComma {
- io.WriteString(w, ",")
- }
- io.WriteString(w, k)
- needComma = true
- }
- _, err = io.WriteString(w, "\r\n")
- }
-
- return
-}
-
-func (t *transferWriter) WriteBody(w io.Writer) (err error) {
- var ncopy int64
-
- // Write body
- if t.Body != nil {
- if chunked(t.TransferEncoding) {
- cw := NewChunkedWriter(w)
- _, err = io.Copy(cw, t.Body)
- if err == nil {
- err = cw.Close()
- }
- } else if t.ContentLength == -1 {
- ncopy, err = io.Copy(w, t.Body)
- } else {
- ncopy, err = io.Copy(w, io.LimitReader(t.Body, t.ContentLength))
- nextra, err := io.Copy(ioutil.Discard, t.Body)
- if err != nil {
- return err
- }
- ncopy += nextra
- }
- if err != nil {
- return err
- }
- if err = t.BodyCloser.Close(); err != nil {
- return err
- }
- }
-
- if t.ContentLength != -1 && t.ContentLength != ncopy {
- return fmt.Errorf("http: Request.ContentLength=%d with Body length %d",
- t.ContentLength, ncopy)
- }
-
- // TODO(petar): Place trailer writer code here.
- if chunked(t.TransferEncoding) {
- // Last chunk, empty trailer
- _, err = io.WriteString(w, "\r\n")
- }
-
- return
-}
-
-type transferReader struct {
- // Input
- Header Header
- StatusCode int
- RequestMethod string
- ProtoMajor int
- ProtoMinor int
- // Output
- Body io.ReadCloser
- ContentLength int64
- TransferEncoding []string
- Close bool
- Trailer Header
-}
-
-// bodyAllowedForStatus returns whether a given response status code
-// permits a body. See RFC2616, section 4.4.
-func bodyAllowedForStatus(status int) bool {
- switch {
- case status >= 100 && status <= 199:
- return false
- case status == 204:
- return false
- case status == 304:
- return false
- }
- return true
-}
-
-// msg is *Request or *Response.
-func readTransfer(msg interface{}, r *bufio.Reader) (err error) {
- t := &transferReader{}
-
- // Unify input
- isResponse := false
- switch rr := msg.(type) {
- case *Response:
- t.Header = rr.Header
- t.StatusCode = rr.StatusCode
- t.RequestMethod = rr.Request.Method
- t.ProtoMajor = rr.ProtoMajor
- t.ProtoMinor = rr.ProtoMinor
- t.Close = shouldClose(t.ProtoMajor, t.ProtoMinor, t.Header)
- isResponse = true
- case *Request:
- t.Header = rr.Header
- t.ProtoMajor = rr.ProtoMajor
- t.ProtoMinor = rr.ProtoMinor
- // Transfer semantics for Requests are exactly like those for
- // Responses with status code 200, responding to a GET method
- t.StatusCode = 200
- t.RequestMethod = "GET"
- default:
- panic("unexpected type")
- }
-
- // Default to HTTP/1.1
- if t.ProtoMajor == 0 && t.ProtoMinor == 0 {
- t.ProtoMajor, t.ProtoMinor = 1, 1
- }
-
- // Transfer encoding, content length
- t.TransferEncoding, err = fixTransferEncoding(t.RequestMethod, t.Header)
- if err != nil {
- return err
- }
-
- t.ContentLength, err = fixLength(isResponse, t.StatusCode, t.RequestMethod, t.Header, t.TransferEncoding)
- if err != nil {
- return err
- }
-
- // Trailer
- t.Trailer, err = fixTrailer(t.Header, t.TransferEncoding)
- if err != nil {
- return err
- }
-
- // If there is no Content-Length or chunked Transfer-Encoding on a *Response
- // and the status is not 1xx, 204 or 304, then the body is unbounded.
- // See RFC2616, section 4.4.
- switch msg.(type) {
- case *Response:
- if t.ContentLength == -1 &&
- !chunked(t.TransferEncoding) &&
- bodyAllowedForStatus(t.StatusCode) {
- // Unbounded body.
- t.Close = true
- }
- }
-
- // Prepare body reader. ContentLength < 0 means chunked encoding
- // or close connection when finished, since multipart is not supported yet
- switch {
- case chunked(t.TransferEncoding):
- t.Body = &body{Reader: NewChunkedReader(r), hdr: msg, r: r, closing: t.Close}
- case t.ContentLength >= 0:
- // TODO: limit the Content-Length. This is an easy DoS vector.
- t.Body = &body{Reader: io.LimitReader(r, t.ContentLength), closing: t.Close}
- default:
- // t.ContentLength < 0, i.e. "Content-Length" not mentioned in header
- if t.Close {
- // Close semantics (i.e. HTTP/1.0)
- t.Body = &body{Reader: r, closing: t.Close}
- } else {
- // Persistent connection (i.e. HTTP/1.1)
- t.Body = &body{Reader: io.LimitReader(r, 0), closing: t.Close}
- }
- }
-
- // Unify output
- switch rr := msg.(type) {
- case *Request:
- rr.Body = t.Body
- rr.ContentLength = t.ContentLength
- rr.TransferEncoding = t.TransferEncoding
- rr.Close = t.Close
- rr.Trailer = t.Trailer
- case *Response:
- rr.Body = t.Body
- rr.ContentLength = t.ContentLength
- rr.TransferEncoding = t.TransferEncoding
- rr.Close = t.Close
- rr.Trailer = t.Trailer
- }
-
- return nil
-}
-
-// Checks whether chunked is part of the encodings stack
-func chunked(te []string) bool { return len(te) > 0 && te[0] == "chunked" }
-
-// Checks whether the encoding is explicitly "identity".
-func isIdentity(te []string) bool { return len(te) == 1 && te[0] == "identity" }
-
-// Sanitize transfer encoding
-func fixTransferEncoding(requestMethod string, header Header) ([]string, error) {
- raw, present := header["Transfer-Encoding"]
- if !present {
- return nil, nil
- }
-
- delete(header, "Transfer-Encoding")
-
- // Head responses have no bodies, so the transfer encoding
- // should be ignored.
- if requestMethod == "HEAD" {
- return nil, nil
- }
-
- encodings := strings.Split(raw[0], ",")
- te := make([]string, 0, len(encodings))
- // TODO: Even though we only support "identity" and "chunked"
- // encodings, the loop below is designed with foresight. One
- // invariant that must be maintained is that, if present,
- // chunked encoding must always come first.
- for _, encoding := range encodings {
- encoding = strings.ToLower(strings.TrimSpace(encoding))
- // "identity" encoding is not recored
- if encoding == "identity" {
- break
- }
- if encoding != "chunked" {
- return nil, &badStringError{"unsupported transfer encoding", encoding}
- }
- te = te[0 : len(te)+1]
- te[len(te)-1] = encoding
- }
- if len(te) > 1 {
- return nil, &badStringError{"too many transfer encodings", strings.Join(te, ",")}
- }
- if len(te) > 0 {
- // Chunked encoding trumps Content-Length. See RFC 2616
- // Section 4.4. Currently len(te) > 0 implies chunked
- // encoding.
- delete(header, "Content-Length")
- return te, nil
- }
-
- return nil, nil
-}
-
-// Determine the expected body length, using RFC 2616 Section 4.4. This
-// function is not a method, because ultimately it should be shared by
-// ReadResponse and ReadRequest.
-func fixLength(isResponse bool, status int, requestMethod string, header Header, te []string) (int64, error) {
-
- // Logic based on response type or status
- if noBodyExpected(requestMethod) {
- return 0, nil
- }
- if status/100 == 1 {
- return 0, nil
- }
- switch status {
- case 204, 304:
- return 0, nil
- }
-
- // Logic based on Transfer-Encoding
- if chunked(te) {
- return -1, nil
- }
-
- // Logic based on Content-Length
- cl := strings.TrimSpace(header.Get("Content-Length"))
- if cl != "" {
- n, err := strconv.Atoi64(cl)
- if err != nil || n < 0 {
- return -1, &badStringError{"bad Content-Length", cl}
- }
- return n, nil
- } else {
- header.Del("Content-Length")
- }
-
- if !isResponse && requestMethod == "GET" {
- // RFC 2616 doesn't explicitly permit nor forbid an
- // entity-body on a GET request so we permit one if
- // declared, but we default to 0 here (not -1 below)
- // if there's no mention of a body.
- return 0, nil
- }
-
- // Logic based on media type. The purpose of the following code is just
- // to detect whether the unsupported "multipart/byteranges" is being
- // used. A proper Content-Type parser is needed in the future.
- if strings.Contains(strings.ToLower(header.Get("Content-Type")), "multipart/byteranges") {
- return -1, ErrNotSupported
- }
-
- // Body-EOF logic based on other methods (like closing, or chunked coding)
- return -1, nil
-}
-
-// Determine whether to hang up after sending a request and body, or
-// receiving a response and body
-// 'header' is the request headers
-func shouldClose(major, minor int, header Header) bool {
- if major < 1 {
- return true
- } else if major == 1 && minor == 0 {
- if !strings.Contains(strings.ToLower(header.Get("Connection")), "keep-alive") {
- return true
- }
- return false
- } else {
- // TODO: Should split on commas, toss surrounding white space,
- // and check each field.
- if strings.ToLower(header.Get("Connection")) == "close" {
- header.Del("Connection")
- return true
- }
- }
- return false
-}
-
-// Parse the trailer header
-func fixTrailer(header Header, te []string) (Header, error) {
- raw := header.Get("Trailer")
- if raw == "" {
- return nil, nil
- }
-
- header.Del("Trailer")
- trailer := make(Header)
- keys := strings.Split(raw, ",")
- for _, key := range keys {
- key = CanonicalHeaderKey(strings.TrimSpace(key))
- switch key {
- case "Transfer-Encoding", "Trailer", "Content-Length":
- return nil, &badStringError{"bad trailer key", key}
- }
- trailer.Del(key)
- }
- if len(trailer) == 0 {
- return nil, nil
- }
- if !chunked(te) {
- // Trailer and no chunking
- return nil, ErrUnexpectedTrailer
- }
- return trailer, nil
-}
-
-// body turns a Reader into a ReadCloser.
-// Close ensures that the body has been fully read
-// and then reads the trailer if necessary.
-type body struct {
- io.Reader
- hdr interface{} // non-nil (Response or Request) value means read trailer
- r *bufio.Reader // underlying wire-format reader for the trailer
- closing bool // is the connection to be closed after reading body?
- closed bool
-
- res *response // response writer for server requests, else nil
-}
-
-// ErrBodyReadAfterClose is returned when reading a Request Body after
-// the body has been closed. This typically happens when the body is
-// read after an HTTP Handler calls WriteHeader or Write on its
-// ResponseWriter.
-var ErrBodyReadAfterClose = errors.New("http: invalid Read on closed request Body")
-
-func (b *body) Read(p []byte) (n int, err error) {
- if b.closed {
- return 0, ErrBodyReadAfterClose
- }
- return b.Reader.Read(p)
-}
-
-func (b *body) Close() error {
- if b.closed {
- return nil
- }
- defer func() {
- b.closed = true
- }()
- if b.hdr == nil && b.closing {
- // no trailer and closing the connection next.
- // no point in reading to EOF.
- return nil
- }
-
- // In a server request, don't continue reading from the client
- // if we've already hit the maximum body size set by the
- // handler. If this is set, that also means the TCP connection
- // is about to be closed, so getting to the next HTTP request
- // in the stream is not necessary.
- if b.res != nil && b.res.requestBodyLimitHit {
- return nil
- }
-
- if _, err := io.Copy(ioutil.Discard, b); err != nil {
- return err
- }
-
- if b.hdr == nil { // not reading trailer
- return nil
- }
-
- // TODO(petar): Put trailer reader code here
-
- return nil
-}
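The precedence that fixLength implements can be restated in isolation. The sketch below covers only the main RFC 2616 section 4.4 rules; the helper name expectedBodyLength is hypothetical.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// expectedBodyLength mirrors the ordering used above: no body for HEAD,
// 1xx, 204 and 304; chunked wins over Content-Length; otherwise use the
// Content-Length header; -1 means "read until the connection closes".
func expectedBodyLength(method string, status int, transferEncoding, contentLength string) int64 {
	if method == "HEAD" || status/100 == 1 || status == 204 || status == 304 {
		return 0
	}
	if strings.ToLower(strings.TrimSpace(transferEncoding)) == "chunked" {
		return -1
	}
	if n, err := strconv.ParseInt(strings.TrimSpace(contentLength), 10, 64); err == nil && n >= 0 {
		return n
	}
	return -1
}

func main() {
	fmt.Println(expectedBodyLength("GET", 200, "chunked", "42")) // -1: chunked trumps Content-Length
	fmt.Println(expectedBodyLength("GET", 200, "", "42"))        // 42
	fmt.Println(expectedBodyLength("HEAD", 200, "", "42"))       // 0
}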
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// HTTP client implementation. See RFC 2616.
-//
-// This is the low-level Transport implementation of RoundTripper.
-// The high-level interface is in client.go.
-
-package http
-
-import (
- "bufio"
- "compress/gzip"
- "crypto/tls"
- "encoding/base64"
- "errors"
- "fmt"
- "io"
- "io/ioutil"
- "log"
- "net"
- "os"
- "strings"
- "sync"
- "url"
-)
-
-// DefaultTransport is the default implementation of Transport and is
-// used by DefaultClient. It establishes a new network connection for
-// each call to Do and uses HTTP proxies as directed by the
-// $HTTP_PROXY and $NO_PROXY (or $http_proxy and $no_proxy)
-// environment variables.
-var DefaultTransport RoundTripper = &Transport{Proxy: ProxyFromEnvironment}
-
-// DefaultMaxIdleConnsPerHost is the default value of Transport's
-// MaxIdleConnsPerHost.
-const DefaultMaxIdleConnsPerHost = 2
-
-// Transport is an implementation of RoundTripper that supports http,
-// https, and http proxies (for either http or https with CONNECT).
-// Transport can also cache connections for future re-use.
-type Transport struct {
- lk sync.Mutex
- idleConn map[string][]*persistConn
- altProto map[string]RoundTripper // nil or map of URI scheme => RoundTripper
-
- // TODO: tunable on global max cached connections
- // TODO: tunable on timeout on cached connections
- // TODO: optional pipelining
-
- // Proxy specifies a function to return a proxy for a given
- // Request. If the function returns a non-nil error, the
- // request is aborted with the provided error.
- // If Proxy is nil or returns a nil *URL, no proxy is used.
- Proxy func(*Request) (*url.URL, error)
-
- // Dial specifies the dial function for creating TCP
- // connections.
- // If Dial is nil, net.Dial is used.
- Dial func(net, addr string) (c net.Conn, err error)
-
- // TLSClientConfig specifies the TLS configuration to use with
- // tls.Client. If nil, the default configuration is used.
- TLSClientConfig *tls.Config
-
- DisableKeepAlives bool
- DisableCompression bool
-
- // MaxIdleConnsPerHost, if non-zero, controls the maximum idle
- // (keep-alive) connections to keep per-host. If zero,
- // DefaultMaxIdleConnsPerHost is used.
- MaxIdleConnsPerHost int
-}
-
-// ProxyFromEnvironment returns the URL of the proxy to use for a
-// given request, as indicated by the environment variables
-// $HTTP_PROXY and $NO_PROXY (or $http_proxy and $no_proxy).
-// Either URL or an error is returned.
-func ProxyFromEnvironment(req *Request) (*url.URL, error) {
- proxy := getenvEitherCase("HTTP_PROXY")
- if proxy == "" {
- return nil, nil
- }
- if !useProxy(canonicalAddr(req.URL)) {
- return nil, nil
- }
- proxyURL, err := url.ParseRequest(proxy)
- if err != nil {
- return nil, errors.New("invalid proxy address")
- }
- if proxyURL.Host == "" {
- proxyURL, err = url.ParseRequest("http://" + proxy)
- if err != nil {
- return nil, errors.New("invalid proxy address")
- }
- }
- return proxyURL, nil
-}
-
-// ProxyURL returns a proxy function (for use in a Transport)
-// that always returns the same URL.
-func ProxyURL(fixedURL *url.URL) func(*Request) (*url.URL, error) {
- return func(*Request) (*url.URL, error) {
- return fixedURL, nil
- }
-}
-
-// transportRequest is a wrapper around a *Request that adds
-// optional extra headers to write.
-type transportRequest struct {
- *Request // original request, not to be mutated
- extra Header // extra headers to write, or nil
-}
-
-func (tr *transportRequest) extraHeaders() Header {
- if tr.extra == nil {
- tr.extra = make(Header)
- }
- return tr.extra
-}
-
-// RoundTrip implements the RoundTripper interface.
-func (t *Transport) RoundTrip(req *Request) (resp *Response, err error) {
- if req.URL == nil {
- return nil, errors.New("http: nil Request.URL")
- }
- if req.Header == nil {
- return nil, errors.New("http: nil Request.Header")
- }
- if req.URL.Scheme != "http" && req.URL.Scheme != "https" {
- t.lk.Lock()
- var rt RoundTripper
- if t.altProto != nil {
- rt = t.altProto[req.URL.Scheme]
- }
- t.lk.Unlock()
- if rt == nil {
- return nil, &badStringError{"unsupported protocol scheme", req.URL.Scheme}
- }
- return rt.RoundTrip(req)
- }
- treq := &transportRequest{Request: req}
- cm, err := t.connectMethodForRequest(treq)
- if err != nil {
- return nil, err
- }
-
- // Get the cached or newly-created connection to either the
- // host (for http or https), the http proxy, or the http proxy
- // pre-CONNECTed to https server. In any case, we'll be ready
- // to send it requests.
- pconn, err := t.getConn(cm)
- if err != nil {
- return nil, err
- }
-
- return pconn.roundTrip(treq)
-}
-
-// RegisterProtocol registers a new protocol with scheme.
-// The Transport will pass requests using the given scheme to rt.
-// It is rt's responsibility to simulate HTTP request semantics.
-//
-// RegisterProtocol can be used by other packages to provide
-// implementations of protocol schemes like "ftp" or "file".
-func (t *Transport) RegisterProtocol(scheme string, rt RoundTripper) {
- if scheme == "http" || scheme == "https" {
- panic("protocol " + scheme + " already registered")
- }
- t.lk.Lock()
- defer t.lk.Unlock()
- if t.altProto == nil {
- t.altProto = make(map[string]RoundTripper)
- }
- if _, exists := t.altProto[scheme]; exists {
- panic("protocol " + scheme + " already registered")
- }
- t.altProto[scheme] = rt
-}
-
-// CloseIdleConnections closes any connections which were previously
-// connected from previous requests but are now sitting idle in
-// a "keep-alive" state. It does not interrupt any connections currently
-// in use.
-func (t *Transport) CloseIdleConnections() {
- t.lk.Lock()
- defer t.lk.Unlock()
- if t.idleConn == nil {
- return
- }
- for _, conns := range t.idleConn {
- for _, pconn := range conns {
- pconn.close()
- }
- }
- t.idleConn = nil
-}
-
-//
-// Private implementation past this point.
-//
-
-func getenvEitherCase(k string) string {
- if v := os.Getenv(strings.ToUpper(k)); v != "" {
- return v
- }
- return os.Getenv(strings.ToLower(k))
-}
-
-func (t *Transport) connectMethodForRequest(treq *transportRequest) (*connectMethod, error) {
- cm := &connectMethod{
- targetScheme: treq.URL.Scheme,
- targetAddr: canonicalAddr(treq.URL),
- }
- if t.Proxy != nil {
- var err error
- cm.proxyURL, err = t.Proxy(treq.Request)
- if err != nil {
- return nil, err
- }
- }
- return cm, nil
-}
-
-// proxyAuth returns the Proxy-Authorization header to set
-// on requests, if applicable.
-func (cm *connectMethod) proxyAuth() string {
- if cm.proxyURL == nil {
- return ""
- }
- proxyInfo := cm.proxyURL.RawUserinfo
- if proxyInfo != "" {
- return "Basic " + base64.URLEncoding.EncodeToString([]byte(proxyInfo))
- }
- return ""
-}
-
-func (t *Transport) putIdleConn(pconn *persistConn) {
- t.lk.Lock()
- defer t.lk.Unlock()
- if t.DisableKeepAlives || t.MaxIdleConnsPerHost < 0 {
- pconn.close()
- return
- }
- if pconn.isBroken() {
- return
- }
- key := pconn.cacheKey
- max := t.MaxIdleConnsPerHost
- if max == 0 {
- max = DefaultMaxIdleConnsPerHost
- }
- if len(t.idleConn[key]) >= max {
- pconn.close()
- return
- }
- t.idleConn[key] = append(t.idleConn[key], pconn)
-}
-
-func (t *Transport) getIdleConn(cm *connectMethod) (pconn *persistConn) {
- t.lk.Lock()
- defer t.lk.Unlock()
- if t.idleConn == nil {
- t.idleConn = make(map[string][]*persistConn)
- }
- key := cm.String()
- for {
- pconns, ok := t.idleConn[key]
- if !ok {
- return nil
- }
- if len(pconns) == 1 {
- pconn = pconns[0]
- delete(t.idleConn, key)
- } else {
- // 2 or more cached connections; pop last
- // TODO: queue?
- pconn = pconns[len(pconns)-1]
- t.idleConn[key] = pconns[0 : len(pconns)-1]
- }
- if !pconn.isBroken() {
- return
- }
- }
- return
-}
-
-func (t *Transport) dial(network, addr string) (c net.Conn, err error) {
- if t.Dial != nil {
- return t.Dial(network, addr)
- }
- return net.Dial(network, addr)
-}
-
-// getConn dials and creates a new persistConn to the target as
-// specified in the connectMethod. This includes doing a proxy CONNECT
-// and/or setting up TLS. If this doesn't return an error, the persistConn
-// is ready to write requests to.
-func (t *Transport) getConn(cm *connectMethod) (*persistConn, error) {
- if pc := t.getIdleConn(cm); pc != nil {
- return pc, nil
- }
-
- conn, err := t.dial("tcp", cm.addr())
- if err != nil {
- if cm.proxyURL != nil {
- err = fmt.Errorf("http: error connecting to proxy %s: %v", cm.proxyURL, err)
- }
- return nil, err
- }
-
- pa := cm.proxyAuth()
-
- pconn := &persistConn{
- t: t,
- cacheKey: cm.String(),
- conn: conn,
- reqch: make(chan requestAndChan, 50),
- }
-
- switch {
- case cm.proxyURL == nil:
- // Do nothing.
- case cm.targetScheme == "http":
- pconn.isProxy = true
- if pa != "" {
- pconn.mutateHeaderFunc = func(h Header) {
- h.Set("Proxy-Authorization", pa)
- }
- }
- case cm.targetScheme == "https":
- connectReq := &Request{
- Method: "CONNECT",
- URL: &url.URL{RawPath: cm.targetAddr},
- Host: cm.targetAddr,
- Header: make(Header),
- }
- if pa != "" {
- connectReq.Header.Set("Proxy-Authorization", pa)
- }
- connectReq.Write(conn)
-
- // Read response.
- // Okay to use and discard buffered reader here, because
- // TLS server will not speak until spoken to.
- br := bufio.NewReader(conn)
- resp, err := ReadResponse(br, connectReq)
- if err != nil {
- conn.Close()
- return nil, err
- }
- if resp.StatusCode != 200 {
- f := strings.SplitN(resp.Status, " ", 2)
- conn.Close()
- return nil, errors.New(f[1])
- }
- }
-
- if cm.targetScheme == "https" {
- // Initiate TLS and check remote host name against certificate.
- conn = tls.Client(conn, t.TLSClientConfig)
- if err = conn.(*tls.Conn).Handshake(); err != nil {
- return nil, err
- }
- if t.TLSClientConfig == nil || !t.TLSClientConfig.InsecureSkipVerify {
- if err = conn.(*tls.Conn).VerifyHostname(cm.tlsHost()); err != nil {
- return nil, err
- }
- }
- pconn.conn = conn
- }
-
- pconn.br = bufio.NewReader(pconn.conn)
- pconn.cc = NewClientConn(conn, pconn.br)
- go pconn.readLoop()
- return pconn, nil
-}
-
-// useProxy returns true if requests to addr should use a proxy,
-// according to the NO_PROXY or no_proxy environment variable.
-// addr is always a canonicalAddr with a host and port.
-func useProxy(addr string) bool {
- if len(addr) == 0 {
- return true
- }
- host, _, err := net.SplitHostPort(addr)
- if err != nil {
- return false
- }
- if host == "localhost" {
- return false
- }
- if ip := net.ParseIP(host); ip != nil {
- if ip.IsLoopback() {
- return false
- }
- }
-
- no_proxy := getenvEitherCase("NO_PROXY")
- if no_proxy == "*" {
- return false
- }
-
- addr = strings.ToLower(strings.TrimSpace(addr))
- if hasPort(addr) {
- addr = addr[:strings.LastIndex(addr, ":")]
- }
-
- for _, p := range strings.Split(no_proxy, ",") {
- p = strings.ToLower(strings.TrimSpace(p))
- if len(p) == 0 {
- continue
- }
- if hasPort(p) {
- p = p[:strings.LastIndex(p, ":")]
- }
- if addr == p || (p[0] == '.' && (strings.HasSuffix(addr, p) || addr == p[1:])) {
- return false
- }
- }
- return true
-}
-
-// connectMethod is the map key (in its String form) for keeping persistent
-// TCP connections alive for subsequent HTTP requests.
-//
-// A connect method may be of the following types:
-//
-// Cache key form Description
-// ----------------- -------------------------
-// ||http|foo.com http directly to server, no proxy
-// ||https|foo.com https directly to server, no proxy
-// http://proxy.com|https|foo.com http to proxy, then CONNECT to foo.com
-// http://proxy.com|http http to proxy, http to anywhere after that
-//
-// Note: no support for https to the proxy yet.
-//
-type connectMethod struct {
- proxyURL *url.URL // nil for no proxy, else full proxy URL
- targetScheme string // "http" or "https"
- targetAddr string // Not used if proxy + http targetScheme (4th example in table)
-}
-
-func (ck *connectMethod) String() string {
- proxyStr := ""
- if ck.proxyURL != nil {
- proxyStr = ck.proxyURL.String()
- }
- return strings.Join([]string{proxyStr, ck.targetScheme, ck.targetAddr}, "|")
-}
-
-// addr returns the first hop "host:port" to which we need to TCP connect.
-func (cm *connectMethod) addr() string {
- if cm.proxyURL != nil {
- return canonicalAddr(cm.proxyURL)
- }
- return cm.targetAddr
-}
-
-// tlsHost returns the host name to match against the peer's
-// TLS certificate.
-func (cm *connectMethod) tlsHost() string {
- h := cm.targetAddr
- if hasPort(h) {
- h = h[:strings.LastIndex(h, ":")]
- }
- return h
-}
-
-// persistConn wraps a connection, usually a persistent one
-// (but may be used for non-keep-alive requests as well)
-type persistConn struct {
- t *Transport
- cacheKey string // its connectMethod.String()
- conn net.Conn
- cc *ClientConn
- br *bufio.Reader
- reqch chan requestAndChan // written by roundTrip(); read by readLoop()
- isProxy bool
-
- // mutateHeaderFunc is an optional func to modify extra
- // headers on each outbound request before it's written. (the
- // original Request given to RoundTrip is not modified)
- mutateHeaderFunc func(Header)
-
- lk sync.Mutex // guards numExpectedResponses and broken
- numExpectedResponses int
- broken bool // an error has happened on this connection; marked broken so it's not reused.
-}
-
-func (pc *persistConn) isBroken() bool {
- pc.lk.Lock()
- defer pc.lk.Unlock()
- return pc.broken
-}
-
-func (pc *persistConn) expectingResponse() bool {
- pc.lk.Lock()
- defer pc.lk.Unlock()
- return pc.numExpectedResponses > 0
-}
-
-var remoteSideClosedFunc func(error) bool // or nil to use default
-
-func remoteSideClosed(err error) bool {
- if err == io.EOF || err == os.EINVAL {
- return true
- }
- if remoteSideClosedFunc != nil {
- return remoteSideClosedFunc(err)
- }
- return false
-}
-
-func (pc *persistConn) readLoop() {
- alive := true
- for alive {
- pb, err := pc.br.Peek(1)
- if err != nil {
- if remoteSideClosed(err) && !pc.expectingResponse() {
- // Remote side closed on us. (We probably hit their
- // max idle timeout)
- pc.close()
- return
- }
- }
- if !pc.expectingResponse() {
- log.Printf("Unsolicited response received on idle HTTP channel starting with %q; err=%v",
- string(pb), err)
- pc.close()
- return
- }
-
- rc := <-pc.reqch
- resp, err := pc.cc.readUsing(rc.req, func(buf *bufio.Reader, forReq *Request) (*Response, error) {
- resp, err := ReadResponse(buf, forReq)
- if err != nil || resp.ContentLength == 0 {
- return resp, err
- }
- if rc.addedGzip && resp.Header.Get("Content-Encoding") == "gzip" {
- resp.Header.Del("Content-Encoding")
- resp.Header.Del("Content-Length")
- resp.ContentLength = -1
- gzReader, err := gzip.NewReader(resp.Body)
- if err != nil {
- pc.close()
- return nil, err
- }
- resp.Body = &readFirstCloseBoth{&discardOnCloseReadCloser{gzReader}, resp.Body}
- }
- resp.Body = &bodyEOFSignal{body: resp.Body}
- return resp, err
- })
-
- if err == ErrPersistEOF {
- // Succeeded, but we can't send any more
- // persistent connections on this again. We
- // hide this error to upstream callers.
- alive = false
- err = nil
- } else if err != nil || rc.req.Close {
- alive = false
- }
-
- hasBody := resp != nil && resp.ContentLength != 0
- var waitForBodyRead chan bool
- if alive {
- if hasBody {
- waitForBodyRead = make(chan bool)
- resp.Body.(*bodyEOFSignal).fn = func() {
- pc.t.putIdleConn(pc)
- waitForBodyRead <- true
- }
- } else {
- // When there's no response body, we immediately
- // reuse the TCP connection (putIdleConn), but
- // we need to prevent ClientConn.Read from
- // closing the Response.Body on the next
- // loop, otherwise it might close the body
- // before the client code has had a chance to
- // read it (even though it'll just be 0, EOF).
- pc.cc.lk.Lock()
- pc.cc.lastbody = nil
- pc.cc.lk.Unlock()
-
- pc.t.putIdleConn(pc)
- }
- }
-
- rc.ch <- responseAndError{resp, err}
-
- // Wait for the just-returned response body to be fully consumed
- // before we race and peek on the underlying bufio reader.
- if waitForBodyRead != nil {
- <-waitForBodyRead
- }
- }
-}
-
-type responseAndError struct {
- res *Response
- err error
-}
-
-type requestAndChan struct {
- req *Request
- ch chan responseAndError
-
- // did the Transport (as opposed to the client code) add an
- // Accept-Encoding gzip header? only if we set it do
- // we transparently decode the gzip.
- addedGzip bool
-}
-
-func (pc *persistConn) roundTrip(req *transportRequest) (resp *Response, err error) {
- if pc.mutateHeaderFunc != nil {
- pc.mutateHeaderFunc(req.extraHeaders())
- }
-
- // Ask for a compressed version if the caller didn't set their
- // own value for Accept-Encoding. We only attempt to
- // uncompress the gzip stream if we were the layer that
- // requested it.
- requestedGzip := false
- if !pc.t.DisableCompression && req.Header.Get("Accept-Encoding") == "" {
- // Request gzip only, not deflate. Deflate is ambiguous and
- // not as universally supported anyway.
- // See: http://www.gzip.org/zlib/zlib_faq.html#faq38
- requestedGzip = true
- req.extraHeaders().Set("Accept-Encoding", "gzip")
- }
-
- pc.lk.Lock()
- pc.numExpectedResponses++
- pc.lk.Unlock()
-
- pc.cc.writeReq = func(r *Request, w io.Writer) error {
- return r.write(w, pc.isProxy, req.extra)
- }
-
- err = pc.cc.Write(req.Request)
- if err != nil {
- pc.close()
- return
- }
-
- ch := make(chan responseAndError, 1)
- pc.reqch <- requestAndChan{req.Request, ch, requestedGzip}
- re := <-ch
- pc.lk.Lock()
- pc.numExpectedResponses--
- pc.lk.Unlock()
-
- return re.res, re.err
-}
-
-func (pc *persistConn) close() {
- pc.lk.Lock()
- defer pc.lk.Unlock()
- pc.broken = true
- pc.cc.Close()
- pc.conn.Close()
- pc.mutateHeaderFunc = nil
-}
-
-var portMap = map[string]string{
- "http": "80",
- "https": "443",
-}
-
-// canonicalAddr returns url.Host but always with a ":port" suffix
-func canonicalAddr(url *url.URL) string {
- addr := url.Host
- if !hasPort(addr) {
- return addr + ":" + portMap[url.Scheme]
- }
- return addr
-}
-
-func responseIsKeepAlive(res *Response) bool {
- // TODO: implement. for now just always shutting down the connection.
- return false
-}
-
-// bodyEOFSignal wraps a ReadCloser but runs fn (if non-nil) at most
-// once, right before the final Read() or Close() call returns, but after
-// EOF has been seen.
-type bodyEOFSignal struct {
- body io.ReadCloser
- fn func()
- isClosed bool
-}
-
-func (es *bodyEOFSignal) Read(p []byte) (n int, err error) {
- n, err = es.body.Read(p)
- if es.isClosed && n > 0 {
- panic("http: unexpected bodyEOFSignal Read after Close; see issue 1725")
- }
- if err == io.EOF && es.fn != nil {
- es.fn()
- es.fn = nil
- }
- return
-}
-
-func (es *bodyEOFSignal) Close() (err error) {
- if es.isClosed {
- return nil
- }
- es.isClosed = true
- err = es.body.Close()
- if err == nil && es.fn != nil {
- es.fn()
- es.fn = nil
- }
- return
-}
-
-type readFirstCloseBoth struct {
- io.ReadCloser
- io.Closer
-}
-
-func (r *readFirstCloseBoth) Close() error {
- if err := r.ReadCloser.Close(); err != nil {
- r.Closer.Close()
- return err
- }
- if err := r.Closer.Close(); err != nil {
- return err
- }
- return nil
-}
-
-// discardOnCloseReadCloser consumes all its input on Close.
-type discardOnCloseReadCloser struct {
- io.ReadCloser
-}
-
-func (d *discardOnCloseReadCloser) Close() error {
- io.Copy(ioutil.Discard, d.ReadCloser) // ignore errors; likely invalid or already closed
- return d.ReadCloser.Close()
-}
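A minimal sketch of wiring the Transport fields documented above (Proxy, MaxIdleConnsPerHost, DisableCompression) into a Client; the proxy address is a placeholder and the net/http import path is assumed.

package main

import (
	"fmt"
	"net/http"
	"net/url"
)

func main() {
	// proxy.example.com:3128 is a placeholder address.
	proxyURL, err := url.Parse("http://proxy.example.com:3128")
	if err != nil {
		panic(err)
	}
	tr := &http.Transport{
		Proxy:               http.ProxyURL(proxyURL), // always route through this proxy
		MaxIdleConnsPerHost: 4,                        // cache up to 4 idle conns per host
		DisableCompression:  true,                     // don't add Accept-Encoding: gzip on our behalf
	}
	c := &http.Client{Transport: tr}
	fmt.Printf("%T configured\n", c.Transport)
}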
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Tests for transport.go
-
-package http_test
-
-import (
- "bytes"
- "compress/gzip"
- "crypto/rand"
- "fmt"
- . "http"
- "http/httptest"
- "io"
- "io/ioutil"
- "strconv"
- "strings"
- "testing"
- "time"
- "url"
-)
-
-// TODO: test 5 pipelined requests with responses: 1) OK, 2) OK, Connection: Close
-// and then verify that the final 2 responses get errors back.
-
-// hostPortHandler writes back the client's "host:port".
-var hostPortHandler = HandlerFunc(func(w ResponseWriter, r *Request) {
- if r.FormValue("close") == "true" {
- w.Header().Set("Connection", "close")
- }
- w.Write([]byte(r.RemoteAddr))
-})
-
-// Make two subsequent requests and verify their responses are the same.
-// The response from the server is our own IP:port
-func TestTransportKeepAlives(t *testing.T) {
- ts := httptest.NewServer(hostPortHandler)
- defer ts.Close()
-
- for _, disableKeepAlive := range []bool{false, true} {
- tr := &Transport{DisableKeepAlives: disableKeepAlive}
- c := &Client{Transport: tr}
-
- fetch := func(n int) string {
- res, err := c.Get(ts.URL)
- if err != nil {
- t.Fatalf("error in disableKeepAlive=%v, req #%d, GET: %v", disableKeepAlive, n, err)
- }
- body, err := ioutil.ReadAll(res.Body)
- if err != nil {
- t.Fatalf("error in disableKeepAlive=%v, req #%d, ReadAll: %v", disableKeepAlive, n, err)
- }
- return string(body)
- }
-
- body1 := fetch(1)
- body2 := fetch(2)
-
- bodiesDiffer := body1 != body2
- if bodiesDiffer != disableKeepAlive {
- t.Errorf("error in disableKeepAlive=%v. unexpected bodiesDiffer=%v; body1=%q; body2=%q",
- disableKeepAlive, bodiesDiffer, body1, body2)
- }
- }
-}
-
-func TestTransportConnectionCloseOnResponse(t *testing.T) {
- ts := httptest.NewServer(hostPortHandler)
- defer ts.Close()
-
- for _, connectionClose := range []bool{false, true} {
- tr := &Transport{}
- c := &Client{Transport: tr}
-
- fetch := func(n int) string {
- req := new(Request)
- var err error
- req.URL, err = url.Parse(ts.URL + fmt.Sprintf("/?close=%v", connectionClose))
- if err != nil {
- t.Fatalf("URL parse error: %v", err)
- }
- req.Method = "GET"
- req.Proto = "HTTP/1.1"
- req.ProtoMajor = 1
- req.ProtoMinor = 1
-
- res, err := c.Do(req)
- if err != nil {
- t.Fatalf("error in connectionClose=%v, req #%d, Do: %v", connectionClose, n, err)
- }
- body, err := ioutil.ReadAll(res.Body)
- defer res.Body.Close()
- if err != nil {
- t.Fatalf("error in connectionClose=%v, req #%d, ReadAll: %v", connectionClose, n, err)
- }
- return string(body)
- }
-
- body1 := fetch(1)
- body2 := fetch(2)
- bodiesDiffer := body1 != body2
- if bodiesDiffer != connectionClose {
- t.Errorf("error in connectionClose=%v. unexpected bodiesDiffer=%v; body1=%q; body2=%q",
- connectionClose, bodiesDiffer, body1, body2)
- }
- }
-}
-
-func TestTransportConnectionCloseOnRequest(t *testing.T) {
- ts := httptest.NewServer(hostPortHandler)
- defer ts.Close()
-
- for _, connectionClose := range []bool{false, true} {
- tr := &Transport{}
- c := &Client{Transport: tr}
-
- fetch := func(n int) string {
- req := new(Request)
- var err error
- req.URL, err = url.Parse(ts.URL)
- if err != nil {
- t.Fatalf("URL parse error: %v", err)
- }
- req.Method = "GET"
- req.Proto = "HTTP/1.1"
- req.ProtoMajor = 1
- req.ProtoMinor = 1
- req.Close = connectionClose
-
- res, err := c.Do(req)
- if err != nil {
- t.Fatalf("error in connectionClose=%v, req #%d, Do: %v", connectionClose, n, err)
- }
- body, err := ioutil.ReadAll(res.Body)
- if err != nil {
- t.Fatalf("error in connectionClose=%v, req #%d, ReadAll: %v", connectionClose, n, err)
- }
- return string(body)
- }
-
- body1 := fetch(1)
- body2 := fetch(2)
- bodiesDiffer := body1 != body2
- if bodiesDiffer != connectionClose {
- t.Errorf("error in connectionClose=%v. unexpected bodiesDiffer=%v; body1=%q; body2=%q",
- connectionClose, bodiesDiffer, body1, body2)
- }
- }
-}
-
-func TestTransportIdleCacheKeys(t *testing.T) {
- ts := httptest.NewServer(hostPortHandler)
- defer ts.Close()
-
- tr := &Transport{DisableKeepAlives: false}
- c := &Client{Transport: tr}
-
- if e, g := 0, len(tr.IdleConnKeysForTesting()); e != g {
- t.Errorf("After CloseIdleConnections expected %d idle conn cache keys; got %d", e, g)
- }
-
- resp, err := c.Get(ts.URL)
- if err != nil {
- t.Error(err)
- }
- ioutil.ReadAll(resp.Body)
-
- keys := tr.IdleConnKeysForTesting()
- if e, g := 1, len(keys); e != g {
- t.Fatalf("After Get expected %d idle conn cache keys; got %d", e, g)
- }
-
- if e := "|http|" + ts.Listener.Addr().String(); keys[0] != e {
- t.Errorf("Expected idle cache key %q; got %q", e, keys[0])
- }
-
- tr.CloseIdleConnections()
- if e, g := 0, len(tr.IdleConnKeysForTesting()); e != g {
- t.Errorf("After CloseIdleConnections expected %d idle conn cache keys; got %d", e, g)
- }
-}
-
-func TestTransportMaxPerHostIdleConns(t *testing.T) {
- resch := make(chan string)
- gotReq := make(chan bool)
- ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
- gotReq <- true
- msg := <-resch
- _, err := w.Write([]byte(msg))
- if err != nil {
- t.Fatalf("Write: %v", err)
- }
- }))
- defer ts.Close()
- maxIdleConns := 2
- tr := &Transport{DisableKeepAlives: false, MaxIdleConnsPerHost: maxIdleConns}
- c := &Client{Transport: tr}
-
- // Start 3 outstanding requests and wait for the server to get them.
- // Their responses will hang until we write to resch, though.
- donech := make(chan bool)
- doReq := func() {
- resp, err := c.Get(ts.URL)
- if err != nil {
- t.Error(err)
- }
- _, err = ioutil.ReadAll(resp.Body)
- if err != nil {
- t.Fatalf("ReadAll: %v", err)
- }
- donech <- true
- }
- go doReq()
- <-gotReq
- go doReq()
- <-gotReq
- go doReq()
- <-gotReq
-
- if e, g := 0, len(tr.IdleConnKeysForTesting()); e != g {
- t.Fatalf("Before writes, expected %d idle conn cache keys; got %d", e, g)
- }
-
- resch <- "res1"
- <-donech
- keys := tr.IdleConnKeysForTesting()
- if e, g := 1, len(keys); e != g {
- t.Fatalf("after first response, expected %d idle conn cache keys; got %d", e, g)
- }
- cacheKey := "|http|" + ts.Listener.Addr().String()
- if keys[0] != cacheKey {
- t.Fatalf("Expected idle cache key %q; got %q", cacheKey, keys[0])
- }
- if e, g := 1, tr.IdleConnCountForTesting(cacheKey); e != g {
- t.Errorf("after first response, expected %d idle conns; got %d", e, g)
- }
-
- resch <- "res2"
- <-donech
- if e, g := 2, tr.IdleConnCountForTesting(cacheKey); e != g {
- t.Errorf("after second response, expected %d idle conns; got %d", e, g)
- }
-
- resch <- "res3"
- <-donech
- if e, g := maxIdleConns, tr.IdleConnCountForTesting(cacheKey); e != g {
- t.Errorf("after third response, still expected %d idle conns; got %d", e, g)
- }
-}
-
-func TestTransportServerClosingUnexpectedly(t *testing.T) {
- ts := httptest.NewServer(hostPortHandler)
- defer ts.Close()
-
- tr := &Transport{}
- c := &Client{Transport: tr}
-
- fetch := func(n, retries int) string {
- condFatalf := func(format string, arg ...interface{}) {
- if retries <= 0 {
- t.Fatalf(format, arg...)
- }
- t.Logf("retrying shortly after expected error: "+format, arg...)
- time.Sleep(1e9 / int64(retries))
- }
- for retries >= 0 {
- retries--
- res, err := c.Get(ts.URL)
- if err != nil {
- condFatalf("error in req #%d, GET: %v", n, err)
- continue
- }
- body, err := ioutil.ReadAll(res.Body)
- if err != nil {
- condFatalf("error in req #%d, ReadAll: %v", n, err)
- continue
- }
- res.Body.Close()
- return string(body)
- }
- panic("unreachable")
- }
-
- body1 := fetch(1, 0)
- body2 := fetch(2, 0)
-
- ts.CloseClientConnections() // surprise!
-
- // This test has an expected race. Sleeping for 25 ms prevents
- // it on most fast machines, causing the next fetch() call to
- // succeed quickly. But if we do get errors, fetch() will retry 5
- // times with some delays between.
- time.Sleep(25e6)
-
- body3 := fetch(3, 5)
-
- if body1 != body2 {
- t.Errorf("expected body1 and body2 to be equal")
- }
- if body2 == body3 {
- t.Errorf("expected body2 and body3 to be different")
- }
-}
-
-// TestTransportHeadResponses verifies that responses with a Content-Length
-// but no body are handled properly.
-func TestTransportHeadResponses(t *testing.T) {
- ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
- if r.Method != "HEAD" {
- panic("expected HEAD; got " + r.Method)
- }
- w.Header().Set("Content-Length", "123")
- w.WriteHeader(200)
- }))
- defer ts.Close()
-
- tr := &Transport{DisableKeepAlives: false}
- c := &Client{Transport: tr}
- for i := 0; i < 2; i++ {
- res, err := c.Head(ts.URL)
- if err != nil {
- t.Errorf("error on loop %d: %v", i, err)
- }
- if e, g := "123", res.Header.Get("Content-Length"); e != g {
- t.Errorf("loop %d: expected Content-Length header of %q, got %q", i, e, g)
- }
- if e, g := int64(0), res.ContentLength; e != g {
- t.Errorf("loop %d: expected res.ContentLength of %v, got %v", i, e, g)
- }
- }
-}
-
-// TestTransportHeadChunkedResponse verifies that we ignore chunked transfer-encoding
-// on responses to HEAD requests.
-func TestTransportHeadChunkedResponse(t *testing.T) {
- ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
- if r.Method != "HEAD" {
- panic("expected HEAD; got " + r.Method)
- }
- w.Header().Set("Transfer-Encoding", "chunked") // client should ignore
- w.Header().Set("x-client-ipport", r.RemoteAddr)
- w.WriteHeader(200)
- }))
- defer ts.Close()
-
- tr := &Transport{DisableKeepAlives: false}
- c := &Client{Transport: tr}
-
- res1, err := c.Head(ts.URL)
- if err != nil {
- t.Fatalf("request 1 error: %v", err)
- }
- res2, err := c.Head(ts.URL)
- if err != nil {
- t.Fatalf("request 2 error: %v", err)
- }
- if v1, v2 := res1.Header.Get("x-client-ipport"), res2.Header.Get("x-client-ipport"); v1 != v2 {
- t.Errorf("ip/ports differed between head requests: %q vs %q", v1, v2)
- }
-}
-
-var roundTripTests = []struct {
- accept string
- expectAccept string
- compressed bool
-}{
- // Requests with no accept-encoding header use transparent compression
- {"", "gzip", false},
- // Requests with other accept-encoding should pass through unmodified
- {"foo", "foo", false},
- // Requests with accept-encoding == gzip should be passed through
- {"gzip", "gzip", true},
-}
-
-// Test that the modification made to the Request by the RoundTripper is cleaned up
-func TestRoundTripGzip(t *testing.T) {
- const responseBody = "test response body"
- ts := httptest.NewServer(HandlerFunc(func(rw ResponseWriter, req *Request) {
- accept := req.Header.Get("Accept-Encoding")
- if expect := req.FormValue("expect_accept"); accept != expect {
- t.Errorf("in handler, test %v: Accept-Encoding = %q, want %q",
- req.FormValue("testnum"), accept, expect)
- }
- if accept == "gzip" {
- rw.Header().Set("Content-Encoding", "gzip")
- gz, _ := gzip.NewWriter(rw)
- gz.Write([]byte(responseBody))
- gz.Close()
- } else {
- rw.Header().Set("Content-Encoding", accept)
- rw.Write([]byte(responseBody))
- }
- }))
- defer ts.Close()
-
- for i, test := range roundTripTests {
- // Test basic request (no accept-encoding)
- req, _ := NewRequest("GET", fmt.Sprintf("%s/?testnum=%d&expect_accept=%s", ts.URL, i, test.expectAccept), nil)
- if test.accept != "" {
- req.Header.Set("Accept-Encoding", test.accept)
- }
- res, err := DefaultTransport.RoundTrip(req)
- var body []byte
- if test.compressed {
- gzip, _ := gzip.NewReader(res.Body)
- body, err = ioutil.ReadAll(gzip)
- res.Body.Close()
- } else {
- body, err = ioutil.ReadAll(res.Body)
- }
- if err != nil {
- t.Errorf("%d. Error: %q", i, err)
- continue
- }
- if g, e := string(body), responseBody; g != e {
- t.Errorf("%d. body = %q; want %q", i, g, e)
- }
- if g, e := req.Header.Get("Accept-Encoding"), test.accept; g != e {
- t.Errorf("%d. Accept-Encoding = %q; want %q (it was mutated, in violation of RoundTrip contract)", i, g, e)
- }
- if g, e := res.Header.Get("Content-Encoding"), test.accept; g != e {
- t.Errorf("%d. Content-Encoding = %q; want %q", i, g, e)
- }
- }
-
-}
-
-func TestTransportGzip(t *testing.T) {
- const testString = "The test string aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
- const nRandBytes = 1024 * 1024
- ts := httptest.NewServer(HandlerFunc(func(rw ResponseWriter, req *Request) {
- if g, e := req.Header.Get("Accept-Encoding"), "gzip"; g != e {
- t.Errorf("Accept-Encoding = %q, want %q", g, e)
- }
- rw.Header().Set("Content-Encoding", "gzip")
- if req.Method == "HEAD" {
- return
- }
-
- var w io.Writer = rw
- var buf bytes.Buffer
- if req.FormValue("chunked") == "0" {
- w = &buf
- defer io.Copy(rw, &buf)
- defer func() {
- rw.Header().Set("Content-Length", strconv.Itoa(buf.Len()))
- }()
- }
- gz, _ := gzip.NewWriter(w)
- gz.Write([]byte(testString))
- if req.FormValue("body") == "large" {
- io.CopyN(gz, rand.Reader, nRandBytes)
- }
- gz.Close()
- }))
- defer ts.Close()
-
- for _, chunked := range []string{"1", "0"} {
- c := &Client{Transport: &Transport{}}
-
- // First fetch something large, but only read some of it.
- res, err := c.Get(ts.URL + "/?body=large&chunked=" + chunked)
- if err != nil {
- t.Fatalf("large get: %v", err)
- }
- buf := make([]byte, len(testString))
- n, err := io.ReadFull(res.Body, buf)
- if err != nil {
- t.Fatalf("partial read of large response: size=%d, %v", n, err)
- }
- if e, g := testString, string(buf); e != g {
- t.Errorf("partial read got %q, expected %q", g, e)
- }
- res.Body.Close()
- // Read on the body, even though it's closed
- n, err = res.Body.Read(buf)
- if n != 0 || err == nil {
- t.Errorf("expected error post-closed large Read; got = %d, %v", n, err)
- }
-
- // Then something small.
- res, err = c.Get(ts.URL + "/?chunked=" + chunked)
- if err != nil {
- t.Fatal(err)
- }
- body, err := ioutil.ReadAll(res.Body)
- if err != nil {
- t.Fatal(err)
- }
- if g, e := string(body), testString; g != e {
- t.Fatalf("body = %q; want %q", g, e)
- }
- if g, e := res.Header.Get("Content-Encoding"), ""; g != e {
- t.Fatalf("Content-Encoding = %q; want %q", g, e)
- }
-
- // Read on the body after it's been fully read:
- n, err = res.Body.Read(buf)
- if n != 0 || err == nil {
- t.Errorf("expected Read error after exhausted reads; got %d, %v", n, err)
- }
- res.Body.Close()
- n, err = res.Body.Read(buf)
- if n != 0 || err == nil {
- t.Errorf("expected Read error after Close; got %d, %v", n, err)
- }
- }
-
- // And a HEAD request too, because they're always weird.
- c := &Client{Transport: &Transport{}}
- res, err := c.Head(ts.URL)
- if err != nil {
- t.Fatalf("Head: %v", err)
- }
- if res.StatusCode != 200 {
- t.Errorf("Head status=%d; want=200", res.StatusCode)
- }
-}
-
-func TestTransportProxy(t *testing.T) {
- ch := make(chan string, 1)
- ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
- ch <- "real server"
- }))
- defer ts.Close()
- proxy := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
- ch <- "proxy for " + r.URL.String()
- }))
- defer proxy.Close()
-
- pu, err := url.Parse(proxy.URL)
- if err != nil {
- t.Fatal(err)
- }
- c := &Client{Transport: &Transport{Proxy: ProxyURL(pu)}}
- c.Head(ts.URL)
- got := <-ch
- want := "proxy for " + ts.URL + "/"
- if got != want {
- t.Errorf("want %q, got %q", want, got)
- }
-}
-
-// TestTransportGzipRecursive sends a gzip quine and checks that the
-// client gets the same value back. This is more cute than anything,
-// but checks that we don't recurse forever, and checks that
-// Content-Encoding is removed.
-func TestTransportGzipRecursive(t *testing.T) {
- ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
- w.Header().Set("Content-Encoding", "gzip")
- w.Write(rgz)
- }))
- defer ts.Close()
-
- c := &Client{Transport: &Transport{}}
- res, err := c.Get(ts.URL)
- if err != nil {
- t.Fatal(err)
- }
- body, err := ioutil.ReadAll(res.Body)
- if err != nil {
- t.Fatal(err)
- }
- if !bytes.Equal(body, rgz) {
- t.Fatalf("Incorrect result from recursive gz:\nhave=%x\nwant=%x",
- body, rgz)
- }
- if g, e := res.Header.Get("Content-Encoding"), ""; g != e {
- t.Fatalf("Content-Encoding = %q; want %q", g, e)
- }
-}
-
-type fooProto struct{}
-
-func (fooProto) RoundTrip(req *Request) (*Response, error) {
- res := &Response{
- Status: "200 OK",
- StatusCode: 200,
- Header: make(Header),
- Body: ioutil.NopCloser(strings.NewReader("You wanted " + req.URL.String())),
- }
- return res, nil
-}
-
-func TestTransportAltProto(t *testing.T) {
- tr := &Transport{}
- c := &Client{Transport: tr}
- tr.RegisterProtocol("foo", fooProto{})
- res, err := c.Get("foo://bar.com/path")
- if err != nil {
- t.Fatal(err)
- }
- bodyb, err := ioutil.ReadAll(res.Body)
- if err != nil {
- t.Fatal(err)
- }
- body := string(bodyb)
- if e := "You wanted foo://bar.com/path"; body != e {
- t.Errorf("got response %q, want %q", body, e)
- }
-}
-
-// rgz is a gzip quine that uncompresses to itself.
-var rgz = []byte{
- 0x1f, 0x8b, 0x08, 0x08, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73,
- 0x69, 0x76, 0x65, 0x00, 0x92, 0xef, 0xe6, 0xe0,
- 0x60, 0x00, 0x83, 0xa2, 0xd4, 0xe4, 0xd2, 0xa2,
- 0xe2, 0xcc, 0xb2, 0x54, 0x06, 0x00, 0x00, 0x17,
- 0x00, 0xe8, 0xff, 0x92, 0xef, 0xe6, 0xe0, 0x60,
- 0x00, 0x83, 0xa2, 0xd4, 0xe4, 0xd2, 0xa2, 0xe2,
- 0xcc, 0xb2, 0x54, 0x06, 0x00, 0x00, 0x17, 0x00,
- 0xe8, 0xff, 0x42, 0x12, 0x46, 0x16, 0x06, 0x00,
- 0x05, 0x00, 0xfa, 0xff, 0x42, 0x12, 0x46, 0x16,
- 0x06, 0x00, 0x05, 0x00, 0xfa, 0xff, 0x00, 0x05,
- 0x00, 0xfa, 0xff, 0x00, 0x14, 0x00, 0xeb, 0xff,
- 0x42, 0x12, 0x46, 0x16, 0x06, 0x00, 0x05, 0x00,
- 0xfa, 0xff, 0x00, 0x05, 0x00, 0xfa, 0xff, 0x00,
- 0x14, 0x00, 0xeb, 0xff, 0x42, 0x88, 0x21, 0xc4,
- 0x00, 0x00, 0x14, 0x00, 0xeb, 0xff, 0x42, 0x88,
- 0x21, 0xc4, 0x00, 0x00, 0x14, 0x00, 0xeb, 0xff,
- 0x42, 0x88, 0x21, 0xc4, 0x00, 0x00, 0x14, 0x00,
- 0xeb, 0xff, 0x42, 0x88, 0x21, 0xc4, 0x00, 0x00,
- 0x14, 0x00, 0xeb, 0xff, 0x42, 0x88, 0x21, 0xc4,
- 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00,
- 0x00, 0xff, 0xff, 0x00, 0x17, 0x00, 0xe8, 0xff,
- 0x42, 0x88, 0x21, 0xc4, 0x00, 0x00, 0x00, 0x00,
- 0xff, 0xff, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00,
- 0x17, 0x00, 0xe8, 0xff, 0x42, 0x12, 0x46, 0x16,
- 0x06, 0x00, 0x00, 0x00, 0xff, 0xff, 0x01, 0x08,
- 0x00, 0xf7, 0xff, 0x3d, 0xb1, 0x20, 0x85, 0xfa,
- 0x00, 0x00, 0x00, 0x42, 0x12, 0x46, 0x16, 0x06,
- 0x00, 0x00, 0x00, 0xff, 0xff, 0x01, 0x08, 0x00,
- 0xf7, 0xff, 0x3d, 0xb1, 0x20, 0x85, 0xfa, 0x00,
- 0x00, 0x00, 0x3d, 0xb1, 0x20, 0x85, 0xfa, 0x00,
- 0x00, 0x00,
-}
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http
-
-import (
- "os"
- "net"
-)
-
-func init() {
- remoteSideClosedFunc = func(err error) (out bool) {
- op, ok := err.(*net.OpError)
- if ok && op.Op == "WSARecv" && op.Net == "tcp" && op.Err == os.Errno(10058) {
- // TODO(bradfitz): find the symbol for 10058
- return true
- }
- return false
- }
-}
import (
"errors"
- "image/color"
"image"
+ "image/color"
"io"
)
"image/color"
"image/png"
"io/ioutil"
- "rand"
+ "math/rand"
"os"
"testing"
)
import (
"bytes"
- "rand"
+ "math/rand"
"regexp"
"sort"
"strings"
package io_test
import (
- . "io"
"bytes"
"crypto/sha1"
"fmt"
+ . "io"
"strings"
"testing"
)
+++ /dev/null
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Represents JSON data structure using native Go types: booleans, floats,
-// strings, arrays, and maps.
-
-package json
-
-import (
- "encoding/base64"
- "errors"
- "reflect"
- "runtime"
- "strconv"
- "strings"
- "unicode"
- "utf16"
- "utf8"
-)
-
-// Unmarshal parses the JSON-encoded data and stores the result
-// in the value pointed to by v.
-//
-// Unmarshal uses the inverse of the encodings that
-// Marshal uses, allocating maps, slices, and pointers as necessary,
-// with the following additional rules:
-//
-// To unmarshal JSON into a pointer, Unmarshal first handles the case of
-// the JSON being the JSON literal null. In that case, Unmarshal sets
-// the pointer to nil. Otherwise, Unmarshal unmarshals the JSON into
-// the value pointed at by the pointer. If the pointer is nil, Unmarshal
-// allocates a new value for it to point to.
-//
-// To unmarshal JSON into an interface value, Unmarshal unmarshals
-// the JSON into the concrete value contained in the interface value.
-// If the interface value is nil, that is, has no concrete value stored in it,
-// Unmarshal stores one of these in the interface value:
-//
-// bool, for JSON booleans
-// float64, for JSON numbers
-// string, for JSON strings
-// []interface{}, for JSON arrays
-// map[string]interface{}, for JSON objects
-// nil for JSON null
-//
-// If a JSON value is not appropriate for a given target type,
-// or if a JSON number overflows the target type, Unmarshal
-// skips that field and completes the unmarshalling as best it can.
-// If no more serious errors are encountered, Unmarshal returns
-// an UnmarshalTypeError describing the earliest such error.
-//
-func Unmarshal(data []byte, v interface{}) error {
- d := new(decodeState).init(data)
-
- // Quick check for well-formedness.
- // Avoids filling out half a data structure
- // before discovering a JSON syntax error.
- err := checkValid(data, &d.scan)
- if err != nil {
- return err
- }
-
- return d.unmarshal(v)
-}
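
For orientation while reviewing this removal: the package lives on under the reorganized import path encoding/json (matching the other import-path renames in this patch), and the calling convention documented above is unchanged. A minimal, hedged sketch of the two common cases; the Point type is illustrative only.

package main

import (
	"encoding/json" // formerly imported as "json"
	"fmt"
)

// Point is a hypothetical target type for the struct case.
type Point struct {
	X int `json:"x"`
	Y int `json:"y"`
}

func main() {
	// Unmarshal into a struct: keys are matched to fields via the
	// "json" tag, then by exact name, then case-insensitively.
	var p Point
	if err := json.Unmarshal([]byte(`{"x": 1, "y": 2}`), &p); err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println(p) // {1 2}

	// Unmarshal into an empty interface: numbers become float64,
	// objects map[string]interface{}, arrays []interface{}.
	var v interface{}
	if err := json.Unmarshal([]byte(`[1, "two", null]`), &v); err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println(v) // [1 two <nil>]
}
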
-
-// Unmarshaler is the interface implemented by objects
-// that can unmarshal a JSON description of themselves.
-// The input can be assumed to be a valid JSON object
-// encoding. UnmarshalJSON must copy the JSON data
-// if it wishes to retain the data after returning.
-type Unmarshaler interface {
- UnmarshalJSON([]byte) error
-}
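
To make the interface concrete, a hedged sketch of a type satisfying Unmarshaler; the Celsius type and the shapes it accepts are invented for illustration, and encoding/json is again assumed as the import path.

package main

import (
	"encoding/json"
	"fmt"
)

// Celsius is a hypothetical named type with a custom decoder.
type Celsius float64

// UnmarshalJSON accepts either a bare number (21.5) or {"celsius": 21.5}.
func (c *Celsius) UnmarshalJSON(b []byte) error {
	var f float64
	if err := json.Unmarshal(b, &f); err == nil {
		*c = Celsius(f)
		return nil
	}
	var obj struct {
		Celsius float64 `json:"celsius"`
	}
	if err := json.Unmarshal(b, &obj); err != nil {
		return err
	}
	*c = Celsius(obj.Celsius)
	return nil
}

func main() {
	var room Celsius
	// Because *Celsius implements Unmarshaler, Unmarshal hands the raw
	// bytes to UnmarshalJSON instead of using the default decoding.
	if err := json.Unmarshal([]byte(`{"celsius": 21.5}`), &room); err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println(room) // 21.5
}
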
-
-// An UnmarshalTypeError describes a JSON value that was
-// not appropriate for a value of a specific Go type.
-type UnmarshalTypeError struct {
- Value string // description of JSON value - "bool", "array", "number -5"
- Type reflect.Type // type of Go value it could not be assigned to
-}
-
-func (e *UnmarshalTypeError) Error() string {
- return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String()
-}
-
-// An UnmarshalFieldError describes a JSON object key that
-// led to an unexported (and therefore unwritable) struct field.
-type UnmarshalFieldError struct {
- Key string
- Type reflect.Type
- Field reflect.StructField
-}
-
-func (e *UnmarshalFieldError) Error() string {
- return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String()
-}
-
-// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal.
-// (The argument to Unmarshal must be a non-nil pointer.)
-type InvalidUnmarshalError struct {
- Type reflect.Type
-}
-
-func (e *InvalidUnmarshalError) Error() string {
- if e.Type == nil {
- return "json: Unmarshal(nil)"
- }
-
- if e.Type.Kind() != reflect.Ptr {
- return "json: Unmarshal(non-pointer " + e.Type.String() + ")"
- }
- return "json: Unmarshal(nil " + e.Type.String() + ")"
-}
-
-func (d *decodeState) unmarshal(v interface{}) (err error) {
- defer func() {
- if r := recover(); r != nil {
- if _, ok := r.(runtime.Error); ok {
- panic(r)
- }
- err = r.(error)
- }
- }()
-
- rv := reflect.ValueOf(v)
- pv := rv
- if pv.Kind() != reflect.Ptr || pv.IsNil() {
- return &InvalidUnmarshalError{reflect.TypeOf(v)}
- }
-
- d.scan.reset()
- // We decode rv not pv.Elem because the Unmarshaler interface
- // test must be applied at the top level of the value.
- d.value(rv)
- return d.savedError
-}
-
-// decodeState represents the state while decoding a JSON value.
-type decodeState struct {
- data []byte
- off int // read offset in data
- scan scanner
- nextscan scanner // for calls to nextValue
- savedError error
- tempstr string // scratch space to avoid some allocations
-}
-
-// errPhase is used for errors that should not happen unless
-// there is a bug in the JSON decoder or something is editing
-// the data slice while the decoder executes.
-var errPhase = errors.New("JSON decoder out of sync - data changing underfoot?")
-
-func (d *decodeState) init(data []byte) *decodeState {
- d.data = data
- d.off = 0
- d.savedError = nil
- return d
-}
-
-// error aborts the decoding by panicking with err.
-func (d *decodeState) error(err error) {
- panic(err)
-}
-
-// saveError saves the first err it is called with,
-// for reporting at the end of the unmarshal.
-func (d *decodeState) saveError(err error) {
- if d.savedError == nil {
- d.savedError = err
- }
-}
-
-// next cuts off and returns the next full JSON value in d.data[d.off:].
-// The next value is known to be an object or array, not a literal.
-func (d *decodeState) next() []byte {
- c := d.data[d.off]
- item, rest, err := nextValue(d.data[d.off:], &d.nextscan)
- if err != nil {
- d.error(err)
- }
- d.off = len(d.data) - len(rest)
-
- // Our scanner has seen the opening brace/bracket
- // and thinks we're still in the middle of the object.
- // invent a closing brace/bracket to get it out.
- if c == '{' {
- d.scan.step(&d.scan, '}')
- } else {
- d.scan.step(&d.scan, ']')
- }
-
- return item
-}
-
-// scanWhile processes bytes in d.data[d.off:] until it
-// receives a scan code not equal to op.
-// It updates d.off and returns the new scan code.
-func (d *decodeState) scanWhile(op int) int {
- var newOp int
- for {
- if d.off >= len(d.data) {
- newOp = d.scan.eof()
- d.off = len(d.data) + 1 // mark processed EOF with len+1
- } else {
- c := int(d.data[d.off])
- d.off++
- newOp = d.scan.step(&d.scan, c)
- }
- if newOp != op {
- break
- }
- }
- return newOp
-}
-
-// value decodes a JSON value from d.data[d.off:] into the value.
-// it updates d.off to point past the decoded value.
-func (d *decodeState) value(v reflect.Value) {
- if !v.IsValid() {
- _, rest, err := nextValue(d.data[d.off:], &d.nextscan)
- if err != nil {
- d.error(err)
- }
- d.off = len(d.data) - len(rest)
-
- // d.scan thinks we're still at the beginning of the item.
- // Feed in an empty string - the shortest, simplest value -
- // so that it knows we got to the end of the value.
- if d.scan.step == stateRedo {
- panic("redo")
- }
- d.scan.step(&d.scan, '"')
- d.scan.step(&d.scan, '"')
- return
- }
-
- switch op := d.scanWhile(scanSkipSpace); op {
- default:
- d.error(errPhase)
-
- case scanBeginArray:
- d.array(v)
-
- case scanBeginObject:
- d.object(v)
-
- case scanBeginLiteral:
- d.literal(v)
- }
-}
-
-// indirect walks down v allocating pointers as needed,
-// until it gets to a non-pointer.
-// if it encounters an Unmarshaler, indirect stops and returns that.
-// if decodingNull is true, indirect stops at the last pointer so it can be set to nil.
-func (d *decodeState) indirect(v reflect.Value, decodingNull bool) (Unmarshaler, reflect.Value) {
- // If v is a named type and is addressable,
- // start with its address, so that if the type has pointer methods,
- // we find them.
- if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() {
- v = v.Addr()
- }
- for {
- var isUnmarshaler bool
- if v.Type().NumMethod() > 0 {
- // Remember that this is an unmarshaler,
- // but wait to return it until after allocating
- // the pointer (if necessary).
- _, isUnmarshaler = v.Interface().(Unmarshaler)
- }
-
- if iv := v; iv.Kind() == reflect.Interface && !iv.IsNil() {
- v = iv.Elem()
- continue
- }
-
- pv := v
- if pv.Kind() != reflect.Ptr {
- break
- }
-
- if pv.Elem().Kind() != reflect.Ptr && decodingNull && pv.CanSet() {
- return nil, pv
- }
- if pv.IsNil() {
- pv.Set(reflect.New(pv.Type().Elem()))
- }
- if isUnmarshaler {
- // Using v.Interface().(Unmarshaler)
- // here means that we have to use a pointer
- // as the struct field. We cannot use a value inside
- // a pointer to a struct, because in that case
- // v.Interface() is the value (x.f) not the pointer (&x.f).
- // This is an unfortunate consequence of reflect.
- // An alternative would be to look up the
- // UnmarshalJSON method and return a FuncValue.
- return v.Interface().(Unmarshaler), reflect.Value{}
- }
- v = pv.Elem()
- }
- return nil, v
-}
-
-// array consumes an array from d.data[d.off-1:], decoding into the value v.
-// the first byte of the array ('[') has been read already.
-func (d *decodeState) array(v reflect.Value) {
- // Check for unmarshaler.
- unmarshaler, pv := d.indirect(v, false)
- if unmarshaler != nil {
- d.off--
- err := unmarshaler.UnmarshalJSON(d.next())
- if err != nil {
- d.error(err)
- }
- return
- }
- v = pv
-
- // Decoding into nil interface? Switch to non-reflect code.
- iv := v
- ok := iv.Kind() == reflect.Interface
- if ok {
- iv.Set(reflect.ValueOf(d.arrayInterface()))
- return
- }
-
- // Check type of target.
- av := v
- if av.Kind() != reflect.Array && av.Kind() != reflect.Slice {
- d.saveError(&UnmarshalTypeError{"array", v.Type()})
- d.off--
- d.next()
- return
- }
-
- sv := v
-
- i := 0
- for {
- // Look ahead for ] - can only happen on first iteration.
- op := d.scanWhile(scanSkipSpace)
- if op == scanEndArray {
- break
- }
-
- // Back up so d.value can have the byte we just read.
- d.off--
- d.scan.undo(op)
-
- // Get element of array, growing if necessary.
- if i >= av.Cap() && sv.IsValid() {
- newcap := sv.Cap() + sv.Cap()/2
- if newcap < 4 {
- newcap = 4
- }
- newv := reflect.MakeSlice(sv.Type(), sv.Len(), newcap)
- reflect.Copy(newv, sv)
- sv.Set(newv)
- }
- if i >= av.Len() && sv.IsValid() {
- // Must be slice; gave up on array during i >= av.Cap().
- sv.SetLen(i + 1)
- }
-
- // Decode into element.
- if i < av.Len() {
- d.value(av.Index(i))
- } else {
- // Ran out of fixed array: skip.
- d.value(reflect.Value{})
- }
- i++
-
- // Next token must be , or ].
- op = d.scanWhile(scanSkipSpace)
- if op == scanEndArray {
- break
- }
- if op != scanArrayValue {
- d.error(errPhase)
- }
- }
- if i < av.Len() {
- if !sv.IsValid() {
- // Array. Zero the rest.
- z := reflect.Zero(av.Type().Elem())
- for ; i < av.Len(); i++ {
- av.Index(i).Set(z)
- }
- } else {
- sv.SetLen(i)
- }
- }
-}
-
-// object consumes an object from d.data[d.off-1:], decoding into the value v.
-// the first byte of the object ('{') has been read already.
-func (d *decodeState) object(v reflect.Value) {
- // Check for unmarshaler.
- unmarshaler, pv := d.indirect(v, false)
- if unmarshaler != nil {
- d.off--
- err := unmarshaler.UnmarshalJSON(d.next())
- if err != nil {
- d.error(err)
- }
- return
- }
- v = pv
-
- // Decoding into nil interface? Switch to non-reflect code.
- iv := v
- if iv.Kind() == reflect.Interface {
- iv.Set(reflect.ValueOf(d.objectInterface()))
- return
- }
-
- // Check type of target: struct or map[string]T
- var (
- mv reflect.Value
- sv reflect.Value
- )
- switch v.Kind() {
- case reflect.Map:
- // map must have string type
- t := v.Type()
- if t.Key() != reflect.TypeOf("") {
- d.saveError(&UnmarshalTypeError{"object", v.Type()})
- break
- }
- mv = v
- if mv.IsNil() {
- mv.Set(reflect.MakeMap(t))
- }
- case reflect.Struct:
- sv = v
- default:
- d.saveError(&UnmarshalTypeError{"object", v.Type()})
- }
-
- if !mv.IsValid() && !sv.IsValid() {
- d.off--
- d.next() // skip over { } in input
- return
- }
-
- var mapElem reflect.Value
-
- for {
- // Read opening " of string key or closing }.
- op := d.scanWhile(scanSkipSpace)
- if op == scanEndObject {
- // closing } - can only happen on first iteration.
- break
- }
- if op != scanBeginLiteral {
- d.error(errPhase)
- }
-
- // Read string key.
- start := d.off - 1
- op = d.scanWhile(scanContinue)
- item := d.data[start : d.off-1]
- key, ok := unquote(item)
- if !ok {
- d.error(errPhase)
- }
-
- // Figure out field corresponding to key.
- var subv reflect.Value
- destring := false // whether the value is wrapped in a string to be decoded first
-
- if mv.IsValid() {
- elemType := mv.Type().Elem()
- if !mapElem.IsValid() {
- mapElem = reflect.New(elemType).Elem()
- } else {
- mapElem.Set(reflect.Zero(elemType))
- }
- subv = mapElem
- } else {
- var f reflect.StructField
- var ok bool
- st := sv.Type()
- for i := 0; i < sv.NumField(); i++ {
- sf := st.Field(i)
- tag := sf.Tag.Get("json")
- if tag == "-" {
- // Pretend this field doesn't exist.
- continue
- }
- // First, tag match
- tagName, _ := parseTag(tag)
- if tagName == key {
- f = sf
- ok = true
- break // no better match possible
- }
- // Second, exact field name match
- if sf.Name == key {
- f = sf
- ok = true
- }
- // Third, case-insensitive field name match,
- // but only if a better match hasn't already been seen
- if !ok && strings.EqualFold(sf.Name, key) {
- f = sf
- ok = true
- }
- }
-
- // Extract value; name must be exported.
- if ok {
- if f.PkgPath != "" {
- d.saveError(&UnmarshalFieldError{key, st, f})
- } else {
- subv = sv.FieldByIndex(f.Index)
- }
- _, opts := parseTag(f.Tag.Get("json"))
- destring = opts.Contains("string")
- }
- }
-
- // Read : before value.
- if op == scanSkipSpace {
- op = d.scanWhile(scanSkipSpace)
- }
- if op != scanObjectKey {
- d.error(errPhase)
- }
-
- // Read value.
- if destring {
- d.value(reflect.ValueOf(&d.tempstr))
- d.literalStore([]byte(d.tempstr), subv)
- } else {
- d.value(subv)
- }
- // Write value back to map;
- // if using struct, subv points into struct already.
- if mv.IsValid() {
- mv.SetMapIndex(reflect.ValueOf(key), subv)
- }
-
- // Next token must be , or }.
- op = d.scanWhile(scanSkipSpace)
- if op == scanEndObject {
- break
- }
- if op != scanObjectValue {
- d.error(errPhase)
- }
- }
-}
-
-// literal consumes a literal from d.data[d.off-1:], decoding into the value v.
-// The first byte of the literal has been read already
-// (that's how the caller knows it's a literal).
-func (d *decodeState) literal(v reflect.Value) {
- // All bytes inside literal return scanContinue op code.
- start := d.off - 1
- op := d.scanWhile(scanContinue)
-
- // Scan read one byte too far; back up.
- d.off--
- d.scan.undo(op)
-
- d.literalStore(d.data[start:d.off], v)
-}
-
-// literalStore decodes a literal stored in item into v.
-func (d *decodeState) literalStore(item []byte, v reflect.Value) {
- // Check for unmarshaler.
- wantptr := item[0] == 'n' // null
- unmarshaler, pv := d.indirect(v, wantptr)
- if unmarshaler != nil {
- err := unmarshaler.UnmarshalJSON(item)
- if err != nil {
- d.error(err)
- }
- return
- }
- v = pv
-
- switch c := item[0]; c {
- case 'n': // null
- switch v.Kind() {
- default:
- d.saveError(&UnmarshalTypeError{"null", v.Type()})
- case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice:
- v.Set(reflect.Zero(v.Type()))
- }
-
- case 't', 'f': // true, false
- value := c == 't'
- switch v.Kind() {
- default:
- d.saveError(&UnmarshalTypeError{"bool", v.Type()})
- case reflect.Bool:
- v.SetBool(value)
- case reflect.Interface:
- v.Set(reflect.ValueOf(value))
- }
-
- case '"': // string
- s, ok := unquoteBytes(item)
- if !ok {
- d.error(errPhase)
- }
- switch v.Kind() {
- default:
- d.saveError(&UnmarshalTypeError{"string", v.Type()})
- case reflect.Slice:
- if v.Type() != byteSliceType {
- d.saveError(&UnmarshalTypeError{"string", v.Type()})
- break
- }
- b := make([]byte, base64.StdEncoding.DecodedLen(len(s)))
- n, err := base64.StdEncoding.Decode(b, s)
- if err != nil {
- d.saveError(err)
- break
- }
- v.Set(reflect.ValueOf(b[0:n]))
- case reflect.String:
- v.SetString(string(s))
- case reflect.Interface:
- v.Set(reflect.ValueOf(string(s)))
- }
-
- default: // number
- if c != '-' && (c < '0' || c > '9') {
- d.error(errPhase)
- }
- s := string(item)
- switch v.Kind() {
- default:
- d.error(&UnmarshalTypeError{"number", v.Type()})
- case reflect.Interface:
- n, err := strconv.Atof64(s)
- if err != nil {
- d.saveError(&UnmarshalTypeError{"number " + s, v.Type()})
- break
- }
- v.Set(reflect.ValueOf(n))
-
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- n, err := strconv.Atoi64(s)
- if err != nil || v.OverflowInt(n) {
- d.saveError(&UnmarshalTypeError{"number " + s, v.Type()})
- break
- }
- v.SetInt(n)
-
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- n, err := strconv.Atoui64(s)
- if err != nil || v.OverflowUint(n) {
- d.saveError(&UnmarshalTypeError{"number " + s, v.Type()})
- break
- }
- v.SetUint(n)
-
- case reflect.Float32, reflect.Float64:
- n, err := strconv.AtofN(s, v.Type().Bits())
- if err != nil || v.OverflowFloat(n) {
- d.saveError(&UnmarshalTypeError{"number " + s, v.Type()})
- break
- }
- v.SetFloat(n)
- }
- }
-}
-
-// The xxxInterface routines build up a value to be stored
-// in an empty interface. They are not strictly necessary,
-// but they avoid the weight of reflection in this common case.
-
-// valueInterface is like value but returns interface{}
-func (d *decodeState) valueInterface() interface{} {
- switch d.scanWhile(scanSkipSpace) {
- default:
- d.error(errPhase)
- case scanBeginArray:
- return d.arrayInterface()
- case scanBeginObject:
- return d.objectInterface()
- case scanBeginLiteral:
- return d.literalInterface()
- }
- panic("unreachable")
-}
-
-// arrayInterface is like array but returns []interface{}.
-func (d *decodeState) arrayInterface() []interface{} {
- var v []interface{}
- for {
- // Look ahead for ] - can only happen on first iteration.
- op := d.scanWhile(scanSkipSpace)
- if op == scanEndArray {
- break
- }
-
- // Back up so d.value can have the byte we just read.
- d.off--
- d.scan.undo(op)
-
- v = append(v, d.valueInterface())
-
- // Next token must be , or ].
- op = d.scanWhile(scanSkipSpace)
- if op == scanEndArray {
- break
- }
- if op != scanArrayValue {
- d.error(errPhase)
- }
- }
- return v
-}
-
-// objectInterface is like object but returns map[string]interface{}.
-func (d *decodeState) objectInterface() map[string]interface{} {
- m := make(map[string]interface{})
- for {
- // Read opening " of string key or closing }.
- op := d.scanWhile(scanSkipSpace)
- if op == scanEndObject {
- // closing } - can only happen on first iteration.
- break
- }
- if op != scanBeginLiteral {
- d.error(errPhase)
- }
-
- // Read string key.
- start := d.off - 1
- op = d.scanWhile(scanContinue)
- item := d.data[start : d.off-1]
- key, ok := unquote(item)
- if !ok {
- d.error(errPhase)
- }
-
- // Read : before value.
- if op == scanSkipSpace {
- op = d.scanWhile(scanSkipSpace)
- }
- if op != scanObjectKey {
- d.error(errPhase)
- }
-
- // Read value.
- m[key] = d.valueInterface()
-
- // Next token must be , or }.
- op = d.scanWhile(scanSkipSpace)
- if op == scanEndObject {
- break
- }
- if op != scanObjectValue {
- d.error(errPhase)
- }
- }
- return m
-}
-
-// literalInterface is like literal but returns an interface value.
-func (d *decodeState) literalInterface() interface{} {
- // All bytes inside literal return scanContinue op code.
- start := d.off - 1
- op := d.scanWhile(scanContinue)
-
- // Scan read one byte too far; back up.
- d.off--
- d.scan.undo(op)
- item := d.data[start:d.off]
-
- switch c := item[0]; c {
- case 'n': // null
- return nil
-
- case 't', 'f': // true, false
- return c == 't'
-
- case '"': // string
- s, ok := unquote(item)
- if !ok {
- d.error(errPhase)
- }
- return s
-
- default: // number
- if c != '-' && (c < '0' || c > '9') {
- d.error(errPhase)
- }
- n, err := strconv.Atof64(string(item))
- if err != nil {
- d.saveError(&UnmarshalTypeError{"number " + string(item), reflect.TypeOf(0.0)})
- }
- return n
- }
- panic("unreachable")
-}
-
-// getu4 decodes \uXXXX from the beginning of s, returning the hex value,
-// or it returns -1.
-func getu4(s []byte) rune {
- if len(s) < 6 || s[0] != '\\' || s[1] != 'u' {
- return -1
- }
- r, err := strconv.Btoui64(string(s[2:6]), 16)
- if err != nil {
- return -1
- }
- return rune(r)
-}
-
-// unquote converts a quoted JSON string literal s into an actual string t.
-// The rules are different than for Go, so cannot use strconv.Unquote.
-func unquote(s []byte) (t string, ok bool) {
- s, ok = unquoteBytes(s)
- t = string(s)
- return
-}
-
-func unquoteBytes(s []byte) (t []byte, ok bool) {
- if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' {
- return
- }
- s = s[1 : len(s)-1]
-
- // Check for unusual characters. If there are none,
- // then no unquoting is needed, so return a slice of the
- // original bytes.
- r := 0
- for r < len(s) {
- c := s[r]
- if c == '\\' || c == '"' || c < ' ' {
- break
- }
- if c < utf8.RuneSelf {
- r++
- continue
- }
- rr, size := utf8.DecodeRune(s[r:])
- if rr == utf8.RuneError && size == 1 {
- break
- }
- r += size
- }
- if r == len(s) {
- return s, true
- }
-
- b := make([]byte, len(s)+2*utf8.UTFMax)
- w := copy(b, s[0:r])
- for r < len(s) {
- // Out of room? Can only happen if s is full of
- // malformed UTF-8 and we're replacing each
- // byte with RuneError.
- if w >= len(b)-2*utf8.UTFMax {
- nb := make([]byte, (len(b)+utf8.UTFMax)*2)
- copy(nb, b[0:w])
- b = nb
- }
- switch c := s[r]; {
- case c == '\\':
- r++
- if r >= len(s) {
- return
- }
- switch s[r] {
- default:
- return
- case '"', '\\', '/', '\'':
- b[w] = s[r]
- r++
- w++
- case 'b':
- b[w] = '\b'
- r++
- w++
- case 'f':
- b[w] = '\f'
- r++
- w++
- case 'n':
- b[w] = '\n'
- r++
- w++
- case 'r':
- b[w] = '\r'
- r++
- w++
- case 't':
- b[w] = '\t'
- r++
- w++
- case 'u':
- r--
- rr := getu4(s[r:])
- if rr < 0 {
- return
- }
- r += 6
- if utf16.IsSurrogate(rr) {
- rr1 := getu4(s[r:])
- if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar {
- // A valid pair; consume.
- r += 6
- w += utf8.EncodeRune(b[w:], dec)
- break
- }
- // Invalid surrogate; fall back to replacement rune.
- rr = unicode.ReplacementChar
- }
- w += utf8.EncodeRune(b[w:], rr)
- }
-
- // Quote, control characters are invalid.
- case c == '"', c < ' ':
- return
-
- // ASCII
- case c < utf8.RuneSelf:
- b[w] = c
- r++
- w++
-
- // Coerce to well-formed UTF-8.
- default:
- rr, size := utf8.DecodeRune(s[r:])
- r += size
- w += utf8.EncodeRune(b[w:], rr)
- }
- }
- return b[0:w], true
-}
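
The escape handling above (including surrogate pairs and the U+FFFD fallback) is easiest to see from the caller's side. A small hedged sketch, assuming the encoding/json import path and using the same inputs as the decoder tests that follow in this patch.

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	var s string

	// A valid surrogate pair decodes to a single rune, U+1D11E.
	if err := json.Unmarshal([]byte(`"g-clef: \uD834\uDD1E"`), &s); err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Printf("%+q\n", s) // "g-clef: \U0001d11e"

	// Broken surrogates are replaced with U+FFFD rather than rejected.
	if err := json.Unmarshal([]byte(`"invalid: \uD834x\uDD1E"`), &s); err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Printf("%+q\n", s) // "invalid: \ufffdx\ufffd"
}
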
+++ /dev/null
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package json
-
-import (
- "bytes"
- "reflect"
- "strings"
- "testing"
-)
-
-type T struct {
- X string
- Y int
- Z int `json:"-"`
-}
-
-type tx struct {
- x int
-}
-
-var txType = reflect.TypeOf((*tx)(nil)).Elem()
-
-// A type that can unmarshal itself.
-
-type unmarshaler struct {
- T bool
-}
-
-func (u *unmarshaler) UnmarshalJSON(b []byte) error {
- *u = unmarshaler{true} // All we need to see is that UnmarshalJSON is called.
- return nil
-}
-
-type ustruct struct {
- M unmarshaler
-}
-
-var (
- um0, um1 unmarshaler // targets of unmarshaling
- ump = &um1
- umtrue = unmarshaler{true}
- umslice = []unmarshaler{{true}}
- umslicep = new([]unmarshaler)
- umstruct = ustruct{unmarshaler{true}}
-)
-
-type unmarshalTest struct {
- in string
- ptr interface{}
- out interface{}
- err error
-}
-
-var unmarshalTests = []unmarshalTest{
- // basic types
- {`true`, new(bool), true, nil},
- {`1`, new(int), 1, nil},
- {`1.2`, new(float64), 1.2, nil},
- {`-5`, new(int16), int16(-5), nil},
- {`"a\u1234"`, new(string), "a\u1234", nil},
- {`"http:\/\/"`, new(string), "http://", nil},
- {`"g-clef: \uD834\uDD1E"`, new(string), "g-clef: \U0001D11E", nil},
- {`"invalid: \uD834x\uDD1E"`, new(string), "invalid: \uFFFDx\uFFFD", nil},
- {"null", new(interface{}), nil, nil},
- {`{"X": [1,2,3], "Y": 4}`, new(T), T{Y: 4}, &UnmarshalTypeError{"array", reflect.TypeOf("")}},
- {`{"x": 1}`, new(tx), tx{}, &UnmarshalFieldError{"x", txType, txType.Field(0)}},
-
- // Z has a "-" tag.
- {`{"Y": 1, "Z": 2}`, new(T), T{Y: 1}, nil},
-
- // syntax errors
- {`{"X": "foo", "Y"}`, nil, nil, &SyntaxError{"invalid character '}' after object key", 17}},
-
- // composite tests
- {allValueIndent, new(All), allValue, nil},
- {allValueCompact, new(All), allValue, nil},
- {allValueIndent, new(*All), &allValue, nil},
- {allValueCompact, new(*All), &allValue, nil},
- {pallValueIndent, new(All), pallValue, nil},
- {pallValueCompact, new(All), pallValue, nil},
- {pallValueIndent, new(*All), &pallValue, nil},
- {pallValueCompact, new(*All), &pallValue, nil},
-
- // unmarshal interface test
- {`{"T":false}`, &um0, umtrue, nil}, // use "false" so test will fail if custom unmarshaler is not called
- {`{"T":false}`, &ump, &umtrue, nil},
- {`[{"T":false}]`, &umslice, umslice, nil},
- {`[{"T":false}]`, &umslicep, &umslice, nil},
- {`{"M":{"T":false}}`, &umstruct, umstruct, nil},
-}
-
-func TestMarshal(t *testing.T) {
- b, err := Marshal(allValue)
- if err != nil {
- t.Fatalf("Marshal allValue: %v", err)
- }
- if string(b) != allValueCompact {
- t.Errorf("Marshal allValueCompact")
- diff(t, b, []byte(allValueCompact))
- return
- }
-
- b, err = Marshal(pallValue)
- if err != nil {
- t.Fatalf("Marshal pallValue: %v", err)
- }
- if string(b) != pallValueCompact {
- t.Errorf("Marshal pallValueCompact")
- diff(t, b, []byte(pallValueCompact))
- return
- }
-}
-
-func TestMarshalBadUTF8(t *testing.T) {
- s := "hello\xffworld"
- b, err := Marshal(s)
- if err == nil {
- t.Fatal("Marshal bad UTF8: no error")
- }
- if len(b) != 0 {
- t.Fatal("Marshal returned data")
- }
- if _, ok := err.(*InvalidUTF8Error); !ok {
- t.Fatalf("Marshal did not return InvalidUTF8Error: %T %v", err, err)
- }
-}
-
-func TestUnmarshal(t *testing.T) {
- for i, tt := range unmarshalTests {
- var scan scanner
- in := []byte(tt.in)
- if err := checkValid(in, &scan); err != nil {
- if !reflect.DeepEqual(err, tt.err) {
- t.Errorf("#%d: checkValid: %#v", i, err)
- continue
- }
- }
- if tt.ptr == nil {
- continue
- }
- // v = new(right-type)
- v := reflect.New(reflect.TypeOf(tt.ptr).Elem())
- if err := Unmarshal([]byte(in), v.Interface()); !reflect.DeepEqual(err, tt.err) {
- t.Errorf("#%d: %v want %v", i, err, tt.err)
- continue
- }
- if !reflect.DeepEqual(v.Elem().Interface(), tt.out) {
- t.Errorf("#%d: mismatch\nhave: %#+v\nwant: %#+v", i, v.Elem().Interface(), tt.out)
- data, _ := Marshal(v.Elem().Interface())
- println(string(data))
- data, _ = Marshal(tt.out)
- println(string(data))
- continue
- }
- }
-}
-
-func TestUnmarshalMarshal(t *testing.T) {
- initBig()
- var v interface{}
- if err := Unmarshal(jsonBig, &v); err != nil {
- t.Fatalf("Unmarshal: %v", err)
- }
- b, err := Marshal(v)
- if err != nil {
- t.Fatalf("Marshal: %v", err)
- }
- if bytes.Compare(jsonBig, b) != 0 {
- t.Errorf("Marshal jsonBig")
- diff(t, b, jsonBig)
- return
- }
-}
-
-func TestLargeByteSlice(t *testing.T) {
- s0 := make([]byte, 2000)
- for i := range s0 {
- s0[i] = byte(i)
- }
- b, err := Marshal(s0)
- if err != nil {
- t.Fatalf("Marshal: %v", err)
- }
- var s1 []byte
- if err := Unmarshal(b, &s1); err != nil {
- t.Fatalf("Unmarshal: %v", err)
- }
- if bytes.Compare(s0, s1) != 0 {
- t.Errorf("Marshal large byte slice")
- diff(t, s0, s1)
- }
-}
-
-type Xint struct {
- X int
-}
-
-func TestUnmarshalInterface(t *testing.T) {
- var xint Xint
- var i interface{} = &xint
- if err := Unmarshal([]byte(`{"X":1}`), &i); err != nil {
- t.Fatalf("Unmarshal: %v", err)
- }
- if xint.X != 1 {
- t.Fatalf("Did not write to xint")
- }
-}
-
-func TestUnmarshalPtrPtr(t *testing.T) {
- var xint Xint
- pxint := &xint
- if err := Unmarshal([]byte(`{"X":1}`), &pxint); err != nil {
- t.Fatalf("Unmarshal: %v", err)
- }
- if xint.X != 1 {
- t.Fatalf("Did not write to xint")
- }
-}
-
-func TestEscape(t *testing.T) {
- const input = `"foobar"<html>`
- const expected = `"\"foobar\"\u003chtml\u003e"`
- b, err := Marshal(input)
- if err != nil {
- t.Fatalf("Marshal error: %v", err)
- }
- if s := string(b); s != expected {
- t.Errorf("Encoding of [%s] was [%s], want [%s]", input, s, expected)
- }
-}
-
-func TestHTMLEscape(t *testing.T) {
- b, err := MarshalForHTML("foobarbaz<>&quux")
- if err != nil {
- t.Fatalf("MarshalForHTML error: %v", err)
- }
- if !bytes.Equal(b, []byte(`"foobarbaz\u003c\u003e\u0026quux"`)) {
- t.Fatalf("Unexpected encoding of \"<>&\": %s", b)
- }
-}
-
-func noSpace(c rune) rune {
- if isSpace(c) {
- return -1
- }
- return c
-}
-
-type All struct {
- Bool bool
- Int int
- Int8 int8
- Int16 int16
- Int32 int32
- Int64 int64
- Uint uint
- Uint8 uint8
- Uint16 uint16
- Uint32 uint32
- Uint64 uint64
- Uintptr uintptr
- Float32 float32
- Float64 float64
-
- Foo string `json:"bar"`
- Foo2 string `json:"bar2,dummyopt"`
-
- IntStr int64 `json:",string"`
-
- PBool *bool
- PInt *int
- PInt8 *int8
- PInt16 *int16
- PInt32 *int32
- PInt64 *int64
- PUint *uint
- PUint8 *uint8
- PUint16 *uint16
- PUint32 *uint32
- PUint64 *uint64
- PUintptr *uintptr
- PFloat32 *float32
- PFloat64 *float64
-
- String string
- PString *string
-
- Map map[string]Small
- MapP map[string]*Small
- PMap *map[string]Small
- PMapP *map[string]*Small
-
- EmptyMap map[string]Small
- NilMap map[string]Small
-
- Slice []Small
- SliceP []*Small
- PSlice *[]Small
- PSliceP *[]*Small
-
- EmptySlice []Small
- NilSlice []Small
-
- StringSlice []string
- ByteSlice []byte
-
- Small Small
- PSmall *Small
- PPSmall **Small
-
- Interface interface{}
- PInterface *interface{}
-
- unexported int
-}
-
-type Small struct {
- Tag string
-}
-
-var allValue = All{
- Bool: true,
- Int: 2,
- Int8: 3,
- Int16: 4,
- Int32: 5,
- Int64: 6,
- Uint: 7,
- Uint8: 8,
- Uint16: 9,
- Uint32: 10,
- Uint64: 11,
- Uintptr: 12,
- Float32: 14.1,
- Float64: 15.1,
- Foo: "foo",
- Foo2: "foo2",
- IntStr: 42,
- String: "16",
- Map: map[string]Small{
- "17": {Tag: "tag17"},
- "18": {Tag: "tag18"},
- },
- MapP: map[string]*Small{
- "19": &Small{Tag: "tag19"},
- "20": nil,
- },
- EmptyMap: map[string]Small{},
- Slice: []Small{{Tag: "tag20"}, {Tag: "tag21"}},
- SliceP: []*Small{&Small{Tag: "tag22"}, nil, &Small{Tag: "tag23"}},
- EmptySlice: []Small{},
- StringSlice: []string{"str24", "str25", "str26"},
- ByteSlice: []byte{27, 28, 29},
- Small: Small{Tag: "tag30"},
- PSmall: &Small{Tag: "tag31"},
- Interface: 5.2,
-}
-
-var pallValue = All{
- PBool: &allValue.Bool,
- PInt: &allValue.Int,
- PInt8: &allValue.Int8,
- PInt16: &allValue.Int16,
- PInt32: &allValue.Int32,
- PInt64: &allValue.Int64,
- PUint: &allValue.Uint,
- PUint8: &allValue.Uint8,
- PUint16: &allValue.Uint16,
- PUint32: &allValue.Uint32,
- PUint64: &allValue.Uint64,
- PUintptr: &allValue.Uintptr,
- PFloat32: &allValue.Float32,
- PFloat64: &allValue.Float64,
- PString: &allValue.String,
- PMap: &allValue.Map,
- PMapP: &allValue.MapP,
- PSlice: &allValue.Slice,
- PSliceP: &allValue.SliceP,
- PPSmall: &allValue.PSmall,
- PInterface: &allValue.Interface,
-}
-
-var allValueIndent = `{
- "Bool": true,
- "Int": 2,
- "Int8": 3,
- "Int16": 4,
- "Int32": 5,
- "Int64": 6,
- "Uint": 7,
- "Uint8": 8,
- "Uint16": 9,
- "Uint32": 10,
- "Uint64": 11,
- "Uintptr": 12,
- "Float32": 14.1,
- "Float64": 15.1,
- "bar": "foo",
- "bar2": "foo2",
- "IntStr": "42",
- "PBool": null,
- "PInt": null,
- "PInt8": null,
- "PInt16": null,
- "PInt32": null,
- "PInt64": null,
- "PUint": null,
- "PUint8": null,
- "PUint16": null,
- "PUint32": null,
- "PUint64": null,
- "PUintptr": null,
- "PFloat32": null,
- "PFloat64": null,
- "String": "16",
- "PString": null,
- "Map": {
- "17": {
- "Tag": "tag17"
- },
- "18": {
- "Tag": "tag18"
- }
- },
- "MapP": {
- "19": {
- "Tag": "tag19"
- },
- "20": null
- },
- "PMap": null,
- "PMapP": null,
- "EmptyMap": {},
- "NilMap": null,
- "Slice": [
- {
- "Tag": "tag20"
- },
- {
- "Tag": "tag21"
- }
- ],
- "SliceP": [
- {
- "Tag": "tag22"
- },
- null,
- {
- "Tag": "tag23"
- }
- ],
- "PSlice": null,
- "PSliceP": null,
- "EmptySlice": [],
- "NilSlice": null,
- "StringSlice": [
- "str24",
- "str25",
- "str26"
- ],
- "ByteSlice": "Gxwd",
- "Small": {
- "Tag": "tag30"
- },
- "PSmall": {
- "Tag": "tag31"
- },
- "PPSmall": null,
- "Interface": 5.2,
- "PInterface": null
-}`
-
-var allValueCompact = strings.Map(noSpace, allValueIndent)
-
-var pallValueIndent = `{
- "Bool": false,
- "Int": 0,
- "Int8": 0,
- "Int16": 0,
- "Int32": 0,
- "Int64": 0,
- "Uint": 0,
- "Uint8": 0,
- "Uint16": 0,
- "Uint32": 0,
- "Uint64": 0,
- "Uintptr": 0,
- "Float32": 0,
- "Float64": 0,
- "bar": "",
- "bar2": "",
- "IntStr": "0",
- "PBool": true,
- "PInt": 2,
- "PInt8": 3,
- "PInt16": 4,
- "PInt32": 5,
- "PInt64": 6,
- "PUint": 7,
- "PUint8": 8,
- "PUint16": 9,
- "PUint32": 10,
- "PUint64": 11,
- "PUintptr": 12,
- "PFloat32": 14.1,
- "PFloat64": 15.1,
- "String": "",
- "PString": "16",
- "Map": null,
- "MapP": null,
- "PMap": {
- "17": {
- "Tag": "tag17"
- },
- "18": {
- "Tag": "tag18"
- }
- },
- "PMapP": {
- "19": {
- "Tag": "tag19"
- },
- "20": null
- },
- "EmptyMap": null,
- "NilMap": null,
- "Slice": null,
- "SliceP": null,
- "PSlice": [
- {
- "Tag": "tag20"
- },
- {
- "Tag": "tag21"
- }
- ],
- "PSliceP": [
- {
- "Tag": "tag22"
- },
- null,
- {
- "Tag": "tag23"
- }
- ],
- "EmptySlice": null,
- "NilSlice": null,
- "StringSlice": null,
- "ByteSlice": null,
- "Small": {
- "Tag": ""
- },
- "PSmall": null,
- "PPSmall": {
- "Tag": "tag31"
- },
- "Interface": null,
- "PInterface": 5.2
-}`
-
-var pallValueCompact = strings.Map(noSpace, pallValueIndent)
+++ /dev/null
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package json implements encoding and decoding of JSON objects as defined in
-// RFC 4627.
-//
-// See "JSON and Go" for an introduction to this package:
-// http://blog.golang.org/2011/01/json-and-go.html
-package json
-
-import (
- "bytes"
- "encoding/base64"
- "reflect"
- "runtime"
- "sort"
- "strconv"
- "unicode"
- "utf8"
-)
-
-// Marshal returns the JSON encoding of v.
-//
-// Marshal traverses the value v recursively.
-// If an encountered value implements the Marshaler interface
-// and is not a nil pointer, Marshal calls its MarshalJSON method
-// to produce JSON. The nil pointer exception is not strictly necessary
-// but mimics a similar, necessary exception in the behavior of
-// UnmarshalJSON.
-//
-// Otherwise, Marshal uses the following type-dependent default encodings:
-//
-// Boolean values encode as JSON booleans.
-//
-// Floating point and integer values encode as JSON numbers.
-//
-// String values encode as JSON strings, with each invalid UTF-8 sequence
-// replaced by the encoding of the Unicode replacement character U+FFFD.
-//
-// Array and slice values encode as JSON arrays, except that
-// []byte encodes as a base64-encoded string.
-//
-// Struct values encode as JSON objects. Each exported struct field
-// becomes a member of the object unless
-// - the field's tag is "-", or
-// - the field is empty and its tag specifies the "omitempty" option.
-// The empty values are false, 0, any
-// nil pointer or interface value, and any array, slice, map, or string of
-// length zero. The object's default key string is the struct field name
-// but can be specified in the struct field's tag value. The "json" key in
-// struct field's tag value is the key name, followed by an optional comma
-// and options. Examples:
-//
-// // Field is ignored by this package.
-// Field int `json:"-"`
-//
-// // Field appears in JSON as key "myName".
-// Field int `json:"myName"`
-//
-// // Field appears in JSON as key "myName" and
-// // the field is omitted from the object if its value is empty,
-// // as defined above.
-// Field int `json:"myName,omitempty"`
-//
-// // Field appears in JSON as key "Field" (the default), but
-// // the field is skipped if empty.
-// // Note the leading comma.
-// Field int `json:",omitempty"`
-//
-// The "string" option signals that a field is stored as JSON inside a
-// JSON-encoded string. This extra level of encoding is sometimes
-// used when communicating with JavaScript programs:
-//
-// Int64String int64 `json:",string"`
-//
-// The key name will be used if it's a non-empty string consisting of
-// only Unicode letters, digits, dollar signs, hyphens, and underscores.
-//
-// Map values encode as JSON objects.
-// The map's key type must be string; the object keys are used directly
-// as map keys.
-//
-// Pointer values encode as the value pointed to.
-// A nil pointer encodes as the null JSON object.
-//
-// Interface values encode as the value contained in the interface.
-// A nil interface value encodes as the null JSON object.
-//
-// Channel, complex, and function values cannot be encoded in JSON.
-// Attempting to encode such a value causes Marshal to return
-// an InvalidTypeError.
-//
-// JSON cannot represent cyclic data structures and Marshal does not
-// handle them. Passing cyclic structures to Marshal will result in
-// an infinite recursion.
-//
-func Marshal(v interface{}) ([]byte, error) {
- e := &encodeState{}
- err := e.marshal(v)
- if err != nil {
- return nil, err
- }
- return e.Bytes(), nil
-}
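
Since the tag options documented above ("-", a renamed key, ",omitempty", ",string") are the part of this encoder most callers touch, a short hedged sketch of their effect; the Response type is illustrative and encoding/json is assumed as the import path.

package main

import (
	"encoding/json"
	"fmt"
)

// Response is a hypothetical type exercising the documented tag forms.
type Response struct {
	Name   string `json:"myName"`     // key renamed to "myName"
	Count  int    `json:",omitempty"` // default key, dropped while zero
	Secret string `json:"-"`          // never emitted
	BigID  int64  `json:",string"`    // emitted inside a JSON string
}

func main() {
	b, err := json.Marshal(Response{Name: "a", Secret: "hidden", BigID: 42})
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println(string(b)) // {"myName":"a","BigID":"42"}
}
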
-
-// MarshalIndent is like Marshal but applies Indent to format the output.
-func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
- b, err := Marshal(v)
- if err != nil {
- return nil, err
- }
- var buf bytes.Buffer
- err = Indent(&buf, b, prefix, indent)
- if err != nil {
- return nil, err
- }
- return buf.Bytes(), nil
-}
-
-// MarshalForHTML is like Marshal but applies HTMLEscape to the output.
-func MarshalForHTML(v interface{}) ([]byte, error) {
- b, err := Marshal(v)
- if err != nil {
- return nil, err
- }
- var buf bytes.Buffer
- HTMLEscape(&buf, b)
- return buf.Bytes(), nil
-}
-
-// HTMLEscape appends to dst the JSON-encoded src with <, >, and &
-// characters inside string literals changed to \u003c, \u003e, \u0026
-// so that the JSON will be safe to embed inside HTML <script> tags.
-// For historical reasons, web browsers don't honor standard HTML
-// escaping within <script> tags, so an alternative JSON encoding must
-// be used.
-func HTMLEscape(dst *bytes.Buffer, src []byte) {
- // < > & can only appear in string literals,
- // so just scan the string one byte at a time.
- start := 0
- for i, c := range src {
- if c == '<' || c == '>' || c == '&' {
- if start < i {
- dst.Write(src[start:i])
- }
- dst.WriteString(`\u00`)
- dst.WriteByte(hex[c>>4])
- dst.WriteByte(hex[c&0xF])
- start = i + 1
- }
- }
- if start < len(src) {
- dst.Write(src[start:])
- }
-}
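
A hedged usage sketch for HTMLEscape as defined above; the input JSON is invented, and the same call is assumed to exist under the encoding/json path.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

func main() {
	src := []byte(`{"msg":"<b>hi</b> & bye"}`)
	var buf bytes.Buffer
	// <, > and & inside string literals are rewritten as \u003c, \u003e, \u0026
	// so the result can be embedded inside a <script> tag.
	json.HTMLEscape(&buf, src)
	fmt.Println(buf.String()) // {"msg":"\u003cb\u003ehi\u003c/b\u003e \u0026 bye"}
}
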
-
-// Marshaler is the interface implemented by objects that
-// can marshal themselves into valid JSON.
-type Marshaler interface {
- MarshalJSON() ([]byte, error)
-}
-
-type UnsupportedTypeError struct {
- Type reflect.Type
-}
-
-func (e *UnsupportedTypeError) Error() string {
- return "json: unsupported type: " + e.Type.String()
-}
-
-type InvalidUTF8Error struct {
- S string
-}
-
-func (e *InvalidUTF8Error) Error() string {
- return "json: invalid UTF-8 in string: " + strconv.Quote(e.S)
-}
-
-type MarshalerError struct {
- Type reflect.Type
- Err error
-}
-
-func (e *MarshalerError) Error() string {
- return "json: error calling MarshalJSON for type " + e.Type.String() + ": " + e.Err.Error()
-}
-
-type interfaceOrPtrValue interface {
- IsNil() bool
- Elem() reflect.Value
-}
-
-var hex = "0123456789abcdef"
-
-// An encodeState encodes JSON into a bytes.Buffer.
-type encodeState struct {
- bytes.Buffer // accumulated output
-}
-
-func (e *encodeState) marshal(v interface{}) (err error) {
- defer func() {
- if r := recover(); r != nil {
- if _, ok := r.(runtime.Error); ok {
- panic(r)
- }
- err = r.(error)
- }
- }()
- e.reflectValue(reflect.ValueOf(v))
- return nil
-}
-
-func (e *encodeState) error(err error) {
- panic(err)
-}
-
-var byteSliceType = reflect.TypeOf([]byte(nil))
-
-func isEmptyValue(v reflect.Value) bool {
- switch v.Kind() {
- case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
- return v.Len() == 0
- case reflect.Bool:
- return !v.Bool()
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return v.Int() == 0
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- return v.Uint() == 0
- case reflect.Float32, reflect.Float64:
- return v.Float() == 0
- case reflect.Interface, reflect.Ptr:
- return v.IsNil()
- }
- return false
-}
-
-func (e *encodeState) reflectValue(v reflect.Value) {
- e.reflectValueQuoted(v, false)
-}
-
-// reflectValueQuoted writes the value in v to the output.
-// If quoted is true, the serialization is wrapped in a JSON string.
-func (e *encodeState) reflectValueQuoted(v reflect.Value, quoted bool) {
- if !v.IsValid() {
- e.WriteString("null")
- return
- }
-
- if j, ok := v.Interface().(Marshaler); ok && (v.Kind() != reflect.Ptr || !v.IsNil()) {
- b, err := j.MarshalJSON()
- if err == nil {
- // copy JSON into buffer, checking validity.
- err = Compact(&e.Buffer, b)
- }
- if err != nil {
- e.error(&MarshalerError{v.Type(), err})
- }
- return
- }
-
- writeString := (*encodeState).WriteString
- if quoted {
- writeString = (*encodeState).string
- }
-
- switch v.Kind() {
- case reflect.Bool:
- x := v.Bool()
- if x {
- writeString(e, "true")
- } else {
- writeString(e, "false")
- }
-
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- writeString(e, strconv.Itoa64(v.Int()))
-
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- writeString(e, strconv.Uitoa64(v.Uint()))
-
- case reflect.Float32, reflect.Float64:
- writeString(e, strconv.FtoaN(v.Float(), 'g', -1, v.Type().Bits()))
-
- case reflect.String:
- if quoted {
- sb, err := Marshal(v.String())
- if err != nil {
- e.error(err)
- }
- e.string(string(sb))
- } else {
- e.string(v.String())
- }
-
- case reflect.Struct:
- e.WriteByte('{')
- t := v.Type()
- n := v.NumField()
- first := true
- for i := 0; i < n; i++ {
- f := t.Field(i)
- if f.PkgPath != "" {
- continue
- }
- tag, omitEmpty, quoted := f.Name, false, false
- if tv := f.Tag.Get("json"); tv != "" {
- if tv == "-" {
- continue
- }
- name, opts := parseTag(tv)
- if isValidTag(name) {
- tag = name
- }
- omitEmpty = opts.Contains("omitempty")
- quoted = opts.Contains("string")
- }
- fieldValue := v.Field(i)
- if omitEmpty && isEmptyValue(fieldValue) {
- continue
- }
- if first {
- first = false
- } else {
- e.WriteByte(',')
- }
- e.string(tag)
- e.WriteByte(':')
- e.reflectValueQuoted(fieldValue, quoted)
- }
- e.WriteByte('}')
-
- case reflect.Map:
- if v.Type().Key().Kind() != reflect.String {
- e.error(&UnsupportedTypeError{v.Type()})
- }
- if v.IsNil() {
- e.WriteString("null")
- break
- }
- e.WriteByte('{')
- var sv stringValues = v.MapKeys()
- sort.Sort(sv)
- for i, k := range sv {
- if i > 0 {
- e.WriteByte(',')
- }
- e.string(k.String())
- e.WriteByte(':')
- e.reflectValue(v.MapIndex(k))
- }
- e.WriteByte('}')
-
- case reflect.Slice:
- if v.IsNil() {
- e.WriteString("null")
- break
- }
- // Slices can be marshalled as nil, but otherwise are handled
- // as arrays.
- fallthrough
- case reflect.Array:
- if v.Type() == byteSliceType {
- e.WriteByte('"')
- s := v.Interface().([]byte)
- if len(s) < 1024 {
- // for small buffers, using Encode directly is much faster.
- dst := make([]byte, base64.StdEncoding.EncodedLen(len(s)))
- base64.StdEncoding.Encode(dst, s)
- e.Write(dst)
- } else {
- // for large buffers, avoid unnecessary extra temporary
- // buffer space.
- enc := base64.NewEncoder(base64.StdEncoding, e)
- enc.Write(s)
- enc.Close()
- }
- e.WriteByte('"')
- break
- }
- e.WriteByte('[')
- n := v.Len()
- for i := 0; i < n; i++ {
- if i > 0 {
- e.WriteByte(',')
- }
- e.reflectValue(v.Index(i))
- }
- e.WriteByte(']')
-
- case reflect.Interface, reflect.Ptr:
- if v.IsNil() {
- e.WriteString("null")
- return
- }
- e.reflectValue(v.Elem())
-
- default:
- e.error(&UnsupportedTypeError{v.Type()})
- }
- return
-}
-
-func isValidTag(s string) bool {
- if s == "" {
- return false
- }
- for _, c := range s {
- if c != '$' && c != '-' && c != '_' && !unicode.IsLetter(c) && !unicode.IsDigit(c) {
- return false
- }
- }
- return true
-}
-
-// stringValues is a slice of reflect.Value holding *reflect.StringValue.
-// It implements the methods to sort by string.
-type stringValues []reflect.Value
-
-func (sv stringValues) Len() int { return len(sv) }
-func (sv stringValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] }
-func (sv stringValues) Less(i, j int) bool { return sv.get(i) < sv.get(j) }
-func (sv stringValues) get(i int) string { return sv[i].String() }
-
-func (e *encodeState) string(s string) (int, error) {
- len0 := e.Len()
- e.WriteByte('"')
- start := 0
- for i := 0; i < len(s); {
- if b := s[i]; b < utf8.RuneSelf {
- if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' {
- i++
- continue
- }
- if start < i {
- e.WriteString(s[start:i])
- }
- switch b {
- case '\\', '"':
- e.WriteByte('\\')
- e.WriteByte(b)
- case '\n':
- e.WriteByte('\\')
- e.WriteByte('n')
- case '\r':
- e.WriteByte('\\')
- e.WriteByte('r')
- default:
- // This encodes bytes < 0x20 except for \n and \r,
- // as well as < and >. The latter are escaped because they
- // can lead to security holes when user-controlled strings
- // are rendered into JSON and served to some browsers.
- e.WriteString(`\u00`)
- e.WriteByte(hex[b>>4])
- e.WriteByte(hex[b&0xF])
- }
- i++
- start = i
- continue
- }
- c, size := utf8.DecodeRuneInString(s[i:])
- if c == utf8.RuneError && size == 1 {
- e.error(&InvalidUTF8Error{s})
- }
- i += size
- }
- if start < len(s) {
- e.WriteString(s[start:])
- }
- e.WriteByte('"')
- return e.Len() - len0, nil
-}
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package json
-
-import (
- "bytes"
- "reflect"
- "testing"
-)
-
-type Optionals struct {
- Sr string `json:"sr"`
- So string `json:"so,omitempty"`
- Sw string `json:"-"`
-
- Ir int `json:"omitempty"` // actually named omitempty, not an option
- Io int `json:"io,omitempty"`
-
- Slr []string `json:"slr,random"`
- Slo []string `json:"slo,omitempty"`
-
- Mr map[string]interface{} `json:"mr"`
- Mo map[string]interface{} `json:",omitempty"`
-}
-
-var optionalsExpected = `{
- "sr": "",
- "omitempty": 0,
- "slr": null,
- "mr": {}
-}`
-
-func TestOmitEmpty(t *testing.T) {
- var o Optionals
- o.Sw = "something"
- o.Mr = map[string]interface{}{}
- o.Mo = map[string]interface{}{}
-
- got, err := MarshalIndent(&o, "", " ")
- if err != nil {
- t.Fatal(err)
- }
- if got := string(got); got != optionalsExpected {
- t.Errorf(" got: %s\nwant: %s\n", got, optionalsExpected)
- }
-}
-
-type StringTag struct {
- BoolStr bool `json:",string"`
- IntStr int64 `json:",string"`
- StrStr string `json:",string"`
-}
-
-var stringTagExpected = `{
- "BoolStr": "true",
- "IntStr": "42",
- "StrStr": "\"xzbit\""
-}`
-
-func TestStringTag(t *testing.T) {
- var s StringTag
- s.BoolStr = true
- s.IntStr = 42
- s.StrStr = "xzbit"
- got, err := MarshalIndent(&s, "", " ")
- if err != nil {
- t.Fatal(err)
- }
- if got := string(got); got != stringTagExpected {
- t.Fatalf(" got: %s\nwant: %s\n", got, stringTagExpected)
- }
-
- // Verify that it round-trips.
- var s2 StringTag
- err = NewDecoder(bytes.NewBuffer(got)).Decode(&s2)
- if err != nil {
- t.Fatalf("Decode: %v", err)
- }
- if !reflect.DeepEqual(s, s2) {
- t.Fatalf("decode didn't match.\nsource: %#v\nEncoded as:\n%s\ndecode: %#v", s, string(got), s2)
- }
-}
+++ /dev/null
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package json
-
-import "bytes"
-
-// Compact appends to dst the JSON-encoded src with
-// insignificant space characters elided.
-func Compact(dst *bytes.Buffer, src []byte) error {
- origLen := dst.Len()
- var scan scanner
- scan.reset()
- start := 0
- for i, c := range src {
- v := scan.step(&scan, int(c))
- if v >= scanSkipSpace {
- if v == scanError {
- break
- }
- if start < i {
- dst.Write(src[start:i])
- }
- start = i + 1
- }
- }
- if scan.eof() == scanError {
- dst.Truncate(origLen)
- return scan.err
- }
- if start < len(src) {
- dst.Write(src[start:])
- }
- return nil
-}
-
-func newline(dst *bytes.Buffer, prefix, indent string, depth int) {
- dst.WriteByte('\n')
- dst.WriteString(prefix)
- for i := 0; i < depth; i++ {
- dst.WriteString(indent)
- }
-}
-
-// Indent appends to dst an indented form of the JSON-encoded src.
-// Each element in a JSON object or array begins on a new,
-// indented line beginning with prefix followed by one or more
-// copies of indent according to the indentation nesting.
-// The data appended to dst has no trailing newline, to make it easier
-// to embed inside other formatted JSON data.
-func Indent(dst *bytes.Buffer, src []byte, prefix, indent string) error {
- origLen := dst.Len()
- var scan scanner
- scan.reset()
- needIndent := false
- depth := 0
- for _, c := range src {
- scan.bytes++
- v := scan.step(&scan, int(c))
- if v == scanSkipSpace {
- continue
- }
- if v == scanError {
- break
- }
- if needIndent && v != scanEndObject && v != scanEndArray {
- needIndent = false
- depth++
- newline(dst, prefix, indent, depth)
- }
-
- // Emit semantically uninteresting bytes
- // (in particular, punctuation in strings) unmodified.
- if v == scanContinue {
- dst.WriteByte(c)
- continue
- }
-
- // Add spacing around real punctuation.
- switch c {
- case '{', '[':
- // delay indent so that empty object and array are formatted as {} and [].
- needIndent = true
- dst.WriteByte(c)
-
- case ',':
- dst.WriteByte(c)
- newline(dst, prefix, indent, depth)
-
- case ':':
- dst.WriteByte(c)
- dst.WriteByte(' ')
-
- case '}', ']':
- if needIndent {
- // suppress indent in empty object/array
- needIndent = false
- } else {
- depth--
- newline(dst, prefix, indent, depth)
- }
- dst.WriteByte(c)
-
- default:
- dst.WriteByte(c)
- }
- }
- if scan.eof() == scanError {
- dst.Truncate(origLen)
- return scan.err
- }
- return nil
-}
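
For completeness, a hedged sketch of Compact and Indent as used from outside the package (encoding/json path assumed); the input is invented.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

func main() {
	src := []byte(`{ "a": [1, 2, 3], "b": { } }`)

	// Compact elides the insignificant whitespace.
	var compact bytes.Buffer
	if err := json.Compact(&compact, src); err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println(compact.String()) // {"a":[1,2,3],"b":{}}

	// Indent puts each element on its own line; empty composites stay {} and [].
	var indented bytes.Buffer
	if err := json.Indent(&indented, compact.Bytes(), "", "  "); err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println(indented.String())
}
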
+++ /dev/null
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package json
-
-// JSON value parser state machine.
-// Just about at the limit of what is reasonable to write by hand.
-// Some parts are a bit tedious, but overall it nicely factors out the
-// otherwise common code from the multiple scanning functions
-// in this package (Compact, Indent, checkValid, nextValue, etc).
-//
-// This file starts with two simple examples using the scanner
-// before diving into the scanner itself.
-
-import "strconv"
-
-// checkValid verifies that data is valid JSON-encoded data.
-// scan is passed in for use by checkValid to avoid an allocation.
-func checkValid(data []byte, scan *scanner) error {
- scan.reset()
- for _, c := range data {
- scan.bytes++
- if scan.step(scan, int(c)) == scanError {
- return scan.err
- }
- }
- if scan.eof() == scanError {
- return scan.err
- }
- return nil
-}
-
-// nextValue splits data after the next whole JSON value,
-// returning that value and the bytes that follow it as separate slices.
-// scan is passed in for use by nextValue to avoid an allocation.
-func nextValue(data []byte, scan *scanner) (value, rest []byte, err error) {
- scan.reset()
- for i, c := range data {
- v := scan.step(scan, int(c))
- if v >= scanEnd {
- switch v {
- case scanError:
- return nil, nil, scan.err
- case scanEnd:
- return data[0:i], data[i:], nil
- }
- }
- }
- if scan.eof() == scanError {
- return nil, nil, scan.err
- }
- return data, nil, nil
-}
-
-// A SyntaxError is a description of a JSON syntax error.
-type SyntaxError struct {
- msg string // description of error
- Offset int64 // error occurred after reading Offset bytes
-}
-
-func (e *SyntaxError) Error() string { return e.msg }
-
-// A scanner is a JSON scanning state machine.
-// Callers call scan.reset() and then pass bytes in one at a time
-// by calling scan.step(&scan, c) for each byte.
-// The return value, referred to as an opcode, tells the
-// caller about significant parsing events like beginning
-// and ending literals, objects, and arrays, so that the
-// caller can follow along if it wishes.
-// The return value scanEnd indicates that a single top-level
-// JSON value has been completed, *before* the byte that
-// just got passed in. (The indication must be delayed in order
-// to recognize the end of numbers: is 123 a whole value or
-// the beginning of 12345e+6?).
-type scanner struct {
- // The step is a func to be called to execute the next transition.
- // Also tried using an integer constant and a single func
- // with a switch, but using the func directly was 10% faster
- // on a 64-bit Mac Mini, and it's nicer to read.
- step func(*scanner, int) int
-
- // Stack of what we're in the middle of - array values, object keys, object values.
- parseState []int
-
- // Error that happened, if any.
- err error
-
- // 1-byte redo (see undo method)
- redoCode int
- redoState func(*scanner, int) int
-
- // total bytes consumed, updated by decoder.Decode
- bytes int64
-}
-
-// These values are returned by the state transition functions
-// assigned to scanner.state and the method scanner.eof.
-// They give details about the current state of the scan that
-// callers might be interested to know about.
-// It is okay to ignore the return value of any particular
-// call to scanner.state: if one call returns scanError,
-// every subsequent call will return scanError too.
-const (
- // Continue.
- scanContinue = iota // uninteresting byte
- scanBeginLiteral // end implied by next result != scanContinue
- scanBeginObject // begin object
- scanObjectKey // just finished object key (string)
- scanObjectValue // just finished non-last object value
- scanEndObject // end object (implies scanObjectValue if possible)
- scanBeginArray // begin array
- scanArrayValue // just finished array value
- scanEndArray // end array (implies scanArrayValue if possible)
- scanSkipSpace // space byte; can skip; known to be last "continue" result
-
- // Stop.
- scanEnd // top-level value ended *before* this byte; known to be first "stop" result
- scanError // hit an error, scanner.err.
-)
-
-// These values are stored in the parseState stack.
-// They give the current state of a composite value
-// being scanned. If the parser is inside a nested value
-// the parseState describes the nested state, outermost at entry 0.
-const (
- parseObjectKey = iota // parsing object key (before colon)
- parseObjectValue // parsing object value (after colon)
- parseArrayValue // parsing array value
-)
-
-// reset prepares the scanner for use.
-// It must be called before calling s.step.
-func (s *scanner) reset() {
- s.step = stateBeginValue
- s.parseState = s.parseState[0:0]
- s.err = nil
-}
-
-// eof tells the scanner that the end of input has been reached.
-// It returns a scan status just as s.step does.
-func (s *scanner) eof() int {
- if s.err != nil {
- return scanError
- }
- if s.step == stateEndTop {
- return scanEnd
- }
- s.step(s, ' ')
- if s.step == stateEndTop {
- return scanEnd
- }
- if s.err == nil {
- s.err = &SyntaxError{"unexpected end of JSON input", s.bytes}
- }
- return scanError
-}
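
A package-internal sketch (hypothetical helper, not exported API) of the calling protocol described above, in the style of checkValid and nextValue; it assumes this vintage of the scanner, whose step functions take the byte as an int.

// countLiterals walks data with a scanner and counts how many literals
// (strings, numbers, true, false, null) it reports, object keys included.
func countLiterals(data []byte) (int, error) {
	var scan scanner
	scan.reset()
	n := 0
	for _, c := range data {
		scan.bytes++
		switch scan.step(&scan, int(c)) {
		case scanError:
			return 0, scan.err
		case scanBeginLiteral:
			n++
		}
	}
	if scan.eof() == scanError {
		return 0, scan.err
	}
	return n, nil
}
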
-
-// pushParseState pushes a new parse state p onto the parse stack.
-func (s *scanner) pushParseState(p int) {
- s.parseState = append(s.parseState, p)
-}
-
-// popParseState pops a parse state (already obtained) off the stack
-// and updates s.step accordingly.
-func (s *scanner) popParseState() {
- n := len(s.parseState) - 1
- s.parseState = s.parseState[0:n]
- if n == 0 {
- s.step = stateEndTop
- } else {
- s.step = stateEndValue
- }
-}
-
-func isSpace(c rune) bool {
- return c == ' ' || c == '\t' || c == '\r' || c == '\n'
-}
-
-// NOTE(rsc): The various instances of
-//
-// if c <= ' ' && (c == ' ' || c == '\t' || c == '\r' || c == '\n')
-//
-// below should all be if c <= ' ' && isSpace(c), but inlining
-// the checks makes a significant difference (>10%) in tight loops
-// such as nextValue. These should be rewritten with the clearer
-// function call once 6g knows to inline the call.
-
-// stateBeginValueOrEmpty is the state after reading `[`.
-func stateBeginValueOrEmpty(s *scanner, c int) int {
- if c <= ' ' && (c == ' ' || c == '\t' || c == '\r' || c == '\n') {
- return scanSkipSpace
- }
- if c == ']' {
- return stateEndValue(s, c)
- }
- return stateBeginValue(s, c)
-}
-
-// stateBeginValue is the state at the beginning of the input.
-func stateBeginValue(s *scanner, c int) int {
- if c <= ' ' && (c == ' ' || c == '\t' || c == '\r' || c == '\n') {
- return scanSkipSpace
- }
- switch c {
- case '{':
- s.step = stateBeginStringOrEmpty
- s.pushParseState(parseObjectKey)
- return scanBeginObject
- case '[':
- s.step = stateBeginValueOrEmpty
- s.pushParseState(parseArrayValue)
- return scanBeginArray
- case '"':
- s.step = stateInString
- return scanBeginLiteral
- case '-':
- s.step = stateNeg
- return scanBeginLiteral
- case '0': // beginning of 0.123
- s.step = state0
- return scanBeginLiteral
- case 't': // beginning of true
- s.step = stateT
- return scanBeginLiteral
- case 'f': // beginning of false
- s.step = stateF
- return scanBeginLiteral
- case 'n': // beginning of null
- s.step = stateN
- return scanBeginLiteral
- }
- if '1' <= c && c <= '9' { // beginning of 1234.5
- s.step = state1
- return scanBeginLiteral
- }
- return s.error(c, "looking for beginning of value")
-}
-
-// stateBeginStringOrEmpty is the state after reading `{`.
-func stateBeginStringOrEmpty(s *scanner, c int) int {
- if c <= ' ' && (c == ' ' || c == '\t' || c == '\r' || c == '\n') {
- return scanSkipSpace
- }
- if c == '}' {
- n := len(s.parseState)
- s.parseState[n-1] = parseObjectValue
- return stateEndValue(s, c)
- }
- return stateBeginString(s, c)
-}
-
-// stateBeginString is the state after reading `{"key": value,`.
-func stateBeginString(s *scanner, c int) int {
- if c <= ' ' && (c == ' ' || c == '\t' || c == '\r' || c == '\n') {
- return scanSkipSpace
- }
- if c == '"' {
- s.step = stateInString
- return scanBeginLiteral
- }
- return s.error(c, "looking for beginning of object key string")
-}
-
-// stateEndValue is the state after completing a value,
-// such as after reading `{}` or `true` or `["x"`.
-func stateEndValue(s *scanner, c int) int {
- n := len(s.parseState)
- if n == 0 {
- // Completed top-level before the current byte.
- s.step = stateEndTop
- return stateEndTop(s, c)
- }
- if c <= ' ' && (c == ' ' || c == '\t' || c == '\r' || c == '\n') {
- s.step = stateEndValue
- return scanSkipSpace
- }
- ps := s.parseState[n-1]
- switch ps {
- case parseObjectKey:
- if c == ':' {
- s.parseState[n-1] = parseObjectValue
- s.step = stateBeginValue
- return scanObjectKey
- }
- return s.error(c, "after object key")
- case parseObjectValue:
- if c == ',' {
- s.parseState[n-1] = parseObjectKey
- s.step = stateBeginString
- return scanObjectValue
- }
- if c == '}' {
- s.popParseState()
- return scanEndObject
- }
- return s.error(c, "after object key:value pair")
- case parseArrayValue:
- if c == ',' {
- s.step = stateBeginValue
- return scanArrayValue
- }
- if c == ']' {
- s.popParseState()
- return scanEndArray
- }
- return s.error(c, "after array element")
- }
- return s.error(c, "")
-}
-
-// stateEndTop is the state after finishing the top-level value,
-// such as after reading `{}` or `[1,2,3]`.
-// Only space characters should be seen now.
-func stateEndTop(s *scanner, c int) int {
- if c != ' ' && c != '\t' && c != '\r' && c != '\n' {
- // Complain about non-space byte on next call.
- s.error(c, "after top-level value")
- }
- return scanEnd
-}
-
-// stateInString is the state after reading `"`.
-func stateInString(s *scanner, c int) int {
- if c == '"' {
- s.step = stateEndValue
- return scanContinue
- }
- if c == '\\' {
- s.step = stateInStringEsc
- return scanContinue
- }
- if c < 0x20 {
- return s.error(c, "in string literal")
- }
- return scanContinue
-}
-
-// stateInStringEsc is the state after reading `"\` during a quoted string.
-func stateInStringEsc(s *scanner, c int) int {
- switch c {
- case 'b', 'f', 'n', 'r', 't', '\\', '/', '"':
- s.step = stateInString
- return scanContinue
- }
- if c == 'u' {
- s.step = stateInStringEscU
- return scanContinue
- }
- return s.error(c, "in string escape code")
-}
-
-// stateInStringEscU is the state after reading `"\u` during a quoted string.
-func stateInStringEscU(s *scanner, c int) int {
- if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
- s.step = stateInStringEscU1
- return scanContinue
- }
- // numbers
- return s.error(c, "in \\u hexadecimal character escape")
-}
-
-// stateInStringEscU1 is the state after reading `"\u1` during a quoted string.
-func stateInStringEscU1(s *scanner, c int) int {
- if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
- s.step = stateInStringEscU12
- return scanContinue
- }
- // numbers
- return s.error(c, "in \\u hexadecimal character escape")
-}
-
-// stateInStringEscU12 is the state after reading `"\u12` during a quoted string.
-func stateInStringEscU12(s *scanner, c int) int {
- if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
- s.step = stateInStringEscU123
- return scanContinue
- }
- // numbers
- return s.error(c, "in \\u hexadecimal character escape")
-}
-
-// stateInStringEscU123 is the state after reading `"\u123` during a quoted string.
-func stateInStringEscU123(s *scanner, c int) int {
- if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
- s.step = stateInString
- return scanContinue
- }
- // numbers
- return s.error(c, "in \\u hexadecimal character escape")
-}
-
-// stateNeg is the state after reading `-` during a number.
-func stateNeg(s *scanner, c int) int {
- if c == '0' {
- s.step = state0
- return scanContinue
- }
- if '1' <= c && c <= '9' {
- s.step = state1
- return scanContinue
- }
- return s.error(c, "in numeric literal")
-}
-
-// state1 is the state after reading a non-zero integer during a number,
-// such as after reading `1` or `100` but not `0`.
-func state1(s *scanner, c int) int {
- if '0' <= c && c <= '9' {
- s.step = state1
- return scanContinue
- }
- return state0(s, c)
-}
-
-// state0 is the state after reading `0` during a number.
-func state0(s *scanner, c int) int {
- if c == '.' {
- s.step = stateDot
- return scanContinue
- }
- if c == 'e' || c == 'E' {
- s.step = stateE
- return scanContinue
- }
- return stateEndValue(s, c)
-}
-
-// stateDot is the state after reading the integer and decimal point in a number,
-// such as after reading `1.`.
-func stateDot(s *scanner, c int) int {
- if '0' <= c && c <= '9' {
- s.step = stateDot0
- return scanContinue
- }
- return s.error(c, "after decimal point in numeric literal")
-}
-
-// stateDot0 is the state after reading the integer, decimal point, and subsequent
-// digits of a number, such as after reading `3.14`.
-func stateDot0(s *scanner, c int) int {
- if '0' <= c && c <= '9' {
- s.step = stateDot0
- return scanContinue
- }
- if c == 'e' || c == 'E' {
- s.step = stateE
- return scanContinue
- }
- return stateEndValue(s, c)
-}
-
-// stateE is the state after reading the mantissa and e in a number,
-// such as after reading `314e` or `0.314e`.
-func stateE(s *scanner, c int) int {
- if c == '+' {
- s.step = stateESign
- return scanContinue
- }
- if c == '-' {
- s.step = stateESign
- return scanContinue
- }
- return stateESign(s, c)
-}
-
-// stateESign is the state after reading the mantissa, e, and sign in a number,
-// such as after reading `314e-` or `0.314e+`.
-func stateESign(s *scanner, c int) int {
- if '0' <= c && c <= '9' {
- s.step = stateE0
- return scanContinue
- }
- return s.error(c, "in exponent of numeric literal")
-}
-
-// stateE0 is the state after reading the mantissa, e, optional sign,
-// and at least one digit of the exponent in a number,
-// such as after reading `314e-2` or `0.314e+1` or `3.14e0`.
-func stateE0(s *scanner, c int) int {
- if '0' <= c && c <= '9' {
- s.step = stateE0
- return scanContinue
- }
- return stateEndValue(s, c)
-}
-
-// stateT is the state after reading `t`.
-func stateT(s *scanner, c int) int {
- if c == 'r' {
- s.step = stateTr
- return scanContinue
- }
- return s.error(c, "in literal true (expecting 'r')")
-}
-
-// stateTr is the state after reading `tr`.
-func stateTr(s *scanner, c int) int {
- if c == 'u' {
- s.step = stateTru
- return scanContinue
- }
- return s.error(c, "in literal true (expecting 'u')")
-}
-
-// stateTru is the state after reading `tru`.
-func stateTru(s *scanner, c int) int {
- if c == 'e' {
- s.step = stateEndValue
- return scanContinue
- }
- return s.error(c, "in literal true (expecting 'e')")
-}
-
-// stateF is the state after reading `f`.
-func stateF(s *scanner, c int) int {
- if c == 'a' {
- s.step = stateFa
- return scanContinue
- }
- return s.error(c, "in literal false (expecting 'a')")
-}
-
-// stateFa is the state after reading `fa`.
-func stateFa(s *scanner, c int) int {
- if c == 'l' {
- s.step = stateFal
- return scanContinue
- }
- return s.error(c, "in literal false (expecting 'l')")
-}
-
-// stateFal is the state after reading `fal`.
-func stateFal(s *scanner, c int) int {
- if c == 's' {
- s.step = stateFals
- return scanContinue
- }
- return s.error(c, "in literal false (expecting 's')")
-}
-
-// stateFals is the state after reading `fals`.
-func stateFals(s *scanner, c int) int {
- if c == 'e' {
- s.step = stateEndValue
- return scanContinue
- }
- return s.error(c, "in literal false (expecting 'e')")
-}
-
-// stateN is the state after reading `n`.
-func stateN(s *scanner, c int) int {
- if c == 'u' {
- s.step = stateNu
- return scanContinue
- }
- return s.error(c, "in literal null (expecting 'u')")
-}
-
-// stateNu is the state after reading `nu`.
-func stateNu(s *scanner, c int) int {
- if c == 'l' {
- s.step = stateNul
- return scanContinue
- }
- return s.error(c, "in literal null (expecting 'l')")
-}
-
-// stateNul is the state after reading `nul`.
-func stateNul(s *scanner, c int) int {
- if c == 'l' {
- s.step = stateEndValue
- return scanContinue
- }
- return s.error(c, "in literal null (expecting 'l')")
-}
-
-// stateError is the state after reaching a syntax error,
-// such as after reading `[1}` or `5.1.2`.
-func stateError(s *scanner, c int) int {
- return scanError
-}
-
-// error records an error and switches to the error state.
-func (s *scanner) error(c int, context string) int {
- s.step = stateError
- s.err = &SyntaxError{"invalid character " + quoteChar(c) + " " + context, s.bytes}
- return scanError
-}
-
-// quoteChar formats c as a quoted character literal
-func quoteChar(c int) string {
- // special cases - different from quoted strings
- if c == '\'' {
- return `'\''`
- }
- if c == '"' {
- return `'"'`
- }
-
- // use quoted string with different quotation marks
- s := strconv.Quote(string(c))
- return "'" + s[1:len(s)-1] + "'"
-}
-
-// undo causes the scanner to return scanCode from the next state transition.
-// This gives callers a simple 1-byte undo mechanism.
-func (s *scanner) undo(scanCode int) {
- if s.step == stateRedo {
- panic("invalid use of scanner")
- }
- s.redoCode = scanCode
- s.redoState = s.step
- s.step = stateRedo
-}
-
-// stateRedo helps implement the scanner's 1-byte undo.
-func stateRedo(s *scanner, c int) int {
- s.step = s.redoState
- return s.redoCode
-}
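
The scanner removed above is a hand-written state machine: s.step always holds the function that will consume the next byte, each state function returns one of the scan* codes and installs the following state, and undo/stateRedo give callers a one-byte lookahead. A minimal sketch, as if written inside package json, of how a caller can drive it using only the reset, step, bytes, and eof hooks shown above:

// validJSON reports whether data holds exactly one well-formed JSON value.
// Sketch only: it feeds the unexported scanner one byte at a time, the same
// way Decoder.readValue does, and lets eof() decide whether the input ended
// at a legal point.
func validJSON(data []byte) bool {
	var s scanner
	s.reset()
	for _, c := range data {
		s.bytes++
		if s.step(&s, int(c)) == scanError {
			return false
		}
	}
	return s.eof() != scanError
}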
+++ /dev/null
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package json
-
-import (
- "bytes"
- "math"
- "rand"
- "reflect"
- "testing"
-)
-
-// Tests of simple examples.
-
-type example struct {
- compact string
- indent string
-}
-
-var examples = []example{
- {`1`, `1`},
- {`{}`, `{}`},
- {`[]`, `[]`},
- {`{"":2}`, "{\n\t\"\": 2\n}"},
- {`[3]`, "[\n\t3\n]"},
- {`[1,2,3]`, "[\n\t1,\n\t2,\n\t3\n]"},
- {`{"x":1}`, "{\n\t\"x\": 1\n}"},
- {ex1, ex1i},
-}
-
-var ex1 = `[true,false,null,"x",1,1.5,0,-5e+2]`
-
-var ex1i = `[
- true,
- false,
- null,
- "x",
- 1,
- 1.5,
- 0,
- -5e+2
-]`
-
-func TestCompact(t *testing.T) {
- var buf bytes.Buffer
- for _, tt := range examples {
- buf.Reset()
- if err := Compact(&buf, []byte(tt.compact)); err != nil {
- t.Errorf("Compact(%#q): %v", tt.compact, err)
- } else if s := buf.String(); s != tt.compact {
- t.Errorf("Compact(%#q) = %#q, want original", tt.compact, s)
- }
-
- buf.Reset()
- if err := Compact(&buf, []byte(tt.indent)); err != nil {
- t.Errorf("Compact(%#q): %v", tt.indent, err)
- continue
- } else if s := buf.String(); s != tt.compact {
- t.Errorf("Compact(%#q) = %#q, want %#q", tt.indent, s, tt.compact)
- }
- }
-}
-
-func TestIndent(t *testing.T) {
- var buf bytes.Buffer
- for _, tt := range examples {
- buf.Reset()
- if err := Indent(&buf, []byte(tt.indent), "", "\t"); err != nil {
- t.Errorf("Indent(%#q): %v", tt.indent, err)
- } else if s := buf.String(); s != tt.indent {
- t.Errorf("Indent(%#q) = %#q, want original", tt.indent, s)
- }
-
- buf.Reset()
- if err := Indent(&buf, []byte(tt.compact), "", "\t"); err != nil {
- t.Errorf("Indent(%#q): %v", tt.compact, err)
- continue
- } else if s := buf.String(); s != tt.indent {
- t.Errorf("Indent(%#q) = %#q, want %#q", tt.compact, s, tt.indent)
- }
- }
-}
-
-// Tests of a large random structure.
-
-func TestCompactBig(t *testing.T) {
- initBig()
- var buf bytes.Buffer
- if err := Compact(&buf, jsonBig); err != nil {
- t.Fatalf("Compact: %v", err)
- }
- b := buf.Bytes()
- if bytes.Compare(b, jsonBig) != 0 {
- t.Error("Compact(jsonBig) != jsonBig")
- diff(t, b, jsonBig)
- return
- }
-}
-
-func TestIndentBig(t *testing.T) {
- initBig()
- var buf bytes.Buffer
- if err := Indent(&buf, jsonBig, "", "\t"); err != nil {
- t.Fatalf("Indent1: %v", err)
- }
- b := buf.Bytes()
- if len(b) == len(jsonBig) {
- // jsonBig is compact (no unnecessary spaces);
- // indenting should make it bigger
- t.Fatalf("Indent(jsonBig) did not get bigger")
- }
-
- // should be idempotent
- var buf1 bytes.Buffer
- if err := Indent(&buf1, b, "", "\t"); err != nil {
- t.Fatalf("Indent2: %v", err)
- }
- b1 := buf1.Bytes()
- if bytes.Compare(b1, b) != 0 {
- t.Error("Indent(Indent(jsonBig)) != Indent(jsonBig)")
- diff(t, b1, b)
- return
- }
-
- // should get back to original
- buf1.Reset()
- if err := Compact(&buf1, b); err != nil {
- t.Fatalf("Compact: %v", err)
- }
- b1 = buf1.Bytes()
- if bytes.Compare(b1, jsonBig) != 0 {
- t.Error("Compact(Indent(jsonBig)) != jsonBig")
- diff(t, b1, jsonBig)
- return
- }
-}
-
-type indentErrorTest struct {
- in string
- err error
-}
-
-var indentErrorTests = []indentErrorTest{
- {`{"X": "foo", "Y"}`, &SyntaxError{"invalid character '}' after object key", 17}},
- {`{"X": "foo" "Y": "bar"}`, &SyntaxError{"invalid character '\"' after object key:value pair", 13}},
-}
-
-func TestIdentErrors(t *testing.T) {
- for i, tt := range indentErrorTests {
- slice := make([]uint8, 0)
- buf := bytes.NewBuffer(slice)
- if err := Indent(buf, []uint8(tt.in), "", ""); err != nil {
- if !reflect.DeepEqual(err, tt.err) {
- t.Errorf("#%d: Indent: %#v", i, err)
- continue
- }
- }
- }
-}
-
-func TestNextValueBig(t *testing.T) {
- initBig()
- var scan scanner
- item, rest, err := nextValue(jsonBig, &scan)
- if err != nil {
- t.Fatalf("nextValue: %s", err)
- }
- if len(item) != len(jsonBig) || &item[0] != &jsonBig[0] {
- t.Errorf("invalid item: %d %d", len(item), len(jsonBig))
- }
- if len(rest) != 0 {
- t.Errorf("invalid rest: %d", len(rest))
- }
-
- item, rest, err = nextValue(append(jsonBig, "HELLO WORLD"...), &scan)
- if err != nil {
- t.Fatalf("nextValue extra: %s", err)
- }
- if len(item) != len(jsonBig) {
- t.Errorf("invalid item: %d %d", len(item), len(jsonBig))
- }
- if string(rest) != "HELLO WORLD" {
- t.Errorf("invalid rest: %d", len(rest))
- }
-}
-
-func BenchmarkSkipValue(b *testing.B) {
- initBig()
- var scan scanner
- for i := 0; i < b.N; i++ {
- nextValue(jsonBig, &scan)
- }
- b.SetBytes(int64(len(jsonBig)))
-}
-
-func diff(t *testing.T, a, b []byte) {
- for i := 0; ; i++ {
- if i >= len(a) || i >= len(b) || a[i] != b[i] {
- j := i - 10
- if j < 0 {
- j = 0
- }
- t.Errorf("diverge at %d: «%s» vs «%s»", i, trim(a[j:]), trim(b[j:]))
- return
- }
- }
-}
-
-func trim(b []byte) []byte {
- if len(b) > 20 {
- return b[0:20]
- }
- return b
-}
-
-// Generate a random JSON object.
-
-var jsonBig []byte
-
-const (
- big = 10000
- small = 100
-)
-
-func initBig() {
- n := big
- if testing.Short() {
- n = small
- }
- if len(jsonBig) != n {
- b, err := Marshal(genValue(n))
- if err != nil {
- panic(err)
- }
- jsonBig = b
- }
-}
-
-func genValue(n int) interface{} {
- if n > 1 {
- switch rand.Intn(2) {
- case 0:
- return genArray(n)
- case 1:
- return genMap(n)
- }
- }
- switch rand.Intn(3) {
- case 0:
- return rand.Intn(2) == 0
- case 1:
- return rand.NormFloat64()
- case 2:
- return genString(30)
- }
- panic("unreachable")
-}
-
-func genString(stddev float64) string {
- n := int(math.Abs(rand.NormFloat64()*stddev + stddev/2))
- c := make([]rune, n)
- for i := range c {
- f := math.Abs(rand.NormFloat64()*64 + 32)
- if f > 0x10ffff {
- f = 0x10ffff
- }
- c[i] = rune(f)
- }
- return string(c)
-}
-
-func genArray(n int) []interface{} {
- f := int(math.Abs(rand.NormFloat64()) * math.Min(10, float64(n/2)))
- if f > n {
- f = n
- }
- if n > 0 && f == 0 {
- f = 1
- }
- x := make([]interface{}, f)
- for i := range x {
- x[i] = genValue(((i+1)*n)/f - (i*n)/f)
- }
- return x
-}
-
-func genMap(n int) map[string]interface{} {
- f := int(math.Abs(rand.NormFloat64()) * math.Min(10, float64(n/2)))
- if f > n {
- f = n
- }
- if n > 0 && f == 0 {
- f = 1
- }
- x := make(map[string]interface{})
- for i := 0; i < f; i++ {
- x[genString(10)] = genValue(((i+1)*n)/f - (i*n)/f)
- }
- return x
-}
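
The two entry points exercised by these tests, Compact and Indent, both write into a *bytes.Buffer. A short client-side sketch, importing the package under its encoding/json path (plain "json" in older trees); nothing beyond the two functions tested above is assumed:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

func main() {
	src := []byte(`{"x": 1,  "y": [1, 2, 3]}`)

	var compact bytes.Buffer
	if err := json.Compact(&compact, src); err != nil {
		panic(err)
	}
	fmt.Println(compact.String()) // {"x":1,"y":[1,2,3]}

	var indented bytes.Buffer
	// Empty prefix and a tab indent, matching the tests above.
	if err := json.Indent(&indented, src, "", "\t"); err != nil {
		panic(err)
	}
	fmt.Println(indented.String())
}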
+++ /dev/null
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package json
-
-import (
- "errors"
- "io"
-)
-
-// A Decoder reads and decodes JSON objects from an input stream.
-type Decoder struct {
- r io.Reader
- buf []byte
- d decodeState
- scan scanner
- err error
-}
-
-// NewDecoder returns a new decoder that reads from r.
-func NewDecoder(r io.Reader) *Decoder {
- return &Decoder{r: r}
-}
-
-// Decode reads the next JSON-encoded value from its
-// input and stores it in the value pointed to by v.
-//
-// See the documentation for Unmarshal for details about
-// the conversion of JSON into a Go value.
-func (dec *Decoder) Decode(v interface{}) error {
- if dec.err != nil {
- return dec.err
- }
-
- n, err := dec.readValue()
- if err != nil {
- return err
- }
-
- // Don't save err from unmarshal into dec.err:
- // the connection is still usable since we read a complete JSON
- // object from it before the error happened.
- dec.d.init(dec.buf[0:n])
- err = dec.d.unmarshal(v)
-
- // Slide rest of data down.
- rest := copy(dec.buf, dec.buf[n:])
- dec.buf = dec.buf[0:rest]
-
- return err
-}
-
-// readValue reads a JSON value into dec.buf.
-// It returns the length of the encoding.
-func (dec *Decoder) readValue() (int, error) {
- dec.scan.reset()
-
- scanp := 0
- var err error
-Input:
- for {
- // Look in the buffer for a new value.
- for i, c := range dec.buf[scanp:] {
- dec.scan.bytes++
- v := dec.scan.step(&dec.scan, int(c))
- if v == scanEnd {
- scanp += i
- break Input
- }
- // scanEnd is delayed one byte.
- // We might block trying to get that byte from src,
- // so instead invent a space byte.
- if v == scanEndObject && dec.scan.step(&dec.scan, ' ') == scanEnd {
- scanp += i + 1
- break Input
- }
- if v == scanError {
- dec.err = dec.scan.err
- return 0, dec.scan.err
- }
- }
- scanp = len(dec.buf)
-
- // Did the last read have an error?
- // Delayed until now to allow buffer scan.
- if err != nil {
- if err == io.EOF {
- if dec.scan.step(&dec.scan, ' ') == scanEnd {
- break Input
- }
- if nonSpace(dec.buf) {
- err = io.ErrUnexpectedEOF
- }
- }
- dec.err = err
- return 0, err
- }
-
- // Make room to read more into the buffer.
- const minRead = 512
- if cap(dec.buf)-len(dec.buf) < minRead {
- newBuf := make([]byte, len(dec.buf), 2*cap(dec.buf)+minRead)
- copy(newBuf, dec.buf)
- dec.buf = newBuf
- }
-
- // Read. Delay error for next iteration (after scan).
- var n int
- n, err = dec.r.Read(dec.buf[len(dec.buf):cap(dec.buf)])
- dec.buf = dec.buf[0 : len(dec.buf)+n]
- }
- return scanp, nil
-}
-
-func nonSpace(b []byte) bool {
- for _, c := range b {
- if !isSpace(rune(c)) {
- return true
- }
- }
- return false
-}
-
-// An Encoder writes JSON objects to an output stream.
-type Encoder struct {
- w io.Writer
- e encodeState
- err error
-}
-
-// NewEncoder returns a new encoder that writes to w.
-func NewEncoder(w io.Writer) *Encoder {
- return &Encoder{w: w}
-}
-
-// Encode writes the JSON encoding of v to the connection.
-//
-// See the documentation for Marshal for details about the
-// conversion of Go values to JSON.
-func (enc *Encoder) Encode(v interface{}) error {
- if enc.err != nil {
- return enc.err
- }
- enc.e.Reset()
- err := enc.e.marshal(v)
- if err != nil {
- return err
- }
-
- // Terminate each value with a newline.
- // This makes the output look a little nicer
- // when debugging, and some kind of space
- // is required if the encoded value was a number,
- // so that the reader knows there aren't more
- // digits coming.
- enc.e.WriteByte('\n')
-
- if _, err = enc.w.Write(enc.e.Bytes()); err != nil {
- enc.err = err
- }
- return err
-}
-
-// RawMessage is a raw encoded JSON object.
-// It implements Marshaler and Unmarshaler and can
-// be used to delay JSON decoding or precompute a JSON encoding.
-type RawMessage []byte
-
-// MarshalJSON returns *m as the JSON encoding of m.
-func (m *RawMessage) MarshalJSON() ([]byte, error) {
- return *m, nil
-}
-
-// UnmarshalJSON sets *m to a copy of data.
-func (m *RawMessage) UnmarshalJSON(data []byte) error {
- if m == nil {
- return errors.New("json.RawMessage: UnmarshalJSON on nil pointer")
- }
- *m = append((*m)[0:0], data...)
- return nil
-}
-
-var _ Marshaler = (*RawMessage)(nil)
-var _ Unmarshaler = (*RawMessage)(nil)
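
A short usage sketch for the streaming API above: an Encoder writes one newline-terminated value per Encode call, and a Decoder reads values back one Decode call at a time until io.EOF. Only NewEncoder, Encode, NewDecoder, and Decode from this file are assumed (import path hedged as encoding/json; plain "json" in older trees):

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

func main() {
	var buf bytes.Buffer

	enc := json.NewEncoder(&buf)
	for _, v := range []interface{}{1.5, "hello", []interface{}{true, nil}} {
		if err := enc.Encode(v); err != nil {
			panic(err)
		}
	}

	dec := json.NewDecoder(&buf)
	for {
		var v interface{}
		if err := dec.Decode(&v); err != nil {
			break // io.EOF once the stream is exhausted
		}
		fmt.Printf("%#v\n", v)
	}
}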
+++ /dev/null
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package json
-
-import (
- "bytes"
- "reflect"
- "testing"
-)
-
-// Test values for the stream test.
-// One of each JSON kind.
-var streamTest = []interface{}{
- 0.1,
- "hello",
- nil,
- true,
- false,
- []interface{}{"a", "b", "c"},
- map[string]interface{}{"K": "Kelvin", "ß": "long s"},
- 3.14, // another value to make sure something can follow map
-}
-
-var streamEncoded = `0.1
-"hello"
-null
-true
-false
-["a","b","c"]
-{"ß":"long s","K":"Kelvin"}
-3.14
-`
-
-func TestEncoder(t *testing.T) {
- for i := 0; i <= len(streamTest); i++ {
- var buf bytes.Buffer
- enc := NewEncoder(&buf)
- for j, v := range streamTest[0:i] {
- if err := enc.Encode(v); err != nil {
- t.Fatalf("encode #%d: %v", j, err)
- }
- }
- if have, want := buf.String(), nlines(streamEncoded, i); have != want {
- t.Errorf("encoding %d items: mismatch", i)
- diff(t, []byte(have), []byte(want))
- break
- }
- }
-}
-
-func TestDecoder(t *testing.T) {
- for i := 0; i <= len(streamTest); i++ {
- // Use stream without newlines as input,
- // just to stress the decoder even more.
- // Our test input does not include back-to-back numbers.
- // Otherwise stripping the newlines would
- // merge two adjacent JSON values.
- var buf bytes.Buffer
- for _, c := range nlines(streamEncoded, i) {
- if c != '\n' {
- buf.WriteRune(c)
- }
- }
- out := make([]interface{}, i)
- dec := NewDecoder(&buf)
- for j := range out {
- if err := dec.Decode(&out[j]); err != nil {
- t.Fatalf("decode #%d/%d: %v", j, i, err)
- }
- }
- if !reflect.DeepEqual(out, streamTest[0:i]) {
- t.Errorf("decoding %d items: mismatch", i)
- for j := range out {
- if !reflect.DeepEqual(out[j], streamTest[j]) {
- t.Errorf("#%d: have %v want %v", j, out[j], streamTest[j])
- }
- }
- break
- }
- }
-}
-
-func nlines(s string, n int) string {
- if n <= 0 {
- return ""
- }
- for i, c := range s {
- if c == '\n' {
- if n--; n == 0 {
- return s[0 : i+1]
- }
- }
- }
- return s
-}
-
-func TestRawMessage(t *testing.T) {
- // TODO(rsc): Should not need the * in *RawMessage
- var data struct {
- X float64
- Id *RawMessage
- Y float32
- }
- const raw = `["\u0056",null]`
- const msg = `{"X":0.1,"Id":["\u0056",null],"Y":0.2}`
- err := Unmarshal([]byte(msg), &data)
- if err != nil {
- t.Fatalf("Unmarshal: %v", err)
- }
- if string([]byte(*data.Id)) != raw {
- t.Fatalf("Raw mismatch: have %#q want %#q", []byte(*data.Id), raw)
- }
- b, err := Marshal(&data)
- if err != nil {
- t.Fatalf("Marshal: %v", err)
- }
- if string(b) != msg {
- t.Fatalf("Marshal: have %#q want %#q", b, msg)
- }
-}
-
-func TestNullRawMessage(t *testing.T) {
- // TODO(rsc): Should not need the * in *RawMessage
- var data struct {
- X float64
- Id *RawMessage
- Y float32
- }
- data.Id = new(RawMessage)
- const msg = `{"X":0.1,"Id":null,"Y":0.2}`
- err := Unmarshal([]byte(msg), &data)
- if err != nil {
- t.Fatalf("Unmarshal: %v", err)
- }
- if data.Id != nil {
- t.Fatalf("Raw mismatch: have non-nil, want nil")
- }
- b, err := Marshal(&data)
- if err != nil {
- t.Fatalf("Marshal: %v", err)
- }
- if string(b) != msg {
- t.Fatalf("Marshal: have %#q want %#q", b, msg)
- }
-}
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package json
-
-import (
- "testing"
-)
-
-type basicLatin2xTag struct {
- V string `json:"$-"`
-}
-
-type basicLatin3xTag struct {
- V string `json:"0123456789"`
-}
-
-type basicLatin4xTag struct {
- V string `json:"ABCDEFGHIJKLMO"`
-}
-
-type basicLatin5xTag struct {
- V string `json:"PQRSTUVWXYZ_"`
-}
-
-type basicLatin6xTag struct {
- V string `json:"abcdefghijklmno"`
-}
-
-type basicLatin7xTag struct {
- V string `json:"pqrstuvwxyz"`
-}
-
-type miscPlaneTag struct {
- V string `json:"色は匂へど"`
-}
-
-type emptyTag struct {
- W string
-}
-
-type misnamedTag struct {
- X string `jsom:"Misnamed"`
-}
-
-type badFormatTag struct {
- Y string `:"BadFormat"`
-}
-
-type badCodeTag struct {
- Z string `json:" !\"#%&'()*+,./"`
-}
-
-var structTagObjectKeyTests = []struct {
- raw interface{}
- value string
- key string
-}{
- {basicLatin2xTag{"2x"}, "2x", "$-"},
- {basicLatin3xTag{"3x"}, "3x", "0123456789"},
- {basicLatin4xTag{"4x"}, "4x", "ABCDEFGHIJKLMO"},
- {basicLatin5xTag{"5x"}, "5x", "PQRSTUVWXYZ_"},
- {basicLatin6xTag{"6x"}, "6x", "abcdefghijklmno"},
- {basicLatin7xTag{"7x"}, "7x", "pqrstuvwxyz"},
- {miscPlaneTag{"いろはにほへと"}, "いろはにほへと", "色は匂へど"},
- {emptyTag{"Pour Moi"}, "Pour Moi", "W"},
- {misnamedTag{"Animal Kingdom"}, "Animal Kingdom", "X"},
- {badFormatTag{"Orfevre"}, "Orfevre", "Y"},
- {badCodeTag{"Reliable Man"}, "Reliable Man", "Z"},
-}
-
-func TestStructTagObjectKey(t *testing.T) {
- for _, tt := range structTagObjectKeyTests {
- b, err := Marshal(tt.raw)
- if err != nil {
- t.Fatalf("Marshal(%#q) failed: %v", tt.raw, err)
- }
- var f interface{}
- err = Unmarshal(b, &f)
- if err != nil {
- t.Fatalf("Unmarshal(%#q) failed: %v", b, err)
- }
- for i, v := range f.(map[string]interface{}) {
- switch i {
- case tt.key:
- if s, ok := v.(string); !ok || s != tt.value {
- t.Fatalf("Unexpected value: %#q, want %v", s, tt.value)
- }
- default:
- t.Fatalf("Unexpected key: %#q", i)
- }
- }
- }
-}
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package json
-
-import (
- "strings"
-)
-
-// tagOptions is the string following a comma in a struct field's "json"
-// tag, or the empty string. It does not include the leading comma.
-type tagOptions string
-
-// parseTag splits a struct field's json tag into its name and
-// comma-separated options.
-func parseTag(tag string) (string, tagOptions) {
- if idx := strings.Index(tag, ","); idx != -1 {
- return tag[:idx], tagOptions(tag[idx+1:])
- }
- return tag, tagOptions("")
-}
-
-// Contains checks whether the comma-separated list of options
-// contains the given option name. The name must be surrounded by a
-// string boundary or commas.
-func (o tagOptions) Contains(optionName string) bool {
- if len(o) == 0 {
- return false
- }
- s := string(o)
- for s != "" {
- var next string
- i := strings.Index(s, ",")
- if i >= 0 {
- s, next = s[:i], s[i+1:]
- }
- if s == optionName {
- return true
- }
- s = next
- }
- return false
-}
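
parseTag splits a struct field tag value such as "name,omitempty,string" into the name ("name") and its options ("omitempty,string"), and Contains matches whole comma-separated items only, so Contains("omit") is false for that tag. A tiny sketch, as if inside package json, using only the two helpers above:

// isOmitEmpty reports whether a json struct tag value asks for omitempty.
func isOmitEmpty(tag string) bool {
	_, opts := parseTag(tag) // e.g. parseTag("name,omitempty") -> "name", "omitempty"
	return opts.Contains("omitempty")
}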
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package json
-
-import (
- "testing"
-)
-
-func TestTagParsing(t *testing.T) {
- name, opts := parseTag("field,foobar,foo")
- if name != "field" {
- t.Fatalf("name = %q, want field", name)
- }
- for _, tt := range []struct {
- opt string
- want bool
- }{
- {"foobar", true},
- {"foo", true},
- {"bar", false},
- } {
- if opts.Contains(tt.opt) != tt.want {
- t.Errorf("Contains(%q) = %v", tt.opt, !tt.want)
- }
- }
-}
"bytes"
"fmt"
"io"
- "runtime"
"os"
- "time"
+ "runtime"
"sync"
+ "time"
)
// These flags define which text to prefix to each log entry generated by the Logger.
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package syslog provides a simple interface to the system log service. It
+// can send messages to the syslog daemon using UNIX domain sockets, UDP, or
+// TCP connections.
+package syslog
+
+import (
+ "fmt"
+ "log"
+ "net"
+ "os"
+)
+
+type Priority int
+
+const (
+ // From /usr/include/sys/syslog.h.
+ // These are the same on Linux, BSD, and OS X.
+ LOG_EMERG Priority = iota
+ LOG_ALERT
+ LOG_CRIT
+ LOG_ERR
+ LOG_WARNING
+ LOG_NOTICE
+ LOG_INFO
+ LOG_DEBUG
+)
+
+// A Writer is a connection to a syslog server.
+type Writer struct {
+ priority Priority
+ prefix string
+ conn serverConn
+}
+
+type serverConn interface {
+ writeBytes(p Priority, prefix string, b []byte) (int, error)
+ writeString(p Priority, prefix string, s string) (int, error)
+ close() error
+}
+
+type netConn struct {
+ conn net.Conn
+}
+
+// New establishes a new connection to the system log daemon.
+// Each write to the returned writer sends a log message with
+// the given priority and prefix.
+func New(priority Priority, prefix string) (w *Writer, err error) {
+ return Dial("", "", priority, prefix)
+}
+
+// Dial establishes a connection to a log daemon by connecting
+// to address raddr on the network net.
+// Each write to the returned writer sends a log message with
+// the given priority and prefix.
+func Dial(network, raddr string, priority Priority, prefix string) (w *Writer, err error) {
+ if prefix == "" {
+ prefix = os.Args[0]
+ }
+ var conn serverConn
+ if network == "" {
+ conn, err = unixSyslog()
+ } else {
+ var c net.Conn
+ c, err = net.Dial(network, raddr)
+ conn = netConn{c}
+ }
+ return &Writer{priority, prefix, conn}, err
+}
+
+// Write sends a log message to the syslog daemon.
+func (w *Writer) Write(b []byte) (int, error) {
+ if w.priority > LOG_DEBUG || w.priority < LOG_EMERG {
+ return 0, os.EINVAL
+ }
+ return w.conn.writeBytes(w.priority, w.prefix, b)
+}
+
+func (w *Writer) writeString(p Priority, s string) (int, error) {
+ return w.conn.writeString(p, w.prefix, s)
+}
+
+func (w *Writer) Close() error { return w.conn.close() }
+
+// Emerg logs a message using the LOG_EMERG priority.
+func (w *Writer) Emerg(m string) (err error) {
+ _, err = w.writeString(LOG_EMERG, m)
+ return err
+}
+
+// Crit logs a message using the LOG_CRIT priority.
+func (w *Writer) Crit(m string) (err error) {
+ _, err = w.writeString(LOG_CRIT, m)
+ return err
+}
+
+// Err logs a message using the LOG_ERR priority.
+func (w *Writer) Err(m string) (err error) {
+ _, err = w.writeString(LOG_ERR, m)
+ return err
+}
+
+// Warning logs a message using the LOG_WARNING priority.
+func (w *Writer) Warning(m string) (err error) {
+ _, err = w.writeString(LOG_WARNING, m)
+ return err
+}
+
+// Notice logs a message using the LOG_NOTICE priority.
+func (w *Writer) Notice(m string) (err error) {
+ _, err = w.writeString(LOG_NOTICE, m)
+ return err
+}
+
+// Info logs a message using the LOG_INFO priority.
+func (w *Writer) Info(m string) (err error) {
+ _, err = w.writeString(LOG_INFO, m)
+ return err
+}
+
+// Debug logs a message using the LOG_DEBUG priority.
+func (w *Writer) Debug(m string) (err error) {
+ _, err = w.writeString(LOG_DEBUG, m)
+ return err
+}
+
+func (n netConn) writeBytes(p Priority, prefix string, b []byte) (int, error) {
+ return fmt.Fprintf(n.conn, "<%d>%s: %s\n", p, prefix, b)
+}
+
+func (n netConn) writeString(p Priority, prefix string, s string) (int, error) {
+ return fmt.Fprintf(n.conn, "<%d>%s: %s\n", p, prefix, s)
+}
+
+func (n netConn) close() error {
+ return n.conn.Close()
+}
+
+// NewLogger provides a log.Logger that sends its messages to the system
+// log service instead of the standard destination; flag is passed as-is
+// to log.New. All messages are logged with priority p.
+func NewLogger(p Priority, flag int) *log.Logger {
+ s, err := New(p, "")
+ if err != nil {
+ return nil
+ }
+ return log.New(s, "", flag)
+}
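
Typical client use of the new package, based only on the exported API above (New, Dial, the Writer methods, and NewLogger). The import path is assumed to be log/syslog per the new library layout (plain "syslog" in older trees):

package main

import (
	"log"
	"log/syslog"
)

func main() {
	// Connect to the local syslog daemon over a Unix domain socket.
	w, err := syslog.New(syslog.LOG_WARNING, "myprog")
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()
	w.Warning("disk space is low")
	w.Info("started")

	// Or route a standard *log.Logger through syslog.
	// NewLogger returns nil if it cannot reach the daemon.
	if l := syslog.NewLogger(syslog.LOG_NOTICE, log.LstdFlags); l != nil {
		l.Println("hello from log.Logger")
	}
}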
--- /dev/null
+/* syslog_c.c -- call syslog for Go.
+
+ Copyright 2011 The Go Authors. All rights reserved.
+ Use of this source code is governed by a BSD-style
+ license that can be found in the LICENSE file. */
+
+#include <syslog.h>
+
+/* We need to use a C function to call the syslog function, because we
+ can't represent a C varargs function in Go. */
+
+void syslog_c(int, const char*)
+ asm ("libgo_syslog.syslog.syslog_c");
+
+void
+syslog_c (int priority, const char *msg)
+{
+ syslog (priority, "%s", msg);
+}
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// gccgo specific implementation of syslog for Solaris. Solaris uses
+// STREAMS to communicate with syslogd. That is enough of a pain that
+// we just call the libc function.
+
+package syslog
+
+import (
+ "fmt"
+ "syscall"
+)
+
+func unixSyslog() (conn serverConn, err error) {
+ return libcConn(0), nil
+}
+
+type libcConn int
+
+func syslog_c(int, *byte)
+
+func (libcConn) writeBytes(p Priority, prefix string, b []byte) (int, error) {
+ syslog_c(int(p), syscall.StringBytePtr(fmt.Sprintf("%s: %s", prefix, b)))
+ return len(b), nil
+}
+
+func (libcConn) writeString(p Priority, prefix string, s string) (int, error) {
+ syslog_c(int(p), syscall.StringBytePtr(fmt.Sprintf("%s: %s", prefix, s)))
+ return len(s), nil
+}
+
+func (libcConn) close() error {
+ return nil
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package syslog
+
+import (
+ "io"
+ "log"
+ "net"
+ "testing"
+)
+
+var serverAddr string
+
+func runSyslog(c net.PacketConn, done chan<- string) {
+ var buf [4096]byte
+ var rcvd string = ""
+ for {
+ n, _, err := c.ReadFrom(buf[0:])
+ if err != nil || n == 0 {
+ break
+ }
+ rcvd += string(buf[0:n])
+ }
+ done <- rcvd
+}
+
+func startServer(done chan<- string) {
+ c, e := net.ListenPacket("udp", "127.0.0.1:0")
+ if e != nil {
+ log.Fatalf("net.ListenPacket failed udp :0 %v", e)
+ }
+ serverAddr = c.LocalAddr().String()
+ c.SetReadTimeout(100e6) // 100ms
+ go runSyslog(c, done)
+}
+
+func skipNetTest(t *testing.T) bool {
+ if testing.Short() {
+ // Depends on syslog daemon running, and sometimes it's not.
+ t.Logf("skipping syslog test during -short")
+ return true
+ }
+ return false
+}
+
+func TestNew(t *testing.T) {
+ if skipNetTest(t) {
+ return
+ }
+ s, err := New(LOG_INFO, "")
+ if err != nil {
+ t.Fatalf("New() failed: %s", err)
+ }
+ // Don't send any messages.
+ s.Close()
+}
+
+func TestNewLogger(t *testing.T) {
+ if skipNetTest(t) {
+ return
+ }
+ f := NewLogger(LOG_INFO, 0)
+ if f == nil {
+ t.Error("NewLogger() failed")
+ }
+}
+
+func TestDial(t *testing.T) {
+ if skipNetTest(t) {
+ return
+ }
+ l, err := Dial("", "", LOG_ERR, "syslog_test")
+ if err != nil {
+ t.Fatalf("Dial() failed: %s", err)
+ }
+ l.Close()
+}
+
+func TestUDPDial(t *testing.T) {
+ done := make(chan string)
+ startServer(done)
+ l, err := Dial("udp", serverAddr, LOG_INFO, "syslog_test")
+ if err != nil {
+ t.Fatalf("syslog.Dial() failed: %s", err)
+ }
+ msg := "udp test"
+ l.Info(msg)
+ expected := "<6>syslog_test: udp test\n"
+ rcvd := <-done
+ if rcvd != expected {
+ t.Fatalf("s.Info() = '%q', but wanted '%q'", rcvd, expected)
+ }
+}
+
+func TestWrite(t *testing.T) {
+ done := make(chan string)
+ startServer(done)
+ l, err := Dial("udp", serverAddr, LOG_ERR, "syslog_test")
+ if err != nil {
+ t.Fatalf("syslog.Dial() failed: %s", err)
+ }
+ msg := "write test"
+ _, err = io.WriteString(l, msg)
+ if err != nil {
+ t.Fatalf("WriteString() failed: %s", err)
+ }
+ expected := "<3>syslog_test: write test\n"
+ rcvd := <-done
+ if rcvd != expected {
+ t.Fatalf("s.Info() = '%q', but wanted '%q'", rcvd, expected)
+ }
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package syslog
+
+import (
+ "errors"
+ "net"
+)
+
+// unixSyslog opens a connection to the syslog daemon running on the
+// local machine using a Unix domain socket.
+func unixSyslog() (conn serverConn, err error) {
+ logTypes := []string{"unixgram", "unix"}
+ logPaths := []string{"/dev/log", "/var/run/syslog"}
+ var raddr string
+ for _, network := range logTypes {
+ for _, path := range logPaths {
+ raddr = path
+ conn, err := net.Dial(network, raddr)
+ if err != nil {
+ continue
+ } else {
+ return netConn{conn}, nil
+ }
+ }
+ }
+ return nil, errors.New("Unix syslog delivery error")
+}
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-/*
-Package mail implements parsing of mail messages.
-
-For the most part, this package follows the syntax as specified by RFC 5322.
-Notable divergences:
- * Obsolete address formats are not parsed, including addresses with
- embedded route information.
- * Group addresses are not parsed.
- * The full range of spacing (the CFWS syntax element) is not supported,
- such as breaking addresses across lines.
-*/
-package mail
-
-import (
- "bufio"
- "bytes"
- "encoding/base64"
- "errors"
- "fmt"
- "io"
- "io/ioutil"
- "log"
- "net/textproto"
- "strconv"
- "strings"
- "time"
-)
-
-var debug = debugT(false)
-
-type debugT bool
-
-func (d debugT) Printf(format string, args ...interface{}) {
- if d {
- log.Printf(format, args...)
- }
-}
-
-// A Message represents a parsed mail message.
-type Message struct {
- Header Header
- Body io.Reader
-}
-
-// ReadMessage reads a message from r.
-// The headers are parsed, and the body of the message will be available for reading from msg.Body.
-func ReadMessage(r io.Reader) (msg *Message, err error) {
- tp := textproto.NewReader(bufio.NewReader(r))
-
- hdr, err := tp.ReadMIMEHeader()
- if err != nil {
- return nil, err
- }
-
- return &Message{
- Header: Header(hdr),
- Body: tp.R,
- }, nil
-}
-
-// Layouts suitable for passing to time.Parse.
-// These are tried in order.
-var dateLayouts []string
-
-func init() {
- // Generate layouts based on RFC 5322, section 3.3.
-
- dows := [...]string{"", "Mon, "} // day-of-week
- days := [...]string{"2", "02"} // day = 1*2DIGIT
- years := [...]string{"2006", "06"} // year = 4*DIGIT / 2*DIGIT
- seconds := [...]string{":05", ""} // second
- zones := [...]string{"-0700", "MST"} // zone = (("+" / "-") 4DIGIT) / "GMT" / ...
-
- for _, dow := range dows {
- for _, day := range days {
- for _, year := range years {
- for _, second := range seconds {
- for _, zone := range zones {
- s := dow + day + " Jan " + year + " 15:04" + second + " " + zone
- dateLayouts = append(dateLayouts, s)
- }
- }
- }
- }
- }
-}
-
-func parseDate(date string) (*time.Time, error) {
- for _, layout := range dateLayouts {
- t, err := time.Parse(layout, date)
- if err == nil {
- return t, nil
- }
- }
- return nil, errors.New("mail: header could not be parsed")
-}
-
-// A Header represents the key-value pairs in a mail message header.
-type Header map[string][]string
-
-// Get gets the first value associated with the given key.
-// If there are no values associated with the key, Get returns "".
-func (h Header) Get(key string) string {
- return textproto.MIMEHeader(h).Get(key)
-}
-
-var ErrHeaderNotPresent = errors.New("mail: header not in message")
-
-// Date parses the Date header field.
-func (h Header) Date() (*time.Time, error) {
- hdr := h.Get("Date")
- if hdr == "" {
- return nil, ErrHeaderNotPresent
- }
- return parseDate(hdr)
-}
-
-// AddressList parses the named header field as a list of addresses.
-func (h Header) AddressList(key string) ([]*Address, error) {
- hdr := h.Get(key)
- if hdr == "" {
- return nil, ErrHeaderNotPresent
- }
- return newAddrParser(hdr).parseAddressList()
-}
-
-// Address represents a single mail address.
-// An address such as "Barry Gibbs <bg@example.com>" is represented
-// as Address{Name: "Barry Gibbs", Address: "bg@example.com"}.
-type Address struct {
- Name string // Proper name; may be empty.
- Address string // user@domain
-}
-
-// String formats the address as a valid RFC 5322 address.
-// If the address's name contains non-ASCII characters
-// the name will be rendered according to RFC 2047.
-func (a *Address) String() string {
- s := "<" + a.Address + ">"
- if a.Name == "" {
- return s
- }
- // If every character is printable ASCII, quoting is simple.
- allPrintable := true
- for i := 0; i < len(a.Name); i++ {
- if !isVchar(a.Name[i]) {
- allPrintable = false
- break
- }
- }
- if allPrintable {
- b := bytes.NewBufferString(`"`)
- for i := 0; i < len(a.Name); i++ {
- if !isQtext(a.Name[i]) {
- b.WriteByte('\\')
- }
- b.WriteByte(a.Name[i])
- }
- b.WriteString(`" `)
- b.WriteString(s)
- return b.String()
- }
-
- // UTF-8 "Q" encoding
- b := bytes.NewBufferString("=?utf-8?q?")
- for i := 0; i < len(a.Name); i++ {
- switch c := a.Name[i]; {
- case c == ' ':
- b.WriteByte('_')
- case isVchar(c) && c != '=' && c != '?' && c != '_':
- b.WriteByte(c)
- default:
- fmt.Fprintf(b, "=%02X", c)
- }
- }
- b.WriteString("?= ")
- b.WriteString(s)
- return b.String()
-}
-
-type addrParser []byte
-
-func newAddrParser(s string) *addrParser {
- p := addrParser([]byte(s))
- return &p
-}
-
-func (p *addrParser) parseAddressList() ([]*Address, error) {
- var list []*Address
- for {
- p.skipSpace()
- addr, err := p.parseAddress()
- if err != nil {
- return nil, err
- }
- list = append(list, addr)
-
- p.skipSpace()
- if p.empty() {
- break
- }
- if !p.consume(',') {
- return nil, errors.New("mail: expected comma")
- }
- }
- return list, nil
-}
-
-// parseAddress parses a single RFC 5322 address at the start of p.
-func (p *addrParser) parseAddress() (addr *Address, err error) {
- debug.Printf("parseAddress: %q", *p)
- p.skipSpace()
- if p.empty() {
- return nil, errors.New("mail: no address")
- }
-
- // address = name-addr / addr-spec
- // TODO(dsymonds): Support parsing group address.
-
- // addr-spec has a more restricted grammar than name-addr,
- // so try parsing it first, and fallback to name-addr.
- // TODO(dsymonds): Is this really correct?
- spec, err := p.consumeAddrSpec()
- if err == nil {
- return &Address{
- Address: spec,
- }, err
- }
- debug.Printf("parseAddress: not an addr-spec: %v", err)
- debug.Printf("parseAddress: state is now %q", *p)
-
- // display-name
- var displayName string
- if p.peek() != '<' {
- displayName, err = p.consumePhrase()
- if err != nil {
- return nil, err
- }
- }
- debug.Printf("parseAddress: displayName=%q", displayName)
-
- // angle-addr = "<" addr-spec ">"
- p.skipSpace()
- if !p.consume('<') {
- return nil, errors.New("mail: no angle-addr")
- }
- spec, err = p.consumeAddrSpec()
- if err != nil {
- return nil, err
- }
- if !p.consume('>') {
- return nil, errors.New("mail: unclosed angle-addr")
- }
- debug.Printf("parseAddress: spec=%q", spec)
-
- return &Address{
- Name: displayName,
- Address: spec,
- }, nil
-}
-
-// consumeAddrSpec parses a single RFC 5322 addr-spec at the start of p.
-func (p *addrParser) consumeAddrSpec() (spec string, err error) {
- debug.Printf("consumeAddrSpec: %q", *p)
-
- orig := *p
- defer func() {
- if err != nil {
- *p = orig
- }
- }()
-
- // local-part = dot-atom / quoted-string
- var localPart string
- p.skipSpace()
- if p.empty() {
- return "", errors.New("mail: no addr-spec")
- }
- if p.peek() == '"' {
- // quoted-string
- debug.Printf("consumeAddrSpec: parsing quoted-string")
- localPart, err = p.consumeQuotedString()
- } else {
- // dot-atom
- debug.Printf("consumeAddrSpec: parsing dot-atom")
- localPart, err = p.consumeAtom(true)
- }
- if err != nil {
- debug.Printf("consumeAddrSpec: failed: %v", err)
- return "", err
- }
-
- if !p.consume('@') {
- return "", errors.New("mail: missing @ in addr-spec")
- }
-
- // domain = dot-atom / domain-literal
- var domain string
- p.skipSpace()
- if p.empty() {
- return "", errors.New("mail: no domain in addr-spec")
- }
- // TODO(dsymonds): Handle domain-literal
- domain, err = p.consumeAtom(true)
- if err != nil {
- return "", err
- }
-
- return localPart + "@" + domain, nil
-}
-
-// consumePhrase parses the RFC 5322 phrase at the start of p.
-func (p *addrParser) consumePhrase() (phrase string, err error) {
- debug.Printf("consumePhrase: [%s]", *p)
- // phrase = 1*word
- var words []string
- for {
- // word = atom / quoted-string
- var word string
- p.skipSpace()
- if p.empty() {
- return "", errors.New("mail: missing phrase")
- }
- if p.peek() == '"' {
- // quoted-string
- word, err = p.consumeQuotedString()
- } else {
- // atom
- word, err = p.consumeAtom(false)
- }
-
- // RFC 2047 encoded-word starts with =?, ends with ?=, and has two other ?s.
- if err == nil && strings.HasPrefix(word, "=?") && strings.HasSuffix(word, "?=") && strings.Count(word, "?") == 4 {
- word, err = decodeRFC2047Word(word)
- }
-
- if err != nil {
- break
- }
- debug.Printf("consumePhrase: consumed %q", word)
- words = append(words, word)
- }
- // Ignore any error if we got at least one word.
- if err != nil && len(words) == 0 {
- debug.Printf("consumePhrase: hit err: %v", err)
- return "", errors.New("mail: missing word in phrase")
- }
- phrase = strings.Join(words, " ")
- return phrase, nil
-}
-
-// consumeQuotedString parses the quoted string at the start of p.
-func (p *addrParser) consumeQuotedString() (qs string, err error) {
- // Assume first byte is '"'.
- i := 1
- qsb := make([]byte, 0, 10)
-Loop:
- for {
- if i >= p.len() {
- return "", errors.New("mail: unclosed quoted-string")
- }
- switch c := (*p)[i]; {
- case c == '"':
- break Loop
- case c == '\\':
- if i+1 == p.len() {
- return "", errors.New("mail: unclosed quoted-string")
- }
- qsb = append(qsb, (*p)[i+1])
- i += 2
- case isQtext(c), c == ' ' || c == '\t':
- // qtext (printable US-ASCII excluding " and \), or
- // FWS (almost; we're ignoring CRLF)
- qsb = append(qsb, c)
- i++
- default:
- return "", fmt.Errorf("mail: bad character in quoted-string: %q", c)
- }
- }
- *p = (*p)[i+1:]
- return string(qsb), nil
-}
-
-// consumeAtom parses an RFC 5322 atom at the start of p.
-// If dot is true, consumeAtom parses an RFC 5322 dot-atom instead.
-func (p *addrParser) consumeAtom(dot bool) (atom string, err error) {
- if !isAtext(p.peek(), false) {
- return "", errors.New("mail: invalid string")
- }
- i := 1
- for ; i < p.len() && isAtext((*p)[i], dot); i++ {
- }
- // TODO(dsymonds): Remove the []byte() conversion here when 6g doesn't need it.
- atom, *p = string([]byte((*p)[:i])), (*p)[i:]
- return atom, nil
-}
-
-func (p *addrParser) consume(c byte) bool {
- if p.empty() || p.peek() != c {
- return false
- }
- *p = (*p)[1:]
- return true
-}
-
-// skipSpace skips the leading space and tab characters.
-func (p *addrParser) skipSpace() {
- *p = bytes.TrimLeft(*p, " \t")
-}
-
-func (p *addrParser) peek() byte {
- return (*p)[0]
-}
-
-func (p *addrParser) empty() bool {
- return p.len() == 0
-}
-
-func (p *addrParser) len() int {
- return len(*p)
-}
-
-func decodeRFC2047Word(s string) (string, error) {
- fields := strings.Split(s, "?")
- if len(fields) != 5 || fields[0] != "=" || fields[4] != "=" {
- return "", errors.New("mail: address not RFC 2047 encoded")
- }
- charset, enc := strings.ToLower(fields[1]), strings.ToLower(fields[2])
- if charset != "iso-8859-1" && charset != "utf-8" {
- return "", fmt.Errorf("mail: charset not supported: %q", charset)
- }
-
- in := bytes.NewBufferString(fields[3])
- var r io.Reader
- switch enc {
- case "b":
- r = base64.NewDecoder(base64.StdEncoding, in)
- case "q":
- r = qDecoder{r: in}
- default:
- return "", fmt.Errorf("mail: RFC 2047 encoding not supported: %q", enc)
- }
-
- dec, err := ioutil.ReadAll(r)
- if err != nil {
- return "", err
- }
-
- switch charset {
- case "iso-8859-1":
- b := new(bytes.Buffer)
- for _, c := range dec {
- b.WriteRune(rune(c))
- }
- return b.String(), nil
- case "utf-8":
- return string(dec), nil
- }
- panic("unreachable")
-}
-
-type qDecoder struct {
- r io.Reader
- scratch [2]byte
-}
-
-func (qd qDecoder) Read(p []byte) (n int, err error) {
- // This method writes at most one byte into p.
- if len(p) == 0 {
- return 0, nil
- }
- if _, err := qd.r.Read(qd.scratch[:1]); err != nil {
- return 0, err
- }
- switch c := qd.scratch[0]; {
- case c == '=':
- if _, err := io.ReadFull(qd.r, qd.scratch[:2]); err != nil {
- return 0, err
- }
- x, err := strconv.Btoi64(string(qd.scratch[:2]), 16)
- if err != nil {
- return 0, fmt.Errorf("mail: invalid RFC 2047 encoding: %q", qd.scratch[:2])
- }
- p[0] = byte(x)
- case c == '_':
- p[0] = ' '
- default:
- p[0] = c
- }
- return 1, nil
-}
-
-var atextChars = []byte("ABCDEFGHIJKLMNOPQRSTUVWXYZ" +
- "abcdefghijklmnopqrstuvwxyz" +
- "0123456789" +
- "!#$%&'*+-/=?^_`{|}~")
-
-// isAtext returns true if c is an RFC 5322 atext character.
-// If dot is true, period is included.
-func isAtext(c byte, dot bool) bool {
- if dot && c == '.' {
- return true
- }
- return bytes.IndexByte(atextChars, c) >= 0
-}
-
-// isQtext returns true if c is an RFC 5322 qtext character.
-func isQtext(c byte) bool {
- // Printable US-ASCII, excluding backslash or quote.
- if c == '\\' || c == '"' {
- return false
- }
- return '!' <= c && c <= '~'
-}
-
-// isVchar returns true if c is an RFC 5322 VCHAR character.
-func isVchar(c byte) bool {
- // Visible (printing) characters.
- return '!' <= c && c <= '~'
-}
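
A short usage sketch for the package above: ReadMessage parses the headers and leaves the body readable on msg.Body, and Header.AddressList runs the address parser over a header field. Only exported names from this file are assumed; the import path is hedged as net/mail per the new layout (plain "mail" in older trees):

package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"log"
	"net/mail"
)

func main() {
	raw := "From: John Doe <jdoe@machine.example>\r\n" +
		"Subject: Saying Hello\r\n" +
		"\r\n" +
		"This is the body.\r\n"

	msg, err := mail.ReadMessage(bytes.NewBufferString(raw))
	if err != nil {
		log.Fatal(err)
	}
	from, err := msg.Header.AddressList("From")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(from[0].Name, from[0].Address) // John Doe jdoe@machine.example

	body, _ := ioutil.ReadAll(msg.Body)
	fmt.Printf("%s", body)
}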
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package mail
-
-import (
- "bytes"
- "io/ioutil"
- "reflect"
- "testing"
- "time"
-)
-
-var parseTests = []struct {
- in string
- header Header
- body string
-}{
- {
- // RFC 5322, Appendix A.1.1
- in: `From: John Doe <jdoe@machine.example>
-To: Mary Smith <mary@example.net>
-Subject: Saying Hello
-Date: Fri, 21 Nov 1997 09:55:06 -0600
-Message-ID: <1234@local.machine.example>
-
-This is a message just to say hello.
-So, "Hello".
-`,
- header: Header{
- "From": []string{"John Doe <jdoe@machine.example>"},
- "To": []string{"Mary Smith <mary@example.net>"},
- "Subject": []string{"Saying Hello"},
- "Date": []string{"Fri, 21 Nov 1997 09:55:06 -0600"},
- "Message-Id": []string{"<1234@local.machine.example>"},
- },
- body: "This is a message just to say hello.\nSo, \"Hello\".\n",
- },
-}
-
-func TestParsing(t *testing.T) {
- for i, test := range parseTests {
- msg, err := ReadMessage(bytes.NewBuffer([]byte(test.in)))
- if err != nil {
- t.Errorf("test #%d: Failed parsing message: %v", i, err)
- continue
- }
- if !headerEq(msg.Header, test.header) {
- t.Errorf("test #%d: Incorrectly parsed message header.\nGot:\n%+v\nWant:\n%+v",
- i, msg.Header, test.header)
- }
- body, err := ioutil.ReadAll(msg.Body)
- if err != nil {
- t.Errorf("test #%d: Failed reading body: %v", i, err)
- continue
- }
- bodyStr := string(body)
- if bodyStr != test.body {
- t.Errorf("test #%d: Incorrectly parsed message body.\nGot:\n%+v\nWant:\n%+v",
- i, bodyStr, test.body)
- }
- }
-}
-
-func headerEq(a, b Header) bool {
- if len(a) != len(b) {
- return false
- }
- for k, as := range a {
- bs, ok := b[k]
- if !ok {
- return false
- }
- if !reflect.DeepEqual(as, bs) {
- return false
- }
- }
- return true
-}
-
-func TestDateParsing(t *testing.T) {
- tests := []struct {
- dateStr string
- exp *time.Time
- }{
- // RFC 5322, Appendix A.1.1
- {
- "Fri, 21 Nov 1997 09:55:06 -0600",
- &time.Time{
- Year: 1997,
- Month: 11,
- Day: 21,
- Hour: 9,
- Minute: 55,
- Second: 6,
- ZoneOffset: -6 * 60 * 60,
- },
- },
- // RFC5322, Appendix A.6.2
- // Obsolete date.
- {
- "21 Nov 97 09:55:06 GMT",
- &time.Time{
- Year: 1997,
- Month: 11,
- Day: 21,
- Hour: 9,
- Minute: 55,
- Second: 6,
- Zone: "GMT",
- },
- },
- }
- for _, test := range tests {
- hdr := Header{
- "Date": []string{test.dateStr},
- }
- date, err := hdr.Date()
- if err != nil {
- t.Errorf("Failed parsing %q: %v", test.dateStr, err)
- continue
- }
- if !reflect.DeepEqual(date, test.exp) {
- t.Errorf("Parse of %q: got %+v, want %+v", test.dateStr, date, test.exp)
- }
- }
-}
-
-func TestAddressParsing(t *testing.T) {
- tests := []struct {
- addrsStr string
- exp []*Address
- }{
- // Bare address
- {
- `jdoe@machine.example`,
- []*Address{&Address{
- Address: "jdoe@machine.example",
- }},
- },
- // RFC 5322, Appendix A.1.1
- {
- `John Doe <jdoe@machine.example>`,
- []*Address{&Address{
- Name: "John Doe",
- Address: "jdoe@machine.example",
- }},
- },
- // RFC 5322, Appendix A.1.2
- {
- `"Joe Q. Public" <john.q.public@example.com>`,
- []*Address{&Address{
- Name: "Joe Q. Public",
- Address: "john.q.public@example.com",
- }},
- },
- {
- `Mary Smith <mary@x.test>, jdoe@example.org, Who? <one@y.test>`,
- []*Address{
- &Address{
- Name: "Mary Smith",
- Address: "mary@x.test",
- },
- &Address{
- Address: "jdoe@example.org",
- },
- &Address{
- Name: "Who?",
- Address: "one@y.test",
- },
- },
- },
- {
- `<boss@nil.test>, "Giant; \"Big\" Box" <sysservices@example.net>`,
- []*Address{
- &Address{
- Address: "boss@nil.test",
- },
- &Address{
- Name: `Giant; "Big" Box`,
- Address: "sysservices@example.net",
- },
- },
- },
- // RFC 5322, Appendix A.1.3
- // TODO(dsymonds): Group addresses.
-
- // RFC 2047 "Q"-encoded ISO-8859-1 address.
- {
- `=?iso-8859-1?q?J=F6rg_Doe?= <joerg@example.com>`,
- []*Address{
- &Address{
- Name: `Jörg Doe`,
- Address: "joerg@example.com",
- },
- },
- },
- // RFC 2047 "Q"-encoded UTF-8 address.
- {
- `=?utf-8?q?J=C3=B6rg_Doe?= <joerg@example.com>`,
- []*Address{
- &Address{
- Name: `Jörg Doe`,
- Address: "joerg@example.com",
- },
- },
- },
- // RFC 2047, Section 8.
- {
- `=?ISO-8859-1?Q?Andr=E9?= Pirard <PIRARD@vm1.ulg.ac.be>`,
- []*Address{
- &Address{
- Name: `André Pirard`,
- Address: "PIRARD@vm1.ulg.ac.be",
- },
- },
- },
- // Custom example of RFC 2047 "B"-encoded ISO-8859-1 address.
- {
- `=?ISO-8859-1?B?SvZyZw==?= <joerg@example.com>`,
- []*Address{
- &Address{
- Name: `Jörg`,
- Address: "joerg@example.com",
- },
- },
- },
- // Custom example of RFC 2047 "B"-encoded UTF-8 address.
- {
- `=?UTF-8?B?SsO2cmc=?= <joerg@example.com>`,
- []*Address{
- &Address{
- Name: `Jörg`,
- Address: "joerg@example.com",
- },
- },
- },
- }
- for _, test := range tests {
- addrs, err := newAddrParser(test.addrsStr).parseAddressList()
- if err != nil {
- t.Errorf("Failed parsing %q: %v", test.addrsStr, err)
- continue
- }
- if !reflect.DeepEqual(addrs, test.exp) {
- t.Errorf("Parse of %q: got %+v, want %+v", test.addrsStr, addrs, test.exp)
- }
- }
-}
-
-func TestAddressFormatting(t *testing.T) {
- tests := []struct {
- addr *Address
- exp string
- }{
- {
- &Address{Address: "bob@example.com"},
- "<bob@example.com>",
- },
- {
- &Address{Name: "Bob", Address: "bob@example.com"},
- `"Bob" <bob@example.com>`,
- },
- {
- // note the ö (o with an umlaut)
- &Address{Name: "Böb", Address: "bob@example.com"},
- `=?utf-8?q?B=C3=B6b?= <bob@example.com>`,
- },
- }
- for _, test := range tests {
- s := test.addr.String()
- if s != test.exp {
- t.Errorf("Address%+v.String() = %v, want %v", *test.addr, s, test.exp)
- }
- }
-}
import (
"fmt"
. "math"
- "runtime"
"testing"
)
-2.517729313893103197176091e-01,
-7.39241351595676573201918e-01,
}
+// Results for 100000 * Pi + vf[i]
+var cosLarge = []float64{
+ 2.634752141185559426744e-01,
+ 1.14855126055543100712e-01,
+ 9.61912973266488928113e-01,
+ 2.9381411499556122552e-01,
+ -9.777138189880161924641e-01,
+ -9.76930413445147608049e-01,
+ 4.940088097314976789841e-01,
+ -9.15658690217517835002e-01,
+ -2.51772931436786954751e-01,
+ -7.3924135157173099849e-01,
+}
var cosh = []float64{
7.2668796942212842775517446e+01,
1.1479413465659254502011135e+03,
9.6778633541687993721617774e-01,
-6.734405869050344734943028e-01,
}
+// Results for 100000 * Pi + vf[i]
+var sinLarge = []float64{
+ -9.646661658548936063912e-01,
+ 9.933822527198506903752e-01,
+ -2.7335587036246899796e-01,
+ 9.55862576853689321268e-01,
+ -2.099421066862688873691e-01,
+ 2.13557878070308981163e-01,
+ -8.694568970959221300497e-01,
+ 4.01956668098863248917e-01,
+ 9.67786335404528727927e-01,
+ -6.7344058693131973066e-01,
+}
var sinh = []float64{
7.2661916084208532301448439e+01,
1.1479409110035194500526446e+03,
-3.843885560201130679995041e+00,
9.10988793377685105753416e-01,
}
+// Results for 100000 * Pi + vf[i]
+var tanLarge = []float64{
+ -3.66131656475596512705e+00,
+ 8.6490023287202547927e+00,
+ -2.841794195104782406e-01,
+ 3.2532901861033120983e+00,
+ 2.14727564046880001365e-01,
+ -2.18600910700688062874e-01,
+ -1.760002817699722747043e+00,
+ -4.38980891453536115952e-01,
+ -3.84388555942723509071e+00,
+ 9.1098879344275101051e-01,
+}
var tanh = []float64{
9.9990531206936338549262119e-01,
9.9999962057085294197613294e-01,
func TestTan(t *testing.T) {
for i := 0; i < len(vf); i++ {
- if f := Tan(vf[i]); !close(tan[i], f) {
+ if f := Tan(vf[i]); !veryclose(tan[i], f) {
t.Errorf("Tan(%g) = %g, want %g", vf[i], f, tan[i])
}
}
t.Errorf("Tan(%g) = %g, want %g", vfsinSC[i], f, sinSC[i])
}
}
-
- // Make sure portable Tan(Pi/2) doesn't panic (it used to).
- // The portable implementation returns NaN.
- // Assembly implementations might not,
- // because Pi/2 is not exactly representable.
- if runtime.GOARCH != "386" {
- if f := Tan(Pi / 2); !alike(f, NaN()) {
- t.Errorf("Tan(%g) = %g, want %g", Pi/2, f, NaN())
- }
- }
}
func TestTanh(t *testing.T) {
}
// Check that math functions of high angle values
-// return similar results to low angle values
+// return accurate results. [Since (vf[i] + large) - large != vf[i],
+// testing for Trig(vf[i] + large) == Trig(vf[i]), where large is
+// a multiple of 2*Pi, is misleading.]
func TestLargeCos(t *testing.T) {
large := float64(100000 * Pi)
for i := 0; i < len(vf); i++ {
- f1 := Cos(vf[i])
+ f1 := cosLarge[i]
f2 := Cos(vf[i] + large)
- if !kindaclose(f1, f2) {
+ if !close(f1, f2) {
t.Errorf("Cos(%g) = %g, want %g", vf[i]+large, f2, f1)
}
}
func TestLargeSin(t *testing.T) {
large := float64(100000 * Pi)
for i := 0; i < len(vf); i++ {
- f1 := Sin(vf[i])
+ f1 := sinLarge[i]
f2 := Sin(vf[i] + large)
- if !kindaclose(f1, f2) {
+ if !close(f1, f2) {
t.Errorf("Sin(%g) = %g, want %g", vf[i]+large, f2, f1)
}
}
func TestLargeSincos(t *testing.T) {
large := float64(100000 * Pi)
for i := 0; i < len(vf); i++ {
- f1, g1 := Sincos(vf[i])
+ f1, g1 := sinLarge[i], cosLarge[i]
f2, g2 := Sincos(vf[i] + large)
- if !kindaclose(f1, f2) || !kindaclose(g1, g2) {
+ if !close(f1, f2) || !close(g1, g2) {
t.Errorf("Sincos(%g) = %g, %g, want %g, %g", vf[i]+large, f2, g2, f1, g1)
}
}
func TestLargeTan(t *testing.T) {
large := float64(100000 * Pi)
for i := 0; i < len(vf); i++ {
- f1 := Tan(vf[i])
+ f1 := tanLarge[i]
f2 := Tan(vf[i] + large)
- if !kindaclose(f1, f2) {
+ if !close(f1, f2) {
t.Errorf("Tan(%g) = %g, want %g", vf[i]+large, f2, f1)
}
}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file provides Go implementations of elementary multi-precision
+// arithmetic operations on word vectors. Needed for platforms without
+// assembly implementations of these routines.
+
+package big
+
+// TODO(gri) Decide if Word needs to remain exported.
+
+type Word uintptr
+
+const (
+ // Compute the size _S of a Word in bytes.
+ _m = ^Word(0)
+ _logS = _m>>8&1 + _m>>16&1 + _m>>32&1
+ _S = 1 << _logS
+
+ _W = _S << 3 // word size in bits
+ _B = 1 << _W // digit base
+ _M = _B - 1 // digit mask
+
+ _W2 = _W / 2 // half word size in bits
+ _B2 = 1 << _W2 // half digit base
+ _M2 = _B2 - 1 // half digit mask
+)
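+
+// For example, on a 64-bit platform _m>>8, _m>>16, and _m>>32 all have their
+// low bit set, so _logS = 3, _S = 8, and _W = 64; on a 32-bit platform
+// _m>>32 is 0, giving _logS = 2, _S = 4, and _W = 32.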
+
+// ----------------------------------------------------------------------------
+// Elementary operations on words
+//
+// These operations are used by the vector operations below.
+
+// z1<<_W + z0 = x+y+c, with c == 0 or 1
+func addWW_g(x, y, c Word) (z1, z0 Word) {
+ yc := y + c
+ z0 = x + yc
+ if z0 < x || yc < y {
+ z1 = 1
+ }
+ return
+}
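+
+// The carry out is detected with unsigned comparisons: either y+c wraps
+// around (yc < y) or x+yc wraps around (z0 < x). For example,
+// addWW_g(_M, _M, 1) has yc = 0 and z0 = _M, so z1:z0 = 1:_M
+// (compare the sumWW test table).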
+
+// z1<<_W + z0 = x-y-c, with c == 0 or 1
+func subWW_g(x, y, c Word) (z1, z0 Word) {
+ yc := y + c
+ z0 = x - yc
+ if z0 > x || yc < y {
+ z1 = 1
+ }
+ return
+}
+
+// z1<<_W + z0 = x*y
+func mulWW(x, y Word) (z1, z0 Word) { return mulWW_g(x, y) }
+// Adapted from Warren, Hacker's Delight, p. 132.
+func mulWW_g(x, y Word) (z1, z0 Word) {
+ x0 := x & _M2
+ x1 := x >> _W2
+ y0 := y & _M2
+ y1 := y >> _W2
+ w0 := x0 * y0
+ t := x1*y0 + w0>>_W2
+ w1 := t & _M2
+ w2 := t >> _W2
+ w1 += x0 * y1
+ z1 = x1*y1 + w2 + w1>>_W2
+ z0 = x * y
+ return
+}
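+
+// mulWW_g splits x and y into half-words, x = x1*2^_W2 + x0 and
+// y = y1*2^_W2 + y0, so every partial product fits in a single Word:
+//   x*y = x1*y1*2^_W + (x1*y0 + x0*y1)*2^_W2 + x0*y0
+// z1 collects the high word (x1*y1 plus the carries w2 and w1>>_W2);
+// the low word z0 is just x*y, since Go multiplication wraps modulo 2^_W.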
+
+// z1<<_W + z0 = x*y + c
+func mulAddWWW_g(x, y, c Word) (z1, z0 Word) {
+ z1, zz0 := mulWW(x, y)
+ if z0 = zz0 + c; z0 < zz0 {
+ z1++
+ }
+ return
+}
+
+// Length of x in bits.
+func bitLen(x Word) (n int) {
+ for ; x >= 0x100; x >>= 8 {
+ n += 8
+ }
+ for ; x > 0; x >>= 1 {
+ n++
+ }
+ return
+}
+
+// log2 computes the integer binary logarithm of x.
+// The result is the integer n for which 2^n <= x < 2^(n+1).
+// If x == 0, the result is -1.
+func log2(x Word) int {
+ return bitLen(x) - 1
+}
+
+// Number of leading zeros in x.
+func leadingZeros(x Word) uint {
+ return uint(_W - bitLen(x))
+}
+
+// q = (u1<<_W + u0 - r)/y
+func divWW(x1, x0, y Word) (q, r Word) { return divWW_g(x1, x0, y) }
+// Adapted from Warren, Hacker's Delight, p. 152.
+func divWW_g(u1, u0, v Word) (q, r Word) {
+ if u1 >= v {
+ return 1<<_W - 1, 1<<_W - 1
+ }
+
+ s := leadingZeros(v)
+ v <<= s
+
+ vn1 := v >> _W2
+ vn0 := v & _M2
+ un32 := u1<<s | u0>>(_W-s)
+ un10 := u0 << s
+ un1 := un10 >> _W2
+ un0 := un10 & _M2
+ q1 := un32 / vn1
+ rhat := un32 - q1*vn1
+
+again1:
+ if q1 >= _B2 || q1*vn0 > _B2*rhat+un1 {
+ q1--
+ rhat += vn1
+ if rhat < _B2 {
+ goto again1
+ }
+ }
+
+ un21 := un32*_B2 + un1 - q1*v
+ q0 := un21 / vn1
+ rhat = un21 - q0*vn1
+
+again2:
+ if q0 >= _B2 || q0*vn0 > _B2*rhat+un0 {
+ q0--
+ rhat += vn1
+ if rhat < _B2 {
+ goto again2
+ }
+ }
+
+ return q1*_B2 + q0, (un21*_B2 + un0 - q0*v) >> s
+}
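+
+// divWW_g is schoolbook division with half-word digits: the divisor is
+// normalized (shifted left so its top bit is set), each quotient digit is
+// estimated from the top of the dividend and corrected downward in the
+// again1/again2 loops, and the remainder is denormalized by the final >> s.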
+
+func addVV(z, x, y []Word) (c Word) { return addVV_g(z, x, y) }
+func addVV_g(z, x, y []Word) (c Word) {
+ for i := range z {
+ c, z[i] = addWW_g(x[i], y[i], c)
+ }
+ return
+}
+
+func subVV(z, x, y []Word) (c Word) { return subVV_g(z, x, y) }
+func subVV_g(z, x, y []Word) (c Word) {
+ for i := range z {
+ c, z[i] = subWW_g(x[i], y[i], c)
+ }
+ return
+}
+
+func addVW(z, x []Word, y Word) (c Word) { return addVW_g(z, x, y) }
+func addVW_g(z, x []Word, y Word) (c Word) {
+ c = y
+ for i := range z {
+ c, z[i] = addWW_g(x[i], c, 0)
+ }
+ return
+}
+
+func subVW(z, x []Word, y Word) (c Word) { return subVW_g(z, x, y) }
+func subVW_g(z, x []Word, y Word) (c Word) {
+ c = y
+ for i := range z {
+ c, z[i] = subWW_g(x[i], c, 0)
+ }
+ return
+}
+
+func shlVU(z, x []Word, s uint) (c Word) { return shlVU_g(z, x, s) }
+func shlVU_g(z, x []Word, s uint) (c Word) {
+ if n := len(z); n > 0 {
+ ŝ := _W - s
+ w1 := x[n-1]
+ c = w1 >> ŝ
+ for i := n - 1; i > 0; i-- {
+ w := w1
+ w1 = x[i-1]
+ z[i] = w<<s | w1>>ŝ
+ }
+ z[0] = w1 << s
+ }
+ return
+}
+
+func shrVU(z, x []Word, s uint) (c Word) { return shrVU_g(z, x, s) }
+func shrVU_g(z, x []Word, s uint) (c Word) {
+ if n := len(z); n > 0 {
+ ŝ := _W - s
+ w1 := x[0]
+ c = w1 << ŝ
+ for i := 0; i < n-1; i++ {
+ w := w1
+ w1 = x[i+1]
+ z[i] = w>>s | w1<<ŝ
+ }
+ z[n-1] = w1 >> s
+ }
+ return
+}
+
+func mulAddVWW(z, x []Word, y, r Word) (c Word) { return mulAddVWW_g(z, x, y, r) }
+func mulAddVWW_g(z, x []Word, y, r Word) (c Word) {
+ c = r
+ for i := range z {
+ c, z[i] = mulAddWWW_g(x[i], y, c)
+ }
+ return
+}
+
+func addMulVVW(z, x []Word, y Word) (c Word) { return addMulVVW_g(z, x, y) }
+func addMulVVW_g(z, x []Word, y Word) (c Word) {
+ for i := range z {
+ z1, z0 := mulAddWWW_g(x[i], y, z[i])
+ c, z[i] = addWW_g(z0, c, 0)
+ c += z1
+ }
+ return
+}
+
+func divWVW(z []Word, xn Word, x []Word, y Word) (r Word) { return divWVW_g(z, xn, x, y) }
+func divWVW_g(z []Word, xn Word, x []Word, y Word) (r Word) {
+ r = xn
+ for i := len(z) - 1; i >= 0; i-- {
+ z[i], r = divWW_g(r, x[i], y)
+ }
+ return
+}
--- /dev/null
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package big
+
+// implemented in arith_$GOARCH.s
+func mulWW(x, y Word) (z1, z0 Word)
+func divWW(x1, x0, y Word) (q, r Word)
+func addVV(z, x, y []Word) (c Word)
+func subVV(z, x, y []Word) (c Word)
+func addVW(z, x []Word, y Word) (c Word)
+func subVW(z, x []Word, y Word) (c Word)
+func shlVU(z, x []Word, s uint) (c Word)
+func shrVU(z, x []Word, s uint) (c Word)
+func mulAddVWW(z, x []Word, y, r Word) (c Word)
+func addMulVVW(z, x []Word, y Word) (c Word)
+func divWVW(z []Word, xn Word, x []Word, y Word) (r Word)
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package big
+
+import "testing"
+
+type funWW func(x, y, c Word) (z1, z0 Word)
+type argWW struct {
+ x, y, c, z1, z0 Word
+}
+
+var sumWW = []argWW{
+ {0, 0, 0, 0, 0},
+ {0, 1, 0, 0, 1},
+ {0, 0, 1, 0, 1},
+ {0, 1, 1, 0, 2},
+ {12345, 67890, 0, 0, 80235},
+ {12345, 67890, 1, 0, 80236},
+ {_M, 1, 0, 1, 0},
+ {_M, 0, 1, 1, 0},
+ {_M, 1, 1, 1, 1},
+ {_M, _M, 0, 1, _M - 1},
+ {_M, _M, 1, 1, _M},
+}
+
+func testFunWW(t *testing.T, msg string, f funWW, a argWW) {
+ z1, z0 := f(a.x, a.y, a.c)
+ if z1 != a.z1 || z0 != a.z0 {
+ t.Errorf("%s%+v\n\tgot z1:z0 = %#x:%#x; want %#x:%#x", msg, a, z1, z0, a.z1, a.z0)
+ }
+}
+
+func TestFunWW(t *testing.T) {
+ for _, a := range sumWW {
+ arg := a
+ testFunWW(t, "addWW_g", addWW_g, arg)
+
+ arg = argWW{a.y, a.x, a.c, a.z1, a.z0}
+ testFunWW(t, "addWW_g symmetric", addWW_g, arg)
+
+ arg = argWW{a.z0, a.x, a.c, a.z1, a.y}
+ testFunWW(t, "subWW_g", subWW_g, arg)
+
+ arg = argWW{a.z0, a.y, a.c, a.z1, a.x}
+ testFunWW(t, "subWW_g symmetric", subWW_g, arg)
+ }
+}
+
+type funVV func(z, x, y []Word) (c Word)
+type argVV struct {
+ z, x, y nat
+ c Word
+}
+
+var sumVV = []argVV{
+ {},
+ {nat{0}, nat{0}, nat{0}, 0},
+ {nat{1}, nat{1}, nat{0}, 0},
+ {nat{0}, nat{_M}, nat{1}, 1},
+ {nat{80235}, nat{12345}, nat{67890}, 0},
+ {nat{_M - 1}, nat{_M}, nat{_M}, 1},
+ {nat{0, 0, 0, 0}, nat{_M, _M, _M, _M}, nat{1, 0, 0, 0}, 1},
+ {nat{0, 0, 0, _M}, nat{_M, _M, _M, _M - 1}, nat{1, 0, 0, 0}, 0},
+ {nat{0, 0, 0, 0}, nat{_M, 0, _M, 0}, nat{1, _M, 0, _M}, 1},
+}
+
+func testFunVV(t *testing.T, msg string, f funVV, a argVV) {
+ z := make(nat, len(a.z))
+ c := f(z, a.x, a.y)
+ for i, zi := range z {
+ if zi != a.z[i] {
+ t.Errorf("%s%+v\n\tgot z[%d] = %#x; want %#x", msg, a, i, zi, a.z[i])
+ break
+ }
+ }
+ if c != a.c {
+ t.Errorf("%s%+v\n\tgot c = %#x; want %#x", msg, a, c, a.c)
+ }
+}
+
+func TestFunVV(t *testing.T) {
+ for _, a := range sumVV {
+ arg := a
+ testFunVV(t, "addVV_g", addVV_g, arg)
+ testFunVV(t, "addVV", addVV, arg)
+
+ arg = argVV{a.z, a.y, a.x, a.c}
+ testFunVV(t, "addVV_g symmetric", addVV_g, arg)
+ testFunVV(t, "addVV symmetric", addVV, arg)
+
+ arg = argVV{a.x, a.z, a.y, a.c}
+ testFunVV(t, "subVV_g", subVV_g, arg)
+ testFunVV(t, "subVV", subVV, arg)
+
+ arg = argVV{a.y, a.z, a.x, a.c}
+ testFunVV(t, "subVV_g symmetric", subVV_g, arg)
+ testFunVV(t, "subVV symmetric", subVV, arg)
+ }
+}
+
+type funVW func(z, x []Word, y Word) (c Word)
+type argVW struct {
+ z, x nat
+ y Word
+ c Word
+}
+
+var sumVW = []argVW{
+ {},
+ {nil, nil, 2, 2},
+ {nat{0}, nat{0}, 0, 0},
+ {nat{1}, nat{0}, 1, 0},
+ {nat{1}, nat{1}, 0, 0},
+ {nat{0}, nat{_M}, 1, 1},
+ {nat{0, 0, 0, 0}, nat{_M, _M, _M, _M}, 1, 1},
+}
+
+var prodVW = []argVW{
+ {},
+ {nat{0}, nat{0}, 0, 0},
+ {nat{0}, nat{_M}, 0, 0},
+ {nat{0}, nat{0}, _M, 0},
+ {nat{1}, nat{1}, 1, 0},
+ {nat{22793}, nat{991}, 23, 0},
+ {nat{0, 0, 0, 22793}, nat{0, 0, 0, 991}, 23, 0},
+ {nat{0, 0, 0, 0}, nat{7893475, 7395495, 798547395, 68943}, 0, 0},
+ {nat{0, 0, 0, 0}, nat{0, 0, 0, 0}, 894375984, 0},
+ {nat{_M << 1 & _M}, nat{_M}, 1 << 1, _M >> (_W - 1)},
+ {nat{_M << 7 & _M}, nat{_M}, 1 << 7, _M >> (_W - 7)},
+ {nat{_M << 7 & _M, _M, _M, _M}, nat{_M, _M, _M, _M}, 1 << 7, _M >> (_W - 7)},
+}
+
+var lshVW = []argVW{
+ {},
+ {nat{0}, nat{0}, 0, 0},
+ {nat{0}, nat{0}, 1, 0},
+ {nat{0}, nat{0}, 20, 0},
+
+ {nat{_M}, nat{_M}, 0, 0},
+ {nat{_M << 1 & _M}, nat{_M}, 1, 1},
+ {nat{_M << 20 & _M}, nat{_M}, 20, _M >> (_W - 20)},
+
+ {nat{_M, _M, _M}, nat{_M, _M, _M}, 0, 0},
+ {nat{_M << 1 & _M, _M, _M}, nat{_M, _M, _M}, 1, 1},
+ {nat{_M << 20 & _M, _M, _M}, nat{_M, _M, _M}, 20, _M >> (_W - 20)},
+}
+
+var rshVW = []argVW{
+ {},
+ {nat{0}, nat{0}, 0, 0},
+ {nat{0}, nat{0}, 1, 0},
+ {nat{0}, nat{0}, 20, 0},
+
+ {nat{_M}, nat{_M}, 0, 0},
+ {nat{_M >> 1}, nat{_M}, 1, _M << (_W - 1) & _M},
+ {nat{_M >> 20}, nat{_M}, 20, _M << (_W - 20) & _M},
+
+ {nat{_M, _M, _M}, nat{_M, _M, _M}, 0, 0},
+ {nat{_M, _M, _M >> 1}, nat{_M, _M, _M}, 1, _M << (_W - 1) & _M},
+ {nat{_M, _M, _M >> 20}, nat{_M, _M, _M}, 20, _M << (_W - 20) & _M},
+}
+
+func testFunVW(t *testing.T, msg string, f funVW, a argVW) {
+ z := make(nat, len(a.z))
+ c := f(z, a.x, a.y)
+ for i, zi := range z {
+ if zi != a.z[i] {
+ t.Errorf("%s%+v\n\tgot z[%d] = %#x; want %#x", msg, a, i, zi, a.z[i])
+ break
+ }
+ }
+ if c != a.c {
+ t.Errorf("%s%+v\n\tgot c = %#x; want %#x", msg, a, c, a.c)
+ }
+}
+
+func makeFunVW(f func(z, x []Word, s uint) (c Word)) funVW {
+ return func(z, x []Word, s Word) (c Word) {
+ return f(z, x, uint(s))
+ }
+}
+
+func TestFunVW(t *testing.T) {
+ for _, a := range sumVW {
+ arg := a
+ testFunVW(t, "addVW_g", addVW_g, arg)
+ testFunVW(t, "addVW", addVW, arg)
+
+ arg = argVW{a.x, a.z, a.y, a.c}
+ testFunVW(t, "subVW_g", subVW_g, arg)
+ testFunVW(t, "subVW", subVW, arg)
+ }
+
+ shlVW_g := makeFunVW(shlVU_g)
+ shlVW := makeFunVW(shlVU)
+ for _, a := range lshVW {
+ arg := a
+ testFunVW(t, "shlVU_g", shlVW_g, arg)
+ testFunVW(t, "shlVU", shlVW, arg)
+ }
+
+ shrVW_g := makeFunVW(shrVU_g)
+ shrVW := makeFunVW(shrVU)
+ for _, a := range rshVW {
+ arg := a
+ testFunVW(t, "shrVU_g", shrVW_g, arg)
+ testFunVW(t, "shrVU", shrVW, arg)
+ }
+}
+
+type funVWW func(z, x []Word, y, r Word) (c Word)
+type argVWW struct {
+ z, x nat
+ y, r Word
+ c Word
+}
+
+var prodVWW = []argVWW{
+ {},
+ {nat{0}, nat{0}, 0, 0, 0},
+ {nat{991}, nat{0}, 0, 991, 0},
+ {nat{0}, nat{_M}, 0, 0, 0},
+ {nat{991}, nat{_M}, 0, 991, 0},
+ {nat{0}, nat{0}, _M, 0, 0},
+ {nat{991}, nat{0}, _M, 991, 0},
+ {nat{1}, nat{1}, 1, 0, 0},
+ {nat{992}, nat{1}, 1, 991, 0},
+ {nat{22793}, nat{991}, 23, 0, 0},
+ {nat{22800}, nat{991}, 23, 7, 0},
+ {nat{0, 0, 0, 22793}, nat{0, 0, 0, 991}, 23, 0, 0},
+ {nat{7, 0, 0, 22793}, nat{0, 0, 0, 991}, 23, 7, 0},
+ {nat{0, 0, 0, 0}, nat{7893475, 7395495, 798547395, 68943}, 0, 0, 0},
+ {nat{991, 0, 0, 0}, nat{7893475, 7395495, 798547395, 68943}, 0, 991, 0},
+ {nat{0, 0, 0, 0}, nat{0, 0, 0, 0}, 894375984, 0, 0},
+ {nat{991, 0, 0, 0}, nat{0, 0, 0, 0}, 894375984, 991, 0},
+ {nat{_M << 1 & _M}, nat{_M}, 1 << 1, 0, _M >> (_W - 1)},
+ {nat{_M<<1&_M + 1}, nat{_M}, 1 << 1, 1, _M >> (_W - 1)},
+ {nat{_M << 7 & _M}, nat{_M}, 1 << 7, 0, _M >> (_W - 7)},
+ {nat{_M<<7&_M + 1<<6}, nat{_M}, 1 << 7, 1 << 6, _M >> (_W - 7)},
+ {nat{_M << 7 & _M, _M, _M, _M}, nat{_M, _M, _M, _M}, 1 << 7, 0, _M >> (_W - 7)},
+ {nat{_M<<7&_M + 1<<6, _M, _M, _M}, nat{_M, _M, _M, _M}, 1 << 7, 1 << 6, _M >> (_W - 7)},
+}
+
+func testFunVWW(t *testing.T, msg string, f funVWW, a argVWW) {
+ z := make(nat, len(a.z))
+ c := f(z, a.x, a.y, a.r)
+ for i, zi := range z {
+ if zi != a.z[i] {
+ t.Errorf("%s%+v\n\tgot z[%d] = %#x; want %#x", msg, a, i, zi, a.z[i])
+ break
+ }
+ }
+ if c != a.c {
+ t.Errorf("%s%+v\n\tgot c = %#x; want %#x", msg, a, c, a.c)
+ }
+}
+
+// TODO(gri) mulAddVWW and divWVW are symmetric operations but
+// their signature is not symmetric. Try to unify.
+
+type funWVW func(z []Word, xn Word, x []Word, y Word) (r Word)
+type argWVW struct {
+ z nat
+ xn Word
+ x nat
+ y Word
+ r Word
+}
+
+func testFunWVW(t *testing.T, msg string, f funWVW, a argWVW) {
+ z := make(nat, len(a.z))
+ r := f(z, a.xn, a.x, a.y)
+ for i, zi := range z {
+ if zi != a.z[i] {
+ t.Errorf("%s%+v\n\tgot z[%d] = %#x; want %#x", msg, a, i, zi, a.z[i])
+ break
+ }
+ }
+ if r != a.r {
+ t.Errorf("%s%+v\n\tgot r = %#x; want %#x", msg, a, r, a.r)
+ }
+}
+
+func TestFunVWW(t *testing.T) {
+ for _, a := range prodVWW {
+ arg := a
+ testFunVWW(t, "mulAddVWW_g", mulAddVWW_g, arg)
+ testFunVWW(t, "mulAddVWW", mulAddVWW, arg)
+
+ if a.y != 0 && a.r < a.y {
+ arg := argWVW{a.x, a.c, a.z, a.y, a.r}
+ testFunWVW(t, "divWVW_g", divWVW_g, arg)
+ testFunWVW(t, "divWVW", divWVW, arg)
+ }
+ }
+}
+
+var mulWWTests = []struct {
+ x, y Word
+ q, r Word
+}{
+ {_M, _M, _M - 1, 1},
+ // 32 bit only: {0xc47dfa8c, 50911, 0x98a4, 0x998587f4},
+}
+
+func TestMulWW(t *testing.T) {
+ for i, test := range mulWWTests {
+ q, r := mulWW_g(test.x, test.y)
+ if q != test.q || r != test.r {
+ t.Errorf("#%d got (%x, %x) want (%x, %x)", i, q, r, test.q, test.r)
+ }
+ }
+}
+
+var mulAddWWWTests = []struct {
+ x, y, c Word
+ q, r Word
+}{
+ // TODO(agl): These will only work on 64-bit platforms.
+ // {15064310297182388543, 0xe7df04d2d35d5d80, 13537600649892366549, 13644450054494335067, 10832252001440893781},
+ // {15064310297182388543, 0xdab2f18048baa68d, 13644450054494335067, 12869334219691522700, 14233854684711418382},
+ {_M, _M, 0, _M - 1, 1},
+ {_M, _M, _M, _M, 0},
+}
+
+func TestMulAddWWW(t *testing.T) {
+ for i, test := range mulAddWWWTests {
+ q, r := mulAddWWW_g(test.x, test.y, test.c)
+ if q != test.q || r != test.r {
+ t.Errorf("#%d got (%x, %x) want (%x, %x)", i, q, r, test.q, test.r)
+ }
+ }
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file prints execution times for the Mul benchmark
+// given different Karatsuba thresholds. The result may be
+// used to manually fine-tune the threshold constant. The
+// results are somewhat fragile; use repeated runs to get
+// a clear picture.
+
+// Usage: gotest -calibrate
+
+package big
+
+import (
+ "flag"
+ "fmt"
+ "testing"
+ "time"
+)
+
+var calibrate = flag.Bool("calibrate", false, "run calibration test")
+
+// measure returns the time to run f
+func measure(f func()) int64 {
+ const N = 100
+ start := time.Nanoseconds()
+ for i := N; i > 0; i-- {
+ f()
+ }
+ stop := time.Nanoseconds()
+ return (stop - start) / N
+}
+
+func computeThresholds() {
+ fmt.Printf("Multiplication times for varying Karatsuba thresholds\n")
+ fmt.Printf("(run repeatedly for good results)\n")
+
+ // determine Tb, the work load execution time using basic multiplication
+ karatsubaThreshold = 1e9 // disable karatsuba
+ Tb := measure(benchmarkMulLoad)
+ fmt.Printf("Tb = %dns\n", Tb)
+
+ // thresholds
+ n := 8 // any lower values for the threshold lead to very slow multiplies
+ th1 := -1
+ th2 := -1
+
+ var deltaOld int64
+ for count := -1; count != 0; count-- {
+ // determine Tk, the work load execution time using Karatsuba multiplication
+ karatsubaThreshold = n // enable karatsuba
+ Tk := measure(benchmarkMulLoad)
+
+ // improvement over Tb
+ delta := (Tb - Tk) * 100 / Tb
+
+ fmt.Printf("n = %3d Tk = %8dns %4d%%", n, Tk, delta)
+
+ // determine break-even point
+ if Tk < Tb && th1 < 0 {
+ th1 = n
+ fmt.Print(" break-even point")
+ }
+
+ // determine diminishing return
+ if 0 < delta && delta < deltaOld && th2 < 0 {
+ th2 = n
+ fmt.Print(" diminishing return")
+ }
+ deltaOld = delta
+
+ fmt.Println()
+
+ // trigger counter
+ if th1 >= 0 && th2 >= 0 && count < 0 {
+ count = 20 // this many extra measurements after we got both thresholds
+ }
+
+ n++
+ }
+}
+
+func TestCalibrate(t *testing.T) {
+ if *calibrate {
+ computeThresholds()
+ }
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// A little test program and benchmark for rational arithmetic.
+// Computes a Hilbert matrix, its inverse, multiplies them
+// and verifies that the product is the identity matrix.
+
+package big
+
+import (
+ "fmt"
+ "testing"
+)
+
+type matrix struct {
+ n, m int
+ a []*Rat
+}
+
+func (a *matrix) at(i, j int) *Rat {
+ if !(0 <= i && i < a.n && 0 <= j && j < a.m) {
+ panic("index out of range")
+ }
+ return a.a[i*a.m+j]
+}
+
+func (a *matrix) set(i, j int, x *Rat) {
+ if !(0 <= i && i < a.n && 0 <= j && j < a.m) {
+ panic("index out of range")
+ }
+ a.a[i*a.m+j] = x
+}
+
+func newMatrix(n, m int) *matrix {
+ if !(0 <= n && 0 <= m) {
+ panic("illegal matrix")
+ }
+ a := new(matrix)
+ a.n = n
+ a.m = m
+ a.a = make([]*Rat, n*m)
+ return a
+}
+
+func newUnit(n int) *matrix {
+ a := newMatrix(n, n)
+ for i := 0; i < n; i++ {
+ for j := 0; j < n; j++ {
+ x := NewRat(0, 1)
+ if i == j {
+ x.SetInt64(1)
+ }
+ a.set(i, j, x)
+ }
+ }
+ return a
+}
+
+func newHilbert(n int) *matrix {
+ a := newMatrix(n, n)
+ for i := 0; i < n; i++ {
+ for j := 0; j < n; j++ {
+ a.set(i, j, NewRat(1, int64(i+j+1)))
+ }
+ }
+ return a
+}
+
+func newInverseHilbert(n int) *matrix {
+ a := newMatrix(n, n)
+ for i := 0; i < n; i++ {
+ for j := 0; j < n; j++ {
+ x1 := new(Rat).SetInt64(int64(i + j + 1))
+ x2 := new(Rat).SetInt(new(Int).Binomial(int64(n+i), int64(n-j-1)))
+ x3 := new(Rat).SetInt(new(Int).Binomial(int64(n+j), int64(n-i-1)))
+ x4 := new(Rat).SetInt(new(Int).Binomial(int64(i+j), int64(i)))
+
+ x1.Mul(x1, x2)
+ x1.Mul(x1, x3)
+ x1.Mul(x1, x4)
+ x1.Mul(x1, x4)
+
+ if (i+j)&1 != 0 {
+ x1.Neg(x1)
+ }
+
+ a.set(i, j, x1)
+ }
+ }
+ return a
+}
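+
+// newInverseHilbert uses the closed form for the inverse Hilbert matrix
+// (0-based indices):
+//   (H^-1)[i][j] = (-1)^(i+j) * (i+j+1) * C(n+i,n-j-1) * C(n+j,n-i-1) * C(i+j,i)^2
+// which is the product x1*x2*x3*x4*x4 computed above.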
+
+func (a *matrix) mul(b *matrix) *matrix {
+ if a.m != b.n {
+ panic("illegal matrix multiply")
+ }
+ c := newMatrix(a.n, b.m)
+ for i := 0; i < c.n; i++ {
+ for j := 0; j < c.m; j++ {
+ x := NewRat(0, 1)
+ for k := 0; k < a.m; k++ {
+ x.Add(x, new(Rat).Mul(a.at(i, k), b.at(k, j)))
+ }
+ c.set(i, j, x)
+ }
+ }
+ return c
+}
+
+func (a *matrix) eql(b *matrix) bool {
+ if a.n != b.n || a.m != b.m {
+ return false
+ }
+ for i := 0; i < a.n; i++ {
+ for j := 0; j < a.m; j++ {
+ if a.at(i, j).Cmp(b.at(i, j)) != 0 {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+func (a *matrix) String() string {
+ s := ""
+ for i := 0; i < a.n; i++ {
+ for j := 0; j < a.m; j++ {
+ s += fmt.Sprintf("\t%s", a.at(i, j))
+ }
+ s += "\n"
+ }
+ return s
+}
+
+func doHilbert(t *testing.T, n int) {
+ a := newHilbert(n)
+ b := newInverseHilbert(n)
+ I := newUnit(n)
+ ab := a.mul(b)
+ if !ab.eql(I) {
+ if t == nil {
+ panic("Hilbert failed")
+ }
+ t.Errorf("a = %s\n", a)
+ t.Errorf("b = %s\n", b)
+ t.Errorf("a*b = %s\n", ab)
+ t.Errorf("I = %s\n", I)
+ }
+}
+
+func TestHilbert(t *testing.T) {
+ doHilbert(t, 10)
+}
+
+func BenchmarkHilbert(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ doHilbert(nil, 10)
+ }
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements signed multi-precision integers.
+
+package big
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "math/rand"
+ "strings"
+)
+
+// An Int represents a signed multi-precision integer.
+// The zero value for an Int represents the value 0.
+type Int struct {
+ neg bool // sign
+ abs nat // absolute value of the integer
+}
+
+var intOne = &Int{false, natOne}
+
+// Sign returns:
+//
+// -1 if x < 0
+// 0 if x == 0
+// +1 if x > 0
+//
+func (x *Int) Sign() int {
+ if len(x.abs) == 0 {
+ return 0
+ }
+ if x.neg {
+ return -1
+ }
+ return 1
+}
+
+// SetInt64 sets z to x and returns z.
+func (z *Int) SetInt64(x int64) *Int {
+ neg := false
+ if x < 0 {
+ neg = true
+ x = -x
+ }
+ z.abs = z.abs.setUint64(uint64(x))
+ z.neg = neg
+ return z
+}
+
+// NewInt allocates and returns a new Int set to x.
+func NewInt(x int64) *Int {
+ return new(Int).SetInt64(x)
+}
+
+// Set sets z to x and returns z.
+func (z *Int) Set(x *Int) *Int {
+ if z != x {
+ z.abs = z.abs.set(x.abs)
+ z.neg = x.neg
+ }
+ return z
+}
+
+// Abs sets z to |x| (the absolute value of x) and returns z.
+func (z *Int) Abs(x *Int) *Int {
+ z.Set(x)
+ z.neg = false
+ return z
+}
+
+// Neg sets z to -x and returns z.
+func (z *Int) Neg(x *Int) *Int {
+ z.Set(x)
+ z.neg = len(z.abs) > 0 && !z.neg // 0 has no sign
+ return z
+}
+
+// Add sets z to the sum x+y and returns z.
+func (z *Int) Add(x, y *Int) *Int {
+ neg := x.neg
+ if x.neg == y.neg {
+ // x + y == x + y
+ // (-x) + (-y) == -(x + y)
+ z.abs = z.abs.add(x.abs, y.abs)
+ } else {
+ // x + (-y) == x - y == -(y - x)
+ // (-x) + y == y - x == -(x - y)
+ if x.abs.cmp(y.abs) >= 0 {
+ z.abs = z.abs.sub(x.abs, y.abs)
+ } else {
+ neg = !neg
+ z.abs = z.abs.sub(y.abs, x.abs)
+ }
+ }
+ z.neg = len(z.abs) > 0 && neg // 0 has no sign
+ return z
+}
+
+// Sub sets z to the difference x-y and returns z.
+func (z *Int) Sub(x, y *Int) *Int {
+ neg := x.neg
+ if x.neg != y.neg {
+ // x - (-y) == x + y
+ // (-x) - y == -(x + y)
+ z.abs = z.abs.add(x.abs, y.abs)
+ } else {
+ // x - y == x - y == -(y - x)
+ // (-x) - (-y) == y - x == -(x - y)
+ if x.abs.cmp(y.abs) >= 0 {
+ z.abs = z.abs.sub(x.abs, y.abs)
+ } else {
+ neg = !neg
+ z.abs = z.abs.sub(y.abs, x.abs)
+ }
+ }
+ z.neg = len(z.abs) > 0 && neg // 0 has no sign
+ return z
+}
+
+// Mul sets z to the product x*y and returns z.
+func (z *Int) Mul(x, y *Int) *Int {
+ // x * y == x * y
+ // x * (-y) == -(x * y)
+ // (-x) * y == -(x * y)
+ // (-x) * (-y) == x * y
+ z.abs = z.abs.mul(x.abs, y.abs)
+ z.neg = len(z.abs) > 0 && x.neg != y.neg // 0 has no sign
+ return z
+}
+
+// MulRange sets z to the product of all integers
+// in the range [a, b] inclusively and returns z.
+// If a > b (empty range), the result is 1.
+func (z *Int) MulRange(a, b int64) *Int {
+ switch {
+ case a > b:
+ return z.SetInt64(1) // empty range
+ case a <= 0 && b >= 0:
+ return z.SetInt64(0) // range includes 0
+ }
+ // a <= b && (b < 0 || a > 0)
+
+ neg := false
+ if a < 0 {
+ neg = (b-a)&1 == 0
+ a, b = -b, -a
+ }
+
+ z.abs = z.abs.mulRange(uint64(a), uint64(b))
+ z.neg = neg
+ return z
+}
+
+// Binomial sets z to the binomial coefficient of (n, k) and returns z.
+func (z *Int) Binomial(n, k int64) *Int {
+ var a, b Int
+ a.MulRange(n-k+1, n)
+ b.MulRange(1, k)
+ return z.Quo(&a, &b)
+}
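+
+// For example, Binomial(5, 2) divides MulRange(4, 5) == 20 by
+// MulRange(1, 2) == 2, giving C(5, 2) == 10.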
+
+// Quo sets z to the quotient x/y for y != 0 and returns z.
+// If y == 0, a division-by-zero run-time panic occurs.
+// Quo implements truncated division (like Go); see QuoRem for more details.
+func (z *Int) Quo(x, y *Int) *Int {
+ z.abs, _ = z.abs.div(nil, x.abs, y.abs)
+ z.neg = len(z.abs) > 0 && x.neg != y.neg // 0 has no sign
+ return z
+}
+
+// Rem sets z to the remainder x%y for y != 0 and returns z.
+// If y == 0, a division-by-zero run-time panic occurs.
+// Rem implements truncated modulus (like Go); see QuoRem for more details.
+func (z *Int) Rem(x, y *Int) *Int {
+ _, z.abs = nat{}.div(z.abs, x.abs, y.abs)
+ z.neg = len(z.abs) > 0 && x.neg // 0 has no sign
+ return z
+}
+
+// QuoRem sets z to the quotient x/y and r to the remainder x%y
+// and returns the pair (z, r) for y != 0.
+// If y == 0, a division-by-zero run-time panic occurs.
+//
+// QuoRem implements T-division and modulus (like Go):
+//
+// q = x/y with the result truncated to zero
+// r = x - y*q
+//
+// (See Daan Leijen, ``Division and Modulus for Computer Scientists''.)
+//
+func (z *Int) QuoRem(x, y, r *Int) (*Int, *Int) {
+ z.abs, r.abs = z.abs.div(r.abs, x.abs, y.abs)
+ z.neg, r.neg = len(z.abs) > 0 && x.neg != y.neg, len(r.abs) > 0 && x.neg // 0 has no sign
+ return z, r
+}
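+
+// For example, QuoRem(-5, 3) yields q = -1, r = -2: the quotient is
+// truncated toward zero and the remainder takes the sign of the dividend
+// (see the divisionSignsTests table).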
+
+// Div sets z to the quotient x/y for y != 0 and returns z.
+// If y == 0, a division-by-zero run-time panic occurs.
+// Div implements Euclidean division (unlike Go); see DivMod for more details.
+func (z *Int) Div(x, y *Int) *Int {
+ y_neg := y.neg // z may be an alias for y
+ var r Int
+ z.QuoRem(x, y, &r)
+ if r.neg {
+ if y_neg {
+ z.Add(z, intOne)
+ } else {
+ z.Sub(z, intOne)
+ }
+ }
+ return z
+}
+
+// Mod sets z to the modulus x%y for y != 0 and returns z.
+// If y == 0, a division-by-zero run-time panic occurs.
+// Mod implements Euclidean modulus (unlike Go); see DivMod for more details.
+func (z *Int) Mod(x, y *Int) *Int {
+ y0 := y // save y
+ if z == y || alias(z.abs, y.abs) {
+ y0 = new(Int).Set(y)
+ }
+ var q Int
+ q.QuoRem(x, y, z)
+ if z.neg {
+ if y0.neg {
+ z.Sub(z, y0)
+ } else {
+ z.Add(z, y0)
+ }
+ }
+ return z
+}
+
+// DivMod sets z to the quotient x div y and m to the modulus x mod y
+// and returns the pair (z, m) for y != 0.
+// If y == 0, a division-by-zero run-time panic occurs.
+//
+// DivMod implements Euclidean division and modulus (unlike Go):
+//
+// q = x div y such that
+// m = x - y*q with 0 <= m < |y|
+//
+// (See Raymond T. Boute, ``The Euclidean definition of the functions
+// div and mod''. ACM Transactions on Programming Languages and
+// Systems (TOPLAS), 14(2):127-144, New York, NY, USA, 4/1992.
+// ACM press.)
+//
+func (z *Int) DivMod(x, y, m *Int) (*Int, *Int) {
+ y0 := y // save y
+ if z == y || alias(z.abs, y.abs) {
+ y0 = new(Int).Set(y)
+ }
+ z.QuoRem(x, y, m)
+ if m.neg {
+ if y0.neg {
+ z.Add(z, intOne)
+ m.Sub(m, y0)
+ } else {
+ z.Sub(z, intOne)
+ m.Add(m, y0)
+ }
+ }
+ return z, m
+}
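+
+// For the same inputs, DivMod(-5, 3) yields q = -2, m = 1, so that
+// 0 <= m < |y| always holds, in contrast to QuoRem above.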
+
+// Cmp compares x and y and returns:
+//
+// -1 if x < y
+// 0 if x == y
+// +1 if x > y
+//
+func (x *Int) Cmp(y *Int) (r int) {
+ // x cmp y == x cmp y
+ // x cmp (-y) == x
+ // (-x) cmp y == y
+ // (-x) cmp (-y) == -(x cmp y)
+ switch {
+ case x.neg == y.neg:
+ r = x.abs.cmp(y.abs)
+ if x.neg {
+ r = -r
+ }
+ case x.neg:
+ r = -1
+ default:
+ r = 1
+ }
+ return
+}
+
+func (x *Int) String() string {
+ switch {
+ case x == nil:
+ return "<nil>"
+ case x.neg:
+ return "-" + x.abs.decimalString()
+ }
+ return x.abs.decimalString()
+}
+
+func charset(ch rune) string {
+ switch ch {
+ case 'b':
+ return lowercaseDigits[0:2]
+ case 'o':
+ return lowercaseDigits[0:8]
+ case 'd', 's', 'v':
+ return lowercaseDigits[0:10]
+ case 'x':
+ return lowercaseDigits[0:16]
+ case 'X':
+ return uppercaseDigits[0:16]
+ }
+ return "" // unknown format
+}
+
+// write count copies of text to s
+func writeMultiple(s fmt.State, text string, count int) {
+ if len(text) > 0 {
+ b := []byte(text)
+ for ; count > 0; count-- {
+ s.Write(b)
+ }
+ }
+}
+
+// Format is a support routine for fmt.Formatter. It accepts
+// the formats 'b' (binary), 'o' (octal), 'd' (decimal), 'x'
+// (lowercase hexadecimal), and 'X' (uppercase hexadecimal).
+// Also supported are the full suite of package fmt's format
+// verbs for integral types, including '+', '-', and ' '
+// for sign control, '#' for leading zero in octal and for
+// hexadecimal, a leading "0x" or "0X" for "%#x" and "%#X"
+// respectively, specification of minimum digits precision,
+// output field width, space or zero padding, and left or
+// right justification.
+//
+func (x *Int) Format(s fmt.State, ch rune) {
+ cs := charset(ch)
+
+ // special cases
+ switch {
+ case cs == "":
+ // unknown format
+ fmt.Fprintf(s, "%%!%c(big.Int=%s)", ch, x.String())
+ return
+ case x == nil:
+ fmt.Fprint(s, "<nil>")
+ return
+ }
+
+ // determine sign character
+ sign := ""
+ switch {
+ case x.neg:
+ sign = "-"
+ case s.Flag('+'): // supersedes ' ' when both specified
+ sign = "+"
+ case s.Flag(' '):
+ sign = " "
+ }
+
+ // determine prefix characters for indicating output base
+ prefix := ""
+ if s.Flag('#') {
+ switch ch {
+ case 'o': // octal
+ prefix = "0"
+ case 'x': // hexadecimal
+ prefix = "0x"
+ case 'X':
+ prefix = "0X"
+ }
+ }
+
+ // determine digits with base set by len(cs) and digit characters from cs
+ digits := x.abs.string(cs)
+
+ // number of characters for the three classes of number padding
+ var left int // space characters to left of digits for right justification ("%8d")
+ var zeroes int // zero characters (actually cs[0]) as left-most digits ("%.8d")
+ var right int // space characters to right of digits for left justification ("%-8d")
+
+ // determine number padding from precision: the least number of digits to output
+ precision, precisionSet := s.Precision()
+ if precisionSet {
+ switch {
+ case len(digits) < precision:
+ zeroes = precision - len(digits) // count of zero padding
+ case digits == "0" && precision == 0:
+ return // print nothing if zero value (x == 0) and zero precision ("." or ".0")
+ }
+ }
+
+ // determine field pad from width: the least number of characters to output
+ length := len(sign) + len(prefix) + zeroes + len(digits)
+ if width, widthSet := s.Width(); widthSet && length < width { // pad as specified
+ switch d := width - length; {
+ case s.Flag('-'):
+ // pad on the right with spaces; supersedes '0' when both specified
+ right = d
+ case s.Flag('0') && !precisionSet:
+ // pad with zeroes unless precision also specified
+ zeroes = d
+ default:
+ // pad on the left with spaces
+ left = d
+ }
+ }
+
+ // print number as [left pad][sign][prefix][zero pad][digits][right pad]
+ writeMultiple(s, " ", left)
+ writeMultiple(s, sign, 1)
+ writeMultiple(s, prefix, 1)
+ writeMultiple(s, "0", zeroes)
+ writeMultiple(s, digits, 1)
+ writeMultiple(s, " ", right)
+}
+
+// scan sets z to the integer value corresponding to the longest possible prefix
+// read from r representing a signed integer number in a given conversion base.
+// It returns z, the actual conversion base used, and an error, if any. In the
+// error case, the value of z is undefined but the returned value is nil. The
+// syntax follows the syntax of integer literals in Go.
+//
+// The base argument must be 0 or a value from 2 through MaxBase. If the base
+// is 0, the string prefix determines the actual conversion base. A prefix of
+// ``0x'' or ``0X'' selects base 16; the ``0'' prefix selects base 8, and a
+// ``0b'' or ``0B'' prefix selects base 2. Otherwise the selected base is 10.
+//
+func (z *Int) scan(r io.RuneScanner, base int) (*Int, int, error) {
+ // determine sign
+ ch, _, err := r.ReadRune()
+ if err != nil {
+ return nil, 0, err
+ }
+ neg := false
+ switch ch {
+ case '-':
+ neg = true
+ case '+': // nothing to do
+ default:
+ r.UnreadRune()
+ }
+
+ // determine mantissa
+ z.abs, base, err = z.abs.scan(r, base)
+ if err != nil {
+ return nil, base, err
+ }
+ z.neg = len(z.abs) > 0 && neg // 0 has no sign
+
+ return z, base, nil
+}
+
+// Scan is a support routine for fmt.Scanner; it sets z to the value of
+// the scanned number. It accepts the formats 'b' (binary), 'o' (octal),
+// 'd' (decimal), 'x' (lowercase hexadecimal), and 'X' (uppercase hexadecimal).
+func (z *Int) Scan(s fmt.ScanState, ch rune) error {
+ s.SkipSpace() // skip leading space characters
+ base := 0
+ switch ch {
+ case 'b':
+ base = 2
+ case 'o':
+ base = 8
+ case 'd':
+ base = 10
+ case 'x', 'X':
+ base = 16
+ case 's', 'v':
+ // let scan determine the base
+ default:
+ return errors.New("Int.Scan: invalid verb")
+ }
+ _, _, err := z.scan(s, base)
+ return err
+}
+
+// Int64 returns the int64 representation of x.
+// If x cannot be represented in an int64, the result is undefined.
+func (x *Int) Int64() int64 {
+ if len(x.abs) == 0 {
+ return 0
+ }
+ v := int64(x.abs[0])
+ if _W == 32 && len(x.abs) > 1 {
+ v |= int64(x.abs[1]) << 32
+ }
+ if x.neg {
+ v = -v
+ }
+ return v
+}
+
+// SetString sets z to the value of s, interpreted in the given base,
+// and returns z and a boolean indicating success. If SetString fails,
+// the value of z is undefined but the returned value is nil.
+//
+// The base argument must be 0 or a value from 2 through MaxBase. If the base
+// is 0, the string prefix determines the actual conversion base. A prefix of
+// ``0x'' or ``0X'' selects base 16; the ``0'' prefix selects base 8, and a
+// ``0b'' or ``0B'' prefix selects base 2. Otherwise the selected base is 10.
+//
+func (z *Int) SetString(s string, base int) (*Int, bool) {
+ r := strings.NewReader(s)
+ _, _, err := z.scan(r, base)
+ if err != nil {
+ return nil, false
+ }
+ _, _, err = r.ReadRune()
+ if err != io.EOF {
+ return nil, false
+ }
+ return z, true // err == io.EOF => scan consumed all of s
+}
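+
+// For example, SetString("0x10", 0) yields 16 because the prefix selects
+// base 16, while SetString("0x10", 16) fails since 'x' is not a base-16
+// digit (see stringTests).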
+
+// SetBytes interprets buf as the bytes of a big-endian unsigned
+// integer, sets z to that value, and returns z.
+func (z *Int) SetBytes(buf []byte) *Int {
+ z.abs = z.abs.setBytes(buf)
+ z.neg = false
+ return z
+}
+
+// Bytes returns the absolute value of z as a big-endian byte slice.
+func (z *Int) Bytes() []byte {
+ buf := make([]byte, len(z.abs)*_S)
+ return buf[z.abs.bytes(buf):]
+}
+
+// BitLen returns the length of the absolute value of z in bits.
+// The bit length of 0 is 0.
+func (z *Int) BitLen() int {
+ return z.abs.bitLen()
+}
+
+// Exp sets z = x**y mod m. If m is nil, z = x**y.
+// See Knuth, volume 2, section 4.6.3.
+func (z *Int) Exp(x, y, m *Int) *Int {
+ if y.neg || len(y.abs) == 0 {
+ neg := x.neg
+ z.SetInt64(1)
+ z.neg = neg
+ return z
+ }
+
+ var mWords nat
+ if m != nil {
+ mWords = m.abs
+ }
+
+ z.abs = z.abs.expNN(x.abs, y.abs, mWords)
+ z.neg = len(z.abs) > 0 && x.neg && y.abs[0]&1 == 1 // 0 has no sign
+ return z
+}
+
+// GcdInt sets d to the greatest common divisor of a and b, which must be
+// positive numbers.
+// If x and y are not nil, GcdInt sets x and y such that d = a*x + b*y.
+// If either a or b is not positive, GcdInt sets d = x = y = 0.
+func GcdInt(d, x, y, a, b *Int) {
+ if a.neg || b.neg {
+ d.SetInt64(0)
+ if x != nil {
+ x.SetInt64(0)
+ }
+ if y != nil {
+ y.SetInt64(0)
+ }
+ return
+ }
+
+ A := new(Int).Set(a)
+ B := new(Int).Set(b)
+
+ X := new(Int)
+ Y := new(Int).SetInt64(1)
+
+ lastX := new(Int).SetInt64(1)
+ lastY := new(Int)
+
+ q := new(Int)
+ temp := new(Int)
+
+ for len(B.abs) > 0 {
+ r := new(Int)
+ q, r = q.QuoRem(A, B, r)
+
+ A, B = B, r
+
+ temp.Set(X)
+ X.Mul(X, q)
+ X.neg = !X.neg
+ X.Add(X, lastX)
+ lastX.Set(temp)
+
+ temp.Set(Y)
+ Y.Mul(Y, q)
+ Y.neg = !Y.neg
+ Y.Add(Y, lastY)
+ lastY.Set(temp)
+ }
+
+ if x != nil {
+ *x = *lastX
+ }
+
+ if y != nil {
+ *y = *lastY
+ }
+
+ *d = *A
+}
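+
+// GcdInt is the extended Euclidean algorithm: the loop maintains
+// A = a*lastX + b*lastY and B = a*X + b*Y, so when B reaches 0,
+// d = A = a*x + b*y. For instance, GcdInt(d, x, y, 6, 4) yields
+// d = 2 with x = 1 and y = -1.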
+
+// ProbablyPrime performs n Miller-Rabin tests to check whether z is prime.
+// If it returns true, z is prime with probability 1 - 1/4^n.
+// If it returns false, z is not prime.
+func ProbablyPrime(z *Int, n int) bool {
+ return !z.neg && z.abs.probablyPrime(n)
+}
+
+// Rand sets z to a pseudo-random number in [0, n) and returns z.
+func (z *Int) Rand(rnd *rand.Rand, n *Int) *Int {
+ z.neg = false
+ if n.neg || len(n.abs) == 0 {
+ z.abs = nil
+ return z
+ }
+ z.abs = z.abs.random(rnd, n.abs, n.abs.bitLen())
+ return z
+}
+
+// ModInverse sets z to the multiplicative inverse of g in the group ℤ/pℤ (where
+// p is a prime) and returns z.
+func (z *Int) ModInverse(g, p *Int) *Int {
+ var d Int
+ GcdInt(&d, z, nil, g, p)
+ // x and y are such that g*x + p*y = d. Since p is prime, d = 1. Taking
+ // that modulo p results in g*x = 1, therefore x is the inverse element.
+ if z.neg {
+ z.Add(z, p)
+ }
+ return z
+}
+
+// Lsh sets z = x << n and returns z.
+func (z *Int) Lsh(x *Int, n uint) *Int {
+ z.abs = z.abs.shl(x.abs, n)
+ z.neg = x.neg
+ return z
+}
+
+// Rsh sets z = x >> n and returns z.
+func (z *Int) Rsh(x *Int, n uint) *Int {
+ if x.neg {
+ // (-x) >> s == ^(x-1) >> s == ^((x-1) >> s) == -(((x-1) >> s) + 1)
+ t := z.abs.sub(x.abs, natOne) // no underflow because |x| > 0
+ t = t.shr(t, n)
+ z.abs = t.add(t, natOne)
+ z.neg = true // z cannot be zero if x is negative
+ return z
+ }
+
+ z.abs = z.abs.shr(x.abs, n)
+ z.neg = false
+ return z
+}
+
+// Bit returns the value of the i'th bit of z. That is, it
+// returns (z>>i)&1. The bit index i must be >= 0.
+func (z *Int) Bit(i int) uint {
+ if i < 0 {
+ panic("negative bit index")
+ }
+ if z.neg {
+ t := nat{}.sub(z.abs, natOne)
+ return t.bit(uint(i)) ^ 1
+ }
+
+ return z.abs.bit(uint(i))
+}
+
+// SetBit sets the i'th bit of z to bit and returns z.
+// That is, if bit is 1 SetBit sets z = x | (1 << i);
+// if bit is 0 it sets z = x &^ (1 << i). If bit is not 0 or 1,
+// SetBit will panic.
+func (z *Int) SetBit(x *Int, i int, b uint) *Int {
+ if i < 0 {
+ panic("negative bit index")
+ }
+ if x.neg {
+ t := z.abs.sub(x.abs, natOne)
+ t = t.setBit(t, uint(i), b^1)
+ z.abs = t.add(t, natOne)
+ z.neg = len(z.abs) > 0
+ return z
+ }
+ z.abs = z.abs.setBit(x.abs, uint(i), b)
+ z.neg = false
+ return z
+}
+
+// And sets z = x & y and returns z.
+func (z *Int) And(x, y *Int) *Int {
+ if x.neg == y.neg {
+ if x.neg {
+ // (-x) & (-y) == ^(x-1) & ^(y-1) == ^((x-1) | (y-1)) == -(((x-1) | (y-1)) + 1)
+ x1 := nat{}.sub(x.abs, natOne)
+ y1 := nat{}.sub(y.abs, natOne)
+ z.abs = z.abs.add(z.abs.or(x1, y1), natOne)
+ z.neg = true // z cannot be zero if x and y are negative
+ return z
+ }
+
+ // x & y == x & y
+ z.abs = z.abs.and(x.abs, y.abs)
+ z.neg = false
+ return z
+ }
+
+ // x.neg != y.neg
+ if x.neg {
+ x, y = y, x // & is symmetric
+ }
+
+ // x & (-y) == x & ^(y-1) == x &^ (y-1)
+ y1 := nat{}.sub(y.abs, natOne)
+ z.abs = z.abs.andNot(x.abs, y1)
+ z.neg = false
+ return z
+}
+
+// AndNot sets z = x &^ y and returns z.
+func (z *Int) AndNot(x, y *Int) *Int {
+ if x.neg == y.neg {
+ if x.neg {
+ // (-x) &^ (-y) == ^(x-1) &^ ^(y-1) == ^(x-1) & (y-1) == (y-1) &^ (x-1)
+ x1 := nat{}.sub(x.abs, natOne)
+ y1 := nat{}.sub(y.abs, natOne)
+ z.abs = z.abs.andNot(y1, x1)
+ z.neg = false
+ return z
+ }
+
+ // x &^ y == x &^ y
+ z.abs = z.abs.andNot(x.abs, y.abs)
+ z.neg = false
+ return z
+ }
+
+ if x.neg {
+ // (-x) &^ y == ^(x-1) &^ y == ^(x-1) & ^y == ^((x-1) | y) == -(((x-1) | y) + 1)
+ x1 := nat{}.sub(x.abs, natOne)
+ z.abs = z.abs.add(z.abs.or(x1, y.abs), natOne)
+ z.neg = true // z cannot be zero if x is negative and y is positive
+ return z
+ }
+
+ // x &^ (-y) == x &^ ^(y-1) == x & (y-1)
+ y1 := nat{}.sub(y.abs, natOne)
+ z.abs = z.abs.and(x.abs, y1)
+ z.neg = false
+ return z
+}
+
+// Or sets z = x | y and returns z.
+func (z *Int) Or(x, y *Int) *Int {
+ if x.neg == y.neg {
+ if x.neg {
+ // (-x) | (-y) == ^(x-1) | ^(y-1) == ^((x-1) & (y-1)) == -(((x-1) & (y-1)) + 1)
+ x1 := nat{}.sub(x.abs, natOne)
+ y1 := nat{}.sub(y.abs, natOne)
+ z.abs = z.abs.add(z.abs.and(x1, y1), natOne)
+ z.neg = true // z cannot be zero if x and y are negative
+ return z
+ }
+
+ // x | y == x | y
+ z.abs = z.abs.or(x.abs, y.abs)
+ z.neg = false
+ return z
+ }
+
+ // x.neg != y.neg
+ if x.neg {
+ x, y = y, x // | is symmetric
+ }
+
+ // x | (-y) == x | ^(y-1) == ^((y-1) &^ x) == -(((y-1) &^ x) + 1)
+ y1 := nat{}.sub(y.abs, natOne)
+ z.abs = z.abs.add(z.abs.andNot(y1, x.abs), natOne)
+ z.neg = true // z cannot be zero if one of x or y is negative
+ return z
+}
+
+// Xor sets z = x ^ y and returns z.
+func (z *Int) Xor(x, y *Int) *Int {
+ if x.neg == y.neg {
+ if x.neg {
+ // (-x) ^ (-y) == ^(x-1) ^ ^(y-1) == (x-1) ^ (y-1)
+ x1 := nat{}.sub(x.abs, natOne)
+ y1 := nat{}.sub(y.abs, natOne)
+ z.abs = z.abs.xor(x1, y1)
+ z.neg = false
+ return z
+ }
+
+ // x ^ y == x ^ y
+ z.abs = z.abs.xor(x.abs, y.abs)
+ z.neg = false
+ return z
+ }
+
+ // x.neg != y.neg
+ if x.neg {
+ x, y = y, x // ^ is symmetric
+ }
+
+ // x ^ (-y) == x ^ ^(y-1) == ^(x ^ (y-1)) == -((x ^ (y-1)) + 1)
+ y1 := nat{}.sub(y.abs, natOne)
+ z.abs = z.abs.add(z.abs.xor(x.abs, y1), natOne)
+ z.neg = true // z cannot be zero if only one of x or y is negative
+ return z
+}
+
+// Not sets z = ^x and returns z.
+func (z *Int) Not(x *Int) *Int {
+ if x.neg {
+ // ^(-x) == ^(^(x-1)) == x-1
+ z.abs = z.abs.sub(x.abs, natOne)
+ z.neg = false
+ return z
+ }
+
+ // ^x == -x-1 == -(x+1)
+ z.abs = z.abs.add(x.abs, natOne)
+ z.neg = true // z cannot be zero if x is positive
+ return z
+}
+
+// Gob codec version. Permits backward-compatible changes to the encoding.
+const intGobVersion byte = 1
+
+// GobEncode implements the gob.GobEncoder interface.
+func (z *Int) GobEncode() ([]byte, error) {
+ buf := make([]byte, 1+len(z.abs)*_S) // extra byte for version and sign bit
+ i := z.abs.bytes(buf) - 1 // i >= 0
+ b := intGobVersion << 1 // make space for sign bit
+ if z.neg {
+ b |= 1
+ }
+ buf[i] = b
+ return buf[i:], nil
+}
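+
+// The gob encoding is the version/sign byte followed by the big-endian
+// bytes of the absolute value; for example, NewInt(-5) encodes as the two
+// bytes 0x03, 0x05.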
+
+// GobDecode implements the gob.GobDecoder interface.
+func (z *Int) GobDecode(buf []byte) error {
+ if len(buf) == 0 {
+ return errors.New("Int.GobDecode: no data")
+ }
+ b := buf[0]
+ if b>>1 != intGobVersion {
+ return fmt.Errorf("Int.GobDecode: encoding version %d not supported", b>>1)
+ }
+ z.neg = b&1 != 0
+ z.abs = z.abs.setBytes(buf[1:])
+ return nil
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package big
+
+import (
+ "bytes"
+ "encoding/gob"
+ "encoding/hex"
+ "fmt"
+ "testing"
+ "testing/quick"
+)
+
+func isNormalized(x *Int) bool {
+ if len(x.abs) == 0 {
+ return !x.neg
+ }
+ // len(x.abs) > 0
+ return x.abs[len(x.abs)-1] != 0
+}
+
+type funZZ func(z, x, y *Int) *Int
+type argZZ struct {
+ z, x, y *Int
+}
+
+var sumZZ = []argZZ{
+ {NewInt(0), NewInt(0), NewInt(0)},
+ {NewInt(1), NewInt(1), NewInt(0)},
+ {NewInt(1111111110), NewInt(123456789), NewInt(987654321)},
+ {NewInt(-1), NewInt(-1), NewInt(0)},
+ {NewInt(864197532), NewInt(-123456789), NewInt(987654321)},
+ {NewInt(-1111111110), NewInt(-123456789), NewInt(-987654321)},
+}
+
+var prodZZ = []argZZ{
+ {NewInt(0), NewInt(0), NewInt(0)},
+ {NewInt(0), NewInt(1), NewInt(0)},
+ {NewInt(1), NewInt(1), NewInt(1)},
+ {NewInt(-991 * 991), NewInt(991), NewInt(-991)},
+ // TODO(gri) add larger products
+}
+
+func TestSignZ(t *testing.T) {
+ var zero Int
+ for _, a := range sumZZ {
+ s := a.z.Sign()
+ e := a.z.Cmp(&zero)
+ if s != e {
+ t.Errorf("got %d; want %d for z = %v", s, e, a.z)
+ }
+ }
+}
+
+func TestSetZ(t *testing.T) {
+ for _, a := range sumZZ {
+ var z Int
+ z.Set(a.z)
+ if !isNormalized(&z) {
+ t.Errorf("%v is not normalized", z)
+ }
+ if (&z).Cmp(a.z) != 0 {
+ t.Errorf("got z = %v; want %v", z, a.z)
+ }
+ }
+}
+
+func TestAbsZ(t *testing.T) {
+ var zero Int
+ for _, a := range sumZZ {
+ var z Int
+ z.Abs(a.z)
+ var e Int
+ e.Set(a.z)
+ if e.Cmp(&zero) < 0 {
+ e.Sub(&zero, &e)
+ }
+ if z.Cmp(&e) != 0 {
+ t.Errorf("got z = %v; want %v", z, e)
+ }
+ }
+}
+
+func testFunZZ(t *testing.T, msg string, f funZZ, a argZZ) {
+ var z Int
+ f(&z, a.x, a.y)
+ if !isNormalized(&z) {
+ t.Errorf("%s%v is not normalized", z, msg)
+ }
+ if (&z).Cmp(a.z) != 0 {
+ t.Errorf("%s%+v\n\tgot z = %v; want %v", msg, a, &z, a.z)
+ }
+}
+
+func TestSumZZ(t *testing.T) {
+ AddZZ := func(z, x, y *Int) *Int { return z.Add(x, y) }
+ SubZZ := func(z, x, y *Int) *Int { return z.Sub(x, y) }
+ for _, a := range sumZZ {
+ arg := a
+ testFunZZ(t, "AddZZ", AddZZ, arg)
+
+ arg = argZZ{a.z, a.y, a.x}
+ testFunZZ(t, "AddZZ symmetric", AddZZ, arg)
+
+ arg = argZZ{a.x, a.z, a.y}
+ testFunZZ(t, "SubZZ", SubZZ, arg)
+
+ arg = argZZ{a.y, a.z, a.x}
+ testFunZZ(t, "SubZZ symmetric", SubZZ, arg)
+ }
+}
+
+func TestProdZZ(t *testing.T) {
+ MulZZ := func(z, x, y *Int) *Int { return z.Mul(x, y) }
+ for _, a := range prodZZ {
+ arg := a
+ testFunZZ(t, "MulZZ", MulZZ, arg)
+
+ arg = argZZ{a.z, a.y, a.x}
+ testFunZZ(t, "MulZZ symmetric", MulZZ, arg)
+ }
+}
+
+// mulBytes returns x*y via grade school multiplication. Both inputs
+// and the result are assumed to be in big-endian representation (to
+// match the semantics of Int.Bytes and Int.SetBytes).
+func mulBytes(x, y []byte) []byte {
+ z := make([]byte, len(x)+len(y))
+
+ // multiply
+ k0 := len(z) - 1
+ for j := len(y) - 1; j >= 0; j-- {
+ d := int(y[j])
+ if d != 0 {
+ k := k0
+ carry := 0
+ for i := len(x) - 1; i >= 0; i-- {
+ t := int(z[k]) + int(x[i])*d + carry
+ z[k], carry = byte(t), t>>8
+ k--
+ }
+ z[k] = byte(carry)
+ }
+ k0--
+ }
+
+ // normalize (remove leading 0's)
+ i := 0
+ for i < len(z) && z[i] == 0 {
+ i++
+ }
+
+ return z[i:]
+}
+
+func checkMul(a, b []byte) bool {
+ var x, y, z1 Int
+ x.SetBytes(a)
+ y.SetBytes(b)
+ z1.Mul(&x, &y)
+
+ var z2 Int
+ z2.SetBytes(mulBytes(a, b))
+
+ return z1.Cmp(&z2) == 0
+}
+
+func TestMul(t *testing.T) {
+ if err := quick.Check(checkMul, nil); err != nil {
+ t.Error(err)
+ }
+}
+
+var mulRangesZ = []struct {
+ a, b int64
+ prod string
+}{
+ // entirely positive ranges are covered by mulRangesN
+ {-1, 1, "0"},
+ {-2, -1, "2"},
+ {-3, -2, "6"},
+ {-3, -1, "-6"},
+ {1, 3, "6"},
+ {-10, -10, "-10"},
+ {0, -1, "1"}, // empty range
+ {-1, -100, "1"}, // empty range
+ {-1, 1, "0"}, // range includes 0
+ {-1e9, 0, "0"}, // range includes 0
+ {-1e9, 1e9, "0"}, // range includes 0
+ {-10, -1, "3628800"}, // 10!
+ {-20, -2, "-2432902008176640000"}, // -20!
+ {-99, -1,
+ "-933262154439441526816992388562667004907159682643816214685929" +
+ "638952175999932299156089414639761565182862536979208272237582" +
+ "511852109168640000000000000000000000", // -99!
+ },
+}
+
+func TestMulRangeZ(t *testing.T) {
+ var tmp Int
+ // test entirely positive ranges
+ for i, r := range mulRangesN {
+ prod := tmp.MulRange(int64(r.a), int64(r.b)).String()
+ if prod != r.prod {
+ t.Errorf("#%da: got %s; want %s", i, prod, r.prod)
+ }
+ }
+ // test other ranges
+ for i, r := range mulRangesZ {
+ prod := tmp.MulRange(r.a, r.b).String()
+ if prod != r.prod {
+ t.Errorf("#%db: got %s; want %s", i, prod, r.prod)
+ }
+ }
+}
+
+var stringTests = []struct {
+ in string
+ out string
+ base int
+ val int64
+ ok bool
+}{
+ {in: "", ok: false},
+ {in: "a", ok: false},
+ {in: "z", ok: false},
+ {in: "+", ok: false},
+ {in: "-", ok: false},
+ {in: "0b", ok: false},
+ {in: "0x", ok: false},
+ {in: "2", base: 2, ok: false},
+ {in: "0b2", base: 0, ok: false},
+ {in: "08", ok: false},
+ {in: "8", base: 8, ok: false},
+ {in: "0xg", base: 0, ok: false},
+ {in: "g", base: 16, ok: false},
+ {"0", "0", 0, 0, true},
+ {"0", "0", 10, 0, true},
+ {"0", "0", 16, 0, true},
+ {"+0", "0", 0, 0, true},
+ {"-0", "0", 0, 0, true},
+ {"10", "10", 0, 10, true},
+ {"10", "10", 10, 10, true},
+ {"10", "10", 16, 16, true},
+ {"-10", "-10", 16, -16, true},
+ {"+10", "10", 16, 16, true},
+ {"0x10", "16", 0, 16, true},
+ {in: "0x10", base: 16, ok: false},
+ {"-0x10", "-16", 0, -16, true},
+ {"+0x10", "16", 0, 16, true},
+ {"00", "0", 0, 0, true},
+ {"0", "0", 8, 0, true},
+ {"07", "7", 0, 7, true},
+ {"7", "7", 8, 7, true},
+ {"023", "19", 0, 19, true},
+ {"23", "23", 8, 19, true},
+ {"cafebabe", "cafebabe", 16, 0xcafebabe, true},
+ {"0b0", "0", 0, 0, true},
+ {"-111", "-111", 2, -7, true},
+ {"-0b111", "-7", 0, -7, true},
+ {"0b1001010111", "599", 0, 0x257, true},
+ {"1001010111", "1001010111", 2, 0x257, true},
+}
+
+func format(base int) string {
+ switch base {
+ case 2:
+ return "%b"
+ case 8:
+ return "%o"
+ case 16:
+ return "%x"
+ }
+ return "%d"
+}
+
+func TestGetString(t *testing.T) {
+ z := new(Int)
+ for i, test := range stringTests {
+ if !test.ok {
+ continue
+ }
+ z.SetInt64(test.val)
+
+ if test.base == 10 {
+ s := z.String()
+ if s != test.out {
+ t.Errorf("#%da got %s; want %s", i, s, test.out)
+ }
+ }
+
+ s := fmt.Sprintf(format(test.base), z)
+ if s != test.out {
+ t.Errorf("#%db got %s; want %s", i, s, test.out)
+ }
+ }
+}
+
+func TestSetString(t *testing.T) {
+ tmp := new(Int)
+ for i, test := range stringTests {
+ // initialize to a non-zero value so that issues with parsing
+ // 0 are detected
+ tmp.SetInt64(1234567890)
+ n1, ok1 := new(Int).SetString(test.in, test.base)
+ n2, ok2 := tmp.SetString(test.in, test.base)
+ expected := NewInt(test.val)
+ if ok1 != test.ok || ok2 != test.ok {
+ t.Errorf("#%d (input '%s') ok incorrect (should be %t)", i, test.in, test.ok)
+ continue
+ }
+ if !ok1 {
+ if n1 != nil {
+ t.Errorf("#%d (input '%s') n1 != nil", i, test.in)
+ }
+ continue
+ }
+ if !ok2 {
+ if n2 != nil {
+ t.Errorf("#%d (input '%s') n2 != nil", i, test.in)
+ }
+ continue
+ }
+
+ if ok1 && !isNormalized(n1) {
+ t.Errorf("#%d (input '%s'): %v is not normalized", i, test.in, *n1)
+ }
+ if ok2 && !isNormalized(n2) {
+ t.Errorf("#%d (input '%s'): %v is not normalized", i, test.in, *n2)
+ }
+
+ if n1.Cmp(expected) != 0 {
+ t.Errorf("#%d (input '%s') got: %s want: %d", i, test.in, n1, test.val)
+ }
+ if n2.Cmp(expected) != 0 {
+ t.Errorf("#%d (input '%s') got: %s want: %d", i, test.in, n2, test.val)
+ }
+ }
+}
+
+var formatTests = []struct {
+ input string
+ format string
+ output string
+}{
+ {"<nil>", "%x", "<nil>"},
+ {"<nil>", "%#x", "<nil>"},
+ {"<nil>", "%#y", "%!y(big.Int=<nil>)"},
+
+ {"10", "%b", "1010"},
+ {"10", "%o", "12"},
+ {"10", "%d", "10"},
+ {"10", "%v", "10"},
+ {"10", "%x", "a"},
+ {"10", "%X", "A"},
+ {"-10", "%X", "-A"},
+ {"10", "%y", "%!y(big.Int=10)"},
+ {"-10", "%y", "%!y(big.Int=-10)"},
+
+ {"10", "%#b", "1010"},
+ {"10", "%#o", "012"},
+ {"10", "%#d", "10"},
+ {"10", "%#v", "10"},
+ {"10", "%#x", "0xa"},
+ {"10", "%#X", "0XA"},
+ {"-10", "%#X", "-0XA"},
+ {"10", "%#y", "%!y(big.Int=10)"},
+ {"-10", "%#y", "%!y(big.Int=-10)"},
+
+ {"1234", "%d", "1234"},
+ {"1234", "%3d", "1234"},
+ {"1234", "%4d", "1234"},
+ {"-1234", "%d", "-1234"},
+ {"1234", "% 5d", " 1234"},
+ {"1234", "%+5d", "+1234"},
+ {"1234", "%-5d", "1234 "},
+ {"1234", "%x", "4d2"},
+ {"1234", "%X", "4D2"},
+ {"-1234", "%3x", "-4d2"},
+ {"-1234", "%4x", "-4d2"},
+ {"-1234", "%5x", " -4d2"},
+ {"-1234", "%-5x", "-4d2 "},
+ {"1234", "%03d", "1234"},
+ {"1234", "%04d", "1234"},
+ {"1234", "%05d", "01234"},
+ {"1234", "%06d", "001234"},
+ {"-1234", "%06d", "-01234"},
+ {"1234", "%+06d", "+01234"},
+ {"1234", "% 06d", " 01234"},
+ {"1234", "%-6d", "1234 "},
+ {"1234", "%-06d", "1234 "},
+ {"-1234", "%-06d", "-1234 "},
+
+ {"1234", "%.3d", "1234"},
+ {"1234", "%.4d", "1234"},
+ {"1234", "%.5d", "01234"},
+ {"1234", "%.6d", "001234"},
+ {"-1234", "%.3d", "-1234"},
+ {"-1234", "%.4d", "-1234"},
+ {"-1234", "%.5d", "-01234"},
+ {"-1234", "%.6d", "-001234"},
+
+ {"1234", "%8.3d", " 1234"},
+ {"1234", "%8.4d", " 1234"},
+ {"1234", "%8.5d", " 01234"},
+ {"1234", "%8.6d", " 001234"},
+ {"-1234", "%8.3d", " -1234"},
+ {"-1234", "%8.4d", " -1234"},
+ {"-1234", "%8.5d", " -01234"},
+ {"-1234", "%8.6d", " -001234"},
+
+ {"1234", "%+8.3d", " +1234"},
+ {"1234", "%+8.4d", " +1234"},
+ {"1234", "%+8.5d", " +01234"},
+ {"1234", "%+8.6d", " +001234"},
+ {"-1234", "%+8.3d", " -1234"},
+ {"-1234", "%+8.4d", " -1234"},
+ {"-1234", "%+8.5d", " -01234"},
+ {"-1234", "%+8.6d", " -001234"},
+
+ {"1234", "% 8.3d", " 1234"},
+ {"1234", "% 8.4d", " 1234"},
+ {"1234", "% 8.5d", " 01234"},
+ {"1234", "% 8.6d", " 001234"},
+ {"-1234", "% 8.3d", " -1234"},
+ {"-1234", "% 8.4d", " -1234"},
+ {"-1234", "% 8.5d", " -01234"},
+ {"-1234", "% 8.6d", " -001234"},
+
+ {"1234", "%.3x", "4d2"},
+ {"1234", "%.4x", "04d2"},
+ {"1234", "%.5x", "004d2"},
+ {"1234", "%.6x", "0004d2"},
+ {"-1234", "%.3x", "-4d2"},
+ {"-1234", "%.4x", "-04d2"},
+ {"-1234", "%.5x", "-004d2"},
+ {"-1234", "%.6x", "-0004d2"},
+
+ {"1234", "%8.3x", " 4d2"},
+ {"1234", "%8.4x", " 04d2"},
+ {"1234", "%8.5x", " 004d2"},
+ {"1234", "%8.6x", " 0004d2"},
+ {"-1234", "%8.3x", " -4d2"},
+ {"-1234", "%8.4x", " -04d2"},
+ {"-1234", "%8.5x", " -004d2"},
+ {"-1234", "%8.6x", " -0004d2"},
+
+ {"1234", "%+8.3x", " +4d2"},
+ {"1234", "%+8.4x", " +04d2"},
+ {"1234", "%+8.5x", " +004d2"},
+ {"1234", "%+8.6x", " +0004d2"},
+ {"-1234", "%+8.3x", " -4d2"},
+ {"-1234", "%+8.4x", " -04d2"},
+ {"-1234", "%+8.5x", " -004d2"},
+ {"-1234", "%+8.6x", " -0004d2"},
+
+ {"1234", "% 8.3x", " 4d2"},
+ {"1234", "% 8.4x", " 04d2"},
+ {"1234", "% 8.5x", " 004d2"},
+ {"1234", "% 8.6x", " 0004d2"},
+ {"1234", "% 8.7x", " 00004d2"},
+ {"1234", "% 8.8x", " 000004d2"},
+ {"-1234", "% 8.3x", " -4d2"},
+ {"-1234", "% 8.4x", " -04d2"},
+ {"-1234", "% 8.5x", " -004d2"},
+ {"-1234", "% 8.6x", " -0004d2"},
+ {"-1234", "% 8.7x", "-00004d2"},
+ {"-1234", "% 8.8x", "-000004d2"},
+
+ {"1234", "%-8.3d", "1234 "},
+ {"1234", "%-8.4d", "1234 "},
+ {"1234", "%-8.5d", "01234 "},
+ {"1234", "%-8.6d", "001234 "},
+ {"1234", "%-8.7d", "0001234 "},
+ {"1234", "%-8.8d", "00001234"},
+ {"-1234", "%-8.3d", "-1234 "},
+ {"-1234", "%-8.4d", "-1234 "},
+ {"-1234", "%-8.5d", "-01234 "},
+ {"-1234", "%-8.6d", "-001234 "},
+ {"-1234", "%-8.7d", "-0001234"},
+ {"-1234", "%-8.8d", "-00001234"},
+
+ {"16777215", "%b", "111111111111111111111111"}, // 2**24 - 1
+
+ {"0", "%.d", ""},
+ {"0", "%.0d", ""},
+ {"0", "%3.d", ""},
+}
+
+func TestFormat(t *testing.T) {
+ for i, test := range formatTests {
+ var x *Int
+ if test.input != "<nil>" {
+ var ok bool
+ x, ok = new(Int).SetString(test.input, 0)
+ if !ok {
+ t.Errorf("#%d failed reading input %s", i, test.input)
+ }
+ }
+ output := fmt.Sprintf(test.format, x)
+ if output != test.output {
+ t.Errorf("#%d got %q; want %q, {%q, %q, %q}", i, output, test.output, test.input, test.format, test.output)
+ }
+ }
+}
+
+var scanTests = []struct {
+ input string
+ format string
+ output string
+ remaining int
+}{
+ {"1010", "%b", "10", 0},
+ {"0b1010", "%v", "10", 0},
+ {"12", "%o", "10", 0},
+ {"012", "%v", "10", 0},
+ {"10", "%d", "10", 0},
+ {"10", "%v", "10", 0},
+ {"a", "%x", "10", 0},
+ {"0xa", "%v", "10", 0},
+ {"A", "%X", "10", 0},
+ {"-A", "%X", "-10", 0},
+ {"+0b1011001", "%v", "89", 0},
+ {"0xA", "%v", "10", 0},
+ {"0 ", "%v", "0", 1},
+ {"2+3", "%v", "2", 2},
+ {"0XABC 12", "%v", "2748", 3},
+}
+
+func TestScan(t *testing.T) {
+ var buf bytes.Buffer
+ for i, test := range scanTests {
+ x := new(Int)
+ buf.Reset()
+ buf.WriteString(test.input)
+ if _, err := fmt.Fscanf(&buf, test.format, x); err != nil {
+ t.Errorf("#%d error: %s", i, err)
+ }
+ if x.String() != test.output {
+ t.Errorf("#%d got %s; want %s", i, x.String(), test.output)
+ }
+ if buf.Len() != test.remaining {
+ t.Errorf("#%d got %d bytes remaining; want %d", i, buf.Len(), test.remaining)
+ }
+ }
+}
+
+// Examples from the Go Language Spec, section "Arithmetic operators"
+var divisionSignsTests = []struct {
+ x, y int64
+ q, r int64 // T-division
+ d, m int64 // Euclidean division
+}{
+ {5, 3, 1, 2, 1, 2},
+ {-5, 3, -1, -2, -2, 1},
+ {5, -3, -1, 2, -1, 2},
+ {-5, -3, 1, -2, 2, 1},
+ {1, 2, 0, 1, 0, 1},
+ {8, 4, 2, 0, 2, 0},
+}
+
+func TestDivisionSigns(t *testing.T) {
+ for i, test := range divisionSignsTests {
+ x := NewInt(test.x)
+ y := NewInt(test.y)
+ q := NewInt(test.q)
+ r := NewInt(test.r)
+ d := NewInt(test.d)
+ m := NewInt(test.m)
+
+ q1 := new(Int).Quo(x, y)
+ r1 := new(Int).Rem(x, y)
+ if !isNormalized(q1) {
+ t.Errorf("#%d Quo: %v is not normalized", i, *q1)
+ }
+ if !isNormalized(r1) {
+ t.Errorf("#%d Rem: %v is not normalized", i, *r1)
+ }
+ if q1.Cmp(q) != 0 || r1.Cmp(r) != 0 {
+ t.Errorf("#%d QuoRem: got (%s, %s), want (%s, %s)", i, q1, r1, q, r)
+ }
+
+ q2, r2 := new(Int).QuoRem(x, y, new(Int))
+ if !isNormalized(q2) {
+ t.Errorf("#%d Quo: %v is not normalized", i, *q2)
+ }
+ if !isNormalized(r2) {
+ t.Errorf("#%d Rem: %v is not normalized", i, *r2)
+ }
+ if q2.Cmp(q) != 0 || r2.Cmp(r) != 0 {
+ t.Errorf("#%d QuoRem: got (%s, %s), want (%s, %s)", i, q2, r2, q, r)
+ }
+
+ d1 := new(Int).Div(x, y)
+ m1 := new(Int).Mod(x, y)
+ if !isNormalized(d1) {
+ t.Errorf("#%d Div: %v is not normalized", i, *d1)
+ }
+ if !isNormalized(m1) {
+ t.Errorf("#%d Mod: %v is not normalized", i, *m1)
+ }
+ if d1.Cmp(d) != 0 || m1.Cmp(m) != 0 {
+ t.Errorf("#%d DivMod: got (%s, %s), want (%s, %s)", i, d1, m1, d, m)
+ }
+
+ d2, m2 := new(Int).DivMod(x, y, new(Int))
+ if !isNormalized(d2) {
+ t.Errorf("#%d Div: %v is not normalized", i, *d2)
+ }
+ if !isNormalized(m2) {
+ t.Errorf("#%d Mod: %v is not normalized", i, *m2)
+ }
+ if d2.Cmp(d) != 0 || m2.Cmp(m) != 0 {
+ t.Errorf("#%d DivMod: got (%s, %s), want (%s, %s)", i, d2, m2, d, m)
+ }
+ }
+}
+
+func checkSetBytes(b []byte) bool {
+ hex1 := hex.EncodeToString(new(Int).SetBytes(b).Bytes())
+ hex2 := hex.EncodeToString(b)
+
+ for len(hex1) < len(hex2) {
+ hex1 = "0" + hex1
+ }
+
+ for len(hex1) > len(hex2) {
+ hex2 = "0" + hex2
+ }
+
+ return hex1 == hex2
+}
+
+func TestSetBytes(t *testing.T) {
+ if err := quick.Check(checkSetBytes, nil); err != nil {
+ t.Error(err)
+ }
+}
+
+func checkBytes(b []byte) bool {
+ // skip leading zero bytes, since Bytes does not return them
+ for len(b) > 0 && b[0] == 0 {
+ b = b[1:]
+ }
+ b2 := new(Int).SetBytes(b).Bytes()
+ return bytes.Compare(b, b2) == 0
+}
+
+func TestBytes(t *testing.T) {
+ if err := quick.Check(checkBytes, nil); err != nil {
+ t.Error(err)
+ }
+}
+
+func checkQuo(x, y []byte) bool {
+ u := new(Int).SetBytes(x)
+ v := new(Int).SetBytes(y)
+
+ if len(v.abs) == 0 {
+ return true
+ }
+
+ r := new(Int)
+ q, r := new(Int).QuoRem(u, v, r)
+
+ if r.Cmp(v) >= 0 {
+ return false
+ }
+
+ uprime := new(Int).Set(q)
+ uprime.Mul(uprime, v)
+ uprime.Add(uprime, r)
+
+ return uprime.Cmp(u) == 0
+}
+
+var quoTests = []struct {
+ x, y string
+ q, r string
+}{
+ {
+ "476217953993950760840509444250624797097991362735329973741718102894495832294430498335824897858659711275234906400899559094370964723884706254265559534144986498357",
+ "9353930466774385905609975137998169297361893554149986716853295022578535724979483772383667534691121982974895531435241089241440253066816724367338287092081996",
+ "50911",
+ "1",
+ },
+ {
+ "11510768301994997771168",
+ "1328165573307167369775",
+ "8",
+ "885443715537658812968",
+ },
+}
+
+func TestQuo(t *testing.T) {
+ if err := quick.Check(checkQuo, nil); err != nil {
+ t.Error(err)
+ }
+
+ for i, test := range quoTests {
+ x, _ := new(Int).SetString(test.x, 10)
+ y, _ := new(Int).SetString(test.y, 10)
+ expectedQ, _ := new(Int).SetString(test.q, 10)
+ expectedR, _ := new(Int).SetString(test.r, 10)
+
+ r := new(Int)
+ q, r := new(Int).QuoRem(x, y, r)
+
+ if q.Cmp(expectedQ) != 0 || r.Cmp(expectedR) != 0 {
+ t.Errorf("#%d got (%s, %s) want (%s, %s)", i, q, r, expectedQ, expectedR)
+ }
+ }
+}
+
+func TestQuoStepD6(t *testing.T) {
+ // See Knuth, Volume 2, section 4.3.1, exercise 21. This code exercises
+ // a code path that is taken only about once in 10^19 cases.
+
+ u := &Int{false, nat{0, 0, 1 + 1<<(_W-1), _M ^ (1 << (_W - 1))}}
+ v := &Int{false, nat{5, 2 + 1<<(_W-1), 1 << (_W - 1)}}
+
+ r := new(Int)
+ q, r := new(Int).QuoRem(u, v, r)
+ const expectedQ64 = "18446744073709551613"
+ const expectedR64 = "3138550867693340382088035895064302439801311770021610913807"
+ const expectedQ32 = "4294967293"
+ const expectedR32 = "39614081266355540837921718287"
+ if q.String() != expectedQ64 && q.String() != expectedQ32 ||
+ r.String() != expectedR64 && r.String() != expectedR32 {
+ t.Errorf("got (%s, %s) want (%s, %s) or (%s, %s)", q, r, expectedQ64, expectedR64, expectedQ32, expectedR32)
+ }
+}
+
+var bitLenTests = []struct {
+ in string
+ out int
+}{
+ {"-1", 1},
+ {"0", 0},
+ {"1", 1},
+ {"2", 2},
+ {"4", 3},
+ {"0xabc", 12},
+ {"0x8000", 16},
+ {"0x80000000", 32},
+ {"0x800000000000", 48},
+ {"0x8000000000000000", 64},
+ {"0x80000000000000000000", 80},
+ {"-0x4000000000000000000000", 87},
+}
+
+func TestBitLen(t *testing.T) {
+ for i, test := range bitLenTests {
+ x, ok := new(Int).SetString(test.in, 0)
+ if !ok {
+ t.Errorf("#%d test input invalid: %s", i, test.in)
+ continue
+ }
+
+ if n := x.BitLen(); n != test.out {
+ t.Errorf("#%d got %d want %d", i, n, test.out)
+ }
+ }
+}
+
+var expTests = []struct {
+ x, y, m string
+ out string
+}{
+ {"5", "0", "", "1"},
+ {"-5", "0", "", "-1"},
+ {"5", "1", "", "5"},
+ {"-5", "1", "", "-5"},
+ {"-2", "3", "2", "0"},
+ {"5", "2", "", "25"},
+ {"1", "65537", "2", "1"},
+ {"0x8000000000000000", "2", "", "0x40000000000000000000000000000000"},
+ {"0x8000000000000000", "2", "6719", "4944"},
+ {"0x8000000000000000", "3", "6719", "5447"},
+ {"0x8000000000000000", "1000", "6719", "1603"},
+ {"0x8000000000000000", "1000000", "6719", "3199"},
+ {
+ "2938462938472983472983659726349017249287491026512746239764525612965293865296239471239874193284792387498274256129746192347",
+ "298472983472983471903246121093472394872319615612417471234712061",
+ "29834729834729834729347290846729561262544958723956495615629569234729836259263598127342374289365912465901365498236492183464",
+ "23537740700184054162508175125554701713153216681790245129157191391322321508055833908509185839069455749219131480588829346291",
+ },
+}
+
+func TestExp(t *testing.T) {
+ for i, test := range expTests {
+ x, ok1 := new(Int).SetString(test.x, 0)
+ y, ok2 := new(Int).SetString(test.y, 0)
+ out, ok3 := new(Int).SetString(test.out, 0)
+
+ var ok4 bool
+ var m *Int
+
+ if len(test.m) == 0 {
+ m, ok4 = nil, true
+ } else {
+ m, ok4 = new(Int).SetString(test.m, 0)
+ }
+
+ if !ok1 || !ok2 || !ok3 || !ok4 {
+ t.Errorf("#%d: error in input", i)
+ continue
+ }
+
+ z := y.Exp(x, y, m)
+ if !isNormalized(z) {
+ t.Errorf("#%d: %v is not normalized", i, *z)
+ }
+ if z.Cmp(out) != 0 {
+ t.Errorf("#%d: got %s want %s", i, z, out)
+ }
+ }
+}
+
+func checkGcd(aBytes, bBytes []byte) bool {
+ a := new(Int).SetBytes(aBytes)
+ b := new(Int).SetBytes(bBytes)
+
+ x := new(Int)
+ y := new(Int)
+ d := new(Int)
+
+ GcdInt(d, x, y, a, b)
+ x.Mul(x, a)
+ y.Mul(y, b)
+ x.Add(x, y)
+
+ return x.Cmp(d) == 0
+}
+
+var gcdTests = []struct {
+ a, b int64
+ d, x, y int64
+}{
+ {120, 23, 1, -9, 47},
+}
+
+func TestGcd(t *testing.T) {
+ for i, test := range gcdTests {
+ a := NewInt(test.a)
+ b := NewInt(test.b)
+
+ x := new(Int)
+ y := new(Int)
+ d := new(Int)
+
+ expectedX := NewInt(test.x)
+ expectedY := NewInt(test.y)
+ expectedD := NewInt(test.d)
+
+ GcdInt(d, x, y, a, b)
+
+ if expectedX.Cmp(x) != 0 ||
+ expectedY.Cmp(y) != 0 ||
+ expectedD.Cmp(d) != 0 {
+ t.Errorf("#%d got (%s %s %s) want (%s %s %s)", i, x, y, d, expectedX, expectedY, expectedD)
+ }
+ }
+
+ if err := quick.Check(checkGcd, nil); err != nil {
+ t.Error(err)
+ }
+}
+
+var primes = []string{
+ "2",
+ "3",
+ "5",
+ "7",
+ "11",
+
+ "13756265695458089029",
+ "13496181268022124907",
+ "10953742525620032441",
+ "17908251027575790097",
+
+ // http://code.google.com/p/go/issues/detail?id=638
+ "18699199384836356663",
+
+ "98920366548084643601728869055592650835572950932266967461790948584315647051443",
+ "94560208308847015747498523884063394671606671904944666360068158221458669711639",
+
+ // http://primes.utm.edu/lists/small/small3.html
+ "449417999055441493994709297093108513015373787049558499205492347871729927573118262811508386655998299074566974373711472560655026288668094291699357843464363003144674940345912431129144354948751003607115263071543163",
+ "230975859993204150666423538988557839555560243929065415434980904258310530753006723857139742334640122533598517597674807096648905501653461687601339782814316124971547968912893214002992086353183070342498989426570593",
+ "5521712099665906221540423207019333379125265462121169655563495403888449493493629943498064604536961775110765377745550377067893607246020694972959780839151452457728855382113555867743022746090187341871655890805971735385789993",
+ "203956878356401977405765866929034577280193993314348263094772646453283062722701277632936616063144088173312372882677123879538709400158306567338328279154499698366071906766440037074217117805690872792848149112022286332144876183376326512083574821647933992961249917319836219304274280243803104015000563790123",
+}
+
+var composites = []string{
+ "21284175091214687912771199898307297748211672914763848041968395774954376176754",
+ "6084766654921918907427900243509372380954290099172559290432744450051395395951",
+ "84594350493221918389213352992032324280367711247940675652888030554255915464401",
+ "82793403787388584738507275144194252681",
+}
+
+func TestProbablyPrime(t *testing.T) {
+ nreps := 20
+ if testing.Short() {
+ nreps = 1
+ }
+ for i, s := range primes {
+ p, _ := new(Int).SetString(s, 10)
+ if !ProbablyPrime(p, nreps) {
+ t.Errorf("#%d prime found to be non-prime (%s)", i, s)
+ }
+ }
+
+ for i, s := range composites {
+ c, _ := new(Int).SetString(s, 10)
+ if ProbablyPrime(c, nreps) {
+ t.Errorf("#%d composite found to be prime (%s)", i, s)
+ }
+ if testing.Short() {
+ break
+ }
+ }
+}
+
+type intShiftTest struct {
+ in string
+ shift uint
+ out string
+}
+
+var rshTests = []intShiftTest{
+ {"0", 0, "0"},
+ {"-0", 0, "0"},
+ {"0", 1, "0"},
+ {"0", 2, "0"},
+ {"1", 0, "1"},
+ {"1", 1, "0"},
+ {"1", 2, "0"},
+ {"2", 0, "2"},
+ {"2", 1, "1"},
+ {"-1", 0, "-1"},
+ {"-1", 1, "-1"},
+ {"-1", 10, "-1"},
+ {"-100", 2, "-25"},
+ {"-100", 3, "-13"},
+ {"-100", 100, "-1"},
+ {"4294967296", 0, "4294967296"},
+ {"4294967296", 1, "2147483648"},
+ {"4294967296", 2, "1073741824"},
+ {"18446744073709551616", 0, "18446744073709551616"},
+ {"18446744073709551616", 1, "9223372036854775808"},
+ {"18446744073709551616", 2, "4611686018427387904"},
+ {"18446744073709551616", 64, "1"},
+ {"340282366920938463463374607431768211456", 64, "18446744073709551616"},
+ {"340282366920938463463374607431768211456", 128, "1"},
+}
+
+func TestRsh(t *testing.T) {
+ for i, test := range rshTests {
+ in, _ := new(Int).SetString(test.in, 10)
+ expected, _ := new(Int).SetString(test.out, 10)
+ out := new(Int).Rsh(in, test.shift)
+
+ if !isNormalized(out) {
+ t.Errorf("#%d: %v is not normalized", i, *out)
+ }
+ if out.Cmp(expected) != 0 {
+ t.Errorf("#%d: got %s want %s", i, out, expected)
+ }
+ }
+}
+
+func TestRshSelf(t *testing.T) {
+ for i, test := range rshTests {
+ z, _ := new(Int).SetString(test.in, 10)
+ expected, _ := new(Int).SetString(test.out, 10)
+ z.Rsh(z, test.shift)
+
+ if !isNormalized(z) {
+ t.Errorf("#%d: %v is not normalized", i, *z)
+ }
+ if z.Cmp(expected) != 0 {
+ t.Errorf("#%d: got %s want %s", i, z, expected)
+ }
+ }
+}
+
+var lshTests = []intShiftTest{
+ {"0", 0, "0"},
+ {"0", 1, "0"},
+ {"0", 2, "0"},
+ {"1", 0, "1"},
+ {"1", 1, "2"},
+ {"1", 2, "4"},
+ {"2", 0, "2"},
+ {"2", 1, "4"},
+ {"2", 2, "8"},
+ {"-87", 1, "-174"},
+ {"4294967296", 0, "4294967296"},
+ {"4294967296", 1, "8589934592"},
+ {"4294967296", 2, "17179869184"},
+ {"18446744073709551616", 0, "18446744073709551616"},
+ {"9223372036854775808", 1, "18446744073709551616"},
+ {"4611686018427387904", 2, "18446744073709551616"},
+ {"1", 64, "18446744073709551616"},
+ {"18446744073709551616", 64, "340282366920938463463374607431768211456"},
+ {"1", 128, "340282366920938463463374607431768211456"},
+}
+
+func TestLsh(t *testing.T) {
+ for i, test := range lshTests {
+ in, _ := new(Int).SetString(test.in, 10)
+ expected, _ := new(Int).SetString(test.out, 10)
+ out := new(Int).Lsh(in, test.shift)
+
+ if !isNormalized(out) {
+ t.Errorf("#%d: %v is not normalized", i, *out)
+ }
+ if out.Cmp(expected) != 0 {
+ t.Errorf("#%d: got %s want %s", i, out, expected)
+ }
+ }
+}
+
+func TestLshSelf(t *testing.T) {
+ for i, test := range lshTests {
+ z, _ := new(Int).SetString(test.in, 10)
+ expected, _ := new(Int).SetString(test.out, 10)
+ z.Lsh(z, test.shift)
+
+ if !isNormalized(z) {
+ t.Errorf("#%d: %v is not normalized", i, *z)
+ }
+ if z.Cmp(expected) != 0 {
+ t.Errorf("#%d: got %s want %s", i, z, expected)
+ }
+ }
+}
+
+func TestLshRsh(t *testing.T) {
+ for i, test := range rshTests {
+ in, _ := new(Int).SetString(test.in, 10)
+ out := new(Int).Lsh(in, test.shift)
+ out = out.Rsh(out, test.shift)
+
+ if !isNormalized(out) {
+ t.Errorf("#%d: %v is not normalized", i, *out)
+ }
+ if in.Cmp(out) != 0 {
+ t.Errorf("#%d: got %s want %s", i, out, in)
+ }
+ }
+ for i, test := range lshTests {
+ in, _ := new(Int).SetString(test.in, 10)
+ out := new(Int).Lsh(in, test.shift)
+ out.Rsh(out, test.shift)
+
+ if !isNormalized(out) {
+ t.Errorf("#%d: %v is not normalized", i, *out)
+ }
+ if in.Cmp(out) != 0 {
+ t.Errorf("#%d: got %s want %s", i, out, in)
+ }
+ }
+}
+
+var int64Tests = []int64{
+ 0,
+ 1,
+ -1,
+ 4294967295,
+ -4294967295,
+ 4294967296,
+ -4294967296,
+ 9223372036854775807,
+ -9223372036854775807,
+ -9223372036854775808,
+}
+
+func TestInt64(t *testing.T) {
+ for i, testVal := range int64Tests {
+ in := NewInt(testVal)
+ out := in.Int64()
+
+ if out != testVal {
+ t.Errorf("#%d got %d want %d", i, out, testVal)
+ }
+ }
+}
+
+var bitwiseTests = []struct {
+ x, y string
+ and, or, xor, andNot string
+}{
+ {"0x00", "0x00", "0x00", "0x00", "0x00", "0x00"},
+ {"0x00", "0x01", "0x00", "0x01", "0x01", "0x00"},
+ {"0x01", "0x00", "0x00", "0x01", "0x01", "0x01"},
+ {"-0x01", "0x00", "0x00", "-0x01", "-0x01", "-0x01"},
+ {"-0xaf", "-0x50", "-0xf0", "-0x0f", "0xe1", "0x41"},
+ {"0x00", "-0x01", "0x00", "-0x01", "-0x01", "0x00"},
+ {"0x01", "0x01", "0x01", "0x01", "0x00", "0x00"},
+ {"-0x01", "-0x01", "-0x01", "-0x01", "0x00", "0x00"},
+ {"0x07", "0x08", "0x00", "0x0f", "0x0f", "0x07"},
+ {"0x05", "0x0f", "0x05", "0x0f", "0x0a", "0x00"},
+ {"0x013ff6", "0x9a4e", "0x1a46", "0x01bffe", "0x01a5b8", "0x0125b0"},
+ {"-0x013ff6", "0x9a4e", "0x800a", "-0x0125b2", "-0x01a5bc", "-0x01c000"},
+ {"-0x013ff6", "-0x9a4e", "-0x01bffe", "-0x1a46", "0x01a5b8", "0x8008"},
+ {
+ "0x1000009dc6e3d9822cba04129bcbe3401",
+ "0xb9bd7d543685789d57cb918e833af352559021483cdb05cc21fd",
+ "0x1000001186210100001000009048c2001",
+ "0xb9bd7d543685789d57cb918e8bfeff7fddb2ebe87dfbbdfe35fd",
+ "0xb9bd7d543685789d57ca918e8ae69d6fcdb2eae87df2b97215fc",
+ "0x8c40c2d8822caa04120b8321400",
+ },
+ {
+ "0x1000009dc6e3d9822cba04129bcbe3401",
+ "-0xb9bd7d543685789d57cb918e833af352559021483cdb05cc21fd",
+ "0x8c40c2d8822caa04120b8321401",
+ "-0xb9bd7d543685789d57ca918e82229142459020483cd2014001fd",
+ "-0xb9bd7d543685789d57ca918e8ae69d6fcdb2eae87df2b97215fe",
+ "0x1000001186210100001000009048c2000",
+ },
+ {
+ "-0x1000009dc6e3d9822cba04129bcbe3401",
+ "-0xb9bd7d543685789d57cb918e833af352559021483cdb05cc21fd",
+ "-0xb9bd7d543685789d57cb918e8bfeff7fddb2ebe87dfbbdfe35fd",
+ "-0x1000001186210100001000009048c2001",
+ "0xb9bd7d543685789d57ca918e8ae69d6fcdb2eae87df2b97215fc",
+ "0xb9bd7d543685789d57ca918e82229142459020483cd2014001fc",
+ },
+}
+
+type bitFun func(z, x, y *Int) *Int
+
+func testBitFun(t *testing.T, msg string, f bitFun, x, y *Int, exp string) {
+ expected := new(Int)
+ expected.SetString(exp, 0)
+
+ out := f(new(Int), x, y)
+ if out.Cmp(expected) != 0 {
+ t.Errorf("%s: got %s want %s", msg, out, expected)
+ }
+}
+
+func testBitFunSelf(t *testing.T, msg string, f bitFun, x, y *Int, exp string) {
+ self := new(Int)
+ self.Set(x)
+ expected := new(Int)
+ expected.SetString(exp, 0)
+
+ self = f(self, self, y)
+ if self.Cmp(expected) != 0 {
+ t.Errorf("%s: got %s want %s", msg, self, expected)
+ }
+}
+
+func altBit(x *Int, i int) uint {
+ z := new(Int).Rsh(x, uint(i))
+ z = z.And(z, NewInt(1))
+ if z.Cmp(new(Int)) != 0 {
+ return 1
+ }
+ return 0
+}
+
+func altSetBit(z *Int, x *Int, i int, b uint) *Int {
+ one := NewInt(1)
+ m := one.Lsh(one, uint(i))
+ switch b {
+ case 1:
+ return z.Or(x, m)
+ case 0:
+ return z.AndNot(x, m)
+ }
+ panic("set bit is not 0 or 1")
+}
+
+func testBitset(t *testing.T, x *Int) {
+ n := x.BitLen()
+ z := new(Int).Set(x)
+ z1 := new(Int).Set(x)
+ for i := 0; i < n+10; i++ {
+ old := z.Bit(i)
+ old1 := altBit(z1, i)
+ if old != old1 {
+ t.Errorf("bitset: inconsistent value for Bit(%s, %d), got %v want %v", z1, i, old, old1)
+ }
+ z := new(Int).SetBit(z, i, 1)
+ z1 := altSetBit(new(Int), z1, i, 1)
+ if z.Bit(i) == 0 {
+ t.Errorf("bitset: bit %d of %s got 0 want 1", i, x)
+ }
+ if z.Cmp(z1) != 0 {
+ t.Errorf("bitset: inconsistent value after SetBit 1, got %s want %s", z, z1)
+ }
+ z.SetBit(z, i, 0)
+ altSetBit(z1, z1, i, 0)
+ if z.Bit(i) != 0 {
+ t.Errorf("bitset: bit %d of %s got 1 want 0", i, x)
+ }
+ if z.Cmp(z1) != 0 {
+ t.Errorf("bitset: inconsistent value after SetBit 0, got %s want %s", z, z1)
+ }
+ altSetBit(z1, z1, i, old)
+ z.SetBit(z, i, old)
+ if z.Cmp(z1) != 0 {
+ t.Errorf("bitset: inconsistent value after SetBit old, got %s want %s", z, z1)
+ }
+ }
+ if z.Cmp(x) != 0 {
+ t.Errorf("bitset: got %s want %s", z, x)
+ }
+}
+
+var bitsetTests = []struct {
+ x string
+ i int
+ b uint
+}{
+ {"0", 0, 0},
+ {"0", 200, 0},
+ {"1", 0, 1},
+ {"1", 1, 0},
+ {"-1", 0, 1},
+ {"-1", 200, 1},
+ {"0x2000000000000000000000000000", 108, 0},
+ {"0x2000000000000000000000000000", 109, 1},
+ {"0x2000000000000000000000000000", 110, 0},
+ {"-0x2000000000000000000000000001", 108, 1},
+ {"-0x2000000000000000000000000001", 109, 0},
+ {"-0x2000000000000000000000000001", 110, 1},
+}
+
+func TestBitSet(t *testing.T) {
+ for _, test := range bitwiseTests {
+ x := new(Int)
+ x.SetString(test.x, 0)
+ testBitset(t, x)
+ x = new(Int)
+ x.SetString(test.y, 0)
+ testBitset(t, x)
+ }
+ for i, test := range bitsetTests {
+ x := new(Int)
+ x.SetString(test.x, 0)
+ b := x.Bit(test.i)
+ if b != test.b {
+ t.Errorf("#%d want %v got %v", i, test.b, b)
+ }
+ }
+}
+
+func BenchmarkBitset(b *testing.B) {
+ z := new(Int)
+ z.SetBit(z, 512, 1)
+ b.ResetTimer()
+ b.StartTimer()
+ for i := b.N - 1; i >= 0; i-- {
+ z.SetBit(z, i&512, 1)
+ }
+}
+
+func BenchmarkBitsetNeg(b *testing.B) {
+ z := NewInt(-1)
+ z.SetBit(z, 512, 0)
+ b.ResetTimer()
+ b.StartTimer()
+ for i := b.N - 1; i >= 0; i-- {
+ z.SetBit(z, i&512, 0)
+ }
+}
+
+func BenchmarkBitsetOrig(b *testing.B) {
+ z := new(Int)
+ altSetBit(z, z, 512, 1)
+ b.ResetTimer()
+ b.StartTimer()
+ for i := b.N - 1; i >= 0; i-- {
+ altSetBit(z, z, i&512, 1)
+ }
+}
+
+func BenchmarkBitsetNegOrig(b *testing.B) {
+ z := NewInt(-1)
+ altSetBit(z, z, 512, 0)
+ b.ResetTimer()
+ b.StartTimer()
+ for i := b.N - 1; i >= 0; i-- {
+ altSetBit(z, z, i&512, 0)
+ }
+}
+
+func TestBitwise(t *testing.T) {
+ x := new(Int)
+ y := new(Int)
+ for _, test := range bitwiseTests {
+ x.SetString(test.x, 0)
+ y.SetString(test.y, 0)
+
+ testBitFun(t, "and", (*Int).And, x, y, test.and)
+ testBitFunSelf(t, "and", (*Int).And, x, y, test.and)
+ testBitFun(t, "andNot", (*Int).AndNot, x, y, test.andNot)
+ testBitFunSelf(t, "andNot", (*Int).AndNot, x, y, test.andNot)
+ testBitFun(t, "or", (*Int).Or, x, y, test.or)
+ testBitFunSelf(t, "or", (*Int).Or, x, y, test.or)
+ testBitFun(t, "xor", (*Int).Xor, x, y, test.xor)
+ testBitFunSelf(t, "xor", (*Int).Xor, x, y, test.xor)
+ }
+}
+
+var notTests = []struct {
+ in string
+ out string
+}{
+ {"0", "-1"},
+ {"1", "-2"},
+ {"7", "-8"},
+ {"0", "-1"},
+ {"-81910", "81909"},
+ {
+ "298472983472983471903246121093472394872319615612417471234712061",
+ "-298472983472983471903246121093472394872319615612417471234712062",
+ },
+}
+
+func TestNot(t *testing.T) {
+ in := new(Int)
+ out := new(Int)
+ expected := new(Int)
+ for i, test := range notTests {
+ in.SetString(test.in, 10)
+ expected.SetString(test.out, 10)
+ out = out.Not(in)
+ if out.Cmp(expected) != 0 {
+ t.Errorf("#%d: got %s want %s", i, out, expected)
+ }
+ out = out.Not(out)
+ if out.Cmp(in) != 0 {
+ t.Errorf("#%d: got %s want %s", i, out, in)
+ }
+ }
+}
+
+var modInverseTests = []struct {
+ element string
+ prime string
+}{
+ {"1", "7"},
+ {"1", "13"},
+ {"239487239847", "2410312426921032588552076022197566074856950548502459942654116941958108831682612228890093858261341614673227141477904012196503648957050582631942730706805009223062734745341073406696246014589361659774041027169249453200378729434170325843778659198143763193776859869524088940195577346119843545301547043747207749969763750084308926339295559968882457872412993810129130294592999947926365264059284647209730384947211681434464714438488520940127459844288859336526896320919633919"},
+}
+
+func TestModInverse(t *testing.T) {
+ var element, prime Int
+ one := NewInt(1)
+ for i, test := range modInverseTests {
+ (&element).SetString(test.element, 10)
+ (&prime).SetString(test.prime, 10)
+ inverse := new(Int).ModInverse(&element, &prime)
+ inverse.Mul(inverse, &element)
+ inverse.Mod(inverse, &prime)
+ if inverse.Cmp(one) != 0 {
+ t.Errorf("#%d: failed (e·e^(-1)=%s)", i, inverse)
+ }
+ }
+}
+
+// used by TestIntGobEncoding and TestRatGobEncoding
+var gobEncodingTests = []string{
+ "0",
+ "1",
+ "2",
+ "10",
+ "42",
+ "1234567890",
+ "298472983472983471903246121093472394872319615612417471234712061",
+}
+
+func TestIntGobEncoding(t *testing.T) {
+ var medium bytes.Buffer
+ enc := gob.NewEncoder(&medium)
+ dec := gob.NewDecoder(&medium)
+ for i, test := range gobEncodingTests {
+ for j := 0; j < 2; j++ {
+ medium.Reset() // empty buffer for each test case (in case of failures)
+ stest := test
+ if j != 0 {
+ // negative numbers
+ stest = "-" + test
+ }
+ var tx Int
+ tx.SetString(stest, 10)
+ if err := enc.Encode(&tx); err != nil {
+ t.Errorf("#%d%c: encoding failed: %s", i, 'a'+j, err)
+ }
+ var rx Int
+ if err := dec.Decode(&rx); err != nil {
+ t.Errorf("#%d%c: decoding failed: %s", i, 'a'+j, err)
+ }
+ if rx.Cmp(&tx) != 0 {
+ t.Errorf("#%d%c: transmission failed: got %s want %s", i, 'a'+j, &rx, &tx)
+ }
+ }
+ }
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package big implements multi-precision arithmetic (big numbers).
+// The following numeric types are supported:
+//
+// - Int signed integers
+// - Rat rational numbers
+//
+// All methods on Int take the result as the receiver; if it is one
+// of the operands it may be overwritten (and its memory reused).
+// To enable chaining of operations, the result is also returned.
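+// For example, given *Int values x, y, and z, z.Mul(x, x).Add(z, y)
+// sets z to x*x + y and returns z.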
+//
+package big
+
+// This file contains operations on unsigned multi-precision integers.
+// These are the building blocks for the operations on signed integers
+// and rationals.
+
+import (
+ "errors"
+ "io"
+ "math/rand"
+)
+
+// An unsigned integer x of the form
+//
+// x = x[n-1]*_B^(n-1) + x[n-2]*_B^(n-2) + ... + x[1]*_B + x[0]
+//
+// with 0 <= x[i] < _B and 0 <= i < n is stored in a slice of length n,
+// with the digits x[i] as the slice elements.
+//
+// A number is normalized if the slice contains no leading 0 digits.
+// During arithmetic operations, denormalized values may occur but are
+// always normalized before returning the final result. The normalized
+// representation of 0 is the empty or nil slice (length = 0).
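+//
+// For example, with 32-bit Words (_B == 1<<32), the value 1<<32 + 5 is
+// stored as nat{5, 1}, least significant word first.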
+//
+type nat []Word
+
+var (
+ natOne = nat{1}
+ natTwo = nat{2}
+ natTen = nat{10}
+)
+
+func (z nat) clear() {
+ for i := range z {
+ z[i] = 0
+ }
+}
+
+func (z nat) norm() nat {
+ i := len(z)
+ for i > 0 && z[i-1] == 0 {
+ i--
+ }
+ return z[0:i]
+}
+
+func (z nat) make(n int) nat {
+ if n <= cap(z) {
+ return z[0:n] // reuse z
+ }
+ // Choosing a good value for e has significant performance impact
+ // because it increases the chance that a value can be reused.
+ const e = 4 // extra capacity
+ return make(nat, n, n+e)
+}
+
+func (z nat) setWord(x Word) nat {
+ if x == 0 {
+ return z.make(0)
+ }
+ z = z.make(1)
+ z[0] = x
+ return z
+}
+
+func (z nat) setUint64(x uint64) nat {
+ // single-digit values
+ if w := Word(x); uint64(w) == x {
+ return z.setWord(w)
+ }
+
+ // compute number of words n required to represent x
+ n := 0
+ for t := x; t > 0; t >>= _W {
+ n++
+ }
+
+ // split x into n words
+ z = z.make(n)
+ for i := range z {
+ z[i] = Word(x & _M)
+ x >>= _W
+ }
+
+ return z
+}
+
+func (z nat) set(x nat) nat {
+ z = z.make(len(x))
+ copy(z, x)
+ return z
+}
+
+func (z nat) add(x, y nat) nat {
+ m := len(x)
+ n := len(y)
+
+ switch {
+ case m < n:
+ return z.add(y, x)
+ case m == 0:
+ // n == 0 because m >= n; result is 0
+ return z.make(0)
+ case n == 0:
+ // result is x
+ return z.set(x)
+ }
+ // m > 0
+
+ z = z.make(m + 1)
+ c := addVV(z[0:n], x, y)
+ if m > n {
+ c = addVW(z[n:m], x[n:], c)
+ }
+ z[m] = c
+
+ return z.norm()
+}
+
+func (z nat) sub(x, y nat) nat {
+ m := len(x)
+ n := len(y)
+
+ switch {
+ case m < n:
+ panic("underflow")
+ case m == 0:
+ // n == 0 because m >= n; result is 0
+ return z.make(0)
+ case n == 0:
+ // result is x
+ return z.set(x)
+ }
+ // m > 0
+
+ z = z.make(m)
+ c := subVV(z[0:n], x, y)
+ if m > n {
+ c = subVW(z[n:], x[n:], c)
+ }
+ if c != 0 {
+ panic("underflow")
+ }
+
+ return z.norm()
+}
+
+func (x nat) cmp(y nat) (r int) {
+ m := len(x)
+ n := len(y)
+ if m != n || m == 0 {
+ switch {
+ case m < n:
+ r = -1
+ case m > n:
+ r = 1
+ }
+ return
+ }
+
+ i := m - 1
+ for i > 0 && x[i] == y[i] {
+ i--
+ }
+
+ switch {
+ case x[i] < y[i]:
+ r = -1
+ case x[i] > y[i]:
+ r = 1
+ }
+ return
+}
+
+func (z nat) mulAddWW(x nat, y, r Word) nat {
+ m := len(x)
+ if m == 0 || y == 0 {
+ return z.setWord(r) // result is r
+ }
+ // m > 0
+
+ z = z.make(m + 1)
+ z[m] = mulAddVWW(z[0:m], x, y, r)
+
+ return z.norm()
+}
+
+// basicMul multiplies x and y and leaves the result in z.
+// The (non-normalized) result is placed in z[0 : len(x) + len(y)].
+func basicMul(z, x, y nat) {
+ z[0 : len(x)+len(y)].clear() // initialize z
+ for i, d := range y {
+ if d != 0 {
+ z[len(x)+i] = addMulVVW(z[i:i+len(x)], x, d)
+ }
+ }
+}
+
+// Fast version of z[0:n+n>>1].add(z[0:n+n>>1], x[0:n]) w/o bounds checks.
+// Factored out for readability - do not use outside karatsuba.
+func karatsubaAdd(z, x nat, n int) {
+ if c := addVV(z[0:n], z, x); c != 0 {
+ addVW(z[n:n+n>>1], z[n:], c)
+ }
+}
+
+// Like karatsubaAdd, but does subtract.
+func karatsubaSub(z, x nat, n int) {
+ if c := subVV(z[0:n], z, x); c != 0 {
+ subVW(z[n:n+n>>1], z[n:], c)
+ }
+}
+
+// Operands that are shorter than karatsubaThreshold are multiplied using
+// "grade school" multiplication; for longer operands the Karatsuba algorithm
+// is used.
+var karatsubaThreshold int = 32 // computed by calibrate.go
+
+// karatsuba multiplies x and y and leaves the result in z.
+// Both x and y must have the same length n and n must be a
+// power of 2. The result vector z must have len(z) >= 6*n.
+// The (non-normalized) result is placed in z[0 : 2*n].
+func karatsuba(z, x, y nat) {
+ n := len(y)
+
+ // Switch to basic multiplication if numbers are odd or small.
+ // (n is always even if karatsubaThreshold is even, but be
+ // conservative)
+ if n&1 != 0 || n < karatsubaThreshold || n < 2 {
+ basicMul(z, x, y)
+ return
+ }
+ // n&1 == 0 && n >= karatsubaThreshold && n >= 2
+
+ // Karatsuba multiplication is based on the observation that
+ // for two numbers x and y with:
+ //
+ // x = x1*b + x0
+ // y = y1*b + y0
+ //
+ // the product x*y can be obtained with 3 products z2, z1, z0
+ // instead of 4:
+ //
+ // x*y = x1*y1*b*b + (x1*y0 + x0*y1)*b + x0*y0
+ // = z2*b*b + z1*b + z0
+ //
+ // with:
+ //
+ // xd = x1 - x0
+ // yd = y0 - y1
+ //
+ // z1 = xd*yd + z2 + z0
+ // = (x1-x0)*(y0-y1) + z2 + z0
+ // = x1*y0 - x1*y1 - x0*y0 + x0*y1 + z2 + z0
+ // = x1*y0 - z2 - z0 + x0*y1 + z2 + z0
+ // = x1*y0 + x0*y1
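+ //
+ // For example, in base b = 10 with x = 12 and y = 34: x1 = 1, x0 = 2,
+ // y1 = 3, y0 = 4, so z2 = 3, z0 = 8, xd = -1, yd = 1, and
+ // z1 = xd*yd + z2 + z0 = -1 + 3 + 8 = 10; the product is
+ // z2*100 + z1*10 + z0 = 408 = 12*34.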
+
+ // split x, y into "digits"
+ n2 := n >> 1 // n2 >= 1
+ x1, x0 := x[n2:], x[0:n2] // x = x1*b + x0
+ y1, y0 := y[n2:], y[0:n2] // y = y1*b + y0
+
+ // z is used for the result and temporary storage:
+ //
+ // 6*n 5*n 4*n 3*n 2*n 1*n 0*n
+ // z = [z2 copy|z0 copy| xd*yd | yd:xd | x1*y1 | x0*y0 ]
+ //
+ // For each recursive call of karatsuba, an unused slice of
+ // z is passed in that has (at least) half the length of the
+ // caller's z.
+
+ // compute z0 and z2 with the result "in place" in z
+ karatsuba(z, x0, y0) // z0 = x0*y0
+ karatsuba(z[n:], x1, y1) // z2 = x1*y1
+
+ // compute xd (or the negative value if underflow occurs)
+ s := 1 // sign of product xd*yd
+ xd := z[2*n : 2*n+n2]
+ if subVV(xd, x1, x0) != 0 { // x1-x0
+ s = -s
+ subVV(xd, x0, x1) // x0-x1
+ }
+
+ // compute yd (or the negative value if underflow occurs)
+ yd := z[2*n+n2 : 3*n]
+ if subVV(yd, y0, y1) != 0 { // y0-y1
+ s = -s
+ subVV(yd, y1, y0) // y1-y0
+ }
+
+ // p = (x1-x0)*(y0-y1) == x1*y0 - x1*y1 - x0*y0 + x0*y1 for s > 0
+ // p = (x0-x1)*(y0-y1) == x0*y0 - x0*y1 - x1*y0 + x1*y1 for s < 0
+ p := z[n*3:]
+ karatsuba(p, xd, yd)
+
+ // save original z2:z0
+ // (ok to use upper half of z since we're done recursing)
+ r := z[n*4:]
+ copy(r, z)
+
+ // add up all partial products
+ //
+ // 2*n n 0
+ // z = [ z2 | z0 ]
+ // + [ z0 ]
+ // + [ z2 ]
+ // + [ p ]
+ //
+ karatsubaAdd(z[n2:], r, n)
+ karatsubaAdd(z[n2:], r[n:], n)
+ if s > 0 {
+ karatsubaAdd(z[n2:], p, n)
+ } else {
+ karatsubaSub(z[n2:], p, n)
+ }
+}
+
+// alias returns true if x and y share the same base array.
+func alias(x, y nat) bool {
+ return cap(x) > 0 && cap(y) > 0 && &x[0:cap(x)][cap(x)-1] == &y[0:cap(y)][cap(y)-1]
+}
+
+// addAt implements z += x*(1<<(_W*i)); z must be long enough.
+// (we don't use nat.add because we need z to stay the same
+// slice, and we don't need to normalize z after each addition)
+func addAt(z, x nat, i int) {
+ if n := len(x); n > 0 {
+ if c := addVV(z[i:i+n], z[i:], x); c != 0 {
+ j := i + n
+ if j < len(z) {
+ addVW(z[j:], z[j:], c)
+ }
+ }
+ }
+}
+
+func max(x, y int) int {
+ if x > y {
+ return x
+ }
+ return y
+}
+
+// karatsubaLen computes an approximation to the maximum k <= n such that
+// k = p<<i for a number p <= karatsubaThreshold and an i >= 0. Thus, the
+// result is the largest number that can be divided repeatedly by 2 before
+// becoming about the value of karatsubaThreshold.
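+// For example, with karatsubaThreshold == 32, karatsubaLen(87) == 84 (21<<2).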
+func karatsubaLen(n int) int {
+ i := uint(0)
+ for n > karatsubaThreshold {
+ n >>= 1
+ i++
+ }
+ return n << i
+}
+
+func (z nat) mul(x, y nat) nat {
+ m := len(x)
+ n := len(y)
+
+ switch {
+ case m < n:
+ return z.mul(y, x)
+ case m == 0 || n == 0:
+ return z.make(0)
+ case n == 1:
+ return z.mulAddWW(x, y[0], 0)
+ }
+ // m >= n > 1
+
+ // determine if z can be reused
+ if alias(z, x) || alias(z, y) {
+ z = nil // z is an alias for x or y - cannot reuse
+ }
+
+ // use basic multiplication if the numbers are small
+ if n < karatsubaThreshold || n < 2 {
+ z = z.make(m + n)
+ basicMul(z, x, y)
+ return z.norm()
+ }
+ // m >= n && n >= karatsubaThreshold && n >= 2
+
+ // determine Karatsuba length k such that
+ //
+ // x = x1*b + x0
+ // y = y1*b + y0 (and k <= len(y), which implies k <= len(x))
+ // b = 1<<(_W*k) ("base" of digits xi, yi)
+ //
+ k := karatsubaLen(n)
+ // k <= n
+
+ // multiply x0 and y0 via Karatsuba
+ x0 := x[0:k] // x0 is not normalized
+ y0 := y[0:k] // y0 is not normalized
+ z = z.make(max(6*k, m+n)) // enough space for karatsuba of x0*y0 and full result of x*y
+ karatsuba(z, x0, y0)
+ z = z[0 : m+n] // z has final length but may be incomplete, upper portion is garbage
+
+ // If x1 and/or y1 are not 0, add missing terms to z explicitly:
+ //
+ // m+n 2*k 0
+ // z = [ ... | x0*y0 ]
+ // + [ x1*y1 ]
+ // + [ x1*y0 ]
+ // + [ x0*y1 ]
+ //
+ if k < n || m != n {
+ x1 := x[k:] // x1 is normalized because x is
+ y1 := y[k:] // y1 is normalized because y is
+ var t nat
+ t = t.mul(x1, y1)
+ copy(z[2*k:], t)
+ z[2*k+len(t):].clear() // upper portion of z is garbage
+ t = t.mul(x1, y0.norm())
+ addAt(z, t, k)
+ t = t.mul(x0.norm(), y1)
+ addAt(z, t, k)
+ }
+
+ return z.norm()
+}
+
+// mulRange computes the product of all the unsigned integers in the
+// range [a, b] inclusively. If a > b (empty range), the result is 1.
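+// For example, mulRange(1, 5) == 120 (5!) and mulRange(3, 2) == 1.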
+func (z nat) mulRange(a, b uint64) nat {
+ switch {
+ case a == 0:
+ // cut long ranges short (optimization)
+ return z.setUint64(0)
+ case a > b:
+ return z.setUint64(1)
+ case a == b:
+ return z.setUint64(a)
+ case a+1 == b:
+ return z.mul(nat{}.setUint64(a), nat{}.setUint64(b))
+ }
+ m := (a + b) / 2
+ return z.mul(nat{}.mulRange(a, m), nat{}.mulRange(m+1, b))
+}
+
+// q = (x-r)/y, with 0 <= r < y
+func (z nat) divW(x nat, y Word) (q nat, r Word) {
+ m := len(x)
+ switch {
+ case y == 0:
+ panic("division by zero")
+ case y == 1:
+ q = z.set(x) // result is x
+ return
+ case m == 0:
+ q = z.make(0) // result is 0
+ return
+ }
+ // m > 0
+ z = z.make(m)
+ r = divWVW(z, 0, x, y)
+ q = z.norm()
+ return
+}
+
+func (z nat) div(z2, u, v nat) (q, r nat) {
+ if len(v) == 0 {
+ panic("division by zero")
+ }
+
+ if u.cmp(v) < 0 {
+ q = z.make(0)
+ r = z2.set(u)
+ return
+ }
+
+ if len(v) == 1 {
+ var rprime Word
+ q, rprime = z.divW(u, v[0])
+ if rprime > 0 {
+ r = z2.make(1)
+ r[0] = rprime
+ } else {
+ r = z2.make(0)
+ }
+ return
+ }
+
+ q, r = z.divLarge(z2, u, v)
+ return
+}
+
+// q = (uIn-r)/v, with 0 <= r < v
+// Uses z as storage for q, and u as storage for r if possible.
+// See Knuth, Volume 2, section 4.3.1, Algorithm D.
+// Preconditions:
+// len(v) >= 2
+// len(uIn) >= len(v)
+func (z nat) divLarge(u, uIn, v nat) (q, r nat) {
+ n := len(v)
+ m := len(uIn) - n
+
+ // determine if z can be reused
+ // TODO(gri) should find a better solution - this if statement
+ // is very costly (see e.g. time pidigits -s -n 10000)
+ if alias(z, uIn) || alias(z, v) {
+ z = nil // z is an alias for uIn or v - cannot reuse
+ }
+ q = z.make(m + 1)
+
+ qhatv := make(nat, n+1)
+ if alias(u, uIn) || alias(u, v) {
+ u = nil // u is an alias for uIn or v - cannot reuse
+ }
+ u = u.make(len(uIn) + 1)
+ u.clear()
+
+ // D1.
+ shift := leadingZeros(v[n-1])
+ if shift > 0 {
+ // do not modify v, it may be used by another goroutine simultaneously
+ v1 := make(nat, n)
+ shlVU(v1, v, shift)
+ v = v1
+ }
+ u[len(uIn)] = shlVU(u[0:len(uIn)], uIn, shift)
+
+ // D2.
+ for j := m; j >= 0; j-- {
+ // D3.
+ qhat := Word(_M)
+ if u[j+n] != v[n-1] {
+ var rhat Word
+ qhat, rhat = divWW(u[j+n], u[j+n-1], v[n-1])
+
+ // x1 | x2 = q̂v_{n-2}
+ x1, x2 := mulWW(qhat, v[n-2])
+ // test if q̂v_{n-2} > br̂ + u_{j+n-2}
+ for greaterThan(x1, x2, rhat, u[j+n-2]) {
+ qhat--
+ prevRhat := rhat
+ rhat += v[n-1]
+ // v[n-1] >= 0, so this tests for overflow.
+ if rhat < prevRhat {
+ break
+ }
+ x1, x2 = mulWW(qhat, v[n-2])
+ }
+ }
+
+ // D4.
+ qhatv[n] = mulAddVWW(qhatv[0:n], v, qhat, 0)
+
+ c := subVV(u[j:j+len(qhatv)], u[j:], qhatv)
+ if c != 0 {
+ c := addVV(u[j:j+n], u[j:], v)
+ u[j+n] += c
+ qhat--
+ }
+
+ q[j] = qhat
+ }
+
+ q = q.norm()
+ shrVU(u, u, shift)
+ r = u.norm()
+
+ return q, r
+}
+
+// Length of x in bits. x must be normalized.
+func (x nat) bitLen() int {
+ if i := len(x) - 1; i >= 0 {
+ return i*_W + bitLen(x[i])
+ }
+ return 0
+}
+
+// MaxBase is the largest number base accepted for string conversions.
+const MaxBase = 'z' - 'a' + 10 + 1 // = hexValue('z') + 1
+
+func hexValue(ch rune) Word {
+ d := MaxBase + 1 // illegal base
+ switch {
+ case '0' <= ch && ch <= '9':
+ d = int(ch - '0')
+ case 'a' <= ch && ch <= 'z':
+ d = int(ch - 'a' + 10)
+ case 'A' <= ch && ch <= 'Z':
+ d = int(ch - 'A' + 10)
+ }
+ return Word(d)
+}
+
+// scan sets z to the natural number corresponding to the longest possible prefix
+// read from r representing an unsigned integer in a given conversion base.
+// It returns z, the actual conversion base used, and an error, if any. In the
+// error case, the value of z is undefined. The syntax follows the syntax of
+// unsigned integer literals in Go.
+//
+// The base argument must be 0 or a value from 2 through MaxBase. If the base
+// is 0, the string prefix determines the actual conversion base. A prefix of
+// ``0x'' or ``0X'' selects base 16; the ``0'' prefix selects base 8, and a
+// ``0b'' or ``0B'' prefix selects base 2. Otherwise the selected base is 10.
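+//
+// For example, scanning "0x1f" with base 0 yields the value 31 and the
+// actual conversion base 16.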
+//
+func (z nat) scan(r io.RuneScanner, base int) (nat, int, error) {
+ // reject illegal bases
+ if base < 0 || base == 1 || MaxBase < base {
+ return z, 0, errors.New("illegal number base")
+ }
+
+ // one char look-ahead
+ ch, _, err := r.ReadRune()
+ if err != nil {
+ return z, 0, err
+ }
+
+ // determine base if necessary
+ b := Word(base)
+ if base == 0 {
+ b = 10
+ if ch == '0' {
+ switch ch, _, err = r.ReadRune(); err {
+ case nil:
+ b = 8
+ switch ch {
+ case 'x', 'X':
+ b = 16
+ case 'b', 'B':
+ b = 2
+ }
+ if b == 2 || b == 16 {
+ if ch, _, err = r.ReadRune(); err != nil {
+ return z, 0, err
+ }
+ }
+ case io.EOF:
+ return z.make(0), 10, nil
+ default:
+ return z, 10, err
+ }
+ }
+ }
+
+ // convert string
+ // - group as many digits d as possible together into a "super-digit" dd with "super-base" bb
+ // - only when bb does not fit into a word anymore, do a full number mulAddWW using bb and dd
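+ // For example, with 32-bit words and base 10, up to nine decimal digits
+ // are accumulated into dd before a single mulAddWW call is made.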
+ z = z.make(0)
+ bb := Word(1)
+ dd := Word(0)
+ for max := _M / b; ; {
+ d := hexValue(ch)
+ if d >= b {
+ r.UnreadRune() // ch does not belong to number anymore
+ break
+ }
+
+ if bb <= max {
+ bb *= b
+ dd = dd*b + d
+ } else {
+ // bb * b would overflow
+ z = z.mulAddWW(z, bb, dd)
+ bb = b
+ dd = d
+ }
+
+ if ch, _, err = r.ReadRune(); err != nil {
+ if err != io.EOF {
+ return z, int(b), err
+ }
+ break
+ }
+ }
+
+ switch {
+ case bb > 1:
+ // there was at least one mantissa digit
+ z = z.mulAddWW(z, bb, dd)
+ case base == 0 && b == 8:
+ // there was only the octal prefix 0 (possibly followed by digits > 7);
+ // return base 10, not 8
+ return z, 10, nil
+ case base != 0 || b != 8:
+ // there was neither a mantissa digit nor the octal prefix 0
+ return z, int(b), errors.New("syntax error scanning number")
+ }
+
+ return z.norm(), int(b), nil
+}
+
+// Character sets for string conversion.
+const (
+ lowercaseDigits = "0123456789abcdefghijklmnopqrstuvwxyz"
+ uppercaseDigits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+)
+
+// decimalString returns a decimal representation of x.
+// It calls x.string with the charset "0123456789".
+func (x nat) decimalString() string {
+ return x.string(lowercaseDigits[0:10])
+}
+
+// string converts x to a string using digits from a charset; a digit with
+// value d is represented by charset[d]. The conversion base is determined
+// by len(charset), which must be >= 2.
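+// For example, x.string(lowercaseDigits[0:16]) yields the hexadecimal
+// representation of x with lowercase digits.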
+func (x nat) string(charset string) string {
+ b := Word(len(charset))
+
+ // special cases
+ switch {
+ case b < 2 || b > 256:
+ panic("illegal base")
+ case len(x) == 0:
+ return string(charset[0])
+ }
+
+ // allocate buffer for conversion
+ i := x.bitLen()/log2(b) + 1 // +1: round up
+ s := make([]byte, i)
+
+ // special case: power of two bases can avoid divisions completely
+ if b == b&-b {
+ // shift is base-b digit size in bits
+ shift := uint(trailingZeroBits(b)) // shift > 0 because b >= 2
+ mask := Word(1)<<shift - 1
+ w := x[0]
+ nbits := uint(_W) // number of unprocessed bits in w
+
+ // convert less-significant words
+ for k := 1; k < len(x); k++ {
+ // convert full digits
+ for nbits >= shift {
+ i--
+ s[i] = charset[w&mask]
+ w >>= shift
+ nbits -= shift
+ }
+
+ // convert any partial leading digit and advance to next word
+ if nbits == 0 {
+ // no partial digit remaining, just advance
+ w = x[k]
+ nbits = _W
+ } else {
+ // partial digit in current (k-1) and next (k) word
+ w |= x[k] << nbits
+ i--
+ s[i] = charset[w&mask]
+
+ // advance
+ w = x[k] >> (shift - nbits)
+ nbits = _W - (shift - nbits)
+ }
+ }
+
+ // convert digits of most-significant word (omit leading zeros)
+ for nbits >= 0 && w != 0 {
+ i--
+ s[i] = charset[w&mask]
+ w >>= shift
+ nbits -= shift
+ }
+
+ return string(s[i:])
+ }
+
+ // general case: extract groups of digits by multiprecision division
+
+ // maximize ndigits where b**ndigits < 2^_W; bb (big base) is b**ndigits
+ bb := Word(1)
+ ndigits := 0
+ for max := Word(_M / b); bb <= max; bb *= b {
+ ndigits++
+ }
+
+ // preserve x, create local copy for use in repeated divisions
+ q := nat{}.set(x)
+ var r Word
+
+ // convert
+ if b == 10 { // hard-coding for 10 here speeds this up by 1.25x
+ for len(q) > 0 {
+ // extract least significant, base bb "digit"
+ q, r = q.divW(q, bb) // N.B. >82% of time is here. Optimize divW
+ if len(q) == 0 {
+ // skip leading zeros in most-significant group of digits
+ for j := 0; j < ndigits && r != 0; j++ {
+ i--
+ s[i] = charset[r%10]
+ r /= 10
+ }
+ } else {
+ for j := 0; j < ndigits; j++ {
+ i--
+ s[i] = charset[r%10]
+ r /= 10
+ }
+ }
+ }
+ } else {
+ for len(q) > 0 {
+ // extract least significant group of digits
+ q, r = q.divW(q, bb) // N.B. >82% of time is here. Optimize divW
+ if len(q) == 0 {
+ // skip leading zeros in most-significant group of digits
+ for j := 0; j < ndigits && r != 0; j++ {
+ i--
+ s[i] = charset[r%b]
+ r /= b
+ }
+ } else {
+ for j := 0; j < ndigits; j++ {
+ i--
+ s[i] = charset[r%b]
+ r /= b
+ }
+ }
+ }
+ }
+
+ return string(s[i:])
+}
+
+const deBruijn32 = 0x077CB531
+
+var deBruijn32Lookup = []byte{
+ 0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
+ 31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9,
+}
+
+const deBruijn64 = 0x03f79d71b4ca8b09
+
+var deBruijn64Lookup = []byte{
+ 0, 1, 56, 2, 57, 49, 28, 3, 61, 58, 42, 50, 38, 29, 17, 4,
+ 62, 47, 59, 36, 45, 43, 51, 22, 53, 39, 33, 30, 24, 18, 12, 5,
+ 63, 55, 48, 27, 60, 41, 37, 16, 46, 35, 44, 21, 52, 32, 23, 11,
+ 54, 26, 40, 15, 34, 20, 31, 10, 25, 14, 19, 9, 13, 8, 7, 6,
+}
+
+// trailingZeroBits returns the number of consecutive zero bits on the right
+// side of the given Word.
+// See Knuth, volume 4, section 7.3.1
+func trailingZeroBits(x Word) int {
+ // x & -x leaves only the right-most bit set in the word. Let k be the
+ // index of that bit. Since only a single bit is set, the value is two
+ // to the power of k. Multiplying by a power of two is equivalent to
+ // left shifting, in this case by k bits. The de Bruijn constant is
+ // chosen so that all of its consecutive bit substrings of the relevant
+ // length (five bits for the 32-bit constant, six bits for the 64-bit
+ // constant) are distinct. Therefore, given a left-shifted version of
+ // the constant, we can find by how many bits it was shifted by looking
+ // at which substring ended up at the top of the word.
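+ //
+ // For example, with 32-bit words and x == 8 (three trailing zeros),
+ // x&-x == 8 and 8*deBruijn32 == deBruijn32<<3; shifting that product
+ // right by 27 leaves 7, and deBruijn32Lookup[7] == 3.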
+ switch _W {
+ case 32:
+ return int(deBruijn32Lookup[((x&-x)*deBruijn32)>>27])
+ case 64:
+ return int(deBruijn64Lookup[((x&-x)*(deBruijn64&_M))>>58])
+ default:
+ panic("Unknown word size")
+ }
+
+ return 0
+}
+
+// z = x << s
+func (z nat) shl(x nat, s uint) nat {
+ m := len(x)
+ if m == 0 {
+ return z.make(0)
+ }
+ // m > 0
+
+ n := m + int(s/_W)
+ z = z.make(n + 1)
+ z[n] = shlVU(z[n-m:n], x, s%_W)
+ z[0 : n-m].clear()
+
+ return z.norm()
+}
+
+// z = x >> s
+func (z nat) shr(x nat, s uint) nat {
+ m := len(x)
+ n := m - int(s/_W)
+ if n <= 0 {
+ return z.make(0)
+ }
+ // n > 0
+
+ z = z.make(n)
+ shrVU(z, x[m-n:], s%_W)
+
+ return z.norm()
+}
+
+func (z nat) setBit(x nat, i uint, b uint) nat {
+ j := int(i / _W)
+ m := Word(1) << (i % _W)
+ n := len(x)
+ switch b {
+ case 0:
+ z = z.make(n)
+ copy(z, x)
+ if j >= n {
+ // no need to grow
+ return z
+ }
+ z[j] &^= m
+ return z.norm()
+ case 1:
+ if j >= n {
+ n = j + 1
+ }
+ z = z.make(n)
+ copy(z, x)
+ z[j] |= m
+ // no need to normalize
+ return z
+ }
+ panic("set bit is not 0 or 1")
+}
+
+func (z nat) bit(i uint) uint {
+ j := int(i / _W)
+ if j >= len(z) {
+ return 0
+ }
+ return uint(z[j] >> (i % _W) & 1)
+}
+
+func (z nat) and(x, y nat) nat {
+ m := len(x)
+ n := len(y)
+ if m > n {
+ m = n
+ }
+ // m <= n
+
+ z = z.make(m)
+ for i := 0; i < m; i++ {
+ z[i] = x[i] & y[i]
+ }
+
+ return z.norm()
+}
+
+func (z nat) andNot(x, y nat) nat {
+ m := len(x)
+ n := len(y)
+ if n > m {
+ n = m
+ }
+ // m >= n
+
+ z = z.make(m)
+ for i := 0; i < n; i++ {
+ z[i] = x[i] &^ y[i]
+ }
+ copy(z[n:m], x[n:m])
+
+ return z.norm()
+}
+
+func (z nat) or(x, y nat) nat {
+ m := len(x)
+ n := len(y)
+ s := x
+ if m < n {
+ n, m = m, n
+ s = y
+ }
+ // m >= n
+
+ z = z.make(m)
+ for i := 0; i < n; i++ {
+ z[i] = x[i] | y[i]
+ }
+ copy(z[n:m], s[n:m])
+
+ return z.norm()
+}
+
+func (z nat) xor(x, y nat) nat {
+ m := len(x)
+ n := len(y)
+ s := x
+ if m < n {
+ n, m = m, n
+ s = y
+ }
+ // m >= n
+
+ z = z.make(m)
+ for i := 0; i < n; i++ {
+ z[i] = x[i] ^ y[i]
+ }
+ copy(z[n:m], s[n:m])
+
+ return z.norm()
+}
+
+// greaterThan returns true iff (x1<<_W + x2) > (y1<<_W + y2)
+func greaterThan(x1, x2, y1, y2 Word) bool {
+ return x1 > y1 || x1 == y1 && x2 > y2
+}
+
+// modW returns x % d.
+func (x nat) modW(d Word) (r Word) {
+ // TODO(agl): we don't actually need to store the q value.
+ var q nat
+ q = q.make(len(x))
+ return divWVW(q, 0, x, d)
+}
+
+// powersOfTwoDecompose finds q and k with x = q * 1<<k and q is odd, or q and k are 0.
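+// For example, x = nat{40} (binary 101000) yields q = nat{5} and k = 3.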
+func (x nat) powersOfTwoDecompose() (q nat, k int) {
+ if len(x) == 0 {
+ return x, 0
+ }
+
+ // One of the words must be non-zero by definition,
+ // so this loop will terminate with i < len(x), and
+ // i is the number of 0 words.
+ i := 0
+ for x[i] == 0 {
+ i++
+ }
+ n := trailingZeroBits(x[i]) // x[i] != 0
+
+ q = make(nat, len(x)-i)
+ shrVU(q, x[i:], uint(n))
+
+ q = q.norm()
+ k = i*_W + n
+ return
+}
+
+// random creates a random integer in [0..limit), using the space in z if
+// possible. n is the bit length of limit.
+func (z nat) random(rand *rand.Rand, limit nat, n int) nat {
+ bitLengthOfMSW := uint(n % _W)
+ if bitLengthOfMSW == 0 {
+ bitLengthOfMSW = _W
+ }
+ mask := Word((1 << bitLengthOfMSW) - 1)
+ z = z.make(len(limit))
+
+ for {
+ for i := range z {
+ switch _W {
+ case 32:
+ z[i] = Word(rand.Uint32())
+ case 64:
+ z[i] = Word(rand.Uint32()) | Word(rand.Uint32())<<32
+ }
+ }
+
+ z[len(limit)-1] &= mask
+
+ if z.cmp(limit) < 0 {
+ break
+ }
+ }
+
+ return z.norm()
+}
+
+// If m != nil, expNN calculates x**y mod m. Otherwise it calculates x**y. It
+// reuses the storage of z if possible.
+func (z nat) expNN(x, y, m nat) nat {
+ if alias(z, x) || alias(z, y) {
+ // We cannot allow in place modification of x or y.
+ z = nil
+ }
+
+ if len(y) == 0 {
+ z = z.make(1)
+ z[0] = 1
+ return z
+ }
+
+ if m != nil {
+ // The result will likely end up about as long as the modulus.
+ z = z.make(len(m))
+ }
+ z = z.set(x)
+ v := y[len(y)-1]
+ // It's invalid for the most significant word to be zero, therefore we
+ // will find a one bit.
+ shift := leadingZeros(v) + 1
+ v <<= shift
+ var q nat
+
+ const mask = 1 << (_W - 1)
+
+ // We walk through the bits of the exponent one by one. Each time we
+ // see a bit, we square, thus doubling the power. If the bit is a one,
+ // we also multiply by x, thus adding one to the power.
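+ // For example, for y = 13 (binary 1101), z starts as x for the leading
+ // one bit; the remaining bits 1, 0, 1 give z = ((x*x*x)^2)^2 * x = x^13.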
+
+ w := _W - int(shift)
+ for j := 0; j < w; j++ {
+ z = z.mul(z, z)
+
+ if v&mask != 0 {
+ z = z.mul(z, x)
+ }
+
+ if m != nil {
+ q, z = q.div(z, z, m)
+ }
+
+ v <<= 1
+ }
+
+ for i := len(y) - 2; i >= 0; i-- {
+ v = y[i]
+
+ for j := 0; j < _W; j++ {
+ z = z.mul(z, z)
+
+ if v&mask != 0 {
+ z = z.mul(z, x)
+ }
+
+ if m != nil {
+ q, z = q.div(z, z, m)
+ }
+
+ v <<= 1
+ }
+ }
+
+ return z
+}
+
+// probablyPrime performs reps Miller-Rabin tests to check whether n is prime.
+// If it returns true, n is prime with probability 1 - 1/4^reps.
+// If it returns false, n is not prime.
+func (n nat) probablyPrime(reps int) bool {
+ if len(n) == 0 {
+ return false
+ }
+
+ if len(n) == 1 {
+ if n[0] < 2 {
+ return false
+ }
+
+ if n[0]%2 == 0 {
+ return n[0] == 2
+ }
+
+ // We have to exclude these cases because we reject all
+ // multiples of these numbers below.
+ switch n[0] {
+ case 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53:
+ return true
+ }
+ }
+
+ const primesProduct32 = 0xC0CFD797 // Π {p ∈ primes, 2 < p <= 29}
+ const primesProduct64 = 0xE221F97C30E94E1D // Π {p ∈ primes, 2 < p <= 53}
+
+ var r Word
+ switch _W {
+ case 32:
+ r = n.modW(primesProduct32)
+ case 64:
+ r = n.modW(primesProduct64 & _M)
+ default:
+ panic("Unknown word size")
+ }
+
+ if r%3 == 0 || r%5 == 0 || r%7 == 0 || r%11 == 0 ||
+ r%13 == 0 || r%17 == 0 || r%19 == 0 || r%23 == 0 || r%29 == 0 {
+ return false
+ }
+
+ if _W == 64 && (r%31 == 0 || r%37 == 0 || r%41 == 0 ||
+ r%43 == 0 || r%47 == 0 || r%53 == 0) {
+ return false
+ }
+
+ nm1 := nat{}.sub(n, natOne)
+ // 1<<k * q = nm1;
+ q, k := nm1.powersOfTwoDecompose()
+
+ nm3 := nat{}.sub(nm1, natTwo)
+ rand := rand.New(rand.NewSource(int64(n[0])))
+
+ var x, y, quotient nat
+ nm3Len := nm3.bitLen()
+
+NextRandom:
+ for i := 0; i < reps; i++ {
+ x = x.random(rand, nm3, nm3Len)
+ x = x.add(x, natTwo)
+ y = y.expNN(x, q, n)
+ if y.cmp(natOne) == 0 || y.cmp(nm1) == 0 {
+ continue
+ }
+ for j := 1; j < k; j++ {
+ y = y.mul(y, y)
+ quotient, y = quotient.div(y, y, n)
+ if y.cmp(nm1) == 0 {
+ continue NextRandom
+ }
+ if y.cmp(natOne) == 0 {
+ return false
+ }
+ }
+ return false
+ }
+
+ return true
+}
+
+// bytes writes the value of z into buf using big-endian encoding.
+// len(buf) must be >= len(z)*_S. The value of z is encoded in the
+// slice buf[i:]. The number i of unused bytes at the beginning of
+// buf is returned as result.
+func (z nat) bytes(buf []byte) (i int) {
+ i = len(buf)
+ for _, d := range z {
+ for j := 0; j < _S; j++ {
+ i--
+ buf[i] = byte(d)
+ d >>= 8
+ }
+ }
+
+ for i < len(buf) && buf[i] == 0 {
+ i++
+ }
+
+ return
+}
+
+// setBytes interprets buf as the bytes of a big-endian unsigned
+// integer, sets z to that value, and returns z.
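+// For example, setBytes([]byte{0x01, 0x00}) sets z to 256.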
+func (z nat) setBytes(buf []byte) nat {
+ z = z.make((len(buf) + _S - 1) / _S)
+
+ k := 0
+ s := uint(0)
+ var d Word
+ for i := len(buf); i > 0; i-- {
+ d |= Word(buf[i-1]) << s
+ if s += 8; s == _S*8 {
+ z[k] = d
+ k++
+ s = 0
+ d = 0
+ }
+ }
+ if k < len(z) {
+ z[k] = d
+ }
+
+ return z.norm()
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package big
+
+import (
+ "fmt"
+ "io"
+ "strings"
+ "testing"
+)
+
+var cmpTests = []struct {
+ x, y nat
+ r int
+}{
+ {nil, nil, 0},
+ {nil, nat{}, 0},
+ {nat{}, nil, 0},
+ {nat{}, nat{}, 0},
+ {nat{0}, nat{0}, 0},
+ {nat{0}, nat{1}, -1},
+ {nat{1}, nat{0}, 1},
+ {nat{1}, nat{1}, 0},
+ {nat{0, _M}, nat{1}, 1},
+ {nat{1}, nat{0, _M}, -1},
+ {nat{1, _M}, nat{0, _M}, 1},
+ {nat{0, _M}, nat{1, _M}, -1},
+ {nat{16, 571956, 8794, 68}, nat{837, 9146, 1, 754489}, -1},
+ {nat{34986, 41, 105, 1957}, nat{56, 7458, 104, 1957}, 1},
+}
+
+func TestCmp(t *testing.T) {
+ for i, a := range cmpTests {
+ r := a.x.cmp(a.y)
+ if r != a.r {
+ t.Errorf("#%d got r = %v; want %v", i, r, a.r)
+ }
+ }
+}
+
+type funNN func(z, x, y nat) nat
+type argNN struct {
+ z, x, y nat
+}
+
+var sumNN = []argNN{
+ {},
+ {nat{1}, nil, nat{1}},
+ {nat{1111111110}, nat{123456789}, nat{987654321}},
+ {nat{0, 0, 0, 1}, nil, nat{0, 0, 0, 1}},
+ {nat{0, 0, 0, 1111111110}, nat{0, 0, 0, 123456789}, nat{0, 0, 0, 987654321}},
+ {nat{0, 0, 0, 1}, nat{0, 0, _M}, nat{0, 0, 1}},
+}
+
+var prodNN = []argNN{
+ {},
+ {nil, nil, nil},
+ {nil, nat{991}, nil},
+ {nat{991}, nat{991}, nat{1}},
+ {nat{991 * 991}, nat{991}, nat{991}},
+ {nat{0, 0, 991 * 991}, nat{0, 991}, nat{0, 991}},
+ {nat{1 * 991, 2 * 991, 3 * 991, 4 * 991}, nat{1, 2, 3, 4}, nat{991}},
+ {nat{4, 11, 20, 30, 20, 11, 4}, nat{1, 2, 3, 4}, nat{4, 3, 2, 1}},
+}
+
+func TestSet(t *testing.T) {
+ for _, a := range sumNN {
+ z := nat{}.set(a.z)
+ if z.cmp(a.z) != 0 {
+ t.Errorf("got z = %v; want %v", z, a.z)
+ }
+ }
+}
+
+func testFunNN(t *testing.T, msg string, f funNN, a argNN) {
+ z := f(nil, a.x, a.y)
+ if z.cmp(a.z) != 0 {
+ t.Errorf("%s%+v\n\tgot z = %v; want %v", msg, a, z, a.z)
+ }
+}
+
+func TestFunNN(t *testing.T) {
+ for _, a := range sumNN {
+ arg := a
+ testFunNN(t, "add", nat.add, arg)
+
+ arg = argNN{a.z, a.y, a.x}
+ testFunNN(t, "add symmetric", nat.add, arg)
+
+ arg = argNN{a.x, a.z, a.y}
+ testFunNN(t, "sub", nat.sub, arg)
+
+ arg = argNN{a.y, a.z, a.x}
+ testFunNN(t, "sub symmetric", nat.sub, arg)
+ }
+
+ for _, a := range prodNN {
+ arg := a
+ testFunNN(t, "mul", nat.mul, arg)
+
+ arg = argNN{a.z, a.y, a.x}
+ testFunNN(t, "mul symmetric", nat.mul, arg)
+ }
+}
+
+var mulRangesN = []struct {
+ a, b uint64
+ prod string
+}{
+ {0, 0, "0"},
+ {1, 1, "1"},
+ {1, 2, "2"},
+ {1, 3, "6"},
+ {10, 10, "10"},
+ {0, 100, "0"},
+ {0, 1e9, "0"},
+ {1, 0, "1"}, // empty range
+ {100, 1, "1"}, // empty range
+ {1, 10, "3628800"}, // 10!
+ {1, 20, "2432902008176640000"}, // 20!
+ {1, 100,
+ "933262154439441526816992388562667004907159682643816214685929" +
+ "638952175999932299156089414639761565182862536979208272237582" +
+ "51185210916864000000000000000000000000", // 100!
+ },
+}
+
+func TestMulRangeN(t *testing.T) {
+ for i, r := range mulRangesN {
+ prod := nat{}.mulRange(r.a, r.b).decimalString()
+ if prod != r.prod {
+ t.Errorf("#%d: got %s; want %s", i, prod, r.prod)
+ }
+ }
+}
+
+var mulArg, mulTmp nat
+
+func init() {
+ const n = 1000
+ mulArg = make(nat, n)
+ for i := 0; i < n; i++ {
+ mulArg[i] = _M
+ }
+}
+
+func benchmarkMulLoad() {
+ for j := 1; j <= 10; j++ {
+ x := mulArg[0 : j*100]
+ mulTmp.mul(x, x)
+ }
+}
+
+func BenchmarkMul(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ benchmarkMulLoad()
+ }
+}
+
+func toString(x nat, charset string) string {
+ base := len(charset)
+
+ // special cases
+ switch {
+ case base < 2:
+ panic("illegal base")
+ case len(x) == 0:
+ return string(charset[0])
+ }
+
+ // allocate buffer for conversion
+ i := x.bitLen()/log2(Word(base)) + 1 // +1: round up
+ s := make([]byte, i)
+
+ // don't destroy x
+ q := nat{}.set(x)
+
+ // convert
+ for len(q) > 0 {
+ i--
+ var r Word
+ q, r = q.divW(q, Word(base))
+ s[i] = charset[r]
+ }
+
+ return string(s[i:])
+}
+
+var strTests = []struct {
+ x nat // nat value to be converted
+ c string // conversion charset
+ s string // expected result
+}{
+ {nil, "01", "0"},
+ {nat{1}, "01", "1"},
+ {nat{0xc5}, "01", "11000101"},
+ {nat{03271}, lowercaseDigits[0:8], "3271"},
+ {nat{10}, lowercaseDigits[0:10], "10"},
+ {nat{1234567890}, uppercaseDigits[0:10], "1234567890"},
+ {nat{0xdeadbeef}, lowercaseDigits[0:16], "deadbeef"},
+ {nat{0xdeadbeef}, uppercaseDigits[0:16], "DEADBEEF"},
+ {nat{0x229be7}, lowercaseDigits[0:17], "1a2b3c"},
+ {nat{0x309663e6}, uppercaseDigits[0:32], "O9COV6"},
+}
+
+func TestString(t *testing.T) {
+ for _, a := range strTests {
+ s := a.x.string(a.c)
+ if s != a.s {
+ t.Errorf("string%+v\n\tgot s = %s; want %s", a, s, a.s)
+ }
+
+ x, b, err := nat{}.scan(strings.NewReader(a.s), len(a.c))
+ if x.cmp(a.x) != 0 {
+ t.Errorf("scan%+v\n\tgot z = %v; want %v", a, x, a.x)
+ }
+ if b != len(a.c) {
+ t.Errorf("scan%+v\n\tgot b = %d; want %d", a, b, len(a.c))
+ }
+ if err != nil {
+ t.Errorf("scan%+v\n\tgot error = %s", a, err)
+ }
+ }
+}
+
+var natScanTests = []struct {
+ s string // string to be scanned
+ base int // input base
+ x nat // expected nat
+ b int // expected base
+ ok bool // expected success
+ next rune // next character (or 0, if at EOF)
+}{
+ // error: illegal base
+ {base: -1},
+ {base: 1},
+ {base: 37},
+
+ // error: no mantissa
+ {},
+ {s: "?"},
+ {base: 10},
+ {base: 36},
+ {s: "?", base: 10},
+ {s: "0x"},
+ {s: "345", base: 2},
+
+ // no errors
+ {"0", 0, nil, 10, true, 0},
+ {"0", 10, nil, 10, true, 0},
+ {"0", 36, nil, 36, true, 0},
+ {"1", 0, nat{1}, 10, true, 0},
+ {"1", 10, nat{1}, 10, true, 0},
+ {"0 ", 0, nil, 10, true, ' '},
+ {"08", 0, nil, 10, true, '8'},
+ {"018", 0, nat{1}, 8, true, '8'},
+ {"0b1", 0, nat{1}, 2, true, 0},
+ {"0b11000101", 0, nat{0xc5}, 2, true, 0},
+ {"03271", 0, nat{03271}, 8, true, 0},
+ {"10ab", 0, nat{10}, 10, true, 'a'},
+ {"1234567890", 0, nat{1234567890}, 10, true, 0},
+ {"xyz", 36, nat{(33*36+34)*36 + 35}, 36, true, 0},
+ {"xyz?", 36, nat{(33*36+34)*36 + 35}, 36, true, '?'},
+ {"0x", 16, nil, 16, true, 'x'},
+ {"0xdeadbeef", 0, nat{0xdeadbeef}, 16, true, 0},
+ {"0XDEADBEEF", 0, nat{0xdeadbeef}, 16, true, 0},
+}
+
+func TestScanBase(t *testing.T) {
+ for _, a := range natScanTests {
+ r := strings.NewReader(a.s)
+ x, b, err := nat{}.scan(r, a.base)
+ if err == nil && !a.ok {
+ t.Errorf("scan%+v\n\texpected error", a)
+ }
+ if err != nil {
+ if a.ok {
+ t.Errorf("scan%+v\n\tgot error = %s", a, err)
+ }
+ continue
+ }
+ if x.cmp(a.x) != 0 {
+ t.Errorf("scan%+v\n\tgot z = %v; want %v", a, x, a.x)
+ }
+ if b != a.b {
+ t.Errorf("scan%+v\n\tgot b = %d; want %d", a, b, a.base)
+ }
+ next, _, err := r.ReadRune()
+ if err == io.EOF {
+ next = 0
+ err = nil
+ }
+ if err == nil && next != a.next {
+ t.Errorf("scan%+v\n\tgot next = %q; want %q", a, next, a.next)
+ }
+ }
+}
+
+var pi = "3" +
+ "14159265358979323846264338327950288419716939937510582097494459230781640628620899862803482534211706798214808651" +
+ "32823066470938446095505822317253594081284811174502841027019385211055596446229489549303819644288109756659334461" +
+ "28475648233786783165271201909145648566923460348610454326648213393607260249141273724587006606315588174881520920" +
+ "96282925409171536436789259036001133053054882046652138414695194151160943305727036575959195309218611738193261179" +
+ "31051185480744623799627495673518857527248912279381830119491298336733624406566430860213949463952247371907021798" +
+ "60943702770539217176293176752384674818467669405132000568127145263560827785771342757789609173637178721468440901" +
+ "22495343014654958537105079227968925892354201995611212902196086403441815981362977477130996051870721134999999837" +
+ "29780499510597317328160963185950244594553469083026425223082533446850352619311881710100031378387528865875332083" +
+ "81420617177669147303598253490428755468731159562863882353787593751957781857780532171226806613001927876611195909" +
+ "21642019893809525720106548586327886593615338182796823030195203530185296899577362259941389124972177528347913151" +
+ "55748572424541506959508295331168617278558890750983817546374649393192550604009277016711390098488240128583616035" +
+ "63707660104710181942955596198946767837449448255379774726847104047534646208046684259069491293313677028989152104" +
+ "75216205696602405803815019351125338243003558764024749647326391419927260426992279678235478163600934172164121992" +
+ "45863150302861829745557067498385054945885869269956909272107975093029553211653449872027559602364806654991198818" +
+ "34797753566369807426542527862551818417574672890977772793800081647060016145249192173217214772350141441973568548" +
+ "16136115735255213347574184946843852332390739414333454776241686251898356948556209921922218427255025425688767179" +
+ "04946016534668049886272327917860857843838279679766814541009538837863609506800642251252051173929848960841284886" +
+ "26945604241965285022210661186306744278622039194945047123713786960956364371917287467764657573962413890865832645" +
+ "99581339047802759009946576407895126946839835259570982582262052248940772671947826848260147699090264013639443745" +
+ "53050682034962524517493996514314298091906592509372216964615157098583874105978859597729754989301617539284681382" +
+ "68683868942774155991855925245953959431049972524680845987273644695848653836736222626099124608051243884390451244" +
+ "13654976278079771569143599770012961608944169486855584840635342207222582848864815845602850601684273945226746767" +
+ "88952521385225499546667278239864565961163548862305774564980355936345681743241125150760694794510965960940252288" +
+ "79710893145669136867228748940560101503308617928680920874760917824938589009714909675985261365549781893129784821" +
+ "68299894872265880485756401427047755513237964145152374623436454285844479526586782105114135473573952311342716610" +
+ "21359695362314429524849371871101457654035902799344037420073105785390621983874478084784896833214457138687519435" +
+ "06430218453191048481005370614680674919278191197939952061419663428754440643745123718192179998391015919561814675" +
+ "14269123974894090718649423196156794520809514655022523160388193014209376213785595663893778708303906979207734672" +
+ "21825625996615014215030680384477345492026054146659252014974428507325186660021324340881907104863317346496514539" +
+ "05796268561005508106658796998163574736384052571459102897064140110971206280439039759515677157700420337869936007" +
+ "23055876317635942187312514712053292819182618612586732157919841484882916447060957527069572209175671167229109816" +
+ "90915280173506712748583222871835209353965725121083579151369882091444210067510334671103141267111369908658516398" +
+ "31501970165151168517143765761835155650884909989859982387345528331635507647918535893226185489632132933089857064" +
+ "20467525907091548141654985946163718027098199430992448895757128289059232332609729971208443357326548938239119325" +
+ "97463667305836041428138830320382490375898524374417029132765618093773444030707469211201913020330380197621101100" +
+ "44929321516084244485963766983895228684783123552658213144957685726243344189303968642624341077322697802807318915" +
+ "44110104468232527162010526522721116603966655730925471105578537634668206531098965269186205647693125705863566201" +
+ "85581007293606598764861179104533488503461136576867532494416680396265797877185560845529654126654085306143444318" +
+ "58676975145661406800700237877659134401712749470420562230538994561314071127000407854733269939081454664645880797" +
+ "27082668306343285878569830523580893306575740679545716377525420211495576158140025012622859413021647155097925923" +
+ "09907965473761255176567513575178296664547791745011299614890304639947132962107340437518957359614589019389713111" +
+ "79042978285647503203198691514028708085990480109412147221317947647772622414254854540332157185306142288137585043" +
+ "06332175182979866223717215916077166925474873898665494945011465406284336639379003976926567214638530673609657120" +
+ "91807638327166416274888800786925602902284721040317211860820419000422966171196377921337575114959501566049631862" +
+ "94726547364252308177036751590673502350728354056704038674351362222477158915049530984448933309634087807693259939" +
+ "78054193414473774418426312986080998886874132604721569516239658645730216315981931951673538129741677294786724229" +
+ "24654366800980676928238280689964004824354037014163149658979409243237896907069779422362508221688957383798623001" +
+ "59377647165122893578601588161755782973523344604281512627203734314653197777416031990665541876397929334419521541" +
+ "34189948544473456738316249934191318148092777710386387734317720754565453220777092120190516609628049092636019759" +
+ "88281613323166636528619326686336062735676303544776280350450777235547105859548702790814356240145171806246436267" +
+ "94561275318134078330336254232783944975382437205835311477119926063813346776879695970309833913077109870408591337"
+
+// Test case for BenchmarkScanPi.
+func TestScanPi(t *testing.T) {
+ var x nat
+ z, _, err := x.scan(strings.NewReader(pi), 10)
+ if err != nil {
+ t.Errorf("scanning pi: %s", err)
+ }
+ if s := z.decimalString(); s != pi {
+ t.Errorf("scanning pi: got %s", s)
+ }
+}
+
+func BenchmarkScanPi(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ var x nat
+ x.scan(strings.NewReader(pi), 10)
+ }
+}
+
+const (
+ // 314**271
+ // base 2: 2249 digits
+ // base 8: 751 digits
+ // base 10: 678 digits
+ // base 16: 563 digits
+ shortBase = 314
+ shortExponent = 271
+
+ // 3141**2718
+ // base 2: 31577 digits
+ // base 8: 10527 digits
+ // base 10: 9507 digits
+ // base 16: 7895 digits
+ mediumBase = 3141
+ mediumExponent = 2718
+
+ // 31415**27182
+ // base 2: 406078 digits
+ // base 8: 135360 digits
+ // base 10: 122243 digits
+ // base 16: 101521 digits
+ longBase = 31415
+ longExponent = 27182
+)
+
+func BenchmarkScanShort2(b *testing.B) {
+ ScanHelper(b, 2, shortBase, shortExponent)
+}
+
+func BenchmarkScanShort8(b *testing.B) {
+ ScanHelper(b, 8, shortBase, shortExponent)
+}
+
+func BenchmarkScanShort10(b *testing.B) {
+ ScanHelper(b, 10, shortBase, shortExponent)
+}
+
+func BenchmarkScanShort16(b *testing.B) {
+ ScanHelper(b, 16, shortBase, shortExponent)
+}
+
+func BenchmarkScanMedium2(b *testing.B) {
+ ScanHelper(b, 2, mediumBase, mediumExponent)
+}
+
+func BenchmarkScanMedium8(b *testing.B) {
+ ScanHelper(b, 8, mediumBase, mediumExponent)
+}
+
+func BenchmarkScanMedium10(b *testing.B) {
+ ScanHelper(b, 10, mediumBase, mediumExponent)
+}
+
+func BenchmarkScanMedium16(b *testing.B) {
+ ScanHelper(b, 16, mediumBase, mediumExponent)
+}
+
+func BenchmarkScanLong2(b *testing.B) {
+ ScanHelper(b, 2, longBase, longExponent)
+}
+
+func BenchmarkScanLong8(b *testing.B) {
+ ScanHelper(b, 8, longBase, longExponent)
+}
+
+func BenchmarkScanLong10(b *testing.B) {
+ ScanHelper(b, 10, longBase, longExponent)
+}
+
+func BenchmarkScanLong16(b *testing.B) {
+ ScanHelper(b, 16, longBase, longExponent)
+}
+
+func ScanHelper(b *testing.B, base int, xv, yv Word) {
+ b.StopTimer()
+ var x, y, z nat
+ x = x.setWord(xv)
+ y = y.setWord(yv)
+ z = z.expNN(x, y, nil)
+
+ var s string
+ s = z.string(lowercaseDigits[0:base])
+ if t := toString(z, lowercaseDigits[0:base]); t != s {
+ panic(fmt.Sprintf("scanning: got %s; want %s", s, t))
+ }
+ b.StartTimer()
+
+ for i := 0; i < b.N; i++ {
+ x.scan(strings.NewReader(s), base)
+ }
+}
+
+func BenchmarkStringShort2(b *testing.B) {
+ StringHelper(b, 2, shortBase, shortExponent)
+}
+
+func BenchmarkStringShort8(b *testing.B) {
+ StringHelper(b, 8, shortBase, shortExponent)
+}
+
+func BenchmarkStringShort10(b *testing.B) {
+ StringHelper(b, 10, shortBase, shortExponent)
+}
+
+func BenchmarkStringShort16(b *testing.B) {
+ StringHelper(b, 16, shortBase, shortExponent)
+}
+
+func BenchmarkStringMedium2(b *testing.B) {
+ StringHelper(b, 2, mediumBase, mediumExponent)
+}
+
+func BenchmarkStringMedium8(b *testing.B) {
+ StringHelper(b, 8, mediumBase, mediumExponent)
+}
+
+func BenchmarkStringMedium10(b *testing.B) {
+ StringHelper(b, 10, mediumBase, mediumExponent)
+}
+
+func BenchmarkStringMedium16(b *testing.B) {
+ StringHelper(b, 16, mediumBase, mediumExponent)
+}
+
+func BenchmarkStringLong2(b *testing.B) {
+ StringHelper(b, 2, longBase, longExponent)
+}
+
+func BenchmarkStringLong8(b *testing.B) {
+ StringHelper(b, 8, longBase, longExponent)
+}
+
+func BenchmarkStringLong10(b *testing.B) {
+ StringHelper(b, 10, longBase, longExponent)
+}
+
+func BenchmarkStringLong16(b *testing.B) {
+ StringHelper(b, 16, longBase, longExponent)
+}
+
+func StringHelper(b *testing.B, base int, xv, yv Word) {
+ b.StopTimer()
+ var x, y, z nat
+ x = x.setWord(xv)
+ y = y.setWord(yv)
+ z = z.expNN(x, y, nil)
+ b.StartTimer()
+
+ for i := 0; i < b.N; i++ {
+ z.string(lowercaseDigits[0:base])
+ }
+}
+
+func TestLeadingZeros(t *testing.T) {
+ var x Word = _B >> 1
+ for i := 0; i <= _W; i++ {
+ if int(leadingZeros(x)) != i {
+ t.Errorf("failed at %x: got %d want %d", x, leadingZeros(x), i)
+ }
+ x >>= 1
+ }
+}
+
+type shiftTest struct {
+ in nat
+ shift uint
+ out nat
+}
+
+var leftShiftTests = []shiftTest{
+ {nil, 0, nil},
+ {nil, 1, nil},
+ {natOne, 0, natOne},
+ {natOne, 1, natTwo},
+ {nat{1 << (_W - 1)}, 1, nat{0}},
+ {nat{1 << (_W - 1), 0}, 1, nat{0, 1}},
+}
+
+func TestShiftLeft(t *testing.T) {
+ for i, test := range leftShiftTests {
+ var z nat
+ z = z.shl(test.in, test.shift)
+ for j, d := range test.out {
+ if j >= len(z) || z[j] != d {
+ t.Errorf("#%d: got: %v want: %v", i, z, test.out)
+ break
+ }
+ }
+ }
+}
+
+var rightShiftTests = []shiftTest{
+ {nil, 0, nil},
+ {nil, 1, nil},
+ {natOne, 0, natOne},
+ {natOne, 1, nil},
+ {natTwo, 1, natOne},
+ {nat{0, 1}, 1, nat{1 << (_W - 1)}},
+ {nat{2, 1, 1}, 1, nat{1<<(_W-1) + 1, 1 << (_W - 1)}},
+}
+
+func TestShiftRight(t *testing.T) {
+ for i, test := range rightShiftTests {
+ var z nat
+ z = z.shr(test.in, test.shift)
+ for j, d := range test.out {
+ if j >= len(z) || z[j] != d {
+ t.Errorf("#%d: got: %v want: %v", i, z, test.out)
+ break
+ }
+ }
+ }
+}
+
+type modWTest struct {
+ in string
+ dividend string
+ out string
+}
+
+var modWTests32 = []modWTest{
+ {"23492635982634928349238759823742", "252341", "220170"},
+}
+
+var modWTests64 = []modWTest{
+ {"6527895462947293856291561095690465243862946", "524326975699234", "375066989628668"},
+}
+
+func runModWTests(t *testing.T, tests []modWTest) {
+ for i, test := range tests {
+ in, _ := new(Int).SetString(test.in, 10)
+ d, _ := new(Int).SetString(test.dividend, 10)
+ out, _ := new(Int).SetString(test.out, 10)
+
+ r := in.abs.modW(d.abs[0])
+ if r != out.abs[0] {
+ t.Errorf("#%d failed: got %d want %s", i, r, out)
+ }
+ }
+}
+
+func TestModW(t *testing.T) {
+ if _W >= 32 {
+ runModWTests(t, modWTests32)
+ }
+ if _W >= 64 {
+ runModWTests(t, modWTests64)
+ }
+}
+
+func TestTrailingZeroBits(t *testing.T) {
+ var x Word
+ x--
+ for i := 0; i < _W; i++ {
+ if trailingZeroBits(x) != i {
+ t.Errorf("Failed at step %d: x: %x got: %d", i, x, trailingZeroBits(x))
+ }
+ x <<= 1
+ }
+}
+
+var expNNTests = []struct {
+ x, y, m string
+ out string
+}{
+ {"0x8000000000000000", "2", "", "0x40000000000000000000000000000000"},
+ {"0x8000000000000000", "2", "6719", "4944"},
+ {"0x8000000000000000", "3", "6719", "5447"},
+ {"0x8000000000000000", "1000", "6719", "1603"},
+ {"0x8000000000000000", "1000000", "6719", "3199"},
+ {
+ "2938462938472983472983659726349017249287491026512746239764525612965293865296239471239874193284792387498274256129746192347",
+ "298472983472983471903246121093472394872319615612417471234712061",
+ "29834729834729834729347290846729561262544958723956495615629569234729836259263598127342374289365912465901365498236492183464",
+ "23537740700184054162508175125554701713153216681790245129157191391322321508055833908509185839069455749219131480588829346291",
+ },
+}
+
+func TestExpNN(t *testing.T) {
+ for i, test := range expNNTests {
+ x, _, _ := nat{}.scan(strings.NewReader(test.x), 0)
+ y, _, _ := nat{}.scan(strings.NewReader(test.y), 0)
+ out, _, _ := nat{}.scan(strings.NewReader(test.out), 0)
+
+ var m nat
+
+ if len(test.m) > 0 {
+ m, _, _ = nat{}.scan(strings.NewReader(test.m), 0)
+ }
+
+ z := nat{}.expNN(x, y, m)
+ if z.cmp(out) != 0 {
+ t.Errorf("#%d got %v want %v", i, z, out)
+ }
+ }
+}
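
The same computation is reachable through the exported Int API; for illustration, a sketch (inside package big, or with the "big." prefix from elsewhere) that reproduces the second expNNTests entry using Int.Exp:

	x, _ := new(Int).SetString("0x8000000000000000", 0) // 2**63
	z := new(Int).Exp(x, NewInt(2), NewInt(6719))
	// z.String() == "4944", matching the table above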
--- /dev/null
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements multi-precision rational numbers.
+
+package big
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "strings"
+)
+
+// A Rat represents a quotient a/b of arbitrary precision.
+// The zero value for a Rat represents the value 0.
+type Rat struct {
+ a Int
+ b nat // len(b) == 0 acts like b == 1
+}
+
+// NewRat creates a new Rat with numerator a and denominator b.
+func NewRat(a, b int64) *Rat {
+ return new(Rat).SetFrac64(a, b)
+}
+
+// SetFrac sets z to a/b and returns z.
+func (z *Rat) SetFrac(a, b *Int) *Rat {
+ z.a.neg = a.neg != b.neg
+ babs := b.abs
+ if len(babs) == 0 {
+ panic("division by zero")
+ }
+ if &z.a == b || alias(z.a.abs, babs) {
+ babs = nat{}.set(babs) // make a copy
+ }
+ z.a.abs = z.a.abs.set(a.abs)
+ z.b = z.b.set(babs)
+ return z.norm()
+}
+
+// SetFrac64 sets z to a/b and returns z.
+func (z *Rat) SetFrac64(a, b int64) *Rat {
+ z.a.SetInt64(a)
+ if b == 0 {
+ panic("division by zero")
+ }
+ if b < 0 {
+ b = -b
+ z.a.neg = !z.a.neg
+ }
+ z.b = z.b.setUint64(uint64(b))
+ return z.norm()
+}
+
+// SetInt sets z to x (by making a copy of x) and returns z.
+func (z *Rat) SetInt(x *Int) *Rat {
+ z.a.Set(x)
+ z.b = z.b.make(0)
+ return z
+}
+
+// SetInt64 sets z to x and returns z.
+func (z *Rat) SetInt64(x int64) *Rat {
+ z.a.SetInt64(x)
+ z.b = z.b.make(0)
+ return z
+}
+
+// Set sets z to x (by making a copy of x) and returns z.
+func (z *Rat) Set(x *Rat) *Rat {
+ if z != x {
+ z.a.Set(&x.a)
+ z.b = z.b.set(x.b)
+ }
+ return z
+}
+
+// Abs sets z to |x| (the absolute value of x) and returns z.
+func (z *Rat) Abs(x *Rat) *Rat {
+ z.Set(x)
+ z.a.neg = false
+ return z
+}
+
+// Neg sets z to -x and returns z.
+func (z *Rat) Neg(x *Rat) *Rat {
+ z.Set(x)
+ z.a.neg = len(z.a.abs) > 0 && !z.a.neg // 0 has no sign
+ return z
+}
+
+// Inv sets z to 1/x and returns z.
+func (z *Rat) Inv(x *Rat) *Rat {
+ if len(x.a.abs) == 0 {
+ panic("division by zero")
+ }
+ z.Set(x)
+ a := z.b
+ if len(a) == 0 {
+ a = a.setWord(1) // materialize numerator
+ }
+ b := z.a.abs
+ if b.cmp(natOne) == 0 {
+ b = b.make(0) // normalize denominator
+ }
+ z.a.abs, z.b = a, b // sign doesn't change
+ return z
+}
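
Inv swaps the (normalized) numerator and denominator and leaves the sign untouched; a small usage example, consistent with TestRatInv further below:

	x := NewRat(-2, 3)
	z := new(Rat).Inv(x)
	// z.RatString() == "-3/2"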
+
+// Sign returns:
+//
+// -1 if x < 0
+// 0 if x == 0
+// +1 if x > 0
+//
+func (x *Rat) Sign() int {
+ return x.a.Sign()
+}
+
+// IsInt returns true if the denominator of x is 1.
+func (x *Rat) IsInt() bool {
+ return len(x.b) == 0 || x.b.cmp(natOne) == 0
+}
+
+// Num returns the numerator of x; it may be <= 0.
+// The result is a reference to x's numerator; it
+// may change if a new value is assigned to x.
+func (x *Rat) Num() *Int {
+ return &x.a
+}
+
+// Denom returns the denominator of x; it is always > 0.
+// The result is a reference to x's denominator; it
+// may change if a new value is assigned to x.
+func (x *Rat) Denom() *Int {
+ if len(x.b) == 0 {
+ return &Int{abs: nat{1}}
+ }
+ return &Int{abs: x.b}
+}
+
+func gcd(x, y nat) nat {
+ // Euclidean algorithm.
+ var a, b nat
+ a = a.set(x)
+ b = b.set(y)
+ for len(b) != 0 {
+ var q, r nat
+ _, r = q.div(r, a, b)
+ a = b
+ b = r
+ }
+ return a
+}
+
+func (z *Rat) norm() *Rat {
+ switch {
+ case len(z.a.abs) == 0:
+ // z == 0 - normalize sign and denominator
+ z.a.neg = false
+ z.b = z.b.make(0)
+ case len(z.b) == 0:
+ // z is normalized int - nothing to do
+ case z.b.cmp(natOne) == 0:
+ // z is int - normalize denominator
+ z.b = z.b.make(0)
+ default:
+ if f := gcd(z.a.abs, z.b); f.cmp(natOne) != 0 {
+ z.a.abs, _ = z.a.abs.div(nil, z.a.abs, f)
+ z.b, _ = z.b.div(nil, z.b, f)
+ }
+ }
+ return z
+}
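
norm divides both sides by their gcd and clears a denominator of 1, so every constructor and operation returns a reduced fraction. For example:

	x := NewRat(6, 4)
	// x.RatString() == "3/2" (the common factor 2 has been divided out)
	y := NewRat(4, 2)
	// y.RatString() == "2" (the value reduces to an integer)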
+
+// mulDenom sets z to the denominator product x*y (by taking into
+// account that 0 values for x or y must be interpreted as 1) and
+// returns z.
+func mulDenom(z, x, y nat) nat {
+ switch {
+ case len(x) == 0:
+ return z.set(y)
+ case len(y) == 0:
+ return z.set(x)
+ }
+ return z.mul(x, y)
+}
+
+// scaleDenom computes x*f.
+// If f == 0 (zero value of denominator), the result is (a copy of) x.
+func scaleDenom(x *Int, f nat) *Int {
+ var z Int
+ if len(f) == 0 {
+ return z.Set(x)
+ }
+ z.abs = z.abs.mul(x.abs, f)
+ z.neg = x.neg
+ return &z
+}
+
+// Cmp compares x and y and returns:
+//
+// -1 if x < y
+// 0 if x == y
+// +1 if x > y
+//
+func (x *Rat) Cmp(y *Rat) int {
+ return scaleDenom(&x.a, y.b).Cmp(scaleDenom(&y.a, x.b))
+}
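
Because Cmp cross-multiplies via scaleDenom, equivalent fractions compare equal regardless of how they were constructed; for example:

	_ = NewRat(1, 2).Cmp(NewRat(2, 4)) // 0
	_ = NewRat(1, 3).Cmp(NewRat(1, 2)) // -1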
+
+// Add sets z to the sum x+y and returns z.
+func (z *Rat) Add(x, y *Rat) *Rat {
+ a1 := scaleDenom(&x.a, y.b)
+ a2 := scaleDenom(&y.a, x.b)
+ z.a.Add(a1, a2)
+ z.b = mulDenom(z.b, x.b, y.b)
+ return z.norm()
+}
+
+// Sub sets z to the difference x-y and returns z.
+func (z *Rat) Sub(x, y *Rat) *Rat {
+ a1 := scaleDenom(&x.a, y.b)
+ a2 := scaleDenom(&y.a, x.b)
+ z.a.Sub(a1, a2)
+ z.b = mulDenom(z.b, x.b, y.b)
+ return z.norm()
+}
+
+// Mul sets z to the product x*y and returns z.
+func (z *Rat) Mul(x, y *Rat) *Rat {
+ z.a.Mul(&x.a, &y.a)
+ z.b = mulDenom(z.b, x.b, y.b)
+ return z.norm()
+}
+
+// Quo sets z to the quotient x/y and returns z.
+// If y == 0, a division-by-zero run-time panic occurs.
+func (z *Rat) Quo(x, y *Rat) *Rat {
+ if len(y.a.abs) == 0 {
+ panic("division by zero")
+ }
+ a := scaleDenom(&x.a, y.b)
+ b := scaleDenom(&y.a, x.b)
+ z.a.abs = a.abs
+ z.b = b.abs
+ z.a.neg = a.neg != b.neg
+ return z.norm()
+}
+
+func ratTok(ch rune) bool {
+ return strings.IndexRune("+-/0123456789.eE", ch) >= 0
+}
+
+// Scan is a support routine for fmt.Scanner. It accepts the formats
+// 'e', 'E', 'f', 'F', 'g', 'G', and 'v'. All formats are equivalent.
+func (z *Rat) Scan(s fmt.ScanState, ch rune) error {
+ tok, err := s.Token(true, ratTok)
+ if err != nil {
+ return err
+ }
+ if strings.IndexRune("efgEFGv", ch) < 0 {
+ return errors.New("Rat.Scan: invalid verb")
+ }
+ if _, ok := z.SetString(string(tok)); !ok {
+ return errors.New("Rat.Scan: invalid syntax")
+ }
+ return nil
+}
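
With Scan defined, a Rat can be read directly by the fmt scanning functions; a minimal sketch mirroring TestRatScan further below (error handling elided):

	r := new(Rat)
	fmt.Sscanf("-.1", "%v", r)
	// r.RatString() == "-1/10"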
+
+// SetString sets z to the value of s and returns z and a boolean indicating
+// success. s can be given as a fraction "a/b" or as a floating-point number
+// optionally followed by an exponent. If the operation failed, the value of
+// z is undefined but the returned value is nil.
+func (z *Rat) SetString(s string) (*Rat, bool) {
+ if len(s) == 0 {
+ return nil, false
+ }
+
+ // check for a quotient
+ sep := strings.Index(s, "/")
+ if sep >= 0 {
+ if _, ok := z.a.SetString(s[0:sep], 10); !ok {
+ return nil, false
+ }
+ s = s[sep+1:]
+ var err error
+ if z.b, _, err = z.b.scan(strings.NewReader(s), 10); err != nil {
+ return nil, false
+ }
+ return z.norm(), true
+ }
+
+ // check for a decimal point
+ sep = strings.Index(s, ".")
+ // check for an exponent
+ e := strings.IndexAny(s, "eE")
+ var exp Int
+ if e >= 0 {
+ if e < sep {
+ // The E must come after the decimal point.
+ return nil, false
+ }
+ if _, ok := exp.SetString(s[e+1:], 10); !ok {
+ return nil, false
+ }
+ s = s[0:e]
+ }
+ if sep >= 0 {
+ s = s[0:sep] + s[sep+1:]
+ exp.Sub(&exp, NewInt(int64(len(s)-sep)))
+ }
+
+ if _, ok := z.a.SetString(s, 10); !ok {
+ return nil, false
+ }
+ powTen := nat{}.expNN(natTen, exp.abs, nil)
+ if exp.neg {
+ z.b = powTen
+ z.norm()
+ } else {
+ z.a.abs = z.a.abs.mul(z.a.abs, powTen)
+ z.b = z.b.make(0)
+ }
+
+ return z, true
+}
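
SetString thus accepts both fraction and decimal/exponent notation; a few examples taken from the setStringTests table further below:

	r := new(Rat)
	r.SetString("2/4")            // r.RatString() == "1/2"
	r.SetString(".25")            // r.RatString() == "1/4"
	r.SetString("553019.8935e+8") // r.RatString() == "55301989350000"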
+
+// String returns a string representation of z in the form "a/b" (even if b == 1).
+func (z *Rat) String() string {
+ s := "/1"
+ if len(z.b) != 0 {
+ s = "/" + z.b.decimalString()
+ }
+ return z.a.String() + s
+}
+
+// RatString returns a string representation of z in the form "a/b" if b != 1,
+// and in the form "a" if b == 1.
+func (z *Rat) RatString() string {
+ if z.IsInt() {
+ return z.a.String()
+ }
+ return z.String()
+}
+
+// FloatString returns a string representation of z in decimal form with prec
+// digits of precision after the decimal point and the last digit rounded.
+func (z *Rat) FloatString(prec int) string {
+ if z.IsInt() {
+ s := z.a.String()
+ if prec > 0 {
+ s += "." + strings.Repeat("0", prec)
+ }
+ return s
+ }
+ // z.b != 0
+
+ q, r := nat{}.div(nat{}, z.a.abs, z.b)
+
+ p := natOne
+ if prec > 0 {
+ p = nat{}.expNN(natTen, nat{}.setUint64(uint64(prec)), nil)
+ }
+
+ r = r.mul(r, p)
+ r, r2 := r.div(nat{}, r, z.b)
+
+ // see if we need to round up
+ r2 = r2.add(r2, r2)
+ if z.b.cmp(r2) <= 0 {
+ r = r.add(r, natOne)
+ if r.cmp(p) >= 0 {
+ q = nat{}.add(q, natOne)
+ r = nat{}.sub(r, p)
+ }
+ }
+
+ s := q.decimalString()
+ if z.a.neg {
+ s = "-" + s
+ }
+
+ if prec > 0 {
+ rs := r.decimalString()
+ leadingZeros := prec - len(rs)
+ s += "." + strings.Repeat("0", leadingZeros) + rs
+ }
+
+ return s
+}
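
FloatString rounds the final digit rather than truncating; two examples from the floatStringTests table further below:

	x, _ := new(Rat).SetString("-2/3")
	_ = x.FloatString(4) // "-0.6667"
	y, _ := new(Rat).SetString("0.999")
	_ = y.FloatString(2) // "1.00"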
+
+// Gob codec version. Permits backward-compatible changes to the encoding.
+const ratGobVersion byte = 1
+
+// GobEncode implements the gob.GobEncoder interface.
+func (z *Rat) GobEncode() ([]byte, error) {
+ buf := make([]byte, 1+4+(len(z.a.abs)+len(z.b))*_S) // extra bytes for version and sign bit (1), and numerator length (4)
+ i := z.b.bytes(buf)
+ j := z.a.abs.bytes(buf[0:i])
+ n := i - j
+ if int(uint32(n)) != n {
+ // this should never happen
+ return nil, errors.New("Rat.GobEncode: numerator too large")
+ }
+ binary.BigEndian.PutUint32(buf[j-4:j], uint32(n))
+ j -= 1 + 4
+ b := ratGobVersion << 1 // make space for sign bit
+ if z.a.neg {
+ b |= 1
+ }
+ buf[j] = b
+ return buf[j:], nil
+}
+
+// GobDecode implements the gob.GobDecoder interface.
+func (z *Rat) GobDecode(buf []byte) error {
+ if len(buf) == 0 {
+ return errors.New("Rat.GobDecode: no data")
+ }
+ b := buf[0]
+ if b>>1 != ratGobVersion {
+ return fmt.Errorf("Rat.GobDecode: encoding version %d not supported", b>>1)
+ }
+ const j = 1 + 4
+ i := j + binary.BigEndian.Uint32(buf[j-4:j])
+ z.a.neg = b&1 != 0
+ z.a.abs = z.a.abs.setBytes(buf[j:i])
+ z.b = z.b.setBytes(buf[i:])
+ return nil
+}
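
The encoded form is a single version/sign byte, a 4-byte big-endian numerator length, the numerator bytes, and finally the denominator bytes. An illustrative sketch of the round trip through encoding/gob, as exercised by TestRatGobEncoding below:

	var buf bytes.Buffer
	enc := gob.NewEncoder(&buf)
	dec := gob.NewDecoder(&buf)
	in := NewRat(-3, 7)
	enc.Encode(in) // errors elided in this sketch
	out := new(Rat)
	dec.Decode(out)
	// out.Cmp(in) == 0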
--- /dev/null
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package big
+
+import (
+ "bytes"
+ "encoding/gob"
+ "fmt"
+ "testing"
+)
+
+func TestZeroRat(t *testing.T) {
+ var x, y, z Rat
+ y.SetFrac64(0, 42)
+
+ if x.Cmp(&y) != 0 {
+ t.Errorf("x and y should be both equal and zero")
+ }
+
+ if s := x.String(); s != "0/1" {
+ t.Errorf("got x = %s, want 0/1", s)
+ }
+
+ if s := x.RatString(); s != "0" {
+ t.Errorf("got x = %s, want 0", s)
+ }
+
+ z.Add(&x, &y)
+ if s := z.RatString(); s != "0" {
+ t.Errorf("got x+y = %s, want 0", s)
+ }
+
+ z.Sub(&x, &y)
+ if s := z.RatString(); s != "0" {
+ t.Errorf("got x-y = %s, want 0", s)
+ }
+
+ z.Mul(&x, &y)
+ if s := z.RatString(); s != "0" {
+ t.Errorf("got x*y = %s, want 0", s)
+ }
+
+ // check for division by zero
+ defer func() {
+ if s := recover(); s == nil || s.(string) != "division by zero" {
+ panic(s)
+ }
+ }()
+ z.Quo(&x, &y)
+}
+
+var setStringTests = []struct {
+ in, out string
+ ok bool
+}{
+ {"0", "0", true},
+ {"-0", "0", true},
+ {"1", "1", true},
+ {"-1", "-1", true},
+ {"1.", "1", true},
+ {"1e0", "1", true},
+ {"1.e1", "10", true},
+ {in: "1e", ok: false},
+ {in: "1.e", ok: false},
+ {in: "1e+14e-5", ok: false},
+ {in: "1e4.5", ok: false},
+ {in: "r", ok: false},
+ {in: "a/b", ok: false},
+ {in: "a.b", ok: false},
+ {"-0.1", "-1/10", true},
+ {"-.1", "-1/10", true},
+ {"2/4", "1/2", true},
+ {".25", "1/4", true},
+ {"-1/5", "-1/5", true},
+ {"8129567.7690E14", "812956776900000000000", true},
+ {"78189e+4", "781890000", true},
+ {"553019.8935e+8", "55301989350000", true},
+ {"98765432109876543210987654321e-10", "98765432109876543210987654321/10000000000", true},
+ {"9877861857500000E-7", "3951144743/4", true},
+ {"2169378.417e-3", "2169378417/1000000", true},
+ {"884243222337379604041632732738665534", "884243222337379604041632732738665534", true},
+ {"53/70893980658822810696", "53/70893980658822810696", true},
+ {"106/141787961317645621392", "53/70893980658822810696", true},
+ {"204211327800791583.81095", "4084226556015831676219/20000", true},
+}
+
+func TestRatSetString(t *testing.T) {
+ for i, test := range setStringTests {
+ x, ok := new(Rat).SetString(test.in)
+
+ if ok {
+ if !test.ok {
+ t.Errorf("#%d SetString(%q) expected failure", i, test.in)
+ } else if x.RatString() != test.out {
+ t.Errorf("#%d SetString(%q) got %s want %s", i, test.in, x.RatString(), test.out)
+ }
+ } else if x != nil {
+ t.Errorf("#%d SetString(%q) got %p want nil", i, test.in, x)
+ }
+ }
+}
+
+func TestRatScan(t *testing.T) {
+ var buf bytes.Buffer
+ for i, test := range setStringTests {
+ x := new(Rat)
+ buf.Reset()
+ buf.WriteString(test.in)
+
+ _, err := fmt.Fscanf(&buf, "%v", x)
+ if err == nil != test.ok {
+ if test.ok {
+ t.Errorf("#%d error: %s", i, err)
+ } else {
+ t.Errorf("#%d expected error", i)
+ }
+ continue
+ }
+ if err == nil && x.RatString() != test.out {
+ t.Errorf("#%d got %s want %s", i, x.RatString(), test.out)
+ }
+ }
+}
+
+var floatStringTests = []struct {
+ in string
+ prec int
+ out string
+}{
+ {"0", 0, "0"},
+ {"0", 4, "0.0000"},
+ {"1", 0, "1"},
+ {"1", 2, "1.00"},
+ {"-1", 0, "-1"},
+ {".25", 2, "0.25"},
+ {".25", 1, "0.3"},
+ {".25", 3, "0.250"},
+ {"-1/3", 3, "-0.333"},
+ {"-2/3", 4, "-0.6667"},
+ {"0.96", 1, "1.0"},
+ {"0.999", 2, "1.00"},
+ {"0.9", 0, "1"},
+ {".25", -1, "0"},
+ {".55", -1, "1"},
+}
+
+func TestFloatString(t *testing.T) {
+ for i, test := range floatStringTests {
+ x, _ := new(Rat).SetString(test.in)
+
+ if x.FloatString(test.prec) != test.out {
+ t.Errorf("#%d got %s want %s", i, x.FloatString(test.prec), test.out)
+ }
+ }
+}
+
+func TestRatSign(t *testing.T) {
+ zero := NewRat(0, 1)
+ for _, a := range setStringTests {
+ x, ok := new(Rat).SetString(a.in)
+ if !ok {
+ continue
+ }
+ s := x.Sign()
+ e := x.Cmp(zero)
+ if s != e {
+ t.Errorf("got %d; want %d for z = %v", s, e, x)
+ }
+ }
+}
+
+var ratCmpTests = []struct {
+ rat1, rat2 string
+ out int
+}{
+ {"0", "0/1", 0},
+ {"1/1", "1", 0},
+ {"-1", "-2/2", 0},
+ {"1", "0", 1},
+ {"0/1", "1/1", -1},
+ {"-5/1434770811533343057144", "-5/1434770811533343057145", -1},
+ {"49832350382626108453/8964749413", "49832350382626108454/8964749413", -1},
+ {"-37414950961700930/7204075375675961", "37414950961700930/7204075375675961", -1},
+ {"37414950961700930/7204075375675961", "74829901923401860/14408150751351922", 0},
+}
+
+func TestRatCmp(t *testing.T) {
+ for i, test := range ratCmpTests {
+ x, _ := new(Rat).SetString(test.rat1)
+ y, _ := new(Rat).SetString(test.rat2)
+
+ out := x.Cmp(y)
+ if out != test.out {
+ t.Errorf("#%d got out = %v; want %v", i, out, test.out)
+ }
+ }
+}
+
+func TestIsInt(t *testing.T) {
+ one := NewInt(1)
+ for _, a := range setStringTests {
+ x, ok := new(Rat).SetString(a.in)
+ if !ok {
+ continue
+ }
+ i := x.IsInt()
+ e := x.Denom().Cmp(one) == 0
+ if i != e {
+ t.Errorf("got IsInt(%v) == %v; want %v", x, i, e)
+ }
+ }
+}
+
+func TestRatAbs(t *testing.T) {
+ zero := new(Rat)
+ for _, a := range setStringTests {
+ x, ok := new(Rat).SetString(a.in)
+ if !ok {
+ continue
+ }
+ e := new(Rat).Set(x)
+ if e.Cmp(zero) < 0 {
+ e.Sub(zero, e)
+ }
+ z := new(Rat).Abs(x)
+ if z.Cmp(e) != 0 {
+ t.Errorf("got Abs(%v) = %v; want %v", x, z, e)
+ }
+ }
+}
+
+func TestRatNeg(t *testing.T) {
+ zero := new(Rat)
+ for _, a := range setStringTests {
+ x, ok := new(Rat).SetString(a.in)
+ if !ok {
+ continue
+ }
+ e := new(Rat).Sub(zero, x)
+ z := new(Rat).Neg(x)
+ if z.Cmp(e) != 0 {
+ t.Errorf("got Neg(%v) = %v; want %v", x, z, e)
+ }
+ }
+}
+
+func TestRatInv(t *testing.T) {
+ zero := new(Rat)
+ for _, a := range setStringTests {
+ x, ok := new(Rat).SetString(a.in)
+ if !ok {
+ continue
+ }
+ if x.Cmp(zero) == 0 {
+ continue // avoid division by zero
+ }
+ e := new(Rat).SetFrac(x.Denom(), x.Num())
+ z := new(Rat).Inv(x)
+ if z.Cmp(e) != 0 {
+ t.Errorf("got Inv(%v) = %v; want %v", x, z, e)
+ }
+ }
+}
+
+type ratBinFun func(z, x, y *Rat) *Rat
+type ratBinArg struct {
+ x, y, z string
+}
+
+func testRatBin(t *testing.T, i int, name string, f ratBinFun, a ratBinArg) {
+ x, _ := new(Rat).SetString(a.x)
+ y, _ := new(Rat).SetString(a.y)
+ z, _ := new(Rat).SetString(a.z)
+ out := f(new(Rat), x, y)
+
+ if out.Cmp(z) != 0 {
+ t.Errorf("%s #%d got %s want %s", name, i, out, z)
+ }
+}
+
+var ratBinTests = []struct {
+ x, y string
+ sum, prod string
+}{
+ {"0", "0", "0", "0"},
+ {"0", "1", "1", "0"},
+ {"-1", "0", "-1", "0"},
+ {"-1", "1", "0", "-1"},
+ {"1", "1", "2", "1"},
+ {"1/2", "1/2", "1", "1/4"},
+ {"1/4", "1/3", "7/12", "1/12"},
+ {"2/5", "-14/3", "-64/15", "-28/15"},
+ {"4707/49292519774798173060", "-3367/70976135186689855734", "84058377121001851123459/1749296273614329067191168098769082663020", "-1760941/388732505247628681598037355282018369560"},
+ {"-61204110018146728334/3", "-31052192278051565633/2", "-215564796870448153567/6", "950260896245257153059642991192710872711/3"},
+ {"-854857841473707320655/4237645934602118692642972629634714039", "-18/31750379913563777419", "-27/133467566250814981", "15387441146526731771790/134546868362786310073779084329032722548987800600710485341"},
+ {"618575745270541348005638912139/19198433543745179392300736", "-19948846211000086/637313996471", "27674141753240653/30123979153216", "-6169936206128396568797607742807090270137721977/6117715203873571641674006593837351328"},
+ {"-3/26206484091896184128", "5/2848423294177090248", "15310893822118706237/9330894968229805033368778458685147968", "-5/24882386581946146755650075889827061248"},
+ {"26946729/330400702820", "41563965/225583428284", "1238218672302860271/4658307703098666660055", "224002580204097/14906584649915733312176"},
+ {"-8259900599013409474/7", "-84829337473700364773/56707961321161574960", "-468402123685491748914621885145127724451/396955729248131024720", "350340947706464153265156004876107029701/198477864624065512360"},
+ {"575775209696864/1320203974639986246357", "29/712593081308", "410331716733912717985762465/940768218243776489278275419794956", "808/45524274987585732633"},
+ {"1786597389946320496771/2066653520653241", "6269770/1992362624741777", "3559549865190272133656109052308126637/4117523232840525481453983149257", "8967230/3296219033"},
+ {"-36459180403360509753/32150500941194292113930", "9381566963714/9633539", "301622077145533298008420642898530153/309723104686531919656937098270", "-3784609207827/3426986245"},
+}
+
+func TestRatBin(t *testing.T) {
+ for i, test := range ratBinTests {
+ arg := ratBinArg{test.x, test.y, test.sum}
+ testRatBin(t, i, "Add", (*Rat).Add, arg)
+
+ arg = ratBinArg{test.y, test.x, test.sum}
+ testRatBin(t, i, "Add symmetric", (*Rat).Add, arg)
+
+ arg = ratBinArg{test.sum, test.x, test.y}
+ testRatBin(t, i, "Sub", (*Rat).Sub, arg)
+
+ arg = ratBinArg{test.sum, test.y, test.x}
+ testRatBin(t, i, "Sub symmetric", (*Rat).Sub, arg)
+
+ arg = ratBinArg{test.x, test.y, test.prod}
+ testRatBin(t, i, "Mul", (*Rat).Mul, arg)
+
+ arg = ratBinArg{test.y, test.x, test.prod}
+ testRatBin(t, i, "Mul symmetric", (*Rat).Mul, arg)
+
+ if test.x != "0" {
+ arg = ratBinArg{test.prod, test.x, test.y}
+ testRatBin(t, i, "Quo", (*Rat).Quo, arg)
+ }
+
+ if test.y != "0" {
+ arg = ratBinArg{test.prod, test.y, test.x}
+ testRatBin(t, i, "Quo symmetric", (*Rat).Quo, arg)
+ }
+ }
+}
+
+func TestIssue820(t *testing.T) {
+ x := NewRat(3, 1)
+ y := NewRat(2, 1)
+ z := y.Quo(x, y)
+ q := NewRat(3, 2)
+ if z.Cmp(q) != 0 {
+ t.Errorf("got %s want %s", z, q)
+ }
+
+ y = NewRat(3, 1)
+ x = NewRat(2, 1)
+ z = y.Quo(x, y)
+ q = NewRat(2, 3)
+ if z.Cmp(q) != 0 {
+ t.Errorf("got %s want %s", z, q)
+ }
+
+ x = NewRat(3, 1)
+ z = x.Quo(x, x)
+ q = NewRat(3, 3)
+ if z.Cmp(q) != 0 {
+ t.Errorf("got %s want %s", z, q)
+ }
+}
+
+var setFrac64Tests = []struct {
+ a, b int64
+ out string
+}{
+ {0, 1, "0"},
+ {0, -1, "0"},
+ {1, 1, "1"},
+ {-1, 1, "-1"},
+ {1, -1, "-1"},
+ {-1, -1, "1"},
+ {-9223372036854775808, -9223372036854775808, "1"},
+}
+
+func TestRatSetFrac64Rat(t *testing.T) {
+ for i, test := range setFrac64Tests {
+ x := new(Rat).SetFrac64(test.a, test.b)
+ if x.RatString() != test.out {
+ t.Errorf("#%d got %s want %s", i, x.RatString(), test.out)
+ }
+ }
+}
+
+func TestRatGobEncoding(t *testing.T) {
+ var medium bytes.Buffer
+ enc := gob.NewEncoder(&medium)
+ dec := gob.NewDecoder(&medium)
+ for i, test := range gobEncodingTests {
+ for j := 0; j < 4; j++ {
+ medium.Reset() // empty buffer for each test case (in case of failures)
+ stest := test
+ if j&1 != 0 {
+ // negative numbers
+ stest = "-" + test
+ }
+ if j&2 != 0 {
+ // fractions
+ stest = stest + "." + test
+ }
+ var tx Rat
+ tx.SetString(stest)
+ if err := enc.Encode(&tx); err != nil {
+ t.Errorf("#%d%c: encoding failed: %s", i, 'a'+j, err)
+ }
+ var rx Rat
+ if err := dec.Decode(&rx); err != nil {
+ t.Errorf("#%d%c: decoding failed: %s", i, 'a'+j, err)
+ }
+ if rx.Cmp(&tx) != 0 {
+ t.Errorf("#%d%c: transmission failed: got %s want %s", i, 'a'+j, &rx, &tx)
+ }
+ }
+ }
+}
+
+func TestIssue2379(t *testing.T) {
+ // 1) no aliasing
+ q := NewRat(3, 2)
+ x := new(Rat)
+ x.SetFrac(NewInt(3), NewInt(2))
+ if x.Cmp(q) != 0 {
+ t.Errorf("1) got %s want %s", x, q)
+ }
+
+ // 2) aliasing of numerator
+ x = NewRat(2, 3)
+ x.SetFrac(NewInt(3), x.Num())
+ if x.Cmp(q) != 0 {
+ t.Errorf("2) got %s want %s", x, q)
+ }
+
+ // 3) aliasing of denominator
+ x = NewRat(2, 3)
+ x.SetFrac(x.Denom(), NewInt(2))
+ if x.Cmp(q) != 0 {
+ t.Errorf("3) got %s want %s", x, q)
+ }
+
+ // 4) aliasing of numerator and denominator
+ x = NewRat(2, 3)
+ x.SetFrac(x.Denom(), x.Num())
+ if x.Cmp(q) != 0 {
+ t.Errorf("4) got %s want %s", x, q)
+ }
+
+ // 5) numerator and denominator are the same
+ q = NewRat(1, 1)
+ x = new(Rat)
+ n := NewInt(7)
+ x.SetFrac(n, n)
+ if x.Cmp(q) != 0 {
+ t.Errorf("5) got %s want %s", x, q)
+ }
+}
--- /dev/null
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package cmplx provides basic constants and mathematical functions for
+// complex numbers.
+package cmplx
+
+import "math"
+
+// Abs returns the absolute value (also called the modulus) of x.
+func Abs(x complex128) float64 { return math.Hypot(real(x), imag(x)) }
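
For instance, the modulus of 3+4i is math.Hypot(3, 4) = 5 exactly:

	_ = Abs(complex(3, 4)) // == 5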
--- /dev/null
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmplx
+
+import "math"
+
+// The original C code, the long comment, and the constants
+// below are from http://netlib.sandia.gov/cephes/c9x-complex/clog.c.
+// The go code is a simplified version of the original C.
+//
+// Cephes Math Library Release 2.8: June, 2000
+// Copyright 1984, 1987, 1989, 1992, 2000 by Stephen L. Moshier
+//
+// The readme file at http://netlib.sandia.gov/cephes/ says:
+// Some software in this archive may be from the book _Methods and
+// Programs for Mathematical Functions_ (Prentice-Hall or Simon & Schuster
+// International, 1989) or from the Cephes Mathematical Library, a
+// commercial product. In either event, it is copyrighted by the author.
+// What you see here may be used freely but it comes with no support or
+// guarantee.
+//
+// The two known misprints in the book are repaired here in the
+// source listings for the gamma function and the incomplete beta
+// integral.
+//
+// Stephen L. Moshier
+// moshier@na-net.ornl.gov
+
+// Complex circular arc sine
+//
+// DESCRIPTION:
+//
+// Inverse complex sine:
+// 2
+// w = -i clog( iz + csqrt( 1 - z ) ).
+//
+// casin(z) = -i casinh(iz)
+//
+// ACCURACY:
+//
+// Relative error:
+// arithmetic domain # trials peak rms
+// DEC -10,+10 10100 2.1e-15 3.4e-16
+// IEEE -10,+10 30000 2.2e-14 2.7e-15
+// Larger relative error can be observed for z near zero.
+// Also tested by csin(casin(z)) = z.
+
+// Asin returns the inverse sine of x.
+func Asin(x complex128) complex128 {
+ if imag(x) == 0 {
+ if math.Abs(real(x)) > 1 {
+ return complex(math.Pi/2, 0) // DOMAIN error
+ }
+ return complex(math.Asin(real(x)), 0)
+ }
+ ct := complex(-imag(x), real(x)) // i * x
+ xx := x * x
+ x1 := complex(1-real(xx), -imag(xx)) // 1 - x*x
+ x2 := Sqrt(x1) // x2 = sqrt(1 - x*x)
+ w := Log(ct + x2)
+ return complex(imag(w), -real(w)) // -i * w
+}
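
Per the Cephes notes above, Asin computes w = -i*log(iz + sqrt(1 - z*z)) and is cross-checked by sin(asin(z)) = z; an illustrative round trip, assuming Sin from the same package:

	z := complex(0.5, -0.25)
	w := Asin(z)
	_ = Sin(w) // approximately z, up to floating-point error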
+
+// Asinh returns the inverse hyperbolic sine of x.
+func Asinh(x complex128) complex128 {
+ // TODO check range
+ if imag(x) == 0 {
+ if math.Abs(real(x)) > 1 {
+ return complex(math.Pi/2, 0) // DOMAIN error
+ }
+ return complex(math.Asinh(real(x)), 0)
+ }
+ xx := x * x
+ x1 := complex(1+real(xx), imag(xx)) // 1 + x*x
+ return Log(x + Sqrt(x1)) // log(x + sqrt(1 + x*x))
+}
+
+// Complex circular arc cosine
+//
+// DESCRIPTION:
+//
+// w = arccos z = PI/2 - arcsin z.
+//
+// ACCURACY:
+//
+// Relative error:
+// arithmetic domain # trials peak rms
+// DEC -10,+10 5200 1.6e-15 2.8e-16
+// IEEE -10,+10 30000 1.8e-14 2.2e-15
+
+// Acos returns the inverse cosine of x.
+func Acos(x complex128) complex128 {
+ w := Asin(x)
+ return complex(math.Pi/2-real(w), -imag(w))
+}
+
+// Acosh returns the inverse hyperbolic cosine of x.
+func Acosh(x complex128) complex128 {
+ w := Acos(x)
+ if imag(w) <= 0 {
+ return complex(-imag(w), real(w)) // i * w
+ }
+ return complex(imag(w), -real(w)) // -i * w
+}
+
+// Complex circular arc tangent
+//
+// DESCRIPTION:
+//
+// If
+// z = x + iy,
+//
+// then
+// 1 ( 2x )
+// Re w = - arctan(-----------) + k PI
+// 2 ( 2 2)
+// (1 - x - y )
+//
+// ( 2 2)
+// 1 (x + (y+1) )
+// Im w = - log(------------)
+// 4 ( 2 2)
+// (x + (y-1) )
+//
+// Where k is an arbitrary integer.
+//
+// catan(z) = -i catanh(iz).
+//
+// ACCURACY:
+//
+// Relative error:
+// arithmetic domain # trials peak rms
+// DEC -10,+10 5900 1.3e-16 7.8e-18
+// IEEE -10,+10 30000 2.3e-15 8.5e-17
+// The check catan( ctan(z) ) = z, with |x| and |y| < PI/2,
+// had peak relative error 1.5e-16, rms relative error
+// 2.9e-17. See also clog().
+
+// Atan returns the inverse tangent of x.
+func Atan(x complex128) complex128 {
+ if real(x) == 0 && imag(x) > 1 {
+ return NaN()
+ }
+
+ x2 := real(x) * real(x)
+ a := 1 - x2 - imag(x)*imag(x)
+ if a == 0 {
+ return NaN()
+ }
+ t := 0.5 * math.Atan2(2*real(x), a)
+ w := reducePi(t)
+
+ t = imag(x) - 1
+ b := x2 + t*t
+ if b == 0 {
+ return NaN()
+ }
+ t = imag(x) + 1
+ c := (x2 + t*t) / b
+ return complex(w, 0.25*math.Log(c))
+}
+
+// Atanh returns the inverse hyperbolic tangent of x.
+func Atanh(x complex128) complex128 {
+ z := complex(-imag(x), real(x)) // z = i * x
+ z = Atan(z)
+ return complex(imag(z), -real(z)) // z = -i * z
+}
--- /dev/null
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmplx
+
+import (
+ "math"
+ "testing"
+)
+
+var vc26 = []complex128{
+ (4.97901192488367350108546816 + 7.73887247457810456552351752i),
+ (7.73887247457810456552351752 - 0.27688005719200159404635997i),
+ (-0.27688005719200159404635997 - 5.01060361827107492160848778i),
+ (-5.01060361827107492160848778 + 9.63629370719841737980004837i),
+ (9.63629370719841737980004837 + 2.92637723924396464525443662i),
+ (2.92637723924396464525443662 + 5.22908343145930665230025625i),
+ (5.22908343145930665230025625 + 2.72793991043601025126008608i),
+ (2.72793991043601025126008608 + 1.82530809168085506044576505i),
+ (1.82530809168085506044576505 - 8.68592476857560136238589621i),
+ (-8.68592476857560136238589621 + 4.97901192488367350108546816i),
+}
+var vc = []complex128{
+ (4.9790119248836735e+00 + 7.7388724745781045e+00i),
+ (7.7388724745781045e+00 - 2.7688005719200159e-01i),
+ (-2.7688005719200159e-01 - 5.0106036182710749e+00i),
+ (-5.0106036182710749e+00 + 9.6362937071984173e+00i),
+ (9.6362937071984173e+00 + 2.9263772392439646e+00i),
+ (2.9263772392439646e+00 + 5.2290834314593066e+00i),
+ (5.2290834314593066e+00 + 2.7279399104360102e+00i),
+ (2.7279399104360102e+00 + 1.8253080916808550e+00i),
+ (1.8253080916808550e+00 - 8.6859247685756013e+00i),
+ (-8.6859247685756013e+00 + 4.9790119248836735e+00i),
+}
+
+// The expected results below were computed by the high precision calculators
+// at http://keisan.casio.com/. More exact input values (array vc[], above)
+// were obtained by printing them with "%.26f". The answers were calculated
+// to 26 digits (by using the "Digit number" drop-down control of each
+// calculator).
+
+var abs = []float64{
+ 9.2022120669932650313380972e+00,
+ 7.7438239742296106616261394e+00,
+ 5.0182478202557746902556648e+00,
+ 1.0861137372799545160704002e+01,
+ 1.0070841084922199607011905e+01,
+ 5.9922447613166942183705192e+00,
+ 5.8978784056736762299945176e+00,
+ 3.2822866700678709020367184e+00,
+ 8.8756430028990417290744307e+00,
+ 1.0011785496777731986390856e+01,
+}
+
+var acos = []complex128{
+ (1.0017679804707456328694569 - 2.9138232718554953784519807i),
+ (0.03606427612041407369636057 + 2.7358584434576260925091256i),
+ (1.6249365462333796703711823 + 2.3159537454335901187730929i),
+ (2.0485650849650740120660391 - 3.0795576791204117911123886i),
+ (0.29621132089073067282488147 - 3.0007392508200622519398814i),
+ (1.0664555914934156601503632 - 2.4872865024796011364747111i),
+ (0.48681307452231387690013905 - 2.463655912283054555225301i),
+ (0.6116977071277574248407752 - 1.8734458851737055262693056i),
+ (1.3649311280370181331184214 + 2.8793528632328795424123832i),
+ (2.6189310485682988308904501 - 2.9956543302898767795858704i),
+}
+var acosh = []complex128{
+ (2.9138232718554953784519807 + 1.0017679804707456328694569i),
+ (2.7358584434576260925091256 - 0.03606427612041407369636057i),
+ (2.3159537454335901187730929 - 1.6249365462333796703711823i),
+ (3.0795576791204117911123886 + 2.0485650849650740120660391i),
+ (3.0007392508200622519398814 + 0.29621132089073067282488147i),
+ (2.4872865024796011364747111 + 1.0664555914934156601503632i),
+ (2.463655912283054555225301 + 0.48681307452231387690013905i),
+ (1.8734458851737055262693056 + 0.6116977071277574248407752i),
+ (2.8793528632328795424123832 - 1.3649311280370181331184214i),
+ (2.9956543302898767795858704 + 2.6189310485682988308904501i),
+}
+var asin = []complex128{
+ (0.56902834632415098636186476 + 2.9138232718554953784519807i),
+ (1.5347320506744825455349611 - 2.7358584434576260925091256i),
+ (-0.054140219438483051139860579 - 2.3159537454335901187730929i),
+ (-0.47776875817017739283471738 + 3.0795576791204117911123886i),
+ (1.2745850059041659464064402 + 3.0007392508200622519398814i),
+ (0.50434073530148095908095852 + 2.4872865024796011364747111i),
+ (1.0839832522725827423311826 + 2.463655912283054555225301i),
+ (0.9590986196671391943905465 + 1.8734458851737055262693056i),
+ (0.20586519875787848611290031 - 2.8793528632328795424123832i),
+ (-1.0481347217734022116591284 + 2.9956543302898767795858704i),
+}
+var asinh = []complex128{
+ (2.9113760469415295679342185 + 0.99639459545704326759805893i),
+ (2.7441755423994259061579029 - 0.035468308789000500601119392i),
+ (-2.2962136462520690506126678 - 1.5144663565690151885726707i),
+ (-3.0771233459295725965402455 + 1.0895577967194013849422294i),
+ (3.0048366100923647417557027 + 0.29346979169819220036454168i),
+ (2.4800059370795363157364643 + 1.0545868606049165710424232i),
+ (2.4718773838309585611141821 + 0.47502344364250803363708842i),
+ (1.8910743588080159144378396 + 0.56882925572563602341139174i),
+ (2.8735426423367341878069406 - 1.362376149648891420997548i),
+ (-2.9981750586172477217567878 + 0.5183571985225367505624207i),
+}
+var atan = []complex128{
+ (1.5115747079332741358607654 + 0.091324403603954494382276776i),
+ (1.4424504323482602560806727 - 0.0045416132642803911503770933i),
+ (-1.5593488703630532674484026 - 0.20163295409248362456446431i),
+ (-1.5280619472445889867794105 + 0.081721556230672003746956324i),
+ (1.4759909163240799678221039 + 0.028602969320691644358773586i),
+ (1.4877353772046548932715555 + 0.14566877153207281663773599i),
+ (1.4206983927779191889826 + 0.076830486127880702249439993i),
+ (1.3162236060498933364869556 + 0.16031313000467530644933363i),
+ (1.5473450684303703578810093 - 0.11064907507939082484935782i),
+ (-1.4841462340185253987375812 + 0.049341850305024399493142411i),
+}
+var atanh = []complex128{
+ (0.058375027938968509064640438 + 1.4793488495105334458167782i),
+ (0.12977343497790381229915667 - 1.5661009410463561327262499i),
+ (-0.010576456067347252072200088 - 1.3743698658402284549750563i),
+ (-0.042218595678688358882784918 + 1.4891433968166405606692604i),
+ (0.095218997991316722061828397 + 1.5416884098777110330499698i),
+ (0.079965459366890323857556487 + 1.4252510353873192700350435i),
+ (0.15051245471980726221708301 + 1.4907432533016303804884461i),
+ (0.25082072933993987714470373 + 1.392057665392187516442986i),
+ (0.022896108815797135846276662 - 1.4609224989282864208963021i),
+ (-0.08665624101841876130537396 + 1.5207902036935093480142159i),
+}
+var conj = []complex128{
+ (4.9790119248836735e+00 - 7.7388724745781045e+00i),
+ (7.7388724745781045e+00 + 2.7688005719200159e-01i),
+ (-2.7688005719200159e-01 + 5.0106036182710749e+00i),
+ (-5.0106036182710749e+00 - 9.6362937071984173e+00i),
+ (9.6362937071984173e+00 - 2.9263772392439646e+00i),
+ (2.9263772392439646e+00 - 5.2290834314593066e+00i),
+ (5.2290834314593066e+00 - 2.7279399104360102e+00i),
+ (2.7279399104360102e+00 - 1.8253080916808550e+00i),
+ (1.8253080916808550e+00 + 8.6859247685756013e+00i),
+ (-8.6859247685756013e+00 - 4.9790119248836735e+00i),
+}
+var cos = []complex128{
+ (3.024540920601483938336569e+02 + 1.1073797572517071650045357e+03i),
+ (1.192858682649064973252758e-01 + 2.7857554122333065540970207e-01i),
+ (7.2144394304528306603857962e+01 - 2.0500129667076044169954205e+01i),
+ (2.24921952538403984190541e+03 - 7.317363745602773587049329e+03i),
+ (-9.148222970032421760015498e+00 + 1.953124661113563541862227e+00i),
+ (-9.116081175857732248227078e+01 - 1.992669213569952232487371e+01i),
+ (3.795639179042704640002918e+00 + 6.623513350981458399309662e+00i),
+ (-2.9144840732498869560679084e+00 - 1.214620271628002917638748e+00i),
+ (-7.45123482501299743872481e+02 + 2.8641692314488080814066734e+03i),
+ (-5.371977967039319076416747e+01 + 4.893348341339375830564624e+01i),
+}
+var cosh = []complex128{
+ (8.34638383523018249366948e+00 + 7.2181057886425846415112064e+01i),
+ (1.10421967379919366952251e+03 - 3.1379638689277575379469861e+02i),
+ (3.051485206773701584738512e-01 - 2.6805384730105297848044485e-01i),
+ (-7.33294728684187933370938e+01 + 1.574445942284918251038144e+01i),
+ (-7.478643293945957535757355e+03 + 1.6348382209913353929473321e+03i),
+ (4.622316522966235701630926e+00 - 8.088695185566375256093098e+00i),
+ (-8.544333183278877406197712e+01 + 3.7505836120128166455231717e+01i),
+ (-1.934457815021493925115198e+00 + 7.3725859611767228178358673e+00i),
+ (-2.352958770061749348353548e+00 - 2.034982010440878358915409e+00i),
+ (7.79756457532134748165069e+02 + 2.8549350716819176560377717e+03i),
+}
+var exp = []complex128{
+ (1.669197736864670815125146e+01 + 1.4436895109507663689174096e+02i),
+ (2.2084389286252583447276212e+03 - 6.2759289284909211238261917e+02i),
+ (2.227538273122775173434327e-01 + 7.2468284028334191250470034e-01i),
+ (-6.5182985958153548997881627e-03 - 1.39965837915193860879044e-03i),
+ (-1.4957286524084015746110777e+04 + 3.269676455931135688988042e+03i),
+ (9.218158701983105935659273e+00 - 1.6223985291084956009304582e+01i),
+ (-1.7088175716853040841444505e+02 + 7.501382609870410713795546e+01i),
+ (-3.852461315830959613132505e+00 + 1.4808420423156073221970892e+01i),
+ (-4.586775503301407379786695e+00 - 4.178501081246873415144744e+00i),
+ (4.451337963005453491095747e-05 - 1.62977574205442915935263e-04i),
+}
+var log = []complex128{
+ (2.2194438972179194425697051e+00 + 9.9909115046919291062461269e-01i),
+ (2.0468956191154167256337289e+00 - 3.5762575021856971295156489e-02i),
+ (1.6130808329853860438751244e+00 - 1.6259990074019058442232221e+00i),
+ (2.3851910394823008710032651e+00 + 2.0502936359659111755031062e+00i),
+ (2.3096442270679923004800651e+00 + 2.9483213155446756211881774e-01i),
+ (1.7904660933974656106951860e+00 + 1.0605860367252556281902109e+00i),
+ (1.7745926939841751666177512e+00 + 4.8084556083358307819310911e-01i),
+ (1.1885403350045342425648780e+00 + 5.8969634164776659423195222e-01i),
+ (2.1833107837679082586772505e+00 - 1.3636647724582455028314573e+00i),
+ (2.3037629487273259170991671e+00 + 2.6210913895386013290915234e+00i),
+}
+var log10 = []complex128{
+ (9.6389223745559042474184943e-01 + 4.338997735671419492599631e-01i),
+ (8.8895547241376579493490892e-01 - 1.5531488990643548254864806e-02i),
+ (7.0055210462945412305244578e-01 - 7.0616239649481243222248404e-01i),
+ (1.0358753067322445311676952e+00 + 8.9043121238134980156490909e-01i),
+ (1.003065742975330237172029e+00 + 1.2804396782187887479857811e-01i),
+ (7.7758954439739162532085157e-01 + 4.6060666333341810869055108e-01i),
+ (7.7069581462315327037689152e-01 + 2.0882857371769952195512475e-01i),
+ (5.1617650901191156135137239e-01 + 2.5610186717615977620363299e-01i),
+ (9.4819982567026639742663212e-01 - 5.9223208584446952284914289e-01i),
+ (1.0005115362454417135973429e+00 + 1.1383255270407412817250921e+00i),
+}
+
+type ff struct {
+ r, theta float64
+}
+
+var polar = []ff{
+ {9.2022120669932650313380972e+00, 9.9909115046919291062461269e-01},
+ {7.7438239742296106616261394e+00, -3.5762575021856971295156489e-02},
+ {5.0182478202557746902556648e+00, -1.6259990074019058442232221e+00},
+ {1.0861137372799545160704002e+01, 2.0502936359659111755031062e+00},
+ {1.0070841084922199607011905e+01, 2.9483213155446756211881774e-01},
+ {5.9922447613166942183705192e+00, 1.0605860367252556281902109e+00},
+ {5.8978784056736762299945176e+00, 4.8084556083358307819310911e-01},
+ {3.2822866700678709020367184e+00, 5.8969634164776659423195222e-01},
+ {8.8756430028990417290744307e+00, -1.3636647724582455028314573e+00},
+ {1.0011785496777731986390856e+01, 2.6210913895386013290915234e+00},
+}
+var pow = []complex128{
+ (-2.499956739197529585028819e+00 + 1.759751724335650228957144e+00i),
+ (7.357094338218116311191939e+04 - 5.089973412479151648145882e+04i),
+ (1.320777296067768517259592e+01 - 3.165621914333901498921986e+01i),
+ (-3.123287828297300934072149e-07 - 1.9849567521490553032502223E-7i),
+ (8.0622651468477229614813e+04 - 7.80028727944573092944363e+04i),
+ (-1.0268824572103165858577141e+00 - 4.716844738244989776610672e-01i),
+ (-4.35953819012244175753187e+01 + 2.2036445974645306917648585e+02i),
+ (8.3556092283250594950239e-01 - 1.2261571947167240272593282e+01i),
+ (1.582292972120769306069625e+03 + 1.273564263524278244782512e+04i),
+ (6.592208301642122149025369e-08 + 2.584887236651661903526389e-08i),
+}
+var sin = []complex128{
+ (-1.1073801774240233539648544e+03 + 3.024539773002502192425231e+02i),
+ (1.0317037521400759359744682e+00 - 3.2208979799929570242818e-02i),
+ (-2.0501952097271429804261058e+01 - 7.2137981348240798841800967e+01i),
+ (7.3173638080346338642193078e+03 + 2.249219506193664342566248e+03i),
+ (-1.964375633631808177565226e+00 - 9.0958264713870404464159683e+00i),
+ (1.992783647158514838337674e+01 - 9.11555769410191350416942e+01i),
+ (-6.680335650741921444300349e+00 + 3.763353833142432513086117e+00i),
+ (1.2794028166657459148245993e+00 - 2.7669092099795781155109602e+00i),
+ (2.8641693949535259594188879e+03 + 7.451234399649871202841615e+02i),
+ (-4.893811726244659135553033e+01 - 5.371469305562194635957655e+01i),
+}
+var sinh = []complex128{
+ (8.34559353341652565758198e+00 + 7.2187893208650790476628899e+01i),
+ (1.1042192548260646752051112e+03 - 3.1379650595631635858792056e+02i),
+ (-8.239469336509264113041849e-02 + 9.9273668758439489098514519e-01i),
+ (7.332295456982297798219401e+01 - 1.574585908122833444899023e+01i),
+ (-7.4786432301380582103534216e+03 + 1.63483823493980029604071e+03i),
+ (4.595842179016870234028347e+00 - 8.135290105518580753211484e+00i),
+ (-8.543842533574163435246793e+01 + 3.750798997857594068272375e+01i),
+ (-1.918003500809465688017307e+00 + 7.4358344619793504041350251e+00i),
+ (-2.233816733239658031433147e+00 - 2.143519070805995056229335e+00i),
+ (-7.797564130187551181105341e+02 - 2.8549352346594918614806877e+03i),
+}
+var sqrt = []complex128{
+ (2.6628203086086130543813948e+00 + 1.4531345674282185229796902e+00i),
+ (2.7823278427251986247149295e+00 - 4.9756907317005224529115567e-02i),
+ (1.5397025302089642757361015e+00 - 1.6271336573016637535695727e+00i),
+ (1.7103411581506875260277898e+00 + 2.8170677122737589676157029e+00i),
+ (3.1390392472953103383607947e+00 + 4.6612625849858653248980849e-01i),
+ (2.1117080764822417640789287e+00 + 1.2381170223514273234967850e+00i),
+ (2.3587032281672256703926939e+00 + 5.7827111903257349935720172e-01i),
+ (1.7335262588873410476661577e+00 + 5.2647258220721269141550382e-01i),
+ (2.3131094974708716531499282e+00 - 1.8775429304303785570775490e+00i),
+ (8.1420535745048086240947359e-01 + 3.0575897587277248522656113e+00i),
+}
+var tan = []complex128{
+ (-1.928757919086441129134525e-07 + 1.0000003267499169073251826e+00i),
+ (1.242412685364183792138948e+00 - 3.17149693883133370106696e+00i),
+ (-4.6745126251587795225571826e-05 - 9.9992439225263959286114298e-01i),
+ (4.792363401193648192887116e-09 + 1.0000000070589333451557723e+00i),
+ (2.345740824080089140287315e-03 + 9.947733046570988661022763e-01i),
+ (-2.396030789494815566088809e-05 + 9.9994781345418591429826779e-01i),
+ (-7.370204836644931340905303e-03 + 1.0043553413417138987717748e+00i),
+ (-3.691803847992048527007457e-02 + 9.6475071993469548066328894e-01i),
+ (-2.781955256713729368401878e-08 - 1.000000049848910609006646e+00i),
+ (9.4281590064030478879791249e-05 + 9.9999119340863718183758545e-01i),
+}
+var tanh = []complex128{
+ (1.0000921981225144748819918e+00 + 2.160986245871518020231507e-05i),
+ (9.9999967727531993209562591e-01 - 1.9953763222959658873657676e-07i),
+ (-1.765485739548037260789686e+00 + 1.7024216325552852445168471e+00i),
+ (-9.999189442732736452807108e-01 + 3.64906070494473701938098e-05i),
+ (9.9999999224622333738729767e-01 - 3.560088949517914774813046e-09i),
+ (1.0029324933367326862499343e+00 - 4.948790309797102353137528e-03i),
+ (9.9996113064788012488693567e-01 - 4.226995742097032481451259e-05i),
+ (1.0074784189316340029873945e+00 - 4.194050814891697808029407e-03i),
+ (9.9385534229718327109131502e-01 + 5.144217985914355502713437e-02i),
+ (-1.0000000491604982429364892e+00 - 2.901873195374433112227349e-08i),
+}
+
+// special cases
+var vcAbsSC = []complex128{
+ NaN(),
+}
+var absSC = []float64{
+ math.NaN(),
+}
+var vcAcosSC = []complex128{
+ NaN(),
+}
+var acosSC = []complex128{
+ NaN(),
+}
+var vcAcoshSC = []complex128{
+ NaN(),
+}
+var acoshSC = []complex128{
+ NaN(),
+}
+var vcAsinSC = []complex128{
+ NaN(),
+}
+var asinSC = []complex128{
+ NaN(),
+}
+var vcAsinhSC = []complex128{
+ NaN(),
+}
+var asinhSC = []complex128{
+ NaN(),
+}
+var vcAtanSC = []complex128{
+ NaN(),
+}
+var atanSC = []complex128{
+ NaN(),
+}
+var vcAtanhSC = []complex128{
+ NaN(),
+}
+var atanhSC = []complex128{
+ NaN(),
+}
+var vcConjSC = []complex128{
+ NaN(),
+}
+var conjSC = []complex128{
+ NaN(),
+}
+var vcCosSC = []complex128{
+ NaN(),
+}
+var cosSC = []complex128{
+ NaN(),
+}
+var vcCoshSC = []complex128{
+ NaN(),
+}
+var coshSC = []complex128{
+ NaN(),
+}
+var vcExpSC = []complex128{
+ NaN(),
+}
+var expSC = []complex128{
+ NaN(),
+}
+var vcIsNaNSC = []complex128{
+ complex(math.Inf(-1), math.Inf(-1)),
+ complex(math.Inf(-1), math.NaN()),
+ complex(math.NaN(), math.Inf(-1)),
+ complex(0, math.NaN()),
+ complex(math.NaN(), 0),
+ complex(math.Inf(1), math.Inf(1)),
+ complex(math.Inf(1), math.NaN()),
+ complex(math.NaN(), math.Inf(1)),
+ complex(math.NaN(), math.NaN()),
+}
+var isNaNSC = []bool{
+ false,
+ false,
+ false,
+ true,
+ true,
+ false,
+ false,
+ false,
+ true,
+}
+var vcLogSC = []complex128{
+ NaN(),
+}
+var logSC = []complex128{
+ NaN(),
+}
+var vcLog10SC = []complex128{
+ NaN(),
+}
+var log10SC = []complex128{
+ NaN(),
+}
+var vcPolarSC = []complex128{
+ NaN(),
+}
+var polarSC = []ff{
+ {math.NaN(), math.NaN()},
+}
+var vcPowSC = [][2]complex128{
+ {NaN(), NaN()},
+}
+var powSC = []complex128{
+ NaN(),
+}
+var vcSinSC = []complex128{
+ NaN(),
+}
+var sinSC = []complex128{
+ NaN(),
+}
+var vcSinhSC = []complex128{
+ NaN(),
+}
+var sinhSC = []complex128{
+ NaN(),
+}
+var vcSqrtSC = []complex128{
+ NaN(),
+}
+var sqrtSC = []complex128{
+ NaN(),
+}
+var vcTanSC = []complex128{
+ NaN(),
+}
+var tanSC = []complex128{
+ NaN(),
+}
+var vcTanhSC = []complex128{
+ NaN(),
+}
+var tanhSC = []complex128{
+ NaN(),
+}
+
+// functions borrowed from pkg/math/all_test.go
+func tolerance(a, b, e float64) bool {
+ d := a - b
+ if d < 0 {
+ d = -d
+ }
+
+ if a != 0 {
+ e = e * a
+ if e < 0 {
+ e = -e
+ }
+ }
+ return d < e
+}
+func soclose(a, b, e float64) bool { return tolerance(a, b, e) }
+func veryclose(a, b float64) bool { return tolerance(a, b, 4e-16) }
+func alike(a, b float64) bool {
+ switch {
+ case a != a && b != b: // math.IsNaN(a) && math.IsNaN(b):
+ return true
+ case a == b:
+ return math.Signbit(a) == math.Signbit(b)
+ }
+ return false
+}
+
+func cTolerance(a, b complex128, e float64) bool {
+ d := Abs(a - b)
+ if a != 0 {
+ e = e * Abs(a)
+ if e < 0 {
+ e = -e
+ }
+ }
+ return d < e
+}
+func cSoclose(a, b complex128, e float64) bool { return cTolerance(a, b, e) }
+func cVeryclose(a, b complex128) bool { return cTolerance(a, b, 4e-16) }
+func cAlike(a, b complex128) bool {
+ switch {
+ case IsNaN(a) && IsNaN(b):
+ return true
+ case a == b:
+ return math.Signbit(real(a)) == math.Signbit(real(b)) && math.Signbit(imag(a)) == math.Signbit(imag(b))
+ }
+ return false
+}
+
+func TestAbs(t *testing.T) {
+ for i := 0; i < len(vc); i++ {
+ if f := Abs(vc[i]); !veryclose(abs[i], f) {
+ t.Errorf("Abs(%g) = %g, want %g", vc[i], f, abs[i])
+ }
+ }
+ for i := 0; i < len(vcAbsSC); i++ {
+ if f := Abs(vcAbsSC[i]); !alike(absSC[i], f) {
+ t.Errorf("Abs(%g) = %g, want %g", vcAbsSC[i], f, absSC[i])
+ }
+ }
+}
+func TestAcos(t *testing.T) {
+ for i := 0; i < len(vc); i++ {
+ if f := Acos(vc[i]); !cSoclose(acos[i], f, 1e-14) {
+ t.Errorf("Acos(%g) = %g, want %g", vc[i], f, acos[i])
+ }
+ }
+ for i := 0; i < len(vcAcosSC); i++ {
+ if f := Acos(vcAcosSC[i]); !cAlike(acosSC[i], f) {
+ t.Errorf("Acos(%g) = %g, want %g", vcAcosSC[i], f, acosSC[i])
+ }
+ }
+}
+func TestAcosh(t *testing.T) {
+ for i := 0; i < len(vc); i++ {
+ if f := Acosh(vc[i]); !cSoclose(acosh[i], f, 1e-14) {
+ t.Errorf("Acosh(%g) = %g, want %g", vc[i], f, acosh[i])
+ }
+ }
+ for i := 0; i < len(vcAcoshSC); i++ {
+ if f := Acosh(vcAcoshSC[i]); !cAlike(acoshSC[i], f) {
+ t.Errorf("Acosh(%g) = %g, want %g", vcAcoshSC[i], f, acoshSC[i])
+ }
+ }
+}
+func TestAsin(t *testing.T) {
+ for i := 0; i < len(vc); i++ {
+ if f := Asin(vc[i]); !cSoclose(asin[i], f, 1e-14) {
+ t.Errorf("Asin(%g) = %g, want %g", vc[i], f, asin[i])
+ }
+ }
+ for i := 0; i < len(vcAsinSC); i++ {
+ if f := Asin(vcAsinSC[i]); !cAlike(asinSC[i], f) {
+ t.Errorf("Asin(%g) = %g, want %g", vcAsinSC[i], f, asinSC[i])
+ }
+ }
+}
+func TestAsinh(t *testing.T) {
+ for i := 0; i < len(vc); i++ {
+ if f := Asinh(vc[i]); !cSoclose(asinh[i], f, 4e-15) {
+ t.Errorf("Asinh(%g) = %g, want %g", vc[i], f, asinh[i])
+ }
+ }
+ for i := 0; i < len(vcAsinhSC); i++ {
+ if f := Asinh(vcAsinhSC[i]); !cAlike(asinhSC[i], f) {
+ t.Errorf("Asinh(%g) = %g, want %g", vcAsinhSC[i], f, asinhSC[i])
+ }
+ }
+}
+func TestAtan(t *testing.T) {
+ for i := 0; i < len(vc); i++ {
+ if f := Atan(vc[i]); !cVeryclose(atan[i], f) {
+ t.Errorf("Atan(%g) = %g, want %g", vc[i], f, atan[i])
+ }
+ }
+ for i := 0; i < len(vcAtanSC); i++ {
+ if f := Atan(vcAtanSC[i]); !cAlike(atanSC[i], f) {
+ t.Errorf("Atan(%g) = %g, want %g", vcAtanSC[i], f, atanSC[i])
+ }
+ }
+}
+func TestAtanh(t *testing.T) {
+ for i := 0; i < len(vc); i++ {
+ if f := Atanh(vc[i]); !cVeryclose(atanh[i], f) {
+ t.Errorf("Atanh(%g) = %g, want %g", vc[i], f, atanh[i])
+ }
+ }
+ for i := 0; i < len(vcAtanhSC); i++ {
+ if f := Atanh(vcAtanhSC[i]); !cAlike(atanhSC[i], f) {
+ t.Errorf("Atanh(%g) = %g, want %g", vcAtanhSC[i], f, atanhSC[i])
+ }
+ }
+}
+func TestConj(t *testing.T) {
+ for i := 0; i < len(vc); i++ {
+ if f := Conj(vc[i]); !cVeryclose(conj[i], f) {
+ t.Errorf("Conj(%g) = %g, want %g", vc[i], f, conj[i])
+ }
+ }
+ for i := 0; i < len(vcConjSC); i++ {
+ if f := Conj(vcConjSC[i]); !cAlike(conjSC[i], f) {
+ t.Errorf("Conj(%g) = %g, want %g", vcConjSC[i], f, conjSC[i])
+ }
+ }
+}
+func TestCos(t *testing.T) {
+ for i := 0; i < len(vc); i++ {
+ if f := Cos(vc[i]); !cSoclose(cos[i], f, 3e-15) {
+ t.Errorf("Cos(%g) = %g, want %g", vc[i], f, cos[i])
+ }
+ }
+ for i := 0; i < len(vcCosSC); i++ {
+ if f := Cos(vcCosSC[i]); !cAlike(cosSC[i], f) {
+ t.Errorf("Cos(%g) = %g, want %g", vcCosSC[i], f, cosSC[i])
+ }
+ }
+}
+func TestCosh(t *testing.T) {
+ for i := 0; i < len(vc); i++ {
+ if f := Cosh(vc[i]); !cSoclose(cosh[i], f, 2e-15) {
+ t.Errorf("Cosh(%g) = %g, want %g", vc[i], f, cosh[i])
+ }
+ }
+ for i := 0; i < len(vcCoshSC); i++ {
+ if f := Cosh(vcCoshSC[i]); !cAlike(coshSC[i], f) {
+ t.Errorf("Cosh(%g) = %g, want %g", vcCoshSC[i], f, coshSC[i])
+ }
+ }
+}
+func TestExp(t *testing.T) {
+ for i := 0; i < len(vc); i++ {
+ if f := Exp(vc[i]); !cSoclose(exp[i], f, 1e-15) {
+ t.Errorf("Exp(%g) = %g, want %g", vc[i], f, exp[i])
+ }
+ }
+ for i := 0; i < len(vcExpSC); i++ {
+ if f := Exp(vcExpSC[i]); !cAlike(expSC[i], f) {
+ t.Errorf("Exp(%g) = %g, want %g", vcExpSC[i], f, expSC[i])
+ }
+ }
+}
+func TestIsNaN(t *testing.T) {
+ for i := 0; i < len(vcIsNaNSC); i++ {
+ if f := IsNaN(vcIsNaNSC[i]); isNaNSC[i] != f {
+ t.Errorf("IsNaN(%v) = %v, want %v", vcIsNaNSC[i], f, isNaNSC[i])
+ }
+ }
+}
+func TestLog(t *testing.T) {
+ for i := 0; i < len(vc); i++ {
+ if f := Log(vc[i]); !cVeryclose(log[i], f) {
+ t.Errorf("Log(%g) = %g, want %g", vc[i], f, log[i])
+ }
+ }
+ for i := 0; i < len(vcLogSC); i++ {
+ if f := Log(vcLogSC[i]); !cAlike(logSC[i], f) {
+ t.Errorf("Log(%g) = %g, want %g", vcLogSC[i], f, logSC[i])
+ }
+ }
+}
+func TestLog10(t *testing.T) {
+ for i := 0; i < len(vc); i++ {
+ if f := Log10(vc[i]); !cVeryclose(log10[i], f) {
+ t.Errorf("Log10(%g) = %g, want %g", vc[i], f, log10[i])
+ }
+ }
+ for i := 0; i < len(vcLog10SC); i++ {
+ if f := Log10(vcLog10SC[i]); !cAlike(log10SC[i], f) {
+ t.Errorf("Log10(%g) = %g, want %g", vcLog10SC[i], f, log10SC[i])
+ }
+ }
+}
+func TestPolar(t *testing.T) {
+ for i := 0; i < len(vc); i++ {
+ if r, theta := Polar(vc[i]); !veryclose(polar[i].r, r) && !veryclose(polar[i].theta, theta) {
+ t.Errorf("Polar(%g) = %g, %g want %g, %g", vc[i], r, theta, polar[i].r, polar[i].theta)
+ }
+ }
+ for i := 0; i < len(vcPolarSC); i++ {
+ if r, theta := Polar(vcPolarSC[i]); !alike(polarSC[i].r, r) && !alike(polarSC[i].theta, theta) {
+ t.Errorf("Polar(%g) = %g, %g, want %g, %g", vcPolarSC[i], r, theta, polarSC[i].r, polarSC[i].theta)
+ }
+ }
+}
+func TestPow(t *testing.T) {
+ var a = complex(3.0, 3.0)
+ for i := 0; i < len(vc); i++ {
+ if f := Pow(a, vc[i]); !cSoclose(pow[i], f, 4e-15) {
+ t.Errorf("Pow(%g, %g) = %g, want %g", a, vc[i], f, pow[i])
+ }
+ }
+ for i := 0; i < len(vcPowSC); i++ {
+ if f := Pow(vcPowSC[i][0], vcPowSC[i][0]); !cAlike(powSC[i], f) {
+ t.Errorf("Pow(%g, %g) = %g, want %g", vcPowSC[i][0], vcPowSC[i][0], f, powSC[i])
+ }
+ }
+}
+func TestRect(t *testing.T) {
+ for i := 0; i < len(vc); i++ {
+ if f := Rect(polar[i].r, polar[i].theta); !cVeryclose(vc[i], f) {
+ t.Errorf("Rect(%g, %g) = %g want %g", polar[i].r, polar[i].theta, f, vc[i])
+ }
+ }
+ for i := 0; i < len(vcPolarSC); i++ {
+ if f := Rect(polarSC[i].r, polarSC[i].theta); !cAlike(vcPolarSC[i], f) {
+ t.Errorf("Rect(%g, %g) = %g, want %g", polarSC[i].r, polarSC[i].theta, f, vcPolarSC[i])
+ }
+ }
+}
+func TestSin(t *testing.T) {
+ for i := 0; i < len(vc); i++ {
+ if f := Sin(vc[i]); !cSoclose(sin[i], f, 2e-15) {
+ t.Errorf("Sin(%g) = %g, want %g", vc[i], f, sin[i])
+ }
+ }
+ for i := 0; i < len(vcSinSC); i++ {
+ if f := Sin(vcSinSC[i]); !cAlike(sinSC[i], f) {
+ t.Errorf("Sin(%g) = %g, want %g", vcSinSC[i], f, sinSC[i])
+ }
+ }
+}
+func TestSinh(t *testing.T) {
+ for i := 0; i < len(vc); i++ {
+ if f := Sinh(vc[i]); !cSoclose(sinh[i], f, 2e-15) {
+ t.Errorf("Sinh(%g) = %g, want %g", vc[i], f, sinh[i])
+ }
+ }
+ for i := 0; i < len(vcSinhSC); i++ {
+ if f := Sinh(vcSinhSC[i]); !cAlike(sinhSC[i], f) {
+ t.Errorf("Sinh(%g) = %g, want %g", vcSinhSC[i], f, sinhSC[i])
+ }
+ }
+}
+func TestSqrt(t *testing.T) {
+ for i := 0; i < len(vc); i++ {
+ if f := Sqrt(vc[i]); !cVeryclose(sqrt[i], f) {
+ t.Errorf("Sqrt(%g) = %g, want %g", vc[i], f, sqrt[i])
+ }
+ }
+ for i := 0; i < len(vcSqrtSC); i++ {
+ if f := Sqrt(vcSqrtSC[i]); !cAlike(sqrtSC[i], f) {
+ t.Errorf("Sqrt(%g) = %g, want %g", vcSqrtSC[i], f, sqrtSC[i])
+ }
+ }
+}
+func TestTan(t *testing.T) {
+ for i := 0; i < len(vc); i++ {
+ if f := Tan(vc[i]); !cSoclose(tan[i], f, 3e-15) {
+ t.Errorf("Tan(%g) = %g, want %g", vc[i], f, tan[i])
+ }
+ }
+ for i := 0; i < len(vcTanSC); i++ {
+ if f := Tan(vcTanSC[i]); !cAlike(tanSC[i], f) {
+ t.Errorf("Tan(%g) = %g, want %g", vcTanSC[i], f, tanSC[i])
+ }
+ }
+}
+func TestTanh(t *testing.T) {
+ for i := 0; i < len(vc); i++ {
+ if f := Tanh(vc[i]); !cSoclose(tanh[i], f, 2e-15) {
+ t.Errorf("Tanh(%g) = %g, want %g", vc[i], f, tanh[i])
+ }
+ }
+ for i := 0; i < len(vcTanhSC); i++ {
+ if f := Tanh(vcTanhSC[i]); !cAlike(tanhSC[i], f) {
+ t.Errorf("Tanh(%g) = %g, want %g", vcTanhSC[i], f, tanhSC[i])
+ }
+ }
+}
+
+func BenchmarkAbs(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ Abs(complex(2.5, 3.5))
+ }
+}
+func BenchmarkAcos(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ Acos(complex(2.5, 3.5))
+ }
+}
+func BenchmarkAcosh(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ Acosh(complex(2.5, 3.5))
+ }
+}
+func BenchmarkAsin(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ Asin(complex(2.5, 3.5))
+ }
+}
+func BenchmarkAsinh(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ Asinh(complex(2.5, 3.5))
+ }
+}
+func BenchmarkAtan(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ Atan(complex(2.5, 3.5))
+ }
+}
+func BenchmarkAtanh(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ Atanh(complex(2.5, 3.5))
+ }
+}
+func BenchmarkConj(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ Conj(complex(2.5, 3.5))
+ }
+}
+func BenchmarkCos(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ Cos(complex(2.5, 3.5))
+ }
+}
+func BenchmarkCosh(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ Cosh(complex(2.5, 3.5))
+ }
+}
+func BenchmarkExp(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ Exp(complex(2.5, 3.5))
+ }
+}
+func BenchmarkLog(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ Log(complex(2.5, 3.5))
+ }
+}
+func BenchmarkLog10(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ Log10(complex(2.5, 3.5))
+ }
+}
+func BenchmarkPhase(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ Phase(complex(2.5, 3.5))
+ }
+}
+func BenchmarkPolar(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ Polar(complex(2.5, 3.5))
+ }
+}
+func BenchmarkPow(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ Pow(complex(2.5, 3.5), complex(2.5, 3.5))
+ }
+}
+func BenchmarkRect(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ Rect(2.5, 1.5)
+ }
+}
+func BenchmarkSin(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ Sin(complex(2.5, 3.5))
+ }
+}
+func BenchmarkSinh(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ Sinh(complex(2.5, 3.5))
+ }
+}
+func BenchmarkSqrt(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ Sqrt(complex(2.5, 3.5))
+ }
+}
+func BenchmarkTan(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ Tan(complex(2.5, 3.5))
+ }
+}
+func BenchmarkTanh(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ Tanh(complex(2.5, 3.5))
+ }
+}
--- /dev/null
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmplx
+
+// Conj returns the complex conjugate of x.
+func Conj(x complex128) complex128 { return complex(real(x), -imag(x)) }
--- /dev/null
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmplx
+
+import "math"
+
+// The original C code, the long comment, and the constants
+// below are from http://netlib.sandia.gov/cephes/c9x-complex/clog.c.
+// The go code is a simplified version of the original C.
+//
+// Cephes Math Library Release 2.8: June, 2000
+// Copyright 1984, 1987, 1989, 1992, 2000 by Stephen L. Moshier
+//
+// The readme file at http://netlib.sandia.gov/cephes/ says:
+// Some software in this archive may be from the book _Methods and
+// Programs for Mathematical Functions_ (Prentice-Hall or Simon & Schuster
+// International, 1989) or from the Cephes Mathematical Library, a
+// commercial product. In either event, it is copyrighted by the author.
+// What you see here may be used freely but it comes with no support or
+// guarantee.
+//
+// The two known misprints in the book are repaired here in the
+// source listings for the gamma function and the incomplete beta
+// integral.
+//
+// Stephen L. Moshier
+// moshier@na-net.ornl.gov
+
+// Complex exponential function
+//
+// DESCRIPTION:
+//
+// Returns the complex exponential of the complex argument z.
+//
+// If
+// z = x + iy,
+// r = exp(x),
+// then
+// w = r cos y + i r sin y.
+//
+// ACCURACY:
+//
+// Relative error:
+// arithmetic domain # trials peak rms
+// DEC -10,+10 8700 3.7e-17 1.1e-17
+// IEEE -10,+10 30000 3.0e-16 8.7e-17
+
+// Exp returns e**x, the base-e exponential of x.
+func Exp(x complex128) complex128 {
+ r := math.Exp(real(x))
+ s, c := math.Sincos(imag(x))
+ return complex(r*c, r*s)
+}
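
As a quick sanity check of the formula in the comment above (w = r cos y + i r sin y), a tiny standalone program could evaluate Euler's identity; the math/cmplx import path follows the renames earlier in this patch, and the printed value is approximate:

package main

import (
	"fmt"
	"math"
	"math/cmplx"
)

func main() {
	// e**(iπ) should come out as -1, up to a ~1e-16 rounding term
	// in the imaginary part.
	fmt.Println(cmplx.Exp(complex(0, math.Pi)))
}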
--- /dev/null
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmplx
+
+import "math"
+
+// IsInf returns true if either real(x) or imag(x) is an infinity.
+func IsInf(x complex128) bool {
+ if math.IsInf(real(x), 0) || math.IsInf(imag(x), 0) {
+ return true
+ }
+ return false
+}
+
+// Inf returns a complex infinity, complex(+Inf, +Inf).
+func Inf() complex128 {
+ inf := math.Inf(1)
+ return complex(inf, inf)
+}
--- /dev/null
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmplx
+
+import "math"
+
+// IsNaN returns true if either real(x) or imag(x) is NaN
+// and neither is an infinity.
+func IsNaN(x complex128) bool {
+ switch {
+ case math.IsInf(real(x), 0) || math.IsInf(imag(x), 0):
+ return false
+ case math.IsNaN(real(x)) || math.IsNaN(imag(x)):
+ return true
+ }
+ return false
+}
+
+// NaN returns a complex ``not-a-number'' value.
+func NaN() complex128 {
+ nan := math.NaN()
+ return complex(nan, nan)
+}
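
These predicates interact the way the isNaNSC table in the tests above encodes: a value is NaN only when no component is infinite. A minimal illustration, with arbitrarily chosen values and the import path from the earlier renames:

package main

import (
	"fmt"
	"math"
	"math/cmplx"
)

func main() {
	fmt.Println(cmplx.IsNaN(cmplx.NaN()))                      // true: both parts are NaN
	fmt.Println(cmplx.IsNaN(complex(math.NaN(), 0)))           // true: a NaN part and no infinity
	fmt.Println(cmplx.IsNaN(complex(math.NaN(), math.Inf(1)))) // false: an infinite part takes precedence
	fmt.Println(cmplx.IsInf(complex(math.NaN(), math.Inf(1)))) // true
}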
--- /dev/null
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmplx
+
+import "math"
+
+// The original C code, the long comment, and the constants
+// below are from http://netlib.sandia.gov/cephes/c9x-complex/clog.c.
+// The go code is a simplified version of the original C.
+//
+// Cephes Math Library Release 2.8: June, 2000
+// Copyright 1984, 1987, 1989, 1992, 2000 by Stephen L. Moshier
+//
+// The readme file at http://netlib.sandia.gov/cephes/ says:
+// Some software in this archive may be from the book _Methods and
+// Programs for Mathematical Functions_ (Prentice-Hall or Simon & Schuster
+// International, 1989) or from the Cephes Mathematical Library, a
+// commercial product. In either event, it is copyrighted by the author.
+// What you see here may be used freely but it comes with no support or
+// guarantee.
+//
+// The two known misprints in the book are repaired here in the
+// source listings for the gamma function and the incomplete beta
+// integral.
+//
+// Stephen L. Moshier
+// moshier@na-net.ornl.gov
+
+// Complex natural logarithm
+//
+// DESCRIPTION:
+//
+// Returns complex logarithm to the base e (2.718...) of
+// the complex argument z.
+//
+// If
+// z = x + iy, r = sqrt( x**2 + y**2 ),
+// then
+// w = log(r) + i arctan(y/x).
+//
+// The arctangent ranges from -PI to +PI.
+//
+// ACCURACY:
+//
+// Relative error:
+// arithmetic domain # trials peak rms
+// DEC -10,+10 7000 8.5e-17 1.9e-17
+// IEEE -10,+10 30000 5.0e-15 1.1e-16
+//
+// Larger relative error can be observed for z near 1 +i0.
+// In IEEE arithmetic the peak absolute error is 5.2e-16, rms
+// absolute error 1.0e-16.
+
+// Log returns the natural logarithm of x.
+func Log(x complex128) complex128 {
+ return complex(math.Log(Abs(x)), Phase(x))
+}
+
+// Log10 returns the decimal logarithm of x.
+func Log10(x complex128) complex128 {
+ return math.Log10E * Log(x)
+}
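
Concretely, w = log(r) + i arctan(y/x) gives Log(i) = iπ/2 and Log10(100) ≈ 2; a short sketch with arbitrary sample values:

package main

import (
	"fmt"
	"math/cmplx"
)

func main() {
	fmt.Println(cmplx.Log(1i))                // (0+1.5707963267948966i), i.e. iπ/2
	fmt.Println(cmplx.Log10(complex(100, 0))) // ≈ (2+0i)
}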
--- /dev/null
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmplx
+
+import "math"
+
+// Phase returns the phase (also called the argument) of x.
+// The returned value is in the range [-Pi, Pi].
+func Phase(x complex128) float64 { return math.Atan2(imag(x), real(x)) }
--- /dev/null
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmplx
+
+// Polar returns the absolute value r and phase θ of x,
+// such that x = r * e**θi.
+// The phase is in the range [-Pi, Pi].
+func Polar(x complex128) (r, θ float64) {
+ return Abs(x), Phase(x)
+}
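
Polar is the inverse of Rect (added later in this patch), so a round trip should recover the input up to rounding; a small sketch using an arbitrary value:

package main

import (
	"fmt"
	"math/cmplx"
)

func main() {
	z := complex(3, 4)
	r, theta := cmplx.Polar(z) // r = 5, theta = atan2(4, 3)
	fmt.Println(r, theta)
	fmt.Println(cmplx.Rect(r, theta)) // ≈ (3+4i)
}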
--- /dev/null
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmplx
+
+import "math"
+
+// The original C code, the long comment, and the constants
+// below are from http://netlib.sandia.gov/cephes/c9x-complex/clog.c.
+// The go code is a simplified version of the original C.
+//
+// Cephes Math Library Release 2.8: June, 2000
+// Copyright 1984, 1987, 1989, 1992, 2000 by Stephen L. Moshier
+//
+// The readme file at http://netlib.sandia.gov/cephes/ says:
+// Some software in this archive may be from the book _Methods and
+// Programs for Mathematical Functions_ (Prentice-Hall or Simon & Schuster
+// International, 1989) or from the Cephes Mathematical Library, a
+// commercial product. In either event, it is copyrighted by the author.
+// What you see here may be used freely but it comes with no support or
+// guarantee.
+//
+// The two known misprints in the book are repaired here in the
+// source listings for the gamma function and the incomplete beta
+// integral.
+//
+// Stephen L. Moshier
+// moshier@na-net.ornl.gov
+
+// Complex power function
+//
+// DESCRIPTION:
+//
+// Raises complex A to the complex Zth power.
+// Definition is per AMS55 # 4.2.8,
+// analytically equivalent to cpow(a,z) = cexp(z clog(a)).
+//
+// ACCURACY:
+//
+// Relative error:
+// arithmetic domain # trials peak rms
+// IEEE -10,+10 30000 9.4e-15 1.5e-15
+
+// Pow returns x**y, the base-x exponential of y.
+func Pow(x, y complex128) complex128 {
+ modulus := Abs(x)
+ if modulus == 0 {
+ return complex(0, 0)
+ }
+ r := math.Pow(modulus, real(y))
+ arg := Phase(x)
+ theta := real(y) * arg
+ if imag(y) != 0 {
+ r *= math.Exp(-imag(y) * arg)
+ theta += imag(y) * math.Log(modulus)
+ }
+ s, c := math.Sincos(theta)
+ return complex(r*c, r*s)
+}
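
Since the definition is cpow(a,z) = cexp(z clog(a)), Pow should agree with composing Exp and Log to within a few ulps; a quick check with arbitrary operands:

package main

import (
	"fmt"
	"math/cmplx"
)

func main() {
	x, y := complex(1, 2), complex(0.5, 0.25)
	fmt.Println(cmplx.Pow(x, y))
	fmt.Println(cmplx.Exp(y * cmplx.Log(x))) // should match the line above closely
}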
--- /dev/null
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmplx
+
+import "math"
+
+// Rect returns the complex number x with polar coordinates r, θ.
+func Rect(r, θ float64) complex128 {
+ s, c := math.Sincos(θ)
+ return complex(r*c, r*s)
+}
--- /dev/null
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmplx
+
+import "math"
+
+// The original C code, the long comment, and the constants
+// below are from http://netlib.sandia.gov/cephes/c9x-complex/clog.c.
+// The go code is a simplified version of the original C.
+//
+// Cephes Math Library Release 2.8: June, 2000
+// Copyright 1984, 1987, 1989, 1992, 2000 by Stephen L. Moshier
+//
+// The readme file at http://netlib.sandia.gov/cephes/ says:
+// Some software in this archive may be from the book _Methods and
+// Programs for Mathematical Functions_ (Prentice-Hall or Simon & Schuster
+// International, 1989) or from the Cephes Mathematical Library, a
+// commercial product. In either event, it is copyrighted by the author.
+// What you see here may be used freely but it comes with no support or
+// guarantee.
+//
+// The two known misprints in the book are repaired here in the
+// source listings for the gamma function and the incomplete beta
+// integral.
+//
+// Stephen L. Moshier
+// moshier@na-net.ornl.gov
+
+// Complex circular sine
+//
+// DESCRIPTION:
+//
+// If
+// z = x + iy,
+//
+// then
+//
+// w = sin x cosh y + i cos x sinh y.
+//
+// csin(z) = -i csinh(iz).
+//
+// ACCURACY:
+//
+// Relative error:
+// arithmetic domain # trials peak rms
+// DEC -10,+10 8400 5.3e-17 1.3e-17
+// IEEE -10,+10 30000 3.8e-16 1.0e-16
+// Also tested by csin(casin(z)) = z.
+
+// Sin returns the sine of x.
+func Sin(x complex128) complex128 {
+ s, c := math.Sincos(real(x))
+ sh, ch := sinhcosh(imag(x))
+ return complex(s*ch, c*sh)
+}
+
+// Complex hyperbolic sine
+//
+// DESCRIPTION:
+//
+// csinh z = (cexp(z) - cexp(-z))/2
+// = sinh x * cos y + i cosh x * sin y .
+//
+// ACCURACY:
+//
+// Relative error:
+// arithmetic domain # trials peak rms
+// IEEE -10,+10 30000 3.1e-16 8.2e-17
+
+// Sinh returns the hyperbolic sine of x.
+func Sinh(x complex128) complex128 {
+ s, c := math.Sincos(imag(x))
+ sh, ch := sinhcosh(real(x))
+ return complex(c*sh, s*ch)
+}
+
+// Complex circular cosine
+//
+// DESCRIPTION:
+//
+// If
+// z = x + iy,
+//
+// then
+//
+// w = cos x cosh y - i sin x sinh y.
+//
+// ACCURACY:
+//
+// Relative error:
+// arithmetic domain # trials peak rms
+// DEC -10,+10 8400 4.5e-17 1.3e-17
+// IEEE -10,+10 30000 3.8e-16 1.0e-16
+
+// Cos returns the cosine of x.
+func Cos(x complex128) complex128 {
+ s, c := math.Sincos(real(x))
+ sh, ch := sinhcosh(imag(x))
+ return complex(c*ch, -s*sh)
+}
+
+// Complex hyperbolic cosine
+//
+// DESCRIPTION:
+//
+// ccosh(z) = cosh x cos y + i sinh x sin y .
+//
+// ACCURACY:
+//
+// Relative error:
+// arithmetic domain # trials peak rms
+// IEEE -10,+10 30000 2.9e-16 8.1e-17
+
+// Cosh returns the hyperbolic cosine of x.
+func Cosh(x complex128) complex128 {
+ s, c := math.Sincos(imag(x))
+ sh, ch := sinhcosh(real(x))
+ return complex(c*ch, s*sh)
+}
+
+// calculate sinh and cosh
+func sinhcosh(x float64) (sh, ch float64) {
+ if math.Abs(x) <= 0.5 {
+ return math.Sinh(x), math.Cosh(x)
+ }
+ e := math.Exp(x)
+ ei := 0.5 / e
+ e *= 0.5
+ return e - ei, e + ei
+}
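
The componentwise identities in the comments above can be spot-checked against the real math package; a minimal sketch with arbitrary x and y:

package main

import (
	"fmt"
	"math"
	"math/cmplx"
)

func main() {
	x, y := 1.2, 0.7
	z := complex(x, y)
	// sin(x+iy) = sin x cosh y + i cos x sinh y
	byHand := complex(math.Sin(x)*math.Cosh(y), math.Cos(x)*math.Sinh(y))
	fmt.Println(cmplx.Sin(z), byHand) // the two should agree to within rounding
}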
--- /dev/null
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmplx
+
+import "math"
+
+// The original C code, the long comment, and the constants
+// below are from http://netlib.sandia.gov/cephes/c9x-complex/clog.c.
+// The go code is a simplified version of the original C.
+//
+// Cephes Math Library Release 2.8: June, 2000
+// Copyright 1984, 1987, 1989, 1992, 2000 by Stephen L. Moshier
+//
+// The readme file at http://netlib.sandia.gov/cephes/ says:
+// Some software in this archive may be from the book _Methods and
+// Programs for Mathematical Functions_ (Prentice-Hall or Simon & Schuster
+// International, 1989) or from the Cephes Mathematical Library, a
+// commercial product. In either event, it is copyrighted by the author.
+// What you see here may be used freely but it comes with no support or
+// guarantee.
+//
+// The two known misprints in the book are repaired here in the
+// source listings for the gamma function and the incomplete beta
+// integral.
+//
+// Stephen L. Moshier
+// moshier@na-net.ornl.gov
+
+// Complex square root
+//
+// DESCRIPTION:
+//
+// If z = x + iy, r = |z|, then
+//
+// 1/2
+// Re w = [ (r + x)/2 ] ,
+//
+// 1/2
+// Im w = [ (r - x)/2 ] .
+//
+// Cancellation error in r-x or r+x is avoided by using the
+// identity 2 Re w Im w = y.
+//
+// Note that -w is also a square root of z. The root chosen
+// is always in the right half plane and Im w has the same sign as y.
+//
+// ACCURACY:
+//
+// Relative error:
+// arithmetic domain # trials peak rms
+// DEC -10,+10 25000 3.2e-17 9.6e-18
+// IEEE -10,+10 1,000,000 2.9e-16 6.1e-17
+
+// Sqrt returns the square root of x.
+func Sqrt(x complex128) complex128 {
+ if imag(x) == 0 {
+ if real(x) == 0 {
+ return complex(0, 0)
+ }
+ if real(x) < 0 {
+ return complex(0, math.Sqrt(-real(x)))
+ }
+ return complex(math.Sqrt(real(x)), 0)
+ }
+ if real(x) == 0 {
+ if imag(x) < 0 {
+ r := math.Sqrt(-0.5 * imag(x))
+ return complex(r, -r)
+ }
+ r := math.Sqrt(0.5 * imag(x))
+ return complex(r, r)
+ }
+ a := real(x)
+ b := imag(x)
+ var scale float64
+ // Rescale to avoid internal overflow or underflow.
+ if math.Abs(a) > 4 || math.Abs(b) > 4 {
+ a *= 0.25
+ b *= 0.25
+ scale = 2
+ } else {
+ a *= 1.8014398509481984e16 // 2**54
+ b *= 1.8014398509481984e16
+ scale = 7.450580596923828125e-9 // 2**-27
+ }
+ r := math.Hypot(a, b)
+ var t float64
+ if a > 0 {
+ t = math.Sqrt(0.5*r + 0.5*a)
+ r = scale * math.Abs((0.5*b)/t)
+ t *= scale
+ } else {
+ r = math.Sqrt(0.5*r - 0.5*a)
+ t = scale * math.Abs((0.5*b)/r)
+ r *= scale
+ }
+ if b < 0 {
+ return complex(t, -r)
+ }
+ return complex(t, r)
+}
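
The sign convention described above (result in the right half plane, imaginary part taking the sign of y) is easy to see on a value just below the negative real axis; an arbitrary example:

package main

import (
	"fmt"
	"math/cmplx"
)

func main() {
	z := complex(-4, -0.001)
	w := cmplx.Sqrt(z)
	fmt.Println(w)     // small positive real part, imaginary part ≈ -2 (same sign as imag(z))
	fmt.Println(w * w) // ≈ z
}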
--- /dev/null
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmplx
+
+import "math"
+
+// The original C code, the long comment, and the constants
+// below are from http://netlib.sandia.gov/cephes/c9x-complex/clog.c.
+// The go code is a simplified version of the original C.
+//
+// Cephes Math Library Release 2.8: June, 2000
+// Copyright 1984, 1987, 1989, 1992, 2000 by Stephen L. Moshier
+//
+// The readme file at http://netlib.sandia.gov/cephes/ says:
+// Some software in this archive may be from the book _Methods and
+// Programs for Mathematical Functions_ (Prentice-Hall or Simon & Schuster
+// International, 1989) or from the Cephes Mathematical Library, a
+// commercial product. In either event, it is copyrighted by the author.
+// What you see here may be used freely but it comes with no support or
+// guarantee.
+//
+// The two known misprints in the book are repaired here in the
+// source listings for the gamma function and the incomplete beta
+// integral.
+//
+// Stephen L. Moshier
+// moshier@na-net.ornl.gov
+
+// Complex circular tangent
+//
+// DESCRIPTION:
+//
+// If
+// z = x + iy,
+//
+// then
+//
+// sin 2x + i sinh 2y
+// w = --------------------.
+// cos 2x + cosh 2y
+//
+// On the real axis the denominator is zero at odd multiples
+// of PI/2. The denominator is evaluated by its Taylor
+// series near these points.
+//
+// ctan(z) = -i ctanh(iz).
+//
+// ACCURACY:
+//
+// Relative error:
+// arithmetic domain # trials peak rms
+// DEC -10,+10 5200 7.1e-17 1.6e-17
+// IEEE -10,+10 30000 7.2e-16 1.2e-16
+// Also tested by ctan * ccot = 1 and catan(ctan(z)) = z.
+
+// Tan returns the tangent of x.
+func Tan(x complex128) complex128 {
+ d := math.Cos(2*real(x)) + math.Cosh(2*imag(x))
+ if math.Abs(d) < 0.25 {
+ d = tanSeries(x)
+ }
+ if d == 0 {
+ return Inf()
+ }
+ return complex(math.Sin(2*real(x))/d, math.Sinh(2*imag(x))/d)
+}
+
+// Complex hyperbolic tangent
+//
+// DESCRIPTION:
+//
+// tanh z = (sinh 2x + i sin 2y) / (cosh 2x + cos 2y) .
+//
+// ACCURACY:
+//
+// Relative error:
+// arithmetic domain # trials peak rms
+// IEEE -10,+10 30000 1.7e-14 2.4e-16
+
+// Tanh returns the hyperbolic tangent of x.
+func Tanh(x complex128) complex128 {
+ d := math.Cosh(2*real(x)) + math.Cos(2*imag(x))
+ if d == 0 {
+ return Inf()
+ }
+ return complex(math.Sinh(2*real(x))/d, math.Sin(2*imag(x))/d)
+}
+
+// Program to subtract nearest integer multiple of PI
+func reducePi(x float64) float64 {
+ const (
+ // extended precision value of PI:
+ DP1 = 3.14159265160560607910E0 // ?? 0x400921fb54000000
+ DP2 = 1.98418714791870343106E-9 // ?? 0x3e210b4610000000
+ DP3 = 1.14423774522196636802E-17 // ?? 0x3c6a62633145c06e
+ )
+ t := x / math.Pi
+ if t >= 0 {
+ t += 0.5
+ } else {
+ t -= 0.5
+ }
+ t = float64(int64(t)) // int64(t) = the multiple
+ return ((x - t*DP1) - t*DP2) - t*DP3
+}
+
+// Taylor series expansion for cosh(2y) - cos(2x)
+func tanSeries(z complex128) float64 {
+ const MACHEP = 1.0 / (1 << 53)
+ x := math.Abs(2 * real(z))
+ y := math.Abs(2 * imag(z))
+ x = reducePi(x)
+ x = x * x
+ y = y * y
+ x2 := 1.0
+ y2 := 1.0
+ f := 1.0
+ rn := 0.0
+ d := 0.0
+ for {
+ rn += 1
+ f *= rn
+ rn += 1
+ f *= rn
+ x2 *= x
+ y2 *= y
+ t := y2 + x2
+ t /= f
+ d += t
+
+ rn += 1
+ f *= rn
+ rn += 1
+ f *= rn
+ x2 *= x
+ y2 *= y
+ t = y2 - x2
+ t /= f
+ d += t
+ if math.Abs(t/d) <= MACHEP {
+ break
+ }
+ }
+ return d
+}
+
+// Complex circular cotangent
+//
+// DESCRIPTION:
+//
+// If
+// z = x + iy,
+//
+// then
+//
+// sin 2x - i sinh 2y
+// w = --------------------.
+// cosh 2y - cos 2x
+//
+// On the real axis, the denominator has zeros at even
+// multiples of PI/2. Near these points it is evaluated
+// by a Taylor series.
+//
+// ACCURACY:
+//
+// Relative error:
+// arithmetic domain # trials peak rms
+// DEC -10,+10 3000 6.5e-17 1.6e-17
+// IEEE -10,+10 30000 9.2e-16 1.2e-16
+// Also tested by ctan * ccot = 1 + i0.
+
+// Cot returns the cotangent of x.
+func Cot(x complex128) complex128 {
+ d := math.Cosh(2*imag(x)) - math.Cos(2*real(x))
+ if math.Abs(d) < 0.25 {
+ d = tanSeries(x)
+ }
+ if d == 0 {
+ return Inf()
+ }
+ return complex(math.Sin(2*real(x))/d, -math.Sinh(2*imag(x))/d)
+}
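
The ctan * ccot = 1 relation mentioned in the accuracy notes, and Tan's consistency with Sin/Cos, can be eyeballed with a toy value:

package main

import (
	"fmt"
	"math/cmplx"
)

func main() {
	z := complex(0.7, 0.3)
	fmt.Println(cmplx.Tan(z))
	fmt.Println(cmplx.Sin(z) / cmplx.Cos(z)) // should match Tan(z) closely
	fmt.Println(cmplx.Tan(z) * cmplx.Cot(z)) // ≈ (1+0i)
}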
// Stephen L. Moshier
// moshier@na-net.ornl.gov
-var _P = []float64{
+var _P = [...]float64{
1.60119522476751861407e-04,
1.19135147006586384913e-03,
1.04213797561761569935e-02,
4.94214826801497100753e-01,
9.99999999999999996796e-01,
}
-var _Q = []float64{
+var _Q = [...]float64{
-2.31581873324120129819e-05,
5.39605580493303397842e-04,
-4.45641913851797240494e-03,
7.14304917030273074085e-02,
1.00000000000000000320e+00,
}
-var _S = []float64{
+var _S = [...]float64{
7.87311395793093628397e-04,
-2.29549961613378126380e-04,
-2.68132617805781232825e-03,
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package rand
+
+import (
+ "math"
+)
+
+/*
+ * Exponential distribution
+ *
+ * See "The Ziggurat Method for Generating Random Variables"
+ * (Marsaglia & Tsang, 2000)
+ * http://www.jstatsoft.org/v05/i08/paper [pdf]
+ */
+
+const (
+ re = 7.69711747013104972
+)
+
+// ExpFloat64 returns an exponentially distributed float64 in the range
+// (0, +math.MaxFloat64] with an exponential distribution whose rate parameter
+// (lambda) is 1 and whose mean is 1/lambda (1).
+// To produce a distribution with a different rate parameter,
+// callers can adjust the output using:
+//
+// sample = ExpFloat64() / desiredRateParameter
+//
+func (r *Rand) ExpFloat64() float64 {
+ for {
+ j := r.Uint32()
+ i := j & 0xFF
+ x := float64(j) * float64(we[i])
+ if j < ke[i] {
+ return x
+ }
+ if i == 0 {
+ return re - math.Log(r.Float64())
+ }
+ if fe[i]+float32(r.Float64())*(fe[i-1]-fe[i]) < float32(math.Exp(-x)) {
+ return x
+ }
+ }
+ panic("unreachable")
+}
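
Following the doc comment, a caller wanting a different rate parameter divides the sample; a brief sketch with an arbitrarily chosen lambda, using the math/rand import path from the earlier renames:

package main

import (
	"fmt"
	"math/rand"
)

func main() {
	r := rand.New(rand.NewSource(1))
	const lambda = 2.0 // desired rate parameter
	sample := r.ExpFloat64() / lambda
	fmt.Println(sample)
}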
+
+var ke = [256]uint32{
+ 0xe290a139, 0x0, 0x9beadebc, 0xc377ac71, 0xd4ddb990,
+ 0xde893fb8, 0xe4a8e87c, 0xe8dff16a, 0xebf2deab, 0xee49a6e8,
+ 0xf0204efd, 0xf19bdb8e, 0xf2d458bb, 0xf3da104b, 0xf4b86d78,
+ 0xf577ad8a, 0xf61de83d, 0xf6afb784, 0xf730a573, 0xf7a37651,
+ 0xf80a5bb6, 0xf867189d, 0xf8bb1b4f, 0xf9079062, 0xf94d70ca,
+ 0xf98d8c7d, 0xf9c8928a, 0xf9ff175b, 0xfa319996, 0xfa6085f8,
+ 0xfa8c3a62, 0xfab5084e, 0xfadb36c8, 0xfaff0410, 0xfb20a6ea,
+ 0xfb404fb4, 0xfb5e2951, 0xfb7a59e9, 0xfb95038c, 0xfbae44ba,
+ 0xfbc638d8, 0xfbdcf892, 0xfbf29a30, 0xfc0731df, 0xfc1ad1ed,
+ 0xfc2d8b02, 0xfc3f6c4d, 0xfc5083ac, 0xfc60ddd1, 0xfc708662,
+ 0xfc7f8810, 0xfc8decb4, 0xfc9bbd62, 0xfca9027c, 0xfcb5c3c3,
+ 0xfcc20864, 0xfccdd70a, 0xfcd935e3, 0xfce42ab0, 0xfceebace,
+ 0xfcf8eb3b, 0xfd02c0a0, 0xfd0c3f59, 0xfd156b7b, 0xfd1e48d6,
+ 0xfd26daff, 0xfd2f2552, 0xfd372af7, 0xfd3eeee5, 0xfd4673e7,
+ 0xfd4dbc9e, 0xfd54cb85, 0xfd5ba2f2, 0xfd62451b, 0xfd68b415,
+ 0xfd6ef1da, 0xfd750047, 0xfd7ae120, 0xfd809612, 0xfd8620b4,
+ 0xfd8b8285, 0xfd90bcf5, 0xfd95d15e, 0xfd9ac10b, 0xfd9f8d36,
+ 0xfda43708, 0xfda8bf9e, 0xfdad2806, 0xfdb17141, 0xfdb59c46,
+ 0xfdb9a9fd, 0xfdbd9b46, 0xfdc170f6, 0xfdc52bd8, 0xfdc8ccac,
+ 0xfdcc542d, 0xfdcfc30b, 0xfdd319ef, 0xfdd6597a, 0xfdd98245,
+ 0xfddc94e5, 0xfddf91e6, 0xfde279ce, 0xfde54d1f, 0xfde80c52,
+ 0xfdeab7de, 0xfded5034, 0xfdefd5be, 0xfdf248e3, 0xfdf4aa06,
+ 0xfdf6f984, 0xfdf937b6, 0xfdfb64f4, 0xfdfd818d, 0xfdff8dd0,
+ 0xfe018a08, 0xfe03767a, 0xfe05536c, 0xfe07211c, 0xfe08dfc9,
+ 0xfe0a8fab, 0xfe0c30fb, 0xfe0dc3ec, 0xfe0f48b1, 0xfe10bf76,
+ 0xfe122869, 0xfe1383b4, 0xfe14d17c, 0xfe1611e7, 0xfe174516,
+ 0xfe186b2a, 0xfe19843e, 0xfe1a9070, 0xfe1b8fd6, 0xfe1c8289,
+ 0xfe1d689b, 0xfe1e4220, 0xfe1f0f26, 0xfe1fcfbc, 0xfe2083ed,
+ 0xfe212bc3, 0xfe21c745, 0xfe225678, 0xfe22d95f, 0xfe234ffb,
+ 0xfe23ba4a, 0xfe241849, 0xfe2469f2, 0xfe24af3c, 0xfe24e81e,
+ 0xfe25148b, 0xfe253474, 0xfe2547c7, 0xfe254e70, 0xfe25485a,
+ 0xfe25356a, 0xfe251586, 0xfe24e88f, 0xfe24ae64, 0xfe2466e1,
+ 0xfe2411df, 0xfe23af34, 0xfe233eb4, 0xfe22c02c, 0xfe22336b,
+ 0xfe219838, 0xfe20ee58, 0xfe20358c, 0xfe1f6d92, 0xfe1e9621,
+ 0xfe1daef0, 0xfe1cb7ac, 0xfe1bb002, 0xfe1a9798, 0xfe196e0d,
+ 0xfe1832fd, 0xfe16e5fe, 0xfe15869d, 0xfe141464, 0xfe128ed3,
+ 0xfe10f565, 0xfe0f478c, 0xfe0d84b1, 0xfe0bac36, 0xfe09bd73,
+ 0xfe07b7b5, 0xfe059a40, 0xfe03644c, 0xfe011504, 0xfdfeab88,
+ 0xfdfc26e9, 0xfdf98629, 0xfdf6c83b, 0xfdf3ec01, 0xfdf0f04a,
+ 0xfdedd3d1, 0xfdea953d, 0xfde7331e, 0xfde3abe9, 0xfddffdfb,
+ 0xfddc2791, 0xfdd826cd, 0xfdd3f9a8, 0xfdcf9dfc, 0xfdcb1176,
+ 0xfdc65198, 0xfdc15bb3, 0xfdbc2ce2, 0xfdb6c206, 0xfdb117be,
+ 0xfdab2a63, 0xfda4f5fd, 0xfd9e7640, 0xfd97a67a, 0xfd908192,
+ 0xfd8901f2, 0xfd812182, 0xfd78d98e, 0xfd7022bb, 0xfd66f4ed,
+ 0xfd5d4732, 0xfd530f9c, 0xfd48432b, 0xfd3cd59a, 0xfd30b936,
+ 0xfd23dea4, 0xfd16349e, 0xfd07a7a3, 0xfcf8219b, 0xfce7895b,
+ 0xfcd5c220, 0xfcc2aadb, 0xfcae1d5e, 0xfc97ed4e, 0xfc7fe6d4,
+ 0xfc65ccf3, 0xfc495762, 0xfc2a2fc8, 0xfc07ee19, 0xfbe213c1,
+ 0xfbb8051a, 0xfb890078, 0xfb5411a5, 0xfb180005, 0xfad33482,
+ 0xfa839276, 0xfa263b32, 0xf9b72d1c, 0xf930a1a2, 0xf889f023,
+ 0xf7b577d2, 0xf69c650c, 0xf51530f0, 0xf2cb0e3c, 0xeeefb15d,
+ 0xe6da6ecf,
+}
+var we = [256]float32{
+ 2.0249555e-09, 1.486674e-11, 2.4409617e-11, 3.1968806e-11,
+ 3.844677e-11, 4.4228204e-11, 4.9516443e-11, 5.443359e-11,
+ 5.905944e-11, 6.344942e-11, 6.7643814e-11, 7.1672945e-11,
+ 7.556032e-11, 7.932458e-11, 8.298079e-11, 8.654132e-11,
+ 9.0016515e-11, 9.3415074e-11, 9.674443e-11, 1.0001099e-10,
+ 1.03220314e-10, 1.06377254e-10, 1.09486115e-10, 1.1255068e-10,
+ 1.1557435e-10, 1.1856015e-10, 1.2151083e-10, 1.2442886e-10,
+ 1.2731648e-10, 1.3017575e-10, 1.3300853e-10, 1.3581657e-10,
+ 1.3860142e-10, 1.4136457e-10, 1.4410738e-10, 1.4683108e-10,
+ 1.4953687e-10, 1.5222583e-10, 1.54899e-10, 1.5755733e-10,
+ 1.6020171e-10, 1.6283301e-10, 1.6545203e-10, 1.6805951e-10,
+ 1.7065617e-10, 1.732427e-10, 1.7581973e-10, 1.7838787e-10,
+ 1.8094774e-10, 1.8349985e-10, 1.8604476e-10, 1.8858298e-10,
+ 1.9111498e-10, 1.9364126e-10, 1.9616223e-10, 1.9867835e-10,
+ 2.0119004e-10, 2.0369768e-10, 2.0620168e-10, 2.087024e-10,
+ 2.1120022e-10, 2.136955e-10, 2.1618855e-10, 2.1867974e-10,
+ 2.2116936e-10, 2.2365775e-10, 2.261452e-10, 2.2863202e-10,
+ 2.311185e-10, 2.3360494e-10, 2.360916e-10, 2.3857874e-10,
+ 2.4106667e-10, 2.4355562e-10, 2.4604588e-10, 2.485377e-10,
+ 2.5103128e-10, 2.5352695e-10, 2.560249e-10, 2.585254e-10,
+ 2.6102867e-10, 2.6353494e-10, 2.6604446e-10, 2.6855745e-10,
+ 2.7107416e-10, 2.7359479e-10, 2.761196e-10, 2.7864877e-10,
+ 2.8118255e-10, 2.8372119e-10, 2.8626485e-10, 2.888138e-10,
+ 2.9136826e-10, 2.939284e-10, 2.9649452e-10, 2.9906677e-10,
+ 3.016454e-10, 3.0423064e-10, 3.0682268e-10, 3.0942177e-10,
+ 3.1202813e-10, 3.1464195e-10, 3.1726352e-10, 3.19893e-10,
+ 3.2253064e-10, 3.251767e-10, 3.2783135e-10, 3.3049485e-10,
+ 3.3316744e-10, 3.3584938e-10, 3.3854083e-10, 3.4124212e-10,
+ 3.4395342e-10, 3.46675e-10, 3.4940711e-10, 3.5215003e-10,
+ 3.5490397e-10, 3.5766917e-10, 3.6044595e-10, 3.6323455e-10,
+ 3.660352e-10, 3.6884823e-10, 3.7167386e-10, 3.745124e-10,
+ 3.773641e-10, 3.802293e-10, 3.8310827e-10, 3.860013e-10,
+ 3.8890866e-10, 3.918307e-10, 3.9476775e-10, 3.9772008e-10,
+ 4.0068804e-10, 4.0367196e-10, 4.0667217e-10, 4.09689e-10,
+ 4.1272286e-10, 4.1577405e-10, 4.1884296e-10, 4.2192994e-10,
+ 4.250354e-10, 4.281597e-10, 4.313033e-10, 4.3446652e-10,
+ 4.3764986e-10, 4.408537e-10, 4.4407847e-10, 4.4732465e-10,
+ 4.5059267e-10, 4.5388301e-10, 4.571962e-10, 4.6053267e-10,
+ 4.6389292e-10, 4.6727755e-10, 4.70687e-10, 4.741219e-10,
+ 4.7758275e-10, 4.810702e-10, 4.845848e-10, 4.8812715e-10,
+ 4.9169796e-10, 4.9529775e-10, 4.989273e-10, 5.0258725e-10,
+ 5.0627835e-10, 5.100013e-10, 5.1375687e-10, 5.1754584e-10,
+ 5.21369e-10, 5.2522725e-10, 5.2912136e-10, 5.330522e-10,
+ 5.370208e-10, 5.4102806e-10, 5.45075e-10, 5.491625e-10,
+ 5.532918e-10, 5.5746385e-10, 5.616799e-10, 5.6594107e-10,
+ 5.7024857e-10, 5.746037e-10, 5.7900773e-10, 5.834621e-10,
+ 5.8796823e-10, 5.925276e-10, 5.971417e-10, 6.018122e-10,
+ 6.065408e-10, 6.113292e-10, 6.1617933e-10, 6.2109295e-10,
+ 6.260722e-10, 6.3111916e-10, 6.3623595e-10, 6.4142497e-10,
+ 6.4668854e-10, 6.5202926e-10, 6.5744976e-10, 6.6295286e-10,
+ 6.6854156e-10, 6.742188e-10, 6.79988e-10, 6.858526e-10,
+ 6.9181616e-10, 6.978826e-10, 7.04056e-10, 7.103407e-10,
+ 7.167412e-10, 7.2326256e-10, 7.2990985e-10, 7.366886e-10,
+ 7.4360473e-10, 7.5066453e-10, 7.5787476e-10, 7.6524265e-10,
+ 7.7277595e-10, 7.80483e-10, 7.883728e-10, 7.9645507e-10,
+ 8.047402e-10, 8.1323964e-10, 8.219657e-10, 8.309319e-10,
+ 8.401528e-10, 8.496445e-10, 8.594247e-10, 8.6951274e-10,
+ 8.799301e-10, 8.9070046e-10, 9.018503e-10, 9.134092e-10,
+ 9.254101e-10, 9.378904e-10, 9.508923e-10, 9.644638e-10,
+ 9.786603e-10, 9.935448e-10, 1.0091913e-09, 1.025686e-09,
+ 1.0431306e-09, 1.0616465e-09, 1.08138e-09, 1.1025096e-09,
+ 1.1252564e-09, 1.1498986e-09, 1.1767932e-09, 1.206409e-09,
+ 1.2393786e-09, 1.276585e-09, 1.3193139e-09, 1.3695435e-09,
+ 1.4305498e-09, 1.508365e-09, 1.6160854e-09, 1.7921248e-09,
+}
+var fe = [256]float32{
+ 1, 0.9381437, 0.90046996, 0.87170434, 0.8477855, 0.8269933,
+ 0.8084217, 0.7915276, 0.77595687, 0.7614634, 0.7478686,
+ 0.7350381, 0.72286767, 0.71127474, 0.70019263, 0.6895665,
+ 0.67935055, 0.6695063, 0.66000086, 0.65080583, 0.6418967,
+ 0.63325197, 0.6248527, 0.6166822, 0.60872537, 0.60096896,
+ 0.5934009, 0.58601034, 0.5787874, 0.57172304, 0.5648092,
+ 0.5580383, 0.5514034, 0.5448982, 0.5385169, 0.53225386,
+ 0.5261042, 0.52006316, 0.5141264, 0.50828975, 0.5025495,
+ 0.496902, 0.49134386, 0.485872, 0.48048335, 0.4751752,
+ 0.46994483, 0.46478975, 0.45970762, 0.45469615, 0.44975325,
+ 0.44487688, 0.44006512, 0.43531612, 0.43062815, 0.42599955,
+ 0.42142874, 0.4169142, 0.41245446, 0.40804818, 0.403694,
+ 0.3993907, 0.39513698, 0.39093173, 0.38677382, 0.38266218,
+ 0.37859577, 0.37457356, 0.37059465, 0.3666581, 0.362763,
+ 0.35890847, 0.35509375, 0.351318, 0.3475805, 0.34388044,
+ 0.34021714, 0.3365899, 0.33299807, 0.32944095, 0.32591796,
+ 0.3224285, 0.3189719, 0.31554767, 0.31215525, 0.30879408,
+ 0.3054636, 0.3021634, 0.29889292, 0.2956517, 0.29243928,
+ 0.28925523, 0.28609908, 0.28297043, 0.27986884, 0.27679393,
+ 0.2737453, 0.2707226, 0.2677254, 0.26475343, 0.26180625,
+ 0.25888354, 0.25598502, 0.2531103, 0.25025907, 0.24743107,
+ 0.24462597, 0.24184346, 0.23908329, 0.23634516, 0.23362878,
+ 0.23093392, 0.2282603, 0.22560766, 0.22297576, 0.22036438,
+ 0.21777324, 0.21520215, 0.21265087, 0.21011916, 0.20760682,
+ 0.20511365, 0.20263945, 0.20018397, 0.19774707, 0.19532852,
+ 0.19292815, 0.19054577, 0.1881812, 0.18583426, 0.18350479,
+ 0.1811926, 0.17889754, 0.17661946, 0.17435817, 0.17211354,
+ 0.1698854, 0.16767362, 0.16547804, 0.16329853, 0.16113494,
+ 0.15898713, 0.15685499, 0.15473837, 0.15263714, 0.15055119,
+ 0.14848037, 0.14642459, 0.14438373, 0.14235765, 0.14034624,
+ 0.13834943, 0.13636707, 0.13439907, 0.13244532, 0.13050574,
+ 0.1285802, 0.12666863, 0.12477092, 0.12288698, 0.12101672,
+ 0.119160056, 0.1173169, 0.115487166, 0.11367077, 0.11186763,
+ 0.11007768, 0.10830083, 0.10653701, 0.10478614, 0.10304816,
+ 0.101323, 0.09961058, 0.09791085, 0.09622374, 0.09454919,
+ 0.09288713, 0.091237515, 0.08960028, 0.087975375, 0.08636274,
+ 0.08476233, 0.083174095, 0.081597984, 0.08003395, 0.07848195,
+ 0.076941945, 0.07541389, 0.07389775, 0.072393484, 0.07090106,
+ 0.069420435, 0.06795159, 0.066494495, 0.06504912, 0.063615434,
+ 0.062193416, 0.060783047, 0.059384305, 0.057997175,
+ 0.05662164, 0.05525769, 0.053905312, 0.052564494, 0.051235236,
+ 0.049917534, 0.048611384, 0.047316793, 0.046033762, 0.0447623,
+ 0.043502413, 0.042254124, 0.041017443, 0.039792392,
+ 0.038578995, 0.037377283, 0.036187284, 0.035009038,
+ 0.033842582, 0.032687962, 0.031545233, 0.030414443, 0.02929566,
+ 0.02818895, 0.027094385, 0.026012046, 0.024942026, 0.023884421,
+ 0.022839336, 0.021806888, 0.020787204, 0.019780423, 0.0187867,
+ 0.0178062, 0.016839107, 0.015885621, 0.014945968, 0.014020392,
+ 0.013109165, 0.012212592, 0.011331013, 0.01046481, 0.009614414,
+ 0.008780315, 0.007963077, 0.0071633533, 0.006381906,
+ 0.0056196423, 0.0048776558, 0.004157295, 0.0034602648,
+ 0.0027887989, 0.0021459677, 0.0015362998, 0.0009672693,
+ 0.00045413437,
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package rand
+
+import (
+ "math"
+)
+
+/*
+ * Normal distribution
+ *
+ * See "The Ziggurat Method for Generating Random Variables"
+ * (Marsaglia & Tsang, 2000)
+ * http://www.jstatsoft.org/v05/i08/paper [pdf]
+ */
+
+const (
+ rn = 3.442619855899
+)
+
+func absInt32(i int32) uint32 {
+ if i < 0 {
+ return uint32(-i)
+ }
+ return uint32(i)
+}
+
+// NormFloat64 returns a normally distributed float64 in the range
+// [-math.MaxFloat64, +math.MaxFloat64] with
+// standard normal distribution (mean = 0, stddev = 1).
+// To produce a different normal distribution, callers can
+// adjust the output using:
+//
+// sample = NormFloat64() * desiredStdDev + desiredMean
+//
+func (r *Rand) NormFloat64() float64 {
+ for {
+ j := int32(r.Uint32()) // Possibly negative
+ i := j & 0x7F
+ x := float64(j) * float64(wn[i])
+ if absInt32(j) < kn[i] {
+ // This case should be hit better than 99% of the time.
+ return x
+ }
+
+ if i == 0 {
+ // This extra work is only required for the base strip.
+ for {
+ x = -math.Log(r.Float64()) * (1.0 / rn)
+ y := -math.Log(r.Float64())
+ if y+y >= x*x {
+ break
+ }
+ }
+ if j > 0 {
+ return rn + x
+ }
+ return -rn - x
+ }
+ if fn[i]+float32(r.Float64())*(fn[i-1]-fn[i]) < float32(math.Exp(-.5*x*x)) {
+ return x
+ }
+ }
+ panic("unreachable")
+}
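
As the doc comment says, other normal distributions are obtained by scaling and shifting the output; a small sketch with an arbitrary mean and standard deviation, the same adjustment the tests later in this patch apply:

package main

import (
	"fmt"
	"math/rand"
)

func main() {
	r := rand.New(rand.NewSource(1))
	const mean, stddev = 10.0, 2.5
	sample := r.NormFloat64()*stddev + mean
	fmt.Println(sample)
}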
+
+var kn = [128]uint32{
+ 0x76ad2212, 0x0, 0x600f1b53, 0x6ce447a6, 0x725b46a2,
+ 0x7560051d, 0x774921eb, 0x789a25bd, 0x799045c3, 0x7a4bce5d,
+ 0x7adf629f, 0x7b5682a6, 0x7bb8a8c6, 0x7c0ae722, 0x7c50cce7,
+ 0x7c8cec5b, 0x7cc12cd6, 0x7ceefed2, 0x7d177e0b, 0x7d3b8883,
+ 0x7d5bce6c, 0x7d78dd64, 0x7d932886, 0x7dab0e57, 0x7dc0dd30,
+ 0x7dd4d688, 0x7de73185, 0x7df81cea, 0x7e07c0a3, 0x7e163efa,
+ 0x7e23b587, 0x7e303dfd, 0x7e3beec2, 0x7e46db77, 0x7e51155d,
+ 0x7e5aabb3, 0x7e63abf7, 0x7e6c222c, 0x7e741906, 0x7e7b9a18,
+ 0x7e82adfa, 0x7e895c63, 0x7e8fac4b, 0x7e95a3fb, 0x7e9b4924,
+ 0x7ea0a0ef, 0x7ea5b00d, 0x7eaa7ac3, 0x7eaf04f3, 0x7eb3522a,
+ 0x7eb765a5, 0x7ebb4259, 0x7ebeeafd, 0x7ec2620a, 0x7ec5a9c4,
+ 0x7ec8c441, 0x7ecbb365, 0x7ece78ed, 0x7ed11671, 0x7ed38d62,
+ 0x7ed5df12, 0x7ed80cb4, 0x7eda175c, 0x7edc0005, 0x7eddc78e,
+ 0x7edf6ebf, 0x7ee0f647, 0x7ee25ebe, 0x7ee3a8a9, 0x7ee4d473,
+ 0x7ee5e276, 0x7ee6d2f5, 0x7ee7a620, 0x7ee85c10, 0x7ee8f4cd,
+ 0x7ee97047, 0x7ee9ce59, 0x7eea0eca, 0x7eea3147, 0x7eea3568,
+ 0x7eea1aab, 0x7ee9e071, 0x7ee98602, 0x7ee90a88, 0x7ee86d08,
+ 0x7ee7ac6a, 0x7ee6c769, 0x7ee5bc9c, 0x7ee48a67, 0x7ee32efc,
+ 0x7ee1a857, 0x7edff42f, 0x7ede0ffa, 0x7edbf8d9, 0x7ed9ab94,
+ 0x7ed7248d, 0x7ed45fae, 0x7ed1585c, 0x7ece095f, 0x7eca6ccb,
+ 0x7ec67be2, 0x7ec22eee, 0x7ebd7d1a, 0x7eb85c35, 0x7eb2c075,
+ 0x7eac9c20, 0x7ea5df27, 0x7e9e769f, 0x7e964c16, 0x7e8d44ba,
+ 0x7e834033, 0x7e781728, 0x7e6b9933, 0x7e5d8a1a, 0x7e4d9ded,
+ 0x7e3b737a, 0x7e268c2f, 0x7e0e3ff5, 0x7df1aa5d, 0x7dcf8c72,
+ 0x7da61a1e, 0x7d72a0fb, 0x7d30e097, 0x7cd9b4ab, 0x7c600f1a,
+ 0x7ba90bdc, 0x7a722176, 0x77d664e5,
+}
+var wn = [128]float32{
+ 1.7290405e-09, 1.2680929e-10, 1.6897518e-10, 1.9862688e-10,
+ 2.2232431e-10, 2.4244937e-10, 2.601613e-10, 2.7611988e-10,
+ 2.9073963e-10, 3.042997e-10, 3.1699796e-10, 3.289802e-10,
+ 3.4035738e-10, 3.5121603e-10, 3.616251e-10, 3.7164058e-10,
+ 3.8130857e-10, 3.9066758e-10, 3.9975012e-10, 4.08584e-10,
+ 4.1719309e-10, 4.2559822e-10, 4.338176e-10, 4.418672e-10,
+ 4.497613e-10, 4.5751258e-10, 4.651324e-10, 4.7263105e-10,
+ 4.8001775e-10, 4.87301e-10, 4.944885e-10, 5.015873e-10,
+ 5.0860405e-10, 5.155446e-10, 5.2241467e-10, 5.2921934e-10,
+ 5.359635e-10, 5.426517e-10, 5.4928817e-10, 5.5587696e-10,
+ 5.624219e-10, 5.6892646e-10, 5.753941e-10, 5.818282e-10,
+ 5.882317e-10, 5.946077e-10, 6.00959e-10, 6.072884e-10,
+ 6.135985e-10, 6.19892e-10, 6.2617134e-10, 6.3243905e-10,
+ 6.386974e-10, 6.449488e-10, 6.511956e-10, 6.5744005e-10,
+ 6.6368433e-10, 6.699307e-10, 6.7618144e-10, 6.824387e-10,
+ 6.8870465e-10, 6.949815e-10, 7.012715e-10, 7.075768e-10,
+ 7.1389966e-10, 7.202424e-10, 7.266073e-10, 7.329966e-10,
+ 7.394128e-10, 7.4585826e-10, 7.5233547e-10, 7.58847e-10,
+ 7.653954e-10, 7.719835e-10, 7.7861395e-10, 7.852897e-10,
+ 7.920138e-10, 7.987892e-10, 8.0561924e-10, 8.125073e-10,
+ 8.194569e-10, 8.2647167e-10, 8.3355556e-10, 8.407127e-10,
+ 8.479473e-10, 8.55264e-10, 8.6266755e-10, 8.7016316e-10,
+ 8.777562e-10, 8.8545243e-10, 8.932582e-10, 9.0117996e-10,
+ 9.09225e-10, 9.174008e-10, 9.2571584e-10, 9.341788e-10,
+ 9.427997e-10, 9.515889e-10, 9.605579e-10, 9.697193e-10,
+ 9.790869e-10, 9.88676e-10, 9.985036e-10, 1.0085882e-09,
+ 1.0189509e-09, 1.0296151e-09, 1.0406069e-09, 1.0519566e-09,
+ 1.063698e-09, 1.0758702e-09, 1.0885183e-09, 1.1016947e-09,
+ 1.1154611e-09, 1.1298902e-09, 1.1450696e-09, 1.1611052e-09,
+ 1.1781276e-09, 1.1962995e-09, 1.2158287e-09, 1.2369856e-09,
+ 1.2601323e-09, 1.2857697e-09, 1.3146202e-09, 1.347784e-09,
+ 1.3870636e-09, 1.4357403e-09, 1.5008659e-09, 1.6030948e-09,
+}
+var fn = [128]float32{
+ 1, 0.9635997, 0.9362827, 0.9130436, 0.89228165, 0.87324303,
+ 0.8555006, 0.8387836, 0.8229072, 0.8077383, 0.793177,
+ 0.7791461, 0.7655842, 0.7524416, 0.73967725, 0.7272569,
+ 0.7151515, 0.7033361, 0.69178915, 0.68049186, 0.6694277,
+ 0.658582, 0.6479418, 0.63749546, 0.6272325, 0.6171434,
+ 0.6072195, 0.5974532, 0.58783704, 0.5783647, 0.56903,
+ 0.5598274, 0.5507518, 0.54179835, 0.5329627, 0.52424055,
+ 0.5156282, 0.50712204, 0.49871865, 0.49041483, 0.48220766,
+ 0.4740943, 0.46607214, 0.4581387, 0.45029163, 0.44252872,
+ 0.43484783, 0.427247, 0.41972435, 0.41227803, 0.40490642,
+ 0.39760786, 0.3903808, 0.3832238, 0.37613547, 0.36911446,
+ 0.3621595, 0.35526937, 0.34844297, 0.34167916, 0.33497685,
+ 0.3283351, 0.3217529, 0.3152294, 0.30876362, 0.30235484,
+ 0.29600215, 0.28970486, 0.2834622, 0.2772735, 0.27113807,
+ 0.2650553, 0.25902456, 0.2530453, 0.24711695, 0.241239,
+ 0.23541094, 0.22963232, 0.2239027, 0.21822165, 0.21258877,
+ 0.20700371, 0.20146611, 0.19597565, 0.19053204, 0.18513499,
+ 0.17978427, 0.17447963, 0.1692209, 0.16400786, 0.15884037,
+ 0.15371831, 0.14864157, 0.14361008, 0.13862377, 0.13368265,
+ 0.12878671, 0.12393598, 0.119130544, 0.11437051, 0.10965602,
+ 0.104987256, 0.10036444, 0.095787846, 0.0912578, 0.08677467,
+ 0.0823389, 0.077950984, 0.073611505, 0.06932112, 0.06508058,
+ 0.06089077, 0.056752663, 0.0526674, 0.048636295, 0.044660863,
+ 0.040742867, 0.03688439, 0.033087887, 0.029356318,
+ 0.025693292, 0.022103304, 0.018592102, 0.015167298,
+ 0.011839478, 0.008624485, 0.005548995, 0.0026696292,
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package rand implements pseudo-random number generators.
+package rand
+
+import "sync"
+
+// A Source represents a source of uniformly-distributed
+// pseudo-random int64 values in the range [0, 1<<63).
+type Source interface {
+ Int63() int64
+ Seed(seed int64)
+}
+
+// NewSource returns a new pseudo-random Source seeded with the given value.
+func NewSource(seed int64) Source {
+ var rng rngSource
+ rng.Seed(seed)
+ return &rng
+}
+
+// A Rand is a source of random numbers.
+type Rand struct {
+ src Source
+}
+
+// New returns a new Rand that uses random values from src
+// to generate other random values.
+func New(src Source) *Rand { return &Rand{src} }
+
+// Seed uses the provided seed value to initialize the generator to a deterministic state.
+func (r *Rand) Seed(seed int64) { r.src.Seed(seed) }
+
+// Int63 returns a non-negative pseudo-random 63-bit integer as an int64.
+func (r *Rand) Int63() int64 { return r.src.Int63() }
+
+// Uint32 returns a pseudo-random 32-bit value as a uint32.
+func (r *Rand) Uint32() uint32 { return uint32(r.Int63() >> 31) }
+
+// Int31 returns a non-negative pseudo-random 31-bit integer as an int32.
+func (r *Rand) Int31() int32 { return int32(r.Int63() >> 32) }
+
+// Int returns a non-negative pseudo-random int.
+func (r *Rand) Int() int {
+ u := uint(r.Int63())
+ return int(u << 1 >> 1) // clear sign bit if int == int32
+}
+
+// Int63n returns, as an int64, a non-negative pseudo-random number in [0,n).
+func (r *Rand) Int63n(n int64) int64 {
+ if n <= 0 {
+ return 0
+ }
+ max := int64((1 << 63) - 1 - (1<<63)%uint64(n))
+ v := r.Int63()
+ for v > max {
+ v = r.Int63()
+ }
+ return v % n
+}
+
+// Int31n returns, as an int32, a non-negative pseudo-random number in [0,n).
+func (r *Rand) Int31n(n int32) int32 {
+ if n <= 0 {
+ return 0
+ }
+ max := int32((1 << 31) - 1 - (1<<31)%uint32(n))
+ v := r.Int31()
+ for v > max {
+ v = r.Int31()
+ }
+ return v % n
+}
+
+// Intn returns, as an int, a non-negative pseudo-random number in [0,n).
+func (r *Rand) Intn(n int) int {
+ if n <= 1<<31-1 {
+ return int(r.Int31n(int32(n)))
+ }
+ return int(r.Int63n(int64(n)))
+}
+
+// Float64 returns, as a float64, a pseudo-random number in [0.0,1.0).
+func (r *Rand) Float64() float64 { return float64(r.Int63()) / (1 << 63) }
+
+// Float32 returns, as a float32, a pseudo-random number in [0.0,1.0).
+func (r *Rand) Float32() float32 { return float32(r.Float64()) }
+
+// Perm returns, as a slice of n ints, a pseudo-random permutation of the integers [0,n).
+func (r *Rand) Perm(n int) []int {
+ m := make([]int, n)
+ for i := 0; i < n; i++ {
+ m[i] = i
+ }
+ for i := 0; i < n; i++ {
+ j := r.Intn(i + 1)
+ m[i], m[j] = m[j], m[i]
+ }
+ return m
+}
+
+/*
+ * Top-level convenience functions
+ */
+
+var globalRand = New(&lockedSource{src: NewSource(1)})
+
+// Seed uses the provided seed value to initialize the generator to a deterministic state.
+func Seed(seed int64) { globalRand.Seed(seed) }
+
+// Int63 returns a non-negative pseudo-random 63-bit integer as an int64.
+func Int63() int64 { return globalRand.Int63() }
+
+// Uint32 returns a pseudo-random 32-bit value as a uint32.
+func Uint32() uint32 { return globalRand.Uint32() }
+
+// Int31 returns a non-negative pseudo-random 31-bit integer as an int32.
+func Int31() int32 { return globalRand.Int31() }
+
+// Int returns a non-negative pseudo-random int.
+func Int() int { return globalRand.Int() }
+
+// Int63n returns, as an int64, a non-negative pseudo-random number in [0,n).
+func Int63n(n int64) int64 { return globalRand.Int63n(n) }
+
+// Int31n returns, as an int32, a non-negative pseudo-random number in [0,n).
+func Int31n(n int32) int32 { return globalRand.Int31n(n) }
+
+// Intn returns, as an int, a non-negative pseudo-random number in [0,n).
+func Intn(n int) int { return globalRand.Intn(n) }
+
+// Float64 returns, as a float64, a pseudo-random number in [0.0,1.0).
+func Float64() float64 { return globalRand.Float64() }
+
+// Float32 returns, as a float32, a pseudo-random number in [0.0,1.0).
+func Float32() float32 { return globalRand.Float32() }
+
+// Perm returns, as a slice of n ints, a pseudo-random permutation of the integers [0,n).
+func Perm(n int) []int { return globalRand.Perm(n) }
+
+// NormFloat64 returns a normally distributed float64 in the range
+// [-math.MaxFloat64, +math.MaxFloat64] with
+// standard normal distribution (mean = 0, stddev = 1).
+// To produce a different normal distribution, callers can
+// adjust the output using:
+//
+// sample = NormFloat64() * desiredStdDev + desiredMean
+//
+func NormFloat64() float64 { return globalRand.NormFloat64() }
+
+// ExpFloat64 returns an exponentially distributed float64 in the range
+// (0, +math.MaxFloat64] with an exponential distribution whose rate parameter
+// (lambda) is 1 and whose mean is 1/lambda (1).
+// To produce a distribution with a different rate parameter,
+// callers can adjust the output using:
+//
+// sample = ExpFloat64() / desiredRateParameter
+//
+func ExpFloat64() float64 { return globalRand.ExpFloat64() }
+
+type lockedSource struct {
+ lk sync.Mutex
+ src Source
+}
+
+func (r *lockedSource) Int63() (n int64) {
+ r.lk.Lock()
+ n = r.src.Int63()
+ r.lk.Unlock()
+ return
+}
+
+func (r *lockedSource) Seed(seed int64) {
+ r.lk.Lock()
+ r.src.Seed(seed)
+ r.lk.Unlock()
+}
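
Putting the pieces together, typical use is either a private Rand built from NewSource or the package-level helpers backed by the locked global source; an illustrative snippet with an arbitrary seed:

package main

import (
	"fmt"
	"math/rand"
)

func main() {
	// A private, deterministic generator.
	r := rand.New(rand.NewSource(42))
	fmt.Println(r.Intn(100), r.Float64(), r.Perm(5))

	// The package-level functions share the locked global source.
	rand.Seed(42)
	fmt.Println(rand.Intn(100), rand.Float64(), rand.Perm(5))
}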
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package rand
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "testing"
+)
+
+const (
+ numTestSamples = 10000
+)
+
+type statsResults struct {
+ mean float64
+ stddev float64
+ closeEnough float64
+ maxError float64
+}
+
+func max(a, b float64) float64 {
+ if a > b {
+ return a
+ }
+ return b
+}
+
+func nearEqual(a, b, closeEnough, maxError float64) bool {
+ absDiff := math.Abs(a - b)
+ if absDiff < closeEnough { // Necessary when one value is zero and one value is close to zero.
+ return true
+ }
+ return absDiff/max(math.Abs(a), math.Abs(b)) < maxError
+}
+
+var testSeeds = []int64{1, 1754801282, 1698661970, 1550503961}
+
+// checkSimilarDistribution returns success if the mean and stddev of the
+// two statsResults are similar.
+func (this *statsResults) checkSimilarDistribution(expected *statsResults) error {
+ if !nearEqual(this.mean, expected.mean, expected.closeEnough, expected.maxError) {
+ s := fmt.Sprintf("mean %v != %v (allowed error %v, %v)", this.mean, expected.mean, expected.closeEnough, expected.maxError)
+ fmt.Println(s)
+ return errors.New(s)
+ }
+ if !nearEqual(this.stddev, expected.stddev, 0, expected.maxError) {
+ s := fmt.Sprintf("stddev %v != %v (allowed error %v, %v)", this.stddev, expected.stddev, expected.closeEnough, expected.maxError)
+ fmt.Println(s)
+ return errors.New(s)
+ }
+ return nil
+}
+
+func getStatsResults(samples []float64) *statsResults {
+ res := new(statsResults)
+ var sum float64
+ for i := range samples {
+ sum += samples[i]
+ }
+ res.mean = sum / float64(len(samples))
+ var devsum float64
+ for i := range samples {
+ devsum += math.Pow(samples[i]-res.mean, 2)
+ }
+ res.stddev = math.Sqrt(devsum / float64(len(samples)))
+ return res
+}
+
+func checkSampleDistribution(t *testing.T, samples []float64, expected *statsResults) {
+ actual := getStatsResults(samples)
+ err := actual.checkSimilarDistribution(expected)
+ if err != nil {
+ t.Errorf(err.Error())
+ }
+}
+
+func checkSampleSliceDistributions(t *testing.T, samples []float64, nslices int, expected *statsResults) {
+ chunk := len(samples) / nslices
+ for i := 0; i < nslices; i++ {
+ low := i * chunk
+ var high int
+ if i == nslices-1 {
+ high = len(samples) - 1
+ } else {
+ high = (i + 1) * chunk
+ }
+ checkSampleDistribution(t, samples[low:high], expected)
+ }
+}
+
+//
+// Normal distribution tests
+//
+
+func generateNormalSamples(nsamples int, mean, stddev float64, seed int64) []float64 {
+ r := New(NewSource(seed))
+ samples := make([]float64, nsamples)
+ for i := range samples {
+ samples[i] = r.NormFloat64()*stddev + mean
+ }
+ return samples
+}
+
+func testNormalDistribution(t *testing.T, nsamples int, mean, stddev float64, seed int64) {
+ //fmt.Printf("testing nsamples=%v mean=%v stddev=%v seed=%v\n", nsamples, mean, stddev, seed);
+
+ samples := generateNormalSamples(nsamples, mean, stddev, seed)
+ errorScale := max(1.0, stddev) // Error scales with stddev
+ expected := &statsResults{mean, stddev, 0.10 * errorScale, 0.08 * errorScale}
+
+ // Make sure that the entire set matches the expected distribution.
+ checkSampleDistribution(t, samples, expected)
+
+ // Make sure that each half of the set matches the expected distribution.
+ checkSampleSliceDistributions(t, samples, 2, expected)
+
+ // Make sure that each 7th of the set matches the expected distribution.
+ checkSampleSliceDistributions(t, samples, 7, expected)
+}
+
+// Actual tests
+
+func TestStandardNormalValues(t *testing.T) {
+ for _, seed := range testSeeds {
+ testNormalDistribution(t, numTestSamples, 0, 1, seed)
+ }
+}
+
+func TestNonStandardNormalValues(t *testing.T) {
+ for sd := 0.5; sd < 1000; sd *= 2 {
+ for m := 0.5; m < 1000; m *= 2 {
+ for _, seed := range testSeeds {
+ testNormalDistribution(t, numTestSamples, m, sd, seed)
+ }
+ }
+ }
+}
+
+//
+// Exponential distribution tests
+//
+
+func generateExponentialSamples(nsamples int, rate float64, seed int64) []float64 {
+ r := New(NewSource(seed))
+ samples := make([]float64, nsamples)
+ for i := range samples {
+ samples[i] = r.ExpFloat64() / rate
+ }
+ return samples
+}
+
+func testExponentialDistribution(t *testing.T, nsamples int, rate float64, seed int64) {
+ //fmt.Printf("testing nsamples=%v rate=%v seed=%v\n", nsamples, rate, seed);
+
+ mean := 1 / rate
+ stddev := mean
+
+ samples := generateExponentialSamples(nsamples, rate, seed)
+ errorScale := max(1.0, 1/rate) // Error scales with the inverse of the rate
+ expected := &statsResults{mean, stddev, 0.10 * errorScale, 0.20 * errorScale}
+
+ // Make sure that the entire set matches the expected distribution.
+ checkSampleDistribution(t, samples, expected)
+
+ // Make sure that each half of the set matches the expected distribution.
+ checkSampleSliceDistributions(t, samples, 2, expected)
+
+ // Make sure that each 7th of the set matches the expected distribution.
+ checkSampleSliceDistributions(t, samples, 7, expected)
+}
+
+// Actual tests
+
+func TestStandardExponentialValues(t *testing.T) {
+ for _, seed := range testSeeds {
+ testExponentialDistribution(t, numTestSamples, 1, seed)
+ }
+}
+
+func TestNonStandardExponentialValues(t *testing.T) {
+ for rate := 0.05; rate < 10; rate *= 2 {
+ for _, seed := range testSeeds {
+ testExponentialDistribution(t, numTestSamples, rate, seed)
+ }
+ }
+}
+
+//
+// Table generation tests
+//
+
+func initNorm() (testKn []uint32, testWn, testFn []float32) {
+ const m1 = 1 << 31
+ var (
+ dn float64 = rn
+ tn = dn
+ vn float64 = 9.91256303526217e-3
+ )
+
+ testKn = make([]uint32, 128)
+ testWn = make([]float32, 128)
+ testFn = make([]float32, 128)
+
+ q := vn / math.Exp(-0.5*dn*dn)
+ testKn[0] = uint32((dn / q) * m1)
+ testKn[1] = 0
+ testWn[0] = float32(q / m1)
+ testWn[127] = float32(dn / m1)
+ testFn[0] = 1.0
+ testFn[127] = float32(math.Exp(-0.5 * dn * dn))
+ for i := 126; i >= 1; i-- {
+ dn = math.Sqrt(-2.0 * math.Log(vn/dn+math.Exp(-0.5*dn*dn)))
+ testKn[i+1] = uint32((dn / tn) * m1)
+ tn = dn
+ testFn[i] = float32(math.Exp(-0.5 * dn * dn))
+ testWn[i] = float32(dn / m1)
+ }
+ return
+}
+
+func initExp() (testKe []uint32, testWe, testFe []float32) {
+ const m2 = 1 << 32
+ var (
+ de float64 = re
+ te = de
+ ve float64 = 3.9496598225815571993e-3
+ )
+
+ testKe = make([]uint32, 256)
+ testWe = make([]float32, 256)
+ testFe = make([]float32, 256)
+
+ q := ve / math.Exp(-de)
+ testKe[0] = uint32((de / q) * m2)
+ testKe[1] = 0
+ testWe[0] = float32(q / m2)
+ testWe[255] = float32(de / m2)
+ testFe[0] = 1.0
+ testFe[255] = float32(math.Exp(-de))
+ for i := 254; i >= 1; i-- {
+ de = -math.Log(ve/de + math.Exp(-de))
+ testKe[i+1] = uint32((de / te) * m2)
+ te = de
+ testFe[i] = float32(math.Exp(-de))
+ testWe[i] = float32(de / m2)
+ }
+ return
+}
+
+// compareUint32Slices returns the first index where the two slices
+// disagree, or <0 if the lengths are the same and all elements
+// are identical.
+func compareUint32Slices(s1, s2 []uint32) int {
+ if len(s1) != len(s2) {
+ if len(s1) > len(s2) {
+ return len(s2) + 1
+ }
+ return len(s1) + 1
+ }
+ for i := range s1 {
+ if s1[i] != s2[i] {
+ return i
+ }
+ }
+ return -1
+}
+
+// compareFloat32Slices returns the first index where the two slices
+// disagree, or <0 if the lengths are the same and all elements
+// are identical.
+func compareFloat32Slices(s1, s2 []float32) int {
+ if len(s1) != len(s2) {
+ if len(s1) > len(s2) {
+ return len(s2) + 1
+ }
+ return len(s1) + 1
+ }
+ for i := range s1 {
+ if !nearEqual(float64(s1[i]), float64(s2[i]), 0, 1e-7) {
+ return i
+ }
+ }
+ return -1
+}
+
+func TestNormTables(t *testing.T) {
+ testKn, testWn, testFn := initNorm()
+ if i := compareUint32Slices(kn[0:], testKn); i >= 0 {
+ t.Errorf("kn disagrees at index %v; %v != %v", i, kn[i], testKn[i])
+ }
+ if i := compareFloat32Slices(wn[0:], testWn); i >= 0 {
+ t.Errorf("wn disagrees at index %v; %v != %v", i, wn[i], testWn[i])
+ }
+ if i := compareFloat32Slices(fn[0:], testFn); i >= 0 {
+ t.Errorf("fn disagrees at index %v; %v != %v", i, fn[i], testFn[i])
+ }
+}
+
+func TestExpTables(t *testing.T) {
+ testKe, testWe, testFe := initExp()
+ if i := compareUint32Slices(ke[0:], testKe); i >= 0 {
+ t.Errorf("ke disagrees at index %v; %v != %v", i, ke[i], testKe[i])
+ }
+ if i := compareFloat32Slices(we[0:], testWe); i >= 0 {
+ t.Errorf("we disagrees at index %v; %v != %v", i, we[i], testWe[i])
+ }
+ if i := compareFloat32Slices(fe[0:], testFe); i >= 0 {
+ t.Errorf("fe disagrees at index %v; %v != %v", i, fe[i], testFe[i])
+ }
+}
+
+// Benchmarks
+
+func BenchmarkInt63Threadsafe(b *testing.B) {
+ for n := b.N; n > 0; n-- {
+ Int63()
+ }
+}
+
+func BenchmarkInt63Unthreadsafe(b *testing.B) {
+ r := New(NewSource(1))
+ for n := b.N; n > 0; n-- {
+ r.Int63()
+ }
+}
+
+func BenchmarkIntn1000(b *testing.B) {
+ r := New(NewSource(1))
+ for n := b.N; n > 0; n-- {
+ r.Intn(1000)
+ }
+}
+
+func BenchmarkInt63n1000(b *testing.B) {
+ r := New(NewSource(1))
+ for n := b.N; n > 0; n-- {
+ r.Int63n(1000)
+ }
+}
+
+func BenchmarkInt31n1000(b *testing.B) {
+ r := New(NewSource(1))
+ for n := b.N; n > 0; n-- {
+ r.Int31n(1000)
+ }
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package rand
+
+/*
+ * Uniform distribution
+ *
+ * algorithm by
+ * DP Mitchell and JA Reeds
+ */
+
+const (
+ _LEN = 607
+ _TAP = 273
+ _MAX = 1 << 63
+ _MASK = _MAX - 1
+ _A = 48271
+ _M = (1 << 31) - 1
+ _Q = 44488
+ _R = 3399
+)
+
+var (
+ // cooked random numbers
+ // the state of the rng
+ // after 780e10 iterations
+ rng_cooked [_LEN]int64 = [...]int64{
+ 5041579894721019882, 4646389086726545243, 1395769623340756751, 5333664234075297259,
+ 2875692520355975054, 9033628115061424579, 7143218595135194537, 4812947590706362721,
+ 7937252194349799378, 5307299880338848416, 8209348851763925077, 2115741599318814044,
+ 4593015457530856296, 8140875735541888011, 3319429241265089026, 8619815648190321034,
+ 1727074043483619500, 113108499721038619, 4569519971459345583, 5062833859075314731,
+ 2387618771259064424, 2716131344356686112, 6559392774825876886, 7650093201692370310,
+ 7684323884043752161, 257867835996031390, 6593456519409015164, 271327514973697897,
+ 2789386447340118284, 1065192797246149621, 3344507881999356393, 4459797941780066633,
+ 7465081662728599889, 1014950805555097187, 4449440729345990775, 3481109366438502643,
+ 2418672789110888383, 5796562887576294778, 4484266064449540171, 3738982361971787048,
+ 4523597184512354423, 10530508058128498, 8633833783282346118, 2625309929628791628,
+ 8660405965245884302, 10162832508971942, 6540714680961817391, 7031802312784620857,
+ 6240911277345944669, 831864355460801054, 8004434137542152891, 2116287251661052151,
+ 2202309800992166967, 9161020366945053561, 4069299552407763864, 4936383537992622449,
+ 457351505131524928, 342195045928179354, 2847771682816600509, 2068020115986376518,
+ 4368649989588021065, 887231587095185257, 5563591506886576496, 6816225200251950296,
+ 5616972787034086048, 8471809303394836566, 1686575021641186857, 4045484338074262002,
+ 4244156215201778923, 7848217333783577387, 5632136521049761902, 833283142057835272,
+ 9029726508369077193, 3243583134664087292, 4316371101804477087, 8937849979965997980,
+ 6446940406810434101, 1679342092332374735, 6050638460742422078, 6993520719509581582,
+ 7640877852514293609, 5881353426285907985, 812786550756860885, 4541845584483343330,
+ 2725470216277009086, 4980675660146853729, 5210769080603236061, 8894283318990530821,
+ 6326442804750084282, 1495812843684243920, 7069751578799128019, 7370257291860230865,
+ 6756929275356942261, 4706794511633873654, 7824520467827898663, 8549875090542453214,
+ 33650829478596156, 1328918435751322643, 7297902601803624459, 1011190183918857495,
+ 2238025036817854944, 5147159997473910359, 896512091560522982, 2659470849286379941,
+ 6097729358393448602, 1731725986304753684, 4106255841983812711, 8327155210721535508,
+ 8477511620686074402, 5803876044675762232, 8435417780860221662, 5988852856651071244,
+ 4715837297103951910, 7566171971264485114, 505808562678895611, 5070098180695063370,
+ 842110666775871513, 572156825025677802, 1791881013492340891, 3393267094866038768,
+ 3778721850472236509, 2352769483186201278, 1292459583847367458, 8897907043675088419,
+ 5781809037144163536, 2733958794029492513, 5092019688680754699, 8996124554772526841,
+ 4234737173186232084, 5027558287275472836, 4635198586344772304, 8687338893267139351,
+ 5907508150730407386, 784756255473944452, 972392927514829904, 5422057694808175112,
+ 5158420642969283891, 9048531678558643225, 2407211146698877100, 7583282216521099569,
+ 3940796514530962282, 3341174631045206375, 3095313889586102949, 7405321895688238710,
+ 5832080132947175283, 7890064875145919662, 8184139210799583195, 1149859861409226130,
+ 1464597243840211302, 4641648007187991873, 3516491885471466898, 956288521791657692,
+ 6657089965014657519, 5220884358887979358, 1796677326474620641, 5340761970648932916,
+ 1147977171614181568, 5066037465548252321, 2574765911837859848, 1085848279845204775,
+ 3350107529868390359, 6116438694366558490, 2107701075971293812, 1803294065921269267,
+ 2469478054175558874, 7368243281019965984, 3791908367843677526, 185046971116456637,
+ 2257095756513439648, 7217693971077460129, 909049953079504259, 7196649268545224266,
+ 5637660345400869599, 3955544945427965183, 8057528650917418961, 4139268440301127643,
+ 6621926588513568059, 1373361136802681441, 6527366231383600011, 3507654575162700890,
+ 9202058512774729859, 1954818376891585542, 6640380907130175705, 8299563319178235687,
+ 3901867355218954373, 7046310742295574065, 6847195391333990232, 1572638100518868053,
+ 8850422670118399721, 3631909142291992901, 5158881091950831288, 2882958317343121593,
+ 4763258931815816403, 6280052734341785344, 4243789408204964850, 2043464728020827976,
+ 6545300466022085465, 4562580375758598164, 5495451168795427352, 1738312861590151095,
+ 553004618757816492, 6895160632757959823, 8233623922264685171, 7139506338801360852,
+ 8550891222387991669, 5535668688139305547, 2430933853350256242, 5401941257863201076,
+ 8159640039107728799, 6157493831600770366, 7632066283658143750, 6308328381617103346,
+ 3681878764086140361, 3289686137190109749, 6587997200611086848, 244714774258135476,
+ 4079788377417136100, 8090302575944624335, 2945117363431356361, 864324395848741045,
+ 3009039260312620700, 8430027460082534031, 401084700045993341, 7254622446438694921,
+ 4707864159563588614, 5640248530963493951, 5982507712689997893, 3315098242282210105,
+ 5503847578771918426, 3941971367175193882, 8118566580304798074, 3839261274019871296,
+ 7062410411742090847, 741381002980207668, 6027994129690250817, 2497829994150063930,
+ 6251390334426228834, 1368930247903518833, 8809096399316380241, 6492004350391900708,
+ 2462145737463489636, 404828418920299174, 4153026434231690595, 261785715255475940,
+ 5464715384600071357, 592710404378763017, 6764129236657751224, 8513655718539357449,
+ 5820343663801914208, 385298524683789911, 5224135003438199467, 6303131641338802145,
+ 7150122561309371392, 368107899140673753, 3115186834558311558, 2915636353584281051,
+ 4782583894627718279, 6718292300699989587, 8387085186914375220, 3387513132024756289,
+ 4654329375432538231, 8930667561363381602, 5374373436876319273, 7623042350483453954,
+ 7725442901813263321, 9186225467561587250, 4091027289597503355, 2357631606492579800,
+ 2530936820058611833, 1636551876240043639, 5564664674334965799, 1452244145334316253,
+ 2061642381019690829, 1279580266495294036, 9108481583171221009, 6023278686734049809,
+ 5007630032676973346, 2153168792952589781, 6720334534964750538, 6041546491134794105,
+ 3433922409283786309, 2285479922797300912, 3110614940896576130, 6366559590722842893,
+ 5418791419666136509, 7163298419643543757, 4891138053923696990, 580618510277907015,
+ 1684034065251686769, 4429514767357295841, 330346578555450005, 1119637995812174675,
+ 7177515271653460134, 4589042248470800257, 7693288629059004563, 143607045258444228,
+ 246994305896273627, 866417324803099287, 6473547110565816071, 3092379936208876896,
+ 2058427839513754051, 5133784708526867938, 8785882556301281247, 6149332666841167611,
+ 8585842181454472135, 6137678347805511274, 2070447184436970006, 5708223427705576541,
+ 5999657892458244504, 4358391411789012426, 325123008708389849, 6837621693887290924,
+ 4843721905315627004, 6010651222149276415, 5398352198963874652, 4602025990114250980,
+ 1044646352569048800, 9106614159853161675, 829256115228593269, 4919284369102997000,
+ 2681532557646850893, 3681559472488511871, 5307999518958214035, 6334130388442829274,
+ 2658708232916537604, 1163313865052186287, 581945337509520675, 3648778920718647903,
+ 4423673246306544414, 1620799783996955743, 220828013409515943, 8150384699999389761,
+ 4287360518296753003, 4590000184845883843, 5513660857261085186, 6964829100392774275,
+ 478991688350776035, 8746140185685648781, 228500091334420247, 1356187007457302238,
+ 3019253992034194581, 3152601605678500003, 430152752706002213, 5559581553696971176,
+ 4916432985369275664, 663574931734554391, 3420773838927732076, 2868348622579915573,
+ 1999319134044418520, 3328689518636282723, 2587672709781371173, 1517255313529399333,
+ 3092343956317362483, 3662252519007064108, 972445599196498113, 7664865435875959367,
+ 1708913533482282562, 6917817162668868494, 3217629022545312900, 2570043027221707107,
+ 8739788839543624613, 2488075924621352812, 4694002395387436668, 4559628481798514356,
+ 2997203966153298104, 1282559373026354493, 240113143146674385, 8665713329246516443,
+ 628141331766346752, 4571950817186770476, 1472811188152235408, 7596648026010355826,
+ 6091219417754424743, 7834161864828164065, 7103445518877254909, 4390861237357459201,
+ 4442653864240571734, 8903482404847331368, 622261699494173647, 6037261250297213248,
+ 504404948065709118, 7275215526217113061, 1011176780856001400, 2194750105623461063,
+ 2623071828615234808, 5157313728073836108, 3738405111966602044, 2539767524076729570,
+ 2467284396349269342, 5256026990536851868, 7841086888628396109, 6640857538655893162,
+ 1202087339038317498, 2113514992440715978, 7534350895342931403, 4925284734898484745,
+ 5145623771477493805, 8225140880134972332, 2719520354384050532, 9132346697815513771,
+ 4332154495710163773, 7137789594094346916, 6994721091344268833, 6667228574869048934,
+ 655440045726677499, 59934747298466858, 6124974028078036405, 8957774780655365418,
+ 2332206071942466437, 1701056712286369627, 3154897383618636503, 1637766181387607527,
+ 2460521277767576533, 197309393502684135, 643677854385267315, 2543179307861934850,
+ 4350769010207485119, 4754652089410667672, 2015595502641514512, 7999059458976458608,
+ 4287946071480840813, 8362686366770308971, 6486469209321732151, 3617727845841796026,
+ 7554353525834302244, 4450022655153542367, 1605195740213535749, 5327014565305508387,
+ 4626575813550328320, 2692222020597705149, 241045573717249868, 5098046974627094010,
+ 7916882295460730264, 884817090297530579, 5329160409530630596, 7790979528857726136,
+ 4955070238059373407, 4918537275422674302, 3008076183950404629, 3007769226071157901,
+ 2470346235617803020, 8928702772696731736, 7856187920214445904, 4474874585391974885,
+ 7900176660600710914, 2140571127916226672, 2425445057265199971, 2486055153341847830,
+ 4186670094382025798, 1883939007446035042, 8808666044074867985, 3734134241178479257,
+ 4065968871360089196, 6953124200385847784, 1305686814738899057, 1637739099014457647,
+ 3656125660947993209, 3966759634633167020, 3106378204088556331, 6328899822778449810,
+ 4565385105440252958, 1979884289539493806, 2331793186920865425, 3783206694208922581,
+ 8464961209802336085, 2843963751609577687, 3030678195484896323, 4793717574095772604,
+ 4459239494808162889, 402587895800087237, 8057891408711167515, 4541888170938985079,
+ 1042662272908816815, 5557303057122568958, 2647678726283249984, 2144477441549833761,
+ 5806352215355387087, 7117771003473903623, 5916597177708541638, 462597715452321361,
+ 8833658097025758785, 5970273481425315300, 563813119381731307, 2768349550652697015,
+ 1598828206250873866, 5206393647403558110, 6235043485709261823, 3152217402014639496,
+ 8469693267274066490, 125672920241807416, 5311079624024060938, 6663754932310491587,
+ 8736848295048751716, 4488039774992061878, 5923302823487327109, 140891791083103236,
+ 7414942793393574290, 7990420780896957397, 4317817392807076702, 3625184369705367340,
+ 2740722765288122703, 5743100009702758344, 5997898640509039159, 8854493341352484163,
+ 5242208035432907801, 701338899890987198, 7609280429197514109, 3020985755112334161,
+ 6651322707055512866, 2635195723621160615, 5144520864246028816, 1035086515727829828,
+ 1567242097116389047, 8172389260191636581, 6337820351429292273, 2163012566996458925,
+ 2743190902890262681, 1906367633221323427, 6011544915663598137, 5932255307352610768,
+ 2241128460406315459, 895504896216695588, 3094483003111372717, 4583857460292963101,
+ 9079887171656594975, 8839289181930711403, 5762740387243057873, 4225072055348026230,
+ 1838220598389033063, 3801620336801580414, 8823526620080073856, 1776617605585100335,
+ 7899055018877642622, 5421679761463003041, 5521102963086275121, 4248279443559365898,
+ 8735487530905098534, 1760527091573692978, 7142485049657745894, 8222656872927218123,
+ 4969531564923704323, 3394475942196872480, 6424174453260338141, 359248545074932887,
+ 3273651282831730598, 6797106199797138596, 3030918217665093212, 145600834617314036,
+ 6036575856065626233, 740416251634527158, 7080427635449935582, 6951781370868335478,
+ 399922722363687927, 294902314447253185, 7844950936339178523, 880320858634709042,
+ 6192655680808675579, 411604686384710388, 9026808440365124461, 6440783557497587732,
+ 4615674634722404292, 539897290441580544, 2096238225866883852, 8751955639408182687,
+ 1907224908052289603, 7381039757301768559, 6157238513393239656, 7749994231914157575,
+ 8629571604380892756, 5280433031239081479, 7101611890139813254, 2479018537985767835,
+ 7169176924412769570, 7942066497793203302, 1357759729055557688, 2278447439451174845,
+ 3625338785743880657, 6477479539006708521, 8976185375579272206, 5511371554711836120,
+ 1326024180520890843, 7537449876596048829, 5464680203499696154, 3189671183162196045,
+ 6346751753565857109, 241159987320630307, 3095793449658682053, 8978332846736310159,
+ 2902794662273147216, 7208698530190629697, 7276901792339343736, 1732385229314443140,
+ 4133292154170828382, 2918308698224194548, 1519461397937144458, 5293934712616591764,
+ 4922828954023452664, 2879211533496425641, 5896236396443472108, 8465043815351752425,
+ 7329020396871624740, 8915471717014488588, 2944902635677463047, 7052079073493465134,
+ 8382142935188824023, 9103922860780351547, 4152330101494654406,
+ }
+)
+
+type rngSource struct {
+ tap int // index into vec
+ feed int // index into vec
+ vec [_LEN]int64 // current feedback register
+}
+
+// seed rng x[n+1] = 48271 * x[n] mod (2**31 - 1)
+func seedrand(x int32) int32 {
+ hi := x / _Q
+ lo := x % _Q
+ x = _A*lo - _R*hi
+ if x < 0 {
+ x += _M
+ }
+ return x
+}
+
+// Seed uses the provided seed value to initialize the generator to a deterministic state.
+func (rng *rngSource) Seed(seed int64) {
+ rng.tap = 0
+ rng.feed = _LEN - _TAP
+
+ seed = seed % _M
+ if seed < 0 {
+ seed += _M
+ }
+ if seed == 0 {
+ seed = 89482311
+ }
+
+ x := int32(seed)
+ for i := -20; i < _LEN; i++ {
+ x = seedrand(x)
+ if i >= 0 {
+ var u int64
+ u = int64(x) << 40
+ x = seedrand(x)
+ u ^= int64(x) << 20
+ x = seedrand(x)
+ u ^= int64(x)
+ u ^= rng_cooked[i]
+ rng.vec[i] = u & _MASK
+ }
+ }
+}
+
+// Int63 returns a non-negative pseudo-random 63-bit integer as an int64.
+func (rng *rngSource) Int63() int64 {
+ rng.tap--
+ if rng.tap < 0 {
+ rng.tap += _LEN
+ }
+
+ rng.feed--
+ if rng.feed < 0 {
+ rng.feed += _LEN
+ }
+
+ x := (rng.vec[rng.feed] + rng.vec[rng.tap]) & _MASK
+ rng.vec[rng.feed] = x
+ return x
+}
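// Illustrative check, not part of the patch above: seedrand uses Schrage's
// method to compute 48271*x mod (2**31-1) without overflowing 32-bit
// arithmetic (valid because _R < _Q). Inside the package it can be checked
// against the direct 64-bit computation:
//
//	x := int32(20090101) // arbitrary example seed
//	want := int32(int64(48271) * int64(x) % ((1 << 31) - 1))
//	if got := seedrand(x); got != want {
//		panic("seedrand disagrees with direct 64-bit computation")
//	}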
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// W.Hormann, G.Derflinger:
+// "Rejection-Inversion to Generate Variates
+// from Monotone Discrete Distributions"
+// http://eeyore.wu-wien.ac.at/papers/96-04-04.wh-der.ps.gz
+
+package rand
+
+import "math"
+
+// A Zipf generates Zipf distributed variates.
+type Zipf struct {
+ r *Rand
+ imax float64
+ v float64
+ q float64
+ s float64
+ oneminusQ float64
+ oneminusQinv float64
+ hxm float64
+ hx0minusHxm float64
+}
+
+func (z *Zipf) h(x float64) float64 {
+ return math.Exp(z.oneminusQ*math.Log(z.v+x)) * z.oneminusQinv
+}
+
+func (z *Zipf) hinv(x float64) float64 {
+ return math.Exp(z.oneminusQinv*math.Log(z.oneminusQ*x)) - z.v
+}
+
+// NewZipf returns a Zipf variate generator.
+// The generated values k, with 0 <= k <= imax, are distributed with
+// probability proportional to (v+k)**(-s), where s > 1 and v >= 1.
+//
+func NewZipf(r *Rand, s float64, v float64, imax uint64) *Zipf {
+ z := new(Zipf)
+ if s <= 1.0 || v < 1 {
+ return nil
+ }
+ z.r = r
+ z.imax = float64(imax)
+ z.v = v
+ z.q = s
+ z.oneminusQ = 1.0 - z.q
+ z.oneminusQinv = 1.0 / z.oneminusQ
+ z.hxm = z.h(z.imax + 0.5)
+ z.hx0minusHxm = z.h(0.5) - math.Exp(math.Log(z.v)*(-z.q)) - z.hxm
+ z.s = 1 - z.hinv(z.h(1.5)-math.Exp(-z.q*math.Log(z.v+1.0)))
+ return z
+}
+
+// Uint64 returns a value drawn from the Zipf distribution described
+// by the Zipf object.
+func (z *Zipf) Uint64() uint64 {
+ k := 0.0
+
+ for {
+ r := z.r.Float64() // r on [0,1]
+ ur := z.hxm + r*z.hx0minusHxm
+ x := z.hinv(ur)
+ k = math.Floor(x + 0.5)
+ if k-x <= z.s {
+ break
+ }
+ if ur >= z.h(k+0.5)-math.Exp(-math.Log(k+z.v)*z.q) {
+ break
+ }
+ }
+ return uint64(k)
+}
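// Illustrative usage sketch, not part of the patch above; it assumes the new
// "math/rand" import path, and the parameters s=1.5, v=1, imax=100 are
// arbitrary examples:
//
//	r := rand.New(rand.NewSource(42))
//	z := rand.NewZipf(r, 1.5, 1, 100) // nil if s <= 1 or v < 1
//	for i := 0; i < 5; i++ {
//		fmt.Println(z.Uint64()) // values in [0, 100]; small values are most likely
//	}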
-// Copyright 2009 The Go Authors. All rights reserved.
+// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package math
/*
- Floating point tangent.
+ Floating-point tangent.
*/
+// The original C code, the long comment, and the constants
+// below were from http://netlib.sandia.gov/cephes/cmath/sin.c,
+// available from http://www.netlib.org/cephes/cmath.tgz.
+// The Go code is a simplified version of the original C.
+//
+// tan.c
+//
+// Circular tangent
+//
+// SYNOPSIS:
+//
+// double x, y, tan();
+// y = tan( x );
+//
+// DESCRIPTION:
+//
+// Returns the circular tangent of the radian argument x.
+//
+// Range reduction is modulo pi/4. A rational function
+// x + x**3 P(x**2)/Q(x**2)
+// is employed in the basic interval [0, pi/4].
+//
+// ACCURACY:
+// Relative error:
+// arithmetic domain # trials peak rms
+// DEC +-1.07e9 44000 4.1e-17 1.0e-17
+// IEEE +-1.07e9 30000 2.9e-16 8.1e-17
+//
+// Partial loss of accuracy begins to occur at x = 2**30 = 1.074e9. The loss
+// is not gradual, but jumps suddenly to about 1 part in 10e7. Results may
+// be meaningless for x > 2**49 = 5.6e14.
+// [Accuracy loss statement from sin.go comments.]
+//
+// Cephes Math Library Release 2.8: June, 2000
+// Copyright 1984, 1987, 1989, 1992, 2000 by Stephen L. Moshier
+//
+// The readme file at http://netlib.sandia.gov/cephes/ says:
+// Some software in this archive may be from the book _Methods and
+// Programs for Mathematical Functions_ (Prentice-Hall or Simon & Schuster
+// International, 1989) or from the Cephes Mathematical Library, a
+// commercial product. In either event, it is copyrighted by the author.
+// What you see here may be used freely but it comes with no support or
+// guarantee.
+//
+// The two known misprints in the book are repaired here in the
+// source listings for the gamma function and the incomplete beta
+// integral.
+//
+// Stephen L. Moshier
+// moshier@na-net.ornl.gov
+
+// tan coefficients
+var _tanP = [...]float64{
+ -1.30936939181383777646E4, // 0xc0c992d8d24f3f38
+ 1.15351664838587416140E6, // 0x413199eca5fc9ddd
+ -1.79565251976484877988E7, // 0xc1711fead3299176
+}
+var _tanQ = [...]float64{
+ 1.00000000000000000000E0,
+ 1.36812963470692954678E4, //0x40cab8a5eeb36572
+ -1.32089234440210967447E6, //0xc13427bc582abc96
+ 2.50083801823357915839E7, //0x4177d98fc2ead8ef
+ -5.38695755929454629881E7, //0xc189afe03cbe5a31
+}
+
// Tan returns the tangent of x.
+//
+// Special conditions are:
+// Tan(±0) = ±0
+// Tan(±Inf) = NaN
+// Tan(NaN) = NaN
func Tan(x float64) float64 {
- // Coefficients are #4285 from Hart & Cheney. (19.74D)
const (
- P0 = -.1306820264754825668269611177e+5
- P1 = .1055970901714953193602353981e+4
- P2 = -.1550685653483266376941705728e+2
- P3 = .3422554387241003435328470489e-1
- P4 = .3386638642677172096076369e-4
- Q0 = -.1663895238947119001851464661e+5
- Q1 = .4765751362916483698926655581e+4
- Q2 = -.1555033164031709966900124574e+3
+ PI4A = 7.85398125648498535156E-1 // 0x3fe921fb40000000, Pi/4 split into three parts
+ PI4B = 3.77489470793079817668E-8 // 0x3e64442d00000000,
+ PI4C = 2.69515142907905952645E-15 // 0x3ce8469898cc5170,
+ M4PI = 1.273239544735162542821171882678754627704620361328125 // 4/pi
)
+ // TODO(rsc): Remove manual inlining of IsNaN, IsInf
+ // when compiler does it for us
+ // special cases
+ switch {
+ case x == 0 || x != x: // x == 0 || IsNaN():
+ return x // return ±0 || NaN()
+ case x < -MaxFloat64 || x > MaxFloat64: // IsInf(x, 0):
+ return NaN()
+ }
- flag := false
+ // make argument positive but save the sign
sign := false
if x < 0 {
x = -x
sign = true
}
- x = x * (4 / Pi) /* overflow? */
- var e float64
- e, x = Modf(x)
- i := int32(e)
-
- switch i & 3 {
- case 1:
- x = 1 - x
- flag = true
- case 2:
- sign = !sign
- flag = true
+ j := int64(x * M4PI) // integer part of x/(Pi/4), as integer for tests on the phase angle
+ y := float64(j) // integer part of x/(Pi/4), as float
- case 3:
- x = 1 - x
- sign = !sign
+ /* map zeros and singularities to origin */
+ if j&1 == 1 {
+ j += 1
+ y += 1
}
- xsq := x * x
- temp := ((((P4*xsq+P3)*xsq+P2)*xsq+P1)*xsq + P0) * x
- temp = temp / (((xsq+Q2)*xsq+Q1)*xsq + Q0)
+ z := ((x - y*PI4A) - y*PI4B) - y*PI4C
+ zz := z * z
- if flag {
- if temp == 0 {
- return NaN()
- }
- temp = 1 / temp
+ if zz > 1e-14 {
+ y = z + z*(zz*(((_tanP[0]*zz)+_tanP[1])*zz+_tanP[2])/((((zz+_tanQ[1])*zz+_tanQ[2])*zz+_tanQ[3])*zz+_tanQ[4]))
+ } else {
+ y = z
+ }
+ if j&2 == 2 {
+ y = -1 / y
}
if sign {
- temp = -temp
+ y = -y
}
- return temp
+ return y
}
}
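// Illustrative sanity check, not part of the patch above: the rewritten Tan
// should agree with Sin/Cos on moderate arguments (the 1e-14 tolerance is a
// loose bound; both sides are accurate to within a few ulps):
//
//	for _, x := range []float64{0.5, 1.0, 2.0, 10.0} {
//		if math.Abs(math.Tan(x)-math.Sin(x)/math.Cos(x)) > 1e-14 {
//			panic("Tan disagrees with Sin/Cos")
//		}
//	}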
// NextPart returns the next part in the multipart or an error.
-// When there are no more parts, the error os.EOF is returned.
+// When there are no more parts, the error io.EOF is returned.
func (mr *Reader) NextPart() (*Part, error) {
if mr.currentPart != nil {
mr.currentPart.Close()
import (
"bytes"
+ "encoding/json"
"fmt"
"io"
"io/ioutil"
- "json"
"strings"
"testing"
)
t.Error("Didn't expect a fifth part.")
}
if err != io.EOF {
- t.Errorf("On fifth part expected os.EOF; got %v", err)
+ t.Errorf("On fifth part expected io.EOF; got %v", err)
}
}
t.Errorf("Unexpected part in test %d", testNum)
}
if err != io.EOF {
- t.Errorf("On test %d expected os.EOF; got %v", testNum, err)
+ t.Errorf("On test %d expected io.EOF; got %v", testNum, err)
}
}
import (
"bytes"
"fmt"
- "rand"
+ "math/rand"
"sort"
)
package net
import (
- "rand"
+ "math/rand"
"sync"
"time"
)
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements CGI from the perspective of a child
+// process.
+
+package cgi
+
+import (
+ "bufio"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "net/url"
+ "os"
+ "strconv"
+ "strings"
+)
+
+// Request returns the HTTP request as represented in the current
+// environment. This assumes the current program is being run
+// by a web server in a CGI environment.
+// The returned Request's Body is populated, if applicable.
+func Request() (*http.Request, error) {
+ r, err := RequestFromMap(envMap(os.Environ()))
+ if err != nil {
+ return nil, err
+ }
+ if r.ContentLength > 0 {
+ r.Body = ioutil.NopCloser(io.LimitReader(os.Stdin, r.ContentLength))
+ }
+ return r, nil
+}
+
+func envMap(env []string) map[string]string {
+ m := make(map[string]string)
+ for _, kv := range env {
+ if idx := strings.Index(kv, "="); idx != -1 {
+ m[kv[:idx]] = kv[idx+1:]
+ }
+ }
+ return m
+}
+
+// RequestFromMap creates an http.Request from CGI variables.
+// The returned Request's Body field is not populated.
+func RequestFromMap(params map[string]string) (*http.Request, error) {
+ r := new(http.Request)
+ r.Method = params["REQUEST_METHOD"]
+ if r.Method == "" {
+ return nil, errors.New("cgi: no REQUEST_METHOD in environment")
+ }
+
+ r.Proto = params["SERVER_PROTOCOL"]
+ var ok bool
+ r.ProtoMajor, r.ProtoMinor, ok = http.ParseHTTPVersion(r.Proto)
+ if !ok {
+ return nil, errors.New("cgi: invalid SERVER_PROTOCOL version")
+ }
+
+ r.Close = true
+ r.Trailer = http.Header{}
+ r.Header = http.Header{}
+
+ r.Host = params["HTTP_HOST"]
+
+ if lenstr := params["CONTENT_LENGTH"]; lenstr != "" {
+ clen, err := strconv.Atoi64(lenstr)
+ if err != nil {
+ return nil, errors.New("cgi: bad CONTENT_LENGTH in environment: " + lenstr)
+ }
+ r.ContentLength = clen
+ }
+
+ if ct := params["CONTENT_TYPE"]; ct != "" {
+ r.Header.Set("Content-Type", ct)
+ }
+
+ // Copy "HTTP_FOO_BAR" variables to "Foo-Bar" Headers
+ for k, v := range params {
+ if !strings.HasPrefix(k, "HTTP_") || k == "HTTP_HOST" {
+ continue
+ }
+ r.Header.Add(strings.Replace(k[5:], "_", "-", -1), v)
+ }
+
+ // TODO: cookies. parsing them isn't exported, though.
+
+ if r.Host != "" {
+ // Hostname is provided, so we can reasonably construct a URL,
+ // even if we have to assume 'http' for the scheme.
+ rawurl := "http://" + r.Host + params["REQUEST_URI"]
+ url, err := url.Parse(rawurl)
+ if err != nil {
+ return nil, errors.New("cgi: failed to parse host and REQUEST_URI into a URL: " + rawurl)
+ }
+ r.URL = url
+ }
+ // Fallback logic if we don't have a Host header or the URL
+ // failed to parse
+ if r.URL == nil {
+ uriStr := params["REQUEST_URI"]
+ url, err := url.Parse(uriStr)
+ if err != nil {
+ return nil, errors.New("cgi: failed to parse REQUEST_URI into a URL: " + uriStr)
+ }
+ r.URL = url
+ }
+
+ // There's apparently a de-facto standard for this.
+ // http://docstore.mik.ua/orelly/linux/cgi/ch03_02.htm#ch03-35636
+ if s := params["HTTPS"]; s == "on" || s == "ON" || s == "1" {
+ r.TLS = &tls.ConnectionState{HandshakeComplete: true}
+ }
+
+ // Request.RemoteAddr has its port set by Go's standard http
+ // server, so we do here too. We don't have one, though, so we
+ // use a dummy one.
+ r.RemoteAddr = net.JoinHostPort(params["REMOTE_ADDR"], "0")
+
+ return r, nil
+}
+
+// Serve executes the provided Handler on the currently active CGI
+// request, if any. If there's no current CGI environment,
+// an error is returned. The provided handler may be nil to use
+// http.DefaultServeMux.
+func Serve(handler http.Handler) error {
+ req, err := Request()
+ if err != nil {
+ return err
+ }
+ if handler == nil {
+ handler = http.DefaultServeMux
+ }
+ rw := &response{
+ req: req,
+ header: make(http.Header),
+ bufw: bufio.NewWriter(os.Stdout),
+ }
+ handler.ServeHTTP(rw, req)
+ if err = rw.bufw.Flush(); err != nil {
+ return err
+ }
+ return nil
+}
+
+type response struct {
+ req *http.Request
+ header http.Header
+ bufw *bufio.Writer
+ headerSent bool
+}
+
+func (r *response) Flush() {
+ r.bufw.Flush()
+}
+
+func (r *response) Header() http.Header {
+ return r.header
+}
+
+func (r *response) Write(p []byte) (n int, err error) {
+ if !r.headerSent {
+ r.WriteHeader(http.StatusOK)
+ }
+ return r.bufw.Write(p)
+}
+
+func (r *response) WriteHeader(code int) {
+ if r.headerSent {
+ // Note: explicitly using Stderr, as Stdout is our HTTP output.
+ fmt.Fprintf(os.Stderr, "CGI attempted to write header twice on request for %s", r.req.URL)
+ return
+ }
+ r.headerSent = true
+ fmt.Fprintf(r.bufw, "Status: %d %s\r\n", code, http.StatusText(code))
+
+ // Set a default Content-Type
+ if _, hasType := r.header["Content-Type"]; !hasType {
+ r.header.Add("Content-Type", "text/html; charset=utf-8")
+ }
+
+ r.header.Write(r.bufw)
+ r.bufw.WriteString("\r\n")
+ r.bufw.Flush()
+}
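// Illustrative sketch, not part of the patch above: a minimal CGI program
// built on Serve. The handler body is an example only; it assumes the new
// "net/http" and "net/http/cgi" import paths.
//
//	package main
//
//	import (
//		"fmt"
//		"net/http"
//		"net/http/cgi"
//	)
//
//	func main() {
//		cgi.Serve(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
//			fmt.Fprintf(w, "you requested %s\n", r.URL.Path)
//		}))
//	}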
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Tests for CGI (the child process perspective)
+
+package cgi
+
+import (
+ "testing"
+)
+
+func TestRequest(t *testing.T) {
+ env := map[string]string{
+ "SERVER_PROTOCOL": "HTTP/1.1",
+ "REQUEST_METHOD": "GET",
+ "HTTP_HOST": "example.com",
+ "HTTP_REFERER": "elsewhere",
+ "HTTP_USER_AGENT": "goclient",
+ "HTTP_FOO_BAR": "baz",
+ "REQUEST_URI": "/path?a=b",
+ "CONTENT_LENGTH": "123",
+ "CONTENT_TYPE": "text/xml",
+ "HTTPS": "1",
+ "REMOTE_ADDR": "5.6.7.8",
+ }
+ req, err := RequestFromMap(env)
+ if err != nil {
+ t.Fatalf("RequestFromMap: %v", err)
+ }
+ if g, e := req.UserAgent(), "goclient"; e != g {
+ t.Errorf("expected UserAgent %q; got %q", e, g)
+ }
+ if g, e := req.Method, "GET"; e != g {
+ t.Errorf("expected Method %q; got %q", e, g)
+ }
+ if g, e := req.Header.Get("Content-Type"), "text/xml"; e != g {
+ t.Errorf("expected Content-Type %q; got %q", e, g)
+ }
+ if g, e := req.ContentLength, int64(123); e != g {
+ t.Errorf("expected ContentLength %d; got %d", e, g)
+ }
+ if g, e := req.Referer(), "elsewhere"; e != g {
+ t.Errorf("expected Referer %q; got %q", e, g)
+ }
+ if req.Header == nil {
+ t.Fatalf("unexpected nil Header")
+ }
+ if g, e := req.Header.Get("Foo-Bar"), "baz"; e != g {
+ t.Errorf("expected Foo-Bar %q; got %q", e, g)
+ }
+ if g, e := req.URL.String(), "http://example.com/path?a=b"; e != g {
+ t.Errorf("expected URL %q; got %q", e, g)
+ }
+ if g, e := req.FormValue("a"), "b"; e != g {
+ t.Errorf("expected FormValue(a) %q; got %q", e, g)
+ }
+ if req.Trailer == nil {
+ t.Errorf("unexpected nil Trailer")
+ }
+ if req.TLS == nil {
+ t.Errorf("expected non-nil TLS")
+ }
+ if e, g := "5.6.7.8:0", req.RemoteAddr; e != g {
+ t.Errorf("RemoteAddr: got %q; want %q", g, e)
+ }
+}
+
+func TestRequestWithoutHost(t *testing.T) {
+ env := map[string]string{
+ "SERVER_PROTOCOL": "HTTP/1.1",
+ "HTTP_HOST": "",
+ "REQUEST_METHOD": "GET",
+ "REQUEST_URI": "/path?a=b",
+ "CONTENT_LENGTH": "123",
+ }
+ req, err := RequestFromMap(env)
+ if err != nil {
+ t.Fatalf("RequestFromMap: %v", err)
+ }
+ if req.URL == nil {
+ t.Fatalf("unexpected nil URL")
+ }
+ if g, e := req.URL.String(), "/path?a=b"; e != g {
+ t.Errorf("expected URL %q; got %q", e, g)
+ }
+}
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements the host side of CGI (being the webserver
+// parent process).
+
+// Package cgi implements CGI (Common Gateway Interface) as specified
+// in RFC 3875.
+//
+// Note that using CGI means starting a new process to handle each
+// request, which is typically less efficient than using a
+// long-running server. This package is intended primarily for
+// compatibility with existing systems.
+package cgi
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "log"
+ "net/http"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "regexp"
+ "runtime"
+ "strconv"
+ "strings"
+)
+
+var trailingPort = regexp.MustCompile(`:([0-9]+)$`)
+
+var osDefaultInheritEnv = map[string][]string{
+ "darwin": {"DYLD_LIBRARY_PATH"},
+ "freebsd": {"LD_LIBRARY_PATH"},
+ "hpux": {"LD_LIBRARY_PATH", "SHLIB_PATH"},
+ "irix": {"LD_LIBRARY_PATH", "LD_LIBRARYN32_PATH", "LD_LIBRARY64_PATH"},
+ "linux": {"LD_LIBRARY_PATH"},
+ "openbsd": {"LD_LIBRARY_PATH"},
+ "solaris": {"LD_LIBRARY_PATH", "LD_LIBRARY_PATH_32", "LD_LIBRARY_PATH_64"},
+ "windows": {"SystemRoot", "COMSPEC", "PATHEXT", "WINDIR"},
+}
+
+// Handler runs an executable in a subprocess with a CGI environment.
+type Handler struct {
+ Path string // path to the CGI executable
+ Root string // root URI prefix of handler or empty for "/"
+
+ // Dir specifies the CGI executable's working directory.
+ // If Dir is empty, the base directory of Path is used.
+ // If Path has no base directory, the current working
+ // directory is used.
+ Dir string
+
+ Env []string // extra environment variables to set, if any, as "key=value"
+ InheritEnv []string // environment variables to inherit from host, as "key"
+ Logger *log.Logger // optional log for errors or nil to use log.Print
+ Args []string // optional arguments to pass to child process
+
+ // PathLocationHandler specifies the root http Handler that
+ // should handle internal redirects when the CGI process
+ // returns a Location header value starting with a "/", as
+ // specified in RFC 3875 § 6.3.2. This will likely be
+ // http.DefaultServeMux.
+ //
+ // If nil, a CGI response with a local URI path is instead sent
+ // back to the client and not redirected internally.
+ PathLocationHandler http.Handler
+}
+
+// removeLeadingDuplicates removes leading duplicates in env.
+// This makes it possible to override environment variables, for example:
+// cgi.Handler{
+// ...
+// Env: []string{"SCRIPT_FILENAME=foo.php"},
+// }
+func removeLeadingDuplicates(env []string) (ret []string) {
+ n := len(env)
+ for i := 0; i < n; i++ {
+ e := env[i]
+ s := strings.SplitN(e, "=", 2)[0]
+ found := false
+ for j := i + 1; j < n; j++ {
+ if s == strings.SplitN(env[j], "=", 2)[0] {
+ found = true
+ break
+ }
+ }
+ if !found {
+ ret = append(ret, e)
+ }
+ }
+ return
+}
+
+func (h *Handler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
+ root := h.Root
+ if root == "" {
+ root = "/"
+ }
+
+ if len(req.TransferEncoding) > 0 && req.TransferEncoding[0] == "chunked" {
+ rw.WriteHeader(http.StatusBadRequest)
+ rw.Write([]byte("Chunked request bodies are not supported by CGI."))
+ return
+ }
+
+ pathInfo := req.URL.Path
+ if root != "/" && strings.HasPrefix(pathInfo, root) {
+ pathInfo = pathInfo[len(root):]
+ }
+
+ port := "80"
+ if matches := trailingPort.FindStringSubmatch(req.Host); len(matches) != 0 {
+ port = matches[1]
+ }
+
+ env := []string{
+ "SERVER_SOFTWARE=go",
+ "SERVER_NAME=" + req.Host,
+ "SERVER_PROTOCOL=HTTP/1.1",
+ "HTTP_HOST=" + req.Host,
+ "GATEWAY_INTERFACE=CGI/1.1",
+ "REQUEST_METHOD=" + req.Method,
+ "QUERY_STRING=" + req.URL.RawQuery,
+ "REQUEST_URI=" + req.URL.RawPath,
+ "PATH_INFO=" + pathInfo,
+ "SCRIPT_NAME=" + root,
+ "SCRIPT_FILENAME=" + h.Path,
+ "REMOTE_ADDR=" + req.RemoteAddr,
+ "REMOTE_HOST=" + req.RemoteAddr,
+ "SERVER_PORT=" + port,
+ }
+
+ if req.TLS != nil {
+ env = append(env, "HTTPS=on")
+ }
+
+ for k, v := range req.Header {
+ k = strings.Map(upperCaseAndUnderscore, k)
+ joinStr := ", "
+ if k == "COOKIE" {
+ joinStr = "; "
+ }
+ env = append(env, "HTTP_"+k+"="+strings.Join(v, joinStr))
+ }
+
+ if req.ContentLength > 0 {
+ env = append(env, fmt.Sprintf("CONTENT_LENGTH=%d", req.ContentLength))
+ }
+ if ctype := req.Header.Get("Content-Type"); ctype != "" {
+ env = append(env, "CONTENT_TYPE="+ctype)
+ }
+
+ if h.Env != nil {
+ env = append(env, h.Env...)
+ }
+
+ envPath := os.Getenv("PATH")
+ if envPath == "" {
+ envPath = "/bin:/usr/bin:/usr/ucb:/usr/bsd:/usr/local/bin"
+ }
+ env = append(env, "PATH="+envPath)
+
+ for _, e := range h.InheritEnv {
+ if v := os.Getenv(e); v != "" {
+ env = append(env, e+"="+v)
+ }
+ }
+
+ for _, e := range osDefaultInheritEnv[runtime.GOOS] {
+ if v := os.Getenv(e); v != "" {
+ env = append(env, e+"="+v)
+ }
+ }
+
+ env = removeLeadingDuplicates(env)
+
+ var cwd, path string
+ if h.Dir != "" {
+ path = h.Path
+ cwd = h.Dir
+ } else {
+ cwd, path = filepath.Split(h.Path)
+ }
+ if cwd == "" {
+ cwd = "."
+ }
+
+ internalError := func(err error) {
+ rw.WriteHeader(http.StatusInternalServerError)
+ h.printf("CGI error: %v", err)
+ }
+
+ cmd := &exec.Cmd{
+ Path: path,
+ Args: append([]string{h.Path}, h.Args...),
+ Dir: cwd,
+ Env: env,
+ Stderr: os.Stderr, // for now
+ }
+ if req.ContentLength != 0 {
+ cmd.Stdin = req.Body
+ }
+ stdoutRead, err := cmd.StdoutPipe()
+ if err != nil {
+ internalError(err)
+ return
+ }
+
+ err = cmd.Start()
+ if err != nil {
+ internalError(err)
+ return
+ }
+ defer cmd.Wait()
+ defer stdoutRead.Close()
+
+ linebody, _ := bufio.NewReaderSize(stdoutRead, 1024)
+ headers := make(http.Header)
+ statusCode := 0
+ for {
+ line, isPrefix, err := linebody.ReadLine()
+ if isPrefix {
+ rw.WriteHeader(http.StatusInternalServerError)
+ h.printf("cgi: long header line from subprocess.")
+ return
+ }
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ rw.WriteHeader(http.StatusInternalServerError)
+ h.printf("cgi: error reading headers: %v", err)
+ return
+ }
+ if len(line) == 0 {
+ break
+ }
+ parts := strings.SplitN(string(line), ":", 2)
+ if len(parts) < 2 {
+ h.printf("cgi: bogus header line: %s", string(line))
+ continue
+ }
+ header, val := parts[0], parts[1]
+ header = strings.TrimSpace(header)
+ val = strings.TrimSpace(val)
+ switch {
+ case header == "Status":
+ if len(val) < 3 {
+ h.printf("cgi: bogus status (short): %q", val)
+ return
+ }
+ code, err := strconv.Atoi(val[0:3])
+ if err != nil {
+ h.printf("cgi: bogus status: %q", val)
+ h.printf("cgi: line was %q", line)
+ return
+ }
+ statusCode = code
+ default:
+ headers.Add(header, val)
+ }
+ }
+
+ if loc := headers.Get("Location"); loc != "" {
+ if strings.HasPrefix(loc, "/") && h.PathLocationHandler != nil {
+ h.handleInternalRedirect(rw, req, loc)
+ return
+ }
+ if statusCode == 0 {
+ statusCode = http.StatusFound
+ }
+ }
+
+ if statusCode == 0 {
+ statusCode = http.StatusOK
+ }
+
+ // Copy headers to rw's headers, after we've decided not to
+ // go into handleInternalRedirect, which won't want its rw
+ // headers to have been touched.
+ for k, vv := range headers {
+ for _, v := range vv {
+ rw.Header().Add(k, v)
+ }
+ }
+
+ rw.WriteHeader(statusCode)
+
+ _, err = io.Copy(rw, linebody)
+ if err != nil {
+ h.printf("cgi: copy error: %v", err)
+ }
+}
+
+func (h *Handler) printf(format string, v ...interface{}) {
+ if h.Logger != nil {
+ h.Logger.Printf(format, v...)
+ } else {
+ log.Printf(format, v...)
+ }
+}
+
+func (h *Handler) handleInternalRedirect(rw http.ResponseWriter, req *http.Request, path string) {
+ url, err := req.URL.Parse(path)
+ if err != nil {
+ rw.WriteHeader(http.StatusInternalServerError)
+ h.printf("cgi: error resolving local URI path %q: %v", path, err)
+ return
+ }
+ // TODO: RFC 3875 isn't clear if only GET is supported, but it
+ // suggests so: "Note that any message-body attached to the
+ // request (such as for a POST request) may not be available
+ // to the resource that is the target of the redirect." We
+ // should do some tests against Apache to see how it handles
+ // POST, HEAD, etc. Does the internal redirect get the same
+ // method or just GET? What about incoming headers?
+ // (e.g. Cookies) Which headers, if any, are copied into the
+ // second request?
+ newReq := &http.Request{
+ Method: "GET",
+ URL: url,
+ Proto: "HTTP/1.1",
+ ProtoMajor: 1,
+ ProtoMinor: 1,
+ Header: make(http.Header),
+ Host: url.Host,
+ RemoteAddr: req.RemoteAddr,
+ TLS: req.TLS,
+ }
+ h.PathLocationHandler.ServeHTTP(rw, newReq)
+}
+
+func upperCaseAndUnderscore(r rune) rune {
+ switch {
+ case r >= 'a' && r <= 'z':
+ return r - ('a' - 'A')
+ case r == '-':
+ return '_'
+ case r == '=':
+ // Maybe not part of the CGI 'spec' but would mess up
+ // the environment in any case, as Go represents the
+ // environment as a slice of "key=value" strings.
+ return '_'
+ }
+ // TODO: other transformations in spec or practice?
+ return r
+}
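// Illustrative sketch, not part of the patch above: mounting Handler on a web
// server. The script path and URL prefix are hypothetical.
//
//	http.Handle("/cgi-bin/hello", &cgi.Handler{
//		Path: "/usr/lib/cgi-bin/hello.cgi", // hypothetical CGI executable
//		Root: "/cgi-bin/hello",
//	})
//	log.Fatal(http.ListenAndServe(":8080", nil))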
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Tests for package cgi
+
+package cgi
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "strconv"
+ "strings"
+ "testing"
+ "time"
+)
+
+func newRequest(httpreq string) *http.Request {
+ buf := bufio.NewReader(strings.NewReader(httpreq))
+ req, err := http.ReadRequest(buf)
+ if err != nil {
+ panic("cgi: bogus http request in test: " + httpreq)
+ }
+ req.RemoteAddr = "1.2.3.4"
+ return req
+}
+
+func runCgiTest(t *testing.T, h *Handler, httpreq string, expectedMap map[string]string) *httptest.ResponseRecorder {
+ rw := httptest.NewRecorder()
+ req := newRequest(httpreq)
+ h.ServeHTTP(rw, req)
+
+ // Make a map to hold the test map that the CGI returns.
+ m := make(map[string]string)
+ linesRead := 0
+readlines:
+ for {
+ line, err := rw.Body.ReadString('\n')
+ switch {
+ case err == io.EOF:
+ break readlines
+ case err != nil:
+ t.Fatalf("unexpected error reading from CGI: %v", err)
+ }
+ linesRead++
+ trimmedLine := strings.TrimRight(line, "\r\n")
+ split := strings.SplitN(trimmedLine, "=", 2)
+ if len(split) != 2 {
+ t.Fatalf("Unexpected %d parts from invalid line number %v: %q; existing map=%v",
+ len(split), linesRead, line, m)
+ }
+ m[split[0]] = split[1]
+ }
+
+ for key, expected := range expectedMap {
+ if got := m[key]; got != expected {
+ t.Errorf("for key %q got %q; expected %q", key, got, expected)
+ }
+ }
+ return rw
+}
+
+var cgiTested = false
+var cgiWorks bool
+
+func skipTest(t *testing.T) bool {
+ if !cgiTested {
+ cgiTested = true
+ cgiWorks = exec.Command("./testdata/test.cgi").Run() == nil
+ }
+ if !cgiWorks {
+ // test.cgi needs Perl, which may be missing (e.g. on Windows).
+ // TODO: make the child process be Go, not Perl.
+ t.Logf("Skipping test: test.cgi failed.")
+ return true
+ }
+ return false
+}
+
+func TestCGIBasicGet(t *testing.T) {
+ if skipTest(t) {
+ return
+ }
+ h := &Handler{
+ Path: "testdata/test.cgi",
+ Root: "/test.cgi",
+ }
+ expectedMap := map[string]string{
+ "test": "Hello CGI",
+ "param-a": "b",
+ "param-foo": "bar",
+ "env-GATEWAY_INTERFACE": "CGI/1.1",
+ "env-HTTP_HOST": "example.com",
+ "env-PATH_INFO": "",
+ "env-QUERY_STRING": "foo=bar&a=b",
+ "env-REMOTE_ADDR": "1.2.3.4",
+ "env-REMOTE_HOST": "1.2.3.4",
+ "env-REQUEST_METHOD": "GET",
+ "env-REQUEST_URI": "/test.cgi?foo=bar&a=b",
+ "env-SCRIPT_FILENAME": "testdata/test.cgi",
+ "env-SCRIPT_NAME": "/test.cgi",
+ "env-SERVER_NAME": "example.com",
+ "env-SERVER_PORT": "80",
+ "env-SERVER_SOFTWARE": "go",
+ }
+ replay := runCgiTest(t, h, "GET /test.cgi?foo=bar&a=b HTTP/1.0\nHost: example.com\n\n", expectedMap)
+
+ if expected, got := "text/html", replay.Header().Get("Content-Type"); got != expected {
+ t.Errorf("got a Content-Type of %q; expected %q", got, expected)
+ }
+ if expected, got := "X-Test-Value", replay.Header().Get("X-Test-Header"); got != expected {
+ t.Errorf("got a X-Test-Header of %q; expected %q", got, expected)
+ }
+}
+
+func TestCGIBasicGetAbsPath(t *testing.T) {
+ if skipTest(t) {
+ return
+ }
+ pwd, err := os.Getwd()
+ if err != nil {
+ t.Fatalf("getwd error: %v", err)
+ }
+ h := &Handler{
+ Path: pwd + "/testdata/test.cgi",
+ Root: "/test.cgi",
+ }
+ expectedMap := map[string]string{
+ "env-REQUEST_URI": "/test.cgi?foo=bar&a=b",
+ "env-SCRIPT_FILENAME": pwd + "/testdata/test.cgi",
+ "env-SCRIPT_NAME": "/test.cgi",
+ }
+ runCgiTest(t, h, "GET /test.cgi?foo=bar&a=b HTTP/1.0\nHost: example.com\n\n", expectedMap)
+}
+
+func TestPathInfo(t *testing.T) {
+ if skipTest(t) {
+ return
+ }
+ h := &Handler{
+ Path: "testdata/test.cgi",
+ Root: "/test.cgi",
+ }
+ expectedMap := map[string]string{
+ "param-a": "b",
+ "env-PATH_INFO": "/extrapath",
+ "env-QUERY_STRING": "a=b",
+ "env-REQUEST_URI": "/test.cgi/extrapath?a=b",
+ "env-SCRIPT_FILENAME": "testdata/test.cgi",
+ "env-SCRIPT_NAME": "/test.cgi",
+ }
+ runCgiTest(t, h, "GET /test.cgi/extrapath?a=b HTTP/1.0\nHost: example.com\n\n", expectedMap)
+}
+
+func TestPathInfoDirRoot(t *testing.T) {
+ if skipTest(t) {
+ return
+ }
+ h := &Handler{
+ Path: "testdata/test.cgi",
+ Root: "/myscript/",
+ }
+ expectedMap := map[string]string{
+ "env-PATH_INFO": "bar",
+ "env-QUERY_STRING": "a=b",
+ "env-REQUEST_URI": "/myscript/bar?a=b",
+ "env-SCRIPT_FILENAME": "testdata/test.cgi",
+ "env-SCRIPT_NAME": "/myscript/",
+ }
+ runCgiTest(t, h, "GET /myscript/bar?a=b HTTP/1.0\nHost: example.com\n\n", expectedMap)
+}
+
+func TestDupHeaders(t *testing.T) {
+ if skipTest(t) {
+ return
+ }
+ h := &Handler{
+ Path: "testdata/test.cgi",
+ }
+ expectedMap := map[string]string{
+ "env-REQUEST_URI": "/myscript/bar?a=b",
+ "env-SCRIPT_FILENAME": "testdata/test.cgi",
+ "env-HTTP_COOKIE": "nom=NOM; yum=YUM",
+ "env-HTTP_X_FOO": "val1, val2",
+ }
+ runCgiTest(t, h, "GET /myscript/bar?a=b HTTP/1.0\n"+
+ "Cookie: nom=NOM\n"+
+ "Cookie: yum=YUM\n"+
+ "X-Foo: val1\n"+
+ "X-Foo: val2\n"+
+ "Host: example.com\n\n",
+ expectedMap)
+}
+
+func TestPathInfoNoRoot(t *testing.T) {
+ if skipTest(t) {
+ return
+ }
+ h := &Handler{
+ Path: "testdata/test.cgi",
+ Root: "",
+ }
+ expectedMap := map[string]string{
+ "env-PATH_INFO": "/bar",
+ "env-QUERY_STRING": "a=b",
+ "env-REQUEST_URI": "/bar?a=b",
+ "env-SCRIPT_FILENAME": "testdata/test.cgi",
+ "env-SCRIPT_NAME": "/",
+ }
+ runCgiTest(t, h, "GET /bar?a=b HTTP/1.0\nHost: example.com\n\n", expectedMap)
+}
+
+func TestCGIBasicPost(t *testing.T) {
+ if skipTest(t) {
+ return
+ }
+ postReq := `POST /test.cgi?a=b HTTP/1.0
+Host: example.com
+Content-Type: application/x-www-form-urlencoded
+Content-Length: 15
+
+postfoo=postbar`
+ h := &Handler{
+ Path: "testdata/test.cgi",
+ Root: "/test.cgi",
+ }
+ expectedMap := map[string]string{
+ "test": "Hello CGI",
+ "param-postfoo": "postbar",
+ "env-REQUEST_METHOD": "POST",
+ "env-CONTENT_LENGTH": "15",
+ "env-REQUEST_URI": "/test.cgi?a=b",
+ }
+ runCgiTest(t, h, postReq, expectedMap)
+}
+
+func chunk(s string) string {
+ return fmt.Sprintf("%x\r\n%s\r\n", len(s), s)
+}
+
+// The CGI spec doesn't allow chunked requests.
+func TestCGIPostChunked(t *testing.T) {
+ if skipTest(t) {
+ return
+ }
+ postReq := `POST /test.cgi?a=b HTTP/1.1
+Host: example.com
+Content-Type: application/x-www-form-urlencoded
+Transfer-Encoding: chunked
+
+` + chunk("postfoo") + chunk("=") + chunk("postbar") + chunk("")
+
+ h := &Handler{
+ Path: "testdata/test.cgi",
+ Root: "/test.cgi",
+ }
+ expectedMap := map[string]string{}
+ resp := runCgiTest(t, h, postReq, expectedMap)
+ if got, expected := resp.Code, http.StatusBadRequest; got != expected {
+ t.Fatalf("Expected %v response code from chunked request body; got %d",
+ expected, got)
+ }
+}
+
+func TestRedirect(t *testing.T) {
+ if skipTest(t) {
+ return
+ }
+ h := &Handler{
+ Path: "testdata/test.cgi",
+ Root: "/test.cgi",
+ }
+ rec := runCgiTest(t, h, "GET /test.cgi?loc=http://foo.com/ HTTP/1.0\nHost: example.com\n\n", nil)
+ if e, g := 302, rec.Code; e != g {
+ t.Errorf("expected status code %d; got %d", e, g)
+ }
+ if e, g := "http://foo.com/", rec.Header().Get("Location"); e != g {
+ t.Errorf("expected Location header of %q; got %q", e, g)
+ }
+}
+
+func TestInternalRedirect(t *testing.T) {
+ if skipTest(t) {
+ return
+ }
+ baseHandler := http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
+ fmt.Fprintf(rw, "basepath=%s\n", req.URL.Path)
+ fmt.Fprintf(rw, "remoteaddr=%s\n", req.RemoteAddr)
+ })
+ h := &Handler{
+ Path: "testdata/test.cgi",
+ Root: "/test.cgi",
+ PathLocationHandler: baseHandler,
+ }
+ expectedMap := map[string]string{
+ "basepath": "/foo",
+ "remoteaddr": "1.2.3.4",
+ }
+ runCgiTest(t, h, "GET /test.cgi?loc=/foo HTTP/1.0\nHost: example.com\n\n", expectedMap)
+}
+
+// TestCopyError tests that we kill the process if there's an error copying
+// its output (for example, because the client has gone away).
+func TestCopyError(t *testing.T) {
+ if skipTest(t) || runtime.GOOS == "windows" {
+ return
+ }
+ h := &Handler{
+ Path: "testdata/test.cgi",
+ Root: "/test.cgi",
+ }
+ ts := httptest.NewServer(h)
+ defer ts.Close()
+
+ conn, err := net.Dial("tcp", ts.Listener.Addr().String())
+ if err != nil {
+ t.Fatal(err)
+ }
+ req, _ := http.NewRequest("GET", "http://example.com/test.cgi?bigresponse=1", nil)
+ err = req.Write(conn)
+ if err != nil {
+ t.Fatalf("Write: %v", err)
+ }
+
+ res, err := http.ReadResponse(bufio.NewReader(conn), req)
+ if err != nil {
+ t.Fatalf("ReadResponse: %v", err)
+ }
+
+ pidstr := res.Header.Get("X-CGI-Pid")
+ if pidstr == "" {
+ t.Fatalf("expected an X-CGI-Pid header in response")
+ }
+ pid, err := strconv.Atoi(pidstr)
+ if err != nil {
+ t.Fatalf("invalid X-CGI-Pid value")
+ }
+
+ var buf [5000]byte
+ n, err := io.ReadFull(res.Body, buf[:])
+ if err != nil {
+ t.Fatalf("ReadFull: %d bytes, %v", n, err)
+ }
+
+ childRunning := func() bool {
+ p, err := os.FindProcess(pid)
+ if err != nil {
+ return false
+ }
+ return p.Signal(os.UnixSignal(0)) == nil
+ }
+
+ if !childRunning() {
+ t.Fatalf("pre-conn.Close, expected child to be running")
+ }
+ conn.Close()
+
+ if tries := 0; childRunning() {
+ for tries < 15 && childRunning() {
+ time.Sleep(50e6 * int64(tries))
+ tries++
+ }
+ if childRunning() {
+ t.Fatalf("post-conn.Close, expected child to be gone")
+ }
+ }
+}
+
+func TestDirUnix(t *testing.T) {
+ if skipTest(t) || runtime.GOOS == "windows" {
+ return
+ }
+
+ cwd, _ := os.Getwd()
+ h := &Handler{
+ Path: "testdata/test.cgi",
+ Root: "/test.cgi",
+ Dir: cwd,
+ }
+ expectedMap := map[string]string{
+ "cwd": cwd,
+ }
+ runCgiTest(t, h, "GET /test.cgi HTTP/1.0\nHost: example.com\n\n", expectedMap)
+
+ cwd, _ = os.Getwd()
+ cwd = filepath.Join(cwd, "testdata")
+ h = &Handler{
+ Path: "testdata/test.cgi",
+ Root: "/test.cgi",
+ }
+ abswd, _ := filepath.EvalSymlinks(cwd)
+ expectedMap = map[string]string{
+ "cwd": abswd,
+ }
+ runCgiTest(t, h, "GET /test.cgi HTTP/1.0\nHost: example.com\n\n", expectedMap)
+}
+
+func TestDirWindows(t *testing.T) {
+ if skipTest(t) || runtime.GOOS != "windows" {
+ return
+ }
+
+ cgifile, _ := filepath.Abs("testdata/test.cgi")
+
+ var perl string
+ var err error
+ perl, err = exec.LookPath("perl")
+ if err != nil {
+ return
+ }
+ perl, _ = filepath.Abs(perl)
+
+ cwd, _ := os.Getwd()
+ h := &Handler{
+ Path: perl,
+ Root: "/test.cgi",
+ Dir: cwd,
+ Args: []string{cgifile},
+ Env: []string{"SCRIPT_FILENAME=" + cgifile},
+ }
+ expectedMap := map[string]string{
+ "cwd": cwd,
+ }
+ runCgiTest(t, h, "GET /test.cgi HTTP/1.0\nHost: example.com\n\n", expectedMap)
+
+ // If Dir is not specified on Windows, the working directory should be
+ // the base directory of perl.
+ cwd, _ = filepath.Split(perl)
+ if cwd != "" && cwd[len(cwd)-1] == filepath.Separator {
+ cwd = cwd[:len(cwd)-1]
+ }
+ h = &Handler{
+ Path: perl,
+ Root: "/test.cgi",
+ Args: []string{cgifile},
+ Env: []string{"SCRIPT_FILENAME=" + cgifile},
+ }
+ expectedMap = map[string]string{
+ "cwd": cwd,
+ }
+ runCgiTest(t, h, "GET /test.cgi HTTP/1.0\nHost: example.com\n\n", expectedMap)
+}
+
+func TestEnvOverride(t *testing.T) {
+ cgifile, _ := filepath.Abs("testdata/test.cgi")
+
+ var perl string
+ var err error
+ perl, err = exec.LookPath("perl")
+ if err != nil {
+ return
+ }
+ perl, _ = filepath.Abs(perl)
+
+ cwd, _ := os.Getwd()
+ h := &Handler{
+ Path: perl,
+ Root: "/test.cgi",
+ Dir: cwd,
+ Args: []string{cgifile},
+ Env: []string{
+ "SCRIPT_FILENAME=" + cgifile,
+ "REQUEST_URI=/foo/bar"},
+ }
+ expectedMap := map[string]string{
+ "cwd": cwd,
+ "env-SCRIPT_FILENAME": cgifile,
+ "env-REQUEST_URI": "/foo/bar",
+ }
+ runCgiTest(t, h, "GET /test.cgi HTTP/1.0\nHost: example.com\n\n", expectedMap)
+}
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Tests a Go CGI program running under a Go CGI host process.
+// Further, the two programs are the same binary, just checking
+// their environment to figure out what mode to run in.
+
+package cgi
+
+import (
+ "fmt"
+ "net/http"
+ "os"
+ "testing"
+)
+
+// This test is a CGI host (testing host.go) that runs its own binary
+// as a child process testing the other half of CGI (child.go).
+func TestHostingOurselves(t *testing.T) {
+ h := &Handler{
+ Path: os.Args[0],
+ Root: "/test.go",
+ Args: []string{"-test.run=TestBeChildCGIProcess"},
+ }
+ expectedMap := map[string]string{
+ "test": "Hello CGI-in-CGI",
+ "param-a": "b",
+ "param-foo": "bar",
+ "env-GATEWAY_INTERFACE": "CGI/1.1",
+ "env-HTTP_HOST": "example.com",
+ "env-PATH_INFO": "",
+ "env-QUERY_STRING": "foo=bar&a=b",
+ "env-REMOTE_ADDR": "1.2.3.4",
+ "env-REMOTE_HOST": "1.2.3.4",
+ "env-REQUEST_METHOD": "GET",
+ "env-REQUEST_URI": "/test.go?foo=bar&a=b",
+ "env-SCRIPT_FILENAME": os.Args[0],
+ "env-SCRIPT_NAME": "/test.go",
+ "env-SERVER_NAME": "example.com",
+ "env-SERVER_PORT": "80",
+ "env-SERVER_SOFTWARE": "go",
+ }
+ replay := runCgiTest(t, h, "GET /test.go?foo=bar&a=b HTTP/1.0\nHost: example.com\n\n", expectedMap)
+
+ if expected, got := "text/html; charset=utf-8", replay.Header().Get("Content-Type"); got != expected {
+ t.Errorf("got a Content-Type of %q; expected %q", got, expected)
+ }
+ if expected, got := "X-Test-Value", replay.Header().Get("X-Test-Header"); got != expected {
+ t.Errorf("got a X-Test-Header of %q; expected %q", got, expected)
+ }
+}
+
+// Note: not actually a test.
+func TestBeChildCGIProcess(t *testing.T) {
+ if os.Getenv("REQUEST_METHOD") == "" {
+ // Not in a CGI environment; skipping test.
+ return
+ }
+ Serve(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
+ rw.Header().Set("X-Test-Header", "X-Test-Value")
+ fmt.Fprintf(rw, "test=Hello CGI-in-CGI\n")
+ req.ParseForm()
+ for k, vv := range req.Form {
+ for _, v := range vv {
+ fmt.Fprintf(rw, "param-%s=%s\n", k, v)
+ }
+ }
+ for _, kv := range os.Environ() {
+ fmt.Fprintf(rw, "env-%s\n", kv)
+ }
+ }))
+ os.Exit(0)
+}
--- /dev/null
+#!/usr/bin/perl
+# Copyright 2011 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+#
+# Test script run as a child process under cgi_test.go
+
+use strict;
+use Cwd;
+
+my $q = MiniCGI->new;
+my $params = $q->Vars;
+
+if ($params->{"loc"}) {
+ print "Location: $params->{loc}\r\n\r\n";
+ exit(0);
+}
+
+my $NL = "\r\n";
+$NL = "\n" if $params->{mode} eq "NL";
+
+my $p = sub {
+ print "$_[0]$NL";
+};
+
+# With carriage returns
+$p->("Content-Type: text/html");
+$p->("X-CGI-Pid: $$");
+$p->("X-Test-Header: X-Test-Value");
+$p->("");
+
+if ($params->{"bigresponse"}) {
+ for (1..1024) {
+ print "A" x 1024, "\n";
+ }
+ exit 0;
+}
+
+print "test=Hello CGI\n";
+
+foreach my $k (sort keys %$params) {
+ print "param-$k=$params->{$k}\n";
+}
+
+foreach my $k (sort keys %ENV) {
+ my $clean_env = $ENV{$k};
+ $clean_env =~ s/[\n\r]//g;
+ print "env-$k=$clean_env\n";
+}
+
+ # NOTE: don't call getcwd() on Windows;
+ # msys returns /c/go/src/... not C:\go\...
+my $dir;
+if ($^O eq 'MSWin32' || $^O eq 'msys') {
+ my $cmd = $ENV{'COMSPEC'} || 'c:\\windows\\system32\\cmd.exe';
+ $cmd =~ s!\\!/!g;
+ $dir = `$cmd /c cd`;
+ chomp $dir;
+} else {
+ $dir = getcwd();
+}
+print "cwd=$dir\n";
+
+
+# A minimal version of CGI.pm, for people without the perl-modules
+# package installed. (CGI.pm used to be part of the Perl core, but
+# some distros now bundle perl-base and perl-modules separately...)
+package MiniCGI;
+
+sub new {
+ my $class = shift;
+ return bless {}, $class;
+}
+
+sub Vars {
+ my $self = shift;
+ my $pairs;
+ if ($ENV{CONTENT_LENGTH}) {
+ $pairs = do { local $/; <STDIN> };
+ } else {
+ $pairs = $ENV{QUERY_STRING};
+ }
+ my $vars = {};
+ foreach my $kv (split(/&/, $pairs)) {
+ my ($k, $v) = split(/=/, $kv, 2);
+ $vars->{_urldecode($k)} = _urldecode($v);
+ }
+ return $vars;
+}
+
+sub _urldecode {
+ my $v = shift;
+ $v =~ tr/+/ /;
+ $v =~ s/%([a-fA-F0-9][a-fA-F0-9])/pack("C", hex($1))/eg;
+ return $v;
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http
+
+import (
+ "bufio"
+ "io"
+ "strconv"
+)
+
+func newChunkedWriter(w io.Writer) io.WriteCloser {
+ return &chunkedWriter{w}
+}
+
+// Writing to ChunkedWriter translates to writing in HTTP chunked Transfer
+// Encoding wire format to the underlying Wire writer.
+type chunkedWriter struct {
+ Wire io.Writer
+}
+
+// Write the contents of data as one chunk to Wire.
+ // NOTE: the corresponding chunk-writing procedure in Conn.Write has a bug:
+ // it does not check whether io.WriteString succeeded.
+func (cw *chunkedWriter) Write(data []byte) (n int, err error) {
+
+ // Don't send 0-length data. It looks like EOF for chunked encoding.
+ if len(data) == 0 {
+ return 0, nil
+ }
+
+ head := strconv.Itob(len(data), 16) + "\r\n"
+
+ if _, err = io.WriteString(cw.Wire, head); err != nil {
+ return 0, err
+ }
+ if n, err = cw.Wire.Write(data); err != nil {
+ return
+ }
+ if n != len(data) {
+ err = io.ErrShortWrite
+ return
+ }
+ _, err = io.WriteString(cw.Wire, "\r\n")
+
+ return
+}
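+
+ // For illustration (not part of the original change): with the format above,
+ // a hypothetical caller writing a single chunk sees the following bytes on
+ // the underlying Wire writer:
+ //
+ //	cw := newChunkedWriter(&buf)  // buf is a bytes.Buffer (assumption)
+ //	cw.Write([]byte("Hello"))     // buf now holds "5\r\nHello\r\n"
+ //	cw.Close()                    // appends the terminating "0\r\n"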
+
+func (cw *chunkedWriter) Close() error {
+ _, err := io.WriteString(cw.Wire, "0\r\n")
+ return err
+}
+
+func newChunkedReader(r *bufio.Reader) io.Reader {
+ return &chunkedReader{r: r}
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// HTTP client. See RFC 2616.
+//
+// This is the high-level Client interface.
+// The low-level implementation is in transport.go.
+
+package http
+
+import (
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "io"
+ "net/url"
+ "strings"
+)
+
+// A Client is an HTTP client. Its zero value (DefaultClient) is a usable client
+// that uses DefaultTransport.
+//
+// The Client's Transport typically has internal state (cached
+// TCP connections), so Clients should be reused instead of created as
+// needed. Clients are safe for concurrent use by multiple goroutines.
+//
+// Client is not yet very configurable.
+type Client struct {
+ Transport RoundTripper // if nil, DefaultTransport is used
+
+ // If CheckRedirect is not nil, the client calls it before
+ // following an HTTP redirect. The arguments req and via
+ // are the upcoming request and the requests made already,
+ // oldest first. If CheckRedirect returns an error, the client
+ // returns that error instead of issuing the Request req.
+ //
+ // If CheckRedirect is nil, the Client uses its default policy,
+ // which is to stop after 10 consecutive requests.
+ CheckRedirect func(req *Request, via []*Request) error
+}
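+
+ // As an illustrative sketch (not part of the original change), a custom
+ // policy that allows at most three redirects could be set like this; the
+ // limit of three is an arbitrary example:
+ //
+ //	c := &Client{
+ //		CheckRedirect: func(req *Request, via []*Request) error {
+ //			if len(via) >= 3 {
+ //				return errors.New("stopped after 3 redirects")
+ //			}
+ //			return nil
+ //		},
+ //	}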
+
+// DefaultClient is the default Client and is used by Get, Head, and Post.
+var DefaultClient = &Client{}
+
+// RoundTripper is an interface representing the ability to execute a
+// single HTTP transaction, obtaining the Response for a given Request.
+//
+// A RoundTripper must be safe for concurrent use by multiple
+// goroutines.
+type RoundTripper interface {
+ // RoundTrip executes a single HTTP transaction, returning
+ // the Response for the request req. RoundTrip should not
+ // attempt to interpret the response. In particular,
+ // RoundTrip must return err == nil if it obtained a response,
+ // regardless of the response's HTTP status code. A non-nil
+ // err should be reserved for failure to obtain a response.
+ // Similarly, RoundTrip should not attempt to handle
+ // higher-level protocol details such as redirects,
+ // authentication, or cookies.
+ //
+ // RoundTrip should not modify the request, except for
+ // consuming the Body. The request's URL and Header fields
+ // are guaranteed to be initialized.
+ RoundTrip(*Request) (*Response, error)
+}
+
+// Given a string of the form "host", "host:port", or "[ipv6::address]:port",
+// return true if the string includes a port.
+func hasPort(s string) bool { return strings.LastIndex(s, ":") > strings.LastIndex(s, "]") }
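+
+ // Illustrative cases (not in the original source):
+ //
+ //	hasPort("example.com")    // false
+ //	hasPort("example.com:80") // true
+ //	hasPort("[::1]")          // false
+ //	hasPort("[::1]:80")       // true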
+
+// Used in Send to implement io.ReadCloser by bundling together the
+// bufio.Reader through which we read the response, and the underlying
+// network connection.
+type readClose struct {
+ io.Reader
+ io.Closer
+}
+
+// Do sends an HTTP request and returns an HTTP response, following
+// policy (e.g. redirects, cookies, auth) as configured on the client.
+//
+// A non-nil response always contains a non-nil resp.Body.
+//
+// Callers should close resp.Body when done reading from it. If
+// resp.Body is not closed, the Client's underlying RoundTripper
+// (typically Transport) may not be able to re-use a persistent TCP
+// connection to the server for a subsequent "keep-alive" request.
+//
+// Generally Get, Post, or PostForm will be used instead of Do.
+func (c *Client) Do(req *Request) (resp *Response, err error) {
+ if req.Method == "GET" || req.Method == "HEAD" {
+ return c.doFollowingRedirects(req)
+ }
+ return send(req, c.Transport)
+}
+
+// send issues an HTTP request. Caller should close resp.Body when done reading from it.
+func send(req *Request, t RoundTripper) (resp *Response, err error) {
+ if t == nil {
+ t = DefaultTransport
+ if t == nil {
+ err = errors.New("http: no Client.Transport or DefaultTransport")
+ return
+ }
+ }
+
+ if req.URL == nil {
+ return nil, errors.New("http: nil Request.URL")
+ }
+
+ // Most of the callers of send (Get, Post, et al) don't need
+ // Headers, leaving it uninitialized. We guarantee to the
+ // Transport that this has been initialized, though.
+ if req.Header == nil {
+ req.Header = make(Header)
+ }
+
+ info := req.URL.RawUserinfo
+ if len(info) > 0 {
+ req.Header.Set("Authorization", "Basic "+base64.URLEncoding.EncodeToString([]byte(info)))
+ }
+ return t.RoundTrip(req)
+}
+
+// True if the specified HTTP status code is one for which the Get utility should
+// automatically redirect.
+func shouldRedirect(statusCode int) bool {
+ switch statusCode {
+ case StatusMovedPermanently, StatusFound, StatusSeeOther, StatusTemporaryRedirect:
+ return true
+ }
+ return false
+}
+
+// Get issues a GET to the specified URL. If the response is one of the following
+// redirect codes, Get follows the redirect, up to a maximum of 10 redirects:
+//
+// 301 (Moved Permanently)
+// 302 (Found)
+// 303 (See Other)
+// 307 (Temporary Redirect)
+//
+// Caller should close r.Body when done reading from it.
+//
+// Get is a wrapper around DefaultClient.Get.
+func Get(url string) (r *Response, err error) {
+ return DefaultClient.Get(url)
+}
+
+// Get issues a GET to the specified URL. If the response is one of the
+// following redirect codes, Get follows the redirect after calling the
+// Client's CheckRedirect function.
+//
+// 301 (Moved Permanently)
+// 302 (Found)
+// 303 (See Other)
+// 307 (Temporary Redirect)
+//
+// Caller should close r.Body when done reading from it.
+func (c *Client) Get(url string) (r *Response, err error) {
+ req, err := NewRequest("GET", url, nil)
+ if err != nil {
+ return nil, err
+ }
+ return c.doFollowingRedirects(req)
+}
+
+func (c *Client) doFollowingRedirects(ireq *Request) (r *Response, err error) {
+ // TODO: if/when we add cookie support, the redirected request shouldn't
+ // necessarily supply the same cookies as the original.
+ var base *url.URL
+ redirectChecker := c.CheckRedirect
+ if redirectChecker == nil {
+ redirectChecker = defaultCheckRedirect
+ }
+ var via []*Request
+
+ if ireq.URL == nil {
+ return nil, errors.New("http: nil Request.URL")
+ }
+
+ req := ireq
+ urlStr := "" // next relative or absolute URL to fetch (after first request)
+ for redirect := 0; ; redirect++ {
+ if redirect != 0 {
+ req = new(Request)
+ req.Method = ireq.Method
+ req.Header = make(Header)
+ req.URL, err = base.Parse(urlStr)
+ if err != nil {
+ break
+ }
+ if len(via) > 0 {
+ // Add the Referer header.
+ lastReq := via[len(via)-1]
+ if lastReq.URL.Scheme != "https" {
+ req.Header.Set("Referer", lastReq.URL.String())
+ }
+
+ err = redirectChecker(req, via)
+ if err != nil {
+ break
+ }
+ }
+ }
+
+ urlStr = req.URL.String()
+ if r, err = send(req, c.Transport); err != nil {
+ break
+ }
+ if shouldRedirect(r.StatusCode) {
+ r.Body.Close()
+ if urlStr = r.Header.Get("Location"); urlStr == "" {
+ err = fmt.Errorf("%d response missing Location header", r.StatusCode)
+ break
+ }
+ base = req.URL
+ via = append(via, req)
+ continue
+ }
+ return
+ }
+
+ method := ireq.Method
+ err = &url.Error{method[0:1] + strings.ToLower(method[1:]), urlStr, err}
+ return
+}
+
+func defaultCheckRedirect(req *Request, via []*Request) error {
+ if len(via) >= 10 {
+ return errors.New("stopped after 10 redirects")
+ }
+ return nil
+}
+
+// Post issues a POST to the specified URL.
+//
+// Caller should close r.Body when done reading from it.
+//
+// Post is a wrapper around DefaultClient.Post
+func Post(url string, bodyType string, body io.Reader) (r *Response, err error) {
+ return DefaultClient.Post(url, bodyType, body)
+}
+
+// Post issues a POST to the specified URL.
+//
+// Caller should close r.Body when done reading from it.
+func (c *Client) Post(url string, bodyType string, body io.Reader) (r *Response, err error) {
+ req, err := NewRequest("POST", url, body)
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("Content-Type", bodyType)
+ return send(req, c.Transport)
+}
+
+// PostForm issues a POST to the specified URL,
+// with data's keys and values urlencoded as the request body.
+//
+// Caller should close r.Body when done reading from it.
+//
+// PostForm is a wrapper around DefaultClient.PostForm
+func PostForm(url string, data url.Values) (r *Response, err error) {
+ return DefaultClient.PostForm(url, data)
+}
+
+// PostForm issues a POST to the specified URL,
+// with data's keys and values urlencoded as the request body.
+//
+// Caller should close r.Body when done reading from it.
+func (c *Client) PostForm(url string, data url.Values) (r *Response, err error) {
+ return c.Post(url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
+}
+
+// Head issues a HEAD to the specified URL. If the response is one of the
+// following redirect codes, Head follows the redirect after calling the
+// Client's CheckRedirect function.
+//
+// 301 (Moved Permanently)
+// 302 (Found)
+// 303 (See Other)
+// 307 (Temporary Redirect)
+//
+// Head is a wrapper around DefaultClient.Head
+func Head(url string) (r *Response, err error) {
+ return DefaultClient.Head(url)
+}
+
+// Head issues a HEAD to the specified URL. If the response is one of the
+// following redirect codes, Head follows the redirect after calling the
+// Client's CheckRedirect function.
+//
+// 301 (Moved Permanently)
+// 302 (Found)
+// 303 (See Other)
+// 307 (Temporary Redirect)
+func (c *Client) Head(url string) (r *Response, err error) {
+ req, err := NewRequest("HEAD", url, nil)
+ if err != nil {
+ return nil, err
+ }
+ return c.doFollowingRedirects(req)
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Tests for client.go
+
+package http_test
+
+import (
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net"
+ . "net/http"
+ "net/http/httptest"
+ "net/url"
+ "strconv"
+ "strings"
+ "testing"
+)
+
+var robotsTxtHandler = HandlerFunc(func(w ResponseWriter, r *Request) {
+ w.Header().Set("Last-Modified", "sometime")
+ fmt.Fprintf(w, "User-agent: go\nDisallow: /something/")
+})
+
+func TestClient(t *testing.T) {
+ ts := httptest.NewServer(robotsTxtHandler)
+ defer ts.Close()
+
+ r, err := Get(ts.URL)
+ var b []byte
+ if err == nil {
+ b, err = ioutil.ReadAll(r.Body)
+ r.Body.Close()
+ }
+ if err != nil {
+ t.Error(err)
+ } else if s := string(b); !strings.HasPrefix(s, "User-agent:") {
+ t.Errorf("Incorrect page body (did not begin with User-agent): %q", s)
+ }
+}
+
+func TestClientHead(t *testing.T) {
+ ts := httptest.NewServer(robotsTxtHandler)
+ defer ts.Close()
+
+ r, err := Head(ts.URL)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if _, ok := r.Header["Last-Modified"]; !ok {
+ t.Error("Last-Modified header not found.")
+ }
+}
+
+type recordingTransport struct {
+ req *Request
+}
+
+func (t *recordingTransport) RoundTrip(req *Request) (resp *Response, err error) {
+ t.req = req
+ return nil, errors.New("dummy impl")
+}
+
+func TestGetRequestFormat(t *testing.T) {
+ tr := &recordingTransport{}
+ client := &Client{Transport: tr}
+ url := "http://dummy.faketld/"
+ client.Get(url) // Note: doesn't hit network
+ if tr.req.Method != "GET" {
+ t.Errorf("expected method %q; got %q", "GET", tr.req.Method)
+ }
+ if tr.req.URL.String() != url {
+ t.Errorf("expected URL %q; got %q", url, tr.req.URL.String())
+ }
+ if tr.req.Header == nil {
+ t.Errorf("expected non-nil request Header")
+ }
+}
+
+func TestPostRequestFormat(t *testing.T) {
+ tr := &recordingTransport{}
+ client := &Client{Transport: tr}
+
+ url := "http://dummy.faketld/"
+ json := `{"key":"value"}`
+ b := strings.NewReader(json)
+ client.Post(url, "application/json", b) // Note: doesn't hit network
+
+ if tr.req.Method != "POST" {
+ t.Errorf("got method %q, want %q", tr.req.Method, "POST")
+ }
+ if tr.req.URL.String() != url {
+ t.Errorf("got URL %q, want %q", tr.req.URL.String(), url)
+ }
+ if tr.req.Header == nil {
+ t.Fatalf("expected non-nil request Header")
+ }
+ if tr.req.Close {
+ t.Error("got Close true, want false")
+ }
+ if g, e := tr.req.ContentLength, int64(len(json)); g != e {
+ t.Errorf("got ContentLength %d, want %d", g, e)
+ }
+}
+
+func TestPostFormRequestFormat(t *testing.T) {
+ tr := &recordingTransport{}
+ client := &Client{Transport: tr}
+
+ urlStr := "http://dummy.faketld/"
+ form := make(url.Values)
+ form.Set("foo", "bar")
+ form.Add("foo", "bar2")
+ form.Set("bar", "baz")
+ client.PostForm(urlStr, form) // Note: doesn't hit network
+
+ if tr.req.Method != "POST" {
+ t.Errorf("got method %q, want %q", tr.req.Method, "POST")
+ }
+ if tr.req.URL.String() != urlStr {
+ t.Errorf("got URL %q, want %q", tr.req.URL.String(), urlStr)
+ }
+ if tr.req.Header == nil {
+ t.Fatalf("expected non-nil request Header")
+ }
+ if g, e := tr.req.Header.Get("Content-Type"), "application/x-www-form-urlencoded"; g != e {
+ t.Errorf("got Content-Type %q, want %q", g, e)
+ }
+ if tr.req.Close {
+ t.Error("got Close true, want false")
+ }
+ // Depending on map iteration, body can be either of these.
+ expectedBody := "foo=bar&foo=bar2&bar=baz"
+ expectedBody1 := "bar=baz&foo=bar&foo=bar2"
+ if g, e := tr.req.ContentLength, int64(len(expectedBody)); g != e {
+ t.Errorf("got ContentLength %d, want %d", g, e)
+ }
+ bodyb, err := ioutil.ReadAll(tr.req.Body)
+ if err != nil {
+ t.Fatalf("ReadAll on req.Body: %v", err)
+ }
+ if g := string(bodyb); g != expectedBody && g != expectedBody1 {
+ t.Errorf("got body %q, want %q or %q", g, expectedBody, expectedBody1)
+ }
+}
+
+func TestRedirects(t *testing.T) {
+ var ts *httptest.Server
+ ts = httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
+ n, _ := strconv.Atoi(r.FormValue("n"))
+ // Test Referer header. (7 is an arbitrary position to test at.)
+ if n == 7 {
+ if g, e := r.Referer(), ts.URL+"/?n=6"; e != g {
+ t.Errorf("on request ?n=7, expected referer of %q; got %q", e, g)
+ }
+ }
+ if n < 15 {
+ Redirect(w, r, fmt.Sprintf("/?n=%d", n+1), StatusFound)
+ return
+ }
+ fmt.Fprintf(w, "n=%d", n)
+ }))
+ defer ts.Close()
+
+ c := &Client{}
+ _, err := c.Get(ts.URL)
+ if e, g := "Get /?n=10: stopped after 10 redirects", fmt.Sprintf("%v", err); e != g {
+ t.Errorf("with default client Get, expected error %q, got %q", e, g)
+ }
+
+ // HEAD request should also have the ability to follow redirects.
+ _, err = c.Head(ts.URL)
+ if e, g := "Head /?n=10: stopped after 10 redirects", fmt.Sprintf("%v", err); e != g {
+ t.Errorf("with default client Head, expected error %q, got %q", e, g)
+ }
+
+ // Do should also follow redirects.
+ greq, _ := NewRequest("GET", ts.URL, nil)
+ _, err = c.Do(greq)
+ if e, g := "Get /?n=10: stopped after 10 redirects", fmt.Sprintf("%v", err); e != g {
+ t.Errorf("with default client Do, expected error %q, got %q", e, g)
+ }
+
+ var checkErr error
+ var lastVia []*Request
+ c = &Client{CheckRedirect: func(_ *Request, via []*Request) error {
+ lastVia = via
+ return checkErr
+ }}
+ res, err := c.Get(ts.URL)
+ finalUrl := res.Request.URL.String()
+ if e, g := "<nil>", fmt.Sprintf("%v", err); e != g {
+ t.Errorf("with custom client, expected error %q, got %q", e, g)
+ }
+ if !strings.HasSuffix(finalUrl, "/?n=15") {
+ t.Errorf("expected final url to end in /?n=15; got url %q", finalUrl)
+ }
+ if e, g := 15, len(lastVia); e != g {
+ t.Errorf("expected lastVia to have contained %d elements; got %d", e, g)
+ }
+
+ checkErr = errors.New("no redirects allowed")
+ res, err = c.Get(ts.URL)
+ finalUrl = res.Request.URL.String()
+ if e, g := "Get /?n=1: no redirects allowed", fmt.Sprintf("%v", err); e != g {
+ t.Errorf("with redirects forbidden, expected error %q, got %q", e, g)
+ }
+}
+
+func TestStreamingGet(t *testing.T) {
+ say := make(chan string)
+ ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
+ w.(Flusher).Flush()
+ for str := range say {
+ w.Write([]byte(str))
+ w.(Flusher).Flush()
+ }
+ }))
+ defer ts.Close()
+
+ c := &Client{}
+ res, err := c.Get(ts.URL)
+ if err != nil {
+ t.Fatal(err)
+ }
+ var buf [10]byte
+ for _, str := range []string{"i", "am", "also", "known", "as", "comet"} {
+ say <- str
+ n, err := io.ReadFull(res.Body, buf[0:len(str)])
+ if err != nil {
+ t.Fatalf("ReadFull on %q: %v", str, err)
+ }
+ if n != len(str) {
+ t.Fatalf("Receiving %q, only read %d bytes", str, n)
+ }
+ got := string(buf[0:n])
+ if got != str {
+ t.Fatalf("Expected %q, got %q", str, got)
+ }
+ }
+ close(say)
+ _, err = io.ReadFull(res.Body, buf[0:1])
+ if err != io.EOF {
+ t.Fatalf("at end expected EOF, got %v", err)
+ }
+}
+
+type writeCountingConn struct {
+ net.Conn
+ count *int
+}
+
+func (c *writeCountingConn) Write(p []byte) (int, error) {
+ *c.count++
+ return c.Conn.Write(p)
+}
+
+// TestClientWrites verifies that client requests are buffered and we
+// don't send a TCP packet per line of the http request + body.
+func TestClientWrites(t *testing.T) {
+ ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
+ }))
+ defer ts.Close()
+
+ writes := 0
+ dialer := func(netz string, addr string) (net.Conn, error) {
+ c, err := net.Dial(netz, addr)
+ if err == nil {
+ c = &writeCountingConn{c, &writes}
+ }
+ return c, err
+ }
+ c := &Client{Transport: &Transport{Dial: dialer}}
+
+ _, err := c.Get(ts.URL)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if writes != 1 {
+ t.Errorf("Get request did %d Write calls, want 1", writes)
+ }
+
+ writes = 0
+ _, err = c.PostForm(ts.URL, url.Values{"foo": {"bar"}})
+ if err != nil {
+ t.Fatal(err)
+ }
+ if writes != 1 {
+ t.Errorf("Post request did %d Write calls, want 1", writes)
+ }
+}
+
+func TestClientInsecureTransport(t *testing.T) {
+ ts := httptest.NewTLSServer(HandlerFunc(func(w ResponseWriter, r *Request) {
+ w.Write([]byte("Hello"))
+ }))
+ defer ts.Close()
+
+ // TODO(bradfitz): add tests for skipping hostname checks too?
+ // would require a new cert for testing, and probably
+ // redundant with these tests.
+ for _, insecure := range []bool{true, false} {
+ tr := &Transport{
+ TLSClientConfig: &tls.Config{
+ InsecureSkipVerify: insecure,
+ },
+ }
+ c := &Client{Transport: tr}
+ _, err := c.Get(ts.URL)
+ if (err == nil) != insecure {
+ t.Errorf("insecure=%v: got unexpected err=%v", insecure, err)
+ }
+ }
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http
+
+import (
+ "bytes"
+ "fmt"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// This implementation is done according to RFC 6265:
+//
+// http://tools.ietf.org/html/rfc6265
+
+// A Cookie represents an HTTP cookie as sent in the Set-Cookie header of an
+// HTTP response or the Cookie header of an HTTP request.
+type Cookie struct {
+ Name string
+ Value string
+ Path string
+ Domain string
+ Expires time.Time
+ RawExpires string
+
+ // MaxAge=0 means no 'Max-Age' attribute specified.
+ // MaxAge<0 means delete cookie now, equivalently 'Max-Age: 0'
+ // MaxAge>0 means Max-Age attribute present and given in seconds
+ MaxAge int
+ Secure bool
+ HttpOnly bool
+ Raw string
+ Unparsed []string // Raw text of unparsed attribute-value pairs
+}
+
+// readSetCookies parses all "Set-Cookie" values from
+// the header h and returns the successfully parsed Cookies.
+func readSetCookies(h Header) []*Cookie {
+ cookies := []*Cookie{}
+ for _, line := range h["Set-Cookie"] {
+ parts := strings.Split(strings.TrimSpace(line), ";")
+ if len(parts) == 1 && parts[0] == "" {
+ continue
+ }
+ parts[0] = strings.TrimSpace(parts[0])
+ j := strings.Index(parts[0], "=")
+ if j < 0 {
+ continue
+ }
+ name, value := parts[0][:j], parts[0][j+1:]
+ if !isCookieNameValid(name) {
+ continue
+ }
+ value, success := parseCookieValue(value)
+ if !success {
+ continue
+ }
+ c := &Cookie{
+ Name: name,
+ Value: value,
+ Raw: line,
+ }
+ for i := 1; i < len(parts); i++ {
+ parts[i] = strings.TrimSpace(parts[i])
+ if len(parts[i]) == 0 {
+ continue
+ }
+
+ attr, val := parts[i], ""
+ if j := strings.Index(attr, "="); j >= 0 {
+ attr, val = attr[:j], attr[j+1:]
+ }
+ lowerAttr := strings.ToLower(attr)
+ parseCookieValueFn := parseCookieValue
+ if lowerAttr == "expires" {
+ parseCookieValueFn = parseCookieExpiresValue
+ }
+ val, success = parseCookieValueFn(val)
+ if !success {
+ c.Unparsed = append(c.Unparsed, parts[i])
+ continue
+ }
+ switch lowerAttr {
+ case "secure":
+ c.Secure = true
+ continue
+ case "httponly":
+ c.HttpOnly = true
+ continue
+ case "domain":
+ c.Domain = val
+ // TODO: Add domain parsing
+ continue
+ case "max-age":
+ secs, err := strconv.Atoi(val)
+ if err != nil || secs < 0 || secs != 0 && val[0] == '0' {
+ break
+ }
+ if secs <= 0 {
+ c.MaxAge = -1
+ } else {
+ c.MaxAge = secs
+ }
+ continue
+ case "expires":
+ c.RawExpires = val
+ exptime, err := time.Parse(time.RFC1123, val)
+ if err != nil {
+ exptime, err = time.Parse("Mon, 02-Jan-2006 15:04:05 MST", val)
+ if err != nil {
+ c.Expires = time.Time{}
+ break
+ }
+ }
+ c.Expires = *exptime
+ continue
+ case "path":
+ c.Path = val
+ // TODO: Add path parsing
+ continue
+ }
+ c.Unparsed = append(c.Unparsed, parts[i])
+ }
+ cookies = append(cookies, c)
+ }
+ return cookies
+}
+
+// SetCookie adds a Set-Cookie header to the provided ResponseWriter's headers.
+func SetCookie(w ResponseWriter, cookie *Cookie) {
+ w.Header().Add("Set-Cookie", cookie.String())
+}
+
+// String returns the serialization of the cookie for use in a Cookie
+// header (if only Name and Value are set) or a Set-Cookie response
+// header (if other fields are set).
+func (c *Cookie) String() string {
+ var b bytes.Buffer
+ fmt.Fprintf(&b, "%s=%s", sanitizeName(c.Name), sanitizeValue(c.Value))
+ if len(c.Path) > 0 {
+ fmt.Fprintf(&b, "; Path=%s", sanitizeValue(c.Path))
+ }
+ if len(c.Domain) > 0 {
+ fmt.Fprintf(&b, "; Domain=%s", sanitizeValue(c.Domain))
+ }
+ if len(c.Expires.Zone) > 0 {
+ fmt.Fprintf(&b, "; Expires=%s", c.Expires.Format(time.RFC1123))
+ }
+ if c.MaxAge > 0 {
+ fmt.Fprintf(&b, "; Max-Age=%d", c.MaxAge)
+ } else if c.MaxAge < 0 {
+ fmt.Fprintf(&b, "; Max-Age=0")
+ }
+ if c.HttpOnly {
+ fmt.Fprintf(&b, "; HttpOnly")
+ }
+ if c.Secure {
+ fmt.Fprintf(&b, "; Secure")
+ }
+ return b.String()
+}
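+
+ // For illustration (examples not in the original source), the rules above
+ // serialize as follows:
+ //
+ //	(&Cookie{Name: "id", Value: "7", MaxAge: 60}).String()     // "id=7; Max-Age=60"
+ //	(&Cookie{Name: "id", Value: "7", MaxAge: -1}).String()     // "id=7; Max-Age=0"
+ //	(&Cookie{Name: "id", Value: "7", HttpOnly: true}).String() // "id=7; HttpOnly"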
+
+// readCookies parses all "Cookie" values from the header h and
+// returns the successfully parsed Cookies.
+//
+ // If filter isn't empty, only cookies of that name are returned.
+func readCookies(h Header, filter string) []*Cookie {
+ cookies := []*Cookie{}
+ lines, ok := h["Cookie"]
+ if !ok {
+ return cookies
+ }
+
+ for _, line := range lines {
+ parts := strings.Split(strings.TrimSpace(line), ";")
+ if len(parts) == 1 && parts[0] == "" {
+ continue
+ }
+ // Per-line attributes
+ parsedPairs := 0
+ for i := 0; i < len(parts); i++ {
+ parts[i] = strings.TrimSpace(parts[i])
+ if len(parts[i]) == 0 {
+ continue
+ }
+ name, val := parts[i], ""
+ if j := strings.Index(name, "="); j >= 0 {
+ name, val = name[:j], name[j+1:]
+ }
+ if !isCookieNameValid(name) {
+ continue
+ }
+ if filter != "" && filter != name {
+ continue
+ }
+ val, success := parseCookieValue(val)
+ if !success {
+ continue
+ }
+ cookies = append(cookies, &Cookie{Name: name, Value: val})
+ parsedPairs++
+ }
+ }
+ return cookies
+}
+
+var cookieNameSanitizer = strings.NewReplacer("\n", "-", "\r", "-")
+
+func sanitizeName(n string) string {
+ return cookieNameSanitizer.Replace(n)
+}
+
+var cookieValueSanitizer = strings.NewReplacer("\n", " ", "\r", " ", ";", " ")
+
+func sanitizeValue(v string) string {
+ return cookieValueSanitizer.Replace(v)
+}
+
+func unquoteCookieValue(v string) string {
+ if len(v) > 1 && v[0] == '"' && v[len(v)-1] == '"' {
+ return v[1 : len(v)-1]
+ }
+ return v
+}
+
+func isCookieByte(c byte) bool {
+ switch {
+ case c == 0x21, 0x23 <= c && c <= 0x2b, 0x2d <= c && c <= 0x3a,
+ 0x3c <= c && c <= 0x5b, 0x5d <= c && c <= 0x7e:
+ return true
+ }
+ return false
+}
+
+func isCookieExpiresByte(c byte) (ok bool) {
+ return isCookieByte(c) || c == ',' || c == ' '
+}
+
+func parseCookieValue(raw string) (string, bool) {
+ return parseCookieValueUsing(raw, isCookieByte)
+}
+
+func parseCookieExpiresValue(raw string) (string, bool) {
+ return parseCookieValueUsing(raw, isCookieExpiresByte)
+}
+
+func parseCookieValueUsing(raw string, validByte func(byte) bool) (string, bool) {
+ raw = unquoteCookieValue(raw)
+ for i := 0; i < len(raw); i++ {
+ if !validByte(raw[i]) {
+ return "", false
+ }
+ }
+ return raw, true
+}
+
+func isCookieNameValid(raw string) bool {
+ for _, c := range raw {
+ if !isToken(byte(c)) {
+ return false
+ }
+ }
+ return true
+}
--- /dev/null
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http
+
+import (
+ "encoding/json"
+ "fmt"
+ "reflect"
+ "testing"
+ "time"
+)
+
+var writeSetCookiesTests = []struct {
+ Cookie *Cookie
+ Raw string
+}{
+ {
+ &Cookie{Name: "cookie-1", Value: "v$1"},
+ "cookie-1=v$1",
+ },
+ {
+ &Cookie{Name: "cookie-2", Value: "two", MaxAge: 3600},
+ "cookie-2=two; Max-Age=3600",
+ },
+ {
+ &Cookie{Name: "cookie-3", Value: "three", Domain: ".example.com"},
+ "cookie-3=three; Domain=.example.com",
+ },
+ {
+ &Cookie{Name: "cookie-4", Value: "four", Path: "/restricted/"},
+ "cookie-4=four; Path=/restricted/",
+ },
+}
+
+func TestWriteSetCookies(t *testing.T) {
+ for i, tt := range writeSetCookiesTests {
+ if g, e := tt.Cookie.String(), tt.Raw; g != e {
+ t.Errorf("Test %d, expecting:\n%s\nGot:\n%s\n", i, e, g)
+ continue
+ }
+ }
+}
+
+type headerOnlyResponseWriter Header
+
+func (ho headerOnlyResponseWriter) Header() Header {
+ return Header(ho)
+}
+
+func (ho headerOnlyResponseWriter) Write([]byte) (int, error) {
+ panic("NOIMPL")
+}
+
+func (ho headerOnlyResponseWriter) WriteHeader(int) {
+ panic("NOIMPL")
+}
+
+func TestSetCookie(t *testing.T) {
+ m := make(Header)
+ SetCookie(headerOnlyResponseWriter(m), &Cookie{Name: "cookie-1", Value: "one", Path: "/restricted/"})
+ SetCookie(headerOnlyResponseWriter(m), &Cookie{Name: "cookie-2", Value: "two", MaxAge: 3600})
+ if l := len(m["Set-Cookie"]); l != 2 {
+ t.Fatalf("expected %d cookies, got %d", 2, l)
+ }
+ if g, e := m["Set-Cookie"][0], "cookie-1=one; Path=/restricted/"; g != e {
+ t.Errorf("cookie #1: want %q, got %q", e, g)
+ }
+ if g, e := m["Set-Cookie"][1], "cookie-2=two; Max-Age=3600"; g != e {
+ t.Errorf("cookie #2: want %q, got %q", e, g)
+ }
+}
+
+var addCookieTests = []struct {
+ Cookies []*Cookie
+ Raw string
+}{
+ {
+ []*Cookie{},
+ "",
+ },
+ {
+ []*Cookie{&Cookie{Name: "cookie-1", Value: "v$1"}},
+ "cookie-1=v$1",
+ },
+ {
+ []*Cookie{
+ &Cookie{Name: "cookie-1", Value: "v$1"},
+ &Cookie{Name: "cookie-2", Value: "v$2"},
+ &Cookie{Name: "cookie-3", Value: "v$3"},
+ },
+ "cookie-1=v$1; cookie-2=v$2; cookie-3=v$3",
+ },
+}
+
+func TestAddCookie(t *testing.T) {
+ for i, tt := range addCookieTests {
+ req, _ := NewRequest("GET", "http://example.com/", nil)
+ for _, c := range tt.Cookies {
+ req.AddCookie(c)
+ }
+ if g := req.Header.Get("Cookie"); g != tt.Raw {
+ t.Errorf("Test %d:\nwant: %s\n got: %s\n", i, tt.Raw, g)
+ continue
+ }
+ }
+}
+
+var readSetCookiesTests = []struct {
+ Header Header
+ Cookies []*Cookie
+}{
+ {
+ Header{"Set-Cookie": {"Cookie-1=v$1"}},
+ []*Cookie{&Cookie{Name: "Cookie-1", Value: "v$1", Raw: "Cookie-1=v$1"}},
+ },
+ {
+ Header{"Set-Cookie": {"NID=99=YsDT5i3E-CXax-; expires=Wed, 23-Nov-2011 01:05:03 GMT; path=/; domain=.google.ch; HttpOnly"}},
+ []*Cookie{&Cookie{
+ Name: "NID",
+ Value: "99=YsDT5i3E-CXax-",
+ Path: "/",
+ Domain: ".google.ch",
+ HttpOnly: true,
+ Expires: time.Time{Year: 2011, Month: 11, Day: 23, Hour: 1, Minute: 5, Second: 3, ZoneOffset: 0, Zone: "GMT"},
+ RawExpires: "Wed, 23-Nov-2011 01:05:03 GMT",
+ Raw: "NID=99=YsDT5i3E-CXax-; expires=Wed, 23-Nov-2011 01:05:03 GMT; path=/; domain=.google.ch; HttpOnly",
+ }},
+ },
+}
+
+func toJSON(v interface{}) string {
+ b, err := json.Marshal(v)
+ if err != nil {
+ return fmt.Sprintf("%#v", v)
+ }
+ return string(b)
+}
+
+func TestReadSetCookies(t *testing.T) {
+ for i, tt := range readSetCookiesTests {
+ for n := 0; n < 2; n++ { // to verify readSetCookies doesn't mutate its input
+ c := readSetCookies(tt.Header)
+ if !reflect.DeepEqual(c, tt.Cookies) {
+ t.Errorf("#%d readSetCookies: have\n%s\nwant\n%s\n", i, toJSON(c), toJSON(tt.Cookies))
+ continue
+ }
+ }
+ }
+}
+
+var readCookiesTests = []struct {
+ Header Header
+ Filter string
+ Cookies []*Cookie
+}{
+ {
+ Header{"Cookie": {"Cookie-1=v$1", "c2=v2"}},
+ "",
+ []*Cookie{
+ &Cookie{Name: "Cookie-1", Value: "v$1"},
+ &Cookie{Name: "c2", Value: "v2"},
+ },
+ },
+ {
+ Header{"Cookie": {"Cookie-1=v$1", "c2=v2"}},
+ "c2",
+ []*Cookie{
+ &Cookie{Name: "c2", Value: "v2"},
+ },
+ },
+ {
+ Header{"Cookie": {"Cookie-1=v$1; c2=v2"}},
+ "",
+ []*Cookie{
+ &Cookie{Name: "Cookie-1", Value: "v$1"},
+ &Cookie{Name: "c2", Value: "v2"},
+ },
+ },
+ {
+ Header{"Cookie": {"Cookie-1=v$1; c2=v2"}},
+ "c2",
+ []*Cookie{
+ &Cookie{Name: "c2", Value: "v2"},
+ },
+ },
+}
+
+func TestReadCookies(t *testing.T) {
+ for i, tt := range readCookiesTests {
+ for n := 0; n < 2; n++ { // to verify readCookies doesn't mutate its input
+ c := readCookies(tt.Header, tt.Filter)
+ if !reflect.DeepEqual(c, tt.Cookies) {
+ t.Errorf("#%d readCookies:\nhave: %s\nwant: %s\n", i, toJSON(c), toJSON(tt.Cookies))
+ continue
+ }
+ }
+ }
+}
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package http provides HTTP client and server implementations.
+
+Get, Head, Post, and PostForm make HTTP requests:
+
+ resp, err := http.Get("http://example.com/")
+ ...
+ resp, err := http.Post("http://example.com/upload", "image/jpeg", &buf)
+ ...
+ resp, err := http.PostForm("http://example.com/form",
+ url.Values{"key": {"Value"}, "id": {"123"}})
+
+The client must close the response body when finished with it:
+
+ resp, err := http.Get("http://example.com/")
+ if err != nil {
+ // handle error
+ }
+ defer resp.Body.Close()
+ body, err := ioutil.ReadAll(resp.Body)
+ // ...
+
+For control over HTTP client headers, redirect policy, and other
+settings, create a Client:
+
+ client := &http.Client{
+ CheckRedirect: redirectPolicyFunc,
+ }
+
+ resp, err := client.Get("http://example.com")
+ // ...
+
+ req, err := http.NewRequest("GET", "http://example.com", nil)
+ req.Header.Add("If-None-Match", `W/"wyzzy"`)
+ resp, err := client.Do(req)
+ // ...
+
+For control over proxies, TLS configuration, keep-alives,
+compression, and other settings, create a Transport:
+
+ tr := &http.Transport{
+ TLSClientConfig: &tls.Config{RootCAs: pool},
+ DisableCompression: true,
+ }
+ client := &http.Client{Transport: tr}
+ resp, err := client.Get("https://example.com")
+
+Clients and Transports are safe for concurrent use by multiple
+goroutines and for efficiency should only be created once and re-used.
+
+ListenAndServe starts an HTTP server with a given address and handler.
+The handler is usually nil, which means to use DefaultServeMux.
+Handle and HandleFunc add handlers to DefaultServeMux:
+
+ http.Handle("/foo", fooHandler)
+
+ http.HandleFunc("/bar", func(w http.ResponseWriter, r *http.Request) {
+ fmt.Fprintf(w, "Hello, %q", html.EscapeString(r.URL.RawPath))
+ })
+
+ log.Fatal(http.ListenAndServe(":8080", nil))
+
+More control over the server's behavior is available by creating a
+custom Server:
+
+ s := &http.Server{
+ Addr: ":8080",
+ Handler: myHandler,
+ ReadTimeout: 10e9,
+ WriteTimeout: 10e9,
+ MaxHeaderBytes: 1 << 20,
+ }
+ log.Fatal(s.ListenAndServe())
+*/
+package http
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Bridge package to expose http internals to tests in the http_test
+// package.
+
+package http
+
+func (t *Transport) IdleConnKeysForTesting() (keys []string) {
+ keys = make([]string, 0)
+ t.lk.Lock()
+ defer t.lk.Unlock()
+ if t.idleConn == nil {
+ return
+ }
+ for key := range t.idleConn {
+ keys = append(keys, key)
+ }
+ return
+}
+
+func (t *Transport) IdleConnCountForTesting(cacheKey string) int {
+ t.lk.Lock()
+ defer t.lk.Unlock()
+ if t.idleConn == nil {
+ return 0
+ }
+ conns, ok := t.idleConn[cacheKey]
+ if !ok {
+ return 0
+ }
+ return len(conns)
+}
+
+func NewTestTimeoutHandler(handler Handler, ch <-chan int64) Handler {
+ f := func() <-chan int64 {
+ return ch
+ }
+ return &timeoutHandler{handler, f, ""}
+}
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fcgi
+
+// This file implements FastCGI from the perspective of a child process.
+
+import (
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "net/http/cgi"
+ "os"
+ "time"
+)
+
+// request holds the state for an in-progress request. As soon as it's complete,
+// it's converted to an http.Request.
+type request struct {
+ pw *io.PipeWriter
+ reqId uint16
+ params map[string]string
+ buf [1024]byte
+ rawParams []byte
+ keepConn bool
+}
+
+func newRequest(reqId uint16, flags uint8) *request {
+ r := &request{
+ reqId: reqId,
+ params: map[string]string{},
+ keepConn: flags&flagKeepConn != 0,
+ }
+ r.rawParams = r.buf[:0]
+ return r
+}
+
+// parseParams reads an encoded []byte into Params.
+func (r *request) parseParams() {
+ text := r.rawParams
+ r.rawParams = nil
+ for len(text) > 0 {
+ keyLen, n := readSize(text)
+ if n == 0 {
+ return
+ }
+ text = text[n:]
+ valLen, n := readSize(text)
+ if n == 0 {
+ return
+ }
+ text = text[n:]
+ key := readString(text, keyLen)
+ text = text[keyLen:]
+ val := readString(text, valLen)
+ text = text[valLen:]
+ r.params[key] = val
+ }
+}
+
+// response implements http.ResponseWriter.
+type response struct {
+ req *request
+ header http.Header
+ w *bufWriter
+ wroteHeader bool
+}
+
+func newResponse(c *child, req *request) *response {
+ return &response{
+ req: req,
+ header: http.Header{},
+ w: newWriter(c.conn, typeStdout, req.reqId),
+ }
+}
+
+func (r *response) Header() http.Header {
+ return r.header
+}
+
+func (r *response) Write(data []byte) (int, error) {
+ if !r.wroteHeader {
+ r.WriteHeader(http.StatusOK)
+ }
+ return r.w.Write(data)
+}
+
+func (r *response) WriteHeader(code int) {
+ if r.wroteHeader {
+ return
+ }
+ r.wroteHeader = true
+ if code == http.StatusNotModified {
+ // Must not have body.
+ r.header.Del("Content-Type")
+ r.header.Del("Content-Length")
+ r.header.Del("Transfer-Encoding")
+ } else if r.header.Get("Content-Type") == "" {
+ r.header.Set("Content-Type", "text/html; charset=utf-8")
+ }
+
+ if r.header.Get("Date") == "" {
+ r.header.Set("Date", time.UTC().Format(http.TimeFormat))
+ }
+
+ fmt.Fprintf(r.w, "Status: %d %s\r\n", code, http.StatusText(code))
+ r.header.Write(r.w)
+ r.w.WriteString("\r\n")
+}
+
+func (r *response) Flush() {
+ if !r.wroteHeader {
+ r.WriteHeader(http.StatusOK)
+ }
+ r.w.Flush()
+}
+
+func (r *response) Close() error {
+ r.Flush()
+ return r.w.Close()
+}
+
+type child struct {
+ conn *conn
+ handler http.Handler
+}
+
+func newChild(rwc net.Conn, handler http.Handler) *child {
+ return &child{newConn(rwc), handler}
+}
+
+func (c *child) serve() {
+ requests := map[uint16]*request{}
+ defer c.conn.Close()
+ var rec record
+ var br beginRequest
+ for {
+ if err := rec.read(c.conn.rwc); err != nil {
+ return
+ }
+
+ req, ok := requests[rec.h.Id]
+ if !ok && rec.h.Type != typeBeginRequest && rec.h.Type != typeGetValues {
+ // The spec says to ignore unknown request IDs.
+ continue
+ }
+ if ok && rec.h.Type == typeBeginRequest {
+ // The server is trying to begin a request with the same ID
+ // as an in-progress request. This is an error.
+ return
+ }
+
+ switch rec.h.Type {
+ case typeBeginRequest:
+ if err := br.read(rec.content()); err != nil {
+ return
+ }
+ if br.role != roleResponder {
+ c.conn.writeEndRequest(rec.h.Id, 0, statusUnknownRole)
+ break
+ }
+ requests[rec.h.Id] = newRequest(rec.h.Id, br.flags)
+ case typeParams:
+ // NOTE(eds): Technically a key-value pair can straddle the boundary
+ // between two packets. We buffer until we've received all parameters.
+ if len(rec.content()) > 0 {
+ req.rawParams = append(req.rawParams, rec.content()...)
+ break
+ }
+ req.parseParams()
+ case typeStdin:
+ content := rec.content()
+ if req.pw == nil {
+ var body io.ReadCloser
+ if len(content) > 0 {
+ // body could be an io.LimitReader, but it shouldn't matter
+ // as long as both sides are behaving.
+ body, req.pw = io.Pipe()
+ }
+ go c.serveRequest(req, body)
+ }
+ if len(content) > 0 {
+ // TODO(eds): This blocks until the handler reads from the pipe.
+ // If the handler takes a long time, it might be a problem.
+ req.pw.Write(content)
+ } else if req.pw != nil {
+ req.pw.Close()
+ }
+ case typeGetValues:
+ values := map[string]string{"FCGI_MPXS_CONNS": "1"}
+ c.conn.writePairs(typeGetValuesResult, 0, values)
+ case typeData:
+ // If the filter role is implemented, read the data stream here.
+ case typeAbortRequest:
+ delete(requests, rec.h.Id)
+ c.conn.writeEndRequest(rec.h.Id, 0, statusRequestComplete)
+ if !req.keepConn {
+ // connection will close upon return
+ return
+ }
+ default:
+ b := make([]byte, 8)
+ b[0] = rec.h.Type
+ c.conn.writeRecord(typeUnknownType, 0, b)
+ }
+ }
+}
+
+func (c *child) serveRequest(req *request, body io.ReadCloser) {
+ r := newResponse(c, req)
+ httpReq, err := cgi.RequestFromMap(req.params)
+ if err != nil {
+ // there was an error reading the request
+ r.WriteHeader(http.StatusInternalServerError)
+ c.conn.writeRecord(typeStderr, req.reqId, []byte(err.Error()))
+ } else {
+ httpReq.Body = body
+ c.handler.ServeHTTP(r, httpReq)
+ }
+ if body != nil {
+ body.Close()
+ }
+ r.Close()
+ c.conn.writeEndRequest(req.reqId, 0, statusRequestComplete)
+ if !req.keepConn {
+ c.conn.Close()
+ }
+}
+
+// Serve accepts incoming FastCGI connections on the listener l, creating a new
+// service thread for each. The service threads read requests and then call handler
+// to reply to them.
+// If l is nil, Serve accepts connections on stdin.
+// If handler is nil, http.DefaultServeMux is used.
+func Serve(l net.Listener, handler http.Handler) error {
+ if l == nil {
+ var err error
+ l, err = net.FileListener(os.Stdin)
+ if err != nil {
+ return err
+ }
+ defer l.Close()
+ }
+ if handler == nil {
+ handler = http.DefaultServeMux
+ }
+ for {
+ rw, err := l.Accept()
+ if err != nil {
+ return err
+ }
+ c := newChild(rw, handler)
+ go c.serve()
+ }
+ panic("unreachable")
+}
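+
+ // A minimal, hypothetical responder using Serve might look like the sketch
+ // below (the handler body and the use of log are assumptions, not part of
+ // this package):
+ //
+ //	func main() {
+ //		http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+ //			fmt.Fprintln(w, "Hello from FastCGI")
+ //		})
+ //		// A nil listener means: accept FastCGI connections on stdin.
+ //		if err := fcgi.Serve(nil, nil); err != nil {
+ //			log.Fatal(err)
+ //		}
+ //	}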
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package fcgi implements the FastCGI protocol.
+// Currently only the responder role is supported.
+// The protocol is defined at http://www.fastcgi.com/drupal/node/6?q=node/22
+package fcgi
+
+// This file defines the raw protocol and some utilities used by the child and
+// the host.
+
+import (
+ "bufio"
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "io"
+ "sync"
+)
+
+const (
+ // Packet Types
+ typeBeginRequest = iota + 1
+ typeAbortRequest
+ typeEndRequest
+ typeParams
+ typeStdin
+ typeStdout
+ typeStderr
+ typeData
+ typeGetValues
+ typeGetValuesResult
+ typeUnknownType
+)
+
+// keep the connection between web-server and responder open after request
+const flagKeepConn = 1
+
+const (
+ maxWrite = 65535 // maximum record body
+ maxPad = 255
+)
+
+const (
+ roleResponder = iota + 1 // only Responders are implemented.
+ roleAuthorizer
+ roleFilter
+)
+
+const (
+ statusRequestComplete = iota
+ statusCantMultiplex
+ statusOverloaded
+ statusUnknownRole
+)
+
+const headerLen = 8
+
+type header struct {
+ Version uint8
+ Type uint8
+ Id uint16
+ ContentLength uint16
+ PaddingLength uint8
+ Reserved uint8
+}
+
+type beginRequest struct {
+ role uint16
+ flags uint8
+ reserved [5]uint8
+}
+
+func (br *beginRequest) read(content []byte) error {
+ if len(content) != 8 {
+ return errors.New("fcgi: invalid begin request record")
+ }
+ br.role = binary.BigEndian.Uint16(content)
+ br.flags = content[2]
+ return nil
+}
+
+// for padding so we don't have to allocate all the time
+// not synchronized because we don't care what the contents are
+var pad [maxPad]byte
+
+func (h *header) init(recType uint8, reqId uint16, contentLength int) {
+ h.Version = 1
+ h.Type = recType
+ h.Id = reqId
+ h.ContentLength = uint16(contentLength)
+ h.PaddingLength = uint8(-contentLength & 7)
+}
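+
+ // For illustration (not in the original): the padding above rounds each
+ // record body up to a multiple of 8 bytes, e.g.
+ //
+ //	contentLength = 5  -> PaddingLength = 3  (5+3 = 8)
+ //	contentLength = 8  -> PaddingLength = 0
+ //	contentLength = 13 -> PaddingLength = 3  (13+3 = 16)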
+
+// conn sends records over rwc
+type conn struct {
+ mutex sync.Mutex
+ rwc io.ReadWriteCloser
+
+ // to avoid allocations
+ buf bytes.Buffer
+ h header
+}
+
+func newConn(rwc io.ReadWriteCloser) *conn {
+ return &conn{rwc: rwc}
+}
+
+func (c *conn) Close() error {
+ c.mutex.Lock()
+ defer c.mutex.Unlock()
+ return c.rwc.Close()
+}
+
+type record struct {
+ h header
+ buf [maxWrite + maxPad]byte
+}
+
+func (rec *record) read(r io.Reader) (err error) {
+ if err = binary.Read(r, binary.BigEndian, &rec.h); err != nil {
+ return err
+ }
+ if rec.h.Version != 1 {
+ return errors.New("fcgi: invalid header version")
+ }
+ n := int(rec.h.ContentLength) + int(rec.h.PaddingLength)
+ if _, err = io.ReadFull(r, rec.buf[:n]); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (r *record) content() []byte {
+ return r.buf[:r.h.ContentLength]
+}
+
+// writeRecord writes and sends a single record.
+func (c *conn) writeRecord(recType uint8, reqId uint16, b []byte) error {
+ c.mutex.Lock()
+ defer c.mutex.Unlock()
+ c.buf.Reset()
+ c.h.init(recType, reqId, len(b))
+ if err := binary.Write(&c.buf, binary.BigEndian, c.h); err != nil {
+ return err
+ }
+ if _, err := c.buf.Write(b); err != nil {
+ return err
+ }
+ if _, err := c.buf.Write(pad[:c.h.PaddingLength]); err != nil {
+ return err
+ }
+ _, err := c.rwc.Write(c.buf.Bytes())
+ return err
+}
+
+func (c *conn) writeBeginRequest(reqId uint16, role uint16, flags uint8) error {
+ b := [8]byte{byte(role >> 8), byte(role), flags}
+ return c.writeRecord(typeBeginRequest, reqId, b[:])
+}
+
+func (c *conn) writeEndRequest(reqId uint16, appStatus int, protocolStatus uint8) error {
+ b := make([]byte, 8)
+ binary.BigEndian.PutUint32(b, uint32(appStatus))
+ b[4] = protocolStatus
+ return c.writeRecord(typeEndRequest, reqId, b)
+}
+
+func (c *conn) writePairs(recType uint8, reqId uint16, pairs map[string]string) error {
+ w := newWriter(c, recType, reqId)
+ b := make([]byte, 8)
+ for k, v := range pairs {
+ n := encodeSize(b, uint32(len(k)))
+ n += encodeSize(b[n:], uint32(len(v)))
+ if _, err := w.Write(b[:n]); err != nil {
+ return err
+ }
+ if _, err := w.WriteString(k); err != nil {
+ return err
+ }
+ if _, err := w.WriteString(v); err != nil {
+ return err
+ }
+ }
+ w.Close()
+ return nil
+}
+
+func readSize(s []byte) (uint32, int) {
+ if len(s) == 0 {
+ return 0, 0
+ }
+ size, n := uint32(s[0]), 1
+ if size&(1<<7) != 0 {
+ if len(s) < 4 {
+ return 0, 0
+ }
+ n = 4
+ size = binary.BigEndian.Uint32(s)
+ size &^= 1 << 31
+ }
+ return size, n
+}
+
+func readString(s []byte, size uint32) string {
+ if size > uint32(len(s)) {
+ return ""
+ }
+ return string(s[:size])
+}
+
+func encodeSize(b []byte, size uint32) int {
+ if size > 127 {
+ size |= 1 << 31
+ binary.BigEndian.PutUint32(b, size)
+ return 4
+ }
+ b[0] = byte(size)
+ return 1
+}
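+
+ // Illustrative encodings (not part of the original source): lengths up to
+ // 127 use a single byte; larger lengths use four bytes with the high bit set.
+ //
+ //	encodeSize(b, 3)    // writes {0x03}, returns 1
+ //	encodeSize(b, 300)  // writes {0x80, 0x00, 0x01, 0x2C}, returns 4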
+
+ // bufWriter encapsulates bufio.Writer but also closes the underlying stream
+ // when it is closed.
+type bufWriter struct {
+ closer io.Closer
+ *bufio.Writer
+}
+
+func (w *bufWriter) Close() error {
+ if err := w.Writer.Flush(); err != nil {
+ w.closer.Close()
+ return err
+ }
+ return w.closer.Close()
+}
+
+func newWriter(c *conn, recType uint8, reqId uint16) *bufWriter {
+ s := &streamWriter{c: c, recType: recType, reqId: reqId}
+ w, _ := bufio.NewWriterSize(s, maxWrite)
+ return &bufWriter{s, w}
+}
+
+// streamWriter abstracts out the separation of a stream into discrete records.
+// It only writes maxWrite bytes at a time.
+type streamWriter struct {
+ c *conn
+ recType uint8
+ reqId uint16
+}
+
+func (w *streamWriter) Write(p []byte) (int, error) {
+ nn := 0
+ for len(p) > 0 {
+ n := len(p)
+ if n > maxWrite {
+ n = maxWrite
+ }
+ if err := w.c.writeRecord(w.recType, w.reqId, p[:n]); err != nil {
+ return nn, err
+ }
+ nn += n
+ p = p[n:]
+ }
+ return nn, nil
+}
+
+func (w *streamWriter) Close() error {
+ // send empty record to close the stream
+ return w.c.writeRecord(w.recType, w.reqId, nil)
+}
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fcgi
+
+import (
+ "bytes"
+ "io"
+ "testing"
+)
+
+var sizeTests = []struct {
+ size uint32
+ bytes []byte
+}{
+ {0, []byte{0x00}},
+ {127, []byte{0x7F}},
+ {128, []byte{0x80, 0x00, 0x00, 0x80}},
+ {1000, []byte{0x80, 0x00, 0x03, 0xE8}},
+ {33554431, []byte{0x81, 0xFF, 0xFF, 0xFF}},
+}
+
+func TestSize(t *testing.T) {
+ b := make([]byte, 4)
+ for i, test := range sizeTests {
+ n := encodeSize(b, test.size)
+ if !bytes.Equal(b[:n], test.bytes) {
+ t.Errorf("%d expected %x, encoded %x", i, test.bytes, b)
+ }
+ size, n := readSize(test.bytes)
+ if size != test.size {
+ t.Errorf("%d expected %d, read %d", i, test.size, size)
+ }
+ if len(test.bytes) != n {
+ t.Errorf("%d did not consume all the bytes", i)
+ }
+ }
+}
+
+var streamTests = []struct {
+ desc string
+ recType uint8
+ reqId uint16
+ content []byte
+ raw []byte
+}{
+ {"single record", typeStdout, 1, nil,
+ []byte{1, typeStdout, 0, 1, 0, 0, 0, 0},
+ },
+ // this data will have to be split into two records
+ {"two records", typeStdin, 300, make([]byte, 66000),
+ bytes.Join([][]byte{
+ // header for the first record
+ {1, typeStdin, 0x01, 0x2C, 0xFF, 0xFF, 1, 0},
+ make([]byte, 65536),
+ // header for the second
+ {1, typeStdin, 0x01, 0x2C, 0x01, 0xD1, 7, 0},
+ make([]byte, 472),
+ // header for the empty record
+ {1, typeStdin, 0x01, 0x2C, 0, 0, 0, 0},
+ },
+ nil),
+ },
+}
+
+type nilCloser struct {
+ io.ReadWriter
+}
+
+func (c *nilCloser) Close() error { return nil }
+
+func TestStreams(t *testing.T) {
+ var rec record
+outer:
+ for _, test := range streamTests {
+ buf := bytes.NewBuffer(test.raw)
+ var content []byte
+ for buf.Len() > 0 {
+ if err := rec.read(buf); err != nil {
+ t.Errorf("%s: error reading record: %v", test.desc, err)
+ continue outer
+ }
+ content = append(content, rec.content()...)
+ }
+ if rec.h.Type != test.recType {
+ t.Errorf("%s: got type %d expected %d", test.desc, rec.h.Type, test.recType)
+ continue
+ }
+ if rec.h.Id != test.reqId {
+ t.Errorf("%s: got request ID %d expected %d", test.desc, rec.h.Id, test.reqId)
+ continue
+ }
+ if !bytes.Equal(content, test.content) {
+ t.Errorf("%s: read wrong content", test.desc)
+ continue
+ }
+ buf.Reset()
+ c := newConn(&nilCloser{buf})
+ w := newWriter(c, test.recType, test.reqId)
+ if _, err := w.Write(test.content); err != nil {
+ t.Errorf("%s: error writing record: %v", test.desc, err)
+ continue
+ }
+ if err := w.Close(); err != nil {
+ t.Errorf("%s: error closing stream: %v", test.desc, err)
+ continue
+ }
+ if !bytes.Equal(buf.Bytes(), test.raw) {
+ t.Errorf("%s: wrote wrong content", test.desc)
+ }
+ }
+}
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http
+
+import (
+ "fmt"
+ "io"
+)
+
+// fileTransport implements RoundTripper for the 'file' protocol.
+type fileTransport struct {
+ fh fileHandler
+}
+
+// NewFileTransport returns a new RoundTripper, serving the provided
+// FileSystem. The returned RoundTripper ignores the URL host in its
+// incoming requests, as well as most other properties of the
+// request.
+//
+// The typical use case for NewFileTransport is to register the "file"
+// protocol with a Transport, as in:
+//
+// t := &http.Transport{}
+// t.RegisterProtocol("file", http.NewFileTransport(http.Dir("/")))
+// c := &http.Client{Transport: t}
+// res, err := c.Get("file:///etc/passwd")
+// ...
+func NewFileTransport(fs FileSystem) RoundTripper {
+ return fileTransport{fileHandler{fs}}
+}
+
+func (t fileTransport) RoundTrip(req *Request) (resp *Response, err error) {
+ // We start ServeHTTP in a goroutine, which may take a long
+ // time if the file is large. The newPopulateResponseWriter
+ // call returns a channel which either ServeHTTP or finish()
+ // sends our *Response on, once the *Response itself has been
+ // populated (even if the body itself is still being
+ // written to the res.Body, a pipe)
+ rw, resc := newPopulateResponseWriter()
+ go func() {
+ t.fh.ServeHTTP(rw, req)
+ rw.finish()
+ }()
+ return <-resc, nil
+}
+
+func newPopulateResponseWriter() (*populateResponse, <-chan *Response) {
+ pr, pw := io.Pipe()
+ rw := &populateResponse{
+ ch: make(chan *Response),
+ pw: pw,
+ res: &Response{
+ Proto: "HTTP/1.0",
+ ProtoMajor: 1,
+ Header: make(Header),
+ Close: true,
+ Body: pr,
+ },
+ }
+ return rw, rw.ch
+}
+
+// populateResponse is a ResponseWriter that populates the *Response
+// in res, and writes its body to a pipe connected to the response
+// body. Once writes begin or finish() is called, the response is sent
+// on ch.
+type populateResponse struct {
+ res *Response
+ ch chan *Response
+ wroteHeader bool
+ hasContent bool
+ sentResponse bool
+ pw *io.PipeWriter
+}
+
+func (pr *populateResponse) finish() {
+ if !pr.wroteHeader {
+ pr.WriteHeader(500)
+ }
+ if !pr.sentResponse {
+ pr.sendResponse()
+ }
+ pr.pw.Close()
+}
+
+func (pr *populateResponse) sendResponse() {
+ if pr.sentResponse {
+ return
+ }
+ pr.sentResponse = true
+
+ if pr.hasContent {
+ pr.res.ContentLength = -1
+ }
+ pr.ch <- pr.res
+}
+
+func (pr *populateResponse) Header() Header {
+ return pr.res.Header
+}
+
+func (pr *populateResponse) WriteHeader(code int) {
+ if pr.wroteHeader {
+ return
+ }
+ pr.wroteHeader = true
+
+ pr.res.StatusCode = code
+ pr.res.Status = fmt.Sprintf("%d %s", code, StatusText(code))
+}
+
+func (pr *populateResponse) Write(p []byte) (n int, err error) {
+ if !pr.wroteHeader {
+ pr.WriteHeader(StatusOK)
+ }
+ pr.hasContent = true
+ if !pr.sentResponse {
+ pr.sendResponse()
+ }
+ return pr.pw.Write(p)
+}
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http_test
+
+import (
+ "io/ioutil"
+ "net/http"
+ "path/filepath"
+ "testing"
+)
+
+func checker(t *testing.T) func(string, error) {
+ return func(call string, err error) {
+ if err == nil {
+ return
+ }
+ t.Fatalf("%s: %v", call, err)
+ }
+}
+
+func TestFileTransport(t *testing.T) {
+ check := checker(t)
+
+ dname, err := ioutil.TempDir("", "")
+ check("TempDir", err)
+ fname := filepath.Join(dname, "foo.txt")
+ err = ioutil.WriteFile(fname, []byte("Bar"), 0644)
+ check("WriteFile", err)
+
+ tr := &http.Transport{}
+ tr.RegisterProtocol("file", http.NewFileTransport(http.Dir(dname)))
+ c := &http.Client{Transport: tr}
+
+ fooURLs := []string{"file:///foo.txt", "file://../foo.txt"}
+ for _, urlstr := range fooURLs {
+ res, err := c.Get(urlstr)
+ check("Get "+urlstr, err)
+ if res.StatusCode != 200 {
+ t.Errorf("for %s, StatusCode = %d, want 200", urlstr, res.StatusCode)
+ }
+ if res.ContentLength != -1 {
+ t.Errorf("for %s, ContentLength = %d, want -1", urlstr, res.ContentLength)
+ }
+ if res.Body == nil {
+ t.Fatalf("for %s, nil Body", urlstr)
+ }
+ slurp, err := ioutil.ReadAll(res.Body)
+ check("ReadAll "+urlstr, err)
+ if string(slurp) != "Bar" {
+ t.Errorf("for %s, got content %q, want %q", urlstr, string(slurp), "Bar")
+ }
+ }
+
+ const badURL = "file://../no-exist.txt"
+ res, err := c.Get(badURL)
+ check("Get "+badURL, err)
+ if res.StatusCode != 404 {
+ t.Errorf("for %s, StatusCode = %d, want 404", badURL, res.StatusCode)
+ }
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// HTTP file system request handler
+
+package http
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "mime"
+ "os"
+ "path"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "time"
+ "unicode/utf8"
+)
+
+// A Dir implements http.FileSystem using the native file
+// system restricted to a specific directory tree.
+type Dir string
+
+func (d Dir) Open(name string) (File, error) {
+ if filepath.Separator != '/' && strings.IndexRune(name, filepath.Separator) >= 0 {
+ return nil, errors.New("http: invalid character in file path")
+ }
+ f, err := os.Open(filepath.Join(string(d), filepath.FromSlash(path.Clean("/"+name))))
+ if err != nil {
+ return nil, err
+ }
+ return f, nil
+}
+
+// A FileSystem implements access to a collection of named files.
+// The elements in a file path are separated by slash ('/', U+002F)
+// characters, regardless of host operating system convention.
+type FileSystem interface {
+ Open(name string) (File, error)
+}
+
+// A File is returned by a FileSystem's Open method and can be
+// served by the FileServer implementation.
+type File interface {
+ Close() error
+ Stat() (*os.FileInfo, error)
+ Readdir(count int) ([]os.FileInfo, error)
+ Read([]byte) (int, error)
+ Seek(offset int64, whence int) (int64, error)
+}
+
+// Heuristic: b is text if it is valid UTF-8 and doesn't
+// contain any unprintable ASCII or Unicode characters.
+func isText(b []byte) bool {
+ for len(b) > 0 && utf8.FullRune(b) {
+		r, size := utf8.DecodeRune(b)
+		if size == 1 && r == utf8.RuneError {
+			// decoding error
+			return false
+		}
+		if 0x7F <= r && r <= 0x9F {
+			return false
+		}
+		if r < ' ' {
+			switch r {
+ case '\n', '\r', '\t':
+ // okay
+ default:
+ // binary garbage
+ return false
+ }
+ }
+ b = b[size:]
+ }
+ return true
+}
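+
+// For illustration only (not part of the original change): under this
+// heuristic a short UTF-8 string is considered text, while a buffer of raw
+// control bytes is not:
+//
+//	isText([]byte("héllo\n"))  // true: valid UTF-8, printable runes plus '\n'
+//	isText([]byte{0x00, 0x01}) // false: control bytes below ' ' other than \n, \r, \t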
+
+func dirList(w ResponseWriter, f File) {
+ w.Header().Set("Content-Type", "text/html; charset=utf-8")
+ fmt.Fprintf(w, "<pre>\n")
+ for {
+ dirs, err := f.Readdir(100)
+ if err != nil || len(dirs) == 0 {
+ break
+ }
+ for _, d := range dirs {
+ name := d.Name
+ if d.IsDirectory() {
+ name += "/"
+ }
+ // TODO htmlescape
+ fmt.Fprintf(w, "<a href=\"%s\">%s</a>\n", name, name)
+ }
+ }
+ fmt.Fprintf(w, "</pre>\n")
+}
+
+// name is '/'-separated, not filepath.Separator.
+func serveFile(w ResponseWriter, r *Request, fs FileSystem, name string, redirect bool) {
+ const indexPage = "/index.html"
+
+ // redirect .../index.html to .../
+ // can't use Redirect() because that would make the path absolute,
+ // which would be a problem running under StripPrefix
+ if strings.HasSuffix(r.URL.Path, indexPage) {
+ localRedirect(w, r, "./")
+ return
+ }
+
+ f, err := fs.Open(name)
+ if err != nil {
+ // TODO expose actual error?
+ NotFound(w, r)
+ return
+ }
+ defer f.Close()
+
+ d, err1 := f.Stat()
+ if err1 != nil {
+ // TODO expose actual error?
+ NotFound(w, r)
+ return
+ }
+
+ if redirect {
+ // redirect to canonical path: / at end of directory url
+ // r.URL.Path always begins with /
+ url := r.URL.Path
+ if d.IsDirectory() {
+ if url[len(url)-1] != '/' {
+ localRedirect(w, r, path.Base(url)+"/")
+ return
+ }
+ } else {
+ if url[len(url)-1] == '/' {
+ localRedirect(w, r, "../"+path.Base(url))
+ return
+ }
+ }
+ }
+
+ if t, _ := time.Parse(TimeFormat, r.Header.Get("If-Modified-Since")); t != nil && d.Mtime_ns/1e9 <= t.Seconds() {
+ w.WriteHeader(StatusNotModified)
+ return
+ }
+ w.Header().Set("Last-Modified", time.SecondsToUTC(d.Mtime_ns/1e9).Format(TimeFormat))
+
+ // use contents of index.html for directory, if present
+ if d.IsDirectory() {
+ index := name + indexPage
+ ff, err := fs.Open(index)
+ if err == nil {
+ defer ff.Close()
+ dd, err := ff.Stat()
+ if err == nil {
+ name = index
+ d = dd
+ f = ff
+ }
+ }
+ }
+
+ if d.IsDirectory() {
+ dirList(w, f)
+ return
+ }
+
+ // serve file
+ size := d.Size
+ code := StatusOK
+
+ // If Content-Type isn't set, use the file's extension to find it.
+ if w.Header().Get("Content-Type") == "" {
+ ctype := mime.TypeByExtension(filepath.Ext(name))
+ if ctype == "" {
+ // read a chunk to decide between utf-8 text and binary
+ var buf [1024]byte
+ n, _ := io.ReadFull(f, buf[:])
+ b := buf[:n]
+ if isText(b) {
+ ctype = "text/plain; charset=utf-8"
+ } else {
+ // generic binary
+ ctype = "application/octet-stream"
+ }
+ f.Seek(0, os.SEEK_SET) // rewind to output whole file
+ }
+ w.Header().Set("Content-Type", ctype)
+ }
+
+ // handle Content-Range header.
+ // TODO(adg): handle multiple ranges
+ ranges, err := parseRange(r.Header.Get("Range"), size)
+ if err == nil && len(ranges) > 1 {
+ err = errors.New("multiple ranges not supported")
+ }
+ if err != nil {
+ Error(w, err.Error(), StatusRequestedRangeNotSatisfiable)
+ return
+ }
+ if len(ranges) == 1 {
+ ra := ranges[0]
+ if _, err := f.Seek(ra.start, os.SEEK_SET); err != nil {
+ Error(w, err.Error(), StatusRequestedRangeNotSatisfiable)
+ return
+ }
+ size = ra.length
+ code = StatusPartialContent
+ w.Header().Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", ra.start, ra.start+ra.length-1, d.Size))
+ }
+
+ w.Header().Set("Accept-Ranges", "bytes")
+ if w.Header().Get("Content-Encoding") == "" {
+ w.Header().Set("Content-Length", strconv.Itoa64(size))
+ }
+
+ w.WriteHeader(code)
+
+ if r.Method != "HEAD" {
+ io.CopyN(w, f, size)
+ }
+}
+
+// localRedirect gives a Moved Permanently response.
+// It does not convert relative paths to absolute paths like Redirect does.
+func localRedirect(w ResponseWriter, r *Request, newPath string) {
+ if q := r.URL.RawQuery; q != "" {
+ newPath += "?" + q
+ }
+ w.Header().Set("Location", newPath)
+ w.WriteHeader(StatusMovedPermanently)
+}
+
+// ServeFile replies to the request with the contents of the named file or directory.
+func ServeFile(w ResponseWriter, r *Request, name string) {
+ dir, file := filepath.Split(name)
+ serveFile(w, r, Dir(dir), file, false)
+}
+
+type fileHandler struct {
+ root FileSystem
+}
+
+// FileServer returns a handler that serves HTTP requests
+// with the contents of the file system rooted at root.
+//
+// To use the operating system's file system implementation,
+// use http.Dir:
+//
+// http.Handle("/", http.FileServer(http.Dir("/tmp")))
+func FileServer(root FileSystem) Handler {
+ return &fileHandler{root}
+}
+
+func (f *fileHandler) ServeHTTP(w ResponseWriter, r *Request) {
+ upath := r.URL.Path
+ if !strings.HasPrefix(upath, "/") {
+ upath = "/" + upath
+ r.URL.Path = upath
+ }
+ serveFile(w, r, f.root, path.Clean(upath), true)
+}
+
+// httpRange specifies the byte range to be sent to the client.
+type httpRange struct {
+ start, length int64
+}
+
+// parseRange parses a Range header string as per RFC 2616.
+func parseRange(s string, size int64) ([]httpRange, error) {
+ if s == "" {
+ return nil, nil // header not present
+ }
+ const b = "bytes="
+ if !strings.HasPrefix(s, b) {
+ return nil, errors.New("invalid range")
+ }
+ var ranges []httpRange
+ for _, ra := range strings.Split(s[len(b):], ",") {
+ i := strings.Index(ra, "-")
+ if i < 0 {
+ return nil, errors.New("invalid range")
+ }
+ start, end := ra[:i], ra[i+1:]
+ var r httpRange
+ if start == "" {
+ // If no start is specified, end specifies the
+ // range start relative to the end of the file.
+ i, err := strconv.Atoi64(end)
+ if err != nil {
+ return nil, errors.New("invalid range")
+ }
+ if i > size {
+ i = size
+ }
+ r.start = size - i
+ r.length = size - r.start
+ } else {
+ i, err := strconv.Atoi64(start)
+ if err != nil || i > size || i < 0 {
+ return nil, errors.New("invalid range")
+ }
+ r.start = i
+ if end == "" {
+ // If no end is specified, range extends to end of the file.
+ r.length = size - r.start
+ } else {
+ i, err := strconv.Atoi64(end)
+ if err != nil || r.start > i {
+ return nil, errors.New("invalid range")
+ }
+ if i >= size {
+ i = size - 1
+ }
+ r.length = i - r.start + 1
+ }
+ }
+ ranges = append(ranges, r)
+ }
+ return ranges, nil
+}
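+
+// A minimal sketch (not in the original patch) of how parseRange interprets
+// the three Range forms for a hypothetical 11-byte resource:
+//
+//	parseRange("bytes=0-4", 11) // [{start: 0, length: 5}]  the first five bytes
+//	parseRange("bytes=2-", 11)  // [{start: 2, length: 9}]  from offset 2 to the end
+//	parseRange("bytes=-5", 11)  // [{start: 6, length: 5}]  the final five bytes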
--- /dev/null
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http_test
+
+import (
+ "fmt"
+ "io/ioutil"
+ . "net/http"
+ "net/http/httptest"
+ "net/url"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+)
+
+const (
+ testFile = "testdata/file"
+ testFileLength = 11
+)
+
+var ServeFileRangeTests = []struct {
+ start, end int
+ r string
+ code int
+}{
+ {0, testFileLength, "", StatusOK},
+ {0, 5, "0-4", StatusPartialContent},
+ {2, testFileLength, "2-", StatusPartialContent},
+ {testFileLength - 5, testFileLength, "-5", StatusPartialContent},
+ {3, 8, "3-7", StatusPartialContent},
+ {0, 0, "20-", StatusRequestedRangeNotSatisfiable},
+}
+
+func TestServeFile(t *testing.T) {
+ ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
+ ServeFile(w, r, "testdata/file")
+ }))
+ defer ts.Close()
+
+ var err error
+
+ file, err := ioutil.ReadFile(testFile)
+ if err != nil {
+ t.Fatal("reading file:", err)
+ }
+
+ // set up the Request (re-used for all tests)
+ var req Request
+ req.Header = make(Header)
+ if req.URL, err = url.Parse(ts.URL); err != nil {
+ t.Fatal("ParseURL:", err)
+ }
+ req.Method = "GET"
+
+ // straight GET
+ _, body := getBody(t, req)
+ if !equal(body, file) {
+ t.Fatalf("body mismatch: got %q, want %q", body, file)
+ }
+
+ // Range tests
+ for _, rt := range ServeFileRangeTests {
+ req.Header.Set("Range", "bytes="+rt.r)
+ if rt.r == "" {
+ req.Header["Range"] = nil
+ }
+ r, body := getBody(t, req)
+ if r.StatusCode != rt.code {
+ t.Errorf("range=%q: StatusCode=%d, want %d", rt.r, r.StatusCode, rt.code)
+ }
+ if rt.code == StatusRequestedRangeNotSatisfiable {
+ continue
+ }
+ h := fmt.Sprintf("bytes %d-%d/%d", rt.start, rt.end-1, testFileLength)
+ if rt.r == "" {
+ h = ""
+ }
+ cr := r.Header.Get("Content-Range")
+ if cr != h {
+ t.Errorf("header mismatch: range=%q: got %q, want %q", rt.r, cr, h)
+ }
+ if !equal(body, file[rt.start:rt.end]) {
+ t.Errorf("body mismatch: range=%q: got %q, want %q", rt.r, body, file[rt.start:rt.end])
+ }
+ }
+}
+
+var fsRedirectTestData = []struct {
+ original, redirect string
+}{
+ {"/test/index.html", "/test/"},
+ {"/test/testdata", "/test/testdata/"},
+ {"/test/testdata/file/", "/test/testdata/file"},
+}
+
+func TestFSRedirect(t *testing.T) {
+ ts := httptest.NewServer(StripPrefix("/test", FileServer(Dir("."))))
+ defer ts.Close()
+
+ for _, data := range fsRedirectTestData {
+ res, err := Get(ts.URL + data.original)
+ if err != nil {
+ t.Fatal(err)
+ }
+ res.Body.Close()
+ if g, e := res.Request.URL.Path, data.redirect; g != e {
+ t.Errorf("redirect from %s: got %s, want %s", data.original, g, e)
+ }
+ }
+}
+
+type testFileSystem struct {
+ open func(name string) (File, error)
+}
+
+func (fs *testFileSystem) Open(name string) (File, error) {
+ return fs.open(name)
+}
+
+func TestFileServerCleans(t *testing.T) {
+ ch := make(chan string, 1)
+ fs := FileServer(&testFileSystem{func(name string) (File, error) {
+ ch <- name
+ return nil, os.ENOENT
+ }})
+ tests := []struct {
+ reqPath, openArg string
+ }{
+ {"/foo.txt", "/foo.txt"},
+ {"//foo.txt", "/foo.txt"},
+ {"/../foo.txt", "/foo.txt"},
+ }
+ req, _ := NewRequest("GET", "http://example.com", nil)
+ for n, test := range tests {
+ rec := httptest.NewRecorder()
+ req.URL.Path = test.reqPath
+ fs.ServeHTTP(rec, req)
+ if got := <-ch; got != test.openArg {
+ t.Errorf("test %d: got %q, want %q", n, got, test.openArg)
+ }
+ }
+}
+
+func TestFileServerImplicitLeadingSlash(t *testing.T) {
+ tempDir, err := ioutil.TempDir("", "")
+ if err != nil {
+ t.Fatalf("TempDir: %v", err)
+ }
+ defer os.RemoveAll(tempDir)
+ if err := ioutil.WriteFile(filepath.Join(tempDir, "foo.txt"), []byte("Hello world"), 0644); err != nil {
+ t.Fatalf("WriteFile: %v", err)
+ }
+ ts := httptest.NewServer(StripPrefix("/bar/", FileServer(Dir(tempDir))))
+ defer ts.Close()
+ get := func(suffix string) string {
+ res, err := Get(ts.URL + suffix)
+ if err != nil {
+ t.Fatalf("Get %s: %v", suffix, err)
+ }
+ b, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ t.Fatalf("ReadAll %s: %v", suffix, err)
+ }
+ return string(b)
+ }
+ if s := get("/bar/"); !strings.Contains(s, ">foo.txt<") {
+		t.Errorf("expected a directory listing with foo.txt, got %q", s)
+ }
+ if s := get("/bar/foo.txt"); s != "Hello world" {
+		t.Errorf("expected %q, got %q", "Hello world", s)
+ }
+}
+
+func TestDirJoin(t *testing.T) {
+ wfi, err := os.Stat("/etc/hosts")
+ if err != nil {
+ t.Logf("skipping test; no /etc/hosts file")
+ return
+ }
+ test := func(d Dir, name string) {
+ f, err := d.Open(name)
+ if err != nil {
+ t.Fatalf("open of %s: %v", name, err)
+ }
+ defer f.Close()
+ gfi, err := f.Stat()
+ if err != nil {
+ t.Fatalf("stat of %s: %v", name, err)
+ }
+ if gfi.Ino != wfi.Ino {
+ t.Errorf("%s got different inode", name)
+ }
+ }
+ test(Dir("/etc/"), "/hosts")
+ test(Dir("/etc/"), "hosts")
+ test(Dir("/etc/"), "../../../../hosts")
+ test(Dir("/etc"), "/hosts")
+ test(Dir("/etc"), "hosts")
+ test(Dir("/etc"), "../../../../hosts")
+
+ // Not really directories, but since we use this trick in
+ // ServeFile, test it:
+ test(Dir("/etc/hosts"), "")
+ test(Dir("/etc/hosts"), "/")
+ test(Dir("/etc/hosts"), "../")
+}
+
+func TestServeFileContentType(t *testing.T) {
+ const ctype = "icecream/chocolate"
+ override := false
+ ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
+ if override {
+ w.Header().Set("Content-Type", ctype)
+ }
+ ServeFile(w, r, "testdata/file")
+ }))
+ defer ts.Close()
+ get := func(want string) {
+ resp, err := Get(ts.URL)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if h := resp.Header.Get("Content-Type"); h != want {
+ t.Errorf("Content-Type mismatch: got %q, want %q", h, want)
+ }
+ }
+ get("text/plain; charset=utf-8")
+ override = true
+ get(ctype)
+}
+
+func TestServeFileMimeType(t *testing.T) {
+ ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
+ ServeFile(w, r, "testdata/style.css")
+ }))
+ defer ts.Close()
+ resp, err := Get(ts.URL)
+ if err != nil {
+ t.Fatal(err)
+ }
+ want := "text/css; charset=utf-8"
+ if h := resp.Header.Get("Content-Type"); h != want {
+ t.Errorf("Content-Type mismatch: got %q, want %q", h, want)
+ }
+}
+
+func TestServeFileWithContentEncoding(t *testing.T) {
+ ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
+ w.Header().Set("Content-Encoding", "foo")
+ ServeFile(w, r, "testdata/file")
+ }))
+ defer ts.Close()
+ resp, err := Get(ts.URL)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if g, e := resp.ContentLength, int64(-1); g != e {
+ t.Errorf("Content-Length mismatch: got %d, want %d", g, e)
+ }
+}
+
+func TestServeIndexHtml(t *testing.T) {
+ const want = "index.html says hello\n"
+ ts := httptest.NewServer(FileServer(Dir(".")))
+ defer ts.Close()
+
+ for _, path := range []string{"/testdata/", "/testdata/index.html"} {
+ res, err := Get(ts.URL + path)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer res.Body.Close()
+ b, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ t.Fatal("reading Body:", err)
+ }
+ if s := string(b); s != want {
+ t.Errorf("for path %q got %q, want %q", path, s, want)
+ }
+ }
+}
+
+func getBody(t *testing.T, req Request) (*Response, []byte) {
+ r, err := DefaultClient.Do(&req)
+ if err != nil {
+ t.Fatal(req.URL.String(), "send:", err)
+ }
+ b, err := ioutil.ReadAll(r.Body)
+ if err != nil {
+ t.Fatal("reading Body:", err)
+ }
+ return r, b
+}
+
+func equal(a, b []byte) bool {
+ if len(a) != len(b) {
+ return false
+ }
+ for i := range a {
+ if a[i] != b[i] {
+ return false
+ }
+ }
+ return true
+}
--- /dev/null
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http
+
+import (
+ "fmt"
+ "io"
+ "net/textproto"
+ "sort"
+ "strings"
+)
+
+// A Header represents the key-value pairs in an HTTP header.
+type Header map[string][]string
+
+// Add adds the key, value pair to the header.
+// It appends to any existing values associated with key.
+func (h Header) Add(key, value string) {
+ textproto.MIMEHeader(h).Add(key, value)
+}
+
+// Set sets the header entries associated with key to
+// the single element value. It replaces any existing
+// values associated with key.
+func (h Header) Set(key, value string) {
+ textproto.MIMEHeader(h).Set(key, value)
+}
+
+// Get gets the first value associated with the given key.
+// If there are no values associated with the key, Get returns "".
+// To access multiple values of a key, access the map directly
+// with CanonicalHeaderKey.
+func (h Header) Get(key string) string {
+ return textproto.MIMEHeader(h).Get(key)
+}
+
+// Del deletes the values associated with key.
+func (h Header) Del(key string) {
+ textproto.MIMEHeader(h).Del(key)
+}
+
+// Write writes a header in wire format.
+func (h Header) Write(w io.Writer) error {
+ return h.WriteSubset(w, nil)
+}
+
+var headerNewlineToSpace = strings.NewReplacer("\n", " ", "\r", " ")
+
+// WriteSubset writes a header in wire format.
+// If exclude is not nil, keys where exclude[key] == true are not written.
+func (h Header) WriteSubset(w io.Writer, exclude map[string]bool) error {
+ keys := make([]string, 0, len(h))
+ for k := range h {
+ if exclude == nil || !exclude[k] {
+ keys = append(keys, k)
+ }
+ }
+ sort.Strings(keys)
+ for _, k := range keys {
+ for _, v := range h[k] {
+ v = headerNewlineToSpace.Replace(v)
+ v = strings.TrimSpace(v)
+ if _, err := fmt.Fprintf(w, "%s: %s\r\n", k, v); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+// CanonicalHeaderKey returns the canonical format of the
+// header key s. The canonicalization converts the first
+// letter and any letter following a hyphen to upper case;
+// the rest are converted to lowercase. For example, the
+// canonical key for "accept-encoding" is "Accept-Encoding".
+func CanonicalHeaderKey(s string) string { return textproto.CanonicalMIMEHeaderKey(s) }
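+
+// Illustrative use of Header (a sketch, not part of the original change):
+//
+//	h := make(Header)
+//	h.Add("Cache-Control", "no-cache")
+//	h.Add("Cache-Control", "no-store")  // appends a second value
+//	h.Set("Content-Type", "text/plain") // replaces any existing values
+//	h.Get("Cache-Control")              // "no-cache", the first value
+//	h.Write(os.Stdout)                  // wire format, keys sorted: Cache-Control, then Content-Type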
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http
+
+import (
+ "bytes"
+ "testing"
+)
+
+var headerWriteTests = []struct {
+ h Header
+ exclude map[string]bool
+ expected string
+}{
+ {Header{}, nil, ""},
+ {
+ Header{
+ "Content-Type": {"text/html; charset=UTF-8"},
+ "Content-Length": {"0"},
+ },
+ nil,
+ "Content-Length: 0\r\nContent-Type: text/html; charset=UTF-8\r\n",
+ },
+ {
+ Header{
+ "Content-Length": {"0", "1", "2"},
+ },
+ nil,
+ "Content-Length: 0\r\nContent-Length: 1\r\nContent-Length: 2\r\n",
+ },
+ {
+ Header{
+ "Expires": {"-1"},
+ "Content-Length": {"0"},
+ "Content-Encoding": {"gzip"},
+ },
+ map[string]bool{"Content-Length": true},
+ "Content-Encoding: gzip\r\nExpires: -1\r\n",
+ },
+ {
+ Header{
+ "Expires": {"-1"},
+ "Content-Length": {"0", "1", "2"},
+ "Content-Encoding": {"gzip"},
+ },
+ map[string]bool{"Content-Length": true},
+ "Content-Encoding: gzip\r\nExpires: -1\r\n",
+ },
+ {
+ Header{
+ "Expires": {"-1"},
+ "Content-Length": {"0"},
+ "Content-Encoding": {"gzip"},
+ },
+ map[string]bool{"Content-Length": true, "Expires": true, "Content-Encoding": true},
+ "",
+ },
+ {
+ Header{
+ "Nil": nil,
+ "Empty": {},
+ "Blank": {""},
+ "Double-Blank": {"", ""},
+ },
+ nil,
+ "Blank: \r\nDouble-Blank: \r\nDouble-Blank: \r\n",
+ },
+}
+
+func TestHeaderWrite(t *testing.T) {
+ var buf bytes.Buffer
+ for i, test := range headerWriteTests {
+ test.h.WriteSubset(&buf, test.exclude)
+ if buf.String() != test.expected {
+ t.Errorf("#%d:\n got: %q\nwant: %q", i, buf.String(), test.expected)
+ }
+ buf.Reset()
+ }
+}
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package httptest provides utilities for HTTP testing.
+package httptest
+
+import (
+ "bytes"
+ "net/http"
+)
+
+// ResponseRecorder is an implementation of http.ResponseWriter that
+// records its mutations for later inspection in tests.
+type ResponseRecorder struct {
+ Code int // the HTTP response code from WriteHeader
+ HeaderMap http.Header // the HTTP response headers
+ Body *bytes.Buffer // if non-nil, the bytes.Buffer to append written data to
+ Flushed bool
+}
+
+// NewRecorder returns an initialized ResponseRecorder.
+func NewRecorder() *ResponseRecorder {
+ return &ResponseRecorder{
+ HeaderMap: make(http.Header),
+ Body: new(bytes.Buffer),
+ }
+}
+
+// DefaultRemoteAddr is the default remote address to return in RemoteAddr if
+// an explicit DefaultRemoteAddr isn't set on ResponseRecorder.
+const DefaultRemoteAddr = "1.2.3.4"
+
+// Header returns the response headers.
+func (rw *ResponseRecorder) Header() http.Header {
+ return rw.HeaderMap
+}
+
+// Write always succeeds and writes to rw.Body, if not nil.
+func (rw *ResponseRecorder) Write(buf []byte) (int, error) {
+ if rw.Body != nil {
+ rw.Body.Write(buf)
+ }
+ if rw.Code == 0 {
+ rw.Code = http.StatusOK
+ }
+ return len(buf), nil
+}
+
+// WriteHeader sets rw.Code.
+func (rw *ResponseRecorder) WriteHeader(code int) {
+ rw.Code = code
+}
+
+// Flush sets rw.Flushed to true.
+func (rw *ResponseRecorder) Flush() {
+ rw.Flushed = true
+}
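+
+// Typical use in a handler test (a sketch; myHandler is a hypothetical
+// http.HandlerFunc, not something defined in this change):
+//
+//	rec := httptest.NewRecorder()
+//	req, _ := http.NewRequest("GET", "http://example.com/", nil)
+//	myHandler(rec, req)
+//	if rec.Code != http.StatusOK {
+//		t.Errorf("status = %d, want %d", rec.Code, http.StatusOK)
+//	}
+//	body := rec.Body.String()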
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Implementation of Server
+
+package httptest
+
+import (
+ "crypto/rand"
+ "crypto/tls"
+ "flag"
+ "fmt"
+ "net"
+ "net/http"
+ "os"
+ "time"
+)
+
+// A Server is an HTTP server listening on a system-chosen port on the
+// local loopback interface, for use in end-to-end HTTP tests.
+type Server struct {
+ URL string // base URL of form http://ipaddr:port with no trailing slash
+ Listener net.Listener
+	TLS      *tls.Config // nil if not using TLS
+
+ // Config may be changed after calling NewUnstartedServer and
+ // before Start or StartTLS.
+ Config *http.Server
+}
+
+// historyListener keeps track of all connections that it's ever
+// accepted.
+type historyListener struct {
+ net.Listener
+ history []net.Conn
+}
+
+func (hs *historyListener) Accept() (c net.Conn, err error) {
+ c, err = hs.Listener.Accept()
+ if err == nil {
+ hs.history = append(hs.history, c)
+ }
+ return
+}
+
+func newLocalListener() net.Listener {
+ if *serve != "" {
+ l, err := net.Listen("tcp", *serve)
+ if err != nil {
+ panic(fmt.Sprintf("httptest: failed to listen on %v: %v", *serve, err))
+ }
+ return l
+ }
+ l, err := net.Listen("tcp", "127.0.0.1:0")
+ if err != nil {
+ if l, err = net.Listen("tcp6", "[::1]:0"); err != nil {
+ panic(fmt.Sprintf("httptest: failed to listen on a port: %v", err))
+ }
+ }
+ return l
+}
+
+// When debugging a particular http server-based test,
+// this flag lets you run
+// gotest -run=BrokenTest -httptest.serve=127.0.0.1:8000
+// to start the broken server so you can interact with it manually.
+var serve = flag.String("httptest.serve", "", "if non-empty, httptest.NewServer serves on this address and blocks")
+
+// NewServer starts and returns a new Server.
+// The caller should call Close when finished, to shut it down.
+func NewServer(handler http.Handler) *Server {
+ ts := NewUnstartedServer(handler)
+ ts.Start()
+ return ts
+}
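+
+// A typical end-to-end test built on NewServer looks roughly like this
+// (an illustrative sketch, not part of the original change):
+//
+//	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+//		fmt.Fprintln(w, "hello")
+//	}))
+//	defer ts.Close()
+//	res, err := http.Get(ts.URL)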
+
+// NewUnstartedServer returns a new Server but doesn't start it.
+//
+// After changing its configuration, the caller should call Start or
+// StartTLS.
+//
+// The caller should call Close when finished, to shut it down.
+func NewUnstartedServer(handler http.Handler) *Server {
+ return &Server{
+ Listener: newLocalListener(),
+ Config: &http.Server{Handler: handler},
+ }
+}
+
+// Start starts a server from NewUnstartedServer.
+func (s *Server) Start() {
+ if s.URL != "" {
+ panic("Server already started")
+ }
+ s.Listener = &historyListener{s.Listener, make([]net.Conn, 0)}
+ s.URL = "http://" + s.Listener.Addr().String()
+ go s.Config.Serve(s.Listener)
+ if *serve != "" {
+		fmt.Fprintln(os.Stderr, "httptest: serving on", s.URL)
+ select {}
+ }
+}
+
+// StartTLS starts TLS on a server from NewUnstartedServer.
+func (s *Server) StartTLS() {
+ if s.URL != "" {
+ panic("Server already started")
+ }
+ cert, err := tls.X509KeyPair(localhostCert, localhostKey)
+ if err != nil {
+ panic(fmt.Sprintf("httptest: NewTLSServer: %v", err))
+ }
+
+ s.TLS = &tls.Config{
+ Rand: rand.Reader,
+ Time: time.Seconds,
+ NextProtos: []string{"http/1.1"},
+ Certificates: []tls.Certificate{cert},
+ }
+ tlsListener := tls.NewListener(s.Listener, s.TLS)
+
+ s.Listener = &historyListener{tlsListener, make([]net.Conn, 0)}
+ s.URL = "https://" + s.Listener.Addr().String()
+ go s.Config.Serve(s.Listener)
+}
+
+// NewTLSServer starts and returns a new Server using TLS.
+// The caller should call Close when finished, to shut it down.
+func NewTLSServer(handler http.Handler) *Server {
+ ts := NewUnstartedServer(handler)
+ ts.StartTLS()
+ return ts
+}
+
+// Close shuts down the server.
+func (s *Server) Close() {
+ s.Listener.Close()
+}
+
+// CloseClientConnections closes any currently open HTTP connections
+// to the test Server.
+func (s *Server) CloseClientConnections() {
+ hl, ok := s.Listener.(*historyListener)
+ if !ok {
+ return
+ }
+ for _, conn := range hl.history {
+ conn.Close()
+ }
+}
+
+// localhostCert is a PEM-encoded TLS cert with SAN DNS names
+// "127.0.0.1" and "[::1]", expiring at the last second of 2049 (the end
+// of ASN.1 time).
+var localhostCert = []byte(`-----BEGIN CERTIFICATE-----
+MIIBOTCB5qADAgECAgEAMAsGCSqGSIb3DQEBBTAAMB4XDTcwMDEwMTAwMDAwMFoX
+DTQ5MTIzMTIzNTk1OVowADBaMAsGCSqGSIb3DQEBAQNLADBIAkEAsuA5mAFMj6Q7
+qoBzcvKzIq4kzuT5epSp2AkcQfyBHm7K13Ws7u+0b5Vb9gqTf5cAiIKcrtrXVqkL
+8i1UQF6AzwIDAQABo08wTTAOBgNVHQ8BAf8EBAMCACQwDQYDVR0OBAYEBAECAwQw
+DwYDVR0jBAgwBoAEAQIDBDAbBgNVHREEFDASggkxMjcuMC4wLjGCBVs6OjFdMAsG
+CSqGSIb3DQEBBQNBAJH30zjLWRztrWpOCgJL8RQWLaKzhK79pVhAx6q/3NrF16C7
++l1BRZstTwIGdoGId8BRpErK1TXkniFb95ZMynM=
+-----END CERTIFICATE-----
+`)
+
+// localhostKey is the private key for localhostCert.
+var localhostKey = []byte(`-----BEGIN RSA PRIVATE KEY-----
+MIIBPQIBAAJBALLgOZgBTI+kO6qAc3LysyKuJM7k+XqUqdgJHEH8gR5uytd1rO7v
+tG+VW/YKk3+XAIiCnK7a11apC/ItVEBegM8CAwEAAQJBAI5sxq7naeR9ahyqRkJi
+SIv2iMxLuPEHaezf5CYOPWjSjBPyVhyRevkhtqEjF/WkgL7C2nWpYHsUcBDBQVF0
+3KECIQDtEGB2ulnkZAahl3WuJziXGLB+p8Wgx7wzSM6bHu1c6QIhAMEp++CaS+SJ
+/TrU0zwY/fW4SvQeb49BPZUF3oqR8Xz3AiEA1rAJHBzBgdOQKdE3ksMUPcnvNJSN
+poCcELmz2clVXtkCIQCLytuLV38XHToTipR4yMl6O+6arzAjZ56uq7m7ZRV0TwIh
+AM65XAOw8Dsg9Kq78aYXiOEDc5DL0sbFUu/SlmRcCg93
+-----END RSA PRIVATE KEY-----
+`)
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package httputil
+
+import (
+ "bufio"
+ "io"
+ "net/http"
+ "strconv"
+ "strings"
+)
+
+// NewChunkedWriter returns a new writer that translates writes into HTTP
+// "chunked" format before writing them to w. Closing the returned writer
+// sends the final 0-length chunk that marks the end of the stream.
+//
+// NewChunkedWriter is not needed by normal applications. The http
+// package adds chunking automatically if handlers don't set a
+// Content-Length header. Using NewChunkedWriter inside a handler
+// would result in double chunking or chunking with a Content-Length
+// header, both of which are wrong.
+func NewChunkedWriter(w io.Writer) io.WriteCloser {
+ return &chunkedWriter{w}
+}
+
+// Writing to ChunkedWriter translates to writing in HTTP chunked Transfer
+// Encoding wire format to the underlying Wire writer.
+type chunkedWriter struct {
+ Wire io.Writer
+}
+
+// Write the contents of data as one chunk to Wire.
+// NOTE: the corresponding chunk-writing procedure in Conn.Write has a bug
+// since it does not check for success of io.WriteString.
+func (cw *chunkedWriter) Write(data []byte) (n int, err error) {
+
+ // Don't send 0-length data. It looks like EOF for chunked encoding.
+ if len(data) == 0 {
+ return 0, nil
+ }
+
+ head := strconv.Itob(len(data), 16) + "\r\n"
+
+ if _, err = io.WriteString(cw.Wire, head); err != nil {
+ return 0, err
+ }
+ if n, err = cw.Wire.Write(data); err != nil {
+ return
+ }
+ if n != len(data) {
+ err = io.ErrShortWrite
+ return
+ }
+ _, err = io.WriteString(cw.Wire, "\r\n")
+
+ return
+}
+
+func (cw *chunkedWriter) Close() error {
+ _, err := io.WriteString(cw.Wire, "0\r\n")
+ return err
+}
+
+// NewChunkedReader returns a new reader that translates the data read from r
+// out of HTTP "chunked" format before returning it.
+// The reader returns io.EOF when the final 0-length chunk is read.
+//
+// NewChunkedReader is not needed by normal applications. The http package
+// automatically decodes chunking when reading response bodies.
+func NewChunkedReader(r io.Reader) io.Reader {
+ // This is a bit of a hack so we don't have to copy chunkedReader into
+ // httputil. It's a bit more complex than chunkedWriter, which is copied
+ // above.
+ req, err := http.ReadRequest(bufio.NewReader(io.MultiReader(
+ strings.NewReader("POST / HTTP/1.1\r\nTransfer-Encoding: chunked\r\n\r\n"),
+ r,
+ strings.NewReader("\r\n"))))
+ if err != nil {
+ panic("bad fake request: " + err.Error())
+ }
+ return req.Body
+}
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package httputil
+
+import (
+ "bytes"
+ "io/ioutil"
+ "testing"
+)
+
+func TestChunk(t *testing.T) {
+ var b bytes.Buffer
+
+ w := NewChunkedWriter(&b)
+ const chunk1 = "hello, "
+ const chunk2 = "world! 0123456789abcdef"
+ w.Write([]byte(chunk1))
+ w.Write([]byte(chunk2))
+ w.Close()
+
+ if g, e := b.String(), "7\r\nhello, \r\n17\r\nworld! 0123456789abcdef\r\n0\r\n"; g != e {
+ t.Fatalf("chunk writer wrote %q; want %q", g, e)
+ }
+
+ r := NewChunkedReader(&b)
+ data, err := ioutil.ReadAll(r)
+ if err != nil {
+ t.Fatalf("ReadAll from NewChunkedReader: %v", err)
+ }
+ if g, e := string(data), chunk1+chunk2; g != e {
+ t.Errorf("chunk reader read %q; want %q", g, e)
+ }
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package httputil
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "strings"
+)
+
+// One of the copies, say from b to r2, could be avoided by using a more
+// elaborate trick where the other copy is made during Request/Response.Write.
+// This would complicate things too much, given that these functions are for
+// debugging only.
+func drainBody(b io.ReadCloser) (r1, r2 io.ReadCloser, err error) {
+ var buf bytes.Buffer
+ if _, err = buf.ReadFrom(b); err != nil {
+ return nil, nil, err
+ }
+ if err = b.Close(); err != nil {
+ return nil, nil, err
+ }
+ return ioutil.NopCloser(&buf), ioutil.NopCloser(bytes.NewBuffer(buf.Bytes())), nil
+}
+
+// dumpConn is a net.Conn which writes to Writer and reads from Reader
+type dumpConn struct {
+ io.Writer
+ io.Reader
+}
+
+func (c *dumpConn) Close() error { return nil }
+func (c *dumpConn) LocalAddr() net.Addr { return nil }
+func (c *dumpConn) RemoteAddr() net.Addr { return nil }
+func (c *dumpConn) SetTimeout(nsec int64) error { return nil }
+func (c *dumpConn) SetReadTimeout(nsec int64) error { return nil }
+func (c *dumpConn) SetWriteTimeout(nsec int64) error { return nil }
+
+// DumpRequestOut is like DumpRequest but includes
+// headers that the standard http.Transport adds,
+// such as User-Agent.
+func DumpRequestOut(req *http.Request, body bool) (dump []byte, err error) {
+ save := req.Body
+ if !body || req.Body == nil {
+ req.Body = nil
+ } else {
+ save, req.Body, err = drainBody(req.Body)
+ if err != nil {
+ return
+ }
+ }
+
+ var b bytes.Buffer
+ dialed := false
+ t := &http.Transport{
+ Dial: func(net, addr string) (c net.Conn, err error) {
+ if dialed {
+ return nil, errors.New("unexpected second dial")
+ }
+ c = &dumpConn{
+ Writer: &b,
+ Reader: strings.NewReader("HTTP/1.1 500 Fake Error\r\n\r\n"),
+ }
+ return
+ },
+ }
+
+ _, err = t.RoundTrip(req)
+
+ req.Body = save
+ if err != nil {
+ return
+ }
+ dump = b.Bytes()
+ return
+}
+
+// valueOrDefault returns value if it is nonempty, and def otherwise.
+func valueOrDefault(value, def string) string {
+ if value != "" {
+ return value
+ }
+ return def
+}
+
+var reqWriteExcludeHeaderDump = map[string]bool{
+ "Host": true, // not in Header map anyway
+ "Content-Length": true,
+ "Transfer-Encoding": true,
+ "Trailer": true,
+}
+
+// dumpAsReceived writes req to w in the form as it was received, or
+// at least as accurately as possible from the information retained in
+// the request.
+func dumpAsReceived(req *http.Request, w io.Writer) error {
+ return nil
+}
+
+// DumpRequest returns the as-received wire representation of req,
+// optionally including the request body, for debugging.
+// DumpRequest is semantically a no-op, but in order to
+// dump the body, it reads the body data into memory and
+// changes req.Body to refer to the in-memory copy.
+// The documentation for http.Request.Write details which fields
+// of req are used.
+func DumpRequest(req *http.Request, body bool) (dump []byte, err error) {
+ save := req.Body
+ if !body || req.Body == nil {
+ req.Body = nil
+ } else {
+ save, req.Body, err = drainBody(req.Body)
+ if err != nil {
+ return
+ }
+ }
+
+ var b bytes.Buffer
+
+ urlStr := req.URL.Raw
+ if urlStr == "" {
+ urlStr = valueOrDefault(req.URL.EncodedPath(), "/")
+ if req.URL.RawQuery != "" {
+ urlStr += "?" + req.URL.RawQuery
+ }
+ }
+
+ fmt.Fprintf(&b, "%s %s HTTP/%d.%d\r\n", valueOrDefault(req.Method, "GET"), urlStr,
+ req.ProtoMajor, req.ProtoMinor)
+
+ host := req.Host
+ if host == "" && req.URL != nil {
+ host = req.URL.Host
+ }
+ if host != "" {
+ fmt.Fprintf(&b, "Host: %s\r\n", host)
+ }
+
+ chunked := len(req.TransferEncoding) > 0 && req.TransferEncoding[0] == "chunked"
+ if len(req.TransferEncoding) > 0 {
+ fmt.Fprintf(&b, "Transfer-Encoding: %s\r\n", strings.Join(req.TransferEncoding, ","))
+ }
+ if req.Close {
+ fmt.Fprintf(&b, "Connection: close\r\n")
+ }
+
+ err = req.Header.WriteSubset(&b, reqWriteExcludeHeaderDump)
+ if err != nil {
+ return
+ }
+
+ io.WriteString(&b, "\r\n")
+
+ if req.Body != nil {
+ var dest io.Writer = &b
+ if chunked {
+ dest = NewChunkedWriter(dest)
+ }
+ _, err = io.Copy(dest, req.Body)
+ if chunked {
+ dest.(io.Closer).Close()
+ io.WriteString(&b, "\r\n")
+ }
+ }
+
+ req.Body = save
+ if err != nil {
+ return
+ }
+ dump = b.Bytes()
+ return
+}
+
+// DumpResponse is like DumpRequest but dumps a response.
+func DumpResponse(resp *http.Response, body bool) (dump []byte, err error) {
+ var b bytes.Buffer
+ save := resp.Body
+ savecl := resp.ContentLength
+ if !body || resp.Body == nil {
+ resp.Body = nil
+ resp.ContentLength = 0
+ } else {
+ save, resp.Body, err = drainBody(resp.Body)
+ if err != nil {
+ return
+ }
+ }
+ err = resp.Write(&b)
+ resp.Body = save
+ resp.ContentLength = savecl
+ if err != nil {
+ return
+ }
+ dump = b.Bytes()
+ return
+}
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package httputil
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "testing"
+)
+
+type dumpTest struct {
+ Req http.Request
+ Body interface{} // optional []byte or func() io.ReadCloser to populate Req.Body
+
+ WantDump string
+ WantDumpOut string
+}
+
+var dumpTests = []dumpTest{
+
+ // HTTP/1.1 => chunked coding; body; empty trailer
+ {
+ Req: http.Request{
+ Method: "GET",
+ URL: &url.URL{
+ Scheme: "http",
+ Host: "www.google.com",
+ Path: "/search",
+ },
+ ProtoMajor: 1,
+ ProtoMinor: 1,
+ TransferEncoding: []string{"chunked"},
+ },
+
+ Body: []byte("abcdef"),
+
+ WantDump: "GET /search HTTP/1.1\r\n" +
+ "Host: www.google.com\r\n" +
+ "Transfer-Encoding: chunked\r\n\r\n" +
+ chunk("abcdef") + chunk(""),
+ },
+
+ // Verify that DumpRequest preserves the HTTP version number, doesn't add a Host,
+ // and doesn't add a User-Agent.
+ {
+ Req: http.Request{
+ Method: "GET",
+ URL: mustParseURL("/foo"),
+ ProtoMajor: 1,
+ ProtoMinor: 0,
+ Header: http.Header{
+ "X-Foo": []string{"X-Bar"},
+ },
+ },
+
+ WantDump: "GET /foo HTTP/1.0\r\n" +
+ "X-Foo: X-Bar\r\n\r\n",
+ },
+
+ {
+ Req: *mustNewRequest("GET", "http://example.com/foo", nil),
+
+ WantDumpOut: "GET /foo HTTP/1.1\r\n" +
+ "Host: example.com\r\n" +
+ "User-Agent: Go http package\r\n" +
+ "Accept-Encoding: gzip\r\n\r\n",
+ },
+}
+
+func TestDumpRequest(t *testing.T) {
+ for i, tt := range dumpTests {
+ setBody := func() {
+ if tt.Body == nil {
+ return
+ }
+ switch b := tt.Body.(type) {
+ case []byte:
+ tt.Req.Body = ioutil.NopCloser(bytes.NewBuffer(b))
+ case func() io.ReadCloser:
+ tt.Req.Body = b()
+ }
+ }
+ setBody()
+ if tt.Req.Header == nil {
+ tt.Req.Header = make(http.Header)
+ }
+
+ if tt.WantDump != "" {
+ setBody()
+ dump, err := DumpRequest(&tt.Req, true)
+ if err != nil {
+ t.Errorf("DumpRequest #%d: %s", i, err)
+ continue
+ }
+ if string(dump) != tt.WantDump {
+ t.Errorf("DumpRequest %d, expecting:\n%s\nGot:\n%s\n", i, tt.WantDump, string(dump))
+ continue
+ }
+ }
+
+ if tt.WantDumpOut != "" {
+ setBody()
+ dump, err := DumpRequestOut(&tt.Req, true)
+ if err != nil {
+ t.Errorf("DumpRequestOut #%d: %s", i, err)
+ continue
+ }
+ if string(dump) != tt.WantDumpOut {
+ t.Errorf("DumpRequestOut %d, expecting:\n%s\nGot:\n%s\n", i, tt.WantDumpOut, string(dump))
+ continue
+ }
+ }
+ }
+}
+
+func chunk(s string) string {
+ return fmt.Sprintf("%x\r\n%s\r\n", len(s), s)
+}
+
+func mustParseURL(s string) *url.URL {
+ u, err := url.Parse(s)
+ if err != nil {
+ panic(fmt.Sprintf("Error parsing URL %q: %v", s, err))
+ }
+ return u
+}
+
+func mustNewRequest(method, url string, body io.Reader) *http.Request {
+ req, err := http.NewRequest(method, url, body)
+ if err != nil {
+ panic(fmt.Sprintf("NewRequest(%q, %q, %p) err = %v", method, url, body, err))
+ }
+ return req
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package httputil provides HTTP utility functions, complementing the
+// more common ones in the net/http package.
+package httputil
+
+import (
+ "bufio"
+ "errors"
+ "io"
+ "net"
+ "net/http"
+ "net/textproto"
+ "os"
+ "sync"
+)
+
+var (
+ ErrPersistEOF = &http.ProtocolError{"persistent connection closed"}
+ ErrPipeline = &http.ProtocolError{"pipeline error"}
+)
+
+// A ServerConn reads requests and sends responses over an underlying
+// connection, until the HTTP keepalive logic commands an end. ServerConn
+// also allows hijacking the underlying connection by calling Hijack
+// to regain control over the connection. ServerConn supports pipe-lining,
+// i.e. requests can be read out of sync (but in the same order) while the
+// respective responses are sent.
+//
+// ServerConn is low-level and should not be needed by most applications.
+// See Server.
+type ServerConn struct {
+ lk sync.Mutex // read-write protects the following fields
+ c net.Conn
+ r *bufio.Reader
+ re, we error // read/write errors
+ lastbody io.ReadCloser
+ nread, nwritten int
+ pipereq map[*http.Request]uint
+
+ pipe textproto.Pipeline
+}
+
+// NewServerConn returns a new ServerConn reading and writing c. If r is not
+// nil, it is the buffer to use when reading c.
+func NewServerConn(c net.Conn, r *bufio.Reader) *ServerConn {
+ if r == nil {
+ r = bufio.NewReader(c)
+ }
+ return &ServerConn{c: c, r: r, pipereq: make(map[*http.Request]uint)}
+}
+
+// Hijack detaches the ServerConn and returns the underlying connection as well
+// as the read-side bufio which may have some left over data. Hijack may be
+// called before Read has signaled the end of the keep-alive logic. The user
+// should not call Hijack while Read or Write is in progress.
+func (sc *ServerConn) Hijack() (c net.Conn, r *bufio.Reader) {
+ sc.lk.Lock()
+ defer sc.lk.Unlock()
+ c = sc.c
+ r = sc.r
+ sc.c = nil
+ sc.r = nil
+ return
+}
+
+// Close calls Hijack and then also closes the underlying connection
+func (sc *ServerConn) Close() error {
+ c, _ := sc.Hijack()
+ if c != nil {
+ return c.Close()
+ }
+ return nil
+}
+
+// Read returns the next request on the wire. An ErrPersistEOF is returned if
+// it is gracefully determined that there are no more requests (e.g. after the
+// first request on an HTTP/1.0 connection, or after a Connection: close header
+// on an HTTP/1.1 connection).
+func (sc *ServerConn) Read() (req *http.Request, err error) {
+
+ // Ensure ordered execution of Reads and Writes
+ id := sc.pipe.Next()
+ sc.pipe.StartRequest(id)
+ defer func() {
+ sc.pipe.EndRequest(id)
+ if req == nil {
+ sc.pipe.StartResponse(id)
+ sc.pipe.EndResponse(id)
+ } else {
+ // Remember the pipeline id of this request
+ sc.lk.Lock()
+ sc.pipereq[req] = id
+ sc.lk.Unlock()
+ }
+ }()
+
+ sc.lk.Lock()
+ if sc.we != nil { // no point receiving if write-side broken or closed
+ defer sc.lk.Unlock()
+ return nil, sc.we
+ }
+ if sc.re != nil {
+ defer sc.lk.Unlock()
+ return nil, sc.re
+ }
+ if sc.r == nil { // connection closed by user in the meantime
+ defer sc.lk.Unlock()
+ return nil, os.EBADF
+ }
+ r := sc.r
+ lastbody := sc.lastbody
+ sc.lastbody = nil
+ sc.lk.Unlock()
+
+ // Make sure body is fully consumed, even if user does not call body.Close
+ if lastbody != nil {
+ // body.Close is assumed to be idempotent and multiple calls to
+ // it should return the error that its first invocation
+ // returned.
+ err = lastbody.Close()
+ if err != nil {
+ sc.lk.Lock()
+ defer sc.lk.Unlock()
+ sc.re = err
+ return nil, err
+ }
+ }
+
+ req, err = http.ReadRequest(r)
+ sc.lk.Lock()
+ defer sc.lk.Unlock()
+ if err != nil {
+ if err == io.ErrUnexpectedEOF {
+ // A close from the opposing client is treated as a
+ // graceful close, even if there was some unparse-able
+ // data before the close.
+ sc.re = ErrPersistEOF
+ return nil, sc.re
+ } else {
+ sc.re = err
+ return req, err
+ }
+ }
+ sc.lastbody = req.Body
+ sc.nread++
+ if req.Close {
+ sc.re = ErrPersistEOF
+ return req, sc.re
+ }
+ return req, err
+}
+
+// Pending returns the number of unanswered requests
+// that have been received on the connection.
+func (sc *ServerConn) Pending() int {
+ sc.lk.Lock()
+ defer sc.lk.Unlock()
+ return sc.nread - sc.nwritten
+}
+
+// Write writes resp in response to req. To close the connection gracefully, set the
+// Response.Close field to true. Write should be considered operational until
+// it returns an error, regardless of any errors returned on the Read side.
+func (sc *ServerConn) Write(req *http.Request, resp *http.Response) error {
+
+ // Retrieve the pipeline ID of this request/response pair
+ sc.lk.Lock()
+ id, ok := sc.pipereq[req]
+ delete(sc.pipereq, req)
+ if !ok {
+ sc.lk.Unlock()
+ return ErrPipeline
+ }
+ sc.lk.Unlock()
+
+ // Ensure pipeline order
+ sc.pipe.StartResponse(id)
+ defer sc.pipe.EndResponse(id)
+
+ sc.lk.Lock()
+ if sc.we != nil {
+ defer sc.lk.Unlock()
+ return sc.we
+ }
+ if sc.c == nil { // connection closed by user in the meantime
+ defer sc.lk.Unlock()
+ return os.EBADF
+ }
+ c := sc.c
+ if sc.nread <= sc.nwritten {
+ defer sc.lk.Unlock()
+ return errors.New("persist server pipe count")
+ }
+ if resp.Close {
+ // After signaling a keep-alive close, any pipelined unread
+ // requests will be lost. It is up to the user to drain them
+ // before signaling.
+ sc.re = ErrPersistEOF
+ }
+ sc.lk.Unlock()
+
+ err := resp.Write(c)
+ sc.lk.Lock()
+ defer sc.lk.Unlock()
+ if err != nil {
+ sc.we = err
+ return err
+ }
+ sc.nwritten++
+
+ return nil
+}
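+
+// Illustrative use of ServerConn (a sketch, not part of the original change;
+// listener is a hypothetical net.Listener and error handling is omitted):
+//
+//	conn, _ := listener.Accept()
+//	sc := httputil.NewServerConn(conn, nil)
+//	req, _ := sc.Read()
+//	sc.Write(req, &http.Response{StatusCode: 200, ProtoMajor: 1, ProtoMinor: 1})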
+
+// A ClientConn sends requests and receives responses over an underlying
+// connection, while respecting the HTTP keepalive logic. ClientConn
+// supports hijacking the connection by calling Hijack to
+// regain control of the underlying net.Conn and deal with it as desired.
+//
+// ClientConn is low-level and should not be needed by most applications.
+// See Client.
+type ClientConn struct {
+ lk sync.Mutex // read-write protects the following fields
+ c net.Conn
+ r *bufio.Reader
+ re, we error // read/write errors
+ lastbody io.ReadCloser
+ nread, nwritten int
+ pipereq map[*http.Request]uint
+
+ pipe textproto.Pipeline
+ writeReq func(*http.Request, io.Writer) error
+}
+
+// NewClientConn returns a new ClientConn reading and writing c. If r is not
+// nil, it is the buffer to use when reading c.
+func NewClientConn(c net.Conn, r *bufio.Reader) *ClientConn {
+ if r == nil {
+ r = bufio.NewReader(c)
+ }
+ return &ClientConn{
+ c: c,
+ r: r,
+ pipereq: make(map[*http.Request]uint),
+ writeReq: (*http.Request).Write,
+ }
+}
+
+// NewProxyClientConn works like NewClientConn but writes Requests
+// using Request's WriteProxy method.
+func NewProxyClientConn(c net.Conn, r *bufio.Reader) *ClientConn {
+ cc := NewClientConn(c, r)
+ cc.writeReq = (*http.Request).WriteProxy
+ return cc
+}
+
+// Hijack detaches the ClientConn and returns the underlying connection as well
+// as the read-side bufio which may have some left over data. Hijack may be
+// called before the user or Read have signaled the end of the keep-alive
+// logic. The user should not call Hijack while Read or Write is in progress.
+func (cc *ClientConn) Hijack() (c net.Conn, r *bufio.Reader) {
+ cc.lk.Lock()
+ defer cc.lk.Unlock()
+ c = cc.c
+ r = cc.r
+ cc.c = nil
+ cc.r = nil
+ return
+}
+
+// Close calls Hijack and then also closes the underlying connection
+func (cc *ClientConn) Close() error {
+ c, _ := cc.Hijack()
+ if c != nil {
+ return c.Close()
+ }
+ return nil
+}
+
+// Write writes a request. An ErrPersistEOF error is returned if the connection
+// has been closed in an HTTP keepalive sense. If req.Close equals true, the
+// keepalive connection is logically closed after this request and the opposing
+// server is informed. An ErrUnexpectedEOF indicates the remote closed the
+// underlying TCP connection, which is usually considered a graceful close.
+func (cc *ClientConn) Write(req *http.Request) (err error) {
+
+ // Ensure ordered execution of Writes
+ id := cc.pipe.Next()
+ cc.pipe.StartRequest(id)
+ defer func() {
+ cc.pipe.EndRequest(id)
+ if err != nil {
+ cc.pipe.StartResponse(id)
+ cc.pipe.EndResponse(id)
+ } else {
+ // Remember the pipeline id of this request
+ cc.lk.Lock()
+ cc.pipereq[req] = id
+ cc.lk.Unlock()
+ }
+ }()
+
+ cc.lk.Lock()
+ if cc.re != nil { // no point sending if read-side closed or broken
+ defer cc.lk.Unlock()
+ return cc.re
+ }
+ if cc.we != nil {
+ defer cc.lk.Unlock()
+ return cc.we
+ }
+ if cc.c == nil { // connection closed by user in the meantime
+ defer cc.lk.Unlock()
+ return os.EBADF
+ }
+ c := cc.c
+ if req.Close {
+ // We write the EOF to the write-side error, because there
+ // still might be some pipelined reads
+ cc.we = ErrPersistEOF
+ }
+ cc.lk.Unlock()
+
+ err = cc.writeReq(req, c)
+ cc.lk.Lock()
+ defer cc.lk.Unlock()
+ if err != nil {
+ cc.we = err
+ return err
+ }
+ cc.nwritten++
+
+ return nil
+}
+
+// Pending returns the number of unanswered requests
+// that have been sent on the connection.
+func (cc *ClientConn) Pending() int {
+ cc.lk.Lock()
+ defer cc.lk.Unlock()
+ return cc.nwritten - cc.nread
+}
+
+// Read reads the next response from the wire. A valid response might be
+// returned together with an ErrPersistEOF, which means that the remote
+// requested that this be the last request serviced. Read can be called
+// concurrently with Write, but not with another Read.
+func (cc *ClientConn) Read(req *http.Request) (resp *http.Response, err error) {
+ // Retrieve the pipeline ID of this request/response pair
+ cc.lk.Lock()
+ id, ok := cc.pipereq[req]
+ delete(cc.pipereq, req)
+ if !ok {
+ cc.lk.Unlock()
+ return nil, ErrPipeline
+ }
+ cc.lk.Unlock()
+
+ // Ensure pipeline order
+ cc.pipe.StartResponse(id)
+ defer cc.pipe.EndResponse(id)
+
+ cc.lk.Lock()
+ if cc.re != nil {
+ defer cc.lk.Unlock()
+ return nil, cc.re
+ }
+ if cc.r == nil { // connection closed by user in the meantime
+ defer cc.lk.Unlock()
+ return nil, os.EBADF
+ }
+ r := cc.r
+ lastbody := cc.lastbody
+ cc.lastbody = nil
+ cc.lk.Unlock()
+
+ // Make sure body is fully consumed, even if user does not call body.Close
+ if lastbody != nil {
+ // body.Close is assumed to be idempotent and multiple calls to
+		// it should return the error that its first invocation
+ // returned.
+ err = lastbody.Close()
+ if err != nil {
+ cc.lk.Lock()
+ defer cc.lk.Unlock()
+ cc.re = err
+ return nil, err
+ }
+ }
+
+ resp, err = http.ReadResponse(r, req)
+ cc.lk.Lock()
+ defer cc.lk.Unlock()
+ if err != nil {
+ cc.re = err
+ return resp, err
+ }
+ cc.lastbody = resp.Body
+
+ cc.nread++
+
+ if resp.Close {
+ cc.re = ErrPersistEOF // don't send any more requests
+ return resp, cc.re
+ }
+ return resp, err
+}
+
+// Do is a convenience method that writes a request and reads a response.
+func (cc *ClientConn) Do(req *http.Request) (resp *http.Response, err error) {
+ err = cc.Write(req)
+ if err != nil {
+ return
+ }
+ return cc.Read(req)
+}
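+
+// Illustrative use of ClientConn (a sketch, not part of the original change;
+// error handling omitted):
+//
+//	conn, _ := net.Dial("tcp", "example.com:80")
+//	cc := httputil.NewClientConn(conn, nil)
+//	defer cc.Close()
+//	req, _ := http.NewRequest("GET", "http://example.com/", nil)
+//	resp, _ := cc.Do(req)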
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// HTTP reverse proxy handler
+
+package httputil
+
+import (
+ "io"
+ "log"
+ "net"
+ "net/http"
+ "net/url"
+ "strings"
+ "sync"
+ "time"
+)
+
+// ReverseProxy is an HTTP Handler that takes an incoming request and
+// sends it to another server, proxying the response back to the
+// client.
+type ReverseProxy struct {
+ // Director must be a function which modifies
+ // the request into a new request to be sent
+ // using Transport. Its response is then copied
+ // back to the original client unmodified.
+ Director func(*http.Request)
+
+ // The transport used to perform proxy requests.
+ // If nil, http.DefaultTransport is used.
+ Transport http.RoundTripper
+
+ // FlushInterval specifies the flush interval, in
+ // nanoseconds, to flush to the client while
+	// copying the response body.
+ // If zero, no periodic flushing is done.
+ FlushInterval int64
+}
+
+func singleJoiningSlash(a, b string) string {
+ aslash := strings.HasSuffix(a, "/")
+ bslash := strings.HasPrefix(b, "/")
+ switch {
+ case aslash && bslash:
+ return a + b[1:]
+ case !aslash && !bslash:
+ return a + "/" + b
+ }
+ return a + b
+}
+
+// NewSingleHostReverseProxy returns a new ReverseProxy that rewrites
+// URLs to the scheme, host, and base path provided in target. If the
+// target's path is "/base" and the incoming request was for "/dir",
+// the target request will be for /base/dir.
+func NewSingleHostReverseProxy(target *url.URL) *ReverseProxy {
+ director := func(req *http.Request) {
+ req.URL.Scheme = target.Scheme
+ req.URL.Host = target.Host
+ req.URL.Path = singleJoiningSlash(target.Path, req.URL.Path)
+ if q := req.URL.RawQuery; q != "" {
+ req.URL.RawPath = req.URL.Path + "?" + q
+ } else {
+ req.URL.RawPath = req.URL.Path
+ }
+ req.URL.RawQuery = target.RawQuery
+ }
+ return &ReverseProxy{Director: director}
+}
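+
+// For example (a sketch, not part of the original change), to proxy every
+// request to a backend listening on localhost:8080:
+//
+//	target, _ := url.Parse("http://localhost:8080")
+//	proxy := httputil.NewSingleHostReverseProxy(target)
+//	http.ListenAndServe(":8000", proxy)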
+
+func copyHeader(dst, src http.Header) {
+ for k, vv := range src {
+ for _, v := range vv {
+ dst.Add(k, v)
+ }
+ }
+}
+
+func (p *ReverseProxy) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
+ transport := p.Transport
+ if transport == nil {
+ transport = http.DefaultTransport
+ }
+
+ outreq := new(http.Request)
+ *outreq = *req // includes shallow copies of maps, but okay
+
+ p.Director(outreq)
+ outreq.Proto = "HTTP/1.1"
+ outreq.ProtoMajor = 1
+ outreq.ProtoMinor = 1
+ outreq.Close = false
+
+ // Remove the connection header to the backend. We want a
+ // persistent connection, regardless of what the client sent
+ // to us. This is modifying the same underlying map from req
+ // (shallow copied above) so we only copy it if necessary.
+ if outreq.Header.Get("Connection") != "" {
+ outreq.Header = make(http.Header)
+ copyHeader(outreq.Header, req.Header)
+ outreq.Header.Del("Connection")
+ }
+
+ if clientIp, _, err := net.SplitHostPort(req.RemoteAddr); err == nil {
+ outreq.Header.Set("X-Forwarded-For", clientIp)
+ }
+
+ res, err := transport.RoundTrip(outreq)
+ if err != nil {
+ log.Printf("http: proxy error: %v", err)
+ rw.WriteHeader(http.StatusInternalServerError)
+ return
+ }
+
+ copyHeader(rw.Header(), res.Header)
+
+ rw.WriteHeader(res.StatusCode)
+
+ if res.Body != nil {
+ var dst io.Writer = rw
+ if p.FlushInterval != 0 {
+ if wf, ok := rw.(writeFlusher); ok {
+ dst = &maxLatencyWriter{dst: wf, latency: p.FlushInterval}
+ }
+ }
+ io.Copy(dst, res.Body)
+ }
+}
+
+type writeFlusher interface {
+ io.Writer
+ http.Flusher
+}
+
+type maxLatencyWriter struct {
+ dst writeFlusher
+ latency int64 // nanos
+
+ lk sync.Mutex // protects init of done, as well Write + Flush
+ done chan bool
+}
+
+func (m *maxLatencyWriter) Write(p []byte) (n int, err error) {
+ m.lk.Lock()
+ defer m.lk.Unlock()
+ if m.done == nil {
+ m.done = make(chan bool)
+ go m.flushLoop()
+ }
+ n, err = m.dst.Write(p)
+ if err != nil {
+ m.done <- true
+ }
+ return
+}
+
+func (m *maxLatencyWriter) flushLoop() {
+ t := time.NewTicker(m.latency)
+ defer t.Stop()
+ for {
+ select {
+ case <-t.C:
+ m.lk.Lock()
+ m.dst.Flush()
+ m.lk.Unlock()
+ case <-m.done:
+ return
+ }
+ }
+ panic("unreached")
+}
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Reverse proxy tests.
+
+package httputil
+
+import (
+ "io/ioutil"
+ "net/http"
+ "net/http/httptest"
+ "net/url"
+ "testing"
+)
+
+func TestReverseProxy(t *testing.T) {
+ const backendResponse = "I am the backend"
+ const backendStatus = 404
+ backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if len(r.TransferEncoding) > 0 {
+ t.Errorf("backend got unexpected TransferEncoding: %v", r.TransferEncoding)
+ }
+ if r.Header.Get("X-Forwarded-For") == "" {
+ t.Errorf("didn't get X-Forwarded-For header")
+ }
+ if c := r.Header.Get("Connection"); c != "" {
+ t.Errorf("handler got Connection header value %q", c)
+ }
+ if g, e := r.Host, "some-name"; g != e {
+ t.Errorf("backend got Host header %q, want %q", g, e)
+ }
+ w.Header().Set("X-Foo", "bar")
+ http.SetCookie(w, &http.Cookie{Name: "flavor", Value: "chocolateChip"})
+ w.WriteHeader(backendStatus)
+ w.Write([]byte(backendResponse))
+ }))
+ defer backend.Close()
+ backendURL, err := url.Parse(backend.URL)
+ if err != nil {
+ t.Fatal(err)
+ }
+ proxyHandler := NewSingleHostReverseProxy(backendURL)
+ frontend := httptest.NewServer(proxyHandler)
+ defer frontend.Close()
+
+ getReq, _ := http.NewRequest("GET", frontend.URL, nil)
+ getReq.Host = "some-name"
+ getReq.Header.Set("Connection", "close")
+ getReq.Close = true
+ res, err := http.DefaultClient.Do(getReq)
+ if err != nil {
+ t.Fatalf("Get: %v", err)
+ }
+ if g, e := res.StatusCode, backendStatus; g != e {
+ t.Errorf("got res.StatusCode %d; expected %d", g, e)
+ }
+ if g, e := res.Header.Get("X-Foo"), "bar"; g != e {
+ t.Errorf("got X-Foo %q; expected %q", g, e)
+ }
+ if g, e := len(res.Header["Set-Cookie"]), 1; g != e {
+ t.Fatalf("got %d SetCookies, want %d", g, e)
+ }
+ if cookie := res.Cookies()[0]; cookie.Name != "flavor" {
+ t.Errorf("unexpected cookie %q", cookie.Name)
+ }
+ bodyBytes, _ := ioutil.ReadAll(res.Body)
+ if g, e := string(bodyBytes), backendResponse; g != e {
+ t.Errorf("got body %q; expected %q", g, e)
+ }
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http
+
+// This file deals with lexical matters of HTTP
+
+func isSeparator(c byte) bool {
+ switch c {
+ case '(', ')', '<', '>', '@', ',', ';', ':', '\\', '"', '/', '[', ']', '?', '=', '{', '}', ' ', '\t':
+ return true
+ }
+ return false
+}
+
+func isSpace(c byte) bool {
+ switch c {
+ case ' ', '\t', '\r', '\n':
+ return true
+ }
+ return false
+}
+
+func isCtl(c byte) bool { return (0 <= c && c <= 31) || c == 127 }
+
+func isChar(c byte) bool { return 0 <= c && c <= 127 }
+
+func isAnyText(c byte) bool { return !isCtl(c) }
+
+func isQdText(c byte) bool { return isAnyText(c) && c != '"' }
+
+func isToken(c byte) bool { return isChar(c) && !isCtl(c) && !isSeparator(c) }
+
+// Valid escaped sequences are not specified in RFC 2616, so for now, we assume
+// that they coincide with the common sense ones used by Go. Malformed
+// characters should probably not be treated as errors by a robust (forgiving)
+// parser, so we replace them with the '?' character.
+func httpUnquotePair(b byte) byte {
+ // skip the first byte, which should always be '\'
+ switch b {
+ case 'a':
+ return '\a'
+ case 'b':
+ return '\b'
+ case 'f':
+ return '\f'
+ case 'n':
+ return '\n'
+ case 'r':
+ return '\r'
+ case 't':
+ return '\t'
+ case 'v':
+ return '\v'
+ case '\\':
+ return '\\'
+ case '\'':
+ return '\''
+ case '"':
+ return '"'
+ }
+ return '?'
+}
+
+// raw must begin with a valid quoted string. Only the first quoted string is
+// parsed and is unquoted in result. eaten is the number of bytes parsed, or -1
+// upon failure.
+func httpUnquote(raw []byte) (eaten int, result string) {
+ buf := make([]byte, len(raw))
+ if raw[0] != '"' {
+ return -1, ""
+ }
+ eaten = 1
+ j := 0 // # of bytes written in buf
+ for i := 1; i < len(raw); i++ {
+ switch b := raw[i]; b {
+ case '"':
+ eaten++
+ buf = buf[0:j]
+ return i + 1, string(buf)
+ case '\\':
+ if len(raw) < i+2 {
+ return -1, ""
+ }
+ buf[j] = httpUnquotePair(raw[i+1])
+ eaten += 2
+ j++
+ i++
+ default:
+ if isQdText(b) {
+ buf[j] = b
+ } else {
+ buf[j] = '?'
+ }
+ eaten++
+ j++
+ }
+ }
+ return -1, ""
+}
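+
+// Illustrative sketch: a minimal check of httpUnquote's behavior on two sample
+// inputs. The exampleHTTPUnquote name is hypothetical and exists only for
+// demonstration.
+func exampleHTTPUnquote() {
+	// Plain quoted string: five bytes are consumed and the quotes are dropped.
+	if n, s := httpUnquote([]byte(`"abc"def`)); n != 5 || s != "abc" {
+		panic("unexpected result for plain quoted string")
+	}
+	// Escapes inside quotes are decoded via httpUnquotePair.
+	if n, s := httpUnquote([]byte(`"a\tb"`)); n != 6 || s != "a\tb" {
+		panic("unexpected result for escaped tab")
+	}
+}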
+
+ // This is a best-effort parse, so errors are not returned; instead, not all of
+ // the input string may be parsed. result is always non-nil.
+func httpSplitFieldValue(fv string) (eaten int, result []string) {
+ result = make([]string, 0, len(fv))
+ raw := []byte(fv)
+ i := 0
+ chunk := ""
+ for i < len(raw) {
+ b := raw[i]
+ switch {
+ case b == '"':
+ eaten, unq := httpUnquote(raw[i:len(raw)])
+ if eaten < 0 {
+ return i, result
+ } else {
+ i += eaten
+ chunk += unq
+ }
+ case isSeparator(b):
+ if chunk != "" {
+ result = result[0 : len(result)+1]
+ result[len(result)-1] = chunk
+ chunk = ""
+ }
+ i++
+ case isToken(b):
+ chunk += string(b)
+ i++
+ case b == '\n' || b == '\r':
+ i++
+ default:
+ chunk += "?"
+ i++
+ }
+ }
+ if chunk != "" {
+ result = result[0 : len(result)+1]
+ result[len(result)-1] = chunk
+ chunk = ""
+ }
+ return i, result
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http
+
+import (
+ "testing"
+)
+
+type lexTest struct {
+ Raw string
+ Parsed int // # of parsed characters
+ Result []string
+}
+
+var lexTests = []lexTest{
+ {
+ Raw: `"abc"def,:ghi`,
+ Parsed: 13,
+ Result: []string{"abcdef", "ghi"},
+ },
+ // Our reading of the RFC is that escape sequences outside of
+ // quotes are not interpreted.
+ {
+ Raw: `"\t"\t"\t"`,
+ Parsed: 10,
+ Result: []string{"\t", "t\t"},
+ },
+ {
+ Raw: `"\yab"\r\n`,
+ Parsed: 10,
+ Result: []string{"?ab", "r", "n"},
+ },
+ {
+ Raw: "ab\f",
+ Parsed: 3,
+ Result: []string{"ab?"},
+ },
+ {
+ Raw: "\"ab \" c,de f, gh, ij\n\t\r",
+ Parsed: 23,
+ Result: []string{"ab ", "c", "de", "f", "gh", "ij"},
+ },
+}
+
+func min(x, y int) int {
+ if x <= y {
+ return x
+ }
+ return y
+}
+
+func TestSplitFieldValue(t *testing.T) {
+ for k, l := range lexTests {
+ parsed, result := httpSplitFieldValue(l.Raw)
+ if parsed != l.Parsed {
+ t.Errorf("#%d: Parsed %d, expected %d", k, parsed, l.Parsed)
+ }
+ if len(result) != len(l.Result) {
+ t.Errorf("#%d: Result len %d, expected %d", k, len(result), len(l.Result))
+ }
+ for i := 0; i < min(len(result), len(l.Result)); i++ {
+ if result[i] != l.Result[i] {
+ t.Errorf("#%d: %d-th entry mismatch. Have {%s}, expect {%s}",
+ k, i, result[i], l.Result[i])
+ }
+ }
+ }
+}
--- /dev/null
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package pprof serves via its HTTP server runtime profiling data
+// in the format expected by the pprof visualization tool.
+// For more information about pprof, see
+// http://code.google.com/p/google-perftools/.
+//
+// The package is typically only imported for the side effect of
+// registering its HTTP handlers.
+// The handled paths all begin with /debug/pprof/.
+//
+// To use pprof, link this package into your program:
+// import _ "http/pprof"
+//
+// Then use the pprof tool to look at the heap profile:
+//
+// pprof http://localhost:6060/debug/pprof/heap
+//
+// Or to look at a 30-second CPU profile:
+//
+// pprof http://localhost:6060/debug/pprof/profile
+//
+package pprof
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "net/http"
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "strconv"
+ "strings"
+ "time"
+)
+
+func init() {
+ http.Handle("/debug/pprof/cmdline", http.HandlerFunc(Cmdline))
+ http.Handle("/debug/pprof/profile", http.HandlerFunc(Profile))
+ http.Handle("/debug/pprof/heap", http.HandlerFunc(Heap))
+ http.Handle("/debug/pprof/symbol", http.HandlerFunc(Symbol))
+}
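+
+// Illustrative sketch: the handlers registered above are only reachable when
+// the program also runs an HTTP server on the DefaultServeMux. The function
+// name and the port are hypothetical; 6060 is merely a conventional choice.
+func exampleServePprof() {
+	// init registered the /debug/pprof/ handlers on the default mux,
+	// so passing a nil handler here is enough to expose them.
+	http.ListenAndServe("localhost:6060", nil)
+}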
+
+// Cmdline responds with the running program's
+// command line, with arguments separated by NUL bytes.
+// The package initialization registers it as /debug/pprof/cmdline.
+func Cmdline(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+ fmt.Fprint(w, strings.Join(os.Args, "\x00"))
+}
+
+// Heap responds with the pprof-formatted heap profile.
+// The package initialization registers it as /debug/pprof/heap.
+func Heap(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+ pprof.WriteHeapProfile(w)
+}
+
+ // Profile responds with the pprof-formatted CPU profile.
+// The package initialization registers it as /debug/pprof/profile.
+func Profile(w http.ResponseWriter, r *http.Request) {
+ sec, _ := strconv.Atoi64(r.FormValue("seconds"))
+ if sec == 0 {
+ sec = 30
+ }
+
+ // Set Content Type assuming StartCPUProfile will work,
+ // because if it does it starts writing.
+ w.Header().Set("Content-Type", "application/octet-stream")
+ if err := pprof.StartCPUProfile(w); err != nil {
+ // StartCPUProfile failed, so no writes yet.
+ // Can change header back to text content
+ // and send error code.
+ w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+ w.WriteHeader(http.StatusInternalServerError)
+ fmt.Fprintf(w, "Could not enable CPU profiling: %s\n", err)
+ return
+ }
+ time.Sleep(sec * 1e9)
+ pprof.StopCPUProfile()
+}
+
+// Symbol looks up the program counters listed in the request,
+// responding with a table mapping program counters to function names.
+// The package initialization registers it as /debug/pprof/symbol.
+func Symbol(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+
+ // We have to read the whole POST body before
+ // writing any output. Buffer the output here.
+ var buf bytes.Buffer
+
+ // We don't know how many symbols we have, but we
+ // do have symbol information. Pprof only cares whether
+ // this number is 0 (no symbols available) or > 0.
+ fmt.Fprintf(&buf, "num_symbols: 1\n")
+
+ var b *bufio.Reader
+ if r.Method == "POST" {
+ b = bufio.NewReader(r.Body)
+ } else {
+ b = bufio.NewReader(strings.NewReader(r.URL.RawQuery))
+ }
+
+ for {
+ word, err := b.ReadSlice('+')
+ if err == nil {
+ word = word[0 : len(word)-1] // trim +
+ }
+ pc, _ := strconv.Btoui64(string(word), 0)
+ if pc != 0 {
+ f := runtime.FuncForPC(uintptr(pc))
+ if f != nil {
+ fmt.Fprintf(&buf, "%#x %s\n", pc, f.Name())
+ }
+ }
+
+ // Wait until here to check for err; the last
+ // symbol will have an err because it doesn't end in +.
+ if err != nil {
+ if err != io.EOF {
+ fmt.Fprintf(&buf, "reading request: %v\n", err)
+ }
+ break
+ }
+ }
+
+ w.Write(buf.Bytes())
+}
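+
+// Illustrative sketch: Symbol expects program counters joined by '+', either
+// in the URL query or the POST body. The helper name is hypothetical and only
+// shows how such a lookup string could be assembled.
+func exampleSymbolQuery(pcs []uintptr) string {
+	parts := make([]string, len(pcs))
+	for i, pc := range pcs {
+		// %#x produces a 0x-prefixed value, which Btoui64 with base 0 accepts.
+		parts[i] = fmt.Sprintf("%#x", pc)
+	}
+	return strings.Join(parts, "+")
+}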
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http
+
+import (
+ "os"
+ "testing"
+)
+
+// TODO(mattn):
+// test ProxyAuth
+
+var UseProxyTests = []struct {
+ host string
+ match bool
+}{
+ // Never proxy localhost:
+ {"localhost:80", false},
+ {"127.0.0.1", false},
+ {"127.0.0.2", false},
+ {"[::1]", false},
+ {"[::2]", true}, // not a loopback address
+
+ {"barbaz.net", false}, // match as .barbaz.net
+ {"foobar.com", false}, // have a port but match
+ {"foofoobar.com", true}, // not match as a part of foobar.com
+ {"baz.com", true}, // not match as a part of barbaz.com
+ {"localhost.net", true}, // not match as suffix of address
+ {"local.localhost", true}, // not match as prefix as address
+ {"barbarbaz.net", true}, // not match because NO_PROXY have a '.'
+ {"www.foobar.com", true}, // not match because NO_PROXY is not .foobar.com
+}
+
+func TestUseProxy(t *testing.T) {
+ oldenv := os.Getenv("NO_PROXY")
+ defer os.Setenv("NO_PROXY", oldenv)
+
+ no_proxy := "foobar.com, .barbaz.net"
+ os.Setenv("NO_PROXY", no_proxy)
+
+ for _, test := range UseProxyTests {
+ if useProxy(test.host+":80") != test.match {
+ t.Errorf("useProxy(%v) = %v, want %v", test.host, !test.match, test.match)
+ }
+ }
+}
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http
+
+import (
+ "testing"
+)
+
+var ParseRangeTests = []struct {
+ s string
+ length int64
+ r []httpRange
+}{
+ {"", 0, nil},
+ {"foo", 0, nil},
+ {"bytes=", 0, nil},
+ {"bytes=5-4", 10, nil},
+ {"bytes=0-2,5-4", 10, nil},
+ {"bytes=0-9", 10, []httpRange{{0, 10}}},
+ {"bytes=0-", 10, []httpRange{{0, 10}}},
+ {"bytes=5-", 10, []httpRange{{5, 5}}},
+ {"bytes=0-20", 10, []httpRange{{0, 10}}},
+ {"bytes=15-,0-5", 10, nil},
+ {"bytes=-5", 10, []httpRange{{5, 5}}},
+ {"bytes=-15", 10, []httpRange{{0, 10}}},
+ {"bytes=0-499", 10000, []httpRange{{0, 500}}},
+ {"bytes=500-999", 10000, []httpRange{{500, 500}}},
+ {"bytes=-500", 10000, []httpRange{{9500, 500}}},
+ {"bytes=9500-", 10000, []httpRange{{9500, 500}}},
+ {"bytes=0-0,-1", 10000, []httpRange{{0, 1}, {9999, 1}}},
+ {"bytes=500-600,601-999", 10000, []httpRange{{500, 101}, {601, 399}}},
+ {"bytes=500-700,601-999", 10000, []httpRange{{500, 201}, {601, 399}}},
+}
+
+func TestParseRange(t *testing.T) {
+ for _, test := range ParseRangeTests {
+ r := test.r
+ ranges, err := parseRange(test.s, test.length)
+ if err != nil && r != nil {
+ t.Errorf("parseRange(%q) returned error %q", test.s, err)
+ }
+ if len(ranges) != len(r) {
+ t.Errorf("len(parseRange(%q)) = %d, want %d", test.s, len(ranges), len(r))
+ continue
+ }
+ for i := range r {
+ if ranges[i].start != r[i].start {
+ t.Errorf("parseRange(%q)[%d].start = %d, want %d", test.s, i, ranges[i].start, r[i].start)
+ }
+ if ranges[i].length != r[i].length {
+ t.Errorf("parseRange(%q)[%d].length = %d, want %d", test.s, i, ranges[i].length, r[i].length)
+ }
+ }
+ }
+}
--- /dev/null
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "net/url"
+ "reflect"
+ "testing"
+)
+
+type reqTest struct {
+ Raw string
+ Req *Request
+ Body string
+ Trailer Header
+ Error string
+}
+
+var noError = ""
+var noBody = ""
+var noTrailer Header = nil
+
+var reqTests = []reqTest{
+ // Baseline test; All Request fields included for template use
+ {
+ "GET http://www.techcrunch.com/ HTTP/1.1\r\n" +
+ "Host: www.techcrunch.com\r\n" +
+ "User-Agent: Fake\r\n" +
+ "Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\n" +
+ "Accept-Language: en-us,en;q=0.5\r\n" +
+ "Accept-Encoding: gzip,deflate\r\n" +
+ "Accept-Charset: ISO-8859-1,utf-8;q=0.7,*;q=0.7\r\n" +
+ "Keep-Alive: 300\r\n" +
+ "Content-Length: 7\r\n" +
+ "Proxy-Connection: keep-alive\r\n\r\n" +
+ "abcdef\n???",
+
+ &Request{
+ Method: "GET",
+ URL: &url.URL{
+ Raw: "http://www.techcrunch.com/",
+ Scheme: "http",
+ RawPath: "/",
+ RawAuthority: "www.techcrunch.com",
+ RawUserinfo: "",
+ Host: "www.techcrunch.com",
+ Path: "/",
+ RawQuery: "",
+ Fragment: "",
+ },
+ Proto: "HTTP/1.1",
+ ProtoMajor: 1,
+ ProtoMinor: 1,
+ Header: Header{
+ "Accept": {"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"},
+ "Accept-Language": {"en-us,en;q=0.5"},
+ "Accept-Encoding": {"gzip,deflate"},
+ "Accept-Charset": {"ISO-8859-1,utf-8;q=0.7,*;q=0.7"},
+ "Keep-Alive": {"300"},
+ "Proxy-Connection": {"keep-alive"},
+ "Content-Length": {"7"},
+ "User-Agent": {"Fake"},
+ },
+ Close: false,
+ ContentLength: 7,
+ Host: "www.techcrunch.com",
+ Form: url.Values{},
+ },
+
+ "abcdef\n",
+
+ noTrailer,
+ noError,
+ },
+
+ // GET request with no body (the normal case)
+ {
+ "GET / HTTP/1.1\r\n" +
+ "Host: foo.com\r\n\r\n",
+
+ &Request{
+ Method: "GET",
+ URL: &url.URL{
+ Raw: "/",
+ Path: "/",
+ RawPath: "/",
+ },
+ Proto: "HTTP/1.1",
+ ProtoMajor: 1,
+ ProtoMinor: 1,
+ Close: false,
+ ContentLength: 0,
+ Host: "foo.com",
+ Form: url.Values{},
+ },
+
+ noBody,
+ noTrailer,
+ noError,
+ },
+
+ // Tests that we don't parse a path that looks like a
+ // scheme-relative URI as a scheme-relative URI.
+ {
+ "GET //user@host/is/actually/a/path/ HTTP/1.1\r\n" +
+ "Host: test\r\n\r\n",
+
+ &Request{
+ Method: "GET",
+ URL: &url.URL{
+ Raw: "//user@host/is/actually/a/path/",
+ Scheme: "",
+ RawPath: "//user@host/is/actually/a/path/",
+ RawAuthority: "",
+ RawUserinfo: "",
+ Host: "",
+ Path: "//user@host/is/actually/a/path/",
+ RawQuery: "",
+ Fragment: "",
+ },
+ Proto: "HTTP/1.1",
+ ProtoMajor: 1,
+ ProtoMinor: 1,
+ Header: Header{},
+ Close: false,
+ ContentLength: 0,
+ Host: "test",
+ Form: url.Values{},
+ },
+
+ noBody,
+ noTrailer,
+ noError,
+ },
+
+ // Tests a bogus abs_path on the Request-Line (RFC 2616 section 5.1.2)
+ {
+ "GET ../../../../etc/passwd HTTP/1.1\r\n" +
+ "Host: test\r\n\r\n",
+ nil,
+ noBody,
+ noTrailer,
+ "parse ../../../../etc/passwd: invalid URI for request",
+ },
+
+ // Tests missing URL:
+ {
+ "GET HTTP/1.1\r\n" +
+ "Host: test\r\n\r\n",
+ nil,
+ noBody,
+ noTrailer,
+ "parse : empty url",
+ },
+
+ // Tests chunked body with trailer:
+ {
+ "POST / HTTP/1.1\r\n" +
+ "Host: foo.com\r\n" +
+ "Transfer-Encoding: chunked\r\n\r\n" +
+ "3\r\nfoo\r\n" +
+ "3\r\nbar\r\n" +
+ "0\r\n" +
+ "Trailer-Key: Trailer-Value\r\n" +
+ "\r\n",
+ &Request{
+ Method: "POST",
+ URL: &url.URL{
+ Raw: "/",
+ Path: "/",
+ RawPath: "/",
+ },
+ TransferEncoding: []string{"chunked"},
+ Proto: "HTTP/1.1",
+ ProtoMajor: 1,
+ ProtoMinor: 1,
+ ContentLength: -1,
+ Host: "foo.com",
+ Form: url.Values{},
+ },
+
+ "foobar",
+ Header{
+ "Trailer-Key": {"Trailer-Value"},
+ },
+ noError,
+ },
+}
+
+func TestReadRequest(t *testing.T) {
+ for i := range reqTests {
+ tt := &reqTests[i]
+ var braw bytes.Buffer
+ braw.WriteString(tt.Raw)
+ req, err := ReadRequest(bufio.NewReader(&braw))
+ if err != nil {
+ if err.Error() != tt.Error {
+ t.Errorf("#%d: error %q, want error %q", i, err.Error(), tt.Error)
+ }
+ continue
+ }
+ rbody := req.Body
+ req.Body = nil
+ diff(t, fmt.Sprintf("#%d Request", i), req, tt.Req)
+ var bout bytes.Buffer
+ if rbody != nil {
+ _, err := io.Copy(&bout, rbody)
+ if err != nil {
+ t.Fatalf("#%d. copying body: %v", i, err)
+ }
+ rbody.Close()
+ }
+ body := bout.String()
+ if body != tt.Body {
+ t.Errorf("#%d: Body = %q want %q", i, body, tt.Body)
+ }
+ if !reflect.DeepEqual(tt.Trailer, req.Trailer) {
+ t.Errorf("%#d. Trailers differ.\n got: %v\nwant: %v", i, req.Trailer, tt.Trailer)
+ }
+ }
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// HTTP Request reading and parsing.
+
+package http
+
+import (
+ "bufio"
+ "bytes"
+ "crypto/tls"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "mime"
+ "mime/multipart"
+ "net/textproto"
+ "net/url"
+ "strconv"
+ "strings"
+)
+
+const (
+ maxLineLength = 4096 // assumed <= bufio.defaultBufSize
+ maxValueLength = 4096
+ maxHeaderLines = 1024
+ chunkSize = 4 << 10 // 4 KB chunks
+ defaultMaxMemory = 32 << 20 // 32 MB
+)
+
+// ErrMissingFile is returned by FormFile when the provided file field name
+// is either not present in the request or not a file field.
+var ErrMissingFile = errors.New("http: no such file")
+
+// HTTP request parsing errors.
+type ProtocolError struct {
+ ErrorString string
+}
+
+func (err *ProtocolError) Error() string { return err.ErrorString }
+
+var (
+ ErrLineTooLong = &ProtocolError{"header line too long"}
+ ErrHeaderTooLong = &ProtocolError{"header too long"}
+ ErrShortBody = &ProtocolError{"entity body too short"}
+ ErrNotSupported = &ProtocolError{"feature not supported"}
+ ErrUnexpectedTrailer = &ProtocolError{"trailer header without chunked transfer encoding"}
+ ErrMissingContentLength = &ProtocolError{"missing ContentLength in HEAD response"}
+ ErrNotMultipart = &ProtocolError{"request Content-Type isn't multipart/form-data"}
+ ErrMissingBoundary = &ProtocolError{"no multipart boundary param in Content-Type"}
+)
+
+type badStringError struct {
+ what string
+ str string
+}
+
+func (e *badStringError) Error() string { return fmt.Sprintf("%s %q", e.what, e.str) }
+
+// Headers that Request.Write handles itself and should be skipped.
+var reqWriteExcludeHeader = map[string]bool{
+ "Host": true, // not in Header map anyway
+ "User-Agent": true,
+ "Content-Length": true,
+ "Transfer-Encoding": true,
+ "Trailer": true,
+}
+
+// A Request represents an HTTP request received by a server
+// or to be sent by a client.
+type Request struct {
+ Method string // GET, POST, PUT, etc.
+ URL *url.URL
+
+ // The protocol version for incoming requests.
+ // Outgoing requests always use HTTP/1.1.
+ Proto string // "HTTP/1.0"
+ ProtoMajor int // 1
+ ProtoMinor int // 0
+
+ // A header maps header keys to their values.
+ // If the header says
+ //
+ // accept-encoding: gzip, deflate
+ // Accept-Language: en-us
+ // Connection: keep-alive
+ //
+ // then
+ //
+ // Header = map[string][]string{
+ // "Accept-Encoding": {"gzip, deflate"},
+ // "Accept-Language": {"en-us"},
+ // "Connection": {"keep-alive"},
+ // }
+ //
+ // HTTP defines that header names are case-insensitive.
+ // The request parser implements this by canonicalizing the
+ // name, making the first character and any characters
+ // following a hyphen uppercase and the rest lowercase.
+ Header Header
+
+ // The message body.
+ Body io.ReadCloser
+
+ // ContentLength records the length of the associated content.
+ // The value -1 indicates that the length is unknown.
+ // Values >= 0 indicate that the given number of bytes may
+ // be read from Body.
+ // For outgoing requests, a value of 0 means unknown if Body is not nil.
+ ContentLength int64
+
+ // TransferEncoding lists the transfer encodings from outermost to
+ // innermost. An empty list denotes the "identity" encoding.
+ // TransferEncoding can usually be ignored; chunked encoding is
+ // automatically added and removed as necessary when sending and
+ // receiving requests.
+ TransferEncoding []string
+
+ // Close indicates whether to close the connection after
+ // replying to this request.
+ Close bool
+
+ // The host on which the URL is sought.
+ // Per RFC 2616, this is either the value of the Host: header
+ // or the host name given in the URL itself.
+ Host string
+
+ // Form contains the parsed form data, including both the URL
+ // field's query parameters and the POST or PUT form data.
+ // This field is only available after ParseForm is called.
+ // The HTTP client ignores Form and uses Body instead.
+ Form url.Values
+
+ // MultipartForm is the parsed multipart form, including file uploads.
+ // This field is only available after ParseMultipartForm is called.
+ // The HTTP client ignores MultipartForm and uses Body instead.
+ MultipartForm *multipart.Form
+
+ // Trailer maps trailer keys to values. Like for Header, if the
+ // response has multiple trailer lines with the same key, they will be
+ // concatenated, delimited by commas.
+ // For server requests, Trailer is only populated after Body has been
+ // closed or fully consumed.
+ // Trailer support is only partially complete.
+ Trailer Header
+
+ // RemoteAddr allows HTTP servers and other software to record
+ // the network address that sent the request, usually for
+ // logging. This field is not filled in by ReadRequest and
+ // has no defined format. The HTTP server in this package
+ // sets RemoteAddr to an "IP:port" address before invoking a
+ // handler.
+ // This field is ignored by the HTTP client.
+ RemoteAddr string
+
+ // TLS allows HTTP servers and other software to record
+ // information about the TLS connection on which the request
+ // was received. This field is not filled in by ReadRequest.
+ // The HTTP server in this package sets the field for
+ // TLS-enabled connections before invoking a handler;
+ // otherwise it leaves the field nil.
+ // This field is ignored by the HTTP client.
+ TLS *tls.ConnectionState
+}
+
+// ProtoAtLeast returns whether the HTTP protocol used
+// in the request is at least major.minor.
+func (r *Request) ProtoAtLeast(major, minor int) bool {
+ return r.ProtoMajor > major ||
+ r.ProtoMajor == major && r.ProtoMinor >= minor
+}
+
+// UserAgent returns the client's User-Agent, if sent in the request.
+func (r *Request) UserAgent() string {
+ return r.Header.Get("User-Agent")
+}
+
+// Cookies parses and returns the HTTP cookies sent with the request.
+func (r *Request) Cookies() []*Cookie {
+ return readCookies(r.Header, "")
+}
+
+ var ErrNoCookie = errors.New("http: named cookie not present")
+
+// Cookie returns the named cookie provided in the request or
+// ErrNoCookie if not found.
+func (r *Request) Cookie(name string) (*Cookie, error) {
+ for _, c := range readCookies(r.Header, name) {
+ return c, nil
+ }
+ return nil, ErrNoCookie
+}
+
+// AddCookie adds a cookie to the request. Per RFC 6265 section 5.4,
+// AddCookie does not attach more than one Cookie header field. That
+// means all cookies, if any, are written into the same line,
+ // separated by semicolons.
+func (r *Request) AddCookie(c *Cookie) {
+ s := fmt.Sprintf("%s=%s", sanitizeName(c.Name), sanitizeValue(c.Value))
+ if c := r.Header.Get("Cookie"); c != "" {
+ r.Header.Set("Cookie", c+"; "+s)
+ } else {
+ r.Header.Set("Cookie", s)
+ }
+}
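+
+// Illustrative sketch: AddCookie folds every cookie into the single Cookie
+// header line described above. The function name and the cookie values are
+// hypothetical.
+func exampleAddCookieFolding() string {
+	req := &Request{Header: make(Header)}
+	req.AddCookie(&Cookie{Name: "session", Value: "abc"})
+	req.AddCookie(&Cookie{Name: "flavor", Value: "chocolateChip"})
+	// The header now holds one line: "session=abc; flavor=chocolateChip".
+	return req.Header.Get("Cookie")
+}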
+
+// Referer returns the referring URL, if sent in the request.
+//
+// Referer is misspelled as in the request itself, a mistake from the
+// earliest days of HTTP. This value can also be fetched from the
+// Header map as Header["Referer"]; the benefit of making it available
+// as a method is that the compiler can diagnose programs that use the
+// alternate (correct English) spelling req.Referrer() but cannot
+// diagnose programs that use Header["Referrer"].
+func (r *Request) Referer() string {
+ return r.Header.Get("Referer")
+}
+
+// multipartByReader is a sentinel value.
+// Its presence in Request.MultipartForm indicates that parsing of the request
+ // body has been handed off to a MultipartReader instead of ParseMultipartForm.
+var multipartByReader = &multipart.Form{
+ Value: make(map[string][]string),
+ File: make(map[string][]*multipart.FileHeader),
+}
+
+// MultipartReader returns a MIME multipart reader if this is a
+// multipart/form-data POST request, else returns nil and an error.
+// Use this function instead of ParseMultipartForm to
+// process the request body as a stream.
+func (r *Request) MultipartReader() (*multipart.Reader, error) {
+ if r.MultipartForm == multipartByReader {
+ return nil, errors.New("http: MultipartReader called twice")
+ }
+ if r.MultipartForm != nil {
+ return nil, errors.New("http: multipart handled by ParseMultipartForm")
+ }
+ r.MultipartForm = multipartByReader
+ return r.multipartReader()
+}
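+
+// Illustrative sketch: streaming multipart parts with MultipartReader instead
+// of buffering them through ParseMultipartForm. The handler name is
+// hypothetical.
+func exampleStreamMultipart(w ResponseWriter, r *Request) {
+	mr, err := r.MultipartReader()
+	if err != nil {
+		Error(w, err.Error(), StatusBadRequest)
+		return
+	}
+	for {
+		part, err := mr.NextPart()
+		if err == io.EOF {
+			break // no more parts
+		}
+		if err != nil {
+			Error(w, err.Error(), StatusInternalServerError)
+			return
+		}
+		fmt.Fprintf(w, "form field %q, file name %q\n", part.FormName(), part.FileName())
+	}
+}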
+
+func (r *Request) multipartReader() (*multipart.Reader, error) {
+ v := r.Header.Get("Content-Type")
+ if v == "" {
+ return nil, ErrNotMultipart
+ }
+ d, params, err := mime.ParseMediaType(v)
+ if err != nil || d != "multipart/form-data" {
+ return nil, ErrNotMultipart
+ }
+ boundary, ok := params["boundary"]
+ if !ok {
+ return nil, ErrMissingBoundary
+ }
+ return multipart.NewReader(r.Body, boundary), nil
+}
+
+// Return value if nonempty, def otherwise.
+func valueOrDefault(value, def string) string {
+ if value != "" {
+ return value
+ }
+ return def
+}
+
+const defaultUserAgent = "Go http package"
+
+// Write writes an HTTP/1.1 request -- header and body -- in wire format.
+// This method consults the following fields of req:
+// Host
+// URL
+// Method (defaults to "GET")
+// Header
+// ContentLength
+// TransferEncoding
+// Body
+//
+// If Body is present, Content-Length is <= 0 and TransferEncoding
+// hasn't been set to "identity", Write adds "Transfer-Encoding:
+// chunked" to the header. Body is closed after it is sent.
+func (req *Request) Write(w io.Writer) error {
+ return req.write(w, false, nil)
+}
+
+// WriteProxy is like Write but writes the request in the form
+// expected by an HTTP proxy. In particular, WriteProxy writes the
+// initial Request-URI line of the request with an absolute URI, per
+// section 5.1.2 of RFC 2616, including the scheme and host. In
+// either case, WriteProxy also writes a Host header, using either
+// req.Host or req.URL.Host.
+func (req *Request) WriteProxy(w io.Writer) error {
+ return req.write(w, true, nil)
+}
+
+// extraHeaders may be nil
+func (req *Request) write(w io.Writer, usingProxy bool, extraHeaders Header) error {
+ host := req.Host
+ if host == "" {
+ if req.URL == nil {
+ return errors.New("http: Request.Write on Request with no Host or URL set")
+ }
+ host = req.URL.Host
+ }
+
+ urlStr := req.URL.RawPath
+ if strings.HasPrefix(urlStr, "?") {
+ urlStr = "/" + urlStr // Issue 2344
+ }
+ if urlStr == "" {
+ urlStr = valueOrDefault(req.URL.RawPath, valueOrDefault(req.URL.EncodedPath(), "/"))
+ if req.URL.RawQuery != "" {
+ urlStr += "?" + req.URL.RawQuery
+ }
+ if usingProxy {
+ if urlStr == "" || urlStr[0] != '/' {
+ urlStr = "/" + urlStr
+ }
+ urlStr = req.URL.Scheme + "://" + host + urlStr
+ }
+ }
+ // TODO(bradfitz): escape at least newlines in urlStr?
+
+ bw := bufio.NewWriter(w)
+ fmt.Fprintf(bw, "%s %s HTTP/1.1\r\n", valueOrDefault(req.Method, "GET"), urlStr)
+
+ // Header lines
+ fmt.Fprintf(bw, "Host: %s\r\n", host)
+
+ // Use the defaultUserAgent unless the Header contains one, which
+ // may be blank to not send the header.
+ userAgent := defaultUserAgent
+ if req.Header != nil {
+ if ua := req.Header["User-Agent"]; len(ua) > 0 {
+ userAgent = ua[0]
+ }
+ }
+ if userAgent != "" {
+ fmt.Fprintf(bw, "User-Agent: %s\r\n", userAgent)
+ }
+
+ // Process Body, ContentLength, Close, and Trailer.
+ tw, err := newTransferWriter(req)
+ if err != nil {
+ return err
+ }
+ err = tw.WriteHeader(bw)
+ if err != nil {
+ return err
+ }
+
+ // TODO: split long values? (If so, should share code with Conn.Write)
+ err = req.Header.WriteSubset(bw, reqWriteExcludeHeader)
+ if err != nil {
+ return err
+ }
+
+ if extraHeaders != nil {
+ err = extraHeaders.Write(bw)
+ if err != nil {
+ return err
+ }
+ }
+
+ io.WriteString(bw, "\r\n")
+
+ // Write body and trailer
+ err = tw.WriteBody(bw)
+ if err != nil {
+ return err
+ }
+ bw.Flush()
+ return nil
+}
+
+// Read a line of bytes (up to \n) from b.
+// Give up if the line exceeds maxLineLength.
+// The returned bytes are a pointer into storage in
+// the bufio, so they are only valid until the next bufio read.
+func readLineBytes(b *bufio.Reader) (p []byte, err error) {
+ if p, err = b.ReadSlice('\n'); err != nil {
+ // We always know when EOF is coming.
+ // If the caller asked for a line, there should be a line.
+ if err == io.EOF {
+ err = io.ErrUnexpectedEOF
+ } else if err == bufio.ErrBufferFull {
+ err = ErrLineTooLong
+ }
+ return nil, err
+ }
+ if len(p) >= maxLineLength {
+ return nil, ErrLineTooLong
+ }
+
+ // Chop off trailing white space.
+ var i int
+ for i = len(p); i > 0; i-- {
+ if c := p[i-1]; c != ' ' && c != '\r' && c != '\t' && c != '\n' {
+ break
+ }
+ }
+ return p[0:i], nil
+}
+
+ // readLine is like readLineBytes, but converts the bytes into a string.
+func readLine(b *bufio.Reader) (s string, err error) {
+ p, e := readLineBytes(b)
+ if e != nil {
+ return "", e
+ }
+ return string(p), nil
+}
+
+// Convert decimal at s[i:len(s)] to integer,
+// returning value, string position where the digits stopped,
+// and whether there was a valid number (digits, not too big).
+func atoi(s string, i int) (n, i1 int, ok bool) {
+ const Big = 1000000
+ if i >= len(s) || s[i] < '0' || s[i] > '9' {
+ return 0, 0, false
+ }
+ n = 0
+ for ; i < len(s) && '0' <= s[i] && s[i] <= '9'; i++ {
+ n = n*10 + int(s[i]-'0')
+ if n > Big {
+ return 0, 0, false
+ }
+ }
+ return n, i, true
+}
+
+ // ParseHTTPVersion parses an HTTP version string.
+// "HTTP/1.0" returns (1, 0, true).
+func ParseHTTPVersion(vers string) (major, minor int, ok bool) {
+ if len(vers) < 5 || vers[0:5] != "HTTP/" {
+ return 0, 0, false
+ }
+ major, i, ok := atoi(vers, 5)
+ if !ok || i >= len(vers) || vers[i] != '.' {
+ return 0, 0, false
+ }
+ minor, i, ok = atoi(vers, i+1)
+ if !ok || i != len(vers) {
+ return 0, 0, false
+ }
+ return major, minor, true
+}
+
+type chunkedReader struct {
+ r *bufio.Reader
+ n uint64 // unread bytes in chunk
+ err error
+}
+
+func (cr *chunkedReader) beginChunk() {
+ // chunk-size CRLF
+ var line string
+ line, cr.err = readLine(cr.r)
+ if cr.err != nil {
+ return
+ }
+ cr.n, cr.err = strconv.Btoui64(line, 16)
+ if cr.err != nil {
+ return
+ }
+ if cr.n == 0 {
+ cr.err = io.EOF
+ }
+}
+
+func (cr *chunkedReader) Read(b []uint8) (n int, err error) {
+ if cr.err != nil {
+ return 0, cr.err
+ }
+ if cr.n == 0 {
+ cr.beginChunk()
+ if cr.err != nil {
+ return 0, cr.err
+ }
+ }
+ if uint64(len(b)) > cr.n {
+ b = b[0:cr.n]
+ }
+ n, cr.err = cr.r.Read(b)
+ cr.n -= uint64(n)
+ if cr.n == 0 && cr.err == nil {
+ // end of chunk (CRLF)
+ b := make([]byte, 2)
+ if _, cr.err = io.ReadFull(cr.r, b); cr.err == nil {
+ if b[0] != '\r' || b[1] != '\n' {
+ cr.err = errors.New("malformed chunked encoding")
+ }
+ }
+ }
+ return n, cr.err
+}
+
+// NewRequest returns a new Request given a method, URL, and optional body.
+func NewRequest(method, urlStr string, body io.Reader) (*Request, error) {
+ u, err := url.Parse(urlStr)
+ if err != nil {
+ return nil, err
+ }
+ rc, ok := body.(io.ReadCloser)
+ if !ok && body != nil {
+ rc = ioutil.NopCloser(body)
+ }
+ req := &Request{
+ Method: method,
+ URL: u,
+ Proto: "HTTP/1.1",
+ ProtoMajor: 1,
+ ProtoMinor: 1,
+ Header: make(Header),
+ Body: rc,
+ Host: u.Host,
+ }
+ if body != nil {
+ switch v := body.(type) {
+ case *strings.Reader:
+ req.ContentLength = int64(v.Len())
+ case *bytes.Buffer:
+ req.ContentLength = int64(v.Len())
+ }
+ }
+
+ return req, nil
+}
+
+// SetBasicAuth sets the request's Authorization header to use HTTP
+// Basic Authentication with the provided username and password.
+//
+// With HTTP Basic Authentication the provided username and password
+// are not encrypted.
+func (r *Request) SetBasicAuth(username, password string) {
+ s := username + ":" + password
+ r.Header.Set("Authorization", "Basic "+base64.StdEncoding.EncodeToString([]byte(s)))
+}
+
+// ReadRequest reads and parses a request from b.
+func ReadRequest(b *bufio.Reader) (req *Request, err error) {
+
+ tp := textproto.NewReader(b)
+ req = new(Request)
+
+ // First line: GET /index.html HTTP/1.0
+ var s string
+ if s, err = tp.ReadLine(); err != nil {
+ if err == io.EOF {
+ err = io.ErrUnexpectedEOF
+ }
+ return nil, err
+ }
+
+ var f []string
+ if f = strings.SplitN(s, " ", 3); len(f) < 3 {
+ return nil, &badStringError{"malformed HTTP request", s}
+ }
+ var rawurl string
+ req.Method, rawurl, req.Proto = f[0], f[1], f[2]
+ var ok bool
+ if req.ProtoMajor, req.ProtoMinor, ok = ParseHTTPVersion(req.Proto); !ok {
+ return nil, &badStringError{"malformed HTTP version", req.Proto}
+ }
+
+ if req.URL, err = url.ParseRequest(rawurl); err != nil {
+ return nil, err
+ }
+
+ // Subsequent lines: Key: value.
+ mimeHeader, err := tp.ReadMIMEHeader()
+ if err != nil {
+ return nil, err
+ }
+ req.Header = Header(mimeHeader)
+
+ // RFC2616: Must treat
+ // GET /index.html HTTP/1.1
+ // Host: www.google.com
+ // and
+ // GET http://www.google.com/index.html HTTP/1.1
+ // Host: doesntmatter
+ // the same. In the second case, any Host line is ignored.
+ req.Host = req.URL.Host
+ if req.Host == "" {
+ req.Host = req.Header.Get("Host")
+ }
+ req.Header.Del("Host")
+
+ fixPragmaCacheControl(req.Header)
+
+ // TODO: Parse specific header values:
+ // Accept
+ // Accept-Encoding
+ // Accept-Language
+ // Authorization
+ // Cache-Control
+ // Connection
+ // Date
+ // Expect
+ // From
+ // If-Match
+ // If-Modified-Since
+ // If-None-Match
+ // If-Range
+ // If-Unmodified-Since
+ // Max-Forwards
+ // Proxy-Authorization
+ // Referer [sic]
+ // TE (transfer-codings)
+ // Trailer
+ // Transfer-Encoding
+ // Upgrade
+ // User-Agent
+ // Via
+ // Warning
+
+ err = readTransfer(req, b)
+ if err != nil {
+ return nil, err
+ }
+
+ return req, nil
+}
+
+// MaxBytesReader is similar to io.LimitReader but is intended for
+// limiting the size of incoming request bodies. In contrast to
+// io.LimitReader, MaxBytesReader's result is a ReadCloser, returns a
+// non-EOF error for a Read beyond the limit, and Closes the
+// underlying reader when its Close method is called.
+//
+// MaxBytesReader prevents clients from accidentally or maliciously
+// sending a large request and wasting server resources.
+func MaxBytesReader(w ResponseWriter, r io.ReadCloser, n int64) io.ReadCloser {
+ return &maxBytesReader{w: w, r: r, n: n}
+}
+
+type maxBytesReader struct {
+ w ResponseWriter
+ r io.ReadCloser // underlying reader
+ n int64 // max bytes remaining
+ stopped bool
+}
+
+func (l *maxBytesReader) Read(p []byte) (n int, err error) {
+ if l.n <= 0 {
+ if !l.stopped {
+ l.stopped = true
+ if res, ok := l.w.(*response); ok {
+ res.requestTooLarge()
+ }
+ }
+ return 0, errors.New("http: request body too large")
+ }
+ if int64(len(p)) > l.n {
+ p = p[:l.n]
+ }
+ n, err = l.r.Read(p)
+ l.n -= int64(n)
+ return
+}
+
+func (l *maxBytesReader) Close() error {
+ return l.r.Close()
+}
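+
+// Illustrative sketch: capping a request body with MaxBytesReader before
+// parsing it; reads past the limit fail with "http: request body too large".
+// The handler name and the 1 MB limit are hypothetical.
+func exampleLimitedBodyHandler(w ResponseWriter, r *Request) {
+	r.Body = MaxBytesReader(w, r.Body, 1<<20) // at most 1 MB may be read
+	if err := r.ParseForm(); err != nil {
+		Error(w, "request body too large or malformed", StatusBadRequest)
+		return
+	}
+	fmt.Fprintf(w, "parsed %d form values\n", len(r.Form))
+}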
+
+// ParseForm parses the raw query from the URL.
+//
+// For POST or PUT requests, it also parses the request body as a form.
+// If the request Body's size has not already been limited by MaxBytesReader,
+// the size is capped at 10MB.
+//
+// ParseMultipartForm calls ParseForm automatically.
+// It is idempotent.
+func (r *Request) ParseForm() (err error) {
+ if r.Form != nil {
+ return
+ }
+ if r.URL != nil {
+ r.Form, err = url.ParseQuery(r.URL.RawQuery)
+ }
+ if r.Method == "POST" || r.Method == "PUT" {
+ if r.Body == nil {
+ return errors.New("missing form body")
+ }
+ ct := r.Header.Get("Content-Type")
+ ct, _, err = mime.ParseMediaType(ct) // assign to the named return; don't shadow err
+ switch {
+ case ct == "application/x-www-form-urlencoded":
+ var reader io.Reader = r.Body
+ maxFormSize := int64(1<<63 - 1)
+ if _, ok := r.Body.(*maxBytesReader); !ok {
+ maxFormSize = int64(10 << 20) // 10 MB is a lot of text.
+ reader = io.LimitReader(r.Body, maxFormSize+1)
+ }
+ b, e := ioutil.ReadAll(reader)
+ if e != nil {
+ if err == nil {
+ err = e
+ }
+ break
+ }
+ if int64(len(b)) > maxFormSize {
+ return errors.New("http: POST too large")
+ }
+ var newValues url.Values
+ newValues, e = url.ParseQuery(string(b))
+ if err == nil {
+ err = e
+ }
+ if r.Form == nil {
+ r.Form = make(url.Values)
+ }
+ // Copy values into r.Form. TODO: make this smoother.
+ for k, vs := range newValues {
+ for _, value := range vs {
+ r.Form.Add(k, value)
+ }
+ }
+ case ct == "multipart/form-data":
+ // handled by ParseMultipartForm (which is calling us, or should be)
+ // TODO(bradfitz): there are too many possible
+ // orders to call too many functions here.
+ // Clean this up and write more tests.
+ // request_test.go contains the start of this,
+ // in TestRequestMultipartCallOrder.
+ default:
+ return &badStringError{"unknown Content-Type", ct}
+ }
+ }
+ return err
+}
+
+// ParseMultipartForm parses a request body as multipart/form-data.
+// The whole request body is parsed and up to a total of maxMemory bytes of
+// its file parts are stored in memory, with the remainder stored on
+// disk in temporary files.
+// ParseMultipartForm calls ParseForm if necessary.
+// After one call to ParseMultipartForm, subsequent calls have no effect.
+func (r *Request) ParseMultipartForm(maxMemory int64) error {
+ if r.MultipartForm == multipartByReader {
+ return errors.New("http: multipart handled by MultipartReader")
+ }
+ if r.Form == nil {
+ err := r.ParseForm()
+ if err != nil {
+ return err
+ }
+ }
+ if r.MultipartForm != nil {
+ return nil
+ }
+
+ mr, err := r.multipartReader()
+ if err == ErrNotMultipart {
+ return nil
+ } else if err != nil {
+ return err
+ }
+
+ f, err := mr.ReadForm(maxMemory)
+ if err != nil {
+ return err
+ }
+ for k, v := range f.Value {
+ r.Form[k] = append(r.Form[k], v...)
+ }
+ r.MultipartForm = f
+
+ return nil
+}
+
+// FormValue returns the first value for the named component of the query.
+// FormValue calls ParseMultipartForm and ParseForm if necessary.
+func (r *Request) FormValue(key string) string {
+ if r.Form == nil {
+ r.ParseMultipartForm(defaultMaxMemory)
+ }
+ if vs := r.Form[key]; len(vs) > 0 {
+ return vs[0]
+ }
+ return ""
+}
+
+// FormFile returns the first file for the provided form key.
+// FormFile calls ParseMultipartForm and ParseForm if necessary.
+func (r *Request) FormFile(key string) (multipart.File, *multipart.FileHeader, error) {
+ if r.MultipartForm == multipartByReader {
+ return nil, nil, errors.New("http: multipart handled by MultipartReader")
+ }
+ if r.MultipartForm == nil {
+ err := r.ParseMultipartForm(defaultMaxMemory)
+ if err != nil {
+ return nil, nil, err
+ }
+ }
+ if r.MultipartForm != nil && r.MultipartForm.File != nil {
+ if fhs := r.MultipartForm.File[key]; len(fhs) > 0 {
+ f, err := fhs[0].Open()
+ return f, fhs[0], err
+ }
+ }
+ return nil, nil, ErrMissingFile
+}
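+
+// Illustrative sketch: reading an uploaded file through FormFile, which calls
+// ParseMultipartForm as needed. The handler name and the "upload" field name
+// are hypothetical.
+func exampleUploadHandler(w ResponseWriter, r *Request) {
+	file, fh, err := r.FormFile("upload")
+	if err != nil {
+		Error(w, err.Error(), StatusBadRequest)
+		return
+	}
+	defer file.Close()
+	n, err := io.Copy(ioutil.Discard, file) // count the uploaded bytes
+	if err != nil {
+		Error(w, err.Error(), StatusInternalServerError)
+		return
+	}
+	fmt.Fprintf(w, "received %q (%d bytes)\n", fh.Filename, n)
+}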
+
+func (r *Request) expectsContinue() bool {
+ return strings.ToLower(r.Header.Get("Expect")) == "100-continue"
+}
+
+func (r *Request) wantsHttp10KeepAlive() bool {
+ if r.ProtoMajor != 1 || r.ProtoMinor != 0 {
+ return false
+ }
+ return strings.Contains(strings.ToLower(r.Header.Get("Connection")), "keep-alive")
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http_test
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "mime/multipart"
+ . "net/http"
+ "net/http/httptest"
+ "net/url"
+ "os"
+ "reflect"
+ "regexp"
+ "strings"
+ "testing"
+)
+
+func TestQuery(t *testing.T) {
+ req := &Request{Method: "GET"}
+ req.URL, _ = url.Parse("http://www.google.com/search?q=foo&q=bar")
+ if q := req.FormValue("q"); q != "foo" {
+ t.Errorf(`req.FormValue("q") = %q, want "foo"`, q)
+ }
+}
+
+func TestPostQuery(t *testing.T) {
+ req, _ := NewRequest("POST", "http://www.google.com/search?q=foo&q=bar&both=x",
+ strings.NewReader("z=post&both=y"))
+ req.Header.Set("Content-Type", "application/x-www-form-urlencoded; param=value")
+
+ if q := req.FormValue("q"); q != "foo" {
+ t.Errorf(`req.FormValue("q") = %q, want "foo"`, q)
+ }
+ if z := req.FormValue("z"); z != "post" {
+ t.Errorf(`req.FormValue("z") = %q, want "post"`, z)
+ }
+ if both := req.Form["both"]; !reflect.DeepEqual(both, []string{"x", "y"}) {
+ t.Errorf(`req.FormValue("both") = %q, want ["x", "y"]`, both)
+ }
+}
+
+type stringMap map[string][]string
+type parseContentTypeTest struct {
+ contentType stringMap
+}
+
+var parseContentTypeTests = []parseContentTypeTest{
+ {contentType: stringMap{"Content-Type": {"text/plain"}}},
+ {contentType: stringMap{}}, // Non-existent keys are not placed. The value nil is illegal.
+ {contentType: stringMap{"Content-Type": {"text/plain; boundary="}}},
+ {
+ contentType: stringMap{"Content-Type": {"application/unknown"}},
+ },
+}
+
+func TestParseFormBadContentType(t *testing.T) {
+ for i, test := range parseContentTypeTests {
+ req := &Request{
+ Method: "POST",
+ Header: Header(test.contentType),
+ Body: ioutil.NopCloser(bytes.NewBufferString("body")),
+ }
+ err := req.ParseForm()
+ if err == nil {
+ t.Errorf("test %d should have returned error", i)
+ }
+ }
+}
+
+func TestMultipartReader(t *testing.T) {
+ req := &Request{
+ Method: "POST",
+ Header: Header{"Content-Type": {`multipart/form-data; boundary="foo123"`}},
+ Body: ioutil.NopCloser(new(bytes.Buffer)),
+ }
+ multipart, err := req.MultipartReader()
+ if multipart == nil {
+ t.Errorf("expected multipart; error: %v", err)
+ }
+
+ req.Header = Header{"Content-Type": {"text/plain"}}
+ multipart, err = req.MultipartReader()
+ if multipart != nil {
+ t.Errorf("unexpected multipart for text/plain")
+ }
+}
+
+func TestRedirect(t *testing.T) {
+ ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
+ switch r.URL.Path {
+ case "/":
+ w.Header().Set("Location", "/foo/")
+ w.WriteHeader(StatusSeeOther)
+ case "/foo/":
+ fmt.Fprintf(w, "foo")
+ default:
+ w.WriteHeader(StatusBadRequest)
+ }
+ }))
+ defer ts.Close()
+
+ var end = regexp.MustCompile("/foo/$")
+ r, err := Get(ts.URL)
+ if err != nil {
+ t.Fatal(err)
+ }
+ r.Body.Close()
+ url := r.Request.URL.String()
+ if r.StatusCode != 200 || !end.MatchString(url) {
+ t.Fatalf("Get got status %d at %q, want 200 matching /foo/$", r.StatusCode, url)
+ }
+}
+
+func TestSetBasicAuth(t *testing.T) {
+ r, _ := NewRequest("GET", "http://example.com/", nil)
+ r.SetBasicAuth("Aladdin", "open sesame")
+ if g, e := r.Header.Get("Authorization"), "Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ=="; g != e {
+ t.Errorf("got header %q, want %q", g, e)
+ }
+}
+
+func TestMultipartRequest(t *testing.T) {
+ // Test that we can read the values and files of a
+ // multipart request with FormValue and FormFile,
+ // and that ParseMultipartForm can be called multiple times.
+ req := newTestMultipartRequest(t)
+ if err := req.ParseMultipartForm(25); err != nil {
+ t.Fatal("ParseMultipartForm first call:", err)
+ }
+ defer req.MultipartForm.RemoveAll()
+ validateTestMultipartContents(t, req, false)
+ if err := req.ParseMultipartForm(25); err != nil {
+ t.Fatal("ParseMultipartForm second call:", err)
+ }
+ validateTestMultipartContents(t, req, false)
+}
+
+func TestMultipartRequestAuto(t *testing.T) {
+ // Test that FormValue and FormFile automatically invoke
+ // ParseMultipartForm and return the right values.
+ req := newTestMultipartRequest(t)
+ defer func() {
+ if req.MultipartForm != nil {
+ req.MultipartForm.RemoveAll()
+ }
+ }()
+ validateTestMultipartContents(t, req, true)
+}
+
+func TestEmptyMultipartRequest(t *testing.T) {
+ // Test that FormFile returns ErrMissingFile (rather than panicking)
+ // when the request has no multipart body at all.
+ req, err := NewRequest("GET", "/", nil)
+ if err != nil {
+ t.Errorf("NewRequest err = %q", err)
+ }
+ testMissingFile(t, req)
+}
+
+func TestRequestMultipartCallOrder(t *testing.T) {
+ req := newTestMultipartRequest(t)
+ _, err := req.MultipartReader()
+ if err != nil {
+ t.Fatalf("MultipartReader: %v", err)
+ }
+ err = req.ParseMultipartForm(1024)
+ if err == nil {
+ t.Errorf("expected an error from ParseMultipartForm after call to MultipartReader")
+ }
+}
+
+func testMissingFile(t *testing.T, req *Request) {
+ f, fh, err := req.FormFile("missing")
+ if f != nil {
+ t.Errorf("FormFile file = %q, want nil", f)
+ }
+ if fh != nil {
+ t.Errorf("FormFile file header = %q, want nil", fh)
+ }
+ if err != ErrMissingFile {
+ t.Errorf("FormFile err = %q, want ErrMissingFile", err)
+ }
+}
+
+func newTestMultipartRequest(t *testing.T) *Request {
+ b := bytes.NewBufferString(strings.Replace(message, "\n", "\r\n", -1))
+ req, err := NewRequest("POST", "/", b)
+ if err != nil {
+ t.Fatal("NewRequest:", err)
+ }
+ ctype := fmt.Sprintf(`multipart/form-data; boundary="%s"`, boundary)
+ req.Header.Set("Content-type", ctype)
+ return req
+}
+
+func validateTestMultipartContents(t *testing.T, req *Request, allMem bool) {
+ if g, e := req.FormValue("texta"), textaValue; g != e {
+ t.Errorf("texta value = %q, want %q", g, e)
+ }
+ if g, e := req.FormValue("texta"), textaValue; g != e {
+ t.Errorf("texta value = %q, want %q", g, e)
+ }
+ if g := req.FormValue("missing"); g != "" {
+ t.Errorf("missing value = %q, want empty string", g)
+ }
+
+ assertMem := func(n string, fd multipart.File) {
+ if _, ok := fd.(*os.File); ok {
+ t.Error(n, " is *os.File, should not be")
+ }
+ }
+ fd := testMultipartFile(t, req, "filea", "filea.txt", fileaContents)
+ assertMem("filea", fd)
+ fd = testMultipartFile(t, req, "fileb", "fileb.txt", filebContents)
+ if allMem {
+ assertMem("fileb", fd)
+ } else {
+ if _, ok := fd.(*os.File); !ok {
+ t.Errorf("fileb has unexpected underlying type %T", fd)
+ }
+ }
+
+ testMissingFile(t, req)
+}
+
+func testMultipartFile(t *testing.T, req *Request, key, expectFilename, expectContent string) multipart.File {
+ f, fh, err := req.FormFile(key)
+ if err != nil {
+ t.Fatalf("FormFile(%q): %q", key, err)
+ }
+ if fh.Filename != expectFilename {
+ t.Errorf("filename = %q, want %q", fh.Filename, expectFilename)
+ }
+ var b bytes.Buffer
+ _, err = io.Copy(&b, f)
+ if err != nil {
+ t.Fatal("copying contents:", err)
+ }
+ if g := b.String(); g != expectContent {
+ t.Errorf("contents = %q, want %q", g, expectContent)
+ }
+ return f
+}
+
+const (
+ fileaContents = "This is a test file."
+ filebContents = "Another test file."
+ textaValue = "foo"
+ textbValue = "bar"
+ boundary = `MyBoundary`
+)
+
+const message = `
+--MyBoundary
+Content-Disposition: form-data; name="filea"; filename="filea.txt"
+Content-Type: text/plain
+
+` + fileaContents + `
+--MyBoundary
+Content-Disposition: form-data; name="fileb"; filename="fileb.txt"
+Content-Type: text/plain
+
+` + filebContents + `
+--MyBoundary
+Content-Disposition: form-data; name="texta"
+
+` + textaValue + `
+--MyBoundary
+Content-Disposition: form-data; name="textb"
+
+` + textbValue + `
+--MyBoundary--
+`
--- /dev/null
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/url"
+ "strings"
+ "testing"
+)
+
+type reqWriteTest struct {
+ Req Request
+ Body interface{} // optional []byte or func() io.ReadCloser to populate Req.Body
+
+ // Either of these may be empty to skip that test.
+ WantWrite string // Request.Write
+ WantProxy string // Request.WriteProxy
+
+ WantError error // wanted error from Request.Write
+}
+
+var reqWriteTests = []reqWriteTest{
+ // HTTP/1.1 => chunked coding; no body; no trailer
+ {
+ Req: Request{
+ Method: "GET",
+ URL: &url.URL{
+ Raw: "http://www.techcrunch.com/",
+ Scheme: "http",
+ RawPath: "http://www.techcrunch.com/",
+ RawAuthority: "www.techcrunch.com",
+ RawUserinfo: "",
+ Host: "www.techcrunch.com",
+ Path: "/",
+ RawQuery: "",
+ Fragment: "",
+ },
+ Proto: "HTTP/1.1",
+ ProtoMajor: 1,
+ ProtoMinor: 1,
+ Header: Header{
+ "Accept": {"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"},
+ "Accept-Charset": {"ISO-8859-1,utf-8;q=0.7,*;q=0.7"},
+ "Accept-Encoding": {"gzip,deflate"},
+ "Accept-Language": {"en-us,en;q=0.5"},
+ "Keep-Alive": {"300"},
+ "Proxy-Connection": {"keep-alive"},
+ "User-Agent": {"Fake"},
+ },
+ Body: nil,
+ Close: false,
+ Host: "www.techcrunch.com",
+ Form: map[string][]string{},
+ },
+
+ WantWrite: "GET http://www.techcrunch.com/ HTTP/1.1\r\n" +
+ "Host: www.techcrunch.com\r\n" +
+ "User-Agent: Fake\r\n" +
+ "Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\n" +
+ "Accept-Charset: ISO-8859-1,utf-8;q=0.7,*;q=0.7\r\n" +
+ "Accept-Encoding: gzip,deflate\r\n" +
+ "Accept-Language: en-us,en;q=0.5\r\n" +
+ "Keep-Alive: 300\r\n" +
+ "Proxy-Connection: keep-alive\r\n\r\n",
+
+ WantProxy: "GET http://www.techcrunch.com/ HTTP/1.1\r\n" +
+ "Host: www.techcrunch.com\r\n" +
+ "User-Agent: Fake\r\n" +
+ "Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\n" +
+ "Accept-Charset: ISO-8859-1,utf-8;q=0.7,*;q=0.7\r\n" +
+ "Accept-Encoding: gzip,deflate\r\n" +
+ "Accept-Language: en-us,en;q=0.5\r\n" +
+ "Keep-Alive: 300\r\n" +
+ "Proxy-Connection: keep-alive\r\n\r\n",
+ },
+ // HTTP/1.1 => chunked coding; body; empty trailer
+ {
+ Req: Request{
+ Method: "GET",
+ URL: &url.URL{
+ Scheme: "http",
+ Host: "www.google.com",
+ Path: "/search",
+ },
+ ProtoMajor: 1,
+ ProtoMinor: 1,
+ Header: Header{},
+ TransferEncoding: []string{"chunked"},
+ },
+
+ Body: []byte("abcdef"),
+
+ WantWrite: "GET /search HTTP/1.1\r\n" +
+ "Host: www.google.com\r\n" +
+ "User-Agent: Go http package\r\n" +
+ "Transfer-Encoding: chunked\r\n\r\n" +
+ chunk("abcdef") + chunk(""),
+
+ WantProxy: "GET http://www.google.com/search HTTP/1.1\r\n" +
+ "Host: www.google.com\r\n" +
+ "User-Agent: Go http package\r\n" +
+ "Transfer-Encoding: chunked\r\n\r\n" +
+ chunk("abcdef") + chunk(""),
+ },
+ // HTTP/1.1 POST => chunked coding; body; empty trailer
+ {
+ Req: Request{
+ Method: "POST",
+ URL: &url.URL{
+ Scheme: "http",
+ Host: "www.google.com",
+ Path: "/search",
+ },
+ ProtoMajor: 1,
+ ProtoMinor: 1,
+ Header: Header{},
+ Close: true,
+ TransferEncoding: []string{"chunked"},
+ },
+
+ Body: []byte("abcdef"),
+
+ WantWrite: "POST /search HTTP/1.1\r\n" +
+ "Host: www.google.com\r\n" +
+ "User-Agent: Go http package\r\n" +
+ "Connection: close\r\n" +
+ "Transfer-Encoding: chunked\r\n\r\n" +
+ chunk("abcdef") + chunk(""),
+
+ WantProxy: "POST http://www.google.com/search HTTP/1.1\r\n" +
+ "Host: www.google.com\r\n" +
+ "User-Agent: Go http package\r\n" +
+ "Connection: close\r\n" +
+ "Transfer-Encoding: chunked\r\n\r\n" +
+ chunk("abcdef") + chunk(""),
+ },
+
+ // HTTP/1.1 POST with Content-Length, no chunking
+ {
+ Req: Request{
+ Method: "POST",
+ URL: &url.URL{
+ Scheme: "http",
+ Host: "www.google.com",
+ Path: "/search",
+ },
+ ProtoMajor: 1,
+ ProtoMinor: 1,
+ Header: Header{},
+ Close: true,
+ ContentLength: 6,
+ },
+
+ Body: []byte("abcdef"),
+
+ WantWrite: "POST /search HTTP/1.1\r\n" +
+ "Host: www.google.com\r\n" +
+ "User-Agent: Go http package\r\n" +
+ "Connection: close\r\n" +
+ "Content-Length: 6\r\n" +
+ "\r\n" +
+ "abcdef",
+
+ WantProxy: "POST http://www.google.com/search HTTP/1.1\r\n" +
+ "Host: www.google.com\r\n" +
+ "User-Agent: Go http package\r\n" +
+ "Connection: close\r\n" +
+ "Content-Length: 6\r\n" +
+ "\r\n" +
+ "abcdef",
+ },
+
+ // HTTP/1.1 POST with Content-Length in headers
+ {
+ Req: Request{
+ Method: "POST",
+ URL: mustParseURL("http://example.com/"),
+ Host: "example.com",
+ Header: Header{
+ "Content-Length": []string{"10"}, // ignored
+ },
+ ContentLength: 6,
+ },
+
+ Body: []byte("abcdef"),
+
+ WantWrite: "POST / HTTP/1.1\r\n" +
+ "Host: example.com\r\n" +
+ "User-Agent: Go http package\r\n" +
+ "Content-Length: 6\r\n" +
+ "\r\n" +
+ "abcdef",
+
+ WantProxy: "POST / HTTP/1.1\r\n" +
+ "Host: example.com\r\n" +
+ "User-Agent: Go http package\r\n" +
+ "Content-Length: 6\r\n" +
+ "\r\n" +
+ "abcdef",
+ },
+
+ // default to HTTP/1.1
+ {
+ Req: Request{
+ Method: "GET",
+ URL: mustParseURL("/search"),
+ Host: "www.google.com",
+ },
+
+ WantWrite: "GET /search HTTP/1.1\r\n" +
+ "Host: www.google.com\r\n" +
+ "User-Agent: Go http package\r\n" +
+ "\r\n",
+ },
+
+ // Request with a 0 ContentLength and a 0 byte body.
+ {
+ Req: Request{
+ Method: "POST",
+ URL: mustParseURL("/"),
+ Host: "example.com",
+ ProtoMajor: 1,
+ ProtoMinor: 1,
+ ContentLength: 0, // as if unset by user
+ },
+
+ Body: func() io.ReadCloser { return ioutil.NopCloser(io.LimitReader(strings.NewReader("xx"), 0)) },
+
+ // RFC 2616 Section 14.13 says Content-Length should be specified
+ // unless body is prohibited by the request method.
+ // Also, nginx expects it for POST and PUT.
+ WantWrite: "POST / HTTP/1.1\r\n" +
+ "Host: example.com\r\n" +
+ "User-Agent: Go http package\r\n" +
+ "Content-Length: 0\r\n" +
+ "\r\n",
+
+ WantProxy: "POST / HTTP/1.1\r\n" +
+ "Host: example.com\r\n" +
+ "User-Agent: Go http package\r\n" +
+ "Content-Length: 0\r\n" +
+ "\r\n",
+ },
+
+ // Request with a 0 ContentLength and a 1 byte body.
+ {
+ Req: Request{
+ Method: "POST",
+ URL: mustParseURL("/"),
+ Host: "example.com",
+ ProtoMajor: 1,
+ ProtoMinor: 1,
+ ContentLength: 0, // as if unset by user
+ },
+
+ Body: func() io.ReadCloser { return ioutil.NopCloser(io.LimitReader(strings.NewReader("xx"), 1)) },
+
+ WantWrite: "POST / HTTP/1.1\r\n" +
+ "Host: example.com\r\n" +
+ "User-Agent: Go http package\r\n" +
+ "Transfer-Encoding: chunked\r\n\r\n" +
+ chunk("x") + chunk(""),
+
+ WantProxy: "POST / HTTP/1.1\r\n" +
+ "Host: example.com\r\n" +
+ "User-Agent: Go http package\r\n" +
+ "Transfer-Encoding: chunked\r\n\r\n" +
+ chunk("x") + chunk(""),
+ },
+
+ // Request with a ContentLength of 10 but a 5 byte body.
+ {
+ Req: Request{
+ Method: "POST",
+ URL: mustParseURL("/"),
+ Host: "example.com",
+ ProtoMajor: 1,
+ ProtoMinor: 1,
+ ContentLength: 10, // but we're going to send only 5 bytes
+ },
+ Body: []byte("12345"),
+ WantError: errors.New("http: Request.ContentLength=10 with Body length 5"),
+ },
+
+ // Request with a ContentLength of 4 but an 8 byte body.
+ {
+ Req: Request{
+ Method: "POST",
+ URL: mustParseURL("/"),
+ Host: "example.com",
+ ProtoMajor: 1,
+ ProtoMinor: 1,
+ ContentLength: 4, // but we're going to try to send 8 bytes
+ },
+ Body: []byte("12345678"),
+ WantError: errors.New("http: Request.ContentLength=4 with Body length 8"),
+ },
+
+ // Request with a 5 ContentLength and nil body.
+ {
+ Req: Request{
+ Method: "POST",
+ URL: mustParseURL("/"),
+ Host: "example.com",
+ ProtoMajor: 1,
+ ProtoMinor: 1,
+ ContentLength: 5, // but we'll omit the body
+ },
+ WantError: errors.New("http: Request.ContentLength=5 with nil Body"),
+ },
+
+ // Request with an explicit HTTP/1.0 version: Write still emits an
+ // HTTP/1.1 request line and fills in the Host and User-Agent headers.
+ {
+ Req: Request{
+ Method: "GET",
+ URL: mustParseURL("/foo"),
+ ProtoMajor: 1,
+ ProtoMinor: 0,
+ Header: Header{
+ "X-Foo": []string{"X-Bar"},
+ },
+ },
+
+ WantWrite: "GET /foo HTTP/1.1\r\n" +
+ "Host: \r\n" +
+ "User-Agent: Go http package\r\n" +
+ "X-Foo: X-Bar\r\n\r\n",
+ },
+}
+
+func TestRequestWrite(t *testing.T) {
+ for i := range reqWriteTests {
+ tt := &reqWriteTests[i]
+
+ setBody := func() {
+ if tt.Body == nil {
+ return
+ }
+ switch b := tt.Body.(type) {
+ case []byte:
+ tt.Req.Body = ioutil.NopCloser(bytes.NewBuffer(b))
+ case func() io.ReadCloser:
+ tt.Req.Body = b()
+ }
+ }
+ setBody()
+ if tt.Req.Header == nil {
+ tt.Req.Header = make(Header)
+ }
+
+ var braw bytes.Buffer
+ err := tt.Req.Write(&braw)
+ if g, e := fmt.Sprintf("%v", err), fmt.Sprintf("%v", tt.WantError); g != e {
+ t.Errorf("writing #%d, err = %q, want %q", i, g, e)
+ continue
+ }
+ if err != nil {
+ continue
+ }
+
+ if tt.WantWrite != "" {
+ sraw := braw.String()
+ if sraw != tt.WantWrite {
+ t.Errorf("Test %d, expecting:\n%s\nGot:\n%s\n", i, tt.WantWrite, sraw)
+ continue
+ }
+ }
+
+ if tt.WantProxy != "" {
+ setBody()
+ var praw bytes.Buffer
+ err = tt.Req.WriteProxy(&praw)
+ if err != nil {
+ t.Errorf("WriteProxy #%d: %s", i, err)
+ continue
+ }
+ sraw := praw.String()
+ if sraw != tt.WantProxy {
+ t.Errorf("Test Proxy %d, expecting:\n%s\nGot:\n%s\n", i, tt.WantProxy, sraw)
+ continue
+ }
+ }
+ }
+}
+
+type closeChecker struct {
+ io.Reader
+ closed bool
+}
+
+func (rc *closeChecker) Close() error {
+ rc.closed = true
+ return nil
+}
+
+// TestRequestWriteClosesBody tests that Request.Write does close its request.Body.
+// It also indirectly tests NewRequest and that it doesn't wrap an existing Closer
+// inside a NopCloser, and that it serializes it correctly.
+func TestRequestWriteClosesBody(t *testing.T) {
+ rc := &closeChecker{Reader: strings.NewReader("my body")}
+ req, _ := NewRequest("POST", "http://foo.com/", rc)
+ if req.ContentLength != 0 {
+ t.Errorf("got req.ContentLength %d, want 0", req.ContentLength)
+ }
+ buf := new(bytes.Buffer)
+ req.Write(buf)
+ if !rc.closed {
+ t.Error("body not closed after write")
+ }
+ expected := "POST / HTTP/1.1\r\n" +
+ "Host: foo.com\r\n" +
+ "User-Agent: Go http package\r\n" +
+ "Transfer-Encoding: chunked\r\n\r\n" +
+ // TODO: currently we don't buffer before chunking, so we get a
+ // single "m" chunk before the other chunks, as this was the 1-byte
+ // read from our MultiReader where we stitched the Body back together
+ // after sniffing whether the Body was 0 bytes or not.
+ chunk("m") +
+ chunk("y body") +
+ chunk("")
+ if buf.String() != expected {
+ t.Errorf("write:\n got: %s\nwant: %s", buf.String(), expected)
+ }
+}
+
+func chunk(s string) string {
+ return fmt.Sprintf("%x\r\n%s\r\n", len(s), s)
+}
+
+func mustParseURL(s string) *url.URL {
+ u, err := url.Parse(s)
+ if err != nil {
+ panic(fmt.Sprintf("Error parsing URL %q: %v", s, err))
+ }
+ return u
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// HTTP Response reading and parsing.
+
+package http
+
+import (
+ "bufio"
+ "errors"
+ "io"
+ "net/textproto"
+ "net/url"
+ "strconv"
+ "strings"
+)
+
+var respExcludeHeader = map[string]bool{
+ "Content-Length": true,
+ "Transfer-Encoding": true,
+ "Trailer": true,
+}
+
+// Response represents the response from an HTTP request.
+//
+type Response struct {
+ Status string // e.g. "200 OK"
+ StatusCode int // e.g. 200
+ Proto string // e.g. "HTTP/1.0"
+ ProtoMajor int // e.g. 1
+ ProtoMinor int // e.g. 0
+
+ // Header maps header keys to values. If the response had multiple
+ // headers with the same key, they will be concatenated, with comma
+ // delimiters. (Section 4.2 of RFC 2616 requires that multiple headers
+ // be semantically equivalent to a comma-delimited sequence.) Values
+ // duplicated by other fields in this struct (e.g., ContentLength) are
+ // omitted from Header.
+ //
+ // Keys in the map are canonicalized (see CanonicalHeaderKey).
+ Header Header
+
+ // Body represents the response body.
+ //
+ // The http Client and Transport guarantee that Body is always
+ // non-nil, even on responses without a body or responses with
+ // a zero-length body.
+ Body io.ReadCloser
+
+ // ContentLength records the length of the associated content. The
+ // value -1 indicates that the length is unknown. Unless the request
+ // method is "HEAD", values >= 0 indicate that the given number of bytes may
+ // be read from Body.
+ ContentLength int64
+
+ // Contains transfer encodings from outer-most to inner-most. A nil
+ // value means that "identity" encoding is used.
+ TransferEncoding []string
+
+ // Close records whether the header directed that the connection be
+ // closed after reading Body. The value is advice for clients: neither
+ // ReadResponse nor Response.Write ever closes a connection.
+ Close bool
+
+ // Trailer maps trailer keys to values, in the same
+ // format as the header.
+ Trailer Header
+
+ // The Request that was sent to obtain this Response.
+ // Request's Body is nil (having already been consumed).
+ // This is only populated for Client requests.
+ Request *Request
+}
+
+// Cookies parses and returns the cookies set in the Set-Cookie headers.
+func (r *Response) Cookies() []*Cookie {
+ return readSetCookies(r.Header)
+}
+
+var ErrNoLocation = errors.New("http: no Location header in response")
+
+// Location returns the URL of the response's "Location" header,
+// if present. Relative redirects are resolved relative to
+// the Response's Request. ErrNoLocation is returned if no
+// Location header is present.
+func (r *Response) Location() (*url.URL, error) {
+ lv := r.Header.Get("Location")
+ if lv == "" {
+ return nil, ErrNoLocation
+ }
+ if r.Request != nil && r.Request.URL != nil {
+ return r.Request.URL.Parse(lv)
+ }
+ return url.Parse(lv)
+}
+
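+// Illustrative sketch (not exercised by the tests in this change): resolving
+// a redirect target from a client response. res is an assumed *Response
+// obtained from the Client.
+//
+//	loc, err := res.Location()
+//	if err != nil {
+//		// err is ErrNoLocation if no Location header was present
+//	}
+//	fmt.Println(loc) // absolute URL, resolved against res.Request.URL
+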
+// ReadResponse reads and returns an HTTP response from r. The
+// req parameter specifies the Request that corresponds to
+// this Response. Clients must call resp.Body.Close when finished
+// reading resp.Body. After that call, clients can inspect
+// resp.Trailer to find key/value pairs included in the response
+// trailer.
+func ReadResponse(r *bufio.Reader, req *Request) (resp *Response, err error) {
+
+ tp := textproto.NewReader(r)
+ resp = new(Response)
+
+ resp.Request = req
+ resp.Request.Method = strings.ToUpper(resp.Request.Method)
+
+ // Parse the first line of the response.
+ line, err := tp.ReadLine()
+ if err != nil {
+ if err == io.EOF {
+ err = io.ErrUnexpectedEOF
+ }
+ return nil, err
+ }
+ f := strings.SplitN(line, " ", 3)
+ if len(f) < 2 {
+ return nil, &badStringError{"malformed HTTP response", line}
+ }
+ reasonPhrase := ""
+ if len(f) > 2 {
+ reasonPhrase = f[2]
+ }
+ resp.Status = f[1] + " " + reasonPhrase
+ resp.StatusCode, err = strconv.Atoi(f[1])
+ if err != nil {
+ return nil, &badStringError{"malformed HTTP status code", f[1]}
+ }
+
+ resp.Proto = f[0]
+ var ok bool
+ if resp.ProtoMajor, resp.ProtoMinor, ok = ParseHTTPVersion(resp.Proto); !ok {
+ return nil, &badStringError{"malformed HTTP version", resp.Proto}
+ }
+
+ // Parse the response headers.
+ mimeHeader, err := tp.ReadMIMEHeader()
+ if err != nil {
+ return nil, err
+ }
+ resp.Header = Header(mimeHeader)
+
+ fixPragmaCacheControl(resp.Header)
+
+ err = readTransfer(resp, r)
+ if err != nil {
+ return nil, err
+ }
+
+ return resp, nil
+}
+
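+// Illustrative sketch: reading a single response off an established
+// client-side connection. conn and req are assumed to exist already.
+//
+//	br := bufio.NewReader(conn)
+//	resp, err := http.ReadResponse(br, req)
+//	if err != nil {
+//		// handle error
+//	}
+//	body, _ := ioutil.ReadAll(resp.Body)
+//	resp.Body.Close() // required, per the doc comment above
+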
+// RFC2616: Should treat
+// Pragma: no-cache
+// like
+// Cache-Control: no-cache
+func fixPragmaCacheControl(header Header) {
+ if hp, ok := header["Pragma"]; ok && len(hp) > 0 && hp[0] == "no-cache" {
+ if _, presentcc := header["Cache-Control"]; !presentcc {
+ header["Cache-Control"] = []string{"no-cache"}
+ }
+ }
+}
+
+// ProtoAtLeast returns whether the HTTP protocol used
+// in the response is at least major.minor.
+func (r *Response) ProtoAtLeast(major, minor int) bool {
+ return r.ProtoMajor > major ||
+ r.ProtoMajor == major && r.ProtoMinor >= minor
+}
+
+// Write writes the response (header, body and trailer) in wire format.
+// This method consults the following fields of resp:
+//
+// StatusCode
+// ProtoMajor
+// ProtoMinor
+// RequestMethod
+// TransferEncoding
+// Trailer
+// Body
+// ContentLength
+// Header, values for non-canonical keys will have unpredictable behavior
+//
+func (resp *Response) Write(w io.Writer) error {
+
+ // RequestMethod should be upper-case
+ if resp.Request != nil {
+ resp.Request.Method = strings.ToUpper(resp.Request.Method)
+ }
+
+ // Status line
+ text := resp.Status
+ if text == "" {
+ var ok bool
+ text, ok = statusText[resp.StatusCode]
+ if !ok {
+ text = "status code " + strconv.Itoa(resp.StatusCode)
+ }
+ }
+ io.WriteString(w, "HTTP/"+strconv.Itoa(resp.ProtoMajor)+".")
+ io.WriteString(w, strconv.Itoa(resp.ProtoMinor)+" ")
+ io.WriteString(w, strconv.Itoa(resp.StatusCode)+" "+text+"\r\n")
+
+ // Process Body,ContentLength,Close,Trailer
+ tw, err := newTransferWriter(resp)
+ if err != nil {
+ return err
+ }
+ err = tw.WriteHeader(w)
+ if err != nil {
+ return err
+ }
+
+ // Rest of header
+ err = resp.Header.WriteSubset(w, respExcludeHeader)
+ if err != nil {
+ return err
+ }
+
+ // End-of-header
+ io.WriteString(w, "\r\n")
+
+ // Write body and trailer
+ err = tw.WriteBody(w)
+ if err != nil {
+ return err
+ }
+
+ // Success
+ return nil
+}
--- /dev/null
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http
+
+import (
+ "bufio"
+ "bytes"
+ "compress/gzip"
+ "crypto/rand"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/url"
+ "reflect"
+ "testing"
+)
+
+type respTest struct {
+ Raw string
+ Resp Response
+ Body string
+}
+
+func dummyReq(method string) *Request {
+ return &Request{Method: method}
+}
+
+var respTests = []respTest{
+ // Unchunked response without Content-Length.
+ {
+ "HTTP/1.0 200 OK\r\n" +
+ "Connection: close\r\n" +
+ "\r\n" +
+ "Body here\n",
+
+ Response{
+ Status: "200 OK",
+ StatusCode: 200,
+ Proto: "HTTP/1.0",
+ ProtoMajor: 1,
+ ProtoMinor: 0,
+ Request: dummyReq("GET"),
+ Header: Header{
+ "Connection": {"close"}, // TODO(rsc): Delete?
+ },
+ Close: true,
+ ContentLength: -1,
+ },
+
+ "Body here\n",
+ },
+
+ // Unchunked HTTP/1.1 response without Content-Length or
+ // Connection headers.
+ {
+ "HTTP/1.1 200 OK\r\n" +
+ "\r\n" +
+ "Body here\n",
+
+ Response{
+ Status: "200 OK",
+ StatusCode: 200,
+ Proto: "HTTP/1.1",
+ ProtoMajor: 1,
+ ProtoMinor: 1,
+ Request: dummyReq("GET"),
+ Close: true,
+ ContentLength: -1,
+ },
+
+ "Body here\n",
+ },
+
+ // Unchunked HTTP/1.1 204 response without Content-Length.
+ {
+ "HTTP/1.1 204 No Content\r\n" +
+ "\r\n" +
+ "Body should not be read!\n",
+
+ Response{
+ Status: "204 No Content",
+ StatusCode: 204,
+ Proto: "HTTP/1.1",
+ ProtoMajor: 1,
+ ProtoMinor: 1,
+ Request: dummyReq("GET"),
+ Close: false,
+ ContentLength: 0,
+ },
+
+ "",
+ },
+
+ // Unchunked response with Content-Length.
+ {
+ "HTTP/1.0 200 OK\r\n" +
+ "Content-Length: 10\r\n" +
+ "Connection: close\r\n" +
+ "\r\n" +
+ "Body here\n",
+
+ Response{
+ Status: "200 OK",
+ StatusCode: 200,
+ Proto: "HTTP/1.0",
+ ProtoMajor: 1,
+ ProtoMinor: 0,
+ Request: dummyReq("GET"),
+ Header: Header{
+ "Connection": {"close"}, // TODO(rsc): Delete?
+ "Content-Length": {"10"}, // TODO(rsc): Delete?
+ },
+ Close: true,
+ ContentLength: 10,
+ },
+
+ "Body here\n",
+ },
+
+ // Chunked response without Content-Length.
+ {
+ "HTTP/1.0 200 OK\r\n" +
+ "Transfer-Encoding: chunked\r\n" +
+ "\r\n" +
+ "0a\r\n" +
+ "Body here\n\r\n" +
+ "09\r\n" +
+ "continued\r\n" +
+ "0\r\n" +
+ "\r\n",
+
+ Response{
+ Status: "200 OK",
+ StatusCode: 200,
+ Proto: "HTTP/1.0",
+ ProtoMajor: 1,
+ ProtoMinor: 0,
+ Request: dummyReq("GET"),
+ Header: Header{},
+ Close: true,
+ ContentLength: -1,
+ TransferEncoding: []string{"chunked"},
+ },
+
+ "Body here\ncontinued",
+ },
+
+ // Chunked response with Content-Length.
+ {
+ "HTTP/1.0 200 OK\r\n" +
+ "Transfer-Encoding: chunked\r\n" +
+ "Content-Length: 10\r\n" +
+ "\r\n" +
+ "0a\r\n" +
+ "Body here\n" +
+ "0\r\n" +
+ "\r\n",
+
+ Response{
+ Status: "200 OK",
+ StatusCode: 200,
+ Proto: "HTTP/1.0",
+ ProtoMajor: 1,
+ ProtoMinor: 0,
+ Request: dummyReq("GET"),
+ Header: Header{},
+ Close: true,
+ ContentLength: -1, // TODO(rsc): Fix?
+ TransferEncoding: []string{"chunked"},
+ },
+
+ "Body here\n",
+ },
+
+ // Chunked response in response to a HEAD request (the "chunked" should
+ // be ignored, as HEAD responses never have bodies)
+ {
+ "HTTP/1.0 200 OK\r\n" +
+ "Transfer-Encoding: chunked\r\n" +
+ "\r\n",
+
+ Response{
+ Status: "200 OK",
+ StatusCode: 200,
+ Proto: "HTTP/1.0",
+ ProtoMajor: 1,
+ ProtoMinor: 0,
+ Request: dummyReq("HEAD"),
+ Header: Header{},
+ Close: true,
+ ContentLength: 0,
+ },
+
+ "",
+ },
+
+ // explicit Content-Length of 0.
+ {
+ "HTTP/1.1 200 OK\r\n" +
+ "Content-Length: 0\r\n" +
+ "\r\n",
+
+ Response{
+ Status: "200 OK",
+ StatusCode: 200,
+ Proto: "HTTP/1.1",
+ ProtoMajor: 1,
+ ProtoMinor: 1,
+ Request: dummyReq("GET"),
+ Header: Header{
+ "Content-Length": {"0"},
+ },
+ Close: false,
+ ContentLength: 0,
+ },
+
+ "",
+ },
+
+ // Status line without a Reason-Phrase, but trailing space.
+ // (permitted by RFC 2616)
+ {
+ "HTTP/1.0 303 \r\n\r\n",
+ Response{
+ Status: "303 ",
+ StatusCode: 303,
+ Proto: "HTTP/1.0",
+ ProtoMajor: 1,
+ ProtoMinor: 0,
+ Request: dummyReq("GET"),
+ Header: Header{},
+ Close: true,
+ ContentLength: -1,
+ },
+
+ "",
+ },
+
+ // Status line without a Reason-Phrase, and no trailing space.
+ // (not permitted by RFC 2616, but we'll accept it anyway)
+ {
+ "HTTP/1.0 303\r\n\r\n",
+ Response{
+ Status: "303 ",
+ StatusCode: 303,
+ Proto: "HTTP/1.0",
+ ProtoMajor: 1,
+ ProtoMinor: 0,
+ Request: dummyReq("GET"),
+ Header: Header{},
+ Close: true,
+ ContentLength: -1,
+ },
+
+ "",
+ },
+}
+
+func TestReadResponse(t *testing.T) {
+ for i := range respTests {
+ tt := &respTests[i]
+ var braw bytes.Buffer
+ braw.WriteString(tt.Raw)
+ resp, err := ReadResponse(bufio.NewReader(&braw), tt.Resp.Request)
+ if err != nil {
+ t.Errorf("#%d: %s", i, err)
+ continue
+ }
+ rbody := resp.Body
+ resp.Body = nil
+ diff(t, fmt.Sprintf("#%d Response", i), resp, &tt.Resp)
+ var bout bytes.Buffer
+ if rbody != nil {
+ io.Copy(&bout, rbody)
+ rbody.Close()
+ }
+ body := bout.String()
+ if body != tt.Body {
+ t.Errorf("#%d: Body = %q want %q", i, body, tt.Body)
+ }
+ }
+}
+
+var readResponseCloseInMiddleTests = []struct {
+ chunked, compressed bool
+}{
+ {false, false},
+ {true, false},
+ {true, true},
+}
+
+// TestReadResponseCloseInMiddle tests that closing a body after
+// reading only part of its contents advances the read to the end of
+// the response, right up until the next request.
+func TestReadResponseCloseInMiddle(t *testing.T) {
+ for _, test := range readResponseCloseInMiddleTests {
+ fatalf := func(format string, args ...interface{}) {
+ args = append([]interface{}{test.chunked, test.compressed}, args...)
+ t.Fatalf("on test chunked=%v, compressed=%v: "+format, args...)
+ }
+ checkErr := func(err error, msg string) {
+ if err == nil {
+ return
+ }
+ fatalf(msg+": %v", err)
+ }
+ var buf bytes.Buffer
+ buf.WriteString("HTTP/1.1 200 OK\r\n")
+ if test.chunked {
+ buf.WriteString("Transfer-Encoding: chunked\r\n")
+ } else {
+ buf.WriteString("Content-Length: 1000000\r\n")
+ }
+ var wr io.Writer = &buf
+ if test.chunked {
+ wr = &chunkedWriter{wr}
+ }
+ if test.compressed {
+ buf.WriteString("Content-Encoding: gzip\r\n")
+ var err error
+ wr, err = gzip.NewWriter(wr)
+ checkErr(err, "gzip.NewWriter")
+ }
+ buf.WriteString("\r\n")
+
+ chunk := bytes.Repeat([]byte{'x'}, 1000)
+ for i := 0; i < 1000; i++ {
+ if test.compressed {
+ // Otherwise this compresses too well.
+ _, err := io.ReadFull(rand.Reader, chunk)
+ checkErr(err, "rand.Reader ReadFull")
+ }
+ wr.Write(chunk)
+ }
+ if test.compressed {
+ err := wr.(*gzip.Compressor).Close()
+ checkErr(err, "compressor close")
+ }
+ if test.chunked {
+ buf.WriteString("0\r\n\r\n")
+ }
+ buf.WriteString("Next Request Here")
+
+ bufr := bufio.NewReader(&buf)
+ resp, err := ReadResponse(bufr, dummyReq("GET"))
+ checkErr(err, "ReadResponse")
+ expectedLength := int64(-1)
+ if !test.chunked {
+ expectedLength = 1000000
+ }
+ if resp.ContentLength != expectedLength {
+ fatalf("expected response length %d, got %d", expectedLength, resp.ContentLength)
+ }
+ if resp.Body == nil {
+ fatalf("nil body")
+ }
+ if test.compressed {
+ gzReader, err := gzip.NewReader(resp.Body)
+ checkErr(err, "gzip.NewReader")
+ resp.Body = &readFirstCloseBoth{gzReader, resp.Body}
+ }
+
+ rbuf := make([]byte, 2500)
+ n, err := io.ReadFull(resp.Body, rbuf)
+ checkErr(err, "2500 byte ReadFull")
+ if n != 2500 {
+ fatalf("ReadFull only read %d bytes", n)
+ }
+ if test.compressed == false && !bytes.Equal(bytes.Repeat([]byte{'x'}, 2500), rbuf) {
+ fatalf("ReadFull didn't read 2500 'x'; got %q", string(rbuf))
+ }
+ resp.Body.Close()
+
+ rest, err := ioutil.ReadAll(bufr)
+ checkErr(err, "ReadAll on remainder")
+ if e, g := "Next Request Here", string(rest); e != g {
+ fatalf("remainder = %q, expected %q", g, e)
+ }
+ }
+}
+
+func diff(t *testing.T, prefix string, have, want interface{}) {
+ hv := reflect.ValueOf(have).Elem()
+ wv := reflect.ValueOf(want).Elem()
+ if hv.Type() != wv.Type() {
+ t.Errorf("%s: type mismatch %v want %v", prefix, hv.Type(), wv.Type())
+ }
+ for i := 0; i < hv.NumField(); i++ {
+ hf := hv.Field(i).Interface()
+ wf := wv.Field(i).Interface()
+ if !reflect.DeepEqual(hf, wf) {
+ t.Errorf("%s: %s = %v want %v", prefix, hv.Type().Field(i).Name, hf, wf)
+ }
+ }
+}
+
+type responseLocationTest struct {
+ location string // Response's Location header or ""
+ requrl string // Response.Request.URL or ""
+ want string
+ wantErr error
+}
+
+var responseLocationTests = []responseLocationTest{
+ {"/foo", "http://bar.com/baz", "http://bar.com/foo", nil},
+ {"http://foo.com/", "http://bar.com/baz", "http://foo.com/", nil},
+ {"", "http://bar.com/baz", "", ErrNoLocation},
+}
+
+func TestLocationResponse(t *testing.T) {
+ for i, tt := range responseLocationTests {
+ res := new(Response)
+ res.Header = make(Header)
+ res.Header.Set("Location", tt.location)
+ if tt.requrl != "" {
+ res.Request = &Request{}
+ var err error
+ res.Request.URL, err = url.Parse(tt.requrl)
+ if err != nil {
+ t.Fatalf("bad test URL %q: %v", tt.requrl, err)
+ }
+ }
+
+ got, err := res.Location()
+ if tt.wantErr != nil {
+ if err == nil {
+ t.Errorf("%d. err=nil; want %q", i, tt.wantErr)
+ continue
+ }
+ if g, e := err.Error(), tt.wantErr.Error(); g != e {
+ t.Errorf("%d. err=%q; want %q", i, g, e)
+ continue
+ }
+ continue
+ }
+ if err != nil {
+ t.Errorf("%d. err=%q", i, err)
+ continue
+ }
+ if g, e := got.String(), tt.want; g != e {
+ t.Errorf("%d. Location=%q; want %q", i, g, e)
+ }
+ }
+}
--- /dev/null
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http
+
+import (
+ "bytes"
+ "io/ioutil"
+ "testing"
+)
+
+type respWriteTest struct {
+ Resp Response
+ Raw string
+}
+
+var respWriteTests = []respWriteTest{
+ // HTTP/1.0, identity coding; no trailer
+ {
+ Response{
+ StatusCode: 503,
+ ProtoMajor: 1,
+ ProtoMinor: 0,
+ Request: dummyReq("GET"),
+ Header: Header{},
+ Body: ioutil.NopCloser(bytes.NewBufferString("abcdef")),
+ ContentLength: 6,
+ },
+
+ "HTTP/1.0 503 Service Unavailable\r\n" +
+ "Content-Length: 6\r\n\r\n" +
+ "abcdef",
+ },
+ // Unchunked response without Content-Length.
+ {
+ Response{
+ StatusCode: 200,
+ ProtoMajor: 1,
+ ProtoMinor: 0,
+ Request: dummyReq("GET"),
+ Header: Header{},
+ Body: ioutil.NopCloser(bytes.NewBufferString("abcdef")),
+ ContentLength: -1,
+ },
+ "HTTP/1.0 200 OK\r\n" +
+ "\r\n" +
+ "abcdef",
+ },
+ // HTTP/1.1, chunked coding; empty trailer; close
+ {
+ Response{
+ StatusCode: 200,
+ ProtoMajor: 1,
+ ProtoMinor: 1,
+ Request: dummyReq("GET"),
+ Header: Header{},
+ Body: ioutil.NopCloser(bytes.NewBufferString("abcdef")),
+ ContentLength: 6,
+ TransferEncoding: []string{"chunked"},
+ Close: true,
+ },
+
+ "HTTP/1.1 200 OK\r\n" +
+ "Connection: close\r\n" +
+ "Transfer-Encoding: chunked\r\n\r\n" +
+ "6\r\nabcdef\r\n0\r\n\r\n",
+ },
+
+ // Header value with a newline character (Issue 914).
+ // Also tests removal of leading and trailing whitespace.
+ {
+ Response{
+ StatusCode: 204,
+ ProtoMajor: 1,
+ ProtoMinor: 1,
+ Request: dummyReq("GET"),
+ Header: Header{
+ "Foo": []string{" Bar\nBaz "},
+ },
+ Body: nil,
+ ContentLength: 0,
+ TransferEncoding: []string{"chunked"},
+ Close: true,
+ },
+
+ "HTTP/1.1 204 No Content\r\n" +
+ "Connection: close\r\n" +
+ "Foo: Bar Baz\r\n" +
+ "\r\n",
+ },
+}
+
+func TestResponseWrite(t *testing.T) {
+ for i := range respWriteTests {
+ tt := &respWriteTests[i]
+ var braw bytes.Buffer
+ err := tt.Resp.Write(&braw)
+ if err != nil {
+ t.Errorf("error writing #%d: %s", i, err)
+ continue
+ }
+ sraw := braw.String()
+ if sraw != tt.Raw {
+ t.Errorf("Test %d, expecting:\n%q\nGot:\n%q\n", i, tt.Raw, sraw)
+ continue
+ }
+ }
+}
--- /dev/null
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// End-to-end serving tests
+
+package http_test
+
+import (
+ "bufio"
+ "bytes"
+ "crypto/tls"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "net"
+ . "net/http"
+ "net/http/httptest"
+ "net/http/httputil"
+ "net/url"
+ "os"
+ "reflect"
+ "strings"
+ "syscall"
+ "testing"
+ "time"
+)
+
+type dummyAddr string
+type oneConnListener struct {
+ conn net.Conn
+}
+
+func (l *oneConnListener) Accept() (c net.Conn, err error) {
+ c = l.conn
+ if c == nil {
+ err = io.EOF
+ return
+ }
+ err = nil
+ l.conn = nil
+ return
+}
+
+func (l *oneConnListener) Close() error {
+ return nil
+}
+
+func (l *oneConnListener) Addr() net.Addr {
+ return dummyAddr("test-address")
+}
+
+func (a dummyAddr) Network() string {
+ return string(a)
+}
+
+func (a dummyAddr) String() string {
+ return string(a)
+}
+
+type testConn struct {
+ readBuf bytes.Buffer
+ writeBuf bytes.Buffer
+}
+
+func (c *testConn) Read(b []byte) (int, error) {
+ return c.readBuf.Read(b)
+}
+
+func (c *testConn) Write(b []byte) (int, error) {
+ return c.writeBuf.Write(b)
+}
+
+func (c *testConn) Close() error {
+ return nil
+}
+
+func (c *testConn) LocalAddr() net.Addr {
+ return dummyAddr("local-addr")
+}
+
+func (c *testConn) RemoteAddr() net.Addr {
+ return dummyAddr("remote-addr")
+}
+
+func (c *testConn) SetTimeout(nsec int64) error {
+ return nil
+}
+
+func (c *testConn) SetReadTimeout(nsec int64) error {
+ return nil
+}
+
+func (c *testConn) SetWriteTimeout(nsec int64) error {
+ return nil
+}
+
+func TestConsumingBodyOnNextConn(t *testing.T) {
+ conn := new(testConn)
+ for i := 0; i < 2; i++ {
+ conn.readBuf.Write([]byte(
+ "POST / HTTP/1.1\r\n" +
+ "Host: test\r\n" +
+ "Content-Length: 11\r\n" +
+ "\r\n" +
+ "foo=1&bar=1"))
+ }
+
+ reqNum := 0
+ ch := make(chan *Request)
+ servech := make(chan error)
+ listener := &oneConnListener{conn}
+ handler := func(res ResponseWriter, req *Request) {
+ reqNum++
+ ch <- req
+ }
+
+ go func() {
+ servech <- Serve(listener, HandlerFunc(handler))
+ }()
+
+ var req *Request
+ req = <-ch
+ if req == nil {
+ t.Fatal("Got nil first request.")
+ }
+ if req.Method != "POST" {
+ t.Errorf("For request #1's method, got %q; expected %q",
+ req.Method, "POST")
+ }
+
+ req = <-ch
+ if req == nil {
+ t.Fatal("Got nil first request.")
+ }
+ if req.Method != "POST" {
+ t.Errorf("For request #2's method, got %q; expected %q",
+ req.Method, "POST")
+ }
+
+ if serveerr := <-servech; serveerr != io.EOF {
+ t.Errorf("Serve returned %q; expected EOF", serveerr)
+ }
+}
+
+type stringHandler string
+
+func (s stringHandler) ServeHTTP(w ResponseWriter, r *Request) {
+ w.Header().Set("Result", string(s))
+}
+
+var handlers = []struct {
+ pattern string
+ msg string
+}{
+ {"/", "Default"},
+ {"/someDir/", "someDir"},
+ {"someHost.com/someDir/", "someHost.com/someDir"},
+}
+
+var vtests = []struct {
+ url string
+ expected string
+}{
+ {"http://localhost/someDir/apage", "someDir"},
+ {"http://localhost/otherDir/apage", "Default"},
+ {"http://someHost.com/someDir/apage", "someHost.com/someDir"},
+ {"http://otherHost.com/someDir/apage", "someDir"},
+ {"http://otherHost.com/aDir/apage", "Default"},
+}
+
+func TestHostHandlers(t *testing.T) {
+ for _, h := range handlers {
+ Handle(h.pattern, stringHandler(h.msg))
+ }
+ ts := httptest.NewServer(nil)
+ defer ts.Close()
+
+ conn, err := net.Dial("tcp", ts.Listener.Addr().String())
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer conn.Close()
+ cc := httputil.NewClientConn(conn, nil)
+ for _, vt := range vtests {
+ var r *Response
+ var req Request
+ if req.URL, err = url.Parse(vt.url); err != nil {
+ t.Errorf("cannot parse url: %v", err)
+ continue
+ }
+ if err := cc.Write(&req); err != nil {
+ t.Errorf("writing request: %v", err)
+ continue
+ }
+ r, err := cc.Read(&req)
+ if err != nil {
+ t.Errorf("reading response: %v", err)
+ continue
+ }
+ s := r.Header.Get("Result")
+ if s != vt.expected {
+ t.Errorf("Get(%q) = %q, want %q", vt.url, s, vt.expected)
+ }
+ }
+}
+
+// Tests for http://code.google.com/p/go/issues/detail?id=900
+func TestMuxRedirectLeadingSlashes(t *testing.T) {
+ paths := []string{"//foo.txt", "///foo.txt", "/../../foo.txt"}
+ for _, path := range paths {
+ req, err := ReadRequest(bufio.NewReader(bytes.NewBufferString("GET " + path + " HTTP/1.1\r\nHost: test\r\n\r\n")))
+ if err != nil {
+ t.Errorf("%s", err)
+ }
+ mux := NewServeMux()
+ resp := httptest.NewRecorder()
+
+ mux.ServeHTTP(resp, req)
+
+ if loc, expected := resp.Header().Get("Location"), "/foo.txt"; loc != expected {
+ t.Errorf("Expected Location header set to %q; got %q", expected, loc)
+ return
+ }
+
+ if code, expected := resp.Code, StatusMovedPermanently; code != expected {
+ t.Errorf("Expected response code of StatusMovedPermanently; got %d", code)
+ return
+ }
+ }
+}
+
+func TestServerTimeouts(t *testing.T) {
+ // TODO(bradfitz): convert this to use httptest.Server
+ l, err := net.Listen("tcp", "127.0.0.1:0")
+ if err != nil {
+ t.Fatalf("listen error: %v", err)
+ }
+ addr, _ := l.Addr().(*net.TCPAddr)
+
+ reqNum := 0
+ handler := HandlerFunc(func(res ResponseWriter, req *Request) {
+ reqNum++
+ fmt.Fprintf(res, "req=%d", reqNum)
+ })
+
+ const second = 1000000000 /* nanos */
+ server := &Server{Handler: handler, ReadTimeout: 0.25 * second, WriteTimeout: 0.25 * second}
+ go server.Serve(l)
+
+ url := fmt.Sprintf("http://%s/", addr)
+
+ // Hit the HTTP server successfully.
+ tr := &Transport{DisableKeepAlives: true} // they interfere with this test
+ c := &Client{Transport: tr}
+ r, err := c.Get(url)
+ if err != nil {
+ t.Fatalf("http Get #1: %v", err)
+ }
+ got, _ := ioutil.ReadAll(r.Body)
+ expected := "req=1"
+ if string(got) != expected {
+ t.Errorf("Unexpected response for request #1; got %q; expected %q",
+ string(got), expected)
+ }
+
+ // Slow client that should timeout.
+ t1 := time.Nanoseconds()
+ conn, err := net.Dial("tcp", addr.String())
+ if err != nil {
+ t.Fatalf("Dial: %v", err)
+ }
+ buf := make([]byte, 1)
+ n, err := conn.Read(buf)
+ latency := time.Nanoseconds() - t1
+ if n != 0 || err != io.EOF {
+ t.Errorf("Read = %v, %v, wanted %v, %v", n, err, 0, io.EOF)
+ }
+ if latency < second*0.20 /* fudge from 0.25 above */ {
+ t.Errorf("got EOF after %d ns, want >= %d", latency, second*0.20)
+ }
+
+ // Hit the HTTP server successfully again, verifying that the
+ // previous slow connection didn't run our handler. (that we
+ // get "req=2", not "req=3")
+ r, err = Get(url)
+ if err != nil {
+ t.Fatalf("http Get #2: %v", err)
+ }
+ got, _ = ioutil.ReadAll(r.Body)
+ expected = "req=2"
+ if string(got) != expected {
+ t.Errorf("Get #2 got %q, want %q", string(got), expected)
+ }
+
+ l.Close()
+}
+
+// TestIdentityResponse verifies that a handler can unset chunking by
+// declaring an explicit Content-Length (and an identity Transfer-Encoding).
+func TestIdentityResponse(t *testing.T) {
+ handler := HandlerFunc(func(rw ResponseWriter, req *Request) {
+ rw.Header().Set("Content-Length", "3")
+ rw.Header().Set("Transfer-Encoding", req.FormValue("te"))
+ switch {
+ case req.FormValue("overwrite") == "1":
+ _, err := rw.Write([]byte("foo TOO LONG"))
+ if err != ErrContentLength {
+ t.Errorf("expected ErrContentLength; got %v", err)
+ }
+ case req.FormValue("underwrite") == "1":
+ rw.Header().Set("Content-Length", "500")
+ rw.Write([]byte("too short"))
+ default:
+ rw.Write([]byte("foo"))
+ }
+ })
+
+ ts := httptest.NewServer(handler)
+ defer ts.Close()
+
+ // Note: this relies on the assumption (which is true) that
+ // Get sends HTTP/1.1 or greater requests. Otherwise the
+ // server wouldn't have the choice to send back chunked
+ // responses.
+ for _, te := range []string{"", "identity"} {
+ url := ts.URL + "/?te=" + te
+ res, err := Get(url)
+ if err != nil {
+ t.Fatalf("error with Get of %s: %v", url, err)
+ }
+ if cl, expected := res.ContentLength, int64(3); cl != expected {
+ t.Errorf("for %s expected res.ContentLength of %d; got %d", url, expected, cl)
+ }
+ if cl, expected := res.Header.Get("Content-Length"), "3"; cl != expected {
+ t.Errorf("for %s expected Content-Length header of %q; got %q", url, expected, cl)
+ }
+ if tl, expected := len(res.TransferEncoding), 0; tl != expected {
+ t.Errorf("for %s expected len(res.TransferEncoding) of %d; got %d (%v)",
+ url, expected, tl, res.TransferEncoding)
+ }
+ res.Body.Close()
+ }
+
+ // Verify that ErrContentLength is returned
+ url := ts.URL + "/?overwrite=1"
+ _, err := Get(url)
+ if err != nil {
+ t.Fatalf("error with Get of %s: %v", url, err)
+ }
+ // Verify that the connection is closed when the declared Content-Length
+ // is larger than what the handler wrote.
+ conn, err := net.Dial("tcp", ts.Listener.Addr().String())
+ if err != nil {
+ t.Fatalf("error dialing: %v", err)
+ }
+ _, err = conn.Write([]byte("GET /?underwrite=1 HTTP/1.1\r\nHost: foo\r\n\r\n"))
+ if err != nil {
+ t.Fatalf("error writing: %v", err)
+ }
+
+ // The ReadAll will hang for a failing test, so use a Timer to
+ // fail explicitly.
+ goTimeout(t, 2e9, func() {
+ got, _ := ioutil.ReadAll(conn)
+ expectedSuffix := "\r\n\r\ntoo short"
+ if !strings.HasSuffix(string(got), expectedSuffix) {
+ t.Errorf("Expected output to end with %q; got response body %q",
+ expectedSuffix, string(got))
+ }
+ })
+}
+
+func testTcpConnectionCloses(t *testing.T, req string, h Handler) {
+ s := httptest.NewServer(h)
+ defer s.Close()
+
+ conn, err := net.Dial("tcp", s.Listener.Addr().String())
+ if err != nil {
+ t.Fatal("dial error:", err)
+ }
+ defer conn.Close()
+
+ _, err = fmt.Fprint(conn, req)
+ if err != nil {
+ t.Fatal("print error:", err)
+ }
+
+ r := bufio.NewReader(conn)
+ _, err = ReadResponse(r, &Request{Method: "GET"})
+ if err != nil {
+ t.Fatal("ReadResponse error:", err)
+ }
+
+ success := make(chan bool)
+ go func() {
+ select {
+ case <-time.After(5e9):
+ t.Fatal("body not closed after 5s")
+ case <-success:
+ }
+ }()
+
+ _, err = ioutil.ReadAll(r)
+ if err != nil {
+ t.Fatal("read error:", err)
+ }
+
+ success <- true
+}
+
+// TestServeHTTP10Close verifies that HTTP/1.0 requests won't be kept alive.
+func TestServeHTTP10Close(t *testing.T) {
+ testTcpConnectionCloses(t, "GET / HTTP/1.0\r\n\r\n", HandlerFunc(func(w ResponseWriter, r *Request) {
+ ServeFile(w, r, "testdata/file")
+ }))
+}
+
+// TestHandlersCanSetConnectionClose verifies that handlers can force a connection to close,
+// even for HTTP/1.1 requests.
+func TestHandlersCanSetConnectionClose11(t *testing.T) {
+ testTcpConnectionCloses(t, "GET / HTTP/1.1\r\n\r\n", HandlerFunc(func(w ResponseWriter, r *Request) {
+ w.Header().Set("Connection", "close")
+ }))
+}
+
+func TestHandlersCanSetConnectionClose10(t *testing.T) {
+ testTcpConnectionCloses(t, "GET / HTTP/1.0\r\nConnection: keep-alive\r\n\r\n", HandlerFunc(func(w ResponseWriter, r *Request) {
+ w.Header().Set("Connection", "close")
+ }))
+}
+
+func TestSetsRemoteAddr(t *testing.T) {
+ ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
+ fmt.Fprintf(w, "%s", r.RemoteAddr)
+ }))
+ defer ts.Close()
+
+ res, err := Get(ts.URL)
+ if err != nil {
+ t.Fatalf("Get error: %v", err)
+ }
+ body, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ t.Fatalf("ReadAll error: %v", err)
+ }
+ ip := string(body)
+ if !strings.HasPrefix(ip, "127.0.0.1:") && !strings.HasPrefix(ip, "[::1]:") {
+ t.Fatalf("Expected local addr; got %q", ip)
+ }
+}
+
+func TestChunkedResponseHeaders(t *testing.T) {
+ log.SetOutput(ioutil.Discard) // is noisy otherwise
+ defer log.SetOutput(os.Stderr)
+
+ ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
+ w.Header().Set("Content-Length", "intentional gibberish") // we check that this is deleted
+ fmt.Fprintf(w, "I am a chunked response.")
+ }))
+ defer ts.Close()
+
+ res, err := Get(ts.URL)
+ if err != nil {
+ t.Fatalf("Get error: %v", err)
+ }
+ if g, e := res.ContentLength, int64(-1); g != e {
+ t.Errorf("expected ContentLength of %d; got %d", e, g)
+ }
+ if g, e := res.TransferEncoding, []string{"chunked"}; !reflect.DeepEqual(g, e) {
+ t.Errorf("expected TransferEncoding of %v; got %v", e, g)
+ }
+ if _, haveCL := res.Header["Content-Length"]; haveCL {
+ t.Errorf("Unexpected Content-Length")
+ }
+}
+
+// Test304Responses verifies that 304s don't declare that they're
+// chunking in their response headers and aren't allowed to produce
+// output.
+func Test304Responses(t *testing.T) {
+ ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
+ w.WriteHeader(StatusNotModified)
+ _, err := w.Write([]byte("illegal body"))
+ if err != ErrBodyNotAllowed {
+ t.Errorf("on Write, expected ErrBodyNotAllowed, got %v", err)
+ }
+ }))
+ defer ts.Close()
+ res, err := Get(ts.URL)
+ if err != nil {
+ t.Error(err)
+ }
+ if len(res.TransferEncoding) > 0 {
+ t.Errorf("expected no TransferEncoding; got %v", res.TransferEncoding)
+ }
+ body, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ t.Error(err)
+ }
+ if len(body) > 0 {
+ t.Errorf("got unexpected body %q", string(body))
+ }
+}
+
+// TestHeadResponses verifies that responses to HEAD requests don't
+// declare that they're chunking in their response headers and aren't
+// allowed to produce output.
+func TestHeadResponses(t *testing.T) {
+ ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
+ _, err := w.Write([]byte("Ignored body"))
+ if err != ErrBodyNotAllowed {
+ t.Errorf("on Write, expected ErrBodyNotAllowed, got %v", err)
+ }
+
+ // Also exercise the ReaderFrom path
+ _, err = io.Copy(w, strings.NewReader("Ignored body"))
+ if err != ErrBodyNotAllowed {
+ t.Errorf("on Copy, expected ErrBodyNotAllowed, got %v", err)
+ }
+ }))
+ defer ts.Close()
+ res, err := Head(ts.URL)
+ if err != nil {
+ t.Error(err)
+ }
+ if len(res.TransferEncoding) > 0 {
+ t.Errorf("expected no TransferEncoding; got %v", res.TransferEncoding)
+ }
+ body, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ t.Error(err)
+ }
+ if len(body) > 0 {
+ t.Errorf("got unexpected body %q", string(body))
+ }
+}
+
+func TestTLSHandshakeTimeout(t *testing.T) {
+ ts := httptest.NewUnstartedServer(HandlerFunc(func(w ResponseWriter, r *Request) {}))
+ ts.Config.ReadTimeout = 250e6
+ ts.StartTLS()
+ defer ts.Close()
+ conn, err := net.Dial("tcp", ts.Listener.Addr().String())
+ if err != nil {
+ t.Fatalf("Dial: %v", err)
+ }
+ defer conn.Close()
+ goTimeout(t, 10e9, func() {
+ var buf [1]byte
+ n, err := conn.Read(buf[:])
+ if err == nil || n != 0 {
+ t.Errorf("Read = %d, %v; want an error and no bytes", n, err)
+ }
+ })
+}
+
+func TestTLSServer(t *testing.T) {
+ ts := httptest.NewTLSServer(HandlerFunc(func(w ResponseWriter, r *Request) {
+ if r.TLS != nil {
+ w.Header().Set("X-TLS-Set", "true")
+ if r.TLS.HandshakeComplete {
+ w.Header().Set("X-TLS-HandshakeComplete", "true")
+ }
+ }
+ }))
+ defer ts.Close()
+
+ // Connect an idle TCP connection to this server before we run
+ // our real tests. This idle connection used to block forever
+ // in the TLS handshake, preventing future connections from
+ // being accepted. It may prevent future accidental blocking
+ // in newConn.
+ idleConn, err := net.Dial("tcp", ts.Listener.Addr().String())
+ if err != nil {
+ t.Fatalf("Dial: %v", err)
+ }
+ defer idleConn.Close()
+ goTimeout(t, 10e9, func() {
+ if !strings.HasPrefix(ts.URL, "https://") {
+ t.Errorf("expected test TLS server to start with https://, got %q", ts.URL)
+ return
+ }
+ noVerifyTransport := &Transport{
+ TLSClientConfig: &tls.Config{
+ InsecureSkipVerify: true,
+ },
+ }
+ client := &Client{Transport: noVerifyTransport}
+ res, err := client.Get(ts.URL)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ if res == nil {
+ t.Errorf("got nil Response")
+ return
+ }
+ defer res.Body.Close()
+ if res.Header.Get("X-TLS-Set") != "true" {
+ t.Errorf("expected X-TLS-Set response header")
+ return
+ }
+ if res.Header.Get("X-TLS-HandshakeComplete") != "true" {
+ t.Errorf("expected X-TLS-HandshakeComplete header")
+ }
+ })
+}
+
+type serverExpectTest struct {
+ contentLength int // of request body
+ expectation string // e.g. "100-continue"
+ readBody bool // whether handler should read the body (if false, sends StatusUnauthorized)
+ expectedResponse string // expected substring in first line of http response
+}
+
+var serverExpectTests = []serverExpectTest{
+ // Normal 100-continues, case-insensitive.
+ {100, "100-continue", true, "100 Continue"},
+ {100, "100-cOntInUE", true, "100 Continue"},
+
+ // No 100-continue.
+ {100, "", true, "200 OK"},
+
+ // 100-continue but requesting client to deny us,
+ // so it never reads the body.
+ {100, "100-continue", false, "401 Unauthorized"},
+ // Likewise without 100-continue:
+ {100, "", false, "401 Unauthorized"},
+
+ // Non-standard expectations are failures
+ {0, "a-pony", false, "417 Expectation Failed"},
+
+ // Expect-100 requested but no body
+ {0, "100-continue", true, "400 Bad Request"},
+}
+
+// Tests that the server responds to the "Expect" request header
+// correctly.
+func TestServerExpect(t *testing.T) {
+ ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
+ // Note using r.FormValue("readbody") because for POST
+ // requests that would read from r.Body, which we only
+ // conditionally want to do.
+ if strings.Contains(r.URL.RawPath, "readbody=true") {
+ ioutil.ReadAll(r.Body)
+ w.Write([]byte("Hi"))
+ } else {
+ w.WriteHeader(StatusUnauthorized)
+ }
+ }))
+ defer ts.Close()
+
+ runTest := func(test serverExpectTest) {
+ conn, err := net.Dial("tcp", ts.Listener.Addr().String())
+ if err != nil {
+ t.Fatalf("Dial: %v", err)
+ }
+ defer conn.Close()
+ sendf := func(format string, args ...interface{}) {
+ _, err := fmt.Fprintf(conn, format, args...)
+ if err != nil {
+ t.Fatalf("On test %#v, error writing %q: %v", test, format, err)
+ }
+ }
+ go func() {
+ sendf("POST /?readbody=%v HTTP/1.1\r\n"+
+ "Connection: close\r\n"+
+ "Content-Length: %d\r\n"+
+ "Expect: %s\r\nHost: foo\r\n\r\n",
+ test.readBody, test.contentLength, test.expectation)
+ if test.contentLength > 0 && strings.ToLower(test.expectation) != "100-continue" {
+ body := strings.Repeat("A", test.contentLength)
+ sendf(body)
+ }
+ }()
+ bufr := bufio.NewReader(conn)
+ line, err := bufr.ReadString('\n')
+ if err != nil {
+ t.Fatalf("ReadString: %v", err)
+ }
+ if !strings.Contains(line, test.expectedResponse) {
+ t.Errorf("for test %#v got first line=%q", test, line)
+ }
+ }
+
+ for _, test := range serverExpectTests {
+ runTest(test)
+ }
+}
+
+// Under a ~256KB (maxPostHandlerReadBytes) threshold, the server
+// should consume client request bodies that a handler didn't read.
+func TestServerUnreadRequestBodyLittle(t *testing.T) {
+ conn := new(testConn)
+ body := strings.Repeat("x", 100<<10)
+ conn.readBuf.Write([]byte(fmt.Sprintf(
+ "POST / HTTP/1.1\r\n"+
+ "Host: test\r\n"+
+ "Content-Length: %d\r\n"+
+ "\r\n", len(body))))
+ conn.readBuf.Write([]byte(body))
+
+ done := make(chan bool)
+
+ ls := &oneConnListener{conn}
+ go Serve(ls, HandlerFunc(func(rw ResponseWriter, req *Request) {
+ defer close(done)
+ if conn.readBuf.Len() < len(body)/2 {
+ t.Errorf("on request, read buffer length is %d; expected about 100 KB", conn.readBuf.Len())
+ }
+ rw.WriteHeader(200)
+ if g, e := conn.readBuf.Len(), 0; g != e {
+ t.Errorf("after WriteHeader, read buffer length is %d; want %d", g, e)
+ }
+ if c := rw.Header().Get("Connection"); c != "" {
+ t.Errorf(`Connection header = %q; want ""`, c)
+ }
+ }))
+ <-done
+}
+
+// Over a ~256KB (maxPostHandlerReadBytes) threshold, the server
+// should ignore client request bodies that a handler didn't read
+// and close the connection.
+func TestServerUnreadRequestBodyLarge(t *testing.T) {
+ conn := new(testConn)
+ body := strings.Repeat("x", 1<<20)
+ conn.readBuf.Write([]byte(fmt.Sprintf(
+ "POST / HTTP/1.1\r\n"+
+ "Host: test\r\n"+
+ "Content-Length: %d\r\n"+
+ "\r\n", len(body))))
+ conn.readBuf.Write([]byte(body))
+
+ done := make(chan bool)
+
+ ls := &oneConnListener{conn}
+ go Serve(ls, HandlerFunc(func(rw ResponseWriter, req *Request) {
+ defer close(done)
+ if conn.readBuf.Len() < len(body)/2 {
+ t.Errorf("on request, read buffer length is %d; expected about 1MB", conn.readBuf.Len())
+ }
+ rw.WriteHeader(200)
+ if conn.readBuf.Len() < len(body)/2 {
+ t.Errorf("post-WriteHeader, read buffer length is %d; expected about 1MB", conn.readBuf.Len())
+ }
+ if c := rw.Header().Get("Connection"); c != "close" {
+ t.Errorf(`Connection header = %q; want "close"`, c)
+ }
+ }))
+ <-done
+}
+
+func TestTimeoutHandler(t *testing.T) {
+ sendHi := make(chan bool, 1)
+ writeErrors := make(chan error, 1)
+ sayHi := HandlerFunc(func(w ResponseWriter, r *Request) {
+ <-sendHi
+ _, werr := w.Write([]byte("hi"))
+ writeErrors <- werr
+ })
+ timeout := make(chan int64, 1) // write to this to force timeouts
+ ts := httptest.NewServer(NewTestTimeoutHandler(sayHi, timeout))
+ defer ts.Close()
+
+ // Succeed without timing out:
+ sendHi <- true
+ res, err := Get(ts.URL)
+ if err != nil {
+ t.Error(err)
+ }
+ if g, e := res.StatusCode, StatusOK; g != e {
+ t.Errorf("got res.StatusCode %d; expected %d", g, e)
+ }
+ body, _ := ioutil.ReadAll(res.Body)
+ if g, e := string(body), "hi"; g != e {
+ t.Errorf("got body %q; expected %q", g, e)
+ }
+ if g := <-writeErrors; g != nil {
+ t.Errorf("got unexpected Write error on first request: %v", g)
+ }
+
+ // Times out:
+ timeout <- 1
+ res, err = Get(ts.URL)
+ if err != nil {
+ t.Error(err)
+ }
+ if g, e := res.StatusCode, StatusServiceUnavailable; g != e {
+ t.Errorf("got res.StatusCode %d; expected %d", g, e)
+ }
+ body, _ = ioutil.ReadAll(res.Body)
+ if !strings.Contains(string(body), "<title>Timeout</title>") {
+ t.Errorf("expected timeout body; got %q", string(body))
+ }
+
+ // Now make the previously-timed out handler speak again,
+ // which verifies the panic is handled:
+ sendHi <- true
+ if g, e := <-writeErrors, ErrHandlerTimeout; g != e {
+ t.Errorf("expected Write error of %v; got %v", e, g)
+ }
+}
+
+// Verifies we don't path.Clean() on the wrong parts in redirects.
+func TestRedirectMunging(t *testing.T) {
+ req, _ := NewRequest("GET", "http://example.com/", nil)
+
+ resp := httptest.NewRecorder()
+ Redirect(resp, req, "/foo?next=http://bar.com/", 302)
+ if g, e := resp.Header().Get("Location"), "/foo?next=http://bar.com/"; g != e {
+ t.Errorf("Location header was %q; want %q", g, e)
+ }
+
+ resp = httptest.NewRecorder()
+ Redirect(resp, req, "http://localhost:8080/_ah/login?continue=http://localhost:8080/", 302)
+ if g, e := resp.Header().Get("Location"), "http://localhost:8080/_ah/login?continue=http://localhost:8080/"; g != e {
+ t.Errorf("Location header was %q; want %q", g, e)
+ }
+}
+
+// TestZeroLengthPostAndResponse exercises an optimization done by the Transport:
+// when there is no body (either because the method doesn't permit a body, or an
+// explicit Content-Length of zero is present), then the transport can re-use the
+// connection immediately. But when it re-uses the connection, it typically closes
+// the previous request's body, which is not optimal for zero-lengthed bodies,
+// as the client would then see http.ErrBodyReadAfterClose and not 0, io.EOF.
+func TestZeroLengthPostAndResponse(t *testing.T) {
+ ts := httptest.NewServer(HandlerFunc(func(rw ResponseWriter, r *Request) {
+ all, err := ioutil.ReadAll(r.Body)
+ if err != nil {
+ t.Fatalf("handler ReadAll: %v", err)
+ }
+ if len(all) != 0 {
+ t.Errorf("handler got %d bytes; expected 0", len(all))
+ }
+ rw.Header().Set("Content-Length", "0")
+ }))
+ defer ts.Close()
+
+ req, err := NewRequest("POST", ts.URL, strings.NewReader(""))
+ if err != nil {
+ t.Fatal(err)
+ }
+ req.ContentLength = 0
+
+ var resp [5]*Response
+ for i := range resp {
+ resp[i], err = DefaultClient.Do(req)
+ if err != nil {
+ t.Fatalf("client post #%d: %v", i, err)
+ }
+ }
+
+ for i := range resp {
+ all, err := ioutil.ReadAll(resp[i].Body)
+ if err != nil {
+ t.Fatalf("req #%d: client ReadAll: %v", i, err)
+ }
+ if len(all) != 0 {
+ t.Errorf("req #%d: client got %d bytes; expected 0", i, len(all))
+ }
+ }
+}
+
+func TestHandlerPanic(t *testing.T) {
+ testHandlerPanic(t, false)
+}
+
+func TestHandlerPanicWithHijack(t *testing.T) {
+ testHandlerPanic(t, true)
+}
+
+func testHandlerPanic(t *testing.T, withHijack bool) {
+ // Unlike the other tests that set the log output to ioutil.Discard
+ // to quiet the output, this test uses a pipe. The pipe serves three
+ // purposes:
+ //
+ // 1) The log.Print from the http server (generated by the caught
+ // panic) will go to the pipe instead of stderr, making the
+ // output quiet.
+ //
+ // 2) We read from the pipe to verify that the handler
+ // actually caught the panic and logged something.
+ //
+ // 3) The blocking Read call prevents this TestHandlerPanic
+ // function from exiting before the HTTP server handler
+ // finishes crashing. If this test function exited too
+ // early (and its defer log.SetOutput(os.Stderr) ran),
+ // then the crash output could spill into the next test.
+ pr, pw := io.Pipe()
+ log.SetOutput(pw)
+ defer log.SetOutput(os.Stderr)
+
+ ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
+ if withHijack {
+ rwc, _, err := w.(Hijacker).Hijack()
+ if err != nil {
+ t.Logf("unexpected error: %v", err)
+ }
+ defer rwc.Close()
+ }
+ panic("intentional death for testing")
+ }))
+ defer ts.Close()
+ _, err := Get(ts.URL)
+ if err == nil {
+ t.Logf("expected an error")
+ }
+
+ // Do a blocking read on the log output pipe so its logging
+ // doesn't bleed into the next test. But wait only 5 seconds
+ // for it.
+ done := make(chan bool)
+ go func() {
+ buf := make([]byte, 1024)
+ _, err := pr.Read(buf)
+ pr.Close()
+ if err != nil {
+ t.Fatal(err)
+ }
+ done <- true
+ }()
+ select {
+ case <-done:
+ return
+ case <-time.After(5e9):
+ t.Fatal("expected server handler to log an error")
+ }
+}
+
+func TestNoDate(t *testing.T) {
+ ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
+ w.Header()["Date"] = nil
+ }))
+ defer ts.Close()
+ res, err := Get(ts.URL)
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, present := res.Header["Date"]
+ if present {
+ t.Fatalf("Expected no Date header; got %v", res.Header["Date"])
+ }
+}
+
+func TestStripPrefix(t *testing.T) {
+ h := HandlerFunc(func(w ResponseWriter, r *Request) {
+ w.Header().Set("X-Path", r.URL.Path)
+ })
+ ts := httptest.NewServer(StripPrefix("/foo", h))
+ defer ts.Close()
+
+ res, err := Get(ts.URL + "/foo/bar")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if g, e := res.Header.Get("X-Path"), "/bar"; g != e {
+ t.Errorf("test 1: got %s, want %s", g, e)
+ }
+
+ res, err = Get(ts.URL + "/bar")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if g, e := res.StatusCode, 404; g != e {
+ t.Errorf("test 2: got status %v, want %v", g, e)
+ }
+}
+
+func TestRequestLimit(t *testing.T) {
+ ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
+ t.Fatalf("didn't expect to get request in Handler")
+ }))
+ defer ts.Close()
+ req, _ := NewRequest("GET", ts.URL, nil)
+ var bytesPerHeader = len("header12345: val12345\r\n")
+ for i := 0; i < ((DefaultMaxHeaderBytes+4096)/bytesPerHeader)+1; i++ {
+ req.Header.Set(fmt.Sprintf("header%05d", i), fmt.Sprintf("val%05d", i))
+ }
+ res, err := DefaultClient.Do(req)
+ if err != nil {
+ // Some HTTP clients may fail on this undefined behavior (server replying and
+ // closing the connection while the request is still being written), but
+ // we do support it (at least currently), so we expect a response below.
+ t.Fatalf("Do: %v", err)
+ }
+ if res.StatusCode != 413 {
+ t.Fatalf("expected 413 response status; got: %d %s", res.StatusCode, res.Status)
+ }
+}
+
+type neverEnding byte
+
+func (b neverEnding) Read(p []byte) (n int, err error) {
+ for i := range p {
+ p[i] = byte(b)
+ }
+ return len(p), nil
+}
+
+type countReader struct {
+ r io.Reader
+ n *int64
+}
+
+func (cr countReader) Read(p []byte) (n int, err error) {
+ n, err = cr.r.Read(p)
+ *cr.n += int64(n)
+ return
+}
+
+func TestRequestBodyLimit(t *testing.T) {
+ const limit = 1 << 20
+ ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
+ r.Body = MaxBytesReader(w, r.Body, limit)
+ n, err := io.Copy(ioutil.Discard, r.Body)
+ if err == nil {
+ t.Errorf("expected error from io.Copy")
+ }
+ if n != limit {
+ t.Errorf("io.Copy = %d, want %d", n, limit)
+ }
+ }))
+ defer ts.Close()
+
+ nWritten := int64(0)
+ req, _ := NewRequest("POST", ts.URL, io.LimitReader(countReader{neverEnding('a'), &nWritten}, limit*200))
+
+ // Send the POST, but don't care whether it succeeds or not. The
+ // remote side is going to reply and then close the TCP
+ // connection, and HTTP doesn't really define if that's
+ // allowed or not. Some HTTP clients will get the response
+ // and some (like ours, currently) will complain that the
+ // request write failed, without reading the response.
+ //
+ // But that's okay, since what we're really testing is that
+ // the remote side hung up on us before we wrote too much.
+ _, _ = DefaultClient.Do(req)
+
+ if nWritten > limit*100 {
+ t.Errorf("handler restricted the request body to %d bytes, but client managed to write %d",
+ limit, nWritten)
+ }
+}
+
+// TestClientWriteShutdown tests that if the client shuts down the write
+// side of their TCP connection, the server doesn't send a 400 Bad Request.
+func TestClientWriteShutdown(t *testing.T) {
+ ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {}))
+ defer ts.Close()
+ conn, err := net.Dial("tcp", ts.Listener.Addr().String())
+ if err != nil {
+ t.Fatalf("Dial: %v", err)
+ }
+ err = conn.(*net.TCPConn).CloseWrite()
+ if err != nil {
+ t.Fatalf("Dial: %v", err)
+ }
+ donec := make(chan bool)
+ go func() {
+ defer close(donec)
+ bs, err := ioutil.ReadAll(conn)
+ if err != nil {
+ t.Fatalf("ReadAll: %v", err)
+ }
+ got := string(bs)
+ if got != "" {
+ t.Errorf("read %q from server; want nothing", got)
+ }
+ }()
+ select {
+ case <-donec:
+ case <-time.After(10e9):
+ t.Fatalf("timeout")
+ }
+}
+
+// goTimeout runs f, failing t if f takes more than ns to complete.
+func goTimeout(t *testing.T, ns int64, f func()) {
+ ch := make(chan bool, 2)
+ timer := time.AfterFunc(ns, func() {
+ t.Errorf("Timeout expired after %d ns", ns)
+ ch <- true
+ })
+ defer timer.Stop()
+ go func() {
+ defer func() { ch <- true }()
+ f()
+ }()
+ <-ch
+}
+
+type errorListener struct {
+ errs []error
+}
+
+func (l *errorListener) Accept() (c net.Conn, err error) {
+ if len(l.errs) == 0 {
+ return nil, io.EOF
+ }
+ err = l.errs[0]
+ l.errs = l.errs[1:]
+ return
+}
+
+func (l *errorListener) Close() error {
+ return nil
+}
+
+func (l *errorListener) Addr() net.Addr {
+ return dummyAddr("test-address")
+}
+
+func TestAcceptMaxFds(t *testing.T) {
+ log.SetOutput(ioutil.Discard) // is noisy otherwise
+ defer log.SetOutput(os.Stderr)
+
+ ln := &errorListener{[]error{
+ &net.OpError{
+ Op: "accept",
+ Err: os.Errno(syscall.EMFILE),
+ }}}
+ err := Serve(ln, HandlerFunc(func(ResponseWriter, *Request) {}))
+ if err != io.EOF {
+ t.Errorf("got error %v, want EOF", err)
+ }
+}
+
+func BenchmarkClientServer(b *testing.B) {
+ b.StopTimer()
+ ts := httptest.NewServer(HandlerFunc(func(rw ResponseWriter, r *Request) {
+ fmt.Fprintf(rw, "Hello world.\n")
+ }))
+ defer ts.Close()
+ b.StartTimer()
+
+ for i := 0; i < b.N; i++ {
+ res, err := Get(ts.URL)
+ if err != nil {
+ panic("Get: " + err.Error())
+ }
+ all, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ panic("ReadAll: " + err.Error())
+ }
+ body := string(all)
+ if body != "Hello world.\n" {
+ panic("Got body: " + body)
+ }
+ }
+
+ b.StopTimer()
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// HTTP server. See RFC 2616.
+
+// TODO(rsc):
+// logging
+
+package http
+
+import (
+ "bufio"
+ "bytes"
+ "crypto/rand"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "net"
+ "net/url"
+ "path"
+ "runtime/debug"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+)
+
+// Errors introduced by the HTTP server.
+var (
+ ErrWriteAfterFlush = errors.New("Conn.Write called after Flush")
+ ErrBodyNotAllowed = errors.New("http: response status code does not allow body")
+ ErrHijacked = errors.New("Conn has been hijacked")
+ ErrContentLength = errors.New("Conn.Write wrote more than the declared Content-Length")
+)
+
+// Objects implementing the Handler interface can be
+// registered to serve a particular path or subtree
+// in the HTTP server.
+//
+// ServeHTTP should write reply headers and data to the ResponseWriter
+// and then return. Returning signals that the request is finished
+// and that the HTTP server can move on to the next request on
+// the connection.
+type Handler interface {
+ ServeHTTP(ResponseWriter, *Request)
+}
+
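+// Illustrative sketch of a minimal Handler; the countHandler type and the
+// commented-out registration are hypothetical. A real handler would need a
+// mutex, since ServeHTTP may be called concurrently.
+//
+//	type countHandler struct {
+//		n int
+//	}
+//
+//	func (h *countHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+//		h.n++
+//		w.Header().Set("Content-Type", "text/plain")
+//		fmt.Fprintf(w, "count is %d\n", h.n)
+//	}
+//
+//	// http.Handle("/count", new(countHandler))
+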
+// A ResponseWriter interface is used by an HTTP handler to
+// construct an HTTP response.
+type ResponseWriter interface {
+ // Header returns the header map that will be sent by WriteHeader.
+ // Changing the header after a call to WriteHeader (or Write) has
+ // no effect.
+ Header() Header
+
+ // Write writes the data to the connection as part of an HTTP reply.
+ // If WriteHeader has not yet been called, Write calls WriteHeader(http.StatusOK)
+ // before writing the data.
+ Write([]byte) (int, error)
+
+ // WriteHeader sends an HTTP response header with status code.
+ // If WriteHeader is not called explicitly, the first call to Write
+ // will trigger an implicit WriteHeader(http.StatusOK).
+ // Thus explicit calls to WriteHeader are mainly used to
+ // send error codes.
+ WriteHeader(int)
+}
+
+// The Flusher interface is implemented by ResponseWriters that allow
+// an HTTP handler to flush buffered data to the client.
+//
+// Note that even for ResponseWriters that support Flush,
+// if the client is connected through an HTTP proxy,
+// the buffered data may not reach the client until the response
+// completes.
+type Flusher interface {
+ // Flush sends any buffered data to the client.
+ Flush()
+}
+
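+// Illustrative sketch: streaming partial output from a handler by flushing.
+// The type assertion guards against ResponseWriters that do not implement
+// Flusher.
+//
+//	func handler(w http.ResponseWriter, r *http.Request) {
+//		for i := 0; i < 10; i++ {
+//			fmt.Fprintf(w, "part %d\n", i)
+//			if f, ok := w.(http.Flusher); ok {
+//				f.Flush() // send buffered data to the client now
+//			}
+//		}
+//	}
+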
+// The Hijacker interface is implemented by ResponseWriters that allow
+// an HTTP handler to take over the connection.
+type Hijacker interface {
+ // Hijack lets the caller take over the connection.
+ // After a call to Hijack(), the HTTP server library
+ // will not do anything else with the connection.
+ // It becomes the caller's responsibility to manage
+ // and close the connection.
+ Hijack() (net.Conn, *bufio.ReadWriter, error)
+}
+
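+// Illustrative sketch: taking over the connection from a handler. After
+// Hijack returns, the handler owns the connection and must close it.
+//
+//	func handler(w http.ResponseWriter, r *http.Request) {
+//		hj, ok := w.(http.Hijacker)
+//		if !ok {
+//			http.Error(w, "hijacking not supported", http.StatusInternalServerError)
+//			return
+//		}
+//		conn, bufrw, err := hj.Hijack()
+//		if err != nil {
+//			return
+//		}
+//		defer conn.Close()
+//		bufrw.WriteString("HTTP/1.0 200 OK\r\n\r\nraw connection from here on\r\n")
+//		bufrw.Flush()
+//	}
+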
+// A conn represents the server side of an HTTP connection.
+type conn struct {
+ remoteAddr string // network address of remote side
+ server *Server // the Server on which the connection arrived
+ rwc net.Conn // i/o connection
+ lr *io.LimitedReader // io.LimitReader(rwc)
+ buf *bufio.ReadWriter // buffered(lr,rwc), reading from bufio->limitReader->rwc
+ hijacked bool // connection has been hijacked by handler
+ tlsState *tls.ConnectionState // or nil when not using TLS
+ body []byte
+}
+
+// A response represents the server side of an HTTP response.
+type response struct {
+ conn *conn
+ req *Request // request for this response
+ chunking bool // using chunked transfer encoding for reply body
+ wroteHeader bool // reply header has been written
+ wroteContinue bool // 100 Continue response was written
+ header Header // reply header parameters
+ written int64 // number of bytes written in body
+ contentLength int64 // explicitly-declared Content-Length; or -1
+ status int // status code passed to WriteHeader
+ needSniff bool // need to sniff to find Content-Type
+
+ // close connection after this reply. set on request and
+ // updated after response from handler if there's a
+ // "Connection: keep-alive" response header and a
+ // Content-Length.
+ closeAfterReply bool
+
+ // requestBodyLimitHit is set by requestTooLarge when
+ // maxBytesReader hits its max size. It is checked in
+ // WriteHeader, to make sure we don't consume the
+ // remaining request body to try to advance to the next HTTP
+ // request. Instead, when this is set, we stop doing
+ // subsequent requests on this connection and stop reading
+ // input from it.
+ requestBodyLimitHit bool
+}
+
+// requestTooLarge is called by maxBytesReader when too much input has
+// been read from the client.
+func (w *response) requestTooLarge() {
+ w.closeAfterReply = true
+ w.requestBodyLimitHit = true
+ if !w.wroteHeader {
+ w.Header().Set("Connection", "close")
+ }
+}
+
+type writerOnly struct {
+ io.Writer
+}
+
+func (w *response) ReadFrom(src io.Reader) (n int64, err error) {
+ // Flush before checking w.chunking, as Flush will call
+ // WriteHeader if it hasn't been called yet, and WriteHeader
+ // is what sets w.chunking.
+ w.Flush()
+ if !w.chunking && w.bodyAllowed() && !w.needSniff {
+ if rf, ok := w.conn.rwc.(io.ReaderFrom); ok {
+ n, err = rf.ReadFrom(src)
+ w.written += n
+ return
+ }
+ }
+ // Fall back to default io.Copy implementation.
+ // Use wrapper to hide w.ReadFrom from io.Copy.
+ return io.Copy(writerOnly{w}, src)
+}
+
+// noLimit is an effective infinite upper bound for io.LimitedReader
+const noLimit int64 = (1 << 63) - 1
+
+// Create new connection from rwc.
+func (srv *Server) newConn(rwc net.Conn) (c *conn, err error) {
+ c = new(conn)
+ c.remoteAddr = rwc.RemoteAddr().String()
+ c.server = srv
+ c.rwc = rwc
+ c.body = make([]byte, sniffLen)
+ c.lr = io.LimitReader(rwc, noLimit).(*io.LimitedReader)
+ br := bufio.NewReader(c.lr)
+ bw := bufio.NewWriter(rwc)
+ c.buf = bufio.NewReadWriter(br, bw)
+ return c, nil
+}
+
+// DefaultMaxHeaderBytes is the maximum permitted size of the headers
+// in an HTTP request.
+// This can be overridden by setting Server.MaxHeaderBytes.
+const DefaultMaxHeaderBytes = 1 << 20 // 1 MB
+
+func (srv *Server) maxHeaderBytes() int {
+ if srv.MaxHeaderBytes > 0 {
+ return srv.MaxHeaderBytes
+ }
+ return DefaultMaxHeaderBytes
+}
+
+// wrapper around io.ReadCloser which, on first read, sends an
+// HTTP/1.1 100 Continue header
+type expectContinueReader struct {
+ resp *response
+ readCloser io.ReadCloser
+ closed bool
+}
+
+func (ecr *expectContinueReader) Read(p []byte) (n int, err error) {
+ if ecr.closed {
+ return 0, errors.New("http: Read after Close on request Body")
+ }
+ if !ecr.resp.wroteContinue && !ecr.resp.conn.hijacked {
+ ecr.resp.wroteContinue = true
+ io.WriteString(ecr.resp.conn.buf, "HTTP/1.1 100 Continue\r\n\r\n")
+ ecr.resp.conn.buf.Flush()
+ }
+ return ecr.readCloser.Read(p)
+}
+
+func (ecr *expectContinueReader) Close() error {
+ ecr.closed = true
+ return ecr.readCloser.Close()
+}
+
+// TimeFormat is the time format to use with
+// time.Parse and time.Time.Format when parsing
+// or generating times in HTTP headers.
+// It is like time.RFC1123 but hard codes GMT as the time zone.
+const TimeFormat = "Mon, 02 Jan 2006 15:04:05 GMT"
+
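+// Illustrative sketch, mirroring the call made later in this file: format a
+// Date header with TimeFormat and parse one back. w, r, and the incoming
+// Date header are assumptions for the example; times must be in GMT/UTC.
+//
+//	w.Header().Set("Date", time.UTC().Format(TimeFormat))
+//	t, err := time.Parse(TimeFormat, r.Header.Get("Date"))
+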
+var errTooLarge = errors.New("http: request too large")
+
+// Read next request from connection.
+func (c *conn) readRequest() (w *response, err error) {
+ if c.hijacked {
+ return nil, ErrHijacked
+ }
+ c.lr.N = int64(c.server.maxHeaderBytes()) + 4096 /* bufio slop */
+ var req *Request
+ if req, err = ReadRequest(c.buf.Reader); err != nil {
+ if c.lr.N == 0 {
+ return nil, errTooLarge
+ }
+ return nil, err
+ }
+ c.lr.N = noLimit
+
+ req.RemoteAddr = c.remoteAddr
+ req.TLS = c.tlsState
+
+ w = new(response)
+ w.conn = c
+ w.req = req
+ w.header = make(Header)
+ w.contentLength = -1
+ c.body = c.body[:0]
+ return w, nil
+}
+
+func (w *response) Header() Header {
+ return w.header
+}
+
+// maxPostHandlerReadBytes is the max number of Request.Body bytes not
+// consumed by a handler that the server will read from a client
+// in order to keep a connection alive. If there are more bytes than
+// this, the server, to be paranoid, instead sends a "Connection:
+// close" response.
+//
+// This number is approximately what a typical machine's TCP buffer
+// size is anyway. (if we have the bytes on the machine, we might as
+// well read them)
+const maxPostHandlerReadBytes = 256 << 10
+
+func (w *response) WriteHeader(code int) {
+ if w.conn.hijacked {
+ log.Print("http: response.WriteHeader on hijacked connection")
+ return
+ }
+ if w.wroteHeader {
+ log.Print("http: multiple response.WriteHeader calls")
+ return
+ }
+ w.wroteHeader = true
+ w.status = code
+
+	// Check for an explicit (and valid) Content-Length header.
+ var hasCL bool
+ var contentLength int64
+ if clenStr := w.header.Get("Content-Length"); clenStr != "" {
+ var err error
+ contentLength, err = strconv.Atoi64(clenStr)
+ if err == nil {
+ hasCL = true
+ } else {
+ log.Printf("http: invalid Content-Length of %q sent", clenStr)
+ w.header.Del("Content-Length")
+ }
+ }
+
+ if w.req.wantsHttp10KeepAlive() && (w.req.Method == "HEAD" || hasCL) {
+ _, connectionHeaderSet := w.header["Connection"]
+ if !connectionHeaderSet {
+ w.header.Set("Connection", "keep-alive")
+ }
+ } else if !w.req.ProtoAtLeast(1, 1) {
+ // Client did not ask to keep connection alive.
+ w.closeAfterReply = true
+ }
+
+ if w.header.Get("Connection") == "close" {
+ w.closeAfterReply = true
+ }
+
+ // Per RFC 2616, we should consume the request body before
+ // replying, if the handler hasn't already done so. But we
+ // don't want to do an unbounded amount of reading here for
+ // DoS reasons, so we only try up to a threshold.
+ if w.req.ContentLength != 0 && !w.closeAfterReply {
+ ecr, isExpecter := w.req.Body.(*expectContinueReader)
+ if !isExpecter || ecr.resp.wroteContinue {
+ n, _ := io.CopyN(ioutil.Discard, w.req.Body, maxPostHandlerReadBytes+1)
+ if n >= maxPostHandlerReadBytes {
+ w.requestTooLarge()
+ w.header.Set("Connection", "close")
+ } else {
+ w.req.Body.Close()
+ }
+ }
+ }
+
+ if code == StatusNotModified {
+ // Must not have body.
+ for _, header := range []string{"Content-Type", "Content-Length", "Transfer-Encoding"} {
+ if w.header.Get(header) != "" {
+ // TODO: return an error if WriteHeader gets a return parameter
+ // or set a flag on w to make future Writes() write an error page?
+ // for now just log and drop the header.
+ log.Printf("http: StatusNotModified response with header %q defined", header)
+ w.header.Del(header)
+ }
+ }
+ } else {
+ // If no content type, apply sniffing algorithm to body.
+ if w.header.Get("Content-Type") == "" {
+ w.needSniff = true
+ }
+ }
+
+ if _, ok := w.header["Date"]; !ok {
+ w.Header().Set("Date", time.UTC().Format(TimeFormat))
+ }
+
+ te := w.header.Get("Transfer-Encoding")
+ hasTE := te != ""
+ if hasCL && hasTE && te != "identity" {
+ // TODO: return an error if WriteHeader gets a return parameter
+ // For now just ignore the Content-Length.
+ log.Printf("http: WriteHeader called with both Transfer-Encoding of %q and a Content-Length of %d",
+ te, contentLength)
+ w.header.Del("Content-Length")
+ hasCL = false
+ }
+
+ if w.req.Method == "HEAD" || code == StatusNotModified {
+ // do nothing
+ } else if hasCL {
+ w.contentLength = contentLength
+ w.header.Del("Transfer-Encoding")
+ } else if w.req.ProtoAtLeast(1, 1) {
+ // HTTP/1.1 or greater: use chunked transfer encoding
+ // to avoid closing the connection at EOF.
+ // TODO: this blows away any custom or stacked Transfer-Encoding they
+ // might have set. Deal with that as need arises once we have a valid
+ // use case.
+ w.chunking = true
+ w.header.Set("Transfer-Encoding", "chunked")
+ } else {
+ // HTTP version < 1.1: cannot do chunked transfer
+ // encoding and we don't know the Content-Length so
+ // signal EOF by closing connection.
+ w.closeAfterReply = true
+ w.header.Del("Transfer-Encoding") // in case already set
+ }
+
+ // Cannot use Content-Length with non-identity Transfer-Encoding.
+ if w.chunking {
+ w.header.Del("Content-Length")
+ }
+ if !w.req.ProtoAtLeast(1, 0) {
+ return
+ }
+ proto := "HTTP/1.0"
+ if w.req.ProtoAtLeast(1, 1) {
+ proto = "HTTP/1.1"
+ }
+ codestring := strconv.Itoa(code)
+ text, ok := statusText[code]
+ if !ok {
+ text = "status code " + codestring
+ }
+ io.WriteString(w.conn.buf, proto+" "+codestring+" "+text+"\r\n")
+ w.header.Write(w.conn.buf)
+
+ // If we need to sniff the body, leave the header open.
+ // Otherwise, end it here.
+ if !w.needSniff {
+ io.WriteString(w.conn.buf, "\r\n")
+ }
+}
+
+// sniff uses the first block of written data,
+// stored in w.conn.body, to decide the Content-Type
+// for the HTTP body.
+func (w *response) sniff() {
+ if !w.needSniff {
+ return
+ }
+ w.needSniff = false
+
+ data := w.conn.body
+ fmt.Fprintf(w.conn.buf, "Content-Type: %s\r\n\r\n", DetectContentType(data))
+
+ if len(data) == 0 {
+ return
+ }
+ if w.chunking {
+ fmt.Fprintf(w.conn.buf, "%x\r\n", len(data))
+ }
+ _, err := w.conn.buf.Write(data)
+ if w.chunking && err == nil {
+ io.WriteString(w.conn.buf, "\r\n")
+ }
+}
+
+// bodyAllowed returns true if a Write is allowed for this response type.
+// It's illegal to call this before the header has been flushed.
+func (w *response) bodyAllowed() bool {
+ if !w.wroteHeader {
+ panic("")
+ }
+ return w.status != StatusNotModified && w.req.Method != "HEAD"
+}
+
+func (w *response) Write(data []byte) (n int, err error) {
+ if w.conn.hijacked {
+ log.Print("http: response.Write on hijacked connection")
+ return 0, ErrHijacked
+ }
+ if !w.wroteHeader {
+ w.WriteHeader(StatusOK)
+ }
+ if len(data) == 0 {
+ return 0, nil
+ }
+ if !w.bodyAllowed() {
+ return 0, ErrBodyNotAllowed
+ }
+
+ w.written += int64(len(data)) // ignoring errors, for errorKludge
+ if w.contentLength != -1 && w.written > w.contentLength {
+ return 0, ErrContentLength
+ }
+
+ var m int
+ if w.needSniff {
+ // We need to sniff the beginning of the output to
+ // determine the content type. Accumulate the
+ // initial writes in w.conn.body.
+ // Cap m so that append won't allocate.
+		m = cap(w.conn.body) - len(w.conn.body)
+ if m > len(data) {
+ m = len(data)
+ }
+ w.conn.body = append(w.conn.body, data[:m]...)
+ data = data[m:]
+ if len(data) == 0 {
+ // Copied everything into the buffer.
+ // Wait for next write.
+ return m, nil
+ }
+
+ // Filled the buffer; more data remains.
+ // Sniff the content (flushes the buffer)
+ // and then proceed with the remainder
+ // of the data as a normal Write.
+ // Calling sniff clears needSniff.
+ w.sniff()
+ }
+
+ // TODO(rsc): if chunking happened after the buffering,
+ // then there would be fewer chunk headers.
+ // On the other hand, it would make hijacking more difficult.
+ if w.chunking {
+ fmt.Fprintf(w.conn.buf, "%x\r\n", len(data)) // TODO(rsc): use strconv not fmt
+ }
+ n, err = w.conn.buf.Write(data)
+ if err == nil && w.chunking {
+ if n != len(data) {
+ err = io.ErrShortWrite
+ }
+ if err == nil {
+ io.WriteString(w.conn.buf, "\r\n")
+ }
+ }
+
+ return m + n, err
+}
+
+func (w *response) finishRequest() {
+ // If this was an HTTP/1.0 request with keep-alive and we sent a Content-Length
+ // back, we can make this a keep-alive response ...
+ if w.req.wantsHttp10KeepAlive() {
+ sentLength := w.header.Get("Content-Length") != ""
+ if sentLength && w.header.Get("Connection") == "keep-alive" {
+ w.closeAfterReply = false
+ }
+ }
+ if !w.wroteHeader {
+ w.WriteHeader(StatusOK)
+ }
+ if w.needSniff {
+ w.sniff()
+ }
+ if w.chunking {
+ io.WriteString(w.conn.buf, "0\r\n")
+ // trailer key/value pairs, followed by blank line
+ io.WriteString(w.conn.buf, "\r\n")
+ }
+ w.conn.buf.Flush()
+ // Close the body, unless we're about to close the whole TCP connection
+ // anyway.
+ if !w.closeAfterReply {
+ w.req.Body.Close()
+ }
+ if w.req.MultipartForm != nil {
+ w.req.MultipartForm.RemoveAll()
+ }
+
+ if w.contentLength != -1 && w.contentLength != w.written {
+ // Did not write enough. Avoid getting out of sync.
+ w.closeAfterReply = true
+ }
+}
+
+func (w *response) Flush() {
+ if !w.wroteHeader {
+ w.WriteHeader(StatusOK)
+ }
+ w.sniff()
+ w.conn.buf.Flush()
+}
+
+// Close the connection.
+func (c *conn) close() {
+ if c.buf != nil {
+ c.buf.Flush()
+ c.buf = nil
+ }
+ if c.rwc != nil {
+ c.rwc.Close()
+ c.rwc = nil
+ }
+}
+
+// Serve a new connection.
+func (c *conn) serve() {
+ defer func() {
+ err := recover()
+ if err == nil {
+ return
+ }
+ if c.rwc != nil { // may be nil if connection hijacked
+ c.rwc.Close()
+ }
+
+ var buf bytes.Buffer
+ fmt.Fprintf(&buf, "http: panic serving %v: %v\n", c.remoteAddr, err)
+ buf.Write(debug.Stack())
+ log.Print(buf.String())
+ }()
+
+ if tlsConn, ok := c.rwc.(*tls.Conn); ok {
+ if err := tlsConn.Handshake(); err != nil {
+ c.close()
+ return
+ }
+ c.tlsState = new(tls.ConnectionState)
+ *c.tlsState = tlsConn.ConnectionState()
+ }
+
+ for {
+ w, err := c.readRequest()
+ if err != nil {
+ msg := "400 Bad Request"
+ if err == errTooLarge {
+ // Their HTTP client may or may not be
+ // able to read this if we're
+ // responding to them and hanging up
+ // while they're still writing their
+ // request. Undefined behavior.
+ msg = "413 Request Entity Too Large"
+ } else if err == io.ErrUnexpectedEOF {
+ break // Don't reply
+ } else if neterr, ok := err.(net.Error); ok && neterr.Timeout() {
+ break // Don't reply
+ }
+ fmt.Fprintf(c.rwc, "HTTP/1.1 %s\r\n\r\n", msg)
+ break
+ }
+
+ // Expect 100 Continue support
+ req := w.req
+ if req.expectsContinue() {
+ if req.ProtoAtLeast(1, 1) {
+ // Wrap the Body reader with one that replies on the connection
+ req.Body = &expectContinueReader{readCloser: req.Body, resp: w}
+ }
+ if req.ContentLength == 0 {
+ w.Header().Set("Connection", "close")
+ w.WriteHeader(StatusBadRequest)
+ w.finishRequest()
+ break
+ }
+ req.Header.Del("Expect")
+ } else if req.Header.Get("Expect") != "" {
+ // TODO(bradfitz): let ServeHTTP handlers handle
+ // requests with non-standard expectation[s]? Seems
+ // theoretical at best, and doesn't fit into the
+ // current ServeHTTP model anyway. We'd need to
+ // make the ResponseWriter an optional
+ // "ExpectReplier" interface or something.
+ //
+ // For now we'll just obey RFC 2616 14.20 which says
+ // "If a server receives a request containing an
+ // Expect field that includes an expectation-
+ // extension that it does not support, it MUST
+ // respond with a 417 (Expectation Failed) status."
+ w.Header().Set("Connection", "close")
+ w.WriteHeader(StatusExpectationFailed)
+ w.finishRequest()
+ break
+ }
+
+ handler := c.server.Handler
+ if handler == nil {
+ handler = DefaultServeMux
+ }
+
+ // HTTP cannot have multiple simultaneous active requests.[*]
+ // Until the server replies to this request, it can't read another,
+ // so we might as well run the handler in this goroutine.
+ // [*] Not strictly true: HTTP pipelining. We could let them all process
+ // in parallel even if their responses need to be serialized.
+ handler.ServeHTTP(w, w.req)
+ if c.hijacked {
+ return
+ }
+ w.finishRequest()
+ if w.closeAfterReply {
+ break
+ }
+ }
+ c.close()
+}
+
+// Hijack implements the Hijacker.Hijack method. Our response is both a ResponseWriter
+// and a Hijacker.
+func (w *response) Hijack() (rwc net.Conn, buf *bufio.ReadWriter, err error) {
+ if w.conn.hijacked {
+ return nil, nil, ErrHijacked
+ }
+ w.conn.hijacked = true
+ rwc = w.conn.rwc
+ buf = w.conn.buf
+ w.conn.rwc = nil
+ w.conn.buf = nil
+ return
+}
+
+// The HandlerFunc type is an adapter to allow the use of
+// ordinary functions as HTTP handlers. If f is a function
+// with the appropriate signature, HandlerFunc(f) is a
+// Handler object that calls f.
+type HandlerFunc func(ResponseWriter, *Request)
+
+// ServeHTTP calls f(w, r).
+func (f HandlerFunc) ServeHTTP(w ResponseWriter, r *Request) {
+ f(w, r)
+}
+
+// Helper handlers
+
+// Error replies to the request with the specified error message and HTTP code.
+func Error(w ResponseWriter, error string, code int) {
+ w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+ w.WriteHeader(code)
+ fmt.Fprintln(w, error)
+}
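+
+// A short usage sketch inside a handler (the path check is invented for the
+// example):
+//
+//	func admin(w ResponseWriter, r *Request) {
+//		if r.URL.Path != "/admin" {
+//			Error(w, "403 forbidden", StatusForbidden)
+//			return
+//		}
+//		// ... render the admin page ...
+//	}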
+
+// NotFound replies to the request with an HTTP 404 not found error.
+func NotFound(w ResponseWriter, r *Request) { Error(w, "404 page not found", StatusNotFound) }
+
+// NotFoundHandler returns a simple request handler
+// that replies to each request with a ``404 page not found'' reply.
+func NotFoundHandler() Handler { return HandlerFunc(NotFound) }
+
+// StripPrefix returns a handler that serves HTTP requests
+// by removing the given prefix from the request URL's Path
+// and invoking the handler h. StripPrefix handles a
+// request for a path that doesn't begin with prefix by
+// replying with an HTTP 404 not found error.
+func StripPrefix(prefix string, h Handler) Handler {
+ return HandlerFunc(func(w ResponseWriter, r *Request) {
+ if !strings.HasPrefix(r.URL.Path, prefix) {
+ NotFound(w, r)
+ return
+ }
+ r.URL.Path = r.URL.Path[len(prefix):]
+ h.ServeHTTP(w, r)
+ })
+}
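+
+// For example (a sketch; the prefix and handler h are assumptions made for
+// illustration), a handler registered as
+//
+//	Handle("/tmpfiles/", StripPrefix("/tmpfiles/", h))
+//
+// sees a request for "/tmpfiles/foo" as a request for "/foo".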
+
+// Redirect replies to the request with a redirect to url,
+// which may be a path relative to the request path.
+func Redirect(w ResponseWriter, r *Request, urlStr string, code int) {
+ if u, err := url.Parse(urlStr); err == nil {
+ // If url was relative, make absolute by
+ // combining with request path.
+ // The browser would probably do this for us,
+ // but doing it ourselves is more reliable.
+
+ // NOTE(rsc): RFC 2616 says that the Location
+ // line must be an absolute URI, like
+ // "http://www.google.com/redirect/",
+ // not a path like "/redirect/".
+ // Unfortunately, we don't know what to
+ // put in the host name section to get the
+ // client to connect to us again, so we can't
+ // know the right absolute URI to send back.
+ // Because of this problem, no one pays attention
+ // to the RFC; they all send back just a new path.
+ // So do we.
+ oldpath := r.URL.Path
+ if oldpath == "" { // should not happen, but avoid a crash if it does
+ oldpath = "/"
+ }
+ if u.Scheme == "" {
+ // no leading http://server
+ if urlStr == "" || urlStr[0] != '/' {
+ // make relative path absolute
+ olddir, _ := path.Split(oldpath)
+ urlStr = olddir + urlStr
+ }
+
+ var query string
+ if i := strings.Index(urlStr, "?"); i != -1 {
+ urlStr, query = urlStr[:i], urlStr[i:]
+ }
+
+ // clean up but preserve trailing slash
+ trailing := urlStr[len(urlStr)-1] == '/'
+ urlStr = path.Clean(urlStr)
+ if trailing && urlStr[len(urlStr)-1] != '/' {
+ urlStr += "/"
+ }
+ urlStr += query
+ }
+ }
+
+ w.Header().Set("Location", urlStr)
+ w.WriteHeader(code)
+
+ // RFC2616 recommends that a short note "SHOULD" be included in the
+ // response because older user agents may not understand 301/307.
+ // Shouldn't send the response for POST or HEAD; that leaves GET.
+ if r.Method == "GET" {
+ note := "<a href=\"" + htmlEscape(urlStr) + "\">" + statusText[code] + "</a>.\n"
+ fmt.Fprintln(w, note)
+ }
+}
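+
+// A small usage sketch (the target path and status code are illustrative):
+//
+//	func oldPage(w ResponseWriter, r *Request) {
+//		Redirect(w, r, "/new/", StatusMovedPermanently)
+//	}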
+
+var htmlReplacer = strings.NewReplacer(
+ "&", "&",
+ "<", "<",
+ ">", ">",
+ `"`, """,
+ "'", "'",
+)
+
+func htmlEscape(s string) string {
+ return htmlReplacer.Replace(s)
+}
+
+// Redirect to a fixed URL
+type redirectHandler struct {
+ url string
+ code int
+}
+
+func (rh *redirectHandler) ServeHTTP(w ResponseWriter, r *Request) {
+ Redirect(w, r, rh.url, rh.code)
+}
+
+// RedirectHandler returns a request handler that redirects
+// each request it receives to the given url using the given
+// status code.
+func RedirectHandler(url string, code int) Handler {
+ return &redirectHandler{url, code}
+}
+
+// ServeMux is an HTTP request multiplexer.
+// It matches the URL of each incoming request against a list of registered
+// patterns and calls the handler for the pattern that
+// most closely matches the URL.
+//
+// Patterns name fixed, rooted paths, like "/favicon.ico",
+// or rooted subtrees, like "/images/" (note the trailing slash).
+// Longer patterns take precedence over shorter ones, so that
+// if there are handlers registered for both "/images/"
+// and "/images/thumbnails/", the latter handler will be
+// called for paths beginning "/images/thumbnails/" and the
+// former will receive requests for any other paths in the
+// "/images/" subtree.
+//
+// Patterns may optionally begin with a host name, restricting matches to
+// URLs on that host only. Host-specific patterns take precedence over
+// general patterns, so that a handler might register for the two patterns
+// "/codesearch" and "codesearch.google.com/" without also taking over
+// requests for "http://www.google.com/".
+//
+// ServeMux also takes care of sanitizing the URL request path,
+// redirecting any request containing . or .. elements to an
+// equivalent .- and ..-free URL.
+type ServeMux struct {
+ m map[string]Handler
+}
+
+// NewServeMux allocates and returns a new ServeMux.
+func NewServeMux() *ServeMux { return &ServeMux{make(map[string]Handler)} }
+
+// DefaultServeMux is the default ServeMux used by Serve.
+var DefaultServeMux = NewServeMux()
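+
+// A sketch of the precedence rules described above; imagesHandler and
+// thumbHandler stand in for real handlers:
+//
+//	mux := NewServeMux()
+//	mux.Handle("/images/", imagesHandler)
+//	mux.Handle("/images/thumbnails/", thumbHandler)
+//	// "/images/thumbnails/t1.png" is served by thumbHandler (longest pattern),
+//	// "/images/logo.png" by imagesHandler, and anything else falls through
+//	// to NotFoundHandler().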
+
+// Does path match pattern?
+func pathMatch(pattern, path string) bool {
+ if len(pattern) == 0 {
+ // should not happen
+ return false
+ }
+ n := len(pattern)
+ if pattern[n-1] != '/' {
+ return pattern == path
+ }
+ return len(path) >= n && path[0:n] == pattern
+}
+
+// Return the canonical path for p, eliminating . and .. elements.
+func cleanPath(p string) string {
+ if p == "" {
+ return "/"
+ }
+ if p[0] != '/' {
+ p = "/" + p
+ }
+ np := path.Clean(p)
+ // path.Clean removes trailing slash except for root;
+ // put the trailing slash back if necessary.
+ if p[len(p)-1] == '/' && np != "/" {
+ np += "/"
+ }
+ return np
+}
+
+// Find a handler on a handler map given a path string
+// Most-specific (longest) pattern wins
+func (mux *ServeMux) match(path string) Handler {
+ var h Handler
+ var n = 0
+ for k, v := range mux.m {
+ if !pathMatch(k, path) {
+ continue
+ }
+ if h == nil || len(k) > n {
+ n = len(k)
+ h = v
+ }
+ }
+ return h
+}
+
+// ServeHTTP dispatches the request to the handler whose
+// pattern most closely matches the request URL.
+func (mux *ServeMux) ServeHTTP(w ResponseWriter, r *Request) {
+ // Clean path to canonical form and redirect.
+ if p := cleanPath(r.URL.Path); p != r.URL.Path {
+ w.Header().Set("Location", p)
+ w.WriteHeader(StatusMovedPermanently)
+ return
+ }
+ // Host-specific pattern takes precedence over generic ones
+ h := mux.match(r.Host + r.URL.Path)
+ if h == nil {
+ h = mux.match(r.URL.Path)
+ }
+ if h == nil {
+ h = NotFoundHandler()
+ }
+ h.ServeHTTP(w, r)
+}
+
+// Handle registers the handler for the given pattern.
+func (mux *ServeMux) Handle(pattern string, handler Handler) {
+ if pattern == "" {
+ panic("http: invalid pattern " + pattern)
+ }
+
+ mux.m[pattern] = handler
+
+ // Helpful behavior:
+ // If pattern is /tree/, insert permanent redirect for /tree.
+ n := len(pattern)
+ if n > 0 && pattern[n-1] == '/' {
+ mux.m[pattern[0:n-1]] = RedirectHandler(pattern, StatusMovedPermanently)
+ }
+}
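+
+// For example (sketch; treeHandler stands in for a real handler), after
+//
+//	mux.Handle("/tree/", treeHandler)
+//
+// a request for "/tree" is answered with a permanent redirect to "/tree/".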
+
+// HandleFunc registers the handler function for the given pattern.
+func (mux *ServeMux) HandleFunc(pattern string, handler func(ResponseWriter, *Request)) {
+ mux.Handle(pattern, HandlerFunc(handler))
+}
+
+// Handle registers the handler for the given pattern
+// in the DefaultServeMux.
+// The documentation for ServeMux explains how patterns are matched.
+func Handle(pattern string, handler Handler) { DefaultServeMux.Handle(pattern, handler) }
+
+// HandleFunc registers the handler function for the given pattern
+// in the DefaultServeMux.
+// The documentation for ServeMux explains how patterns are matched.
+func HandleFunc(pattern string, handler func(ResponseWriter, *Request)) {
+ DefaultServeMux.HandleFunc(pattern, handler)
+}
+
+// Serve accepts incoming HTTP connections on the listener l,
+// creating a new service goroutine for each. The service goroutines
+// read requests and then call handler to reply to them.
+// Handler is typically nil, in which case the DefaultServeMux is used.
+func Serve(l net.Listener, handler Handler) error {
+ srv := &Server{Handler: handler}
+ return srv.Serve(l)
+}
+
+// A Server defines parameters for running an HTTP server.
+type Server struct {
+ Addr string // TCP address to listen on, ":http" if empty
+ Handler Handler // handler to invoke, http.DefaultServeMux if nil
+ ReadTimeout int64 // the net.Conn.SetReadTimeout value for new connections
+ WriteTimeout int64 // the net.Conn.SetWriteTimeout value for new connections
+ MaxHeaderBytes int // maximum size of request headers, DefaultMaxHeaderBytes if 0
+}
+
+// ListenAndServe listens on the TCP network address srv.Addr and then
+// calls Serve to handle requests on incoming connections. If
+// srv.Addr is blank, ":http" is used.
+func (srv *Server) ListenAndServe() error {
+ addr := srv.Addr
+ if addr == "" {
+ addr = ":http"
+ }
+ l, e := net.Listen("tcp", addr)
+ if e != nil {
+ return e
+ }
+ return srv.Serve(l)
+}
+
+// Serve accepts incoming connections on the Listener l, creating a
+// new service goroutine for each. The service goroutines read requests and
+// then call srv.Handler to reply to them.
+func (srv *Server) Serve(l net.Listener) error {
+ defer l.Close()
+ for {
+ rw, e := l.Accept()
+ if e != nil {
+ if ne, ok := e.(net.Error); ok && ne.Temporary() {
+ log.Printf("http: Accept error: %v", e)
+ continue
+ }
+ return e
+ }
+ if srv.ReadTimeout != 0 {
+ rw.SetReadTimeout(srv.ReadTimeout)
+ }
+ if srv.WriteTimeout != 0 {
+ rw.SetWriteTimeout(srv.WriteTimeout)
+ }
+ c, err := srv.newConn(rw)
+ if err != nil {
+ continue
+ }
+ go c.serve()
+ }
+ panic("not reached")
+}
+
+// ListenAndServe listens on the TCP network address addr
+// and then calls Serve with handler to handle requests
+// on incoming connections. Handler is typically nil,
+// in which case the DefaultServeMux is used.
+//
+// A trivial example server is:
+//
+// package main
+//
+// import (
+// "http"
+// "io"
+// "log"
+// )
+//
+// // hello world, the web server
+// func HelloServer(w http.ResponseWriter, req *http.Request) {
+// io.WriteString(w, "hello, world!\n")
+// }
+//
+// func main() {
+// http.HandleFunc("/hello", HelloServer)
+// err := http.ListenAndServe(":12345", nil)
+// if err != nil {
+// log.Fatal("ListenAndServe: ", err.String())
+// }
+// }
+func ListenAndServe(addr string, handler Handler) error {
+ server := &Server{Addr: addr, Handler: handler}
+ return server.ListenAndServe()
+}
+
+// ListenAndServeTLS acts identically to ListenAndServe, except that it
+// expects HTTPS connections. Additionally, files containing a certificate and
+// matching private key for the server must be provided. If the certificate
+// is signed by a certificate authority, the certFile should be the concatenation
+// of the server's certificate followed by the CA's certificate.
+//
+// A trivial example server is:
+//
+// import (
+// "http"
+// "log"
+// )
+//
+// func handler(w http.ResponseWriter, req *http.Request) {
+// w.Header().Set("Content-Type", "text/plain")
+// w.Write([]byte("This is an example server.\n"))
+// }
+//
+// func main() {
+// http.HandleFunc("/", handler)
+// log.Printf("About to listen on 10443. Go to https://127.0.0.1:10443/")
+// err := http.ListenAndServeTLS(":10443", "cert.pem", "key.pem", nil)
+// if err != nil {
+// log.Fatal(err)
+// }
+// }
+//
+// One can use generate_cert.go in crypto/tls to generate cert.pem and key.pem.
+func ListenAndServeTLS(addr string, certFile string, keyFile string, handler Handler) error {
+ server := &Server{Addr: addr, Handler: handler}
+ return server.ListenAndServeTLS(certFile, keyFile)
+}
+
+// ListenAndServeTLS listens on the TCP network address srv.Addr and
+// then calls Serve to handle requests on incoming TLS connections.
+//
+// Filenames containing a certificate and matching private key for
+// the server must be provided. If the certificate is signed by a
+// certificate authority, the certFile should be the concatenation
+// of the server's certificate followed by the CA's certificate.
+//
+// If srv.Addr is blank, ":https" is used.
+func (s *Server) ListenAndServeTLS(certFile, keyFile string) error {
+ addr := s.Addr
+ if addr == "" {
+ addr = ":https"
+ }
+ config := &tls.Config{
+ Rand: rand.Reader,
+ Time: time.Seconds,
+ NextProtos: []string{"http/1.1"},
+ }
+
+ var err error
+ config.Certificates = make([]tls.Certificate, 1)
+ config.Certificates[0], err = tls.LoadX509KeyPair(certFile, keyFile)
+ if err != nil {
+ return err
+ }
+
+ conn, err := net.Listen("tcp", addr)
+ if err != nil {
+ return err
+ }
+
+ tlsListener := tls.NewListener(conn, config)
+ return s.Serve(tlsListener)
+}
+
+// TimeoutHandler returns a Handler that runs h with the given time limit.
+//
+// The new Handler calls h.ServeHTTP to handle each request, but if a
+// call runs for more than ns nanoseconds, the handler responds with
+// a 503 Service Unavailable error and the given message in its body.
+// (If msg is empty, a suitable default message will be sent.)
+// After such a timeout, writes by h to its ResponseWriter will return
+// ErrHandlerTimeout.
+func TimeoutHandler(h Handler, ns int64, msg string) Handler {
+ f := func() <-chan int64 {
+ return time.After(ns)
+ }
+ return &timeoutHandler{h, f, msg}
+}
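+
+// A usage sketch, assuming some handler slowHandler; the limit is given in
+// nanoseconds, matching the ns parameter above:
+//
+//	h := TimeoutHandler(slowHandler, 5e9, "request timed out") // 5 seconds
+//	Handle("/slow", h)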
+
+// ErrHandlerTimeout is returned on ResponseWriter Write calls
+// in handlers which have timed out.
+var ErrHandlerTimeout = errors.New("http: Handler timeout")
+
+type timeoutHandler struct {
+ handler Handler
+ timeout func() <-chan int64 // returns channel producing a timeout
+ body string
+}
+
+func (h *timeoutHandler) errorBody() string {
+ if h.body != "" {
+ return h.body
+ }
+ return "<html><head><title>Timeout</title></head><body><h1>Timeout</h1></body></html>"
+}
+
+func (h *timeoutHandler) ServeHTTP(w ResponseWriter, r *Request) {
+ done := make(chan bool)
+ tw := &timeoutWriter{w: w}
+ go func() {
+ h.handler.ServeHTTP(tw, r)
+ done <- true
+ }()
+ select {
+ case <-done:
+ return
+ case <-h.timeout():
+ tw.mu.Lock()
+ defer tw.mu.Unlock()
+ if !tw.wroteHeader {
+ tw.w.WriteHeader(StatusServiceUnavailable)
+ tw.w.Write([]byte(h.errorBody()))
+ }
+ tw.timedOut = true
+ }
+}
+
+type timeoutWriter struct {
+ w ResponseWriter
+
+ mu sync.Mutex
+ timedOut bool
+ wroteHeader bool
+}
+
+func (tw *timeoutWriter) Header() Header {
+ return tw.w.Header()
+}
+
+func (tw *timeoutWriter) Write(p []byte) (int, error) {
+ tw.mu.Lock()
+ timedOut := tw.timedOut
+ tw.mu.Unlock()
+ if timedOut {
+ return 0, ErrHandlerTimeout
+ }
+ return tw.w.Write(p)
+}
+
+func (tw *timeoutWriter) WriteHeader(code int) {
+ tw.mu.Lock()
+ if tw.timedOut || tw.wroteHeader {
+ tw.mu.Unlock()
+ return
+ }
+ tw.wroteHeader = true
+ tw.mu.Unlock()
+ tw.w.WriteHeader(code)
+}
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http
+
+import (
+ "bytes"
+ "encoding/binary"
+)
+
+// Content-type sniffing algorithm.
+// References in this file refer to this draft specification:
+// http://mimesniff.spec.whatwg.org/
+
+// The algorithm prefers to use sniffLen bytes to make its decision.
+const sniffLen = 512
+
+// DetectContentType returns the sniffed Content-Type string
+// for the given data. This function always returns a valid MIME type.
+func DetectContentType(data []byte) string {
+ if len(data) > sniffLen {
+ data = data[:sniffLen]
+ }
+
+ // Index of the first non-whitespace byte in data.
+ firstNonWS := 0
+ for ; firstNonWS < len(data) && isWS(data[firstNonWS]); firstNonWS++ {
+ }
+
+ for _, sig := range sniffSignatures {
+ if ct := sig.match(data, firstNonWS); ct != "" {
+ return ct
+ }
+ }
+
+ return "application/octet-stream" // fallback
+}
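+
+// For example, given the signature table below:
+//
+//	DetectContentType([]byte("<!DOCTYPE HTML><html>")) // "text/html; charset=utf-8"
+//	DetectContentType([]byte("GIF89a..."))             // "image/gif"
+//	DetectContentType([]byte{1, 2, 3})                 // "application/octet-stream"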
+
+func isWS(b byte) bool {
+ return bytes.IndexByte([]byte("\t\n\x0C\r "), b) != -1
+}
+
+type sniffSig interface {
+ // match returns the MIME type of the data, or "" if unknown.
+ match(data []byte, firstNonWS int) string
+}
+
+// Data matching the table in section 6.
+var sniffSignatures = []sniffSig{
+ htmlSig([]byte("<!DOCTYPE HTML")),
+ htmlSig([]byte("<HTML")),
+ htmlSig([]byte("<HEAD")),
+ htmlSig([]byte("<SCRIPT")),
+ htmlSig([]byte("<IFRAME")),
+ htmlSig([]byte("<H1")),
+ htmlSig([]byte("<DIV")),
+ htmlSig([]byte("<FONT")),
+ htmlSig([]byte("<TABLE")),
+ htmlSig([]byte("<A")),
+ htmlSig([]byte("<STYLE")),
+ htmlSig([]byte("<TITLE")),
+ htmlSig([]byte("<B")),
+ htmlSig([]byte("<BODY")),
+ htmlSig([]byte("<BR")),
+ htmlSig([]byte("<P")),
+ htmlSig([]byte("<!--")),
+
+ &maskedSig{mask: []byte("\xFF\xFF\xFF\xFF\xFF"), pat: []byte("<?xml"), skipWS: true, ct: "text/xml; charset=utf-8"},
+
+ &exactSig{[]byte("%PDF-"), "application/pdf"},
+ &exactSig{[]byte("%!PS-Adobe-"), "application/postscript"},
+
+ // UTF BOMs.
+ &maskedSig{mask: []byte("\xFF\xFF\x00\x00"), pat: []byte("\xFE\xFF\x00\x00"), ct: "text/plain; charset=utf-16be"},
+ &maskedSig{mask: []byte("\xFF\xFF\x00\x00"), pat: []byte("\xFF\xFE\x00\x00"), ct: "text/plain; charset=utf-16le"},
+ &maskedSig{mask: []byte("\xFF\xFF\xFF\x00"), pat: []byte("\xEF\xBB\xBF\x00"), ct: "text/plain; charset=utf-8"},
+
+ &exactSig{[]byte("GIF87a"), "image/gif"},
+ &exactSig{[]byte("GIF89a"), "image/gif"},
+ &exactSig{[]byte("\x89\x50\x4E\x47\x0D\x0A\x1A\x0A"), "image/png"},
+ &exactSig{[]byte("\xFF\xD8\xFF"), "image/jpeg"},
+ &exactSig{[]byte("BM"), "image/bmp"},
+ &maskedSig{
+ mask: []byte("\xFF\xFF\xFF\xFF\x00\x00\x00\x00\xFF\xFF\xFF\xFF\xFF\xFF"),
+ pat: []byte("RIFF\x00\x00\x00\x00WEBPVP"),
+ ct: "image/webp",
+ },
+ &exactSig{[]byte("\x00\x00\x01\x00"), "image/vnd.microsoft.icon"},
+ &exactSig{[]byte("\x4F\x67\x67\x53\x00"), "application/ogg"},
+ &maskedSig{
+ mask: []byte("\xFF\xFF\xFF\xFF\x00\x00\x00\x00\xFF\xFF\xFF\xFF"),
+ pat: []byte("RIFF\x00\x00\x00\x00WAVE"),
+ ct: "audio/wave",
+ },
+ &exactSig{[]byte("\x1A\x45\xDF\xA3"), "video/webm"},
+ &exactSig{[]byte("\x52\x61\x72\x20\x1A\x07\x00"), "application/x-rar-compressed"},
+ &exactSig{[]byte("\x50\x4B\x03\x04"), "application/zip"},
+ &exactSig{[]byte("\x1F\x8B\x08"), "application/x-gzip"},
+
+ // TODO(dsymonds): Re-enable this when the spec is sorted w.r.t. MP4.
+ //mp4Sig(0),
+
+ textSig(0), // should be last
+}
+
+type exactSig struct {
+ sig []byte
+ ct string
+}
+
+func (e *exactSig) match(data []byte, firstNonWS int) string {
+ if bytes.HasPrefix(data, e.sig) {
+ return e.ct
+ }
+ return ""
+}
+
+type maskedSig struct {
+ mask, pat []byte
+ skipWS bool
+ ct string
+}
+
+func (m *maskedSig) match(data []byte, firstNonWS int) string {
+ if m.skipWS {
+ data = data[firstNonWS:]
+ }
+ if len(data) < len(m.mask) {
+ return ""
+ }
+ for i, mask := range m.mask {
+ db := data[i] & mask
+ if db != m.pat[i] {
+ return ""
+ }
+ }
+ return m.ct
+}
+
+type htmlSig []byte
+
+func (h htmlSig) match(data []byte, firstNonWS int) string {
+ data = data[firstNonWS:]
+ if len(data) < len(h)+1 {
+ return ""
+ }
+ for i, b := range h {
+ db := data[i]
+ if 'A' <= b && b <= 'Z' {
+ db &= 0xDF
+ }
+ if b != db {
+ return ""
+ }
+ }
+ // Next byte must be space or right angle bracket.
+ if db := data[len(h)]; db != ' ' && db != '>' {
+ return ""
+ }
+ return "text/html; charset=utf-8"
+}
+
+type mp4Sig int
+
+func (mp4Sig) match(data []byte, firstNonWS int) string {
+ // c.f. section 6.1.
+ if len(data) < 8 {
+ return ""
+ }
+ boxSize := int(binary.BigEndian.Uint32(data[:4]))
+ if boxSize%4 != 0 || len(data) < boxSize {
+ return ""
+ }
+ if !bytes.Equal(data[4:8], []byte("ftyp")) {
+ return ""
+ }
+ for st := 8; st < boxSize; st += 4 {
+ if st == 12 {
+ // minor version number
+ continue
+ }
+ seg := string(data[st : st+3])
+ switch seg {
+ case "mp4", "iso", "M4V", "M4P", "M4B":
+ return "video/mp4"
+ /* The remainder are not in the spec.
+ case "M4A":
+ return "audio/mp4"
+ case "3gp":
+ return "video/3gpp"
+ case "jp2":
+ return "image/jp2" // JPEG 2000
+ */
+ }
+ }
+ return ""
+}
+
+type textSig int
+
+func (textSig) match(data []byte, firstNonWS int) string {
+ // c.f. section 5, step 4.
+ for _, b := range data[firstNonWS:] {
+ switch {
+ case 0x00 <= b && b <= 0x08,
+ b == 0x0B,
+ 0x0E <= b && b <= 0x1A,
+ 0x1C <= b && b <= 0x1F:
+ return ""
+ }
+ }
+ return "text/plain; charset=utf-8"
+}
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http_test
+
+import (
+ "bytes"
+ "io/ioutil"
+ "log"
+ . "net/http"
+ "net/http/httptest"
+ "strconv"
+ "testing"
+)
+
+var sniffTests = []struct {
+ desc string
+ data []byte
+ contentType string
+}{
+ // Some nonsense.
+ {"Empty", []byte{}, "text/plain; charset=utf-8"},
+ {"Binary", []byte{1, 2, 3}, "application/octet-stream"},
+
+ {"HTML document #1", []byte(`<HtMl><bOdY>blah blah blah</body></html>`), "text/html; charset=utf-8"},
+ {"HTML document #2", []byte(`<HTML></HTML>`), "text/html; charset=utf-8"},
+ {"HTML document #3 (leading whitespace)", []byte(` <!DOCTYPE HTML>...`), "text/html; charset=utf-8"},
+ {"HTML document #4 (leading CRLF)", []byte("\r\n<html>..."), "text/html; charset=utf-8"},
+
+ {"Plain text", []byte(`This is not HTML. It has ☃ though.`), "text/plain; charset=utf-8"},
+
+ {"XML", []byte("\n<?xml!"), "text/xml; charset=utf-8"},
+
+ // Image types.
+ {"GIF 87a", []byte(`GIF87a`), "image/gif"},
+ {"GIF 89a", []byte(`GIF89a...`), "image/gif"},
+
+ // TODO(dsymonds): Re-enable this when the spec is sorted w.r.t. MP4.
+ //{"MP4 video", []byte("\x00\x00\x00\x18ftypmp42\x00\x00\x00\x00mp42isom<\x06t\xbfmdat"), "video/mp4"},
+ //{"MP4 audio", []byte("\x00\x00\x00\x20ftypM4A \x00\x00\x00\x00M4A mp42isom\x00\x00\x00\x00"), "audio/mp4"},
+}
+
+func TestDetectContentType(t *testing.T) {
+ for _, tt := range sniffTests {
+ ct := DetectContentType(tt.data)
+ if ct != tt.contentType {
+ t.Errorf("%v: DetectContentType = %q, want %q", tt.desc, ct, tt.contentType)
+ }
+ }
+}
+
+func TestServerContentType(t *testing.T) {
+ ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
+ i, _ := strconv.Atoi(r.FormValue("i"))
+ tt := sniffTests[i]
+ n, err := w.Write(tt.data)
+ if n != len(tt.data) || err != nil {
+ log.Fatalf("%v: Write(%q) = %v, %v want %d, nil", tt.desc, tt.data, n, err, len(tt.data))
+ }
+ }))
+ defer ts.Close()
+
+ for i, tt := range sniffTests {
+ resp, err := Get(ts.URL + "/?i=" + strconv.Itoa(i))
+ if err != nil {
+ t.Errorf("%v: %v", tt.desc, err)
+ continue
+ }
+ if ct := resp.Header.Get("Content-Type"); ct != tt.contentType {
+ t.Errorf("%v: Content-Type = %q, want %q", tt.desc, ct, tt.contentType)
+ }
+ data, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Errorf("%v: reading body: %v", tt.desc, err)
+ } else if !bytes.Equal(data, tt.data) {
+ t.Errorf("%v: data is %q, want %q", tt.desc, data, tt.data)
+ }
+ resp.Body.Close()
+ }
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http
+
+// HTTP status codes, defined in RFC 2616.
+const (
+ StatusContinue = 100
+ StatusSwitchingProtocols = 101
+
+ StatusOK = 200
+ StatusCreated = 201
+ StatusAccepted = 202
+ StatusNonAuthoritativeInfo = 203
+ StatusNoContent = 204
+ StatusResetContent = 205
+ StatusPartialContent = 206
+
+ StatusMultipleChoices = 300
+ StatusMovedPermanently = 301
+ StatusFound = 302
+ StatusSeeOther = 303
+ StatusNotModified = 304
+ StatusUseProxy = 305
+ StatusTemporaryRedirect = 307
+
+ StatusBadRequest = 400
+ StatusUnauthorized = 401
+ StatusPaymentRequired = 402
+ StatusForbidden = 403
+ StatusNotFound = 404
+ StatusMethodNotAllowed = 405
+ StatusNotAcceptable = 406
+ StatusProxyAuthRequired = 407
+ StatusRequestTimeout = 408
+ StatusConflict = 409
+ StatusGone = 410
+ StatusLengthRequired = 411
+ StatusPreconditionFailed = 412
+ StatusRequestEntityTooLarge = 413
+ StatusRequestURITooLong = 414
+ StatusUnsupportedMediaType = 415
+ StatusRequestedRangeNotSatisfiable = 416
+ StatusExpectationFailed = 417
+
+ StatusInternalServerError = 500
+ StatusNotImplemented = 501
+ StatusBadGateway = 502
+ StatusServiceUnavailable = 503
+ StatusGatewayTimeout = 504
+ StatusHTTPVersionNotSupported = 505
+)
+
+var statusText = map[int]string{
+ StatusContinue: "Continue",
+ StatusSwitchingProtocols: "Switching Protocols",
+
+ StatusOK: "OK",
+ StatusCreated: "Created",
+ StatusAccepted: "Accepted",
+ StatusNonAuthoritativeInfo: "Non-Authoritative Information",
+ StatusNoContent: "No Content",
+ StatusResetContent: "Reset Content",
+ StatusPartialContent: "Partial Content",
+
+ StatusMultipleChoices: "Multiple Choices",
+ StatusMovedPermanently: "Moved Permanently",
+ StatusFound: "Found",
+ StatusSeeOther: "See Other",
+ StatusNotModified: "Not Modified",
+ StatusUseProxy: "Use Proxy",
+ StatusTemporaryRedirect: "Temporary Redirect",
+
+ StatusBadRequest: "Bad Request",
+ StatusUnauthorized: "Unauthorized",
+ StatusPaymentRequired: "Payment Required",
+ StatusForbidden: "Forbidden",
+ StatusNotFound: "Not Found",
+ StatusMethodNotAllowed: "Method Not Allowed",
+ StatusNotAcceptable: "Not Acceptable",
+ StatusProxyAuthRequired: "Proxy Authentication Required",
+ StatusRequestTimeout: "Request Timeout",
+ StatusConflict: "Conflict",
+ StatusGone: "Gone",
+ StatusLengthRequired: "Length Required",
+ StatusPreconditionFailed: "Precondition Failed",
+ StatusRequestEntityTooLarge: "Request Entity Too Large",
+ StatusRequestURITooLong: "Request URI Too Long",
+ StatusUnsupportedMediaType: "Unsupported Media Type",
+ StatusRequestedRangeNotSatisfiable: "Requested Range Not Satisfiable",
+ StatusExpectationFailed: "Expectation Failed",
+
+ StatusInternalServerError: "Internal Server Error",
+ StatusNotImplemented: "Not Implemented",
+ StatusBadGateway: "Bad Gateway",
+ StatusServiceUnavailable: "Service Unavailable",
+ StatusGatewayTimeout: "Gateway Timeout",
+ StatusHTTPVersionNotSupported: "HTTP Version Not Supported",
+}
+
+// StatusText returns a text for the HTTP status code. It returns the empty
+// string if the code is unknown.
+func StatusText(code int) string {
+ return statusText[code]
+}
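+
+// For example:
+//
+//	StatusText(StatusNotFound) // "Not Found"
+//	StatusText(600)            // "" (unknown code)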
--- /dev/null
+0123456789
--- /dev/null
+index.html says hello
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/textproto"
+ "strconv"
+ "strings"
+)
+
+// transferWriter inspects the fields of a user-supplied Request or Response,
+// sanitizes them without changing the user object and provides methods for
+// writing the respective header, body and trailer in wire format.
+type transferWriter struct {
+ Method string
+ Body io.Reader
+ BodyCloser io.Closer
+ ResponseToHEAD bool
+ ContentLength int64 // -1 means unknown, 0 means exactly none
+ Close bool
+ TransferEncoding []string
+ Trailer Header
+}
+
+func newTransferWriter(r interface{}) (t *transferWriter, err error) {
+ t = &transferWriter{}
+
+ // Extract relevant fields
+ atLeastHTTP11 := false
+ switch rr := r.(type) {
+ case *Request:
+ if rr.ContentLength != 0 && rr.Body == nil {
+ return nil, fmt.Errorf("http: Request.ContentLength=%d with nil Body", rr.ContentLength)
+ }
+ t.Method = rr.Method
+ t.Body = rr.Body
+ t.BodyCloser = rr.Body
+ t.ContentLength = rr.ContentLength
+ t.Close = rr.Close
+ t.TransferEncoding = rr.TransferEncoding
+ t.Trailer = rr.Trailer
+ atLeastHTTP11 = rr.ProtoAtLeast(1, 1)
+ if t.Body != nil && len(t.TransferEncoding) == 0 && atLeastHTTP11 {
+ if t.ContentLength == 0 {
+ // Test to see if it's actually zero or just unset.
+ var buf [1]byte
+ n, _ := io.ReadFull(t.Body, buf[:])
+ if n == 1 {
+ // Oh, guess there is data in this Body Reader after all.
+ // The ContentLength field just wasn't set.
+					// Stitch the Body back together again, re-attaching our
+ // consumed byte.
+ t.ContentLength = -1
+ t.Body = io.MultiReader(bytes.NewBuffer(buf[:]), t.Body)
+ } else {
+ // Body is actually empty.
+ t.Body = nil
+ t.BodyCloser = nil
+ }
+ }
+ if t.ContentLength < 0 {
+ t.TransferEncoding = []string{"chunked"}
+ }
+ }
+ case *Response:
+ t.Method = rr.Request.Method
+ t.Body = rr.Body
+ t.BodyCloser = rr.Body
+ t.ContentLength = rr.ContentLength
+ t.Close = rr.Close
+ t.TransferEncoding = rr.TransferEncoding
+ t.Trailer = rr.Trailer
+ atLeastHTTP11 = rr.ProtoAtLeast(1, 1)
+ t.ResponseToHEAD = noBodyExpected(rr.Request.Method)
+ }
+
+	// Sanitize Body, ContentLength, TransferEncoding
+ if t.ResponseToHEAD {
+ t.Body = nil
+ t.TransferEncoding = nil
+ // ContentLength is expected to hold Content-Length
+ if t.ContentLength < 0 {
+ return nil, ErrMissingContentLength
+ }
+ } else {
+ if !atLeastHTTP11 || t.Body == nil {
+ t.TransferEncoding = nil
+ }
+ if chunked(t.TransferEncoding) {
+ t.ContentLength = -1
+ } else if t.Body == nil { // no chunking, no body
+ t.ContentLength = 0
+ }
+ }
+
+ // Sanitize Trailer
+ if !chunked(t.TransferEncoding) {
+ t.Trailer = nil
+ }
+
+ return t, nil
+}
+
+func noBodyExpected(requestMethod string) bool {
+ return requestMethod == "HEAD"
+}
+
+func (t *transferWriter) shouldSendContentLength() bool {
+ if chunked(t.TransferEncoding) {
+ return false
+ }
+ if t.ContentLength > 0 {
+ return true
+ }
+ if t.ResponseToHEAD {
+ return true
+ }
+ // Many servers expect a Content-Length for these methods
+ if t.Method == "POST" || t.Method == "PUT" {
+ return true
+ }
+ if t.ContentLength == 0 && isIdentity(t.TransferEncoding) {
+ return true
+ }
+
+ return false
+}
+
+func (t *transferWriter) WriteHeader(w io.Writer) (err error) {
+ if t.Close {
+ _, err = io.WriteString(w, "Connection: close\r\n")
+ if err != nil {
+ return
+ }
+ }
+
+ // Write Content-Length and/or Transfer-Encoding whose values are a
+ // function of the sanitized field triple (Body, ContentLength,
+ // TransferEncoding)
+ if t.shouldSendContentLength() {
+ io.WriteString(w, "Content-Length: ")
+ _, err = io.WriteString(w, strconv.Itoa64(t.ContentLength)+"\r\n")
+ if err != nil {
+ return
+ }
+ } else if chunked(t.TransferEncoding) {
+ _, err = io.WriteString(w, "Transfer-Encoding: chunked\r\n")
+ if err != nil {
+ return
+ }
+ }
+
+ // Write Trailer header
+ if t.Trailer != nil {
+ // TODO: At some point, there should be a generic mechanism for
+ // writing long headers, using HTTP line splitting
+ io.WriteString(w, "Trailer: ")
+ needComma := false
+ for k := range t.Trailer {
+ k = CanonicalHeaderKey(k)
+ switch k {
+ case "Transfer-Encoding", "Trailer", "Content-Length":
+ return &badStringError{"invalid Trailer key", k}
+ }
+ if needComma {
+ io.WriteString(w, ",")
+ }
+ io.WriteString(w, k)
+ needComma = true
+ }
+ _, err = io.WriteString(w, "\r\n")
+ }
+
+ return
+}
+
+func (t *transferWriter) WriteBody(w io.Writer) (err error) {
+ var ncopy int64
+
+ // Write body
+ if t.Body != nil {
+ if chunked(t.TransferEncoding) {
+ cw := newChunkedWriter(w)
+ _, err = io.Copy(cw, t.Body)
+ if err == nil {
+ err = cw.Close()
+ }
+ } else if t.ContentLength == -1 {
+ ncopy, err = io.Copy(w, t.Body)
+ } else {
+ ncopy, err = io.Copy(w, io.LimitReader(t.Body, t.ContentLength))
+ nextra, err := io.Copy(ioutil.Discard, t.Body)
+ if err != nil {
+ return err
+ }
+ ncopy += nextra
+ }
+ if err != nil {
+ return err
+ }
+ if err = t.BodyCloser.Close(); err != nil {
+ return err
+ }
+ }
+
+ if t.ContentLength != -1 && t.ContentLength != ncopy {
+ return fmt.Errorf("http: Request.ContentLength=%d with Body length %d",
+ t.ContentLength, ncopy)
+ }
+
+ // TODO(petar): Place trailer writer code here.
+ if chunked(t.TransferEncoding) {
+ // Last chunk, empty trailer
+ _, err = io.WriteString(w, "\r\n")
+ }
+
+ return
+}
+
+type transferReader struct {
+ // Input
+ Header Header
+ StatusCode int
+ RequestMethod string
+ ProtoMajor int
+ ProtoMinor int
+ // Output
+ Body io.ReadCloser
+ ContentLength int64
+ TransferEncoding []string
+ Close bool
+ Trailer Header
+}
+
+// bodyAllowedForStatus returns whether a given response status code
+// permits a body. See RFC2616, section 4.4.
+func bodyAllowedForStatus(status int) bool {
+ switch {
+ case status >= 100 && status <= 199:
+ return false
+ case status == 204:
+ return false
+ case status == 304:
+ return false
+ }
+ return true
+}
+
+// msg is *Request or *Response.
+func readTransfer(msg interface{}, r *bufio.Reader) (err error) {
+ t := &transferReader{}
+
+ // Unify input
+ isResponse := false
+ switch rr := msg.(type) {
+ case *Response:
+ t.Header = rr.Header
+ t.StatusCode = rr.StatusCode
+ t.RequestMethod = rr.Request.Method
+ t.ProtoMajor = rr.ProtoMajor
+ t.ProtoMinor = rr.ProtoMinor
+ t.Close = shouldClose(t.ProtoMajor, t.ProtoMinor, t.Header)
+ isResponse = true
+ case *Request:
+ t.Header = rr.Header
+ t.ProtoMajor = rr.ProtoMajor
+ t.ProtoMinor = rr.ProtoMinor
+ // Transfer semantics for Requests are exactly like those for
+ // Responses with status code 200, responding to a GET method
+ t.StatusCode = 200
+ t.RequestMethod = "GET"
+ default:
+ panic("unexpected type")
+ }
+
+ // Default to HTTP/1.1
+ if t.ProtoMajor == 0 && t.ProtoMinor == 0 {
+ t.ProtoMajor, t.ProtoMinor = 1, 1
+ }
+
+ // Transfer encoding, content length
+ t.TransferEncoding, err = fixTransferEncoding(t.RequestMethod, t.Header)
+ if err != nil {
+ return err
+ }
+
+ t.ContentLength, err = fixLength(isResponse, t.StatusCode, t.RequestMethod, t.Header, t.TransferEncoding)
+ if err != nil {
+ return err
+ }
+
+ // Trailer
+ t.Trailer, err = fixTrailer(t.Header, t.TransferEncoding)
+ if err != nil {
+ return err
+ }
+
+ // If there is no Content-Length or chunked Transfer-Encoding on a *Response
+ // and the status is not 1xx, 204 or 304, then the body is unbounded.
+ // See RFC2616, section 4.4.
+ switch msg.(type) {
+ case *Response:
+ if t.ContentLength == -1 &&
+ !chunked(t.TransferEncoding) &&
+ bodyAllowedForStatus(t.StatusCode) {
+ // Unbounded body.
+ t.Close = true
+ }
+ }
+
+ // Prepare body reader. ContentLength < 0 means chunked encoding
+ // or close connection when finished, since multipart is not supported yet
+ switch {
+ case chunked(t.TransferEncoding):
+ t.Body = &body{Reader: newChunkedReader(r), hdr: msg, r: r, closing: t.Close}
+ case t.ContentLength >= 0:
+ // TODO: limit the Content-Length. This is an easy DoS vector.
+ t.Body = &body{Reader: io.LimitReader(r, t.ContentLength), closing: t.Close}
+ default:
+ // t.ContentLength < 0, i.e. "Content-Length" not mentioned in header
+ if t.Close {
+ // Close semantics (i.e. HTTP/1.0)
+ t.Body = &body{Reader: r, closing: t.Close}
+ } else {
+ // Persistent connection (i.e. HTTP/1.1)
+ t.Body = &body{Reader: io.LimitReader(r, 0), closing: t.Close}
+ }
+ }
+
+ // Unify output
+ switch rr := msg.(type) {
+ case *Request:
+ rr.Body = t.Body
+ rr.ContentLength = t.ContentLength
+ rr.TransferEncoding = t.TransferEncoding
+ rr.Close = t.Close
+ rr.Trailer = t.Trailer
+ case *Response:
+ rr.Body = t.Body
+ rr.ContentLength = t.ContentLength
+ rr.TransferEncoding = t.TransferEncoding
+ rr.Close = t.Close
+ rr.Trailer = t.Trailer
+ }
+
+ return nil
+}
+
+// Checks whether chunked is part of the encodings stack
+func chunked(te []string) bool { return len(te) > 0 && te[0] == "chunked" }
+
+// Checks whether the encoding is explicitly "identity".
+func isIdentity(te []string) bool { return len(te) == 1 && te[0] == "identity" }
+
+// Sanitize transfer encoding
+func fixTransferEncoding(requestMethod string, header Header) ([]string, error) {
+ raw, present := header["Transfer-Encoding"]
+ if !present {
+ return nil, nil
+ }
+
+ delete(header, "Transfer-Encoding")
+
+ // Head responses have no bodies, so the transfer encoding
+ // should be ignored.
+ if requestMethod == "HEAD" {
+ return nil, nil
+ }
+
+ encodings := strings.Split(raw[0], ",")
+ te := make([]string, 0, len(encodings))
+ // TODO: Even though we only support "identity" and "chunked"
+ // encodings, the loop below is designed with foresight. One
+ // invariant that must be maintained is that, if present,
+ // chunked encoding must always come first.
+ for _, encoding := range encodings {
+ encoding = strings.ToLower(strings.TrimSpace(encoding))
+ // "identity" encoding is not recored
+ if encoding == "identity" {
+ break
+ }
+ if encoding != "chunked" {
+ return nil, &badStringError{"unsupported transfer encoding", encoding}
+ }
+ te = te[0 : len(te)+1]
+ te[len(te)-1] = encoding
+ }
+ if len(te) > 1 {
+ return nil, &badStringError{"too many transfer encodings", strings.Join(te, ",")}
+ }
+ if len(te) > 0 {
+ // Chunked encoding trumps Content-Length. See RFC 2616
+ // Section 4.4. Currently len(te) > 0 implies chunked
+ // encoding.
+ delete(header, "Content-Length")
+ return te, nil
+ }
+
+ return nil, nil
+}
+
+// Determine the expected body length, using RFC 2616 Section 4.4. This
+// function is not a method, because ultimately it should be shared by
+// ReadResponse and ReadRequest.
+func fixLength(isResponse bool, status int, requestMethod string, header Header, te []string) (int64, error) {
+
+ // Logic based on response type or status
+ if noBodyExpected(requestMethod) {
+ return 0, nil
+ }
+ if status/100 == 1 {
+ return 0, nil
+ }
+ switch status {
+ case 204, 304:
+ return 0, nil
+ }
+
+ // Logic based on Transfer-Encoding
+ if chunked(te) {
+ return -1, nil
+ }
+
+ // Logic based on Content-Length
+ cl := strings.TrimSpace(header.Get("Content-Length"))
+ if cl != "" {
+ n, err := strconv.Atoi64(cl)
+ if err != nil || n < 0 {
+ return -1, &badStringError{"bad Content-Length", cl}
+ }
+ return n, nil
+ } else {
+ header.Del("Content-Length")
+ }
+
+ if !isResponse && requestMethod == "GET" {
+ // RFC 2616 doesn't explicitly permit nor forbid an
+ // entity-body on a GET request so we permit one if
+ // declared, but we default to 0 here (not -1 below)
+ // if there's no mention of a body.
+ return 0, nil
+ }
+
+ // Logic based on media type. The purpose of the following code is just
+ // to detect whether the unsupported "multipart/byteranges" is being
+ // used. A proper Content-Type parser is needed in the future.
+ if strings.Contains(strings.ToLower(header.Get("Content-Type")), "multipart/byteranges") {
+ return -1, ErrNotSupported
+ }
+
+ // Body-EOF logic based on other methods (like closing, or chunked coding)
+ return -1, nil
+}
+
+// Determine whether to hang up after sending a request and body, or
+// receiving a response and body
+// 'header' is the request headers
+func shouldClose(major, minor int, header Header) bool {
+ if major < 1 {
+ return true
+ } else if major == 1 && minor == 0 {
+ if !strings.Contains(strings.ToLower(header.Get("Connection")), "keep-alive") {
+ return true
+ }
+ return false
+ } else {
+ // TODO: Should split on commas, toss surrounding white space,
+ // and check each field.
+ if strings.ToLower(header.Get("Connection")) == "close" {
+ header.Del("Connection")
+ return true
+ }
+ }
+ return false
+}
+
+// Parse the trailer header
+func fixTrailer(header Header, te []string) (Header, error) {
+ raw := header.Get("Trailer")
+ if raw == "" {
+ return nil, nil
+ }
+
+ header.Del("Trailer")
+ trailer := make(Header)
+ keys := strings.Split(raw, ",")
+ for _, key := range keys {
+ key = CanonicalHeaderKey(strings.TrimSpace(key))
+ switch key {
+ case "Transfer-Encoding", "Trailer", "Content-Length":
+ return nil, &badStringError{"bad trailer key", key}
+ }
+ trailer.Del(key)
+ }
+ if len(trailer) == 0 {
+ return nil, nil
+ }
+ if !chunked(te) {
+ // Trailer and no chunking
+ return nil, ErrUnexpectedTrailer
+ }
+ return trailer, nil
+}
+
+// body turns a Reader into a ReadCloser.
+// Close ensures that the body has been fully read
+// and then reads the trailer if necessary.
+type body struct {
+ io.Reader
+ hdr interface{} // non-nil (Response or Request) value means read trailer
+ r *bufio.Reader // underlying wire-format reader for the trailer
+ closing bool // is the connection to be closed after reading body?
+ closed bool
+
+ res *response // response writer for server requests, else nil
+}
+
+// ErrBodyReadAfterClose is returned when reading a Request Body after
+// the body has been closed. This typically happens when the body is
+// read after an HTTP Handler calls WriteHeader or Write on its
+// ResponseWriter.
+var ErrBodyReadAfterClose = errors.New("http: invalid Read on closed request Body")
+
+func (b *body) Read(p []byte) (n int, err error) {
+ if b.closed {
+ return 0, ErrBodyReadAfterClose
+ }
+ n, err = b.Reader.Read(p)
+
+ // Read the final trailer once we hit EOF.
+ if err == io.EOF && b.hdr != nil {
+ err = b.readTrailer()
+ b.hdr = nil
+ }
+ return n, err
+}
+
+var (
+ singleCRLF = []byte("\r\n")
+ doubleCRLF = []byte("\r\n\r\n")
+)
+
+func seeUpcomingDoubleCRLF(r *bufio.Reader) bool {
+ for peekSize := 4; ; peekSize++ {
+ // This loop stops when Peek returns an error,
+ // which it does when r's buffer has been filled.
+ buf, err := r.Peek(peekSize)
+ if bytes.HasSuffix(buf, doubleCRLF) {
+ return true
+ }
+ if err != nil {
+ break
+ }
+ }
+ return false
+}
+
+func (b *body) readTrailer() error {
+ // The common case, since nobody uses trailers.
+ buf, _ := b.r.Peek(2)
+ if bytes.Equal(buf, singleCRLF) {
+ b.r.ReadByte()
+ b.r.ReadByte()
+ return nil
+ }
+
+ // Make sure there's a header terminator coming up, to prevent
+ // a DoS with an unbounded size Trailer. It's not easy to
+ // slip in a LimitReader here, as textproto.NewReader requires
+ // a concrete *bufio.Reader. Also, we can't get all the way
+ // back up to our conn's LimitedReader that *might* be backing
+ // this bufio.Reader. Instead, a hack: we iteratively Peek up
+ // to the bufio.Reader's max size, looking for a double CRLF.
+ // This limits the trailer to the underlying buffer size, typically 4kB.
+ if !seeUpcomingDoubleCRLF(b.r) {
+ return errors.New("http: suspiciously long trailer after chunked body")
+ }
+
+ hdr, err := textproto.NewReader(b.r).ReadMIMEHeader()
+ if err != nil {
+ return err
+ }
+ switch rr := b.hdr.(type) {
+ case *Request:
+ rr.Trailer = Header(hdr)
+ case *Response:
+ rr.Trailer = Header(hdr)
+ }
+ return nil
+}
+
+func (b *body) Close() error {
+ if b.closed {
+ return nil
+ }
+ defer func() {
+ b.closed = true
+ }()
+ if b.hdr == nil && b.closing {
+ // no trailer and closing the connection next.
+ // no point in reading to EOF.
+ return nil
+ }
+
+ // In a server request, don't continue reading from the client
+ // if we've already hit the maximum body size set by the
+ // handler. If this is set, that also means the TCP connection
+ // is about to be closed, so getting to the next HTTP request
+ // in the stream is not necessary.
+ if b.res != nil && b.res.requestBodyLimitHit {
+ return nil
+ }
+
+ // Fully consume the body, which will also lead to us reading
+ // the trailer headers after the body, if present.
+ if _, err := io.Copy(ioutil.Discard, b); err != nil {
+ return err
+ }
+ return nil
+}
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// HTTP client implementation. See RFC 2616.
+//
+// This is the low-level Transport implementation of RoundTripper.
+// The high-level interface is in client.go.
+
+package http
+
+import (
+ "bufio"
+ "compress/gzip"
+ "crypto/tls"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "net"
+ "net/url"
+ "os"
+ "strings"
+ "sync"
+)
+
+// DefaultTransport is the default implementation of Transport and is
+// used by DefaultClient. It establishes a new network connection for
+// each call to Do and uses HTTP proxies as directed by the
+// $HTTP_PROXY and $NO_PROXY (or $http_proxy and $no_proxy)
+// environment variables.
+var DefaultTransport RoundTripper = &Transport{Proxy: ProxyFromEnvironment}
+
+// DefaultMaxIdleConnsPerHost is the default value of Transport's
+// MaxIdleConnsPerHost.
+const DefaultMaxIdleConnsPerHost = 2
+
+// Transport is an implementation of RoundTripper that supports http,
+// https, and http proxies (for either http or https with CONNECT).
+// Transport can also cache connections for future re-use.
+type Transport struct {
+ lk sync.Mutex
+ idleConn map[string][]*persistConn
+ altProto map[string]RoundTripper // nil or map of URI scheme => RoundTripper
+
+ // TODO: tunable on global max cached connections
+ // TODO: tunable on timeout on cached connections
+ // TODO: optional pipelining
+
+ // Proxy specifies a function to return a proxy for a given
+ // Request. If the function returns a non-nil error, the
+ // request is aborted with the provided error.
+ // If Proxy is nil or returns a nil *URL, no proxy is used.
+ Proxy func(*Request) (*url.URL, error)
+
+ // Dial specifies the dial function for creating TCP
+ // connections.
+ // If Dial is nil, net.Dial is used.
+ Dial func(net, addr string) (c net.Conn, err error)
+
+ // TLSClientConfig specifies the TLS configuration to use with
+ // tls.Client. If nil, the default configuration is used.
+ TLSClientConfig *tls.Config
+
+ DisableKeepAlives bool
+ DisableCompression bool
+
+ // MaxIdleConnsPerHost, if non-zero, controls the maximum idle
+ // (keep-alive) connections to keep per-host. If zero,
+ // DefaultMaxIdleConnsPerHost is used.
+ MaxIdleConnsPerHost int
+}
+
+// ProxyFromEnvironment returns the URL of the proxy to use for a
+// given request, as indicated by the environment variables
+// $HTTP_PROXY and $NO_PROXY (or $http_proxy and $no_proxy).
+// A nil URL and nil error are returned if no proxy should be used;
+// an error is returned if the proxy address is invalid.
+func ProxyFromEnvironment(req *Request) (*url.URL, error) {
+ proxy := getenvEitherCase("HTTP_PROXY")
+ if proxy == "" {
+ return nil, nil
+ }
+ if !useProxy(canonicalAddr(req.URL)) {
+ return nil, nil
+ }
+ proxyURL, err := url.ParseRequest(proxy)
+ if err != nil {
+ return nil, errors.New("invalid proxy address")
+ }
+ if proxyURL.Host == "" {
+ proxyURL, err = url.ParseRequest("http://" + proxy)
+ if err != nil {
+ return nil, errors.New("invalid proxy address")
+ }
+ }
+ return proxyURL, nil
+}
+
+// ProxyURL returns a proxy function (for use in a Transport)
+// that always returns the same URL.
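+//
+// A minimal usage sketch (the proxy address is a placeholder):
+//
+//   proxyURL, _ := url.Parse("http://proxy.example.com:3128")
+//   tr := &Transport{Proxy: ProxyURL(proxyURL)}
+//   c := &Client{Transport: tr}
+//   resp, err := c.Get("http://example.com/")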
+func ProxyURL(fixedURL *url.URL) func(*Request) (*url.URL, error) {
+ return func(*Request) (*url.URL, error) {
+ return fixedURL, nil
+ }
+}
+
+// transportRequest is a wrapper around a *Request that adds
+// optional extra headers to write.
+type transportRequest struct {
+ *Request // original request, not to be mutated
+ extra Header // extra headers to write, or nil
+}
+
+func (tr *transportRequest) extraHeaders() Header {
+ if tr.extra == nil {
+ tr.extra = make(Header)
+ }
+ return tr.extra
+}
+
+// RoundTrip implements the RoundTripper interface.
+func (t *Transport) RoundTrip(req *Request) (resp *Response, err error) {
+ if req.URL == nil {
+ return nil, errors.New("http: nil Request.URL")
+ }
+ if req.Header == nil {
+ return nil, errors.New("http: nil Request.Header")
+ }
+ if req.URL.Scheme != "http" && req.URL.Scheme != "https" {
+ t.lk.Lock()
+ var rt RoundTripper
+ if t.altProto != nil {
+ rt = t.altProto[req.URL.Scheme]
+ }
+ t.lk.Unlock()
+ if rt == nil {
+ return nil, &badStringError{"unsupported protocol scheme", req.URL.Scheme}
+ }
+ return rt.RoundTrip(req)
+ }
+ treq := &transportRequest{Request: req}
+ cm, err := t.connectMethodForRequest(treq)
+ if err != nil {
+ return nil, err
+ }
+
+ // Get the cached or newly-created connection to either the
+ // host (for http or https), the http proxy, or the http proxy
+ // pre-CONNECTed to https server. In any case, we'll be ready
+ // to send it requests.
+ pconn, err := t.getConn(cm)
+ if err != nil {
+ return nil, err
+ }
+
+ return pconn.roundTrip(treq)
+}
+
+// RegisterProtocol registers a new protocol with scheme.
+// The Transport will pass requests using the given scheme to rt.
+// It is rt's responsibility to simulate HTTP request semantics.
+//
+// RegisterProtocol can be used by other packages to provide
+// implementations of protocol schemes like "ftp" or "file".
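+//
+// A minimal sketch (fooRoundTripper is a placeholder RoundTripper; see
+// TestTransportAltProto in transport_test.go for a complete example):
+//
+//   t := &Transport{}
+//   t.RegisterProtocol("foo", fooRoundTripper{})
+//   c := &Client{Transport: t}
+//   res, err := c.Get("foo://bar.com/path")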
+func (t *Transport) RegisterProtocol(scheme string, rt RoundTripper) {
+ if scheme == "http" || scheme == "https" {
+ panic("protocol " + scheme + " already registered")
+ }
+ t.lk.Lock()
+ defer t.lk.Unlock()
+ if t.altProto == nil {
+ t.altProto = make(map[string]RoundTripper)
+ }
+ if _, exists := t.altProto[scheme]; exists {
+ panic("protocol " + scheme + " already registered")
+ }
+ t.altProto[scheme] = rt
+}
+
+// CloseIdleConnections closes any connections from previous requests
+// that are now sitting idle in a "keep-alive" state. It does not
+// interrupt any connections currently in use.
+func (t *Transport) CloseIdleConnections() {
+ t.lk.Lock()
+ defer t.lk.Unlock()
+ if t.idleConn == nil {
+ return
+ }
+ for _, conns := range t.idleConn {
+ for _, pconn := range conns {
+ pconn.close()
+ }
+ }
+ t.idleConn = nil
+}
+
+//
+// Private implementation past this point.
+//
+
+func getenvEitherCase(k string) string {
+ if v := os.Getenv(strings.ToUpper(k)); v != "" {
+ return v
+ }
+ return os.Getenv(strings.ToLower(k))
+}
+
+func (t *Transport) connectMethodForRequest(treq *transportRequest) (*connectMethod, error) {
+ cm := &connectMethod{
+ targetScheme: treq.URL.Scheme,
+ targetAddr: canonicalAddr(treq.URL),
+ }
+ if t.Proxy != nil {
+ var err error
+ cm.proxyURL, err = t.Proxy(treq.Request)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return cm, nil
+}
+
+// proxyAuth returns the Proxy-Authorization header to set
+// on requests, if applicable.
+func (cm *connectMethod) proxyAuth() string {
+ if cm.proxyURL == nil {
+ return ""
+ }
+ proxyInfo := cm.proxyURL.RawUserinfo
+ if proxyInfo != "" {
+ return "Basic " + base64.URLEncoding.EncodeToString([]byte(proxyInfo))
+ }
+ return ""
+}
+
+func (t *Transport) putIdleConn(pconn *persistConn) {
+ t.lk.Lock()
+ defer t.lk.Unlock()
+ if t.DisableKeepAlives || t.MaxIdleConnsPerHost < 0 {
+ pconn.close()
+ return
+ }
+ if pconn.isBroken() {
+ return
+ }
+ key := pconn.cacheKey
+ max := t.MaxIdleConnsPerHost
+ if max == 0 {
+ max = DefaultMaxIdleConnsPerHost
+ }
+ if len(t.idleConn[key]) >= max {
+ pconn.close()
+ return
+ }
+ t.idleConn[key] = append(t.idleConn[key], pconn)
+}
+
+func (t *Transport) getIdleConn(cm *connectMethod) (pconn *persistConn) {
+ t.lk.Lock()
+ defer t.lk.Unlock()
+ if t.idleConn == nil {
+ t.idleConn = make(map[string][]*persistConn)
+ }
+ key := cm.String()
+ for {
+ pconns, ok := t.idleConn[key]
+ if !ok {
+ return nil
+ }
+ if len(pconns) == 1 {
+ pconn = pconns[0]
+ delete(t.idleConn, key)
+ } else {
+ // 2 or more cached connections; pop last
+ // TODO: queue?
+ pconn = pconns[len(pconns)-1]
+ t.idleConn[key] = pconns[0 : len(pconns)-1]
+ }
+ if !pconn.isBroken() {
+ return
+ }
+ }
+ return
+}
+
+func (t *Transport) dial(network, addr string) (c net.Conn, err error) {
+ if t.Dial != nil {
+ return t.Dial(network, addr)
+ }
+ return net.Dial(network, addr)
+}
+
+// getConn dials and creates a new persistConn to the target as
+// specified in the connectMethod. This includes doing a proxy CONNECT
+// and/or setting up TLS. If this doesn't return an error, the persistConn
+// is ready to write requests to.
+func (t *Transport) getConn(cm *connectMethod) (*persistConn, error) {
+ if pc := t.getIdleConn(cm); pc != nil {
+ return pc, nil
+ }
+
+ conn, err := t.dial("tcp", cm.addr())
+ if err != nil {
+ if cm.proxyURL != nil {
+ err = fmt.Errorf("http: error connecting to proxy %s: %v", cm.proxyURL, err)
+ }
+ return nil, err
+ }
+
+ pa := cm.proxyAuth()
+
+ pconn := &persistConn{
+ t: t,
+ cacheKey: cm.String(),
+ conn: conn,
+ reqch: make(chan requestAndChan, 50),
+ }
+
+ switch {
+ case cm.proxyURL == nil:
+ // Do nothing.
+ case cm.targetScheme == "http":
+ pconn.isProxy = true
+ if pa != "" {
+ pconn.mutateHeaderFunc = func(h Header) {
+ h.Set("Proxy-Authorization", pa)
+ }
+ }
+ case cm.targetScheme == "https":
+ connectReq := &Request{
+ Method: "CONNECT",
+ URL: &url.URL{RawPath: cm.targetAddr},
+ Host: cm.targetAddr,
+ Header: make(Header),
+ }
+ if pa != "" {
+ connectReq.Header.Set("Proxy-Authorization", pa)
+ }
+ connectReq.Write(conn)
+
+ // Read response.
+ // Okay to use and discard buffered reader here, because
+ // TLS server will not speak until spoken to.
+ br := bufio.NewReader(conn)
+ resp, err := ReadResponse(br, connectReq)
+ if err != nil {
+ conn.Close()
+ return nil, err
+ }
+ if resp.StatusCode != 200 {
+ f := strings.SplitN(resp.Status, " ", 2)
+ conn.Close()
+ return nil, errors.New(f[1])
+ }
+ }
+
+ if cm.targetScheme == "https" {
+ // Initiate TLS and check remote host name against certificate.
+ conn = tls.Client(conn, t.TLSClientConfig)
+ if err = conn.(*tls.Conn).Handshake(); err != nil {
+ return nil, err
+ }
+ if t.TLSClientConfig == nil || !t.TLSClientConfig.InsecureSkipVerify {
+ if err = conn.(*tls.Conn).VerifyHostname(cm.tlsHost()); err != nil {
+ return nil, err
+ }
+ }
+ pconn.conn = conn
+ }
+
+ pconn.br = bufio.NewReader(pconn.conn)
+ pconn.bw = bufio.NewWriter(pconn.conn)
+ go pconn.readLoop()
+ return pconn, nil
+}
+
+// useProxy returns true if requests to addr should use a proxy,
+// according to the NO_PROXY or no_proxy environment variable.
+// addr is always a canonicalAddr with a host and port.
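+//
+// For example, with NO_PROXY set to "example.com,.corp.local":
+//
+//   useProxy("example.com:80")     == false  (exact match)
+//   useProxy("www.corp.local:443") == false  (matches the ".corp.local" suffix)
+//   useProxy("other.org:80")       == true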
+func useProxy(addr string) bool {
+ if len(addr) == 0 {
+ return true
+ }
+ host, _, err := net.SplitHostPort(addr)
+ if err != nil {
+ return false
+ }
+ if host == "localhost" {
+ return false
+ }
+ if ip := net.ParseIP(host); ip != nil {
+ if ip.IsLoopback() {
+ return false
+ }
+ }
+
+ no_proxy := getenvEitherCase("NO_PROXY")
+ if no_proxy == "*" {
+ return false
+ }
+
+ addr = strings.ToLower(strings.TrimSpace(addr))
+ if hasPort(addr) {
+ addr = addr[:strings.LastIndex(addr, ":")]
+ }
+
+ for _, p := range strings.Split(no_proxy, ",") {
+ p = strings.ToLower(strings.TrimSpace(p))
+ if len(p) == 0 {
+ continue
+ }
+ if hasPort(p) {
+ p = p[:strings.LastIndex(p, ":")]
+ }
+ if addr == p || (p[0] == '.' && (strings.HasSuffix(addr, p) || addr == p[1:])) {
+ return false
+ }
+ }
+ return true
+}
+
+// connectMethod is the map key (in its String form) for keeping persistent
+// TCP connections alive for subsequent HTTP requests.
+//
+// A connect method may be of the following types:
+//
+// Cache key form Description
+// ----------------- -------------------------
+// ||http|foo.com http directly to server, no proxy
+// ||https|foo.com https directly to server, no proxy
+// http://proxy.com|https|foo.com http to proxy, then CONNECT to foo.com
+// http://proxy.com|http http to proxy, http to anywhere after that
+//
+// Note: no support for https to the proxy yet.
+//
+type connectMethod struct {
+ proxyURL *url.URL // nil for no proxy, else full proxy URL
+ targetScheme string // "http" or "https"
+ targetAddr string // Not used if proxy + http targetScheme (4th example in table)
+}
+
+func (ck *connectMethod) String() string {
+ proxyStr := ""
+ if ck.proxyURL != nil {
+ proxyStr = ck.proxyURL.String()
+ }
+ return strings.Join([]string{proxyStr, ck.targetScheme, ck.targetAddr}, "|")
+}
+
+// addr returns the first hop "host:port" to which we need to TCP connect.
+func (cm *connectMethod) addr() string {
+ if cm.proxyURL != nil {
+ return canonicalAddr(cm.proxyURL)
+ }
+ return cm.targetAddr
+}
+
+// tlsHost returns the host name to match against the peer's
+// TLS certificate.
+func (cm *connectMethod) tlsHost() string {
+ h := cm.targetAddr
+ if hasPort(h) {
+ h = h[:strings.LastIndex(h, ":")]
+ }
+ return h
+}
+
+// persistConn wraps a connection, usually a persistent one
+// (but may be used for non-keep-alive requests as well)
+type persistConn struct {
+ t *Transport
+ cacheKey string // its connectMethod.String()
+ conn net.Conn
+ br *bufio.Reader // from conn
+ bw *bufio.Writer // to conn
+ reqch chan requestAndChan // written by roundTrip(); read by readLoop()
+ isProxy bool
+
+ // mutateHeaderFunc is an optional func to modify extra
+ // headers on each outbound request before it's written. (the
+ // original Request given to RoundTrip is not modified)
+ mutateHeaderFunc func(Header)
+
+ lk sync.Mutex // guards numExpectedResponses and broken
+ numExpectedResponses int
+ broken bool // an error has happened on this connection; marked broken so it's not reused.
+}
+
+func (pc *persistConn) isBroken() bool {
+ pc.lk.Lock()
+ defer pc.lk.Unlock()
+ return pc.broken
+}
+
+func (pc *persistConn) expectingResponse() bool {
+ pc.lk.Lock()
+ defer pc.lk.Unlock()
+ return pc.numExpectedResponses > 0
+}
+
+var remoteSideClosedFunc func(error) bool // or nil to use default
+
+func remoteSideClosed(err error) bool {
+ if err == io.EOF || err == os.EINVAL {
+ return true
+ }
+ if remoteSideClosedFunc != nil {
+ return remoteSideClosedFunc(err)
+ }
+ return false
+}
+
+func (pc *persistConn) readLoop() {
+ alive := true
+ var lastbody io.ReadCloser // last response body, if any, read on this connection
+
+ for alive {
+ pb, err := pc.br.Peek(1)
+ if err != nil {
+ if remoteSideClosed(err) && !pc.expectingResponse() {
+ // Remote side closed on us. (We probably hit their
+ // max idle timeout)
+ pc.close()
+ return
+ }
+ }
+ if !pc.expectingResponse() {
+ log.Printf("Unsolicited response received on idle HTTP channel starting with %q; err=%v",
+ string(pb), err)
+ pc.close()
+ return
+ }
+
+ rc := <-pc.reqch
+
+ // Advance past the previous response's body, if the
+ // caller hasn't done so.
+ if lastbody != nil {
+ lastbody.Close() // assumed idempotent
+ lastbody = nil
+ }
+ resp, err := ReadResponse(pc.br, rc.req)
+
+ if err == nil {
+ if rc.addedGzip && resp.Header.Get("Content-Encoding") == "gzip" {
+ resp.Header.Del("Content-Encoding")
+ resp.Header.Del("Content-Length")
+ resp.ContentLength = -1
+ gzReader, zerr := gzip.NewReader(resp.Body)
+ if zerr != nil {
+ pc.close()
+ err = zerr
+ } else {
+ resp.Body = &readFirstCloseBoth{&discardOnCloseReadCloser{gzReader}, resp.Body}
+ }
+ }
+ resp.Body = &bodyEOFSignal{body: resp.Body}
+ }
+
+ if err != nil || resp.Close || rc.req.Close {
+ alive = false
+ }
+
+ hasBody := resp != nil && resp.ContentLength != 0
+ var waitForBodyRead chan bool
+ if alive {
+ if hasBody {
+ lastbody = resp.Body
+ waitForBodyRead = make(chan bool)
+ resp.Body.(*bodyEOFSignal).fn = func() {
+ pc.t.putIdleConn(pc)
+ waitForBodyRead <- true
+ }
+ } else {
+ // When there's no response body, we immediately
+ // reuse the TCP connection (putIdleConn), but
+ // we need to prevent ClientConn.Read from
+ // closing the Response.Body on the next
+ // loop, otherwise it might close the body
+ // before the client code has had a chance to
+ // read it (even though it'll just be 0, EOF).
+ lastbody = nil
+
+ pc.t.putIdleConn(pc)
+ }
+ }
+
+ rc.ch <- responseAndError{resp, err}
+
+ // Wait for the just-returned response body to be fully consumed
+ // before we race and peek on the underlying bufio reader.
+ if waitForBodyRead != nil {
+ <-waitForBodyRead
+ }
+ }
+}
+
+type responseAndError struct {
+ res *Response
+ err error
+}
+
+type requestAndChan struct {
+ req *Request
+ ch chan responseAndError
+
+ // did the Transport (as opposed to the client code) add an
+ // Accept-Encoding gzip header? Only if we set it do
+ // we transparently decode the gzip.
+ addedGzip bool
+}
+
+func (pc *persistConn) roundTrip(req *transportRequest) (resp *Response, err error) {
+ if pc.mutateHeaderFunc != nil {
+ pc.mutateHeaderFunc(req.extraHeaders())
+ }
+
+ // Ask for a compressed version if the caller didn't set their
+ // own value for Accept-Encoding. We only attempt to
+ // uncompress the gzip stream if we were the layer that
+ // requested it.
+ requestedGzip := false
+ if !pc.t.DisableCompression && req.Header.Get("Accept-Encoding") == "" {
+ // Request gzip only, not deflate. Deflate is ambiguous and
+ // not as universally supported anyway.
+ // See: http://www.gzip.org/zlib/zlib_faq.html#faq38
+ requestedGzip = true
+ req.extraHeaders().Set("Accept-Encoding", "gzip")
+ }
+
+ pc.lk.Lock()
+ pc.numExpectedResponses++
+ pc.lk.Unlock()
+
+ err = req.Request.write(pc.bw, pc.isProxy, req.extra)
+ if err != nil {
+ pc.close()
+ return
+ }
+ pc.bw.Flush()
+
+ ch := make(chan responseAndError, 1)
+ pc.reqch <- requestAndChan{req.Request, ch, requestedGzip}
+ re := <-ch
+ pc.lk.Lock()
+ pc.numExpectedResponses--
+ pc.lk.Unlock()
+
+ return re.res, re.err
+}
+
+func (pc *persistConn) close() {
+ pc.lk.Lock()
+ defer pc.lk.Unlock()
+ pc.broken = true
+ pc.conn.Close()
+ pc.mutateHeaderFunc = nil
+}
+
+var portMap = map[string]string{
+ "http": "80",
+ "https": "443",
+}
+
+// canonicalAddr returns url.Host but always with a ":port" suffix
+func canonicalAddr(url *url.URL) string {
+ addr := url.Host
+ if !hasPort(addr) {
+ return addr + ":" + portMap[url.Scheme]
+ }
+ return addr
+}
+
+func responseIsKeepAlive(res *Response) bool {
+ // TODO: implement. for now just always shutting down the connection.
+ return false
+}
+
+// bodyEOFSignal wraps a ReadCloser but runs fn (if non-nil) at most
+// once, right before the final Read() or Close() call returns, but after
+// EOF has been seen.
+type bodyEOFSignal struct {
+ body io.ReadCloser
+ fn func()
+ isClosed bool
+}
+
+func (es *bodyEOFSignal) Read(p []byte) (n int, err error) {
+ n, err = es.body.Read(p)
+ if es.isClosed && n > 0 {
+ panic("http: unexpected bodyEOFSignal Read after Close; see issue 1725")
+ }
+ if err == io.EOF && es.fn != nil {
+ es.fn()
+ es.fn = nil
+ }
+ return
+}
+
+func (es *bodyEOFSignal) Close() (err error) {
+ if es.isClosed {
+ return nil
+ }
+ es.isClosed = true
+ err = es.body.Close()
+ if err == nil && es.fn != nil {
+ es.fn()
+ es.fn = nil
+ }
+ return
+}
+
+type readFirstCloseBoth struct {
+ io.ReadCloser
+ io.Closer
+}
+
+func (r *readFirstCloseBoth) Close() error {
+ if err := r.ReadCloser.Close(); err != nil {
+ r.Closer.Close()
+ return err
+ }
+ if err := r.Closer.Close(); err != nil {
+ return err
+ }
+ return nil
+}
+
+// discardOnCloseReadCloser consumes all its input on Close.
+type discardOnCloseReadCloser struct {
+ io.ReadCloser
+}
+
+func (d *discardOnCloseReadCloser) Close() error {
+ io.Copy(ioutil.Discard, d.ReadCloser) // ignore errors; likely invalid or already closed
+ return d.ReadCloser.Close()
+}
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Tests for transport.go
+
+package http_test
+
+import (
+ "bytes"
+ "compress/gzip"
+ "crypto/rand"
+ "fmt"
+ "io"
+ "io/ioutil"
+ . "net/http"
+ "net/http/httptest"
+ "net/url"
+ "strconv"
+ "strings"
+ "testing"
+ "time"
+)
+
+// TODO: test 5 pipelined requests with responses: 1) OK, 2) OK, Connection: Close
+// and then verify that the final 2 responses get errors back.
+
+// hostPortHandler writes back the client's "host:port".
+var hostPortHandler = HandlerFunc(func(w ResponseWriter, r *Request) {
+ if r.FormValue("close") == "true" {
+ w.Header().Set("Connection", "close")
+ }
+ w.Write([]byte(r.RemoteAddr))
+})
+
+// Make two subsequent requests and verify their responses are the same.
+// The response from the server is our own IP:port.
+func TestTransportKeepAlives(t *testing.T) {
+ ts := httptest.NewServer(hostPortHandler)
+ defer ts.Close()
+
+ for _, disableKeepAlive := range []bool{false, true} {
+ tr := &Transport{DisableKeepAlives: disableKeepAlive}
+ c := &Client{Transport: tr}
+
+ fetch := func(n int) string {
+ res, err := c.Get(ts.URL)
+ if err != nil {
+ t.Fatalf("error in disableKeepAlive=%v, req #%d, GET: %v", disableKeepAlive, n, err)
+ }
+ body, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ t.Fatalf("error in disableKeepAlive=%v, req #%d, ReadAll: %v", disableKeepAlive, n, err)
+ }
+ return string(body)
+ }
+
+ body1 := fetch(1)
+ body2 := fetch(2)
+
+ bodiesDiffer := body1 != body2
+ if bodiesDiffer != disableKeepAlive {
+ t.Errorf("error in disableKeepAlive=%v. unexpected bodiesDiffer=%v; body1=%q; body2=%q",
+ disableKeepAlive, bodiesDiffer, body1, body2)
+ }
+ }
+}
+
+func TestTransportConnectionCloseOnResponse(t *testing.T) {
+ ts := httptest.NewServer(hostPortHandler)
+ defer ts.Close()
+
+ for _, connectionClose := range []bool{false, true} {
+ tr := &Transport{}
+ c := &Client{Transport: tr}
+
+ fetch := func(n int) string {
+ req := new(Request)
+ var err error
+ req.URL, err = url.Parse(ts.URL + fmt.Sprintf("/?close=%v", connectionClose))
+ if err != nil {
+ t.Fatalf("URL parse error: %v", err)
+ }
+ req.Method = "GET"
+ req.Proto = "HTTP/1.1"
+ req.ProtoMajor = 1
+ req.ProtoMinor = 1
+
+ res, err := c.Do(req)
+ if err != nil {
+ t.Fatalf("error in connectionClose=%v, req #%d, Do: %v", connectionClose, n, err)
+ }
+ body, err := ioutil.ReadAll(res.Body)
+ defer res.Body.Close()
+ if err != nil {
+ t.Fatalf("error in connectionClose=%v, req #%d, ReadAll: %v", connectionClose, n, err)
+ }
+ return string(body)
+ }
+
+ body1 := fetch(1)
+ body2 := fetch(2)
+ bodiesDiffer := body1 != body2
+ if bodiesDiffer != connectionClose {
+ t.Errorf("error in connectionClose=%v. unexpected bodiesDiffer=%v; body1=%q; body2=%q",
+ connectionClose, bodiesDiffer, body1, body2)
+ }
+ }
+}
+
+func TestTransportConnectionCloseOnRequest(t *testing.T) {
+ ts := httptest.NewServer(hostPortHandler)
+ defer ts.Close()
+
+ for _, connectionClose := range []bool{false, true} {
+ tr := &Transport{}
+ c := &Client{Transport: tr}
+
+ fetch := func(n int) string {
+ req := new(Request)
+ var err error
+ req.URL, err = url.Parse(ts.URL)
+ if err != nil {
+ t.Fatalf("URL parse error: %v", err)
+ }
+ req.Method = "GET"
+ req.Proto = "HTTP/1.1"
+ req.ProtoMajor = 1
+ req.ProtoMinor = 1
+ req.Close = connectionClose
+
+ res, err := c.Do(req)
+ if err != nil {
+ t.Fatalf("error in connectionClose=%v, req #%d, Do: %v", connectionClose, n, err)
+ }
+ body, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ t.Fatalf("error in connectionClose=%v, req #%d, ReadAll: %v", connectionClose, n, err)
+ }
+ return string(body)
+ }
+
+ body1 := fetch(1)
+ body2 := fetch(2)
+ bodiesDiffer := body1 != body2
+ if bodiesDiffer != connectionClose {
+ t.Errorf("error in connectionClose=%v. unexpected bodiesDiffer=%v; body1=%q; body2=%q",
+ connectionClose, bodiesDiffer, body1, body2)
+ }
+ }
+}
+
+func TestTransportIdleCacheKeys(t *testing.T) {
+ ts := httptest.NewServer(hostPortHandler)
+ defer ts.Close()
+
+ tr := &Transport{DisableKeepAlives: false}
+ c := &Client{Transport: tr}
+
+ if e, g := 0, len(tr.IdleConnKeysForTesting()); e != g {
+ t.Errorf("After CloseIdleConnections expected %d idle conn cache keys; got %d", e, g)
+ }
+
+ resp, err := c.Get(ts.URL)
+ if err != nil {
+ t.Error(err)
+ }
+ ioutil.ReadAll(resp.Body)
+
+ keys := tr.IdleConnKeysForTesting()
+ if e, g := 1, len(keys); e != g {
+ t.Fatalf("After Get expected %d idle conn cache keys; got %d", e, g)
+ }
+
+ if e := "|http|" + ts.Listener.Addr().String(); keys[0] != e {
+ t.Errorf("Expected idle cache key %q; got %q", e, keys[0])
+ }
+
+ tr.CloseIdleConnections()
+ if e, g := 0, len(tr.IdleConnKeysForTesting()); e != g {
+ t.Errorf("After CloseIdleConnections expected %d idle conn cache keys; got %d", e, g)
+ }
+}
+
+func TestTransportMaxPerHostIdleConns(t *testing.T) {
+ resch := make(chan string)
+ gotReq := make(chan bool)
+ ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
+ gotReq <- true
+ msg := <-resch
+ _, err := w.Write([]byte(msg))
+ if err != nil {
+ t.Fatalf("Write: %v", err)
+ }
+ }))
+ defer ts.Close()
+ maxIdleConns := 2
+ tr := &Transport{DisableKeepAlives: false, MaxIdleConnsPerHost: maxIdleConns}
+ c := &Client{Transport: tr}
+
+ // Start 3 outstanding requests and wait for the server to get them.
+ // Their responses will hang until we write to resch, though.
+ donech := make(chan bool)
+ doReq := func() {
+ resp, err := c.Get(ts.URL)
+ if err != nil {
+ t.Error(err)
+ }
+ _, err = ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Fatalf("ReadAll: %v", err)
+ }
+ donech <- true
+ }
+ go doReq()
+ <-gotReq
+ go doReq()
+ <-gotReq
+ go doReq()
+ <-gotReq
+
+ if e, g := 0, len(tr.IdleConnKeysForTesting()); e != g {
+ t.Fatalf("Before writes, expected %d idle conn cache keys; got %d", e, g)
+ }
+
+ resch <- "res1"
+ <-donech
+ keys := tr.IdleConnKeysForTesting()
+ if e, g := 1, len(keys); e != g {
+ t.Fatalf("after first response, expected %d idle conn cache keys; got %d", e, g)
+ }
+ cacheKey := "|http|" + ts.Listener.Addr().String()
+ if keys[0] != cacheKey {
+ t.Fatalf("Expected idle cache key %q; got %q", cacheKey, keys[0])
+ }
+ if e, g := 1, tr.IdleConnCountForTesting(cacheKey); e != g {
+ t.Errorf("after first response, expected %d idle conns; got %d", e, g)
+ }
+
+ resch <- "res2"
+ <-donech
+ if e, g := 2, tr.IdleConnCountForTesting(cacheKey); e != g {
+ t.Errorf("after second response, expected %d idle conns; got %d", e, g)
+ }
+
+ resch <- "res3"
+ <-donech
+ if e, g := maxIdleConns, tr.IdleConnCountForTesting(cacheKey); e != g {
+ t.Errorf("after third response, still expected %d idle conns; got %d", e, g)
+ }
+}
+
+func TestTransportServerClosingUnexpectedly(t *testing.T) {
+ ts := httptest.NewServer(hostPortHandler)
+ defer ts.Close()
+
+ tr := &Transport{}
+ c := &Client{Transport: tr}
+
+ fetch := func(n, retries int) string {
+ condFatalf := func(format string, arg ...interface{}) {
+ if retries <= 0 {
+ t.Fatalf(format, arg...)
+ }
+ t.Logf("retrying shortly after expected error: "+format, arg...)
+ time.Sleep(1e9 / int64(retries))
+ }
+ for retries >= 0 {
+ retries--
+ res, err := c.Get(ts.URL)
+ if err != nil {
+ condFatalf("error in req #%d, GET: %v", n, err)
+ continue
+ }
+ body, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ condFatalf("error in req #%d, ReadAll: %v", n, err)
+ continue
+ }
+ res.Body.Close()
+ return string(body)
+ }
+ panic("unreachable")
+ }
+
+ body1 := fetch(1, 0)
+ body2 := fetch(2, 0)
+
+ ts.CloseClientConnections() // surprise!
+
+ // This test has an expected race. Sleeping for 25 ms prevents
+ // it on most fast machines, causing the next fetch() call to
+ // succeed quickly. But if we do get errors, fetch() will retry 5
+ // times with some delays between.
+ time.Sleep(25e6)
+
+ body3 := fetch(3, 5)
+
+ if body1 != body2 {
+ t.Errorf("expected body1 and body2 to be equal")
+ }
+ if body2 == body3 {
+ t.Errorf("expected body2 and body3 to be different")
+ }
+}
+
+// TestTransportHeadResponses verifies that responses to HEAD requests
+// with a Content-Length header but no body are handled properly.
+func TestTransportHeadResponses(t *testing.T) {
+ ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
+ if r.Method != "HEAD" {
+ panic("expected HEAD; got " + r.Method)
+ }
+ w.Header().Set("Content-Length", "123")
+ w.WriteHeader(200)
+ }))
+ defer ts.Close()
+
+ tr := &Transport{DisableKeepAlives: false}
+ c := &Client{Transport: tr}
+ for i := 0; i < 2; i++ {
+ res, err := c.Head(ts.URL)
+ if err != nil {
+ t.Errorf("error on loop %d: %v", i, err)
+ }
+ if e, g := "123", res.Header.Get("Content-Length"); e != g {
+ t.Errorf("loop %d: expected Content-Length header of %q, got %q", i, e, g)
+ }
+ if e, g := int64(0), res.ContentLength; e != g {
+ t.Errorf("loop %d: expected res.ContentLength of %v, got %v", i, e, g)
+ }
+ }
+}
+
+// TestTransportHeadChunkedResponse verifies that we ignore chunked transfer-encoding
+// on responses to HEAD requests.
+func TestTransportHeadChunkedResponse(t *testing.T) {
+ ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
+ if r.Method != "HEAD" {
+ panic("expected HEAD; got " + r.Method)
+ }
+ w.Header().Set("Transfer-Encoding", "chunked") // client should ignore
+ w.Header().Set("x-client-ipport", r.RemoteAddr)
+ w.WriteHeader(200)
+ }))
+ defer ts.Close()
+
+ tr := &Transport{DisableKeepAlives: false}
+ c := &Client{Transport: tr}
+
+ res1, err := c.Head(ts.URL)
+ if err != nil {
+ t.Fatalf("request 1 error: %v", err)
+ }
+ res2, err := c.Head(ts.URL)
+ if err != nil {
+ t.Fatalf("request 2 error: %v", err)
+ }
+ if v1, v2 := res1.Header.Get("x-client-ipport"), res2.Header.Get("x-client-ipport"); v1 != v2 {
+ t.Errorf("ip/ports differed between head requests: %q vs %q", v1, v2)
+ }
+}
+
+var roundTripTests = []struct {
+ accept string
+ expectAccept string
+ compressed bool
+}{
+ // Requests with no accept-encoding header use transparent compression
+ {"", "gzip", false},
+ // Requests with other accept-encoding should pass through unmodified
+ {"foo", "foo", false},
+ // Requests with accept-encoding == gzip should be passed through
+ {"gzip", "gzip", true},
+}
+
+// Test that the modification made to the Request by the RoundTripper is cleaned up
+func TestRoundTripGzip(t *testing.T) {
+ const responseBody = "test response body"
+ ts := httptest.NewServer(HandlerFunc(func(rw ResponseWriter, req *Request) {
+ accept := req.Header.Get("Accept-Encoding")
+ if expect := req.FormValue("expect_accept"); accept != expect {
+ t.Errorf("in handler, test %v: Accept-Encoding = %q, want %q",
+ req.FormValue("testnum"), accept, expect)
+ }
+ if accept == "gzip" {
+ rw.Header().Set("Content-Encoding", "gzip")
+ gz, _ := gzip.NewWriter(rw)
+ gz.Write([]byte(responseBody))
+ gz.Close()
+ } else {
+ rw.Header().Set("Content-Encoding", accept)
+ rw.Write([]byte(responseBody))
+ }
+ }))
+ defer ts.Close()
+
+ for i, test := range roundTripTests {
+ // Test basic request (no accept-encoding)
+ req, _ := NewRequest("GET", fmt.Sprintf("%s/?testnum=%d&expect_accept=%s", ts.URL, i, test.expectAccept), nil)
+ if test.accept != "" {
+ req.Header.Set("Accept-Encoding", test.accept)
+ }
+ res, err := DefaultTransport.RoundTrip(req)
+ var body []byte
+ if test.compressed {
+ gzip, _ := gzip.NewReader(res.Body)
+ body, err = ioutil.ReadAll(gzip)
+ res.Body.Close()
+ } else {
+ body, err = ioutil.ReadAll(res.Body)
+ }
+ if err != nil {
+ t.Errorf("%d. Error: %q", i, err)
+ continue
+ }
+ if g, e := string(body), responseBody; g != e {
+ t.Errorf("%d. body = %q; want %q", i, g, e)
+ }
+ if g, e := req.Header.Get("Accept-Encoding"), test.accept; g != e {
+ t.Errorf("%d. Accept-Encoding = %q; want %q (it was mutated, in violation of RoundTrip contract)", i, g, e)
+ }
+ if g, e := res.Header.Get("Content-Encoding"), test.accept; g != e {
+ t.Errorf("%d. Content-Encoding = %q; want %q", i, g, e)
+ }
+ }
+}
+
+func TestTransportGzip(t *testing.T) {
+ const testString = "The test string aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
+ const nRandBytes = 1024 * 1024
+ ts := httptest.NewServer(HandlerFunc(func(rw ResponseWriter, req *Request) {
+ if g, e := req.Header.Get("Accept-Encoding"), "gzip"; g != e {
+ t.Errorf("Accept-Encoding = %q, want %q", g, e)
+ }
+ rw.Header().Set("Content-Encoding", "gzip")
+ if req.Method == "HEAD" {
+ return
+ }
+
+ var w io.Writer = rw
+ var buf bytes.Buffer
+ if req.FormValue("chunked") == "0" {
+ w = &buf
+ defer io.Copy(rw, &buf)
+ defer func() {
+ rw.Header().Set("Content-Length", strconv.Itoa(buf.Len()))
+ }()
+ }
+ gz, _ := gzip.NewWriter(w)
+ gz.Write([]byte(testString))
+ if req.FormValue("body") == "large" {
+ io.CopyN(gz, rand.Reader, nRandBytes)
+ }
+ gz.Close()
+ }))
+ defer ts.Close()
+
+ for _, chunked := range []string{"1", "0"} {
+ c := &Client{Transport: &Transport{}}
+
+ // First fetch something large, but only read some of it.
+ res, err := c.Get(ts.URL + "/?body=large&chunked=" + chunked)
+ if err != nil {
+ t.Fatalf("large get: %v", err)
+ }
+ buf := make([]byte, len(testString))
+ n, err := io.ReadFull(res.Body, buf)
+ if err != nil {
+ t.Fatalf("partial read of large response: size=%d, %v", n, err)
+ }
+ if e, g := testString, string(buf); e != g {
+ t.Errorf("partial read got %q, expected %q", g, e)
+ }
+ res.Body.Close()
+ // Read on the body, even though it's closed
+ n, err = res.Body.Read(buf)
+ if n != 0 || err == nil {
+ t.Errorf("expected error post-closed large Read; got = %d, %v", n, err)
+ }
+
+ // Then something small.
+ res, err = c.Get(ts.URL + "/?chunked=" + chunked)
+ if err != nil {
+ t.Fatal(err)
+ }
+ body, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if g, e := string(body), testString; g != e {
+ t.Fatalf("body = %q; want %q", g, e)
+ }
+ if g, e := res.Header.Get("Content-Encoding"), ""; g != e {
+ t.Fatalf("Content-Encoding = %q; want %q", g, e)
+ }
+
+ // Read on the body after it's been fully read:
+ n, err = res.Body.Read(buf)
+ if n != 0 || err == nil {
+ t.Errorf("expected Read error after exhausted reads; got %d, %v", n, err)
+ }
+ res.Body.Close()
+ n, err = res.Body.Read(buf)
+ if n != 0 || err == nil {
+ t.Errorf("expected Read error after Close; got %d, %v", n, err)
+ }
+ }
+
+ // And a HEAD request too, because they're always weird.
+ c := &Client{Transport: &Transport{}}
+ res, err := c.Head(ts.URL)
+ if err != nil {
+ t.Fatalf("Head: %v", err)
+ }
+ if res.StatusCode != 200 {
+ t.Errorf("Head status=%d; want=200", res.StatusCode)
+ }
+}
+
+func TestTransportProxy(t *testing.T) {
+ ch := make(chan string, 1)
+ ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
+ ch <- "real server"
+ }))
+ defer ts.Close()
+ proxy := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
+ ch <- "proxy for " + r.URL.String()
+ }))
+ defer proxy.Close()
+
+ pu, err := url.Parse(proxy.URL)
+ if err != nil {
+ t.Fatal(err)
+ }
+ c := &Client{Transport: &Transport{Proxy: ProxyURL(pu)}}
+ c.Head(ts.URL)
+ got := <-ch
+ want := "proxy for " + ts.URL + "/"
+ if got != want {
+ t.Errorf("want %q, got %q", want, got)
+ }
+}
+
+// TestTransportGzipRecursive sends a gzip quine and checks that the
+// client gets the same value back. This is more cute than anything,
+// but checks that we don't recurse forever, and checks that
+// Content-Encoding is removed.
+func TestTransportGzipRecursive(t *testing.T) {
+ ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
+ w.Header().Set("Content-Encoding", "gzip")
+ w.Write(rgz)
+ }))
+ defer ts.Close()
+
+ c := &Client{Transport: &Transport{}}
+ res, err := c.Get(ts.URL)
+ if err != nil {
+ t.Fatal(err)
+ }
+ body, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !bytes.Equal(body, rgz) {
+ t.Fatalf("Incorrect result from recursive gz:\nhave=%x\nwant=%x",
+ body, rgz)
+ }
+ if g, e := res.Header.Get("Content-Encoding"), ""; g != e {
+ t.Fatalf("Content-Encoding = %q; want %q", g, e)
+ }
+}
+
+type fooProto struct{}
+
+func (fooProto) RoundTrip(req *Request) (*Response, error) {
+ res := &Response{
+ Status: "200 OK",
+ StatusCode: 200,
+ Header: make(Header),
+ Body: ioutil.NopCloser(strings.NewReader("You wanted " + req.URL.String())),
+ }
+ return res, nil
+}
+
+func TestTransportAltProto(t *testing.T) {
+ tr := &Transport{}
+ c := &Client{Transport: tr}
+ tr.RegisterProtocol("foo", fooProto{})
+ res, err := c.Get("foo://bar.com/path")
+ if err != nil {
+ t.Fatal(err)
+ }
+ bodyb, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ t.Fatal(err)
+ }
+ body := string(bodyb)
+ if e := "You wanted foo://bar.com/path"; body != e {
+ t.Errorf("got response %q, want %q", body, e)
+ }
+}
+
+// rgz is a gzip quine that uncompresses to itself.
+var rgz = []byte{
+ 0x1f, 0x8b, 0x08, 0x08, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73,
+ 0x69, 0x76, 0x65, 0x00, 0x92, 0xef, 0xe6, 0xe0,
+ 0x60, 0x00, 0x83, 0xa2, 0xd4, 0xe4, 0xd2, 0xa2,
+ 0xe2, 0xcc, 0xb2, 0x54, 0x06, 0x00, 0x00, 0x17,
+ 0x00, 0xe8, 0xff, 0x92, 0xef, 0xe6, 0xe0, 0x60,
+ 0x00, 0x83, 0xa2, 0xd4, 0xe4, 0xd2, 0xa2, 0xe2,
+ 0xcc, 0xb2, 0x54, 0x06, 0x00, 0x00, 0x17, 0x00,
+ 0xe8, 0xff, 0x42, 0x12, 0x46, 0x16, 0x06, 0x00,
+ 0x05, 0x00, 0xfa, 0xff, 0x42, 0x12, 0x46, 0x16,
+ 0x06, 0x00, 0x05, 0x00, 0xfa, 0xff, 0x00, 0x05,
+ 0x00, 0xfa, 0xff, 0x00, 0x14, 0x00, 0xeb, 0xff,
+ 0x42, 0x12, 0x46, 0x16, 0x06, 0x00, 0x05, 0x00,
+ 0xfa, 0xff, 0x00, 0x05, 0x00, 0xfa, 0xff, 0x00,
+ 0x14, 0x00, 0xeb, 0xff, 0x42, 0x88, 0x21, 0xc4,
+ 0x00, 0x00, 0x14, 0x00, 0xeb, 0xff, 0x42, 0x88,
+ 0x21, 0xc4, 0x00, 0x00, 0x14, 0x00, 0xeb, 0xff,
+ 0x42, 0x88, 0x21, 0xc4, 0x00, 0x00, 0x14, 0x00,
+ 0xeb, 0xff, 0x42, 0x88, 0x21, 0xc4, 0x00, 0x00,
+ 0x14, 0x00, 0xeb, 0xff, 0x42, 0x88, 0x21, 0xc4,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00,
+ 0x00, 0xff, 0xff, 0x00, 0x17, 0x00, 0xe8, 0xff,
+ 0x42, 0x88, 0x21, 0xc4, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00,
+ 0x17, 0x00, 0xe8, 0xff, 0x42, 0x12, 0x46, 0x16,
+ 0x06, 0x00, 0x00, 0x00, 0xff, 0xff, 0x01, 0x08,
+ 0x00, 0xf7, 0xff, 0x3d, 0xb1, 0x20, 0x85, 0xfa,
+ 0x00, 0x00, 0x00, 0x42, 0x12, 0x46, 0x16, 0x06,
+ 0x00, 0x00, 0x00, 0xff, 0xff, 0x01, 0x08, 0x00,
+ 0xf7, 0xff, 0x3d, 0xb1, 0x20, 0x85, 0xfa, 0x00,
+ 0x00, 0x00, 0x3d, 0xb1, 0x20, 0x85, 0xfa, 0x00,
+ 0x00, 0x00,
+}
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http
+
+import (
+ "net"
+ "os"
+)
+
+func init() {
+ remoteSideClosedFunc = func(err error) (out bool) {
+ op, ok := err.(*net.OpError)
+ if ok && op.Op == "WSARecv" && op.Net == "tcp" && op.Err == os.Errno(10058) {
+ // TODO(bradfitz): find the symbol for 10058
+ return true
+ }
+ return false
+ }
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "bytes"
+ "expvar"
+ "flag"
+ "fmt"
+ "io"
+ "log"
+ "net/http"
+ "os"
+ "strconv"
+)
+
+// hello world, the web server
+var helloRequests = expvar.NewInt("hello-requests")
+
+func HelloServer(w http.ResponseWriter, req *http.Request) {
+ helloRequests.Add(1)
+ io.WriteString(w, "hello, world!\n")
+}
+
+// Simple counter server. POSTing to it will set the value.
+type Counter struct {
+ n int
+}
+
+// This makes Counter satisfy the expvar.Var interface, so we can export
+// it directly.
+func (ctr *Counter) String() string { return fmt.Sprintf("%d", ctr.n) }
+
+func (ctr *Counter) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+ switch req.Method {
+ case "GET":
+ ctr.n++
+ case "POST":
+ buf := new(bytes.Buffer)
+ io.Copy(buf, req.Body)
+ body := buf.String()
+ if n, err := strconv.Atoi(body); err != nil {
+ fmt.Fprintf(w, "bad POST: %v\nbody: [%v]\n", err, body)
+ } else {
+ ctr.n = n
+ fmt.Fprint(w, "counter reset\n")
+ }
+ }
+ fmt.Fprintf(w, "counter = %d\n", ctr.n)
+}
+
+// simple flag server
+var booleanflag = flag.Bool("boolean", true, "another flag for testing")
+
+func FlagServer(w http.ResponseWriter, req *http.Request) {
+ w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+ fmt.Fprint(w, "Flags:\n")
+ flag.VisitAll(func(f *flag.Flag) {
+ if f.Value.String() != f.DefValue {
+ fmt.Fprintf(w, "%s = %s [default = %s]\n", f.Name, f.Value.String(), f.DefValue)
+ } else {
+ fmt.Fprintf(w, "%s = %s\n", f.Name, f.Value.String())
+ }
+ })
+}
+
+// simple argument server
+func ArgServer(w http.ResponseWriter, req *http.Request) {
+ for _, s := range os.Args {
+ fmt.Fprint(w, s, " ")
+ }
+}
+
+// a channel (just for the fun of it)
+type Chan chan int
+
+func ChanCreate() Chan {
+ c := make(Chan)
+ go func(c Chan) {
+ for x := 0; ; x++ {
+ c <- x
+ }
+ }(c)
+ return c
+}
+
+func (ch Chan) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+ io.WriteString(w, fmt.Sprintf("channel send #%d\n", <-ch))
+}
+
+// exec a program, redirecting output
+func DateServer(rw http.ResponseWriter, req *http.Request) {
+ rw.Header().Set("Content-Type", "text/plain; charset=utf-8")
+ r, w, err := os.Pipe()
+ if err != nil {
+ fmt.Fprintf(rw, "pipe: %s\n", err)
+ return
+ }
+
+ p, err := os.StartProcess("/bin/date", []string{"date"}, &os.ProcAttr{Files: []*os.File{nil, w, w}})
+ defer r.Close()
+ w.Close()
+ if err != nil {
+ fmt.Fprintf(rw, "fork/exec: %s\n", err)
+ return
+ }
+ defer p.Release()
+ io.Copy(rw, r)
+ wait, err := p.Wait(0)
+ if err != nil {
+ fmt.Fprintf(rw, "wait: %s\n", err)
+ return
+ }
+ if !wait.Exited() || wait.ExitStatus() != 0 {
+ fmt.Fprintf(rw, "date: %v\n", wait)
+ return
+ }
+}
+
+func Logger(w http.ResponseWriter, req *http.Request) {
+ log.Print(req.URL.Raw)
+ w.WriteHeader(404)
+ w.Write([]byte("oops"))
+}
+
+var webroot = flag.String("root", "/home/rsc", "web root directory")
+
+func main() {
+ flag.Parse()
+
+ // The counter is published as a variable directly.
+ ctr := new(Counter)
+ http.Handle("/counter", ctr)
+ expvar.Publish("counter", ctr)
+
+ http.Handle("/", http.HandlerFunc(Logger))
+ http.Handle("/go/", http.StripPrefix("/go/", http.FileServer(http.Dir(*webroot))))
+ http.Handle("/flags", http.HandlerFunc(FlagServer))
+ http.Handle("/args", http.HandlerFunc(ArgServer))
+ http.Handle("/go/hello", http.HandlerFunc(HelloServer))
+ http.Handle("/chan", ChanCreate())
+ http.Handle("/date", http.HandlerFunc(DateServer))
+ err := http.ListenAndServe(":12345", nil)
+ if err != nil {
+ log.Panicln("ListenAndServe:", err)
+ }
+}
import (
"bytes"
"reflect"
- "testing"
"runtime"
+ "testing"
)
func isEqual(a, b []byte) bool {
}
func TestGmailTXT(t *testing.T) {
- if runtime.GOOS == "windows" {
- t.Logf("LookupTXT is not implemented on Windows")
- return
- }
if testing.Short() || avoidMacFirewall {
t.Logf("skipping test to avoid external network")
return
package net
import (
- "errors"
- "syscall"
- "unsafe"
"os"
"sync"
+ "syscall"
+ "unsafe"
)
var (
func LookupCNAME(name string) (cname string, err error) {
var r *syscall.DNSRecord
e := syscall.DnsQuery(name, syscall.DNS_TYPE_CNAME, 0, nil, &r, nil)
- if int(e) != 0 {
+ if e != 0 {
return "", os.NewSyscallError("LookupCNAME", int(e))
}
defer syscall.DnsRecordListFree(r, 1)
}
var r *syscall.DNSRecord
e := syscall.DnsQuery(target, syscall.DNS_TYPE_SRV, 0, nil, &r, nil)
- if int(e) != 0 {
+ if e != 0 {
return "", nil, os.NewSyscallError("LookupSRV", int(e))
}
defer syscall.DnsRecordListFree(r, 1)
func LookupMX(name string) (mx []*MX, err error) {
var r *syscall.DNSRecord
e := syscall.DnsQuery(name, syscall.DNS_TYPE_MX, 0, nil, &r, nil)
- if int(e) != 0 {
+ if e != 0 {
return nil, os.NewSyscallError("LookupMX", int(e))
}
defer syscall.DnsRecordListFree(r, 1)
}
func LookupTXT(name string) (txt []string, err error) {
- return nil, errors.New("net.LookupTXT is not implemented on Windows")
+ var r *syscall.DNSRecord
+ e := syscall.DnsQuery(name, syscall.DNS_TYPE_TEXT, 0, nil, &r, nil)
+ if e != 0 {
+ return nil, os.NewSyscallError("LookupTXT", int(e))
+ }
+ defer syscall.DnsRecordListFree(r, 1)
+ txt = make([]string, 0, 10)
+ if r != nil && r.Type == syscall.DNS_TYPE_TEXT {
+ d := (*syscall.DNSTXTData)(unsafe.Pointer(&r.Data[0]))
+ for _, v := range (*[1 << 10]*uint16)(unsafe.Pointer(&(d.StringArray[0])))[:d.StringCount] {
+ s := syscall.UTF16ToString((*[1 << 20]uint16)(unsafe.Pointer(v))[:])
+ txt = append(txt, s)
+ }
+ }
+ return
}
func LookupAddr(addr string) (name []string, err error) {
}
var r *syscall.DNSRecord
e := syscall.DnsQuery(arpa, syscall.DNS_TYPE_PTR, 0, nil, &r, nil)
- if int(e) != 0 {
+ if e != 0 {
return nil, os.NewSyscallError("LookupAddr", int(e))
}
defer syscall.DnsRecordListFree(r, 1)
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package mail implements parsing of mail messages.
+
+For the most part, this package follows the syntax as specified by RFC 5322.
+Notable divergences:
+ * Obsolete address formats are not parsed, including addresses with
+ embedded route information.
+ * Group addresses are not parsed.
+ * The full range of spacing (the CFWS syntax element) is not supported,
+ such as breaking addresses across lines.
+*/
+package mail
+
+import (
+ "bufio"
+ "bytes"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "net/textproto"
+ "strconv"
+ "strings"
+ "time"
+)
+
+var debug = debugT(false)
+
+type debugT bool
+
+func (d debugT) Printf(format string, args ...interface{}) {
+ if d {
+ log.Printf(format, args...)
+ }
+}
+
+// A Message represents a parsed mail message.
+type Message struct {
+ Header Header
+ Body io.Reader
+}
+
+// ReadMessage reads a message from r.
+// The headers are parsed, and the body of the message can be read from msg.Body.
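+//
+// A minimal usage sketch:
+//
+//   msg, err := mail.ReadMessage(r)
+//   if err != nil {
+//       // handle error
+//   }
+//   from := msg.Header.Get("From")
+//   date, err := msg.Header.Date()
+//   body, err := ioutil.ReadAll(msg.Body)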
+func ReadMessage(r io.Reader) (msg *Message, err error) {
+ tp := textproto.NewReader(bufio.NewReader(r))
+
+ hdr, err := tp.ReadMIMEHeader()
+ if err != nil {
+ return nil, err
+ }
+
+ return &Message{
+ Header: Header(hdr),
+ Body: tp.R,
+ }, nil
+}
+
+// Layouts suitable for passing to time.Parse.
+// These are tried in order.
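+//
+// Dates accepted by these layouts include, for example:
+//
+//   "Fri, 21 Nov 1997 09:55:06 -0600"   (RFC 5322, Appendix A.1.1)
+//   "21 Nov 97 09:55:06 GMT"            (obsolete two-digit-year form)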
+var dateLayouts []string
+
+func init() {
+ // Generate layouts based on RFC 5322, section 3.3.
+
+ dows := [...]string{"", "Mon, "} // day-of-week
+ days := [...]string{"2", "02"} // day = 1*2DIGIT
+ years := [...]string{"2006", "06"} // year = 4*DIGIT / 2*DIGIT
+ seconds := [...]string{":05", ""} // second
+ zones := [...]string{"-0700", "MST"} // zone = (("+" / "-") 4DIGIT) / "GMT" / ...
+
+ for _, dow := range dows {
+ for _, day := range days {
+ for _, year := range years {
+ for _, second := range seconds {
+ for _, zone := range zones {
+ s := dow + day + " Jan " + year + " 15:04" + second + " " + zone
+ dateLayouts = append(dateLayouts, s)
+ }
+ }
+ }
+ }
+ }
+}
+
+func parseDate(date string) (*time.Time, error) {
+ for _, layout := range dateLayouts {
+ t, err := time.Parse(layout, date)
+ if err == nil {
+ return t, nil
+ }
+ }
+ return nil, errors.New("mail: header could not be parsed")
+}
+
+// A Header represents the key-value pairs in a mail message header.
+type Header map[string][]string
+
+// Get gets the first value associated with the given key.
+// If there are no values associated with the key, Get returns "".
+func (h Header) Get(key string) string {
+ return textproto.MIMEHeader(h).Get(key)
+}
+
+var ErrHeaderNotPresent = errors.New("mail: header not in message")
+
+// Date parses the Date header field.
+func (h Header) Date() (*time.Time, error) {
+ hdr := h.Get("Date")
+ if hdr == "" {
+ return nil, ErrHeaderNotPresent
+ }
+ return parseDate(hdr)
+}
+
+// AddressList parses the named header field as a list of addresses.
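+//
+// For example (a sketch), given a message with the header
+// "To: Mary Smith <mary@x.test>, jdoe@example.org":
+//
+//   addrs, err := msg.Header.AddressList("To")
+//   // addrs holds two *Address values: {"Mary Smith", "mary@x.test"}
+//   // and {"", "jdoe@example.org"}.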
+func (h Header) AddressList(key string) ([]*Address, error) {
+ hdr := h.Get(key)
+ if hdr == "" {
+ return nil, ErrHeaderNotPresent
+ }
+ return newAddrParser(hdr).parseAddressList()
+}
+
+// Address represents a single mail address.
+// An address such as "Barry Gibbs <bg@example.com>" is represented
+// as Address{Name: "Barry Gibbs", Address: "bg@example.com"}.
+type Address struct {
+ Name string // Proper name; may be empty.
+ Address string // user@domain
+}
+
+// String formats the address as a valid RFC 5322 address.
+// If the address's name contains non-ASCII characters
+// the name will be rendered according to RFC 2047.
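+//
+// For example (an illustrative sketch of the rules below):
+//
+//   (&Address{Name: "Bob", Address: "bob@example.com"}).String()
+//       returns `"Bob" <bob@example.com>`
+//   (&Address{Name: "Jörg", Address: "j@example.com"}).String()
+//       returns "=?utf-8?q?J=C3=B6rg?= <j@example.com>"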
+func (a *Address) String() string {
+ s := "<" + a.Address + ">"
+ if a.Name == "" {
+ return s
+ }
+ // If every character is printable ASCII, quoting is simple.
+ allPrintable := true
+ for i := 0; i < len(a.Name); i++ {
+ if !isVchar(a.Name[i]) {
+ allPrintable = false
+ break
+ }
+ }
+ if allPrintable {
+ b := bytes.NewBufferString(`"`)
+ for i := 0; i < len(a.Name); i++ {
+ if !isQtext(a.Name[i]) {
+ b.WriteByte('\\')
+ }
+ b.WriteByte(a.Name[i])
+ }
+ b.WriteString(`" `)
+ b.WriteString(s)
+ return b.String()
+ }
+
+ // UTF-8 "Q" encoding
+ b := bytes.NewBufferString("=?utf-8?q?")
+ for i := 0; i < len(a.Name); i++ {
+ switch c := a.Name[i]; {
+ case c == ' ':
+ b.WriteByte('_')
+ case isVchar(c) && c != '=' && c != '?' && c != '_':
+ b.WriteByte(c)
+ default:
+ fmt.Fprintf(b, "=%02X", c)
+ }
+ }
+ b.WriteString("?= ")
+ b.WriteString(s)
+ return b.String()
+}
+
+type addrParser []byte
+
+func newAddrParser(s string) *addrParser {
+ p := addrParser([]byte(s))
+ return &p
+}
+
+func (p *addrParser) parseAddressList() ([]*Address, error) {
+ var list []*Address
+ for {
+ p.skipSpace()
+ addr, err := p.parseAddress()
+ if err != nil {
+ return nil, err
+ }
+ list = append(list, addr)
+
+ p.skipSpace()
+ if p.empty() {
+ break
+ }
+ if !p.consume(',') {
+ return nil, errors.New("mail: expected comma")
+ }
+ }
+ return list, nil
+}
+
+// parseAddress parses a single RFC 5322 address at the start of p.
+func (p *addrParser) parseAddress() (addr *Address, err error) {
+ debug.Printf("parseAddress: %q", *p)
+ p.skipSpace()
+ if p.empty() {
+ return nil, errors.New("mail: no address")
+ }
+
+ // address = name-addr / addr-spec
+ // TODO(dsymonds): Support parsing group address.
+
+ // addr-spec has a more restricted grammar than name-addr,
+ // so try parsing it first, and fallback to name-addr.
+ // TODO(dsymonds): Is this really correct?
+ spec, err := p.consumeAddrSpec()
+ if err == nil {
+ return &Address{
+ Address: spec,
+ }, err
+ }
+ debug.Printf("parseAddress: not an addr-spec: %v", err)
+ debug.Printf("parseAddress: state is now %q", *p)
+
+ // display-name
+ var displayName string
+ if p.peek() != '<' {
+ displayName, err = p.consumePhrase()
+ if err != nil {
+ return nil, err
+ }
+ }
+ debug.Printf("parseAddress: displayName=%q", displayName)
+
+ // angle-addr = "<" addr-spec ">"
+ p.skipSpace()
+ if !p.consume('<') {
+ return nil, errors.New("mail: no angle-addr")
+ }
+ spec, err = p.consumeAddrSpec()
+ if err != nil {
+ return nil, err
+ }
+ if !p.consume('>') {
+ return nil, errors.New("mail: unclosed angle-addr")
+ }
+ debug.Printf("parseAddress: spec=%q", spec)
+
+ return &Address{
+ Name: displayName,
+ Address: spec,
+ }, nil
+}
+
+// consumeAddrSpec parses a single RFC 5322 addr-spec at the start of p.
+func (p *addrParser) consumeAddrSpec() (spec string, err error) {
+ debug.Printf("consumeAddrSpec: %q", *p)
+
+ orig := *p
+ defer func() {
+ if err != nil {
+ *p = orig
+ }
+ }()
+
+ // local-part = dot-atom / quoted-string
+ var localPart string
+ p.skipSpace()
+ if p.empty() {
+ return "", errors.New("mail: no addr-spec")
+ }
+ if p.peek() == '"' {
+ // quoted-string
+ debug.Printf("consumeAddrSpec: parsing quoted-string")
+ localPart, err = p.consumeQuotedString()
+ } else {
+ // dot-atom
+ debug.Printf("consumeAddrSpec: parsing dot-atom")
+ localPart, err = p.consumeAtom(true)
+ }
+ if err != nil {
+ debug.Printf("consumeAddrSpec: failed: %v", err)
+ return "", err
+ }
+
+ if !p.consume('@') {
+ return "", errors.New("mail: missing @ in addr-spec")
+ }
+
+ // domain = dot-atom / domain-literal
+ var domain string
+ p.skipSpace()
+ if p.empty() {
+ return "", errors.New("mail: no domain in addr-spec")
+ }
+ // TODO(dsymonds): Handle domain-literal
+ domain, err = p.consumeAtom(true)
+ if err != nil {
+ return "", err
+ }
+
+ return localPart + "@" + domain, nil
+}
+
+// consumePhrase parses the RFC 5322 phrase at the start of p.
+func (p *addrParser) consumePhrase() (phrase string, err error) {
+ debug.Printf("consumePhrase: [%s]", *p)
+ // phrase = 1*word
+ var words []string
+ for {
+ // word = atom / quoted-string
+ var word string
+ p.skipSpace()
+ if p.empty() {
+ return "", errors.New("mail: missing phrase")
+ }
+ if p.peek() == '"' {
+ // quoted-string
+ word, err = p.consumeQuotedString()
+ } else {
+ // atom
+ word, err = p.consumeAtom(false)
+ }
+
+ // RFC 2047 encoded-word starts with =?, ends with ?=, and has two other ?s.
+ if err == nil && strings.HasPrefix(word, "=?") && strings.HasSuffix(word, "?=") && strings.Count(word, "?") == 4 {
+ word, err = decodeRFC2047Word(word)
+ }
+
+ if err != nil {
+ break
+ }
+ debug.Printf("consumePhrase: consumed %q", word)
+ words = append(words, word)
+ }
+ // Ignore any error if we got at least one word.
+ if err != nil && len(words) == 0 {
+ debug.Printf("consumePhrase: hit err: %v", err)
+ return "", errors.New("mail: missing word in phrase")
+ }
+ phrase = strings.Join(words, " ")
+ return phrase, nil
+}
+
+// consumeQuotedString parses the quoted string at the start of p.
+func (p *addrParser) consumeQuotedString() (qs string, err error) {
+ // Assume first byte is '"'.
+ i := 1
+ qsb := make([]byte, 0, 10)
+Loop:
+ for {
+ if i >= p.len() {
+ return "", errors.New("mail: unclosed quoted-string")
+ }
+ switch c := (*p)[i]; {
+ case c == '"':
+ break Loop
+ case c == '\\':
+ if i+1 == p.len() {
+ return "", errors.New("mail: unclosed quoted-string")
+ }
+ qsb = append(qsb, (*p)[i+1])
+ i += 2
+ case isQtext(c), c == ' ' || c == '\t':
+ // qtext (printable US-ASCII excluding " and \), or
+ // FWS (almost; we're ignoring CRLF)
+ qsb = append(qsb, c)
+ i++
+ default:
+ return "", fmt.Errorf("mail: bad character in quoted-string: %q", c)
+ }
+ }
+ *p = (*p)[i+1:]
+ return string(qsb), nil
+}
+
+// consumeAtom parses an RFC 5322 atom at the start of p.
+// If dot is true, consumeAtom parses an RFC 5322 dot-atom instead.
+func (p *addrParser) consumeAtom(dot bool) (atom string, err error) {
+ if !isAtext(p.peek(), false) {
+ return "", errors.New("mail: invalid string")
+ }
+ i := 1
+ for ; i < p.len() && isAtext((*p)[i], dot); i++ {
+ }
+ // TODO(dsymonds): Remove the []byte() conversion here when 6g doesn't need it.
+ atom, *p = string([]byte((*p)[:i])), (*p)[i:]
+ return atom, nil
+}
+
+func (p *addrParser) consume(c byte) bool {
+ if p.empty() || p.peek() != c {
+ return false
+ }
+ *p = (*p)[1:]
+ return true
+}
+
+// skipSpace skips the leading space and tab characters.
+func (p *addrParser) skipSpace() {
+ *p = bytes.TrimLeft(*p, " \t")
+}
+
+func (p *addrParser) peek() byte {
+ return (*p)[0]
+}
+
+func (p *addrParser) empty() bool {
+ return p.len() == 0
+}
+
+func (p *addrParser) len() int {
+ return len(*p)
+}
+
+func decodeRFC2047Word(s string) (string, error) {
+ fields := strings.Split(s, "?")
+ if len(fields) != 5 || fields[0] != "=" || fields[4] != "=" {
+ return "", errors.New("mail: address not RFC 2047 encoded")
+ }
+ charset, enc := strings.ToLower(fields[1]), strings.ToLower(fields[2])
+ if charset != "iso-8859-1" && charset != "utf-8" {
+ return "", fmt.Errorf("mail: charset not supported: %q", charset)
+ }
+
+ in := bytes.NewBufferString(fields[3])
+ var r io.Reader
+ switch enc {
+ case "b":
+ r = base64.NewDecoder(base64.StdEncoding, in)
+ case "q":
+ r = qDecoder{r: in}
+ default:
+ return "", fmt.Errorf("mail: RFC 2047 encoding not supported: %q", enc)
+ }
+
+ dec, err := ioutil.ReadAll(r)
+ if err != nil {
+ return "", err
+ }
+
+ switch charset {
+ case "iso-8859-1":
+ b := new(bytes.Buffer)
+ for _, c := range dec {
+ b.WriteRune(rune(c))
+ }
+ return b.String(), nil
+ case "utf-8":
+ return string(dec), nil
+ }
+ panic("unreachable")
+}
+
+type qDecoder struct {
+ r io.Reader
+ scratch [2]byte
+}
+
+func (qd qDecoder) Read(p []byte) (n int, err error) {
+ // This method writes at most one byte into p.
+ if len(p) == 0 {
+ return 0, nil
+ }
+ if _, err := qd.r.Read(qd.scratch[:1]); err != nil {
+ return 0, err
+ }
+ switch c := qd.scratch[0]; {
+ case c == '=':
+ if _, err := io.ReadFull(qd.r, qd.scratch[:2]); err != nil {
+ return 0, err
+ }
+ x, err := strconv.Btoi64(string(qd.scratch[:2]), 16)
+ if err != nil {
+ return 0, fmt.Errorf("mail: invalid RFC 2047 encoding: %q", qd.scratch[:2])
+ }
+ p[0] = byte(x)
+ case c == '_':
+ p[0] = ' '
+ default:
+ p[0] = c
+ }
+ return 1, nil
+}
+
+var atextChars = []byte("ABCDEFGHIJKLMNOPQRSTUVWXYZ" +
+ "abcdefghijklmnopqrstuvwxyz" +
+ "0123456789" +
+ "!#$%&'*+-/=?^_`{|}~")
+
+// isAtext returns true if c is an RFC 5322 atext character.
+// If dot is true, period is included.
+func isAtext(c byte, dot bool) bool {
+ if dot && c == '.' {
+ return true
+ }
+ return bytes.IndexByte(atextChars, c) >= 0
+}
+
+// isQtext returns true if c is an RFC 5322 qtext character.
+func isQtext(c byte) bool {
+ // Printable US-ASCII, excluding backslash or quote.
+ if c == '\\' || c == '"' {
+ return false
+ }
+ return '!' <= c && c <= '~'
+}
+
+// isVchar returns true if c is an RFC 5322 VCHAR character.
+func isVchar(c byte) bool {
+ // Visible (printing) characters.
+ return '!' <= c && c <= '~'
+}
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mail
+
+import (
+ "bytes"
+ "io/ioutil"
+ "reflect"
+ "testing"
+ "time"
+)
+
+var parseTests = []struct {
+ in string
+ header Header
+ body string
+}{
+ {
+ // RFC 5322, Appendix A.1.1
+ in: `From: John Doe <jdoe@machine.example>
+To: Mary Smith <mary@example.net>
+Subject: Saying Hello
+Date: Fri, 21 Nov 1997 09:55:06 -0600
+Message-ID: <1234@local.machine.example>
+
+This is a message just to say hello.
+So, "Hello".
+`,
+ header: Header{
+ "From": []string{"John Doe <jdoe@machine.example>"},
+ "To": []string{"Mary Smith <mary@example.net>"},
+ "Subject": []string{"Saying Hello"},
+ "Date": []string{"Fri, 21 Nov 1997 09:55:06 -0600"},
+ "Message-Id": []string{"<1234@local.machine.example>"},
+ },
+ body: "This is a message just to say hello.\nSo, \"Hello\".\n",
+ },
+}
+
+func TestParsing(t *testing.T) {
+ for i, test := range parseTests {
+ msg, err := ReadMessage(bytes.NewBuffer([]byte(test.in)))
+ if err != nil {
+ t.Errorf("test #%d: Failed parsing message: %v", i, err)
+ continue
+ }
+ if !headerEq(msg.Header, test.header) {
+ t.Errorf("test #%d: Incorrectly parsed message header.\nGot:\n%+v\nWant:\n%+v",
+ i, msg.Header, test.header)
+ }
+ body, err := ioutil.ReadAll(msg.Body)
+ if err != nil {
+ t.Errorf("test #%d: Failed reading body: %v", i, err)
+ continue
+ }
+ bodyStr := string(body)
+ if bodyStr != test.body {
+ t.Errorf("test #%d: Incorrectly parsed message body.\nGot:\n%+v\nWant:\n%+v",
+ i, bodyStr, test.body)
+ }
+ }
+}
+
+func headerEq(a, b Header) bool {
+ if len(a) != len(b) {
+ return false
+ }
+ for k, as := range a {
+ bs, ok := b[k]
+ if !ok {
+ return false
+ }
+ if !reflect.DeepEqual(as, bs) {
+ return false
+ }
+ }
+ return true
+}
+
+func TestDateParsing(t *testing.T) {
+ tests := []struct {
+ dateStr string
+ exp *time.Time
+ }{
+ // RFC 5322, Appendix A.1.1
+ {
+ "Fri, 21 Nov 1997 09:55:06 -0600",
+ &time.Time{
+ Year: 1997,
+ Month: 11,
+ Day: 21,
+ Hour: 9,
+ Minute: 55,
+ Second: 6,
+ ZoneOffset: -6 * 60 * 60,
+ },
+ },
+ // RFC5322, Appendix A.6.2
+ // Obsolete date.
+ {
+ "21 Nov 97 09:55:06 GMT",
+ &time.Time{
+ Year: 1997,
+ Month: 11,
+ Day: 21,
+ Hour: 9,
+ Minute: 55,
+ Second: 6,
+ Zone: "GMT",
+ },
+ },
+ }
+ for _, test := range tests {
+ hdr := Header{
+ "Date": []string{test.dateStr},
+ }
+ date, err := hdr.Date()
+ if err != nil {
+ t.Errorf("Failed parsing %q: %v", test.dateStr, err)
+ continue
+ }
+ if !reflect.DeepEqual(date, test.exp) {
+ t.Errorf("Parse of %q: got %+v, want %+v", test.dateStr, date, test.exp)
+ }
+ }
+}
+
+func TestAddressParsing(t *testing.T) {
+ tests := []struct {
+ addrsStr string
+ exp []*Address
+ }{
+ // Bare address
+ {
+ `jdoe@machine.example`,
+ []*Address{&Address{
+ Address: "jdoe@machine.example",
+ }},
+ },
+ // RFC 5322, Appendix A.1.1
+ {
+ `John Doe <jdoe@machine.example>`,
+ []*Address{&Address{
+ Name: "John Doe",
+ Address: "jdoe@machine.example",
+ }},
+ },
+ // RFC 5322, Appendix A.1.2
+ {
+ `"Joe Q. Public" <john.q.public@example.com>`,
+ []*Address{&Address{
+ Name: "Joe Q. Public",
+ Address: "john.q.public@example.com",
+ }},
+ },
+ {
+ `Mary Smith <mary@x.test>, jdoe@example.org, Who? <one@y.test>`,
+ []*Address{
+ &Address{
+ Name: "Mary Smith",
+ Address: "mary@x.test",
+ },
+ &Address{
+ Address: "jdoe@example.org",
+ },
+ &Address{
+ Name: "Who?",
+ Address: "one@y.test",
+ },
+ },
+ },
+ {
+ `<boss@nil.test>, "Giant; \"Big\" Box" <sysservices@example.net>`,
+ []*Address{
+ &Address{
+ Address: "boss@nil.test",
+ },
+ &Address{
+ Name: `Giant; "Big" Box`,
+ Address: "sysservices@example.net",
+ },
+ },
+ },
+ // RFC 5322, Appendix A.1.3
+ // TODO(dsymonds): Group addresses.
+
+ // RFC 2047 "Q"-encoded ISO-8859-1 address.
+ {
+ `=?iso-8859-1?q?J=F6rg_Doe?= <joerg@example.com>`,
+ []*Address{
+ &Address{
+ Name: `Jörg Doe`,
+ Address: "joerg@example.com",
+ },
+ },
+ },
+ // RFC 2047 "Q"-encoded UTF-8 address.
+ {
+ `=?utf-8?q?J=C3=B6rg_Doe?= <joerg@example.com>`,
+ []*Address{
+ &Address{
+ Name: `Jörg Doe`,
+ Address: "joerg@example.com",
+ },
+ },
+ },
+ // RFC 2047, Section 8.
+ {
+ `=?ISO-8859-1?Q?Andr=E9?= Pirard <PIRARD@vm1.ulg.ac.be>`,
+ []*Address{
+ &Address{
+ Name: `André Pirard`,
+ Address: "PIRARD@vm1.ulg.ac.be",
+ },
+ },
+ },
+ // Custom example of RFC 2047 "B"-encoded ISO-8859-1 address.
+ {
+ `=?ISO-8859-1?B?SvZyZw==?= <joerg@example.com>`,
+ []*Address{
+ &Address{
+ Name: `Jörg`,
+ Address: "joerg@example.com",
+ },
+ },
+ },
+ // Custom example of RFC 2047 "B"-encoded UTF-8 address.
+ {
+ `=?UTF-8?B?SsO2cmc=?= <joerg@example.com>`,
+ []*Address{
+ &Address{
+ Name: `Jörg`,
+ Address: "joerg@example.com",
+ },
+ },
+ },
+ }
+ for _, test := range tests {
+ addrs, err := newAddrParser(test.addrsStr).parseAddressList()
+ if err != nil {
+ t.Errorf("Failed parsing %q: %v", test.addrsStr, err)
+ continue
+ }
+ if !reflect.DeepEqual(addrs, test.exp) {
+ t.Errorf("Parse of %q: got %+v, want %+v", test.addrsStr, addrs, test.exp)
+ }
+ }
+}
+
+func TestAddressFormatting(t *testing.T) {
+ tests := []struct {
+ addr *Address
+ exp string
+ }{
+ {
+ &Address{Address: "bob@example.com"},
+ "<bob@example.com>",
+ },
+ {
+ &Address{Name: "Bob", Address: "bob@example.com"},
+ `"Bob" <bob@example.com>`,
+ },
+ {
+ // note the ö (o with an umlaut)
+ &Address{Name: "Böb", Address: "bob@example.com"},
+ `=?utf-8?q?B=C3=B6b?= <bob@example.com>`,
+ },
+ }
+ for _, test := range tests {
+ s := test.addr.String()
+ if s != test.exp {
+ t.Errorf("Address%+v.String() = %v, want %v", *test.addr, s, test.exp)
+ }
+ }
+}
var buf [10]byte
n, err := c.Read(buf[:])
if n != 0 || err != io.EOF {
- t.Fatalf("server Read = %d, %v; want 0, os.EOF", n, err)
+ t.Fatalf("server Read = %d, %v; want 0, io.EOF", n, err)
}
c.Write([]byte("response"))
c.Close()
import (
"bufio"
"os"
- "testing"
"runtime"
+ "testing"
)
func TestReadLine(t *testing.T) {
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package rpc
+
+import (
+ "bufio"
+ "encoding/gob"
+ "errors"
+ "io"
+ "log"
+ "net"
+ "net/http"
+ "sync"
+)
+
+// ServerError represents an error that has been returned from
+// the remote side of the RPC connection.
+type ServerError string
+
+func (e ServerError) Error() string {
+ return string(e)
+}
+
+var ErrShutdown = errors.New("connection is shut down")
+
+// Call represents an active RPC.
+type Call struct {
+ ServiceMethod string // The name of the service and method to call.
+ Args interface{} // The argument to the function (*struct).
+ Reply interface{} // The reply from the function (*struct).
+ Error error // After completion, the error status.
+ Done chan *Call // Strobes when call is complete; value is the error status.
+ seq uint64
+}
+
+// Client represents an RPC Client.
+// There may be multiple outstanding Calls associated
+// with a single Client.
+type Client struct {
+ mutex sync.Mutex // protects pending, seq, request
+ sending sync.Mutex
+ request Request
+ seq uint64
+ codec ClientCodec
+ pending map[uint64]*Call
+ closing bool
+ shutdown bool
+}
+
+// A ClientCodec implements writing of RPC requests and
+// reading of RPC responses for the client side of an RPC session.
+// The client calls WriteRequest to write a request to the connection
+// and calls ReadResponseHeader and ReadResponseBody in pairs
+// to read responses. The client calls Close when finished with the
+// connection. ReadResponseBody may be called with a nil
+// argument to force the body of the response to be read and then
+// discarded.
+type ClientCodec interface {
+ WriteRequest(*Request, interface{}) error
+ ReadResponseHeader(*Response) error
+ ReadResponseBody(interface{}) error
+
+ Close() error
+}
+
+func (client *Client) send(c *Call) {
+ // Register this call.
+ client.mutex.Lock()
+ if client.shutdown {
+ c.Error = ErrShutdown
+ client.mutex.Unlock()
+ c.done()
+ return
+ }
+ c.seq = client.seq
+ client.seq++
+ client.pending[c.seq] = c
+ client.mutex.Unlock()
+
+ // Encode and send the request.
+ client.sending.Lock()
+ defer client.sending.Unlock()
+ client.request.Seq = c.seq
+ client.request.ServiceMethod = c.ServiceMethod
+ if err := client.codec.WriteRequest(&client.request, c.Args); err != nil {
+ c.Error = err
+ c.done()
+ }
+}
+
+func (client *Client) input() {
+ var err error
+ var response Response
+ for err == nil {
+ response = Response{}
+ err = client.codec.ReadResponseHeader(&response)
+ if err != nil {
+ if err == io.EOF && !client.closing {
+ err = io.ErrUnexpectedEOF
+ }
+ break
+ }
+ seq := response.Seq
+ client.mutex.Lock()
+ c := client.pending[seq]
+ delete(client.pending, seq)
+ client.mutex.Unlock()
+
+ if response.Error == "" {
+ err = client.codec.ReadResponseBody(c.Reply)
+ if err != nil {
+ c.Error = errors.New("reading body " + err.Error())
+ }
+ } else {
+ // We've got an error response. Give this to the request;
+ // any subsequent requests will get the ReadResponseBody
+ // error if there is one.
+ c.Error = ServerError(response.Error)
+ err = client.codec.ReadResponseBody(nil)
+ if err != nil {
+ err = errors.New("reading error body: " + err.Error())
+ }
+ }
+ c.done()
+ }
+ // Terminate pending calls.
+ client.mutex.Lock()
+ client.shutdown = true
+ for _, call := range client.pending {
+ call.Error = err
+ call.done()
+ }
+ client.mutex.Unlock()
+ if err != io.EOF || !client.closing {
+ log.Println("rpc: client protocol error:", err)
+ }
+}
+
+func (call *Call) done() {
+ select {
+ case call.Done <- call:
+ // ok
+ default:
+ // We don't want to block here. It is the caller's responsibility to make
+ // sure the channel has enough buffer space. See comment in Go().
+ }
+}
+
+// NewClient returns a new Client to handle requests to the
+// set of services at the other end of the connection.
+// It adds a buffer to the write side of the connection so
+// the header and payload are sent as a unit.
+func NewClient(conn io.ReadWriteCloser) *Client {
+ encBuf := bufio.NewWriter(conn)
+ client := &gobClientCodec{conn, gob.NewDecoder(conn), gob.NewEncoder(encBuf), encBuf}
+ return NewClientWithCodec(client)
+}
+
+// NewClientWithCodec is like NewClient but uses the specified
+// codec to encode requests and decode responses.
+func NewClientWithCodec(codec ClientCodec) *Client {
+ client := &Client{
+ codec: codec,
+ pending: make(map[uint64]*Call),
+ }
+ go client.input()
+ return client
+}
+
+type gobClientCodec struct {
+ rwc io.ReadWriteCloser
+ dec *gob.Decoder
+ enc *gob.Encoder
+ encBuf *bufio.Writer
+}
+
+func (c *gobClientCodec) WriteRequest(r *Request, body interface{}) (err error) {
+ if err = c.enc.Encode(r); err != nil {
+ return
+ }
+ if err = c.enc.Encode(body); err != nil {
+ return
+ }
+ return c.encBuf.Flush()
+}
+
+func (c *gobClientCodec) ReadResponseHeader(r *Response) error {
+ return c.dec.Decode(r)
+}
+
+func (c *gobClientCodec) ReadResponseBody(body interface{}) error {
+ return c.dec.Decode(body)
+}
+
+func (c *gobClientCodec) Close() error {
+ return c.rwc.Close()
+}
+
+// DialHTTP connects to an HTTP RPC server at the specified network address
+// listening on the default HTTP RPC path.
+func DialHTTP(network, address string) (*Client, error) {
+ return DialHTTPPath(network, address, DefaultRPCPath)
+}
+
+// DialHTTPPath connects to an HTTP RPC server
+// at the specified network address and path.
+func DialHTTPPath(network, address, path string) (*Client, error) {
+ var err error
+ conn, err := net.Dial(network, address)
+ if err != nil {
+ return nil, err
+ }
+ io.WriteString(conn, "CONNECT "+path+" HTTP/1.0\n\n")
+
+ // Require successful HTTP response
+ // before switching to RPC protocol.
+ resp, err := http.ReadResponse(bufio.NewReader(conn), &http.Request{Method: "CONNECT"})
+ if err == nil && resp.Status == connected {
+ return NewClient(conn), nil
+ }
+ if err == nil {
+ err = errors.New("unexpected HTTP response: " + resp.Status)
+ }
+ conn.Close()
+ return nil, &net.OpError{"dial-http", network + " " + address, nil, err}
+}
+
+// Dial connects to an RPC server at the specified network address.
+func Dial(network, address string) (*Client, error) {
+ conn, err := net.Dial(network, address)
+ if err != nil {
+ return nil, err
+ }
+ return NewClient(conn), nil
+}
+
+func (client *Client) Close() error {
+ client.mutex.Lock()
+ if client.shutdown || client.closing {
+ client.mutex.Unlock()
+ return ErrShutdown
+ }
+ client.closing = true
+ client.mutex.Unlock()
+ return client.codec.Close()
+}
+
+// Go invokes the function asynchronously. It returns the Call structure representing
+// the invocation. The done channel will signal when the call is complete by returning
+// the same Call object. If done is nil, Go will allocate a new channel.
+// If non-nil, done must be buffered or Go will deliberately crash.
+func (client *Client) Go(serviceMethod string, args interface{}, reply interface{}, done chan *Call) *Call {
+ call := new(Call)
+ call.ServiceMethod = serviceMethod
+ call.Args = args
+ call.Reply = reply
+ if done == nil {
+ done = make(chan *Call, 10) // buffered.
+ } else {
+ // If caller passes done != nil, it must arrange that
+ // done has enough buffer for the number of simultaneous
+ // RPCs that will be using that channel. If the channel
+ // is totally unbuffered, it's best not to run at all.
+ if cap(done) == 0 {
+ log.Panic("rpc: done channel is unbuffered")
+ }
+ }
+ call.Done = done
+ if client.shutdown {
+ call.Error = ErrShutdown
+ call.done()
+ return call
+ }
+ client.send(call)
+ return call
+}
+
+// Call invokes the named function, waits for it to complete, and returns its error status.
+func (client *Client) Call(serviceMethod string, args interface{}, reply interface{}) error {
+ if client.shutdown {
+ return ErrShutdown
+ }
+ call := <-client.Go(serviceMethod, args, reply, make(chan *Call, 1)).Done
+ return call.Error
+}
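The two calling styles provided by this client are easiest to see side by side. A minimal sketch follows, assuming a reachable server at the hypothetical address 127.0.0.1:1234 that has registered an Arith service whose Add and Mul methods take *Args and write into an int reply (the address and service are placeholders, mirroring the tests later in this patch):

package main

import (
	"fmt"
	"log"
	"net/rpc"
)

type Args struct{ A, B int }

func main() {
	client, err := rpc.Dial("tcp", "127.0.0.1:1234")
	if err != nil {
		log.Fatal("dialing:", err)
	}
	// Synchronous: Call blocks until the reply (or an error) comes back.
	var sum int
	if err := client.Call("Arith.Add", &Args{7, 8}, &sum); err != nil {
		log.Fatal("Arith.Add:", err)
	}
	// Asynchronous: Go returns a *Call immediately; Done strobes on completion.
	var product int
	mul := client.Go("Arith.Mul", &Args{7, 8}, &product, nil)
	<-mul.Done
	if mul.Error != nil {
		log.Fatal("Arith.Mul:", mul.Error)
	}
	fmt.Println(sum, product) // 15 56
}

Passing done == nil to Go lets the client allocate a buffered channel; a caller-supplied channel must be buffered (an unbuffered one makes Go panic) and large enough that completion notifications are not dropped.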
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package rpc
+
+/*
+ Some HTML presented at http://machine:port/debug/rpc
+ Lists services, their methods, and some statistics, still rudimentary.
+*/
+
+import (
+ "fmt"
+ "net/http"
+ "sort"
+ "text/template"
+)
+
+const debugText = `<html>
+ <body>
+ <title>Services</title>
+ {{range .}}
+ <hr>
+ Service {{.Name}}
+ <hr>
+ <table>
+ <th align=center>Method</th><th align=center>Calls</th>
+ {{range .Method}}
+ <tr>
+ <td align=left font=fixed>{{.Name}}({{.Type.ArgType}}, {{.Type.ReplyType}}) error</td>
+ <td align=center>{{.Type.NumCalls}}</td>
+ </tr>
+ {{end}}
+ </table>
+ {{end}}
+ </body>
+ </html>`
+
+var debug = template.Must(template.New("RPC debug").Parse(debugText))
+
+type debugMethod struct {
+ Type *methodType
+ Name string
+}
+
+type methodArray []debugMethod
+
+type debugService struct {
+ Service *service
+ Name string
+ Method methodArray
+}
+
+type serviceArray []debugService
+
+func (s serviceArray) Len() int { return len(s) }
+func (s serviceArray) Less(i, j int) bool { return s[i].Name < s[j].Name }
+func (s serviceArray) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+func (m methodArray) Len() int { return len(m) }
+func (m methodArray) Less(i, j int) bool { return m[i].Name < m[j].Name }
+func (m methodArray) Swap(i, j int) { m[i], m[j] = m[j], m[i] }
+
+type debugHTTP struct {
+ *Server
+}
+
+// Runs at /debug/rpc
+func (server debugHTTP) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+ // Build a sorted version of the data.
+ var services = make(serviceArray, len(server.serviceMap))
+ i := 0
+ server.mu.Lock()
+ for sname, service := range server.serviceMap {
+ services[i] = debugService{service, sname, make(methodArray, len(service.method))}
+ j := 0
+ for mname, method := range service.method {
+ services[i].Method[j] = debugMethod{method, mname}
+ j++
+ }
+ sort.Sort(services[i].Method)
+ i++
+ }
+ server.mu.Unlock()
+ sort.Sort(services)
+ err := debug.Execute(w, services)
+ if err != nil {
+ fmt.Fprintln(w, "rpc: error executing template:", err.Error())
+ }
+}
--- /dev/null
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package jsonrpc
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "net/rpc"
+ "testing"
+)
+
+type Args struct {
+ A, B int
+}
+
+type Reply struct {
+ C int
+}
+
+type Arith int
+
+func (t *Arith) Add(args *Args, reply *Reply) error {
+ reply.C = args.A + args.B
+ return nil
+}
+
+func (t *Arith) Mul(args *Args, reply *Reply) error {
+ reply.C = args.A * args.B
+ return nil
+}
+
+func (t *Arith) Div(args *Args, reply *Reply) error {
+ if args.B == 0 {
+ return errors.New("divide by zero")
+ }
+ reply.C = args.A / args.B
+ return nil
+}
+
+func (t *Arith) Error(args *Args, reply *Reply) error {
+ panic("ERROR")
+}
+
+func init() {
+ rpc.Register(new(Arith))
+}
+
+func TestServer(t *testing.T) {
+ type addResp struct {
+ Id interface{} `json:"id"`
+ Result Reply `json:"result"`
+ Error interface{} `json:"error"`
+ }
+
+ cli, srv := net.Pipe()
+ defer cli.Close()
+ go ServeConn(srv)
+ dec := json.NewDecoder(cli)
+
+ // Send hand-coded requests to server, parse responses.
+ for i := 0; i < 10; i++ {
+ fmt.Fprintf(cli, `{"method": "Arith.Add", "id": "\u%04d", "params": [{"A": %d, "B": %d}]}`, i, i, i+1)
+ var resp addResp
+ err := dec.Decode(&resp)
+ if err != nil {
+ t.Fatalf("Decode: %s", err)
+ }
+ if resp.Error != nil {
+ t.Fatalf("resp.Error: %s", resp.Error)
+ }
+ if resp.Id.(string) != string(i) {
+ t.Fatalf("resp: bad id %q want %q", resp.Id.(string), string(i))
+ }
+ if resp.Result.C != 2*i+1 {
+ t.Fatalf("resp: bad result: %d+%d=%d", i, i+1, resp.Result.C)
+ }
+ }
+
+ fmt.Fprintf(cli, "{}\n")
+ var resp addResp
+ if err := dec.Decode(&resp); err != nil {
+ t.Fatalf("Decode after empty: %s", err)
+ }
+ if resp.Error == nil {
+ t.Fatalf("Expected error, got nil")
+ }
+}
+
+func TestClient(t *testing.T) {
+ // Assume server is okay (TestServer is above).
+ // Test client against server.
+ cli, srv := net.Pipe()
+ go ServeConn(srv)
+
+ client := NewClient(cli)
+ defer client.Close()
+
+ // Synchronous calls
+ args := &Args{7, 8}
+ reply := new(Reply)
+ err := client.Call("Arith.Add", args, reply)
+ if err != nil {
+ t.Errorf("Add: expected no error but got string %q", err.Error())
+ }
+ if reply.C != args.A+args.B {
+ t.Errorf("Add: expected %d got %d", reply.C, args.A+args.B)
+ }
+
+ args = &Args{7, 8}
+ reply = new(Reply)
+ err = client.Call("Arith.Mul", args, reply)
+ if err != nil {
+ t.Errorf("Mul: expected no error but got string %q", err.Error())
+ }
+ if reply.C != args.A*args.B {
+ t.Errorf("Mul: expected %d got %d", reply.C, args.A*args.B)
+ }
+
+ // Out of order.
+ args = &Args{7, 8}
+ mulReply := new(Reply)
+ mulCall := client.Go("Arith.Mul", args, mulReply, nil)
+ addReply := new(Reply)
+ addCall := client.Go("Arith.Add", args, addReply, nil)
+
+ addCall = <-addCall.Done
+ if addCall.Error != nil {
+ t.Errorf("Add: expected no error but got string %q", addCall.Error.Error())
+ }
+ if addReply.C != args.A+args.B {
+ t.Errorf("Add: expected %d got %d", addReply.C, args.A+args.B)
+ }
+
+ mulCall = <-mulCall.Done
+ if mulCall.Error != nil {
+ t.Errorf("Mul: expected no error but got string %q", mulCall.Error.Error())
+ }
+ if mulReply.C != args.A*args.B {
+ t.Errorf("Mul: expected %d got %d", mulReply.C, args.A*args.B)
+ }
+
+ // Error test
+ args = &Args{7, 0}
+ reply = new(Reply)
+ err = client.Call("Arith.Div", args, reply)
+ // expect an error: zero divide
+ if err == nil {
+ t.Error("Div: expected error")
+ } else if err.Error() != "divide by zero" {
+ t.Error("Div: expected divide by zero error; got", err)
+ }
+}
+
+func TestMalformedInput(t *testing.T) {
+ cli, srv := net.Pipe()
+ go cli.Write([]byte(`{id:1}`)) // invalid json
+ ServeConn(srv) // must return, not loop
+}
+
+func TestUnexpectedError(t *testing.T) {
+ cli, srv := myPipe()
+ go cli.PipeWriter.CloseWithError(errors.New("unexpected error!")) // reader will get this error
+ ServeConn(srv) // must return, not loop
+}
+
+// Copied from package net.
+func myPipe() (*pipe, *pipe) {
+ r1, w1 := io.Pipe()
+ r2, w2 := io.Pipe()
+
+ return &pipe{r1, w2}, &pipe{r2, w1}
+}
+
+type pipe struct {
+ *io.PipeReader
+ *io.PipeWriter
+}
+
+type pipeAddr int
+
+func (pipeAddr) Network() string {
+ return "pipe"
+}
+
+func (pipeAddr) String() string {
+ return "pipe"
+}
+
+func (p *pipe) Close() error {
+ err := p.PipeReader.Close()
+ err1 := p.PipeWriter.Close()
+ if err == nil {
+ err = err1
+ }
+ return err
+}
+
+func (p *pipe) LocalAddr() net.Addr {
+ return pipeAddr(0)
+}
+
+func (p *pipe) RemoteAddr() net.Addr {
+ return pipeAddr(0)
+}
+
+func (p *pipe) SetTimeout(nsec int64) error {
+ return errors.New("net.Pipe does not support timeouts")
+}
+
+func (p *pipe) SetReadTimeout(nsec int64) error {
+ return errors.New("net.Pipe does not support timeouts")
+}
+
+func (p *pipe) SetWriteTimeout(nsec int64) error {
+ return errors.New("net.Pipe does not support timeouts")
+}
--- /dev/null
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package jsonrpc implements a JSON-RPC ClientCodec and ServerCodec
+// for the rpc package.
+package jsonrpc
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "net"
+ "net/rpc"
+ "sync"
+)
+
+type clientCodec struct {
+ dec *json.Decoder // for reading JSON values
+ enc *json.Encoder // for writing JSON values
+ c io.Closer
+
+ // temporary work space
+ req clientRequest
+ resp clientResponse
+
+ // JSON-RPC responses include the request id but not the request method.
+ // Package rpc expects both.
+ // We save the request method in pending when sending a request
+ // and then look it up by request ID when filling out the rpc Response.
+ mutex sync.Mutex // protects pending
+ pending map[uint64]string // map request id to method name
+}
+
+// NewClientCodec returns a new rpc.ClientCodec using JSON-RPC on conn.
+func NewClientCodec(conn io.ReadWriteCloser) rpc.ClientCodec {
+ return &clientCodec{
+ dec: json.NewDecoder(conn),
+ enc: json.NewEncoder(conn),
+ c: conn,
+ pending: make(map[uint64]string),
+ }
+}
+
+type clientRequest struct {
+ Method string `json:"method"`
+ Params [1]interface{} `json:"params"`
+ Id uint64 `json:"id"`
+}
+
+func (c *clientCodec) WriteRequest(r *rpc.Request, param interface{}) error {
+ c.mutex.Lock()
+ c.pending[r.Seq] = r.ServiceMethod
+ c.mutex.Unlock()
+ c.req.Method = r.ServiceMethod
+ c.req.Params[0] = param
+ c.req.Id = r.Seq
+ return c.enc.Encode(&c.req)
+}
+
+type clientResponse struct {
+ Id uint64 `json:"id"`
+ Result *json.RawMessage `json:"result"`
+ Error interface{} `json:"error"`
+}
+
+func (r *clientResponse) reset() {
+ r.Id = 0
+ r.Result = nil
+ r.Error = nil
+}
+
+func (c *clientCodec) ReadResponseHeader(r *rpc.Response) error {
+ c.resp.reset()
+ if err := c.dec.Decode(&c.resp); err != nil {
+ return err
+ }
+
+ c.mutex.Lock()
+ r.ServiceMethod = c.pending[c.resp.Id]
+ delete(c.pending, c.resp.Id)
+ c.mutex.Unlock()
+
+ r.Error = ""
+ r.Seq = c.resp.Id
+ if c.resp.Error != nil {
+ x, ok := c.resp.Error.(string)
+ if !ok {
+ return fmt.Errorf("invalid error %v", c.resp.Error)
+ }
+ if x == "" {
+ x = "unspecified error"
+ }
+ r.Error = x
+ }
+ return nil
+}
+
+func (c *clientCodec) ReadResponseBody(x interface{}) error {
+ if x == nil {
+ return nil
+ }
+ return json.Unmarshal(*c.resp.Result, x)
+}
+
+func (c *clientCodec) Close() error {
+ return c.c.Close()
+}
+
+// NewClient returns a new rpc.Client to handle requests to the
+// set of services at the other end of the connection.
+func NewClient(conn io.ReadWriteCloser) *rpc.Client {
+ return rpc.NewClientWithCodec(NewClientCodec(conn))
+}
+
+// Dial connects to a JSON-RPC server at the specified network address.
+func Dial(network, address string) (*rpc.Client, error) {
+ conn, err := net.Dial(network, address)
+ if err != nil {
+ return nil, err
+ }
+ return NewClient(conn), err
+}
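A minimal client-side sketch of the codec above, assuming a JSON-RPC server is listening at the hypothetical address 127.0.0.1:5678 and exposes an Arith.Add method like the one in the tests earlier in this patch:

package main

import (
	"log"
	"net/rpc/jsonrpc"
)

type Args struct{ A, B int }
type Reply struct{ C int }

func main() {
	client, err := jsonrpc.Dial("tcp", "127.0.0.1:5678")
	if err != nil {
		log.Fatal("dialing:", err)
	}
	defer client.Close()
	var reply Reply
	// On the wire this first call is sent roughly as
	// {"method":"Arith.Add","params":[{"A":1,"B":2}],"id":0}
	// and the codec matches the response back to the call by that id.
	if err := client.Call("Arith.Add", &Args{1, 2}, &reply); err != nil {
		log.Fatal("Arith.Add:", err)
	}
	log.Println("sum:", reply.C) // sum: 3
}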
--- /dev/null
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package jsonrpc
+
+import (
+ "encoding/json"
+ "errors"
+ "io"
+ "net/rpc"
+ "sync"
+)
+
+type serverCodec struct {
+ dec *json.Decoder // for reading JSON values
+ enc *json.Encoder // for writing JSON values
+ c io.Closer
+
+ // temporary work space
+ req serverRequest
+ resp serverResponse
+
+ // JSON-RPC clients can use arbitrary json values as request IDs.
+ // Package rpc expects uint64 request IDs.
+ // We assign uint64 sequence numbers to incoming requests
+ // but save the original request ID in the pending map.
+ // When rpc responds, we use the sequence number in
+ // the response to find the original request ID.
+ mutex sync.Mutex // protects seq, pending
+ seq uint64
+ pending map[uint64]*json.RawMessage
+}
+
+// NewServerCodec returns a new rpc.ServerCodec using JSON-RPC on conn.
+func NewServerCodec(conn io.ReadWriteCloser) rpc.ServerCodec {
+ return &serverCodec{
+ dec: json.NewDecoder(conn),
+ enc: json.NewEncoder(conn),
+ c: conn,
+ pending: make(map[uint64]*json.RawMessage),
+ }
+}
+
+type serverRequest struct {
+ Method string `json:"method"`
+ Params *json.RawMessage `json:"params"`
+ Id *json.RawMessage `json:"id"`
+}
+
+func (r *serverRequest) reset() {
+ r.Method = ""
+ if r.Params != nil {
+ *r.Params = (*r.Params)[0:0]
+ }
+ if r.Id != nil {
+ *r.Id = (*r.Id)[0:0]
+ }
+}
+
+type serverResponse struct {
+ Id *json.RawMessage `json:"id"`
+ Result interface{} `json:"result"`
+ Error interface{} `json:"error"`
+}
+
+func (c *serverCodec) ReadRequestHeader(r *rpc.Request) error {
+ c.req.reset()
+ if err := c.dec.Decode(&c.req); err != nil {
+ return err
+ }
+ r.ServiceMethod = c.req.Method
+
+ // JSON request id can be any JSON value;
+ // RPC package expects uint64. Translate to
+ // internal uint64 and save JSON on the side.
+ c.mutex.Lock()
+ c.seq++
+ c.pending[c.seq] = c.req.Id
+ c.req.Id = nil
+ r.Seq = c.seq
+ c.mutex.Unlock()
+
+ return nil
+}
+
+func (c *serverCodec) ReadRequestBody(x interface{}) error {
+ if x == nil {
+ return nil
+ }
+ // JSON params is array value.
+ // RPC params is struct.
+ // Unmarshal into array containing struct for now.
+ // Should think about making RPC more general.
+ var params [1]interface{}
+ params[0] = x
+ return json.Unmarshal(*c.req.Params, &params)
+}
+
+var null = json.RawMessage([]byte("null"))
+
+func (c *serverCodec) WriteResponse(r *rpc.Response, x interface{}) error {
+ var resp serverResponse
+ c.mutex.Lock()
+ b, ok := c.pending[r.Seq]
+ if !ok {
+ c.mutex.Unlock()
+ return errors.New("invalid sequence number in response")
+ }
+ delete(c.pending, r.Seq)
+ c.mutex.Unlock()
+
+ if b == nil {
+ // Invalid request so no id. Use JSON null.
+ b = &null
+ }
+ resp.Id = b
+ resp.Result = x
+ if r.Error == "" {
+ resp.Error = nil
+ } else {
+ resp.Error = r.Error
+ }
+ return c.enc.Encode(resp)
+}
+
+func (c *serverCodec) Close() error {
+ return c.c.Close()
+}
+
+// ServeConn runs the JSON-RPC server on a single connection.
+// ServeConn blocks, serving the connection until the client hangs up.
+// The caller typically invokes ServeConn in a go statement.
+func ServeConn(conn io.ReadWriteCloser) {
+ rpc.ServeCodec(NewServerCodec(conn))
+}
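The matching server side is just a listener whose accepted connections are handed to ServeConn. A minimal sketch, with the Arith service and the 127.0.0.1:5678 address chosen only for illustration:

package main

import (
	"log"
	"net"
	"net/rpc"
	"net/rpc/jsonrpc"
)

type Args struct{ A, B int }
type Reply struct{ C int }

type Arith int

func (t *Arith) Add(args *Args, reply *Reply) error {
	reply.C = args.A + args.B
	return nil
}

func main() {
	rpc.Register(new(Arith))
	l, err := net.Listen("tcp", "127.0.0.1:5678")
	if err != nil {
		log.Fatal("listen:", err)
	}
	for {
		conn, err := l.Accept()
		if err != nil {
			log.Fatal("accept:", err)
		}
		// Each connection speaks JSON-RPC; ServeConn blocks until the
		// client hangs up, so serve it in its own goroutine.
		go jsonrpc.ServeConn(conn)
	}
}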
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+ Package rpc provides access to the exported methods of an object across a
+ network or other I/O connection. A server registers an object, making it visible
+ as a service with the name of the type of the object. After registration, exported
+ methods of the object will be accessible remotely. A server may register multiple
+ objects (services) of different types but it is an error to register multiple
+ objects of the same type.
+
+ Only methods that satisfy these criteria will be made available for remote access;
+ other methods will be ignored:
+
+ - the method name is exported, that is, begins with an upper case letter.
+ - the method receiver is exported or local (defined in the package
+ registering the service).
+ - the method has two arguments, both exported or local types.
+ - the method's second argument is a pointer.
+ - the method has return type error.
+
+ The method's first argument represents the arguments provided by the caller; the
+ second argument represents the result parameters to be returned to the caller.
+ The method's return value, if non-nil, is passed back as a string that the client
+ sees as if created by errors.New.
+
+ The server may handle requests on a single connection by calling ServeConn. More
+ typically it will create a network listener and call Accept or, for an HTTP
+ listener, HandleHTTP and http.Serve.
+
+ A client wishing to use the service establishes a connection and then invokes
+ NewClient on the connection. The convenience function Dial (DialHTTP) performs
+ both steps for a raw network connection (an HTTP connection). The resulting
+ Client object has two methods, Call and Go, that specify the service and method to
+ call, a pointer containing the arguments, and a pointer to receive the result
+ parameters.
+
+ Call waits for the remote call to complete; Go launches the call asynchronously
+ and returns a channel that will signal completion.
+
+ Package "gob" is used to transport the data.
+
+ Here is a simple example. A server wishes to export an object of type Arith:
+
+ package server
+
+ type Args struct {
+ A, B int
+ }
+
+ type Quotient struct {
+ Quo, Rem int
+ }
+
+ type Arith int
+
+ func (t *Arith) Multiply(args *Args, reply *int) error {
+ *reply = args.A * args.B
+ return nil
+ }
+
+ func (t *Arith) Divide(args *Args, quo *Quotient) error {
+ if args.B == 0 {
+ return errors.New("divide by zero")
+ }
+ quo.Quo = args.A / args.B
+ quo.Rem = args.A % args.B
+ return nil
+ }
+
+ The server calls (for HTTP service):
+
+ arith := new(Arith)
+ rpc.Register(arith)
+ rpc.HandleHTTP()
+ l, e := net.Listen("tcp", ":1234")
+ if e != nil {
+ log.Fatal("listen error:", e)
+ }
+ go http.Serve(l, nil)
+
+ At this point, clients can see a service "Arith" with methods "Arith.Multiply" and
+ "Arith.Divide". To invoke one, a client first dials the server:
+
+ client, err := rpc.DialHTTP("tcp", serverAddress + ":1234")
+ if err != nil {
+ log.Fatal("dialing:", err)
+ }
+
+ Then it can make a remote call:
+
+ // Synchronous call
+ args := &server.Args{7,8}
+ var reply int
+ err = client.Call("Arith.Multiply", args, &reply)
+ if err != nil {
+ log.Fatal("arith error:", err)
+ }
+ fmt.Printf("Arith: %d*%d=%d", args.A, args.B, reply)
+
+ or
+
+ // Asynchronous call
+ quotient := new(Quotient)
+ divCall := client.Go("Arith.Divide", args, &quotient, nil)
+ replyCall := <-divCall.Done // will be equal to divCall
+ // check errors, print, etc.
+
+ A server implementation will often provide a simple, type-safe wrapper for the
+ client.
+*/
+package rpc
+
+import (
+ "bufio"
+ "encoding/gob"
+ "errors"
+ "io"
+ "log"
+ "net"
+ "net/http"
+ "reflect"
+ "strings"
+ "sync"
+ "unicode"
+ "unicode/utf8"
+)
+
+const (
+ // Defaults used by HandleHTTP
+ DefaultRPCPath = "/_goRPC_"
+ DefaultDebugPath = "/debug/rpc"
+)
+
+// Precompute the reflect type for error. Can't use error directly
+// because Typeof takes an empty interface value. This is annoying.
+var typeOfError = reflect.TypeOf((*error)(nil)).Elem()
+
+type methodType struct {
+ sync.Mutex // protects counters
+ method reflect.Method
+ ArgType reflect.Type
+ ReplyType reflect.Type
+ numCalls uint
+}
+
+type service struct {
+ name string // name of service
+ rcvr reflect.Value // receiver of methods for the service
+ typ reflect.Type // type of the receiver
+ method map[string]*methodType // registered methods
+}
+
+// Request is a header written before every RPC call. It is used internally
+// but documented here as an aid to debugging, such as when analyzing
+// network traffic.
+type Request struct {
+ ServiceMethod string // format: "Service.Method"
+ Seq uint64 // sequence number chosen by client
+ next *Request // for free list in Server
+}
+
+// Response is a header written before every RPC return. It is used internally
+// but documented here as an aid to debugging, such as when analyzing
+// network traffic.
+type Response struct {
+ ServiceMethod string // echoes that of the Request
+ Seq uint64 // echoes that of the request
+ Error string // error, if any.
+ next *Response // for free list in Server
+}
+
+// Server represents an RPC Server.
+type Server struct {
+ mu sync.Mutex // protects the serviceMap
+ serviceMap map[string]*service
+ reqLock sync.Mutex // protects freeReq
+ freeReq *Request
+ respLock sync.Mutex // protects freeResp
+ freeResp *Response
+}
+
+// NewServer returns a new Server.
+func NewServer() *Server {
+ return &Server{serviceMap: make(map[string]*service)}
+}
+
+// DefaultServer is the default instance of *Server.
+var DefaultServer = NewServer()
+
+// Is this an exported - upper case - name?
+func isExported(name string) bool {
+ rune, _ := utf8.DecodeRuneInString(name)
+ return unicode.IsUpper(rune)
+}
+
+// Is this type exported or a builtin?
+func isExportedOrBuiltinType(t reflect.Type) bool {
+ for t.Kind() == reflect.Ptr {
+ t = t.Elem()
+ }
+ // PkgPath will be non-empty even for an exported type,
+ // so we need to check the type name as well.
+ return isExported(t.Name()) || t.PkgPath() == ""
+}
+
+// Register publishes in the server the set of methods of the
+// receiver value that satisfy the following conditions:
+// - exported method
+// - two arguments, both pointers to exported structs
+// - one return value, of type error
+// It returns an error if the receiver is not an exported type or has no
+// suitable methods.
+// The client accesses each method using a string of the form "Type.Method",
+// where Type is the receiver's concrete type.
+func (server *Server) Register(rcvr interface{}) error {
+ return server.register(rcvr, "", false)
+}
+
+// RegisterName is like Register but uses the provided name for the type
+// instead of the receiver's concrete type.
+func (server *Server) RegisterName(name string, rcvr interface{}) error {
+ return server.register(rcvr, name, true)
+}
+
+func (server *Server) register(rcvr interface{}, name string, useName bool) error {
+ server.mu.Lock()
+ defer server.mu.Unlock()
+ if server.serviceMap == nil {
+ server.serviceMap = make(map[string]*service)
+ }
+ s := new(service)
+ s.typ = reflect.TypeOf(rcvr)
+ s.rcvr = reflect.ValueOf(rcvr)
+ sname := reflect.Indirect(s.rcvr).Type().Name()
+ if useName {
+ sname = name
+ }
+ if sname == "" {
+ log.Fatal("rpc: no service name for type", s.typ.String())
+ }
+ if !isExported(sname) && !useName {
+ s := "rpc Register: type " + sname + " is not exported"
+ log.Print(s)
+ return errors.New(s)
+ }
+ if _, present := server.serviceMap[sname]; present {
+ return errors.New("rpc: service already defined: " + sname)
+ }
+ s.name = sname
+ s.method = make(map[string]*methodType)
+
+ // Install the methods
+ for m := 0; m < s.typ.NumMethod(); m++ {
+ method := s.typ.Method(m)
+ mtype := method.Type
+ mname := method.Name
+ if method.PkgPath != "" {
+ continue
+ }
+ // Method needs three ins: receiver, *args, *reply.
+ if mtype.NumIn() != 3 {
+ log.Println("method", mname, "has wrong number of ins:", mtype.NumIn())
+ continue
+ }
+ // First arg need not be a pointer.
+ argType := mtype.In(1)
+ if !isExportedOrBuiltinType(argType) {
+ log.Println(mname, "argument type not exported or local:", argType)
+ continue
+ }
+ // Second arg must be a pointer.
+ replyType := mtype.In(2)
+ if replyType.Kind() != reflect.Ptr {
+ log.Println("method", mname, "reply type not a pointer:", replyType)
+ continue
+ }
+ if !isExportedOrBuiltinType(replyType) {
+ log.Println("method", mname, "reply type not exported or local:", replyType)
+ continue
+ }
+ // Method needs one out: error.
+ if mtype.NumOut() != 1 {
+ log.Println("method", mname, "has wrong number of outs:", mtype.NumOut())
+ continue
+ }
+ if returnType := mtype.Out(0); returnType != typeOfError {
+ log.Println("method", mname, "returns", returnType.String(), "not error")
+ continue
+ }
+ s.method[mname] = &methodType{method: method, ArgType: argType, ReplyType: replyType}
+ }
+
+ if len(s.method) == 0 {
+ s := "rpc Register: type " + sname + " has no exported methods of suitable type"
+ log.Print(s)
+ return errors.New(s)
+ }
+ server.serviceMap[s.name] = s
+ return nil
+}
+
+// A value sent as a placeholder for the response when the server receives an invalid request.
+type InvalidRequest struct{}
+
+var invalidRequest = InvalidRequest{}
+
+func (server *Server) sendResponse(sending *sync.Mutex, req *Request, reply interface{}, codec ServerCodec, errmsg string) {
+ resp := server.getResponse()
+ // Encode the response header
+ resp.ServiceMethod = req.ServiceMethod
+ if errmsg != "" {
+ resp.Error = errmsg
+ reply = invalidRequest
+ }
+ resp.Seq = req.Seq
+ sending.Lock()
+ err := codec.WriteResponse(resp, reply)
+ if err != nil {
+ log.Println("rpc: writing response:", err)
+ }
+ sending.Unlock()
+ server.freeResponse(resp)
+}
+
+func (m *methodType) NumCalls() (n uint) {
+ m.Lock()
+ n = m.numCalls
+ m.Unlock()
+ return n
+}
+
+func (s *service) call(server *Server, sending *sync.Mutex, mtype *methodType, req *Request, argv, replyv reflect.Value, codec ServerCodec) {
+ mtype.Lock()
+ mtype.numCalls++
+ mtype.Unlock()
+ function := mtype.method.Func
+ // Invoke the method, providing a new value for the reply.
+ returnValues := function.Call([]reflect.Value{s.rcvr, argv, replyv})
+ // The return value for the method is an error.
+ errInter := returnValues[0].Interface()
+ errmsg := ""
+ if errInter != nil {
+ errmsg = errInter.(error).Error()
+ }
+ server.sendResponse(sending, req, replyv.Interface(), codec, errmsg)
+ server.freeRequest(req)
+}
+
+type gobServerCodec struct {
+ rwc io.ReadWriteCloser
+ dec *gob.Decoder
+ enc *gob.Encoder
+ encBuf *bufio.Writer
+}
+
+func (c *gobServerCodec) ReadRequestHeader(r *Request) error {
+ return c.dec.Decode(r)
+}
+
+func (c *gobServerCodec) ReadRequestBody(body interface{}) error {
+ return c.dec.Decode(body)
+}
+
+func (c *gobServerCodec) WriteResponse(r *Response, body interface{}) (err error) {
+ if err = c.enc.Encode(r); err != nil {
+ return
+ }
+ if err = c.enc.Encode(body); err != nil {
+ return
+ }
+ return c.encBuf.Flush()
+}
+
+func (c *gobServerCodec) Close() error {
+ return c.rwc.Close()
+}
+
+// ServeConn runs the server on a single connection.
+// ServeConn blocks, serving the connection until the client hangs up.
+// The caller typically invokes ServeConn in a go statement.
+// ServeConn uses the gob wire format (see package gob) on the
+// connection. To use an alternate codec, use ServeCodec.
+func (server *Server) ServeConn(conn io.ReadWriteCloser) {
+ buf := bufio.NewWriter(conn)
+ srv := &gobServerCodec{conn, gob.NewDecoder(conn), gob.NewEncoder(buf), buf}
+ server.ServeCodec(srv)
+}
+
+// ServeCodec is like ServeConn but uses the specified codec to
+// decode requests and encode responses.
+func (server *Server) ServeCodec(codec ServerCodec) {
+ sending := new(sync.Mutex)
+ for {
+ service, mtype, req, argv, replyv, keepReading, err := server.readRequest(codec)
+ if err != nil {
+ if err != io.EOF {
+ log.Println("rpc:", err)
+ }
+ if !keepReading {
+ break
+ }
+ // send a response if we actually managed to read a header.
+ if req != nil {
+ server.sendResponse(sending, req, invalidRequest, codec, err.Error())
+ server.freeRequest(req)
+ }
+ continue
+ }
+ go service.call(server, sending, mtype, req, argv, replyv, codec)
+ }
+ codec.Close()
+}
+
+// ServeRequest is like ServeCodec but synchronously serves a single request.
+// It does not close the codec upon completion.
+func (server *Server) ServeRequest(codec ServerCodec) error {
+ sending := new(sync.Mutex)
+ service, mtype, req, argv, replyv, keepReading, err := server.readRequest(codec)
+ if err != nil {
+ if !keepReading {
+ return err
+ }
+ // send a response if we actually managed to read a header.
+ if req != nil {
+ server.sendResponse(sending, req, invalidRequest, codec, err.Error())
+ server.freeRequest(req)
+ }
+ return err
+ }
+ service.call(server, sending, mtype, req, argv, replyv, codec)
+ return nil
+}
+
+func (server *Server) getRequest() *Request {
+ server.reqLock.Lock()
+ req := server.freeReq
+ if req == nil {
+ req = new(Request)
+ } else {
+ server.freeReq = req.next
+ *req = Request{}
+ }
+ server.reqLock.Unlock()
+ return req
+}
+
+func (server *Server) freeRequest(req *Request) {
+ server.reqLock.Lock()
+ req.next = server.freeReq
+ server.freeReq = req
+ server.reqLock.Unlock()
+}
+
+func (server *Server) getResponse() *Response {
+ server.respLock.Lock()
+ resp := server.freeResp
+ if resp == nil {
+ resp = new(Response)
+ } else {
+ server.freeResp = resp.next
+ *resp = Response{}
+ }
+ server.respLock.Unlock()
+ return resp
+}
+
+func (server *Server) freeResponse(resp *Response) {
+ server.respLock.Lock()
+ resp.next = server.freeResp
+ server.freeResp = resp
+ server.respLock.Unlock()
+}
+
+func (server *Server) readRequest(codec ServerCodec) (service *service, mtype *methodType, req *Request, argv, replyv reflect.Value, keepReading bool, err error) {
+ service, mtype, req, keepReading, err = server.readRequestHeader(codec)
+ if err != nil {
+ if !keepReading {
+ return
+ }
+ // discard body
+ codec.ReadRequestBody(nil)
+ return
+ }
+
+ // Decode the argument value.
+ argIsValue := false // if true, need to indirect before calling.
+ if mtype.ArgType.Kind() == reflect.Ptr {
+ argv = reflect.New(mtype.ArgType.Elem())
+ } else {
+ argv = reflect.New(mtype.ArgType)
+ argIsValue = true
+ }
+ // argv guaranteed to be a pointer now.
+ if err = codec.ReadRequestBody(argv.Interface()); err != nil {
+ return
+ }
+ if argIsValue {
+ argv = argv.Elem()
+ }
+
+ replyv = reflect.New(mtype.ReplyType.Elem())
+ return
+}
+
+func (server *Server) readRequestHeader(codec ServerCodec) (service *service, mtype *methodType, req *Request, keepReading bool, err error) {
+ // Grab the request header.
+ req = server.getRequest()
+ err = codec.ReadRequestHeader(req)
+ if err != nil {
+ req = nil
+ if err == io.EOF || err == io.ErrUnexpectedEOF {
+ return
+ }
+ err = errors.New("rpc: server cannot decode request: " + err.Error())
+ return
+ }
+
+ // We read the header successfully. If we see an error now,
+ // we can still recover and move on to the next request.
+ keepReading = true
+
+ serviceMethod := strings.Split(req.ServiceMethod, ".")
+ if len(serviceMethod) != 2 {
+ err = errors.New("rpc: service/method request ill-formed: " + req.ServiceMethod)
+ return
+ }
+ // Look up the request.
+ server.mu.Lock()
+ service = server.serviceMap[serviceMethod[0]]
+ server.mu.Unlock()
+ if service == nil {
+ err = errors.New("rpc: can't find service " + req.ServiceMethod)
+ return
+ }
+ mtype = service.method[serviceMethod[1]]
+ if mtype == nil {
+ err = errors.New("rpc: can't find method " + req.ServiceMethod)
+ }
+ return
+}
+
+// Accept accepts connections on the listener and serves requests
+// for each incoming connection. Accept blocks; the caller typically
+// invokes it in a go statement.
+func (server *Server) Accept(lis net.Listener) {
+ for {
+ conn, err := lis.Accept()
+ if err != nil {
+ log.Fatal("rpc.Serve: accept:", err.Error()) // TODO(r): exit?
+ }
+ go server.ServeConn(conn)
+ }
+}
+
+// Register publishes the receiver's methods in the DefaultServer.
+func Register(rcvr interface{}) error { return DefaultServer.Register(rcvr) }
+
+// RegisterName is like Register but uses the provided name for the type
+// instead of the receiver's concrete type.
+func RegisterName(name string, rcvr interface{}) error {
+ return DefaultServer.RegisterName(name, rcvr)
+}
+
+// A ServerCodec implements reading of RPC requests and writing of
+// RPC responses for the server side of an RPC session.
+// The server calls ReadRequestHeader and ReadRequestBody in pairs
+// to read requests from the connection, and it calls WriteResponse to
+// write a response back. The server calls Close when finished with the
+// connection. ReadRequestBody may be called with a nil
+// argument to force the body of the request to be read and discarded.
+type ServerCodec interface {
+ ReadRequestHeader(*Request) error
+ ReadRequestBody(interface{}) error
+ WriteResponse(*Response, interface{}) error
+
+ Close() error
+}
+
+// ServeConn runs the DefaultServer on a single connection.
+// ServeConn blocks, serving the connection until the client hangs up.
+// The caller typically invokes ServeConn in a go statement.
+// ServeConn uses the gob wire format (see package gob) on the
+// connection. To use an alternate codec, use ServeCodec.
+func ServeConn(conn io.ReadWriteCloser) {
+ DefaultServer.ServeConn(conn)
+}
+
+// ServeCodec is like ServeConn but uses the specified codec to
+// decode requests and encode responses.
+func ServeCodec(codec ServerCodec) {
+ DefaultServer.ServeCodec(codec)
+}
+
+// ServeRequest is like ServeCodec but synchronously serves a single request.
+// It does not close the codec upon completion.
+func ServeRequest(codec ServerCodec) error {
+ return DefaultServer.ServeRequest(codec)
+}
+
+// Accept accepts connections on the listener and serves requests
+// to DefaultServer for each incoming connection.
+// Accept blocks; the caller typically invokes it in a go statement.
+func Accept(lis net.Listener) { DefaultServer.Accept(lis) }
+
+// Can connect to RPC service using HTTP CONNECT to rpcPath.
+var connected = "200 Connected to Go RPC"
+
+// ServeHTTP implements an http.Handler that answers RPC requests.
+func (server *Server) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+ if req.Method != "CONNECT" {
+ w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+ w.WriteHeader(http.StatusMethodNotAllowed)
+ io.WriteString(w, "405 must CONNECT\n")
+ return
+ }
+ conn, _, err := w.(http.Hijacker).Hijack()
+ if err != nil {
+ log.Print("rpc hijacking ", req.RemoteAddr, ": ", err.Error())
+ return
+ }
+ io.WriteString(conn, "HTTP/1.0 "+connected+"\n\n")
+ server.ServeConn(conn)
+}
+
+// HandleHTTP registers an HTTP handler for RPC messages on rpcPath,
+// and a debugging handler on debugPath.
+// It is still necessary to invoke http.Serve(), typically in a go statement.
+func (server *Server) HandleHTTP(rpcPath, debugPath string) {
+ http.Handle(rpcPath, server)
+ http.Handle(debugPath, debugHTTP{server})
+}
+
+// HandleHTTP registers an HTTP handler for RPC messages to DefaultServer
+// on DefaultRPCPath and a debugging handler on DefaultDebugPath.
+// It is still necessary to invoke http.Serve(), typically in a go statement.
+func HandleHTTP() {
+ DefaultServer.HandleHTTP(DefaultRPCPath, DefaultDebugPath)
+}
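The package comment above shows the HTTP path (HandleHTTP plus http.Serve); the raw-TCP path it mentions, a listener passed to Accept, looks like the following. This is a minimal sketch; the Arith service and the port are placeholders, not part of the patch:

package main

import (
	"log"
	"net"
	"net/rpc"
)

type Args struct{ A, B int }

type Arith int

func (t *Arith) Mul(args *Args, reply *int) error {
	*reply = args.A * args.B
	return nil
}

func main() {
	rpc.Register(new(Arith))
	l, err := net.Listen("tcp", ":1234")
	if err != nil {
		log.Fatal("listen error:", err)
	}
	// Accept blocks, wrapping each accepted connection in the gob codec
	// via ServeConn; clients reach it with rpc.Dial("tcp", addr).
	rpc.Accept(l)
}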
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package rpc
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "log"
+ "net"
+ "net/http/httptest"
+ "runtime"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "testing"
+ "time"
+)
+
+var (
+ newServer *Server
+ serverAddr, newServerAddr string
+ httpServerAddr string
+ once, newOnce, httpOnce sync.Once
+)
+
+const (
+ second = 1e9
+ newHttpPath = "/foo"
+)
+
+type Args struct {
+ A, B int
+}
+
+type Reply struct {
+ C int
+}
+
+type Arith int
+
+// Some of Arith's methods have value args, some have pointer args. That's deliberate.
+
+func (t *Arith) Add(args Args, reply *Reply) error {
+ reply.C = args.A + args.B
+ return nil
+}
+
+func (t *Arith) Mul(args *Args, reply *Reply) error {
+ reply.C = args.A * args.B
+ return nil
+}
+
+func (t *Arith) Div(args Args, reply *Reply) error {
+ if args.B == 0 {
+ return errors.New("divide by zero")
+ }
+ reply.C = args.A / args.B
+ return nil
+}
+
+func (t *Arith) String(args *Args, reply *string) error {
+ *reply = fmt.Sprintf("%d+%d=%d", args.A, args.B, args.A+args.B)
+ return nil
+}
+
+func (t *Arith) Scan(args string, reply *Reply) (err error) {
+ _, err = fmt.Sscan(args, &reply.C)
+ return
+}
+
+func (t *Arith) Error(args *Args, reply *Reply) error {
+ panic("ERROR")
+}
+
+func listenTCP() (net.Listener, string) {
+ l, e := net.Listen("tcp", "127.0.0.1:0") // any available address
+ if e != nil {
+ log.Fatalf("net.Listen tcp :0: %v", e)
+ }
+ return l, l.Addr().String()
+}
+
+func startServer() {
+ Register(new(Arith))
+
+ var l net.Listener
+ l, serverAddr = listenTCP()
+ log.Println("Test RPC server listening on", serverAddr)
+ go Accept(l)
+
+ HandleHTTP()
+ httpOnce.Do(startHttpServer)
+}
+
+func startNewServer() {
+ newServer = NewServer()
+ newServer.Register(new(Arith))
+
+ var l net.Listener
+ l, newServerAddr = listenTCP()
+ log.Println("NewServer test RPC server listening on", newServerAddr)
+ go Accept(l)
+
+ newServer.HandleHTTP(newHttpPath, "/bar")
+ httpOnce.Do(startHttpServer)
+}
+
+func startHttpServer() {
+ server := httptest.NewServer(nil)
+ httpServerAddr = server.Listener.Addr().String()
+ log.Println("Test HTTP RPC server listening on", httpServerAddr)
+}
+
+func TestRPC(t *testing.T) {
+ once.Do(startServer)
+ testRPC(t, serverAddr)
+ newOnce.Do(startNewServer)
+ testRPC(t, newServerAddr)
+}
+
+func testRPC(t *testing.T, addr string) {
+ client, err := Dial("tcp", addr)
+ if err != nil {
+ t.Fatal("dialing", err)
+ }
+
+ // Synchronous calls
+ args := &Args{7, 8}
+ reply := new(Reply)
+ err = client.Call("Arith.Add", args, reply)
+ if err != nil {
+ t.Errorf("Add: expected no error but got string %q", err.Error())
+ }
+ if reply.C != args.A+args.B {
+ t.Errorf("Add: expected %d got %d", reply.C, args.A+args.B)
+ }
+
+ // Nonexistent method
+ args = &Args{7, 0}
+ reply = new(Reply)
+ err = client.Call("Arith.BadOperation", args, reply)
+ // expect an error
+ if err == nil {
+ t.Error("BadOperation: expected error")
+ } else if !strings.HasPrefix(err.Error(), "rpc: can't find method ") {
+ t.Errorf("BadOperation: expected can't find method error; got %q", err)
+ }
+
+ // Unknown service
+ args = &Args{7, 8}
+ reply = new(Reply)
+ err = client.Call("Arith.Unknown", args, reply)
+ if err == nil {
+ t.Error("expected error calling unknown service")
+ } else if strings.Index(err.Error(), "method") < 0 {
+ t.Error("expected error about method; got", err)
+ }
+
+ // Out of order.
+ args = &Args{7, 8}
+ mulReply := new(Reply)
+ mulCall := client.Go("Arith.Mul", args, mulReply, nil)
+ addReply := new(Reply)
+ addCall := client.Go("Arith.Add", args, addReply, nil)
+
+ addCall = <-addCall.Done
+ if addCall.Error != nil {
+ t.Errorf("Add: expected no error but got string %q", addCall.Error.Error())
+ }
+ if addReply.C != args.A+args.B {
+ t.Errorf("Add: expected %d got %d", addReply.C, args.A+args.B)
+ }
+
+ mulCall = <-mulCall.Done
+ if mulCall.Error != nil {
+ t.Errorf("Mul: expected no error but got string %q", mulCall.Error.Error())
+ }
+ if mulReply.C != args.A*args.B {
+ t.Errorf("Mul: expected %d got %d", mulReply.C, args.A*args.B)
+ }
+
+ // Error test
+ args = &Args{7, 0}
+ reply = new(Reply)
+ err = client.Call("Arith.Div", args, reply)
+ // expect an error: zero divide
+ if err == nil {
+ t.Error("Div: expected error")
+ } else if err.Error() != "divide by zero" {
+ t.Error("Div: expected divide by zero error; got", err)
+ }
+
+ // Bad type.
+ reply = new(Reply)
+ err = client.Call("Arith.Add", reply, reply) // args, reply would be the correct thing to use
+ if err == nil {
+ t.Error("expected error calling Arith.Add with wrong arg type")
+ } else if strings.Index(err.Error(), "type") < 0 {
+ t.Error("expected error about type; got", err)
+ }
+
+ // Non-struct argument
+ const Val = 12345
+ str := fmt.Sprint(Val)
+ reply = new(Reply)
+ err = client.Call("Arith.Scan", &str, reply)
+ if err != nil {
+ t.Errorf("Scan: expected no error but got string %q", err.Error())
+ } else if reply.C != Val {
+ t.Errorf("Scan: expected %d got %d", Val, reply.C)
+ }
+
+ // Non-struct reply
+ args = &Args{27, 35}
+ str = ""
+ err = client.Call("Arith.String", args, &str)
+ if err != nil {
+ t.Errorf("String: expected no error but got string %q", err.Error())
+ }
+ expect := fmt.Sprintf("%d+%d=%d", args.A, args.B, args.A+args.B)
+ if str != expect {
+ t.Errorf("String: expected %s got %s", expect, str)
+ }
+
+ args = &Args{7, 8}
+ reply = new(Reply)
+ err = client.Call("Arith.Mul", args, reply)
+ if err != nil {
+ t.Errorf("Mul: expected no error but got string %q", err.Error())
+ }
+ if reply.C != args.A*args.B {
+ t.Errorf("Mul: expected %d got %d", reply.C, args.A*args.B)
+ }
+}
+
+func TestHTTP(t *testing.T) {
+ once.Do(startServer)
+ testHTTPRPC(t, "")
+ newOnce.Do(startNewServer)
+ testHTTPRPC(t, newHttpPath)
+}
+
+func testHTTPRPC(t *testing.T, path string) {
+ var client *Client
+ var err error
+ if path == "" {
+ client, err = DialHTTP("tcp", httpServerAddr)
+ } else {
+ client, err = DialHTTPPath("tcp", httpServerAddr, path)
+ }
+ if err != nil {
+ t.Fatal("dialing", err)
+ }
+
+ // Synchronous calls
+ args := &Args{7, 8}
+ reply := new(Reply)
+ err = client.Call("Arith.Add", args, reply)
+ if err != nil {
+ t.Errorf("Add: expected no error but got string %q", err.Error())
+ }
+ if reply.C != args.A+args.B {
+ t.Errorf("Add: expected %d got %d", reply.C, args.A+args.B)
+ }
+}
+
+// CodecEmulator provides a client-like api and a ServerCodec interface.
+// Can be used to test ServeRequest.
+type CodecEmulator struct {
+ server *Server
+ serviceMethod string
+ args *Args
+ reply *Reply
+ err error
+}
+
+func (codec *CodecEmulator) Call(serviceMethod string, args *Args, reply *Reply) error {
+ codec.serviceMethod = serviceMethod
+ codec.args = args
+ codec.reply = reply
+ codec.err = nil
+ var serverError error
+ if codec.server == nil {
+ serverError = ServeRequest(codec)
+ } else {
+ serverError = codec.server.ServeRequest(codec)
+ }
+ if codec.err == nil && serverError != nil {
+ codec.err = serverError
+ }
+ return codec.err
+}
+
+func (codec *CodecEmulator) ReadRequestHeader(req *Request) error {
+ req.ServiceMethod = codec.serviceMethod
+ req.Seq = 0
+ return nil
+}
+
+func (codec *CodecEmulator) ReadRequestBody(argv interface{}) error {
+ if codec.args == nil {
+ return io.ErrUnexpectedEOF
+ }
+ *(argv.(*Args)) = *codec.args
+ return nil
+}
+
+func (codec *CodecEmulator) WriteResponse(resp *Response, reply interface{}) error {
+ if resp.Error != "" {
+ codec.err = errors.New(resp.Error)
+ } else {
+ *codec.reply = *(reply.(*Reply))
+ }
+ return nil
+}
+
+func (codec *CodecEmulator) Close() error {
+ return nil
+}
+
+func TestServeRequest(t *testing.T) {
+ once.Do(startServer)
+ testServeRequest(t, nil)
+ newOnce.Do(startNewServer)
+ testServeRequest(t, newServer)
+}
+
+func testServeRequest(t *testing.T, server *Server) {
+ client := CodecEmulator{server: server}
+
+ args := &Args{7, 8}
+ reply := new(Reply)
+ err := client.Call("Arith.Add", args, reply)
+ if err != nil {
+ t.Errorf("Add: expected no error but got string %q", err.Error())
+ }
+ if reply.C != args.A+args.B {
+ t.Errorf("Add: expected %d got %d", reply.C, args.A+args.B)
+ }
+
+ err = client.Call("Arith.Add", nil, reply)
+ if err == nil {
+ t.Errorf("expected error calling Arith.Add with nil arg")
+ }
+}
+
+type ReplyNotPointer int
+type ArgNotPublic int
+type ReplyNotPublic int
+type local struct{}
+
+func (t *ReplyNotPointer) ReplyNotPointer(args *Args, reply Reply) error {
+ return nil
+}
+
+func (t *ArgNotPublic) ArgNotPublic(args *local, reply *Reply) error {
+ return nil
+}
+
+func (t *ReplyNotPublic) ReplyNotPublic(args *Args, reply *local) error {
+ return nil
+}
+
+// Check that registration handles lots of bad methods and a type with no suitable methods.
+func TestRegistrationError(t *testing.T) {
+ err := Register(new(ReplyNotPointer))
+ if err == nil {
+ t.Errorf("expected error registering ReplyNotPointer")
+ }
+ err = Register(new(ArgNotPublic))
+ if err == nil {
+ t.Errorf("expected error registering ArgNotPublic")
+ }
+ err = Register(new(ReplyNotPublic))
+ if err == nil {
+ t.Errorf("expected error registering ReplyNotPublic")
+ }
+}
+
+type WriteFailCodec int
+
+func (WriteFailCodec) WriteRequest(*Request, interface{}) error {
+ // the panic caused by this error used to not unlock a lock.
+ return errors.New("fail")
+}
+
+func (WriteFailCodec) ReadResponseHeader(*Response) error {
+ time.Sleep(120e9)
+ panic("unreachable")
+}
+
+func (WriteFailCodec) ReadResponseBody(interface{}) error {
+ time.Sleep(120e9)
+ panic("unreachable")
+}
+
+func (WriteFailCodec) Close() error {
+ return nil
+}
+
+func TestSendDeadlock(t *testing.T) {
+ client := NewClientWithCodec(WriteFailCodec(0))
+
+ done := make(chan bool)
+ go func() {
+ testSendDeadlock(client)
+ testSendDeadlock(client)
+ done <- true
+ }()
+ select {
+ case <-done:
+ return
+ case <-time.After(5e9):
+ t.Fatal("deadlock")
+ }
+}
+
+func testSendDeadlock(client *Client) {
+ defer func() {
+ recover()
+ }()
+ args := &Args{7, 8}
+ reply := new(Reply)
+ client.Call("Arith.Add", args, reply)
+}
+
+func dialDirect() (*Client, error) {
+ return Dial("tcp", serverAddr)
+}
+
+func dialHTTP() (*Client, error) {
+ return DialHTTP("tcp", httpServerAddr)
+}
+
+func countMallocs(dial func() (*Client, error), t *testing.T) uint64 {
+ once.Do(startServer)
+ client, err := dial()
+ if err != nil {
+ t.Fatal("error dialing", err)
+ }
+ args := &Args{7, 8}
+ reply := new(Reply)
+ runtime.UpdateMemStats()
+ mallocs := 0 - runtime.MemStats.Mallocs
+ const count = 100
+ for i := 0; i < count; i++ {
+ err := client.Call("Arith.Add", args, reply)
+ if err != nil {
+ t.Errorf("Add: expected no error but got string %q", err.Error())
+ }
+ if reply.C != args.A+args.B {
+ t.Errorf("Add: expected %d got %d", reply.C, args.A+args.B)
+ }
+ }
+ runtime.UpdateMemStats()
+ mallocs += runtime.MemStats.Mallocs
+ return mallocs / count
+}
+
+func TestCountMallocs(t *testing.T) {
+ fmt.Printf("mallocs per rpc round trip: %d\n", countMallocs(dialDirect, t))
+}
+
+func TestCountMallocsOverHTTP(t *testing.T) {
+ fmt.Printf("mallocs per HTTP rpc round trip: %d\n", countMallocs(dialHTTP, t))
+}
+
+type writeCrasher struct{}
+
+func (writeCrasher) Close() error {
+ return nil
+}
+
+func (writeCrasher) Read(p []byte) (int, error) {
+ return 0, io.EOF
+}
+
+func (writeCrasher) Write(p []byte) (int, error) {
+ return 0, errors.New("fake write failure")
+}
+
+func TestClientWriteError(t *testing.T) {
+ c := NewClient(writeCrasher{})
+ res := false
+ err := c.Call("foo", 1, &res)
+ if err == nil {
+ t.Fatal("expected error")
+ }
+ if err.Error() != "fake write failure" {
+ t.Error("unexpected value of error:", err)
+ }
+}
+
+func benchmarkEndToEnd(dial func() (*Client, error), b *testing.B) {
+ b.StopTimer()
+ once.Do(startServer)
+ client, err := dial()
+ if err != nil {
+ fmt.Println("error dialing", err)
+ return
+ }
+
+ // Synchronous calls
+ args := &Args{7, 8}
+ procs := runtime.GOMAXPROCS(-1)
+ N := int32(b.N)
+ var wg sync.WaitGroup
+ wg.Add(procs)
+ b.StartTimer()
+
+ for p := 0; p < procs; p++ {
+ go func() {
+ reply := new(Reply)
+ for atomic.AddInt32(&N, -1) >= 0 {
+ err = client.Call("Arith.Add", args, reply)
+ if err != nil {
+ fmt.Printf("Add: expected no error but got string %q", err.Error())
+ panic("rpc error")
+ }
+ if reply.C != args.A+args.B {
+ fmt.Printf("Add: expected %d got %d", reply.C, args.A+args.B)
+ panic("rpc error")
+ }
+ }
+ wg.Done()
+ }()
+ }
+ wg.Wait()
+}
+
+func benchmarkEndToEndAsync(dial func() (*Client, error), b *testing.B) {
+ const MaxConcurrentCalls = 100
+ b.StopTimer()
+ once.Do(startServer)
+ client, err := dial()
+ if err != nil {
+ fmt.Println("error dialing", err)
+ return
+ }
+
+ // Asynchronous calls
+ args := &Args{7, 8}
+ procs := 4 * runtime.GOMAXPROCS(-1)
+ send := int32(b.N)
+ recv := int32(b.N)
+ var wg sync.WaitGroup
+ wg.Add(procs)
+ gate := make(chan bool, MaxConcurrentCalls)
+ res := make(chan *Call, MaxConcurrentCalls)
+ b.StartTimer()
+
+ for p := 0; p < procs; p++ {
+ go func() {
+ for atomic.AddInt32(&send, -1) >= 0 {
+ gate <- true
+ reply := new(Reply)
+ client.Go("Arith.Add", args, reply, res)
+ }
+ }()
+ go func() {
+ for call := range res {
+ a := call.Args.(*Args).A
+ b := call.Args.(*Args).B
+ c := call.Reply.(*Reply).C
+ if a+b != c {
+ fmt.Printf("Add: expected %d got %d", a+b, c)
+ panic("incorrect reply")
+ }
+ <-gate
+ if atomic.AddInt32(&recv, -1) == 0 {
+ close(res)
+ }
+ }
+ wg.Done()
+ }()
+ }
+ wg.Wait()
+}
+
+func BenchmarkEndToEnd(b *testing.B) {
+ benchmarkEndToEnd(dialDirect, b)
+}
+
+func BenchmarkEndToEndHTTP(b *testing.B) {
+ benchmarkEndToEnd(dialHTTP, b)
+}
+
+func BenchmarkEndToEndAsync(b *testing.B) {
+ benchmarkEndToEndAsync(dialDirect, b)
+}
+
+func BenchmarkEndToEndAsyncHTTP(b *testing.B) {
+ benchmarkEndToEndAsync(dialHTTP, b)
+}
"flag"
"io"
"os"
+ "runtime"
"strings"
"syscall"
"testing"
- "runtime"
)
// Do not test empty datagrams by default.
--- /dev/null
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package smtp
+
+import "errors"
+
+// Auth is implemented by an SMTP authentication mechanism.
+type Auth interface {
+ // Start begins an authentication with a server.
+ // It returns the name of the authentication protocol
+ // and optionally data to include in the initial AUTH message
+ // sent to the server. It can return proto == "" to indicate
+ // that the authentication should be skipped.
+ // If it returns a non-nil error, the SMTP client aborts
+ // the authentication attempt and closes the connection.
+ Start(server *ServerInfo) (proto string, toServer []byte, err error)
+
+ // Next continues the authentication. The server has just sent
+ // the fromServer data. If more is true, the server expects a
+ // response, which Next should return as toServer; otherwise
+ // Next should return toServer == nil.
+ // If Next returns a non-nil error, the SMTP client aborts
+ // the authentication attempt and closes the connection.
+ Next(fromServer []byte, more bool) (toServer []byte, err error)
+}
+
+// ServerInfo records information about an SMTP server.
+type ServerInfo struct {
+ Name string // SMTP server name
+ TLS bool // using TLS, with valid certificate for Name
+ Auth []string // advertised authentication mechanisms
+}
+
+type plainAuth struct {
+ identity, username, password string
+ host string
+}
+
+// PlainAuth returns an Auth that implements the PLAIN authentication
+// mechanism as defined in RFC 4616.
+// The returned Auth uses the given username and password to authenticate
+// on TLS connections to host and act as identity. Usually identity will be
+// left blank to act as username.
+func PlainAuth(identity, username, password, host string) Auth {
+ return &plainAuth{identity, username, password, host}
+}
+
+func (a *plainAuth) Start(server *ServerInfo) (string, []byte, error) {
+ if !server.TLS {
+ return "", nil, errors.New("unencrypted connection")
+ }
+ if server.Name != a.host {
+ return "", nil, errors.New("wrong host name")
+ }
+ resp := []byte(a.identity + "\x00" + a.username + "\x00" + a.password)
+ return "PLAIN", resp, nil
+}
+
+func (a *plainAuth) Next(fromServer []byte, more bool) ([]byte, error) {
+ if more {
+ // We've already sent everything.
+ return nil, errors.New("unexpected server challenge")
+ }
+ return nil, nil
+}
--- /dev/null
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package smtp implements the Simple Mail Transfer Protocol as defined in RFC 5321.
+// It also implements the following extensions:
+// 8BITMIME RFC 1652
+// AUTH RFC 2554
+// STARTTLS RFC 3207
+// Additional extensions may be handled by clients.
+package smtp
+
+import (
+ "crypto/tls"
+ "encoding/base64"
+ "io"
+ "net"
+ "net/textproto"
+ "strings"
+)
+
+// A Client represents a client connection to an SMTP server.
+type Client struct {
+ // Text is the textproto.Conn used by the Client. It is exported to allow for
+ // clients to add extensions.
+ Text *textproto.Conn
+ // keep a reference to the connection so it can be used to create a TLS
+ // connection later
+ conn net.Conn
+ // whether the Client is using TLS
+ tls bool
+ serverName string
+ // map of supported extensions
+ ext map[string]string
+ // supported auth mechanisms
+ auth []string
+}
+
+// Dial returns a new Client connected to an SMTP server at addr.
+func Dial(addr string) (*Client, error) {
+ conn, err := net.Dial("tcp", addr)
+ if err != nil {
+ return nil, err
+ }
+ host := addr[:strings.Index(addr, ":")]
+ return NewClient(conn, host)
+}
+
+// NewClient returns a new Client using an existing connection and host as a
+// server name to be used when authenticating.
+func NewClient(conn net.Conn, host string) (*Client, error) {
+ text := textproto.NewConn(conn)
+ _, msg, err := text.ReadResponse(220)
+ if err != nil {
+ text.Close()
+ return nil, err
+ }
+ c := &Client{Text: text, conn: conn, serverName: host}
+ if strings.Contains(msg, "ESMTP") {
+ err = c.ehlo()
+ } else {
+ err = c.helo()
+ }
+ return c, err
+}
+
+// cmd is a convenience function that sends a command and returns the response
+func (c *Client) cmd(expectCode int, format string, args ...interface{}) (int, string, error) {
+ id, err := c.Text.Cmd(format, args...)
+ if err != nil {
+ return 0, "", err
+ }
+ c.Text.StartResponse(id)
+ defer c.Text.EndResponse(id)
+ code, msg, err := c.Text.ReadResponse(expectCode)
+ return code, msg, err
+}
+
+// helo sends the HELO greeting to the server. It should be used only when the
+// server does not support ehlo.
+func (c *Client) helo() error {
+ c.ext = nil
+ _, _, err := c.cmd(250, "HELO localhost")
+ return err
+}
+
+// ehlo sends the EHLO (extended hello) greeting to the server. It
+// should be the preferred greeting for servers that support it.
+func (c *Client) ehlo() error {
+ _, msg, err := c.cmd(250, "EHLO localhost")
+ if err != nil {
+ return err
+ }
+ ext := make(map[string]string)
+ extList := strings.Split(msg, "\n")
+ if len(extList) > 1 {
+ extList = extList[1:]
+ for _, line := range extList {
+ args := strings.SplitN(line, " ", 2)
+ if len(args) > 1 {
+ ext[args[0]] = args[1]
+ } else {
+ ext[args[0]] = ""
+ }
+ }
+ }
+ if mechs, ok := ext["AUTH"]; ok {
+ c.auth = strings.Split(mechs, " ")
+ }
+ c.ext = ext
+ return err
+}
+
+// StartTLS sends the STARTTLS command and encrypts all further communication.
+// Only servers that advertise the STARTTLS extension support this function.
+func (c *Client) StartTLS(config *tls.Config) error {
+ _, _, err := c.cmd(220, "STARTTLS")
+ if err != nil {
+ return err
+ }
+ c.conn = tls.Client(c.conn, config)
+ c.Text = textproto.NewConn(c.conn)
+ c.tls = true
+ return c.ehlo()
+}
+
+// Verify checks the validity of an email address on the server.
+// If Verify returns nil, the address is valid. A non-nil return
+// does not necessarily indicate an invalid address. Many servers
+// will not verify addresses for security reasons.
+func (c *Client) Verify(addr string) error {
+ _, _, err := c.cmd(250, "VRFY %s", addr)
+ return err
+}
+
+// Auth authenticates a client using the provided authentication mechanism.
+// A failed authentication closes the connection.
+// Only servers that advertise the AUTH extension support this function.
+func (c *Client) Auth(a Auth) error {
+ encoding := base64.StdEncoding
+ mech, resp, err := a.Start(&ServerInfo{c.serverName, c.tls, c.auth})
+ if err != nil {
+ c.Quit()
+ return err
+ }
+ resp64 := make([]byte, encoding.EncodedLen(len(resp)))
+ encoding.Encode(resp64, resp)
+ code, msg64, err := c.cmd(0, "AUTH %s %s", mech, resp64)
+ for err == nil {
+ var msg []byte
+ switch code {
+ case 334:
+ msg, err = encoding.DecodeString(msg64)
+ case 235:
+ // the last message isn't base64 because it isn't a challenge
+ msg = []byte(msg64)
+ default:
+ err = &textproto.Error{code, msg64}
+ }
+ resp, err = a.Next(msg, code == 334)
+ if err != nil {
+ // abort the AUTH
+ c.cmd(501, "*")
+ c.Quit()
+ break
+ }
+ if resp == nil {
+ break
+ }
+ resp64 = make([]byte, encoding.EncodedLen(len(resp)))
+ encoding.Encode(resp64, resp)
+ code, msg64, err = c.cmd(0, string(resp64))
+ }
+ return err
+}
+
+// Mail issues a MAIL command to the server using the provided email address.
+// If the server supports the 8BITMIME extension, Mail adds the BODY=8BITMIME
+// parameter.
+// This initiates a mail transaction and is followed by one or more Rcpt calls.
+func (c *Client) Mail(from string) error {
+ cmdStr := "MAIL FROM:<%s>"
+ if c.ext != nil {
+ if _, ok := c.ext["8BITMIME"]; ok {
+ cmdStr += " BODY=8BITMIME"
+ }
+ }
+ _, _, err := c.cmd(250, cmdStr, from)
+ return err
+}
+
+// Rcpt issues a RCPT command to the server using the provided email address.
+// A call to Rcpt must be preceded by a call to Mail and may be followed by
+// a Data call or another Rcpt call.
+func (c *Client) Rcpt(to string) error {
+ _, _, err := c.cmd(25, "RCPT TO:<%s>", to)
+ return err
+}
+
+type dataCloser struct {
+ c *Client
+ io.WriteCloser
+}
+
+func (d *dataCloser) Close() error {
+ d.WriteCloser.Close()
+ _, _, err := d.c.Text.ReadResponse(250)
+ return err
+}
+
+// Data issues a DATA command to the server and returns a writer that
+// can be used to write the data. The caller should close the writer
+// before calling any more methods on c.
+// A call to Data must be preceded by one or more calls to Rcpt.
+func (c *Client) Data() (io.WriteCloser, error) {
+ _, _, err := c.cmd(354, "DATA")
+ if err != nil {
+ return nil, err
+ }
+ return &dataCloser{c, c.Text.DotWriter()}, nil
+}
+
+// SendMail connects to the server at addr, switches to TLS if possible,
+// authenticates with mechanism a if possible, and then sends an email from
+// address from, to addresses to, with message msg.
+func SendMail(addr string, a Auth, from string, to []string, msg []byte) error {
+ c, err := Dial(addr)
+ if err != nil {
+ return err
+ }
+ if ok, _ := c.Extension("STARTTLS"); ok {
+ if err = c.StartTLS(nil); err != nil {
+ return err
+ }
+ }
+ if a != nil && c.ext != nil {
+ if _, ok := c.ext["AUTH"]; ok {
+ if err = c.Auth(a); err != nil {
+ return err
+ }
+ }
+ }
+ if err = c.Mail(from); err != nil {
+ return err
+ }
+ for _, addr := range to {
+ if err = c.Rcpt(addr); err != nil {
+ return err
+ }
+ }
+ w, err := c.Data()
+ if err != nil {
+ return err
+ }
+ _, err = w.Write(msg)
+ if err != nil {
+ return err
+ }
+ err = w.Close()
+ if err != nil {
+ return err
+ }
+ return c.Quit()
+}
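+
+// An illustrative sketch of a complete send (added for clarity; the server
+// address, credentials, and mail addresses below are placeholders):
+//
+//   auth := PlainAuth("", "user@example.com", "password", "mail.example.com")
+//   err := SendMail("mail.example.com:25", auth,
+//       "sender@example.org",
+//       []string{"recipient@example.net"},
+//       []byte("Subject: test\r\n\r\nThis is the email body."))
+//   if err != nil {
+//       // handle the error
+//   }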
+
+// Extension reports whether an extension is supported by the server.
+// The extension name is case-insensitive. If the extension is supported,
+// Extension also returns a string that contains any parameters the
+// server specifies for the extension.
+func (c *Client) Extension(ext string) (bool, string) {
+ if c.ext == nil {
+ return false, ""
+ }
+ ext = strings.ToUpper(ext)
+ param, ok := c.ext[ext]
+ return ok, param
+}
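+
+// For example (an illustrative sketch; the parameter string depends on the server):
+//
+//   if ok, params := c.Extension("AUTH"); ok {
+//       // params might be "LOGIN PLAIN"
+//   }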
+
+// Reset sends the RSET command to the server, aborting the current mail
+// transaction.
+func (c *Client) Reset() error {
+ _, _, err := c.cmd(250, "RSET")
+ return err
+}
+
+// Quit sends the QUIT command and closes the connection to the server.
+func (c *Client) Quit() error {
+ _, _, err := c.cmd(221, "QUIT")
+ if err != nil {
+ return err
+ }
+ return c.Text.Close()
+}
--- /dev/null
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package smtp
+
+import (
+ "bufio"
+ "bytes"
+ "io"
+ "net/textproto"
+ "strings"
+ "testing"
+)
+
+type authTest struct {
+ auth Auth
+ challenges []string
+ name string
+ responses []string
+}
+
+var authTests = []authTest{
+ {PlainAuth("", "user", "pass", "testserver"), []string{}, "PLAIN", []string{"\x00user\x00pass"}},
+ {PlainAuth("foo", "bar", "baz", "testserver"), []string{}, "PLAIN", []string{"foo\x00bar\x00baz"}},
+}
+
+func TestAuth(t *testing.T) {
+testLoop:
+ for i, test := range authTests {
+ name, resp, err := test.auth.Start(&ServerInfo{"testserver", true, nil})
+ if name != test.name {
+ t.Errorf("#%d got name %s, expected %s", i, name, test.name)
+ }
+ if !bytes.Equal(resp, []byte(test.responses[0])) {
+ t.Errorf("#%d got response %s, expected %s", i, resp, test.responses[0])
+ }
+ if err != nil {
+ t.Errorf("#%d error: %s", i, err)
+ }
+ for j := range test.challenges {
+ challenge := []byte(test.challenges[j])
+ expected := []byte(test.responses[j+1])
+ resp, err := test.auth.Next(challenge, true)
+ if err != nil {
+ t.Errorf("#%d error: %s", i, err)
+ continue testLoop
+ }
+ if !bytes.Equal(resp, expected) {
+ t.Errorf("#%d got %s, expected %s", i, resp, expected)
+ continue testLoop
+ }
+ }
+ }
+}
+
+type faker struct {
+ io.ReadWriter
+}
+
+func (f faker) Close() error {
+ return nil
+}
+
+func TestBasic(t *testing.T) {
+ basicServer = strings.Join(strings.Split(basicServer, "\n"), "\r\n")
+ basicClient = strings.Join(strings.Split(basicClient, "\n"), "\r\n")
+
+ var cmdbuf bytes.Buffer
+ bcmdbuf := bufio.NewWriter(&cmdbuf)
+ var fake faker
+ fake.ReadWriter = bufio.NewReadWriter(bufio.NewReader(strings.NewReader(basicServer)), bcmdbuf)
+ c := &Client{Text: textproto.NewConn(fake)}
+
+ if err := c.helo(); err != nil {
+ t.Fatalf("HELO failed: %s", err)
+ }
+ if err := c.ehlo(); err == nil {
+ t.Fatalf("Expected first EHLO to fail")
+ }
+ if err := c.ehlo(); err != nil {
+ t.Fatalf("Second EHLO failed: %s", err)
+ }
+
+ if ok, args := c.Extension("aUtH"); !ok || args != "LOGIN PLAIN" {
+ t.Fatalf("Expected AUTH supported")
+ }
+ if ok, _ := c.Extension("DSN"); ok {
+ t.Fatalf("Shouldn't support DSN")
+ }
+
+ if err := c.Mail("user@gmail.com"); err == nil {
+ t.Fatalf("MAIL should require authentication")
+ }
+
+ if err := c.Verify("user1@gmail.com"); err == nil {
+ t.Fatalf("First VRFY: expected no verification")
+ }
+ if err := c.Verify("user2@gmail.com"); err != nil {
+ t.Fatalf("Second VRFY: expected verification, got %s", err)
+ }
+
+ // fake TLS so authentication won't complain
+ c.tls = true
+ c.serverName = "smtp.google.com"
+ if err := c.Auth(PlainAuth("", "user", "pass", "smtp.google.com")); err != nil {
+ t.Fatalf("AUTH failed: %s", err)
+ }
+
+ if err := c.Mail("user@gmail.com"); err != nil {
+ t.Fatalf("MAIL failed: %s", err)
+ }
+ if err := c.Rcpt("golang-nuts@googlegroups.com"); err != nil {
+ t.Fatalf("RCPT failed: %s", err)
+ }
+ msg := `From: user@gmail.com
+To: golang-nuts@googlegroups.com
+Subject: Hooray for Go
+
+Line 1
+.Leading dot line .
+Goodbye.`
+ w, err := c.Data()
+ if err != nil {
+ t.Fatalf("DATA failed: %s", err)
+ }
+ if _, err := w.Write([]byte(msg)); err != nil {
+ t.Fatalf("Data write failed: %s", err)
+ }
+ if err := w.Close(); err != nil {
+ t.Fatalf("Bad data response: %s", err)
+ }
+
+ if err := c.Quit(); err != nil {
+ t.Fatalf("QUIT failed: %s", err)
+ }
+
+ bcmdbuf.Flush()
+ actualcmds := cmdbuf.String()
+ if basicClient != actualcmds {
+ t.Fatalf("Got:\n%s\nExpected:\n%s", actualcmds, basicClient)
+ }
+}
+
+var basicServer = `250 mx.google.com at your service
+502 Unrecognized command.
+250-mx.google.com at your service
+250-SIZE 35651584
+250-AUTH LOGIN PLAIN
+250 8BITMIME
+530 Authentication required
+252 Send some mail, I'll try my best
+250 User is valid
+235 Accepted
+250 Sender OK
+250 Receiver OK
+354 Go ahead
+250 Data OK
+221 OK
+`
+
+var basicClient = `HELO localhost
+EHLO localhost
+EHLO localhost
+MAIL FROM:<user@gmail.com> BODY=8BITMIME
+VRFY user1@gmail.com
+VRFY user2@gmail.com
+AUTH PLAIN AHVzZXIAcGFzcw==
+MAIL FROM:<user@gmail.com> BODY=8BITMIME
+RCPT TO:<golang-nuts@googlegroups.com>
+DATA
+From: user@gmail.com
+To: golang-nuts@googlegroups.com
+Subject: Hooray for Go
+
+Line 1
+..Leading dot line .
+Goodbye.
+.
+QUIT
+`
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build darwin freebsd
+// +build darwin freebsd openbsd
// Sockets for BSD variants
//
// The decoded form returned by the Reader's Read method
// rewrites the "\r\n" line endings into the simpler "\n",
-// removes leading dot escapes if present, and stops with error os.EOF
+// removes leading dot escapes if present, and stops with error io.EOF
// after consuming (and discarding) the end-of-sequence line.
func (r *Reader) DotReader() io.Reader {
r.closeDot()
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package url parses URLs and implements query escaping.
+// See RFC 3986.
+package url
+
+import (
+ "errors"
+ "strconv"
+ "strings"
+)
+
+// Error reports an error and the operation and URL that caused it.
+type Error struct {
+ Op string
+ URL string
+ Err error
+}
+
+func (e *Error) Error() string { return e.Op + " " + e.URL + ": " + e.Err.Error() }
+
+func ishex(c byte) bool {
+ switch {
+ case '0' <= c && c <= '9':
+ return true
+ case 'a' <= c && c <= 'f':
+ return true
+ case 'A' <= c && c <= 'F':
+ return true
+ }
+ return false
+}
+
+func unhex(c byte) byte {
+ switch {
+ case '0' <= c && c <= '9':
+ return c - '0'
+ case 'a' <= c && c <= 'f':
+ return c - 'a' + 10
+ case 'A' <= c && c <= 'F':
+ return c - 'A' + 10
+ }
+ return 0
+}
+
+type encoding int
+
+const (
+ encodePath encoding = 1 + iota
+ encodeUserPassword
+ encodeQueryComponent
+ encodeFragment
+ encodeOpaque
+)
+
+type EscapeError string
+
+func (e EscapeError) Error() string {
+ return "invalid URL escape " + strconv.Quote(string(e))
+}
+
+// shouldEscape returns true if the specified character should be escaped
+// when appearing in a URL string, according to RFC 2396.
+// The mode selects which section of the URL the character appears in,
+// since different sections allow different reserved characters unescaped.
+func shouldEscape(c byte, mode encoding) bool {
+ // RFC 2396 §2.3 Unreserved characters (alphanum)
+ if 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' {
+ return false
+ }
+ switch c {
+ case '-', '_', '.', '!', '~', '*', '\'', '(', ')': // §2.3 Unreserved characters (mark)
+ return false
+
+ case '$', '&', '+', ',', '/', ':', ';', '=', '?', '@': // §2.2 Reserved characters (reserved)
+ // Different sections of the URL allow a few of
+ // the reserved characters to appear unescaped.
+ switch mode {
+ case encodePath: // §3.3
+ // The RFC allows : @ & = + $ , but saves / ; for assigning
+ // meaning to individual path segments. This package
+ // only manipulates the path as a whole, so we allow those
+ // last two as well. Clients that need to distinguish between
+ // `/foo;y=z/bar` and `/foo%3by=z/bar` will have to re-decode RawPath.
+ // That leaves only ? to escape.
+ return c == '?'
+
+ case encodeUserPassword: // §3.2.2
+ // The RFC allows ; : & = + $ , in userinfo, so we must escape only @ and /.
+ // The parsing of userinfo treats : as special so we must escape that too.
+ return c == '@' || c == '/' || c == ':'
+
+ case encodeQueryComponent: // §3.4
+ // The RFC reserves (so we must escape) everything.
+ return true
+
+ case encodeFragment: // §4.1
+ // The RFC text is silent but the grammar allows
+ // everything, so escape nothing.
+ return false
+
+ case encodeOpaque: // §3 opaque_part
+ // The RFC allows opaque_part to use all characters
+ // except that the leading / must be escaped.
+ // (We implement that case in String.)
+ return false
+ }
+ }
+
+ // Everything else must be escaped.
+ return true
+}
+
+// QueryUnescape does the inverse transformation of QueryEscape, converting
+// %AB into the byte 0xAB and '+' into ' ' (space). It returns an error if
+// any % is not followed by two hexadecimal digits.
+func QueryUnescape(s string) (string, error) {
+ return unescape(s, encodeQueryComponent)
+}
+
+// unescape unescapes a string; the mode specifies
+// which section of the URL string is being unescaped.
+func unescape(s string, mode encoding) (string, error) {
+ // Count %, check that they're well-formed.
+ n := 0
+ hasPlus := false
+ for i := 0; i < len(s); {
+ switch s[i] {
+ case '%':
+ n++
+ if i+2 >= len(s) || !ishex(s[i+1]) || !ishex(s[i+2]) {
+ s = s[i:]
+ if len(s) > 3 {
+ s = s[0:3]
+ }
+ return "", EscapeError(s)
+ }
+ i += 3
+ case '+':
+ hasPlus = mode == encodeQueryComponent
+ i++
+ default:
+ i++
+ }
+ }
+
+ if n == 0 && !hasPlus {
+ return s, nil
+ }
+
+ t := make([]byte, len(s)-2*n)
+ j := 0
+ for i := 0; i < len(s); {
+ switch s[i] {
+ case '%':
+ t[j] = unhex(s[i+1])<<4 | unhex(s[i+2])
+ j++
+ i += 3
+ case '+':
+ if mode == encodeQueryComponent {
+ t[j] = ' '
+ } else {
+ t[j] = '+'
+ }
+ j++
+ i++
+ default:
+ t[j] = s[i]
+ j++
+ i++
+ }
+ }
+ return string(t), nil
+}
+
+// QueryEscape escapes the string so it can be safely placed
+// inside a URL query.
+func QueryEscape(s string) string {
+ return escape(s, encodeQueryComponent)
+}
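+
+// A small round-trip sketch (added for clarity; the input string is arbitrary):
+//
+//   QueryEscape("one two&three")       // "one+two%26three"
+//   QueryUnescape("one+two%26three")   // "one two&three", nil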
+
+func escape(s string, mode encoding) string {
+ spaceCount, hexCount := 0, 0
+ for i := 0; i < len(s); i++ {
+ c := s[i]
+ if shouldEscape(c, mode) {
+ if c == ' ' && mode == encodeQueryComponent {
+ spaceCount++
+ } else {
+ hexCount++
+ }
+ }
+ }
+
+ if spaceCount == 0 && hexCount == 0 {
+ return s
+ }
+
+ t := make([]byte, len(s)+2*hexCount)
+ j := 0
+ for i := 0; i < len(s); i++ {
+ switch c := s[i]; {
+ case c == ' ' && mode == encodeQueryComponent:
+ t[j] = '+'
+ j++
+ case shouldEscape(c, mode):
+ t[j] = '%'
+ t[j+1] = "0123456789ABCDEF"[c>>4]
+ t[j+2] = "0123456789ABCDEF"[c&15]
+ j += 3
+ default:
+ t[j] = s[i]
+ j++
+ }
+ }
+ return string(t)
+}
+
+// UnescapeUserinfo parses the RawUserinfo field of a URL
+// in the form user or user:password, unescapes both halves,
+// and returns them.
+//
+// This functionality should only be used with legacy web sites.
+// RFC 2396 warns that interpreting Userinfo this way
+// ``is NOT RECOMMENDED, because the passing of authentication
+// information in clear text (such as URI) has proven to be a
+// security risk in almost every case where it has been used.''
+func UnescapeUserinfo(rawUserinfo string) (user, password string, err error) {
+ u, p := split(rawUserinfo, ':', true)
+ if user, err = unescape(u, encodeUserPassword); err != nil {
+ return "", "", err
+ }
+ if password, err = unescape(p, encodeUserPassword); err != nil {
+ return "", "", err
+ }
+ return
+}
+
+// EscapeUserinfo combines user and password in the form
+// user:password (or just user if password is empty) and then
+// escapes it for use as the URL.RawUserinfo field.
+//
+// This functionality should only be used with legacy web sites.
+// RFC 2396 warns that interpreting Userinfo this way
+// ``is NOT RECOMMENDED, because the passing of authentication
+// information in clear text (such as URI) has proven to be a
+// security risk in almost every case where it has been used.''
+func EscapeUserinfo(user, password string) string {
+ raw := escape(user, encodeUserPassword)
+ if password != "" {
+ raw += ":" + escape(password, encodeUserPassword)
+ }
+ return raw
+}
+
+// A URL represents a parsed URL (technically, a URI reference).
+// The general form represented is:
+// scheme://[userinfo@]host/path[?query][#fragment]
+// The Raw, RawAuthority, RawPath, and RawQuery fields are in "wire format"
+// (special characters must be hex-escaped if not meant to have special meaning).
+// All other fields are logical values; '+' or '%' represent themselves.
+//
+// The various Raw values are supplied in wire format because
+// clients typically have to split them into pieces before further
+// decoding.
+type URL struct {
+ Raw string // the original string
+ Scheme string // scheme
+ RawAuthority string // [userinfo@]host
+ RawUserinfo string // userinfo
+ Host string // host
+ RawPath string // /path[?query][#fragment]
+ Path string // /path
+ OpaquePath bool // path is opaque (unrooted when scheme is present)
+ RawQuery string // query
+ Fragment string // fragment
+}
+
+// Maybe rawurl is of the form scheme:path.
+// (Scheme must be [a-zA-Z][a-zA-Z0-9+-.]*)
+// If so, return scheme, path; else return "", rawurl.
+func getscheme(rawurl string) (scheme, path string, err error) {
+ for i := 0; i < len(rawurl); i++ {
+ c := rawurl[i]
+ switch {
+ case 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z':
+ // do nothing
+ case '0' <= c && c <= '9' || c == '+' || c == '-' || c == '.':
+ if i == 0 {
+ return "", rawurl, nil
+ }
+ case c == ':':
+ if i == 0 {
+ return "", "", errors.New("missing protocol scheme")
+ }
+ return rawurl[0:i], rawurl[i+1:], nil
+ default:
+ // we have encountered an invalid character,
+ // so there is no valid scheme
+ return "", rawurl, nil
+ }
+ }
+ return "", rawurl, nil
+}
+
+// Maybe s is of the form t c u.
+// If so, return t, c u (or t, u if cutc == true).
+// If not, return s, "".
+func split(s string, c byte, cutc bool) (string, string) {
+ for i := 0; i < len(s); i++ {
+ if s[i] == c {
+ if cutc {
+ return s[0:i], s[i+1:]
+ }
+ return s[0:i], s[i:]
+ }
+ }
+ return s, ""
+}
+
+// Parse parses rawurl into a URL structure.
+// The string rawurl is assumed not to have a #fragment suffix.
+// (Web browsers strip #fragment before sending the URL to a web server.)
+// The rawurl may be relative or absolute.
+func Parse(rawurl string) (url *URL, err error) {
+ return parse(rawurl, false)
+}
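+
+// An illustrative sketch of the fields Parse fills in (added for clarity;
+// the URL is an arbitrary example):
+//
+//   u, err := Parse("http://www.example.com/search?q=go")
+//   // u.Scheme == "http"
+//   // u.Host == "www.example.com"
+//   // u.Path == "/search"
+//   // u.RawQuery == "q=go"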
+
+// ParseRequest parses rawurl into a URL structure. It assumes that
+// rawurl was received from an HTTP request, so the rawurl is interpreted
+// only as an absolute URI or an absolute path.
+// The string rawurl is assumed not to have a #fragment suffix.
+// (Web browsers strip #fragment before sending the URL to a web server.)
+func ParseRequest(rawurl string) (url *URL, err error) {
+ return parse(rawurl, true)
+}
+
+// parse parses a URL from a string in one of two contexts. If
+// viaRequest is true, the URL is assumed to have arrived via an HTTP request,
+// in which case only absolute URLs or path-absolute relative URLs are allowed.
+// If viaRequest is false, all forms of relative URLs are allowed.
+func parse(rawurl string, viaRequest bool) (url *URL, err error) {
+ var (
+ leadingSlash bool
+ path string
+ )
+
+ if rawurl == "" {
+ err = errors.New("empty url")
+ goto Error
+ }
+ url = new(URL)
+ url.Raw = rawurl
+
+ // Split off possible leading "http:", "mailto:", etc.
+ // Cannot contain escaped characters.
+ if url.Scheme, path, err = getscheme(rawurl); err != nil {
+ goto Error
+ }
+ leadingSlash = strings.HasPrefix(path, "/")
+
+ if url.Scheme != "" && !leadingSlash {
+ // RFC 2396:
+ // Absolute URI (has scheme) with non-rooted path
+ // is uninterpreted. It doesn't even have a ?query.
+ // This is the case that handles mailto:name@example.com.
+ url.RawPath = path
+
+ if url.Path, err = unescape(path, encodeOpaque); err != nil {
+ goto Error
+ }
+ url.OpaquePath = true
+ } else {
+ if viaRequest && !leadingSlash {
+ err = errors.New("invalid URI for request")
+ goto Error
+ }
+
+ // Split off query before parsing path further.
+ url.RawPath = path
+ path, query := split(path, '?', false)
+ if len(query) > 1 {
+ url.RawQuery = query[1:]
+ }
+
+ // Maybe path is //authority/path
+ if (url.Scheme != "" || !viaRequest) &&
+ strings.HasPrefix(path, "//") && !strings.HasPrefix(path, "///") {
+ url.RawAuthority, path = split(path[2:], '/', false)
+ url.RawPath = url.RawPath[2+len(url.RawAuthority):]
+ }
+
+ // Split authority into userinfo@host.
+ // If there's no @, split's default is wrong. Check explicitly.
+ var rawHost string
+ if strings.Index(url.RawAuthority, "@") < 0 {
+ rawHost = url.RawAuthority
+ } else {
+ url.RawUserinfo, rawHost = split(url.RawAuthority, '@', true)
+ }
+
+ // We leave RawAuthority only in raw form because clients
+ // of common protocols should be using Userinfo and Host
+ // instead. Clients that wish to use RawAuthority will have to
+ // interpret it themselves: RFC 2396 does not define the meaning.
+
+ if strings.Contains(rawHost, "%") {
+ // Host cannot contain escaped characters.
+ err = errors.New("hexadecimal escape in host")
+ goto Error
+ }
+ url.Host = rawHost
+
+ if url.Path, err = unescape(path, encodePath); err != nil {
+ goto Error
+ }
+ }
+ return url, nil
+
+Error:
+ return nil, &Error{"parse", rawurl, err}
+
+}
+
+// ParseWithReference is like Parse but allows a trailing #fragment.
+func ParseWithReference(rawurlref string) (url *URL, err error) {
+ // Cut off #frag.
+ rawurl, frag := split(rawurlref, '#', false)
+ if url, err = Parse(rawurl); err != nil {
+ return nil, err
+ }
+ url.Raw += frag
+ url.RawPath += frag
+ if len(frag) > 1 {
+ frag = frag[1:]
+ if url.Fragment, err = unescape(frag, encodeFragment); err != nil {
+ return nil, &Error{"parse", rawurl, err}
+ }
+ }
+ return url, nil
+}
+
+// String reassembles url into a valid URL string.
+//
+// There are redundant fields stored in the URL structure:
+// the String method consults Scheme, Path, Host, RawUserinfo,
+// RawQuery, and Fragment, but not Raw, RawPath or RawAuthority.
+func (url *URL) String() string {
+ result := ""
+ if url.Scheme != "" {
+ result += url.Scheme + ":"
+ }
+ if url.Host != "" || url.RawUserinfo != "" {
+ result += "//"
+ if url.RawUserinfo != "" {
+ // hide the password, if any
+ info := url.RawUserinfo
+ if i := strings.Index(info, ":"); i >= 0 {
+ info = info[0:i] + ":******"
+ }
+ result += info + "@"
+ }
+ result += url.Host
+ }
+ if url.OpaquePath {
+ path := url.Path
+ if strings.HasPrefix(path, "/") {
+ result += "%2f"
+ path = path[1:]
+ }
+ result += escape(path, encodeOpaque)
+ } else {
+ result += escape(url.Path, encodePath)
+ }
+ if url.RawQuery != "" {
+ result += "?" + url.RawQuery
+ }
+ if url.Fragment != "" {
+ result += "#" + escape(url.Fragment, encodeFragment)
+ }
+ return result
+}
+
+// Values maps a string key to a list of values.
+// It is typically used for query parameters and form values.
+// Unlike in the http.Header map, the keys in a Values map
+// are case-sensitive.
+type Values map[string][]string
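+
+// A minimal usage sketch (added for clarity; keys and values are placeholders):
+//
+//   v := Values{}
+//   v.Set("name", "Ava")
+//   v.Add("friend", "Jess")
+//   v.Add("friend", "Sarah")
+//   v.Get("name")    // "Ava"
+//   v.Get("friend")  // "Jess" (only the first value)
+//   v["friend"]      // []string{"Jess", "Sarah"}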
+
+// Get gets the first value associated with the given key.
+// If there are no values associated with the key, Get returns
+// the empty string. To access multiple values, use the map
+// directly.
+func (v Values) Get(key string) string {
+ if v == nil {
+ return ""
+ }
+ vs, ok := v[key]
+ if !ok || len(vs) == 0 {
+ return ""
+ }
+ return vs[0]
+}
+
+// Set sets the key to value. It replaces any existing
+// values.
+func (v Values) Set(key, value string) {
+ v[key] = []string{value}
+}
+
+// Add adds the value to key. It appends to any existing
+// values associated with key.
+func (v Values) Add(key, value string) {
+ v[key] = append(v[key], value)
+}
+
+// Del deletes the values associated with key.
+func (v Values) Del(key string) {
+ delete(v, key)
+}
+
+// ParseQuery parses the URL-encoded query string and returns
+// a map listing the values specified for each key.
+// ParseQuery always returns a non-nil map containing all the
+// valid query parameters found; err describes the first decoding error
+// encountered, if any.
+func ParseQuery(query string) (m Values, err error) {
+ m = make(Values)
+ err = parseQuery(m, query)
+ return
+}
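+
+// For example (an illustrative sketch; the query string is arbitrary):
+//
+//   m, _ := ParseQuery("x=1&y=2&y=3")
+//   // m["x"] == []string{"1"}
+//   // m["y"] == []string{"2", "3"}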
+
+func parseQuery(m Values, query string) (err error) {
+ for query != "" {
+ key := query
+ if i := strings.IndexAny(key, "&;"); i >= 0 {
+ key, query = key[:i], key[i+1:]
+ } else {
+ query = ""
+ }
+ if key == "" {
+ continue
+ }
+ value := ""
+ if i := strings.Index(key, "="); i >= 0 {
+ key, value = key[:i], key[i+1:]
+ }
+ key, err1 := QueryUnescape(key)
+ if err1 != nil {
+ err = err1
+ continue
+ }
+ value, err1 = QueryUnescape(value)
+ if err1 != nil {
+ err = err1
+ continue
+ }
+ m[key] = append(m[key], value)
+ }
+ return err
+}
+
+// Encode encodes the values into ``URL encoded'' form.
+// e.g. "foo=bar&bar=baz"
+func (v Values) Encode() string {
+ if v == nil {
+ return ""
+ }
+ parts := make([]string, 0, len(v)) // will be large enough for most uses
+ for k, vs := range v {
+ prefix := QueryEscape(k) + "="
+ for _, v := range vs {
+ parts = append(parts, prefix+QueryEscape(v))
+ }
+ }
+ return strings.Join(parts, "&")
+}
+
+// resolvePath resolves the special path segments ("." and "..") in
+// refpath against basepath, per RFC 2396.
+func resolvePath(basepath string, refpath string) string {
+ base := strings.Split(basepath, "/")
+ refs := strings.Split(refpath, "/")
+ if len(base) == 0 {
+ base = []string{""}
+ }
+ for idx, ref := range refs {
+ switch {
+ case ref == ".":
+ base[len(base)-1] = ""
+ case ref == "..":
+ newLen := len(base) - 1
+ if newLen < 1 {
+ newLen = 1
+ }
+ base = base[0:newLen]
+ base[len(base)-1] = ""
+ default:
+ if idx == 0 || base[len(base)-1] == "" {
+ base[len(base)-1] = ref
+ } else {
+ base = append(base, ref)
+ }
+ }
+ }
+ return strings.Join(base, "/")
+}
+
+// IsAbs returns true if the URL is absolute.
+func (url *URL) IsAbs() bool {
+ return url.Scheme != ""
+}
+
+// Parse parses a URL in the context of a base URL. The URL in ref
+// may be relative or absolute. Parse returns nil, err on parse
+// failure, otherwise its return value is the same as ResolveReference.
+func (base *URL) Parse(ref string) (*URL, error) {
+ refurl, err := Parse(ref)
+ if err != nil {
+ return nil, err
+ }
+ return base.ResolveReference(refurl), nil
+}
+
+// ResolveReference resolves a URI reference to an absolute URI from
+// an absolute base URI, per RFC 2396 Section 5.2. The URI reference
+// may be relative or absolute. ResolveReference always returns a new
+// URL instance, even if the returned URL is identical to either the
+// base or reference. If ref is an absolute URL, then ResolveReference
+// ignores base and returns a copy of ref.
+func (base *URL) ResolveReference(ref *URL) *URL {
+ url := new(URL)
+ switch {
+ case ref.IsAbs():
+ *url = *ref
+ default:
+ // relativeURI = ( net_path | abs_path | rel_path ) [ "?" query ]
+ *url = *base
+ if ref.RawAuthority != "" {
+ // The "net_path" case.
+ url.RawAuthority = ref.RawAuthority
+ url.Host = ref.Host
+ url.RawUserinfo = ref.RawUserinfo
+ }
+ switch {
+ case url.OpaquePath:
+ url.Path = ref.Path
+ url.RawPath = ref.RawPath
+ url.RawQuery = ref.RawQuery
+ case strings.HasPrefix(ref.Path, "/"):
+ // The "abs_path" case.
+ url.Path = ref.Path
+ url.RawPath = ref.RawPath
+ url.RawQuery = ref.RawQuery
+ default:
+ // The "rel_path" case.
+ path := resolvePath(base.Path, ref.Path)
+ if !strings.HasPrefix(path, "/") {
+ path = "/" + path
+ }
+ url.Path = path
+ url.RawPath = url.Path
+ url.RawQuery = ref.RawQuery
+ if ref.RawQuery != "" {
+ url.RawPath += "?" + url.RawQuery
+ }
+ }
+
+ url.Fragment = ref.Fragment
+ }
+ url.Raw = url.String()
+ return url
+}
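+
+// An illustrative sketch (added for clarity; the URLs are arbitrary examples):
+//
+//   base, _ := Parse("http://example.com/a/b")
+//   ref, _ := Parse("../c")
+//   abs := base.ResolveReference(ref)
+//   // abs.String() == "http://example.com/c"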
+
+// Query parses RawQuery and returns the corresponding values.
+func (u *URL) Query() Values {
+ v, _ := ParseQuery(u.RawQuery)
+ return v
+}
+
+// EncodedPath returns the URL's path in "URL path encoded" form.
+func (u *URL) EncodedPath() string {
+ return escape(u.Path, encodePath)
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package url
+
+import (
+ "fmt"
+ "reflect"
+ "testing"
+)
+
+type URLTest struct {
+ in string
+ out *URL
+ roundtrip string // expected result of reserializing the URL; empty means same as "in".
+}
+
+var urltests = []URLTest{
+ // no path
+ {
+ "http://www.google.com",
+ &URL{
+ Raw: "http://www.google.com",
+ Scheme: "http",
+ RawAuthority: "www.google.com",
+ Host: "www.google.com",
+ },
+ "",
+ },
+ // path
+ {
+ "http://www.google.com/",
+ &URL{
+ Raw: "http://www.google.com/",
+ Scheme: "http",
+ RawAuthority: "www.google.com",
+ Host: "www.google.com",
+ RawPath: "/",
+ Path: "/",
+ },
+ "",
+ },
+ // path with hex escaping
+ {
+ "http://www.google.com/file%20one%26two",
+ &URL{
+ Raw: "http://www.google.com/file%20one%26two",
+ Scheme: "http",
+ RawAuthority: "www.google.com",
+ Host: "www.google.com",
+ RawPath: "/file%20one%26two",
+ Path: "/file one&two",
+ },
+ "http://www.google.com/file%20one&two",
+ },
+ // user
+ {
+ "ftp://webmaster@www.google.com/",
+ &URL{
+ Raw: "ftp://webmaster@www.google.com/",
+ Scheme: "ftp",
+ RawAuthority: "webmaster@www.google.com",
+ RawUserinfo: "webmaster",
+ Host: "www.google.com",
+ RawPath: "/",
+ Path: "/",
+ },
+ "",
+ },
+ // escape sequence in username
+ {
+ "ftp://john%20doe@www.google.com/",
+ &URL{
+ Raw: "ftp://john%20doe@www.google.com/",
+ Scheme: "ftp",
+ RawAuthority: "john%20doe@www.google.com",
+ RawUserinfo: "john%20doe",
+ Host: "www.google.com",
+ RawPath: "/",
+ Path: "/",
+ },
+ "ftp://john%20doe@www.google.com/",
+ },
+ // query
+ {
+ "http://www.google.com/?q=go+language",
+ &URL{
+ Raw: "http://www.google.com/?q=go+language",
+ Scheme: "http",
+ RawAuthority: "www.google.com",
+ Host: "www.google.com",
+ RawPath: "/?q=go+language",
+ Path: "/",
+ RawQuery: "q=go+language",
+ },
+ "",
+ },
+ // query with hex escaping: NOT parsed
+ {
+ "http://www.google.com/?q=go%20language",
+ &URL{
+ Raw: "http://www.google.com/?q=go%20language",
+ Scheme: "http",
+ RawAuthority: "www.google.com",
+ Host: "www.google.com",
+ RawPath: "/?q=go%20language",
+ Path: "/",
+ RawQuery: "q=go%20language",
+ },
+ "",
+ },
+ // %20 outside query
+ {
+ "http://www.google.com/a%20b?q=c+d",
+ &URL{
+ Raw: "http://www.google.com/a%20b?q=c+d",
+ Scheme: "http",
+ RawAuthority: "www.google.com",
+ Host: "www.google.com",
+ RawPath: "/a%20b?q=c+d",
+ Path: "/a b",
+ RawQuery: "q=c+d",
+ },
+ "",
+ },
+ // path without leading /, so no query parsing
+ {
+ "http:www.google.com/?q=go+language",
+ &URL{
+ Raw: "http:www.google.com/?q=go+language",
+ Scheme: "http",
+ RawPath: "www.google.com/?q=go+language",
+ Path: "www.google.com/?q=go+language",
+ OpaquePath: true,
+ },
+ "http:www.google.com/?q=go+language",
+ },
+ // path without leading /, so no query parsing
+ {
+ "http:%2f%2fwww.google.com/?q=go+language",
+ &URL{
+ Raw: "http:%2f%2fwww.google.com/?q=go+language",
+ Scheme: "http",
+ RawPath: "%2f%2fwww.google.com/?q=go+language",
+ Path: "//www.google.com/?q=go+language",
+ OpaquePath: true,
+ },
+ "http:%2f/www.google.com/?q=go+language",
+ },
+ // non-authority
+ {
+ "mailto:/webmaster@golang.org",
+ &URL{
+ Raw: "mailto:/webmaster@golang.org",
+ Scheme: "mailto",
+ RawPath: "/webmaster@golang.org",
+ Path: "/webmaster@golang.org",
+ },
+ "",
+ },
+ // non-authority
+ {
+ "mailto:webmaster@golang.org",
+ &URL{
+ Raw: "mailto:webmaster@golang.org",
+ Scheme: "mailto",
+ RawPath: "webmaster@golang.org",
+ Path: "webmaster@golang.org",
+ OpaquePath: true,
+ },
+ "",
+ },
+ // unescaped :// in query should not create a scheme
+ {
+ "/foo?query=http://bad",
+ &URL{
+ Raw: "/foo?query=http://bad",
+ RawPath: "/foo?query=http://bad",
+ Path: "/foo",
+ RawQuery: "query=http://bad",
+ },
+ "",
+ },
+ // leading // without scheme should create an authority
+ {
+ "//foo",
+ &URL{
+ RawAuthority: "foo",
+ Raw: "//foo",
+ Host: "foo",
+ Scheme: "",
+ RawPath: "",
+ Path: "",
+ },
+ "",
+ },
+ // leading // without scheme, with userinfo, path, and query
+ {
+ "//user@foo/path?a=b",
+ &URL{
+ Raw: "//user@foo/path?a=b",
+ RawAuthority: "user@foo",
+ RawUserinfo: "user",
+ Scheme: "",
+ RawPath: "/path?a=b",
+ Path: "/path",
+ RawQuery: "a=b",
+ Host: "foo",
+ },
+ "",
+ },
+ // Three leading slashes isn't an authority, but doesn't return an error.
+ // (We can't return an error, as this code is also used via
+ // ServeHTTP -> ReadRequest -> Parse, which is arguably a
+ // different URL parsing context, but currently shares the
+ // same codepath)
+ {
+ "///threeslashes",
+ &URL{
+ RawAuthority: "",
+ Raw: "///threeslashes",
+ Host: "",
+ Scheme: "",
+ RawPath: "///threeslashes",
+ Path: "///threeslashes",
+ },
+ "",
+ },
+ {
+ "http://user:password@google.com",
+ &URL{
+ Raw: "http://user:password@google.com",
+ Scheme: "http",
+ RawAuthority: "user:password@google.com",
+ RawUserinfo: "user:password",
+ Host: "google.com",
+ },
+ "http://user:******@google.com",
+ },
+ {
+ "http://user:longerpass@google.com",
+ &URL{
+ Raw: "http://user:longerpass@google.com",
+ Scheme: "http",
+ RawAuthority: "user:longerpass@google.com",
+ RawUserinfo: "user:longerpass",
+ Host: "google.com",
+ },
+ "http://user:******@google.com",
+ },
+}
+
+var urlnofragtests = []URLTest{
+ {
+ "http://www.google.com/?q=go+language#foo",
+ &URL{
+ Raw: "http://www.google.com/?q=go+language#foo",
+ Scheme: "http",
+ RawAuthority: "www.google.com",
+ Host: "www.google.com",
+ RawPath: "/?q=go+language#foo",
+ Path: "/",
+ RawQuery: "q=go+language#foo",
+ },
+ "",
+ },
+}
+
+var urlfragtests = []URLTest{
+ {
+ "http://www.google.com/?q=go+language#foo",
+ &URL{
+ Raw: "http://www.google.com/?q=go+language#foo",
+ Scheme: "http",
+ RawAuthority: "www.google.com",
+ Host: "www.google.com",
+ RawPath: "/?q=go+language#foo",
+ Path: "/",
+ RawQuery: "q=go+language",
+ Fragment: "foo",
+ },
+ "",
+ },
+ {
+ "http://www.google.com/?q=go+language#foo%26bar",
+ &URL{
+ Raw: "http://www.google.com/?q=go+language#foo%26bar",
+ Scheme: "http",
+ RawAuthority: "www.google.com",
+ Host: "www.google.com",
+ RawPath: "/?q=go+language#foo%26bar",
+ Path: "/",
+ RawQuery: "q=go+language",
+ Fragment: "foo&bar",
+ },
+ "http://www.google.com/?q=go+language#foo&bar",
+ },
+}
+
+// more useful string for debugging than fmt's struct printer
+func ufmt(u *URL) string {
+ return fmt.Sprintf("raw=%q, scheme=%q, rawpath=%q, auth=%q, userinfo=%q, host=%q, path=%q, rawq=%q, frag=%q",
+ u.Raw, u.Scheme, u.RawPath, u.RawAuthority, u.RawUserinfo,
+ u.Host, u.Path, u.RawQuery, u.Fragment)
+}
+
+func DoTest(t *testing.T, parse func(string) (*URL, error), name string, tests []URLTest) {
+ for _, tt := range tests {
+ u, err := parse(tt.in)
+ if err != nil {
+ t.Errorf("%s(%q) returned error %s", name, tt.in, err)
+ continue
+ }
+ if !reflect.DeepEqual(u, tt.out) {
+ t.Errorf("%s(%q):\n\thave %v\n\twant %v\n",
+ name, tt.in, ufmt(u), ufmt(tt.out))
+ }
+ }
+}
+
+func TestParse(t *testing.T) {
+ DoTest(t, Parse, "Parse", urltests)
+ DoTest(t, Parse, "Parse", urlnofragtests)
+}
+
+func TestParseWithReference(t *testing.T) {
+ DoTest(t, ParseWithReference, "ParseWithReference", urltests)
+ DoTest(t, ParseWithReference, "ParseWithReference", urlfragtests)
+}
+
+const pathThatLooksSchemeRelative = "//not.a.user@not.a.host/just/a/path"
+
+var parseRequestUrlTests = []struct {
+ url string
+ expectedValid bool
+}{
+ {"http://foo.com", true},
+ {"http://foo.com/", true},
+ {"http://foo.com/path", true},
+ {"/", true},
+ {pathThatLooksSchemeRelative, true},
+ {"//not.a.user@%66%6f%6f.com/just/a/path/also", true},
+ {"foo.html", false},
+ {"../dir/", false},
+}
+
+func TestParseRequest(t *testing.T) {
+ for _, test := range parseRequestUrlTests {
+ _, err := ParseRequest(test.url)
+ valid := err == nil
+ if valid != test.expectedValid {
+ t.Errorf("Expected valid=%v for %q; got %v", test.expectedValid, test.url, valid)
+ }
+ }
+
+ url, err := ParseRequest(pathThatLooksSchemeRelative)
+ if err != nil {
+ t.Fatalf("Unexpected error %v", err)
+ }
+ if url.Path != pathThatLooksSchemeRelative {
+ t.Errorf("Expected path %q; got %q", pathThatLooksSchemeRelative, url.Path)
+ }
+}
+
+func DoTestString(t *testing.T, parse func(string) (*URL, error), name string, tests []URLTest) {
+ for _, tt := range tests {
+ u, err := parse(tt.in)
+ if err != nil {
+ t.Errorf("%s(%q) returned error %s", name, tt.in, err)
+ continue
+ }
+ s := u.String()
+ expected := tt.in
+ if len(tt.roundtrip) > 0 {
+ expected = tt.roundtrip
+ }
+ if s != expected {
+ t.Errorf("%s(%q).String() == %q (expected %q)", name, tt.in, s, expected)
+ }
+ }
+}
+
+func TestURLString(t *testing.T) {
+ DoTestString(t, Parse, "Parse", urltests)
+ DoTestString(t, Parse, "Parse", urlnofragtests)
+ DoTestString(t, ParseWithReference, "ParseWithReference", urltests)
+ DoTestString(t, ParseWithReference, "ParseWithReference", urlfragtests)
+}
+
+type EscapeTest struct {
+ in string
+ out string
+ err error
+}
+
+var unescapeTests = []EscapeTest{
+ {
+ "",
+ "",
+ nil,
+ },
+ {
+ "abc",
+ "abc",
+ nil,
+ },
+ {
+ "1%41",
+ "1A",
+ nil,
+ },
+ {
+ "1%41%42%43",
+ "1ABC",
+ nil,
+ },
+ {
+ "%4a",
+ "J",
+ nil,
+ },
+ {
+ "%6F",
+ "o",
+ nil,
+ },
+ {
+ "%", // not enough characters after %
+ "",
+ EscapeError("%"),
+ },
+ {
+ "%a", // not enough characters after %
+ "",
+ EscapeError("%a"),
+ },
+ {
+ "%1", // not enough characters after %
+ "",
+ EscapeError("%1"),
+ },
+ {
+ "123%45%6", // not enough characters after %
+ "",
+ EscapeError("%6"),
+ },
+ {
+ "%zzzzz", // invalid hex digits
+ "",
+ EscapeError("%zz"),
+ },
+}
+
+func TestUnescape(t *testing.T) {
+ for _, tt := range unescapeTests {
+ actual, err := QueryUnescape(tt.in)
+ if actual != tt.out || (err != nil) != (tt.err != nil) {
+ t.Errorf("QueryUnescape(%q) = %q, %s; want %q, %s", tt.in, actual, err, tt.out, tt.err)
+ }
+ }
+}
+
+var escapeTests = []EscapeTest{
+ {
+ "",
+ "",
+ nil,
+ },
+ {
+ "abc",
+ "abc",
+ nil,
+ },
+ {
+ "one two",
+ "one+two",
+ nil,
+ },
+ {
+ "10%",
+ "10%25",
+ nil,
+ },
+ {
+ " ?&=#+%!<>#\"{}|\\^[]`☺\t",
+ "+%3F%26%3D%23%2B%25!%3C%3E%23%22%7B%7D%7C%5C%5E%5B%5D%60%E2%98%BA%09",
+ nil,
+ },
+}
+
+func TestEscape(t *testing.T) {
+ for _, tt := range escapeTests {
+ actual := QueryEscape(tt.in)
+ if tt.out != actual {
+ t.Errorf("QueryEscape(%q) = %q, want %q", tt.in, actual, tt.out)
+ }
+
+ // for bonus points, verify that escape:unescape is an identity.
+ roundtrip, err := QueryUnescape(actual)
+ if roundtrip != tt.in || err != nil {
+ t.Errorf("QueryUnescape(%q) = %q, %s; want %q, %s", actual, roundtrip, err, tt.in, "[no error]")
+ }
+ }
+}
+
+type UserinfoTest struct {
+ User string
+ Password string
+ Raw string
+}
+
+var userinfoTests = []UserinfoTest{
+ {"user", "password", "user:password"},
+ {"foo:bar", "~!@#$%^&*()_+{}|[]\\-=`:;'\"<>?,./",
+ "foo%3Abar:~!%40%23$%25%5E&*()_+%7B%7D%7C%5B%5D%5C-=%60%3A;'%22%3C%3E?,.%2F"},
+}
+
+func TestEscapeUserinfo(t *testing.T) {
+ for _, tt := range userinfoTests {
+ if raw := EscapeUserinfo(tt.User, tt.Password); raw != tt.Raw {
+ t.Errorf("EscapeUserinfo(%q, %q) = %q, want %q", tt.User, tt.Password, raw, tt.Raw)
+ }
+ }
+}
+
+func TestUnescapeUserinfo(t *testing.T) {
+ for _, tt := range userinfoTests {
+ if user, pass, err := UnescapeUserinfo(tt.Raw); user != tt.User || pass != tt.Password || err != nil {
+ t.Errorf("UnescapeUserinfo(%q) = %q, %q, %v, want %q, %q, nil", tt.Raw, user, pass, err, tt.User, tt.Password)
+ }
+ }
+}
+
+type EncodeQueryTest struct {
+ m Values
+ expected string
+ expected1 string
+}
+
+var encodeQueryTests = []EncodeQueryTest{
+ {nil, "", ""},
+ {Values{"q": {"puppies"}, "oe": {"utf8"}}, "q=puppies&oe=utf8", "oe=utf8&q=puppies"},
+ {Values{"q": {"dogs", "&", "7"}}, "q=dogs&q=%26&q=7", "q=dogs&q=%26&q=7"},
+}
+
+func TestEncodeQuery(t *testing.T) {
+ for _, tt := range encodeQueryTests {
+ if q := tt.m.Encode(); q != tt.expected && q != tt.expected1 {
+ t.Errorf(`EncodeQuery(%+v) = %q, want %q`, tt.m, q, tt.expected)
+ }
+ }
+}
+
+var resolvePathTests = []struct {
+ base, ref, expected string
+}{
+ {"a/b", ".", "a/"},
+ {"a/b", "c", "a/c"},
+ {"a/b", "..", ""},
+ {"a/", "..", ""},
+ {"a/", "../..", ""},
+ {"a/b/c", "..", "a/"},
+ {"a/b/c", "../d", "a/d"},
+ {"a/b/c", ".././d", "a/d"},
+ {"a/b", "./..", ""},
+ {"a/./b", ".", "a/./"},
+ {"a/../", ".", "a/../"},
+ {"a/.././b", "c", "a/.././c"},
+}
+
+func TestResolvePath(t *testing.T) {
+ for _, test := range resolvePathTests {
+ got := resolvePath(test.base, test.ref)
+ if got != test.expected {
+ t.Errorf("For %q + %q got %q; expected %q", test.base, test.ref, got, test.expected)
+ }
+ }
+}
+
+var resolveReferenceTests = []struct {
+ base, rel, expected string
+}{
+ // Absolute URL references
+ {"http://foo.com?a=b", "https://bar.com/", "https://bar.com/"},
+ {"http://foo.com/", "https://bar.com/?a=b", "https://bar.com/?a=b"},
+ {"http://foo.com/bar", "mailto:foo@example.com", "mailto:foo@example.com"},
+
+ // Path-absolute references
+ {"http://foo.com/bar", "/baz", "http://foo.com/baz"},
+ {"http://foo.com/bar?a=b#f", "/baz", "http://foo.com/baz"},
+ {"http://foo.com/bar?a=b", "/baz?c=d", "http://foo.com/baz?c=d"},
+
+ // Scheme-relative
+ {"https://foo.com/bar?a=b", "//bar.com/quux", "https://bar.com/quux"},
+
+ // Path-relative references:
+
+ // ... current directory
+ {"http://foo.com", ".", "http://foo.com/"},
+ {"http://foo.com/bar", ".", "http://foo.com/"},
+ {"http://foo.com/bar/", ".", "http://foo.com/bar/"},
+
+ // ... going down
+ {"http://foo.com", "bar", "http://foo.com/bar"},
+ {"http://foo.com/", "bar", "http://foo.com/bar"},
+ {"http://foo.com/bar/baz", "quux", "http://foo.com/bar/quux"},
+
+ // ... going up
+ {"http://foo.com/bar/baz", "../quux", "http://foo.com/quux"},
+ {"http://foo.com/bar/baz", "../../../../../quux", "http://foo.com/quux"},
+ {"http://foo.com/bar", "..", "http://foo.com/"},
+ {"http://foo.com/bar/baz", "./..", "http://foo.com/"},
+
+ // "." and ".." in the base aren't special
+ {"http://foo.com/dot/./dotdot/../foo/bar", "../baz", "http://foo.com/dot/./dotdot/../baz"},
+
+ // Triple dot isn't special
+ {"http://foo.com/bar", "...", "http://foo.com/..."},
+
+ // Fragment
+ {"http://foo.com/bar", ".#frag", "http://foo.com/#frag"},
+}
+
+func TestResolveReference(t *testing.T) {
+ mustParse := func(url string) *URL {
+ u, err := ParseWithReference(url)
+ if err != nil {
+ t.Fatalf("Expected URL to parse: %q, got error: %v", url, err)
+ }
+ return u
+ }
+ for _, test := range resolveReferenceTests {
+ base := mustParse(test.base)
+ rel := mustParse(test.rel)
+ url := base.ResolveReference(rel)
+ urlStr := url.String()
+ if urlStr != test.expected {
+ t.Errorf("Resolving %q + %q != %q; got %q", test.base, test.rel, test.expected, urlStr)
+ }
+ }
+
+ // Test that new instances are returned.
+ base := mustParse("http://foo.com/")
+ abs := base.ResolveReference(mustParse("."))
+ if base == abs {
+ t.Errorf("Expected no-op reference to return new URL instance.")
+ }
+ barRef := mustParse("http://bar.com/")
+ abs = base.ResolveReference(barRef)
+ if abs == barRef {
+ t.Errorf("Expected resolution of absolute reference to return new URL instance.")
+ }
+
+ // Test the convenience wrapper too
+ base = mustParse("http://foo.com/path/one/")
+ abs, _ = base.Parse("../two")
+ expected := "http://foo.com/path/two"
+ if abs.String() != expected {
+ t.Errorf("Parse wrapper got %q; expected %q", abs.String(), expected)
+ }
+ _, err := base.Parse("")
+ if err == nil {
+ t.Errorf("Expected an error from Parse wrapper parsing an empty string.")
+ }
+
+}
+
+func TestQueryValues(t *testing.T) {
+ u, _ := Parse("http://x.com?foo=bar&bar=1&bar=2")
+ v := u.Query()
+ if len(v) != 2 {
+ t.Errorf("got %d keys in Query values, want 2", len(v))
+ }
+ if g, e := v.Get("foo"), "bar"; g != e {
+ t.Errorf("Get(foo) = %q, want %q", g, e)
+ }
+ // Case sensitive:
+ if g, e := v.Get("Foo"), ""; g != e {
+ t.Errorf("Get(Foo) = %q, want %q", g, e)
+ }
+ if g, e := v.Get("bar"), "1"; g != e {
+ t.Errorf("Get(bar) = %q, want %q", g, e)
+ }
+ if g, e := v.Get("baz"), ""; g != e {
+ t.Errorf("Get(baz) = %q, want %q", g, e)
+ }
+ v.Del("bar")
+ if g, e := v.Get("bar"), ""; g != e {
+ t.Errorf("second Get(bar) = %q, want %q", g, e)
+ }
+}
+
+type parseTest struct {
+ query string
+ out Values
+}
+
+var parseTests = []parseTest{
+ {
+ query: "a=1&b=2",
+ out: Values{"a": []string{"1"}, "b": []string{"2"}},
+ },
+ {
+ query: "a=1&a=2&a=banana",
+ out: Values{"a": []string{"1", "2", "banana"}},
+ },
+ {
+ query: "ascii=%3Ckey%3A+0x90%3E",
+ out: Values{"ascii": []string{"<key: 0x90>"}},
+ },
+ {
+ query: "a=1;b=2",
+ out: Values{"a": []string{"1"}, "b": []string{"2"}},
+ },
+ {
+ query: "a=1&a=2;a=banana",
+ out: Values{"a": []string{"1", "2", "banana"}},
+ },
+}
+
+func TestParseQuery(t *testing.T) {
+ for i, test := range parseTests {
+ form, err := ParseQuery(test.query)
+ if err != nil {
+ t.Errorf("test %d: Unexpected error: %v", i, err)
+ continue
+ }
+ if len(form) != len(test.out) {
+ t.Errorf("test %d: len(form) = %d, want %d", i, len(form), len(test.out))
+ }
+ for k, evs := range test.out {
+ vs, ok := form[k]
+ if !ok {
+ t.Errorf("test %d: Missing key %q", i, k)
+ continue
+ }
+ if len(vs) != len(evs) {
+ t.Errorf("test %d: len(form[%q]) = %d, want %d", i, k, len(vs), len(evs))
+ continue
+ }
+ for j, ev := range evs {
+ if v := vs[j]; v != ev {
+ t.Errorf("test %d: form[%q][%d] = %q, want %q", i, k, j, v, ev)
+ }
+ }
+ }
+ }
+}
package netchan
import (
+ "encoding/gob"
"errors"
- "gob"
"io"
"reflect"
"sync"
import (
"errors"
- "log"
"io"
+ "log"
"net"
"reflect"
"strconv"
"bytes"
"io"
"strings"
- "utf8"
+ "unicode/utf8"
)
var debug = false
"strconv"
"strings"
"unicode"
- "utf8"
+ "unicode/utf8"
)
// Errors returned during parsing and execution. Users may extract the information and reformat
import (
"bytes"
+ "encoding/json"
"fmt"
"io"
"io/ioutil"
- "json"
"strings"
"testing"
)
//
// If n > 0, Readdirnames returns at most n names. In this case, if
// Readdirnames returns an empty slice, it will return a non-nil error
-// explaining why. At the end of a directory, the error is os.EOF.
+// explaining why. At the end of a directory, the error is io.EOF.
//
// If n <= 0, Readdirnames returns all the names from the directory in
// a single slice. In this case, if Readdirnames succeeds (reads all
package os
import (
- "error"
+ "errors"
"syscall"
)
import (
"errors"
"syscall"
- "utf16"
+ "unicode/utf16"
"unsafe"
)
if err == nil {
return nil
}
- return &SyscallError{syscall, err.String()}
+ return &SyscallError{syscall, err.Error()}
}
var (
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package exec runs external commands. It wraps os.StartProcess to make it
+// easier to remap stdin and stdout, connect I/O with pipes, and do other
+// adjustments.
+package exec
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "os"
+ "strconv"
+ "syscall"
+)
+
+// Error records the name of a binary that failed to be executed
+// and the reason it failed.
+type Error struct {
+ Name string
+ Err error
+}
+
+func (e *Error) Error() string {
+ return "exec: " + strconv.Quote(e.Name) + ": " + e.Err.Error()
+}
+
+// Cmd represents an external command being prepared or run.
+type Cmd struct {
+ // Path is the path of the command to run.
+ //
+ // This is the only field that must be set to a non-zero
+ // value.
+ Path string
+
+ // Args holds command line arguments, including the command as Args[0].
+ // If the Args field is empty or nil, Run uses {Path}.
+ //
+ // In typical use, both Path and Args are set by calling Command.
+ Args []string
+
+ // Env specifies the environment of the process.
+ // If Env is nil, Run uses the current process's environment.
+ Env []string
+
+ // Dir specifies the working directory of the command.
+ // If Dir is the empty string, Run runs the command in the
+ // calling process's current directory.
+ Dir string
+
+ // Stdin specifies the process's standard input.
+ // If Stdin is nil, the process reads from DevNull.
+ Stdin io.Reader
+
+ // Stdout and Stderr specify the process's standard output and error.
+ //
+ // If either is nil, Run connects the
+ // corresponding file descriptor to /dev/null.
+ //
+ // If Stdout and Stderr are the same writer, at most one
+ // goroutine at a time will call Write.
+ Stdout io.Writer
+ Stderr io.Writer
+
+ // ExtraFiles specifies additional open files to be inherited by the
+ // new process. It does not include standard input, standard output, or
+ // standard error. If non-nil, entry i becomes file descriptor 3+i.
+ ExtraFiles []*os.File
+
+ // SysProcAttr holds optional, operating system-specific attributes.
+ // Run passes it to os.StartProcess as the os.ProcAttr's Sys field.
+ SysProcAttr *syscall.SysProcAttr
+
+ // Process is the underlying process, once started.
+ Process *os.Process
+
+ err error // last error (from LookPath, stdin, stdout, stderr)
+ finished bool // when Wait was called
+ childFiles []*os.File
+ closeAfterStart []io.Closer
+ closeAfterWait []io.Closer
+ goroutine []func() error
+ errch chan error // one send per goroutine
+}
+
+// Command returns the Cmd struct to execute the named program with
+// the given arguments.
+//
+// It sets Path and Args in the returned structure and zeroes the
+// other fields.
+//
+// If name contains no path separators, Command uses LookPath to
+// resolve the path to a complete name if possible. Otherwise it uses
+// name directly.
+//
+// The returned Cmd's Args field is constructed from the command name
+// followed by the elements of arg, so arg should not include the
+// command name itself. For example, Command("echo", "hello") yields
+// an Args field of []string{"echo", "hello"}.
+func Command(name string, arg ...string) *Cmd {
+ aname, err := LookPath(name)
+ if err != nil {
+ aname = name
+ }
+ return &Cmd{
+ Path: aname,
+ Args: append([]string{name}, arg...),
+ err: err,
+ }
+}
+
+// interfaceEqual protects against panics from doing equality tests on
+// two interfaces with non-comparable underlying types
+func interfaceEqual(a, b interface{}) bool {
+ defer func() {
+ recover()
+ }()
+ return a == b
+}
+
+func (c *Cmd) envv() []string {
+ if c.Env != nil {
+ return c.Env
+ }
+ return os.Environ()
+}
+
+func (c *Cmd) argv() []string {
+ if len(c.Args) > 0 {
+ return c.Args
+ }
+ return []string{c.Path}
+}
+
+func (c *Cmd) stdin() (f *os.File, err error) {
+ if c.Stdin == nil {
+ f, err = os.Open(os.DevNull)
+ c.closeAfterStart = append(c.closeAfterStart, f)
+ return
+ }
+
+ if f, ok := c.Stdin.(*os.File); ok {
+ return f, nil
+ }
+
+ pr, pw, err := os.Pipe()
+ if err != nil {
+ return
+ }
+
+ c.closeAfterStart = append(c.closeAfterStart, pr)
+ c.closeAfterWait = append(c.closeAfterWait, pw)
+ c.goroutine = append(c.goroutine, func() error {
+ _, err := io.Copy(pw, c.Stdin)
+ if err1 := pw.Close(); err == nil {
+ err = err1
+ }
+ return err
+ })
+ return pr, nil
+}
+
+func (c *Cmd) stdout() (f *os.File, err error) {
+ return c.writerDescriptor(c.Stdout)
+}
+
+func (c *Cmd) stderr() (f *os.File, err error) {
+ if c.Stderr != nil && interfaceEqual(c.Stderr, c.Stdout) {
+ return c.childFiles[1], nil
+ }
+ return c.writerDescriptor(c.Stderr)
+}
+
+func (c *Cmd) writerDescriptor(w io.Writer) (f *os.File, err error) {
+ if w == nil {
+ f, err = os.OpenFile(os.DevNull, os.O_WRONLY, 0)
+ c.closeAfterStart = append(c.closeAfterStart, f)
+ return
+ }
+
+ if f, ok := w.(*os.File); ok {
+ return f, nil
+ }
+
+ pr, pw, err := os.Pipe()
+ if err != nil {
+ return
+ }
+
+ c.closeAfterStart = append(c.closeAfterStart, pw)
+ c.closeAfterWait = append(c.closeAfterWait, pr)
+ c.goroutine = append(c.goroutine, func() error {
+ _, err := io.Copy(w, pr)
+ return err
+ })
+ return pw, nil
+}
+
+// Run starts the specified command and waits for it to complete.
+//
+// The returned error is nil if the command runs, has no problems
+// copying stdin, stdout, and stderr, and exits with a zero exit
+// status.
+//
+// If the command fails to run or doesn't complete successfully, the
+// error is of type *ExitError. Other error types may be
+// returned for I/O problems.
+func (c *Cmd) Run() error {
+ if err := c.Start(); err != nil {
+ return err
+ }
+ return c.Wait()
+}
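For orientation, an illustrative sketch of the Run and Output entry points defined in this file (not part of the patch; it assumes the package's post-reorganization import path, written here as "os/exec", and Unix "true" and "echo" binaries on $PATH):

package main

import (
	"fmt"
	"os/exec" // assumed import path for the package added above
)

func main() {
	// Run blocks until the command exits; a non-zero exit status
	// is reported as an *exec.ExitError.
	if err := exec.Command("true").Run(); err != nil {
		fmt.Println("true failed:", err)
	}

	// Output (defined further below in this file) runs the command
	// and captures its standard output.
	out, err := exec.Command("echo", "hello").Output()
	if err != nil {
		fmt.Println("echo failed:", err)
		return
	}
	fmt.Printf("%s", out) // "hello\n"
}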
+
+// Start starts the specified command but does not wait for it to complete.
+func (c *Cmd) Start() error {
+ if c.err != nil {
+ return c.err
+ }
+ if c.Process != nil {
+ return errors.New("exec: already started")
+ }
+
+ type F func(*Cmd) (*os.File, error)
+ for _, setupFd := range []F{(*Cmd).stdin, (*Cmd).stdout, (*Cmd).stderr} {
+ fd, err := setupFd(c)
+ if err != nil {
+ return err
+ }
+ c.childFiles = append(c.childFiles, fd)
+ }
+ c.childFiles = append(c.childFiles, c.ExtraFiles...)
+
+ var err error
+ c.Process, err = os.StartProcess(c.Path, c.argv(), &os.ProcAttr{
+ Dir: c.Dir,
+ Files: c.childFiles,
+ Env: c.envv(),
+ Sys: c.SysProcAttr,
+ })
+ if err != nil {
+ return err
+ }
+
+ for _, fd := range c.closeAfterStart {
+ fd.Close()
+ }
+
+ c.errch = make(chan error, len(c.goroutine))
+ for _, fn := range c.goroutine {
+ go func(fn func() error) {
+ c.errch <- fn()
+ }(fn)
+ }
+
+ return nil
+}
+
+// An ExitError reports an unsuccessful exit by a command.
+type ExitError struct {
+ *os.Waitmsg
+}
+
+func (e *ExitError) Error() string {
+ return e.Waitmsg.String()
+}
+
+// Wait waits for the command to exit.
+// It must have been started by Start.
+//
+// The returned error is nil if the command runs, has no problems
+// copying stdin, stdout, and stderr, and exits with a zero exit
+// status.
+//
+// If the command fails to run or doesn't complete successfully, the
+// error is of type *ExitError. Other error types may be
+// returned for I/O problems.
+func (c *Cmd) Wait() error {
+ if c.Process == nil {
+ return errors.New("exec: not started")
+ }
+ if c.finished {
+ return errors.New("exec: Wait was already called")
+ }
+ c.finished = true
+ msg, err := c.Process.Wait(0)
+
+ var copyError error
+ for _ = range c.goroutine {
+ if err := <-c.errch; err != nil && copyError == nil {
+ copyError = err
+ }
+ }
+
+ for _, fd := range c.closeAfterWait {
+ fd.Close()
+ }
+
+ if err != nil {
+ return err
+ } else if !msg.Exited() || msg.ExitStatus() != 0 {
+ return &ExitError{msg}
+ }
+
+ return copyError
+}
+
+// Output runs the command and returns its standard output.
+func (c *Cmd) Output() ([]byte, error) {
+ if c.Stdout != nil {
+ return nil, errors.New("exec: Stdout already set")
+ }
+ var b bytes.Buffer
+ c.Stdout = &b
+ err := c.Run()
+ return b.Bytes(), err
+}
+
+// CombinedOutput runs the command and returns its combined standard
+// output and standard error.
+func (c *Cmd) CombinedOutput() ([]byte, error) {
+ if c.Stdout != nil {
+ return nil, errors.New("exec: Stdout already set")
+ }
+ if c.Stderr != nil {
+ return nil, errors.New("exec: Stderr already set")
+ }
+ var b bytes.Buffer
+ c.Stdout = &b
+ c.Stderr = &b
+ err := c.Run()
+ return b.Bytes(), err
+}
+
+// StdinPipe returns a pipe that will be connected to the command's
+// standard input when the command starts.
+func (c *Cmd) StdinPipe() (io.WriteCloser, error) {
+ if c.Stdin != nil {
+ return nil, errors.New("exec: Stdin already set")
+ }
+ if c.Process != nil {
+ return nil, errors.New("exec: StdinPipe after process started")
+ }
+ pr, pw, err := os.Pipe()
+ if err != nil {
+ return nil, err
+ }
+ c.Stdin = pr
+ c.closeAfterStart = append(c.closeAfterStart, pr)
+ c.closeAfterWait = append(c.closeAfterWait, pw)
+ return pw, nil
+}
+
+// StdoutPipe returns a pipe that will be connected to the command's
+// standard output when the command starts.
+// The pipe will be closed automatically after Wait sees the command exit.
+func (c *Cmd) StdoutPipe() (io.ReadCloser, error) {
+ if c.Stdout != nil {
+ return nil, errors.New("exec: Stdout already set")
+ }
+ if c.Process != nil {
+ return nil, errors.New("exec: StdoutPipe after process started")
+ }
+ pr, pw, err := os.Pipe()
+ if err != nil {
+ return nil, err
+ }
+ c.Stdout = pw
+ c.closeAfterStart = append(c.closeAfterStart, pw)
+ c.closeAfterWait = append(c.closeAfterWait, pr)
+ return pr, nil
+}
+
+// StderrPipe returns a pipe that will be connected to the command's
+// standard error when the command starts.
+// The pipe will be closed automatically after Wait sees the command exit.
+func (c *Cmd) StderrPipe() (io.ReadCloser, error) {
+ if c.Stderr != nil {
+ return nil, errors.New("exec: Stderr already set")
+ }
+ if c.Process != nil {
+ return nil, errors.New("exec: StderrPipe after process started")
+ }
+ pr, pw, err := os.Pipe()
+ if err != nil {
+ return nil, err
+ }
+ c.Stderr = pw
+ c.closeAfterStart = append(c.closeAfterStart, pw)
+ c.closeAfterWait = append(c.closeAfterWait, pr)
+ return pr, nil
+}
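The pipe helpers above carry an implicit ordering: the pipe must be requested before Start, reading should finish before Wait, and Wait closes the parent's end of the pipe. An illustrative sketch under the same assumptions as before (os/exec import path, a Unix "date" binary):

package main

import (
	"fmt"
	"io/ioutil"
	"os/exec" // assumed import path
)

func main() {
	cmd := exec.Command("date")
	stdout, err := cmd.StdoutPipe() // must be set up before Start
	if err != nil {
		fmt.Println(err)
		return
	}
	if err := cmd.Start(); err != nil {
		fmt.Println(err)
		return
	}
	out, _ := ioutil.ReadAll(stdout) // drain the pipe before Wait
	if err := cmd.Wait(); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("%s", out)
}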
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package exec
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "runtime"
+ "strconv"
+ "strings"
+ "testing"
+)
+
+func helperCommand(s ...string) *Cmd {
+ cs := []string{"-test.run=exec.TestHelperProcess", "--"}
+ cs = append(cs, s...)
+ cmd := Command(os.Args[0], cs...)
+ cmd.Env = append([]string{"GO_WANT_HELPER_PROCESS=1"}, os.Environ()...)
+ return cmd
+}
+
+func TestEcho(t *testing.T) {
+ bs, err := helperCommand("echo", "foo bar", "baz").Output()
+ if err != nil {
+ t.Errorf("echo: %v", err)
+ }
+ if g, e := string(bs), "foo bar baz\n"; g != e {
+ t.Errorf("echo: want %q, got %q", e, g)
+ }
+}
+
+func TestCatStdin(t *testing.T) {
+ // Cat, testing stdin and stdout.
+ input := "Input string\nLine 2"
+ p := helperCommand("cat")
+ p.Stdin = strings.NewReader(input)
+ bs, err := p.Output()
+ if err != nil {
+ t.Errorf("cat: %v", err)
+ }
+ s := string(bs)
+ if s != input {
+ t.Errorf("cat: want %q, got %q", input, s)
+ }
+}
+
+func TestCatGoodAndBadFile(t *testing.T) {
+ // Testing combined output and error values.
+ bs, err := helperCommand("cat", "/bogus/file.foo", "exec_test.go").CombinedOutput()
+ if _, ok := err.(*ExitError); !ok {
+ t.Errorf("expected *ExitError from cat combined; got %T: %v", err, err)
+ }
+ s := string(bs)
+ sp := strings.SplitN(s, "\n", 2)
+ if len(sp) != 2 {
+ t.Fatalf("expected two lines from cat; got %q", s)
+ }
+ errLine, body := sp[0], sp[1]
+ if !strings.HasPrefix(errLine, "Error: open /bogus/file.foo") {
+ t.Errorf("expected stderr to complain about file; got %q", errLine)
+ }
+ if !strings.Contains(body, "func TestHelperProcess(*testing.T)") {
+ t.Errorf("expected test code; got %q (len %d)", body, len(body))
+ }
+}
+
+func TestNoExistBinary(t *testing.T) {
+ // Can't run a non-existent binary
+ err := Command("/no-exist-binary").Run()
+ if err == nil {
+ t.Error("expected error from /no-exist-binary")
+ }
+}
+
+func TestExitStatus(t *testing.T) {
+ // Test that exit values are returned correctly
+ err := helperCommand("exit", "42").Run()
+ if werr, ok := err.(*ExitError); ok {
+ if s, e := werr.Error(), "exit status 42"; s != e {
+ t.Errorf("from exit 42 got exit %q, want %q", s, e)
+ }
+ } else {
+ t.Fatalf("expected *ExitError from exit 42; got %T: %v", err, err)
+ }
+}
+
+func TestPipes(t *testing.T) {
+ check := func(what string, err error) {
+ if err != nil {
+ t.Fatalf("%s: %v", what, err)
+ }
+ }
+ // Cat, testing stdin and stdout.
+ c := helperCommand("pipetest")
+ stdin, err := c.StdinPipe()
+ check("StdinPipe", err)
+ stdout, err := c.StdoutPipe()
+ check("StdoutPipe", err)
+ stderr, err := c.StderrPipe()
+ check("StderrPipe", err)
+
+ outbr := bufio.NewReader(stdout)
+ errbr := bufio.NewReader(stderr)
+ line := func(what string, br *bufio.Reader) string {
+ line, _, err := br.ReadLine()
+ if err != nil {
+ t.Fatalf("%s: %v", what, err)
+ }
+ return string(line)
+ }
+
+ err = c.Start()
+ check("Start", err)
+
+ _, err = stdin.Write([]byte("O:I am output\n"))
+ check("first stdin Write", err)
+ if g, e := line("first output line", outbr), "O:I am output"; g != e {
+ t.Errorf("got %q, want %q", g, e)
+ }
+
+ _, err = stdin.Write([]byte("E:I am error\n"))
+ check("second stdin Write", err)
+ if g, e := line("first error line", errbr), "E:I am error"; g != e {
+ t.Errorf("got %q, want %q", g, e)
+ }
+
+ _, err = stdin.Write([]byte("O:I am output2\n"))
+ check("third stdin Write", err)
+ if g, e := line("second output line", outbr), "O:I am output2"; g != e {
+ t.Errorf("got %q, want %q", g, e)
+ }
+
+ stdin.Close()
+ err = c.Wait()
+ check("Wait", err)
+}
+
+func TestExtraFiles(t *testing.T) {
+ if runtime.GOOS == "windows" {
+ t.Logf("no operating system support; skipping")
+ return
+ }
+ tf, err := ioutil.TempFile("", "")
+ if err != nil {
+ t.Fatalf("TempFile: %v", err)
+ }
+ defer os.Remove(tf.Name())
+ defer tf.Close()
+
+ const text = "Hello, fd 3!"
+ _, err = tf.Write([]byte(text))
+ if err != nil {
+ t.Fatalf("Write: %v", err)
+ }
+ _, err = tf.Seek(0, os.SEEK_SET)
+ if err != nil {
+ t.Fatalf("Seek: %v", err)
+ }
+
+ c := helperCommand("read3")
+ c.ExtraFiles = []*os.File{tf}
+ bs, err := c.CombinedOutput()
+ if err != nil {
+ t.Fatalf("CombinedOutput: %v", err)
+ }
+ if string(bs) != text {
+ t.Errorf("got %q; want %q", string(bs), text)
+ }
+}
+
+// TestHelperProcess isn't a real test. It's used as a helper process
+// for the tests above.
+func TestHelperProcess(*testing.T) {
+ if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
+ return
+ }
+ defer os.Exit(0)
+
+ args := os.Args
+ for len(args) > 0 {
+ if args[0] == "--" {
+ args = args[1:]
+ break
+ }
+ args = args[1:]
+ }
+ if len(args) == 0 {
+ fmt.Fprintf(os.Stderr, "No command\n")
+ os.Exit(2)
+ }
+
+ cmd, args := args[0], args[1:]
+ switch cmd {
+ case "echo":
+ iargs := []interface{}{}
+ for _, s := range args {
+ iargs = append(iargs, s)
+ }
+ fmt.Println(iargs...)
+ case "cat":
+ if len(args) == 0 {
+ io.Copy(os.Stdout, os.Stdin)
+ return
+ }
+ exit := 0
+ for _, fn := range args {
+ f, err := os.Open(fn)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Error: %v\n", err)
+ exit = 2
+ } else {
+ defer f.Close()
+ io.Copy(os.Stdout, f)
+ }
+ }
+ os.Exit(exit)
+ case "pipetest":
+ bufr := bufio.NewReader(os.Stdin)
+ for {
+ line, _, err := bufr.ReadLine()
+ if err == io.EOF {
+ break
+ } else if err != nil {
+ os.Exit(1)
+ }
+ if bytes.HasPrefix(line, []byte("O:")) {
+ os.Stdout.Write(line)
+ os.Stdout.Write([]byte{'\n'})
+ } else if bytes.HasPrefix(line, []byte("E:")) {
+ os.Stderr.Write(line)
+ os.Stderr.Write([]byte{'\n'})
+ } else {
+ os.Exit(1)
+ }
+ }
+ case "read3": // read fd 3
+ fd3 := os.NewFile(3, "fd3")
+ bs, err := ioutil.ReadAll(fd3)
+ if err != nil {
+ fmt.Printf("ReadAll from fd 3: %v", err)
+ os.Exit(1)
+ }
+ os.Stderr.Write(bs)
+ case "exit":
+ n, _ := strconv.Atoi(args[0])
+ os.Exit(n)
+ default:
+ fmt.Fprintf(os.Stderr, "Unknown command %q\n", cmd)
+ os.Exit(2)
+ }
+}
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package exec
+
+import (
+ "errors"
+ "os"
+ "strings"
+)
+
+// ErrNotFound is the error resulting if a path search failed to find an executable file.
+var ErrNotFound = errors.New("executable file not found in $path")
+
+func findExecutable(file string) error {
+ d, err := os.Stat(file)
+ if err != nil {
+ return err
+ }
+ if d.IsRegular() && d.Permission()&0111 != 0 {
+ return nil
+ }
+ return os.EPERM
+}
+
+// LookPath searches for an executable binary named file
+// in the directories named by the path environment variable.
+// If file begins with "/", "#", "./", or "../", it is tried
+// directly and the path is not consulted.
+func LookPath(file string) (string, error) {
+ // skip the path lookup for these prefixes
+ skip := []string{"/", "#", "./", "../"}
+
+ for _, p := range skip {
+ if strings.HasPrefix(file, p) {
+ err := findExecutable(file)
+ if err == nil {
+ return file, nil
+ }
+ return "", &Error{file, err}
+ }
+ }
+
+ path := os.Getenv("path")
+ for _, dir := range strings.Split(path, "\000") {
+ if err := findExecutable(dir + "/" + file); err == nil {
+ return dir + "/" + file, nil
+ }
+ }
+ return "", &Error{file, ErrNotFound}
+}
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package exec
+
+import (
+ "testing"
+)
+
+var nonExistentPaths = []string{
+ "some-non-existent-path",
+ "non-existent-path/slashed",
+}
+
+func TestLookPathNotFound(t *testing.T) {
+ for _, name := range nonExistentPaths {
+ path, err := LookPath(name)
+ if err == nil {
+ t.Fatalf("LookPath found %q in $PATH", name)
+ }
+ if path != "" {
+ t.Fatalf("LookPath path == %q when err != nil", path)
+ }
+ perr, ok := err.(*Error)
+ if !ok {
+ t.Fatal("LookPath error is not an exec.Error")
+ }
+ if perr.Name != name {
+ t.Fatalf("want Error name %q, got %q", name, perr.Name)
+ }
+ }
+}
--- /dev/null
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin freebsd linux openbsd
+
+package exec
+
+import (
+ "errors"
+ "os"
+ "strings"
+)
+
+// ErrNotFound is the error resulting if a path search failed to find an executable file.
+var ErrNotFound = errors.New("executable file not found in $PATH")
+
+func findExecutable(file string) error {
+ d, err := os.Stat(file)
+ if err != nil {
+ return err
+ }
+ if d.IsRegular() && d.Permission()&0111 != 0 {
+ return nil
+ }
+ return os.EPERM
+}
+
+// LookPath searches for an executable binary named file
+// in the directories named by the PATH environment variable.
+// If file contains a slash, it is tried directly and the PATH is not consulted.
+func LookPath(file string) (string, error) {
+ // NOTE(rsc): I wish we could use the Plan 9 behavior here
+ // (only bypass the path if file begins with / or ./ or ../)
+ // but that would not match all the Unix shells.
+
+ if strings.Contains(file, "/") {
+ err := findExecutable(file)
+ if err == nil {
+ return file, nil
+ }
+ return "", &Error{file, err}
+ }
+ pathenv := os.Getenv("PATH")
+ for _, dir := range strings.Split(pathenv, ":") {
+ if dir == "" {
+ // Unix shell semantics: path element "" means "."
+ dir = "."
+ }
+ if err := findExecutable(dir + "/" + file); err == nil {
+ return dir + "/" + file, nil
+ }
+ }
+ return "", &Error{file, ErrNotFound}
+}
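A small usage sketch of the Unix LookPath above, which is also what Command falls back on when the name contains no slash (illustrative only; assumes "gofmt" is installed somewhere on $PATH and the os/exec import path):

package main

import (
	"fmt"
	"os/exec" // assumed import path
)

func main() {
	path, err := exec.LookPath("gofmt")
	if err != nil {
		fmt.Println("gofmt not found in $PATH:", err)
		return
	}
	fmt.Println("gofmt is at", path)
}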
--- /dev/null
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package exec
+
+import (
+ "errors"
+ "os"
+ "strings"
+)
+
+// ErrNotFound is the error resulting if a path search failed to find an executable file.
+var ErrNotFound = errors.New("executable file not found in %PATH%")
+
+func chkStat(file string) error {
+ d, err := os.Stat(file)
+ if err != nil {
+ return err
+ }
+ if d.IsRegular() {
+ return nil
+ }
+ return os.EPERM
+}
+
+func findExecutable(file string, exts []string) (string, error) {
+ if len(exts) == 0 {
+ return file, chkStat(file)
+ }
+ f := strings.ToLower(file)
+ for _, e := range exts {
+ if strings.HasSuffix(f, e) {
+ return file, chkStat(file)
+ }
+ }
+ for _, e := range exts {
+ if f := file + e; chkStat(f) == nil {
+ return f, nil
+ }
+ }
+ return ``, os.ENOENT
+}
+
+func LookPath(file string) (f string, err error) {
+ x := os.Getenv(`PATHEXT`)
+ if x == `` {
+ x = `.COM;.EXE;.BAT;.CMD`
+ }
+ exts := []string{}
+ for _, e := range strings.Split(strings.ToLower(x), `;`) {
+ if e == "" {
+ continue
+ }
+ if e[0] != '.' {
+ e = "." + e
+ }
+ exts = append(exts, e)
+ }
+ if strings.IndexAny(file, `:\/`) != -1 {
+ if f, err = findExecutable(file, exts); err == nil {
+ return
+ }
+ return ``, &Error{file, err}
+ }
+ if pathenv := os.Getenv(`PATH`); pathenv == `` {
+ if f, err = findExecutable(`.\`+file, exts); err == nil {
+ return
+ }
+ } else {
+ for _, dir := range strings.Split(pathenv, `;`) {
+ if f, err = findExecutable(dir+`\`+file, exts); err == nil {
+ return
+ }
+ }
+ }
+ return ``, &Error{file, ErrNotFound}
+}
package os
import (
+ "errors"
"runtime"
"syscall"
)
func (p *Process) Signal(sig Signal) error {
if p.done {
- return NewError("os: process already finished")
+ return errors.New("os: process already finished")
}
f, e := OpenFile("/proc/"+itoa(p.Pid)+"/note", O_WRONLY, 0)
// Auxiliary information if the File describes a directory
type dirInfo struct {
- buf []byte // buffer for directory I/O
+ buf []byte // buffer for directory I/O
dir *syscall.DIR // from opendir
}
}
if file.dirinfo != nil {
- if libc_closedir(file.dirinfo.dir) < 0 && err == nil {
+ if libc_closedir(file.dirinfo.dir) < 0 && err == nil {
err = &PathError{"closedir", file.name, Errno(syscall.GetErrno())}
}
}
//
// If n > 0, Readdir returns at most n FileInfo structures. In this case, if
// Readdir returns an empty slice, it will return a non-nil error
-// explaining why. At the end of a directory, the error is os.EOF.
+// explaining why. At the end of a directory, the error is io.EOF.
//
// If n <= 0, Readdir returns all the FileInfo from the directory in
// a single slice. In this case, if Readdir succeeds (reads all
import (
. "os"
"path/filepath"
- "testing"
"runtime"
"syscall"
+ "testing"
)
func TestMkdirAll(t *testing.T) {
"os"
"sort"
"strings"
- "utf8"
+ "unicode/utf8"
)
var ErrBadPattern = errors.New("syntax error in pattern")
import (
. "path/filepath"
- "testing"
"runtime"
+ "testing"
)
type MatchTest struct {
import (
"errors"
"strings"
- "utf8"
+ "unicode/utf8"
)
var ErrBadPattern = errors.New("syntax error in pattern")
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package rand
-
-import (
- "math"
-)
-
-/*
- * Exponential distribution
- *
- * See "The Ziggurat Method for Generating Random Variables"
- * (Marsaglia & Tsang, 2000)
- * http://www.jstatsoft.org/v05/i08/paper [pdf]
- */
-
-const (
- re = 7.69711747013104972
-)
-
-// ExpFloat64 returns an exponentially distributed float64 in the range
-// (0, +math.MaxFloat64] with an exponential distribution whose rate parameter
-// (lambda) is 1 and whose mean is 1/lambda (1).
-// To produce a distribution with a different rate parameter,
-// callers can adjust the output using:
-//
-// sample = ExpFloat64() / desiredRateParameter
-//
-func (r *Rand) ExpFloat64() float64 {
- for {
- j := r.Uint32()
- i := j & 0xFF
- x := float64(j) * float64(we[i])
- if j < ke[i] {
- return x
- }
- if i == 0 {
- return re - math.Log(r.Float64())
- }
- if fe[i]+float32(r.Float64())*(fe[i-1]-fe[i]) < float32(math.Exp(-x)) {
- return x
- }
- }
- panic("unreachable")
-}
-
-var ke = [256]uint32{
- 0xe290a139, 0x0, 0x9beadebc, 0xc377ac71, 0xd4ddb990,
- 0xde893fb8, 0xe4a8e87c, 0xe8dff16a, 0xebf2deab, 0xee49a6e8,
- 0xf0204efd, 0xf19bdb8e, 0xf2d458bb, 0xf3da104b, 0xf4b86d78,
- 0xf577ad8a, 0xf61de83d, 0xf6afb784, 0xf730a573, 0xf7a37651,
- 0xf80a5bb6, 0xf867189d, 0xf8bb1b4f, 0xf9079062, 0xf94d70ca,
- 0xf98d8c7d, 0xf9c8928a, 0xf9ff175b, 0xfa319996, 0xfa6085f8,
- 0xfa8c3a62, 0xfab5084e, 0xfadb36c8, 0xfaff0410, 0xfb20a6ea,
- 0xfb404fb4, 0xfb5e2951, 0xfb7a59e9, 0xfb95038c, 0xfbae44ba,
- 0xfbc638d8, 0xfbdcf892, 0xfbf29a30, 0xfc0731df, 0xfc1ad1ed,
- 0xfc2d8b02, 0xfc3f6c4d, 0xfc5083ac, 0xfc60ddd1, 0xfc708662,
- 0xfc7f8810, 0xfc8decb4, 0xfc9bbd62, 0xfca9027c, 0xfcb5c3c3,
- 0xfcc20864, 0xfccdd70a, 0xfcd935e3, 0xfce42ab0, 0xfceebace,
- 0xfcf8eb3b, 0xfd02c0a0, 0xfd0c3f59, 0xfd156b7b, 0xfd1e48d6,
- 0xfd26daff, 0xfd2f2552, 0xfd372af7, 0xfd3eeee5, 0xfd4673e7,
- 0xfd4dbc9e, 0xfd54cb85, 0xfd5ba2f2, 0xfd62451b, 0xfd68b415,
- 0xfd6ef1da, 0xfd750047, 0xfd7ae120, 0xfd809612, 0xfd8620b4,
- 0xfd8b8285, 0xfd90bcf5, 0xfd95d15e, 0xfd9ac10b, 0xfd9f8d36,
- 0xfda43708, 0xfda8bf9e, 0xfdad2806, 0xfdb17141, 0xfdb59c46,
- 0xfdb9a9fd, 0xfdbd9b46, 0xfdc170f6, 0xfdc52bd8, 0xfdc8ccac,
- 0xfdcc542d, 0xfdcfc30b, 0xfdd319ef, 0xfdd6597a, 0xfdd98245,
- 0xfddc94e5, 0xfddf91e6, 0xfde279ce, 0xfde54d1f, 0xfde80c52,
- 0xfdeab7de, 0xfded5034, 0xfdefd5be, 0xfdf248e3, 0xfdf4aa06,
- 0xfdf6f984, 0xfdf937b6, 0xfdfb64f4, 0xfdfd818d, 0xfdff8dd0,
- 0xfe018a08, 0xfe03767a, 0xfe05536c, 0xfe07211c, 0xfe08dfc9,
- 0xfe0a8fab, 0xfe0c30fb, 0xfe0dc3ec, 0xfe0f48b1, 0xfe10bf76,
- 0xfe122869, 0xfe1383b4, 0xfe14d17c, 0xfe1611e7, 0xfe174516,
- 0xfe186b2a, 0xfe19843e, 0xfe1a9070, 0xfe1b8fd6, 0xfe1c8289,
- 0xfe1d689b, 0xfe1e4220, 0xfe1f0f26, 0xfe1fcfbc, 0xfe2083ed,
- 0xfe212bc3, 0xfe21c745, 0xfe225678, 0xfe22d95f, 0xfe234ffb,
- 0xfe23ba4a, 0xfe241849, 0xfe2469f2, 0xfe24af3c, 0xfe24e81e,
- 0xfe25148b, 0xfe253474, 0xfe2547c7, 0xfe254e70, 0xfe25485a,
- 0xfe25356a, 0xfe251586, 0xfe24e88f, 0xfe24ae64, 0xfe2466e1,
- 0xfe2411df, 0xfe23af34, 0xfe233eb4, 0xfe22c02c, 0xfe22336b,
- 0xfe219838, 0xfe20ee58, 0xfe20358c, 0xfe1f6d92, 0xfe1e9621,
- 0xfe1daef0, 0xfe1cb7ac, 0xfe1bb002, 0xfe1a9798, 0xfe196e0d,
- 0xfe1832fd, 0xfe16e5fe, 0xfe15869d, 0xfe141464, 0xfe128ed3,
- 0xfe10f565, 0xfe0f478c, 0xfe0d84b1, 0xfe0bac36, 0xfe09bd73,
- 0xfe07b7b5, 0xfe059a40, 0xfe03644c, 0xfe011504, 0xfdfeab88,
- 0xfdfc26e9, 0xfdf98629, 0xfdf6c83b, 0xfdf3ec01, 0xfdf0f04a,
- 0xfdedd3d1, 0xfdea953d, 0xfde7331e, 0xfde3abe9, 0xfddffdfb,
- 0xfddc2791, 0xfdd826cd, 0xfdd3f9a8, 0xfdcf9dfc, 0xfdcb1176,
- 0xfdc65198, 0xfdc15bb3, 0xfdbc2ce2, 0xfdb6c206, 0xfdb117be,
- 0xfdab2a63, 0xfda4f5fd, 0xfd9e7640, 0xfd97a67a, 0xfd908192,
- 0xfd8901f2, 0xfd812182, 0xfd78d98e, 0xfd7022bb, 0xfd66f4ed,
- 0xfd5d4732, 0xfd530f9c, 0xfd48432b, 0xfd3cd59a, 0xfd30b936,
- 0xfd23dea4, 0xfd16349e, 0xfd07a7a3, 0xfcf8219b, 0xfce7895b,
- 0xfcd5c220, 0xfcc2aadb, 0xfcae1d5e, 0xfc97ed4e, 0xfc7fe6d4,
- 0xfc65ccf3, 0xfc495762, 0xfc2a2fc8, 0xfc07ee19, 0xfbe213c1,
- 0xfbb8051a, 0xfb890078, 0xfb5411a5, 0xfb180005, 0xfad33482,
- 0xfa839276, 0xfa263b32, 0xf9b72d1c, 0xf930a1a2, 0xf889f023,
- 0xf7b577d2, 0xf69c650c, 0xf51530f0, 0xf2cb0e3c, 0xeeefb15d,
- 0xe6da6ecf,
-}
-var we = [256]float32{
- 2.0249555e-09, 1.486674e-11, 2.4409617e-11, 3.1968806e-11,
- 3.844677e-11, 4.4228204e-11, 4.9516443e-11, 5.443359e-11,
- 5.905944e-11, 6.344942e-11, 6.7643814e-11, 7.1672945e-11,
- 7.556032e-11, 7.932458e-11, 8.298079e-11, 8.654132e-11,
- 9.0016515e-11, 9.3415074e-11, 9.674443e-11, 1.0001099e-10,
- 1.03220314e-10, 1.06377254e-10, 1.09486115e-10, 1.1255068e-10,
- 1.1557435e-10, 1.1856015e-10, 1.2151083e-10, 1.2442886e-10,
- 1.2731648e-10, 1.3017575e-10, 1.3300853e-10, 1.3581657e-10,
- 1.3860142e-10, 1.4136457e-10, 1.4410738e-10, 1.4683108e-10,
- 1.4953687e-10, 1.5222583e-10, 1.54899e-10, 1.5755733e-10,
- 1.6020171e-10, 1.6283301e-10, 1.6545203e-10, 1.6805951e-10,
- 1.7065617e-10, 1.732427e-10, 1.7581973e-10, 1.7838787e-10,
- 1.8094774e-10, 1.8349985e-10, 1.8604476e-10, 1.8858298e-10,
- 1.9111498e-10, 1.9364126e-10, 1.9616223e-10, 1.9867835e-10,
- 2.0119004e-10, 2.0369768e-10, 2.0620168e-10, 2.087024e-10,
- 2.1120022e-10, 2.136955e-10, 2.1618855e-10, 2.1867974e-10,
- 2.2116936e-10, 2.2365775e-10, 2.261452e-10, 2.2863202e-10,
- 2.311185e-10, 2.3360494e-10, 2.360916e-10, 2.3857874e-10,
- 2.4106667e-10, 2.4355562e-10, 2.4604588e-10, 2.485377e-10,
- 2.5103128e-10, 2.5352695e-10, 2.560249e-10, 2.585254e-10,
- 2.6102867e-10, 2.6353494e-10, 2.6604446e-10, 2.6855745e-10,
- 2.7107416e-10, 2.7359479e-10, 2.761196e-10, 2.7864877e-10,
- 2.8118255e-10, 2.8372119e-10, 2.8626485e-10, 2.888138e-10,
- 2.9136826e-10, 2.939284e-10, 2.9649452e-10, 2.9906677e-10,
- 3.016454e-10, 3.0423064e-10, 3.0682268e-10, 3.0942177e-10,
- 3.1202813e-10, 3.1464195e-10, 3.1726352e-10, 3.19893e-10,
- 3.2253064e-10, 3.251767e-10, 3.2783135e-10, 3.3049485e-10,
- 3.3316744e-10, 3.3584938e-10, 3.3854083e-10, 3.4124212e-10,
- 3.4395342e-10, 3.46675e-10, 3.4940711e-10, 3.5215003e-10,
- 3.5490397e-10, 3.5766917e-10, 3.6044595e-10, 3.6323455e-10,
- 3.660352e-10, 3.6884823e-10, 3.7167386e-10, 3.745124e-10,
- 3.773641e-10, 3.802293e-10, 3.8310827e-10, 3.860013e-10,
- 3.8890866e-10, 3.918307e-10, 3.9476775e-10, 3.9772008e-10,
- 4.0068804e-10, 4.0367196e-10, 4.0667217e-10, 4.09689e-10,
- 4.1272286e-10, 4.1577405e-10, 4.1884296e-10, 4.2192994e-10,
- 4.250354e-10, 4.281597e-10, 4.313033e-10, 4.3446652e-10,
- 4.3764986e-10, 4.408537e-10, 4.4407847e-10, 4.4732465e-10,
- 4.5059267e-10, 4.5388301e-10, 4.571962e-10, 4.6053267e-10,
- 4.6389292e-10, 4.6727755e-10, 4.70687e-10, 4.741219e-10,
- 4.7758275e-10, 4.810702e-10, 4.845848e-10, 4.8812715e-10,
- 4.9169796e-10, 4.9529775e-10, 4.989273e-10, 5.0258725e-10,
- 5.0627835e-10, 5.100013e-10, 5.1375687e-10, 5.1754584e-10,
- 5.21369e-10, 5.2522725e-10, 5.2912136e-10, 5.330522e-10,
- 5.370208e-10, 5.4102806e-10, 5.45075e-10, 5.491625e-10,
- 5.532918e-10, 5.5746385e-10, 5.616799e-10, 5.6594107e-10,
- 5.7024857e-10, 5.746037e-10, 5.7900773e-10, 5.834621e-10,
- 5.8796823e-10, 5.925276e-10, 5.971417e-10, 6.018122e-10,
- 6.065408e-10, 6.113292e-10, 6.1617933e-10, 6.2109295e-10,
- 6.260722e-10, 6.3111916e-10, 6.3623595e-10, 6.4142497e-10,
- 6.4668854e-10, 6.5202926e-10, 6.5744976e-10, 6.6295286e-10,
- 6.6854156e-10, 6.742188e-10, 6.79988e-10, 6.858526e-10,
- 6.9181616e-10, 6.978826e-10, 7.04056e-10, 7.103407e-10,
- 7.167412e-10, 7.2326256e-10, 7.2990985e-10, 7.366886e-10,
- 7.4360473e-10, 7.5066453e-10, 7.5787476e-10, 7.6524265e-10,
- 7.7277595e-10, 7.80483e-10, 7.883728e-10, 7.9645507e-10,
- 8.047402e-10, 8.1323964e-10, 8.219657e-10, 8.309319e-10,
- 8.401528e-10, 8.496445e-10, 8.594247e-10, 8.6951274e-10,
- 8.799301e-10, 8.9070046e-10, 9.018503e-10, 9.134092e-10,
- 9.254101e-10, 9.378904e-10, 9.508923e-10, 9.644638e-10,
- 9.786603e-10, 9.935448e-10, 1.0091913e-09, 1.025686e-09,
- 1.0431306e-09, 1.0616465e-09, 1.08138e-09, 1.1025096e-09,
- 1.1252564e-09, 1.1498986e-09, 1.1767932e-09, 1.206409e-09,
- 1.2393786e-09, 1.276585e-09, 1.3193139e-09, 1.3695435e-09,
- 1.4305498e-09, 1.508365e-09, 1.6160854e-09, 1.7921248e-09,
-}
-var fe = [256]float32{
- 1, 0.9381437, 0.90046996, 0.87170434, 0.8477855, 0.8269933,
- 0.8084217, 0.7915276, 0.77595687, 0.7614634, 0.7478686,
- 0.7350381, 0.72286767, 0.71127474, 0.70019263, 0.6895665,
- 0.67935055, 0.6695063, 0.66000086, 0.65080583, 0.6418967,
- 0.63325197, 0.6248527, 0.6166822, 0.60872537, 0.60096896,
- 0.5934009, 0.58601034, 0.5787874, 0.57172304, 0.5648092,
- 0.5580383, 0.5514034, 0.5448982, 0.5385169, 0.53225386,
- 0.5261042, 0.52006316, 0.5141264, 0.50828975, 0.5025495,
- 0.496902, 0.49134386, 0.485872, 0.48048335, 0.4751752,
- 0.46994483, 0.46478975, 0.45970762, 0.45469615, 0.44975325,
- 0.44487688, 0.44006512, 0.43531612, 0.43062815, 0.42599955,
- 0.42142874, 0.4169142, 0.41245446, 0.40804818, 0.403694,
- 0.3993907, 0.39513698, 0.39093173, 0.38677382, 0.38266218,
- 0.37859577, 0.37457356, 0.37059465, 0.3666581, 0.362763,
- 0.35890847, 0.35509375, 0.351318, 0.3475805, 0.34388044,
- 0.34021714, 0.3365899, 0.33299807, 0.32944095, 0.32591796,
- 0.3224285, 0.3189719, 0.31554767, 0.31215525, 0.30879408,
- 0.3054636, 0.3021634, 0.29889292, 0.2956517, 0.29243928,
- 0.28925523, 0.28609908, 0.28297043, 0.27986884, 0.27679393,
- 0.2737453, 0.2707226, 0.2677254, 0.26475343, 0.26180625,
- 0.25888354, 0.25598502, 0.2531103, 0.25025907, 0.24743107,
- 0.24462597, 0.24184346, 0.23908329, 0.23634516, 0.23362878,
- 0.23093392, 0.2282603, 0.22560766, 0.22297576, 0.22036438,
- 0.21777324, 0.21520215, 0.21265087, 0.21011916, 0.20760682,
- 0.20511365, 0.20263945, 0.20018397, 0.19774707, 0.19532852,
- 0.19292815, 0.19054577, 0.1881812, 0.18583426, 0.18350479,
- 0.1811926, 0.17889754, 0.17661946, 0.17435817, 0.17211354,
- 0.1698854, 0.16767362, 0.16547804, 0.16329853, 0.16113494,
- 0.15898713, 0.15685499, 0.15473837, 0.15263714, 0.15055119,
- 0.14848037, 0.14642459, 0.14438373, 0.14235765, 0.14034624,
- 0.13834943, 0.13636707, 0.13439907, 0.13244532, 0.13050574,
- 0.1285802, 0.12666863, 0.12477092, 0.12288698, 0.12101672,
- 0.119160056, 0.1173169, 0.115487166, 0.11367077, 0.11186763,
- 0.11007768, 0.10830083, 0.10653701, 0.10478614, 0.10304816,
- 0.101323, 0.09961058, 0.09791085, 0.09622374, 0.09454919,
- 0.09288713, 0.091237515, 0.08960028, 0.087975375, 0.08636274,
- 0.08476233, 0.083174095, 0.081597984, 0.08003395, 0.07848195,
- 0.076941945, 0.07541389, 0.07389775, 0.072393484, 0.07090106,
- 0.069420435, 0.06795159, 0.066494495, 0.06504912, 0.063615434,
- 0.062193416, 0.060783047, 0.059384305, 0.057997175,
- 0.05662164, 0.05525769, 0.053905312, 0.052564494, 0.051235236,
- 0.049917534, 0.048611384, 0.047316793, 0.046033762, 0.0447623,
- 0.043502413, 0.042254124, 0.041017443, 0.039792392,
- 0.038578995, 0.037377283, 0.036187284, 0.035009038,
- 0.033842582, 0.032687962, 0.031545233, 0.030414443, 0.02929566,
- 0.02818895, 0.027094385, 0.026012046, 0.024942026, 0.023884421,
- 0.022839336, 0.021806888, 0.020787204, 0.019780423, 0.0187867,
- 0.0178062, 0.016839107, 0.015885621, 0.014945968, 0.014020392,
- 0.013109165, 0.012212592, 0.011331013, 0.01046481, 0.009614414,
- 0.008780315, 0.007963077, 0.0071633533, 0.006381906,
- 0.0056196423, 0.0048776558, 0.004157295, 0.0034602648,
- 0.0027887989, 0.0021459677, 0.0015362998, 0.0009672693,
- 0.00045413437,
-}
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package rand
-
-import (
- "math"
-)
-
-/*
- * Normal distribution
- *
- * See "The Ziggurat Method for Generating Random Variables"
- * (Marsaglia & Tsang, 2000)
- * http://www.jstatsoft.org/v05/i08/paper [pdf]
- */
-
-const (
- rn = 3.442619855899
-)
-
-func absInt32(i int32) uint32 {
- if i < 0 {
- return uint32(-i)
- }
- return uint32(i)
-}
-
-// NormFloat64 returns a normally distributed float64 in the range
-// [-math.MaxFloat64, +math.MaxFloat64] with
-// standard normal distribution (mean = 0, stddev = 1).
-// To produce a different normal distribution, callers can
-// adjust the output using:
-//
-// sample = NormFloat64() * desiredStdDev + desiredMean
-//
-func (r *Rand) NormFloat64() float64 {
- for {
- j := int32(r.Uint32()) // Possibly negative
- i := j & 0x7F
- x := float64(j) * float64(wn[i])
- if absInt32(j) < kn[i] {
- // This case should be hit better than 99% of the time.
- return x
- }
-
- if i == 0 {
- // This extra work is only required for the base strip.
- for {
- x = -math.Log(r.Float64()) * (1.0 / rn)
- y := -math.Log(r.Float64())
- if y+y >= x*x {
- break
- }
- }
- if j > 0 {
- return rn + x
- }
- return -rn - x
- }
- if fn[i]+float32(r.Float64())*(fn[i-1]-fn[i]) < float32(math.Exp(-.5*x*x)) {
- return x
- }
- }
- panic("unreachable")
-}
-
-var kn = [128]uint32{
- 0x76ad2212, 0x0, 0x600f1b53, 0x6ce447a6, 0x725b46a2,
- 0x7560051d, 0x774921eb, 0x789a25bd, 0x799045c3, 0x7a4bce5d,
- 0x7adf629f, 0x7b5682a6, 0x7bb8a8c6, 0x7c0ae722, 0x7c50cce7,
- 0x7c8cec5b, 0x7cc12cd6, 0x7ceefed2, 0x7d177e0b, 0x7d3b8883,
- 0x7d5bce6c, 0x7d78dd64, 0x7d932886, 0x7dab0e57, 0x7dc0dd30,
- 0x7dd4d688, 0x7de73185, 0x7df81cea, 0x7e07c0a3, 0x7e163efa,
- 0x7e23b587, 0x7e303dfd, 0x7e3beec2, 0x7e46db77, 0x7e51155d,
- 0x7e5aabb3, 0x7e63abf7, 0x7e6c222c, 0x7e741906, 0x7e7b9a18,
- 0x7e82adfa, 0x7e895c63, 0x7e8fac4b, 0x7e95a3fb, 0x7e9b4924,
- 0x7ea0a0ef, 0x7ea5b00d, 0x7eaa7ac3, 0x7eaf04f3, 0x7eb3522a,
- 0x7eb765a5, 0x7ebb4259, 0x7ebeeafd, 0x7ec2620a, 0x7ec5a9c4,
- 0x7ec8c441, 0x7ecbb365, 0x7ece78ed, 0x7ed11671, 0x7ed38d62,
- 0x7ed5df12, 0x7ed80cb4, 0x7eda175c, 0x7edc0005, 0x7eddc78e,
- 0x7edf6ebf, 0x7ee0f647, 0x7ee25ebe, 0x7ee3a8a9, 0x7ee4d473,
- 0x7ee5e276, 0x7ee6d2f5, 0x7ee7a620, 0x7ee85c10, 0x7ee8f4cd,
- 0x7ee97047, 0x7ee9ce59, 0x7eea0eca, 0x7eea3147, 0x7eea3568,
- 0x7eea1aab, 0x7ee9e071, 0x7ee98602, 0x7ee90a88, 0x7ee86d08,
- 0x7ee7ac6a, 0x7ee6c769, 0x7ee5bc9c, 0x7ee48a67, 0x7ee32efc,
- 0x7ee1a857, 0x7edff42f, 0x7ede0ffa, 0x7edbf8d9, 0x7ed9ab94,
- 0x7ed7248d, 0x7ed45fae, 0x7ed1585c, 0x7ece095f, 0x7eca6ccb,
- 0x7ec67be2, 0x7ec22eee, 0x7ebd7d1a, 0x7eb85c35, 0x7eb2c075,
- 0x7eac9c20, 0x7ea5df27, 0x7e9e769f, 0x7e964c16, 0x7e8d44ba,
- 0x7e834033, 0x7e781728, 0x7e6b9933, 0x7e5d8a1a, 0x7e4d9ded,
- 0x7e3b737a, 0x7e268c2f, 0x7e0e3ff5, 0x7df1aa5d, 0x7dcf8c72,
- 0x7da61a1e, 0x7d72a0fb, 0x7d30e097, 0x7cd9b4ab, 0x7c600f1a,
- 0x7ba90bdc, 0x7a722176, 0x77d664e5,
-}
-var wn = [128]float32{
- 1.7290405e-09, 1.2680929e-10, 1.6897518e-10, 1.9862688e-10,
- 2.2232431e-10, 2.4244937e-10, 2.601613e-10, 2.7611988e-10,
- 2.9073963e-10, 3.042997e-10, 3.1699796e-10, 3.289802e-10,
- 3.4035738e-10, 3.5121603e-10, 3.616251e-10, 3.7164058e-10,
- 3.8130857e-10, 3.9066758e-10, 3.9975012e-10, 4.08584e-10,
- 4.1719309e-10, 4.2559822e-10, 4.338176e-10, 4.418672e-10,
- 4.497613e-10, 4.5751258e-10, 4.651324e-10, 4.7263105e-10,
- 4.8001775e-10, 4.87301e-10, 4.944885e-10, 5.015873e-10,
- 5.0860405e-10, 5.155446e-10, 5.2241467e-10, 5.2921934e-10,
- 5.359635e-10, 5.426517e-10, 5.4928817e-10, 5.5587696e-10,
- 5.624219e-10, 5.6892646e-10, 5.753941e-10, 5.818282e-10,
- 5.882317e-10, 5.946077e-10, 6.00959e-10, 6.072884e-10,
- 6.135985e-10, 6.19892e-10, 6.2617134e-10, 6.3243905e-10,
- 6.386974e-10, 6.449488e-10, 6.511956e-10, 6.5744005e-10,
- 6.6368433e-10, 6.699307e-10, 6.7618144e-10, 6.824387e-10,
- 6.8870465e-10, 6.949815e-10, 7.012715e-10, 7.075768e-10,
- 7.1389966e-10, 7.202424e-10, 7.266073e-10, 7.329966e-10,
- 7.394128e-10, 7.4585826e-10, 7.5233547e-10, 7.58847e-10,
- 7.653954e-10, 7.719835e-10, 7.7861395e-10, 7.852897e-10,
- 7.920138e-10, 7.987892e-10, 8.0561924e-10, 8.125073e-10,
- 8.194569e-10, 8.2647167e-10, 8.3355556e-10, 8.407127e-10,
- 8.479473e-10, 8.55264e-10, 8.6266755e-10, 8.7016316e-10,
- 8.777562e-10, 8.8545243e-10, 8.932582e-10, 9.0117996e-10,
- 9.09225e-10, 9.174008e-10, 9.2571584e-10, 9.341788e-10,
- 9.427997e-10, 9.515889e-10, 9.605579e-10, 9.697193e-10,
- 9.790869e-10, 9.88676e-10, 9.985036e-10, 1.0085882e-09,
- 1.0189509e-09, 1.0296151e-09, 1.0406069e-09, 1.0519566e-09,
- 1.063698e-09, 1.0758702e-09, 1.0885183e-09, 1.1016947e-09,
- 1.1154611e-09, 1.1298902e-09, 1.1450696e-09, 1.1611052e-09,
- 1.1781276e-09, 1.1962995e-09, 1.2158287e-09, 1.2369856e-09,
- 1.2601323e-09, 1.2857697e-09, 1.3146202e-09, 1.347784e-09,
- 1.3870636e-09, 1.4357403e-09, 1.5008659e-09, 1.6030948e-09,
-}
-var fn = [128]float32{
- 1, 0.9635997, 0.9362827, 0.9130436, 0.89228165, 0.87324303,
- 0.8555006, 0.8387836, 0.8229072, 0.8077383, 0.793177,
- 0.7791461, 0.7655842, 0.7524416, 0.73967725, 0.7272569,
- 0.7151515, 0.7033361, 0.69178915, 0.68049186, 0.6694277,
- 0.658582, 0.6479418, 0.63749546, 0.6272325, 0.6171434,
- 0.6072195, 0.5974532, 0.58783704, 0.5783647, 0.56903,
- 0.5598274, 0.5507518, 0.54179835, 0.5329627, 0.52424055,
- 0.5156282, 0.50712204, 0.49871865, 0.49041483, 0.48220766,
- 0.4740943, 0.46607214, 0.4581387, 0.45029163, 0.44252872,
- 0.43484783, 0.427247, 0.41972435, 0.41227803, 0.40490642,
- 0.39760786, 0.3903808, 0.3832238, 0.37613547, 0.36911446,
- 0.3621595, 0.35526937, 0.34844297, 0.34167916, 0.33497685,
- 0.3283351, 0.3217529, 0.3152294, 0.30876362, 0.30235484,
- 0.29600215, 0.28970486, 0.2834622, 0.2772735, 0.27113807,
- 0.2650553, 0.25902456, 0.2530453, 0.24711695, 0.241239,
- 0.23541094, 0.22963232, 0.2239027, 0.21822165, 0.21258877,
- 0.20700371, 0.20146611, 0.19597565, 0.19053204, 0.18513499,
- 0.17978427, 0.17447963, 0.1692209, 0.16400786, 0.15884037,
- 0.15371831, 0.14864157, 0.14361008, 0.13862377, 0.13368265,
- 0.12878671, 0.12393598, 0.119130544, 0.11437051, 0.10965602,
- 0.104987256, 0.10036444, 0.095787846, 0.0912578, 0.08677467,
- 0.0823389, 0.077950984, 0.073611505, 0.06932112, 0.06508058,
- 0.06089077, 0.056752663, 0.0526674, 0.048636295, 0.044660863,
- 0.040742867, 0.03688439, 0.033087887, 0.029356318,
- 0.025693292, 0.022103304, 0.018592102, 0.015167298,
- 0.011839478, 0.008624485, 0.005548995, 0.0026696292,
-}
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package rand implements pseudo-random number generators.
-package rand
-
-import "sync"
-
-// A Source represents a source of uniformly-distributed
-// pseudo-random int64 values in the range [0, 1<<63).
-type Source interface {
- Int63() int64
- Seed(seed int64)
-}
-
-// NewSource returns a new pseudo-random Source seeded with the given value.
-func NewSource(seed int64) Source {
- var rng rngSource
- rng.Seed(seed)
- return &rng
-}
-
-// A Rand is a source of random numbers.
-type Rand struct {
- src Source
-}
-
-// New returns a new Rand that uses random values from src
-// to generate other random values.
-func New(src Source) *Rand { return &Rand{src} }
-
-// Seed uses the provided seed value to initialize the generator to a deterministic state.
-func (r *Rand) Seed(seed int64) { r.src.Seed(seed) }
-
-// Int63 returns a non-negative pseudo-random 63-bit integer as an int64.
-func (r *Rand) Int63() int64 { return r.src.Int63() }
-
-// Uint32 returns a pseudo-random 32-bit value as a uint32.
-func (r *Rand) Uint32() uint32 { return uint32(r.Int63() >> 31) }
-
-// Int31 returns a non-negative pseudo-random 31-bit integer as an int32.
-func (r *Rand) Int31() int32 { return int32(r.Int63() >> 32) }
-
-// Int returns a non-negative pseudo-random int.
-func (r *Rand) Int() int {
- u := uint(r.Int63())
- return int(u << 1 >> 1) // clear sign bit if int == int32
-}
-
-// Int63n returns, as an int64, a non-negative pseudo-random number in [0,n).
-func (r *Rand) Int63n(n int64) int64 {
- if n <= 0 {
- return 0
- }
- max := int64((1 << 63) - 1 - (1<<63)%uint64(n))
- v := r.Int63()
- for v > max {
- v = r.Int63()
- }
- return v % n
-}
-
-// Int31n returns, as an int32, a non-negative pseudo-random number in [0,n).
-func (r *Rand) Int31n(n int32) int32 {
- if n <= 0 {
- return 0
- }
- max := int32((1 << 31) - 1 - (1<<31)%uint32(n))
- v := r.Int31()
- for v > max {
- v = r.Int31()
- }
- return v % n
-}
-
-// Intn returns, as an int, a non-negative pseudo-random number in [0,n).
-func (r *Rand) Intn(n int) int {
- if n <= 1<<31-1 {
- return int(r.Int31n(int32(n)))
- }
- return int(r.Int63n(int64(n)))
-}
-
-// Float64 returns, as a float64, a pseudo-random number in [0.0,1.0).
-func (r *Rand) Float64() float64 { return float64(r.Int63()) / (1 << 63) }
-
-// Float32 returns, as a float32, a pseudo-random number in [0.0,1.0).
-func (r *Rand) Float32() float32 { return float32(r.Float64()) }
-
-// Perm returns, as a slice of n ints, a pseudo-random permutation of the integers [0,n).
-func (r *Rand) Perm(n int) []int {
- m := make([]int, n)
- for i := 0; i < n; i++ {
- m[i] = i
- }
- for i := 0; i < n; i++ {
- j := r.Intn(i + 1)
- m[i], m[j] = m[j], m[i]
- }
- return m
-}
-
-/*
- * Top-level convenience functions
- */
-
-var globalRand = New(&lockedSource{src: NewSource(1)})
-
-// Seed uses the provided seed value to initialize the generator to a deterministic state.
-func Seed(seed int64) { globalRand.Seed(seed) }
-
-// Int63 returns a non-negative pseudo-random 63-bit integer as an int64.
-func Int63() int64 { return globalRand.Int63() }
-
-// Uint32 returns a pseudo-random 32-bit value as a uint32.
-func Uint32() uint32 { return globalRand.Uint32() }
-
-// Int31 returns a non-negative pseudo-random 31-bit integer as an int32.
-func Int31() int32 { return globalRand.Int31() }
-
-// Int returns a non-negative pseudo-random int.
-func Int() int { return globalRand.Int() }
-
-// Int63n returns, as an int64, a non-negative pseudo-random number in [0,n).
-func Int63n(n int64) int64 { return globalRand.Int63n(n) }
-
-// Int31n returns, as an int32, a non-negative pseudo-random number in [0,n).
-func Int31n(n int32) int32 { return globalRand.Int31n(n) }
-
-// Intn returns, as an int, a non-negative pseudo-random number in [0,n).
-func Intn(n int) int { return globalRand.Intn(n) }
-
-// Float64 returns, as a float64, a pseudo-random number in [0.0,1.0).
-func Float64() float64 { return globalRand.Float64() }
-
-// Float32 returns, as a float32, a pseudo-random number in [0.0,1.0).
-func Float32() float32 { return globalRand.Float32() }
-
-// Perm returns, as a slice of n ints, a pseudo-random permutation of the integers [0,n).
-func Perm(n int) []int { return globalRand.Perm(n) }
-
-// NormFloat64 returns a normally distributed float64 in the range
-// [-math.MaxFloat64, +math.MaxFloat64] with
-// standard normal distribution (mean = 0, stddev = 1).
-// To produce a different normal distribution, callers can
-// adjust the output using:
-//
-// sample = NormFloat64() * desiredStdDev + desiredMean
-//
-func NormFloat64() float64 { return globalRand.NormFloat64() }
-
-// ExpFloat64 returns an exponentially distributed float64 in the range
-// (0, +math.MaxFloat64] with an exponential distribution whose rate parameter
-// (lambda) is 1 and whose mean is 1/lambda (1).
-// To produce a distribution with a different rate parameter,
-// callers can adjust the output using:
-//
-// sample = ExpFloat64() / desiredRateParameter
-//
-func ExpFloat64() float64 { return globalRand.ExpFloat64() }
-
-type lockedSource struct {
- lk sync.Mutex
- src Source
-}
-
-func (r *lockedSource) Int63() (n int64) {
- r.lk.Lock()
- n = r.src.Int63()
- r.lk.Unlock()
- return
-}
-
-func (r *lockedSource) Seed(seed int64) {
- r.lk.Lock()
- r.src.Seed(seed)
- r.lk.Unlock()
-}
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package rand
-
-import (
- "errors"
- "math"
- "fmt"
- "testing"
-)
-
-const (
- numTestSamples = 10000
-)
-
-type statsResults struct {
- mean float64
- stddev float64
- closeEnough float64
- maxError float64
-}
-
-func max(a, b float64) float64 {
- if a > b {
- return a
- }
- return b
-}
-
-func nearEqual(a, b, closeEnough, maxError float64) bool {
- absDiff := math.Abs(a - b)
- if absDiff < closeEnough { // Necessary when one value is zero and one value is close to zero.
- return true
- }
- return absDiff/max(math.Abs(a), math.Abs(b)) < maxError
-}
-
-var testSeeds = []int64{1, 1754801282, 1698661970, 1550503961}
-
-// checkSimilarDistribution returns success if the mean and stddev of the
-// two statsResults are similar.
-func (this *statsResults) checkSimilarDistribution(expected *statsResults) error {
- if !nearEqual(this.mean, expected.mean, expected.closeEnough, expected.maxError) {
- s := fmt.Sprintf("mean %v != %v (allowed error %v, %v)", this.mean, expected.mean, expected.closeEnough, expected.maxError)
- fmt.Println(s)
- return errors.New(s)
- }
- if !nearEqual(this.stddev, expected.stddev, 0, expected.maxError) {
- s := fmt.Sprintf("stddev %v != %v (allowed error %v, %v)", this.stddev, expected.stddev, expected.closeEnough, expected.maxError)
- fmt.Println(s)
- return errors.New(s)
- }
- return nil
-}
-
-func getStatsResults(samples []float64) *statsResults {
- res := new(statsResults)
- var sum float64
- for i := range samples {
- sum += samples[i]
- }
- res.mean = sum / float64(len(samples))
- var devsum float64
- for i := range samples {
- devsum += math.Pow(samples[i]-res.mean, 2)
- }
- res.stddev = math.Sqrt(devsum / float64(len(samples)))
- return res
-}
-
-func checkSampleDistribution(t *testing.T, samples []float64, expected *statsResults) {
- actual := getStatsResults(samples)
- err := actual.checkSimilarDistribution(expected)
- if err != nil {
- t.Errorf(err.Error())
- }
-}
-
-func checkSampleSliceDistributions(t *testing.T, samples []float64, nslices int, expected *statsResults) {
- chunk := len(samples) / nslices
- for i := 0; i < nslices; i++ {
- low := i * chunk
- var high int
- if i == nslices-1 {
- high = len(samples) - 1
- } else {
- high = (i + 1) * chunk
- }
- checkSampleDistribution(t, samples[low:high], expected)
- }
-}
-
-//
-// Normal distribution tests
-//
-
-func generateNormalSamples(nsamples int, mean, stddev float64, seed int64) []float64 {
- r := New(NewSource(seed))
- samples := make([]float64, nsamples)
- for i := range samples {
- samples[i] = r.NormFloat64()*stddev + mean
- }
- return samples
-}
-
-func testNormalDistribution(t *testing.T, nsamples int, mean, stddev float64, seed int64) {
- //fmt.Printf("testing nsamples=%v mean=%v stddev=%v seed=%v\n", nsamples, mean, stddev, seed);
-
- samples := generateNormalSamples(nsamples, mean, stddev, seed)
- errorScale := max(1.0, stddev) // Error scales with stddev
- expected := &statsResults{mean, stddev, 0.10 * errorScale, 0.08 * errorScale}
-
- // Make sure that the entire set matches the expected distribution.
- checkSampleDistribution(t, samples, expected)
-
- // Make sure that each half of the set matches the expected distribution.
- checkSampleSliceDistributions(t, samples, 2, expected)
-
- // Make sure that each 7th of the set matches the expected distribution.
- checkSampleSliceDistributions(t, samples, 7, expected)
-}
-
-// Actual tests
-
-func TestStandardNormalValues(t *testing.T) {
- for _, seed := range testSeeds {
- testNormalDistribution(t, numTestSamples, 0, 1, seed)
- }
-}
-
-func TestNonStandardNormalValues(t *testing.T) {
- for sd := 0.5; sd < 1000; sd *= 2 {
- for m := 0.5; m < 1000; m *= 2 {
- for _, seed := range testSeeds {
- testNormalDistribution(t, numTestSamples, m, sd, seed)
- }
- }
- }
-}
-
-//
-// Exponential distribution tests
-//
-
-func generateExponentialSamples(nsamples int, rate float64, seed int64) []float64 {
- r := New(NewSource(seed))
- samples := make([]float64, nsamples)
- for i := range samples {
- samples[i] = r.ExpFloat64() / rate
- }
- return samples
-}
-
-func testExponentialDistribution(t *testing.T, nsamples int, rate float64, seed int64) {
- //fmt.Printf("testing nsamples=%v rate=%v seed=%v\n", nsamples, rate, seed);
-
- mean := 1 / rate
- stddev := mean
-
- samples := generateExponentialSamples(nsamples, rate, seed)
- errorScale := max(1.0, 1/rate) // Error scales with the inverse of the rate
- expected := &statsResults{mean, stddev, 0.10 * errorScale, 0.20 * errorScale}
-
- // Make sure that the entire set matches the expected distribution.
- checkSampleDistribution(t, samples, expected)
-
- // Make sure that each half of the set matches the expected distribution.
- checkSampleSliceDistributions(t, samples, 2, expected)
-
- // Make sure that each 7th of the set matches the expected distribution.
- checkSampleSliceDistributions(t, samples, 7, expected)
-}
-
-// Actual tests
-
-func TestStandardExponentialValues(t *testing.T) {
- for _, seed := range testSeeds {
- testExponentialDistribution(t, numTestSamples, 1, seed)
- }
-}
-
-func TestNonStandardExponentialValues(t *testing.T) {
- for rate := 0.05; rate < 10; rate *= 2 {
- for _, seed := range testSeeds {
- testExponentialDistribution(t, numTestSamples, rate, seed)
- }
- }
-}
-
-//
-// Table generation tests
-//
-
-func initNorm() (testKn []uint32, testWn, testFn []float32) {
- const m1 = 1 << 31
- var (
- dn float64 = rn
- tn = dn
- vn float64 = 9.91256303526217e-3
- )
-
- testKn = make([]uint32, 128)
- testWn = make([]float32, 128)
- testFn = make([]float32, 128)
-
- q := vn / math.Exp(-0.5*dn*dn)
- testKn[0] = uint32((dn / q) * m1)
- testKn[1] = 0
- testWn[0] = float32(q / m1)
- testWn[127] = float32(dn / m1)
- testFn[0] = 1.0
- testFn[127] = float32(math.Exp(-0.5 * dn * dn))
- for i := 126; i >= 1; i-- {
- dn = math.Sqrt(-2.0 * math.Log(vn/dn+math.Exp(-0.5*dn*dn)))
- testKn[i+1] = uint32((dn / tn) * m1)
- tn = dn
- testFn[i] = float32(math.Exp(-0.5 * dn * dn))
- testWn[i] = float32(dn / m1)
- }
- return
-}
-
-func initExp() (testKe []uint32, testWe, testFe []float32) {
- const m2 = 1 << 32
- var (
- de float64 = re
- te = de
- ve float64 = 3.9496598225815571993e-3
- )
-
- testKe = make([]uint32, 256)
- testWe = make([]float32, 256)
- testFe = make([]float32, 256)
-
- q := ve / math.Exp(-de)
- testKe[0] = uint32((de / q) * m2)
- testKe[1] = 0
- testWe[0] = float32(q / m2)
- testWe[255] = float32(de / m2)
- testFe[0] = 1.0
- testFe[255] = float32(math.Exp(-de))
- for i := 254; i >= 1; i-- {
- de = -math.Log(ve/de + math.Exp(-de))
- testKe[i+1] = uint32((de / te) * m2)
- te = de
- testFe[i] = float32(math.Exp(-de))
- testWe[i] = float32(de / m2)
- }
- return
-}
-
-// compareUint32Slices returns the first index where the two slices
-// disagree, or <0 if the lengths are the same and all elements
-// are identical.
-func compareUint32Slices(s1, s2 []uint32) int {
- if len(s1) != len(s2) {
- if len(s1) > len(s2) {
- return len(s2) + 1
- }
- return len(s1) + 1
- }
- for i := range s1 {
- if s1[i] != s2[i] {
- return i
- }
- }
- return -1
-}
-
-// compareFloat32Slices returns the first index where the two slices
-// disagree, or <0 if the lengths are the same and all elements
-// are identical.
-func compareFloat32Slices(s1, s2 []float32) int {
- if len(s1) != len(s2) {
- if len(s1) > len(s2) {
- return len(s2) + 1
- }
- return len(s1) + 1
- }
- for i := range s1 {
- if !nearEqual(float64(s1[i]), float64(s2[i]), 0, 1e-7) {
- return i
- }
- }
- return -1
-}
-
-func TestNormTables(t *testing.T) {
- testKn, testWn, testFn := initNorm()
- if i := compareUint32Slices(kn[0:], testKn); i >= 0 {
- t.Errorf("kn disagrees at index %v; %v != %v", i, kn[i], testKn[i])
- }
- if i := compareFloat32Slices(wn[0:], testWn); i >= 0 {
- t.Errorf("wn disagrees at index %v; %v != %v", i, wn[i], testWn[i])
- }
- if i := compareFloat32Slices(fn[0:], testFn); i >= 0 {
- t.Errorf("fn disagrees at index %v; %v != %v", i, fn[i], testFn[i])
- }
-}
-
-func TestExpTables(t *testing.T) {
- testKe, testWe, testFe := initExp()
- if i := compareUint32Slices(ke[0:], testKe); i >= 0 {
- t.Errorf("ke disagrees at index %v; %v != %v", i, ke[i], testKe[i])
- }
- if i := compareFloat32Slices(we[0:], testWe); i >= 0 {
- t.Errorf("we disagrees at index %v; %v != %v", i, we[i], testWe[i])
- }
- if i := compareFloat32Slices(fe[0:], testFe); i >= 0 {
- t.Errorf("fe disagrees at index %v; %v != %v", i, fe[i], testFe[i])
- }
-}
-
-// Benchmarks
-
-func BenchmarkInt63Threadsafe(b *testing.B) {
- for n := b.N; n > 0; n-- {
- Int63()
- }
-}
-
-func BenchmarkInt63Unthreadsafe(b *testing.B) {
- r := New(NewSource(1))
- for n := b.N; n > 0; n-- {
- r.Int63()
- }
-}
-
-func BenchmarkIntn1000(b *testing.B) {
- r := New(NewSource(1))
- for n := b.N; n > 0; n-- {
- r.Intn(1000)
- }
-}
-
-func BenchmarkInt63n1000(b *testing.B) {
- r := New(NewSource(1))
- for n := b.N; n > 0; n-- {
- r.Int63n(1000)
- }
-}
-
-func BenchmarkInt31n1000(b *testing.B) {
- r := New(NewSource(1))
- for n := b.N; n > 0; n-- {
- r.Int31n(1000)
- }
-}
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package rand
-
-/*
- * Uniform distribution
- *
- * algorithm by
- * DP Mitchell and JA Reeds
- */
-
-const (
- _LEN = 607
- _TAP = 273
- _MAX = 1 << 63
- _MASK = _MAX - 1
- _A = 48271
- _M = (1 << 31) - 1
- _Q = 44488
- _R = 3399
-)
-
-var (
- // cooked random numbers
- // the state of the rng
- // after 780e10 iterations
- rng_cooked [_LEN]int64 = [...]int64{
- 5041579894721019882, 4646389086726545243, 1395769623340756751, 5333664234075297259,
- 2875692520355975054, 9033628115061424579, 7143218595135194537, 4812947590706362721,
- 7937252194349799378, 5307299880338848416, 8209348851763925077, 2115741599318814044,
- 4593015457530856296, 8140875735541888011, 3319429241265089026, 8619815648190321034,
- 1727074043483619500, 113108499721038619, 4569519971459345583, 5062833859075314731,
- 2387618771259064424, 2716131344356686112, 6559392774825876886, 7650093201692370310,
- 7684323884043752161, 257867835996031390, 6593456519409015164, 271327514973697897,
- 2789386447340118284, 1065192797246149621, 3344507881999356393, 4459797941780066633,
- 7465081662728599889, 1014950805555097187, 4449440729345990775, 3481109366438502643,
- 2418672789110888383, 5796562887576294778, 4484266064449540171, 3738982361971787048,
- 4523597184512354423, 10530508058128498, 8633833783282346118, 2625309929628791628,
- 8660405965245884302, 10162832508971942, 6540714680961817391, 7031802312784620857,
- 6240911277345944669, 831864355460801054, 8004434137542152891, 2116287251661052151,
- 2202309800992166967, 9161020366945053561, 4069299552407763864, 4936383537992622449,
- 457351505131524928, 342195045928179354, 2847771682816600509, 2068020115986376518,
- 4368649989588021065, 887231587095185257, 5563591506886576496, 6816225200251950296,
- 5616972787034086048, 8471809303394836566, 1686575021641186857, 4045484338074262002,
- 4244156215201778923, 7848217333783577387, 5632136521049761902, 833283142057835272,
- 9029726508369077193, 3243583134664087292, 4316371101804477087, 8937849979965997980,
- 6446940406810434101, 1679342092332374735, 6050638460742422078, 6993520719509581582,
- 7640877852514293609, 5881353426285907985, 812786550756860885, 4541845584483343330,
- 2725470216277009086, 4980675660146853729, 5210769080603236061, 8894283318990530821,
- 6326442804750084282, 1495812843684243920, 7069751578799128019, 7370257291860230865,
- 6756929275356942261, 4706794511633873654, 7824520467827898663, 8549875090542453214,
- 33650829478596156, 1328918435751322643, 7297902601803624459, 1011190183918857495,
- 2238025036817854944, 5147159997473910359, 896512091560522982, 2659470849286379941,
- 6097729358393448602, 1731725986304753684, 4106255841983812711, 8327155210721535508,
- 8477511620686074402, 5803876044675762232, 8435417780860221662, 5988852856651071244,
- 4715837297103951910, 7566171971264485114, 505808562678895611, 5070098180695063370,
- 842110666775871513, 572156825025677802, 1791881013492340891, 3393267094866038768,
- 3778721850472236509, 2352769483186201278, 1292459583847367458, 8897907043675088419,
- 5781809037144163536, 2733958794029492513, 5092019688680754699, 8996124554772526841,
- 4234737173186232084, 5027558287275472836, 4635198586344772304, 8687338893267139351,
- 5907508150730407386, 784756255473944452, 972392927514829904, 5422057694808175112,
- 5158420642969283891, 9048531678558643225, 2407211146698877100, 7583282216521099569,
- 3940796514530962282, 3341174631045206375, 3095313889586102949, 7405321895688238710,
- 5832080132947175283, 7890064875145919662, 8184139210799583195, 1149859861409226130,
- 1464597243840211302, 4641648007187991873, 3516491885471466898, 956288521791657692,
- 6657089965014657519, 5220884358887979358, 1796677326474620641, 5340761970648932916,
- 1147977171614181568, 5066037465548252321, 2574765911837859848, 1085848279845204775,
- 3350107529868390359, 6116438694366558490, 2107701075971293812, 1803294065921269267,
- 2469478054175558874, 7368243281019965984, 3791908367843677526, 185046971116456637,
- 2257095756513439648, 7217693971077460129, 909049953079504259, 7196649268545224266,
- 5637660345400869599, 3955544945427965183, 8057528650917418961, 4139268440301127643,
- 6621926588513568059, 1373361136802681441, 6527366231383600011, 3507654575162700890,
- 9202058512774729859, 1954818376891585542, 6640380907130175705, 8299563319178235687,
- 3901867355218954373, 7046310742295574065, 6847195391333990232, 1572638100518868053,
- 8850422670118399721, 3631909142291992901, 5158881091950831288, 2882958317343121593,
- 4763258931815816403, 6280052734341785344, 4243789408204964850, 2043464728020827976,
- 6545300466022085465, 4562580375758598164, 5495451168795427352, 1738312861590151095,
- 553004618757816492, 6895160632757959823, 8233623922264685171, 7139506338801360852,
- 8550891222387991669, 5535668688139305547, 2430933853350256242, 5401941257863201076,
- 8159640039107728799, 6157493831600770366, 7632066283658143750, 6308328381617103346,
- 3681878764086140361, 3289686137190109749, 6587997200611086848, 244714774258135476,
- 4079788377417136100, 8090302575944624335, 2945117363431356361, 864324395848741045,
- 3009039260312620700, 8430027460082534031, 401084700045993341, 7254622446438694921,
- 4707864159563588614, 5640248530963493951, 5982507712689997893, 3315098242282210105,
- 5503847578771918426, 3941971367175193882, 8118566580304798074, 3839261274019871296,
- 7062410411742090847, 741381002980207668, 6027994129690250817, 2497829994150063930,
- 6251390334426228834, 1368930247903518833, 8809096399316380241, 6492004350391900708,
- 2462145737463489636, 404828418920299174, 4153026434231690595, 261785715255475940,
- 5464715384600071357, 592710404378763017, 6764129236657751224, 8513655718539357449,
- 5820343663801914208, 385298524683789911, 5224135003438199467, 6303131641338802145,
- 7150122561309371392, 368107899140673753, 3115186834558311558, 2915636353584281051,
- 4782583894627718279, 6718292300699989587, 8387085186914375220, 3387513132024756289,
- 4654329375432538231, 8930667561363381602, 5374373436876319273, 7623042350483453954,
- 7725442901813263321, 9186225467561587250, 4091027289597503355, 2357631606492579800,
- 2530936820058611833, 1636551876240043639, 5564664674334965799, 1452244145334316253,
- 2061642381019690829, 1279580266495294036, 9108481583171221009, 6023278686734049809,
- 5007630032676973346, 2153168792952589781, 6720334534964750538, 6041546491134794105,
- 3433922409283786309, 2285479922797300912, 3110614940896576130, 6366559590722842893,
- 5418791419666136509, 7163298419643543757, 4891138053923696990, 580618510277907015,
- 1684034065251686769, 4429514767357295841, 330346578555450005, 1119637995812174675,
- 7177515271653460134, 4589042248470800257, 7693288629059004563, 143607045258444228,
- 246994305896273627, 866417324803099287, 6473547110565816071, 3092379936208876896,
- 2058427839513754051, 5133784708526867938, 8785882556301281247, 6149332666841167611,
- 8585842181454472135, 6137678347805511274, 2070447184436970006, 5708223427705576541,
- 5999657892458244504, 4358391411789012426, 325123008708389849, 6837621693887290924,
- 4843721905315627004, 6010651222149276415, 5398352198963874652, 4602025990114250980,
- 1044646352569048800, 9106614159853161675, 829256115228593269, 4919284369102997000,
- 2681532557646850893, 3681559472488511871, 5307999518958214035, 6334130388442829274,
- 2658708232916537604, 1163313865052186287, 581945337509520675, 3648778920718647903,
- 4423673246306544414, 1620799783996955743, 220828013409515943, 8150384699999389761,
- 4287360518296753003, 4590000184845883843, 5513660857261085186, 6964829100392774275,
- 478991688350776035, 8746140185685648781, 228500091334420247, 1356187007457302238,
- 3019253992034194581, 3152601605678500003, 430152752706002213, 5559581553696971176,
- 4916432985369275664, 663574931734554391, 3420773838927732076, 2868348622579915573,
- 1999319134044418520, 3328689518636282723, 2587672709781371173, 1517255313529399333,
- 3092343956317362483, 3662252519007064108, 972445599196498113, 7664865435875959367,
- 1708913533482282562, 6917817162668868494, 3217629022545312900, 2570043027221707107,
- 8739788839543624613, 2488075924621352812, 4694002395387436668, 4559628481798514356,
- 2997203966153298104, 1282559373026354493, 240113143146674385, 8665713329246516443,
- 628141331766346752, 4571950817186770476, 1472811188152235408, 7596648026010355826,
- 6091219417754424743, 7834161864828164065, 7103445518877254909, 4390861237357459201,
- 4442653864240571734, 8903482404847331368, 622261699494173647, 6037261250297213248,
- 504404948065709118, 7275215526217113061, 1011176780856001400, 2194750105623461063,
- 2623071828615234808, 5157313728073836108, 3738405111966602044, 2539767524076729570,
- 2467284396349269342, 5256026990536851868, 7841086888628396109, 6640857538655893162,
- 1202087339038317498, 2113514992440715978, 7534350895342931403, 4925284734898484745,
- 5145623771477493805, 8225140880134972332, 2719520354384050532, 9132346697815513771,
- 4332154495710163773, 7137789594094346916, 6994721091344268833, 6667228574869048934,
- 655440045726677499, 59934747298466858, 6124974028078036405, 8957774780655365418,
- 2332206071942466437, 1701056712286369627, 3154897383618636503, 1637766181387607527,
- 2460521277767576533, 197309393502684135, 643677854385267315, 2543179307861934850,
- 4350769010207485119, 4754652089410667672, 2015595502641514512, 7999059458976458608,
- 4287946071480840813, 8362686366770308971, 6486469209321732151, 3617727845841796026,
- 7554353525834302244, 4450022655153542367, 1605195740213535749, 5327014565305508387,
- 4626575813550328320, 2692222020597705149, 241045573717249868, 5098046974627094010,
- 7916882295460730264, 884817090297530579, 5329160409530630596, 7790979528857726136,
- 4955070238059373407, 4918537275422674302, 3008076183950404629, 3007769226071157901,
- 2470346235617803020, 8928702772696731736, 7856187920214445904, 4474874585391974885,
- 7900176660600710914, 2140571127916226672, 2425445057265199971, 2486055153341847830,
- 4186670094382025798, 1883939007446035042, 8808666044074867985, 3734134241178479257,
- 4065968871360089196, 6953124200385847784, 1305686814738899057, 1637739099014457647,
- 3656125660947993209, 3966759634633167020, 3106378204088556331, 6328899822778449810,
- 4565385105440252958, 1979884289539493806, 2331793186920865425, 3783206694208922581,
- 8464961209802336085, 2843963751609577687, 3030678195484896323, 4793717574095772604,
- 4459239494808162889, 402587895800087237, 8057891408711167515, 4541888170938985079,
- 1042662272908816815, 5557303057122568958, 2647678726283249984, 2144477441549833761,
- 5806352215355387087, 7117771003473903623, 5916597177708541638, 462597715452321361,
- 8833658097025758785, 5970273481425315300, 563813119381731307, 2768349550652697015,
- 1598828206250873866, 5206393647403558110, 6235043485709261823, 3152217402014639496,
- 8469693267274066490, 125672920241807416, 5311079624024060938, 6663754932310491587,
- 8736848295048751716, 4488039774992061878, 5923302823487327109, 140891791083103236,
- 7414942793393574290, 7990420780896957397, 4317817392807076702, 3625184369705367340,
- 2740722765288122703, 5743100009702758344, 5997898640509039159, 8854493341352484163,
- 5242208035432907801, 701338899890987198, 7609280429197514109, 3020985755112334161,
- 6651322707055512866, 2635195723621160615, 5144520864246028816, 1035086515727829828,
- 1567242097116389047, 8172389260191636581, 6337820351429292273, 2163012566996458925,
- 2743190902890262681, 1906367633221323427, 6011544915663598137, 5932255307352610768,
- 2241128460406315459, 895504896216695588, 3094483003111372717, 4583857460292963101,
- 9079887171656594975, 8839289181930711403, 5762740387243057873, 4225072055348026230,
- 1838220598389033063, 3801620336801580414, 8823526620080073856, 1776617605585100335,
- 7899055018877642622, 5421679761463003041, 5521102963086275121, 4248279443559365898,
- 8735487530905098534, 1760527091573692978, 7142485049657745894, 8222656872927218123,
- 4969531564923704323, 3394475942196872480, 6424174453260338141, 359248545074932887,
- 3273651282831730598, 6797106199797138596, 3030918217665093212, 145600834617314036,
- 6036575856065626233, 740416251634527158, 7080427635449935582, 6951781370868335478,
- 399922722363687927, 294902314447253185, 7844950936339178523, 880320858634709042,
- 6192655680808675579, 411604686384710388, 9026808440365124461, 6440783557497587732,
- 4615674634722404292, 539897290441580544, 2096238225866883852, 8751955639408182687,
- 1907224908052289603, 7381039757301768559, 6157238513393239656, 7749994231914157575,
- 8629571604380892756, 5280433031239081479, 7101611890139813254, 2479018537985767835,
- 7169176924412769570, 7942066497793203302, 1357759729055557688, 2278447439451174845,
- 3625338785743880657, 6477479539006708521, 8976185375579272206, 5511371554711836120,
- 1326024180520890843, 7537449876596048829, 5464680203499696154, 3189671183162196045,
- 6346751753565857109, 241159987320630307, 3095793449658682053, 8978332846736310159,
- 2902794662273147216, 7208698530190629697, 7276901792339343736, 1732385229314443140,
- 4133292154170828382, 2918308698224194548, 1519461397937144458, 5293934712616591764,
- 4922828954023452664, 2879211533496425641, 5896236396443472108, 8465043815351752425,
- 7329020396871624740, 8915471717014488588, 2944902635677463047, 7052079073493465134,
- 8382142935188824023, 9103922860780351547, 4152330101494654406,
- }
-)
-
-type rngSource struct {
- tap int // index into vec
- feed int // index into vec
- vec [_LEN]int64 // current feedback register
-}
-
-// seed rng x[n+1] = 48271 * x[n] mod (2**31 - 1)
-func seedrand(x int32) int32 {
- hi := x / _Q
- lo := x % _Q
- x = _A*lo - _R*hi
- if x < 0 {
- x += _M
- }
- return x
-}
-
-// Seed uses the provided seed value to initialize the generator to a deterministic state.
-func (rng *rngSource) Seed(seed int64) {
- rng.tap = 0
- rng.feed = _LEN - _TAP
-
- seed = seed % _M
- if seed < 0 {
- seed += _M
- }
- if seed == 0 {
- seed = 89482311
- }
-
- x := int32(seed)
- for i := -20; i < _LEN; i++ {
- x = seedrand(x)
- if i >= 0 {
- var u int64
- u = int64(x) << 40
- x = seedrand(x)
- u ^= int64(x) << 20
- x = seedrand(x)
- u ^= int64(x)
- u ^= rng_cooked[i]
- rng.vec[i] = u & _MASK
- }
- }
-}
-
-// Int63 returns a non-negative pseudo-random 63-bit integer as an int64.
-func (rng *rngSource) Int63() int64 {
- rng.tap--
- if rng.tap < 0 {
- rng.tap += _LEN
- }
-
- rng.feed--
- if rng.feed < 0 {
- rng.feed += _LEN
- }
-
- x := (rng.vec[rng.feed] + rng.vec[rng.tap]) & _MASK
- rng.vec[rng.feed] = x
- return x
-}
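The source deleted here is the same additive lagged-Fibonacci generator that this patch now imports as math/rand; a minimal usage sketch against that path (the seed value is arbitrary):

	package main

	import (
		"fmt"
		"math/rand"
	)

	func main() {
		// NewSource returns the generator defined above; Seed reduces the
		// value mod 2**31-1, expands it with the 48271 LCG, and mixes in the
		// cooked table before the first draw.
		r := rand.New(rand.NewSource(42))
		fmt.Println(r.Int63()) // non-negative 63-bit value, deterministic for a fixed seed
	}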
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// W.Hormann, G.Derflinger:
-// "Rejection-Inversion to Generate Variates
-// from Monotone Discrete Distributions"
-// http://eeyore.wu-wien.ac.at/papers/96-04-04.wh-der.ps.gz
-
-package rand
-
-import "math"
-
-// A Zipf generates Zipf distributed variates.
-type Zipf struct {
- r *Rand
- imax float64
- v float64
- q float64
- s float64
- oneminusQ float64
- oneminusQinv float64
- hxm float64
- hx0minusHxm float64
-}
-
-func (z *Zipf) h(x float64) float64 {
- return math.Exp(z.oneminusQ*math.Log(z.v+x)) * z.oneminusQinv
-}
-
-func (z *Zipf) hinv(x float64) float64 {
- return math.Exp(z.oneminusQinv*math.Log(z.oneminusQ*x)) - z.v
-}
-
-// NewZipf returns a Zipf generator producing variates k on [0, imax]
-// with p(k) proportional to (v+k)**(-s), where s > 1 and v >= 1.
-//
-func NewZipf(r *Rand, s float64, v float64, imax uint64) *Zipf {
- z := new(Zipf)
- if s <= 1.0 || v < 1 {
- return nil
- }
- z.r = r
- z.imax = float64(imax)
- z.v = v
- z.q = s
- z.oneminusQ = 1.0 - z.q
- z.oneminusQinv = 1.0 / z.oneminusQ
- z.hxm = z.h(z.imax + 0.5)
- z.hx0minusHxm = z.h(0.5) - math.Exp(math.Log(z.v)*(-z.q)) - z.hxm
- z.s = 1 - z.hinv(z.h(1.5)-math.Exp(-z.q*math.Log(z.v+1.0)))
- return z
-}
-
-// Uint64 returns a value drawn from the Zipf distribution described
-// by the Zipf object.
-func (z *Zipf) Uint64() uint64 {
- k := 0.0
-
- for {
- r := z.r.Float64() // r on [0,1]
- ur := z.hxm + r*z.hx0minusHxm
- x := z.hinv(ur)
- k = math.Floor(x + 0.5)
- if k-x <= z.s {
- break
- }
- if ur >= z.h(k+0.5)-math.Exp(-math.Log(k+z.v)*z.q) {
- break
- }
- }
- return uint64(k)
-}
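The same type survives as math/rand's Zipf; a short sketch drawing a few variates (the parameters s=1.5, v=1, imax=100 are chosen only for illustration):

	package main

	import (
		"fmt"
		"math/rand"
	)

	func main() {
		r := rand.New(rand.NewSource(1))
		// NewZipf requires s > 1 and v >= 1, and returns nil otherwise.
		z := rand.NewZipf(r, 1.5, 1, 100)
		for i := 0; i < 5; i++ {
			fmt.Println(z.Uint64()) // k in [0, 100], p(k) proportional to (v+k)**(-s)
		}
	}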
"compress/bzip2"
"fmt"
"io"
+ "math/rand"
old "old/regexp"
"os"
"path/filepath"
- "rand"
"regexp/syntax"
"strconv"
"strings"
"testing"
- "utf8"
+ "unicode/utf8"
)
// TestRE2 tests this package's regexp API against test cases
"strconv"
"strings"
"sync"
- "utf8"
+ "unicode/utf8"
)
var debug = false
"sort"
"strings"
"unicode"
- "utf8"
+ "unicode/utf8"
)
// An Error describes a failure to parse a regular expression
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package rpc
-
-import (
- "bufio"
- "errors"
- "gob"
- "http"
- "io"
- "log"
- "net"
- "sync"
-)
-
-// ServerError represents an error that has been returned from
-// the remote side of the RPC connection.
-type ServerError string
-
-func (e ServerError) Error() string {
- return string(e)
-}
-
-var ErrShutdown = errors.New("connection is shut down")
-
-// Call represents an active RPC.
-type Call struct {
- ServiceMethod string // The name of the service and method to call.
- Args interface{} // The argument to the function (*struct).
- Reply interface{} // The reply from the function (*struct).
- Error error // After completion, the error status.
- Done chan *Call // Strobes when call is complete; value is the error status.
- seq uint64
-}
-
-// Client represents an RPC Client.
-// There may be multiple outstanding Calls associated
-// with a single Client.
-type Client struct {
- mutex sync.Mutex // protects pending, seq, request
- sending sync.Mutex
- request Request
- seq uint64
- codec ClientCodec
- pending map[uint64]*Call
- closing bool
- shutdown bool
-}
-
-// A ClientCodec implements writing of RPC requests and
-// reading of RPC responses for the client side of an RPC session.
-// The client calls WriteRequest to write a request to the connection
-// and calls ReadResponseHeader and ReadResponseBody in pairs
-// to read responses. The client calls Close when finished with the
-// connection. ReadResponseBody may be called with a nil
-// argument to force the body of the response to be read and then
-// discarded.
-type ClientCodec interface {
- WriteRequest(*Request, interface{}) error
- ReadResponseHeader(*Response) error
- ReadResponseBody(interface{}) error
-
- Close() error
-}
-
-func (client *Client) send(c *Call) {
- // Register this call.
- client.mutex.Lock()
- if client.shutdown {
- c.Error = ErrShutdown
- client.mutex.Unlock()
- c.done()
- return
- }
- c.seq = client.seq
- client.seq++
- client.pending[c.seq] = c
- client.mutex.Unlock()
-
- // Encode and send the request.
- client.sending.Lock()
- defer client.sending.Unlock()
- client.request.Seq = c.seq
- client.request.ServiceMethod = c.ServiceMethod
- if err := client.codec.WriteRequest(&client.request, c.Args); err != nil {
- c.Error = err
- c.done()
- }
-}
-
-func (client *Client) input() {
- var err error
- var response Response
- for err == nil {
- response = Response{}
- err = client.codec.ReadResponseHeader(&response)
- if err != nil {
- if err == io.EOF && !client.closing {
- err = io.ErrUnexpectedEOF
- }
- break
- }
- seq := response.Seq
- client.mutex.Lock()
- c := client.pending[seq]
- delete(client.pending, seq)
- client.mutex.Unlock()
-
- if response.Error == "" {
- err = client.codec.ReadResponseBody(c.Reply)
- if err != nil {
- c.Error = errors.New("reading body " + err.Error())
- }
- } else {
- // We've got an error response. Give this to the request;
- // any subsequent requests will get the ReadResponseBody
- // error if there is one.
- c.Error = ServerError(response.Error)
- err = client.codec.ReadResponseBody(nil)
- if err != nil {
- err = errors.New("reading error body: " + err.Error())
- }
- }
- c.done()
- }
- // Terminate pending calls.
- client.mutex.Lock()
- client.shutdown = true
- for _, call := range client.pending {
- call.Error = err
- call.done()
- }
- client.mutex.Unlock()
- if err != io.EOF || !client.closing {
- log.Println("rpc: client protocol error:", err)
- }
-}
-
-func (call *Call) done() {
- select {
- case call.Done <- call:
- // ok
- default:
- // We don't want to block here. It is the caller's responsibility to make
- // sure the channel has enough buffer space. See comment in Go().
- }
-}
-
-// NewClient returns a new Client to handle requests to the
-// set of services at the other end of the connection.
-// It adds a buffer to the write side of the connection so
-// the header and payload are sent as a unit.
-func NewClient(conn io.ReadWriteCloser) *Client {
- encBuf := bufio.NewWriter(conn)
- client := &gobClientCodec{conn, gob.NewDecoder(conn), gob.NewEncoder(encBuf), encBuf}
- return NewClientWithCodec(client)
-}
-
-// NewClientWithCodec is like NewClient but uses the specified
-// codec to encode requests and decode responses.
-func NewClientWithCodec(codec ClientCodec) *Client {
- client := &Client{
- codec: codec,
- pending: make(map[uint64]*Call),
- }
- go client.input()
- return client
-}
-
-type gobClientCodec struct {
- rwc io.ReadWriteCloser
- dec *gob.Decoder
- enc *gob.Encoder
- encBuf *bufio.Writer
-}
-
-func (c *gobClientCodec) WriteRequest(r *Request, body interface{}) (err error) {
- if err = c.enc.Encode(r); err != nil {
- return
- }
- if err = c.enc.Encode(body); err != nil {
- return
- }
- return c.encBuf.Flush()
-}
-
-func (c *gobClientCodec) ReadResponseHeader(r *Response) error {
- return c.dec.Decode(r)
-}
-
-func (c *gobClientCodec) ReadResponseBody(body interface{}) error {
- return c.dec.Decode(body)
-}
-
-func (c *gobClientCodec) Close() error {
- return c.rwc.Close()
-}
-
-// DialHTTP connects to an HTTP RPC server at the specified network address
-// listening on the default HTTP RPC path.
-func DialHTTP(network, address string) (*Client, error) {
- return DialHTTPPath(network, address, DefaultRPCPath)
-}
-
-// DialHTTPPath connects to an HTTP RPC server
-// at the specified network address and path.
-func DialHTTPPath(network, address, path string) (*Client, error) {
- var err error
- conn, err := net.Dial(network, address)
- if err != nil {
- return nil, err
- }
- io.WriteString(conn, "CONNECT "+path+" HTTP/1.0\n\n")
-
- // Require successful HTTP response
- // before switching to RPC protocol.
- resp, err := http.ReadResponse(bufio.NewReader(conn), &http.Request{Method: "CONNECT"})
- if err == nil && resp.Status == connected {
- return NewClient(conn), nil
- }
- if err == nil {
- err = errors.New("unexpected HTTP response: " + resp.Status)
- }
- conn.Close()
- return nil, &net.OpError{"dial-http", network + " " + address, nil, err}
-}
-
-// Dial connects to an RPC server at the specified network address.
-func Dial(network, address string) (*Client, error) {
- conn, err := net.Dial(network, address)
- if err != nil {
- return nil, err
- }
- return NewClient(conn), nil
-}
-
-func (client *Client) Close() error {
- client.mutex.Lock()
- if client.shutdown || client.closing {
- client.mutex.Unlock()
- return ErrShutdown
- }
- client.closing = true
- client.mutex.Unlock()
- return client.codec.Close()
-}
-
-// Go invokes the function asynchronously. It returns the Call structure representing
-// the invocation. The done channel will signal when the call is complete by returning
-// the same Call object. If done is nil, Go will allocate a new channel.
-// If non-nil, done must be buffered or Go will deliberately crash.
-func (client *Client) Go(serviceMethod string, args interface{}, reply interface{}, done chan *Call) *Call {
- call := new(Call)
- call.ServiceMethod = serviceMethod
- call.Args = args
- call.Reply = reply
- if done == nil {
- done = make(chan *Call, 10) // buffered.
- } else {
- // If caller passes done != nil, it must arrange that
- // done has enough buffer for the number of simultaneous
- // RPCs that will be using that channel. If the channel
- // is totally unbuffered, it's best not to run at all.
- if cap(done) == 0 {
- log.Panic("rpc: done channel is unbuffered")
- }
- }
- call.Done = done
- if client.shutdown {
- call.Error = ErrShutdown
- call.done()
- return call
- }
- client.send(call)
- return call
-}
-
-// Call invokes the named function, waits for it to complete, and returns its error status.
-func (client *Client) Call(serviceMethod string, args interface{}, reply interface{}) error {
- if client.shutdown {
- return ErrShutdown
- }
- call := <-client.Go(serviceMethod, args, reply, make(chan *Call, 1)).Done
- return call.Error
-}
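Assuming the post-reorganization import path net/rpc, the client half above is used like this; a self-contained sketch with an in-process server (the Arith service, its values, and the addresses are illustrative only):

	// Sketch only: assumes the reorganized import path net/rpc and an
	// illustrative Arith service; not part of this patch.
	package main

	import (
		"fmt"
		"log"
		"net"
		"net/rpc"
	)

	type Args struct{ A, B int }
	type Reply struct{ C int }

	type Arith int

	func (t *Arith) Mul(args *Args, reply *Reply) error {
		reply.C = args.A * args.B
		return nil
	}

	func main() {
		// In-process server so the example is self-contained.
		rpc.Register(new(Arith))
		l, err := net.Listen("tcp", "127.0.0.1:0")
		if err != nil {
			log.Fatal(err)
		}
		go rpc.Accept(l)

		client, err := rpc.Dial("tcp", l.Addr().String())
		if err != nil {
			log.Fatal("dialing:", err)
		}

		// Synchronous path: Call blocks until the reply (or an error) arrives.
		var reply Reply
		if err := client.Call("Arith.Mul", &Args{7, 8}, &reply); err != nil {
			log.Fatal("Arith.Mul:", err)
		}
		fmt.Println(reply.C) // 56

		// Asynchronous path: Go returns a *Call whose Done channel strobes on
		// completion; passing done == nil lets Go allocate a buffered channel,
		// as documented above.
		call := client.Go("Arith.Mul", &Args{6, 7}, new(Reply), nil)
		<-call.Done
		fmt.Println(call.Reply.(*Reply).C, call.Error) // 42 <nil>
	}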
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package rpc
-
-/*
-	Serves some HTML at http://machine:port/debug/rpc, listing the
-	registered services, their methods, and some statistics. Still rudimentary.
-*/
-
-import (
- "fmt"
- "http"
- "sort"
- "template"
-)
-
-const debugText = `<html>
- <body>
- <title>Services</title>
- {{range .}}
- <hr>
- Service {{.Name}}
- <hr>
- <table>
- <th align=center>Method</th><th align=center>Calls</th>
- {{range .Method}}
- <tr>
- <td align=left font=fixed>{{.Name}}({{.Type.ArgType}}, {{.Type.ReplyType}}) error</td>
- <td align=center>{{.Type.NumCalls}}</td>
- </tr>
- {{end}}
- </table>
- {{end}}
- </body>
- </html>`
-
-var debug = template.Must(template.New("RPC debug").Parse(debugText))
-
-type debugMethod struct {
- Type *methodType
- Name string
-}
-
-type methodArray []debugMethod
-
-type debugService struct {
- Service *service
- Name string
- Method methodArray
-}
-
-type serviceArray []debugService
-
-func (s serviceArray) Len() int { return len(s) }
-func (s serviceArray) Less(i, j int) bool { return s[i].Name < s[j].Name }
-func (s serviceArray) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-
-func (m methodArray) Len() int { return len(m) }
-func (m methodArray) Less(i, j int) bool { return m[i].Name < m[j].Name }
-func (m methodArray) Swap(i, j int) { m[i], m[j] = m[j], m[i] }
-
-type debugHTTP struct {
- *Server
-}
-
-// Runs at /debug/rpc
-func (server debugHTTP) ServeHTTP(w http.ResponseWriter, req *http.Request) {
- // Build a sorted version of the data.
- var services = make(serviceArray, len(server.serviceMap))
- i := 0
- server.mu.Lock()
- for sname, service := range server.serviceMap {
- services[i] = debugService{service, sname, make(methodArray, len(service.method))}
- j := 0
- for mname, method := range service.method {
- services[i].Method[j] = debugMethod{method, mname}
- j++
- }
- sort.Sort(services[i].Method)
- i++
- }
- server.mu.Unlock()
- sort.Sort(services)
- err := debug.Execute(w, services)
- if err != nil {
- fmt.Fprintln(w, "rpc: error executing template:", err.Error())
- }
-}
+++ /dev/null
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package jsonrpc
-
-import (
- "errors"
- "fmt"
- "io"
- "json"
- "net"
- "rpc"
- "testing"
-)
-
-type Args struct {
- A, B int
-}
-
-type Reply struct {
- C int
-}
-
-type Arith int
-
-func (t *Arith) Add(args *Args, reply *Reply) error {
- reply.C = args.A + args.B
- return nil
-}
-
-func (t *Arith) Mul(args *Args, reply *Reply) error {
- reply.C = args.A * args.B
- return nil
-}
-
-func (t *Arith) Div(args *Args, reply *Reply) error {
- if args.B == 0 {
- return errors.New("divide by zero")
- }
- reply.C = args.A / args.B
- return nil
-}
-
-func (t *Arith) Error(args *Args, reply *Reply) error {
- panic("ERROR")
-}
-
-func init() {
- rpc.Register(new(Arith))
-}
-
-func TestServer(t *testing.T) {
- type addResp struct {
- Id interface{} `json:"id"`
- Result Reply `json:"result"`
- Error interface{} `json:"error"`
- }
-
- cli, srv := net.Pipe()
- defer cli.Close()
- go ServeConn(srv)
- dec := json.NewDecoder(cli)
-
- // Send hand-coded requests to server, parse responses.
- for i := 0; i < 10; i++ {
- fmt.Fprintf(cli, `{"method": "Arith.Add", "id": "\u%04d", "params": [{"A": %d, "B": %d}]}`, i, i, i+1)
- var resp addResp
- err := dec.Decode(&resp)
- if err != nil {
- t.Fatalf("Decode: %s", err)
- }
- if resp.Error != nil {
- t.Fatalf("resp.Error: %s", resp.Error)
- }
- if resp.Id.(string) != string(i) {
- t.Fatalf("resp: bad id %q want %q", resp.Id.(string), string(i))
- }
- if resp.Result.C != 2*i+1 {
- t.Fatalf("resp: bad result: %d+%d=%d", i, i+1, resp.Result.C)
- }
- }
-
- fmt.Fprintf(cli, "{}\n")
- var resp addResp
- if err := dec.Decode(&resp); err != nil {
- t.Fatalf("Decode after empty: %s", err)
- }
- if resp.Error == nil {
- t.Fatalf("Expected error, got nil")
- }
-}
-
-func TestClient(t *testing.T) {
- // Assume server is okay (TestServer is above).
- // Test client against server.
- cli, srv := net.Pipe()
- go ServeConn(srv)
-
- client := NewClient(cli)
- defer client.Close()
-
- // Synchronous calls
- args := &Args{7, 8}
- reply := new(Reply)
- err := client.Call("Arith.Add", args, reply)
- if err != nil {
- t.Errorf("Add: expected no error but got string %q", err.Error())
- }
- if reply.C != args.A+args.B {
- t.Errorf("Add: expected %d got %d", reply.C, args.A+args.B)
- }
-
- args = &Args{7, 8}
- reply = new(Reply)
- err = client.Call("Arith.Mul", args, reply)
- if err != nil {
- t.Errorf("Mul: expected no error but got string %q", err.Error())
- }
- if reply.C != args.A*args.B {
- t.Errorf("Mul: expected %d got %d", reply.C, args.A*args.B)
- }
-
- // Out of order.
- args = &Args{7, 8}
- mulReply := new(Reply)
- mulCall := client.Go("Arith.Mul", args, mulReply, nil)
- addReply := new(Reply)
- addCall := client.Go("Arith.Add", args, addReply, nil)
-
- addCall = <-addCall.Done
- if addCall.Error != nil {
- t.Errorf("Add: expected no error but got string %q", addCall.Error.Error())
- }
- if addReply.C != args.A+args.B {
- t.Errorf("Add: expected %d got %d", addReply.C, args.A+args.B)
- }
-
- mulCall = <-mulCall.Done
- if mulCall.Error != nil {
- t.Errorf("Mul: expected no error but got string %q", mulCall.Error.Error())
- }
- if mulReply.C != args.A*args.B {
- t.Errorf("Mul: expected %d got %d", mulReply.C, args.A*args.B)
- }
-
- // Error test
- args = &Args{7, 0}
- reply = new(Reply)
- err = client.Call("Arith.Div", args, reply)
- // expect an error: zero divide
- if err == nil {
- t.Error("Div: expected error")
- } else if err.Error() != "divide by zero" {
- t.Error("Div: expected divide by zero error; got", err)
- }
-}
-
-func TestMalformedInput(t *testing.T) {
- cli, srv := net.Pipe()
- go cli.Write([]byte(`{id:1}`)) // invalid json
- ServeConn(srv) // must return, not loop
-}
-
-func TestUnexpectedError(t *testing.T) {
- cli, srv := myPipe()
- go cli.PipeWriter.CloseWithError(errors.New("unexpected error!")) // reader will get this error
- ServeConn(srv) // must return, not loop
-}
-
-// Copied from package net.
-func myPipe() (*pipe, *pipe) {
- r1, w1 := io.Pipe()
- r2, w2 := io.Pipe()
-
- return &pipe{r1, w2}, &pipe{r2, w1}
-}
-
-type pipe struct {
- *io.PipeReader
- *io.PipeWriter
-}
-
-type pipeAddr int
-
-func (pipeAddr) Network() string {
- return "pipe"
-}
-
-func (pipeAddr) String() string {
- return "pipe"
-}
-
-func (p *pipe) Close() error {
- err := p.PipeReader.Close()
- err1 := p.PipeWriter.Close()
- if err == nil {
- err = err1
- }
- return err
-}
-
-func (p *pipe) LocalAddr() net.Addr {
- return pipeAddr(0)
-}
-
-func (p *pipe) RemoteAddr() net.Addr {
- return pipeAddr(0)
-}
-
-func (p *pipe) SetTimeout(nsec int64) error {
- return errors.New("net.Pipe does not support timeouts")
-}
-
-func (p *pipe) SetReadTimeout(nsec int64) error {
- return errors.New("net.Pipe does not support timeouts")
-}
-
-func (p *pipe) SetWriteTimeout(nsec int64) error {
- return errors.New("net.Pipe does not support timeouts")
-}
+++ /dev/null
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package jsonrpc implements a JSON-RPC ClientCodec and ServerCodec
-// for the rpc package.
-package jsonrpc
-
-import (
- "fmt"
- "io"
- "json"
- "net"
- "rpc"
- "sync"
-)
-
-type clientCodec struct {
- dec *json.Decoder // for reading JSON values
- enc *json.Encoder // for writing JSON values
- c io.Closer
-
- // temporary work space
- req clientRequest
- resp clientResponse
-
- // JSON-RPC responses include the request id but not the request method.
- // Package rpc expects both.
- // We save the request method in pending when sending a request
- // and then look it up by request ID when filling out the rpc Response.
- mutex sync.Mutex // protects pending
- pending map[uint64]string // map request id to method name
-}
-
-// NewClientCodec returns a new rpc.ClientCodec using JSON-RPC on conn.
-func NewClientCodec(conn io.ReadWriteCloser) rpc.ClientCodec {
- return &clientCodec{
- dec: json.NewDecoder(conn),
- enc: json.NewEncoder(conn),
- c: conn,
- pending: make(map[uint64]string),
- }
-}
-
-type clientRequest struct {
- Method string `json:"method"`
- Params [1]interface{} `json:"params"`
- Id uint64 `json:"id"`
-}
-
-func (c *clientCodec) WriteRequest(r *rpc.Request, param interface{}) error {
- c.mutex.Lock()
- c.pending[r.Seq] = r.ServiceMethod
- c.mutex.Unlock()
- c.req.Method = r.ServiceMethod
- c.req.Params[0] = param
- c.req.Id = r.Seq
- return c.enc.Encode(&c.req)
-}
-
-type clientResponse struct {
- Id uint64 `json:"id"`
- Result *json.RawMessage `json:"result"`
- Error interface{} `json:"error"`
-}
-
-func (r *clientResponse) reset() {
- r.Id = 0
- r.Result = nil
- r.Error = nil
-}
-
-func (c *clientCodec) ReadResponseHeader(r *rpc.Response) error {
- c.resp.reset()
- if err := c.dec.Decode(&c.resp); err != nil {
- return err
- }
-
- c.mutex.Lock()
- r.ServiceMethod = c.pending[c.resp.Id]
- delete(c.pending, c.resp.Id)
- c.mutex.Unlock()
-
- r.Error = ""
- r.Seq = c.resp.Id
- if c.resp.Error != nil {
- x, ok := c.resp.Error.(string)
- if !ok {
- return fmt.Errorf("invalid error %v", c.resp.Error)
- }
- if x == "" {
- x = "unspecified error"
- }
- r.Error = x
- }
- return nil
-}
-
-func (c *clientCodec) ReadResponseBody(x interface{}) error {
- if x == nil {
- return nil
- }
- return json.Unmarshal(*c.resp.Result, x)
-}
-
-func (c *clientCodec) Close() error {
- return c.c.Close()
-}
-
-// NewClient returns a new rpc.Client to handle requests to the
-// set of services at the other end of the connection.
-func NewClient(conn io.ReadWriteCloser) *rpc.Client {
- return rpc.NewClientWithCodec(NewClientCodec(conn))
-}
-
-// Dial connects to a JSON-RPC server at the specified network address.
-func Dial(network, address string) (*rpc.Client, error) {
- conn, err := net.Dial(network, address)
- if err != nil {
- return nil, err
- }
- return NewClient(conn), err
-}
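Assuming the post-reorganization path net/rpc/jsonrpc, the codec above plugs into the generic client like this; a minimal sketch over an in-memory pipe, mirroring the tests earlier in this patch:

	// Sketch only: assumes the reorganized import path net/rpc/jsonrpc;
	// the Arith service and values are illustrative.
	package main

	import (
		"fmt"
		"log"
		"net"
		"net/rpc"
		"net/rpc/jsonrpc"
	)

	type Args struct{ A, B int }
	type Reply struct{ C int }

	type Arith int

	func (t *Arith) Add(args *Args, reply *Reply) error {
		reply.C = args.A + args.B
		return nil
	}

	func main() {
		rpc.Register(new(Arith))

		// net.Pipe gives an in-memory connection, as in the tests earlier in this patch.
		cli, srv := net.Pipe()
		go jsonrpc.ServeConn(srv)

		client := jsonrpc.NewClient(cli)
		defer client.Close()

		var reply Reply
		if err := client.Call("Arith.Add", &Args{7, 8}, &reply); err != nil {
			log.Fatal(err)
		}
		fmt.Println(reply.C) // 15
	}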
+++ /dev/null
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package jsonrpc
-
-import (
- "errors"
- "io"
- "json"
- "rpc"
- "sync"
-)
-
-type serverCodec struct {
- dec *json.Decoder // for reading JSON values
- enc *json.Encoder // for writing JSON values
- c io.Closer
-
- // temporary work space
- req serverRequest
- resp serverResponse
-
- // JSON-RPC clients can use arbitrary json values as request IDs.
- // Package rpc expects uint64 request IDs.
- // We assign uint64 sequence numbers to incoming requests
- // but save the original request ID in the pending map.
- // When rpc responds, we use the sequence number in
- // the response to find the original request ID.
- mutex sync.Mutex // protects seq, pending
- seq uint64
- pending map[uint64]*json.RawMessage
-}
-
-// NewServerCodec returns a new rpc.ServerCodec using JSON-RPC on conn.
-func NewServerCodec(conn io.ReadWriteCloser) rpc.ServerCodec {
- return &serverCodec{
- dec: json.NewDecoder(conn),
- enc: json.NewEncoder(conn),
- c: conn,
- pending: make(map[uint64]*json.RawMessage),
- }
-}
-
-type serverRequest struct {
- Method string `json:"method"`
- Params *json.RawMessage `json:"params"`
- Id *json.RawMessage `json:"id"`
-}
-
-func (r *serverRequest) reset() {
- r.Method = ""
- if r.Params != nil {
- *r.Params = (*r.Params)[0:0]
- }
- if r.Id != nil {
- *r.Id = (*r.Id)[0:0]
- }
-}
-
-type serverResponse struct {
- Id *json.RawMessage `json:"id"`
- Result interface{} `json:"result"`
- Error interface{} `json:"error"`
-}
-
-func (c *serverCodec) ReadRequestHeader(r *rpc.Request) error {
- c.req.reset()
- if err := c.dec.Decode(&c.req); err != nil {
- return err
- }
- r.ServiceMethod = c.req.Method
-
- // JSON request id can be any JSON value;
- // RPC package expects uint64. Translate to
- // internal uint64 and save JSON on the side.
- c.mutex.Lock()
- c.seq++
- c.pending[c.seq] = c.req.Id
- c.req.Id = nil
- r.Seq = c.seq
- c.mutex.Unlock()
-
- return nil
-}
-
-func (c *serverCodec) ReadRequestBody(x interface{}) error {
- if x == nil {
- return nil
- }
- // JSON params is array value.
- // RPC params is struct.
- // Unmarshal into array containing struct for now.
- // Should think about making RPC more general.
- var params [1]interface{}
- params[0] = x
-	return json.Unmarshal(*c.req.Params, &params)
-}
-
-var null = json.RawMessage([]byte("null"))
-
-func (c *serverCodec) WriteResponse(r *rpc.Response, x interface{}) error {
- var resp serverResponse
- c.mutex.Lock()
- b, ok := c.pending[r.Seq]
- if !ok {
- c.mutex.Unlock()
- return errors.New("invalid sequence number in response")
- }
- delete(c.pending, r.Seq)
- c.mutex.Unlock()
-
- if b == nil {
- // Invalid request so no id. Use JSON null.
- b = &null
- }
- resp.Id = b
- resp.Result = x
- if r.Error == "" {
- resp.Error = nil
- } else {
- resp.Error = r.Error
- }
- return c.enc.Encode(resp)
-}
-
-func (c *serverCodec) Close() error {
- return c.c.Close()
-}
-
-// ServeConn runs the JSON-RPC server on a single connection.
-// ServeConn blocks, serving the connection until the client hangs up.
-// The caller typically invokes ServeConn in a go statement.
-func ServeConn(conn io.ReadWriteCloser) {
- rpc.ServeCodec(NewServerCodec(conn))
-}
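For reference, the request and response structs above produce wire traffic of this shape for a single Arith.Add call (field order follows the struct definitions; the id is whatever raw JSON value the client sent):

	--> {"method":"Arith.Add","params":[{"A":7,"B":8}],"id":1}
	<-- {"id":1,"result":{"C":15},"error":null}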
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-/*
- Package rpc provides access to the exported methods of an object across a
- network or other I/O connection. A server registers an object, making it visible
- as a service with the name of the type of the object. After registration, exported
- methods of the object will be accessible remotely. A server may register multiple
- objects (services) of different types but it is an error to register multiple
- objects of the same type.
-
- Only methods that satisfy these criteria will be made available for remote access;
- other methods will be ignored:
-
- - the method name is exported, that is, begins with an upper case letter.
- - the method receiver is exported or local (defined in the package
- registering the service).
- - the method has two arguments, both exported or local types.
- - the method's second argument is a pointer.
- - the method has return type error.
-
- The method's first argument represents the arguments provided by the caller; the
- second argument represents the result parameters to be returned to the caller.
- The method's return value, if non-nil, is passed back as a string that the client
- sees as if created by errors.New.
-
- The server may handle requests on a single connection by calling ServeConn. More
- typically it will create a network listener and call Accept or, for an HTTP
- listener, HandleHTTP and http.Serve.
-
- A client wishing to use the service establishes a connection and then invokes
- NewClient on the connection. The convenience function Dial (DialHTTP) performs
- both steps for a raw network connection (an HTTP connection). The resulting
- Client object has two methods, Call and Go, that specify the service and method to
- call, a pointer containing the arguments, and a pointer to receive the result
- parameters.
-
- Call waits for the remote call to complete; Go launches the call asynchronously
- and returns a channel that will signal completion.
-
- Package "gob" is used to transport the data.
-
- Here is a simple example. A server wishes to export an object of type Arith:
-
- package server
-
- type Args struct {
- A, B int
- }
-
- type Quotient struct {
- Quo, Rem int
- }
-
- type Arith int
-
- func (t *Arith) Multiply(args *Args, reply *int) error {
- *reply = args.A * args.B
- return nil
- }
-
- func (t *Arith) Divide(args *Args, quo *Quotient) error {
- if args.B == 0 {
- return errors.New("divide by zero")
- }
- quo.Quo = args.A / args.B
- quo.Rem = args.A % args.B
- return nil
- }
-
- The server calls (for HTTP service):
-
- arith := new(Arith)
- rpc.Register(arith)
- rpc.HandleHTTP()
- l, e := net.Listen("tcp", ":1234")
- if e != nil {
- log.Fatal("listen error:", e)
- }
- go http.Serve(l, nil)
-
- At this point, clients can see a service "Arith" with methods "Arith.Multiply" and
- "Arith.Divide". To invoke one, a client first dials the server:
-
- client, err := rpc.DialHTTP("tcp", serverAddress + ":1234")
- if err != nil {
- log.Fatal("dialing:", err)
- }
-
- Then it can make a remote call:
-
- // Synchronous call
- args := &server.Args{7,8}
- var reply int
- err = client.Call("Arith.Multiply", args, &reply)
- if err != nil {
- log.Fatal("arith error:", err)
- }
- fmt.Printf("Arith: %d*%d=%d", args.A, args.B, reply)
-
- or
-
- // Asynchronous call
- quotient := new(Quotient)
-	divCall := client.Go("Arith.Divide", args, quotient, nil)
- replyCall := <-divCall.Done // will be equal to divCall
- // check errors, print, etc.
-
- A server implementation will often provide a simple, type-safe wrapper for the
- client.
-*/
-package rpc
-
-import (
- "bufio"
- "errors"
- "gob"
- "http"
- "io"
- "log"
- "net"
- "reflect"
- "strings"
- "sync"
- "unicode"
- "utf8"
-)
-
-const (
- // Defaults used by HandleHTTP
- DefaultRPCPath = "/_goRPC_"
- DefaultDebugPath = "/debug/rpc"
-)
-
-// Precompute the reflect type for error. Can't use error directly
-// because TypeOf takes an empty interface value. This is annoying.
-var typeOfError = reflect.TypeOf((*error)(nil)).Elem()
-
-type methodType struct {
- sync.Mutex // protects counters
- method reflect.Method
- ArgType reflect.Type
- ReplyType reflect.Type
- numCalls uint
-}
-
-type service struct {
- name string // name of service
- rcvr reflect.Value // receiver of methods for the service
- typ reflect.Type // type of the receiver
- method map[string]*methodType // registered methods
-}
-
-// Request is a header written before every RPC call. It is used internally
-// but documented here as an aid to debugging, such as when analyzing
-// network traffic.
-type Request struct {
- ServiceMethod string // format: "Service.Method"
- Seq uint64 // sequence number chosen by client
- next *Request // for free list in Server
-}
-
-// Response is a header written before every RPC return. It is used internally
-// but documented here as an aid to debugging, such as when analyzing
-// network traffic.
-type Response struct {
- ServiceMethod string // echoes that of the Request
- Seq uint64 // echoes that of the request
- Error string // error, if any.
- next *Response // for free list in Server
-}
-
-// Server represents an RPC Server.
-type Server struct {
- mu sync.Mutex // protects the serviceMap
- serviceMap map[string]*service
- reqLock sync.Mutex // protects freeReq
- freeReq *Request
- respLock sync.Mutex // protects freeResp
- freeResp *Response
-}
-
-// NewServer returns a new Server.
-func NewServer() *Server {
- return &Server{serviceMap: make(map[string]*service)}
-}
-
-// DefaultServer is the default instance of *Server.
-var DefaultServer = NewServer()
-
-// Is this an exported - upper case - name?
-func isExported(name string) bool {
- rune, _ := utf8.DecodeRuneInString(name)
- return unicode.IsUpper(rune)
-}
-
-// Is this type exported or a builtin?
-func isExportedOrBuiltinType(t reflect.Type) bool {
- for t.Kind() == reflect.Ptr {
- t = t.Elem()
- }
- // PkgPath will be non-empty even for an exported type,
- // so we need to check the type name as well.
- return isExported(t.Name()) || t.PkgPath() == ""
-}
-
-// Register publishes in the server the set of methods of the
-// receiver value that satisfy the following conditions:
-// - exported method
-// - two arguments, both pointers to exported structs
-// - one return value, of type error
-// It returns an error if the receiver is not an exported type or has no
-// suitable methods.
-// The client accesses each method using a string of the form "Type.Method",
-// where Type is the receiver's concrete type.
-func (server *Server) Register(rcvr interface{}) error {
- return server.register(rcvr, "", false)
-}
-
-// RegisterName is like Register but uses the provided name for the type
-// instead of the receiver's concrete type.
-func (server *Server) RegisterName(name string, rcvr interface{}) error {
- return server.register(rcvr, name, true)
-}
-
-func (server *Server) register(rcvr interface{}, name string, useName bool) error {
- server.mu.Lock()
- defer server.mu.Unlock()
- if server.serviceMap == nil {
- server.serviceMap = make(map[string]*service)
- }
- s := new(service)
- s.typ = reflect.TypeOf(rcvr)
- s.rcvr = reflect.ValueOf(rcvr)
- sname := reflect.Indirect(s.rcvr).Type().Name()
- if useName {
- sname = name
- }
- if sname == "" {
- log.Fatal("rpc: no service name for type", s.typ.String())
- }
- if !isExported(sname) && !useName {
- s := "rpc Register: type " + sname + " is not exported"
- log.Print(s)
- return errors.New(s)
- }
- if _, present := server.serviceMap[sname]; present {
- return errors.New("rpc: service already defined: " + sname)
- }
- s.name = sname
- s.method = make(map[string]*methodType)
-
- // Install the methods
- for m := 0; m < s.typ.NumMethod(); m++ {
- method := s.typ.Method(m)
- mtype := method.Type
- mname := method.Name
- if method.PkgPath != "" {
- continue
- }
- // Method needs three ins: receiver, *args, *reply.
- if mtype.NumIn() != 3 {
- log.Println("method", mname, "has wrong number of ins:", mtype.NumIn())
- continue
- }
- // First arg need not be a pointer.
- argType := mtype.In(1)
- if !isExportedOrBuiltinType(argType) {
- log.Println(mname, "argument type not exported or local:", argType)
- continue
- }
- // Second arg must be a pointer.
- replyType := mtype.In(2)
- if replyType.Kind() != reflect.Ptr {
- log.Println("method", mname, "reply type not a pointer:", replyType)
- continue
- }
- if !isExportedOrBuiltinType(replyType) {
- log.Println("method", mname, "reply type not exported or local:", replyType)
- continue
- }
- // Method needs one out: error.
- if mtype.NumOut() != 1 {
- log.Println("method", mname, "has wrong number of outs:", mtype.NumOut())
- continue
- }
- if returnType := mtype.Out(0); returnType != typeOfError {
- log.Println("method", mname, "returns", returnType.String(), "not error")
- continue
- }
- s.method[mname] = &methodType{method: method, ArgType: argType, ReplyType: replyType}
- }
-
- if len(s.method) == 0 {
- s := "rpc Register: type " + sname + " has no exported methods of suitable type"
- log.Print(s)
- return errors.New(s)
- }
- server.serviceMap[s.name] = s
- return nil
-}
-
-// A value sent as a placeholder for the response when the server receives an invalid request.
-type InvalidRequest struct{}
-
-var invalidRequest = InvalidRequest{}
-
-func (server *Server) sendResponse(sending *sync.Mutex, req *Request, reply interface{}, codec ServerCodec, errmsg string) {
- resp := server.getResponse()
- // Encode the response header
- resp.ServiceMethod = req.ServiceMethod
- if errmsg != "" {
- resp.Error = errmsg
- reply = invalidRequest
- }
- resp.Seq = req.Seq
- sending.Lock()
- err := codec.WriteResponse(resp, reply)
- if err != nil {
- log.Println("rpc: writing response:", err)
- }
- sending.Unlock()
- server.freeResponse(resp)
-}
-
-func (m *methodType) NumCalls() (n uint) {
- m.Lock()
- n = m.numCalls
- m.Unlock()
- return n
-}
-
-func (s *service) call(server *Server, sending *sync.Mutex, mtype *methodType, req *Request, argv, replyv reflect.Value, codec ServerCodec) {
- mtype.Lock()
- mtype.numCalls++
- mtype.Unlock()
- function := mtype.method.Func
- // Invoke the method, providing a new value for the reply.
- returnValues := function.Call([]reflect.Value{s.rcvr, argv, replyv})
- // The return value for the method is an error.
- errInter := returnValues[0].Interface()
- errmsg := ""
- if errInter != nil {
- errmsg = errInter.(error).Error()
- }
- server.sendResponse(sending, req, replyv.Interface(), codec, errmsg)
- server.freeRequest(req)
-}
-
-type gobServerCodec struct {
- rwc io.ReadWriteCloser
- dec *gob.Decoder
- enc *gob.Encoder
- encBuf *bufio.Writer
-}
-
-func (c *gobServerCodec) ReadRequestHeader(r *Request) error {
- return c.dec.Decode(r)
-}
-
-func (c *gobServerCodec) ReadRequestBody(body interface{}) error {
- return c.dec.Decode(body)
-}
-
-func (c *gobServerCodec) WriteResponse(r *Response, body interface{}) (err error) {
- if err = c.enc.Encode(r); err != nil {
- return
- }
- if err = c.enc.Encode(body); err != nil {
- return
- }
- return c.encBuf.Flush()
-}
-
-func (c *gobServerCodec) Close() error {
- return c.rwc.Close()
-}
-
-// ServeConn runs the server on a single connection.
-// ServeConn blocks, serving the connection until the client hangs up.
-// The caller typically invokes ServeConn in a go statement.
-// ServeConn uses the gob wire format (see package gob) on the
-// connection. To use an alternate codec, use ServeCodec.
-func (server *Server) ServeConn(conn io.ReadWriteCloser) {
- buf := bufio.NewWriter(conn)
- srv := &gobServerCodec{conn, gob.NewDecoder(conn), gob.NewEncoder(buf), buf}
- server.ServeCodec(srv)
-}
-
-// ServeCodec is like ServeConn but uses the specified codec to
-// decode requests and encode responses.
-func (server *Server) ServeCodec(codec ServerCodec) {
- sending := new(sync.Mutex)
- for {
- service, mtype, req, argv, replyv, keepReading, err := server.readRequest(codec)
- if err != nil {
- if err != io.EOF {
- log.Println("rpc:", err)
- }
- if !keepReading {
- break
- }
- // send a response if we actually managed to read a header.
- if req != nil {
- server.sendResponse(sending, req, invalidRequest, codec, err.Error())
- server.freeRequest(req)
- }
- continue
- }
- go service.call(server, sending, mtype, req, argv, replyv, codec)
- }
- codec.Close()
-}
-
-// ServeRequest is like ServeCodec but synchronously serves a single request.
-// It does not close the codec upon completion.
-func (server *Server) ServeRequest(codec ServerCodec) error {
- sending := new(sync.Mutex)
- service, mtype, req, argv, replyv, keepReading, err := server.readRequest(codec)
- if err != nil {
- if !keepReading {
- return err
- }
- // send a response if we actually managed to read a header.
- if req != nil {
- server.sendResponse(sending, req, invalidRequest, codec, err.Error())
- server.freeRequest(req)
- }
- return err
- }
- service.call(server, sending, mtype, req, argv, replyv, codec)
- return nil
-}
-
-func (server *Server) getRequest() *Request {
- server.reqLock.Lock()
- req := server.freeReq
- if req == nil {
- req = new(Request)
- } else {
- server.freeReq = req.next
- *req = Request{}
- }
- server.reqLock.Unlock()
- return req
-}
-
-func (server *Server) freeRequest(req *Request) {
- server.reqLock.Lock()
- req.next = server.freeReq
- server.freeReq = req
- server.reqLock.Unlock()
-}
-
-func (server *Server) getResponse() *Response {
- server.respLock.Lock()
- resp := server.freeResp
- if resp == nil {
- resp = new(Response)
- } else {
- server.freeResp = resp.next
- *resp = Response{}
- }
- server.respLock.Unlock()
- return resp
-}
-
-func (server *Server) freeResponse(resp *Response) {
- server.respLock.Lock()
- resp.next = server.freeResp
- server.freeResp = resp
- server.respLock.Unlock()
-}
-
-func (server *Server) readRequest(codec ServerCodec) (service *service, mtype *methodType, req *Request, argv, replyv reflect.Value, keepReading bool, err error) {
- service, mtype, req, keepReading, err = server.readRequestHeader(codec)
- if err != nil {
- if !keepReading {
- return
- }
- // discard body
- codec.ReadRequestBody(nil)
- return
- }
-
- // Decode the argument value.
- argIsValue := false // if true, need to indirect before calling.
- if mtype.ArgType.Kind() == reflect.Ptr {
- argv = reflect.New(mtype.ArgType.Elem())
- } else {
- argv = reflect.New(mtype.ArgType)
- argIsValue = true
- }
- // argv guaranteed to be a pointer now.
- if err = codec.ReadRequestBody(argv.Interface()); err != nil {
- return
- }
- if argIsValue {
- argv = argv.Elem()
- }
-
- replyv = reflect.New(mtype.ReplyType.Elem())
- return
-}
-
-func (server *Server) readRequestHeader(codec ServerCodec) (service *service, mtype *methodType, req *Request, keepReading bool, err error) {
- // Grab the request header.
- req = server.getRequest()
- err = codec.ReadRequestHeader(req)
- if err != nil {
- req = nil
- if err == io.EOF || err == io.ErrUnexpectedEOF {
- return
- }
- err = errors.New("rpc: server cannot decode request: " + err.Error())
- return
- }
-
- // We read the header successfully. If we see an error now,
- // we can still recover and move on to the next request.
- keepReading = true
-
- serviceMethod := strings.Split(req.ServiceMethod, ".")
- if len(serviceMethod) != 2 {
- err = errors.New("rpc: service/method request ill-formed: " + req.ServiceMethod)
- return
- }
- // Look up the request.
- server.mu.Lock()
- service = server.serviceMap[serviceMethod[0]]
- server.mu.Unlock()
- if service == nil {
- err = errors.New("rpc: can't find service " + req.ServiceMethod)
- return
- }
- mtype = service.method[serviceMethod[1]]
- if mtype == nil {
- err = errors.New("rpc: can't find method " + req.ServiceMethod)
- }
- return
-}
-
-// Accept accepts connections on the listener and serves requests
-// for each incoming connection. Accept blocks; the caller typically
-// invokes it in a go statement.
-func (server *Server) Accept(lis net.Listener) {
- for {
- conn, err := lis.Accept()
- if err != nil {
- log.Fatal("rpc.Serve: accept:", err.Error()) // TODO(r): exit?
- }
- go server.ServeConn(conn)
- }
-}
-
-// Register publishes the receiver's methods in the DefaultServer.
-func Register(rcvr interface{}) error { return DefaultServer.Register(rcvr) }
-
-// RegisterName is like Register but uses the provided name for the type
-// instead of the receiver's concrete type.
-func RegisterName(name string, rcvr interface{}) error {
- return DefaultServer.RegisterName(name, rcvr)
-}
-
-// A ServerCodec implements reading of RPC requests and writing of
-// RPC responses for the server side of an RPC session.
-// The server calls ReadRequestHeader and ReadRequestBody in pairs
-// to read requests from the connection, and it calls WriteResponse to
-// write a response back. The server calls Close when finished with the
-// connection. ReadRequestBody may be called with a nil
-// argument to force the body of the request to be read and discarded.
-type ServerCodec interface {
- ReadRequestHeader(*Request) error
- ReadRequestBody(interface{}) error
- WriteResponse(*Response, interface{}) error
-
- Close() error
-}
-
-// ServeConn runs the DefaultServer on a single connection.
-// ServeConn blocks, serving the connection until the client hangs up.
-// The caller typically invokes ServeConn in a go statement.
-// ServeConn uses the gob wire format (see package gob) on the
-// connection. To use an alternate codec, use ServeCodec.
-func ServeConn(conn io.ReadWriteCloser) {
- DefaultServer.ServeConn(conn)
-}
-
-// ServeCodec is like ServeConn but uses the specified codec to
-// decode requests and encode responses.
-func ServeCodec(codec ServerCodec) {
- DefaultServer.ServeCodec(codec)
-}
-
-// ServeRequest is like ServeCodec but synchronously serves a single request.
-// It does not close the codec upon completion.
-func ServeRequest(codec ServerCodec) error {
- return DefaultServer.ServeRequest(codec)
-}
-
-// Accept accepts connections on the listener and serves requests
-// to DefaultServer for each incoming connection.
-// Accept blocks; the caller typically invokes it in a go statement.
-func Accept(lis net.Listener) { DefaultServer.Accept(lis) }
-
-// Can connect to RPC service using HTTP CONNECT to rpcPath.
-var connected = "200 Connected to Go RPC"
-
-// ServeHTTP implements an http.Handler that answers RPC requests.
-func (server *Server) ServeHTTP(w http.ResponseWriter, req *http.Request) {
- if req.Method != "CONNECT" {
- w.Header().Set("Content-Type", "text/plain; charset=utf-8")
- w.WriteHeader(http.StatusMethodNotAllowed)
- io.WriteString(w, "405 must CONNECT\n")
- return
- }
- conn, _, err := w.(http.Hijacker).Hijack()
- if err != nil {
- log.Print("rpc hijacking ", req.RemoteAddr, ": ", err.Error())
- return
- }
- io.WriteString(conn, "HTTP/1.0 "+connected+"\n\n")
- server.ServeConn(conn)
-}
-
-// HandleHTTP registers an HTTP handler for RPC messages on rpcPath,
-// and a debugging handler on debugPath.
-// It is still necessary to invoke http.Serve(), typically in a go statement.
-func (server *Server) HandleHTTP(rpcPath, debugPath string) {
- http.Handle(rpcPath, server)
- http.Handle(debugPath, debugHTTP{server})
-}
-
-// HandleHTTP registers an HTTP handler for RPC messages to DefaultServer
-// on DefaultRPCPath and a debugging handler on DefaultDebugPath.
-// It is still necessary to invoke http.Serve(), typically in a go statement.
-func HandleHTTP() {
- DefaultServer.HandleHTTP(DefaultRPCPath, DefaultDebugPath)
-}
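The HTTP entry point above is only a CONNECT handshake before the connection is hijacked for gob traffic; with the constants defined above, the exchange between DialHTTPPath and ServeHTTP looks like:

	C: CONNECT /_goRPC_ HTTP/1.0
	C:
	S: HTTP/1.0 200 Connected to Go RPC
	S:
	   (gob-encoded Request/Response pairs follow on the hijacked connection)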
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package rpc
-
-import (
- "errors"
- "fmt"
- "http/httptest"
- "io"
- "log"
- "net"
- "runtime"
- "strings"
- "sync"
- "sync/atomic"
- "testing"
- "time"
-)
-
-var (
- newServer *Server
- serverAddr, newServerAddr string
- httpServerAddr string
- once, newOnce, httpOnce sync.Once
-)
-
-const (
- second = 1e9
- newHttpPath = "/foo"
-)
-
-type Args struct {
- A, B int
-}
-
-type Reply struct {
- C int
-}
-
-type Arith int
-
-// Some of Arith's methods have value args, some have pointer args. That's deliberate.
-
-func (t *Arith) Add(args Args, reply *Reply) error {
- reply.C = args.A + args.B
- return nil
-}
-
-func (t *Arith) Mul(args *Args, reply *Reply) error {
- reply.C = args.A * args.B
- return nil
-}
-
-func (t *Arith) Div(args Args, reply *Reply) error {
- if args.B == 0 {
- return errors.New("divide by zero")
- }
- reply.C = args.A / args.B
- return nil
-}
-
-func (t *Arith) String(args *Args, reply *string) error {
- *reply = fmt.Sprintf("%d+%d=%d", args.A, args.B, args.A+args.B)
- return nil
-}
-
-func (t *Arith) Scan(args string, reply *Reply) (err error) {
- _, err = fmt.Sscan(args, &reply.C)
- return
-}
-
-func (t *Arith) Error(args *Args, reply *Reply) error {
- panic("ERROR")
-}
-
-func listenTCP() (net.Listener, string) {
- l, e := net.Listen("tcp", "127.0.0.1:0") // any available address
- if e != nil {
- log.Fatalf("net.Listen tcp :0: %v", e)
- }
- return l, l.Addr().String()
-}
-
-func startServer() {
- Register(new(Arith))
-
- var l net.Listener
- l, serverAddr = listenTCP()
- log.Println("Test RPC server listening on", serverAddr)
- go Accept(l)
-
- HandleHTTP()
- httpOnce.Do(startHttpServer)
-}
-
-func startNewServer() {
- newServer = NewServer()
- newServer.Register(new(Arith))
-
- var l net.Listener
- l, newServerAddr = listenTCP()
- log.Println("NewServer test RPC server listening on", newServerAddr)
- go Accept(l)
-
- newServer.HandleHTTP(newHttpPath, "/bar")
- httpOnce.Do(startHttpServer)
-}
-
-func startHttpServer() {
- server := httptest.NewServer(nil)
- httpServerAddr = server.Listener.Addr().String()
- log.Println("Test HTTP RPC server listening on", httpServerAddr)
-}
-
-func TestRPC(t *testing.T) {
- once.Do(startServer)
- testRPC(t, serverAddr)
- newOnce.Do(startNewServer)
- testRPC(t, newServerAddr)
-}
-
-func testRPC(t *testing.T, addr string) {
- client, err := Dial("tcp", addr)
- if err != nil {
- t.Fatal("dialing", err)
- }
-
- // Synchronous calls
- args := &Args{7, 8}
- reply := new(Reply)
- err = client.Call("Arith.Add", args, reply)
- if err != nil {
- t.Errorf("Add: expected no error but got string %q", err.Error())
- }
- if reply.C != args.A+args.B {
- t.Errorf("Add: expected %d got %d", reply.C, args.A+args.B)
- }
-
- // Nonexistent method
- args = &Args{7, 0}
- reply = new(Reply)
- err = client.Call("Arith.BadOperation", args, reply)
- // expect an error
- if err == nil {
- t.Error("BadOperation: expected error")
- } else if !strings.HasPrefix(err.Error(), "rpc: can't find method ") {
- t.Errorf("BadOperation: expected can't find method error; got %q", err)
- }
-
- // Unknown service
- args = &Args{7, 8}
- reply = new(Reply)
- err = client.Call("Arith.Unknown", args, reply)
- if err == nil {
- t.Error("expected error calling unknown service")
- } else if strings.Index(err.Error(), "method") < 0 {
- t.Error("expected error about method; got", err)
- }
-
- // Out of order.
- args = &Args{7, 8}
- mulReply := new(Reply)
- mulCall := client.Go("Arith.Mul", args, mulReply, nil)
- addReply := new(Reply)
- addCall := client.Go("Arith.Add", args, addReply, nil)
-
- addCall = <-addCall.Done
- if addCall.Error != nil {
- t.Errorf("Add: expected no error but got string %q", addCall.Error.Error())
- }
- if addReply.C != args.A+args.B {
- t.Errorf("Add: expected %d got %d", addReply.C, args.A+args.B)
- }
-
- mulCall = <-mulCall.Done
- if mulCall.Error != nil {
- t.Errorf("Mul: expected no error but got string %q", mulCall.Error.Error())
- }
- if mulReply.C != args.A*args.B {
- t.Errorf("Mul: expected %d got %d", mulReply.C, args.A*args.B)
- }
-
- // Error test
- args = &Args{7, 0}
- reply = new(Reply)
- err = client.Call("Arith.Div", args, reply)
- // expect an error: zero divide
- if err == nil {
- t.Error("Div: expected error")
- } else if err.Error() != "divide by zero" {
- t.Error("Div: expected divide by zero error; got", err)
- }
-
- // Bad type.
- reply = new(Reply)
- err = client.Call("Arith.Add", reply, reply) // args, reply would be the correct thing to use
- if err == nil {
- t.Error("expected error calling Arith.Add with wrong arg type")
- } else if strings.Index(err.Error(), "type") < 0 {
- t.Error("expected error about type; got", err)
- }
-
- // Non-struct argument
- const Val = 12345
- str := fmt.Sprint(Val)
- reply = new(Reply)
- err = client.Call("Arith.Scan", &str, reply)
- if err != nil {
- t.Errorf("Scan: expected no error but got string %q", err.Error())
- } else if reply.C != Val {
- t.Errorf("Scan: expected %d got %d", Val, reply.C)
- }
-
- // Non-struct reply
- args = &Args{27, 35}
- str = ""
- err = client.Call("Arith.String", args, &str)
- if err != nil {
- t.Errorf("String: expected no error but got string %q", err.Error())
- }
- expect := fmt.Sprintf("%d+%d=%d", args.A, args.B, args.A+args.B)
- if str != expect {
- t.Errorf("String: expected %s got %s", expect, str)
- }
-
- args = &Args{7, 8}
- reply = new(Reply)
- err = client.Call("Arith.Mul", args, reply)
- if err != nil {
- t.Errorf("Mul: expected no error but got string %q", err.Error())
- }
- if reply.C != args.A*args.B {
- t.Errorf("Mul: expected %d got %d", reply.C, args.A*args.B)
- }
-}
-
-func TestHTTP(t *testing.T) {
- once.Do(startServer)
- testHTTPRPC(t, "")
- newOnce.Do(startNewServer)
- testHTTPRPC(t, newHttpPath)
-}
-
-func testHTTPRPC(t *testing.T, path string) {
- var client *Client
- var err error
- if path == "" {
- client, err = DialHTTP("tcp", httpServerAddr)
- } else {
- client, err = DialHTTPPath("tcp", httpServerAddr, path)
- }
- if err != nil {
- t.Fatal("dialing", err)
- }
-
- // Synchronous calls
- args := &Args{7, 8}
- reply := new(Reply)
- err = client.Call("Arith.Add", args, reply)
- if err != nil {
- t.Errorf("Add: expected no error but got string %q", err.Error())
- }
- if reply.C != args.A+args.B {
- t.Errorf("Add: expected %d got %d", reply.C, args.A+args.B)
- }
-}
-
-// CodecEmulator provides a client-like api and a ServerCodec interface.
-// Can be used to test ServeRequest.
-type CodecEmulator struct {
- server *Server
- serviceMethod string
- args *Args
- reply *Reply
- err error
-}
-
-func (codec *CodecEmulator) Call(serviceMethod string, args *Args, reply *Reply) error {
- codec.serviceMethod = serviceMethod
- codec.args = args
- codec.reply = reply
- codec.err = nil
- var serverError error
- if codec.server == nil {
- serverError = ServeRequest(codec)
- } else {
- serverError = codec.server.ServeRequest(codec)
- }
- if codec.err == nil && serverError != nil {
- codec.err = serverError
- }
- return codec.err
-}
-
-func (codec *CodecEmulator) ReadRequestHeader(req *Request) error {
- req.ServiceMethod = codec.serviceMethod
- req.Seq = 0
- return nil
-}
-
-func (codec *CodecEmulator) ReadRequestBody(argv interface{}) error {
- if codec.args == nil {
- return io.ErrUnexpectedEOF
- }
- *(argv.(*Args)) = *codec.args
- return nil
-}
-
-func (codec *CodecEmulator) WriteResponse(resp *Response, reply interface{}) error {
- if resp.Error != "" {
- codec.err = errors.New(resp.Error)
- } else {
- *codec.reply = *(reply.(*Reply))
- }
- return nil
-}
-
-func (codec *CodecEmulator) Close() error {
- return nil
-}
-
-func TestServeRequest(t *testing.T) {
- once.Do(startServer)
- testServeRequest(t, nil)
- newOnce.Do(startNewServer)
- testServeRequest(t, newServer)
-}
-
-func testServeRequest(t *testing.T, server *Server) {
- client := CodecEmulator{server: server}
-
- args := &Args{7, 8}
- reply := new(Reply)
- err := client.Call("Arith.Add", args, reply)
- if err != nil {
- t.Errorf("Add: expected no error but got string %q", err.Error())
- }
- if reply.C != args.A+args.B {
- t.Errorf("Add: expected %d got %d", reply.C, args.A+args.B)
- }
-
- err = client.Call("Arith.Add", nil, reply)
- if err == nil {
- t.Errorf("expected error calling Arith.Add with nil arg")
- }
-}
-
-type ReplyNotPointer int
-type ArgNotPublic int
-type ReplyNotPublic int
-type local struct{}
-
-func (t *ReplyNotPointer) ReplyNotPointer(args *Args, reply Reply) error {
- return nil
-}
-
-func (t *ArgNotPublic) ArgNotPublic(args *local, reply *Reply) error {
- return nil
-}
-
-func (t *ReplyNotPublic) ReplyNotPublic(args *Args, reply *local) error {
- return nil
-}
-
-// Check that registration handles lots of bad methods and a type with no suitable methods.
-func TestRegistrationError(t *testing.T) {
- err := Register(new(ReplyNotPointer))
- if err == nil {
- t.Errorf("expected error registering ReplyNotPointer")
- }
- err = Register(new(ArgNotPublic))
- if err == nil {
- t.Errorf("expected error registering ArgNotPublic")
- }
- err = Register(new(ReplyNotPublic))
- if err == nil {
- t.Errorf("expected error registering ReplyNotPublic")
- }
-}
-
-type WriteFailCodec int
-
-func (WriteFailCodec) WriteRequest(*Request, interface{}) error {
- // the panic caused by this error used to not unlock a lock.
- return errors.New("fail")
-}
-
-func (WriteFailCodec) ReadResponseHeader(*Response) error {
- time.Sleep(120e9)
- panic("unreachable")
-}
-
-func (WriteFailCodec) ReadResponseBody(interface{}) error {
- time.Sleep(120e9)
- panic("unreachable")
-}
-
-func (WriteFailCodec) Close() error {
- return nil
-}
-
-func TestSendDeadlock(t *testing.T) {
- client := NewClientWithCodec(WriteFailCodec(0))
-
- done := make(chan bool)
- go func() {
- testSendDeadlock(client)
- testSendDeadlock(client)
- done <- true
- }()
- select {
- case <-done:
- return
- case <-time.After(5e9):
- t.Fatal("deadlock")
- }
-}
-
-func testSendDeadlock(client *Client) {
- defer func() {
- recover()
- }()
- args := &Args{7, 8}
- reply := new(Reply)
- client.Call("Arith.Add", args, reply)
-}
-
-func dialDirect() (*Client, error) {
- return Dial("tcp", serverAddr)
-}
-
-func dialHTTP() (*Client, error) {
- return DialHTTP("tcp", httpServerAddr)
-}
-
-func countMallocs(dial func() (*Client, error), t *testing.T) uint64 {
- once.Do(startServer)
- client, err := dial()
- if err != nil {
- t.Fatal("error dialing", err)
- }
- args := &Args{7, 8}
- reply := new(Reply)
- runtime.UpdateMemStats()
- mallocs := 0 - runtime.MemStats.Mallocs
- const count = 100
- for i := 0; i < count; i++ {
- err := client.Call("Arith.Add", args, reply)
- if err != nil {
- t.Errorf("Add: expected no error but got string %q", err.Error())
- }
- if reply.C != args.A+args.B {
- t.Errorf("Add: expected %d got %d", reply.C, args.A+args.B)
- }
- }
- runtime.UpdateMemStats()
- mallocs += runtime.MemStats.Mallocs
- return mallocs / count
-}
-
-func TestCountMallocs(t *testing.T) {
- fmt.Printf("mallocs per rpc round trip: %d\n", countMallocs(dialDirect, t))
-}
-
-func TestCountMallocsOverHTTP(t *testing.T) {
- fmt.Printf("mallocs per HTTP rpc round trip: %d\n", countMallocs(dialHTTP, t))
-}
-
-type writeCrasher struct{}
-
-func (writeCrasher) Close() error {
- return nil
-}
-
-func (writeCrasher) Read(p []byte) (int, error) {
- return 0, io.EOF
-}
-
-func (writeCrasher) Write(p []byte) (int, error) {
- return 0, errors.New("fake write failure")
-}
-
-func TestClientWriteError(t *testing.T) {
- c := NewClient(writeCrasher{})
- res := false
- err := c.Call("foo", 1, &res)
- if err == nil {
- t.Fatal("expected error")
- }
- if err.Error() != "fake write failure" {
- t.Error("unexpected value of error:", err)
- }
-}
-
-func benchmarkEndToEnd(dial func() (*Client, error), b *testing.B) {
- b.StopTimer()
- once.Do(startServer)
- client, err := dial()
- if err != nil {
- fmt.Println("error dialing", err)
- return
- }
-
- // Synchronous calls
- args := &Args{7, 8}
- procs := runtime.GOMAXPROCS(-1)
- N := int32(b.N)
- var wg sync.WaitGroup
- wg.Add(procs)
- b.StartTimer()
-
- for p := 0; p < procs; p++ {
- go func() {
- reply := new(Reply)
- for atomic.AddInt32(&N, -1) >= 0 {
- err = client.Call("Arith.Add", args, reply)
- if err != nil {
- fmt.Printf("Add: expected no error but got string %q", err.Error())
- panic("rpc error")
- }
- if reply.C != args.A+args.B {
- fmt.Printf("Add: expected %d got %d", reply.C, args.A+args.B)
- panic("rpc error")
- }
- }
- wg.Done()
- }()
- }
- wg.Wait()
-}
-
-func benchmarkEndToEndAsync(dial func() (*Client, error), b *testing.B) {
- const MaxConcurrentCalls = 100
- b.StopTimer()
- once.Do(startServer)
- client, err := dial()
- if err != nil {
- fmt.Println("error dialing", err)
- return
- }
-
- // Asynchronous calls
- args := &Args{7, 8}
- procs := 4 * runtime.GOMAXPROCS(-1)
- send := int32(b.N)
- recv := int32(b.N)
- var wg sync.WaitGroup
- wg.Add(procs)
- gate := make(chan bool, MaxConcurrentCalls)
- res := make(chan *Call, MaxConcurrentCalls)
- b.StartTimer()
-
- for p := 0; p < procs; p++ {
- go func() {
- for atomic.AddInt32(&send, -1) >= 0 {
- gate <- true
- reply := new(Reply)
- client.Go("Arith.Add", args, reply, res)
- }
- }()
- go func() {
- for call := range res {
- a := call.Args.(*Args).A
- b := call.Args.(*Args).B
- c := call.Reply.(*Reply).C
- if a+b != c {
- fmt.Printf("Add: expected %d got %d", a+b, c)
- panic("incorrect reply")
- }
- <-gate
- if atomic.AddInt32(&recv, -1) == 0 {
- close(res)
- }
- }
- wg.Done()
- }()
- }
- wg.Wait()
-}
-
-func BenchmarkEndToEnd(b *testing.B) {
- benchmarkEndToEnd(dialDirect, b)
-}
-
-func BenchmarkEndToEndHTTP(b *testing.B) {
- benchmarkEndToEnd(dialHTTP, b)
-}
-
-func BenchmarkEndToEndAsync(b *testing.B) {
- benchmarkEndToEndAsync(dialDirect, b)
-}
-
-func BenchmarkEndToEndAsyncHTTP(b *testing.B) {
- benchmarkEndToEndAsync(dialHTTP, b)
-}
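A companion client-side sketch for the calling patterns exercised in the tests above, again assuming the relocated import path net/rpc; the address and the Echo.Upper method match the hypothetical server sketched earlier:

package main

import (
	"fmt"
	"log"
	"net/rpc"
)

func main() {
	client, err := rpc.DialHTTP("tcp", "127.0.0.1:4321")
	if err != nil {
		log.Fatal("dialing:", err)
	}
	defer client.Close()

	// Synchronous call: blocks until the reply (or an error) arrives.
	var reply string
	if err := client.Call("Echo.Upper", "hello", &reply); err != nil {
		log.Fatal("Echo.Upper:", err)
	}
	fmt.Println(reply)

	// Asynchronous call: Go returns a *rpc.Call whose Done channel is
	// signalled when the reply is in, as in the out-of-order test above.
	var asyncReply string
	call := client.Go("Echo.Upper", "async", &asyncReply, nil)
	<-call.Done
	if call.Error != nil {
		log.Fatal(call.Error)
	}
	fmt.Println(asyncReply)
}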
func entersyscall()
func exitsyscall()
+func LockedOSThread() bool
/* Useless for gccgo.
import (
"math"
- "rand"
+ "math/rand"
. "runtime"
"testing"
)
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package scanner provides a scanner and tokenizer for UTF-8-encoded text.
-// It takes an io.Reader providing the source, which then can be tokenized
-// through repeated calls to the Scan function. For compatibility with
-// existing tools, the NUL character is not allowed (implementation
-// restriction).
-//
-// By default, a Scanner skips white space and Go comments and recognizes all
-// literals as defined by the Go language specification. It may be
-// customized to recognize only a subset of those literals and to recognize
-// different white space characters.
-//
-// Basic usage pattern:
-//
-// var s scanner.Scanner
-// s.Init(src)
-// tok := s.Scan()
-// for tok != scanner.EOF {
-// // do something with tok
-// tok = s.Scan()
-// }
-//
-package scanner
-
-import (
- "bytes"
- "fmt"
- "io"
- "os"
- "unicode"
- "utf8"
-)
-
-// TODO(gri): Consider changing this to use the new (token) Position package.
-
-// A source position is represented by a Position value.
-// A position is valid if Line > 0.
-type Position struct {
- Filename string // filename, if any
- Offset int // byte offset, starting at 0
- Line int // line number, starting at 1
- Column int // column number, starting at 1 (character count per line)
-}
-
-// IsValid returns true if the position is valid.
-func (pos *Position) IsValid() bool { return pos.Line > 0 }
-
-func (pos Position) String() string {
- s := pos.Filename
- if pos.IsValid() {
- if s != "" {
- s += ":"
- }
- s += fmt.Sprintf("%d:%d", pos.Line, pos.Column)
- }
- if s == "" {
- s = "???"
- }
- return s
-}
-
-// Predefined mode bits to control recognition of tokens. For instance,
-// to configure a Scanner such that it only recognizes (Go) identifiers,
-// integers, and skips comments, set the Scanner's Mode field to:
-//
-// ScanIdents | ScanInts | SkipComments
-//
-const (
- ScanIdents = 1 << -Ident
- ScanInts = 1 << -Int
- ScanFloats = 1 << -Float // includes Ints
- ScanChars = 1 << -Char
- ScanStrings = 1 << -String
- ScanRawStrings = 1 << -RawString
- ScanComments = 1 << -Comment
- SkipComments = 1 << -skipComment // if set with ScanComments, comments become white space
- GoTokens = ScanIdents | ScanFloats | ScanChars | ScanStrings | ScanRawStrings | ScanComments | SkipComments
-)
-
-// The result of Scan is one of the following tokens or a Unicode character.
-const (
- EOF = -(iota + 1)
- Ident
- Int
- Float
- Char
- String
- RawString
- Comment
- skipComment
-)
-
-var tokenString = map[rune]string{
- EOF: "EOF",
- Ident: "Ident",
- Int: "Int",
- Float: "Float",
- Char: "Char",
- String: "String",
- RawString: "RawString",
- Comment: "Comment",
-}
-
-// TokenString returns a (visible) string for a token or Unicode character.
-func TokenString(tok rune) string {
- if s, found := tokenString[tok]; found {
- return s
- }
- return fmt.Sprintf("%q", string(tok))
-}
-
-// GoWhitespace is the default value for the Scanner's Whitespace field.
-// Its value selects Go's white space characters.
-const GoWhitespace = 1<<'\t' | 1<<'\n' | 1<<'\r' | 1<<' '
-
-const bufLen = 1024 // at least utf8.UTFMax
-
-// A Scanner implements reading of Unicode characters and tokens from an io.Reader.
-type Scanner struct {
- // Input
- src io.Reader
-
- // Source buffer
- srcBuf [bufLen + 1]byte // +1 for sentinel for common case of s.next()
- srcPos int // reading position (srcBuf index)
- srcEnd int // source end (srcBuf index)
-
- // Source position
- srcBufOffset int // byte offset of srcBuf[0] in source
- line int // line count
- column int // character count
- lastLineLen int // length of last line in characters (for correct column reporting)
- lastCharLen int // length of last character in bytes
-
- // Token text buffer
- // Typically, token text is stored completely in srcBuf, but in general
- // the token text's head may be buffered in tokBuf while the token text's
- // tail is stored in srcBuf.
- tokBuf bytes.Buffer // token text head that is not in srcBuf anymore
- tokPos int // token text tail position (srcBuf index); valid if >= 0
- tokEnd int // token text tail end (srcBuf index)
-
- // One character look-ahead
- ch rune // character before current srcPos
-
- // Error is called for each error encountered. If no Error
- // function is set, the error is reported to os.Stderr.
- Error func(s *Scanner, msg string)
-
- // ErrorCount is incremented by one for each error encountered.
- ErrorCount int
-
- // The Mode field controls which tokens are recognized. For instance,
- // to recognize Ints, set the ScanInts bit in Mode. The field may be
- // changed at any time.
- Mode uint
-
- // The Whitespace field controls which characters are recognized
- // as white space. To recognize a character ch <= ' ' as white space,
- // set the ch'th bit in Whitespace (the Scanner's behavior is undefined
- // for values ch > ' '). The field may be changed at any time.
- Whitespace uint64
-
- // Start position of most recently scanned token; set by Scan.
- // Calling Init or Next invalidates the position (Line == 0).
- // The Filename field is always left untouched by the Scanner.
- // If an error is reported (via Error) and Position is invalid,
- // the scanner is not inside a token. Call Pos to obtain an error
- // position in that case.
- Position
-}
-
-// Init initializes a Scanner with a new source and returns s.
-// Error is set to nil, ErrorCount is set to 0, Mode is set to GoTokens,
-// and Whitespace is set to GoWhitespace.
-func (s *Scanner) Init(src io.Reader) *Scanner {
- s.src = src
-
- // initialize source buffer
- // (the first call to next() will fill it by calling src.Read)
- s.srcBuf[0] = utf8.RuneSelf // sentinel
- s.srcPos = 0
- s.srcEnd = 0
-
- // initialize source position
- s.srcBufOffset = 0
- s.line = 1
- s.column = 0
- s.lastLineLen = 0
- s.lastCharLen = 0
-
- // initialize token text buffer
- // (required for first call to next()).
- s.tokPos = -1
-
- // initialize one character look-ahead
- s.ch = -1 // no char read yet
-
- // initialize public fields
- s.Error = nil
- s.ErrorCount = 0
- s.Mode = GoTokens
- s.Whitespace = GoWhitespace
- s.Line = 0 // invalidate token position
-
- return s
-}
-
-// TODO(gri): The code for next() and the internal scanner state could benefit
-// from a rethink. While next() is optimized for the common ASCII
-// case, the "corrections" needed for proper position tracking undo
-// some of the attempts for fast-path optimization.
-
-// next reads and returns the next Unicode character. It is designed such
-// that only a minimal amount of work needs to be done in the common ASCII
-// case (one test to check for both ASCII and end-of-buffer, and one test
-// to check for newlines).
-func (s *Scanner) next() rune {
- ch, width := rune(s.srcBuf[s.srcPos]), 1
-
- if ch >= utf8.RuneSelf {
- // uncommon case: not ASCII or not enough bytes
- for s.srcPos+utf8.UTFMax > s.srcEnd && !utf8.FullRune(s.srcBuf[s.srcPos:s.srcEnd]) {
- // not enough bytes: read some more, but first
- // save away token text if any
- if s.tokPos >= 0 {
- s.tokBuf.Write(s.srcBuf[s.tokPos:s.srcPos])
- s.tokPos = 0
- // s.tokEnd is set by Scan()
- }
- // move unread bytes to beginning of buffer
- copy(s.srcBuf[0:], s.srcBuf[s.srcPos:s.srcEnd])
- s.srcBufOffset += s.srcPos
- // read more bytes
- // (an io.Reader must return os.EOF when it reaches
- // the end of what it is reading - simply returning
- // n == 0 will make this loop retry forever; but the
- // error is in the reader implementation in that case)
- i := s.srcEnd - s.srcPos
- n, err := s.src.Read(s.srcBuf[i:bufLen])
- s.srcPos = 0
- s.srcEnd = i + n
- s.srcBuf[s.srcEnd] = utf8.RuneSelf // sentinel
- if err != nil {
- if s.srcEnd == 0 {
- if s.lastCharLen > 0 {
- // previous character was not EOF
- s.column++
- }
- s.lastCharLen = 0
- return EOF
- }
- if err != io.EOF {
- s.error(err.Error())
- }
- // If err == EOF, we won't be getting more
- // bytes; break to avoid infinite loop. If
- // err is something else, we don't know if
- // we can get more bytes; thus also break.
- break
- }
- }
- // at least one byte
- ch = rune(s.srcBuf[s.srcPos])
- if ch >= utf8.RuneSelf {
- // uncommon case: not ASCII
- ch, width = utf8.DecodeRune(s.srcBuf[s.srcPos:s.srcEnd])
- if ch == utf8.RuneError && width == 1 {
- // advance for correct error position
- s.srcPos += width
- s.lastCharLen = width
- s.column++
- s.error("illegal UTF-8 encoding")
- return ch
- }
- }
- }
-
- // advance
- s.srcPos += width
- s.lastCharLen = width
- s.column++
-
- // special situations
- switch ch {
- case 0:
- // implementation restriction for compatibility with other tools
- s.error("illegal character NUL")
- case '\n':
- s.line++
- s.lastLineLen = s.column
- s.column = 0
- }
-
- return ch
-}
-
-// Next reads and returns the next Unicode character.
-// It returns EOF at the end of the source. It reports
-// a read error by calling s.Error, if not nil; otherwise
-// it prints an error message to os.Stderr. Next does not
-// update the Scanner's Position field; use Pos() to
-// get the current position.
-func (s *Scanner) Next() rune {
- s.tokPos = -1 // don't collect token text
- s.Line = 0 // invalidate token position
- ch := s.Peek()
- s.ch = s.next()
- return ch
-}
-
-// Peek returns the next Unicode character in the source without advancing
-// the scanner. It returns EOF if the scanner's position is at the last
-// character of the source.
-func (s *Scanner) Peek() rune {
- if s.ch < 0 {
- s.ch = s.next()
- }
- return s.ch
-}
-
-func (s *Scanner) error(msg string) {
- s.ErrorCount++
- if s.Error != nil {
- s.Error(s, msg)
- return
- }
- pos := s.Position
- if !pos.IsValid() {
- pos = s.Pos()
- }
- fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg)
-}
-
-func (s *Scanner) scanIdentifier() rune {
- ch := s.next() // read character after first '_' or letter
- for ch == '_' || unicode.IsLetter(ch) || unicode.IsDigit(ch) {
- ch = s.next()
- }
- return ch
-}
-
-func digitVal(ch rune) int {
- switch {
- case '0' <= ch && ch <= '9':
- return int(ch - '0')
- case 'a' <= ch && ch <= 'f':
- return int(ch - 'a' + 10)
- case 'A' <= ch && ch <= 'F':
- return int(ch - 'A' + 10)
- }
- return 16 // larger than any legal digit val
-}
-
-func isDecimal(ch rune) bool { return '0' <= ch && ch <= '9' }
-
-func (s *Scanner) scanMantissa(ch rune) rune {
- for isDecimal(ch) {
- ch = s.next()
- }
- return ch
-}
-
-func (s *Scanner) scanFraction(ch rune) rune {
- if ch == '.' {
- ch = s.scanMantissa(s.next())
- }
- return ch
-}
-
-func (s *Scanner) scanExponent(ch rune) rune {
- if ch == 'e' || ch == 'E' {
- ch = s.next()
- if ch == '-' || ch == '+' {
- ch = s.next()
- }
- ch = s.scanMantissa(ch)
- }
- return ch
-}
-
-func (s *Scanner) scanNumber(ch rune) (rune, rune) {
- // isDecimal(ch)
- if ch == '0' {
- // int or float
- ch = s.next()
- if ch == 'x' || ch == 'X' {
- // hexadecimal int
- ch = s.next()
- for digitVal(ch) < 16 {
- ch = s.next()
- }
- } else {
- // octal int or float
- seenDecimalDigit := false
- for isDecimal(ch) {
- if ch > '7' {
- seenDecimalDigit = true
- }
- ch = s.next()
- }
- if s.Mode&ScanFloats != 0 && (ch == '.' || ch == 'e' || ch == 'E') {
- // float
- ch = s.scanFraction(ch)
- ch = s.scanExponent(ch)
- return Float, ch
- }
- // octal int
- if seenDecimalDigit {
- s.error("illegal octal number")
- }
- }
- return Int, ch
- }
- // decimal int or float
- ch = s.scanMantissa(ch)
- if s.Mode&ScanFloats != 0 && (ch == '.' || ch == 'e' || ch == 'E') {
- // float
- ch = s.scanFraction(ch)
- ch = s.scanExponent(ch)
- return Float, ch
- }
- return Int, ch
-}
-
-func (s *Scanner) scanDigits(ch rune, base, n int) rune {
- for n > 0 && digitVal(ch) < base {
- ch = s.next()
- n--
- }
- if n > 0 {
- s.error("illegal char escape")
- }
- return ch
-}
-
-func (s *Scanner) scanEscape(quote rune) rune {
- ch := s.next() // read character after '/'
- switch ch {
- case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', quote:
- // nothing to do
- ch = s.next()
- case '0', '1', '2', '3', '4', '5', '6', '7':
- ch = s.scanDigits(ch, 8, 3)
- case 'x':
- ch = s.scanDigits(s.next(), 16, 2)
- case 'u':
- ch = s.scanDigits(s.next(), 16, 4)
- case 'U':
- ch = s.scanDigits(s.next(), 16, 8)
- default:
- s.error("illegal char escape")
- }
- return ch
-}
-
-func (s *Scanner) scanString(quote rune) (n int) {
- ch := s.next() // read character after quote
- for ch != quote {
- if ch == '\n' || ch < 0 {
- s.error("literal not terminated")
- return
- }
- if ch == '\\' {
- ch = s.scanEscape(quote)
- } else {
- ch = s.next()
- }
- n++
- }
- return
-}
-
-func (s *Scanner) scanRawString() {
- ch := s.next() // read character after '`'
- for ch != '`' {
- if ch < 0 {
- s.error("literal not terminated")
- return
- }
- ch = s.next()
- }
-}
-
-func (s *Scanner) scanChar() {
- if s.scanString('\'') != 1 {
- s.error("illegal char literal")
- }
-}
-
-func (s *Scanner) scanComment(ch rune) rune {
- // ch == '/' || ch == '*'
- if ch == '/' {
- // line comment
- ch = s.next() // read character after "//"
- for ch != '\n' && ch >= 0 {
- ch = s.next()
- }
- return ch
- }
-
- // general comment
- ch = s.next() // read character after "/*"
- for {
- if ch < 0 {
- s.error("comment not terminated")
- break
- }
- ch0 := ch
- ch = s.next()
- if ch0 == '*' && ch == '/' {
- ch = s.next()
- break
- }
- }
- return ch
-}
-
-// Scan reads the next token or Unicode character from source and returns it.
-// It only recognizes tokens t for which the respective Mode bit (1<<-t) is set.
-// It returns EOF at the end of the source. It reports scanner errors (read and
-// token errors) by calling s.Error, if not nil; otherwise it prints an error
-// message to os.Stderr.
-func (s *Scanner) Scan() rune {
- ch := s.Peek()
-
- // reset token text position
- s.tokPos = -1
- s.Line = 0
-
-redo:
- // skip white space
- for s.Whitespace&(1<<uint(ch)) != 0 {
- ch = s.next()
- }
-
- // start collecting token text
- s.tokBuf.Reset()
- s.tokPos = s.srcPos - s.lastCharLen
-
- // set token position
- // (this is a slightly optimized version of the code in Pos())
- s.Offset = s.srcBufOffset + s.tokPos
- if s.column > 0 {
- // common case: last character was not a '\n'
- s.Line = s.line
- s.Column = s.column
- } else {
- // last character was a '\n'
- // (we cannot be at the beginning of the source
- // since we have called next() at least once)
- s.Line = s.line - 1
- s.Column = s.lastLineLen
- }
-
- // determine token value
- tok := ch
- switch {
- case unicode.IsLetter(ch) || ch == '_':
- if s.Mode&ScanIdents != 0 {
- tok = Ident
- ch = s.scanIdentifier()
- } else {
- ch = s.next()
- }
- case isDecimal(ch):
- if s.Mode&(ScanInts|ScanFloats) != 0 {
- tok, ch = s.scanNumber(ch)
- } else {
- ch = s.next()
- }
- default:
- switch ch {
- case '"':
- if s.Mode&ScanStrings != 0 {
- s.scanString('"')
- tok = String
- }
- ch = s.next()
- case '\'':
- if s.Mode&ScanChars != 0 {
- s.scanChar()
- tok = Char
- }
- ch = s.next()
- case '.':
- ch = s.next()
- if isDecimal(ch) && s.Mode&ScanFloats != 0 {
- tok = Float
- ch = s.scanMantissa(ch)
- ch = s.scanExponent(ch)
- }
- case '/':
- ch = s.next()
- if (ch == '/' || ch == '*') && s.Mode&ScanComments != 0 {
- if s.Mode&SkipComments != 0 {
- s.tokPos = -1 // don't collect token text
- ch = s.scanComment(ch)
- goto redo
- }
- ch = s.scanComment(ch)
- tok = Comment
- }
- case '`':
- if s.Mode&ScanRawStrings != 0 {
- s.scanRawString()
- tok = String
- }
- ch = s.next()
- default:
- ch = s.next()
- }
- }
-
- // end of token text
- s.tokEnd = s.srcPos - s.lastCharLen
-
- s.ch = ch
- return tok
-}
-
-// Pos returns the position of the character immediately after
-// the character or token returned by the last call to Next or Scan.
-func (s *Scanner) Pos() (pos Position) {
- pos.Filename = s.Filename
- pos.Offset = s.srcBufOffset + s.srcPos - s.lastCharLen
- switch {
- case s.column > 0:
- // common case: last character was not a '\n'
- pos.Line = s.line
- pos.Column = s.column
- case s.lastLineLen > 0:
- // last character was a '\n'
- pos.Line = s.line - 1
- pos.Column = s.lastLineLen
- default:
- // at the beginning of the source
- pos.Line = 1
- pos.Column = 1
- }
- return
-}
-
-// TokenText returns the string corresponding to the most recently scanned token.
-// Valid after calling Scan().
-func (s *Scanner) TokenText() string {
- if s.tokPos < 0 {
- // no token text
- return ""
- }
-
- if s.tokEnd < 0 {
- // if EOF was reached, s.tokEnd is set to -1 (s.srcPos == 0)
- s.tokEnd = s.tokPos
- }
-
- if s.tokBuf.Len() == 0 {
- // common case: the entire token text is still in srcBuf
- return string(s.srcBuf[s.tokPos:s.tokEnd])
- }
-
- // part of the token text was saved in tokBuf: save the rest in
- // tokBuf as well and return its content
- s.tokBuf.Write(s.srcBuf[s.tokPos:s.tokEnd])
- s.tokPos = s.tokEnd // ensure idempotency of TokenText() call
- return s.tokBuf.String()
-}
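The "basic usage pattern" from the package comment above, expanded into a runnable sketch under the package's new import path text/scanner; the source string and filename are illustrative only:

package main

import (
	"fmt"
	"strings"
	"text/scanner"
)

func main() {
	const src = `x := 3.14
msg := "hello" // a line comment, skipped by the default GoTokens mode`

	var s scanner.Scanner
	s.Init(strings.NewReader(src))
	s.Filename = "example.go" // only used when positions are printed
	for tok := s.Scan(); tok != scanner.EOF; tok = s.Scan() {
		fmt.Printf("%s\t%s\t%q\n", s.Position, scanner.TokenString(tok), s.TokenText())
	}
}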
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package scanner
-
-import (
- "bytes"
- "fmt"
- "io"
- "strings"
- "testing"
- "utf8"
-)
-
-// A StringReader delivers its data one string segment at a time via Read.
-type StringReader struct {
- data []string
- step int
-}
-
-func (r *StringReader) Read(p []byte) (n int, err error) {
- if r.step < len(r.data) {
- s := r.data[r.step]
- n = copy(p, s)
- r.step++
- } else {
- err = io.EOF
- }
- return
-}
-
-func readRuneSegments(t *testing.T, segments []string) {
- got := ""
- want := strings.Join(segments, "")
- s := new(Scanner).Init(&StringReader{data: segments})
- for {
- ch := s.Next()
- if ch == EOF {
- break
- }
- got += string(ch)
- }
- if got != want {
- t.Errorf("segments=%v got=%s want=%s", segments, got, want)
- }
-}
-
-var segmentList = [][]string{
- {},
- {""},
- {"日", "本語"},
- {"\u65e5", "\u672c", "\u8a9e"},
- {"\U000065e5", " ", "\U0000672c", "\U00008a9e"},
- {"\xe6", "\x97\xa5\xe6", "\x9c\xac\xe8\xaa\x9e"},
- {"Hello", ", ", "World", "!"},
- {"Hello", ", ", "", "World", "!"},
-}
-
-func TestNext(t *testing.T) {
- for _, s := range segmentList {
- readRuneSegments(t, s)
- }
-}
-
-type token struct {
- tok rune
- text string
-}
-
-var f100 = "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
-
-var tokenList = []token{
- {Comment, "// line comments"},
- {Comment, "//"},
- {Comment, "////"},
- {Comment, "// comment"},
- {Comment, "// /* comment */"},
- {Comment, "// // comment //"},
- {Comment, "//" + f100},
-
- {Comment, "// general comments"},
- {Comment, "/**/"},
- {Comment, "/***/"},
- {Comment, "/* comment */"},
- {Comment, "/* // comment */"},
- {Comment, "/* /* comment */"},
- {Comment, "/*\n comment\n*/"},
- {Comment, "/*" + f100 + "*/"},
-
- {Comment, "// identifiers"},
- {Ident, "a"},
- {Ident, "a0"},
- {Ident, "foobar"},
- {Ident, "abc123"},
- {Ident, "LGTM"},
- {Ident, "_"},
- {Ident, "_abc123"},
- {Ident, "abc123_"},
- {Ident, "_abc_123_"},
- {Ident, "_äöü"},
- {Ident, "_本"},
- {Ident, "äöü"},
- {Ident, "本"},
- {Ident, "a۰۱۸"},
- {Ident, "foo६४"},
- {Ident, "bar9876"},
- {Ident, f100},
-
- {Comment, "// decimal ints"},
- {Int, "0"},
- {Int, "1"},
- {Int, "9"},
- {Int, "42"},
- {Int, "1234567890"},
-
- {Comment, "// octal ints"},
- {Int, "00"},
- {Int, "01"},
- {Int, "07"},
- {Int, "042"},
- {Int, "01234567"},
-
- {Comment, "// hexadecimal ints"},
- {Int, "0x0"},
- {Int, "0x1"},
- {Int, "0xf"},
- {Int, "0x42"},
- {Int, "0x123456789abcDEF"},
- {Int, "0x" + f100},
- {Int, "0X0"},
- {Int, "0X1"},
- {Int, "0XF"},
- {Int, "0X42"},
- {Int, "0X123456789abcDEF"},
- {Int, "0X" + f100},
-
- {Comment, "// floats"},
- {Float, "0."},
- {Float, "1."},
- {Float, "42."},
- {Float, "01234567890."},
- {Float, ".0"},
- {Float, ".1"},
- {Float, ".42"},
- {Float, ".0123456789"},
- {Float, "0.0"},
- {Float, "1.0"},
- {Float, "42.0"},
- {Float, "01234567890.0"},
- {Float, "0e0"},
- {Float, "1e0"},
- {Float, "42e0"},
- {Float, "01234567890e0"},
- {Float, "0E0"},
- {Float, "1E0"},
- {Float, "42E0"},
- {Float, "01234567890E0"},
- {Float, "0e+10"},
- {Float, "1e-10"},
- {Float, "42e+10"},
- {Float, "01234567890e-10"},
- {Float, "0E+10"},
- {Float, "1E-10"},
- {Float, "42E+10"},
- {Float, "01234567890E-10"},
-
- {Comment, "// chars"},
- {Char, `' '`},
- {Char, `'a'`},
- {Char, `'本'`},
- {Char, `'\a'`},
- {Char, `'\b'`},
- {Char, `'\f'`},
- {Char, `'\n'`},
- {Char, `'\r'`},
- {Char, `'\t'`},
- {Char, `'\v'`},
- {Char, `'\''`},
- {Char, `'\000'`},
- {Char, `'\777'`},
- {Char, `'\x00'`},
- {Char, `'\xff'`},
- {Char, `'\u0000'`},
- {Char, `'\ufA16'`},
- {Char, `'\U00000000'`},
- {Char, `'\U0000ffAB'`},
-
- {Comment, "// strings"},
- {String, `" "`},
- {String, `"a"`},
- {String, `"本"`},
- {String, `"\a"`},
- {String, `"\b"`},
- {String, `"\f"`},
- {String, `"\n"`},
- {String, `"\r"`},
- {String, `"\t"`},
- {String, `"\v"`},
- {String, `"\""`},
- {String, `"\000"`},
- {String, `"\777"`},
- {String, `"\x00"`},
- {String, `"\xff"`},
- {String, `"\u0000"`},
- {String, `"\ufA16"`},
- {String, `"\U00000000"`},
- {String, `"\U0000ffAB"`},
- {String, `"` + f100 + `"`},
-
- {Comment, "// raw strings"},
- {String, "``"},
- {String, "`\\`"},
- {String, "`" + "\n\n/* foobar */\n\n" + "`"},
- {String, "`" + f100 + "`"},
-
- {Comment, "// individual characters"},
- // NUL character is not allowed
- {'\x01', "\x01"},
- {' ' - 1, string(' ' - 1)},
- {'+', "+"},
- {'/', "/"},
- {'.', "."},
- {'~', "~"},
- {'(', "("},
-}
-
-func makeSource(pattern string) *bytes.Buffer {
- var buf bytes.Buffer
- for _, k := range tokenList {
- fmt.Fprintf(&buf, pattern, k.text)
- }
- return &buf
-}
-
-func checkTok(t *testing.T, s *Scanner, line int, got, want rune, text string) {
- if got != want {
- t.Fatalf("tok = %s, want %s for %q", TokenString(got), TokenString(want), text)
- }
- if s.Line != line {
- t.Errorf("line = %d, want %d for %q", s.Line, line, text)
- }
- stext := s.TokenText()
- if stext != text {
- t.Errorf("text = %q, want %q", stext, text)
- } else {
- // check idempotency of TokenText() call
- stext = s.TokenText()
- if stext != text {
- t.Errorf("text = %q, want %q (idempotency check)", stext, text)
- }
- }
-}
-
-func countNewlines(s string) int {
- n := 0
- for _, ch := range s {
- if ch == '\n' {
- n++
- }
- }
- return n
-}
-
-func testScan(t *testing.T, mode uint) {
- s := new(Scanner).Init(makeSource(" \t%s\n"))
- s.Mode = mode
- tok := s.Scan()
- line := 1
- for _, k := range tokenList {
- if mode&SkipComments == 0 || k.tok != Comment {
- checkTok(t, s, line, tok, k.tok, k.text)
- tok = s.Scan()
- }
- line += countNewlines(k.text) + 1 // each token is on a new line
- }
- checkTok(t, s, line, tok, EOF, "")
-}
-
-func TestScan(t *testing.T) {
- testScan(t, GoTokens)
- testScan(t, GoTokens&^SkipComments)
-}
-
-func TestPosition(t *testing.T) {
- src := makeSource("\t\t\t\t%s\n")
- s := new(Scanner).Init(src)
- s.Mode = GoTokens &^ SkipComments
- s.Scan()
- pos := Position{"", 4, 1, 5}
- for _, k := range tokenList {
- if s.Offset != pos.Offset {
- t.Errorf("offset = %d, want %d for %q", s.Offset, pos.Offset, k.text)
- }
- if s.Line != pos.Line {
- t.Errorf("line = %d, want %d for %q", s.Line, pos.Line, k.text)
- }
- if s.Column != pos.Column {
- t.Errorf("column = %d, want %d for %q", s.Column, pos.Column, k.text)
- }
- pos.Offset += 4 + len(k.text) + 1 // 4 tabs + token bytes + newline
- pos.Line += countNewlines(k.text) + 1 // each token is on a new line
- s.Scan()
- }
- // make sure there were no token-internal errors reported by scanner
- if s.ErrorCount != 0 {
- t.Errorf("%d errors", s.ErrorCount)
- }
-}
-
-func TestScanZeroMode(t *testing.T) {
- src := makeSource("%s\n")
- str := src.String()
- s := new(Scanner).Init(src)
- s.Mode = 0 // don't recognize any token classes
- s.Whitespace = 0 // don't skip any whitespace
- tok := s.Scan()
- for i, ch := range str {
- if tok != ch {
- t.Fatalf("%d. tok = %s, want %s", i, TokenString(tok), TokenString(ch))
- }
- tok = s.Scan()
- }
- if tok != EOF {
- t.Fatalf("tok = %s, want EOF", TokenString(tok))
- }
- if s.ErrorCount != 0 {
- t.Errorf("%d errors", s.ErrorCount)
- }
-}
-
-func testScanSelectedMode(t *testing.T, mode uint, class rune) {
- src := makeSource("%s\n")
- s := new(Scanner).Init(src)
- s.Mode = mode
- tok := s.Scan()
- for tok != EOF {
- if tok < 0 && tok != class {
- t.Fatalf("tok = %s, want %s", TokenString(tok), TokenString(class))
- }
- tok = s.Scan()
- }
- if s.ErrorCount != 0 {
- t.Errorf("%d errors", s.ErrorCount)
- }
-}
-
-func TestScanSelectedMask(t *testing.T) {
- testScanSelectedMode(t, 0, 0)
- testScanSelectedMode(t, ScanIdents, Ident)
- // Don't test ScanInts and ScanNumbers since some parts of
- // the floats in the source look like (illegal) octal ints
- // and ScanNumbers may return either Int or Float.
- testScanSelectedMode(t, ScanChars, Char)
- testScanSelectedMode(t, ScanStrings, String)
- testScanSelectedMode(t, SkipComments, 0)
- testScanSelectedMode(t, ScanComments, Comment)
-}
-
-func TestScanNext(t *testing.T) {
- s := new(Scanner).Init(bytes.NewBufferString("if a == bcd /* comment */ {\n\ta += c\n} // line comment ending in eof"))
- checkTok(t, s, 1, s.Scan(), Ident, "if")
- checkTok(t, s, 1, s.Scan(), Ident, "a")
- checkTok(t, s, 1, s.Scan(), '=', "=")
- checkTok(t, s, 0, s.Next(), '=', "")
- checkTok(t, s, 0, s.Next(), ' ', "")
- checkTok(t, s, 0, s.Next(), 'b', "")
- checkTok(t, s, 1, s.Scan(), Ident, "cd")
- checkTok(t, s, 1, s.Scan(), '{', "{")
- checkTok(t, s, 2, s.Scan(), Ident, "a")
- checkTok(t, s, 2, s.Scan(), '+', "+")
- checkTok(t, s, 0, s.Next(), '=', "")
- checkTok(t, s, 2, s.Scan(), Ident, "c")
- checkTok(t, s, 3, s.Scan(), '}', "}")
- checkTok(t, s, 3, s.Scan(), -1, "")
- if s.ErrorCount != 0 {
- t.Errorf("%d errors", s.ErrorCount)
- }
-}
-
-func TestScanWhitespace(t *testing.T) {
- var buf bytes.Buffer
- var ws uint64
- // start at 1, NUL character is not allowed
- for ch := byte(1); ch < ' '; ch++ {
- buf.WriteByte(ch)
- ws |= 1 << ch
- }
- const orig = 'x'
- buf.WriteByte(orig)
-
- s := new(Scanner).Init(&buf)
- s.Mode = 0
- s.Whitespace = ws
- tok := s.Scan()
- if tok != orig {
- t.Errorf("tok = %s, want %s", TokenString(tok), TokenString(orig))
- }
-}
-
-func testError(t *testing.T, src, pos, msg string, tok rune) {
- s := new(Scanner).Init(bytes.NewBufferString(src))
- errorCalled := false
- s.Error = func(s *Scanner, m string) {
- if !errorCalled {
- // only look at first error
- if p := s.Pos().String(); p != pos {
- t.Errorf("pos = %q, want %q for %q", p, pos, src)
- }
- if m != msg {
- t.Errorf("msg = %q, want %q for %q", m, msg, src)
- }
- errorCalled = true
- }
- }
- tk := s.Scan()
- if tk != tok {
- t.Errorf("tok = %s, want %s for %q", TokenString(tk), TokenString(tok), src)
- }
- if !errorCalled {
- t.Errorf("error handler not called for %q", src)
- }
- if s.ErrorCount == 0 {
- t.Errorf("count = %d, want > 0 for %q", s.ErrorCount, src)
- }
-}
-
-func TestError(t *testing.T) {
- testError(t, "\x00", "1:1", "illegal character NUL", 0)
- testError(t, "\x80", "1:1", "illegal UTF-8 encoding", utf8.RuneError)
- testError(t, "\xff", "1:1", "illegal UTF-8 encoding", utf8.RuneError)
-
- testError(t, "a\x00", "1:2", "illegal character NUL", Ident)
- testError(t, "ab\x80", "1:3", "illegal UTF-8 encoding", Ident)
- testError(t, "abc\xff", "1:4", "illegal UTF-8 encoding", Ident)
-
- testError(t, `"a`+"\x00", "1:3", "illegal character NUL", String)
- testError(t, `"ab`+"\x80", "1:4", "illegal UTF-8 encoding", String)
- testError(t, `"abc`+"\xff", "1:5", "illegal UTF-8 encoding", String)
-
- testError(t, "`a"+"\x00", "1:3", "illegal character NUL", String)
- testError(t, "`ab"+"\x80", "1:4", "illegal UTF-8 encoding", String)
- testError(t, "`abc"+"\xff", "1:5", "illegal UTF-8 encoding", String)
-
- testError(t, `'\"'`, "1:3", "illegal char escape", Char)
- testError(t, `"\'"`, "1:3", "illegal char escape", String)
-
- testError(t, `01238`, "1:6", "illegal octal number", Int)
- testError(t, `'aa'`, "1:4", "illegal char literal", Char)
-
- testError(t, `'`, "1:2", "literal not terminated", Char)
- testError(t, `'`+"\n", "1:2", "literal not terminated", Char)
- testError(t, `"abc`, "1:5", "literal not terminated", String)
- testError(t, `"abc`+"\n", "1:5", "literal not terminated", String)
- testError(t, "`abc\n", "2:1", "literal not terminated", String)
- testError(t, `/*/`, "1:4", "comment not terminated", EOF)
-}
-
-func checkPos(t *testing.T, got, want Position) {
- if got.Offset != want.Offset || got.Line != want.Line || got.Column != want.Column {
- t.Errorf("got offset, line, column = %d, %d, %d; want %d, %d, %d",
- got.Offset, got.Line, got.Column, want.Offset, want.Line, want.Column)
- }
-}
-
-func checkNextPos(t *testing.T, s *Scanner, offset, line, column int, char rune) {
- if ch := s.Next(); ch != char {
- t.Errorf("ch = %s, want %s", TokenString(ch), TokenString(char))
- }
- want := Position{Offset: offset, Line: line, Column: column}
- checkPos(t, s.Pos(), want)
-}
-
-func checkScanPos(t *testing.T, s *Scanner, offset, line, column int, char rune) {
- want := Position{Offset: offset, Line: line, Column: column}
- checkPos(t, s.Pos(), want)
- if ch := s.Scan(); ch != char {
- t.Errorf("ch = %s, want %s", TokenString(ch), TokenString(char))
- if string(ch) != s.TokenText() {
- t.Errorf("tok = %q, want %q", s.TokenText(), string(ch))
- }
- }
- checkPos(t, s.Position, want)
-}
-
-func TestPos(t *testing.T) {
- // corner case: empty source
- s := new(Scanner).Init(bytes.NewBufferString(""))
- checkPos(t, s.Pos(), Position{Offset: 0, Line: 1, Column: 1})
- s.Peek() // peek doesn't affect the position
- checkPos(t, s.Pos(), Position{Offset: 0, Line: 1, Column: 1})
-
- // corner case: source with only a newline
- s = new(Scanner).Init(bytes.NewBufferString("\n"))
- checkPos(t, s.Pos(), Position{Offset: 0, Line: 1, Column: 1})
- checkNextPos(t, s, 1, 2, 1, '\n')
- // after EOF position doesn't change
- for i := 10; i > 0; i-- {
- checkScanPos(t, s, 1, 2, 1, EOF)
- }
- if s.ErrorCount != 0 {
- t.Errorf("%d errors", s.ErrorCount)
- }
-
- // corner case: source with only a single character
- s = new(Scanner).Init(bytes.NewBufferString("本"))
- checkPos(t, s.Pos(), Position{Offset: 0, Line: 1, Column: 1})
- checkNextPos(t, s, 3, 1, 2, '本')
- // after EOF position doesn't change
- for i := 10; i > 0; i-- {
- checkScanPos(t, s, 3, 1, 2, EOF)
- }
- if s.ErrorCount != 0 {
- t.Errorf("%d errors", s.ErrorCount)
- }
-
- // positions after calling Next
- s = new(Scanner).Init(bytes.NewBufferString(" foo६४ \n\n本語\n"))
- checkNextPos(t, s, 1, 1, 2, ' ')
- s.Peek() // peek doesn't affect the position
- checkNextPos(t, s, 2, 1, 3, ' ')
- checkNextPos(t, s, 3, 1, 4, 'f')
- checkNextPos(t, s, 4, 1, 5, 'o')
- checkNextPos(t, s, 5, 1, 6, 'o')
- checkNextPos(t, s, 8, 1, 7, '६')
- checkNextPos(t, s, 11, 1, 8, '४')
- checkNextPos(t, s, 12, 1, 9, ' ')
- checkNextPos(t, s, 13, 1, 10, ' ')
- checkNextPos(t, s, 14, 2, 1, '\n')
- checkNextPos(t, s, 15, 3, 1, '\n')
- checkNextPos(t, s, 18, 3, 2, '本')
- checkNextPos(t, s, 21, 3, 3, '語')
- checkNextPos(t, s, 22, 4, 1, '\n')
- // after EOF position doesn't change
- for i := 10; i > 0; i-- {
- checkScanPos(t, s, 22, 4, 1, EOF)
- }
- if s.ErrorCount != 0 {
- t.Errorf("%d errors", s.ErrorCount)
- }
-
- // positions after calling Scan
- s = new(Scanner).Init(bytes.NewBufferString("abc\n本語\n\nx"))
- s.Mode = 0
- s.Whitespace = 0
- checkScanPos(t, s, 0, 1, 1, 'a')
- s.Peek() // peek doesn't affect the position
- checkScanPos(t, s, 1, 1, 2, 'b')
- checkScanPos(t, s, 2, 1, 3, 'c')
- checkScanPos(t, s, 3, 1, 4, '\n')
- checkScanPos(t, s, 4, 2, 1, '本')
- checkScanPos(t, s, 7, 2, 2, '語')
- checkScanPos(t, s, 10, 2, 3, '\n')
- checkScanPos(t, s, 11, 3, 1, '\n')
- checkScanPos(t, s, 12, 4, 1, 'x')
- // after EOF position doesn't change
- for i := 10; i > 0; i-- {
- checkScanPos(t, s, 13, 4, 2, EOF)
- }
- if s.ErrorCount != 0 {
- t.Errorf("%d errors", s.ErrorCount)
- }
-}
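A hedged sketch of the Error hook that testError above exercises: installing a handler collects scan errors (and still increments ErrorCount) instead of printing them to os.Stderr. The input string is chosen only to trigger two of the errors listed above:

package main

import (
	"fmt"
	"strings"
	"text/scanner"
)

func main() {
	var s scanner.Scanner
	s.Init(strings.NewReader("'aa' 0128")) // illegal char literal, then illegal octal number
	var errs []string
	s.Error = func(s *scanner.Scanner, msg string) {
		errs = append(errs, fmt.Sprintf("%s: %s", s.Pos(), msg))
	}
	for tok := s.Scan(); tok != scanner.EOF; tok = s.Scan() {
		// tokens are still returned even when errors are reported
	}
	fmt.Println(s.ErrorCount, errs)
}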
+++ /dev/null
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package smtp
-
-import "errors"
-
-// Auth is implemented by an SMTP authentication mechanism.
-type Auth interface {
- // Start begins an authentication with a server.
- // It returns the name of the authentication protocol
- // and optionally data to include in the initial AUTH message
- // sent to the server. It can return proto == "" to indicate
- // that the authentication should be skipped.
- // If it returns a non-nil error, the SMTP client aborts
- // the authentication attempt and closes the connection.
- Start(server *ServerInfo) (proto string, toServer []byte, err error)
-
- // Next continues the authentication. The server has just sent
- // the fromServer data. If more is true, the server expects a
- // response, which Next should return as toServer; otherwise
- // Next should return toServer == nil.
- // If Next returns a non-nil error, the SMTP client aborts
- // the authentication attempt and closes the connection.
- Next(fromServer []byte, more bool) (toServer []byte, err error)
-}
-
-// ServerInfo records information about an SMTP server.
-type ServerInfo struct {
- Name string // SMTP server name
- TLS bool // using TLS, with valid certificate for Name
- Auth []string // advertised authentication mechanisms
-}
-
-type plainAuth struct {
- identity, username, password string
- host string
-}
-
-// PlainAuth returns an Auth that implements the PLAIN authentication
-// mechanism as defined in RFC 4616.
-// The returned Auth uses the given username and password to authenticate
-// on TLS connections to host and act as identity. Usually identity will be
-// left blank to act as username.
-func PlainAuth(identity, username, password, host string) Auth {
- return &plainAuth{identity, username, password, host}
-}
-
-func (a *plainAuth) Start(server *ServerInfo) (string, []byte, error) {
- if !server.TLS {
- return "", nil, errors.New("unencrypted connection")
- }
- if server.Name != a.host {
- return "", nil, errors.New("wrong host name")
- }
- resp := []byte(a.identity + "\x00" + a.username + "\x00" + a.password)
- return "PLAIN", resp, nil
-}
-
-func (a *plainAuth) Next(fromServer []byte, more bool) ([]byte, error) {
- if more {
- // We've already sent everything.
- return nil, errors.New("unexpected server challenge")
- }
- return nil, nil
-}
+++ /dev/null
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package smtp implements the Simple Mail Transfer Protocol as defined in RFC 5321.
-// It also implements the following extensions:
-// 8BITMIME RFC 1652
-// AUTH RFC 2554
-// STARTTLS RFC 3207
-// Additional extensions may be handled by clients.
-package smtp
-
-import (
- "crypto/tls"
- "encoding/base64"
- "io"
- "net"
- "net/textproto"
- "strings"
-)
-
-// A Client represents a client connection to an SMTP server.
-type Client struct {
- // Text is the textproto.Conn used by the Client. It is exported to allow for
- // clients to add extensions.
- Text *textproto.Conn
- // keep a reference to the connection so it can be used to create a TLS
- // connection later
- conn net.Conn
- // whether the Client is using TLS
- tls bool
- serverName string
- // map of supported extensions
- ext map[string]string
- // supported auth mechanisms
- auth []string
-}
-
-// Dial returns a new Client connected to an SMTP server at addr.
-func Dial(addr string) (*Client, error) {
- conn, err := net.Dial("tcp", addr)
- if err != nil {
- return nil, err
- }
- host := addr[:strings.Index(addr, ":")]
- return NewClient(conn, host)
-}
-
-// NewClient returns a new Client using an existing connection and host as a
-// server name to be used when authenticating.
-func NewClient(conn net.Conn, host string) (*Client, error) {
- text := textproto.NewConn(conn)
- _, msg, err := text.ReadResponse(220)
- if err != nil {
- text.Close()
- return nil, err
- }
- c := &Client{Text: text, conn: conn, serverName: host}
- if strings.Contains(msg, "ESMTP") {
- err = c.ehlo()
- } else {
- err = c.helo()
- }
- return c, err
-}
-
-// cmd is a convenience function that sends a command and returns the response
-func (c *Client) cmd(expectCode int, format string, args ...interface{}) (int, string, error) {
- id, err := c.Text.Cmd(format, args...)
- if err != nil {
- return 0, "", err
- }
- c.Text.StartResponse(id)
- defer c.Text.EndResponse(id)
- code, msg, err := c.Text.ReadResponse(expectCode)
- return code, msg, err
-}
-
-// helo sends the HELO greeting to the server. It should be used only when the
-// server does not support ehlo.
-func (c *Client) helo() error {
- c.ext = nil
- _, _, err := c.cmd(250, "HELO localhost")
- return err
-}
-
-// ehlo sends the EHLO (extended hello) greeting to the server. It
-// should be the preferred greeting for servers that support it.
-func (c *Client) ehlo() error {
- _, msg, err := c.cmd(250, "EHLO localhost")
- if err != nil {
- return err
- }
- ext := make(map[string]string)
- extList := strings.Split(msg, "\n")
- if len(extList) > 1 {
- extList = extList[1:]
- for _, line := range extList {
- args := strings.SplitN(line, " ", 2)
- if len(args) > 1 {
- ext[args[0]] = args[1]
- } else {
- ext[args[0]] = ""
- }
- }
- }
- if mechs, ok := ext["AUTH"]; ok {
- c.auth = strings.Split(mechs, " ")
- }
- c.ext = ext
- return err
-}
-
-// StartTLS sends the STARTTLS command and encrypts all further communication.
-// Only servers that advertise the STARTTLS extension support this function.
-func (c *Client) StartTLS(config *tls.Config) error {
- _, _, err := c.cmd(220, "STARTTLS")
- if err != nil {
- return err
- }
- c.conn = tls.Client(c.conn, config)
- c.Text = textproto.NewConn(c.conn)
- c.tls = true
- return c.ehlo()
-}
-
-// Verify checks the validity of an email address on the server.
-// If Verify returns nil, the address is valid. A non-nil return
-// does not necessarily indicate an invalid address. Many servers
-// will not verify addresses for security reasons.
-func (c *Client) Verify(addr string) error {
- _, _, err := c.cmd(250, "VRFY %s", addr)
- return err
-}
-
-// Auth authenticates a client using the provided authentication mechanism.
-// A failed authentication closes the connection.
-// Only servers that advertise the AUTH extension support this function.
-func (c *Client) Auth(a Auth) error {
- encoding := base64.StdEncoding
- mech, resp, err := a.Start(&ServerInfo{c.serverName, c.tls, c.auth})
- if err != nil {
- c.Quit()
- return err
- }
- resp64 := make([]byte, encoding.EncodedLen(len(resp)))
- encoding.Encode(resp64, resp)
- code, msg64, err := c.cmd(0, "AUTH %s %s", mech, resp64)
- for err == nil {
- var msg []byte
- switch code {
- case 334:
- msg, err = encoding.DecodeString(msg64)
- case 235:
- // the last message isn't base64 because it isn't a challenge
- msg = []byte(msg64)
- default:
- err = &textproto.Error{code, msg64}
- }
- resp, err = a.Next(msg, code == 334)
- if err != nil {
- // abort the AUTH
- c.cmd(501, "*")
- c.Quit()
- break
- }
- if resp == nil {
- break
- }
- resp64 = make([]byte, encoding.EncodedLen(len(resp)))
- encoding.Encode(resp64, resp)
- code, msg64, err = c.cmd(0, string(resp64))
- }
- return err
-}
-
-// Mail issues a MAIL command to the server using the provided email address.
-// If the server supports the 8BITMIME extension, Mail adds the BODY=8BITMIME
-// parameter.
-// This initiates a mail transaction and is followed by one or more Rcpt calls.
-func (c *Client) Mail(from string) error {
- cmdStr := "MAIL FROM:<%s>"
- if c.ext != nil {
- if _, ok := c.ext["8BITMIME"]; ok {
- cmdStr += " BODY=8BITMIME"
- }
- }
- _, _, err := c.cmd(250, cmdStr, from)
- return err
-}
-
-// Rcpt issues a RCPT command to the server using the provided email address.
-// A call to Rcpt must be preceded by a call to Mail and may be followed by
-// a Data call or another Rcpt call.
-func (c *Client) Rcpt(to string) error {
- _, _, err := c.cmd(25, "RCPT TO:<%s>", to)
- return err
-}
-
-type dataCloser struct {
- c *Client
- io.WriteCloser
-}
-
-func (d *dataCloser) Close() error {
- d.WriteCloser.Close()
- _, _, err := d.c.Text.ReadResponse(250)
- return err
-}
-
-// Data issues a DATA command to the server and returns a writer that
-// can be used to write the data. The caller should close the writer
-// before calling any more methods on c.
-// A call to Data must be preceded by one or more calls to Rcpt.
-func (c *Client) Data() (io.WriteCloser, error) {
- _, _, err := c.cmd(354, "DATA")
- if err != nil {
- return nil, err
- }
- return &dataCloser{c, c.Text.DotWriter()}, nil
-}
-
-// SendMail connects to the server at addr, switches to TLS if possible,
-// authenticates with mechanism a if possible, and then sends an email from
-// address from, to addresses to, with message msg.
-func SendMail(addr string, a Auth, from string, to []string, msg []byte) error {
- c, err := Dial(addr)
- if err != nil {
- return err
- }
- if ok, _ := c.Extension("STARTTLS"); ok {
- if err = c.StartTLS(nil); err != nil {
- return err
- }
- }
- if a != nil && c.ext != nil {
- if _, ok := c.ext["AUTH"]; ok {
- if err = c.Auth(a); err != nil {
- return err
- }
- }
- }
- if err = c.Mail(from); err != nil {
- return err
- }
- for _, addr := range to {
- if err = c.Rcpt(addr); err != nil {
- return err
- }
- }
- w, err := c.Data()
- if err != nil {
- return err
- }
- _, err = w.Write(msg)
- if err != nil {
- return err
- }
- err = w.Close()
- if err != nil {
- return err
- }
- return c.Quit()
-}
-
-// Extension reports whether an extension is support by the server.
-// The extension name is case-insensitive. If the extension is supported,
-// Extension also returns a string that contains any parameters the
-// server specifies for the extension.
-func (c *Client) Extension(ext string) (bool, string) {
- if c.ext == nil {
- return false, ""
- }
- ext = strings.ToUpper(ext)
- param, ok := c.ext[ext]
- return ok, param
-}
-
-// Reset sends the RSET command to the server, aborting the current mail
-// transaction.
-func (c *Client) Reset() error {
- _, _, err := c.cmd(250, "RSET")
- return err
-}
-
-// Quit sends the QUIT command and closes the connection to the server.
-func (c *Client) Quit() error {
- _, _, err := c.cmd(221, "QUIT")
- if err != nil {
- return err
- }
- return c.Text.Close()
-}
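A usage sketch of the SendMail convenience function defined above, assuming the relocated import path net/smtp; the server address, credentials, and mail addresses are placeholders:

package main

import (
	"log"
	"net/smtp"
)

func main() {
	// PlainAuth only authenticates over a TLS connection to the named host,
	// which SendMail establishes via STARTTLS when the server advertises it.
	auth := smtp.PlainAuth("", "user@example.com", "password", "mail.example.com")
	msg := []byte("To: recipient@example.net\r\n" +
		"Subject: Hello from net/smtp\r\n" +
		"\r\n" +
		"This is the message body.\r\n")
	err := smtp.SendMail("mail.example.com:25", auth,
		"user@example.com", []string{"recipient@example.net"}, msg)
	if err != nil {
		log.Fatal(err)
	}
}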
+++ /dev/null
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package smtp
-
-import (
- "bufio"
- "bytes"
- "io"
- "net/textproto"
- "strings"
- "testing"
-)
-
-type authTest struct {
- auth Auth
- challenges []string
- name string
- responses []string
-}
-
-var authTests = []authTest{
- {PlainAuth("", "user", "pass", "testserver"), []string{}, "PLAIN", []string{"\x00user\x00pass"}},
- {PlainAuth("foo", "bar", "baz", "testserver"), []string{}, "PLAIN", []string{"foo\x00bar\x00baz"}},
-}
-
-func TestAuth(t *testing.T) {
-testLoop:
- for i, test := range authTests {
- name, resp, err := test.auth.Start(&ServerInfo{"testserver", true, nil})
- if name != test.name {
- t.Errorf("#%d got name %s, expected %s", i, name, test.name)
- }
- if !bytes.Equal(resp, []byte(test.responses[0])) {
- t.Errorf("#%d got response %s, expected %s", i, resp, test.responses[0])
- }
- if err != nil {
- t.Errorf("#%d error: %s", i, err)
- }
- for j := range test.challenges {
- challenge := []byte(test.challenges[j])
- expected := []byte(test.responses[j+1])
- resp, err := test.auth.Next(challenge, true)
- if err != nil {
- t.Errorf("#%d error: %s", i, err)
- continue testLoop
- }
- if !bytes.Equal(resp, expected) {
- t.Errorf("#%d got %s, expected %s", i, resp, expected)
- continue testLoop
- }
- }
- }
-}
-
-type faker struct {
- io.ReadWriter
-}
-
-func (f faker) Close() error {
- return nil
-}
-
-func TestBasic(t *testing.T) {
- basicServer = strings.Join(strings.Split(basicServer, "\n"), "\r\n")
- basicClient = strings.Join(strings.Split(basicClient, "\n"), "\r\n")
-
- var cmdbuf bytes.Buffer
- bcmdbuf := bufio.NewWriter(&cmdbuf)
- var fake faker
- fake.ReadWriter = bufio.NewReadWriter(bufio.NewReader(strings.NewReader(basicServer)), bcmdbuf)
- c := &Client{Text: textproto.NewConn(fake)}
-
- if err := c.helo(); err != nil {
- t.Fatalf("HELO failed: %s", err)
- }
- if err := c.ehlo(); err == nil {
- t.Fatalf("Expected first EHLO to fail")
- }
- if err := c.ehlo(); err != nil {
- t.Fatalf("Second EHLO failed: %s", err)
- }
-
- if ok, args := c.Extension("aUtH"); !ok || args != "LOGIN PLAIN" {
- t.Fatalf("Expected AUTH supported")
- }
- if ok, _ := c.Extension("DSN"); ok {
- t.Fatalf("Shouldn't support DSN")
- }
-
- if err := c.Mail("user@gmail.com"); err == nil {
- t.Fatalf("MAIL should require authentication")
- }
-
- if err := c.Verify("user1@gmail.com"); err == nil {
- t.Fatalf("First VRFY: expected no verification")
- }
- if err := c.Verify("user2@gmail.com"); err != nil {
- t.Fatalf("Second VRFY: expected verification, got %s", err)
- }
-
- // fake TLS so authentication won't complain
- c.tls = true
- c.serverName = "smtp.google.com"
- if err := c.Auth(PlainAuth("", "user", "pass", "smtp.google.com")); err != nil {
- t.Fatalf("AUTH failed: %s", err)
- }
-
- if err := c.Mail("user@gmail.com"); err != nil {
- t.Fatalf("MAIL failed: %s", err)
- }
- if err := c.Rcpt("golang-nuts@googlegroups.com"); err != nil {
- t.Fatalf("RCPT failed: %s", err)
- }
- msg := `From: user@gmail.com
-To: golang-nuts@googlegroups.com
-Subject: Hooray for Go
-
-Line 1
-.Leading dot line .
-Goodbye.`
- w, err := c.Data()
- if err != nil {
- t.Fatalf("DATA failed: %s", err)
- }
- if _, err := w.Write([]byte(msg)); err != nil {
- t.Fatalf("Data write failed: %s", err)
- }
- if err := w.Close(); err != nil {
- t.Fatalf("Bad data response: %s", err)
- }
-
- if err := c.Quit(); err != nil {
- t.Fatalf("QUIT failed: %s", err)
- }
-
- bcmdbuf.Flush()
- actualcmds := cmdbuf.String()
- if basicClient != actualcmds {
- t.Fatalf("Got:\n%s\nExpected:\n%s", actualcmds, basicClient)
- }
-}
-
-var basicServer = `250 mx.google.com at your service
-502 Unrecognized command.
-250-mx.google.com at your service
-250-SIZE 35651584
-250-AUTH LOGIN PLAIN
-250 8BITMIME
-530 Authentication required
-252 Send some mail, I'll try my best
-250 User is valid
-235 Accepted
-250 Sender OK
-250 Receiver OK
-354 Go ahead
-250 Data OK
-221 OK
-`
-
-var basicClient = `HELO localhost
-EHLO localhost
-EHLO localhost
-MAIL FROM:<user@gmail.com> BODY=8BITMIME
-VRFY user1@gmail.com
-VRFY user2@gmail.com
-AUTH PLAIN AHVzZXIAcGFzcw==
-MAIL FROM:<user@gmail.com> BODY=8BITMIME
-RCPT TO:<golang-nuts@googlegroups.com>
-DATA
-From: user@gmail.com
-To: golang-nuts@googlegroups.com
-Subject: Hooray for Go
-
-Line 1
-..Leading dot line .
-Goodbye.
-.
-QUIT
-`
import (
"fmt"
"math"
- "rand"
+ "math/rand"
. "sort"
"strconv"
"testing"
"bytes"
"strings"
"unicode"
- "utf8"
+ "unicode/utf8"
)
const lowerhex = "0123456789abcdef"
import (
"errors"
"io"
- "utf8"
+ "unicode/utf8"
)
// A Reader implements the io.Reader, io.ByteScanner, and
// ReadRune reads and returns the next UTF-8-encoded
// Unicode code point from the buffer.
-// If no bytes are available, the error returned is os.EOF.
+// If no bytes are available, the error returned is io.EOF.
// If the bytes are an erroneous UTF-8 encoding, it
// consumes one byte and returns U+FFFD, 1.
func (r *Reader) ReadRune() (ch rune, size int, err error) {
import (
"unicode"
- "utf8"
+ "unicode/utf8"
)
// explode splits s into an array of UTF-8 sequences, one per Unicode character (still strings) up to a maximum of n (n < 0 means no limit).
. "strings"
"testing"
"unicode"
+ "unicode/utf8"
"unsafe"
- "utf8"
)
func eq(a, b []string) bool {
package sync_test
import (
+ "runtime"
. "sync"
"sync/atomic"
- "runtime"
"testing"
)
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package syslog provides a simple interface to the system log service. It
-// can send messages to the syslog daemon using UNIX domain sockets, UDP, or
-// TCP connections.
-package syslog
-
-import (
- "fmt"
- "log"
- "net"
- "os"
-)
-
-type Priority int
-
-const (
- // From /usr/include/sys/syslog.h.
- // These are the same on Linux, BSD, and OS X.
- LOG_EMERG Priority = iota
- LOG_ALERT
- LOG_CRIT
- LOG_ERR
- LOG_WARNING
- LOG_NOTICE
- LOG_INFO
- LOG_DEBUG
-)
-
-// A Writer is a connection to a syslog server.
-type Writer struct {
- priority Priority
- prefix string
- conn serverConn
-}
-
-type serverConn interface {
- writeBytes(p Priority, prefix string, b []byte) (int, error)
- writeString(p Priority, prefix string, s string) (int, error)
- close() error
-}
-
-type netConn struct {
- conn net.Conn
-}
-
-// New establishes a new connection to the system log daemon.
-// Each write to the returned writer sends a log message with
-// the given priority and prefix.
-func New(priority Priority, prefix string) (w *Writer, err error) {
- return Dial("", "", priority, prefix)
-}
-
-// Dial establishes a connection to a log daemon by connecting
-// to address raddr on the network net.
-// Each write to the returned writer sends a log message with
-// the given priority and prefix.
-func Dial(network, raddr string, priority Priority, prefix string) (w *Writer, err error) {
- if prefix == "" {
- prefix = os.Args[0]
- }
- var conn serverConn
- if network == "" {
- conn, err = unixSyslog()
- } else {
- var c net.Conn
- c, err = net.Dial(network, raddr)
- conn = netConn{c}
- }
- return &Writer{priority, prefix, conn}, err
-}
-
-// Write sends a log message to the syslog daemon.
-func (w *Writer) Write(b []byte) (int, error) {
- if w.priority > LOG_DEBUG || w.priority < LOG_EMERG {
- return 0, os.EINVAL
- }
- return w.conn.writeBytes(w.priority, w.prefix, b)
-}
-
-func (w *Writer) writeString(p Priority, s string) (int, error) {
- return w.conn.writeString(p, w.prefix, s)
-}
-
-func (w *Writer) Close() error { return w.conn.close() }
-
-// Emerg logs a message using the LOG_EMERG priority.
-func (w *Writer) Emerg(m string) (err error) {
- _, err = w.writeString(LOG_EMERG, m)
- return err
-}
-// Crit logs a message using the LOG_CRIT priority.
-func (w *Writer) Crit(m string) (err error) {
- _, err = w.writeString(LOG_CRIT, m)
- return err
-}
-// Err logs a message using the LOG_ERR priority.
-func (w *Writer) Err(m string) (err error) {
- _, err = w.writeString(LOG_ERR, m)
- return err
-}
-
-// Warning logs a message using the LOG_WARNING priority.
-func (w *Writer) Warning(m string) (err error) {
- _, err = w.writeString(LOG_WARNING, m)
- return err
-}
-
-// Notice logs a message using the LOG_NOTICE priority.
-func (w *Writer) Notice(m string) (err error) {
- _, err = w.writeString(LOG_NOTICE, m)
- return err
-}
-// Info logs a message using the LOG_INFO priority.
-func (w *Writer) Info(m string) (err error) {
- _, err = w.writeString(LOG_INFO, m)
- return err
-}
-// Debug logs a message using the LOG_DEBUG priority.
-func (w *Writer) Debug(m string) (err error) {
- _, err = w.writeString(LOG_DEBUG, m)
- return err
-}
-
-func (n netConn) writeBytes(p Priority, prefix string, b []byte) (int, error) {
- return fmt.Fprintf(n.conn, "<%d>%s: %s\n", p, prefix, b)
-}
-
-func (n netConn) writeString(p Priority, prefix string, s string) (int, error) {
- return fmt.Fprintf(n.conn, "<%d>%s: %s\n", p, prefix, s)
-}
-
-func (n netConn) close() error {
- return n.conn.Close()
-}
-
-// NewLogger provides an object that implements the full log.Logger interface,
-// but sends messages to Syslog instead; flag is passed as is to Logger;
-// priority will be used for all messages sent using this interface.
-// All messages are logged with priority p.
-func NewLogger(p Priority, flag int) *log.Logger {
- s, err := New(p, "")
- if err != nil {
- return nil
- }
- return log.New(s, "", flag)
-}
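
A short sketch of the Writer defined above, assuming the package at its post-reorganization import path log/syslog and a reachable local syslog daemon. The log.Logger wrapper works the same way via NewLogger; note that the NewLogger defined above returns only a *log.Logger, while later versions of the package also return an error.

package main

import (
	"log"
	"log/syslog"
)

func main() {
	// New is equivalent to Dial("", "", ...): it connects to the local daemon.
	w, err := syslog.New(syslog.LOG_INFO, "example")
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()
	w.Info("informational message") // logged with the LOG_INFO priority
	w.Err("something went wrong")   // logged with the LOG_ERR priority
}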
+++ /dev/null
-/* syslog_c.c -- call syslog for Go.
-
- Copyright 2011 The Go Authors. All rights reserved.
- Use of this source code is governed by a BSD-style
- license that can be found in the LICENSE file. */
-
-#include <syslog.h>
-
-/* We need to use a C function to call the syslog function, because we
- can't represent a C varargs function in Go. */
-
-void syslog_c(int, const char*)
- asm ("libgo_syslog.syslog.syslog_c");
-
-void
-syslog_c (int priority, const char *msg)
-{
- syslog (priority, "%s", msg);
-}
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// gccgo specific implementation of syslog for Solaris. Solaris uses
-// STREAMS to communicate with syslogd. That is enough of a pain that
-// we just call the libc function.
-
-package syslog
-
-import (
- "fmt"
- "syscall"
-)
-
-func unixSyslog() (conn serverConn, err error) {
- return libcConn(0), nil
-}
-
-type libcConn int
-
-func syslog_c(int, *byte)
-
-func (libcConn) writeBytes(p Priority, prefix string, b []byte) (int, error) {
- syslog_c(int(p), syscall.StringBytePtr(fmt.Sprintf("%s: %s", prefix, b)))
- return len(b), nil
-}
-
-func (libcConn) writeString(p Priority, prefix string, s string) (int, error) {
- syslog_c(int(p), syscall.StringBytePtr(fmt.Sprintf("%s: %s", prefix, s)))
- return len(s), nil
-}
-
-func (libcConn) close() error {
- return nil
-}
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-package syslog
-
-import (
- "io"
- "log"
- "net"
- "testing"
-)
-
-var serverAddr string
-
-func runSyslog(c net.PacketConn, done chan<- string) {
- var buf [4096]byte
- var rcvd string = ""
- for {
- n, _, err := c.ReadFrom(buf[0:])
- if err != nil || n == 0 {
- break
- }
- rcvd += string(buf[0:n])
- }
- done <- rcvd
-}
-
-func startServer(done chan<- string) {
- c, e := net.ListenPacket("udp", "127.0.0.1:0")
- if e != nil {
- log.Fatalf("net.ListenPacket failed udp :0 %v", e)
- }
- serverAddr = c.LocalAddr().String()
- c.SetReadTimeout(100e6) // 100ms
- go runSyslog(c, done)
-}
-
-func skipNetTest(t *testing.T) bool {
- if testing.Short() {
- // Depends on syslog daemon running, and sometimes it's not.
- t.Logf("skipping syslog test during -short")
- return true
- }
- return false
-}
-
-func TestNew(t *testing.T) {
- if skipNetTest(t) {
- return
- }
- s, err := New(LOG_INFO, "")
- if err != nil {
- t.Fatalf("New() failed: %s", err)
- }
- // Don't send any messages.
- s.Close()
-}
-
-func TestNewLogger(t *testing.T) {
- if skipNetTest(t) {
- return
- }
- f := NewLogger(LOG_INFO, 0)
- if f == nil {
- t.Error("NewLogger() failed")
- }
-}
-
-func TestDial(t *testing.T) {
- if skipNetTest(t) {
- return
- }
- l, err := Dial("", "", LOG_ERR, "syslog_test")
- if err != nil {
- t.Fatalf("Dial() failed: %s", err)
- }
- l.Close()
-}
-
-func TestUDPDial(t *testing.T) {
- done := make(chan string)
- startServer(done)
- l, err := Dial("udp", serverAddr, LOG_INFO, "syslog_test")
- if err != nil {
- t.Fatalf("syslog.Dial() failed: %s", err)
- }
- msg := "udp test"
- l.Info(msg)
- expected := "<6>syslog_test: udp test\n"
- rcvd := <-done
- if rcvd != expected {
- t.Fatalf("s.Info() = '%q', but wanted '%q'", rcvd, expected)
- }
-}
-
-func TestWrite(t *testing.T) {
- done := make(chan string)
- startServer(done)
- l, err := Dial("udp", serverAddr, LOG_ERR, "syslog_test")
- if err != nil {
- t.Fatalf("syslog.Dial() failed: %s", err)
- }
- msg := "write test"
- _, err = io.WriteString(l, msg)
- if err != nil {
- t.Fatalf("WriteString() failed: %s", err)
- }
- expected := "<3>syslog_test: write test\n"
- rcvd := <-done
- if rcvd != expected {
- t.Fatalf("s.Info() = '%q', but wanted '%q'", rcvd, expected)
- }
-}
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package syslog
-
-import (
- "errors"
- "net"
-)
-
-// unixSyslog opens a connection to the syslog daemon running on the
-// local machine using a Unix domain socket.
-
-func unixSyslog() (conn serverConn, err error) {
- logTypes := []string{"unixgram", "unix"}
- logPaths := []string{"/dev/log", "/var/run/syslog"}
- var raddr string
- for _, network := range logTypes {
- for _, path := range logPaths {
- raddr = path
- conn, err := net.Dial(network, raddr)
- if err != nil {
- continue
- } else {
- return netConn{conn}, nil
- }
- }
- }
- return nil, errors.New("Unix syslog delivery error")
-}
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package tabwriter implements a write filter (tabwriter.Writer) that
-// translates tabbed columns in input into properly aligned text.
-//
-// The package is using the Elastic Tabstops algorithm described at
-// http://nickgravgaard.com/elastictabstops/index.html.
-//
-package tabwriter
-
-import (
- "bytes"
- "io"
- "os"
- "utf8"
-)
-
-// ----------------------------------------------------------------------------
-// Filter implementation
-
-// A cell represents a segment of text terminated by tabs or line breaks.
-// The text itself is stored in a separate buffer; cell only describes the
-// segment's size in bytes, its width in runes, and whether it's an htab
-// ('\t') terminated cell.
-//
-type cell struct {
- size int // cell size in bytes
- width int // cell width in runes
- htab bool // true if the cell is terminated by an htab ('\t')
-}
-
-// A Writer is a filter that inserts padding around tab-delimited
-// columns in its input to align them in the output.
-//
-// The Writer treats incoming bytes as UTF-8 encoded text consisting
-// of cells terminated by (horizontal or vertical) tabs or line
-// breaks (newline or formfeed characters). Cells in adjacent lines
-// constitute a column. The Writer inserts padding as needed to
-// make all cells in a column have the same width, effectively
-// aligning the columns. It assumes that all characters have the
-// same width except for tabs for which a tabwidth must be specified.
-// Note that cells are tab-terminated, not tab-separated: trailing
-// non-tab text at the end of a line does not form a column cell.
-//
-// The Writer assumes that all Unicode code points have the same width;
-// this may not be true in some fonts.
-//
-// If DiscardEmptyColumns is set, empty columns that are terminated
-// entirely by vertical (or "soft") tabs are discarded. Columns
-// terminated by horizontal (or "hard") tabs are not affected by
-// this flag.
-//
-// If a Writer is configured to filter HTML, HTML tags and entities
-// are simply passed through. The widths of tags and entities are
-// assumed to be zero (tags) and one (entities) for formatting purposes.
-//
-// A segment of text may be escaped by bracketing it with Escape
-// characters. The tabwriter passes escaped text segments through
-// unchanged. In particular, it does not interpret any tabs or line
-// breaks within the segment. If the StripEscape flag is set, the
-// Escape characters are stripped from the output; otherwise they
-// are passed through as well. For the purpose of formatting, the
-// width of the escaped text is always computed excluding the Escape
-// characters.
-//
-// The formfeed character ('\f') acts like a newline but it also
-// terminates all columns in the current line (effectively calling
-// Flush). Cells in the next line start new columns. Unless found
-// inside an HTML tag or inside an escaped text segment, formfeed
-// characters appear as newlines in the output.
-//
-// The Writer must buffer input internally, because proper spacing
-// of one line may depend on the cells in future lines. Clients must
-// call Flush when done calling Write.
-//
-type Writer struct {
- // configuration
- output io.Writer
- minwidth int
- tabwidth int
- padding int
- padbytes [8]byte
- flags uint
-
- // current state
- buf bytes.Buffer // collected text excluding tabs or line breaks
- pos int // buffer position up to which cell.width of incomplete cell has been computed
- cell cell // current incomplete cell; cell.width is up to buf[pos] excluding ignored sections
- endChar byte // terminating char of escaped sequence (Escape for escapes, '>', ';' for HTML tags/entities, or 0)
- lines [][]cell // list of lines; each line is a list of cells
- widths []int // list of column widths in runes - re-used during formatting
-}
-
-func (b *Writer) addLine() { b.lines = append(b.lines, []cell{}) }
-
-// Reset the current state.
-func (b *Writer) reset() {
- b.buf.Reset()
- b.pos = 0
- b.cell = cell{}
- b.endChar = 0
- b.lines = b.lines[0:0]
- b.widths = b.widths[0:0]
- b.addLine()
-}
-
-// Internal representation (current state):
-//
-// - all text written is appended to buf; tabs and line breaks are stripped away
-// - at any given time there is a (possibly empty) incomplete cell at the end
-// (the cell starts after a tab or line break)
-// - cell.size is the number of bytes belonging to the cell so far
-// - cell.width is text width in runes of that cell from the start of the cell to
-// position pos; html tags and entities are excluded from this width if html
-// filtering is enabled
-// - the sizes and widths of processed text are kept in the lines list
-// which contains a list of cells for each line
-// - the widths list is a temporary list with current widths used during
-// formatting; it is kept in Writer because it's re-used
-//
-// |<---------- size ---------->|
-// | |
-// |<- width ->|<- ignored ->| |
-// | | | |
-// [---processed---tab------------<tag>...</tag>...]
-// ^ ^ ^
-// | | |
-// buf start of incomplete cell pos
-
-// Formatting can be controlled with these flags.
-const (
- // Ignore html tags and treat entities (starting with '&'
- // and ending in ';') as single characters (width = 1).
- FilterHTML uint = 1 << iota
-
- // Strip Escape characters bracketing escaped text segments
- // instead of passing them through unchanged with the text.
- StripEscape
-
- // Force right-alignment of cell content.
- // Default is left-alignment.
- AlignRight
-
- // Handle empty columns as if they were not present in
- // the input in the first place.
- DiscardEmptyColumns
-
- // Always use tabs for indentation columns (i.e., padding of
- // leading empty cells on the left) independent of padchar.
- TabIndent
-
- // Print a vertical bar ('|') between columns (after formatting).
- // Discarded columns appear as zero-width columns ("||").
- Debug
-)
-
-// A Writer must be initialized with a call to Init. The first parameter (output)
-// specifies the filter output. The remaining parameters control the formatting:
-//
-// minwidth minimal cell width including any padding
-// tabwidth width of tab characters (equivalent number of spaces)
-// padding padding added to a cell before computing its width
-// padchar ASCII char used for padding
-// if padchar == '\t', the Writer will assume that the
-// width of a '\t' in the formatted output is tabwidth,
-// and cells are left-aligned independent of align_left
-// (for correct-looking results, tabwidth must correspond
-// to the tab width in the viewer displaying the result)
-// flags formatting control
-//
-// To format in tab-separated columns with a tab stop of 8:
-// b.Init(w, 8, 1, 8, '\t', 0);
-//
-// To format in space-separated columns with at least 4 spaces between columns:
-// b.Init(w, 0, 4, 8, ' ', 0);
-//
-func (b *Writer) Init(output io.Writer, minwidth, tabwidth, padding int, padchar byte, flags uint) *Writer {
- if minwidth < 0 || tabwidth < 0 || padding < 0 {
- panic("negative minwidth, tabwidth, or padding")
- }
- b.output = output
- b.minwidth = minwidth
- b.tabwidth = tabwidth
- b.padding = padding
- for i := range b.padbytes {
- b.padbytes[i] = padchar
- }
- if padchar == '\t' {
- // tab padding enforces left-alignment
- flags &^= AlignRight
- }
- b.flags = flags
-
- b.reset()
-
- return b
-}
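
A minimal sketch of the Init parameters described above, assuming the package at its post-reorganization import path text/tabwriter; the cell contents are placeholders:

package main

import (
	"fmt"
	"os"
	"text/tabwriter"
)

func main() {
	var w tabwriter.Writer
	// minwidth 0, tabwidth 8, padding 2, pad with spaces, no flags.
	w.Init(os.Stdout, 0, 8, 2, ' ', 0)
	fmt.Fprintln(&w, "a\tb\tc")
	fmt.Fprintln(&w, "aa\tbbb\tcccc")
	w.Flush() // Flush writes the aligned output; required after the last write.
}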
-
-// debugging support (keep code around)
-func (b *Writer) dump() {
- pos := 0
- for i, line := range b.lines {
- print("(", i, ") ")
- for _, c := range line {
- print("[", string(b.buf.Bytes()[pos:pos+c.size]), "]")
- pos += c.size
- }
- print("\n")
- }
- print("\n")
-}
-
-// local error wrapper so we can distinguish errors we want to return
-// as errors from genuine panics (which we don't want to return as errors)
-type osError struct {
- err error
-}
-
-func (b *Writer) write0(buf []byte) {
- n, err := b.output.Write(buf)
- if n != len(buf) && err == nil {
- err = os.EIO
- }
- if err != nil {
- panic(osError{err})
- }
-}
-
-func (b *Writer) writeN(src []byte, n int) {
- for n > len(src) {
- b.write0(src)
- n -= len(src)
- }
- b.write0(src[0:n])
-}
-
-var (
- newline = []byte{'\n'}
- tabs = []byte("\t\t\t\t\t\t\t\t")
-)
-
-func (b *Writer) writePadding(textw, cellw int, useTabs bool) {
- if b.padbytes[0] == '\t' || useTabs {
- // padding is done with tabs
- if b.tabwidth == 0 {
- return // tabs have no width - can't do any padding
- }
- // make cellw the smallest multiple of b.tabwidth
- cellw = (cellw + b.tabwidth - 1) / b.tabwidth * b.tabwidth
- n := cellw - textw // amount of padding
- if n < 0 {
- panic("internal error")
- }
- b.writeN(tabs, (n+b.tabwidth-1)/b.tabwidth)
- return
- }
-
- // padding is done with non-tab characters
- b.writeN(b.padbytes[0:], cellw-textw)
-}
-
-var vbar = []byte{'|'}
-
-func (b *Writer) writeLines(pos0 int, line0, line1 int) (pos int) {
- pos = pos0
- for i := line0; i < line1; i++ {
- line := b.lines[i]
-
- // if TabIndent is set, use tabs to pad leading empty cells
- useTabs := b.flags&TabIndent != 0
-
- for j, c := range line {
- if j > 0 && b.flags&Debug != 0 {
- // indicate column break
- b.write0(vbar)
- }
-
- if c.size == 0 {
- // empty cell
- if j < len(b.widths) {
- b.writePadding(c.width, b.widths[j], useTabs)
- }
- } else {
- // non-empty cell
- useTabs = false
- if b.flags&AlignRight == 0 { // align left
- b.write0(b.buf.Bytes()[pos : pos+c.size])
- pos += c.size
- if j < len(b.widths) {
- b.writePadding(c.width, b.widths[j], false)
- }
- } else { // align right
- if j < len(b.widths) {
- b.writePadding(c.width, b.widths[j], false)
- }
- b.write0(b.buf.Bytes()[pos : pos+c.size])
- pos += c.size
- }
- }
- }
-
- if i+1 == len(b.lines) {
- // last buffered line - we don't have a newline, so just write
- // any outstanding buffered data
- b.write0(b.buf.Bytes()[pos : pos+b.cell.size])
- pos += b.cell.size
- } else {
- // not the last line - write newline
- b.write0(newline)
- }
- }
- return
-}
-
-// Format the text between line0 and line1 (excluding line1); pos
-// is the buffer position corresponding to the beginning of line0.
-// Returns the buffer position corresponding to the beginning of
-// line1 and an error, if any.
-//
-func (b *Writer) format(pos0 int, line0, line1 int) (pos int) {
- pos = pos0
- column := len(b.widths)
- for this := line0; this < line1; this++ {
- line := b.lines[this]
-
- if column < len(line)-1 {
- // cell exists in this column => this line
- // has more cells than the previous line
- // (the last cell per line is ignored because cells are
- // tab-terminated; the last cell per line describes the
- // text before the newline/formfeed and does not belong
- // to a column)
-
- // print unprinted lines until beginning of block
- pos = b.writeLines(pos, line0, this)
- line0 = this
-
- // column block begin
- width := b.minwidth // minimal column width
- discardable := true // true if all cells in this column are empty and "soft"
- for ; this < line1; this++ {
- line = b.lines[this]
- if column < len(line)-1 {
- // cell exists in this column
- c := line[column]
- // update width
- if w := c.width + b.padding; w > width {
- width = w
- }
- // update discardable
- if c.width > 0 || c.htab {
- discardable = false
- }
- } else {
- break
- }
- }
- // column block end
-
- // discard empty columns if necessary
- if discardable && b.flags&DiscardEmptyColumns != 0 {
- width = 0
- }
-
- // format and print all columns to the right of this column
- // (we know the widths of this column and all columns to the left)
- b.widths = append(b.widths, width) // push width
- pos = b.format(pos, line0, this)
- b.widths = b.widths[0 : len(b.widths)-1] // pop width
- line0 = this
- }
- }
-
- // print unprinted lines until end
- return b.writeLines(pos, line0, line1)
-}
-
-// Append text to current cell.
-func (b *Writer) append(text []byte) {
- b.buf.Write(text)
- b.cell.size += len(text)
-}
-
-// Update the cell width.
-func (b *Writer) updateWidth() {
- b.cell.width += utf8.RuneCount(b.buf.Bytes()[b.pos:b.buf.Len()])
- b.pos = b.buf.Len()
-}
-
-// To escape a text segment, bracket it with Escape characters.
-// For instance, the tab in this string "Ignore this tab: \xff\t\xff"
-// does not terminate a cell and constitutes a single character of
-// width one for formatting purposes.
-//
-// The value 0xff was chosen because it cannot appear in a valid UTF-8 sequence.
-//
-const Escape = '\xff'
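
A small sketch of the Escape mechanism, assuming the same Escape constant and StripEscape flag under text/tabwriter: the tab between the Escape bytes stays inside one cell instead of terminating it.

package main

import (
	"fmt"
	"os"
	"text/tabwriter"
)

func main() {
	// StripEscape removes the Escape bytes from the output; without it they
	// are passed through unchanged.
	w := tabwriter.NewWriter(os.Stdout, 0, 8, 1, '.', tabwriter.StripEscape)
	// The tab between the Escape bytes is part of the cell, not a terminator,
	// and the escaped segment counts as three runes for alignment.
	fmt.Fprint(w, "key\t\xffa\tb\xff\tvalue\n")
	fmt.Fprint(w, "longer key\tcell\tvalue\n")
	w.Flush()
}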
-
-// Start escaped mode.
-func (b *Writer) startEscape(ch byte) {
- switch ch {
- case Escape:
- b.endChar = Escape
- case '<':
- b.endChar = '>'
- case '&':
- b.endChar = ';'
- }
-}
-
-// Terminate escaped mode. If the escaped text was an HTML tag, its width
-// is assumed to be zero for formatting purposes; if it was an HTML entity,
-// its width is assumed to be one. In all other cases, the width is the
-// unicode width of the text.
-//
-func (b *Writer) endEscape() {
- switch b.endChar {
- case Escape:
- b.updateWidth()
- if b.flags&StripEscape == 0 {
- b.cell.width -= 2 // don't count the Escape chars
- }
- case '>': // tag of zero width
- case ';':
- b.cell.width++ // entity, count as one rune
- }
- b.pos = b.buf.Len()
- b.endChar = 0
-}
-
-// Terminate the current cell by adding it to the list of cells of the
-// current line. Returns the number of cells in that line.
-//
-func (b *Writer) terminateCell(htab bool) int {
- b.cell.htab = htab
- line := &b.lines[len(b.lines)-1]
- *line = append(*line, b.cell)
- b.cell = cell{}
- return len(*line)
-}
-
-func handlePanic(err *error) {
- if e := recover(); e != nil {
- *err = e.(osError).err // re-panics if it's not a local osError
- }
-}
-
-// Flush should be called after the last call to Write to ensure
-// that any data buffered in the Writer is written to output. Any
-// incomplete escape sequence at the end is simply considered
-// complete for formatting purposes.
-//
-func (b *Writer) Flush() (err error) {
- defer b.reset() // even in the presence of errors
- defer handlePanic(&err)
-
- // add current cell if not empty
- if b.cell.size > 0 {
- if b.endChar != 0 {
- // inside escape - terminate it even if incomplete
- b.endEscape()
- }
- b.terminateCell(false)
- }
-
- // format contents of buffer
- b.format(0, 0, len(b.lines))
-
- return
-}
-
-var hbar = []byte("---\n")
-
-// Write writes buf to the writer b.
-// The only errors returned are ones encountered
-// while writing to the underlying output stream.
-//
-func (b *Writer) Write(buf []byte) (n int, err error) {
- defer handlePanic(&err)
-
- // split text into cells
- n = 0
- for i, ch := range buf {
- if b.endChar == 0 {
- // outside escape
- switch ch {
- case '\t', '\v', '\n', '\f':
- // end of cell
- b.append(buf[n:i])
- b.updateWidth()
- n = i + 1 // ch consumed
- ncells := b.terminateCell(ch == '\t')
- if ch == '\n' || ch == '\f' {
- // terminate line
- b.addLine()
- if ch == '\f' || ncells == 1 {
- // A '\f' always forces a flush. Otherwise, the previous
- // line has only one cell, which has no impact on the
- // formatting of the following lines (the last cell per
- // line is ignored by format()), so we can flush the
- // Writer contents.
- if err = b.Flush(); err != nil {
- return
- }
- if ch == '\f' && b.flags&Debug != 0 {
- // indicate section break
- b.write0(hbar)
- }
- }
- }
-
- case Escape:
- // start of escaped sequence
- b.append(buf[n:i])
- b.updateWidth()
- n = i
- if b.flags&StripEscape != 0 {
- n++ // strip Escape
- }
- b.startEscape(Escape)
-
- case '<', '&':
- // possibly an html tag/entity
- if b.flags&FilterHTML != 0 {
- // begin of tag/entity
- b.append(buf[n:i])
- b.updateWidth()
- n = i
- b.startEscape(ch)
- }
- }
-
- } else {
- // inside escape
- if ch == b.endChar {
- // end of tag/entity
- j := i + 1
- if ch == Escape && b.flags&StripEscape != 0 {
- j = i // strip Escape
- }
- b.append(buf[n:j])
- n = i + 1 // ch consumed
- b.endEscape()
- }
- }
- }
-
- // append leftover text
- b.append(buf[n:])
- n = len(buf)
- return
-}
-
-// NewWriter allocates and initializes a new tabwriter.Writer.
-// The parameters are the same as for the Init function.
-//
-func NewWriter(output io.Writer, minwidth, tabwidth, padding int, padchar byte, flags uint) *Writer {
- return new(Writer).Init(output, minwidth, tabwidth, padding, padchar, flags)
-}
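
A companion sketch combining NewWriter with the AlignRight and Debug flags defined above, again assuming the text/tabwriter import path:

package main

import (
	"fmt"
	"os"
	"text/tabwriter"
)

func main() {
	// minwidth 5, tabwidth 0, padding 1, '.' padding; cells are right-aligned
	// and the Debug flag draws a '|' between columns.
	w := tabwriter.NewWriter(os.Stdout, 5, 0, 1, '.', tabwriter.AlignRight|tabwriter.Debug)
	fmt.Fprintln(w, "1\t22\t333\t")
	fmt.Fprintln(w, "4444\t55555\t6\t")
	w.Flush()
}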
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package tabwriter
-
-import (
- "io"
- "testing"
-)
-
-type buffer struct {
- a []byte
-}
-
-func (b *buffer) init(n int) { b.a = make([]byte, n)[0:0] }
-
-func (b *buffer) clear() { b.a = b.a[0:0] }
-
-func (b *buffer) Write(buf []byte) (written int, err error) {
- n := len(b.a)
- m := len(buf)
- if n+m <= cap(b.a) {
- b.a = b.a[0 : n+m]
- for i := 0; i < m; i++ {
- b.a[n+i] = buf[i]
- }
- } else {
- panic("buffer.Write: buffer too small")
- }
- return len(buf), nil
-}
-
-func (b *buffer) String() string { return string(b.a) }
-
-func write(t *testing.T, testname string, w *Writer, src string) {
- written, err := io.WriteString(w, src)
- if err != nil {
- t.Errorf("--- test: %s\n--- src:\n%q\n--- write error: %v\n", testname, src, err)
- }
- if written != len(src) {
- t.Errorf("--- test: %s\n--- src:\n%q\n--- written = %d, len(src) = %d\n", testname, src, written, len(src))
- }
-}
-
-func verify(t *testing.T, testname string, w *Writer, b *buffer, src, expected string) {
- err := w.Flush()
- if err != nil {
- t.Errorf("--- test: %s\n--- src:\n%q\n--- flush error: %v\n", testname, src, err)
- }
-
- res := b.String()
- if res != expected {
- t.Errorf("--- test: %s\n--- src:\n%q\n--- found:\n%q\n--- expected:\n%q\n", testname, src, res, expected)
- }
-}
-
-func check(t *testing.T, testname string, minwidth, tabwidth, padding int, padchar byte, flags uint, src, expected string) {
- var b buffer
- b.init(1000)
-
- var w Writer
- w.Init(&b, minwidth, tabwidth, padding, padchar, flags)
-
- // write all at once
- title := testname + " (written all at once)"
- b.clear()
- write(t, title, &w, src)
- verify(t, title, &w, &b, src, expected)
-
- // write byte-by-byte
- title = testname + " (written byte-by-byte)"
- b.clear()
- for i := 0; i < len(src); i++ {
- write(t, title, &w, src[i:i+1])
- }
- verify(t, title, &w, &b, src, expected)
-
- // write using Fibonacci slice sizes
- title = testname + " (written in fibonacci slices)"
- b.clear()
- for i, d := 0, 0; i < len(src); {
- write(t, title, &w, src[i:i+d])
- i, d = i+d, d+1
- if i+d > len(src) {
- d = len(src) - i
- }
- }
- verify(t, title, &w, &b, src, expected)
-}
-
-var tests = []struct {
- testname string
- minwidth, tabwidth, padding int
- padchar byte
- flags uint
- src, expected string
-}{
- {
- "1a",
- 8, 0, 1, '.', 0,
- "",
- "",
- },
-
- {
- "1a debug",
- 8, 0, 1, '.', Debug,
- "",
- "",
- },
-
- {
- "1b esc stripped",
- 8, 0, 1, '.', StripEscape,
- "\xff\xff",
- "",
- },
-
- {
- "1b esc",
- 8, 0, 1, '.', 0,
- "\xff\xff",
- "\xff\xff",
- },
-
- {
- "1c esc stripped",
- 8, 0, 1, '.', StripEscape,
- "\xff\t\xff",
- "\t",
- },
-
- {
- "1c esc",
- 8, 0, 1, '.', 0,
- "\xff\t\xff",
- "\xff\t\xff",
- },
-
- {
- "1d esc stripped",
- 8, 0, 1, '.', StripEscape,
- "\xff\"foo\t\n\tbar\"\xff",
- "\"foo\t\n\tbar\"",
- },
-
- {
- "1d esc",
- 8, 0, 1, '.', 0,
- "\xff\"foo\t\n\tbar\"\xff",
- "\xff\"foo\t\n\tbar\"\xff",
- },
-
- {
- "1e esc stripped",
- 8, 0, 1, '.', StripEscape,
- "abc\xff\tdef", // unterminated escape
- "abc\tdef",
- },
-
- {
- "1e esc",
- 8, 0, 1, '.', 0,
- "abc\xff\tdef", // unterminated escape
- "abc\xff\tdef",
- },
-
- {
- "2",
- 8, 0, 1, '.', 0,
- "\n\n\n",
- "\n\n\n",
- },
-
- {
- "3",
- 8, 0, 1, '.', 0,
- "a\nb\nc",
- "a\nb\nc",
- },
-
- {
- "4a",
- 8, 0, 1, '.', 0,
- "\t", // '\t' terminates an empty cell on last line - nothing to print
- "",
- },
-
- {
- "4b",
- 8, 0, 1, '.', AlignRight,
- "\t", // '\t' terminates an empty cell on last line - nothing to print
- "",
- },
-
- {
- "5",
- 8, 0, 1, '.', 0,
- "*\t*",
- "*.......*",
- },
-
- {
- "5b",
- 8, 0, 1, '.', 0,
- "*\t*\n",
- "*.......*\n",
- },
-
- {
- "5c",
- 8, 0, 1, '.', 0,
- "*\t*\t",
- "*.......*",
- },
-
- {
- "5c debug",
- 8, 0, 1, '.', Debug,
- "*\t*\t",
- "*.......|*",
- },
-
- {
- "5d",
- 8, 0, 1, '.', AlignRight,
- "*\t*\t",
- ".......**",
- },
-
- {
- "6",
- 8, 0, 1, '.', 0,
- "\t\n",
- "........\n",
- },
-
- {
- "7a",
- 8, 0, 1, '.', 0,
- "a) foo",
- "a) foo",
- },
-
- {
- "7b",
- 8, 0, 1, ' ', 0,
- "b) foo\tbar",
- "b) foo bar",
- },
-
- {
- "7c",
- 8, 0, 1, '.', 0,
- "c) foo\tbar\t",
- "c) foo..bar",
- },
-
- {
- "7d",
- 8, 0, 1, '.', 0,
- "d) foo\tbar\n",
- "d) foo..bar\n",
- },
-
- {
- "7e",
- 8, 0, 1, '.', 0,
- "e) foo\tbar\t\n",
- "e) foo..bar.....\n",
- },
-
- {
- "7f",
- 8, 0, 1, '.', FilterHTML,
- "f) f<o\t<b>bar</b>\t\n",
- "f) f<o..<b>bar</b>.....\n",
- },
-
- {
- "7g",
- 8, 0, 1, '.', FilterHTML,
- "g) f<o\t<b>bar</b>\t non-terminated entity &",
- "g) f<o..<b>bar</b>..... non-terminated entity &",
- },
-
- {
- "7g debug",
- 8, 0, 1, '.', FilterHTML | Debug,
- "g) f<o\t<b>bar</b>\t non-terminated entity &",
- "g) f<o..|<b>bar</b>.....| non-terminated entity &",
- },
-
- {
- "8",
- 8, 0, 1, '*', 0,
- "Hello, world!\n",
- "Hello, world!\n",
- },
-
- {
- "9a",
- 1, 0, 0, '.', 0,
- "1\t2\t3\t4\n" +
- "11\t222\t3333\t44444\n",
-
- "1.2..3...4\n" +
- "11222333344444\n",
- },
-
- {
- "9b",
- 1, 0, 0, '.', FilterHTML,
- "1\t2<!---\f--->\t3\t4\n" + // \f inside HTML is ignored
- "11\t222\t3333\t44444\n",
-
- "1.2<!---\f--->..3...4\n" +
- "11222333344444\n",
- },
-
- {
- "9c",
- 1, 0, 0, '.', 0,
- "1\t2\t3\t4\f" + // \f causes a newline and flush
- "11\t222\t3333\t44444\n",
-
- "1234\n" +
- "11222333344444\n",
- },
-
- {
- "9c debug",
- 1, 0, 0, '.', Debug,
- "1\t2\t3\t4\f" + // \f causes a newline and flush
- "11\t222\t3333\t44444\n",
-
- "1|2|3|4\n" +
- "---\n" +
- "11|222|3333|44444\n",
- },
-
- {
- "10a",
- 5, 0, 0, '.', 0,
- "1\t2\t3\t4\n",
- "1....2....3....4\n",
- },
-
- {
- "10b",
- 5, 0, 0, '.', 0,
- "1\t2\t3\t4\t\n",
- "1....2....3....4....\n",
- },
-
- {
- "11",
- 8, 0, 1, '.', 0,
- "本\tb\tc\n" +
- "aa\t\u672c\u672c\u672c\tcccc\tddddd\n" +
- "aaa\tbbbb\n",
-
- "本.......b.......c\n" +
- "aa......本本本.....cccc....ddddd\n" +
- "aaa.....bbbb\n",
- },
-
- {
- "12a",
- 8, 0, 1, ' ', AlignRight,
- "a\tè\tc\t\n" +
- "aa\tèèè\tcccc\tddddd\t\n" +
- "aaa\tèèèè\t\n",
-
- " a è c\n" +
- " aa èèè cccc ddddd\n" +
- " aaa èèèè\n",
- },
-
- {
- "12b",
- 2, 0, 0, ' ', 0,
- "a\tb\tc\n" +
- "aa\tbbb\tcccc\n" +
- "aaa\tbbbb\n",
-
- "a b c\n" +
- "aa bbbcccc\n" +
- "aaabbbb\n",
- },
-
- {
- "12c",
- 8, 0, 1, '_', 0,
- "a\tb\tc\n" +
- "aa\tbbb\tcccc\n" +
- "aaa\tbbbb\n",
-
- "a_______b_______c\n" +
- "aa______bbb_____cccc\n" +
- "aaa_____bbbb\n",
- },
-
- {
- "13a",
- 4, 0, 1, '-', 0,
- "4444\t日本語\t22\t1\t333\n" +
- "999999999\t22\n" +
- "7\t22\n" +
- "\t\t\t88888888\n" +
- "\n" +
- "666666\t666666\t666666\t4444\n" +
- "1\t1\t999999999\t0000000000\n",
-
- "4444------日本語-22--1---333\n" +
- "999999999-22\n" +
- "7---------22\n" +
- "------------------88888888\n" +
- "\n" +
- "666666-666666-666666----4444\n" +
- "1------1------999999999-0000000000\n",
- },
-
- {
- "13b",
- 4, 0, 3, '.', 0,
- "4444\t333\t22\t1\t333\n" +
- "999999999\t22\n" +
- "7\t22\n" +
- "\t\t\t88888888\n" +
- "\n" +
- "666666\t666666\t666666\t4444\n" +
- "1\t1\t999999999\t0000000000\n",
-
- "4444........333...22...1...333\n" +
- "999999999...22\n" +
- "7...........22\n" +
- "....................88888888\n" +
- "\n" +
- "666666...666666...666666......4444\n" +
- "1........1........999999999...0000000000\n",
- },
-
- {
- "13c",
- 8, 8, 1, '\t', FilterHTML,
- "4444\t333\t22\t1\t333\n" +
- "999999999\t22\n" +
- "7\t22\n" +
- "\t\t\t88888888\n" +
- "\n" +
- "666666\t666666\t666666\t4444\n" +
- "1\t1\t<font color=red attr=日本語>999999999</font>\t0000000000\n",
-
- "4444\t\t333\t22\t1\t333\n" +
- "999999999\t22\n" +
- "7\t\t22\n" +
- "\t\t\t\t88888888\n" +
- "\n" +
- "666666\t666666\t666666\t\t4444\n" +
- "1\t1\t<font color=red attr=日本語>999999999</font>\t0000000000\n",
- },
-
- {
- "14",
- 1, 0, 2, ' ', AlignRight,
- ".0\t.3\t2.4\t-5.1\t\n" +
- "23.0\t12345678.9\t2.4\t-989.4\t\n" +
- "5.1\t12.0\t2.4\t-7.0\t\n" +
- ".0\t0.0\t332.0\t8908.0\t\n" +
- ".0\t-.3\t456.4\t22.1\t\n" +
- ".0\t1.2\t44.4\t-13.3\t\t",
-
- " .0 .3 2.4 -5.1\n" +
- " 23.0 12345678.9 2.4 -989.4\n" +
- " 5.1 12.0 2.4 -7.0\n" +
- " .0 0.0 332.0 8908.0\n" +
- " .0 -.3 456.4 22.1\n" +
- " .0 1.2 44.4 -13.3",
- },
-
- {
- "14 debug",
- 1, 0, 2, ' ', AlignRight | Debug,
- ".0\t.3\t2.4\t-5.1\t\n" +
- "23.0\t12345678.9\t2.4\t-989.4\t\n" +
- "5.1\t12.0\t2.4\t-7.0\t\n" +
- ".0\t0.0\t332.0\t8908.0\t\n" +
- ".0\t-.3\t456.4\t22.1\t\n" +
- ".0\t1.2\t44.4\t-13.3\t\t",
-
- " .0| .3| 2.4| -5.1|\n" +
- " 23.0| 12345678.9| 2.4| -989.4|\n" +
- " 5.1| 12.0| 2.4| -7.0|\n" +
- " .0| 0.0| 332.0| 8908.0|\n" +
- " .0| -.3| 456.4| 22.1|\n" +
- " .0| 1.2| 44.4| -13.3|",
- },
-
- {
- "15a",
- 4, 0, 0, '.', 0,
- "a\t\tb",
- "a.......b",
- },
-
- {
- "15b",
- 4, 0, 0, '.', DiscardEmptyColumns,
- "a\t\tb", // htabs - do not discard column
- "a.......b",
- },
-
- {
- "15c",
- 4, 0, 0, '.', DiscardEmptyColumns,
- "a\v\vb",
- "a...b",
- },
-
- {
- "15d",
- 4, 0, 0, '.', AlignRight | DiscardEmptyColumns,
- "a\v\vb",
- "...ab",
- },
-
- {
- "16a",
- 100, 100, 0, '\t', 0,
- "a\tb\t\td\n" +
- "a\tb\t\td\te\n" +
- "a\n" +
- "a\tb\tc\td\n" +
- "a\tb\tc\td\te\n",
-
- "a\tb\t\td\n" +
- "a\tb\t\td\te\n" +
- "a\n" +
- "a\tb\tc\td\n" +
- "a\tb\tc\td\te\n",
- },
-
- {
- "16b",
- 100, 100, 0, '\t', DiscardEmptyColumns,
- "a\vb\v\vd\n" +
- "a\vb\v\vd\ve\n" +
- "a\n" +
- "a\vb\vc\vd\n" +
- "a\vb\vc\vd\ve\n",
-
- "a\tb\td\n" +
- "a\tb\td\te\n" +
- "a\n" +
- "a\tb\tc\td\n" +
- "a\tb\tc\td\te\n",
- },
-
- {
- "16b debug",
- 100, 100, 0, '\t', DiscardEmptyColumns | Debug,
- "a\vb\v\vd\n" +
- "a\vb\v\vd\ve\n" +
- "a\n" +
- "a\vb\vc\vd\n" +
- "a\vb\vc\vd\ve\n",
-
- "a\t|b\t||d\n" +
- "a\t|b\t||d\t|e\n" +
- "a\n" +
- "a\t|b\t|c\t|d\n" +
- "a\t|b\t|c\t|d\t|e\n",
- },
-
- {
- "16c",
- 100, 100, 0, '\t', DiscardEmptyColumns,
- "a\tb\t\td\n" + // hard tabs - do not discard column
- "a\tb\t\td\te\n" +
- "a\n" +
- "a\tb\tc\td\n" +
- "a\tb\tc\td\te\n",
-
- "a\tb\t\td\n" +
- "a\tb\t\td\te\n" +
- "a\n" +
- "a\tb\tc\td\n" +
- "a\tb\tc\td\te\n",
- },
-
- {
- "16c debug",
- 100, 100, 0, '\t', DiscardEmptyColumns | Debug,
- "a\tb\t\td\n" + // hard tabs - do not discard column
- "a\tb\t\td\te\n" +
- "a\n" +
- "a\tb\tc\td\n" +
- "a\tb\tc\td\te\n",
-
- "a\t|b\t|\t|d\n" +
- "a\t|b\t|\t|d\t|e\n" +
- "a\n" +
- "a\t|b\t|c\t|d\n" +
- "a\t|b\t|c\t|d\t|e\n",
- },
-}
-
-func Test(t *testing.T) {
- for _, e := range tests {
- check(t, e.testname, e.minwidth, e.tabwidth, e.padding, e.padchar, e.flags, e.src, e.expected)
- }
-}
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-/*
-Package template implements data-driven templates for generating textual output
-such as HTML.
-
-Templates are executed by applying them to a data structure. Annotations in the
-template refer to elements of the data structure (typically a field of a struct
-or a key in a map) to control execution and derive values to be displayed.
-Execution of the template walks the structure and sets the cursor, represented
-by a period '.' and called "dot", to the value at the current location in the
-structure as execution proceeds.
-
-The input text for a template is UTF-8-encoded text in any format.
-"Actions"--data evaluations or control structures--are delimited by
-"{{" and "}}"; all text outside actions is copied to the output unchanged.
-Actions may not span newlines, although comments can.
-
-Once constructed, templates and template sets can be executed safely in
-parallel.
-
-Actions
-
-Here is the list of actions. "Arguments" and "pipelines" are evaluations of
-data, defined in detail below.
-
-*/
-// {{/* a comment */}}
-// A comment; discarded. May contain newlines.
-// Comments do not nest.
-/*
-
- {{pipeline}}
- The default textual representation of the value of the pipeline
- is copied to the output.
-
- {{if pipeline}} T1 {{end}}
- If the value of the pipeline is empty, no output is generated;
- otherwise, T1 is executed. The empty values are false, 0, any
- nil pointer or interface value, and any array, slice, map, or
- string of length zero.
- Dot is unaffected.
-
- {{if pipeline}} T1 {{else}} T0 {{end}}
- If the value of the pipeline is empty, T0 is executed;
- otherwise, T1 is executed. Dot is unaffected.
-
- {{range pipeline}} T1 {{end}}
- The value of the pipeline must be an array, slice, or map. If
- the value of the pipeline has length zero, nothing is output;
- otherwise, dot is set to the successive elements of the array,
- slice, or map and T1 is executed.
-
- {{range pipeline}} T1 {{else}} T0 {{end}}
- The value of the pipeline must be an array, slice, or map. If
- the value of the pipeline has length zero, dot is unaffected and
- T0 is executed; otherwise, dot is set to the successive elements
- of the array, slice, or map and T1 is executed.
-
- {{template "name"}}
- The template with the specified name is executed with nil data.
-
- {{template "name" pipeline}}
- The template with the specified name is executed with dot set
- to the value of the pipeline.
-
- {{with pipeline}} T1 {{end}}
- If the value of the pipeline is empty, no output is generated;
- otherwise, dot is set to the value of the pipeline and T1 is
- executed.
-
- {{with pipeline}} T1 {{else}} T0 {{end}}
- If the value of the pipeline is empty, dot is unaffected and T0
- is executed; otherwise, dot is set to the value of the pipeline
- and T1 is executed.
-
-Arguments
-
-An argument is a simple value, denoted by one of the following.
-
- - A boolean, string, character, integer, floating-point, imaginary
- or complex constant in Go syntax. These behave like Go's untyped
- constants, although raw strings may not span newlines.
- - The character '.' (period):
- .
- The result is the value of dot.
- - A variable name, which is a (possibly empty) alphanumeric string
- preceded by a dollar sign, such as
- $piOver2
- or
- $
- The result is the value of the variable.
- Variables are described below.
- - The name of a field of the data, which must be a struct, preceded
- by a period, such as
- .Field
- The result is the value of the field. Field invocations may be
- chained:
- .Field1.Field2
- Fields can also be evaluated on variables, including chaining:
- $x.Field1.Field2
- - The name of a key of the data, which must be a map, preceded
- by a period, such as
- .Key
- The result is the map element value indexed by the key.
- Key invocations may be chained and combined with fields to any
- depth:
- .Field1.Key1.Field2.Key2
- Although the key must be an alphanumeric identifier, unlike with
- field names it does not need to start with an upper case letter.
- Keys can also be evaluated on variables, including chaining:
- $x.key1.key2
- - The name of a niladic method of the data, preceded by a period,
- such as
- .Method
- The result is the value of invoking the method with dot as the
- receiver, dot.Method(). Such a method must have one return value (of
- any type) or two return values, the second of which is an error.
- If it has two and the returned error is non-nil, execution terminates
- and an error is returned to the caller as the value of Execute.
- Method invocations may be chained and combined with fields and keys
- to any depth:
- .Field1.Key1.Method1.Field2.Key2.Method2
- Methods can also be evaluated on variables, including chaining:
- $x.Method1.Field
- - The name of a niladic function, such as
- fun
- The result is the value of invoking the function, fun(). The return
- types and values behave as in methods. Functions and function
- names are described below.
-
-Arguments may evaluate to any type; if they are pointers the implementation
-automatically indirects to the base type when required.
-
-A pipeline is a possibly chained sequence of "commands". A command is a simple
-value (argument) or a function or method call, possibly with multiple arguments:
-
- Argument
- The result is the value of evaluating the argument.
- .Method [Argument...]
- The method can be alone or the last element of a chain but,
- unlike methods in the middle of a chain, it can take arguments.
- The result is the value of calling the method with the
- arguments:
- dot.Method(Argument1, etc.)
- functionName [Argument...]
- The result is the value of calling the function associated
- with the name:
- function(Argument1, etc.)
- Functions and function names are described below.
-
-Pipelines
-
-A pipeline may be "chained" by separating a sequence of commands with pipeline
-characters '|'. In a chained pipeline, the result of each command is
-passed as the last argument of the following command. The output of the final
-command in the pipeline is the value of the pipeline.
-
-The output of a command will be either one value or two values, the second of
-which has type error. If that second value is present and evaluates to
-non-nil, execution terminates and the error is returned to the caller of
-Execute.
-
-Variables
-
-A pipeline inside an action may initialize a variable to capture the result.
-The initialization has syntax
-
- $variable := pipeline
-
-where $variable is the name of the variable. An action that declares a
-variable produces no output.
-
-If a "range" action initializes a variable, the variable is set to the
-successive elements of the iteration. Also, a "range" may declare two
-variables, separated by a comma:
-
- $index, $element := pipeline
-
-in which case $index and $element are set to the successive values of the
-array/slice index or map key and element, respectively. Note that if there is
-only one variable, it is assigned the element; this is opposite to the
-convention in Go range clauses.
-
-A variable's scope extends to the "end" action of the control structure ("if",
-"with", or "range") in which it is declared, or to the end of the template if
-there is no such control structure. A template invocation does not inherit
-variables from the point of its invocation.
-
-When execution begins, $ is set to the data argument passed to Execute, that is,
-to the starting value of dot.
-
-Examples
-
-Here are some example one-line templates demonstrating pipelines and variables.
-All produce the quoted word "output":
-
- {{"\"output\""}}
- A string constant.
- {{`"output"`}}
- A raw string constant.
- {{printf "%q" "output"}}
- A function call.
- {{"output" | printf "%q"}}
- A function call whose final argument comes from the previous
- command.
- {{"put" | printf "%s%s" "out" | printf "%q"}}
- A more elaborate call.
- {{"output" | printf "%s" | printf "%q"}}
- A longer chain.
- {{with "output"}}{{printf "%q" .}}{{end}}
- A with action using dot.
- {{with $x := "output" | printf "%q"}}{{$x}}{{end}}
- A with action that creates and uses a variable.
- {{with $x := "output"}}{{printf "%q" $x}}{{end}}
- A with action that uses the variable in another action.
- {{with $x := "output"}}{{$x | printf "%q"}}{{end}}
- The same, but pipelined.
-
-Functions
-
-During execution functions are found in three function maps: first in the
-template, then in the "template set" (described below), and finally in the
-global function map. By default, no functions are defined in the template or
-the set but the Funcs methods can be used to add them.
-
-Predefined global functions are named as follows.
-
- and
- Returns the boolean AND of its arguments by returning the
- first empty argument or the last argument, that is,
- "and x y" behaves as "if x then y else x". All the
- arguments are evaluated.
- html
- Returns the escaped HTML equivalent of the textual
- representation of its arguments.
- index
- Returns the result of indexing its first argument by the
- following arguments. Thus "index x 1 2 3" is, in Go syntax,
- x[1][2][3]. Each indexed item must be a map, slice, or array.
- js
- Returns the escaped JavaScript equivalent of the textual
- representation of its arguments.
- len
- Returns the integer length of its argument.
- not
- Returns the boolean negation of its single argument.
- or
- Returns the boolean OR of its arguments by returning the
- first non-empty argument or the last argument, that is,
- "or x y" behaves as "if x then x else y". All the
- arguments are evaluated.
- print
- An alias for fmt.Sprint
- printf
- An alias for fmt.Sprintf
- println
- An alias for fmt.Sprintln
- urlquery
- Returns the escaped value of the textual representation of
- its arguments in a form suitable for embedding in a URL query.
-
-The boolean functions take any zero value to be false and a non-zero value to
-be true.
-
-Template sets
-
-Each template is named by a string specified when it is created. A template may
-use a template invocation to instantiate another template directly or by its
-name; see the explanation of the template action above. The name is looked up
-in the template set associated with the template.
-
-If no template invocation actions occur in the template, the issue of template
-sets can be ignored. If it does contain invocations, though, the template
-containing the invocations must be part of a template set in which to look up
-the names.
-
-There are two ways to construct template sets.
-
-The first is to use a Set's Parse method to create a set of named templates from
-a single input defining multiple templates. The syntax of the definitions is to
-surround each template declaration with a define and end action.
-
-The define action names the template being created by providing a string
-constant. Here is a simple example of input to Set.Parse:
-
- `{{define "T1"}} definition of template T1 {{end}}
- {{define "T2"}} definition of template T2 {{end}}
- {{define "T3"}} {{template "T1"}} {{template "T2"}} {{end}}`
-
-This defines two templates, T1 and T2, and a third T3 that invokes the other two
-when it is executed.
-
-The second way to build a template set is to use Set's Add method to add a
-parsed template to a set. A template may be bound to at most one set. If it's
-necessary to have a template in multiple sets, the template definition must be
-parsed multiple times to create distinct *Template values.
-
-Set.Parse may be called multiple times on different inputs to construct the set.
-Two sets may therefore be constructed with a common base set of templates plus,
-through a second Parse call each, specializations for some elements.
-
-A template may be executed directly or through Set.Execute, which executes a
-named template from the set. To invoke our example above, we might write,
-
- err := set.Execute(os.Stdout, "T3", "no data needed")
- if err != nil {
- log.Fatalf("execution failed: %s", err)
- }
-*/
-package template
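
The actions, pipelines, and variables documented above can be exercised through the Template API that replaced the Set-based one; a minimal sketch, assuming text/template and placeholder data:

package main

import (
	"log"
	"os"
	"text/template"
)

func main() {
	const letter = `Dear {{.Name}},
{{if .Attended}}It was a pleasure to see you.{{else}}We missed you.{{end}}
Gifts received:
{{range $i, $gift := .Gifts}}  {{$i}}: {{$gift | printf "%q"}}
{{end}}`

	// Placeholder data; dot is set to this value when Execute runs.
	data := struct {
		Name     string
		Attended bool
		Gifts    []string
	}{
		Name:     "Aunt Mildred",
		Attended: true,
		Gifts:    []string{"bone china", "wool socks"},
	}

	t, err := template.New("letter").Parse(letter)
	if err != nil {
		log.Fatal(err)
	}
	if err := t.Execute(os.Stdout, data); err != nil {
		log.Fatal(err)
	}
}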
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package template
-
-import (
- "fmt"
- "io"
- "reflect"
- "runtime"
- "strings"
- "template/parse"
-)
-
-// state represents the state of an execution. It's not part of the
-// template so that multiple executions of the same template
-// can execute in parallel.
-type state struct {
- tmpl *Template
- wr io.Writer
- line int // line number for errors
- vars []variable // push-down stack of variable values.
-}
-
-// variable holds the dynamic value of a variable such as $, $x etc.
-type variable struct {
- name string
- value reflect.Value
-}
-
-// push pushes a new variable on the stack.
-func (s *state) push(name string, value reflect.Value) {
- s.vars = append(s.vars, variable{name, value})
-}
-
-// mark returns the length of the variable stack.
-func (s *state) mark() int {
- return len(s.vars)
-}
-
-// pop pops the variable stack up to the mark.
-func (s *state) pop(mark int) {
- s.vars = s.vars[0:mark]
-}
-
-// setVar overwrites the top-nth variable on the stack. Used by range iterations.
-func (s *state) setVar(n int, value reflect.Value) {
- s.vars[len(s.vars)-n].value = value
-}
-
-// varValue returns the value of the named variable.
-func (s *state) varValue(name string) reflect.Value {
- for i := s.mark() - 1; i >= 0; i-- {
- if s.vars[i].name == name {
- return s.vars[i].value
- }
- }
- s.errorf("undefined variable: %s", name)
- return zero
-}
-
-var zero reflect.Value
-
-// errorf formats the error and terminates processing.
-func (s *state) errorf(format string, args ...interface{}) {
- format = fmt.Sprintf("template: %s:%d: %s", s.tmpl.Name(), s.line, format)
- panic(fmt.Errorf(format, args...))
-}
-
-// error terminates processing.
-func (s *state) error(err error) {
- s.errorf("%s", err)
-}
-
-// errRecover is the handler that turns panics into returns from the top
-// level of Execute.
-func errRecover(errp *error) {
- e := recover()
- if e != nil {
- if _, ok := e.(runtime.Error); ok {
- panic(e)
- }
- *errp = e.(error)
- }
-}
-
-// Execute applies a parsed template to the specified data object,
-// writing the output to wr.
-func (t *Template) Execute(wr io.Writer, data interface{}) (err error) {
- defer errRecover(&err)
- value := reflect.ValueOf(data)
- state := &state{
- tmpl: t,
- wr: wr,
- line: 1,
- vars: []variable{{"$", value}},
- }
- if t.Tree == nil || t.Root == nil {
- state.errorf("must be parsed before execution")
- }
- state.walk(value, t.Root)
- return
-}
-
-// Walk functions step through the major pieces of the template structure,
-// generating output as they go.
-func (s *state) walk(dot reflect.Value, n parse.Node) {
- switch n := n.(type) {
- case *parse.ActionNode:
- s.line = n.Line
- // Do not pop variables so they persist until next end.
- // Also, if the action declares variables, don't print the result.
- val := s.evalPipeline(dot, n.Pipe)
- if len(n.Pipe.Decl) == 0 {
- s.printValue(n, val)
- }
- case *parse.IfNode:
- s.line = n.Line
- s.walkIfOrWith(parse.NodeIf, dot, n.Pipe, n.List, n.ElseList)
- case *parse.ListNode:
- for _, node := range n.Nodes {
- s.walk(dot, node)
- }
- case *parse.RangeNode:
- s.line = n.Line
- s.walkRange(dot, n)
- case *parse.TemplateNode:
- s.line = n.Line
- s.walkTemplate(dot, n)
- case *parse.TextNode:
- if _, err := s.wr.Write(n.Text); err != nil {
- s.error(err)
- }
- case *parse.WithNode:
- s.line = n.Line
- s.walkIfOrWith(parse.NodeWith, dot, n.Pipe, n.List, n.ElseList)
- default:
- s.errorf("unknown node: %s", n)
- }
-}
-
-// walkIfOrWith walks an 'if' or 'with' node. The two control structures
-// are identical in behavior except that 'with' sets dot.
-func (s *state) walkIfOrWith(typ parse.NodeType, dot reflect.Value, pipe *parse.PipeNode, list, elseList *parse.ListNode) {
- defer s.pop(s.mark())
- val := s.evalPipeline(dot, pipe)
- truth, ok := isTrue(val)
- if !ok {
- s.errorf("if/with can't use %v", val)
- }
- if truth {
- if typ == parse.NodeWith {
- s.walk(val, list)
- } else {
- s.walk(dot, list)
- }
- } else if elseList != nil {
- s.walk(dot, elseList)
- }
-}
-
-// isTrue returns whether the value is 'true', in the sense of not the zero of its type,
-// and whether the value has a meaningful truth value.
-func isTrue(val reflect.Value) (truth, ok bool) {
- if !val.IsValid() {
- // Something like var x interface{}, never set. It's a form of nil.
- return false, true
- }
- switch val.Kind() {
- case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
- truth = val.Len() > 0
- case reflect.Bool:
- truth = val.Bool()
- case reflect.Complex64, reflect.Complex128:
- truth = val.Complex() != 0
- case reflect.Chan, reflect.Func, reflect.Ptr, reflect.Interface:
- truth = !val.IsNil()
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- truth = val.Int() != 0
- case reflect.Float32, reflect.Float64:
- truth = val.Float() != 0
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- truth = val.Uint() != 0
- case reflect.Struct:
- truth = true // Struct values are always true.
- default:
- return
- }
- return truth, true
-}
-
-func (s *state) walkRange(dot reflect.Value, r *parse.RangeNode) {
- defer s.pop(s.mark())
- val, _ := indirect(s.evalPipeline(dot, r.Pipe))
- // mark top of stack before any variables in the body are pushed.
- mark := s.mark()
- oneIteration := func(index, elem reflect.Value) {
- // Set top var (lexically the second if there are two) to the element.
- if len(r.Pipe.Decl) > 0 {
- s.setVar(1, elem)
- }
- // Set next var (lexically the first if there are two) to the index.
- if len(r.Pipe.Decl) > 1 {
- s.setVar(2, index)
- }
- s.walk(elem, r.List)
- s.pop(mark)
- }
- switch val.Kind() {
- case reflect.Array, reflect.Slice:
- if val.Len() == 0 {
- break
- }
- for i := 0; i < val.Len(); i++ {
- oneIteration(reflect.ValueOf(i), val.Index(i))
- }
- return
- case reflect.Map:
- if val.Len() == 0 {
- break
- }
- for _, key := range val.MapKeys() {
- oneIteration(key, val.MapIndex(key))
- }
- return
- case reflect.Chan:
- if val.IsNil() {
- break
- }
- i := 0
- for ; ; i++ {
- elem, ok := val.Recv()
- if !ok {
- break
- }
- oneIteration(reflect.ValueOf(i), elem)
- }
- if i == 0 {
- break
- }
- return
- case reflect.Invalid:
- break // An invalid value is likely a nil map, etc. and acts like an empty map.
- default:
- s.errorf("range can't iterate over %v", val)
- }
- if r.ElseList != nil {
- s.walk(dot, r.ElseList)
- }
-}
-
-func (s *state) walkTemplate(dot reflect.Value, t *parse.TemplateNode) {
- set := s.tmpl.set
- if set == nil {
- s.errorf("no set defined in which to invoke template named %q", t.Name)
- }
- tmpl := set.tmpl[t.Name]
- if tmpl == nil {
- s.errorf("template %q not in set", t.Name)
- }
- // Variables declared by the pipeline persist.
- dot = s.evalPipeline(dot, t.Pipe)
- newState := *s
- newState.tmpl = tmpl
- // No dynamic scoping: template invocations inherit no variables.
- newState.vars = []variable{{"$", dot}}
- newState.walk(dot, tmpl.Root)
-}
-
-// Eval functions evaluate pipelines, commands, and their elements and extract
-// values from the data structure by examining fields, calling methods, and so on.
-// The printing of those values happens only through walk functions.
-
-// evalPipeline returns the value acquired by evaluating a pipeline. If the
-// pipeline has a variable declaration, the variable will be pushed on the
-// stack. Callers should therefore pop the stack after they are finished
-// executing commands depending on the pipeline value.
-func (s *state) evalPipeline(dot reflect.Value, pipe *parse.PipeNode) (value reflect.Value) {
- if pipe == nil {
- return
- }
- for _, cmd := range pipe.Cmds {
- value = s.evalCommand(dot, cmd, value) // previous value is this one's final arg.
- // If the object has type interface{}, dig down one level to the thing inside.
- if value.Kind() == reflect.Interface && value.Type().NumMethod() == 0 {
- value = reflect.ValueOf(value.Interface()) // lovely!
- }
- }
- for _, variable := range pipe.Decl {
- s.push(variable.Ident[0], value)
- }
- return value
-}
-
-func (s *state) notAFunction(args []parse.Node, final reflect.Value) {
- if len(args) > 1 || final.IsValid() {
- s.errorf("can't give argument to non-function %s", args[0])
- }
-}
-
-func (s *state) evalCommand(dot reflect.Value, cmd *parse.CommandNode, final reflect.Value) reflect.Value {
- firstWord := cmd.Args[0]
- switch n := firstWord.(type) {
- case *parse.FieldNode:
- return s.evalFieldNode(dot, n, cmd.Args, final)
- case *parse.IdentifierNode:
- // Must be a function.
- return s.evalFunction(dot, n.Ident, cmd.Args, final)
- case *parse.VariableNode:
- return s.evalVariableNode(dot, n, cmd.Args, final)
- }
- s.notAFunction(cmd.Args, final)
- switch word := firstWord.(type) {
- case *parse.BoolNode:
- return reflect.ValueOf(word.True)
- case *parse.DotNode:
- return dot
- case *parse.NumberNode:
- return s.idealConstant(word)
- case *parse.StringNode:
- return reflect.ValueOf(word.Text)
- }
- s.errorf("can't evaluate command %q", firstWord)
- panic("not reached")
-}
-
-// idealConstant is called to return the value of a number in a context where
-// we don't know the type. In that case, the syntax of the number tells us
-// its type, and we use Go rules to resolve. Note there is no such thing as
-// a uint ideal constant in this situation - the value must be of int type.
-func (s *state) idealConstant(constant *parse.NumberNode) reflect.Value {
- // These are ideal constants but we don't know the type
- // and we have no context. (If it was a method argument,
- // we'd know what we need.) The syntax guides us to some extent.
- switch {
- case constant.IsComplex:
- return reflect.ValueOf(constant.Complex128) // incontrovertible.
- case constant.IsFloat && strings.IndexAny(constant.Text, ".eE") >= 0:
- return reflect.ValueOf(constant.Float64)
- case constant.IsInt:
- n := int(constant.Int64)
- if int64(n) != constant.Int64 {
- s.errorf("%s overflows int", constant.Text)
- }
- return reflect.ValueOf(n)
- case constant.IsUint:
- s.errorf("%s overflows int", constant.Text)
- }
- return zero
-}
-
-func (s *state) evalFieldNode(dot reflect.Value, field *parse.FieldNode, args []parse.Node, final reflect.Value) reflect.Value {
- return s.evalFieldChain(dot, dot, field.Ident, args, final)
-}
-
-func (s *state) evalVariableNode(dot reflect.Value, v *parse.VariableNode, args []parse.Node, final reflect.Value) reflect.Value {
- // $x.Field has $x as the first ident, Field as the second. Eval the var, then the fields.
- value := s.varValue(v.Ident[0])
- if len(v.Ident) == 1 {
- return value
- }
- return s.evalFieldChain(dot, value, v.Ident[1:], args, final)
-}
-
-// evalFieldChain evaluates .X.Y.Z possibly followed by arguments.
-// dot is the environment in which to evaluate arguments, while
-// receiver is the value being walked along the chain.
-func (s *state) evalFieldChain(dot, receiver reflect.Value, ident []string, args []parse.Node, final reflect.Value) reflect.Value {
- n := len(ident)
- for i := 0; i < n-1; i++ {
- receiver = s.evalField(dot, ident[i], nil, zero, receiver)
- }
- // Now if it's a method, it gets the arguments.
- return s.evalField(dot, ident[n-1], args, final, receiver)
-}
-
-func (s *state) evalFunction(dot reflect.Value, name string, args []parse.Node, final reflect.Value) reflect.Value {
- function, ok := findFunction(name, s.tmpl, s.tmpl.set)
- if !ok {
- s.errorf("%q is not a defined function", name)
- }
- return s.evalCall(dot, function, name, args, final)
-}
-
-// evalField evaluates an expression like (.Field) or (.Field arg1 arg2).
-// The 'final' argument represents the return value from the preceding
-// value of the pipeline, if any.
-func (s *state) evalField(dot reflect.Value, fieldName string, args []parse.Node, final, receiver reflect.Value) reflect.Value {
- if !receiver.IsValid() {
- return zero
- }
- typ := receiver.Type()
- receiver, _ = indirect(receiver)
- // Unless it's an interface, need to get to a value of type *T to guarantee
- // we see all methods of T and *T.
- ptr := receiver
- if ptr.Kind() != reflect.Interface && ptr.CanAddr() {
- ptr = ptr.Addr()
- }
- if method, ok := methodByName(ptr, fieldName); ok {
- return s.evalCall(dot, method, fieldName, args, final)
- }
- hasArgs := len(args) > 1 || final.IsValid()
- // It's not a method; is it a field of a struct?
- receiver, isNil := indirect(receiver)
- if receiver.Kind() == reflect.Struct {
- tField, ok := receiver.Type().FieldByName(fieldName)
- if ok {
- field := receiver.FieldByIndex(tField.Index)
- if hasArgs {
- s.errorf("%s is not a method but has arguments", fieldName)
- }
- if tField.PkgPath == "" { // field is exported
- return field
- }
- }
- }
- // If it's a map, attempt to use the field name as a key.
- if receiver.Kind() == reflect.Map {
- nameVal := reflect.ValueOf(fieldName)
- if nameVal.Type().AssignableTo(receiver.Type().Key()) {
- if hasArgs {
- s.errorf("%s is not a method but has arguments", fieldName)
- }
- return receiver.MapIndex(nameVal)
- }
- }
- if isNil {
- s.errorf("nil pointer evaluating %s.%s", typ, fieldName)
- }
- s.errorf("can't evaluate field %s in type %s", fieldName, typ)
- panic("not reached")
-}
-
-// TODO: delete when reflect's own MethodByName is released.
-func methodByName(receiver reflect.Value, name string) (reflect.Value, bool) {
- typ := receiver.Type()
- for i := 0; i < typ.NumMethod(); i++ {
- if typ.Method(i).Name == name {
- return receiver.Method(i), true // This value includes the receiver.
- }
- }
- return zero, false
-}
-
-var (
- osErrorType = reflect.TypeOf((*error)(nil)).Elem()
- fmtStringerType = reflect.TypeOf((*fmt.Stringer)(nil)).Elem()
-)
-
-// evalCall executes a function or method call. If it's a method, fun already has the receiver bound, so
-// it looks just like a function call. The arg list, if non-nil, includes (in the manner of the shell), arg[0]
-// as the function itself.
-func (s *state) evalCall(dot, fun reflect.Value, name string, args []parse.Node, final reflect.Value) reflect.Value {
- if args != nil {
- args = args[1:] // Zeroth arg is function name/node; not passed to function.
- }
- typ := fun.Type()
- numIn := len(args)
- if final.IsValid() {
- numIn++
- }
- numFixed := len(args)
- if typ.IsVariadic() {
- numFixed = typ.NumIn() - 1 // last arg is the variadic one.
- if numIn < numFixed {
- s.errorf("wrong number of args for %s: want at least %d got %d", name, typ.NumIn()-1, len(args))
- }
- } else if numIn < typ.NumIn()-1 || !typ.IsVariadic() && numIn != typ.NumIn() {
- s.errorf("wrong number of args for %s: want %d got %d", name, typ.NumIn(), len(args))
- }
- if !goodFunc(typ) {
- s.errorf("can't handle multiple results from method/function %q", name)
- }
- // Build the arg list.
- argv := make([]reflect.Value, numIn)
- // Args must be evaluated. Fixed args first.
- i := 0
- for ; i < numFixed; i++ {
- argv[i] = s.evalArg(dot, typ.In(i), args[i])
- }
- // Now the ... args.
- if typ.IsVariadic() {
- argType := typ.In(typ.NumIn() - 1).Elem() // Argument is a slice.
- for ; i < len(args); i++ {
- argv[i] = s.evalArg(dot, argType, args[i])
- }
- }
- // Add final value if necessary.
- if final.IsValid() {
- argv[i] = final
- }
- result := fun.Call(argv)
- // If we have an error that is not nil, stop execution and return that error to the caller.
- if len(result) == 2 && !result[1].IsNil() {
- s.errorf("error calling %s: %s", name, result[1].Interface().(error))
- }
- return result[0]
-}
-
-// validateType guarantees that the value is valid and assignable to the type.
-func (s *state) validateType(value reflect.Value, typ reflect.Type) reflect.Value {
- if !value.IsValid() {
- s.errorf("invalid value; expected %s", typ)
- }
- if !value.Type().AssignableTo(typ) {
- // Does one dereference or indirection work? We could do more, as we
- // do with method receivers, but that gets messy and method receivers
- // are much more constrained, so it makes more sense there than here.
- // Besides, one is almost always all you need.
- switch {
- case value.Kind() == reflect.Ptr && value.Type().Elem().AssignableTo(typ):
- value = value.Elem()
- case reflect.PtrTo(value.Type()).AssignableTo(typ) && value.CanAddr():
- value = value.Addr()
- default:
- s.errorf("wrong type for value; expected %s; got %s", typ, value.Type())
- }
- }
- return value
-}
-
-func (s *state) evalArg(dot reflect.Value, typ reflect.Type, n parse.Node) reflect.Value {
- switch arg := n.(type) {
- case *parse.DotNode:
- return s.validateType(dot, typ)
- case *parse.FieldNode:
- return s.validateType(s.evalFieldNode(dot, arg, []parse.Node{n}, zero), typ)
- case *parse.VariableNode:
- return s.validateType(s.evalVariableNode(dot, arg, nil, zero), typ)
- }
- switch typ.Kind() {
- case reflect.Bool:
- return s.evalBool(typ, n)
- case reflect.Complex64, reflect.Complex128:
- return s.evalComplex(typ, n)
- case reflect.Float32, reflect.Float64:
- return s.evalFloat(typ, n)
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return s.evalInteger(typ, n)
- case reflect.Interface:
- if typ.NumMethod() == 0 {
- return s.evalEmptyInterface(dot, n)
- }
- case reflect.String:
- return s.evalString(typ, n)
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- return s.evalUnsignedInteger(typ, n)
- }
- s.errorf("can't handle %s for arg of type %s", n, typ)
- panic("not reached")
-}
-
-func (s *state) evalBool(typ reflect.Type, n parse.Node) reflect.Value {
- if n, ok := n.(*parse.BoolNode); ok {
- value := reflect.New(typ).Elem()
- value.SetBool(n.True)
- return value
- }
- s.errorf("expected bool; found %s", n)
- panic("not reached")
-}
-
-func (s *state) evalString(typ reflect.Type, n parse.Node) reflect.Value {
- if n, ok := n.(*parse.StringNode); ok {
- value := reflect.New(typ).Elem()
- value.SetString(n.Text)
- return value
- }
- s.errorf("expected string; found %s", n)
- panic("not reached")
-}
-
-func (s *state) evalInteger(typ reflect.Type, n parse.Node) reflect.Value {
- if n, ok := n.(*parse.NumberNode); ok && n.IsInt {
- value := reflect.New(typ).Elem()
- value.SetInt(n.Int64)
- return value
- }
- s.errorf("expected integer; found %s", n)
- panic("not reached")
-}
-
-func (s *state) evalUnsignedInteger(typ reflect.Type, n parse.Node) reflect.Value {
- if n, ok := n.(*parse.NumberNode); ok && n.IsUint {
- value := reflect.New(typ).Elem()
- value.SetUint(n.Uint64)
- return value
- }
- s.errorf("expected unsigned integer; found %s", n)
- panic("not reached")
-}
-
-func (s *state) evalFloat(typ reflect.Type, n parse.Node) reflect.Value {
- if n, ok := n.(*parse.NumberNode); ok && n.IsFloat {
- value := reflect.New(typ).Elem()
- value.SetFloat(n.Float64)
- return value
- }
- s.errorf("expected float; found %s", n)
- panic("not reached")
-}
-
-func (s *state) evalComplex(typ reflect.Type, n parse.Node) reflect.Value {
- if n, ok := n.(*parse.NumberNode); ok && n.IsComplex {
- value := reflect.New(typ).Elem()
- value.SetComplex(n.Complex128)
- return value
- }
- s.errorf("expected complex; found %s", n)
- panic("not reached")
-}
-
-func (s *state) evalEmptyInterface(dot reflect.Value, n parse.Node) reflect.Value {
- switch n := n.(type) {
- case *parse.BoolNode:
- return reflect.ValueOf(n.True)
- case *parse.DotNode:
- return dot
- case *parse.FieldNode:
- return s.evalFieldNode(dot, n, nil, zero)
- case *parse.IdentifierNode:
- return s.evalFunction(dot, n.Ident, nil, zero)
- case *parse.NumberNode:
- return s.idealConstant(n)
- case *parse.StringNode:
- return reflect.ValueOf(n.Text)
- case *parse.VariableNode:
- return s.evalVariableNode(dot, n, nil, zero)
- }
- s.errorf("can't handle assignment of %s to empty interface argument", n)
- panic("not reached")
-}
-
-// indirect returns the item at the end of indirection, and a bool to indicate if it's nil.
-// We indirect through pointers and empty interfaces (only) because
-// non-empty interfaces have methods we might need.
-func indirect(v reflect.Value) (rv reflect.Value, isNil bool) {
- for ; v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface; v = v.Elem() {
- if v.IsNil() {
- return v, true
- }
- if v.Kind() == reflect.Interface && v.NumMethod() > 0 {
- break
- }
- }
- return v, false
-}
-
-// printValue writes the textual representation of the value to the output of
-// the template.
-func (s *state) printValue(n parse.Node, v reflect.Value) {
- if v.Kind() == reflect.Ptr {
- v, _ = indirect(v) // fmt.Fprint handles nil.
- }
- if !v.IsValid() {
- fmt.Fprint(s.wr, "<no value>")
- return
- }
-
- if !v.Type().Implements(fmtStringerType) {
- if v.CanAddr() && reflect.PtrTo(v.Type()).Implements(fmtStringerType) {
- v = v.Addr()
- } else {
- switch v.Kind() {
- case reflect.Chan, reflect.Func:
- s.errorf("can't print %s of type %s", n, v.Type())
- }
- }
- }
- fmt.Fprint(s.wr, v.Interface())
-}
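For orientation while reading the removed execution engine above, here is a minimal sketch (not part of the patch) of how it was typically driven through the pre-Go 1 API defined by the files deleted in this change; the import path "template", the template text, and the data are illustrative assumptions only.

package main

import (
	"os"
	"template" // old import path used by the files removed in this change
)

func main() {
	// New/Parse build the parse tree; Execute walks it with dot set to data.
	t, err := template.New("greet").Parse("Hello, {{.Name}}!\n")
	if err != nil {
		panic(err)
	}
	data := struct{ Name string }{Name: "gopher"}
	if err := t.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
}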
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package template
-
-import (
- "bytes"
- "flag"
- "fmt"
- "os"
- "reflect"
- "sort"
- "strings"
- "testing"
-)
-
-var debug = flag.Bool("debug", false, "show the errors produced by the tests")
-
-// T has lots of interesting pieces to use to test execution.
-type T struct {
- // Basics
- True bool
- I int
- U16 uint16
- X string
- FloatZero float64
- ComplexZero float64
- // Nested structs.
- U *U
- // Struct with String method.
- V0 V
- V1, V2 *V
- // Slices
- SI []int
- SIEmpty []int
- SB []bool
- // Maps
- MSI map[string]int
- MSIone map[string]int // one element, for deterministic output
- MSIEmpty map[string]int
- MXI map[interface{}]int
- MII map[int]int
- SMSI []map[string]int
- // Empty interfaces; used to see if we can dig inside one.
- Empty0 interface{} // nil
- Empty1 interface{}
- Empty2 interface{}
- Empty3 interface{}
- Empty4 interface{}
- // Non-empty interface.
- NonEmptyInterface I
- // Stringer.
- Str fmt.Stringer
- // Pointers
- PI *int
- PSI *[]int
- NIL *int
- // Template to test evaluation of templates.
- Tmpl *Template
-}
-
-type U struct {
- V string
-}
-
-type V struct {
- j int
-}
-
-func (v *V) String() string {
- if v == nil {
- return "nilV"
- }
- return fmt.Sprintf("<%d>", v.j)
-}
-
-var tVal = &T{
- True: true,
- I: 17,
- U16: 16,
- X: "x",
- U: &U{"v"},
- V0: V{6666},
- V1: &V{7777}, // leave V2 as nil
- SI: []int{3, 4, 5},
- SB: []bool{true, false},
- MSI: map[string]int{"one": 1, "two": 2, "three": 3},
- MSIone: map[string]int{"one": 1},
- MXI: map[interface{}]int{"one": 1},
- MII: map[int]int{1: 1},
- SMSI: []map[string]int{
- {"one": 1, "two": 2},
- {"eleven": 11, "twelve": 12},
- },
- Empty1: 3,
- Empty2: "empty2",
- Empty3: []int{7, 8},
- Empty4: &U{"UinEmpty"},
- NonEmptyInterface: new(T),
- Str: bytes.NewBuffer([]byte("foozle")),
- PI: newInt(23),
- PSI: newIntSlice(21, 22, 23),
- Tmpl: Must(New("x").Parse("test template")), // "x" is the value of .X
-}
-
-// A non-empty interface.
-type I interface {
- Method0() string
-}
-
-var iVal I = tVal
-
-// Helpers for creation.
-func newInt(n int) *int {
- p := new(int)
- *p = n
- return p
-}
-
-func newIntSlice(n ...int) *[]int {
- p := new([]int)
- *p = make([]int, len(n))
- copy(*p, n)
- return p
-}
-
-// Simple methods with and without arguments.
-func (t *T) Method0() string {
- return "M0"
-}
-
-func (t *T) Method1(a int) int {
- return a
-}
-
-func (t *T) Method2(a uint16, b string) string {
- return fmt.Sprintf("Method2: %d %s", a, b)
-}
-
-func (t *T) MAdd(a int, b []int) []int {
- v := make([]int, len(b))
- for i, x := range b {
- v[i] = x + a
- }
- return v
-}
-
-// MSort is used to sort map keys for stable output. (Nice trick!)
-func (t *T) MSort(m map[string]int) []string {
- keys := make([]string, len(m))
- i := 0
- for k := range m {
- keys[i] = k
- i++
- }
- sort.Strings(keys)
- return keys
-}
-
-// EPERM returns a value and an error according to its argument.
-func (t *T) EPERM(error bool) (bool, error) {
- if error {
- return true, os.EPERM
- }
- return false, nil
-}
-
-// A few methods to test chaining.
-func (t *T) GetU() *U {
- return t.U
-}
-
-func (u *U) TrueFalse(b bool) string {
- if b {
- return "true"
- }
- return ""
-}
-
-func typeOf(arg interface{}) string {
- return fmt.Sprintf("%T", arg)
-}
-
-type execTest struct {
- name string
- input string
- output string
- data interface{}
- ok bool
-}
-
- // bigInt and bigUint are hex strings representing numbers on either side
- // of the max int boundary.
-// We do it this way so the test doesn't depend on ints being 32 bits.
-var (
- bigInt = fmt.Sprintf("0x%x", int(1<<uint(reflect.TypeOf(0).Bits()-1)-1))
- bigUint = fmt.Sprintf("0x%x", uint(1<<uint(reflect.TypeOf(0).Bits()-1)))
-)
-
-var execTests = []execTest{
- // Trivial cases.
- {"empty", "", "", nil, true},
- {"text", "some text", "some text", nil, true},
-
- // Ideal constants.
- {"ideal int", "{{typeOf 3}}", "int", 0, true},
- {"ideal float", "{{typeOf 1.0}}", "float64", 0, true},
- {"ideal exp float", "{{typeOf 1e1}}", "float64", 0, true},
- {"ideal complex", "{{typeOf 1i}}", "complex128", 0, true},
- {"ideal int", "{{typeOf " + bigInt + "}}", "int", 0, true},
- {"ideal too big", "{{typeOf " + bigUint + "}}", "", 0, false},
-
- // Fields of structs.
- {".X", "-{{.X}}-", "-x-", tVal, true},
- {".U.V", "-{{.U.V}}-", "-v-", tVal, true},
-
- // Fields on maps.
- {"map .one", "{{.MSI.one}}", "1", tVal, true},
- {"map .two", "{{.MSI.two}}", "2", tVal, true},
- {"map .NO", "{{.MSI.NO}}", "<no value>", tVal, true},
- {"map .one interface", "{{.MXI.one}}", "1", tVal, true},
- {"map .WRONG args", "{{.MSI.one 1}}", "", tVal, false},
- {"map .WRONG type", "{{.MII.one}}", "", tVal, false},
-
- // Dots of all kinds to test basic evaluation.
- {"dot int", "<{{.}}>", "<13>", 13, true},
- {"dot uint", "<{{.}}>", "<14>", uint(14), true},
- {"dot float", "<{{.}}>", "<15.1>", 15.1, true},
- {"dot bool", "<{{.}}>", "<true>", true, true},
- {"dot complex", "<{{.}}>", "<(16.2-17i)>", 16.2 - 17i, true},
- {"dot string", "<{{.}}>", "<hello>", "hello", true},
- {"dot slice", "<{{.}}>", "<[-1 -2 -3]>", []int{-1, -2, -3}, true},
- {"dot map", "<{{.}}>", "<map[two:22]>", map[string]int{"two": 22}, true},
- {"dot struct", "<{{.}}>", "<{7 seven}>", struct {
- a int
- b string
- }{7, "seven"}, true},
-
- // Variables.
- {"$ int", "{{$}}", "123", 123, true},
- {"$.I", "{{$.I}}", "17", tVal, true},
- {"$.U.V", "{{$.U.V}}", "v", tVal, true},
- {"declare in action", "{{$x := $.U.V}}{{$x}}", "v", tVal, true},
-
- // Type with String method.
- {"V{6666}.String()", "-{{.V0}}-", "-<6666>-", tVal, true},
- {"&V{7777}.String()", "-{{.V1}}-", "-<7777>-", tVal, true},
- {"(*V)(nil).String()", "-{{.V2}}-", "-nilV-", tVal, true},
-
- // Pointers.
- {"*int", "{{.PI}}", "23", tVal, true},
- {"*[]int", "{{.PSI}}", "[21 22 23]", tVal, true},
- {"*[]int[1]", "{{index .PSI 1}}", "22", tVal, true},
- {"NIL", "{{.NIL}}", "<nil>", tVal, true},
-
- // Empty interfaces holding values.
- {"empty nil", "{{.Empty0}}", "<no value>", tVal, true},
- {"empty with int", "{{.Empty1}}", "3", tVal, true},
- {"empty with string", "{{.Empty2}}", "empty2", tVal, true},
- {"empty with slice", "{{.Empty3}}", "[7 8]", tVal, true},
- {"empty with struct", "{{.Empty4}}", "{UinEmpty}", tVal, true},
- {"empty with struct, field", "{{.Empty4.V}}", "UinEmpty", tVal, true},
-
- // Method calls.
- {".Method0", "-{{.Method0}}-", "-M0-", tVal, true},
- {".Method1(1234)", "-{{.Method1 1234}}-", "-1234-", tVal, true},
- {".Method1(.I)", "-{{.Method1 .I}}-", "-17-", tVal, true},
- {".Method2(3, .X)", "-{{.Method2 3 .X}}-", "-Method2: 3 x-", tVal, true},
- {".Method2(.U16, `str`)", "-{{.Method2 .U16 `str`}}-", "-Method2: 16 str-", tVal, true},
- {".Method2(.U16, $x)", "{{if $x := .X}}-{{.Method2 .U16 $x}}{{end}}-", "-Method2: 16 x-", tVal, true},
- {"method on var", "{{if $x := .}}-{{$x.Method2 .U16 $x.X}}{{end}}-", "-Method2: 16 x-", tVal, true},
- {"method on chained var",
- "{{range .MSIone}}{{if $.U.TrueFalse $.True}}{{$.U.TrueFalse $.True}}{{else}}WRONG{{end}}{{end}}",
- "true", tVal, true},
- {"chained method",
- "{{range .MSIone}}{{if $.GetU.TrueFalse $.True}}{{$.U.TrueFalse $.True}}{{else}}WRONG{{end}}{{end}}",
- "true", tVal, true},
- {"chained method on variable",
- "{{with $x := .}}{{with .SI}}{{$.GetU.TrueFalse $.True}}{{end}}{{end}}",
- "true", tVal, true},
-
- // Pipelines.
- {"pipeline", "-{{.Method0 | .Method2 .U16}}-", "-Method2: 16 M0-", tVal, true},
-
- // If.
- {"if true", "{{if true}}TRUE{{end}}", "TRUE", tVal, true},
- {"if false", "{{if false}}TRUE{{else}}FALSE{{end}}", "FALSE", tVal, true},
- {"if 1", "{{if 1}}NON-ZERO{{else}}ZERO{{end}}", "NON-ZERO", tVal, true},
- {"if 0", "{{if 0}}NON-ZERO{{else}}ZERO{{end}}", "ZERO", tVal, true},
- {"if 1.5", "{{if 1.5}}NON-ZERO{{else}}ZERO{{end}}", "NON-ZERO", tVal, true},
- {"if 0.0", "{{if .FloatZero}}NON-ZERO{{else}}ZERO{{end}}", "ZERO", tVal, true},
- {"if 1.5i", "{{if 1.5i}}NON-ZERO{{else}}ZERO{{end}}", "NON-ZERO", tVal, true},
- {"if 0.0i", "{{if .ComplexZero}}NON-ZERO{{else}}ZERO{{end}}", "ZERO", tVal, true},
- {"if emptystring", "{{if ``}}NON-EMPTY{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
- {"if string", "{{if `notempty`}}NON-EMPTY{{else}}EMPTY{{end}}", "NON-EMPTY", tVal, true},
- {"if emptyslice", "{{if .SIEmpty}}NON-EMPTY{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
- {"if slice", "{{if .SI}}NON-EMPTY{{else}}EMPTY{{end}}", "NON-EMPTY", tVal, true},
- {"if emptymap", "{{if .MSIEmpty}}NON-EMPTY{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
- {"if map", "{{if .MSI}}NON-EMPTY{{else}}EMPTY{{end}}", "NON-EMPTY", tVal, true},
- {"if $x with $y int", "{{if $x := true}}{{with $y := .I}}{{$x}},{{$y}}{{end}}{{end}}", "true,17", tVal, true},
- {"if $x with $x int", "{{if $x := true}}{{with $x := .I}}{{$x}},{{end}}{{$x}}{{end}}", "17,true", tVal, true},
-
- // Print etc.
- {"print", `{{print "hello, print"}}`, "hello, print", tVal, true},
- {"print", `{{print 1 2 3}}`, "1 2 3", tVal, true},
- {"println", `{{println 1 2 3}}`, "1 2 3\n", tVal, true},
- {"printf int", `{{printf "%04x" 127}}`, "007f", tVal, true},
- {"printf float", `{{printf "%g" 3.5}}`, "3.5", tVal, true},
- {"printf complex", `{{printf "%g" 1+7i}}`, "(1+7i)", tVal, true},
- {"printf string", `{{printf "%s" "hello"}}`, "hello", tVal, true},
- {"printf function", `{{printf "%#q" zeroArgs}}`, "`zeroArgs`", tVal, true},
- {"printf field", `{{printf "%s" .U.V}}`, "v", tVal, true},
- {"printf method", `{{printf "%s" .Method0}}`, "M0", tVal, true},
- {"printf dot", `{{with .I}}{{printf "%d" .}}{{end}}`, "17", tVal, true},
- {"printf var", `{{with $x := .I}}{{printf "%d" $x}}{{end}}`, "17", tVal, true},
- {"printf lots", `{{printf "%d %s %g %s" 127 "hello" 7-3i .Method0}}`, "127 hello (7-3i) M0", tVal, true},
-
- // HTML.
- {"html", `{{html "<script>alert(\"XSS\");</script>"}}`,
- "<script>alert("XSS");</script>", nil, true},
- {"html pipeline", `{{printf "<script>alert(\"XSS\");</script>" | html}}`,
- "<script>alert("XSS");</script>", nil, true},
-
- // JavaScript.
- {"js", `{{js .}}`, `It\'d be nice.`, `It'd be nice.`, true},
-
- // URL query.
- {"urlquery", `{{"http://www.example.org/"|urlquery}}`, "http%3A%2F%2Fwww.example.org%2F", nil, true},
-
- // Booleans
- {"not", "{{not true}} {{not false}}", "false true", nil, true},
- {"and", "{{and false 0}} {{and 1 0}} {{and 0 true}} {{and 1 1}}", "false 0 0 1", nil, true},
- {"or", "{{or 0 0}} {{or 1 0}} {{or 0 true}} {{or 1 1}}", "0 1 true 1", nil, true},
- {"boolean if", "{{if and true 1 `hi`}}TRUE{{else}}FALSE{{end}}", "TRUE", tVal, true},
- {"boolean if not", "{{if and true 1 `hi` | not}}TRUE{{else}}FALSE{{end}}", "FALSE", nil, true},
-
- // Indexing.
- {"slice[0]", "{{index .SI 0}}", "3", tVal, true},
- {"slice[1]", "{{index .SI 1}}", "4", tVal, true},
- {"slice[HUGE]", "{{index .SI 10}}", "", tVal, false},
- {"slice[WRONG]", "{{index .SI `hello`}}", "", tVal, false},
- {"map[one]", "{{index .MSI `one`}}", "1", tVal, true},
- {"map[two]", "{{index .MSI `two`}}", "2", tVal, true},
- {"map[NO]", "{{index .MSI `XXX`}}", "", tVal, true},
- {"map[WRONG]", "{{index .MSI 10}}", "", tVal, false},
- {"double index", "{{index .SMSI 1 `eleven`}}", "11", tVal, true},
-
- // Len.
- {"slice", "{{len .SI}}", "3", tVal, true},
- {"map", "{{len .MSI }}", "3", tVal, true},
- {"len of int", "{{len 3}}", "", tVal, false},
- {"len of nothing", "{{len .Empty0}}", "", tVal, false},
-
- // With.
- {"with true", "{{with true}}{{.}}{{end}}", "true", tVal, true},
- {"with false", "{{with false}}{{.}}{{else}}FALSE{{end}}", "FALSE", tVal, true},
- {"with 1", "{{with 1}}{{.}}{{else}}ZERO{{end}}", "1", tVal, true},
- {"with 0", "{{with 0}}{{.}}{{else}}ZERO{{end}}", "ZERO", tVal, true},
- {"with 1.5", "{{with 1.5}}{{.}}{{else}}ZERO{{end}}", "1.5", tVal, true},
- {"with 0.0", "{{with .FloatZero}}{{.}}{{else}}ZERO{{end}}", "ZERO", tVal, true},
- {"with 1.5i", "{{with 1.5i}}{{.}}{{else}}ZERO{{end}}", "(0+1.5i)", tVal, true},
- {"with 0.0i", "{{with .ComplexZero}}{{.}}{{else}}ZERO{{end}}", "ZERO", tVal, true},
- {"with emptystring", "{{with ``}}{{.}}{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
- {"with string", "{{with `notempty`}}{{.}}{{else}}EMPTY{{end}}", "notempty", tVal, true},
- {"with emptyslice", "{{with .SIEmpty}}{{.}}{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
- {"with slice", "{{with .SI}}{{.}}{{else}}EMPTY{{end}}", "[3 4 5]", tVal, true},
- {"with emptymap", "{{with .MSIEmpty}}{{.}}{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
- {"with map", "{{with .MSIone}}{{.}}{{else}}EMPTY{{end}}", "map[one:1]", tVal, true},
- {"with empty interface, struct field", "{{with .Empty4}}{{.V}}{{end}}", "UinEmpty", tVal, true},
- {"with $x int", "{{with $x := .I}}{{$x}}{{end}}", "17", tVal, true},
- {"with $x struct.U.V", "{{with $x := $}}{{$x.U.V}}{{end}}", "v", tVal, true},
- {"with variable and action", "{{with $x := $}}{{$y := $.U.V}}{{$y}}{{end}}", "v", tVal, true},
-
- // Range.
- {"range []int", "{{range .SI}}-{{.}}-{{end}}", "-3--4--5-", tVal, true},
- {"range empty no else", "{{range .SIEmpty}}-{{.}}-{{end}}", "", tVal, true},
- {"range []int else", "{{range .SI}}-{{.}}-{{else}}EMPTY{{end}}", "-3--4--5-", tVal, true},
- {"range empty else", "{{range .SIEmpty}}-{{.}}-{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
- {"range []bool", "{{range .SB}}-{{.}}-{{end}}", "-true--false-", tVal, true},
- {"range []int method", "{{range .SI | .MAdd .I}}-{{.}}-{{end}}", "-20--21--22-", tVal, true},
- {"range map", "{{range .MSI | .MSort}}-{{.}}-{{end}}", "-one--three--two-", tVal, true},
- {"range empty map no else", "{{range .MSIEmpty}}-{{.}}-{{end}}", "", tVal, true},
- {"range map else", "{{range .MSI | .MSort}}-{{.}}-{{else}}EMPTY{{end}}", "-one--three--two-", tVal, true},
- {"range empty map else", "{{range .MSIEmpty}}-{{.}}-{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
- {"range empty interface", "{{range .Empty3}}-{{.}}-{{else}}EMPTY{{end}}", "-7--8-", tVal, true},
- {"range empty nil", "{{range .Empty0}}-{{.}}-{{end}}", "", tVal, true},
- {"range $x SI", "{{range $x := .SI}}<{{$x}}>{{end}}", "<3><4><5>", tVal, true},
- {"range $x $y SI", "{{range $x, $y := .SI}}<{{$x}}={{$y}}>{{end}}", "<0=3><1=4><2=5>", tVal, true},
- {"range $x MSIone", "{{range $x := .MSIone}}<{{$x}}>{{end}}", "<1>", tVal, true},
- {"range $x $y MSIone", "{{range $x, $y := .MSIone}}<{{$x}}={{$y}}>{{end}}", "<one=1>", tVal, true},
- {"range $x PSI", "{{range $x := .PSI}}<{{$x}}>{{end}}", "<21><22><23>", tVal, true},
- {"declare in range", "{{range $x := .PSI}}<{{$foo:=$x}}{{$x}}>{{end}}", "<21><22><23>", tVal, true},
- {"range count", `{{range $i, $x := count 5}}[{{$i}}]{{$x}}{{end}}`, "[0]a[1]b[2]c[3]d[4]e", tVal, true},
- {"range nil count", `{{range $i, $x := count 0}}{{else}}empty{{end}}`, "empty", tVal, true},
-
- // Cute examples.
- {"or as if true", `{{or .SI "slice is empty"}}`, "[3 4 5]", tVal, true},
- {"or as if false", `{{or .SIEmpty "slice is empty"}}`, "slice is empty", tVal, true},
-
- // Error handling.
- {"error method, error", "{{.EPERM true}}", "", tVal, false},
- {"error method, no error", "{{.EPERM false}}", "false", tVal, true},
-
- // Fixed bugs.
- // Must separate dot and receiver; otherwise args are evaluated with dot set to variable.
- {"bug0", "{{range .MSIone}}{{if $.Method1 .}}X{{end}}{{end}}", "X", tVal, true},
- // Do not loop endlessly in indirect for non-empty interfaces.
- // The bug appears with *interface only; looped forever.
- {"bug1", "{{.Method0}}", "M0", &iVal, true},
- // Was taking address of interface field, so method set was empty.
- {"bug2", "{{$.NonEmptyInterface.Method0}}", "M0", tVal, true},
- // Struct values were not legal in with - mere oversight.
- {"bug3", "{{with $}}{{.Method0}}{{end}}", "M0", tVal, true},
- // Nil interface values in if.
- {"bug4", "{{if .Empty0}}non-nil{{else}}nil{{end}}", "nil", tVal, true},
- // Stringer.
- {"bug5", "{{.Str}}", "foozle", tVal, true},
- // Args need to be indirected and dereferenced sometimes.
- {"bug6a", "{{vfunc .V0 .V1}}", "vfunc", tVal, true},
- {"bug6b", "{{vfunc .V0 .V0}}", "vfunc", tVal, true},
- {"bug6c", "{{vfunc .V1 .V0}}", "vfunc", tVal, true},
- {"bug6d", "{{vfunc .V1 .V1}}", "vfunc", tVal, true},
-}
-
-func zeroArgs() string {
- return "zeroArgs"
-}
-
-func oneArg(a string) string {
- return "oneArg=" + a
-}
-
-// count returns a channel that will deliver n sequential 1-letter strings starting at "a"
-func count(n int) chan string {
- if n == 0 {
- return nil
- }
- c := make(chan string)
- go func() {
- for i := 0; i < n; i++ {
- c <- "abcdefghijklmnop"[i : i+1]
- }
- close(c)
- }()
- return c
-}
-
-// vfunc takes a *V and a V
-func vfunc(V, *V) string {
- return "vfunc"
-}
-
-func testExecute(execTests []execTest, set *Set, t *testing.T) {
- b := new(bytes.Buffer)
- funcs := FuncMap{
- "count": count,
- "oneArg": oneArg,
- "typeOf": typeOf,
- "vfunc": vfunc,
- "zeroArgs": zeroArgs,
- }
- for _, test := range execTests {
- tmpl := New(test.name).Funcs(funcs)
- _, err := tmpl.ParseInSet(test.input, set)
- if err != nil {
- t.Errorf("%s: parse error: %s", test.name, err)
- continue
- }
- b.Reset()
- err = tmpl.Execute(b, test.data)
- switch {
- case !test.ok && err == nil:
- t.Errorf("%s: expected error; got none", test.name)
- continue
- case test.ok && err != nil:
- t.Errorf("%s: unexpected execute error: %s", test.name, err)
- continue
- case !test.ok && err != nil:
- // expected error, got one
- if *debug {
- fmt.Printf("%s: %s\n\t%s\n", test.name, test.input, err)
- }
- }
- result := b.String()
- if result != test.output {
- t.Errorf("%s: expected\n\t%q\ngot\n\t%q", test.name, test.output, result)
- }
- }
-}
-
-func TestExecute(t *testing.T) {
- testExecute(execTests, nil, t)
-}
-
-var delimPairs = []string{
- "", "", // default
- "{{", "}}", // same as default
- "<<", ">>", // distinct
- "|", "|", // same
- "(日)", "(本)", // peculiar
-}
-
-func TestDelims(t *testing.T) {
- const hello = "Hello, world"
- var value = struct{ Str string }{hello}
- for i := 0; i < len(delimPairs); i += 2 {
- text := ".Str"
- left := delimPairs[i+0]
- trueLeft := left
- right := delimPairs[i+1]
- trueRight := right
- if left == "" { // default case
- trueLeft = "{{"
- }
- if right == "" { // default case
- trueRight = "}}"
- }
- text = trueLeft + text + trueRight
- // Now add a comment
- text += trueLeft + "/*comment*/" + trueRight
- // Now add an action containing a string.
- text += trueLeft + `"` + trueLeft + `"` + trueRight
- // At this point text looks like `{{.Str}}{{/*comment*/}}{{"{{"}}`.
- tmpl, err := New("delims").Delims(left, right).Parse(text)
- if err != nil {
- t.Fatalf("delim %q text %q parse err %s", left, text, err)
- }
- var b = new(bytes.Buffer)
- err = tmpl.Execute(b, value)
- if err != nil {
- t.Fatalf("delim %q exec err %s", left, err)
- }
- if b.String() != hello+trueLeft {
- t.Errorf("expected %q got %q", hello+trueLeft, b.String())
- }
- }
-}
-
-// Check that an error from a method flows back to the top.
-func TestExecuteError(t *testing.T) {
- b := new(bytes.Buffer)
- tmpl := New("error")
- _, err := tmpl.Parse("{{.EPERM true}}")
- if err != nil {
- t.Fatalf("parse error: %s", err)
- }
- err = tmpl.Execute(b, tVal)
- if err == nil {
- t.Errorf("expected error; got none")
- } else if !strings.Contains(err.Error(), os.EPERM.Error()) {
- if *debug {
- fmt.Printf("test execute error: %s\n", err)
- }
- t.Errorf("expected os.EPERM; got %s", err)
- }
-}
-
-func TestJSEscaping(t *testing.T) {
- testCases := []struct {
- in, exp string
- }{
- {`a`, `a`},
- {`'foo`, `\'foo`},
- {`Go "jump" \`, `Go \"jump\" \\`},
- {`Yukihiro says "今日は世界"`, `Yukihiro says \"今日は世界\"`},
- {"unprintable \uFDFF", `unprintable \uFDFF`},
- {`<html>`, `\x3Chtml\x3E`},
- }
- for _, tc := range testCases {
- s := JSEscapeString(tc.in)
- if s != tc.exp {
- t.Errorf("JS escaping [%s] got [%s] want [%s]", tc.in, s, tc.exp)
- }
- }
-}
-
-// A nice example: walk a binary tree.
-
-type Tree struct {
- Val int
- Left, Right *Tree
-}
-
-// Use different delimiters to test Set.Delims.
-const treeTemplate = `
- (define "tree")
- [
- (.Val)
- (with .Left)
- (template "tree" .)
- (end)
- (with .Right)
- (template "tree" .)
- (end)
- ]
- (end)
-`
-
-func TestTree(t *testing.T) {
- var tree = &Tree{
- 1,
- &Tree{
- 2, &Tree{
- 3,
- &Tree{
- 4, nil, nil,
- },
- nil,
- },
- &Tree{
- 5,
- &Tree{
- 6, nil, nil,
- },
- nil,
- },
- },
- &Tree{
- 7,
- &Tree{
- 8,
- &Tree{
- 9, nil, nil,
- },
- nil,
- },
- &Tree{
- 10,
- &Tree{
- 11, nil, nil,
- },
- nil,
- },
- },
- }
- set := new(Set)
- _, err := set.Delims("(", ")").Parse(treeTemplate)
- if err != nil {
- t.Fatal("parse error:", err)
- }
- var b bytes.Buffer
- err = set.Execute(&b, "tree", tree)
- if err != nil {
- t.Fatal("exec error:", err)
- }
- stripSpace := func(r rune) rune {
- if r == '\t' || r == '\n' {
- return -1
- }
- return r
- }
- result := strings.Map(stripSpace, b.String())
- const expect = "[1[2[3[4]][5[6]]][7[8[9]][10[11]]]]"
- if result != expect {
- t.Errorf("expected %q got %q", expect, result)
- }
-}
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package template
-
-import (
- "bytes"
- "fmt"
- "io"
- "reflect"
- "strings"
- "unicode"
- "url"
- "utf8"
-)
-
-// FuncMap is the type of the map defining the mapping from names to functions.
-// Each function must have either a single return value, or two return values of
-// which the second has type error. If the second argument evaluates to non-nil
-// during execution, execution terminates and Execute returns an error.
-type FuncMap map[string]interface{}
-
-var builtins = FuncMap{
- "and": and,
- "html": HTMLEscaper,
- "index": index,
- "js": JSEscaper,
- "len": length,
- "not": not,
- "or": or,
- "print": fmt.Sprint,
- "printf": fmt.Sprintf,
- "println": fmt.Sprintln,
- "urlquery": URLQueryEscaper,
-}
-
-var builtinFuncs = createValueFuncs(builtins)
-
-// createValueFuncs turns a FuncMap into a map[string]reflect.Value
-func createValueFuncs(funcMap FuncMap) map[string]reflect.Value {
- m := make(map[string]reflect.Value)
- addValueFuncs(m, funcMap)
- return m
-}
-
-// addValueFuncs adds to values the functions in funcs, converting them to reflect.Values.
-func addValueFuncs(out map[string]reflect.Value, in FuncMap) {
- for name, fn := range in {
- v := reflect.ValueOf(fn)
- if v.Kind() != reflect.Func {
- panic("value for " + name + " not a function")
- }
- if !goodFunc(v.Type()) {
- panic(fmt.Errorf("can't handle multiple results from method/function %q", name))
- }
- out[name] = v
- }
-}
-
-// addFuncs adds to values the functions in funcs. It does no checking of the input -
-// call addValueFuncs first.
-func addFuncs(out, in FuncMap) {
- for name, fn := range in {
- out[name] = fn
- }
-}
-
-// goodFunc checks that the function or method has the right result signature.
-func goodFunc(typ reflect.Type) bool {
- // We allow functions with 1 result or 2 results where the second is an error.
- switch {
- case typ.NumOut() == 1:
- return true
- case typ.NumOut() == 2 && typ.Out(1) == osErrorType:
- return true
- }
- return false
-}
-
-// findFunction looks for a function in the template, set, and global map.
-func findFunction(name string, tmpl *Template, set *Set) (reflect.Value, bool) {
- if tmpl != nil {
- if fn := tmpl.execFuncs[name]; fn.IsValid() {
- return fn, true
- }
- }
- if set != nil {
- if fn := set.execFuncs[name]; fn.IsValid() {
- return fn, true
- }
- }
- if fn := builtinFuncs[name]; fn.IsValid() {
- return fn, true
- }
- return reflect.Value{}, false
-}
-
-// Indexing.
-
-// index returns the result of indexing its first argument by the following
-// arguments. Thus "index x 1 2 3" is, in Go syntax, x[1][2][3]. Each
-// indexed item must be a map, slice, or array.
-func index(item interface{}, indices ...interface{}) (interface{}, error) {
- v := reflect.ValueOf(item)
- for _, i := range indices {
- index := reflect.ValueOf(i)
- var isNil bool
- if v, isNil = indirect(v); isNil {
- return nil, fmt.Errorf("index of nil pointer")
- }
- switch v.Kind() {
- case reflect.Array, reflect.Slice:
- var x int64
- switch index.Kind() {
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- x = index.Int()
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- x = int64(index.Uint())
- default:
- return nil, fmt.Errorf("cannot index slice/array with type %s", index.Type())
- }
- if x < 0 || x >= int64(v.Len()) {
- return nil, fmt.Errorf("index out of range: %d", x)
- }
- v = v.Index(int(x))
- case reflect.Map:
- if !index.Type().AssignableTo(v.Type().Key()) {
- return nil, fmt.Errorf("%s is not index type for %s", index.Type(), v.Type())
- }
- if x := v.MapIndex(index); x.IsValid() {
- v = x
- } else {
- v = reflect.Zero(v.Type().Key())
- }
- default:
- return nil, fmt.Errorf("can't index item of type %s", index.Type())
- }
- }
- return v.Interface(), nil
-}
-
-// Length
-
-// length returns the length of the item, with an error if it has no defined length.
-func length(item interface{}) (int, error) {
- v, isNil := indirect(reflect.ValueOf(item))
- if isNil {
- return 0, fmt.Errorf("len of nil pointer")
- }
- switch v.Kind() {
- case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String:
- return v.Len(), nil
- }
- return 0, fmt.Errorf("len of type %s", v.Type())
-}
-
-// Boolean logic.
-
-func truth(a interface{}) bool {
- t, _ := isTrue(reflect.ValueOf(a))
- return t
-}
-
-// and computes the Boolean AND of its arguments, returning
-// the first false argument it encounters, or the last argument.
-func and(arg0 interface{}, args ...interface{}) interface{} {
- if !truth(arg0) {
- return arg0
- }
- for i := range args {
- arg0 = args[i]
- if !truth(arg0) {
- break
- }
- }
- return arg0
-}
-
-// or computes the Boolean OR of its arguments, returning
-// the first true argument it encounters, or the last argument.
-func or(arg0 interface{}, args ...interface{}) interface{} {
- if truth(arg0) {
- return arg0
- }
- for i := range args {
- arg0 = args[i]
- if truth(arg0) {
- break
- }
- }
- return arg0
-}
-
-// not returns the Boolean negation of its argument.
-func not(arg interface{}) (truth bool) {
- truth, _ = isTrue(reflect.ValueOf(arg))
- return !truth
-}
-
-// HTML escaping.
-
-var (
- htmlQuot = []byte("&#34;") // shorter than "&quot;"
- htmlApos = []byte("&#39;") // shorter than "&apos;"
- htmlAmp = []byte("&amp;")
- htmlLt = []byte("&lt;")
- htmlGt = []byte("&gt;")
-)
-
-// HTMLEscape writes to w the escaped HTML equivalent of the plain text data b.
-func HTMLEscape(w io.Writer, b []byte) {
- last := 0
- for i, c := range b {
- var html []byte
- switch c {
- case '"':
- html = htmlQuot
- case '\'':
- html = htmlApos
- case '&':
- html = htmlAmp
- case '<':
- html = htmlLt
- case '>':
- html = htmlGt
- default:
- continue
- }
- w.Write(b[last:i])
- w.Write(html)
- last = i + 1
- }
- w.Write(b[last:])
-}
-
-// HTMLEscapeString returns the escaped HTML equivalent of the plain text data s.
-func HTMLEscapeString(s string) string {
- // Avoid allocation if we can.
- if strings.IndexAny(s, `'"&<>`) < 0 {
- return s
- }
- var b bytes.Buffer
- HTMLEscape(&b, []byte(s))
- return b.String()
-}
-
-// HTMLEscaper returns the escaped HTML equivalent of the textual
-// representation of its arguments.
-func HTMLEscaper(args ...interface{}) string {
- ok := false
- var s string
- if len(args) == 1 {
- s, ok = args[0].(string)
- }
- if !ok {
- s = fmt.Sprint(args...)
- }
- return HTMLEscapeString(s)
-}
-
-// JavaScript escaping.
-
-var (
- jsLowUni = []byte(`\u00`)
- hex = []byte("0123456789ABCDEF")
-
- jsBackslash = []byte(`\\`)
- jsApos = []byte(`\'`)
- jsQuot = []byte(`\"`)
- jsLt = []byte(`\x3C`)
- jsGt = []byte(`\x3E`)
-)
-
-// JSEscape writes to w the escaped JavaScript equivalent of the plain text data b.
-func JSEscape(w io.Writer, b []byte) {
- last := 0
- for i := 0; i < len(b); i++ {
- c := b[i]
-
- if !jsIsSpecial(rune(c)) {
- // fast path: nothing to do
- continue
- }
- w.Write(b[last:i])
-
- if c < utf8.RuneSelf {
- // Quotes, slashes and angle brackets get quoted.
- // Control characters get written as \u00XX.
- switch c {
- case '\\':
- w.Write(jsBackslash)
- case '\'':
- w.Write(jsApos)
- case '"':
- w.Write(jsQuot)
- case '<':
- w.Write(jsLt)
- case '>':
- w.Write(jsGt)
- default:
- w.Write(jsLowUni)
- t, b := c>>4, c&0x0f
- w.Write(hex[t : t+1])
- w.Write(hex[b : b+1])
- }
- } else {
- // Unicode rune.
- r, size := utf8.DecodeRune(b[i:])
- if unicode.IsPrint(r) {
- w.Write(b[i : i+size])
- } else {
- // TODO(dsymonds): Do this without fmt?
- fmt.Fprintf(w, "\\u%04X", r)
- }
- i += size - 1
- }
- last = i + 1
- }
- w.Write(b[last:])
-}
-
-// JSEscapeString returns the escaped JavaScript equivalent of the plain text data s.
-func JSEscapeString(s string) string {
- // Avoid allocation if we can.
- if strings.IndexFunc(s, jsIsSpecial) < 0 {
- return s
- }
- var b bytes.Buffer
- JSEscape(&b, []byte(s))
- return b.String()
-}
-
-func jsIsSpecial(r rune) bool {
- switch r {
- case '\\', '\'', '"', '<', '>':
- return true
- }
- return r < ' ' || utf8.RuneSelf <= r
-}
-
-// JSEscaper returns the escaped JavaScript equivalent of the textual
-// representation of its arguments.
-func JSEscaper(args ...interface{}) string {
- ok := false
- var s string
- if len(args) == 1 {
- s, ok = args[0].(string)
- }
- if !ok {
- s = fmt.Sprint(args...)
- }
- return JSEscapeString(s)
-}
-
-// URLQueryEscaper returns the escaped value of the textual representation of
-// its arguments in a form suitable for embedding in a URL query.
-func URLQueryEscaper(args ...interface{}) string {
- s, ok := "", false
- if len(args) == 1 {
- s, ok = args[0].(string)
- }
- if !ok {
- s = fmt.Sprint(args...)
- }
- return url.QueryEscape(s)
-}
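The FuncMap contract documented above (each function returns one result, or two results where a non-nil second error aborts execution) is easiest to see with a small sketch. This is not part of the patch; the function names "upper" and "div" are illustrative only.

package main

import (
	"fmt"
	"os"
	"strings"
	"template" // old import path assumed, as in the files removed here
)

func main() {
	funcs := template.FuncMap{
		"upper": strings.ToUpper, // single result: always accepted
		"div": func(a, b int) (int, error) { // second result is error: non-nil aborts Execute
			if b == 0 {
				return 0, fmt.Errorf("division by zero")
			}
			return a / b, nil
		},
	}
	t := template.Must(template.New("demo").Funcs(funcs).Parse(`{{upper "hi"}} {{div 10 2}}`))
	if err := t.Execute(os.Stdout, nil); err != nil {
		panic(err)
	}
}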
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Helper functions to make constructing templates and sets easier.
-
-package template
-
-import (
- "fmt"
- "io/ioutil"
- "path/filepath"
-)
-
-// Functions and methods to parse a single template.
-
-// Must is a helper that wraps a call to a function returning (*Template, error)
-// and panics if the error is non-nil. It is intended for use in variable initializations
-// such as
-// var t = template.Must(template.New("name").Parse("text"))
-func Must(t *Template, err error) *Template {
- if err != nil {
- panic(err)
- }
- return t
-}
-
-// ParseFile creates a new Template and parses the template definition from
-// the named file. The template name is the base name of the file.
-func ParseFile(filename string) (*Template, error) {
- t := New(filepath.Base(filename))
- return t.ParseFile(filename)
-}
-
-// parseFileInSet creates a new Template and parses the template
-// definition from the named file. The template name is the base name
-// of the file. It also adds the template to the set. Function bindings are
-// checked against those in the set.
-func parseFileInSet(filename string, set *Set) (*Template, error) {
- t := New(filepath.Base(filename))
- return t.parseFileInSet(filename, set)
-}
-
-// ParseFile reads the template definition from a file and parses it to
-// construct an internal representation of the template for execution.
-// The returned template will be nil if an error occurs.
-func (t *Template) ParseFile(filename string) (*Template, error) {
- b, err := ioutil.ReadFile(filename)
- if err != nil {
- return nil, err
- }
- return t.Parse(string(b))
-}
-
-// parseFileInSet is the same as ParseFile except that function bindings
-// are checked against those in the set and the template is added
-// to the set.
-// The returned template will be nil if an error occurs.
-func (t *Template) parseFileInSet(filename string, set *Set) (*Template, error) {
- b, err := ioutil.ReadFile(filename)
- if err != nil {
- return nil, err
- }
- return t.ParseInSet(string(b), set)
-}
-
-// Functions and methods to parse a set.
-
-// SetMust is a helper that wraps a call to a function returning (*Set, error)
-// and panics if the error is non-nil. It is intended for use in variable initializations
-// such as
-// var s = template.SetMust(template.ParseSetFiles("file"))
-func SetMust(s *Set, err error) *Set {
- if err != nil {
- panic(err)
- }
- return s
-}
-
-// ParseFiles parses the named files into a set of named templates.
-// Each file must be parseable by itself.
-// If an error occurs, parsing stops and the returned set is nil.
-func (s *Set) ParseFiles(filenames ...string) (*Set, error) {
- for _, filename := range filenames {
- b, err := ioutil.ReadFile(filename)
- if err != nil {
- return nil, err
- }
- _, err = s.Parse(string(b))
- if err != nil {
- return nil, err
- }
- }
- return s, nil
-}
-
-// ParseSetFiles creates a new Set and parses the set definition from the
-// named files. Each file must be individually parseable.
-func ParseSetFiles(filenames ...string) (*Set, error) {
- s := new(Set)
- for _, filename := range filenames {
- b, err := ioutil.ReadFile(filename)
- if err != nil {
- return nil, err
- }
- _, err = s.Parse(string(b))
- if err != nil {
- return nil, err
- }
- }
- return s, nil
-}
-
-// ParseGlob parses the set definition from the files identified by the
-// pattern. The pattern is processed by filepath.Glob and must match at
-// least one file.
-// If an error occurs, parsing stops and the returned set is nil.
-func (s *Set) ParseGlob(pattern string) (*Set, error) {
- filenames, err := filepath.Glob(pattern)
- if err != nil {
- return nil, err
- }
- if len(filenames) == 0 {
- return nil, fmt.Errorf("pattern matches no files: %#q", pattern)
- }
- return s.ParseFiles(filenames...)
-}
-
-// ParseSetGlob creates a new Set and parses the set definition from the
-// files identified by the pattern. The pattern is processed by filepath.Glob
-// and must match at least one file.
-func ParseSetGlob(pattern string) (*Set, error) {
- set, err := new(Set).ParseGlob(pattern)
- if err != nil {
- return nil, err
- }
- return set, nil
-}
-
-// Functions and methods to parse stand-alone template files into a set.
-
-// ParseTemplateFiles parses the named template files and adds
-// them to the set. Each template will be named the base name of
-// its file.
-// Unlike with ParseFiles, each file should be a stand-alone template
-// definition suitable for Template.Parse (not Set.Parse); that is, the
-// file does not contain {{define}} clauses. ParseTemplateFiles is
-// therefore equivalent to calling the ParseFile function to create
-// individual templates, which are then added to the set.
-// Each file must be parseable by itself.
-// If an error occurs, parsing stops and the returned set is nil.
-func (s *Set) ParseTemplateFiles(filenames ...string) (*Set, error) {
- for _, filename := range filenames {
- _, err := parseFileInSet(filename, s)
- if err != nil {
- return nil, err
- }
- }
- return s, nil
-}
-
-// ParseTemplateGlob parses the template files matched by the
- // pattern and adds them to the set. Each template will be named
-// the base name of its file.
-// Unlike with ParseGlob, each file should be a stand-alone template
-// definition suitable for Template.Parse (not Set.Parse); that is, the
-// file does not contain {{define}} clauses. ParseTemplateGlob is
-// therefore equivalent to calling the ParseFile function to create
-// individual templates, which are then added to the set.
-// Each file must be parseable by itself.
-// If an error occurs, parsing stops and the returned set is nil.
-func (s *Set) ParseTemplateGlob(pattern string) (*Set, error) {
- filenames, err := filepath.Glob(pattern)
- if err != nil {
- return nil, err
- }
- for _, filename := range filenames {
- _, err := parseFileInSet(filename, s)
- if err != nil {
- return nil, err
- }
- }
- return s, nil
-}
-
-// ParseTemplateFiles creates a set by parsing the named files,
-// each of which defines a single template. Each template will be
-// named the base name of its file.
-// Unlike with ParseFiles, each file should be a stand-alone template
-// definition suitable for Template.Parse (not Set.Parse); that is, the
-// file does not contain {{define}} clauses. ParseTemplateFiles is
-// therefore equivalent to calling the ParseFile function to create
-// individual templates, which are then added to the set.
-// Each file must be parseable by itself. Parsing stops if an error is
-// encountered.
-func ParseTemplateFiles(filenames ...string) (*Set, error) {
- set := new(Set)
- set.init()
- for _, filename := range filenames {
- t, err := ParseFile(filename)
- if err != nil {
- return nil, err
- }
- if err := set.add(t); err != nil {
- return nil, err
- }
- }
- return set, nil
-}
-
-// ParseTemplateGlob creates a set by parsing the files matched
-// by the pattern, each of which defines a single template. The pattern
-// is processed by filepath.Glob and must match at least one file. Each
-// template will be named the base name of its file.
-// Unlike with ParseGlob, each file should be a stand-alone template
-// definition suitable for Template.Parse (not Set.Parse); that is, the
-// file does not contain {{define}} clauses. ParseTemplateGlob is
-// therefore equivalent to calling the ParseFile function to create
-// individual templates, which are then added to the set.
-// Each file must be parseable by itself. Parsing stops if an error is
-// encountered.
-func ParseTemplateGlob(pattern string) (*Set, error) {
- set := new(Set)
- filenames, err := filepath.Glob(pattern)
- if err != nil {
- return nil, err
- }
- if len(filenames) == 0 {
- return nil, fmt.Errorf("pattern matches no files: %#q", pattern)
- }
- for _, filename := range filenames {
- t, err := ParseFile(filename)
- if err != nil {
- return nil, err
- }
- if err := set.add(t); err != nil {
- return nil, err
- }
- }
- return set, nil
-}
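A brief, hypothetical sketch of the helpers above (the file name and glob pattern are placeholders, not files in this tree): Must and SetMust panic on a parse error, which makes them suitable for package-level variable initialization, as the comments in the removed file describe.

package main

import "template" // old import path assumed

// page.tmpl and tmpl/*.tmpl are hypothetical; Must/SetMust panic if parsing fails.
var page = template.Must(template.ParseFile("page.tmpl"))
var site = template.SetMust(template.ParseSetGlob("tmpl/*.tmpl"))

func main() {
	_ = page
	_ = site
}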
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package template
-
-import (
- "reflect"
- "template/parse"
-)
-
-// Template is the representation of a parsed template.
-type Template struct {
- name string
- *parse.Tree
- leftDelim string
- rightDelim string
- // We use two maps, one for parsing and one for execution.
- // This separation makes the API cleaner since it doesn't
- // expose reflection to the client.
- parseFuncs FuncMap
- execFuncs map[string]reflect.Value
- set *Set // can be nil.
-}
-
-// Name returns the name of the template.
-func (t *Template) Name() string {
- return t.name
-}
-
-// Parsing.
-
-// New allocates a new template with the given name.
-func New(name string) *Template {
- return &Template{
- name: name,
- parseFuncs: make(FuncMap),
- execFuncs: make(map[string]reflect.Value),
- }
-}
-
-// Delims sets the action delimiters, to be used in a subsequent
-// parse, to the specified strings.
-// An empty delimiter stands for the corresponding default: {{ or }}.
-// The return value is the template, so calls can be chained.
-func (t *Template) Delims(left, right string) *Template {
- t.leftDelim = left
- t.rightDelim = right
- return t
-}
-
-// Funcs adds the elements of the argument map to the template's function
-// map. It panics if a value in the map is not a function with appropriate
-// return type.
-// The return value is the template, so calls can be chained.
-func (t *Template) Funcs(funcMap FuncMap) *Template {
- addValueFuncs(t.execFuncs, funcMap)
- addFuncs(t.parseFuncs, funcMap)
- return t
-}
-
-// Parse parses the template definition string to construct an internal
-// representation of the template for execution.
-func (t *Template) Parse(s string) (tmpl *Template, err error) {
- t.Tree, err = parse.New(t.name).Parse(s, t.leftDelim, t.rightDelim, t.parseFuncs, builtins)
- if err != nil {
- return nil, err
- }
- return t, nil
-}
-
-// ParseInSet parses the template definition string to construct an internal
-// representation of the template for execution. It also adds the template
-// to the set.
-// Function bindings are checked against those in the set.
-func (t *Template) ParseInSet(s string, set *Set) (tmpl *Template, err error) {
- var setFuncs FuncMap
- if set != nil {
- setFuncs = set.parseFuncs
- }
- t.Tree, err = parse.New(t.name).Parse(s, t.leftDelim, t.rightDelim, t.parseFuncs, setFuncs, builtins)
- if err != nil {
- return nil, err
- }
- t.addToSet(set)
- return t, nil
-}
-
-// addToSet adds the template to the set, verifying it's not being double-assigned.
-func (t *Template) addToSet(set *Set) {
- if set == nil || t.set == set {
- return
- }
- // If double-assigned, Add will panic and we will turn that into an error.
- set.Add(t)
-}
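
The chainable New, Delims, Funcs, Parse shape of the type above survives in the Go 1 text/template package. A small self-contained sketch; the delimiters, function map, and data are chosen purely for illustration.

    package main

    import (
        "os"
        "strings"
        "text/template"
    )

    func main() {
        funcs := template.FuncMap{"upper": strings.ToUpper}
        // New names the template, Delims overrides the default {{ }},
        // Funcs registers functions visible to the parser and executor,
        // and Parse compiles the text.
        t, err := template.New("greet").Delims("[[", "]]").Funcs(funcs).
            Parse("Hello, [[upper .]]!\n")
        if err != nil {
            panic(err)
        }
        if err := t.Execute(os.Stdout, "gopher"); err != nil {
            panic(err)
        }
    }
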
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package parse
-
-import (
- "fmt"
- "strings"
- "unicode"
- "utf8"
-)
-
-// item represents a token or text string returned from the scanner.
-type item struct {
- typ itemType
- val string
-}
-
-func (i item) String() string {
- switch {
- case i.typ == itemEOF:
- return "EOF"
- case i.typ == itemError:
- return i.val
- case i.typ > itemKeyword:
- return fmt.Sprintf("<%s>", i.val)
- case len(i.val) > 10:
- return fmt.Sprintf("%.10q...", i.val)
- }
- return fmt.Sprintf("%q", i.val)
-}
-
-// itemType identifies the type of lex items.
-type itemType int
-
-const (
- itemError itemType = iota // error occurred; value is text of error
- itemBool // boolean constant
- itemChar // printable ASCII character; grab bag for comma etc.
- itemCharConstant // character constant
- itemComplex // complex constant (1+2i); imaginary is just a number
- itemColonEquals // colon-equals (':=') introducing a declaration
- itemEOF
- itemField // alphanumeric identifier, starting with '.', possibly chained ('.x.y')
- itemIdentifier // alphanumeric identifier
- itemLeftDelim // left action delimiter
- itemNumber // simple number, including imaginary
- itemPipe // pipe symbol
- itemRawString // raw quoted string (includes quotes)
- itemRightDelim // right action delimiter
- itemString // quoted string (includes quotes)
- itemText // plain text
- itemVariable // variable starting with '$', such as '$' or '$1' or '$hello'.
- // Keywords appear after all the rest.
- itemKeyword // used only to delimit the keywords
- itemDot // the cursor, spelled '.'.
- itemDefine // define keyword
- itemElse // else keyword
- itemEnd // end keyword
- itemIf // if keyword
- itemRange // range keyword
- itemTemplate // template keyword
- itemWith // with keyword
-)
-
-// Make the types prettyprint.
-var itemName = map[itemType]string{
- itemError: "error",
- itemBool: "bool",
- itemChar: "char",
- itemCharConstant: "charconst",
- itemComplex: "complex",
- itemColonEquals: ":=",
- itemEOF: "EOF",
- itemField: "field",
- itemIdentifier: "identifier",
- itemLeftDelim: "left delim",
- itemNumber: "number",
- itemPipe: "pipe",
- itemRawString: "raw string",
- itemRightDelim: "right delim",
- itemString: "string",
- itemVariable: "variable",
- // keywords
- itemDot: ".",
- itemDefine: "define",
- itemElse: "else",
- itemIf: "if",
- itemEnd: "end",
- itemRange: "range",
- itemTemplate: "template",
- itemWith: "with",
-}
-
-func (i itemType) String() string {
- s := itemName[i]
- if s == "" {
- return fmt.Sprintf("item%d", int(i))
- }
- return s
-}
-
-var key = map[string]itemType{
- ".": itemDot,
- "define": itemDefine,
- "else": itemElse,
- "end": itemEnd,
- "if": itemIf,
- "range": itemRange,
- "template": itemTemplate,
- "with": itemWith,
-}
-
-const eof = -1
-
-// stateFn represents the state of the scanner as a function that returns the next state.
-type stateFn func(*lexer) stateFn
-
-// lexer holds the state of the scanner.
-type lexer struct {
- name string // the name of the input; used only for error reports.
- input string // the string being scanned.
- leftDelim string // start of action.
- rightDelim string // end of action.
- state stateFn // the next lexing function to enter.
- pos int // current position in the input.
- start int // start position of this item.
- width int // width of last rune read from input.
- items chan item // channel of scanned items.
-}
-
-// next returns the next rune in the input.
-func (l *lexer) next() (r rune) {
- if l.pos >= len(l.input) {
- l.width = 0
- return eof
- }
- r, l.width = utf8.DecodeRuneInString(l.input[l.pos:])
- l.pos += l.width
- return r
-}
-
-// peek returns but does not consume the next rune in the input.
-func (l *lexer) peek() rune {
- r := l.next()
- l.backup()
- return r
-}
-
-// backup steps back one rune. Can only be called once per call of next.
-func (l *lexer) backup() {
- l.pos -= l.width
-}
-
-// emit passes an item back to the client.
-func (l *lexer) emit(t itemType) {
- l.items <- item{t, l.input[l.start:l.pos]}
- l.start = l.pos
-}
-
-// ignore skips over the pending input before this point.
-func (l *lexer) ignore() {
- l.start = l.pos
-}
-
-// accept consumes the next rune if it's from the valid set.
-func (l *lexer) accept(valid string) bool {
- if strings.IndexRune(valid, l.next()) >= 0 {
- return true
- }
- l.backup()
- return false
-}
-
-// acceptRun consumes a run of runes from the valid set.
-func (l *lexer) acceptRun(valid string) {
- for strings.IndexRune(valid, l.next()) >= 0 {
- }
- l.backup()
-}
-
-// lineNumber reports which line we're on. Doing it this way
-// means we don't have to worry about peek double counting.
-func (l *lexer) lineNumber() int {
- return 1 + strings.Count(l.input[:l.pos], "\n")
-}
-
-// error returns an error token and terminates the scan by passing
-// back a nil pointer that will be the next state, terminating l.run.
-func (l *lexer) errorf(format string, args ...interface{}) stateFn {
- l.items <- item{itemError, fmt.Sprintf(format, args...)}
- return nil
-}
-
-// nextItem returns the next item from the input.
-func (l *lexer) nextItem() item {
- for {
- select {
- case item := <-l.items:
- return item
- default:
- l.state = l.state(l)
- }
- }
- panic("not reached")
-}
-
-// lex creates a new scanner for the input string.
-func lex(name, input, left, right string) *lexer {
- if left == "" {
- left = leftDelim
- }
- if right == "" {
- right = rightDelim
- }
- l := &lexer{
- name: name,
- input: input,
- leftDelim: left,
- rightDelim: right,
- state: lexText,
- items: make(chan item, 2), // Two items of buffering is sufficient for all state functions
- }
- return l
-}
-
-// state functions
-
-const (
- leftDelim = "{{"
- rightDelim = "}}"
- leftComment = "/*"
- rightComment = "*/"
-)
-
-// lexText scans until an opening action delimiter, "{{".
-func lexText(l *lexer) stateFn {
- for {
- if strings.HasPrefix(l.input[l.pos:], l.leftDelim) {
- if l.pos > l.start {
- l.emit(itemText)
- }
- return lexLeftDelim
- }
- if l.next() == eof {
- break
- }
- }
- // Correctly reached EOF.
- if l.pos > l.start {
- l.emit(itemText)
- }
- l.emit(itemEOF)
- return nil
-}
-
-// lexLeftDelim scans the left delimiter, which is known to be present.
-func lexLeftDelim(l *lexer) stateFn {
- if strings.HasPrefix(l.input[l.pos:], l.leftDelim+leftComment) {
- return lexComment
- }
- l.pos += len(l.leftDelim)
- l.emit(itemLeftDelim)
- return lexInsideAction
-}
-
-// lexComment scans a comment. The left comment marker is known to be present.
-func lexComment(l *lexer) stateFn {
- i := strings.Index(l.input[l.pos:], rightComment+l.rightDelim)
- if i < 0 {
- return l.errorf("unclosed comment")
- }
- l.pos += i + len(rightComment) + len(l.rightDelim)
- l.ignore()
- return lexText
-}
-
-// lexRightDelim scans the right delimiter, which is known to be present.
-func lexRightDelim(l *lexer) stateFn {
- l.pos += len(l.rightDelim)
- l.emit(itemRightDelim)
- return lexText
-}
-
-// lexInsideAction scans the elements inside action delimiters.
-func lexInsideAction(l *lexer) stateFn {
- // Either number, quoted string, or identifier.
- // Spaces separate and are ignored.
- // Pipe symbols separate and are emitted.
- if strings.HasPrefix(l.input[l.pos:], l.rightDelim) {
- return lexRightDelim
- }
- switch r := l.next(); {
- case r == eof || r == '\n':
- return l.errorf("unclosed action")
- case isSpace(r):
- l.ignore()
- case r == ':':
- if l.next() != '=' {
- return l.errorf("expected :=")
- }
- l.emit(itemColonEquals)
- case r == '|':
- l.emit(itemPipe)
- case r == '"':
- return lexQuote
- case r == '`':
- return lexRawQuote
- case r == '$':
- return lexIdentifier
- case r == '\'':
- return lexChar
- case r == '.':
- // special look-ahead for ".field" so we don't break l.backup().
- if l.pos < len(l.input) {
- r := l.input[l.pos]
- if r < '0' || '9' < r {
- return lexIdentifier // itemDot comes from the keyword table.
- }
- }
- fallthrough // '.' can start a number.
- case r == '+' || r == '-' || ('0' <= r && r <= '9'):
- l.backup()
- return lexNumber
- case isAlphaNumeric(r):
- l.backup()
- return lexIdentifier
- case r <= unicode.MaxASCII && unicode.IsPrint(r):
- l.emit(itemChar)
- return lexInsideAction
- default:
- return l.errorf("unrecognized character in action: %#U", r)
- }
- return lexInsideAction
-}
-
-// lexIdentifier scans an alphanumeric or field.
-func lexIdentifier(l *lexer) stateFn {
-Loop:
- for {
- switch r := l.next(); {
- case isAlphaNumeric(r):
- // absorb.
- case r == '.' && (l.input[l.start] == '.' || l.input[l.start] == '$'):
- // field chaining; absorb into one token.
- default:
- l.backup()
- word := l.input[l.start:l.pos]
- switch {
- case key[word] > itemKeyword:
- l.emit(key[word])
- case word[0] == '.':
- l.emit(itemField)
- case word[0] == '$':
- l.emit(itemVariable)
- case word == "true", word == "false":
- l.emit(itemBool)
- default:
- l.emit(itemIdentifier)
- }
- break Loop
- }
- }
- return lexInsideAction
-}
-
-// lexChar scans a character constant. The initial quote is already
-// scanned. Syntax checking is done by the parser.
-func lexChar(l *lexer) stateFn {
-Loop:
- for {
- switch l.next() {
- case '\\':
- if r := l.next(); r != eof && r != '\n' {
- break
- }
- fallthrough
- case eof, '\n':
- return l.errorf("unterminated character constant")
- case '\'':
- break Loop
- }
- }
- l.emit(itemCharConstant)
- return lexInsideAction
-}
-
-// lexNumber scans a number: decimal, octal, hex, float, or imaginary. This
-// isn't a perfect number scanner - for instance it accepts "." and "0x0.2"
-// and "089" - but when it's wrong the input is invalid and the parser (via
-// strconv) will notice.
-func lexNumber(l *lexer) stateFn {
- if !l.scanNumber() {
- return l.errorf("bad number syntax: %q", l.input[l.start:l.pos])
- }
- if sign := l.peek(); sign == '+' || sign == '-' {
- // Complex: 1+2i. No spaces, must end in 'i'.
- if !l.scanNumber() || l.input[l.pos-1] != 'i' {
- return l.errorf("bad number syntax: %q", l.input[l.start:l.pos])
- }
- l.emit(itemComplex)
- } else {
- l.emit(itemNumber)
- }
- return lexInsideAction
-}
-
-func (l *lexer) scanNumber() bool {
- // Optional leading sign.
- l.accept("+-")
- // Is it hex?
- digits := "0123456789"
- if l.accept("0") && l.accept("xX") {
- digits = "0123456789abcdefABCDEF"
- }
- l.acceptRun(digits)
- if l.accept(".") {
- l.acceptRun(digits)
- }
- if l.accept("eE") {
- l.accept("+-")
- l.acceptRun("0123456789")
- }
- // Is it imaginary?
- l.accept("i")
- // Next thing mustn't be alphanumeric.
- if isAlphaNumeric(l.peek()) {
- l.next()
- return false
- }
- return true
-}
-
-// lexQuote scans a quoted string.
-func lexQuote(l *lexer) stateFn {
-Loop:
- for {
- switch l.next() {
- case '\\':
- if r := l.next(); r != eof && r != '\n' {
- break
- }
- fallthrough
- case eof, '\n':
- return l.errorf("unterminated quoted string")
- case '"':
- break Loop
- }
- }
- l.emit(itemString)
- return lexInsideAction
-}
-
-// lexRawQuote scans a raw quoted string.
-func lexRawQuote(l *lexer) stateFn {
-Loop:
- for {
- switch l.next() {
- case eof, '\n':
- return l.errorf("unterminated raw quoted string")
- case '`':
- break Loop
- }
- }
- l.emit(itemRawString)
- return lexInsideAction
-}
-
-// isSpace reports whether r is a space character.
-func isSpace(r rune) bool {
- switch r {
- case ' ', '\t', '\n', '\r':
- return true
- }
- return false
-}
-
-// isAlphaNumeric reports whether r is an alphabetic, digit, or underscore.
-func isAlphaNumeric(r rune) bool {
- return r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r)
-}
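
The lexer above is organized around the state-function pattern: each stateFn consumes input, emits items on a two-slot buffered channel, and returns the next stateFn; nextItem drives the machine synchronously, so no goroutine is needed. A stripped-down, ASCII-only illustration of that shape (a toy word splitter, not the deleted lexer):

    package main

    import "fmt"

    type item struct{ typ, val string }

    type lexer struct {
        input string
        start int
        pos   int
        state stateFn
        items chan item
    }

    type stateFn func(*lexer) stateFn

    // emit passes the pending input [start, pos) to the consumer.
    func (l *lexer) emit(typ string) {
        l.items <- item{typ, l.input[l.start:l.pos]}
        l.start = l.pos
    }

    // lexWord accumulates non-space bytes and emits them as one "word" item.
    func lexWord(l *lexer) stateFn {
        for l.pos < len(l.input) && l.input[l.pos] != ' ' {
            l.pos++
        }
        if l.pos > l.start {
            l.emit("word")
        }
        if l.pos >= len(l.input) {
            l.emit("EOF") // EOF doubles as the stop signal for the consumer.
            return nil
        }
        return lexSpace
    }

    // lexSpace skips spaces without emitting anything.
    func lexSpace(l *lexer) stateFn {
        for l.pos < len(l.input) && l.input[l.pos] == ' ' {
            l.pos++
        }
        l.start = l.pos
        return lexWord
    }

    // nextItem runs state functions until an item is available, the same
    // synchronous driving loop used by the lexer above.
    func (l *lexer) nextItem() item {
        for {
            select {
            case it := <-l.items:
                return it
            default:
                l.state = l.state(l)
            }
        }
    }

    func main() {
        l := &lexer{input: "lex me please", state: lexWord, items: make(chan item, 2)}
        for {
            it := l.nextItem()
            fmt.Printf("%s %q\n", it.typ, it.val)
            if it.typ == "EOF" {
                break
            }
        }
    }
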
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package parse
-
-import (
- "reflect"
- "testing"
-)
-
-type lexTest struct {
- name string
- input string
- items []item
-}
-
-var (
- tEOF = item{itemEOF, ""}
- tLeft = item{itemLeftDelim, "{{"}
- tRight = item{itemRightDelim, "}}"}
- tRange = item{itemRange, "range"}
- tPipe = item{itemPipe, "|"}
- tFor = item{itemIdentifier, "for"}
- tQuote = item{itemString, `"abc \n\t\" "`}
- raw = "`" + `abc\n\t\" ` + "`"
- tRawQuote = item{itemRawString, raw}
-)
-
-var lexTests = []lexTest{
- {"empty", "", []item{tEOF}},
- {"spaces", " \t\n", []item{{itemText, " \t\n"}, tEOF}},
- {"text", `now is the time`, []item{{itemText, "now is the time"}, tEOF}},
- {"text with comment", "hello-{{/* this is a comment */}}-world", []item{
- {itemText, "hello-"},
- {itemText, "-world"},
- tEOF,
- }},
- {"punctuation", "{{,@%}}", []item{
- tLeft,
- {itemChar, ","},
- {itemChar, "@"},
- {itemChar, "%"},
- tRight,
- tEOF,
- }},
- {"empty action", `{{}}`, []item{tLeft, tRight, tEOF}},
- {"for", `{{for }}`, []item{tLeft, tFor, tRight, tEOF}},
- {"quote", `{{"abc \n\t\" "}}`, []item{tLeft, tQuote, tRight, tEOF}},
- {"raw quote", "{{" + raw + "}}", []item{tLeft, tRawQuote, tRight, tEOF}},
- {"numbers", "{{1 02 0x14 -7.2i 1e3 +1.2e-4 4.2i 1+2i}}", []item{
- tLeft,
- {itemNumber, "1"},
- {itemNumber, "02"},
- {itemNumber, "0x14"},
- {itemNumber, "-7.2i"},
- {itemNumber, "1e3"},
- {itemNumber, "+1.2e-4"},
- {itemNumber, "4.2i"},
- {itemComplex, "1+2i"},
- tRight,
- tEOF,
- }},
- {"characters", `{{'a' '\n' '\'' '\\' '\u00FF' '\xFF' '本'}}`, []item{
- tLeft,
- {itemCharConstant, `'a'`},
- {itemCharConstant, `'\n'`},
- {itemCharConstant, `'\''`},
- {itemCharConstant, `'\\'`},
- {itemCharConstant, `'\u00FF'`},
- {itemCharConstant, `'\xFF'`},
- {itemCharConstant, `'本'`},
- tRight,
- tEOF,
- }},
- {"bools", "{{true false}}", []item{
- tLeft,
- {itemBool, "true"},
- {itemBool, "false"},
- tRight,
- tEOF,
- }},
- {"dot", "{{.}}", []item{
- tLeft,
- {itemDot, "."},
- tRight,
- tEOF,
- }},
- {"dots", "{{.x . .2 .x.y}}", []item{
- tLeft,
- {itemField, ".x"},
- {itemDot, "."},
- {itemNumber, ".2"},
- {itemField, ".x.y"},
- tRight,
- tEOF,
- }},
- {"keywords", "{{range if else end with}}", []item{
- tLeft,
- {itemRange, "range"},
- {itemIf, "if"},
- {itemElse, "else"},
- {itemEnd, "end"},
- {itemWith, "with"},
- tRight,
- tEOF,
- }},
- {"variables", "{{$c := printf $ $hello $23 $ $var.Field .Method}}", []item{
- tLeft,
- {itemVariable, "$c"},
- {itemColonEquals, ":="},
- {itemIdentifier, "printf"},
- {itemVariable, "$"},
- {itemVariable, "$hello"},
- {itemVariable, "$23"},
- {itemVariable, "$"},
- {itemVariable, "$var.Field"},
- {itemField, ".Method"},
- tRight,
- tEOF,
- }},
- {"pipeline", `intro {{echo hi 1.2 |noargs|args 1 "hi"}} outro`, []item{
- {itemText, "intro "},
- tLeft,
- {itemIdentifier, "echo"},
- {itemIdentifier, "hi"},
- {itemNumber, "1.2"},
- tPipe,
- {itemIdentifier, "noargs"},
- tPipe,
- {itemIdentifier, "args"},
- {itemNumber, "1"},
- {itemString, `"hi"`},
- tRight,
- {itemText, " outro"},
- tEOF,
- }},
- {"declaration", "{{$v := 3}}", []item{
- tLeft,
- {itemVariable, "$v"},
- {itemColonEquals, ":="},
- {itemNumber, "3"},
- tRight,
- tEOF,
- }},
- {"2 declarations", "{{$v , $w := 3}}", []item{
- tLeft,
- {itemVariable, "$v"},
- {itemChar, ","},
- {itemVariable, "$w"},
- {itemColonEquals, ":="},
- {itemNumber, "3"},
- tRight,
- tEOF,
- }},
- // errors
- {"badchar", "#{{\x01}}", []item{
- {itemText, "#"},
- tLeft,
- {itemError, "unrecognized character in action: U+0001"},
- }},
- {"unclosed action", "{{\n}}", []item{
- tLeft,
- {itemError, "unclosed action"},
- }},
- {"EOF in action", "{{range", []item{
- tLeft,
- tRange,
- {itemError, "unclosed action"},
- }},
- {"unclosed quote", "{{\"\n\"}}", []item{
- tLeft,
- {itemError, "unterminated quoted string"},
- }},
- {"unclosed raw quote", "{{`xx\n`}}", []item{
- tLeft,
- {itemError, "unterminated raw quoted string"},
- }},
- {"unclosed char constant", "{{'\n}}", []item{
- tLeft,
- {itemError, "unterminated character constant"},
- }},
- {"bad number", "{{3k}}", []item{
- tLeft,
- {itemError, `bad number syntax: "3k"`},
- }},
-
- // Fixed bugs
- // Many elements in an action blew the lookahead until
- // we made lexInsideAction not loop.
- {"long pipeline deadlock", "{{|||||}}", []item{
- tLeft,
- tPipe,
- tPipe,
- tPipe,
- tPipe,
- tPipe,
- tRight,
- tEOF,
- }},
-}
-
-// collect gathers the emitted items into a slice.
-func collect(t *lexTest, left, right string) (items []item) {
- l := lex(t.name, t.input, left, right)
- for {
- item := l.nextItem()
- items = append(items, item)
- if item.typ == itemEOF || item.typ == itemError {
- break
- }
- }
- return
-}
-
-func TestLex(t *testing.T) {
- for _, test := range lexTests {
- items := collect(&test, "", "")
- if !reflect.DeepEqual(items, test.items) {
- t.Errorf("%s: got\n\t%v\nexpected\n\t%v", test.name, items, test.items)
- }
- }
-}
-
-// Some easy cases from above, but with delimiters $$ and @@
-var lexDelimTests = []lexTest{
- {"punctuation", "$$,@%{{}}@@", []item{
- tLeftDelim,
- {itemChar, ","},
- {itemChar, "@"},
- {itemChar, "%"},
- {itemChar, "{"},
- {itemChar, "{"},
- {itemChar, "}"},
- {itemChar, "}"},
- tRightDelim,
- tEOF,
- }},
- {"empty action", `$$@@`, []item{tLeftDelim, tRightDelim, tEOF}},
- {"for", `$$for @@`, []item{tLeftDelim, tFor, tRightDelim, tEOF}},
- {"quote", `$$"abc \n\t\" "@@`, []item{tLeftDelim, tQuote, tRightDelim, tEOF}},
- {"raw quote", "$$" + raw + "@@", []item{tLeftDelim, tRawQuote, tRightDelim, tEOF}},
-}
-
-var (
- tLeftDelim = item{itemLeftDelim, "$$"}
- tRightDelim = item{itemRightDelim, "@@"}
-)
-
-func TestDelims(t *testing.T) {
- for _, test := range lexDelimTests {
- items := collect(&test, "$$", "@@")
- if !reflect.DeepEqual(items, test.items) {
- t.Errorf("%s: got\n\t%v\nexpected\n\t%v", test.name, items, test.items)
- }
- }
-}
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Parse nodes.
-
-package parse
-
-import (
- "bytes"
- "fmt"
- "strconv"
- "strings"
-)
-
-// A node is an element in the parse tree. The interface is trivial.
-type Node interface {
- Type() NodeType
- String() string
-}
-
-// NodeType identifies the type of a parse tree node.
-type NodeType int
-
-// Type returns itself and provides an easy default implementation
-// for embedding in a Node. Embedded in all non-trivial Nodes.
-func (t NodeType) Type() NodeType {
- return t
-}
-
-const (
- NodeText NodeType = iota // Plain text.
- NodeAction // A simple action such as field evaluation.
- NodeBool // A boolean constant.
- NodeCommand // An element of a pipeline.
- NodeDot // The cursor, dot.
- nodeElse // An else action. Not added to tree.
- nodeEnd // An end action. Not added to tree.
- NodeField // A field or method name.
- NodeIdentifier // An identifier; always a function name.
- NodeIf // An if action.
- NodeList // A list of Nodes.
- NodeNumber // A numerical constant.
- NodePipe // A pipeline of commands.
- NodeRange // A range action.
- NodeString // A string constant.
- NodeTemplate // A template invocation action.
- NodeVariable // A $ variable.
- NodeWith // A with action.
-)
-
-// Nodes.
-
-// ListNode holds a sequence of nodes.
-type ListNode struct {
- NodeType
- Nodes []Node // The element nodes in lexical order.
-}
-
-func newList() *ListNode {
- return &ListNode{NodeType: NodeList}
-}
-
-func (l *ListNode) append(n Node) {
- l.Nodes = append(l.Nodes, n)
-}
-
-func (l *ListNode) String() string {
- b := new(bytes.Buffer)
- fmt.Fprint(b, "[")
- for _, n := range l.Nodes {
- fmt.Fprint(b, n)
- }
- fmt.Fprint(b, "]")
- return b.String()
-}
-
-// TextNode holds plain text.
-type TextNode struct {
- NodeType
- Text []byte // The text; may span newlines.
-}
-
-func newText(text string) *TextNode {
- return &TextNode{NodeType: NodeText, Text: []byte(text)}
-}
-
-func (t *TextNode) String() string {
- return fmt.Sprintf("(text: %q)", t.Text)
-}
-
-// PipeNode holds a pipeline with an optional declaration.
-type PipeNode struct {
- NodeType
- Line int // The line number in the input.
- Decl []*VariableNode // Variable declarations in lexical order.
- Cmds []*CommandNode // The commands in lexical order.
-}
-
-func newPipeline(line int, decl []*VariableNode) *PipeNode {
- return &PipeNode{NodeType: NodePipe, Line: line, Decl: decl}
-}
-
-func (p *PipeNode) append(command *CommandNode) {
- p.Cmds = append(p.Cmds, command)
-}
-
-func (p *PipeNode) String() string {
- if p.Decl != nil {
- return fmt.Sprintf("%v := %v", p.Decl, p.Cmds)
- }
- return fmt.Sprintf("%v", p.Cmds)
-}
-
-// ActionNode holds an action (something bounded by delimiters).
-// Control actions have their own nodes; ActionNode represents simple
-// ones such as field evaluations.
-type ActionNode struct {
- NodeType
- Line int // The line number in the input.
- Pipe *PipeNode // The pipeline in the action.
-}
-
-func newAction(line int, pipe *PipeNode) *ActionNode {
- return &ActionNode{NodeType: NodeAction, Line: line, Pipe: pipe}
-}
-
-func (a *ActionNode) String() string {
- return fmt.Sprintf("(action: %v)", a.Pipe)
-}
-
-// CommandNode holds a command (a pipeline inside an evaluating action).
-type CommandNode struct {
- NodeType
- Args []Node // Arguments in lexical order: Identifier, field, or constant.
-}
-
-func newCommand() *CommandNode {
- return &CommandNode{NodeType: NodeCommand}
-}
-
-func (c *CommandNode) append(arg Node) {
- c.Args = append(c.Args, arg)
-}
-
-func (c *CommandNode) String() string {
- return fmt.Sprintf("(command: %v)", c.Args)
-}
-
-// IdentifierNode holds an identifier.
-type IdentifierNode struct {
- NodeType
- Ident string // The identifier's name.
-}
-
-// NewIdentifier returns a new IdentifierNode with the given identifier name.
-func NewIdentifier(ident string) *IdentifierNode {
- return &IdentifierNode{NodeType: NodeIdentifier, Ident: ident}
-}
-
-func (i *IdentifierNode) String() string {
- return fmt.Sprintf("I=%s", i.Ident)
-}
-
-// VariableNode holds a list of variable names. The dollar sign is
-// part of the name.
-type VariableNode struct {
- NodeType
- Ident []string // Variable names in lexical order.
-}
-
-func newVariable(ident string) *VariableNode {
- return &VariableNode{NodeType: NodeVariable, Ident: strings.Split(ident, ".")}
-}
-
-func (v *VariableNode) String() string {
- return fmt.Sprintf("V=%s", v.Ident)
-}
-
-// DotNode holds the special identifier '.'. It is represented by a nil pointer.
-type DotNode bool
-
-func newDot() *DotNode {
- return nil
-}
-
-func (d *DotNode) Type() NodeType {
- return NodeDot
-}
-
-func (d *DotNode) String() string {
- return "{{<.>}}"
-}
-
-// FieldNode holds a field (identifier starting with '.').
-// The names may be chained ('.x.y').
-// The period is dropped from each ident.
-type FieldNode struct {
- NodeType
- Ident []string // The identifiers in lexical order.
-}
-
-func newField(ident string) *FieldNode {
- return &FieldNode{NodeType: NodeField, Ident: strings.Split(ident[1:], ".")} // [1:] to drop leading period
-}
-
-func (f *FieldNode) String() string {
- return fmt.Sprintf("F=%s", f.Ident)
-}
-
-// BoolNode holds a boolean constant.
-type BoolNode struct {
- NodeType
- True bool // The value of the boolean constant.
-}
-
-func newBool(true bool) *BoolNode {
- return &BoolNode{NodeType: NodeBool, True: true}
-}
-
-func (b *BoolNode) String() string {
- return fmt.Sprintf("B=%t", b.True)
-}
-
-// NumberNode holds a number: signed or unsigned integer, float, or complex.
-// The value is parsed and stored under all the types that can represent the value.
-// This simulates in a small amount of code the behavior of Go's ideal constants.
-type NumberNode struct {
- NodeType
- IsInt bool // Number has an integral value.
- IsUint bool // Number has an unsigned integral value.
- IsFloat bool // Number has a floating-point value.
- IsComplex bool // Number is complex.
- Int64 int64 // The signed integer value.
- Uint64 uint64 // The unsigned integer value.
- Float64 float64 // The floating-point value.
- Complex128 complex128 // The complex value.
- Text string // The original textual representation from the input.
-}
-
-func newNumber(text string, typ itemType) (*NumberNode, error) {
- n := &NumberNode{NodeType: NodeNumber, Text: text}
- switch typ {
- case itemCharConstant:
- rune, _, tail, err := strconv.UnquoteChar(text[1:], text[0])
- if err != nil {
- return nil, err
- }
- if tail != "'" {
- return nil, fmt.Errorf("malformed character constant: %s", text)
- }
- n.Int64 = int64(rune)
- n.IsInt = true
- n.Uint64 = uint64(rune)
- n.IsUint = true
- n.Float64 = float64(rune) // odd but those are the rules.
- n.IsFloat = true
- return n, nil
- case itemComplex:
- // fmt.Sscan can parse the pair, so let it do the work.
- if _, err := fmt.Sscan(text, &n.Complex128); err != nil {
- return nil, err
- }
- n.IsComplex = true
- n.simplifyComplex()
- return n, nil
- }
- // Imaginary constants can only be complex unless they are zero.
- if len(text) > 0 && text[len(text)-1] == 'i' {
- f, err := strconv.Atof64(text[:len(text)-1])
- if err == nil {
- n.IsComplex = true
- n.Complex128 = complex(0, f)
- n.simplifyComplex()
- return n, nil
- }
- }
- // Do integer test first so we get 0x123 etc.
- u, err := strconv.Btoui64(text, 0) // will fail for -0; fixed below.
- if err == nil {
- n.IsUint = true
- n.Uint64 = u
- }
- i, err := strconv.Btoi64(text, 0)
- if err == nil {
- n.IsInt = true
- n.Int64 = i
- if i == 0 {
- n.IsUint = true // in case of -0.
- n.Uint64 = u
- }
- }
- // If an integer extraction succeeded, promote the float.
- if n.IsInt {
- n.IsFloat = true
- n.Float64 = float64(n.Int64)
- } else if n.IsUint {
- n.IsFloat = true
- n.Float64 = float64(n.Uint64)
- } else {
- f, err := strconv.Atof64(text)
- if err == nil {
- n.IsFloat = true
- n.Float64 = f
- // If a floating-point extraction succeeded, extract the int if needed.
- if !n.IsInt && float64(int64(f)) == f {
- n.IsInt = true
- n.Int64 = int64(f)
- }
- if !n.IsUint && float64(uint64(f)) == f {
- n.IsUint = true
- n.Uint64 = uint64(f)
- }
- }
- }
- if !n.IsInt && !n.IsUint && !n.IsFloat {
- return nil, fmt.Errorf("illegal number syntax: %q", text)
- }
- return n, nil
-}
-
-// simplifyComplex pulls out any other types that are represented by the complex number.
-// These all require that the imaginary part be zero.
-func (n *NumberNode) simplifyComplex() {
- n.IsFloat = imag(n.Complex128) == 0
- if n.IsFloat {
- n.Float64 = real(n.Complex128)
- n.IsInt = float64(int64(n.Float64)) == n.Float64
- if n.IsInt {
- n.Int64 = int64(n.Float64)
- }
- n.IsUint = float64(uint64(n.Float64)) == n.Float64
- if n.IsUint {
- n.Uint64 = uint64(n.Float64)
- }
- }
-}
-
-func (n *NumberNode) String() string {
- return fmt.Sprintf("N=%s", n.Text)
-}
-
-// StringNode holds a string constant. The value has been "unquoted".
-type StringNode struct {
- NodeType
- Quoted string // The original text of the string, with quotes.
- Text string // The string, after quote processing.
-}
-
-func newString(orig, text string) *StringNode {
- return &StringNode{NodeType: NodeString, Quoted: orig, Text: text}
-}
-
-func (s *StringNode) String() string {
- return fmt.Sprintf("S=%#q", s.Text)
-}
-
-// endNode represents an {{end}} action. It is represented by a nil pointer.
-// It does not appear in the final parse tree.
-type endNode bool
-
-func newEnd() *endNode {
- return nil
-}
-
-func (e *endNode) Type() NodeType {
- return nodeEnd
-}
-
-func (e *endNode) String() string {
- return "{{end}}"
-}
-
-// elseNode represents an {{else}} action. Does not appear in the final tree.
-type elseNode struct {
- NodeType
- Line int // The line number in the input.
-}
-
-func newElse(line int) *elseNode {
- return &elseNode{NodeType: nodeElse, Line: line}
-}
-
-func (e *elseNode) Type() NodeType {
- return nodeElse
-}
-
-func (e *elseNode) String() string {
- return "{{else}}"
-}
-
-// BranchNode is the common representation of if, range, and with.
-type BranchNode struct {
- NodeType
- Line int // The line number in the input.
- Pipe *PipeNode // The pipeline to be evaluated.
- List *ListNode // What to execute if the value is non-empty.
- ElseList *ListNode // What to execute if the value is empty (nil if absent).
-}
-
-func (b *BranchNode) String() string {
- name := ""
- switch b.NodeType {
- case NodeIf:
- name = "if"
- case NodeRange:
- name = "range"
- case NodeWith:
- name = "with"
- default:
- panic("unknown branch type")
- }
- if b.ElseList != nil {
- return fmt.Sprintf("({{%s %s}} %s {{else}} %s)", name, b.Pipe, b.List, b.ElseList)
- }
- return fmt.Sprintf("({{%s %s}} %s)", name, b.Pipe, b.List)
-}
-
-// IfNode represents an {{if}} action and its commands.
-type IfNode struct {
- BranchNode
-}
-
-func newIf(line int, pipe *PipeNode, list, elseList *ListNode) *IfNode {
- return &IfNode{BranchNode{NodeType: NodeIf, Line: line, Pipe: pipe, List: list, ElseList: elseList}}
-}
-
-// RangeNode represents a {{range}} action and its commands.
-type RangeNode struct {
- BranchNode
-}
-
-func newRange(line int, pipe *PipeNode, list, elseList *ListNode) *RangeNode {
- return &RangeNode{BranchNode{NodeType: NodeRange, Line: line, Pipe: pipe, List: list, ElseList: elseList}}
-}
-
-// WithNode represents a {{with}} action and its commands.
-type WithNode struct {
- BranchNode
-}
-
-func newWith(line int, pipe *PipeNode, list, elseList *ListNode) *WithNode {
- return &WithNode{BranchNode{NodeType: NodeWith, Line: line, Pipe: pipe, List: list, ElseList: elseList}}
-}
-
-// TemplateNode represents a {{template}} action.
-type TemplateNode struct {
- NodeType
- Line int // The line number in the input.
- Name string // The name of the template (unquoted).
- Pipe *PipeNode // The command to evaluate as dot for the template.
-}
-
-func newTemplate(line int, name string, pipe *PipeNode) *TemplateNode {
- return &TemplateNode{NodeType: NodeTemplate, Line: line, Name: name, Pipe: pipe}
-}
-
-func (t *TemplateNode) String() string {
- if t.Pipe == nil {
- return fmt.Sprintf("{{template %q}}", t.Name)
- }
- return fmt.Sprintf("{{template %q %s}}", t.Name, t.Pipe)
-}
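
A detail worth noting in the node definitions above: most concrete nodes embed NodeType, so the Type method comes for free and the Node interface stays trivial. A cut-down, self-contained sketch of that embedding trick:

    package main

    import "fmt"

    // NodeType doubles as the discriminator value and as the provider of a
    // default Type method for any struct that embeds it.
    type NodeType int

    func (t NodeType) Type() NodeType { return t }

    type Node interface {
        Type() NodeType
        String() string
    }

    const (
        NodeText NodeType = iota
        NodeAction
    )

    type TextNode struct {
        NodeType
        Text string
    }

    func (t *TextNode) String() string { return fmt.Sprintf("(text: %q)", t.Text) }

    func main() {
        var n Node = &TextNode{NodeType: NodeText, Text: "hi"}
        fmt.Println(n.Type() == NodeText, n) // true (text: "hi")
    }
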
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package parse builds parse trees for templates. The grammar is defined
-// in the documents for the template package.
-package parse
-
-import (
- "fmt"
- "runtime"
- "strconv"
- "unicode"
-)
-
-// Tree is the representation of a parsed template.
-type Tree struct {
- Name string // Name is the name of the template.
- Root *ListNode // Root is the top-level root of the parse tree.
- // Parsing only; cleared after parse.
- funcs []map[string]interface{}
- lex *lexer
- token [2]item // two-token lookahead for parser.
- peekCount int
- vars []string // variables defined at the moment.
-}
-
-// next returns the next token.
-func (t *Tree) next() item {
- if t.peekCount > 0 {
- t.peekCount--
- } else {
- t.token[0] = t.lex.nextItem()
- }
- return t.token[t.peekCount]
-}
-
-// backup backs the input stream up one token.
-func (t *Tree) backup() {
- t.peekCount++
-}
-
-// backup2 backs the input stream up two tokens.
-func (t *Tree) backup2(t1 item) {
- t.token[1] = t1
- t.peekCount = 2
-}
-
-// peek returns but does not consume the next token.
-func (t *Tree) peek() item {
- if t.peekCount > 0 {
- return t.token[t.peekCount-1]
- }
- t.peekCount = 1
- t.token[0] = t.lex.nextItem()
- return t.token[0]
-}
-
-// Parsing.
-
-// New allocates a new template with the given name.
-func New(name string, funcs ...map[string]interface{}) *Tree {
- return &Tree{
- Name: name,
- funcs: funcs,
- }
-}
-
-// errorf formats the error and terminates processing.
-func (t *Tree) errorf(format string, args ...interface{}) {
- t.Root = nil
- format = fmt.Sprintf("template: %s:%d: %s", t.Name, t.lex.lineNumber(), format)
- panic(fmt.Errorf(format, args...))
-}
-
-// error terminates processing.
-func (t *Tree) error(err error) {
- t.errorf("%s", err)
-}
-
-// expect consumes the next token and guarantees it has the required type.
-func (t *Tree) expect(expected itemType, context string) item {
- token := t.next()
- if token.typ != expected {
- t.errorf("expected %s in %s; got %s", expected, context, token)
- }
- return token
-}
-
-// unexpected complains about the token and terminates processing.
-func (t *Tree) unexpected(token item, context string) {
- t.errorf("unexpected %s in %s", token, context)
-}
-
-// recover is the handler that turns panics into returns from the top level of Parse.
-func (t *Tree) recover(errp *error) {
- e := recover()
- if e != nil {
- if _, ok := e.(runtime.Error); ok {
- panic(e)
- }
- if t != nil {
- t.stopParse()
- }
- *errp = e.(error)
- }
- return
-}
-
-// startParse starts the template parsing from the lexer.
-func (t *Tree) startParse(funcs []map[string]interface{}, lex *lexer) {
- t.Root = nil
- t.lex = lex
- t.vars = []string{"$"}
- t.funcs = funcs
-}
-
-// stopParse terminates parsing.
-func (t *Tree) stopParse() {
- t.lex = nil
- t.vars = nil
- t.funcs = nil
-}
-
-// atEOF returns true if, possibly after spaces, we're at EOF.
-func (t *Tree) atEOF() bool {
- for {
- token := t.peek()
- switch token.typ {
- case itemEOF:
- return true
- case itemText:
- for _, r := range token.val {
- if !unicode.IsSpace(r) {
- return false
- }
- }
- t.next() // skip spaces.
- continue
- }
- break
- }
- return false
-}
-
-// Parse parses the template definition string to construct an internal
-// representation of the template for execution. If either action delimiter
-// string is empty, the default ("{{" or "}}") is used.
-func (t *Tree) Parse(s, leftDelim, rightDelim string, funcs ...map[string]interface{}) (tree *Tree, err error) {
- defer t.recover(&err)
- t.startParse(funcs, lex(t.Name, s, leftDelim, rightDelim))
- t.parse(true)
- t.stopParse()
- return t, nil
-}
-
-// parse is the helper for Parse.
-// It triggers an error if we expect EOF but don't reach it.
-func (t *Tree) parse(toEOF bool) (next Node) {
- t.Root, next = t.itemList(true)
- if toEOF && next != nil {
- t.errorf("unexpected %s", next)
- }
- return next
-}
-
-// itemList:
-// textOrAction*
-// Terminates at EOF and at {{end}} or {{else}}, which is returned separately.
-// The toEOF flag tells whether we expect to reach EOF.
-func (t *Tree) itemList(toEOF bool) (list *ListNode, next Node) {
- list = newList()
- for t.peek().typ != itemEOF {
- n := t.textOrAction()
- switch n.Type() {
- case nodeEnd, nodeElse:
- return list, n
- }
- list.append(n)
- }
- if !toEOF {
- t.unexpected(t.next(), "input")
- }
- return list, nil
-}
-
-// textOrAction:
-// text | action
-func (t *Tree) textOrAction() Node {
- switch token := t.next(); token.typ {
- case itemText:
- return newText(token.val)
- case itemLeftDelim:
- return t.action()
- default:
- t.unexpected(token, "input")
- }
- return nil
-}
-
-// Action:
-// control
-// command ("|" command)*
-// Left delim is past. Now get actions.
-// First word could be a keyword such as range.
-func (t *Tree) action() (n Node) {
- switch token := t.next(); token.typ {
- case itemElse:
- return t.elseControl()
- case itemEnd:
- return t.endControl()
- case itemIf:
- return t.ifControl()
- case itemRange:
- return t.rangeControl()
- case itemTemplate:
- return t.templateControl()
- case itemWith:
- return t.withControl()
- }
- t.backup()
- // Do not pop variables; they persist until "end".
- return newAction(t.lex.lineNumber(), t.pipeline("command"))
-}
-
-// Pipeline:
-// field or command
-// pipeline "|" pipeline
-func (t *Tree) pipeline(context string) (pipe *PipeNode) {
- var decl []*VariableNode
- // Are there declarations?
- for {
- if v := t.peek(); v.typ == itemVariable {
- t.next()
- if next := t.peek(); next.typ == itemColonEquals || next.typ == itemChar {
- t.next()
- variable := newVariable(v.val)
- if len(variable.Ident) != 1 {
- t.errorf("illegal variable in declaration: %s", v.val)
- }
- decl = append(decl, variable)
- t.vars = append(t.vars, v.val)
- if next.typ == itemChar && next.val == "," {
- if context == "range" && len(decl) < 2 {
- continue
- }
- t.errorf("too many declarations in %s", context)
- }
- } else {
- t.backup2(v)
- }
- }
- break
- }
- pipe = newPipeline(t.lex.lineNumber(), decl)
- for {
- switch token := t.next(); token.typ {
- case itemRightDelim:
- if len(pipe.Cmds) == 0 {
- t.errorf("missing value for %s", context)
- }
- return
- case itemBool, itemCharConstant, itemComplex, itemDot, itemField, itemIdentifier,
- itemVariable, itemNumber, itemRawString, itemString:
- t.backup()
- pipe.append(t.command())
- default:
- t.unexpected(token, context)
- }
- }
- return
-}
-
-func (t *Tree) parseControl(context string) (lineNum int, pipe *PipeNode, list, elseList *ListNode) {
- lineNum = t.lex.lineNumber()
- defer t.popVars(len(t.vars))
- pipe = t.pipeline(context)
- var next Node
- list, next = t.itemList(false)
- switch next.Type() {
- case nodeEnd: //done
- case nodeElse:
- elseList, next = t.itemList(false)
- if next.Type() != nodeEnd {
- t.errorf("expected end; found %s", next)
- }
- }
- return lineNum, pipe, list, elseList
-}
-
-// If:
-// {{if pipeline}} itemList {{end}}
-// {{if pipeline}} itemList {{else}} itemList {{end}}
-// If keyword is past.
-func (t *Tree) ifControl() Node {
- return newIf(t.parseControl("if"))
-}
-
-// Range:
-// {{range pipeline}} itemList {{end}}
-// {{range pipeline}} itemList {{else}} itemList {{end}}
-// Range keyword is past.
-func (t *Tree) rangeControl() Node {
- return newRange(t.parseControl("range"))
-}
-
-// With:
-// {{with pipeline}} itemList {{end}}
-// {{with pipeline}} itemList {{else}} itemList {{end}}
-// If keyword is past.
-func (t *Tree) withControl() Node {
- return newWith(t.parseControl("with"))
-}
-
-// End:
-// {{end}}
-// End keyword is past.
-func (t *Tree) endControl() Node {
- t.expect(itemRightDelim, "end")
- return newEnd()
-}
-
-// Else:
-// {{else}}
-// Else keyword is past.
-func (t *Tree) elseControl() Node {
- t.expect(itemRightDelim, "else")
- return newElse(t.lex.lineNumber())
-}
-
-// Template:
-// {{template stringValue pipeline}}
-// Template keyword is past. The name must be something that can evaluate
-// to a string.
-func (t *Tree) templateControl() Node {
- var name string
- switch token := t.next(); token.typ {
- case itemString, itemRawString:
- s, err := strconv.Unquote(token.val)
- if err != nil {
- t.error(err)
- }
- name = s
- default:
- t.unexpected(token, "template invocation")
- }
- var pipe *PipeNode
- if t.next().typ != itemRightDelim {
- t.backup()
- // Do not pop variables; they persist until "end".
- pipe = t.pipeline("template")
- }
- return newTemplate(t.lex.lineNumber(), name, pipe)
-}
-
-// command:
-// space-separated arguments up to a pipeline character or right delimiter.
-// we consume the pipe character but leave the right delim to terminate the action.
-func (t *Tree) command() *CommandNode {
- cmd := newCommand()
-Loop:
- for {
- switch token := t.next(); token.typ {
- case itemRightDelim:
- t.backup()
- break Loop
- case itemPipe:
- break Loop
- case itemError:
- t.errorf("%s", token.val)
- case itemIdentifier:
- if !t.hasFunction(token.val) {
- t.errorf("function %q not defined", token.val)
- }
- cmd.append(NewIdentifier(token.val))
- case itemDot:
- cmd.append(newDot())
- case itemVariable:
- cmd.append(t.useVar(token.val))
- case itemField:
- cmd.append(newField(token.val))
- case itemBool:
- cmd.append(newBool(token.val == "true"))
- case itemCharConstant, itemComplex, itemNumber:
- number, err := newNumber(token.val, token.typ)
- if err != nil {
- t.error(err)
- }
- cmd.append(number)
- case itemString, itemRawString:
- s, err := strconv.Unquote(token.val)
- if err != nil {
- t.error(err)
- }
- cmd.append(newString(token.val, s))
- default:
- t.unexpected(token, "command")
- }
- }
- if len(cmd.Args) == 0 {
- t.errorf("empty command")
- }
- return cmd
-}
-
-// hasFunction reports if a function name exists in the Tree's maps.
-func (t *Tree) hasFunction(name string) bool {
- for _, funcMap := range t.funcs {
- if funcMap == nil {
- continue
- }
- if funcMap[name] != nil {
- return true
- }
- }
- return false
-}
-
-// popVars trims the variable list to the specified length
-func (t *Tree) popVars(n int) {
- t.vars = t.vars[:n]
-}
-
-// useVar returns a node for a variable reference. It errors if the
-// variable is not defined.
-func (t *Tree) useVar(name string) Node {
- v := newVariable(name)
- for _, varName := range t.vars {
- if varName == v.Ident[0] {
- return v
- }
- }
- t.errorf("undefined variable %q", v.Ident[0])
- return nil
-}
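
All error handling in the parser above funnels through one idiom: errorf panics with an error value from deep inside the recursion, and a deferred recover at the entry point turns that panic back into an ordinary returned error while re-raising genuine runtime errors. A standalone sketch of the same idiom, using hypothetical names:

    package main

    import (
        "fmt"
        "runtime"
    )

    type parser struct{ line int }

    // errorf aborts parsing by panicking with an error value.
    func (p *parser) errorf(format string, args ...interface{}) {
        panic(fmt.Errorf(format, args...))
    }

    // recover converts an errorf panic into a returned error; anything that
    // is a runtime.Error (nil dereference, index out of range, ...) is
    // considered a real bug and re-panicked.
    func (p *parser) recover(errp *error) {
        if e := recover(); e != nil {
            if _, ok := e.(runtime.Error); ok {
                panic(e)
            }
            *errp = e.(error)
        }
    }

    func (p *parser) parse(input string) (err error) {
        defer p.recover(&err)
        if input == "" {
            p.errorf("line %d: empty input", p.line)
        }
        return nil
    }

    func main() {
        p := &parser{line: 1}
        fmt.Println(p.parse("ok")) // <nil>
        fmt.Println(p.parse(""))   // line 1: empty input
    }
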
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package parse
-
-import (
- "flag"
- "fmt"
- "testing"
-)
-
-var debug = flag.Bool("debug", false, "show the errors produced by the tests")
-
-type numberTest struct {
- text string
- isInt bool
- isUint bool
- isFloat bool
- isComplex bool
- int64
- uint64
- float64
- complex128
-}
-
-var numberTests = []numberTest{
- // basics
- {"0", true, true, true, false, 0, 0, 0, 0},
- {"-0", true, true, true, false, 0, 0, 0, 0}, // check that -0 is a uint.
- {"73", true, true, true, false, 73, 73, 73, 0},
- {"073", true, true, true, false, 073, 073, 073, 0},
- {"0x73", true, true, true, false, 0x73, 0x73, 0x73, 0},
- {"-73", true, false, true, false, -73, 0, -73, 0},
- {"+73", true, false, true, false, 73, 0, 73, 0},
- {"100", true, true, true, false, 100, 100, 100, 0},
- {"1e9", true, true, true, false, 1e9, 1e9, 1e9, 0},
- {"-1e9", true, false, true, false, -1e9, 0, -1e9, 0},
- {"-1.2", false, false, true, false, 0, 0, -1.2, 0},
- {"1e19", false, true, true, false, 0, 1e19, 1e19, 0},
- {"-1e19", false, false, true, false, 0, 0, -1e19, 0},
- {"4i", false, false, false, true, 0, 0, 0, 4i},
- {"-1.2+4.2i", false, false, false, true, 0, 0, 0, -1.2 + 4.2i},
- {"073i", false, false, false, true, 0, 0, 0, 73i}, // not octal!
- // complex with 0 imaginary are float (and maybe integer)
- {"0i", true, true, true, true, 0, 0, 0, 0},
- {"-1.2+0i", false, false, true, true, 0, 0, -1.2, -1.2},
- {"-12+0i", true, false, true, true, -12, 0, -12, -12},
- {"13+0i", true, true, true, true, 13, 13, 13, 13},
- // funny bases
- {"0123", true, true, true, false, 0123, 0123, 0123, 0},
- {"-0x0", true, true, true, false, 0, 0, 0, 0},
- {"0xdeadbeef", true, true, true, false, 0xdeadbeef, 0xdeadbeef, 0xdeadbeef, 0},
- // character constants
- {`'a'`, true, true, true, false, 'a', 'a', 'a', 0},
- {`'\n'`, true, true, true, false, '\n', '\n', '\n', 0},
- {`'\\'`, true, true, true, false, '\\', '\\', '\\', 0},
- {`'\''`, true, true, true, false, '\'', '\'', '\'', 0},
- {`'\xFF'`, true, true, true, false, 0xFF, 0xFF, 0xFF, 0},
- {`'パ'`, true, true, true, false, 0x30d1, 0x30d1, 0x30d1, 0},
- {`'\u30d1'`, true, true, true, false, 0x30d1, 0x30d1, 0x30d1, 0},
- {`'\U000030d1'`, true, true, true, false, 0x30d1, 0x30d1, 0x30d1, 0},
- // some broken syntax
- {text: "+-2"},
- {text: "0x123."},
- {text: "1e."},
- {text: "0xi."},
- {text: "1+2."},
- {text: "'x"},
- {text: "'xx'"},
-}
-
-func TestNumberParse(t *testing.T) {
- for _, test := range numberTests {
- // If fmt.Sscan thinks it's complex, it's complex. We can't trust the output
- // because imaginary comes out as a number.
- var c complex128
- typ := itemNumber
- if test.text[0] == '\'' {
- typ = itemCharConstant
- } else {
- _, err := fmt.Sscan(test.text, &c)
- if err == nil {
- typ = itemComplex
- }
- }
- n, err := newNumber(test.text, typ)
- ok := test.isInt || test.isUint || test.isFloat || test.isComplex
- if ok && err != nil {
- t.Errorf("unexpected error for %q: %s", test.text, err)
- continue
- }
- if !ok && err == nil {
- t.Errorf("expected error for %q", test.text)
- continue
- }
- if !ok {
- if *debug {
- fmt.Printf("%s\n\t%s\n", test.text, err)
- }
- continue
- }
- if n.IsComplex != test.isComplex {
- t.Errorf("complex incorrect for %q; should be %t", test.text, test.isComplex)
- }
- if test.isInt {
- if !n.IsInt {
- t.Errorf("expected integer for %q", test.text)
- }
- if n.Int64 != test.int64 {
- t.Errorf("int64 for %q should be %d Is %d", test.text, test.int64, n.Int64)
- }
- } else if n.IsInt {
- t.Errorf("did not expect integer for %q", test.text)
- }
- if test.isUint {
- if !n.IsUint {
- t.Errorf("expected unsigned integer for %q", test.text)
- }
- if n.Uint64 != test.uint64 {
- t.Errorf("uint64 for %q should be %d Is %d", test.text, test.uint64, n.Uint64)
- }
- } else if n.IsUint {
- t.Errorf("did not expect unsigned integer for %q", test.text)
- }
- if test.isFloat {
- if !n.IsFloat {
- t.Errorf("expected float for %q", test.text)
- }
- if n.Float64 != test.float64 {
- t.Errorf("float64 for %q should be %g Is %g", test.text, test.float64, n.Float64)
- }
- } else if n.IsFloat {
- t.Errorf("did not expect float for %q", test.text)
- }
- if test.isComplex {
- if !n.IsComplex {
- t.Errorf("expected complex for %q", test.text)
- }
- if n.Complex128 != test.complex128 {
- t.Errorf("complex128 for %q should be %g Is %g", test.text, test.complex128, n.Complex128)
- }
- } else if n.IsComplex {
- t.Errorf("did not expect complex for %q", test.text)
- }
- }
-}
-
-type parseTest struct {
- name string
- input string
- ok bool
- result string
-}
-
-const (
- noError = true
- hasError = false
-)
-
-var parseTests = []parseTest{
- {"empty", "", noError,
- `[]`},
- {"comment", "{{/*\n\n\n*/}}", noError,
- `[]`},
- {"spaces", " \t\n", noError,
- `[(text: " \t\n")]`},
- {"text", "some text", noError,
- `[(text: "some text")]`},
- {"emptyAction", "{{}}", hasError,
- `[(action: [])]`},
- {"field", "{{.X}}", noError,
- `[(action: [(command: [F=[X]])])]`},
- {"simple command", "{{printf}}", noError,
- `[(action: [(command: [I=printf])])]`},
- {"$ invocation", "{{$}}", noError,
- "[(action: [(command: [V=[$]])])]"},
- {"variable invocation", "{{with $x := 3}}{{$x 23}}{{end}}", noError,
- "[({{with [V=[$x]] := [(command: [N=3])]}} [(action: [(command: [V=[$x] N=23])])])]"},
- {"variable with fields", "{{$.I}}", noError,
- "[(action: [(command: [V=[$ I]])])]"},
- {"multi-word command", "{{printf `%d` 23}}", noError,
- "[(action: [(command: [I=printf S=`%d` N=23])])]"},
- {"pipeline", "{{.X|.Y}}", noError,
- `[(action: [(command: [F=[X]]) (command: [F=[Y]])])]`},
- {"pipeline with decl", "{{$x := .X|.Y}}", noError,
- `[(action: [V=[$x]] := [(command: [F=[X]]) (command: [F=[Y]])])]`},
- {"declaration", "{{.X|.Y}}", noError,
- `[(action: [(command: [F=[X]]) (command: [F=[Y]])])]`},
- {"simple if", "{{if .X}}hello{{end}}", noError,
- `[({{if [(command: [F=[X]])]}} [(text: "hello")])]`},
- {"if with else", "{{if .X}}true{{else}}false{{end}}", noError,
- `[({{if [(command: [F=[X]])]}} [(text: "true")] {{else}} [(text: "false")])]`},
- {"simple range", "{{range .X}}hello{{end}}", noError,
- `[({{range [(command: [F=[X]])]}} [(text: "hello")])]`},
- {"chained field range", "{{range .X.Y.Z}}hello{{end}}", noError,
- `[({{range [(command: [F=[X Y Z]])]}} [(text: "hello")])]`},
- {"nested range", "{{range .X}}hello{{range .Y}}goodbye{{end}}{{end}}", noError,
- `[({{range [(command: [F=[X]])]}} [(text: "hello")({{range [(command: [F=[Y]])]}} [(text: "goodbye")])])]`},
- {"range with else", "{{range .X}}true{{else}}false{{end}}", noError,
- `[({{range [(command: [F=[X]])]}} [(text: "true")] {{else}} [(text: "false")])]`},
- {"range over pipeline", "{{range .X|.M}}true{{else}}false{{end}}", noError,
- `[({{range [(command: [F=[X]]) (command: [F=[M]])]}} [(text: "true")] {{else}} [(text: "false")])]`},
- {"range []int", "{{range .SI}}{{.}}{{end}}", noError,
- `[({{range [(command: [F=[SI]])]}} [(action: [(command: [{{<.>}}])])])]`},
- {"constants", "{{range .SI 1 -3.2i true false 'a'}}{{end}}", noError,
- `[({{range [(command: [F=[SI] N=1 N=-3.2i B=true B=false N='a'])]}} [])]`},
- {"template", "{{template `x`}}", noError,
- `[{{template "x"}}]`},
- {"template with arg", "{{template `x` .Y}}", noError,
- `[{{template "x" [(command: [F=[Y]])]}}]`},
- {"with", "{{with .X}}hello{{end}}", noError,
- `[({{with [(command: [F=[X]])]}} [(text: "hello")])]`},
- {"with with else", "{{with .X}}hello{{else}}goodbye{{end}}", noError,
- `[({{with [(command: [F=[X]])]}} [(text: "hello")] {{else}} [(text: "goodbye")])]`},
- // Errors.
- {"unclosed action", "hello{{range", hasError, ""},
- {"unmatched end", "{{end}}", hasError, ""},
- {"missing end", "hello{{range .x}}", hasError, ""},
- {"missing end after else", "hello{{range .x}}{{else}}", hasError, ""},
- {"undefined function", "hello{{undefined}}", hasError, ""},
- {"undefined variable", "{{$x}}", hasError, ""},
- {"variable undefined after end", "{{with $x := 4}}{{end}}{{$x}}", hasError, ""},
- {"variable undefined in template", "{{template $v}}", hasError, ""},
- {"declare with field", "{{with $x.Y := 4}}{{end}}", hasError, ""},
- {"template with field ref", "{{template .X}}", hasError, ""},
- {"template with var", "{{template $v}}", hasError, ""},
- {"invalid punctuation", "{{printf 3, 4}}", hasError, ""},
- {"multidecl outside range", "{{with $v, $u := 3}}{{end}}", hasError, ""},
- {"too many decls in range", "{{range $u, $v, $w := 3}}{{end}}", hasError, ""},
-}
-
-var builtins = map[string]interface{}{
- "printf": fmt.Sprintf,
-}
-
-func TestParse(t *testing.T) {
- for _, test := range parseTests {
- tmpl, err := New(test.name).Parse(test.input, "", "", builtins)
- switch {
- case err == nil && !test.ok:
- t.Errorf("%q: expected error; got none", test.name)
- continue
- case err != nil && test.ok:
- t.Errorf("%q: unexpected error: %v", test.name, err)
- continue
- case err != nil && !test.ok:
- // expected error, got one
- if *debug {
- fmt.Printf("%s: %s\n\t%s\n", test.name, test.input, err)
- }
- continue
- }
- result := tmpl.Root.String()
- if result != test.result {
- t.Errorf("%s=(%q): got\n\t%v\nexpected\n\t%v", test.name, test.input, result, test.result)
- }
- }
-}
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package parse
-
-import (
- "fmt"
- "strconv"
-)
-
-// Set returns a map from template name to *Tree, created by parsing the
-// template set definition in the argument string. If an error is
-// encountered, parsing stops and the error is returned.
-func Set(text, leftDelim, rightDelim string, funcs ...map[string]interface{}) (tree map[string]*Tree, err error) {
- tree = make(map[string]*Tree)
- defer (*Tree)(nil).recover(&err)
- lex := lex("set", text, leftDelim, rightDelim)
- const context = "define clause"
- for {
- t := New("set") // name will be updated once we know it.
- t.startParse(funcs, lex)
- // Expect EOF or "{{ define name }}".
- if t.atEOF() {
- break
- }
- t.expect(itemLeftDelim, context)
- t.expect(itemDefine, context)
- name := t.expect(itemString, context)
- t.Name, err = strconv.Unquote(name.val)
- if err != nil {
- t.error(err)
- }
- t.expect(itemRightDelim, context)
- end := t.parse(false)
- if end == nil {
- t.errorf("unexpected EOF in %s", context)
- }
- if end.Type() != nodeEnd {
- t.errorf("unexpected %s in %s", end, context)
- }
- t.stopParse()
- if _, present := tree[t.Name]; present {
- return nil, fmt.Errorf("template: %q multiply defined", name)
- }
- tree[t.Name] = t
- }
- return
-}
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package template
-
-import (
- "fmt"
- "io"
- "reflect"
- "template/parse"
-)
-
-// Set holds a set of related templates that can refer to one another by name.
-// The zero value represents an empty set.
-// A template may be a member of multiple sets.
-type Set struct {
- tmpl map[string]*Template
- leftDelim string
- rightDelim string
- parseFuncs FuncMap
- execFuncs map[string]reflect.Value
-}
-
-func (s *Set) init() {
- if s.tmpl == nil {
- s.tmpl = make(map[string]*Template)
- s.parseFuncs = make(FuncMap)
- s.execFuncs = make(map[string]reflect.Value)
- }
-}
-
-// Delims sets the action delimiters, to be used in a subsequent
-// parse, to the specified strings.
-// An empty delimiter stands for the corresponding default: {{ or }}.
-// The return value is the set, so calls can be chained.
-func (s *Set) Delims(left, right string) *Set {
- s.leftDelim = left
- s.rightDelim = right
- return s
-}
-
-// Funcs adds the elements of the argument map to the set's function map. It
-// panics if a value in the map is not a function with appropriate return
-// type.
-// The return value is the set, so calls can be chained.
-func (s *Set) Funcs(funcMap FuncMap) *Set {
- s.init()
- addValueFuncs(s.execFuncs, funcMap)
- addFuncs(s.parseFuncs, funcMap)
- return s
-}
-
-// Add adds the argument templates to the set. It panics if two templates
-// with the same name are added or if a template is already a member of
-// a set.
-// The return value is the set, so calls can be chained.
-func (s *Set) Add(templates ...*Template) *Set {
- for _, t := range templates {
- if err := s.add(t); err != nil {
- panic(err)
- }
- }
- return s
-}
-
-// add adds the argument template to the set.
-func (s *Set) add(t *Template) error {
- s.init()
- if t.set != nil {
- return fmt.Errorf("template: %q already in a set", t.name)
- }
- if _, ok := s.tmpl[t.name]; ok {
- return fmt.Errorf("template: %q already defined in set", t.name)
- }
- s.tmpl[t.name] = t
- t.set = s
- return nil
-}
-
-// Template returns the template with the given name in the set,
-// or nil if there is no such template.
-func (s *Set) Template(name string) *Template {
- return s.tmpl[name]
-}
-
-// FuncMap returns the set's function map.
-func (s *Set) FuncMap() FuncMap {
- return s.parseFuncs
-}
-
-// Execute applies the named template to the specified data object, writing
-// the output to wr.
-func (s *Set) Execute(wr io.Writer, name string, data interface{}) error {
- tmpl := s.tmpl[name]
- if tmpl == nil {
- return fmt.Errorf("template: no template %q in set", name)
- }
- return tmpl.Execute(wr, data)
-}
-
-// Parse parses a string into a set of named templates. Parse may be called
-// multiple times for a given set, adding the templates defined in the string
-// to the set. If a template is redefined, the element in the set is
-// overwritten with the new definition.
-func (s *Set) Parse(text string) (*Set, error) {
- trees, err := parse.Set(text, s.leftDelim, s.rightDelim, s.parseFuncs, builtins)
- if err != nil {
- return nil, err
- }
- s.init()
- for name, tree := range trees {
- tmpl := New(name)
- tmpl.Tree = tree
- tmpl.addToSet(s)
- s.tmpl[name] = tmpl
- }
- return s, nil
-}
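
Set.Parse above relies on the {{define}} grammar handled by parse.Set. The same idea carries over to the Go 1 text/template package, where a single Parse call can register several named templates on one *Template value. A minimal sketch; the template names and bodies are illustrative only.

    package main

    import (
        "os"
        "text/template"
    )

    const text = `{{define "x"}}TEXT{{end}}{{define "dot"}}{{.}}{{end}}`

    func main() {
        // Must panics on a parse error, which is convenient for fixed literals.
        t := template.Must(template.New("set").Parse(text))
        if err := t.ExecuteTemplate(os.Stdout, "x", nil); err != nil {
            panic(err)
        }
        if err := t.ExecuteTemplate(os.Stdout, "dot", 42); err != nil {
            panic(err)
        }
    }
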
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package template
-
-import (
- "fmt"
- "testing"
-)
-
-const (
- noError = true
- hasError = false
-)
-
-type setParseTest struct {
- name string
- input string
- ok bool
- names []string
- results []string
-}
-
-var setParseTests = []setParseTest{
- {"empty", "", noError,
- nil,
- nil},
- {"one", `{{define "foo"}} FOO {{end}}`, noError,
- []string{"foo"},
- []string{`[(text: " FOO ")]`}},
- {"two", `{{define "foo"}} FOO {{end}}{{define "bar"}} BAR {{end}}`, noError,
- []string{"foo", "bar"},
- []string{`[(text: " FOO ")]`, `[(text: " BAR ")]`}},
- // errors
- {"missing end", `{{define "foo"}} FOO `, hasError,
- nil,
- nil},
- {"malformed name", `{{define "foo}} FOO `, hasError,
- nil,
- nil},
-}
-
-func TestSetParse(t *testing.T) {
- for _, test := range setParseTests {
- set, err := new(Set).Parse(test.input)
- switch {
- case err == nil && !test.ok:
- t.Errorf("%q: expected error; got none", test.name)
- continue
- case err != nil && test.ok:
- t.Errorf("%q: unexpected error: %v", test.name, err)
- continue
- case err != nil && !test.ok:
- // expected error, got one
- if *debug {
- fmt.Printf("%s: %s\n\t%s\n", test.name, test.input, err)
- }
- continue
- }
- if set == nil {
- continue
- }
- if len(set.tmpl) != len(test.names) {
- t.Errorf("%s: wrong number of templates; wanted %d got %d", test.name, len(test.names), len(set.tmpl))
- continue
- }
- for i, name := range test.names {
- tmpl, ok := set.tmpl[name]
- if !ok {
- t.Errorf("%s: can't find template %q", test.name, name)
- continue
- }
- result := tmpl.Root.String()
- if result != test.results[i] {
- t.Errorf("%s=(%q): got\n\t%v\nexpected\n\t%v", test.name, test.input, result, test.results[i])
- }
- }
- }
-}
-
-var setExecTests = []execTest{
- {"empty", "", "", nil, true},
- {"text", "some text", "some text", nil, true},
- {"invoke x", `{{template "x" .SI}}`, "TEXT", tVal, true},
- {"invoke x no args", `{{template "x"}}`, "TEXT", tVal, true},
- {"invoke dot int", `{{template "dot" .I}}`, "17", tVal, true},
- {"invoke dot []int", `{{template "dot" .SI}}`, "[3 4 5]", tVal, true},
- {"invoke dotV", `{{template "dotV" .U}}`, "v", tVal, true},
- {"invoke nested int", `{{template "nested" .I}}`, "17", tVal, true},
- {"variable declared by template", `{{template "nested" $x=.SI}},{{index $x 1}}`, "[3 4 5],4", tVal, true},
-
- // User-defined function: test argument evaluator.
- {"testFunc literal", `{{oneArg "joe"}}`, "oneArg=joe", tVal, true},
- {"testFunc .", `{{oneArg .}}`, "oneArg=joe", "joe", true},
-}
-
-// These strings are also in testdata/*.
-const setText1 = `
- {{define "x"}}TEXT{{end}}
- {{define "dotV"}}{{.V}}{{end}}
-`
-
-const setText2 = `
- {{define "dot"}}{{.}}{{end}}
- {{define "nested"}}{{template "dot" .}}{{end}}
-`
-
-func TestSetExecute(t *testing.T) {
- // Declare a set with a couple of templates first.
- set := new(Set)
- _, err := set.Parse(setText1)
- if err != nil {
- t.Fatalf("error parsing set: %s", err)
- }
- _, err = set.Parse(setText2)
- if err != nil {
- t.Fatalf("error parsing set: %s", err)
- }
- testExecute(setExecTests, set, t)
-}
-
-func TestSetParseFiles(t *testing.T) {
- set := new(Set)
- _, err := set.ParseFiles("DOES NOT EXIST")
- if err == nil {
- t.Error("expected error for non-existent file; got none")
- }
- _, err = set.ParseFiles("testdata/file1.tmpl", "testdata/file2.tmpl")
- if err != nil {
- t.Fatalf("error parsing files: %v", err)
- }
- testExecute(setExecTests, set, t)
-}
-
-func TestParseSetFiles(t *testing.T) {
- set := new(Set)
- _, err := ParseSetFiles("DOES NOT EXIST")
- if err == nil {
- t.Error("expected error for non-existent file; got none")
- }
- set, err = ParseSetFiles("testdata/file1.tmpl", "testdata/file2.tmpl")
- if err != nil {
- t.Fatalf("error parsing files: %v", err)
- }
- testExecute(setExecTests, set, t)
-}
-
-func TestSetParseGlob(t *testing.T) {
- _, err := new(Set).ParseGlob("DOES NOT EXIST")
- if err == nil {
- t.Error("expected error for non-existent file; got none")
- }
- _, err = new(Set).ParseGlob("[x")
- if err == nil {
- t.Error("expected error for bad pattern; got none")
- }
- set, err := new(Set).ParseGlob("testdata/file*.tmpl")
- if err != nil {
- t.Fatalf("error parsing files: %v", err)
- }
- testExecute(setExecTests, set, t)
-}
-
-func TestParseSetGlob(t *testing.T) {
- _, err := ParseSetGlob("DOES NOT EXIST")
- if err == nil {
- t.Error("expected error for non-existent file; got none")
- }
- _, err = ParseSetGlob("[x")
- if err == nil {
- t.Error("expected error for bad pattern; got none")
- }
- set, err := ParseSetGlob("testdata/file*.tmpl")
- if err != nil {
- t.Fatalf("error parsing files: %v", err)
- }
- testExecute(setExecTests, set, t)
-}
-
-var templateFileExecTests = []execTest{
- {"test", `{{template "tmpl1.tmpl"}}{{template "tmpl2.tmpl"}}`, "template1\ntemplate2\n", 0, true},
-}
-
-func TestSetParseTemplateFiles(t *testing.T) {
- _, err := ParseTemplateFiles("DOES NOT EXIST")
- if err == nil {
- t.Error("expected error for non-existent file; got none")
- }
- set, err := new(Set).ParseTemplateFiles("testdata/tmpl1.tmpl", "testdata/tmpl2.tmpl")
- if err != nil {
- t.Fatalf("error parsing files: %v", err)
- }
- testExecute(templateFileExecTests, set, t)
-}
-
-func TestParseTemplateFiles(t *testing.T) {
- _, err := ParseTemplateFiles("DOES NOT EXIST")
- if err == nil {
- t.Error("expected error for non-existent file; got none")
- }
- set, err := new(Set).ParseTemplateFiles("testdata/tmpl1.tmpl", "testdata/tmpl2.tmpl")
- if err != nil {
- t.Fatalf("error parsing files: %v", err)
- }
- testExecute(templateFileExecTests, set, t)
-}
-
-func TestSetParseTemplateGlob(t *testing.T) {
- _, err := ParseTemplateGlob("DOES NOT EXIST")
- if err == nil {
- t.Error("expected error for non-existent file; got none")
- }
- _, err = new(Set).ParseTemplateGlob("[x")
- if err == nil {
- t.Error("expected error for bad pattern; got none")
- }
- set, err := new(Set).ParseTemplateGlob("testdata/tmpl*.tmpl")
- if err != nil {
- t.Fatalf("error parsing files: %v", err)
- }
- testExecute(templateFileExecTests, set, t)
-}
-
-func TestParseTemplateGlob(t *testing.T) {
- _, err := ParseTemplateGlob("DOES NOT EXIST")
- if err == nil {
- t.Error("expected error for non-existent file; got none")
- }
- _, err = ParseTemplateGlob("[x")
- if err == nil {
- t.Error("expected error for bad pattern; got none")
- }
- set, err := ParseTemplateGlob("testdata/tmpl*.tmpl")
- if err != nil {
- t.Fatalf("error parsing files: %v", err)
- }
- testExecute(templateFileExecTests, set, t)
-}
+++ /dev/null
-{{define "x"}}TEXT{{end}}
-{{define "dotV"}}{{.V}}{{end}}
+++ /dev/null
-{{define "dot"}}{{.}}{{end}}
-{{define "nested"}}{{template "dot" .}}{{end}}
"flag"
"fmt"
"math"
- "rand"
+ "math/rand"
"reflect"
"strings"
)
package quick
import (
- "rand"
+ "math/rand"
"reflect"
"testing"
)
import (
"fmt"
- "rand"
+ "math/rand"
"reflect"
"strings"
)
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package scanner provides a scanner and tokenizer for UTF-8-encoded text.
+// It takes an io.Reader providing the source, which then can be tokenized
+// through repeated calls to the Scan function. For compatibility with
+// existing tools, the NUL character is not allowed (implementation
+// restriction).
+//
+// By default, a Scanner skips white space and Go comments and recognizes all
+// literals as defined by the Go language specification. It may be
+// customized to recognize only a subset of those literals and to recognize
+// different white space characters.
+//
+// Basic usage pattern:
+//
+// var s scanner.Scanner
+// s.Init(src)
+// tok := s.Scan()
+// for tok != scanner.EOF {
+// // do something with tok
+// tok = s.Scan()
+// }
+//
+package scanner
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "unicode"
+ "unicode/utf8"
+)
+
+// TODO(gri): Consider changing this to use the new (token) Position package.
+
+// A source position is represented by a Position value.
+// A position is valid if Line > 0.
+type Position struct {
+ Filename string // filename, if any
+ Offset int // byte offset, starting at 0
+ Line int // line number, starting at 1
+ Column int // column number, starting at 1 (character count per line)
+}
+
+// IsValid returns true if the position is valid.
+func (pos *Position) IsValid() bool { return pos.Line > 0 }
+
+func (pos Position) String() string {
+ s := pos.Filename
+ if pos.IsValid() {
+ if s != "" {
+ s += ":"
+ }
+ s += fmt.Sprintf("%d:%d", pos.Line, pos.Column)
+ }
+ if s == "" {
+ s = "???"
+ }
+ return s
+}
+
+// Predefined mode bits to control recognition of tokens. For instance,
+// to configure a Scanner such that it only recognizes (Go) identifiers
+// and integers, and skips comments, set the Scanner's Mode field to:
+//
+// ScanIdents | ScanInts | SkipComments
+//
+const (
+ ScanIdents = 1 << -Ident
+ ScanInts = 1 << -Int
+ ScanFloats = 1 << -Float // includes Ints
+ ScanChars = 1 << -Char
+ ScanStrings = 1 << -String
+ ScanRawStrings = 1 << -RawString
+ ScanComments = 1 << -Comment
+ SkipComments = 1 << -skipComment // if set with ScanComments, comments become white space
+ GoTokens = ScanIdents | ScanFloats | ScanChars | ScanStrings | ScanRawStrings | ScanComments | SkipComments
+)
+
+// The result of Scan is one of the following tokens or a Unicode character.
+const (
+ EOF = -(iota + 1)
+ Ident
+ Int
+ Float
+ Char
+ String
+ RawString
+ Comment
+ skipComment
+)
+
+var tokenString = map[rune]string{
+ EOF: "EOF",
+ Ident: "Ident",
+ Int: "Int",
+ Float: "Float",
+ Char: "Char",
+ String: "String",
+ RawString: "RawString",
+ Comment: "Comment",
+}
+
+// TokenString returns a (visible) string for a token or Unicode character.
+func TokenString(tok rune) string {
+ if s, found := tokenString[tok]; found {
+ return s
+ }
+ return fmt.Sprintf("%q", string(tok))
+}
+
+// GoWhitespace is the default value for the Scanner's Whitespace field.
+// Its value selects Go's white space characters.
+const GoWhitespace = 1<<'\t' | 1<<'\n' | 1<<'\r' | 1<<' '
+
+const bufLen = 1024 // at least utf8.UTFMax
+
+// A Scanner implements reading of Unicode characters and tokens from an io.Reader.
+type Scanner struct {
+ // Input
+ src io.Reader
+
+ // Source buffer
+ srcBuf [bufLen + 1]byte // +1 for sentinel for common case of s.next()
+ srcPos int // reading position (srcBuf index)
+ srcEnd int // source end (srcBuf index)
+
+ // Source position
+ srcBufOffset int // byte offset of srcBuf[0] in source
+ line int // line count
+ column int // character count
+ lastLineLen int // length of last line in characters (for correct column reporting)
+ lastCharLen int // length of last character in bytes
+
+ // Token text buffer
+ // Typically, token text is stored completely in srcBuf, but in general
+ // the token text's head may be buffered in tokBuf while the token text's
+ // tail is stored in srcBuf.
+ tokBuf bytes.Buffer // token text head that is not in srcBuf anymore
+ tokPos int // token text tail position (srcBuf index); valid if >= 0
+ tokEnd int // token text tail end (srcBuf index)
+
+ // One character look-ahead
+ ch rune // character before current srcPos
+
+ // Error is called for each error encountered. If no Error
+ // function is set, the error is reported to os.Stderr.
+ Error func(s *Scanner, msg string)
+
+ // ErrorCount is incremented by one for each error encountered.
+ ErrorCount int
+
+ // The Mode field controls which tokens are recognized. For instance,
+ // to recognize Ints, set the ScanInts bit in Mode. The field may be
+ // changed at any time.
+ Mode uint
+
+ // The Whitespace field controls which characters are recognized
+ // as white space. To recognize a character ch <= ' ' as white space,
+ // set the ch'th bit in Whitespace (the Scanner's behavior is undefined
+ // for values ch > ' '). The field may be changed at any time.
+ Whitespace uint64
+
+ // Start position of most recently scanned token; set by Scan.
+ // Calling Init or Next invalidates the position (Line == 0).
+ // The Filename field is always left untouched by the Scanner.
+ // If an error is reported (via Error) and Position is invalid,
+ // the scanner is not inside a token. Call Pos to obtain an error
+ // position in that case.
+ Position
+}
+
+// Init initializes a Scanner with a new source and returns s.
+// Error is set to nil, ErrorCount is set to 0, Mode is set to GoTokens,
+// and Whitespace is set to GoWhitespace.
+func (s *Scanner) Init(src io.Reader) *Scanner {
+ s.src = src
+
+ // initialize source buffer
+ // (the first call to next() will fill it by calling src.Read)
+ s.srcBuf[0] = utf8.RuneSelf // sentinel
+ s.srcPos = 0
+ s.srcEnd = 0
+
+ // initialize source position
+ s.srcBufOffset = 0
+ s.line = 1
+ s.column = 0
+ s.lastLineLen = 0
+ s.lastCharLen = 0
+
+ // initialize token text buffer
+ // (required for first call to next()).
+ s.tokPos = -1
+
+ // initialize one character look-ahead
+ s.ch = -1 // no char read yet
+
+ // initialize public fields
+ s.Error = nil
+ s.ErrorCount = 0
+ s.Mode = GoTokens
+ s.Whitespace = GoWhitespace
+ s.Line = 0 // invalidate token position
+
+ return s
+}
+
+// TODO(gri): The code for next() and the internal scanner state could benefit
+// from a rethink. While next() is optimized for the common ASCII
+// case, the "corrections" needed for proper position tracking undo
+// some of the attempts for fast-path optimization.
+
+// next reads and returns the next Unicode character. It is designed such
+// that only a minimal amount of work needs to be done in the common ASCII
+// case (one test to check for both ASCII and end-of-buffer, and one test
+// to check for newlines).
+func (s *Scanner) next() rune {
+ ch, width := rune(s.srcBuf[s.srcPos]), 1
+
+ if ch >= utf8.RuneSelf {
+ // uncommon case: not ASCII or not enough bytes
+ for s.srcPos+utf8.UTFMax > s.srcEnd && !utf8.FullRune(s.srcBuf[s.srcPos:s.srcEnd]) {
+ // not enough bytes: read some more, but first
+ // save away token text if any
+ if s.tokPos >= 0 {
+ s.tokBuf.Write(s.srcBuf[s.tokPos:s.srcPos])
+ s.tokPos = 0
+ // s.tokEnd is set by Scan()
+ }
+ // move unread bytes to beginning of buffer
+ copy(s.srcBuf[0:], s.srcBuf[s.srcPos:s.srcEnd])
+ s.srcBufOffset += s.srcPos
+ // read more bytes
+ // (an io.Reader must return io.EOF when it reaches
+ // the end of what it is reading - simply returning
+ // n == 0 will make this loop retry forever; but the
+ // error is in the reader implementation in that case)
+ i := s.srcEnd - s.srcPos
+ n, err := s.src.Read(s.srcBuf[i:bufLen])
+ s.srcPos = 0
+ s.srcEnd = i + n
+ s.srcBuf[s.srcEnd] = utf8.RuneSelf // sentinel
+ if err != nil {
+ if s.srcEnd == 0 {
+ if s.lastCharLen > 0 {
+ // previous character was not EOF
+ s.column++
+ }
+ s.lastCharLen = 0
+ return EOF
+ }
+ if err != io.EOF {
+ s.error(err.Error())
+ }
+ // If err == EOF, we won't be getting more
+ // bytes; break to avoid infinite loop. If
+ // err is something else, we don't know if
+ // we can get more bytes; thus also break.
+ break
+ }
+ }
+ // at least one byte
+ ch = rune(s.srcBuf[s.srcPos])
+ if ch >= utf8.RuneSelf {
+ // uncommon case: not ASCII
+ ch, width = utf8.DecodeRune(s.srcBuf[s.srcPos:s.srcEnd])
+ if ch == utf8.RuneError && width == 1 {
+ // advance for correct error position
+ s.srcPos += width
+ s.lastCharLen = width
+ s.column++
+ s.error("illegal UTF-8 encoding")
+ return ch
+ }
+ }
+ }
+
+ // advance
+ s.srcPos += width
+ s.lastCharLen = width
+ s.column++
+
+ // special situations
+ switch ch {
+ case 0:
+ // implementation restriction for compatibility with other tools
+ s.error("illegal character NUL")
+ case '\n':
+ s.line++
+ s.lastLineLen = s.column
+ s.column = 0
+ }
+
+ return ch
+}
+
+// Next reads and returns the next Unicode character.
+// It returns EOF at the end of the source. It reports
+// a read error by calling s.Error, if not nil; otherwise
+// it prints an error message to os.Stderr. Next does not
+// update the Scanner's Position field; use Pos() to
+// get the current position.
+func (s *Scanner) Next() rune {
+ s.tokPos = -1 // don't collect token text
+ s.Line = 0 // invalidate token position
+ ch := s.Peek()
+ s.ch = s.next()
+ return ch
+}
+
+// Peek returns the next Unicode character in the source without advancing
+// the scanner. It returns EOF if the scanner's position is at the last
+// character of the source.
+func (s *Scanner) Peek() rune {
+ if s.ch < 0 {
+ s.ch = s.next()
+ }
+ return s.ch
+}
+
+func (s *Scanner) error(msg string) {
+ s.ErrorCount++
+ if s.Error != nil {
+ s.Error(s, msg)
+ return
+ }
+ pos := s.Position
+ if !pos.IsValid() {
+ pos = s.Pos()
+ }
+ fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg)
+}
+
+func (s *Scanner) scanIdentifier() rune {
+ ch := s.next() // read character after first '_' or letter
+ for ch == '_' || unicode.IsLetter(ch) || unicode.IsDigit(ch) {
+ ch = s.next()
+ }
+ return ch
+}
+
+func digitVal(ch rune) int {
+ switch {
+ case '0' <= ch && ch <= '9':
+ return int(ch - '0')
+ case 'a' <= ch && ch <= 'f':
+ return int(ch - 'a' + 10)
+ case 'A' <= ch && ch <= 'F':
+ return int(ch - 'A' + 10)
+ }
+ return 16 // larger than any legal digit val
+}
+
+func isDecimal(ch rune) bool { return '0' <= ch && ch <= '9' }
+
+func (s *Scanner) scanMantissa(ch rune) rune {
+ for isDecimal(ch) {
+ ch = s.next()
+ }
+ return ch
+}
+
+func (s *Scanner) scanFraction(ch rune) rune {
+ if ch == '.' {
+ ch = s.scanMantissa(s.next())
+ }
+ return ch
+}
+
+func (s *Scanner) scanExponent(ch rune) rune {
+ if ch == 'e' || ch == 'E' {
+ ch = s.next()
+ if ch == '-' || ch == '+' {
+ ch = s.next()
+ }
+ ch = s.scanMantissa(ch)
+ }
+ return ch
+}
+
+func (s *Scanner) scanNumber(ch rune) (rune, rune) {
+ // isDecimal(ch)
+ if ch == '0' {
+ // int or float
+ ch = s.next()
+ if ch == 'x' || ch == 'X' {
+ // hexadecimal int
+ ch = s.next()
+ for digitVal(ch) < 16 {
+ ch = s.next()
+ }
+ } else {
+ // octal int or float
+ seenDecimalDigit := false
+ for isDecimal(ch) {
+ if ch > '7' {
+ seenDecimalDigit = true
+ }
+ ch = s.next()
+ }
+ if s.Mode&ScanFloats != 0 && (ch == '.' || ch == 'e' || ch == 'E') {
+ // float
+ ch = s.scanFraction(ch)
+ ch = s.scanExponent(ch)
+ return Float, ch
+ }
+ // octal int
+ if seenDecimalDigit {
+ s.error("illegal octal number")
+ }
+ }
+ return Int, ch
+ }
+ // decimal int or float
+ ch = s.scanMantissa(ch)
+ if s.Mode&ScanFloats != 0 && (ch == '.' || ch == 'e' || ch == 'E') {
+ // float
+ ch = s.scanFraction(ch)
+ ch = s.scanExponent(ch)
+ return Float, ch
+ }
+ return Int, ch
+}
+
+func (s *Scanner) scanDigits(ch rune, base, n int) rune {
+ for n > 0 && digitVal(ch) < base {
+ ch = s.next()
+ n--
+ }
+ if n > 0 {
+ s.error("illegal char escape")
+ }
+ return ch
+}
+
+func (s *Scanner) scanEscape(quote rune) rune {
+	ch := s.next() // read character after '\\'
+ switch ch {
+ case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', quote:
+ // nothing to do
+ ch = s.next()
+ case '0', '1', '2', '3', '4', '5', '6', '7':
+ ch = s.scanDigits(ch, 8, 3)
+ case 'x':
+ ch = s.scanDigits(s.next(), 16, 2)
+ case 'u':
+ ch = s.scanDigits(s.next(), 16, 4)
+ case 'U':
+ ch = s.scanDigits(s.next(), 16, 8)
+ default:
+ s.error("illegal char escape")
+ }
+ return ch
+}
+
+func (s *Scanner) scanString(quote rune) (n int) {
+ ch := s.next() // read character after quote
+ for ch != quote {
+ if ch == '\n' || ch < 0 {
+ s.error("literal not terminated")
+ return
+ }
+ if ch == '\\' {
+ ch = s.scanEscape(quote)
+ } else {
+ ch = s.next()
+ }
+ n++
+ }
+ return
+}
+
+func (s *Scanner) scanRawString() {
+ ch := s.next() // read character after '`'
+ for ch != '`' {
+ if ch < 0 {
+ s.error("literal not terminated")
+ return
+ }
+ ch = s.next()
+ }
+}
+
+func (s *Scanner) scanChar() {
+ if s.scanString('\'') != 1 {
+ s.error("illegal char literal")
+ }
+}
+
+func (s *Scanner) scanComment(ch rune) rune {
+ // ch == '/' || ch == '*'
+ if ch == '/' {
+ // line comment
+ ch = s.next() // read character after "//"
+ for ch != '\n' && ch >= 0 {
+ ch = s.next()
+ }
+ return ch
+ }
+
+ // general comment
+ ch = s.next() // read character after "/*"
+ for {
+ if ch < 0 {
+ s.error("comment not terminated")
+ break
+ }
+ ch0 := ch
+ ch = s.next()
+ if ch0 == '*' && ch == '/' {
+ ch = s.next()
+ break
+ }
+ }
+ return ch
+}
+
+// Scan reads the next token or Unicode character from source and returns it.
+// It only recognizes tokens t for which the respective Mode bit (1<<-t) is set.
+// It returns EOF at the end of the source. It reports scanner errors (read and
+// token errors) by calling s.Error, if not nil; otherwise it prints an error
+// message to os.Stderr.
+func (s *Scanner) Scan() rune {
+ ch := s.Peek()
+
+ // reset token text position
+ s.tokPos = -1
+ s.Line = 0
+
+redo:
+ // skip white space
+ for s.Whitespace&(1<<uint(ch)) != 0 {
+ ch = s.next()
+ }
+
+ // start collecting token text
+ s.tokBuf.Reset()
+ s.tokPos = s.srcPos - s.lastCharLen
+
+ // set token position
+ // (this is a slightly optimized version of the code in Pos())
+ s.Offset = s.srcBufOffset + s.tokPos
+ if s.column > 0 {
+ // common case: last character was not a '\n'
+ s.Line = s.line
+ s.Column = s.column
+ } else {
+ // last character was a '\n'
+ // (we cannot be at the beginning of the source
+ // since we have called next() at least once)
+ s.Line = s.line - 1
+ s.Column = s.lastLineLen
+ }
+
+ // determine token value
+ tok := ch
+ switch {
+ case unicode.IsLetter(ch) || ch == '_':
+ if s.Mode&ScanIdents != 0 {
+ tok = Ident
+ ch = s.scanIdentifier()
+ } else {
+ ch = s.next()
+ }
+ case isDecimal(ch):
+ if s.Mode&(ScanInts|ScanFloats) != 0 {
+ tok, ch = s.scanNumber(ch)
+ } else {
+ ch = s.next()
+ }
+ default:
+ switch ch {
+ case '"':
+ if s.Mode&ScanStrings != 0 {
+ s.scanString('"')
+ tok = String
+ }
+ ch = s.next()
+ case '\'':
+ if s.Mode&ScanChars != 0 {
+ s.scanChar()
+ tok = Char
+ }
+ ch = s.next()
+ case '.':
+ ch = s.next()
+ if isDecimal(ch) && s.Mode&ScanFloats != 0 {
+ tok = Float
+ ch = s.scanMantissa(ch)
+ ch = s.scanExponent(ch)
+ }
+ case '/':
+ ch = s.next()
+ if (ch == '/' || ch == '*') && s.Mode&ScanComments != 0 {
+ if s.Mode&SkipComments != 0 {
+ s.tokPos = -1 // don't collect token text
+ ch = s.scanComment(ch)
+ goto redo
+ }
+ ch = s.scanComment(ch)
+ tok = Comment
+ }
+ case '`':
+ if s.Mode&ScanRawStrings != 0 {
+ s.scanRawString()
+ tok = String
+ }
+ ch = s.next()
+ default:
+ ch = s.next()
+ }
+ }
+
+ // end of token text
+ s.tokEnd = s.srcPos - s.lastCharLen
+
+ s.ch = ch
+ return tok
+}
+
+// Pos returns the position of the character immediately after
+// the character or token returned by the last call to Next or Scan.
+func (s *Scanner) Pos() (pos Position) {
+ pos.Filename = s.Filename
+ pos.Offset = s.srcBufOffset + s.srcPos - s.lastCharLen
+ switch {
+ case s.column > 0:
+ // common case: last character was not a '\n'
+ pos.Line = s.line
+ pos.Column = s.column
+ case s.lastLineLen > 0:
+ // last character was a '\n'
+ pos.Line = s.line - 1
+ pos.Column = s.lastLineLen
+ default:
+ // at the beginning of the source
+ pos.Line = 1
+ pos.Column = 1
+ }
+ return
+}
+
+// TokenText returns the string corresponding to the most recently scanned token.
+// Valid after calling Scan().
+func (s *Scanner) TokenText() string {
+ if s.tokPos < 0 {
+ // no token text
+ return ""
+ }
+
+ if s.tokEnd < 0 {
+ // if EOF was reached, s.tokEnd is set to -1 (s.srcPos == 0)
+ s.tokEnd = s.tokPos
+ }
+
+ if s.tokBuf.Len() == 0 {
+ // common case: the entire token text is still in srcBuf
+ return string(s.srcBuf[s.tokPos:s.tokEnd])
+ }
+
+ // part of the token text was saved in tokBuf: save the rest in
+ // tokBuf as well and return its content
+ s.tokBuf.Write(s.srcBuf[s.tokPos:s.tokEnd])
+ s.tokPos = s.tokEnd // ensure idempotency of TokenText() call
+ return s.tokBuf.String()
+}
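The doc comments above describe how a Scanner is driven: Init attaches a source, the Mode and Whitespace bit masks select which tokens and white space characters are recognized, and Scan sets Position and TokenText for each token. As a quick illustration (not part of the changeset; the import path text/scanner and the input text are assumptions for this sketch):

	package main

	import (
		"fmt"
		"strings"
		"text/scanner"
	)

	func main() {
		var s scanner.Scanner
		s.Init(strings.NewReader("foo /* note */ bar\n42"))
		// Recognize identifiers and ints, return comments as Comment tokens
		// (SkipComments is not set), and treat tab, newline, and space as
		// white space via the per-character bit mask.
		s.Mode = scanner.ScanIdents | scanner.ScanInts | scanner.ScanComments
		s.Whitespace = 1<<'\t' | 1<<'\n' | 1<<' '
		for tok := s.Scan(); tok != scanner.EOF; tok = s.Scan() {
			// Position was set by Scan; TokenText returns the token's text.
			fmt.Printf("%s: %s %q\n", s.Position, scanner.TokenString(tok), s.TokenText())
		}
	}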
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package scanner
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "strings"
+ "testing"
+ "unicode/utf8"
+)
+
+// A StringReader delivers its data one string segment at a time via Read.
+type StringReader struct {
+ data []string
+ step int
+}
+
+func (r *StringReader) Read(p []byte) (n int, err error) {
+ if r.step < len(r.data) {
+ s := r.data[r.step]
+ n = copy(p, s)
+ r.step++
+ } else {
+ err = io.EOF
+ }
+ return
+}
+
+func readRuneSegments(t *testing.T, segments []string) {
+ got := ""
+ want := strings.Join(segments, "")
+ s := new(Scanner).Init(&StringReader{data: segments})
+ for {
+ ch := s.Next()
+ if ch == EOF {
+ break
+ }
+ got += string(ch)
+ }
+ if got != want {
+ t.Errorf("segments=%v got=%s want=%s", segments, got, want)
+ }
+}
+
+var segmentList = [][]string{
+ {},
+ {""},
+ {"日", "本語"},
+ {"\u65e5", "\u672c", "\u8a9e"},
+ {"\U000065e5", " ", "\U0000672c", "\U00008a9e"},
+ {"\xe6", "\x97\xa5\xe6", "\x9c\xac\xe8\xaa\x9e"},
+ {"Hello", ", ", "World", "!"},
+ {"Hello", ", ", "", "World", "!"},
+}
+
+func TestNext(t *testing.T) {
+ for _, s := range segmentList {
+ readRuneSegments(t, s)
+ }
+}
+
+type token struct {
+ tok rune
+ text string
+}
+
+var f100 = "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
+
+var tokenList = []token{
+ {Comment, "// line comments"},
+ {Comment, "//"},
+ {Comment, "////"},
+ {Comment, "// comment"},
+ {Comment, "// /* comment */"},
+ {Comment, "// // comment //"},
+ {Comment, "//" + f100},
+
+ {Comment, "// general comments"},
+ {Comment, "/**/"},
+ {Comment, "/***/"},
+ {Comment, "/* comment */"},
+ {Comment, "/* // comment */"},
+ {Comment, "/* /* comment */"},
+ {Comment, "/*\n comment\n*/"},
+ {Comment, "/*" + f100 + "*/"},
+
+ {Comment, "// identifiers"},
+ {Ident, "a"},
+ {Ident, "a0"},
+ {Ident, "foobar"},
+ {Ident, "abc123"},
+ {Ident, "LGTM"},
+ {Ident, "_"},
+ {Ident, "_abc123"},
+ {Ident, "abc123_"},
+ {Ident, "_abc_123_"},
+ {Ident, "_äöü"},
+ {Ident, "_本"},
+ {Ident, "äöü"},
+ {Ident, "本"},
+ {Ident, "a۰۱۸"},
+ {Ident, "foo६४"},
+ {Ident, "bar9876"},
+ {Ident, f100},
+
+ {Comment, "// decimal ints"},
+ {Int, "0"},
+ {Int, "1"},
+ {Int, "9"},
+ {Int, "42"},
+ {Int, "1234567890"},
+
+ {Comment, "// octal ints"},
+ {Int, "00"},
+ {Int, "01"},
+ {Int, "07"},
+ {Int, "042"},
+ {Int, "01234567"},
+
+ {Comment, "// hexadecimal ints"},
+ {Int, "0x0"},
+ {Int, "0x1"},
+ {Int, "0xf"},
+ {Int, "0x42"},
+ {Int, "0x123456789abcDEF"},
+ {Int, "0x" + f100},
+ {Int, "0X0"},
+ {Int, "0X1"},
+ {Int, "0XF"},
+ {Int, "0X42"},
+ {Int, "0X123456789abcDEF"},
+ {Int, "0X" + f100},
+
+ {Comment, "// floats"},
+ {Float, "0."},
+ {Float, "1."},
+ {Float, "42."},
+ {Float, "01234567890."},
+ {Float, ".0"},
+ {Float, ".1"},
+ {Float, ".42"},
+ {Float, ".0123456789"},
+ {Float, "0.0"},
+ {Float, "1.0"},
+ {Float, "42.0"},
+ {Float, "01234567890.0"},
+ {Float, "0e0"},
+ {Float, "1e0"},
+ {Float, "42e0"},
+ {Float, "01234567890e0"},
+ {Float, "0E0"},
+ {Float, "1E0"},
+ {Float, "42E0"},
+ {Float, "01234567890E0"},
+ {Float, "0e+10"},
+ {Float, "1e-10"},
+ {Float, "42e+10"},
+ {Float, "01234567890e-10"},
+ {Float, "0E+10"},
+ {Float, "1E-10"},
+ {Float, "42E+10"},
+ {Float, "01234567890E-10"},
+
+ {Comment, "// chars"},
+ {Char, `' '`},
+ {Char, `'a'`},
+ {Char, `'本'`},
+ {Char, `'\a'`},
+ {Char, `'\b'`},
+ {Char, `'\f'`},
+ {Char, `'\n'`},
+ {Char, `'\r'`},
+ {Char, `'\t'`},
+ {Char, `'\v'`},
+ {Char, `'\''`},
+ {Char, `'\000'`},
+ {Char, `'\777'`},
+ {Char, `'\x00'`},
+ {Char, `'\xff'`},
+ {Char, `'\u0000'`},
+ {Char, `'\ufA16'`},
+ {Char, `'\U00000000'`},
+ {Char, `'\U0000ffAB'`},
+
+ {Comment, "// strings"},
+ {String, `" "`},
+ {String, `"a"`},
+ {String, `"本"`},
+ {String, `"\a"`},
+ {String, `"\b"`},
+ {String, `"\f"`},
+ {String, `"\n"`},
+ {String, `"\r"`},
+ {String, `"\t"`},
+ {String, `"\v"`},
+ {String, `"\""`},
+ {String, `"\000"`},
+ {String, `"\777"`},
+ {String, `"\x00"`},
+ {String, `"\xff"`},
+ {String, `"\u0000"`},
+ {String, `"\ufA16"`},
+ {String, `"\U00000000"`},
+ {String, `"\U0000ffAB"`},
+ {String, `"` + f100 + `"`},
+
+ {Comment, "// raw strings"},
+ {String, "``"},
+ {String, "`\\`"},
+ {String, "`" + "\n\n/* foobar */\n\n" + "`"},
+ {String, "`" + f100 + "`"},
+
+ {Comment, "// individual characters"},
+ // NUL character is not allowed
+ {'\x01', "\x01"},
+ {' ' - 1, string(' ' - 1)},
+ {'+', "+"},
+ {'/', "/"},
+ {'.', "."},
+ {'~', "~"},
+ {'(', "("},
+}
+
+func makeSource(pattern string) *bytes.Buffer {
+ var buf bytes.Buffer
+ for _, k := range tokenList {
+ fmt.Fprintf(&buf, pattern, k.text)
+ }
+ return &buf
+}
+
+func checkTok(t *testing.T, s *Scanner, line int, got, want rune, text string) {
+ if got != want {
+ t.Fatalf("tok = %s, want %s for %q", TokenString(got), TokenString(want), text)
+ }
+ if s.Line != line {
+ t.Errorf("line = %d, want %d for %q", s.Line, line, text)
+ }
+ stext := s.TokenText()
+ if stext != text {
+ t.Errorf("text = %q, want %q", stext, text)
+ } else {
+ // check idempotency of TokenText() call
+ stext = s.TokenText()
+ if stext != text {
+ t.Errorf("text = %q, want %q (idempotency check)", stext, text)
+ }
+ }
+}
+
+func countNewlines(s string) int {
+ n := 0
+ for _, ch := range s {
+ if ch == '\n' {
+ n++
+ }
+ }
+ return n
+}
+
+func testScan(t *testing.T, mode uint) {
+ s := new(Scanner).Init(makeSource(" \t%s\n"))
+ s.Mode = mode
+ tok := s.Scan()
+ line := 1
+ for _, k := range tokenList {
+ if mode&SkipComments == 0 || k.tok != Comment {
+ checkTok(t, s, line, tok, k.tok, k.text)
+ tok = s.Scan()
+ }
+ line += countNewlines(k.text) + 1 // each token is on a new line
+ }
+ checkTok(t, s, line, tok, EOF, "")
+}
+
+func TestScan(t *testing.T) {
+ testScan(t, GoTokens)
+ testScan(t, GoTokens&^SkipComments)
+}
+
+func TestPosition(t *testing.T) {
+ src := makeSource("\t\t\t\t%s\n")
+ s := new(Scanner).Init(src)
+ s.Mode = GoTokens &^ SkipComments
+ s.Scan()
+ pos := Position{"", 4, 1, 5}
+ for _, k := range tokenList {
+ if s.Offset != pos.Offset {
+ t.Errorf("offset = %d, want %d for %q", s.Offset, pos.Offset, k.text)
+ }
+ if s.Line != pos.Line {
+ t.Errorf("line = %d, want %d for %q", s.Line, pos.Line, k.text)
+ }
+ if s.Column != pos.Column {
+ t.Errorf("column = %d, want %d for %q", s.Column, pos.Column, k.text)
+ }
+ pos.Offset += 4 + len(k.text) + 1 // 4 tabs + token bytes + newline
+ pos.Line += countNewlines(k.text) + 1 // each token is on a new line
+ s.Scan()
+ }
+ // make sure there were no token-internal errors reported by scanner
+ if s.ErrorCount != 0 {
+ t.Errorf("%d errors", s.ErrorCount)
+ }
+}
+
+func TestScanZeroMode(t *testing.T) {
+ src := makeSource("%s\n")
+ str := src.String()
+ s := new(Scanner).Init(src)
+ s.Mode = 0 // don't recognize any token classes
+ s.Whitespace = 0 // don't skip any whitespace
+ tok := s.Scan()
+ for i, ch := range str {
+ if tok != ch {
+ t.Fatalf("%d. tok = %s, want %s", i, TokenString(tok), TokenString(ch))
+ }
+ tok = s.Scan()
+ }
+ if tok != EOF {
+ t.Fatalf("tok = %s, want EOF", TokenString(tok))
+ }
+ if s.ErrorCount != 0 {
+ t.Errorf("%d errors", s.ErrorCount)
+ }
+}
+
+func testScanSelectedMode(t *testing.T, mode uint, class rune) {
+ src := makeSource("%s\n")
+ s := new(Scanner).Init(src)
+ s.Mode = mode
+ tok := s.Scan()
+ for tok != EOF {
+ if tok < 0 && tok != class {
+ t.Fatalf("tok = %s, want %s", TokenString(tok), TokenString(class))
+ }
+ tok = s.Scan()
+ }
+ if s.ErrorCount != 0 {
+ t.Errorf("%d errors", s.ErrorCount)
+ }
+}
+
+func TestScanSelectedMask(t *testing.T) {
+ testScanSelectedMode(t, 0, 0)
+ testScanSelectedMode(t, ScanIdents, Ident)
+ // Don't test ScanInts and ScanNumbers since some parts of
+ // the floats in the source look like (illegal) octal ints
+ // and ScanNumbers may return either Int or Float.
+ testScanSelectedMode(t, ScanChars, Char)
+ testScanSelectedMode(t, ScanStrings, String)
+ testScanSelectedMode(t, SkipComments, 0)
+ testScanSelectedMode(t, ScanComments, Comment)
+}
+
+func TestScanNext(t *testing.T) {
+ s := new(Scanner).Init(bytes.NewBufferString("if a == bcd /* comment */ {\n\ta += c\n} // line comment ending in eof"))
+ checkTok(t, s, 1, s.Scan(), Ident, "if")
+ checkTok(t, s, 1, s.Scan(), Ident, "a")
+ checkTok(t, s, 1, s.Scan(), '=', "=")
+ checkTok(t, s, 0, s.Next(), '=', "")
+ checkTok(t, s, 0, s.Next(), ' ', "")
+ checkTok(t, s, 0, s.Next(), 'b', "")
+ checkTok(t, s, 1, s.Scan(), Ident, "cd")
+ checkTok(t, s, 1, s.Scan(), '{', "{")
+ checkTok(t, s, 2, s.Scan(), Ident, "a")
+ checkTok(t, s, 2, s.Scan(), '+', "+")
+ checkTok(t, s, 0, s.Next(), '=', "")
+ checkTok(t, s, 2, s.Scan(), Ident, "c")
+ checkTok(t, s, 3, s.Scan(), '}', "}")
+ checkTok(t, s, 3, s.Scan(), -1, "")
+ if s.ErrorCount != 0 {
+ t.Errorf("%d errors", s.ErrorCount)
+ }
+}
+
+func TestScanWhitespace(t *testing.T) {
+ var buf bytes.Buffer
+ var ws uint64
+ // start at 1, NUL character is not allowed
+ for ch := byte(1); ch < ' '; ch++ {
+ buf.WriteByte(ch)
+ ws |= 1 << ch
+ }
+ const orig = 'x'
+ buf.WriteByte(orig)
+
+ s := new(Scanner).Init(&buf)
+ s.Mode = 0
+ s.Whitespace = ws
+ tok := s.Scan()
+ if tok != orig {
+ t.Errorf("tok = %s, want %s", TokenString(tok), TokenString(orig))
+ }
+}
+
+func testError(t *testing.T, src, pos, msg string, tok rune) {
+ s := new(Scanner).Init(bytes.NewBufferString(src))
+ errorCalled := false
+ s.Error = func(s *Scanner, m string) {
+ if !errorCalled {
+ // only look at first error
+ if p := s.Pos().String(); p != pos {
+ t.Errorf("pos = %q, want %q for %q", p, pos, src)
+ }
+ if m != msg {
+ t.Errorf("msg = %q, want %q for %q", m, msg, src)
+ }
+ errorCalled = true
+ }
+ }
+ tk := s.Scan()
+ if tk != tok {
+ t.Errorf("tok = %s, want %s for %q", TokenString(tk), TokenString(tok), src)
+ }
+ if !errorCalled {
+ t.Errorf("error handler not called for %q", src)
+ }
+ if s.ErrorCount == 0 {
+ t.Errorf("count = %d, want > 0 for %q", s.ErrorCount, src)
+ }
+}
+
+func TestError(t *testing.T) {
+ testError(t, "\x00", "1:1", "illegal character NUL", 0)
+ testError(t, "\x80", "1:1", "illegal UTF-8 encoding", utf8.RuneError)
+ testError(t, "\xff", "1:1", "illegal UTF-8 encoding", utf8.RuneError)
+
+ testError(t, "a\x00", "1:2", "illegal character NUL", Ident)
+ testError(t, "ab\x80", "1:3", "illegal UTF-8 encoding", Ident)
+ testError(t, "abc\xff", "1:4", "illegal UTF-8 encoding", Ident)
+
+ testError(t, `"a`+"\x00", "1:3", "illegal character NUL", String)
+ testError(t, `"ab`+"\x80", "1:4", "illegal UTF-8 encoding", String)
+ testError(t, `"abc`+"\xff", "1:5", "illegal UTF-8 encoding", String)
+
+ testError(t, "`a"+"\x00", "1:3", "illegal character NUL", String)
+ testError(t, "`ab"+"\x80", "1:4", "illegal UTF-8 encoding", String)
+ testError(t, "`abc"+"\xff", "1:5", "illegal UTF-8 encoding", String)
+
+ testError(t, `'\"'`, "1:3", "illegal char escape", Char)
+ testError(t, `"\'"`, "1:3", "illegal char escape", String)
+
+ testError(t, `01238`, "1:6", "illegal octal number", Int)
+ testError(t, `'aa'`, "1:4", "illegal char literal", Char)
+
+ testError(t, `'`, "1:2", "literal not terminated", Char)
+ testError(t, `'`+"\n", "1:2", "literal not terminated", Char)
+ testError(t, `"abc`, "1:5", "literal not terminated", String)
+ testError(t, `"abc`+"\n", "1:5", "literal not terminated", String)
+ testError(t, "`abc\n", "2:1", "literal not terminated", String)
+ testError(t, `/*/`, "1:4", "comment not terminated", EOF)
+}
+
+func checkPos(t *testing.T, got, want Position) {
+ if got.Offset != want.Offset || got.Line != want.Line || got.Column != want.Column {
+ t.Errorf("got offset, line, column = %d, %d, %d; want %d, %d, %d",
+ got.Offset, got.Line, got.Column, want.Offset, want.Line, want.Column)
+ }
+}
+
+func checkNextPos(t *testing.T, s *Scanner, offset, line, column int, char rune) {
+ if ch := s.Next(); ch != char {
+ t.Errorf("ch = %s, want %s", TokenString(ch), TokenString(char))
+ }
+ want := Position{Offset: offset, Line: line, Column: column}
+ checkPos(t, s.Pos(), want)
+}
+
+func checkScanPos(t *testing.T, s *Scanner, offset, line, column int, char rune) {
+ want := Position{Offset: offset, Line: line, Column: column}
+ checkPos(t, s.Pos(), want)
+ if ch := s.Scan(); ch != char {
+ t.Errorf("ch = %s, want %s", TokenString(ch), TokenString(char))
+ if string(ch) != s.TokenText() {
+ t.Errorf("tok = %q, want %q", s.TokenText(), string(ch))
+ }
+ }
+ checkPos(t, s.Position, want)
+}
+
+func TestPos(t *testing.T) {
+ // corner case: empty source
+ s := new(Scanner).Init(bytes.NewBufferString(""))
+ checkPos(t, s.Pos(), Position{Offset: 0, Line: 1, Column: 1})
+ s.Peek() // peek doesn't affect the position
+ checkPos(t, s.Pos(), Position{Offset: 0, Line: 1, Column: 1})
+
+ // corner case: source with only a newline
+ s = new(Scanner).Init(bytes.NewBufferString("\n"))
+ checkPos(t, s.Pos(), Position{Offset: 0, Line: 1, Column: 1})
+ checkNextPos(t, s, 1, 2, 1, '\n')
+ // after EOF position doesn't change
+ for i := 10; i > 0; i-- {
+ checkScanPos(t, s, 1, 2, 1, EOF)
+ }
+ if s.ErrorCount != 0 {
+ t.Errorf("%d errors", s.ErrorCount)
+ }
+
+ // corner case: source with only a single character
+ s = new(Scanner).Init(bytes.NewBufferString("本"))
+ checkPos(t, s.Pos(), Position{Offset: 0, Line: 1, Column: 1})
+ checkNextPos(t, s, 3, 1, 2, '本')
+ // after EOF position doesn't change
+ for i := 10; i > 0; i-- {
+ checkScanPos(t, s, 3, 1, 2, EOF)
+ }
+ if s.ErrorCount != 0 {
+ t.Errorf("%d errors", s.ErrorCount)
+ }
+
+ // positions after calling Next
+ s = new(Scanner).Init(bytes.NewBufferString(" foo६४ \n\n本語\n"))
+ checkNextPos(t, s, 1, 1, 2, ' ')
+ s.Peek() // peek doesn't affect the position
+ checkNextPos(t, s, 2, 1, 3, ' ')
+ checkNextPos(t, s, 3, 1, 4, 'f')
+ checkNextPos(t, s, 4, 1, 5, 'o')
+ checkNextPos(t, s, 5, 1, 6, 'o')
+ checkNextPos(t, s, 8, 1, 7, '६')
+ checkNextPos(t, s, 11, 1, 8, '४')
+ checkNextPos(t, s, 12, 1, 9, ' ')
+ checkNextPos(t, s, 13, 1, 10, ' ')
+ checkNextPos(t, s, 14, 2, 1, '\n')
+ checkNextPos(t, s, 15, 3, 1, '\n')
+ checkNextPos(t, s, 18, 3, 2, '本')
+ checkNextPos(t, s, 21, 3, 3, '語')
+ checkNextPos(t, s, 22, 4, 1, '\n')
+ // after EOF position doesn't change
+ for i := 10; i > 0; i-- {
+ checkScanPos(t, s, 22, 4, 1, EOF)
+ }
+ if s.ErrorCount != 0 {
+ t.Errorf("%d errors", s.ErrorCount)
+ }
+
+ // positions after calling Scan
+ s = new(Scanner).Init(bytes.NewBufferString("abc\n本語\n\nx"))
+ s.Mode = 0
+ s.Whitespace = 0
+ checkScanPos(t, s, 0, 1, 1, 'a')
+ s.Peek() // peek doesn't affect the position
+ checkScanPos(t, s, 1, 1, 2, 'b')
+ checkScanPos(t, s, 2, 1, 3, 'c')
+ checkScanPos(t, s, 3, 1, 4, '\n')
+ checkScanPos(t, s, 4, 2, 1, '本')
+ checkScanPos(t, s, 7, 2, 2, '語')
+ checkScanPos(t, s, 10, 2, 3, '\n')
+ checkScanPos(t, s, 11, 3, 1, '\n')
+ checkScanPos(t, s, 12, 4, 1, 'x')
+ // after EOF position doesn't change
+ for i := 10; i > 0; i-- {
+ checkScanPos(t, s, 13, 4, 2, EOF)
+ }
+ if s.ErrorCount != 0 {
+ t.Errorf("%d errors", s.ErrorCount)
+ }
+}
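Besides token scanning, the Next, Peek, and Pos methods added above allow plain character-by-character reading, and the Error hook replaces the default reporting to os.Stderr. A minimal sketch of that mode of use (again assuming a text/scanner import path and made-up input):

	package main

	import (
		"fmt"
		"strings"
		"text/scanner"
	)

	func main() {
		var s scanner.Scanner
		s.Init(strings.NewReader("日本語 text"))
		// Route errors (e.g. illegal UTF-8 encoding) through a custom handler.
		s.Error = func(s *scanner.Scanner, msg string) {
			fmt.Printf("scan error at %s: %s\n", s.Pos(), msg)
		}
		// Next does not update the Position field; Pos reports the position
		// immediately after the character just read.
		for ch := s.Next(); ch != scanner.EOF; ch = s.Next() {
			fmt.Printf("%q ends at %s\n", ch, s.Pos())
		}
	}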
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package tabwriter implements a write filter (tabwriter.Writer) that
+// translates tabbed columns in input into properly aligned text.
+//
+// The package uses the Elastic Tabstops algorithm described at
+// http://nickgravgaard.com/elastictabstops/index.html.
+//
+package tabwriter
+
+import (
+ "bytes"
+ "io"
+ "os"
+ "unicode/utf8"
+)
+
+// ----------------------------------------------------------------------------
+// Filter implementation
+
+// A cell represents a segment of text terminated by tabs or line breaks.
+// The text itself is stored in a separate buffer; cell only describes the
+// segment's size in bytes, its width in runes, and whether it's an htab
+// ('\t') terminated cell.
+//
+type cell struct {
+ size int // cell size in bytes
+ width int // cell width in runes
+ htab bool // true if the cell is terminated by an htab ('\t')
+}
+
+// A Writer is a filter that inserts padding around tab-delimited
+// columns in its input to align them in the output.
+//
+// The Writer treats incoming bytes as UTF-8 encoded text consisting
+// of cells terminated by (horizontal or vertical) tabs or line
+// breaks (newline or formfeed characters). Cells in adjacent lines
+// constitute a column. The Writer inserts padding as needed to
+// make all cells in a column have the same width, effectively
+// aligning the columns. It assumes that all characters have the
+// same width except for tabs for which a tabwidth must be specified.
+// Note that cells are tab-terminated, not tab-separated: trailing
+// non-tab text at the end of a line does not form a column cell.
+//
+// The Writer assumes that all Unicode code points have the same width;
+// this may not be true in some fonts.
+//
+// If DiscardEmptyColumns is set, empty columns that are terminated
+// entirely by vertical (or "soft") tabs are discarded. Columns
+// terminated by horizontal (or "hard") tabs are not affected by
+// this flag.
+//
+// If a Writer is configured to filter HTML, HTML tags and entities
+// are simply passed through. The widths of tags and entities are
+// assumed to be zero (tags) and one (entities) for formatting purposes.
+//
+// A segment of text may be escaped by bracketing it with Escape
+// characters. The tabwriter passes escaped text segments through
+// unchanged. In particular, it does not interpret any tabs or line
+// breaks within the segment. If the StripEscape flag is set, the
+// Escape characters are stripped from the output; otherwise they
+// are passed through as well. For the purpose of formatting, the
+// width of the escaped text is always computed excluding the Escape
+// characters.
+//
+// The formfeed character ('\f') acts like a newline but it also
+// terminates all columns in the current line (effectively calling
+// Flush). Cells in the next line start new columns. Unless found
+// inside an HTML tag or inside an escaped text segment, formfeed
+// characters appear as newlines in the output.
+//
+// The Writer must buffer input internally, because proper spacing
+// of one line may depend on the cells in future lines. Clients must
+// call Flush when done calling Write.
+//
+type Writer struct {
+ // configuration
+ output io.Writer
+ minwidth int
+ tabwidth int
+ padding int
+ padbytes [8]byte
+ flags uint
+
+ // current state
+ buf bytes.Buffer // collected text excluding tabs or line breaks
+ pos int // buffer position up to which cell.width of incomplete cell has been computed
+ cell cell // current incomplete cell; cell.width is up to buf[pos] excluding ignored sections
+ endChar byte // terminating char of escaped sequence (Escape for escapes, '>', ';' for HTML tags/entities, or 0)
+ lines [][]cell // list of lines; each line is a list of cells
+ widths []int // list of column widths in runes - re-used during formatting
+}
+
+func (b *Writer) addLine() { b.lines = append(b.lines, []cell{}) }
+
+// Reset the current state.
+func (b *Writer) reset() {
+ b.buf.Reset()
+ b.pos = 0
+ b.cell = cell{}
+ b.endChar = 0
+ b.lines = b.lines[0:0]
+ b.widths = b.widths[0:0]
+ b.addLine()
+}
+
+// Internal representation (current state):
+//
+// - all text written is appended to buf; tabs and line breaks are stripped away
+// - at any given time there is a (possibly empty) incomplete cell at the end
+// (the cell starts after a tab or line break)
+// - cell.size is the number of bytes belonging to the cell so far
+// - cell.width is text width in runes of that cell from the start of the cell to
+// position pos; html tags and entities are excluded from this width if html
+// filtering is enabled
+// - the sizes and widths of processed text are kept in the lines list
+// which contains a list of cells for each line
+// - the widths list is a temporary list with current widths used during
+// formatting; it is kept in Writer because it's re-used
+//
+// |<---------- size ---------->|
+// | |
+// |<- width ->|<- ignored ->| |
+// | | | |
+// [---processed---tab------------<tag>...</tag>...]
+// ^ ^ ^
+// | | |
+// buf start of incomplete cell pos
+
+// Formatting can be controlled with these flags.
+const (
+ // Ignore html tags and treat entities (starting with '&'
+ // and ending in ';') as single characters (width = 1).
+ FilterHTML uint = 1 << iota
+
+ // Strip Escape characters bracketing escaped text segments
+ // instead of passing them through unchanged with the text.
+ StripEscape
+
+ // Force right-alignment of cell content.
+ // Default is left-alignment.
+ AlignRight
+
+ // Handle empty columns as if they were not present in
+ // the input in the first place.
+ DiscardEmptyColumns
+
+ // Always use tabs for indentation columns (i.e., padding of
+ // leading empty cells on the left) independent of padchar.
+ TabIndent
+
+ // Print a vertical bar ('|') between columns (after formatting).
+ // Discarded columns appear as zero-width columns ("||").
+ Debug
+)
+
+// A Writer must be initialized with a call to Init. The first parameter (output)
+// specifies the filter output. The remaining parameters control the formatting:
+//
+// minwidth minimal cell width including any padding
+// tabwidth width of tab characters (equivalent number of spaces)
+// padding padding added to a cell before computing its width
+// padchar ASCII char used for padding
+// if padchar == '\t', the Writer will assume that the
+// width of a '\t' in the formatted output is tabwidth,
+//			and cells are left-aligned independent of the AlignRight flag
+// (for correct-looking results, tabwidth must correspond
+// to the tab width in the viewer displaying the result)
+// flags formatting control
+//
+// To format in tab-separated columns with a tab stop of 8:
+// b.Init(w, 8, 1, 8, '\t', 0);
+//
+// To format in space-separated columns with at least 4 spaces between columns:
+// b.Init(w, 0, 4, 8, ' ', 0);
+//
+func (b *Writer) Init(output io.Writer, minwidth, tabwidth, padding int, padchar byte, flags uint) *Writer {
+ if minwidth < 0 || tabwidth < 0 || padding < 0 {
+ panic("negative minwidth, tabwidth, or padding")
+ }
+ b.output = output
+ b.minwidth = minwidth
+ b.tabwidth = tabwidth
+ b.padding = padding
+ for i := range b.padbytes {
+ b.padbytes[i] = padchar
+ }
+ if padchar == '\t' {
+ // tab padding enforces left-alignment
+ flags &^= AlignRight
+ }
+ b.flags = flags
+
+ b.reset()
+
+ return b
+}
+
+// debugging support (keep code around)
+func (b *Writer) dump() {
+ pos := 0
+ for i, line := range b.lines {
+ print("(", i, ") ")
+ for _, c := range line {
+ print("[", string(b.buf.Bytes()[pos:pos+c.size]), "]")
+ pos += c.size
+ }
+ print("\n")
+ }
+ print("\n")
+}
+
+// local error wrapper so we can distinguish errors we want to return
+// as errors from genuine panics (which we don't want to return as errors)
+type osError struct {
+ err error
+}
+
+func (b *Writer) write0(buf []byte) {
+ n, err := b.output.Write(buf)
+ if n != len(buf) && err == nil {
+ err = os.EIO
+ }
+ if err != nil {
+ panic(osError{err})
+ }
+}
+
+func (b *Writer) writeN(src []byte, n int) {
+ for n > len(src) {
+ b.write0(src)
+ n -= len(src)
+ }
+ b.write0(src[0:n])
+}
+
+var (
+ newline = []byte{'\n'}
+ tabs = []byte("\t\t\t\t\t\t\t\t")
+)
+
+func (b *Writer) writePadding(textw, cellw int, useTabs bool) {
+ if b.padbytes[0] == '\t' || useTabs {
+ // padding is done with tabs
+ if b.tabwidth == 0 {
+ return // tabs have no width - can't do any padding
+ }
+		// round cellw up to the smallest multiple of b.tabwidth
+ cellw = (cellw + b.tabwidth - 1) / b.tabwidth * b.tabwidth
+ n := cellw - textw // amount of padding
+ if n < 0 {
+ panic("internal error")
+ }
+ b.writeN(tabs, (n+b.tabwidth-1)/b.tabwidth)
+ return
+ }
+
+ // padding is done with non-tab characters
+ b.writeN(b.padbytes[0:], cellw-textw)
+}
+
+var vbar = []byte{'|'}
+
+func (b *Writer) writeLines(pos0 int, line0, line1 int) (pos int) {
+ pos = pos0
+ for i := line0; i < line1; i++ {
+ line := b.lines[i]
+
+ // if TabIndent is set, use tabs to pad leading empty cells
+ useTabs := b.flags&TabIndent != 0
+
+ for j, c := range line {
+ if j > 0 && b.flags&Debug != 0 {
+ // indicate column break
+ b.write0(vbar)
+ }
+
+ if c.size == 0 {
+ // empty cell
+ if j < len(b.widths) {
+ b.writePadding(c.width, b.widths[j], useTabs)
+ }
+ } else {
+ // non-empty cell
+ useTabs = false
+ if b.flags&AlignRight == 0 { // align left
+ b.write0(b.buf.Bytes()[pos : pos+c.size])
+ pos += c.size
+ if j < len(b.widths) {
+ b.writePadding(c.width, b.widths[j], false)
+ }
+ } else { // align right
+ if j < len(b.widths) {
+ b.writePadding(c.width, b.widths[j], false)
+ }
+ b.write0(b.buf.Bytes()[pos : pos+c.size])
+ pos += c.size
+ }
+ }
+ }
+
+ if i+1 == len(b.lines) {
+ // last buffered line - we don't have a newline, so just write
+ // any outstanding buffered data
+ b.write0(b.buf.Bytes()[pos : pos+b.cell.size])
+ pos += b.cell.size
+ } else {
+ // not the last line - write newline
+ b.write0(newline)
+ }
+ }
+ return
+}
+
+// Format the text between line0 and line1 (excluding line1); pos
+// is the buffer position corresponding to the beginning of line0.
+// Returns the buffer position corresponding to the beginning of
+// line1 and an error, if any.
+//
+func (b *Writer) format(pos0 int, line0, line1 int) (pos int) {
+ pos = pos0
+ column := len(b.widths)
+ for this := line0; this < line1; this++ {
+ line := b.lines[this]
+
+ if column < len(line)-1 {
+ // cell exists in this column => this line
+ // has more cells than the previous line
+ // (the last cell per line is ignored because cells are
+ // tab-terminated; the last cell per line describes the
+ // text before the newline/formfeed and does not belong
+ // to a column)
+
+ // print unprinted lines until beginning of block
+ pos = b.writeLines(pos, line0, this)
+ line0 = this
+
+ // column block begin
+ width := b.minwidth // minimal column width
+ discardable := true // true if all cells in this column are empty and "soft"
+ for ; this < line1; this++ {
+ line = b.lines[this]
+ if column < len(line)-1 {
+ // cell exists in this column
+ c := line[column]
+ // update width
+ if w := c.width + b.padding; w > width {
+ width = w
+ }
+ // update discardable
+ if c.width > 0 || c.htab {
+ discardable = false
+ }
+ } else {
+ break
+ }
+ }
+ // column block end
+
+ // discard empty columns if necessary
+ if discardable && b.flags&DiscardEmptyColumns != 0 {
+ width = 0
+ }
+
+ // format and print all columns to the right of this column
+ // (we know the widths of this column and all columns to the left)
+ b.widths = append(b.widths, width) // push width
+ pos = b.format(pos, line0, this)
+ b.widths = b.widths[0 : len(b.widths)-1] // pop width
+ line0 = this
+ }
+ }
+
+ // print unprinted lines until end
+ return b.writeLines(pos, line0, line1)
+}
+
+// Append text to current cell.
+func (b *Writer) append(text []byte) {
+ b.buf.Write(text)
+ b.cell.size += len(text)
+}
+
+// Update the cell width.
+func (b *Writer) updateWidth() {
+ b.cell.width += utf8.RuneCount(b.buf.Bytes()[b.pos:b.buf.Len()])
+ b.pos = b.buf.Len()
+}
+
+// To escape a text segment, bracket it with Escape characters.
+// For instance, the tab in this string "Ignore this tab: \xff\t\xff"
+// does not terminate a cell and constitutes a single character of
+// width one for formatting purposes.
+//
+// The value 0xff was chosen because it cannot appear in a valid UTF-8 sequence.
+//
+const Escape = '\xff'
+
+// Start escaped mode.
+func (b *Writer) startEscape(ch byte) {
+ switch ch {
+ case Escape:
+ b.endChar = Escape
+ case '<':
+ b.endChar = '>'
+ case '&':
+ b.endChar = ';'
+ }
+}
+
+// Terminate escaped mode. If the escaped text was an HTML tag, its width
+// is assumed to be zero for formatting purposes; if it was an HTML entity,
+// its width is assumed to be one. In all other cases, the width is the
+// unicode width of the text.
+//
+func (b *Writer) endEscape() {
+ switch b.endChar {
+ case Escape:
+ b.updateWidth()
+ if b.flags&StripEscape == 0 {
+ b.cell.width -= 2 // don't count the Escape chars
+ }
+ case '>': // tag of zero width
+ case ';':
+ b.cell.width++ // entity, count as one rune
+ }
+ b.pos = b.buf.Len()
+ b.endChar = 0
+}
+
+// Terminate the current cell by adding it to the list of cells of the
+// current line. Returns the number of cells in that line.
+//
+func (b *Writer) terminateCell(htab bool) int {
+ b.cell.htab = htab
+ line := &b.lines[len(b.lines)-1]
+ *line = append(*line, b.cell)
+ b.cell = cell{}
+ return len(*line)
+}
+
+func handlePanic(err *error) {
+ if e := recover(); e != nil {
+ *err = e.(osError).err // re-panics if it's not a local osError
+ }
+}
+
+// Flush should be called after the last call to Write to ensure
+// that any data buffered in the Writer is written to output. Any
+// incomplete escape sequence at the end is simply considered
+// complete for formatting purposes.
+//
+func (b *Writer) Flush() (err error) {
+ defer b.reset() // even in the presence of errors
+ defer handlePanic(&err)
+
+ // add current cell if not empty
+ if b.cell.size > 0 {
+ if b.endChar != 0 {
+ // inside escape - terminate it even if incomplete
+ b.endEscape()
+ }
+ b.terminateCell(false)
+ }
+
+ // format contents of buffer
+ b.format(0, 0, len(b.lines))
+
+ return
+}
+
+var hbar = []byte("---\n")
+
+// Write writes buf to the writer b.
+// The only errors returned are ones encountered
+// while writing to the underlying output stream.
+//
+func (b *Writer) Write(buf []byte) (n int, err error) {
+ defer handlePanic(&err)
+
+ // split text into cells
+ n = 0
+ for i, ch := range buf {
+ if b.endChar == 0 {
+ // outside escape
+ switch ch {
+ case '\t', '\v', '\n', '\f':
+ // end of cell
+ b.append(buf[n:i])
+ b.updateWidth()
+ n = i + 1 // ch consumed
+ ncells := b.terminateCell(ch == '\t')
+ if ch == '\n' || ch == '\f' {
+ // terminate line
+ b.addLine()
+ if ch == '\f' || ncells == 1 {
+						// A '\f' always forces a flush. Otherwise, if the previous
+						// line has only one cell, that cell has no impact on the
+						// formatting of the following lines (the last cell per line
+						// is ignored by format()), so we can flush the Writer's
+						// contents.
+ if err = b.Flush(); err != nil {
+ return
+ }
+ if ch == '\f' && b.flags&Debug != 0 {
+ // indicate section break
+ b.write0(hbar)
+ }
+ }
+ }
+
+ case Escape:
+ // start of escaped sequence
+ b.append(buf[n:i])
+ b.updateWidth()
+ n = i
+ if b.flags&StripEscape != 0 {
+ n++ // strip Escape
+ }
+ b.startEscape(Escape)
+
+ case '<', '&':
+ // possibly an html tag/entity
+ if b.flags&FilterHTML != 0 {
+ // begin of tag/entity
+ b.append(buf[n:i])
+ b.updateWidth()
+ n = i
+ b.startEscape(ch)
+ }
+ }
+
+ } else {
+ // inside escape
+ if ch == b.endChar {
+ // end of tag/entity
+ j := i + 1
+ if ch == Escape && b.flags&StripEscape != 0 {
+ j = i // strip Escape
+ }
+ b.append(buf[n:j])
+ n = i + 1 // ch consumed
+ b.endEscape()
+ }
+ }
+ }
+
+ // append leftover text
+ b.append(buf[n:])
+ n = len(buf)
+ return
+}
+
+// NewWriter allocates and initializes a new tabwriter.Writer.
+// The parameters are the same as for the Init function.
+//
+func NewWriter(output io.Writer, minwidth, tabwidth, padding int, padchar byte, flags uint) *Writer {
+ return new(Writer).Init(output, minwidth, tabwidth, padding, padchar, flags)
+}
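The Writer documentation above explains tab-terminated cells, padding, and the requirement to call Flush once writing is done. A rough usage sketch (not part of the changeset; the text/tabwriter import path and sample rows are assumptions):

	package main

	import (
		"os"
		"text/tabwriter"
	)

	func main() {
		// minwidth 8, tabwidth 0, padding 1, pad with '.' so padding is visible.
		w := tabwriter.NewWriter(os.Stdout, 8, 0, 1, '.', 0)
		// Cells are tab-terminated: text after the last tab on a line does
		// not form a column cell.
		w.Write([]byte("a\tb\tc\t\n"))
		w.Write([]byte("aa\tbbb\tcccc\t\n"))
		w.Write([]byte("aaa\tbbbb\t\n"))
		w.Flush() // emit the buffered, column-aligned lines
	}

Each column ends up as wide as its widest cell plus the padding, so the dots make the inserted padding visible in the output.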
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package tabwriter
+
+import (
+ "io"
+ "testing"
+)
+
+type buffer struct {
+ a []byte
+}
+
+func (b *buffer) init(n int) { b.a = make([]byte, n)[0:0] }
+
+func (b *buffer) clear() { b.a = b.a[0:0] }
+
+func (b *buffer) Write(buf []byte) (written int, err error) {
+ n := len(b.a)
+ m := len(buf)
+ if n+m <= cap(b.a) {
+ b.a = b.a[0 : n+m]
+ for i := 0; i < m; i++ {
+ b.a[n+i] = buf[i]
+ }
+ } else {
+ panic("buffer.Write: buffer too small")
+ }
+ return len(buf), nil
+}
+
+func (b *buffer) String() string { return string(b.a) }
+
+func write(t *testing.T, testname string, w *Writer, src string) {
+ written, err := io.WriteString(w, src)
+ if err != nil {
+ t.Errorf("--- test: %s\n--- src:\n%q\n--- write error: %v\n", testname, src, err)
+ }
+ if written != len(src) {
+ t.Errorf("--- test: %s\n--- src:\n%q\n--- written = %d, len(src) = %d\n", testname, src, written, len(src))
+ }
+}
+
+func verify(t *testing.T, testname string, w *Writer, b *buffer, src, expected string) {
+ err := w.Flush()
+ if err != nil {
+ t.Errorf("--- test: %s\n--- src:\n%q\n--- flush error: %v\n", testname, src, err)
+ }
+
+ res := b.String()
+ if res != expected {
+ t.Errorf("--- test: %s\n--- src:\n%q\n--- found:\n%q\n--- expected:\n%q\n", testname, src, res, expected)
+ }
+}
+
+func check(t *testing.T, testname string, minwidth, tabwidth, padding int, padchar byte, flags uint, src, expected string) {
+ var b buffer
+ b.init(1000)
+
+ var w Writer
+ w.Init(&b, minwidth, tabwidth, padding, padchar, flags)
+
+ // write all at once
+ title := testname + " (written all at once)"
+ b.clear()
+ write(t, title, &w, src)
+ verify(t, title, &w, &b, src, expected)
+
+ // write byte-by-byte
+ title = testname + " (written byte-by-byte)"
+ b.clear()
+ for i := 0; i < len(src); i++ {
+ write(t, title, &w, src[i:i+1])
+ }
+ verify(t, title, &w, &b, src, expected)
+
+ // write using Fibonacci slice sizes
+ title = testname + " (written in fibonacci slices)"
+ b.clear()
+ for i, d := 0, 0; i < len(src); {
+ write(t, title, &w, src[i:i+d])
+ i, d = i+d, d+1
+ if i+d > len(src) {
+ d = len(src) - i
+ }
+ }
+ verify(t, title, &w, &b, src, expected)
+}
+
+var tests = []struct {
+ testname string
+ minwidth, tabwidth, padding int
+ padchar byte
+ flags uint
+ src, expected string
+}{
+ {
+ "1a",
+ 8, 0, 1, '.', 0,
+ "",
+ "",
+ },
+
+ {
+ "1a debug",
+ 8, 0, 1, '.', Debug,
+ "",
+ "",
+ },
+
+ {
+ "1b esc stripped",
+ 8, 0, 1, '.', StripEscape,
+ "\xff\xff",
+ "",
+ },
+
+ {
+ "1b esc",
+ 8, 0, 1, '.', 0,
+ "\xff\xff",
+ "\xff\xff",
+ },
+
+ {
+ "1c esc stripped",
+ 8, 0, 1, '.', StripEscape,
+ "\xff\t\xff",
+ "\t",
+ },
+
+ {
+ "1c esc",
+ 8, 0, 1, '.', 0,
+ "\xff\t\xff",
+ "\xff\t\xff",
+ },
+
+ {
+ "1d esc stripped",
+ 8, 0, 1, '.', StripEscape,
+ "\xff\"foo\t\n\tbar\"\xff",
+ "\"foo\t\n\tbar\"",
+ },
+
+ {
+ "1d esc",
+ 8, 0, 1, '.', 0,
+ "\xff\"foo\t\n\tbar\"\xff",
+ "\xff\"foo\t\n\tbar\"\xff",
+ },
+
+ {
+ "1e esc stripped",
+ 8, 0, 1, '.', StripEscape,
+ "abc\xff\tdef", // unterminated escape
+ "abc\tdef",
+ },
+
+ {
+ "1e esc",
+ 8, 0, 1, '.', 0,
+ "abc\xff\tdef", // unterminated escape
+ "abc\xff\tdef",
+ },
+
+ {
+ "2",
+ 8, 0, 1, '.', 0,
+ "\n\n\n",
+ "\n\n\n",
+ },
+
+ {
+ "3",
+ 8, 0, 1, '.', 0,
+ "a\nb\nc",
+ "a\nb\nc",
+ },
+
+ {
+ "4a",
+ 8, 0, 1, '.', 0,
+ "\t", // '\t' terminates an empty cell on last line - nothing to print
+ "",
+ },
+
+ {
+ "4b",
+ 8, 0, 1, '.', AlignRight,
+ "\t", // '\t' terminates an empty cell on last line - nothing to print
+ "",
+ },
+
+ {
+ "5",
+ 8, 0, 1, '.', 0,
+ "*\t*",
+ "*.......*",
+ },
+
+ {
+ "5b",
+ 8, 0, 1, '.', 0,
+ "*\t*\n",
+ "*.......*\n",
+ },
+
+ {
+ "5c",
+ 8, 0, 1, '.', 0,
+ "*\t*\t",
+ "*.......*",
+ },
+
+ {
+ "5c debug",
+ 8, 0, 1, '.', Debug,
+ "*\t*\t",
+ "*.......|*",
+ },
+
+ {
+ "5d",
+ 8, 0, 1, '.', AlignRight,
+ "*\t*\t",
+ ".......**",
+ },
+
+ {
+ "6",
+ 8, 0, 1, '.', 0,
+ "\t\n",
+ "........\n",
+ },
+
+ {
+ "7a",
+ 8, 0, 1, '.', 0,
+ "a) foo",
+ "a) foo",
+ },
+
+ {
+ "7b",
+ 8, 0, 1, ' ', 0,
+ "b) foo\tbar",
+ "b) foo bar",
+ },
+
+ {
+ "7c",
+ 8, 0, 1, '.', 0,
+ "c) foo\tbar\t",
+ "c) foo..bar",
+ },
+
+ {
+ "7d",
+ 8, 0, 1, '.', 0,
+ "d) foo\tbar\n",
+ "d) foo..bar\n",
+ },
+
+ {
+ "7e",
+ 8, 0, 1, '.', 0,
+ "e) foo\tbar\t\n",
+ "e) foo..bar.....\n",
+ },
+
+ {
+ "7f",
+ 8, 0, 1, '.', FilterHTML,
+ "f) f<o\t<b>bar</b>\t\n",
+ "f) f<o..<b>bar</b>.....\n",
+ },
+
+ {
+ "7g",
+ 8, 0, 1, '.', FilterHTML,
+ "g) f<o\t<b>bar</b>\t non-terminated entity &",
+ "g) f<o..<b>bar</b>..... non-terminated entity &",
+ },
+
+ {
+ "7g debug",
+ 8, 0, 1, '.', FilterHTML | Debug,
+ "g) f<o\t<b>bar</b>\t non-terminated entity &",
+ "g) f<o..|<b>bar</b>.....| non-terminated entity &",
+ },
+
+ {
+ "8",
+ 8, 0, 1, '*', 0,
+ "Hello, world!\n",
+ "Hello, world!\n",
+ },
+
+ {
+ "9a",
+ 1, 0, 0, '.', 0,
+ "1\t2\t3\t4\n" +
+ "11\t222\t3333\t44444\n",
+
+ "1.2..3...4\n" +
+ "11222333344444\n",
+ },
+
+ {
+ "9b",
+ 1, 0, 0, '.', FilterHTML,
+ "1\t2<!---\f--->\t3\t4\n" + // \f inside HTML is ignored
+ "11\t222\t3333\t44444\n",
+
+ "1.2<!---\f--->..3...4\n" +
+ "11222333344444\n",
+ },
+
+ {
+ "9c",
+ 1, 0, 0, '.', 0,
+ "1\t2\t3\t4\f" + // \f causes a newline and flush
+ "11\t222\t3333\t44444\n",
+
+ "1234\n" +
+ "11222333344444\n",
+ },
+
+ {
+ "9c debug",
+ 1, 0, 0, '.', Debug,
+ "1\t2\t3\t4\f" + // \f causes a newline and flush
+ "11\t222\t3333\t44444\n",
+
+ "1|2|3|4\n" +
+ "---\n" +
+ "11|222|3333|44444\n",
+ },
+
+ {
+ "10a",
+ 5, 0, 0, '.', 0,
+ "1\t2\t3\t4\n",
+ "1....2....3....4\n",
+ },
+
+ {
+ "10b",
+ 5, 0, 0, '.', 0,
+ "1\t2\t3\t4\t\n",
+ "1....2....3....4....\n",
+ },
+
+ {
+ "11",
+ 8, 0, 1, '.', 0,
+ "本\tb\tc\n" +
+ "aa\t\u672c\u672c\u672c\tcccc\tddddd\n" +
+ "aaa\tbbbb\n",
+
+ "本.......b.......c\n" +
+ "aa......本本本.....cccc....ddddd\n" +
+ "aaa.....bbbb\n",
+ },
+
+ {
+ "12a",
+ 8, 0, 1, ' ', AlignRight,
+ "a\tè\tc\t\n" +
+ "aa\tèèè\tcccc\tddddd\t\n" +
+ "aaa\tèèèè\t\n",
+
+ " a è c\n" +
+ " aa èèè cccc ddddd\n" +
+ " aaa èèèè\n",
+ },
+
+ {
+ "12b",
+ 2, 0, 0, ' ', 0,
+ "a\tb\tc\n" +
+ "aa\tbbb\tcccc\n" +
+ "aaa\tbbbb\n",
+
+ "a b c\n" +
+ "aa bbbcccc\n" +
+ "aaabbbb\n",
+ },
+
+ {
+ "12c",
+ 8, 0, 1, '_', 0,
+ "a\tb\tc\n" +
+ "aa\tbbb\tcccc\n" +
+ "aaa\tbbbb\n",
+
+ "a_______b_______c\n" +
+ "aa______bbb_____cccc\n" +
+ "aaa_____bbbb\n",
+ },
+
+ {
+ "13a",
+ 4, 0, 1, '-', 0,
+ "4444\t日本語\t22\t1\t333\n" +
+ "999999999\t22\n" +
+ "7\t22\n" +
+ "\t\t\t88888888\n" +
+ "\n" +
+ "666666\t666666\t666666\t4444\n" +
+ "1\t1\t999999999\t0000000000\n",
+
+ "4444------日本語-22--1---333\n" +
+ "999999999-22\n" +
+ "7---------22\n" +
+ "------------------88888888\n" +
+ "\n" +
+ "666666-666666-666666----4444\n" +
+ "1------1------999999999-0000000000\n",
+ },
+
+ {
+ "13b",
+ 4, 0, 3, '.', 0,
+ "4444\t333\t22\t1\t333\n" +
+ "999999999\t22\n" +
+ "7\t22\n" +
+ "\t\t\t88888888\n" +
+ "\n" +
+ "666666\t666666\t666666\t4444\n" +
+ "1\t1\t999999999\t0000000000\n",
+
+ "4444........333...22...1...333\n" +
+ "999999999...22\n" +
+ "7...........22\n" +
+ "....................88888888\n" +
+ "\n" +
+ "666666...666666...666666......4444\n" +
+ "1........1........999999999...0000000000\n",
+ },
+
+ {
+ "13c",
+ 8, 8, 1, '\t', FilterHTML,
+ "4444\t333\t22\t1\t333\n" +
+ "999999999\t22\n" +
+ "7\t22\n" +
+ "\t\t\t88888888\n" +
+ "\n" +
+ "666666\t666666\t666666\t4444\n" +
+ "1\t1\t<font color=red attr=日本語>999999999</font>\t0000000000\n",
+
+ "4444\t\t333\t22\t1\t333\n" +
+ "999999999\t22\n" +
+ "7\t\t22\n" +
+ "\t\t\t\t88888888\n" +
+ "\n" +
+ "666666\t666666\t666666\t\t4444\n" +
+ "1\t1\t<font color=red attr=日本語>999999999</font>\t0000000000\n",
+ },
+
+ {
+ "14",
+ 1, 0, 2, ' ', AlignRight,
+ ".0\t.3\t2.4\t-5.1\t\n" +
+ "23.0\t12345678.9\t2.4\t-989.4\t\n" +
+ "5.1\t12.0\t2.4\t-7.0\t\n" +
+ ".0\t0.0\t332.0\t8908.0\t\n" +
+ ".0\t-.3\t456.4\t22.1\t\n" +
+ ".0\t1.2\t44.4\t-13.3\t\t",
+
+ " .0 .3 2.4 -5.1\n" +
+ " 23.0 12345678.9 2.4 -989.4\n" +
+ " 5.1 12.0 2.4 -7.0\n" +
+ " .0 0.0 332.0 8908.0\n" +
+ " .0 -.3 456.4 22.1\n" +
+ " .0 1.2 44.4 -13.3",
+ },
+
+ {
+ "14 debug",
+ 1, 0, 2, ' ', AlignRight | Debug,
+ ".0\t.3\t2.4\t-5.1\t\n" +
+ "23.0\t12345678.9\t2.4\t-989.4\t\n" +
+ "5.1\t12.0\t2.4\t-7.0\t\n" +
+ ".0\t0.0\t332.0\t8908.0\t\n" +
+ ".0\t-.3\t456.4\t22.1\t\n" +
+ ".0\t1.2\t44.4\t-13.3\t\t",
+
+ " .0| .3| 2.4| -5.1|\n" +
+ " 23.0| 12345678.9| 2.4| -989.4|\n" +
+ " 5.1| 12.0| 2.4| -7.0|\n" +
+ " .0| 0.0| 332.0| 8908.0|\n" +
+ " .0| -.3| 456.4| 22.1|\n" +
+ " .0| 1.2| 44.4| -13.3|",
+ },
+
+ {
+ "15a",
+ 4, 0, 0, '.', 0,
+ "a\t\tb",
+ "a.......b",
+ },
+
+ {
+ "15b",
+ 4, 0, 0, '.', DiscardEmptyColumns,
+ "a\t\tb", // htabs - do not discard column
+ "a.......b",
+ },
+
+ {
+ "15c",
+ 4, 0, 0, '.', DiscardEmptyColumns,
+ "a\v\vb",
+ "a...b",
+ },
+
+ {
+ "15d",
+ 4, 0, 0, '.', AlignRight | DiscardEmptyColumns,
+ "a\v\vb",
+ "...ab",
+ },
+
+ {
+ "16a",
+ 100, 100, 0, '\t', 0,
+ "a\tb\t\td\n" +
+ "a\tb\t\td\te\n" +
+ "a\n" +
+ "a\tb\tc\td\n" +
+ "a\tb\tc\td\te\n",
+
+ "a\tb\t\td\n" +
+ "a\tb\t\td\te\n" +
+ "a\n" +
+ "a\tb\tc\td\n" +
+ "a\tb\tc\td\te\n",
+ },
+
+ {
+ "16b",
+ 100, 100, 0, '\t', DiscardEmptyColumns,
+ "a\vb\v\vd\n" +
+ "a\vb\v\vd\ve\n" +
+ "a\n" +
+ "a\vb\vc\vd\n" +
+ "a\vb\vc\vd\ve\n",
+
+ "a\tb\td\n" +
+ "a\tb\td\te\n" +
+ "a\n" +
+ "a\tb\tc\td\n" +
+ "a\tb\tc\td\te\n",
+ },
+
+ {
+ "16b debug",
+ 100, 100, 0, '\t', DiscardEmptyColumns | Debug,
+ "a\vb\v\vd\n" +
+ "a\vb\v\vd\ve\n" +
+ "a\n" +
+ "a\vb\vc\vd\n" +
+ "a\vb\vc\vd\ve\n",
+
+ "a\t|b\t||d\n" +
+ "a\t|b\t||d\t|e\n" +
+ "a\n" +
+ "a\t|b\t|c\t|d\n" +
+ "a\t|b\t|c\t|d\t|e\n",
+ },
+
+ {
+ "16c",
+ 100, 100, 0, '\t', DiscardEmptyColumns,
+ "a\tb\t\td\n" + // hard tabs - do not discard column
+ "a\tb\t\td\te\n" +
+ "a\n" +
+ "a\tb\tc\td\n" +
+ "a\tb\tc\td\te\n",
+
+ "a\tb\t\td\n" +
+ "a\tb\t\td\te\n" +
+ "a\n" +
+ "a\tb\tc\td\n" +
+ "a\tb\tc\td\te\n",
+ },
+
+ {
+ "16c debug",
+ 100, 100, 0, '\t', DiscardEmptyColumns | Debug,
+ "a\tb\t\td\n" + // hard tabs - do not discard column
+ "a\tb\t\td\te\n" +
+ "a\n" +
+ "a\tb\tc\td\n" +
+ "a\tb\tc\td\te\n",
+
+ "a\t|b\t|\t|d\n" +
+ "a\t|b\t|\t|d\t|e\n" +
+ "a\n" +
+ "a\t|b\t|c\t|d\n" +
+ "a\t|b\t|c\t|d\t|e\n",
+ },
+}
+
+func Test(t *testing.T) {
+ for _, e := range tests {
+ check(t, e.testname, e.minwidth, e.tabwidth, e.padding, e.padchar, e.flags, e.src, e.expected)
+ }
+}
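As a companion to tests "5d" and "12a" above, a short sketch (same import-path assumption as before) of how the AlignRight flag changes padding, placing it to the left of each cell:

package main

import (
	"os"
	"text/tabwriter"
)

func main() {
	// Same parameters as test "12a": minwidth 8, padding 1, blank padchar.
	w := tabwriter.NewWriter(os.Stdout, 8, 0, 1, ' ', tabwriter.AlignRight)
	w.Write([]byte("a\tb\tc\t\n"))
	w.Write([]byte("aa\tbbb\tcccc\tddddd\t\n"))
	w.Flush()
}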
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package template implements data-driven templates for generating textual output
+such as HTML.
+
+Templates are executed by applying them to a data structure. Annotations in the
+template refer to elements of the data structure (typically a field of a struct
+or a key in a map) to control execution and derive values to be displayed.
+Execution of the template walks the structure and sets the cursor, represented
+by a period '.' and called "dot", to the value at the current location in the
+structure as execution proceeds.
+
+The input text for a template is UTF-8-encoded text in any format.
+"Actions"--data evaluations or control structures--are delimited by
+"{{" and "}}"; all text outside actions is copied to the output unchanged.
+Actions may not span newlines, although comments can.
+
+Once constructed, templates and template sets can be executed safely in
+parallel.
+
+Actions
+
+Here is the list of actions. "Arguments" and "pipelines" are evaluations of
+data, defined in detail below.
+
+*/
+// {{/* a comment */}}
+// A comment; discarded. May contain newlines.
+// Comments do not nest.
+/*
+
+ {{pipeline}}
+ The default textual representation of the value of the pipeline
+ is copied to the output.
+
+ {{if pipeline}} T1 {{end}}
+ If the value of the pipeline is empty, no output is generated;
+ otherwise, T1 is executed. The empty values are false, 0, any
+ nil pointer or interface value, and any array, slice, map, or
+ string of length zero.
+ Dot is unaffected.
+
+ {{if pipeline}} T1 {{else}} T0 {{end}}
+ If the value of the pipeline is empty, T0 is executed;
+ otherwise, T1 is executed. Dot is unaffected.
+
+ {{range pipeline}} T1 {{end}}
+ The value of the pipeline must be an array, slice, or map. If
+ the value of the pipeline has length zero, nothing is output;
+ otherwise, dot is set to the successive elements of the array,
+ slice, or map and T1 is executed.
+
+ {{range pipeline}} T1 {{else}} T0 {{end}}
+ The value of the pipeline must be an array, slice, or map. If
+ the value of the pipeline has length zero, dot is unaffected and
+ T0 is executed; otherwise, dot is set to the successive elements
+ of the array, slice, or map and T1 is executed.
+
+ {{template "name"}}
+ The template with the specified name is executed with nil data.
+
+ {{template "name" pipeline}}
+ The template with the specified name is executed with dot set
+ to the value of the pipeline.
+
+ {{with pipeline}} T1 {{end}}
+ If the value of the pipeline is empty, no output is generated;
+ otherwise, dot is set to the value of the pipeline and T1 is
+ executed.
+
+ {{with pipeline}} T1 {{else}} T0 {{end}}
+ If the value of the pipeline is empty, dot is unaffected and T0
+ is executed; otherwise, dot is set to the value of the pipeline
+ and T1 is executed.
+
+Arguments
+
+An argument is a simple value, denoted by one of the following.
+
+ - A boolean, string, character, integer, floating-point, imaginary
+ or complex constant in Go syntax. These behave like Go's untyped
+ constants, although raw strings may not span newlines.
+ - The character '.' (period):
+ .
+ The result is the value of dot.
+ - A variable name, which is a (possibly empty) alphanumeric string
+ preceded by a dollar sign, such as
+ $piOver2
+ or
+ $
+ The result is the value of the variable.
+ Variables are described below.
+ - The name of a field of the data, which must be a struct, preceded
+ by a period, such as
+ .Field
+ The result is the value of the field. Field invocations may be
+ chained:
+ .Field1.Field2
+ Fields can also be evaluated on variables, including chaining:
+ $x.Field1.Field2
+ - The name of a key of the data, which must be a map, preceded
+ by a period, such as
+ .Key
+ The result is the map element value indexed by the key.
+ Key invocations may be chained and combined with fields to any
+ depth:
+ .Field1.Key1.Field2.Key2
+	  Although keys must be alphanumeric identifiers, unlike field names
+	  they do not need to start with an upper case letter.
+ Keys can also be evaluated on variables, including chaining:
+ $x.key1.key2
+ - The name of a niladic method of the data, preceded by a period,
+ such as
+ .Method
+ The result is the value of invoking the method with dot as the
+ receiver, dot.Method(). Such a method must have one return value (of
+ any type) or two return values, the second of which is an error.
+ If it has two and the returned error is non-nil, execution terminates
+ and an error is returned to the caller as the value of Execute.
+ Method invocations may be chained and combined with fields and keys
+ to any depth:
+ .Field1.Key1.Method1.Field2.Key2.Method2
+ Methods can also be evaluated on variables, including chaining:
+ $x.Method1.Field
+ - The name of a niladic function, such as
+ fun
+ The result is the value of invoking the function, fun(). The return
+ types and values behave as in methods. Functions and function
+ names are described below.
+
+Arguments may evaluate to any type; if they are pointers the implementation
+automatically indirects to the base type when required.
+
+A pipeline is a possibly chained sequence of "commands". A command is a simple
+value (argument) or a function or method call, possibly with multiple arguments:
+
+ Argument
+ The result is the value of evaluating the argument.
+ .Method [Argument...]
+ The method can be alone or the last element of a chain but,
+ unlike methods in the middle of a chain, it can take arguments.
+ The result is the value of calling the method with the
+ arguments:
+ dot.Method(Argument1, etc.)
+ functionName [Argument...]
+ The result is the value of calling the function associated
+ with the name:
+ function(Argument1, etc.)
+ Functions and function names are described below.
+
+Pipelines
+
+A pipeline may be "chained" by separating a sequence of commands with pipeline
+characters '|'. In a chained pipeline, the result of each command is
+passed as the last argument of the following command. The output of the final
+command in the pipeline is the value of the pipeline.
+
+The output of a command will be either one value or two values, the second of
+which has type error. If that second value is present and evaluates to
+non-nil, execution terminates and the error is returned to the caller of
+Execute.
+
+Variables
+
+A pipeline inside an action may initialize a variable to capture the result.
+The initialization has syntax
+
+ $variable := pipeline
+
+where $variable is the name of the variable. An action that declares a
+variable produces no output.
+
+If a "range" action initializes a variable, the variable is set to the
+successive elements of the iteration. Also, a "range" may declare two
+variables, separated by a comma:
+
+ $index, $element := pipeline
+
+in which case $index and $element are set to the successive values of the
+array/slice index or map key and element, respectively. Note that if there is
+only one variable, it is assigned the element; this is opposite to the
+convention in Go range clauses.
+
+A variable's scope extends to the "end" action of the control structure ("if",
+"with", or "range") in which it is declared, or to the end of the template if
+there is no such control structure. A template invocation does not inherit
+variables from the point of its invocation.
+
+When execution begins, $ is set to the data argument passed to Execute, that is,
+to the starting value of dot.
+
+Examples
+
+Here are some example one-line templates demonstrating pipelines and variables.
+All produce the quoted word "output":
+
+ {{"\"output\""}}
+ A string constant.
+ {{`"output"`}}
+ A raw string constant.
+ {{printf "%q" "output"}}
+ A function call.
+ {{"output" | printf "%q"}}
+ A function call whose final argument comes from the previous
+ command.
+ {{"put" | printf "%s%s" "out" | printf "%q"}}
+ A more elaborate call.
+ {{"output" | printf "%s" | printf "%q"}}
+ A longer chain.
+ {{with "output"}}{{printf "%q" .}}{{end}}
+ A with action using dot.
+ {{with $x := "output" | printf "%q"}}{{$x}}{{end}}
+ A with action that creates and uses a variable.
+ {{with $x := "output"}}{{printf "%q" $x}}{{end}}
+ A with action that uses the variable in another action.
+ {{with $x := "output"}}{{$x | printf "%q"}}{{end}}
+ The same, but pipelined.
+
+Functions
+
+During execution functions are found in three function maps: first in the
+template, then in the "template set" (described below), and finally in the
+global function map. By default, no functions are defined in the template or
+the set but the Funcs methods can be used to add them.
+
+Predefined global functions are named as follows.
+
+ and
+ Returns the boolean AND of its arguments by returning the
+ first empty argument or the last argument, that is,
+ "and x y" behaves as "if x then y else x". All the
+ arguments are evaluated.
+ html
+ Returns the escaped HTML equivalent of the textual
+ representation of its arguments.
+ index
+ Returns the result of indexing its first argument by the
+ following arguments. Thus "index x 1 2 3" is, in Go syntax,
+ x[1][2][3]. Each indexed item must be a map, slice, or array.
+ js
+ Returns the escaped JavaScript equivalent of the textual
+ representation of its arguments.
+ len
+ Returns the integer length of its argument.
+ not
+ Returns the boolean negation of its single argument.
+ or
+ Returns the boolean OR of its arguments by returning the
+ first non-empty argument or the last argument, that is,
+ "or x y" behaves as "if x then x else y". All the
+ arguments are evaluated.
+ print
+ An alias for fmt.Sprint
+ printf
+ An alias for fmt.Sprintf
+ println
+ An alias for fmt.Sprintln
+ urlquery
+ Returns the escaped value of the textual representation of
+ its arguments in a form suitable for embedding in a URL query.
+
+The boolean functions take any zero value to be false and a non-zero value to
+be true.
+
+Template sets
+
+Each template is named by a string specified when it is created. A template may
+use a template invocation to instantiate another template directly or by its
+name; see the explanation of the template action above. The name is looked up
+in the template set associated with the template.
+
+If no template invocation actions occur in the template, the issue of template
+sets can be ignored. If it does contain invocations, though, it must be part
+of a template set in which to look up the names.
+
+There are two ways to construct template sets.
+
+The first is to use a Set's Parse method to create a set of named templates from
+a single input defining multiple templates. The syntax of the definitions is to
+surround each template declaration with a define and end action.
+
+The define action names the template being created by providing a string
+constant. Here is a simple example of input to Set.Parse:
+
+ `{{define "T1"}} definition of template T1 {{end}}
+ {{define "T2"}} definition of template T2 {{end}}
+ {{define "T3"}} {{template "T1"}} {{template "T2"}} {{end}}`
+
+This defines two templates, T1 and T2, and a third T3 that invokes the other two
+when it is executed.
+
+The second way to build a template set is to use Set's Add method to add a
+parsed template to a set. A template may be bound to at most one set. If it's
+necessary to have a template in multiple sets, the template definition must be
+parsed multiple times to create distinct *Template values.
+
+Set.Parse may be called multiple times on different inputs to construct the set.
+Two sets may therefore be built from a common base set of templates, each
+adding its own specializations through a further Parse call.
+
+A template may be executed directly or through Set.Execute, which executes a
+named template from the set. To invoke our example above, we might write,
+
+ err := set.Execute(os.Stdout, "T3", "no data needed")
+ if err != nil {
+ log.Fatalf("execution failed: %s", err)
+ }
+*/
+package template
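To make the actions, pipelines, and variables described above concrete, here is a small end-to-end sketch (assuming the package's post-reorganization import path text/template):

package main

import (
	"log"
	"os"
	"text/template"
)

func main() {
	// A range with two variables plus a pipeline feeding printf.
	const src = `{{range $i, $v := .}}[{{$i}}]{{$v | printf "%q"}} {{end}}`
	t, err := template.New("list").Parse(src)
	if err != nil {
		log.Fatal(err)
	}
	// Writes: [0]"a" [1]"b"  (each iteration ends with a space)
	if err := t.Execute(os.Stdout, []string{"a", "b"}); err != nil {
		log.Fatal(err)
	}
}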
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+ "fmt"
+ "io"
+ "reflect"
+ "runtime"
+ "strings"
+ "text/template/parse"
+)
+
+// state represents the state of an execution. It's not part of the
+// template so that multiple executions of the same template
+// can execute in parallel.
+type state struct {
+ tmpl *Template
+ wr io.Writer
+ line int // line number for errors
+ vars []variable // push-down stack of variable values.
+}
+
+// variable holds the dynamic value of a variable such as $, $x etc.
+type variable struct {
+ name string
+ value reflect.Value
+}
+
+// push pushes a new variable on the stack.
+func (s *state) push(name string, value reflect.Value) {
+ s.vars = append(s.vars, variable{name, value})
+}
+
+// mark returns the length of the variable stack.
+func (s *state) mark() int {
+ return len(s.vars)
+}
+
+// pop pops the variable stack up to the mark.
+func (s *state) pop(mark int) {
+ s.vars = s.vars[0:mark]
+}
+
+// setVar overwrites the top-nth variable on the stack. Used by range iterations.
+func (s *state) setVar(n int, value reflect.Value) {
+ s.vars[len(s.vars)-n].value = value
+}
+
+// varValue returns the value of the named variable.
+func (s *state) varValue(name string) reflect.Value {
+ for i := s.mark() - 1; i >= 0; i-- {
+ if s.vars[i].name == name {
+ return s.vars[i].value
+ }
+ }
+ s.errorf("undefined variable: %s", name)
+ return zero
+}
+
+var zero reflect.Value
+
+// errorf formats the error and terminates processing.
+func (s *state) errorf(format string, args ...interface{}) {
+ format = fmt.Sprintf("template: %s:%d: %s", s.tmpl.Name(), s.line, format)
+ panic(fmt.Errorf(format, args...))
+}
+
+// error terminates processing.
+func (s *state) error(err error) {
+ s.errorf("%s", err)
+}
+
+// errRecover is the handler that turns panics into returns from the top
+// level of Execute.
+func errRecover(errp *error) {
+ e := recover()
+ if e != nil {
+ if _, ok := e.(runtime.Error); ok {
+ panic(e)
+ }
+ *errp = e.(error)
+ }
+}
+
+// Execute applies a parsed template to the specified data object,
+// writing the output to wr.
+func (t *Template) Execute(wr io.Writer, data interface{}) (err error) {
+ defer errRecover(&err)
+ value := reflect.ValueOf(data)
+ state := &state{
+ tmpl: t,
+ wr: wr,
+ line: 1,
+ vars: []variable{{"$", value}},
+ }
+ if t.Tree == nil || t.Root == nil {
+ state.errorf("must be parsed before execution")
+ }
+ state.walk(value, t.Root)
+ return
+}
+
+// Walk functions step through the major pieces of the template structure,
+// generating output as they go.
+func (s *state) walk(dot reflect.Value, n parse.Node) {
+ switch n := n.(type) {
+ case *parse.ActionNode:
+ s.line = n.Line
+ // Do not pop variables so they persist until next end.
+ // Also, if the action declares variables, don't print the result.
+ val := s.evalPipeline(dot, n.Pipe)
+ if len(n.Pipe.Decl) == 0 {
+ s.printValue(n, val)
+ }
+ case *parse.IfNode:
+ s.line = n.Line
+ s.walkIfOrWith(parse.NodeIf, dot, n.Pipe, n.List, n.ElseList)
+ case *parse.ListNode:
+ for _, node := range n.Nodes {
+ s.walk(dot, node)
+ }
+ case *parse.RangeNode:
+ s.line = n.Line
+ s.walkRange(dot, n)
+ case *parse.TemplateNode:
+ s.line = n.Line
+ s.walkTemplate(dot, n)
+ case *parse.TextNode:
+ if _, err := s.wr.Write(n.Text); err != nil {
+ s.error(err)
+ }
+ case *parse.WithNode:
+ s.line = n.Line
+ s.walkIfOrWith(parse.NodeWith, dot, n.Pipe, n.List, n.ElseList)
+ default:
+ s.errorf("unknown node: %s", n)
+ }
+}
+
+// walkIfOrWith walks an 'if' or 'with' node. The two control structures
+// are identical in behavior except that 'with' sets dot.
+func (s *state) walkIfOrWith(typ parse.NodeType, dot reflect.Value, pipe *parse.PipeNode, list, elseList *parse.ListNode) {
+ defer s.pop(s.mark())
+ val := s.evalPipeline(dot, pipe)
+ truth, ok := isTrue(val)
+ if !ok {
+ s.errorf("if/with can't use %v", val)
+ }
+ if truth {
+ if typ == parse.NodeWith {
+ s.walk(val, list)
+ } else {
+ s.walk(dot, list)
+ }
+ } else if elseList != nil {
+ s.walk(dot, elseList)
+ }
+}
+
+// isTrue returns whether the value is 'true', in the sense of not the zero of its type,
+// and whether the value has a meaningful truth value.
+func isTrue(val reflect.Value) (truth, ok bool) {
+ if !val.IsValid() {
+ // Something like var x interface{}, never set. It's a form of nil.
+ return false, true
+ }
+ switch val.Kind() {
+ case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+ truth = val.Len() > 0
+ case reflect.Bool:
+ truth = val.Bool()
+ case reflect.Complex64, reflect.Complex128:
+ truth = val.Complex() != 0
+ case reflect.Chan, reflect.Func, reflect.Ptr, reflect.Interface:
+ truth = !val.IsNil()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ truth = val.Int() != 0
+ case reflect.Float32, reflect.Float64:
+ truth = val.Float() != 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ truth = val.Uint() != 0
+ case reflect.Struct:
+ truth = true // Struct values are always true.
+ default:
+ return
+ }
+ return truth, true
+}
+
+func (s *state) walkRange(dot reflect.Value, r *parse.RangeNode) {
+ defer s.pop(s.mark())
+ val, _ := indirect(s.evalPipeline(dot, r.Pipe))
+ // mark top of stack before any variables in the body are pushed.
+ mark := s.mark()
+ oneIteration := func(index, elem reflect.Value) {
+ // Set top var (lexically the second if there are two) to the element.
+ if len(r.Pipe.Decl) > 0 {
+ s.setVar(1, elem)
+ }
+ // Set next var (lexically the first if there are two) to the index.
+ if len(r.Pipe.Decl) > 1 {
+ s.setVar(2, index)
+ }
+ s.walk(elem, r.List)
+ s.pop(mark)
+ }
+ switch val.Kind() {
+ case reflect.Array, reflect.Slice:
+ if val.Len() == 0 {
+ break
+ }
+ for i := 0; i < val.Len(); i++ {
+ oneIteration(reflect.ValueOf(i), val.Index(i))
+ }
+ return
+ case reflect.Map:
+ if val.Len() == 0 {
+ break
+ }
+ for _, key := range val.MapKeys() {
+ oneIteration(key, val.MapIndex(key))
+ }
+ return
+ case reflect.Chan:
+ if val.IsNil() {
+ break
+ }
+ i := 0
+ for ; ; i++ {
+ elem, ok := val.Recv()
+ if !ok {
+ break
+ }
+ oneIteration(reflect.ValueOf(i), elem)
+ }
+ if i == 0 {
+ break
+ }
+ return
+ case reflect.Invalid:
+ break // An invalid value is likely a nil map, etc. and acts like an empty map.
+ default:
+ s.errorf("range can't iterate over %v", val)
+ }
+ if r.ElseList != nil {
+ s.walk(dot, r.ElseList)
+ }
+}
+
+func (s *state) walkTemplate(dot reflect.Value, t *parse.TemplateNode) {
+ set := s.tmpl.set
+ if set == nil {
+ s.errorf("no set defined in which to invoke template named %q", t.Name)
+ }
+ tmpl := set.tmpl[t.Name]
+ if tmpl == nil {
+ s.errorf("template %q not in set", t.Name)
+ }
+ // Variables declared by the pipeline persist.
+ dot = s.evalPipeline(dot, t.Pipe)
+ newState := *s
+ newState.tmpl = tmpl
+ // No dynamic scoping: template invocations inherit no variables.
+ newState.vars = []variable{{"$", dot}}
+ newState.walk(dot, tmpl.Root)
+}
+
+// Eval functions evaluate pipelines, commands, and their elements and extract
+// values from the data structure by examining fields, calling methods, and so on.
+// The printing of those values happens only through walk functions.
+
+// evalPipeline returns the value acquired by evaluating a pipeline. If the
+// pipeline has a variable declaration, the variable will be pushed on the
+// stack. Callers should therefore pop the stack after they are finished
+// executing commands depending on the pipeline value.
+func (s *state) evalPipeline(dot reflect.Value, pipe *parse.PipeNode) (value reflect.Value) {
+ if pipe == nil {
+ return
+ }
+ for _, cmd := range pipe.Cmds {
+ value = s.evalCommand(dot, cmd, value) // previous value is this one's final arg.
+ // If the object has type interface{}, dig down one level to the thing inside.
+ if value.Kind() == reflect.Interface && value.Type().NumMethod() == 0 {
+ value = reflect.ValueOf(value.Interface()) // lovely!
+ }
+ }
+ for _, variable := range pipe.Decl {
+ s.push(variable.Ident[0], value)
+ }
+ return value
+}
+
+func (s *state) notAFunction(args []parse.Node, final reflect.Value) {
+ if len(args) > 1 || final.IsValid() {
+ s.errorf("can't give argument to non-function %s", args[0])
+ }
+}
+
+func (s *state) evalCommand(dot reflect.Value, cmd *parse.CommandNode, final reflect.Value) reflect.Value {
+ firstWord := cmd.Args[0]
+ switch n := firstWord.(type) {
+ case *parse.FieldNode:
+ return s.evalFieldNode(dot, n, cmd.Args, final)
+ case *parse.IdentifierNode:
+ // Must be a function.
+ return s.evalFunction(dot, n.Ident, cmd.Args, final)
+ case *parse.VariableNode:
+ return s.evalVariableNode(dot, n, cmd.Args, final)
+ }
+ s.notAFunction(cmd.Args, final)
+ switch word := firstWord.(type) {
+ case *parse.BoolNode:
+ return reflect.ValueOf(word.True)
+ case *parse.DotNode:
+ return dot
+ case *parse.NumberNode:
+ return s.idealConstant(word)
+ case *parse.StringNode:
+ return reflect.ValueOf(word.Text)
+ }
+ s.errorf("can't evaluate command %q", firstWord)
+ panic("not reached")
+}
+
+// idealConstant is called to return the value of a number in a context where
+// we don't know the type. In that case, the syntax of the number tells us
+// its type, and we use Go rules to resolve. Note there is no such thing as
+// a uint ideal constant in this situation - the value must be of int type.
+func (s *state) idealConstant(constant *parse.NumberNode) reflect.Value {
+ // These are ideal constants but we don't know the type
+ // and we have no context. (If it was a method argument,
+ // we'd know what we need.) The syntax guides us to some extent.
+ switch {
+ case constant.IsComplex:
+ return reflect.ValueOf(constant.Complex128) // incontrovertible.
+ case constant.IsFloat && strings.IndexAny(constant.Text, ".eE") >= 0:
+ return reflect.ValueOf(constant.Float64)
+ case constant.IsInt:
+ n := int(constant.Int64)
+ if int64(n) != constant.Int64 {
+ s.errorf("%s overflows int", constant.Text)
+ }
+ return reflect.ValueOf(n)
+ case constant.IsUint:
+ s.errorf("%s overflows int", constant.Text)
+ }
+ return zero
+}
+
+func (s *state) evalFieldNode(dot reflect.Value, field *parse.FieldNode, args []parse.Node, final reflect.Value) reflect.Value {
+ return s.evalFieldChain(dot, dot, field.Ident, args, final)
+}
+
+func (s *state) evalVariableNode(dot reflect.Value, v *parse.VariableNode, args []parse.Node, final reflect.Value) reflect.Value {
+ // $x.Field has $x as the first ident, Field as the second. Eval the var, then the fields.
+ value := s.varValue(v.Ident[0])
+ if len(v.Ident) == 1 {
+ return value
+ }
+ return s.evalFieldChain(dot, value, v.Ident[1:], args, final)
+}
+
+// evalFieldChain evaluates .X.Y.Z possibly followed by arguments.
+// dot is the environment in which to evaluate arguments, while
+// receiver is the value being walked along the chain.
+func (s *state) evalFieldChain(dot, receiver reflect.Value, ident []string, args []parse.Node, final reflect.Value) reflect.Value {
+ n := len(ident)
+ for i := 0; i < n-1; i++ {
+ receiver = s.evalField(dot, ident[i], nil, zero, receiver)
+ }
+ // Now if it's a method, it gets the arguments.
+ return s.evalField(dot, ident[n-1], args, final, receiver)
+}
+
+func (s *state) evalFunction(dot reflect.Value, name string, args []parse.Node, final reflect.Value) reflect.Value {
+ function, ok := findFunction(name, s.tmpl, s.tmpl.set)
+ if !ok {
+ s.errorf("%q is not a defined function", name)
+ }
+ return s.evalCall(dot, function, name, args, final)
+}
+
+// evalField evaluates an expression like (.Field) or (.Field arg1 arg2).
+// The 'final' argument represents the return value from the preceding
+// value of the pipeline, if any.
+func (s *state) evalField(dot reflect.Value, fieldName string, args []parse.Node, final, receiver reflect.Value) reflect.Value {
+ if !receiver.IsValid() {
+ return zero
+ }
+ typ := receiver.Type()
+ receiver, _ = indirect(receiver)
+ // Unless it's an interface, need to get to a value of type *T to guarantee
+ // we see all methods of T and *T.
+ ptr := receiver
+ if ptr.Kind() != reflect.Interface && ptr.CanAddr() {
+ ptr = ptr.Addr()
+ }
+ if method, ok := methodByName(ptr, fieldName); ok {
+ return s.evalCall(dot, method, fieldName, args, final)
+ }
+ hasArgs := len(args) > 1 || final.IsValid()
+ // It's not a method; is it a field of a struct?
+ receiver, isNil := indirect(receiver)
+ if receiver.Kind() == reflect.Struct {
+ tField, ok := receiver.Type().FieldByName(fieldName)
+ if ok {
+ field := receiver.FieldByIndex(tField.Index)
+ if hasArgs {
+ s.errorf("%s is not a method but has arguments", fieldName)
+ }
+ if tField.PkgPath == "" { // field is exported
+ return field
+ }
+ }
+ }
+ // If it's a map, attempt to use the field name as a key.
+ if receiver.Kind() == reflect.Map {
+ nameVal := reflect.ValueOf(fieldName)
+ if nameVal.Type().AssignableTo(receiver.Type().Key()) {
+ if hasArgs {
+ s.errorf("%s is not a method but has arguments", fieldName)
+ }
+ return receiver.MapIndex(nameVal)
+ }
+ }
+ if isNil {
+ s.errorf("nil pointer evaluating %s.%s", typ, fieldName)
+ }
+ s.errorf("can't evaluate field %s in type %s", fieldName, typ)
+ panic("not reached")
+}
+
+// TODO: delete when reflect's own MethodByName is released.
+func methodByName(receiver reflect.Value, name string) (reflect.Value, bool) {
+ typ := receiver.Type()
+ for i := 0; i < typ.NumMethod(); i++ {
+ if typ.Method(i).Name == name {
+ return receiver.Method(i), true // This value includes the receiver.
+ }
+ }
+ return zero, false
+}
+
+var (
+ errorType = reflect.TypeOf((*error)(nil)).Elem()
+ fmtStringerType = reflect.TypeOf((*fmt.Stringer)(nil)).Elem()
+)
+
+// evalCall executes a function or method call. If it's a method, fun already has the receiver bound, so
+// it looks just like a function call. The arg list, if non-nil, includes (in the manner of the shell) arg[0]
+// as the function itself.
+func (s *state) evalCall(dot, fun reflect.Value, name string, args []parse.Node, final reflect.Value) reflect.Value {
+ if args != nil {
+ args = args[1:] // Zeroth arg is function name/node; not passed to function.
+ }
+ typ := fun.Type()
+ numIn := len(args)
+ if final.IsValid() {
+ numIn++
+ }
+ numFixed := len(args)
+ if typ.IsVariadic() {
+ numFixed = typ.NumIn() - 1 // last arg is the variadic one.
+ if numIn < numFixed {
+ s.errorf("wrong number of args for %s: want at least %d got %d", name, typ.NumIn()-1, len(args))
+ }
+ } else if numIn < typ.NumIn()-1 || !typ.IsVariadic() && numIn != typ.NumIn() {
+ s.errorf("wrong number of args for %s: want %d got %d", name, typ.NumIn(), len(args))
+ }
+ if !goodFunc(typ) {
+ s.errorf("can't handle multiple results from method/function %q", name)
+ }
+ // Build the arg list.
+ argv := make([]reflect.Value, numIn)
+ // Args must be evaluated. Fixed args first.
+ i := 0
+ for ; i < numFixed; i++ {
+ argv[i] = s.evalArg(dot, typ.In(i), args[i])
+ }
+ // Now the ... args.
+ if typ.IsVariadic() {
+ argType := typ.In(typ.NumIn() - 1).Elem() // Argument is a slice.
+ for ; i < len(args); i++ {
+ argv[i] = s.evalArg(dot, argType, args[i])
+ }
+ }
+ // Add final value if necessary.
+ if final.IsValid() {
+ argv[i] = final
+ }
+ result := fun.Call(argv)
+ // If we have an error that is not nil, stop execution and return that error to the caller.
+ if len(result) == 2 && !result[1].IsNil() {
+ s.errorf("error calling %s: %s", name, result[1].Interface().(error))
+ }
+ return result[0]
+}
+
+// validateType guarantees that the value is valid and assignable to the type.
+func (s *state) validateType(value reflect.Value, typ reflect.Type) reflect.Value {
+ if !value.IsValid() {
+ s.errorf("invalid value; expected %s", typ)
+ }
+ if !value.Type().AssignableTo(typ) {
+ // Does one dereference or indirection work? We could do more, as we
+ // do with method receivers, but that gets messy and method receivers
+ // are much more constrained, so it makes more sense there than here.
+ // Besides, one is almost always all you need.
+ switch {
+ case value.Kind() == reflect.Ptr && value.Type().Elem().AssignableTo(typ):
+ value = value.Elem()
+ case reflect.PtrTo(value.Type()).AssignableTo(typ) && value.CanAddr():
+ value = value.Addr()
+ default:
+ s.errorf("wrong type for value; expected %s; got %s", typ, value.Type())
+ }
+ }
+ return value
+}
+
+func (s *state) evalArg(dot reflect.Value, typ reflect.Type, n parse.Node) reflect.Value {
+ switch arg := n.(type) {
+ case *parse.DotNode:
+ return s.validateType(dot, typ)
+ case *parse.FieldNode:
+ return s.validateType(s.evalFieldNode(dot, arg, []parse.Node{n}, zero), typ)
+ case *parse.VariableNode:
+ return s.validateType(s.evalVariableNode(dot, arg, nil, zero), typ)
+ }
+ switch typ.Kind() {
+ case reflect.Bool:
+ return s.evalBool(typ, n)
+ case reflect.Complex64, reflect.Complex128:
+ return s.evalComplex(typ, n)
+ case reflect.Float32, reflect.Float64:
+ return s.evalFloat(typ, n)
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return s.evalInteger(typ, n)
+ case reflect.Interface:
+ if typ.NumMethod() == 0 {
+ return s.evalEmptyInterface(dot, n)
+ }
+ case reflect.String:
+ return s.evalString(typ, n)
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return s.evalUnsignedInteger(typ, n)
+ }
+ s.errorf("can't handle %s for arg of type %s", n, typ)
+ panic("not reached")
+}
+
+func (s *state) evalBool(typ reflect.Type, n parse.Node) reflect.Value {
+ if n, ok := n.(*parse.BoolNode); ok {
+ value := reflect.New(typ).Elem()
+ value.SetBool(n.True)
+ return value
+ }
+ s.errorf("expected bool; found %s", n)
+ panic("not reached")
+}
+
+func (s *state) evalString(typ reflect.Type, n parse.Node) reflect.Value {
+ if n, ok := n.(*parse.StringNode); ok {
+ value := reflect.New(typ).Elem()
+ value.SetString(n.Text)
+ return value
+ }
+ s.errorf("expected string; found %s", n)
+ panic("not reached")
+}
+
+func (s *state) evalInteger(typ reflect.Type, n parse.Node) reflect.Value {
+ if n, ok := n.(*parse.NumberNode); ok && n.IsInt {
+ value := reflect.New(typ).Elem()
+ value.SetInt(n.Int64)
+ return value
+ }
+ s.errorf("expected integer; found %s", n)
+ panic("not reached")
+}
+
+func (s *state) evalUnsignedInteger(typ reflect.Type, n parse.Node) reflect.Value {
+ if n, ok := n.(*parse.NumberNode); ok && n.IsUint {
+ value := reflect.New(typ).Elem()
+ value.SetUint(n.Uint64)
+ return value
+ }
+ s.errorf("expected unsigned integer; found %s", n)
+ panic("not reached")
+}
+
+func (s *state) evalFloat(typ reflect.Type, n parse.Node) reflect.Value {
+ if n, ok := n.(*parse.NumberNode); ok && n.IsFloat {
+ value := reflect.New(typ).Elem()
+ value.SetFloat(n.Float64)
+ return value
+ }
+ s.errorf("expected float; found %s", n)
+ panic("not reached")
+}
+
+func (s *state) evalComplex(typ reflect.Type, n parse.Node) reflect.Value {
+ if n, ok := n.(*parse.NumberNode); ok && n.IsComplex {
+ value := reflect.New(typ).Elem()
+ value.SetComplex(n.Complex128)
+ return value
+ }
+ s.errorf("expected complex; found %s", n)
+ panic("not reached")
+}
+
+func (s *state) evalEmptyInterface(dot reflect.Value, n parse.Node) reflect.Value {
+ switch n := n.(type) {
+ case *parse.BoolNode:
+ return reflect.ValueOf(n.True)
+ case *parse.DotNode:
+ return dot
+ case *parse.FieldNode:
+ return s.evalFieldNode(dot, n, nil, zero)
+ case *parse.IdentifierNode:
+ return s.evalFunction(dot, n.Ident, nil, zero)
+ case *parse.NumberNode:
+ return s.idealConstant(n)
+ case *parse.StringNode:
+ return reflect.ValueOf(n.Text)
+ case *parse.VariableNode:
+ return s.evalVariableNode(dot, n, nil, zero)
+ }
+ s.errorf("can't handle assignment of %s to empty interface argument", n)
+ panic("not reached")
+}
+
+// indirect returns the item at the end of indirection, and a bool to indicate if it's nil.
+// We indirect through pointers and empty interfaces (only) because
+// non-empty interfaces have methods we might need.
+func indirect(v reflect.Value) (rv reflect.Value, isNil bool) {
+ for ; v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface; v = v.Elem() {
+ if v.IsNil() {
+ return v, true
+ }
+ if v.Kind() == reflect.Interface && v.NumMethod() > 0 {
+ break
+ }
+ }
+ return v, false
+}
+
+// printValue writes the textual representation of the value to the output of
+// the template.
+func (s *state) printValue(n parse.Node, v reflect.Value) {
+ if v.Kind() == reflect.Ptr {
+ v, _ = indirect(v) // fmt.Fprint handles nil.
+ }
+ if !v.IsValid() {
+ fmt.Fprint(s.wr, "<no value>")
+ return
+ }
+
+ if !v.Type().Implements(errorType) && !v.Type().Implements(fmtStringerType) {
+ if v.CanAddr() && (reflect.PtrTo(v.Type()).Implements(errorType) || reflect.PtrTo(v.Type()).Implements(fmtStringerType)) {
+ v = v.Addr()
+ } else {
+ switch v.Kind() {
+ case reflect.Chan, reflect.Func:
+ s.errorf("can't print %s of type %s", n, v.Type())
+ }
+ }
+ }
+ fmt.Fprint(s.wr, v.Interface())
+}
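The errorf/errRecover pair above lets deeply nested evaluation code abort by panicking with an error value, which Execute converts back into an ordinary error return. A standalone sketch of the same idiom, with hypothetical names:

package main

import (
	"errors"
	"fmt"
	"runtime"
)

// run mirrors Execute's "defer errRecover(&err)": a panic carrying an error
// becomes a normal return value, while genuine runtime errors keep panicking.
func run() (err error) {
	defer func() {
		if e := recover(); e != nil {
			if _, ok := e.(runtime.Error); ok {
				panic(e)
			}
			err = e.(error)
		}
	}()
	panic(errors.New("example: abort from deep inside evaluation"))
}

func main() {
	fmt.Println(run()) // prints the recovered error instead of crashing
}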
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+ "bytes"
+ "errors"
+ "flag"
+ "fmt"
+ "os"
+ "reflect"
+ "sort"
+ "strings"
+ "testing"
+)
+
+var debug = flag.Bool("debug", false, "show the errors produced by the tests")
+
+// T has lots of interesting pieces to use to test execution.
+type T struct {
+ // Basics
+ True bool
+ I int
+ U16 uint16
+ X string
+ FloatZero float64
+	ComplexZero complex128
+ // Nested structs.
+ U *U
+ // Struct with String method.
+ V0 V
+ V1, V2 *V
+ // Struct with Error method.
+ W0 W
+ W1, W2 *W
+ // Slices
+ SI []int
+ SIEmpty []int
+ SB []bool
+ // Maps
+ MSI map[string]int
+ MSIone map[string]int // one element, for deterministic output
+ MSIEmpty map[string]int
+ MXI map[interface{}]int
+ MII map[int]int
+ SMSI []map[string]int
+ // Empty interfaces; used to see if we can dig inside one.
+ Empty0 interface{} // nil
+ Empty1 interface{}
+ Empty2 interface{}
+ Empty3 interface{}
+ Empty4 interface{}
+ // Non-empty interface.
+ NonEmptyInterface I
+ // Stringer.
+ Str fmt.Stringer
+ Err error
+ // Pointers
+ PI *int
+ PSI *[]int
+ NIL *int
+ // Template to test evaluation of templates.
+ Tmpl *Template
+}
+
+type U struct {
+ V string
+}
+
+type V struct {
+ j int
+}
+
+func (v *V) String() string {
+ if v == nil {
+ return "nilV"
+ }
+ return fmt.Sprintf("<%d>", v.j)
+}
+
+type W struct {
+ k int
+}
+
+func (w *W) Error() string {
+ if w == nil {
+ return "nilW"
+ }
+ return fmt.Sprintf("[%d]", w.k)
+}
+
+var tVal = &T{
+ True: true,
+ I: 17,
+ U16: 16,
+ X: "x",
+ U: &U{"v"},
+ V0: V{6666},
+ V1: &V{7777}, // leave V2 as nil
+ W0: W{888},
+ W1: &W{999}, // leave W2 as nil
+ SI: []int{3, 4, 5},
+ SB: []bool{true, false},
+ MSI: map[string]int{"one": 1, "two": 2, "three": 3},
+ MSIone: map[string]int{"one": 1},
+ MXI: map[interface{}]int{"one": 1},
+ MII: map[int]int{1: 1},
+ SMSI: []map[string]int{
+ {"one": 1, "two": 2},
+ {"eleven": 11, "twelve": 12},
+ },
+ Empty1: 3,
+ Empty2: "empty2",
+ Empty3: []int{7, 8},
+ Empty4: &U{"UinEmpty"},
+ NonEmptyInterface: new(T),
+ Str: bytes.NewBuffer([]byte("foozle")),
+ Err: errors.New("erroozle"),
+ PI: newInt(23),
+ PSI: newIntSlice(21, 22, 23),
+ Tmpl: Must(New("x").Parse("test template")), // "x" is the value of .X
+}
+
+// A non-empty interface.
+type I interface {
+ Method0() string
+}
+
+var iVal I = tVal
+
+// Helpers for creation.
+func newInt(n int) *int {
+ p := new(int)
+ *p = n
+ return p
+}
+
+func newIntSlice(n ...int) *[]int {
+ p := new([]int)
+ *p = make([]int, len(n))
+ copy(*p, n)
+ return p
+}
+
+// Simple methods with and without arguments.
+func (t *T) Method0() string {
+ return "M0"
+}
+
+func (t *T) Method1(a int) int {
+ return a
+}
+
+func (t *T) Method2(a uint16, b string) string {
+ return fmt.Sprintf("Method2: %d %s", a, b)
+}
+
+func (t *T) MAdd(a int, b []int) []int {
+ v := make([]int, len(b))
+ for i, x := range b {
+ v[i] = x + a
+ }
+ return v
+}
+
+// MSort is used to sort map keys for stable output. (Nice trick!)
+func (t *T) MSort(m map[string]int) []string {
+ keys := make([]string, len(m))
+ i := 0
+ for k := range m {
+ keys[i] = k
+ i++
+ }
+ sort.Strings(keys)
+ return keys
+}
+
+// EPERM returns a value and an error according to its argument.
+func (t *T) EPERM(error bool) (bool, error) {
+ if error {
+ return true, os.EPERM
+ }
+ return false, nil
+}
+
+// A few methods to test chaining.
+func (t *T) GetU() *U {
+ return t.U
+}
+
+func (u *U) TrueFalse(b bool) string {
+ if b {
+ return "true"
+ }
+ return ""
+}
+
+func typeOf(arg interface{}) string {
+ return fmt.Sprintf("%T", arg)
+}
+
+type execTest struct {
+ name string
+ input string
+ output string
+ data interface{}
+ ok bool
+}
+
+// bigInt and bigUint are hex strings representing numbers on either side
+// of the max int boundary.
+// We do it this way so the test doesn't depend on ints being 32 bits.
+var (
+ bigInt = fmt.Sprintf("0x%x", int(1<<uint(reflect.TypeOf(0).Bits()-1)-1))
+ bigUint = fmt.Sprintf("0x%x", uint(1<<uint(reflect.TypeOf(0).Bits()-1)))
+)
+
+var execTests = []execTest{
+ // Trivial cases.
+ {"empty", "", "", nil, true},
+ {"text", "some text", "some text", nil, true},
+
+ // Ideal constants.
+ {"ideal int", "{{typeOf 3}}", "int", 0, true},
+ {"ideal float", "{{typeOf 1.0}}", "float64", 0, true},
+ {"ideal exp float", "{{typeOf 1e1}}", "float64", 0, true},
+ {"ideal complex", "{{typeOf 1i}}", "complex128", 0, true},
+ {"ideal int", "{{typeOf " + bigInt + "}}", "int", 0, true},
+ {"ideal too big", "{{typeOf " + bigUint + "}}", "", 0, false},
+
+ // Fields of structs.
+ {".X", "-{{.X}}-", "-x-", tVal, true},
+ {".U.V", "-{{.U.V}}-", "-v-", tVal, true},
+
+ // Fields on maps.
+ {"map .one", "{{.MSI.one}}", "1", tVal, true},
+ {"map .two", "{{.MSI.two}}", "2", tVal, true},
+ {"map .NO", "{{.MSI.NO}}", "<no value>", tVal, true},
+ {"map .one interface", "{{.MXI.one}}", "1", tVal, true},
+ {"map .WRONG args", "{{.MSI.one 1}}", "", tVal, false},
+ {"map .WRONG type", "{{.MII.one}}", "", tVal, false},
+
+ // Dots of all kinds to test basic evaluation.
+ {"dot int", "<{{.}}>", "<13>", 13, true},
+ {"dot uint", "<{{.}}>", "<14>", uint(14), true},
+ {"dot float", "<{{.}}>", "<15.1>", 15.1, true},
+ {"dot bool", "<{{.}}>", "<true>", true, true},
+ {"dot complex", "<{{.}}>", "<(16.2-17i)>", 16.2 - 17i, true},
+ {"dot string", "<{{.}}>", "<hello>", "hello", true},
+ {"dot slice", "<{{.}}>", "<[-1 -2 -3]>", []int{-1, -2, -3}, true},
+ {"dot map", "<{{.}}>", "<map[two:22]>", map[string]int{"two": 22}, true},
+ {"dot struct", "<{{.}}>", "<{7 seven}>", struct {
+ a int
+ b string
+ }{7, "seven"}, true},
+
+ // Variables.
+ {"$ int", "{{$}}", "123", 123, true},
+ {"$.I", "{{$.I}}", "17", tVal, true},
+ {"$.U.V", "{{$.U.V}}", "v", tVal, true},
+ {"declare in action", "{{$x := $.U.V}}{{$x}}", "v", tVal, true},
+
+ // Type with String method.
+ {"V{6666}.String()", "-{{.V0}}-", "-<6666>-", tVal, true},
+ {"&V{7777}.String()", "-{{.V1}}-", "-<7777>-", tVal, true},
+ {"(*V)(nil).String()", "-{{.V2}}-", "-nilV-", tVal, true},
+
+ // Type with Error method.
+ {"W{888}.Error()", "-{{.W0}}-", "-[888]-", tVal, true},
+ {"&W{999}.Error()", "-{{.W1}}-", "-[999]-", tVal, true},
+ {"(*W)(nil).Error()", "-{{.W2}}-", "-nilW-", tVal, true},
+
+ // Pointers.
+ {"*int", "{{.PI}}", "23", tVal, true},
+ {"*[]int", "{{.PSI}}", "[21 22 23]", tVal, true},
+ {"*[]int[1]", "{{index .PSI 1}}", "22", tVal, true},
+ {"NIL", "{{.NIL}}", "<nil>", tVal, true},
+
+ // Empty interfaces holding values.
+ {"empty nil", "{{.Empty0}}", "<no value>", tVal, true},
+ {"empty with int", "{{.Empty1}}", "3", tVal, true},
+ {"empty with string", "{{.Empty2}}", "empty2", tVal, true},
+ {"empty with slice", "{{.Empty3}}", "[7 8]", tVal, true},
+ {"empty with struct", "{{.Empty4}}", "{UinEmpty}", tVal, true},
+ {"empty with struct, field", "{{.Empty4.V}}", "UinEmpty", tVal, true},
+
+ // Method calls.
+ {".Method0", "-{{.Method0}}-", "-M0-", tVal, true},
+ {".Method1(1234)", "-{{.Method1 1234}}-", "-1234-", tVal, true},
+ {".Method1(.I)", "-{{.Method1 .I}}-", "-17-", tVal, true},
+ {".Method2(3, .X)", "-{{.Method2 3 .X}}-", "-Method2: 3 x-", tVal, true},
+ {".Method2(.U16, `str`)", "-{{.Method2 .U16 `str`}}-", "-Method2: 16 str-", tVal, true},
+ {".Method2(.U16, $x)", "{{if $x := .X}}-{{.Method2 .U16 $x}}{{end}}-", "-Method2: 16 x-", tVal, true},
+ {"method on var", "{{if $x := .}}-{{$x.Method2 .U16 $x.X}}{{end}}-", "-Method2: 16 x-", tVal, true},
+ {"method on chained var",
+ "{{range .MSIone}}{{if $.U.TrueFalse $.True}}{{$.U.TrueFalse $.True}}{{else}}WRONG{{end}}{{end}}",
+ "true", tVal, true},
+ {"chained method",
+ "{{range .MSIone}}{{if $.GetU.TrueFalse $.True}}{{$.U.TrueFalse $.True}}{{else}}WRONG{{end}}{{end}}",
+ "true", tVal, true},
+ {"chained method on variable",
+ "{{with $x := .}}{{with .SI}}{{$.GetU.TrueFalse $.True}}{{end}}{{end}}",
+ "true", tVal, true},
+
+ // Pipelines.
+ {"pipeline", "-{{.Method0 | .Method2 .U16}}-", "-Method2: 16 M0-", tVal, true},
+
+ // If.
+ {"if true", "{{if true}}TRUE{{end}}", "TRUE", tVal, true},
+ {"if false", "{{if false}}TRUE{{else}}FALSE{{end}}", "FALSE", tVal, true},
+ {"if 1", "{{if 1}}NON-ZERO{{else}}ZERO{{end}}", "NON-ZERO", tVal, true},
+ {"if 0", "{{if 0}}NON-ZERO{{else}}ZERO{{end}}", "ZERO", tVal, true},
+ {"if 1.5", "{{if 1.5}}NON-ZERO{{else}}ZERO{{end}}", "NON-ZERO", tVal, true},
+ {"if 0.0", "{{if .FloatZero}}NON-ZERO{{else}}ZERO{{end}}", "ZERO", tVal, true},
+ {"if 1.5i", "{{if 1.5i}}NON-ZERO{{else}}ZERO{{end}}", "NON-ZERO", tVal, true},
+ {"if 0.0i", "{{if .ComplexZero}}NON-ZERO{{else}}ZERO{{end}}", "ZERO", tVal, true},
+ {"if emptystring", "{{if ``}}NON-EMPTY{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
+ {"if string", "{{if `notempty`}}NON-EMPTY{{else}}EMPTY{{end}}", "NON-EMPTY", tVal, true},
+ {"if emptyslice", "{{if .SIEmpty}}NON-EMPTY{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
+ {"if slice", "{{if .SI}}NON-EMPTY{{else}}EMPTY{{end}}", "NON-EMPTY", tVal, true},
+ {"if emptymap", "{{if .MSIEmpty}}NON-EMPTY{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
+ {"if map", "{{if .MSI}}NON-EMPTY{{else}}EMPTY{{end}}", "NON-EMPTY", tVal, true},
+ {"if $x with $y int", "{{if $x := true}}{{with $y := .I}}{{$x}},{{$y}}{{end}}{{end}}", "true,17", tVal, true},
+ {"if $x with $x int", "{{if $x := true}}{{with $x := .I}}{{$x}},{{end}}{{$x}}{{end}}", "17,true", tVal, true},
+
+ // Print etc.
+ {"print", `{{print "hello, print"}}`, "hello, print", tVal, true},
+ {"print", `{{print 1 2 3}}`, "1 2 3", tVal, true},
+ {"println", `{{println 1 2 3}}`, "1 2 3\n", tVal, true},
+ {"printf int", `{{printf "%04x" 127}}`, "007f", tVal, true},
+ {"printf float", `{{printf "%g" 3.5}}`, "3.5", tVal, true},
+ {"printf complex", `{{printf "%g" 1+7i}}`, "(1+7i)", tVal, true},
+ {"printf string", `{{printf "%s" "hello"}}`, "hello", tVal, true},
+ {"printf function", `{{printf "%#q" zeroArgs}}`, "`zeroArgs`", tVal, true},
+ {"printf field", `{{printf "%s" .U.V}}`, "v", tVal, true},
+ {"printf method", `{{printf "%s" .Method0}}`, "M0", tVal, true},
+ {"printf dot", `{{with .I}}{{printf "%d" .}}{{end}}`, "17", tVal, true},
+ {"printf var", `{{with $x := .I}}{{printf "%d" $x}}{{end}}`, "17", tVal, true},
+ {"printf lots", `{{printf "%d %s %g %s" 127 "hello" 7-3i .Method0}}`, "127 hello (7-3i) M0", tVal, true},
+
+ // HTML.
+ {"html", `{{html "<script>alert(\"XSS\");</script>"}}`,
+ "<script>alert("XSS");</script>", nil, true},
+ {"html pipeline", `{{printf "<script>alert(\"XSS\");</script>" | html}}`,
+ "<script>alert("XSS");</script>", nil, true},
+
+ // JavaScript.
+ {"js", `{{js .}}`, `It\'d be nice.`, `It'd be nice.`, true},
+
+ // URL query.
+ {"urlquery", `{{"http://www.example.org/"|urlquery}}`, "http%3A%2F%2Fwww.example.org%2F", nil, true},
+
+ // Booleans
+ {"not", "{{not true}} {{not false}}", "false true", nil, true},
+ {"and", "{{and false 0}} {{and 1 0}} {{and 0 true}} {{and 1 1}}", "false 0 0 1", nil, true},
+ {"or", "{{or 0 0}} {{or 1 0}} {{or 0 true}} {{or 1 1}}", "0 1 true 1", nil, true},
+ {"boolean if", "{{if and true 1 `hi`}}TRUE{{else}}FALSE{{end}}", "TRUE", tVal, true},
+ {"boolean if not", "{{if and true 1 `hi` | not}}TRUE{{else}}FALSE{{end}}", "FALSE", nil, true},
+
+ // Indexing.
+ {"slice[0]", "{{index .SI 0}}", "3", tVal, true},
+ {"slice[1]", "{{index .SI 1}}", "4", tVal, true},
+ {"slice[HUGE]", "{{index .SI 10}}", "", tVal, false},
+ {"slice[WRONG]", "{{index .SI `hello`}}", "", tVal, false},
+ {"map[one]", "{{index .MSI `one`}}", "1", tVal, true},
+ {"map[two]", "{{index .MSI `two`}}", "2", tVal, true},
+ {"map[NO]", "{{index .MSI `XXX`}}", "", tVal, true},
+ {"map[WRONG]", "{{index .MSI 10}}", "", tVal, false},
+ {"double index", "{{index .SMSI 1 `eleven`}}", "11", tVal, true},
+
+ // Len.
+ {"slice", "{{len .SI}}", "3", tVal, true},
+ {"map", "{{len .MSI }}", "3", tVal, true},
+ {"len of int", "{{len 3}}", "", tVal, false},
+ {"len of nothing", "{{len .Empty0}}", "", tVal, false},
+
+ // With.
+ {"with true", "{{with true}}{{.}}{{end}}", "true", tVal, true},
+ {"with false", "{{with false}}{{.}}{{else}}FALSE{{end}}", "FALSE", tVal, true},
+ {"with 1", "{{with 1}}{{.}}{{else}}ZERO{{end}}", "1", tVal, true},
+ {"with 0", "{{with 0}}{{.}}{{else}}ZERO{{end}}", "ZERO", tVal, true},
+ {"with 1.5", "{{with 1.5}}{{.}}{{else}}ZERO{{end}}", "1.5", tVal, true},
+ {"with 0.0", "{{with .FloatZero}}{{.}}{{else}}ZERO{{end}}", "ZERO", tVal, true},
+ {"with 1.5i", "{{with 1.5i}}{{.}}{{else}}ZERO{{end}}", "(0+1.5i)", tVal, true},
+ {"with 0.0i", "{{with .ComplexZero}}{{.}}{{else}}ZERO{{end}}", "ZERO", tVal, true},
+ {"with emptystring", "{{with ``}}{{.}}{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
+ {"with string", "{{with `notempty`}}{{.}}{{else}}EMPTY{{end}}", "notempty", tVal, true},
+ {"with emptyslice", "{{with .SIEmpty}}{{.}}{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
+ {"with slice", "{{with .SI}}{{.}}{{else}}EMPTY{{end}}", "[3 4 5]", tVal, true},
+ {"with emptymap", "{{with .MSIEmpty}}{{.}}{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
+ {"with map", "{{with .MSIone}}{{.}}{{else}}EMPTY{{end}}", "map[one:1]", tVal, true},
+ {"with empty interface, struct field", "{{with .Empty4}}{{.V}}{{end}}", "UinEmpty", tVal, true},
+ {"with $x int", "{{with $x := .I}}{{$x}}{{end}}", "17", tVal, true},
+ {"with $x struct.U.V", "{{with $x := $}}{{$x.U.V}}{{end}}", "v", tVal, true},
+ {"with variable and action", "{{with $x := $}}{{$y := $.U.V}}{{$y}}{{end}}", "v", tVal, true},
+
+ // Range.
+ {"range []int", "{{range .SI}}-{{.}}-{{end}}", "-3--4--5-", tVal, true},
+ {"range empty no else", "{{range .SIEmpty}}-{{.}}-{{end}}", "", tVal, true},
+ {"range []int else", "{{range .SI}}-{{.}}-{{else}}EMPTY{{end}}", "-3--4--5-", tVal, true},
+ {"range empty else", "{{range .SIEmpty}}-{{.}}-{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
+ {"range []bool", "{{range .SB}}-{{.}}-{{end}}", "-true--false-", tVal, true},
+ {"range []int method", "{{range .SI | .MAdd .I}}-{{.}}-{{end}}", "-20--21--22-", tVal, true},
+ {"range map", "{{range .MSI | .MSort}}-{{.}}-{{end}}", "-one--three--two-", tVal, true},
+ {"range empty map no else", "{{range .MSIEmpty}}-{{.}}-{{end}}", "", tVal, true},
+ {"range map else", "{{range .MSI | .MSort}}-{{.}}-{{else}}EMPTY{{end}}", "-one--three--two-", tVal, true},
+ {"range empty map else", "{{range .MSIEmpty}}-{{.}}-{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
+ {"range empty interface", "{{range .Empty3}}-{{.}}-{{else}}EMPTY{{end}}", "-7--8-", tVal, true},
+ {"range empty nil", "{{range .Empty0}}-{{.}}-{{end}}", "", tVal, true},
+ {"range $x SI", "{{range $x := .SI}}<{{$x}}>{{end}}", "<3><4><5>", tVal, true},
+ {"range $x $y SI", "{{range $x, $y := .SI}}<{{$x}}={{$y}}>{{end}}", "<0=3><1=4><2=5>", tVal, true},
+ {"range $x MSIone", "{{range $x := .MSIone}}<{{$x}}>{{end}}", "<1>", tVal, true},
+ {"range $x $y MSIone", "{{range $x, $y := .MSIone}}<{{$x}}={{$y}}>{{end}}", "<one=1>", tVal, true},
+ {"range $x PSI", "{{range $x := .PSI}}<{{$x}}>{{end}}", "<21><22><23>", tVal, true},
+ {"declare in range", "{{range $x := .PSI}}<{{$foo:=$x}}{{$x}}>{{end}}", "<21><22><23>", tVal, true},
+ {"range count", `{{range $i, $x := count 5}}[{{$i}}]{{$x}}{{end}}`, "[0]a[1]b[2]c[3]d[4]e", tVal, true},
+ {"range nil count", `{{range $i, $x := count 0}}{{else}}empty{{end}}`, "empty", tVal, true},
+
+ // Cute examples.
+ {"or as if true", `{{or .SI "slice is empty"}}`, "[3 4 5]", tVal, true},
+ {"or as if false", `{{or .SIEmpty "slice is empty"}}`, "slice is empty", tVal, true},
+
+ // Error handling.
+ {"error method, error", "{{.EPERM true}}", "", tVal, false},
+ {"error method, no error", "{{.EPERM false}}", "false", tVal, true},
+
+ // Fixed bugs.
+ // Must separate dot and receiver; otherwise args are evaluated with dot set to variable.
+ {"bug0", "{{range .MSIone}}{{if $.Method1 .}}X{{end}}{{end}}", "X", tVal, true},
+ // Do not loop endlessly in indirect for non-empty interfaces.
+ // The bug appears with *interface only; looped forever.
+ {"bug1", "{{.Method0}}", "M0", &iVal, true},
+ // Was taking address of interface field, so method set was empty.
+ {"bug2", "{{$.NonEmptyInterface.Method0}}", "M0", tVal, true},
+ // Struct values were not legal in with - mere oversight.
+ {"bug3", "{{with $}}{{.Method0}}{{end}}", "M0", tVal, true},
+ // Nil interface values in if.
+ {"bug4", "{{if .Empty0}}non-nil{{else}}nil{{end}}", "nil", tVal, true},
+ // Stringer.
+ {"bug5", "{{.Str}}", "foozle", tVal, true},
+ {"bug5a", "{{.Err}}", "erroozle", tVal, true},
+ // Args need to be indirected and dereferenced sometimes.
+ {"bug6a", "{{vfunc .V0 .V1}}", "vfunc", tVal, true},
+ {"bug6b", "{{vfunc .V0 .V0}}", "vfunc", tVal, true},
+ {"bug6c", "{{vfunc .V1 .V0}}", "vfunc", tVal, true},
+ {"bug6d", "{{vfunc .V1 .V1}}", "vfunc", tVal, true},
+}
+
+func zeroArgs() string {
+ return "zeroArgs"
+}
+
+func oneArg(a string) string {
+ return "oneArg=" + a
+}
+
+// count returns a channel that will deliver n sequential 1-letter strings starting at "a"
+func count(n int) chan string {
+ if n == 0 {
+ return nil
+ }
+ c := make(chan string)
+ go func() {
+ for i := 0; i < n; i++ {
+ c <- "abcdefghijklmnop"[i : i+1]
+ }
+ close(c)
+ }()
+ return c
+}
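
The count helper above exists because {{range}} also iterates over channels, which is what the "range count" cases in the table exercise. A minimal stand-alone sketch of that behavior, using only the exported text/template API (illustrative, not part of the patch):

package main

import (
	"os"
	"text/template"
)

// letters returns a channel that delivers n one-letter strings starting at "a".
func letters(n int) chan string {
	c := make(chan string)
	go func() {
		for i := 0; i < n; i++ {
			c <- string('a' + rune(i))
		}
		close(c)
	}()
	return c
}

func main() {
	t := template.Must(template.New("ch").Parse(`{{range .}}[{{.}}]{{end}}`))
	t.Execute(os.Stdout, letters(3)) // prints: [a][b][c]
}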
+
+// vfunc takes a V and a *V
+func vfunc(V, *V) string {
+ return "vfunc"
+}
+
+func testExecute(execTests []execTest, set *Set, t *testing.T) {
+ b := new(bytes.Buffer)
+ funcs := FuncMap{
+ "count": count,
+ "oneArg": oneArg,
+ "typeOf": typeOf,
+ "vfunc": vfunc,
+ "zeroArgs": zeroArgs,
+ }
+ for _, test := range execTests {
+ tmpl := New(test.name).Funcs(funcs)
+ _, err := tmpl.ParseInSet(test.input, set)
+ if err != nil {
+ t.Errorf("%s: parse error: %s", test.name, err)
+ continue
+ }
+ b.Reset()
+ err = tmpl.Execute(b, test.data)
+ switch {
+ case !test.ok && err == nil:
+ t.Errorf("%s: expected error; got none", test.name)
+ continue
+ case test.ok && err != nil:
+ t.Errorf("%s: unexpected execute error: %s", test.name, err)
+ continue
+ case !test.ok && err != nil:
+ // expected error, got one
+ if *debug {
+ fmt.Printf("%s: %s\n\t%s\n", test.name, test.input, err)
+ }
+ }
+ result := b.String()
+ if result != test.output {
+ t.Errorf("%s: expected\n\t%q\ngot\n\t%q", test.name, test.output, result)
+ }
+ }
+}
+
+func TestExecute(t *testing.T) {
+ testExecute(execTests, nil, t)
+}
+
+var delimPairs = []string{
+ "", "", // default
+ "{{", "}}", // same as default
+ "<<", ">>", // distinct
+ "|", "|", // same
+ "(日)", "(本)", // peculiar
+}
+
+func TestDelims(t *testing.T) {
+ const hello = "Hello, world"
+ var value = struct{ Str string }{hello}
+ for i := 0; i < len(delimPairs); i += 2 {
+ text := ".Str"
+ left := delimPairs[i+0]
+ trueLeft := left
+ right := delimPairs[i+1]
+ trueRight := right
+ if left == "" { // default case
+ trueLeft = "{{"
+ }
+ if right == "" { // default case
+ trueRight = "}}"
+ }
+ text = trueLeft + text + trueRight
+ // Now add a comment
+ text += trueLeft + "/*comment*/" + trueRight
+ // Now add an action containing a string.
+ text += trueLeft + `"` + trueLeft + `"` + trueRight
+ // At this point text looks like `{{.Str}}{{/*comment*/}}{{"{{"}}`.
+ tmpl, err := New("delims").Delims(left, right).Parse(text)
+ if err != nil {
+ t.Fatalf("delim %q text %q parse err %s", left, text, err)
+ }
+ var b = new(bytes.Buffer)
+ err = tmpl.Execute(b, value)
+ if err != nil {
+ t.Fatalf("delim %q exec err %s", left, err)
+ }
+ if b.String() != hello+trueLeft {
+ t.Errorf("expected %q got %q", hello+trueLeft, b.String())
+ }
+ }
+}
+
+// Check that an error from a method flows back to the top.
+func TestExecuteError(t *testing.T) {
+ b := new(bytes.Buffer)
+ tmpl := New("error")
+ _, err := tmpl.Parse("{{.EPERM true}}")
+ if err != nil {
+ t.Fatalf("parse error: %s", err)
+ }
+ err = tmpl.Execute(b, tVal)
+ if err == nil {
+ t.Errorf("expected error; got none")
+ } else if !strings.Contains(err.Error(), os.EPERM.Error()) {
+ if *debug {
+ fmt.Printf("test execute error: %s\n", err)
+ }
+ t.Errorf("expected os.EPERM; got %s", err)
+ }
+}
+
+func TestJSEscaping(t *testing.T) {
+ testCases := []struct {
+ in, exp string
+ }{
+ {`a`, `a`},
+ {`'foo`, `\'foo`},
+ {`Go "jump" \`, `Go \"jump\" \\`},
+ {`Yukihiro says "今日は世界"`, `Yukihiro says \"今日は世界\"`},
+ {"unprintable \uFDFF", `unprintable \uFDFF`},
+ {`<html>`, `\x3Chtml\x3E`},
+ }
+ for _, tc := range testCases {
+ s := JSEscapeString(tc.in)
+ if s != tc.exp {
+ t.Errorf("JS escaping [%s] got [%s] want [%s]", tc.in, s, tc.exp)
+ }
+ }
+}
+
+// A nice example: walk a binary tree.
+
+type Tree struct {
+ Val int
+ Left, Right *Tree
+}
+
+// Use different delimiters to test Set.Delims.
+const treeTemplate = `
+ (define "tree")
+ [
+ (.Val)
+ (with .Left)
+ (template "tree" .)
+ (end)
+ (with .Right)
+ (template "tree" .)
+ (end)
+ ]
+ (end)
+`
+
+func TestTree(t *testing.T) {
+ var tree = &Tree{
+ 1,
+ &Tree{
+ 2, &Tree{
+ 3,
+ &Tree{
+ 4, nil, nil,
+ },
+ nil,
+ },
+ &Tree{
+ 5,
+ &Tree{
+ 6, nil, nil,
+ },
+ nil,
+ },
+ },
+ &Tree{
+ 7,
+ &Tree{
+ 8,
+ &Tree{
+ 9, nil, nil,
+ },
+ nil,
+ },
+ &Tree{
+ 10,
+ &Tree{
+ 11, nil, nil,
+ },
+ nil,
+ },
+ },
+ }
+ set := new(Set)
+ _, err := set.Delims("(", ")").Parse(treeTemplate)
+ if err != nil {
+ t.Fatal("parse error:", err)
+ }
+ var b bytes.Buffer
+ err = set.Execute(&b, "tree", tree)
+ if err != nil {
+ t.Fatal("exec error:", err)
+ }
+ stripSpace := func(r rune) rune {
+ if r == '\t' || r == '\n' {
+ return -1
+ }
+ return r
+ }
+ result := strings.Map(stripSpace, b.String())
+ const expect = "[1[2[3[4]][5[6]]][7[8[9]][10[11]]]]"
+ if result != expect {
+ t.Errorf("expected %q got %q", expect, result)
+ }
+}
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "net/url"
+ "reflect"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+// FuncMap is the type of the map defining the mapping from names to functions.
+// Each function must have either a single return value, or two return values of
+// which the second has type error. In that case, if the second (error) return
+// value evaluates to non-nil during execution, execution terminates and Execute
+// returns that error.
+type FuncMap map[string]interface{}
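
A FuncMap is easiest to grasp from a concrete map. The sketch below is illustrative only (the template text and function choices are invented) and relies on the exported text/template API this patch provides:

package main

import (
	"os"
	"strconv"
	"strings"
	"text/template"
)

func main() {
	funcs := template.FuncMap{
		"upper": strings.ToUpper, // one result
		"atoi":  strconv.Atoi,    // (int, error): a non-nil error aborts Execute
	}
	t := template.Must(template.New("demo").Funcs(funcs).Parse(`{{upper "go"}} {{atoi "42"}}`))
	if err := t.Execute(os.Stdout, nil); err != nil {
		panic(err)
	}
	// Output: GO 42
}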
+
+var builtins = FuncMap{
+ "and": and,
+ "html": HTMLEscaper,
+ "index": index,
+ "js": JSEscaper,
+ "len": length,
+ "not": not,
+ "or": or,
+ "print": fmt.Sprint,
+ "printf": fmt.Sprintf,
+ "println": fmt.Sprintln,
+ "urlquery": URLQueryEscaper,
+}
+
+var builtinFuncs = createValueFuncs(builtins)
+
+// createValueFuncs turns a FuncMap into a map[string]reflect.Value
+func createValueFuncs(funcMap FuncMap) map[string]reflect.Value {
+ m := make(map[string]reflect.Value)
+ addValueFuncs(m, funcMap)
+ return m
+}
+
+// addValueFuncs adds to values the functions in funcs, converting them to reflect.Values.
+func addValueFuncs(out map[string]reflect.Value, in FuncMap) {
+ for name, fn := range in {
+ v := reflect.ValueOf(fn)
+ if v.Kind() != reflect.Func {
+ panic("value for " + name + " not a function")
+ }
+ if !goodFunc(v.Type()) {
+ panic(fmt.Errorf("can't handle multiple results from method/function %q", name))
+ }
+ out[name] = v
+ }
+}
+
+// addFuncs adds to values the functions in funcs. It does no checking of the input -
+// call addValueFuncs first.
+func addFuncs(out, in FuncMap) {
+ for name, fn := range in {
+ out[name] = fn
+ }
+}
+
+// goodFunc checks that the function or method has the right result signature.
+func goodFunc(typ reflect.Type) bool {
+ // We allow functions with 1 result or 2 results where the second is an error.
+ switch {
+ case typ.NumOut() == 1:
+ return true
+ case typ.NumOut() == 2 && typ.Out(1) == errorType:
+ return true
+ }
+ return false
+}
+
+// findFunction looks for a function in the template, set, and global map.
+func findFunction(name string, tmpl *Template, set *Set) (reflect.Value, bool) {
+ if tmpl != nil {
+ if fn := tmpl.execFuncs[name]; fn.IsValid() {
+ return fn, true
+ }
+ }
+ if set != nil {
+ if fn := set.execFuncs[name]; fn.IsValid() {
+ return fn, true
+ }
+ }
+ if fn := builtinFuncs[name]; fn.IsValid() {
+ return fn, true
+ }
+ return reflect.Value{}, false
+}
+
+// Indexing.
+
+// index returns the result of indexing its first argument by the following
+// arguments. Thus "index x 1 2 3" is, in Go syntax, x[1][2][3]. Each
+// indexed item must be a map, slice, or array.
+func index(item interface{}, indices ...interface{}) (interface{}, error) {
+ v := reflect.ValueOf(item)
+ for _, i := range indices {
+ index := reflect.ValueOf(i)
+ var isNil bool
+ if v, isNil = indirect(v); isNil {
+ return nil, fmt.Errorf("index of nil pointer")
+ }
+ switch v.Kind() {
+ case reflect.Array, reflect.Slice:
+ var x int64
+ switch index.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ x = index.Int()
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ x = int64(index.Uint())
+ default:
+ return nil, fmt.Errorf("cannot index slice/array with type %s", index.Type())
+ }
+ if x < 0 || x >= int64(v.Len()) {
+ return nil, fmt.Errorf("index out of range: %d", x)
+ }
+ v = v.Index(int(x))
+ case reflect.Map:
+ if !index.Type().AssignableTo(v.Type().Key()) {
+ return nil, fmt.Errorf("%s is not index type for %s", index.Type(), v.Type())
+ }
+ if x := v.MapIndex(index); x.IsValid() {
+ v = x
+ } else {
+ v = reflect.Zero(v.Type().Elem())
+ }
+ default:
+ return nil, fmt.Errorf("can't index item of type %s", index.Type())
+ }
+ }
+ return v.Interface(), nil
+}
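
In template text the built-in reads as chained Go indexing. A small self-contained sketch (the data is invented for illustration):

package main

import (
	"os"
	"text/template"
)

func main() {
	// {{index .M "a" 1}} is .M["a"][1] in Go terms.
	t := template.Must(template.New("idx").Parse(`{{index .M "a" 1}}`))
	data := map[string]interface{}{"M": map[string][]int{"a": {10, 20, 30}}}
	if err := t.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
	// Output: 20
}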
+
+// Length
+
+// length returns the length of the item, with an error if it has no defined length.
+func length(item interface{}) (int, error) {
+ v, isNil := indirect(reflect.ValueOf(item))
+ if isNil {
+ return 0, fmt.Errorf("len of nil pointer")
+ }
+ switch v.Kind() {
+ case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String:
+ return v.Len(), nil
+ }
+ return 0, fmt.Errorf("len of type %s", v.Type())
+}
+
+// Boolean logic.
+
+func truth(a interface{}) bool {
+ t, _ := isTrue(reflect.ValueOf(a))
+ return t
+}
+
+// and computes the Boolean AND of its arguments, returning
+// the first false argument it encounters, or the last argument.
+func and(arg0 interface{}, args ...interface{}) interface{} {
+ if !truth(arg0) {
+ return arg0
+ }
+ for i := range args {
+ arg0 = args[i]
+ if !truth(arg0) {
+ break
+ }
+ }
+ return arg0
+}
+
+// or computes the Boolean OR of its arguments, returning
+// the first true argument it encounters, or the last argument.
+func or(arg0 interface{}, args ...interface{}) interface{} {
+ if truth(arg0) {
+ return arg0
+ }
+ for i := range args {
+ arg0 = args[i]
+ if truth(arg0) {
+ break
+ }
+ }
+ return arg0
+}
+
+// not returns the Boolean negation of its argument.
+func not(arg interface{}) (truth bool) {
+ truth, _ = isTrue(reflect.ValueOf(arg))
+ return !truth
+}
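
Because and/or return one of their arguments rather than a bool, "or" doubles as a default-value operator, which is what the "or as if" cases in the test table rely on. A short sketch (field names invented):

package main

import (
	"os"
	"text/template"
)

func main() {
	t := template.Must(template.New("or").Parse(`{{or .Nick "anonymous"}}` + "\n"))
	t.Execute(os.Stdout, map[string]string{"Nick": ""})    // empty string is false: prints anonymous
	t.Execute(os.Stdout, map[string]string{"Nick": "rsc"}) // prints rsc
}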
+
+// HTML escaping.
+
+var (
+ htmlQuot = []byte("&#34;") // shorter than "&quot;"
+ htmlApos = []byte("&#39;") // shorter than "&apos;"
+ htmlAmp = []byte("&amp;")
+ htmlLt = []byte("&lt;")
+ htmlGt = []byte("&gt;")
+)
+
+// HTMLEscape writes to w the escaped HTML equivalent of the plain text data b.
+func HTMLEscape(w io.Writer, b []byte) {
+ last := 0
+ for i, c := range b {
+ var html []byte
+ switch c {
+ case '"':
+ html = htmlQuot
+ case '\'':
+ html = htmlApos
+ case '&':
+ html = htmlAmp
+ case '<':
+ html = htmlLt
+ case '>':
+ html = htmlGt
+ default:
+ continue
+ }
+ w.Write(b[last:i])
+ w.Write(html)
+ last = i + 1
+ }
+ w.Write(b[last:])
+}
+
+// HTMLEscapeString returns the escaped HTML equivalent of the plain text data s.
+func HTMLEscapeString(s string) string {
+ // Avoid allocation if we can.
+ if strings.IndexAny(s, `'"&<>`) < 0 {
+ return s
+ }
+ var b bytes.Buffer
+ HTMLEscape(&b, []byte(s))
+ return b.String()
+}
+
+// HTMLEscaper returns the escaped HTML equivalent of the textual
+// representation of its arguments.
+func HTMLEscaper(args ...interface{}) string {
+ ok := false
+ var s string
+ if len(args) == 1 {
+ s, ok = args[0].(string)
+ }
+ if !ok {
+ s = fmt.Sprint(args...)
+ }
+ return HTMLEscapeString(s)
+}
+
+// JavaScript escaping.
+
+var (
+ jsLowUni = []byte(`\u00`)
+ hex = []byte("0123456789ABCDEF")
+
+ jsBackslash = []byte(`\\`)
+ jsApos = []byte(`\'`)
+ jsQuot = []byte(`\"`)
+ jsLt = []byte(`\x3C`)
+ jsGt = []byte(`\x3E`)
+)
+
+// JSEscape writes to w the escaped JavaScript equivalent of the plain text data b.
+func JSEscape(w io.Writer, b []byte) {
+ last := 0
+ for i := 0; i < len(b); i++ {
+ c := b[i]
+
+ if !jsIsSpecial(rune(c)) {
+ // fast path: nothing to do
+ continue
+ }
+ w.Write(b[last:i])
+
+ if c < utf8.RuneSelf {
+ // Quotes, slashes and angle brackets get quoted.
+ // Control characters get written as \u00XX.
+ switch c {
+ case '\\':
+ w.Write(jsBackslash)
+ case '\'':
+ w.Write(jsApos)
+ case '"':
+ w.Write(jsQuot)
+ case '<':
+ w.Write(jsLt)
+ case '>':
+ w.Write(jsGt)
+ default:
+ w.Write(jsLowUni)
+ t, b := c>>4, c&0x0f
+ w.Write(hex[t : t+1])
+ w.Write(hex[b : b+1])
+ }
+ } else {
+ // Unicode rune.
+ r, size := utf8.DecodeRune(b[i:])
+ if unicode.IsPrint(r) {
+ w.Write(b[i : i+size])
+ } else {
+ // TODO(dsymonds): Do this without fmt?
+ fmt.Fprintf(w, "\\u%04X", r)
+ }
+ i += size - 1
+ }
+ last = i + 1
+ }
+ w.Write(b[last:])
+}
+
+// JSEscapeString returns the escaped JavaScript equivalent of the plain text data s.
+func JSEscapeString(s string) string {
+ // Avoid allocation if we can.
+ if strings.IndexFunc(s, jsIsSpecial) < 0 {
+ return s
+ }
+ var b bytes.Buffer
+ JSEscape(&b, []byte(s))
+ return b.String()
+}
+
+func jsIsSpecial(r rune) bool {
+ switch r {
+ case '\\', '\'', '"', '<', '>':
+ return true
+ }
+ return r < ' ' || utf8.RuneSelf <= r
+}
+
+// JSEscaper returns the escaped JavaScript equivalent of the textual
+// representation of its arguments.
+func JSEscaper(args ...interface{}) string {
+ ok := false
+ var s string
+ if len(args) == 1 {
+ s, ok = args[0].(string)
+ }
+ if !ok {
+ s = fmt.Sprint(args...)
+ }
+ return JSEscapeString(s)
+}
+
+// URLQueryEscaper returns the escaped value of the textual representation of
+// its arguments in a form suitable for embedding in a URL query.
+func URLQueryEscaper(args ...interface{}) string {
+ s, ok := "", false
+ if len(args) == 1 {
+ s, ok = args[0].(string)
+ }
+ if !ok {
+ s = fmt.Sprint(args...)
+ }
+ return url.QueryEscape(s)
+}
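
The three escapers treat different characters as special; a quick comparison on one input (illustrative, with the outputs the code above produces):

package main

import (
	"fmt"
	"text/template"
)

func main() {
	s := `<script>alert("hi")</script>`
	fmt.Println(template.HTMLEscapeString(s)) // &lt;script&gt;alert(&#34;hi&#34;)&lt;/script&gt;
	fmt.Println(template.JSEscapeString(s))   // \x3Cscript\x3Ealert(\"hi\")\x3C/script\x3E
	fmt.Println(template.URLQueryEscaper(s))  // %3Cscript%3Ealert%28%22hi%22%29%3C%2Fscript%3E
}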
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Helper functions to make constructing templates and sets easier.
+
+package template
+
+import (
+ "fmt"
+ "io/ioutil"
+ "path/filepath"
+)
+
+// Functions and methods to parse a single template.
+
+// Must is a helper that wraps a call to a function returning (*Template, error)
+// and panics if the error is non-nil. It is intended for use in variable initializations
+// such as
+// var t = template.Must(template.New("name").Parse("text"))
+func Must(t *Template, err error) *Template {
+ if err != nil {
+ panic(err)
+ }
+ return t
+}
+
+// ParseFile creates a new Template and parses the template definition from
+// the named file. The template name is the base name of the file.
+func ParseFile(filename string) (*Template, error) {
+ t := New(filepath.Base(filename))
+ return t.ParseFile(filename)
+}
+
+// parseFileInSet creates a new Template and parses the template
+// definition from the named file. The template name is the base name
+// of the file. It also adds the template to the set. Function bindings are
+// checked against those in the set.
+func parseFileInSet(filename string, set *Set) (*Template, error) {
+ t := New(filepath.Base(filename))
+ return t.parseFileInSet(filename, set)
+}
+
+// ParseFile reads the template definition from a file and parses it to
+// construct an internal representation of the template for execution.
+// The returned template will be nil if an error occurs.
+func (t *Template) ParseFile(filename string) (*Template, error) {
+ b, err := ioutil.ReadFile(filename)
+ if err != nil {
+ return nil, err
+ }
+ return t.Parse(string(b))
+}
+
+// parseFileInSet is the same as ParseFile except that function bindings
+// are checked against those in the set and the template is added
+// to the set.
+// The returned template will be nil if an error occurs.
+func (t *Template) parseFileInSet(filename string, set *Set) (*Template, error) {
+ b, err := ioutil.ReadFile(filename)
+ if err != nil {
+ return nil, err
+ }
+ return t.ParseInSet(string(b), set)
+}
+
+// Functions and methods to parse a set.
+
+// SetMust is a helper that wraps a call to a function returning (*Set, error)
+// and panics if the error is non-nil. It is intended for use in variable initializations
+// such as
+// var s = template.SetMust(template.ParseSetFiles("file"))
+func SetMust(s *Set, err error) *Set {
+ if err != nil {
+ panic(err)
+ }
+ return s
+}
+
+// ParseFiles parses the named files into a set of named templates.
+// Each file must be parseable by itself.
+// If an error occurs, parsing stops and the returned set is nil.
+func (s *Set) ParseFiles(filenames ...string) (*Set, error) {
+ for _, filename := range filenames {
+ b, err := ioutil.ReadFile(filename)
+ if err != nil {
+ return nil, err
+ }
+ _, err = s.Parse(string(b))
+ if err != nil {
+ return nil, err
+ }
+ }
+ return s, nil
+}
+
+// ParseSetFiles creates a new Set and parses the set definition from the
+// named files. Each file must be individually parseable.
+func ParseSetFiles(filenames ...string) (*Set, error) {
+ s := new(Set)
+ for _, filename := range filenames {
+ b, err := ioutil.ReadFile(filename)
+ if err != nil {
+ return nil, err
+ }
+ _, err = s.Parse(string(b))
+ if err != nil {
+ return nil, err
+ }
+ }
+ return s, nil
+}
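
Putting the set helpers together: a hedged sketch of loading a set from files and executing one of its templates by name. The filenames are hypothetical, and the Set API used here (ParseSetFiles, Set.Execute) is the one defined in this snapshot of the package:

package main

import (
	"log"
	"os"
	"text/template"
)

func main() {
	// "header.tmpl" and "page.tmpl" are hypothetical files containing {{define}} blocks.
	set, err := template.ParseSetFiles("header.tmpl", "page.tmpl")
	if err != nil {
		log.Fatal(err)
	}
	if err := set.Execute(os.Stdout, "page", nil); err != nil {
		log.Fatal(err)
	}
}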
+
+// ParseGlob parses the set definition from the files identified by the
+// pattern. The pattern is processed by filepath.Glob and must match at
+// least one file.
+// If an error occurs, parsing stops and the returned set is nil.
+func (s *Set) ParseGlob(pattern string) (*Set, error) {
+ filenames, err := filepath.Glob(pattern)
+ if err != nil {
+ return nil, err
+ }
+ if len(filenames) == 0 {
+ return nil, fmt.Errorf("pattern matches no files: %#q", pattern)
+ }
+ return s.ParseFiles(filenames...)
+}
+
+// ParseSetGlob creates a new Set and parses the set definition from the
+// files identified by the pattern. The pattern is processed by filepath.Glob
+// and must match at least one file.
+func ParseSetGlob(pattern string) (*Set, error) {
+ set, err := new(Set).ParseGlob(pattern)
+ if err != nil {
+ return nil, err
+ }
+ return set, nil
+}
+
+// Functions and methods to parse stand-alone template files into a set.
+
+// ParseTemplateFiles parses the named template files and adds
+// them to the set. Each template will be named the base name of
+// its file.
+// Unlike with ParseFiles, each file should be a stand-alone template
+// definition suitable for Template.Parse (not Set.Parse); that is, the
+// file does not contain {{define}} clauses. ParseTemplateFiles is
+// therefore equivalent to calling the ParseFile function to create
+// individual templates, which are then added to the set.
+// Each file must be parseable by itself.
+// If an error occurs, parsing stops and the returned set is nil.
+func (s *Set) ParseTemplateFiles(filenames ...string) (*Set, error) {
+ for _, filename := range filenames {
+ _, err := parseFileInSet(filename, s)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return s, nil
+}
+
+// ParseTemplateGlob parses the template files matched by the
+// pattern and adds them to the set. Each template will be named
+// the base name of its file.
+// Unlike with ParseGlob, each file should be a stand-alone template
+// definition suitable for Template.Parse (not Set.Parse); that is, the
+// file does not contain {{define}} clauses. ParseTemplateGlob is
+// therefore equivalent to calling the ParseFile function to create
+// individual templates, which are then added to the set.
+// Each file must be parseable by itself.
+// If an error occurs, parsing stops and the returned set is nil.
+func (s *Set) ParseTemplateGlob(pattern string) (*Set, error) {
+ filenames, err := filepath.Glob(pattern)
+ if err != nil {
+ return nil, err
+ }
+ for _, filename := range filenames {
+ _, err := parseFileInSet(filename, s)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return s, nil
+}
+
+// ParseTemplateFiles creates a set by parsing the named files,
+// each of which defines a single template. Each template will be
+// named the base name of its file.
+// Unlike with ParseFiles, each file should be a stand-alone template
+// definition suitable for Template.Parse (not Set.Parse); that is, the
+// file does not contain {{define}} clauses. ParseTemplateFiles is
+// therefore equivalent to calling the ParseFile function to create
+// individual templates, which are then added to the set.
+// Each file must be parseable by itself. Parsing stops if an error is
+// encountered.
+func ParseTemplateFiles(filenames ...string) (*Set, error) {
+ set := new(Set)
+ set.init()
+ for _, filename := range filenames {
+ t, err := ParseFile(filename)
+ if err != nil {
+ return nil, err
+ }
+ if err := set.add(t); err != nil {
+ return nil, err
+ }
+ }
+ return set, nil
+}
+
+// ParseTemplateGlob creates a set by parsing the files matched
+// by the pattern, each of which defines a single template. The pattern
+// is processed by filepath.Glob and must match at least one file. Each
+// template will be named the base name of its file.
+// Unlike with ParseGlob, each file should be a stand-alone template
+// definition suitable for Template.Parse (not Set.Parse); that is, the
+// file does not contain {{define}} clauses. ParseTemplateGlob is
+// therefore equivalent to calling the ParseFile function to create
+// individual templates, which are then added to the set.
+// Each file must be parseable by itself. Parsing stops if an error is
+// encountered.
+func ParseTemplateGlob(pattern string) (*Set, error) {
+ set := new(Set)
+ filenames, err := filepath.Glob(pattern)
+ if err != nil {
+ return nil, err
+ }
+ if len(filenames) == 0 {
+ return nil, fmt.Errorf("pattern matches no files: %#q", pattern)
+ }
+ for _, filename := range filenames {
+ t, err := ParseFile(filename)
+ if err != nil {
+ return nil, err
+ }
+ if err := set.add(t); err != nil {
+ return nil, err
+ }
+ }
+ return set, nil
+}
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+ "reflect"
+ "text/template/parse"
+)
+
+// Template is the representation of a parsed template.
+type Template struct {
+ name string
+ *parse.Tree
+ leftDelim string
+ rightDelim string
+ // We use two maps, one for parsing and one for execution.
+ // This separation makes the API cleaner since it doesn't
+ // expose reflection to the client.
+ parseFuncs FuncMap
+ execFuncs map[string]reflect.Value
+ set *Set // can be nil.
+}
+
+// Name returns the name of the template.
+func (t *Template) Name() string {
+ return t.name
+}
+
+// Parsing.
+
+// New allocates a new template with the given name.
+func New(name string) *Template {
+ return &Template{
+ name: name,
+ parseFuncs: make(FuncMap),
+ execFuncs: make(map[string]reflect.Value),
+ }
+}
+
+// Delims sets the action delimiters, to be used in a subsequent
+// parse, to the specified strings.
+// An empty delimiter stands for the corresponding default: {{ or }}.
+// The return value is the template, so calls can be chained.
+func (t *Template) Delims(left, right string) *Template {
+ t.leftDelim = left
+ t.rightDelim = right
+ return t
+}
+
+// Funcs adds the elements of the argument map to the template's function
+// map. It panics if a value in the map is not a function with appropriate
+// return type.
+// The return value is the template, so calls can be chained.
+func (t *Template) Funcs(funcMap FuncMap) *Template {
+ addValueFuncs(t.execFuncs, funcMap)
+ addFuncs(t.parseFuncs, funcMap)
+ return t
+}
+
+// Parse parses the template definition string to construct an internal
+// representation of the template for execution.
+func (t *Template) Parse(s string) (tmpl *Template, err error) {
+ t.Tree, err = parse.New(t.name).Parse(s, t.leftDelim, t.rightDelim, t.parseFuncs, builtins)
+ if err != nil {
+ return nil, err
+ }
+ return t, nil
+}
+
+// ParseInSet parses the template definition string to construct an internal
+// representation of the template for execution. It also adds the template
+// to the set. It is an error if s is already defined in the set.
+// Function bindings are checked against those in the set.
+func (t *Template) ParseInSet(s string, set *Set) (tmpl *Template, err error) {
+ var setFuncs FuncMap
+ if set != nil {
+ setFuncs = set.parseFuncs
+ }
+ t.Tree, err = parse.New(t.name).Parse(s, t.leftDelim, t.rightDelim, t.parseFuncs, setFuncs, builtins)
+ if err != nil {
+ return nil, err
+ }
+ if set != nil {
+ err = set.add(t)
+ }
+ return t, err
+}
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package parse
+
+import (
+ "fmt"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+// item represents a token or text string returned from the scanner.
+type item struct {
+ typ itemType
+ val string
+}
+
+func (i item) String() string {
+ switch {
+ case i.typ == itemEOF:
+ return "EOF"
+ case i.typ == itemError:
+ return i.val
+ case i.typ > itemKeyword:
+ return fmt.Sprintf("<%s>", i.val)
+ case len(i.val) > 10:
+ return fmt.Sprintf("%.10q...", i.val)
+ }
+ return fmt.Sprintf("%q", i.val)
+}
+
+// itemType identifies the type of lex items.
+type itemType int
+
+const (
+ itemError itemType = iota // error occurred; value is text of error
+ itemBool // boolean constant
+ itemChar // printable ASCII character; grab bag for comma etc.
+ itemCharConstant // character constant
+ itemComplex // complex constant (1+2i); imaginary is just a number
+ itemColonEquals // colon-equals (':=') introducing a declaration
+ itemEOF
+ itemField // alphanumeric identifier, starting with '.', possibly chained ('.x.y')
+ itemIdentifier // alphanumeric identifier
+ itemLeftDelim // left action delimiter
+ itemNumber // simple number, including imaginary
+ itemPipe // pipe symbol
+ itemRawString // raw quoted string (includes quotes)
+ itemRightDelim // right action delimiter
+ itemString // quoted string (includes quotes)
+ itemText // plain text
+ itemVariable // variable starting with '$', such as '$' or '$1' or '$hello'.
+ // Keywords appear after all the rest.
+ itemKeyword // used only to delimit the keywords
+ itemDot // the cursor, spelled '.'.
+ itemDefine // define keyword
+ itemElse // else keyword
+ itemEnd // end keyword
+ itemIf // if keyword
+ itemRange // range keyword
+ itemTemplate // template keyword
+ itemWith // with keyword
+)
+
+// Make the types prettyprint.
+var itemName = map[itemType]string{
+ itemError: "error",
+ itemBool: "bool",
+ itemChar: "char",
+ itemCharConstant: "charconst",
+ itemComplex: "complex",
+ itemColonEquals: ":=",
+ itemEOF: "EOF",
+ itemField: "field",
+ itemIdentifier: "identifier",
+ itemLeftDelim: "left delim",
+ itemNumber: "number",
+ itemPipe: "pipe",
+ itemRawString: "raw string",
+ itemRightDelim: "right delim",
+ itemString: "string",
+ itemVariable: "variable",
+ // keywords
+ itemDot: ".",
+ itemDefine: "define",
+ itemElse: "else",
+ itemIf: "if",
+ itemEnd: "end",
+ itemRange: "range",
+ itemTemplate: "template",
+ itemWith: "with",
+}
+
+func (i itemType) String() string {
+ s := itemName[i]
+ if s == "" {
+ return fmt.Sprintf("item%d", int(i))
+ }
+ return s
+}
+
+var key = map[string]itemType{
+ ".": itemDot,
+ "define": itemDefine,
+ "else": itemElse,
+ "end": itemEnd,
+ "if": itemIf,
+ "range": itemRange,
+ "template": itemTemplate,
+ "with": itemWith,
+}
+
+const eof = -1
+
+// stateFn represents the state of the scanner as a function that returns the next state.
+type stateFn func(*lexer) stateFn
+
+// lexer holds the state of the scanner.
+type lexer struct {
+ name string // the name of the input; used only for error reports.
+ input string // the string being scanned.
+ leftDelim string // start of action.
+ rightDelim string // end of action.
+ state stateFn // the next lexing function to enter.
+ pos int // current position in the input.
+ start int // start position of this item.
+ width int // width of last rune read from input.
+ items chan item // channel of scanned items.
+}
+
+// next returns the next rune in the input.
+func (l *lexer) next() (r rune) {
+ if l.pos >= len(l.input) {
+ l.width = 0
+ return eof
+ }
+ r, l.width = utf8.DecodeRuneInString(l.input[l.pos:])
+ l.pos += l.width
+ return r
+}
+
+// peek returns but does not consume the next rune in the input.
+func (l *lexer) peek() rune {
+ r := l.next()
+ l.backup()
+ return r
+}
+
+// backup steps back one rune. Can only be called once per call of next.
+func (l *lexer) backup() {
+ l.pos -= l.width
+}
+
+// emit passes an item back to the client.
+func (l *lexer) emit(t itemType) {
+ l.items <- item{t, l.input[l.start:l.pos]}
+ l.start = l.pos
+}
+
+// ignore skips over the pending input before this point.
+func (l *lexer) ignore() {
+ l.start = l.pos
+}
+
+// accept consumes the next rune if it's from the valid set.
+func (l *lexer) accept(valid string) bool {
+ if strings.IndexRune(valid, l.next()) >= 0 {
+ return true
+ }
+ l.backup()
+ return false
+}
+
+// acceptRun consumes a run of runes from the valid set.
+func (l *lexer) acceptRun(valid string) {
+ for strings.IndexRune(valid, l.next()) >= 0 {
+ }
+ l.backup()
+}
+
+// lineNumber reports which line we're on. Doing it this way
+// means we don't have to worry about peek double counting.
+func (l *lexer) lineNumber() int {
+ return 1 + strings.Count(l.input[:l.pos], "\n")
+}
+
+// errorf returns an error token and terminates the scan by passing
+// back a nil pointer that will be the next state, terminating l.nextItem.
+func (l *lexer) errorf(format string, args ...interface{}) stateFn {
+ l.items <- item{itemError, fmt.Sprintf(format, args...)}
+ return nil
+}
+
+// nextItem returns the next item from the input.
+func (l *lexer) nextItem() item {
+ for {
+ select {
+ case item := <-l.items:
+ return item
+ default:
+ l.state = l.state(l)
+ }
+ }
+ panic("not reached")
+}
+
+// lex creates a new scanner for the input string.
+func lex(name, input, left, right string) *lexer {
+ if left == "" {
+ left = leftDelim
+ }
+ if right == "" {
+ right = rightDelim
+ }
+ l := &lexer{
+ name: name,
+ input: input,
+ leftDelim: left,
+ rightDelim: right,
+ state: lexText,
+ items: make(chan item, 2), // Two items of buffering is sufficient for all state functions
+ }
+ return l
+}
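
The central trick of this lexer is that every state function returns the next state, so nextItem can simply keep invoking l.state until an item lands in the channel. A toy, self-contained analogue of that pattern (not the package's internal API):

package main

import "fmt"

// machine is a toy scanner: states are functions that return the next state.
type machine struct {
	input string
	pos   int
	words []string
}

type state func(*machine) state

// scanWord collects a word, then hands control to scanSpace (or stops at the end).
func scanWord(m *machine) state {
	start := m.pos
	for m.pos < len(m.input) && m.input[m.pos] != ' ' {
		m.pos++
	}
	m.words = append(m.words, m.input[start:m.pos])
	if m.pos == len(m.input) {
		return nil // terminal state
	}
	return scanSpace
}

// scanSpace skips a single space and returns to scanWord.
func scanSpace(m *machine) state {
	m.pos++
	return scanWord
}

func main() {
	m := &machine{input: "state functions return states"}
	for s := state(scanWord); s != nil; {
		s = s(m)
	}
	fmt.Println(m.words) // [state functions return states]
}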
+
+// state functions
+
+const (
+ leftDelim = "{{"
+ rightDelim = "}}"
+ leftComment = "/*"
+ rightComment = "*/"
+)
+
+// lexText scans until an opening action delimiter, "{{".
+func lexText(l *lexer) stateFn {
+ for {
+ if strings.HasPrefix(l.input[l.pos:], l.leftDelim) {
+ if l.pos > l.start {
+ l.emit(itemText)
+ }
+ return lexLeftDelim
+ }
+ if l.next() == eof {
+ break
+ }
+ }
+ // Correctly reached EOF.
+ if l.pos > l.start {
+ l.emit(itemText)
+ }
+ l.emit(itemEOF)
+ return nil
+}
+
+// lexLeftDelim scans the left delimiter, which is known to be present.
+func lexLeftDelim(l *lexer) stateFn {
+ if strings.HasPrefix(l.input[l.pos:], l.leftDelim+leftComment) {
+ return lexComment
+ }
+ l.pos += len(l.leftDelim)
+ l.emit(itemLeftDelim)
+ return lexInsideAction
+}
+
+// lexComment scans a comment. The left comment marker is known to be present.
+func lexComment(l *lexer) stateFn {
+ i := strings.Index(l.input[l.pos:], rightComment+l.rightDelim)
+ if i < 0 {
+ return l.errorf("unclosed comment")
+ }
+ l.pos += i + len(rightComment) + len(l.rightDelim)
+ l.ignore()
+ return lexText
+}
+
+// lexRightDelim scans the right delimiter, which is known to be present.
+func lexRightDelim(l *lexer) stateFn {
+ l.pos += len(l.rightDelim)
+ l.emit(itemRightDelim)
+ return lexText
+}
+
+// lexInsideAction scans the elements inside action delimiters.
+func lexInsideAction(l *lexer) stateFn {
+ // Either number, quoted string, or identifier.
+ // Spaces separate and are ignored.
+ // Pipe symbols separate and are emitted.
+ if strings.HasPrefix(l.input[l.pos:], l.rightDelim) {
+ return lexRightDelim
+ }
+ switch r := l.next(); {
+ case r == eof || r == '\n':
+ return l.errorf("unclosed action")
+ case isSpace(r):
+ l.ignore()
+ case r == ':':
+ if l.next() != '=' {
+ return l.errorf("expected :=")
+ }
+ l.emit(itemColonEquals)
+ case r == '|':
+ l.emit(itemPipe)
+ case r == '"':
+ return lexQuote
+ case r == '`':
+ return lexRawQuote
+ case r == '$':
+ return lexIdentifier
+ case r == '\'':
+ return lexChar
+ case r == '.':
+ // special look-ahead for ".field" so we don't break l.backup().
+ if l.pos < len(l.input) {
+ r := l.input[l.pos]
+ if r < '0' || '9' < r {
+ return lexIdentifier // itemDot comes from the keyword table.
+ }
+ }
+ fallthrough // '.' can start a number.
+ case r == '+' || r == '-' || ('0' <= r && r <= '9'):
+ l.backup()
+ return lexNumber
+ case isAlphaNumeric(r):
+ l.backup()
+ return lexIdentifier
+ case r <= unicode.MaxASCII && unicode.IsPrint(r):
+ l.emit(itemChar)
+ return lexInsideAction
+ default:
+ return l.errorf("unrecognized character in action: %#U", r)
+ }
+ return lexInsideAction
+}
+
+// lexIdentifier scans an alphanumeric or field.
+func lexIdentifier(l *lexer) stateFn {
+Loop:
+ for {
+ switch r := l.next(); {
+ case isAlphaNumeric(r):
+ // absorb.
+ case r == '.' && (l.input[l.start] == '.' || l.input[l.start] == '$'):
+ // field chaining; absorb into one token.
+ default:
+ l.backup()
+ word := l.input[l.start:l.pos]
+ switch {
+ case key[word] > itemKeyword:
+ l.emit(key[word])
+ case word[0] == '.':
+ l.emit(itemField)
+ case word[0] == '$':
+ l.emit(itemVariable)
+ case word == "true", word == "false":
+ l.emit(itemBool)
+ default:
+ l.emit(itemIdentifier)
+ }
+ break Loop
+ }
+ }
+ return lexInsideAction
+}
+
+// lexChar scans a character constant. The initial quote is already
+// scanned. Syntax checking is done by the parser.
+func lexChar(l *lexer) stateFn {
+Loop:
+ for {
+ switch l.next() {
+ case '\\':
+ if r := l.next(); r != eof && r != '\n' {
+ break
+ }
+ fallthrough
+ case eof, '\n':
+ return l.errorf("unterminated character constant")
+ case '\'':
+ break Loop
+ }
+ }
+ l.emit(itemCharConstant)
+ return lexInsideAction
+}
+
+// lexNumber scans a number: decimal, octal, hex, float, or imaginary. This
+// isn't a perfect number scanner - for instance it accepts "." and "0x0.2"
+// and "089" - but when it's wrong the input is invalid and the parser (via
+// strconv) will notice.
+func lexNumber(l *lexer) stateFn {
+ if !l.scanNumber() {
+ return l.errorf("bad number syntax: %q", l.input[l.start:l.pos])
+ }
+ if sign := l.peek(); sign == '+' || sign == '-' {
+ // Complex: 1+2i. No spaces, must end in 'i'.
+ if !l.scanNumber() || l.input[l.pos-1] != 'i' {
+ return l.errorf("bad number syntax: %q", l.input[l.start:l.pos])
+ }
+ l.emit(itemComplex)
+ } else {
+ l.emit(itemNumber)
+ }
+ return lexInsideAction
+}
+
+func (l *lexer) scanNumber() bool {
+ // Optional leading sign.
+ l.accept("+-")
+ // Is it hex?
+ digits := "0123456789"
+ if l.accept("0") && l.accept("xX") {
+ digits = "0123456789abcdefABCDEF"
+ }
+ l.acceptRun(digits)
+ if l.accept(".") {
+ l.acceptRun(digits)
+ }
+ if l.accept("eE") {
+ l.accept("+-")
+ l.acceptRun("0123456789")
+ }
+ // Is it imaginary?
+ l.accept("i")
+ // Next thing mustn't be alphanumeric.
+ if isAlphaNumeric(l.peek()) {
+ l.next()
+ return false
+ }
+ return true
+}
+
+// lexQuote scans a quoted string.
+func lexQuote(l *lexer) stateFn {
+Loop:
+ for {
+ switch l.next() {
+ case '\\':
+ if r := l.next(); r != eof && r != '\n' {
+ break
+ }
+ fallthrough
+ case eof, '\n':
+ return l.errorf("unterminated quoted string")
+ case '"':
+ break Loop
+ }
+ }
+ l.emit(itemString)
+ return lexInsideAction
+}
+
+// lexRawQuote scans a raw quoted string.
+func lexRawQuote(l *lexer) stateFn {
+Loop:
+ for {
+ switch l.next() {
+ case eof, '\n':
+ return l.errorf("unterminated raw quoted string")
+ case '`':
+ break Loop
+ }
+ }
+ l.emit(itemRawString)
+ return lexInsideAction
+}
+
+// isSpace reports whether r is a space character.
+func isSpace(r rune) bool {
+ switch r {
+ case ' ', '\t', '\n', '\r':
+ return true
+ }
+ return false
+}
+
+// isAlphaNumeric reports whether r is an alphabetic, digit, or underscore.
+func isAlphaNumeric(r rune) bool {
+ return r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r)
+}
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package parse
+
+import (
+ "reflect"
+ "testing"
+)
+
+type lexTest struct {
+ name string
+ input string
+ items []item
+}
+
+var (
+ tEOF = item{itemEOF, ""}
+ tLeft = item{itemLeftDelim, "{{"}
+ tRight = item{itemRightDelim, "}}"}
+ tRange = item{itemRange, "range"}
+ tPipe = item{itemPipe, "|"}
+ tFor = item{itemIdentifier, "for"}
+ tQuote = item{itemString, `"abc \n\t\" "`}
+ raw = "`" + `abc\n\t\" ` + "`"
+ tRawQuote = item{itemRawString, raw}
+)
+
+var lexTests = []lexTest{
+ {"empty", "", []item{tEOF}},
+ {"spaces", " \t\n", []item{{itemText, " \t\n"}, tEOF}},
+ {"text", `now is the time`, []item{{itemText, "now is the time"}, tEOF}},
+ {"text with comment", "hello-{{/* this is a comment */}}-world", []item{
+ {itemText, "hello-"},
+ {itemText, "-world"},
+ tEOF,
+ }},
+ {"punctuation", "{{,@%}}", []item{
+ tLeft,
+ {itemChar, ","},
+ {itemChar, "@"},
+ {itemChar, "%"},
+ tRight,
+ tEOF,
+ }},
+ {"empty action", `{{}}`, []item{tLeft, tRight, tEOF}},
+ {"for", `{{for }}`, []item{tLeft, tFor, tRight, tEOF}},
+ {"quote", `{{"abc \n\t\" "}}`, []item{tLeft, tQuote, tRight, tEOF}},
+ {"raw quote", "{{" + raw + "}}", []item{tLeft, tRawQuote, tRight, tEOF}},
+ {"numbers", "{{1 02 0x14 -7.2i 1e3 +1.2e-4 4.2i 1+2i}}", []item{
+ tLeft,
+ {itemNumber, "1"},
+ {itemNumber, "02"},
+ {itemNumber, "0x14"},
+ {itemNumber, "-7.2i"},
+ {itemNumber, "1e3"},
+ {itemNumber, "+1.2e-4"},
+ {itemNumber, "4.2i"},
+ {itemComplex, "1+2i"},
+ tRight,
+ tEOF,
+ }},
+ {"characters", `{{'a' '\n' '\'' '\\' '\u00FF' '\xFF' '本'}}`, []item{
+ tLeft,
+ {itemCharConstant, `'a'`},
+ {itemCharConstant, `'\n'`},
+ {itemCharConstant, `'\''`},
+ {itemCharConstant, `'\\'`},
+ {itemCharConstant, `'\u00FF'`},
+ {itemCharConstant, `'\xFF'`},
+ {itemCharConstant, `'本'`},
+ tRight,
+ tEOF,
+ }},
+ {"bools", "{{true false}}", []item{
+ tLeft,
+ {itemBool, "true"},
+ {itemBool, "false"},
+ tRight,
+ tEOF,
+ }},
+ {"dot", "{{.}}", []item{
+ tLeft,
+ {itemDot, "."},
+ tRight,
+ tEOF,
+ }},
+ {"dots", "{{.x . .2 .x.y}}", []item{
+ tLeft,
+ {itemField, ".x"},
+ {itemDot, "."},
+ {itemNumber, ".2"},
+ {itemField, ".x.y"},
+ tRight,
+ tEOF,
+ }},
+ {"keywords", "{{range if else end with}}", []item{
+ tLeft,
+ {itemRange, "range"},
+ {itemIf, "if"},
+ {itemElse, "else"},
+ {itemEnd, "end"},
+ {itemWith, "with"},
+ tRight,
+ tEOF,
+ }},
+ {"variables", "{{$c := printf $ $hello $23 $ $var.Field .Method}}", []item{
+ tLeft,
+ {itemVariable, "$c"},
+ {itemColonEquals, ":="},
+ {itemIdentifier, "printf"},
+ {itemVariable, "$"},
+ {itemVariable, "$hello"},
+ {itemVariable, "$23"},
+ {itemVariable, "$"},
+ {itemVariable, "$var.Field"},
+ {itemField, ".Method"},
+ tRight,
+ tEOF,
+ }},
+ {"pipeline", `intro {{echo hi 1.2 |noargs|args 1 "hi"}} outro`, []item{
+ {itemText, "intro "},
+ tLeft,
+ {itemIdentifier, "echo"},
+ {itemIdentifier, "hi"},
+ {itemNumber, "1.2"},
+ tPipe,
+ {itemIdentifier, "noargs"},
+ tPipe,
+ {itemIdentifier, "args"},
+ {itemNumber, "1"},
+ {itemString, `"hi"`},
+ tRight,
+ {itemText, " outro"},
+ tEOF,
+ }},
+ {"declaration", "{{$v := 3}}", []item{
+ tLeft,
+ {itemVariable, "$v"},
+ {itemColonEquals, ":="},
+ {itemNumber, "3"},
+ tRight,
+ tEOF,
+ }},
+ {"2 declarations", "{{$v , $w := 3}}", []item{
+ tLeft,
+ {itemVariable, "$v"},
+ {itemChar, ","},
+ {itemVariable, "$w"},
+ {itemColonEquals, ":="},
+ {itemNumber, "3"},
+ tRight,
+ tEOF,
+ }},
+ // errors
+ {"badchar", "#{{\x01}}", []item{
+ {itemText, "#"},
+ tLeft,
+ {itemError, "unrecognized character in action: U+0001"},
+ }},
+ {"unclosed action", "{{\n}}", []item{
+ tLeft,
+ {itemError, "unclosed action"},
+ }},
+ {"EOF in action", "{{range", []item{
+ tLeft,
+ tRange,
+ {itemError, "unclosed action"},
+ }},
+ {"unclosed quote", "{{\"\n\"}}", []item{
+ tLeft,
+ {itemError, "unterminated quoted string"},
+ }},
+ {"unclosed raw quote", "{{`xx\n`}}", []item{
+ tLeft,
+ {itemError, "unterminated raw quoted string"},
+ }},
+ {"unclosed char constant", "{{'\n}}", []item{
+ tLeft,
+ {itemError, "unterminated character constant"},
+ }},
+ {"bad number", "{{3k}}", []item{
+ tLeft,
+ {itemError, `bad number syntax: "3k"`},
+ }},
+
+ // Fixed bugs
+ // Many elements in an action blew the lookahead until
+ // we made lexInsideAction not loop.
+ {"long pipeline deadlock", "{{|||||}}", []item{
+ tLeft,
+ tPipe,
+ tPipe,
+ tPipe,
+ tPipe,
+ tPipe,
+ tRight,
+ tEOF,
+ }},
+}
+
+// collect gathers the emitted items into a slice.
+func collect(t *lexTest, left, right string) (items []item) {
+ l := lex(t.name, t.input, left, right)
+ for {
+ item := l.nextItem()
+ items = append(items, item)
+ if item.typ == itemEOF || item.typ == itemError {
+ break
+ }
+ }
+ return
+}
+
+func TestLex(t *testing.T) {
+ for _, test := range lexTests {
+ items := collect(&test, "", "")
+ if !reflect.DeepEqual(items, test.items) {
+ t.Errorf("%s: got\n\t%v\nexpected\n\t%v", test.name, items, test.items)
+ }
+ }
+}
+
+// Some easy cases from above, but with delimiters $$ and @@
+var lexDelimTests = []lexTest{
+ {"punctuation", "$$,@%{{}}@@", []item{
+ tLeftDelim,
+ {itemChar, ","},
+ {itemChar, "@"},
+ {itemChar, "%"},
+ {itemChar, "{"},
+ {itemChar, "{"},
+ {itemChar, "}"},
+ {itemChar, "}"},
+ tRightDelim,
+ tEOF,
+ }},
+ {"empty action", `$$@@`, []item{tLeftDelim, tRightDelim, tEOF}},
+ {"for", `$$for @@`, []item{tLeftDelim, tFor, tRightDelim, tEOF}},
+ {"quote", `$$"abc \n\t\" "@@`, []item{tLeftDelim, tQuote, tRightDelim, tEOF}},
+ {"raw quote", "$$" + raw + "@@", []item{tLeftDelim, tRawQuote, tRightDelim, tEOF}},
+}
+
+var (
+ tLeftDelim = item{itemLeftDelim, "$$"}
+ tRightDelim = item{itemRightDelim, "@@"}
+)
+
+func TestDelims(t *testing.T) {
+ for _, test := range lexDelimTests {
+ items := collect(&test, "$$", "@@")
+ if !reflect.DeepEqual(items, test.items) {
+ t.Errorf("%s: got\n\t%v\nexpected\n\t%v", test.name, items, test.items)
+ }
+ }
+}
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Parse nodes.
+
+package parse
+
+import (
+ "bytes"
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// A node is an element in the parse tree. The interface is trivial.
+type Node interface {
+ Type() NodeType
+ String() string
+}
+
+// NodeType identifies the type of a parse tree node.
+type NodeType int
+
+// Type returns itself and provides an easy default implementation
+// for embedding in a Node. Embedded in all non-trivial Nodes.
+func (t NodeType) Type() NodeType {
+ return t
+}
+
+const (
+ NodeText NodeType = iota // Plain text.
+ NodeAction // A simple action such as field evaluation.
+ NodeBool // A boolean constant.
+ NodeCommand // An element of a pipeline.
+ NodeDot // The cursor, dot.
+ nodeElse // An else action. Not added to tree.
+ nodeEnd // An end action. Not added to tree.
+ NodeField // A field or method name.
+ NodeIdentifier // An identifier; always a function name.
+ NodeIf // An if action.
+ NodeList // A list of Nodes.
+ NodeNumber // A numerical constant.
+ NodePipe // A pipeline of commands.
+ NodeRange // A range action.
+ NodeString // A string constant.
+ NodeTemplate // A template invocation action.
+ NodeVariable // A $ variable.
+ NodeWith // A with action.
+)
+
+// Nodes.
+
+// ListNode holds a sequence of nodes.
+type ListNode struct {
+ NodeType
+ Nodes []Node // The element nodes in lexical order.
+}
+
+func newList() *ListNode {
+ return &ListNode{NodeType: NodeList}
+}
+
+func (l *ListNode) append(n Node) {
+ l.Nodes = append(l.Nodes, n)
+}
+
+func (l *ListNode) String() string {
+ b := new(bytes.Buffer)
+ fmt.Fprint(b, "[")
+ for _, n := range l.Nodes {
+ fmt.Fprint(b, n)
+ }
+ fmt.Fprint(b, "]")
+ return b.String()
+}
+
+// TextNode holds plain text.
+type TextNode struct {
+ NodeType
+ Text []byte // The text; may span newlines.
+}
+
+func newText(text string) *TextNode {
+ return &TextNode{NodeType: NodeText, Text: []byte(text)}
+}
+
+func (t *TextNode) String() string {
+ return fmt.Sprintf("(text: %q)", t.Text)
+}
+
+// PipeNode holds a pipeline with optional declaration
+type PipeNode struct {
+ NodeType
+ Line int // The line number in the input.
+ Decl []*VariableNode // Variable declarations in lexical order.
+ Cmds []*CommandNode // The commands in lexical order.
+}
+
+func newPipeline(line int, decl []*VariableNode) *PipeNode {
+ return &PipeNode{NodeType: NodePipe, Line: line, Decl: decl}
+}
+
+func (p *PipeNode) append(command *CommandNode) {
+ p.Cmds = append(p.Cmds, command)
+}
+
+func (p *PipeNode) String() string {
+ if p.Decl != nil {
+ return fmt.Sprintf("%v := %v", p.Decl, p.Cmds)
+ }
+ return fmt.Sprintf("%v", p.Cmds)
+}
+
+// ActionNode holds an action (something bounded by delimiters).
+// Control actions have their own nodes; ActionNode represents simple
+// ones such as field evaluations.
+type ActionNode struct {
+ NodeType
+ Line int // The line number in the input.
+ Pipe *PipeNode // The pipeline in the action.
+}
+
+func newAction(line int, pipe *PipeNode) *ActionNode {
+ return &ActionNode{NodeType: NodeAction, Line: line, Pipe: pipe}
+}
+
+func (a *ActionNode) String() string {
+ return fmt.Sprintf("(action: %v)", a.Pipe)
+}
+
+// CommandNode holds a command (a pipeline inside an evaluating action).
+type CommandNode struct {
+ NodeType
+ Args []Node // Arguments in lexical order: Identifier, field, or constant.
+}
+
+func newCommand() *CommandNode {
+ return &CommandNode{NodeType: NodeCommand}
+}
+
+func (c *CommandNode) append(arg Node) {
+ c.Args = append(c.Args, arg)
+}
+
+func (c *CommandNode) String() string {
+ return fmt.Sprintf("(command: %v)", c.Args)
+}
+
+// IdentifierNode holds an identifier.
+type IdentifierNode struct {
+ NodeType
+ Ident string // The identifier's name.
+}
+
+// NewIdentifier returns a new IdentifierNode with the given identifier name.
+func NewIdentifier(ident string) *IdentifierNode {
+ return &IdentifierNode{NodeType: NodeIdentifier, Ident: ident}
+}
+
+func (i *IdentifierNode) String() string {
+ return fmt.Sprintf("I=%s", i.Ident)
+}
+
+// VariableNode holds a list of variable names. The dollar sign is
+// part of the name.
+type VariableNode struct {
+ NodeType
+ Ident []string // Variable names in lexical order.
+}
+
+func newVariable(ident string) *VariableNode {
+ return &VariableNode{NodeType: NodeVariable, Ident: strings.Split(ident, ".")}
+}
+
+func (v *VariableNode) String() string {
+ return fmt.Sprintf("V=%s", v.Ident)
+}
+
+// DotNode holds the special identifier '.'. It is represented by a nil pointer.
+type DotNode bool
+
+func newDot() *DotNode {
+ return nil
+}
+
+func (d *DotNode) Type() NodeType {
+ return NodeDot
+}
+
+func (d *DotNode) String() string {
+ return "{{<.>}}"
+}
+
+// FieldNode holds a field (identifier starting with '.').
+// The names may be chained ('.x.y').
+// The period is dropped from each ident.
+type FieldNode struct {
+ NodeType
+ Ident []string // The identifiers in lexical order.
+}
+
+func newField(ident string) *FieldNode {
+ return &FieldNode{NodeType: NodeField, Ident: strings.Split(ident[1:], ".")} // [1:] to drop leading period
+}
+
+func (f *FieldNode) String() string {
+ return fmt.Sprintf("F=%s", f.Ident)
+}
+
+// BoolNode holds a boolean constant.
+type BoolNode struct {
+ NodeType
+ True bool // The value of the boolean constant.
+}
+
+func newBool(true bool) *BoolNode {
+ return &BoolNode{NodeType: NodeBool, True: true}
+}
+
+func (b *BoolNode) String() string {
+ return fmt.Sprintf("B=%t", b.True)
+}
+
+// NumberNode holds a number: signed or unsigned integer, float, or complex.
+// The value is parsed and stored under all the types that can represent the value.
+// This simulates in a small amount of code the behavior of Go's ideal constants.
+type NumberNode struct {
+ NodeType
+ IsInt bool // Number has an integral value.
+ IsUint bool // Number has an unsigned integral value.
+ IsFloat bool // Number has a floating-point value.
+ IsComplex bool // Number is complex.
+ Int64 int64 // The signed integer value.
+ Uint64 uint64 // The unsigned integer value.
+ Float64 float64 // The floating-point value.
+ Complex128 complex128 // The complex value.
+ Text string // The original textual representation from the input.
+}
+
+func newNumber(text string, typ itemType) (*NumberNode, error) {
+ n := &NumberNode{NodeType: NodeNumber, Text: text}
+ switch typ {
+ case itemCharConstant:
+ rune, _, tail, err := strconv.UnquoteChar(text[1:], text[0])
+ if err != nil {
+ return nil, err
+ }
+ if tail != "'" {
+ return nil, fmt.Errorf("malformed character constant: %s", text)
+ }
+ n.Int64 = int64(rune)
+ n.IsInt = true
+ n.Uint64 = uint64(rune)
+ n.IsUint = true
+ n.Float64 = float64(rune) // odd but those are the rules.
+ n.IsFloat = true
+ return n, nil
+ case itemComplex:
+ // fmt.Sscan can parse the pair, so let it do the work.
+ if _, err := fmt.Sscan(text, &n.Complex128); err != nil {
+ return nil, err
+ }
+ n.IsComplex = true
+ n.simplifyComplex()
+ return n, nil
+ }
+ // Imaginary constants can only be complex unless they are zero.
+ if len(text) > 0 && text[len(text)-1] == 'i' {
+ f, err := strconv.Atof64(text[:len(text)-1])
+ if err == nil {
+ n.IsComplex = true
+ n.Complex128 = complex(0, f)
+ n.simplifyComplex()
+ return n, nil
+ }
+ }
+ // Do integer test first so we get 0x123 etc.
+ u, err := strconv.Btoui64(text, 0) // will fail for -0; fixed below.
+ if err == nil {
+ n.IsUint = true
+ n.Uint64 = u
+ }
+ i, err := strconv.Btoi64(text, 0)
+ if err == nil {
+ n.IsInt = true
+ n.Int64 = i
+ if i == 0 {
+ n.IsUint = true // in case of -0.
+ n.Uint64 = u
+ }
+ }
+ // If an integer extraction succeeded, promote the float.
+ if n.IsInt {
+ n.IsFloat = true
+ n.Float64 = float64(n.Int64)
+ } else if n.IsUint {
+ n.IsFloat = true
+ n.Float64 = float64(n.Uint64)
+ } else {
+ f, err := strconv.Atof64(text)
+ if err == nil {
+ n.IsFloat = true
+ n.Float64 = f
+ // If a floating-point extraction succeeded, extract the int if needed.
+ if !n.IsInt && float64(int64(f)) == f {
+ n.IsInt = true
+ n.Int64 = int64(f)
+ }
+ if !n.IsUint && float64(uint64(f)) == f {
+ n.IsUint = true
+ n.Uint64 = uint64(f)
+ }
+ }
+ }
+ if !n.IsInt && !n.IsUint && !n.IsFloat {
+ return nil, fmt.Errorf("illegal number syntax: %q", text)
+ }
+ return n, nil
+}
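
Because newNumber records the value under every representation that fits, it can help to see which flags a few literals end up with. A worked example against the code above (illustrative, not generated output):

// "3"    -> IsInt, IsUint, IsFloat               (Int64=3, Uint64=3, Float64=3)
// "-7"   -> IsInt, IsFloat                       (negative, so no unsigned form)
// "0x14" -> IsInt, IsUint, IsFloat               (value 20)
// "1e3"  -> IsFloat, then IsInt and IsUint too   (1000 is integral)
// "1.5"  -> IsFloat only
// "4i"   -> IsComplex only                       (nonzero imaginary part)
// "1+0i" -> IsComplex; simplifyComplex then sets IsFloat, IsInt, and IsUint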
+
+// simplifyComplex pulls out any other types that are represented by the complex number.
+// These all require that the imaginary part be zero.
+func (n *NumberNode) simplifyComplex() {
+ n.IsFloat = imag(n.Complex128) == 0
+ if n.IsFloat {
+ n.Float64 = real(n.Complex128)
+ n.IsInt = float64(int64(n.Float64)) == n.Float64
+ if n.IsInt {
+ n.Int64 = int64(n.Float64)
+ }
+ n.IsUint = float64(uint64(n.Float64)) == n.Float64
+ if n.IsUint {
+ n.Uint64 = uint64(n.Float64)
+ }
+ }
+}
+
+func (n *NumberNode) String() string {
+ return fmt.Sprintf("N=%s", n.Text)
+}
+
+// StringNode holds a string constant. The value has been "unquoted".
+type StringNode struct {
+ NodeType
+ Quoted string // The original text of the string, with quotes.
+ Text string // The string, after quote processing.
+}
+
+func newString(orig, text string) *StringNode {
+ return &StringNode{NodeType: NodeString, Quoted: orig, Text: text}
+}
+
+func (s *StringNode) String() string {
+ return fmt.Sprintf("S=%#q", s.Text)
+}
+
+// endNode represents an {{end}} action. It is represented by a nil pointer.
+// It does not appear in the final parse tree.
+type endNode bool
+
+func newEnd() *endNode {
+ return nil
+}
+
+func (e *endNode) Type() NodeType {
+ return nodeEnd
+}
+
+func (e *endNode) String() string {
+ return "{{end}}"
+}
+
+// elseNode represents an {{else}} action. Does not appear in the final tree.
+type elseNode struct {
+ NodeType
+ Line int // The line number in the input.
+}
+
+func newElse(line int) *elseNode {
+ return &elseNode{NodeType: nodeElse, Line: line}
+}
+
+func (e *elseNode) Type() NodeType {
+ return nodeElse
+}
+
+func (e *elseNode) String() string {
+ return "{{else}}"
+}
+
+// BranchNode is the common representation of if, range, and with.
+type BranchNode struct {
+ NodeType
+ Line int // The line number in the input.
+ Pipe *PipeNode // The pipeline to be evaluated.
+ List *ListNode // What to execute if the value is non-empty.
+ ElseList *ListNode // What to execute if the value is empty (nil if absent).
+}
+
+func (b *BranchNode) String() string {
+ name := ""
+ switch b.NodeType {
+ case NodeIf:
+ name = "if"
+ case NodeRange:
+ name = "range"
+ case NodeWith:
+ name = "with"
+ default:
+ panic("unknown branch type")
+ }
+ if b.ElseList != nil {
+ return fmt.Sprintf("({{%s %s}} %s {{else}} %s)", name, b.Pipe, b.List, b.ElseList)
+ }
+ return fmt.Sprintf("({{%s %s}} %s)", name, b.Pipe, b.List)
+}
+
+// IfNode represents an {{if}} action and its commands.
+type IfNode struct {
+ BranchNode
+}
+
+func newIf(line int, pipe *PipeNode, list, elseList *ListNode) *IfNode {
+ return &IfNode{BranchNode{NodeType: NodeIf, Line: line, Pipe: pipe, List: list, ElseList: elseList}}
+}
+
+// RangeNode represents a {{range}} action and its commands.
+type RangeNode struct {
+ BranchNode
+}
+
+func newRange(line int, pipe *PipeNode, list, elseList *ListNode) *RangeNode {
+ return &RangeNode{BranchNode{NodeType: NodeRange, Line: line, Pipe: pipe, List: list, ElseList: elseList}}
+}
+
+// WithNode represents a {{with}} action and its commands.
+type WithNode struct {
+ BranchNode
+}
+
+func newWith(line int, pipe *PipeNode, list, elseList *ListNode) *WithNode {
+ return &WithNode{BranchNode{NodeType: NodeWith, Line: line, Pipe: pipe, List: list, ElseList: elseList}}
+}
+
+// TemplateNode represents a {{template}} action.
+type TemplateNode struct {
+ NodeType
+ Line int // The line number in the input.
+ Name string // The name of the template (unquoted).
+ Pipe *PipeNode // The command to evaluate as dot for the template.
+}
+
+func newTemplate(line int, name string, pipe *PipeNode) *TemplateNode {
+ return &TemplateNode{NodeType: NodeTemplate, Line: line, Name: name, Pipe: pipe}
+}
+
+func (t *TemplateNode) String() string {
+ if t.Pipe == nil {
+ return fmt.Sprintf("{{template %q}}", t.Name)
+ }
+ return fmt.Sprintf("{{template %q %s}}", t.Name, t.Pipe)
+}
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package parse builds parse trees for templates. The grammar is defined
+// in the documents for the template package.
+package parse
+
+import (
+ "fmt"
+ "runtime"
+ "strconv"
+ "unicode"
+)
+
+// Tree is the representation of a parsed template.
+type Tree struct {
+ Name string // Name is the name of the template.
+ Root *ListNode // Root is the top-level root of the parse tree.
+ // Parsing only; cleared after parse.
+ funcs []map[string]interface{}
+ lex *lexer
+ token [2]item // two-token lookahead for parser.
+ peekCount int
+ vars []string // variables defined at the moment.
+}
+
+// next returns the next token.
+func (t *Tree) next() item {
+ if t.peekCount > 0 {
+ t.peekCount--
+ } else {
+ t.token[0] = t.lex.nextItem()
+ }
+ return t.token[t.peekCount]
+}
+
+// backup backs the input stream up one token.
+func (t *Tree) backup() {
+ t.peekCount++
+}
+
+// backup2 backs the input stream up two tokens
+func (t *Tree) backup2(t1 item) {
+ t.token[1] = t1
+ t.peekCount = 2
+}
+
+// peek returns but does not consume the next token.
+func (t *Tree) peek() item {
+ if t.peekCount > 0 {
+ return t.token[t.peekCount-1]
+ }
+ t.peekCount = 1
+ t.token[0] = t.lex.nextItem()
+ return t.token[0]
+}
+
+// Parsing.
+
+// New allocates a new template with the given name.
+func New(name string, funcs ...map[string]interface{}) *Tree {
+ return &Tree{
+ Name: name,
+ funcs: funcs,
+ }
+}
+
+// errorf formats the error and terminates processing.
+func (t *Tree) errorf(format string, args ...interface{}) {
+ t.Root = nil
+ format = fmt.Sprintf("template: %s:%d: %s", t.Name, t.lex.lineNumber(), format)
+ panic(fmt.Errorf(format, args...))
+}
+
+// error terminates processing.
+func (t *Tree) error(err error) {
+ t.errorf("%s", err)
+}
+
+// expect consumes the next token and guarantees it has the required type.
+func (t *Tree) expect(expected itemType, context string) item {
+ token := t.next()
+ if token.typ != expected {
+ t.errorf("expected %s in %s; got %s", expected, context, token)
+ }
+ return token
+}
+
+// unexpected complains about the token and terminates processing.
+func (t *Tree) unexpected(token item, context string) {
+ t.errorf("unexpected %s in %s", token, context)
+}
+
+// recover is the handler that turns panics into returns from the top level of Parse.
+func (t *Tree) recover(errp *error) {
+ e := recover()
+ if e != nil {
+ if _, ok := e.(runtime.Error); ok {
+ panic(e)
+ }
+ if t != nil {
+ t.stopParse()
+ }
+ *errp = e.(error)
+ }
+ return
+}
+
+// startParse starts the template parsing from the lexer.
+func (t *Tree) startParse(funcs []map[string]interface{}, lex *lexer) {
+ t.Root = nil
+ t.lex = lex
+ t.vars = []string{"$"}
+ t.funcs = funcs
+}
+
+// stopParse terminates parsing.
+func (t *Tree) stopParse() {
+ t.lex = nil
+ t.vars = nil
+ t.funcs = nil
+}
+
+// atEOF returns true if, possibly after spaces, we're at EOF.
+func (t *Tree) atEOF() bool {
+ for {
+ token := t.peek()
+ switch token.typ {
+ case itemEOF:
+ return true
+ case itemText:
+ for _, r := range token.val {
+ if !unicode.IsSpace(r) {
+ return false
+ }
+ }
+ t.next() // skip spaces.
+ continue
+ }
+ break
+ }
+ return false
+}
+
+// Parse parses the template definition string to construct an internal
+// representation of the template for execution. If either action delimiter
+// string is empty, the default ("{{" or "}}") is used.
+func (t *Tree) Parse(s, leftDelim, rightDelim string, funcs ...map[string]interface{}) (tree *Tree, err error) {
+ defer t.recover(&err)
+ t.startParse(funcs, lex(t.Name, s, leftDelim, rightDelim))
+ t.parse(true)
+ t.stopParse()
+ return t, nil
+}
+
+// parse is the helper for Parse.
+// It triggers an error if we expect EOF but don't reach it.
+func (t *Tree) parse(toEOF bool) (next Node) {
+ t.Root, next = t.itemList(true)
+ if toEOF && next != nil {
+ t.errorf("unexpected %s", next)
+ }
+ return next
+}
+
+// itemList:
+// textOrAction*
+// Terminates at EOF and at {{end}} or {{else}}, which is returned separately.
+// The toEOF flag tells whether we expect to reach EOF.
+func (t *Tree) itemList(toEOF bool) (list *ListNode, next Node) {
+ list = newList()
+ for t.peek().typ != itemEOF {
+ n := t.textOrAction()
+ switch n.Type() {
+ case nodeEnd, nodeElse:
+ return list, n
+ }
+ list.append(n)
+ }
+ if !toEOF {
+ t.unexpected(t.next(), "input")
+ }
+ return list, nil
+}
+
+// textOrAction:
+// text | action
+func (t *Tree) textOrAction() Node {
+ switch token := t.next(); token.typ {
+ case itemText:
+ return newText(token.val)
+ case itemLeftDelim:
+ return t.action()
+ default:
+ t.unexpected(token, "input")
+ }
+ return nil
+}
+
+// Action:
+// control
+// command ("|" command)*
+// Left delim is past. Now get actions.
+// First word could be a keyword such as range.
+func (t *Tree) action() (n Node) {
+ switch token := t.next(); token.typ {
+ case itemElse:
+ return t.elseControl()
+ case itemEnd:
+ return t.endControl()
+ case itemIf:
+ return t.ifControl()
+ case itemRange:
+ return t.rangeControl()
+ case itemTemplate:
+ return t.templateControl()
+ case itemWith:
+ return t.withControl()
+ }
+ t.backup()
+ // Do not pop variables; they persist until "end".
+ return newAction(t.lex.lineNumber(), t.pipeline("command"))
+}
+
+// Pipeline:
+// field or command
+// pipeline "|" pipeline
+func (t *Tree) pipeline(context string) (pipe *PipeNode) {
+ var decl []*VariableNode
+ // Are there declarations?
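+ // A declaration looks like "$x := pipeline"; only inside a range action may
+ // two variables be declared, as in "{{range $i, $x := pipeline}}".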
+ for {
+ if v := t.peek(); v.typ == itemVariable {
+ t.next()
+ if next := t.peek(); next.typ == itemColonEquals || next.typ == itemChar {
+ t.next()
+ variable := newVariable(v.val)
+ if len(variable.Ident) != 1 {
+ t.errorf("illegal variable in declaration: %s", v.val)
+ }
+ decl = append(decl, variable)
+ t.vars = append(t.vars, v.val)
+ if next.typ == itemChar && next.val == "," {
+ if context == "range" && len(decl) < 2 {
+ continue
+ }
+ t.errorf("too many declarations in %s", context)
+ }
+ } else {
+ t.backup2(v)
+ }
+ }
+ break
+ }
+ pipe = newPipeline(t.lex.lineNumber(), decl)
+ for {
+ switch token := t.next(); token.typ {
+ case itemRightDelim:
+ if len(pipe.Cmds) == 0 {
+ t.errorf("missing value for %s", context)
+ }
+ return
+ case itemBool, itemCharConstant, itemComplex, itemDot, itemField, itemIdentifier,
+ itemVariable, itemNumber, itemRawString, itemString:
+ t.backup()
+ pipe.append(t.command())
+ default:
+ t.unexpected(token, context)
+ }
+ }
+ return
+}
+
+func (t *Tree) parseControl(context string) (lineNum int, pipe *PipeNode, list, elseList *ListNode) {
+ lineNum = t.lex.lineNumber()
+ defer t.popVars(len(t.vars))
+ pipe = t.pipeline(context)
+ var next Node
+ list, next = t.itemList(false)
+ switch next.Type() {
+ case nodeEnd: // done
+ case nodeElse:
+ elseList, next = t.itemList(false)
+ if next.Type() != nodeEnd {
+ t.errorf("expected end; found %s", next)
+ }
+ }
+ return lineNum, pipe, list, elseList
+}
+
+// If:
+// {{if pipeline}} itemList {{end}}
+// {{if pipeline}} itemList {{else}} itemList {{end}}
+// If keyword is past.
+func (t *Tree) ifControl() Node {
+ return newIf(t.parseControl("if"))
+}
+
+// Range:
+// {{range pipeline}} itemList {{end}}
+// {{range pipeline}} itemList {{else}} itemList {{end}}
+// Range keyword is past.
+func (t *Tree) rangeControl() Node {
+ return newRange(t.parseControl("range"))
+}
+
+// With:
+// {{with pipeline}} itemList {{end}}
+// {{with pipeline}} itemList {{else}} itemList {{end}}
+// With keyword is past.
+func (t *Tree) withControl() Node {
+ return newWith(t.parseControl("with"))
+}
+
+// End:
+// {{end}}
+// End keyword is past.
+func (t *Tree) endControl() Node {
+ t.expect(itemRightDelim, "end")
+ return newEnd()
+}
+
+// Else:
+// {{else}}
+// Else keyword is past.
+func (t *Tree) elseControl() Node {
+ t.expect(itemRightDelim, "else")
+ return newElse(t.lex.lineNumber())
+}
+
+// Template:
+// {{template stringValue pipeline}}
+// Template keyword is past. The name must be something that can evaluate
+// to a string.
+func (t *Tree) templateControl() Node {
+ var name string
+ switch token := t.next(); token.typ {
+ case itemString, itemRawString:
+ s, err := strconv.Unquote(token.val)
+ if err != nil {
+ t.error(err)
+ }
+ name = s
+ default:
+ t.unexpected(token, "template invocation")
+ }
+ var pipe *PipeNode
+ if t.next().typ != itemRightDelim {
+ t.backup()
+ // Do not pop variables; they persist until "end".
+ pipe = t.pipeline("template")
+ }
+ return newTemplate(t.lex.lineNumber(), name, pipe)
+}
+
+// command:
+// space-separated arguments up to a pipeline character or right delimiter.
+// We consume the pipe character but leave the right delim to terminate the action.
+func (t *Tree) command() *CommandNode {
+ cmd := newCommand()
+Loop:
+ for {
+ switch token := t.next(); token.typ {
+ case itemRightDelim:
+ t.backup()
+ break Loop
+ case itemPipe:
+ break Loop
+ case itemError:
+ t.errorf("%s", token.val)
+ case itemIdentifier:
+ if !t.hasFunction(token.val) {
+ t.errorf("function %q not defined", token.val)
+ }
+ cmd.append(NewIdentifier(token.val))
+ case itemDot:
+ cmd.append(newDot())
+ case itemVariable:
+ cmd.append(t.useVar(token.val))
+ case itemField:
+ cmd.append(newField(token.val))
+ case itemBool:
+ cmd.append(newBool(token.val == "true"))
+ case itemCharConstant, itemComplex, itemNumber:
+ number, err := newNumber(token.val, token.typ)
+ if err != nil {
+ t.error(err)
+ }
+ cmd.append(number)
+ case itemString, itemRawString:
+ s, err := strconv.Unquote(token.val)
+ if err != nil {
+ t.error(err)
+ }
+ cmd.append(newString(token.val, s))
+ default:
+ t.unexpected(token, "command")
+ }
+ }
+ if len(cmd.Args) == 0 {
+ t.errorf("empty command")
+ }
+ return cmd
+}
+
+// hasFunction reports if a function name exists in the Tree's maps.
+func (t *Tree) hasFunction(name string) bool {
+ for _, funcMap := range t.funcs {
+ if funcMap == nil {
+ continue
+ }
+ if funcMap[name] != nil {
+ return true
+ }
+ }
+ return false
+}
+
+// popVars trims the variable list to the specified length
+func (t *Tree) popVars(n int) {
+ t.vars = t.vars[:n]
+}
+
+// useVar returns a node for a variable reference. It errors if the
+// variable is not defined.
+func (t *Tree) useVar(name string) Node {
+ v := newVariable(name)
+ for _, varName := range t.vars {
+ if varName == v.Ident[0] {
+ return v
+ }
+ }
+ t.errorf("undefined variable %q", v.Ident[0])
+ return nil
+}
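As a rough illustration of the Tree API defined above (not part of the library sources; the name "demo" and the template text are arbitrary, and the import path follows the layout shown in this patch), a standalone program could parse an action and print the tree in the same textual form the tests below check:

package main

import (
	"fmt"
	"text/template/parse"
)

func main() {
	// Parse with the default "{{" and "}}" delimiters; no function maps are
	// needed because the action only refers to a field.
	tree, err := parse.New("demo").Parse("{{if .X}}true{{else}}false{{end}}", "", "")
	if err != nil {
		panic(err)
	}
	// Prints: [({{if [(command: [F=[X]])]}} [(text: "true")] {{else}} [(text: "false")])]
	fmt.Println(tree.Root)
}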
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package parse
+
+import (
+ "flag"
+ "fmt"
+ "testing"
+)
+
+var debug = flag.Bool("debug", false, "show the errors produced by the tests")
+
+type numberTest struct {
+ text string
+ isInt bool
+ isUint bool
+ isFloat bool
+ isComplex bool
+ int64
+ uint64
+ float64
+ complex128
+}
+
+var numberTests = []numberTest{
+ // basics
+ {"0", true, true, true, false, 0, 0, 0, 0},
+ {"-0", true, true, true, false, 0, 0, 0, 0}, // check that -0 is a uint.
+ {"73", true, true, true, false, 73, 73, 73, 0},
+ {"073", true, true, true, false, 073, 073, 073, 0},
+ {"0x73", true, true, true, false, 0x73, 0x73, 0x73, 0},
+ {"-73", true, false, true, false, -73, 0, -73, 0},
+ {"+73", true, false, true, false, 73, 0, 73, 0},
+ {"100", true, true, true, false, 100, 100, 100, 0},
+ {"1e9", true, true, true, false, 1e9, 1e9, 1e9, 0},
+ {"-1e9", true, false, true, false, -1e9, 0, -1e9, 0},
+ {"-1.2", false, false, true, false, 0, 0, -1.2, 0},
+ {"1e19", false, true, true, false, 0, 1e19, 1e19, 0},
+ {"-1e19", false, false, true, false, 0, 0, -1e19, 0},
+ {"4i", false, false, false, true, 0, 0, 0, 4i},
+ {"-1.2+4.2i", false, false, false, true, 0, 0, 0, -1.2 + 4.2i},
+ {"073i", false, false, false, true, 0, 0, 0, 73i}, // not octal!
+ // complex with 0 imaginary are float (and maybe integer)
+ {"0i", true, true, true, true, 0, 0, 0, 0},
+ {"-1.2+0i", false, false, true, true, 0, 0, -1.2, -1.2},
+ {"-12+0i", true, false, true, true, -12, 0, -12, -12},
+ {"13+0i", true, true, true, true, 13, 13, 13, 13},
+ // funny bases
+ {"0123", true, true, true, false, 0123, 0123, 0123, 0},
+ {"-0x0", true, true, true, false, 0, 0, 0, 0},
+ {"0xdeadbeef", true, true, true, false, 0xdeadbeef, 0xdeadbeef, 0xdeadbeef, 0},
+ // character constants
+ {`'a'`, true, true, true, false, 'a', 'a', 'a', 0},
+ {`'\n'`, true, true, true, false, '\n', '\n', '\n', 0},
+ {`'\\'`, true, true, true, false, '\\', '\\', '\\', 0},
+ {`'\''`, true, true, true, false, '\'', '\'', '\'', 0},
+ {`'\xFF'`, true, true, true, false, 0xFF, 0xFF, 0xFF, 0},
+ {`'パ'`, true, true, true, false, 0x30d1, 0x30d1, 0x30d1, 0},
+ {`'\u30d1'`, true, true, true, false, 0x30d1, 0x30d1, 0x30d1, 0},
+ {`'\U000030d1'`, true, true, true, false, 0x30d1, 0x30d1, 0x30d1, 0},
+ // some broken syntax
+ {text: "+-2"},
+ {text: "0x123."},
+ {text: "1e."},
+ {text: "0xi."},
+ {text: "1+2."},
+ {text: "'x"},
+ {text: "'xx'"},
+}
+
+func TestNumberParse(t *testing.T) {
+ for _, test := range numberTests {
+ // If fmt.Sscan thinks it's complex, it's complex. We can't trust the output
+ // because imaginary comes out as a number.
+ var c complex128
+ typ := itemNumber
+ if test.text[0] == '\'' {
+ typ = itemCharConstant
+ } else {
+ _, err := fmt.Sscan(test.text, &c)
+ if err == nil {
+ typ = itemComplex
+ }
+ }
+ n, err := newNumber(test.text, typ)
+ ok := test.isInt || test.isUint || test.isFloat || test.isComplex
+ if ok && err != nil {
+ t.Errorf("unexpected error for %q: %s", test.text, err)
+ continue
+ }
+ if !ok && err == nil {
+ t.Errorf("expected error for %q", test.text)
+ continue
+ }
+ if !ok {
+ if *debug {
+ fmt.Printf("%s\n\t%s\n", test.text, err)
+ }
+ continue
+ }
+ if n.IsComplex != test.isComplex {
+ t.Errorf("complex incorrect for %q; should be %t", test.text, test.isComplex)
+ }
+ if test.isInt {
+ if !n.IsInt {
+ t.Errorf("expected integer for %q", test.text)
+ }
+ if n.Int64 != test.int64 {
+ t.Errorf("int64 for %q should be %d Is %d", test.text, test.int64, n.Int64)
+ }
+ } else if n.IsInt {
+ t.Errorf("did not expect integer for %q", test.text)
+ }
+ if test.isUint {
+ if !n.IsUint {
+ t.Errorf("expected unsigned integer for %q", test.text)
+ }
+ if n.Uint64 != test.uint64 {
+ t.Errorf("uint64 for %q should be %d Is %d", test.text, test.uint64, n.Uint64)
+ }
+ } else if n.IsUint {
+ t.Errorf("did not expect unsigned integer for %q", test.text)
+ }
+ if test.isFloat {
+ if !n.IsFloat {
+ t.Errorf("expected float for %q", test.text)
+ }
+ if n.Float64 != test.float64 {
+ t.Errorf("float64 for %q should be %g Is %g", test.text, test.float64, n.Float64)
+ }
+ } else if n.IsFloat {
+ t.Errorf("did not expect float for %q", test.text)
+ }
+ if test.isComplex {
+ if !n.IsComplex {
+ t.Errorf("expected complex for %q", test.text)
+ }
+ if n.Complex128 != test.complex128 {
+ t.Errorf("complex128 for %q should be %g Is %g", test.text, test.complex128, n.Complex128)
+ }
+ } else if n.IsComplex {
+ t.Errorf("did not expect complex for %q", test.text)
+ }
+ }
+}
+
+type parseTest struct {
+ name string
+ input string
+ ok bool
+ result string
+}
+
+const (
+ noError = true
+ hasError = false
+)
+
+var parseTests = []parseTest{
+ {"empty", "", noError,
+ `[]`},
+ {"comment", "{{/*\n\n\n*/}}", noError,
+ `[]`},
+ {"spaces", " \t\n", noError,
+ `[(text: " \t\n")]`},
+ {"text", "some text", noError,
+ `[(text: "some text")]`},
+ {"emptyAction", "{{}}", hasError,
+ `[(action: [])]`},
+ {"field", "{{.X}}", noError,
+ `[(action: [(command: [F=[X]])])]`},
+ {"simple command", "{{printf}}", noError,
+ `[(action: [(command: [I=printf])])]`},
+ {"$ invocation", "{{$}}", noError,
+ "[(action: [(command: [V=[$]])])]"},
+ {"variable invocation", "{{with $x := 3}}{{$x 23}}{{end}}", noError,
+ "[({{with [V=[$x]] := [(command: [N=3])]}} [(action: [(command: [V=[$x] N=23])])])]"},
+ {"variable with fields", "{{$.I}}", noError,
+ "[(action: [(command: [V=[$ I]])])]"},
+ {"multi-word command", "{{printf `%d` 23}}", noError,
+ "[(action: [(command: [I=printf S=`%d` N=23])])]"},
+ {"pipeline", "{{.X|.Y}}", noError,
+ `[(action: [(command: [F=[X]]) (command: [F=[Y]])])]`},
+ {"pipeline with decl", "{{$x := .X|.Y}}", noError,
+ `[(action: [V=[$x]] := [(command: [F=[X]]) (command: [F=[Y]])])]`},
+ {"declaration", "{{.X|.Y}}", noError,
+ `[(action: [(command: [F=[X]]) (command: [F=[Y]])])]`},
+ {"simple if", "{{if .X}}hello{{end}}", noError,
+ `[({{if [(command: [F=[X]])]}} [(text: "hello")])]`},
+ {"if with else", "{{if .X}}true{{else}}false{{end}}", noError,
+ `[({{if [(command: [F=[X]])]}} [(text: "true")] {{else}} [(text: "false")])]`},
+ {"simple range", "{{range .X}}hello{{end}}", noError,
+ `[({{range [(command: [F=[X]])]}} [(text: "hello")])]`},
+ {"chained field range", "{{range .X.Y.Z}}hello{{end}}", noError,
+ `[({{range [(command: [F=[X Y Z]])]}} [(text: "hello")])]`},
+ {"nested range", "{{range .X}}hello{{range .Y}}goodbye{{end}}{{end}}", noError,
+ `[({{range [(command: [F=[X]])]}} [(text: "hello")({{range [(command: [F=[Y]])]}} [(text: "goodbye")])])]`},
+ {"range with else", "{{range .X}}true{{else}}false{{end}}", noError,
+ `[({{range [(command: [F=[X]])]}} [(text: "true")] {{else}} [(text: "false")])]`},
+ {"range over pipeline", "{{range .X|.M}}true{{else}}false{{end}}", noError,
+ `[({{range [(command: [F=[X]]) (command: [F=[M]])]}} [(text: "true")] {{else}} [(text: "false")])]`},
+ {"range []int", "{{range .SI}}{{.}}{{end}}", noError,
+ `[({{range [(command: [F=[SI]])]}} [(action: [(command: [{{<.>}}])])])]`},
+ {"constants", "{{range .SI 1 -3.2i true false 'a'}}{{end}}", noError,
+ `[({{range [(command: [F=[SI] N=1 N=-3.2i B=true B=false N='a'])]}} [])]`},
+ {"template", "{{template `x`}}", noError,
+ `[{{template "x"}}]`},
+ {"template with arg", "{{template `x` .Y}}", noError,
+ `[{{template "x" [(command: [F=[Y]])]}}]`},
+ {"with", "{{with .X}}hello{{end}}", noError,
+ `[({{with [(command: [F=[X]])]}} [(text: "hello")])]`},
+ {"with with else", "{{with .X}}hello{{else}}goodbye{{end}}", noError,
+ `[({{with [(command: [F=[X]])]}} [(text: "hello")] {{else}} [(text: "goodbye")])]`},
+ // Errors.
+ {"unclosed action", "hello{{range", hasError, ""},
+ {"unmatched end", "{{end}}", hasError, ""},
+ {"missing end", "hello{{range .x}}", hasError, ""},
+ {"missing end after else", "hello{{range .x}}{{else}}", hasError, ""},
+ {"undefined function", "hello{{undefined}}", hasError, ""},
+ {"undefined variable", "{{$x}}", hasError, ""},
+ {"variable undefined after end", "{{with $x := 4}}{{end}}{{$x}}", hasError, ""},
+ {"variable undefined in template", "{{template $v}}", hasError, ""},
+ {"declare with field", "{{with $x.Y := 4}}{{end}}", hasError, ""},
+ {"template with field ref", "{{template .X}}", hasError, ""},
+ {"template with var", "{{template $v}}", hasError, ""},
+ {"invalid punctuation", "{{printf 3, 4}}", hasError, ""},
+ {"multidecl outside range", "{{with $v, $u := 3}}{{end}}", hasError, ""},
+ {"too many decls in range", "{{range $u, $v, $w := 3}}{{end}}", hasError, ""},
+}
+
+var builtins = map[string]interface{}{
+ "printf": fmt.Sprintf,
+}
+
+func TestParse(t *testing.T) {
+ for _, test := range parseTests {
+ tmpl, err := New(test.name).Parse(test.input, "", "", builtins)
+ switch {
+ case err == nil && !test.ok:
+ t.Errorf("%q: expected error; got none", test.name)
+ continue
+ case err != nil && test.ok:
+ t.Errorf("%q: unexpected error: %v", test.name, err)
+ continue
+ case err != nil && !test.ok:
+ // expected error, got one
+ if *debug {
+ fmt.Printf("%s: %s\n\t%s\n", test.name, test.input, err)
+ }
+ continue
+ }
+ result := tmpl.Root.String()
+ if result != test.result {
+ t.Errorf("%s=(%q): got\n\t%v\nexpected\n\t%v", test.name, test.input, result, test.result)
+ }
+ }
+}
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package parse
+
+import (
+ "fmt"
+ "strconv"
+)
+
+// Set returns a map from template name to *Tree, created by parsing the
+// {{define}} clauses in the template set definition in the argument string.
+// If an error is encountered, parsing stops and the error is returned.
+func Set(text, leftDelim, rightDelim string, funcs ...map[string]interface{}) (tree map[string]*Tree, err error) {
+ tree = make(map[string]*Tree)
+ defer (*Tree)(nil).recover(&err)
+ lex := lex("set", text, leftDelim, rightDelim)
+ const context = "define clause"
+ for {
+ t := New("set") // name will be updated once we know it.
+ t.startParse(funcs, lex)
+ // Expect EOF or "{{ define name }}".
+ if t.atEOF() {
+ break
+ }
+ t.expect(itemLeftDelim, context)
+ t.expect(itemDefine, context)
+ name := t.expect(itemString, context)
+ t.Name, err = strconv.Unquote(name.val)
+ if err != nil {
+ t.error(err)
+ }
+ t.expect(itemRightDelim, context)
+ end := t.parse(false)
+ if end == nil {
+ t.errorf("unexpected EOF in %s", context)
+ }
+ if end.Type() != nodeEnd {
+ t.errorf("unexpected %s in %s", end, context)
+ }
+ t.stopParse()
+ if _, present := tree[t.Name]; present {
+ return nil, fmt.Errorf("template: %q multiply defined", t.Name)
+ }
+ tree[t.Name] = t
+ }
+ return
+}
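For illustration only (not part of the patch), Set can be exercised on a string of {{define}} clauses; the resulting map is keyed by the unquoted template names:

package main

import (
	"fmt"
	"text/template/parse"
)

func main() {
	const text = `{{define "foo"}} FOO {{end}}{{define "bar"}} BAR {{end}}`
	trees, err := parse.Set(text, "", "")
	if err != nil {
		panic(err)
	}
	for name, tree := range trees {
		fmt.Printf("%s: %s\n", name, tree.Root) // e.g. foo: [(text: " FOO ")]
	}
}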
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+ "fmt"
+ "io"
+ "reflect"
+ "text/template/parse"
+)
+
+// Set holds a set of related templates that can refer to one another by name.
+// The zero value represents an empty set.
+// A template may be a member of multiple sets.
+type Set struct {
+ tmpl map[string]*Template
+ leftDelim string
+ rightDelim string
+ parseFuncs FuncMap
+ execFuncs map[string]reflect.Value
+}
+
+func (s *Set) init() {
+ if s.tmpl == nil {
+ s.tmpl = make(map[string]*Template)
+ s.parseFuncs = make(FuncMap)
+ s.execFuncs = make(map[string]reflect.Value)
+ }
+}
+
+// Delims sets the action delimiters, to be used in a subsequent
+// parse, to the specified strings.
+// An empty delimiter stands for the corresponding default: {{ or }}.
+// The return value is the set, so calls can be chained.
+func (s *Set) Delims(left, right string) *Set {
+ s.leftDelim = left
+ s.rightDelim = right
+ return s
+}
+
+// Funcs adds the elements of the argument map to the set's function map. It
+// panics if a value in the map is not a function with appropriate return
+// type.
+// The return value is the set, so calls can be chained.
+func (s *Set) Funcs(funcMap FuncMap) *Set {
+ s.init()
+ addValueFuncs(s.execFuncs, funcMap)
+ addFuncs(s.parseFuncs, funcMap)
+ return s
+}
+
+// Add adds the argument templates to the set. It panics if two templates
+// with the same name are added or if a template is already a member of
+// a set.
+// The return value is the set, so calls can be chained.
+func (s *Set) Add(templates ...*Template) *Set {
+ for _, t := range templates {
+ if err := s.add(t); err != nil {
+ panic(err)
+ }
+ }
+ return s
+}
+
+// add adds the argument template to the set.
+func (s *Set) add(t *Template) error {
+ s.init()
+ if t.set != nil {
+ return fmt.Errorf("template: %q already in a set", t.name)
+ }
+ if _, ok := s.tmpl[t.name]; ok {
+ return fmt.Errorf("template: %q already defined in set", t.name)
+ }
+ s.tmpl[t.name] = t
+ t.set = s
+ return nil
+}
+
+// Template returns the template with the given name in the set,
+// or nil if there is no such template.
+func (s *Set) Template(name string) *Template {
+ return s.tmpl[name]
+}
+
+// FuncMap returns the set's function map.
+func (s *Set) FuncMap() FuncMap {
+ return s.parseFuncs
+}
+
+// Execute applies the named template to the specified data object, writing
+// the output to wr.
+func (s *Set) Execute(wr io.Writer, name string, data interface{}) error {
+ tmpl := s.tmpl[name]
+ if tmpl == nil {
+ return fmt.Errorf("template: no template %q in set", name)
+ }
+ return tmpl.Execute(wr, data)
+}
+
+// Parse parses a string into a set of named templates. Parse may be called
+// multiple times for a given set, adding the templates defined in the string
+// to the set. It is an error if a template has a name already defined in the set.
+func (s *Set) Parse(text string) (*Set, error) {
+ trees, err := parse.Set(text, s.leftDelim, s.rightDelim, s.parseFuncs, builtins)
+ if err != nil {
+ return nil, err
+ }
+ s.init()
+ for name, tree := range trees {
+ tmpl := New(name)
+ tmpl.Tree = tree
+ err = s.add(tmpl)
+ if err != nil {
+ return s, err
+ }
+ }
+ return s, nil
+}
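A minimal usage sketch for the Set type above (illustrative only; it assumes Template.Execute behaves as elsewhere in this package):

package main

import (
	"os"
	"text/template"
)

func main() {
	set := new(template.Set) // the zero value is an empty set
	if _, err := set.Parse(`{{define "x"}}TEXT{{end}}`); err != nil {
		panic(err)
	}
	// Writes "TEXT" to standard output.
	if err := set.Execute(os.Stdout, "x", nil); err != nil {
		panic(err)
	}
}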
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+ "fmt"
+ "testing"
+)
+
+const (
+ noError = true
+ hasError = false
+)
+
+type setParseTest struct {
+ name string
+ input string
+ ok bool
+ names []string
+ results []string
+}
+
+var setParseTests = []setParseTest{
+ {"empty", "", noError,
+ nil,
+ nil},
+ {"one", `{{define "foo"}} FOO {{end}}`, noError,
+ []string{"foo"},
+ []string{`[(text: " FOO ")]`}},
+ {"two", `{{define "foo"}} FOO {{end}}{{define "bar"}} BAR {{end}}`, noError,
+ []string{"foo", "bar"},
+ []string{`[(text: " FOO ")]`, `[(text: " BAR ")]`}},
+ // errors
+ {"missing end", `{{define "foo"}} FOO `, hasError,
+ nil,
+ nil},
+ {"malformed name", `{{define "foo}} FOO `, hasError,
+ nil,
+ nil},
+}
+
+func TestSetParse(t *testing.T) {
+ for _, test := range setParseTests {
+ set, err := new(Set).Parse(test.input)
+ switch {
+ case err == nil && !test.ok:
+ t.Errorf("%q: expected error; got none", test.name)
+ continue
+ case err != nil && test.ok:
+ t.Errorf("%q: unexpected error: %v", test.name, err)
+ continue
+ case err != nil && !test.ok:
+ // expected error, got one
+ if *debug {
+ fmt.Printf("%s: %s\n\t%s\n", test.name, test.input, err)
+ }
+ continue
+ }
+ if set == nil {
+ continue
+ }
+ if len(set.tmpl) != len(test.names) {
+ t.Errorf("%s: wrong number of templates; wanted %d got %d", test.name, len(test.names), len(set.tmpl))
+ continue
+ }
+ for i, name := range test.names {
+ tmpl, ok := set.tmpl[name]
+ if !ok {
+ t.Errorf("%s: can't find template %q", test.name, name)
+ continue
+ }
+ result := tmpl.Root.String()
+ if result != test.results[i] {
+ t.Errorf("%s=(%q): got\n\t%v\nexpected\n\t%v", test.name, test.input, result, test.results[i])
+ }
+ }
+ }
+}
+
+var setExecTests = []execTest{
+ {"empty", "", "", nil, true},
+ {"text", "some text", "some text", nil, true},
+ {"invoke x", `{{template "x" .SI}}`, "TEXT", tVal, true},
+ {"invoke x no args", `{{template "x"}}`, "TEXT", tVal, true},
+ {"invoke dot int", `{{template "dot" .I}}`, "17", tVal, true},
+ {"invoke dot []int", `{{template "dot" .SI}}`, "[3 4 5]", tVal, true},
+ {"invoke dotV", `{{template "dotV" .U}}`, "v", tVal, true},
+ {"invoke nested int", `{{template "nested" .I}}`, "17", tVal, true},
+ {"variable declared by template", `{{template "nested" $x=.SI}},{{index $x 1}}`, "[3 4 5],4", tVal, true},
+
+ // User-defined function: test argument evaluator.
+ {"testFunc literal", `{{oneArg "joe"}}`, "oneArg=joe", tVal, true},
+ {"testFunc .", `{{oneArg .}}`, "oneArg=joe", "joe", true},
+}
+
+// These strings are also in testdata/*.
+const setText1 = `
+ {{define "x"}}TEXT{{end}}
+ {{define "dotV"}}{{.V}}{{end}}
+`
+
+const setText2 = `
+ {{define "dot"}}{{.}}{{end}}
+ {{define "nested"}}{{template "dot" .}}{{end}}
+`
+
+func TestSetExecute(t *testing.T) {
+ // Declare a set with a couple of templates first.
+ set := new(Set)
+ _, err := set.Parse(setText1)
+ if err != nil {
+ t.Fatalf("error parsing set: %s", err)
+ }
+ _, err = set.Parse(setText2)
+ if err != nil {
+ t.Fatalf("error parsing set: %s", err)
+ }
+ testExecute(setExecTests, set, t)
+}
+
+func TestSetParseFiles(t *testing.T) {
+ set := new(Set)
+ _, err := set.ParseFiles("DOES NOT EXIST")
+ if err == nil {
+ t.Error("expected error for non-existent file; got none")
+ }
+ _, err = set.ParseFiles("testdata/file1.tmpl", "testdata/file2.tmpl")
+ if err != nil {
+ t.Fatalf("error parsing files: %v", err)
+ }
+ testExecute(setExecTests, set, t)
+}
+
+func TestParseSetFiles(t *testing.T) {
+ set := new(Set)
+ _, err := ParseSetFiles("DOES NOT EXIST")
+ if err == nil {
+ t.Error("expected error for non-existent file; got none")
+ }
+ set, err = ParseSetFiles("testdata/file1.tmpl", "testdata/file2.tmpl")
+ if err != nil {
+ t.Fatalf("error parsing files: %v", err)
+ }
+ testExecute(setExecTests, set, t)
+}
+
+func TestSetParseGlob(t *testing.T) {
+ _, err := new(Set).ParseGlob("DOES NOT EXIST")
+ if err == nil {
+ t.Error("expected error for non-existent file; got none")
+ }
+ _, err = new(Set).ParseGlob("[x")
+ if err == nil {
+ t.Error("expected error for bad pattern; got none")
+ }
+ set, err := new(Set).ParseGlob("testdata/file*.tmpl")
+ if err != nil {
+ t.Fatalf("error parsing files: %v", err)
+ }
+ testExecute(setExecTests, set, t)
+}
+
+func TestParseSetGlob(t *testing.T) {
+ _, err := ParseSetGlob("DOES NOT EXIST")
+ if err == nil {
+ t.Error("expected error for non-existent file; got none")
+ }
+ _, err = ParseSetGlob("[x")
+ if err == nil {
+ t.Error("expected error for bad pattern; got none")
+ }
+ set, err := ParseSetGlob("testdata/file*.tmpl")
+ if err != nil {
+ t.Fatalf("error parsing files: %v", err)
+ }
+ testExecute(setExecTests, set, t)
+}
+
+var templateFileExecTests = []execTest{
+ {"test", `{{template "tmpl1.tmpl"}}{{template "tmpl2.tmpl"}}`, "template1\ntemplate2\n", 0, true},
+}
+
+func TestSetParseTemplateFiles(t *testing.T) {
+ _, err := ParseTemplateFiles("DOES NOT EXIST")
+ if err == nil {
+ t.Error("expected error for non-existent file; got none")
+ }
+ set, err := new(Set).ParseTemplateFiles("testdata/tmpl1.tmpl", "testdata/tmpl2.tmpl")
+ if err != nil {
+ t.Fatalf("error parsing files: %v", err)
+ }
+ testExecute(templateFileExecTests, set, t)
+}
+
+func TestParseTemplateFiles(t *testing.T) {
+ _, err := ParseTemplateFiles("DOES NOT EXIST")
+ if err == nil {
+ t.Error("expected error for non-existent file; got none")
+ }
+ set, err := new(Set).ParseTemplateFiles("testdata/tmpl1.tmpl", "testdata/tmpl2.tmpl")
+ if err != nil {
+ t.Fatalf("error parsing files: %v", err)
+ }
+ testExecute(templateFileExecTests, set, t)
+}
+
+func TestSetParseTemplateGlob(t *testing.T) {
+ _, err := ParseTemplateGlob("DOES NOT EXIST")
+ if err == nil {
+ t.Error("expected error for non-existent file; got none")
+ }
+ _, err = new(Set).ParseTemplateGlob("[x")
+ if err == nil {
+ t.Error("expected error for bad pattern; got none")
+ }
+ set, err := new(Set).ParseTemplateGlob("testdata/tmpl*.tmpl")
+ if err != nil {
+ t.Fatalf("error parsing files: %v", err)
+ }
+ testExecute(templateFileExecTests, set, t)
+}
+
+func TestParseTemplateGlob(t *testing.T) {
+ _, err := ParseTemplateGlob("DOES NOT EXIST")
+ if err == nil {
+ t.Error("expected error for non-existent file; got none")
+ }
+ _, err = ParseTemplateGlob("[x")
+ if err == nil {
+ t.Error("expected error for bad pattern; got none")
+ }
+ set, err := ParseTemplateGlob("testdata/tmpl*.tmpl")
+ if err != nil {
+ t.Fatalf("error parsing files: %v", err)
+ }
+ testExecute(templateFileExecTests, set, t)
+}
--- /dev/null
+{{define "x"}}TEXT{{end}}
+{{define "dotV"}}{{.V}}{{end}}
--- /dev/null
+{{define "dot"}}{{.}}{{end}}
+{{define "nested"}}{{template "dot" .}}{{end}}
import (
"errors"
"fmt"
- "testing"
"sort"
+ "testing"
. "time"
)
}
// For gccgo omit 0 for now because it can take too long to start the
-var slots = []int{5, 3, 6, 6, 6, 1, 1, 2, 7, 9, 4, 8, /*0*/}
+var slots = []int{5, 3, 6, 6, 6, 1, 1, 2, 7, 9, 4, 8 /*0*/ }
type afterResult struct {
slot int
package time
-import "os"
-
// Seconds reports the number of seconds since the Unix epoch,
// January 1, 1970 00:00:00 UTC.
func Seconds() int64 {
- sec, _, err := os.Time()
- if err != nil {
- panic(err)
- }
- return sec
+ return Nanoseconds() / 1e9
}
+// Nanoseconds is implemented by package runtime.
+
// Nanoseconds reports the number of nanoseconds since the Unix epoch,
// January 1, 1970 00:00:00 UTC.
-func Nanoseconds() int64 {
- sec, nsec, err := os.Time()
- if err != nil {
- panic(err)
- }
- return sec*1e9 + nsec
-}
+func Nanoseconds() int64
// Sleep pauses the current goroutine for at least ns nanoseconds.
// Higher resolution sleeping may be provided by syscall.Nanosleep
package time
import (
- "syscall"
- "sync"
"os"
+ "sync"
+ "syscall"
)
// BUG(brainman): The Windows implementation assumes that
--- /dev/null
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package utf16 implements encoding and decoding of UTF-16 sequences.
+package utf16
+
+import "unicode"
+
+const (
+ // 0xd800-0xdc00 encodes the high 10 bits of a pair.
+ // 0xdc00-0xe000 encodes the low 10 bits of a pair.
+ // the value is those 20 bits plus 0x10000.
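+ // For example, U+10437 becomes the pair 0xd801, 0xdc37:
+ // 0x10437-0x10000 = 0x00437; 0xd800+0x001 = 0xd801, 0xdc00+0x037 = 0xdc37.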
+ surr1 = 0xd800
+ surr2 = 0xdc00
+ surr3 = 0xe000
+
+ surrSelf = 0x10000
+)
+
+// IsSurrogate returns true if the specified Unicode code point
+// can appear in a surrogate pair.
+func IsSurrogate(r rune) bool {
+ return surr1 <= r && r < surr3
+}
+
+// DecodeRune returns the UTF-16 decoding of a surrogate pair.
+// If the pair is not a valid UTF-16 surrogate pair, DecodeRune returns
+// the Unicode replacement code point U+FFFD.
+func DecodeRune(r1, r2 rune) rune {
+ if surr1 <= r1 && r1 < surr2 && surr2 <= r2 && r2 < surr3 {
+ return (rune(r1)-surr1)<<10 | (rune(r2) - surr2) + 0x10000
+ }
+ return unicode.ReplacementChar
+}
+
+// EncodeRune returns the UTF-16 surrogate pair r1, r2 for the given rune.
+// If the rune is not a valid Unicode code point or does not need encoding,
+// EncodeRune returns U+FFFD, U+FFFD.
+func EncodeRune(r rune) (r1, r2 rune) {
+ if r < surrSelf || r > unicode.MaxRune || IsSurrogate(r) {
+ return unicode.ReplacementChar, unicode.ReplacementChar
+ }
+ r -= surrSelf
+ return surr1 + (r>>10)&0x3ff, surr2 + r&0x3ff
+}
+
+// Encode returns the UTF-16 encoding of the Unicode code point sequence s.
+func Encode(s []rune) []uint16 {
+ n := len(s)
+ for _, v := range s {
+ if v >= surrSelf {
+ n++
+ }
+ }
+
+ a := make([]uint16, n)
+ n = 0
+ for _, v := range s {
+ switch {
+ case v < 0, surr1 <= v && v < surr3, v > unicode.MaxRune:
+ v = unicode.ReplacementChar
+ fallthrough
+ case v < surrSelf:
+ a[n] = uint16(v)
+ n++
+ default:
+ r1, r2 := EncodeRune(v)
+ a[n] = uint16(r1)
+ a[n+1] = uint16(r2)
+ n += 2
+ }
+ }
+ return a[0:n]
+}
+
+// Decode returns the Unicode code point sequence represented
+// by the UTF-16 encoding s.
+func Decode(s []uint16) []rune {
+ a := make([]rune, len(s))
+ n := 0
+ for i := 0; i < len(s); i++ {
+ switch r := s[i]; {
+ case surr1 <= r && r < surr2 && i+1 < len(s) &&
+ surr2 <= s[i+1] && s[i+1] < surr3:
+ // valid surrogate sequence
+ a[n] = DecodeRune(rune(r), rune(s[i+1]))
+ i++
+ n++
+ case surr1 <= r && r < surr3:
+ // invalid surrogate sequence
+ a[n] = unicode.ReplacementChar
+ n++
+ default:
+ // normal rune
+ a[n] = rune(r)
+ n++
+ }
+ }
+ return a[0:n]
+}
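A small round trip through the surrogate-pair encoding above (an illustrative sketch, not from the library sources; the import path follows the layout in this patch):

package main

import (
	"fmt"
	"unicode/utf16"
)

func main() {
	r1, r2 := utf16.EncodeRune(0x10437)
	fmt.Printf("%#x %#x\n", r1, r2)               // 0xd801 0xdc37
	fmt.Printf("%#x\n", utf16.DecodeRune(r1, r2)) // 0x10437
	// Encode and Decode work on whole sequences and round-trip valid input.
	fmt.Println(string(utf16.Decode(utf16.Encode([]rune("日本語"))))) // 日本語
}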
--- /dev/null
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package utf16_test
+
+import (
+ "reflect"
+ "testing"
+ "unicode"
+ . "unicode/utf16"
+)
+
+type encodeTest struct {
+ in []rune
+ out []uint16
+}
+
+var encodeTests = []encodeTest{
+ {[]rune{1, 2, 3, 4}, []uint16{1, 2, 3, 4}},
+ {[]rune{0xffff, 0x10000, 0x10001, 0x12345, 0x10ffff},
+ []uint16{0xffff, 0xd800, 0xdc00, 0xd800, 0xdc01, 0xd808, 0xdf45, 0xdbff, 0xdfff}},
+ {[]rune{'a', 'b', 0xd7ff, 0xd800, 0xdfff, 0xe000, 0x110000, -1},
+ []uint16{'a', 'b', 0xd7ff, 0xfffd, 0xfffd, 0xe000, 0xfffd, 0xfffd}},
+}
+
+func TestEncode(t *testing.T) {
+ for _, tt := range encodeTests {
+ out := Encode(tt.in)
+ if !reflect.DeepEqual(out, tt.out) {
+ t.Errorf("Encode(%x) = %x; want %x", tt.in, out, tt.out)
+ }
+ }
+}
+
+func TestEncodeRune(t *testing.T) {
+ for i, tt := range encodeTests {
+ j := 0
+ for _, r := range tt.in {
+ r1, r2 := EncodeRune(r)
+ if r < 0x10000 || r > unicode.MaxRune {
+ if j >= len(tt.out) {
+ t.Errorf("#%d: ran out of tt.out", i)
+ break
+ }
+ if r1 != unicode.ReplacementChar || r2 != unicode.ReplacementChar {
+ t.Errorf("EncodeRune(%#x) = %#x, %#x; want 0xfffd, 0xfffd", r, r1, r2)
+ }
+ j++
+ } else {
+ if j+1 >= len(tt.out) {
+ t.Errorf("#%d: ran out of tt.out", i)
+ break
+ }
+ if r1 != rune(tt.out[j]) || r2 != rune(tt.out[j+1]) {
+ t.Errorf("EncodeRune(%#x) = %#x, %#x; want %#x, %#x", r, r1, r2, tt.out[j], tt.out[j+1])
+ }
+ j += 2
+ dec := DecodeRune(r1, r2)
+ if dec != r {
+ t.Errorf("DecodeRune(%#x, %#x) = %#x; want %#x", r1, r2, dec, r)
+ }
+ }
+ }
+ if j != len(tt.out) {
+ t.Errorf("#%d: EncodeRune didn't generate enough output", i)
+ }
+ }
+}
+
+type decodeTest struct {
+ in []uint16
+ out []rune
+}
+
+var decodeTests = []decodeTest{
+ {[]uint16{1, 2, 3, 4}, []rune{1, 2, 3, 4}},
+ {[]uint16{0xffff, 0xd800, 0xdc00, 0xd800, 0xdc01, 0xd808, 0xdf45, 0xdbff, 0xdfff},
+ []rune{0xffff, 0x10000, 0x10001, 0x12345, 0x10ffff}},
+ {[]uint16{0xd800, 'a'}, []rune{0xfffd, 'a'}},
+ {[]uint16{0xdfff}, []rune{0xfffd}},
+}
+
+func TestDecode(t *testing.T) {
+ for _, tt := range decodeTests {
+ out := Decode(tt.in)
+ if !reflect.DeepEqual(out, tt.out) {
+ t.Errorf("Decode(%x) = %x; want %x", tt.in, out, tt.out)
+ }
+ }
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package utf8
+
+import "errors"
+
+// String wraps a regular string with a small structure that provides more
+// efficient indexing by code point index, as opposed to byte index.
+// Scanning incrementally forwards or backwards is O(1) per index operation
+// (although not as fast as a range clause going forwards). Random access is
+// O(N) in the length of the string, but the overhead is less than always
+// scanning from the beginning.
+// If the string is ASCII, random access is O(1).
+// Unlike the built-in string type, String has internal mutable state and
+// is not thread-safe.
+type String struct {
+ str string
+ numRunes int
+ // If width > 0, the rune at runePos starts at bytePos and has the specified width.
+ width int
+ bytePos int
+ runePos int
+ nonASCII int // byte index of the first non-ASCII rune.
+}
+
+// NewString returns a new UTF-8 string with the provided contents.
+func NewString(contents string) *String {
+ return new(String).Init(contents)
+}
+
+// Init initializes an existing String to hold the provided contents.
+// It returns a pointer to the initialized String.
+func (s *String) Init(contents string) *String {
+ s.str = contents
+ s.bytePos = 0
+ s.runePos = 0
+ for i := 0; i < len(contents); i++ {
+ if contents[i] >= RuneSelf {
+ // Not ASCII.
+ s.numRunes = RuneCountInString(contents)
+ _, s.width = DecodeRuneInString(contents)
+ s.nonASCII = i
+ return s
+ }
+ }
+ // ASCII is simple. Also, the empty string is ASCII.
+ s.numRunes = len(contents)
+ s.width = 0
+ s.nonASCII = len(contents)
+ return s
+}
+
+// String returns the contents of the String. This method also means the
+// String is directly printable by fmt.Print.
+func (s *String) String() string {
+ return s.str
+}
+
+// RuneCount returns the number of runes (Unicode code points) in the String.
+func (s *String) RuneCount() int {
+ return s.numRunes
+}
+
+// IsASCII returns a boolean indicating whether the String contains only ASCII bytes.
+func (s *String) IsASCII() bool {
+ return s.width == 0
+}
+
+// Slice returns the string sliced at rune positions [i:j].
+func (s *String) Slice(i, j int) string {
+ // ASCII is easy. Let the compiler catch the indexing error if there is one.
+ if j < s.nonASCII {
+ return s.str[i:j]
+ }
+ if i < 0 || j > s.numRunes || i > j {
+ panic(sliceOutOfRange)
+ }
+ if i == j {
+ return ""
+ }
+ // For non-ASCII, after At(i), bytePos is always the position of the indexed character.
+ var low, high int
+ switch {
+ case i < s.nonASCII:
+ low = i
+ case i == s.numRunes:
+ low = len(s.str)
+ default:
+ s.At(i)
+ low = s.bytePos
+ }
+ switch {
+ case j == s.numRunes:
+ high = len(s.str)
+ default:
+ s.At(j)
+ high = s.bytePos
+ }
+ return s.str[low:high]
+}
+
+// At returns the rune with index i in the String. The sequence of runes is the same
+// as iterating over the contents with a "for range" clause.
+func (s *String) At(i int) rune {
+ // ASCII is easy. Let the compiler catch the indexing error if there is one.
+ if i < s.nonASCII {
+ return rune(s.str[i])
+ }
+
+ // Now we do need to know the index is valid.
+ if i < 0 || i >= s.numRunes {
+ panic(outOfRange)
+ }
+
+ var r rune
+
+ // Five easy common cases: within 1 spot of bytePos/runePos, or the beginning, or the end.
+ // With these cases, all scans from beginning or end work in O(1) time per rune.
+ switch {
+
+ case i == s.runePos-1: // backing up one rune
+ r, s.width = DecodeLastRuneInString(s.str[0:s.bytePos])
+ s.runePos = i
+ s.bytePos -= s.width
+ return r
+ case i == s.runePos+1: // moving ahead one rune
+ s.runePos = i
+ s.bytePos += s.width
+ fallthrough
+ case i == s.runePos:
+ r, s.width = DecodeRuneInString(s.str[s.bytePos:])
+ return r
+ case i == 0: // start of string
+ r, s.width = DecodeRuneInString(s.str)
+ s.runePos = 0
+ s.bytePos = 0
+ return r
+
+ case i == s.numRunes-1: // last rune in string
+ r, s.width = DecodeLastRuneInString(s.str)
+ s.runePos = i
+ s.bytePos = len(s.str) - s.width
+ return r
+ }
+
+ // We need to do a linear scan. There are three places to start from:
+ // 1) The beginning
+ // 2) bytePos/runePos.
+ // 3) The end
+ // Choose the closest in rune count, scanning backwards if necessary.
+ forward := true
+ if i < s.runePos {
+ // Between beginning and pos. Which is closer?
+ // Since both i and runePos are guaranteed >= nonASCII, that's the
+ // lowest location we need to start from.
+ if i < (s.runePos-s.nonASCII)/2 {
+ // Scan forward from beginning
+ s.bytePos, s.runePos = s.nonASCII, s.nonASCII
+ } else {
+ // Scan backwards from where we are
+ forward = false
+ }
+ } else {
+ // Between pos and end. Which is closer?
+ if i-s.runePos < (s.numRunes-s.runePos)/2 {
+ // Scan forward from pos
+ } else {
+ // Scan backwards from end
+ s.bytePos, s.runePos = len(s.str), s.numRunes
+ forward = false
+ }
+ }
+ if forward {
+ // TODO: Is it much faster to use a range loop for this scan?
+ for {
+ r, s.width = DecodeRuneInString(s.str[s.bytePos:])
+ if s.runePos == i {
+ break
+ }
+ s.runePos++
+ s.bytePos += s.width
+ }
+ } else {
+ for {
+ r, s.width = DecodeLastRuneInString(s.str[0:s.bytePos])
+ s.runePos--
+ s.bytePos -= s.width
+ if s.runePos == i {
+ break
+ }
+ }
+ }
+ return r
+}
+
+var outOfRange = errors.New("utf8.String: index out of range")
+var sliceOutOfRange = errors.New("utf8.String: slice index out of range")
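For illustration (not from the library sources), the String type above gives cheap sequential and random access by rune index rather than byte index:

package main

import (
	"fmt"
	"unicode/utf8"
)

func main() {
	s := utf8.NewString("日a本b語")
	fmt.Println(s.RuneCount())  // 5
	fmt.Printf("%c\n", s.At(2)) // 本
	fmt.Println(s.Slice(1, 4))  // a本b
}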
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package utf8_test
+
+import (
+ "math/rand"
+ "testing"
+ . "unicode/utf8"
+)
+
+func TestScanForwards(t *testing.T) {
+ for _, s := range testStrings {
+ runes := []rune(s)
+ str := NewString(s)
+ if str.RuneCount() != len(runes) {
+ t.Errorf("%s: expected %d runes; got %d", s, len(runes), str.RuneCount())
+ break
+ }
+ for i, expect := range runes {
+ got := str.At(i)
+ if got != expect {
+ t.Errorf("%s[%d]: expected %c (%U); got %c (%U)", s, i, expect, expect, got, got)
+ }
+ }
+ }
+}
+
+func TestScanBackwards(t *testing.T) {
+ for _, s := range testStrings {
+ runes := []rune(s)
+ str := NewString(s)
+ if str.RuneCount() != len(runes) {
+ t.Errorf("%s: expected %d runes; got %d", s, len(runes), str.RuneCount())
+ break
+ }
+ for i := len(runes) - 1; i >= 0; i-- {
+ expect := runes[i]
+ got := str.At(i)
+ if got != expect {
+ t.Errorf("%s[%d]: expected %c (%U); got %c (%U)", s, i, expect, expect, got, got)
+ }
+ }
+ }
+}
+
+func randCount() int {
+ if testing.Short() {
+ return 100
+ }
+ return 100000
+}
+
+func TestRandomAccess(t *testing.T) {
+ for _, s := range testStrings {
+ if len(s) == 0 {
+ continue
+ }
+ runes := []rune(s)
+ str := NewString(s)
+ if str.RuneCount() != len(runes) {
+ t.Errorf("%s: expected %d runes; got %d", s, len(runes), str.RuneCount())
+ break
+ }
+ for j := 0; j < randCount(); j++ {
+ i := rand.Intn(len(runes))
+ expect := runes[i]
+ got := str.At(i)
+ if got != expect {
+ t.Errorf("%s[%d]: expected %c (%U); got %c (%U)", s, i, expect, expect, got, got)
+ }
+ }
+ }
+}
+
+func TestRandomSliceAccess(t *testing.T) {
+ for _, s := range testStrings {
+ if len(s) == 0 || s[0] == '\x80' { // the bad-UTF-8 string fools this simple test
+ continue
+ }
+ runes := []rune(s)
+ str := NewString(s)
+ if str.RuneCount() != len(runes) {
+ t.Errorf("%s: expected %d runes; got %d", s, len(runes), str.RuneCount())
+ break
+ }
+ for k := 0; k < randCount(); k++ {
+ i := rand.Intn(len(runes))
+ j := rand.Intn(len(runes) + 1)
+ if i > j { // include empty strings
+ continue
+ }
+ expect := string(runes[i:j])
+ got := str.Slice(i, j)
+ if got != expect {
+ t.Errorf("%s[%d:%d]: expected %q got %q", s, i, j, expect, got)
+ }
+ }
+ }
+}
+
+func TestLimitSliceAccess(t *testing.T) {
+ for _, s := range testStrings {
+ str := NewString(s)
+ if str.Slice(0, 0) != "" {
+ t.Error("failure with empty slice at beginning")
+ }
+ nr := RuneCountInString(s)
+ if str.Slice(nr, nr) != "" {
+ t.Error("failure with empty slice at end")
+ }
+ }
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package utf8 implements functions and constants to support text encoded in
+// UTF-8. This package calls a Unicode character a rune for brevity.
+package utf8
+
+import "unicode" // only needed for a couple of constants
+
+// Numbers fundamental to the encoding.
+const (
+ RuneError = unicode.ReplacementChar // the "error" Rune or "replacement character".
+ RuneSelf = 0x80 // characters below RuneSelf are represented as themselves in a single byte.
+ UTFMax = 4 // maximum number of bytes of a UTF-8 encoded Unicode character.
+)
+
+const (
+ t1 = 0x00 // 0000 0000
+ tx = 0x80 // 1000 0000
+ t2 = 0xC0 // 1100 0000
+ t3 = 0xE0 // 1110 0000
+ t4 = 0xF0 // 1111 0000
+ t5 = 0xF8 // 1111 1000
+
+ maskx = 0x3F // 0011 1111
+ mask2 = 0x1F // 0001 1111
+ mask3 = 0x0F // 0000 1111
+ mask4 = 0x07 // 0000 0111
+
+ rune1Max = 1<<7 - 1
+ rune2Max = 1<<11 - 1
+ rune3Max = 1<<16 - 1
+ rune4Max = 1<<21 - 1
+)
+
+func decodeRuneInternal(p []byte) (r rune, size int, short bool) {
+ n := len(p)
+ if n < 1 {
+ return RuneError, 0, true
+ }
+ c0 := p[0]
+
+ // 1-byte, 7-bit sequence?
+ if c0 < tx {
+ return rune(c0), 1, false
+ }
+
+ // unexpected continuation byte?
+ if c0 < t2 {
+ return RuneError, 1, false
+ }
+
+ // need first continuation byte
+ if n < 2 {
+ return RuneError, 1, true
+ }
+ c1 := p[1]
+ if c1 < tx || t2 <= c1 {
+ return RuneError, 1, false
+ }
+
+ // 2-byte, 11-bit sequence?
+ if c0 < t3 {
+ r = rune(c0&mask2)<<6 | rune(c1&maskx)
+ if r <= rune1Max {
+ return RuneError, 1, false
+ }
+ return r, 2, false
+ }
+
+ // need second continuation byte
+ if n < 3 {
+ return RuneError, 1, true
+ }
+ c2 := p[2]
+ if c2 < tx || t2 <= c2 {
+ return RuneError, 1, false
+ }
+
+ // 3-byte, 16-bit sequence?
+ if c0 < t4 {
+ r = rune(c0&mask3)<<12 | rune(c1&maskx)<<6 | rune(c2&maskx)
+ if r <= rune2Max {
+ return RuneError, 1, false
+ }
+ return r, 3, false
+ }
+
+ // need third continuation byte
+ if n < 4 {
+ return RuneError, 1, true
+ }
+ c3 := p[3]
+ if c3 < tx || t2 <= c3 {
+ return RuneError, 1, false
+ }
+
+ // 4-byte, 21-bit sequence?
+ if c0 < t5 {
+ r = rune(c0&mask4)<<18 | rune(c1&maskx)<<12 | rune(c2&maskx)<<6 | rune(c3&maskx)
+ if r <= rune3Max {
+ return RuneError, 1, false
+ }
+ return r, 4, false
+ }
+
+ // error
+ return RuneError, 1, false
+}
+
+func decodeRuneInStringInternal(s string) (r rune, size int, short bool) {
+ n := len(s)
+ if n < 1 {
+ return RuneError, 0, true
+ }
+ c0 := s[0]
+
+ // 1-byte, 7-bit sequence?
+ if c0 < tx {
+ return rune(c0), 1, false
+ }
+
+ // unexpected continuation byte?
+ if c0 < t2 {
+ return RuneError, 1, false
+ }
+
+ // need first continuation byte
+ if n < 2 {
+ return RuneError, 1, true
+ }
+ c1 := s[1]
+ if c1 < tx || t2 <= c1 {
+ return RuneError, 1, false
+ }
+
+ // 2-byte, 11-bit sequence?
+ if c0 < t3 {
+ r = rune(c0&mask2)<<6 | rune(c1&maskx)
+ if r <= rune1Max {
+ return RuneError, 1, false
+ }
+ return r, 2, false
+ }
+
+ // need second continuation byte
+ if n < 3 {
+ return RuneError, 1, true
+ }
+ c2 := s[2]
+ if c2 < tx || t2 <= c2 {
+ return RuneError, 1, false
+ }
+
+ // 3-byte, 16-bit sequence?
+ if c0 < t4 {
+ r = rune(c0&mask3)<<12 | rune(c1&maskx)<<6 | rune(c2&maskx)
+ if r <= rune2Max {
+ return RuneError, 1, false
+ }
+ return r, 3, false
+ }
+
+ // need third continuation byte
+ if n < 4 {
+ return RuneError, 1, true
+ }
+ c3 := s[3]
+ if c3 < tx || t2 <= c3 {
+ return RuneError, 1, false
+ }
+
+ // 4-byte, 21-bit sequence?
+ if c0 < t5 {
+ r = rune(c0&mask4)<<18 | rune(c1&maskx)<<12 | rune(c2&maskx)<<6 | rune(c3&maskx)
+ if r <= rune3Max {
+ return RuneError, 1, false
+ }
+ return r, 4, false
+ }
+
+ // error
+ return RuneError, 1, false
+}
+
+// FullRune reports whether the bytes in p begin with a full UTF-8 encoding of a rune.
+// An invalid encoding is considered a full Rune since it will convert as a width-1 error rune.
+func FullRune(p []byte) bool {
+ _, _, short := decodeRuneInternal(p)
+ return !short
+}
+
+// FullRuneInString is like FullRune but its input is a string.
+func FullRuneInString(s string) bool {
+ _, _, short := decodeRuneInStringInternal(s)
+ return !short
+}
+
+// DecodeRune unpacks the first UTF-8 encoding in p and returns the rune and its width in bytes.
+func DecodeRune(p []byte) (r rune, size int) {
+ r, size, _ = decodeRuneInternal(p)
+ return
+}
+
+// DecodeRuneInString is like DecodeRune but its input is a string.
+func DecodeRuneInString(s string) (r rune, size int) {
+ r, size, _ = decodeRuneInStringInternal(s)
+ return
+}
+
+// DecodeLastRune unpacks the last UTF-8 encoding in p
+// and returns the rune and its width in bytes.
+func DecodeLastRune(p []byte) (r rune, size int) {
+ end := len(p)
+ if end == 0 {
+ return RuneError, 0
+ }
+ start := end - 1
+ r = rune(p[start])
+ if r < RuneSelf {
+ return r, 1
+ }
+ // guard against O(n^2) behavior when traversing
+ // backwards through strings with long sequences of
+ // invalid UTF-8.
+ lim := end - UTFMax
+ if lim < 0 {
+ lim = 0
+ }
+ for start--; start >= lim; start-- {
+ if RuneStart(p[start]) {
+ break
+ }
+ }
+ if start < 0 {
+ start = 0
+ }
+ r, size = DecodeRune(p[start:end])
+ if start+size != end {
+ return RuneError, 1
+ }
+ return r, size
+}
+
+// DecodeLastRuneInString is like DecodeLastRune but its input is a string.
+func DecodeLastRuneInString(s string) (r rune, size int) {
+ end := len(s)
+ if end == 0 {
+ return RuneError, 0
+ }
+ start := end - 1
+ r = rune(s[start])
+ if r < RuneSelf {
+ return r, 1
+ }
+ // guard against O(n^2) behavior when traversing
+ // backwards through strings with long sequences of
+ // invalid UTF-8.
+ lim := end - UTFMax
+ if lim < 0 {
+ lim = 0
+ }
+ for start--; start >= lim; start-- {
+ if RuneStart(s[start]) {
+ break
+ }
+ }
+ if start < 0 {
+ start = 0
+ }
+ r, size = DecodeRuneInString(s[start:end])
+ if start+size != end {
+ return RuneError, 1
+ }
+ return r, size
+}
+
+// RuneLen returns the number of bytes required to encode the rune.
+func RuneLen(r rune) int {
+ switch {
+ case r <= rune1Max:
+ return 1
+ case r <= rune2Max:
+ return 2
+ case r <= rune3Max:
+ return 3
+ case r <= rune4Max:
+ return 4
+ }
+ return -1
+}
+
+// EncodeRune writes into p (which must be large enough) the UTF-8 encoding of the rune.
+// It returns the number of bytes written.
+func EncodeRune(p []byte, r rune) int {
+ // Negative values are erroneous. Making it unsigned addresses the problem.
+ if uint32(r) <= rune1Max {
+ p[0] = byte(r)
+ return 1
+ }
+
+ if uint32(r) <= rune2Max {
+ p[0] = t2 | byte(r>>6)
+ p[1] = tx | byte(r)&maskx
+ return 2
+ }
+
+ if uint32(r) > unicode.MaxRune {
+ r = RuneError
+ }
+
+ if uint32(r) <= rune3Max {
+ p[0] = t3 | byte(r>>12)
+ p[1] = tx | byte(r>>6)&maskx
+ p[2] = tx | byte(r)&maskx
+ return 3
+ }
+
+ p[0] = t4 | byte(r>>18)
+ p[1] = tx | byte(r>>12)&maskx
+ p[2] = tx | byte(r>>6)&maskx
+ p[3] = tx | byte(r)&maskx
+ return 4
+}
+
+// RuneCount returns the number of runes in p. Erroneous and short
+// encodings are treated as single runes of width 1 byte.
+func RuneCount(p []byte) int {
+ i := 0
+ var n int
+ for n = 0; i < len(p); n++ {
+ if p[i] < RuneSelf {
+ i++
+ } else {
+ _, size := DecodeRune(p[i:])
+ i += size
+ }
+ }
+ return n
+}
+
+// RuneCountInString is like RuneCount but its input is a string.
+func RuneCountInString(s string) (n int) {
+ for _ = range s {
+ n++
+ }
+ return
+}
+
+// RuneStart reports whether the byte could be the first byte of
+// an encoded rune. Second and subsequent bytes always have the top
+// two bits set to 10.
+func RuneStart(b byte) bool { return b&0xC0 != 0x80 }
+
+// Valid reports whether p consists entirely of valid UTF-8-encoded runes.
+func Valid(p []byte) bool {
+ i := 0
+ for i < len(p) {
+ if p[i] < RuneSelf {
+ i++
+ } else {
+ _, size := DecodeRune(p[i:])
+ if size == 1 {
+ // All valid runes of size 1 (those
+ // below RuneSelf) were handled above.
+ // This must be a RuneError.
+ return false
+ }
+ i += size
+ }
+ }
+ return true
+}
+
+// ValidString reports whether s consists entirely of valid UTF-8-encoded runes.
+func ValidString(s string) bool {
+ for i, r := range s {
+ if r == RuneError {
+ // The RuneError value can be an error
+ // sentinel value (if it's size 1) or the same
+ // value encoded properly. Decode it to see if
+ // it's the 1 byte sentinel value.
+ _, size := DecodeRuneInString(s[i:])
+ if size == 1 {
+ return false
+ }
+ }
+ }
+ return true
+}
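A brief, illustrative sketch (not part of the patch) of decoding a string rune by rune with the functions above, equivalent to a range loop over the string:

package main

import (
	"fmt"
	"unicode/utf8"
)

func main() {
	s := "日本語"
	for i := 0; i < len(s); {
		r, size := utf8.DecodeRuneInString(s[i:])
		fmt.Printf("%d: %c (%d bytes)\n", i, r, size)
		i += size
	}
	fmt.Println(utf8.RuneCountInString(s), utf8.ValidString(s)) // 3 true
}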
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package utf8_test
+
+import (
+ "bytes"
+ "testing"
+ . "unicode/utf8"
+)
+
+type Utf8Map struct {
+ r rune
+ str string
+}
+
+var utf8map = []Utf8Map{
+ {0x0000, "\x00"},
+ {0x0001, "\x01"},
+ {0x007e, "\x7e"},
+ {0x007f, "\x7f"},
+ {0x0080, "\xc2\x80"},
+ {0x0081, "\xc2\x81"},
+ {0x00bf, "\xc2\xbf"},
+ {0x00c0, "\xc3\x80"},
+ {0x00c1, "\xc3\x81"},
+ {0x00c8, "\xc3\x88"},
+ {0x00d0, "\xc3\x90"},
+ {0x00e0, "\xc3\xa0"},
+ {0x00f0, "\xc3\xb0"},
+ {0x00f8, "\xc3\xb8"},
+ {0x00ff, "\xc3\xbf"},
+ {0x0100, "\xc4\x80"},
+ {0x07ff, "\xdf\xbf"},
+ {0x0800, "\xe0\xa0\x80"},
+ {0x0801, "\xe0\xa0\x81"},
+ {0xfffe, "\xef\xbf\xbe"},
+ {0xffff, "\xef\xbf\xbf"},
+ {0x10000, "\xf0\x90\x80\x80"},
+ {0x10001, "\xf0\x90\x80\x81"},
+ {0x10fffe, "\xf4\x8f\xbf\xbe"},
+ {0x10ffff, "\xf4\x8f\xbf\xbf"},
+ {0xFFFD, "\xef\xbf\xbd"},
+}
+
+var testStrings = []string{
+ "",
+ "abcd",
+ "☺☻☹",
+ "日a本b語ç日ð本Ê語þ日¥本¼語i日©",
+ "日a本b語ç日ð本Ê語þ日¥本¼語i日©日a本b語ç日ð本Ê語þ日¥本¼語i日©日a本b語ç日ð本Ê語þ日¥本¼語i日©",
+ "\x80\x80\x80\x80",
+}
+
+func TestFullRune(t *testing.T) {
+ for i := 0; i < len(utf8map); i++ {
+ m := utf8map[i]
+ b := []byte(m.str)
+ if !FullRune(b) {
+ t.Errorf("FullRune(%q) (%U) = false, want true", b, m.r)
+ }
+ s := m.str
+ if !FullRuneInString(s) {
+ t.Errorf("FullRuneInString(%q) (%U) = false, want true", s, m.r)
+ }
+ b1 := b[0 : len(b)-1]
+ if FullRune(b1) {
+ t.Errorf("FullRune(%q) = true, want false", b1)
+ }
+ s1 := string(b1)
+ if FullRuneInString(s1) {
+ t.Errorf("FullRune(%q) = true, want false", s1)
+ }
+ }
+}
+
+func TestEncodeRune(t *testing.T) {
+ for i := 0; i < len(utf8map); i++ {
+ m := utf8map[i]
+ b := []byte(m.str)
+ var buf [10]byte
+ n := EncodeRune(buf[0:], m.r)
+ b1 := buf[0:n]
+ if !bytes.Equal(b, b1) {
+ t.Errorf("EncodeRune(%#04x) = %q want %q", m.r, b1, b)
+ }
+ }
+}
+
+func TestDecodeRune(t *testing.T) {
+ for i := 0; i < len(utf8map); i++ {
+ m := utf8map[i]
+ b := []byte(m.str)
+ r, size := DecodeRune(b)
+ if r != m.r || size != len(b) {
+ t.Errorf("DecodeRune(%q) = %#04x, %d want %#04x, %d", b, r, size, m.r, len(b))
+ }
+ s := m.str
+ r, size = DecodeRuneInString(s)
+ if r != m.r || size != len(b) {
+ t.Errorf("DecodeRune(%q) = %#04x, %d want %#04x, %d", s, r, size, m.r, len(b))
+ }
+
+ // make sure decoding still works with a trailing byte after the rune: spare capacity in the byte slice, an appended NUL in the string
+ r, size = DecodeRune(b[0:cap(b)])
+ if r != m.r || size != len(b) {
+ t.Errorf("DecodeRune(%q) = %#04x, %d want %#04x, %d", b, r, size, m.r, len(b))
+ }
+ s = m.str + "\x00"
+ r, size = DecodeRuneInString(s)
+ if r != m.r || size != len(b) {
+ t.Errorf("DecodeRuneInString(%q) = %#04x, %d want %#04x, %d", s, r, size, m.r, len(b))
+ }
+
+ // make sure missing bytes fail
+ wantsize := 1
+ if wantsize >= len(b) {
+ wantsize = 0
+ }
+ r, size = DecodeRune(b[0 : len(b)-1])
+ if r != RuneError || size != wantsize {
+ t.Errorf("DecodeRune(%q) = %#04x, %d want %#04x, %d", b[0:len(b)-1], r, size, RuneError, wantsize)
+ }
+ s = m.str[0 : len(m.str)-1]
+ r, size = DecodeRuneInString(s)
+ if r != RuneError || size != wantsize {
+ t.Errorf("DecodeRuneInString(%q) = %#04x, %d want %#04x, %d", s, r, size, RuneError, wantsize)
+ }
+
+ // make sure bad sequences fail
+ if len(b) == 1 {
+ b[0] = 0x80
+ } else {
+ b[len(b)-1] = 0x7F
+ }
+ r, size = DecodeRune(b)
+ if r != RuneError || size != 1 {
+ t.Errorf("DecodeRune(%q) = %#04x, %d want %#04x, %d", b, r, size, RuneError, 1)
+ }
+ s = string(b)
+ r, size = DecodeRuneInString(s)
+ if r != RuneError || size != 1 {
+ t.Errorf("DecodeRuneInString(%q) = %#04x, %d want %#04x, %d", s, r, size, RuneError, 1)
+ }
+
+ }
+}
+
+// Check that DecodeRune and DecodeLastRune correspond to
+// the equivalent range loop.
+func TestSequencing(t *testing.T) {
+ for _, ts := range testStrings {
+ for _, m := range utf8map {
+ for _, s := range []string{ts + m.str, m.str + ts, ts + m.str + ts} {
+ testSequence(t, s)
+ }
+ }
+ }
+}
+
+// Check that a range loop and a []rune conversion visit the same runes.
+// Not really a test of this package, but the assumption is used here and
+// it's good to verify.
+func TestIntConversion(t *testing.T) {
+ for _, ts := range testStrings {
+ runes := []rune(ts)
+ if RuneCountInString(ts) != len(runes) {
+ t.Errorf("%q: expected %d runes; got %d", ts, len(runes), RuneCountInString(ts))
+ break
+ }
+ i := 0
+ for _, r := range ts {
+ if r != runes[i] {
+ t.Errorf("%q[%d]: expected %c (%U); got %c (%U)", ts, i, runes[i], runes[i], r, r)
+ }
+ i++
+ }
+ }
+}
+
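+ // testSequence decodes s forwards and backwards and compares the results
+ // with a range loop over the string.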
+func testSequence(t *testing.T, s string) {
+ type info struct {
+ index int
+ r rune
+ }
+ index := make([]info, len(s))
+ b := []byte(s)
+ si := 0
+ j := 0
+ for i, r := range s {
+ if si != i {
+ t.Errorf("Sequence(%q) mismatched index %d, want %d", s, si, i)
+ return
+ }
+ index[j] = info{i, r}
+ j++
+ r1, size1 := DecodeRune(b[i:])
+ if r != r1 {
+ t.Errorf("DecodeRune(%q) = %#04x, want %#04x", s[i:], r1, r)
+ return
+ }
+ r2, size2 := DecodeRuneInString(s[i:])
+ if r != r2 {
+ t.Errorf("DecodeRuneInString(%q) = %#04x, want %#04x", s[i:], r2, r)
+ return
+ }
+ if size1 != size2 {
+ t.Errorf("DecodeRune/DecodeRuneInString(%q) size mismatch %d/%d", s[i:], size1, size2)
+ return
+ }
+ si += size1
+ }
+ j--
+ for si = len(s); si > 0; {
+ r1, size1 := DecodeLastRune(b[0:si])
+ r2, size2 := DecodeLastRuneInString(s[0:si])
+ if size1 != size2 {
+ t.Errorf("DecodeLastRune/DecodeLastRuneInString(%q, %d) size mismatch %d/%d", s, si, size1, size2)
+ return
+ }
+ if r1 != index[j].r {
+ t.Errorf("DecodeLastRune(%q, %d) = %#04x, want %#04x", s, si, r1, index[j].r)
+ return
+ }
+ if r2 != index[j].r {
+ t.Errorf("DecodeLastRuneInString(%q, %d) = %#04x, want %#04x", s, si, r2, index[j].r)
+ return
+ }
+ si -= size1
+ if si != index[j].index {
+ t.Errorf("DecodeLastRune(%q) index mismatch at %d, want %d", s, si, index[j].index)
+ return
+ }
+ j--
+ }
+ if si != 0 {
+ t.Errorf("DecodeLastRune(%q) finished at %d, not 0", s, si)
+ }
+}
+
+// Check that negative runes encode as U+FFFD.
+func TestNegativeRune(t *testing.T) {
+ errorbuf := make([]byte, UTFMax)
+ errorbuf = errorbuf[0:EncodeRune(errorbuf, RuneError)]
+ buf := make([]byte, UTFMax)
+ buf = buf[0:EncodeRune(buf, -1)]
+ if !bytes.Equal(buf, errorbuf) {
+ t.Errorf("incorrect encoding [% x] for -1; expected [% x]", buf, errorbuf)
+ }
+}
+
+type RuneCountTest struct {
+ in string
+ out int
+}
+
+var runecounttests = []RuneCountTest{
+ {"abcd", 4},
+ {"☺☻☹", 3},
+ {"1,2,3,4", 7},
+ {"\xe2\x00", 2},
+}
+
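+ // Check that RuneCount and RuneCountInString agree with the expected counts in runecounttests.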
+func TestRuneCount(t *testing.T) {
+ for i := 0; i < len(runecounttests); i++ {
+ tt := runecounttests[i]
+ if out := RuneCountInString(tt.in); out != tt.out {
+ t.Errorf("RuneCountInString(%q) = %d, want %d", tt.in, out, tt.out)
+ }
+ if out := RuneCount([]byte(tt.in)); out != tt.out {
+ t.Errorf("RuneCount(%q) = %d, want %d", tt.in, out, tt.out)
+ }
+ }
+}
+
+type ValidTest struct {
+ in string
+ out bool
+}
+
+var validTests = []ValidTest{
+ {"", true},
+ {"a", true},
+ {"abc", true},
+ {"Ж", true},
+ {"ЖЖ", true},
+ {"брэд-ЛГТМ", true},
+ {"☺☻☹", true},
+ {string([]byte{66, 250}), false},
+ {string([]byte{66, 250, 67}), false},
+ {"a\uFFFDb", true},
+}
+
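+ // Check that Valid and ValidString classify the validTests inputs correctly.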
+func TestValid(t *testing.T) {
+ for i, tt := range validTests {
+ if Valid([]byte(tt.in)) != tt.out {
+ t.Errorf("%d. Valid(%q) = %v; want %v", i, tt.in, !tt.out, tt.out)
+ }
+ if ValidString(tt.in) != tt.out {
+ t.Errorf("%d. ValidString(%q) = %v; want %v", i, tt.in, !tt.out, tt.out)
+ }
+ }
+}
+
+func BenchmarkRuneCountTenASCIIChars(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ RuneCountInString("0123456789")
+ }
+}
+
+func BenchmarkRuneCountTenJapaneseChars(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ RuneCountInString("日本語日本語日本語日")
+ }
+}
+
+func BenchmarkEncodeASCIIRune(b *testing.B) {
+ buf := make([]byte, UTFMax)
+ for i := 0; i < b.N; i++ {
+ EncodeRune(buf, 'a')
+ }
+}
+
+func BenchmarkEncodeJapaneseRune(b *testing.B) {
+ buf := make([]byte, UTFMax)
+ for i := 0; i < b.N; i++ {
+ EncodeRune(buf, '本')
+ }
+}
+
+func BenchmarkDecodeASCIIRune(b *testing.B) {
+ a := []byte{'a'}
+ for i := 0; i < b.N; i++ {
+ DecodeRune(a)
+ }
+}
+
+func BenchmarkDecodeJapaneseRune(b *testing.B) {
+ nihon := []byte("本")
+ for i := 0; i < b.N; i++ {
+ DecodeRune(nihon)
+ }
+}
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package URL parses URLs and implements query escaping.
-// See RFC 3986.
-package url
-
-import (
- "errors"
- "strconv"
- "strings"
-)
-
-// Error reports an error and the operation and URL that caused it.
-type Error struct {
- Op string
- URL string
- Err error
-}
-
-func (e *Error) Error() string { return e.Op + " " + e.URL + ": " + e.Err.Error() }
-
-func ishex(c byte) bool {
- switch {
- case '0' <= c && c <= '9':
- return true
- case 'a' <= c && c <= 'f':
- return true
- case 'A' <= c && c <= 'F':
- return true
- }
- return false
-}
-
-func unhex(c byte) byte {
- switch {
- case '0' <= c && c <= '9':
- return c - '0'
- case 'a' <= c && c <= 'f':
- return c - 'a' + 10
- case 'A' <= c && c <= 'F':
- return c - 'A' + 10
- }
- return 0
-}
-
-type encoding int
-
-const (
- encodePath encoding = 1 + iota
- encodeUserPassword
- encodeQueryComponent
- encodeFragment
- encodeOpaque
-)
-
-type EscapeError string
-
-func (e EscapeError) Error() string {
- return "invalid URL escape " + strconv.Quote(string(e))
-}
-
-// Return true if the specified character should be escaped when
-// appearing in a URL string, according to RFC 2396.
-// When 'all' is true the full range of reserved characters are matched.
-func shouldEscape(c byte, mode encoding) bool {
- // RFC 2396 §2.3 Unreserved characters (alphanum)
- if 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' {
- return false
- }
- switch c {
- case '-', '_', '.', '!', '~', '*', '\'', '(', ')': // §2.3 Unreserved characters (mark)
- return false
-
- case '$', '&', '+', ',', '/', ':', ';', '=', '?', '@': // §2.2 Reserved characters (reserved)
- // Different sections of the URL allow a few of
- // the reserved characters to appear unescaped.
- switch mode {
- case encodePath: // §3.3
- // The RFC allows : @ & = + $ , but saves / ; for assigning
- // meaning to individual path segments. This package
- // only manipulates the path as a whole, so we allow those
- // last two as well. Clients that need to distinguish between
- // `/foo;y=z/bar` and `/foo%3by=z/bar` will have to re-decode RawPath.
- // That leaves only ? to escape.
- return c == '?'
-
- case encodeUserPassword: // §3.2.2
- // The RFC allows ; : & = + $ , in userinfo, so we must escape only @ and /.
- // The parsing of userinfo treats : as special so we must escape that too.
- return c == '@' || c == '/' || c == ':'
-
- case encodeQueryComponent: // §3.4
- // The RFC reserves (so we must escape) everything.
- return true
-
- case encodeFragment: // §4.1
- // The RFC text is silent but the grammar allows
- // everything, so escape nothing.
- return false
-
- case encodeOpaque: // §3 opaque_part
- // The RFC allows opaque_part to use all characters
- // except that the leading / must be escaped.
- // (We implement that case in String.)
- return false
- }
- }
-
- // Everything else must be escaped.
- return true
-}
-
-// QueryUnescape does the inverse transformation of QueryEscape, converting
-// %AB into the byte 0xAB and '+' into ' ' (space). It returns an error if
-// any % is not followed by two hexadecimal digits.
-func QueryUnescape(s string) (string, error) {
- return unescape(s, encodeQueryComponent)
-}
-
-// unescape unescapes a string; the mode specifies
-// which section of the URL string is being unescaped.
-func unescape(s string, mode encoding) (string, error) {
- // Count %, check that they're well-formed.
- n := 0
- hasPlus := false
- for i := 0; i < len(s); {
- switch s[i] {
- case '%':
- n++
- if i+2 >= len(s) || !ishex(s[i+1]) || !ishex(s[i+2]) {
- s = s[i:]
- if len(s) > 3 {
- s = s[0:3]
- }
- return "", EscapeError(s)
- }
- i += 3
- case '+':
- hasPlus = mode == encodeQueryComponent
- i++
- default:
- i++
- }
- }
-
- if n == 0 && !hasPlus {
- return s, nil
- }
-
- t := make([]byte, len(s)-2*n)
- j := 0
- for i := 0; i < len(s); {
- switch s[i] {
- case '%':
- t[j] = unhex(s[i+1])<<4 | unhex(s[i+2])
- j++
- i += 3
- case '+':
- if mode == encodeQueryComponent {
- t[j] = ' '
- } else {
- t[j] = '+'
- }
- j++
- i++
- default:
- t[j] = s[i]
- j++
- i++
- }
- }
- return string(t), nil
-}
-
-// QueryEscape escapes the string so it can be safely placed
-// inside a URL query.
-func QueryEscape(s string) string {
- return escape(s, encodeQueryComponent)
-}
-
-func escape(s string, mode encoding) string {
- spaceCount, hexCount := 0, 0
- for i := 0; i < len(s); i++ {
- c := s[i]
- if shouldEscape(c, mode) {
- if c == ' ' && mode == encodeQueryComponent {
- spaceCount++
- } else {
- hexCount++
- }
- }
- }
-
- if spaceCount == 0 && hexCount == 0 {
- return s
- }
-
- t := make([]byte, len(s)+2*hexCount)
- j := 0
- for i := 0; i < len(s); i++ {
- switch c := s[i]; {
- case c == ' ' && mode == encodeQueryComponent:
- t[j] = '+'
- j++
- case shouldEscape(c, mode):
- t[j] = '%'
- t[j+1] = "0123456789ABCDEF"[c>>4]
- t[j+2] = "0123456789ABCDEF"[c&15]
- j += 3
- default:
- t[j] = s[i]
- j++
- }
- }
- return string(t)
-}
-
-// UnescapeUserinfo parses the RawUserinfo field of a URL
-// as the form user or user:password and unescapes and returns
-// the two halves.
-//
-// This functionality should only be used with legacy web sites.
-// RFC 2396 warns that interpreting Userinfo this way
-// ``is NOT RECOMMENDED, because the passing of authentication
-// information in clear text (such as URI) has proven to be a
-// security risk in almost every case where it has been used.''
-func UnescapeUserinfo(rawUserinfo string) (user, password string, err error) {
- u, p := split(rawUserinfo, ':', true)
- if user, err = unescape(u, encodeUserPassword); err != nil {
- return "", "", err
- }
- if password, err = unescape(p, encodeUserPassword); err != nil {
- return "", "", err
- }
- return
-}
-
-// EscapeUserinfo combines user and password in the form
-// user:password (or just user if password is empty) and then
-// escapes it for use as the URL.RawUserinfo field.
-//
-// This functionality should only be used with legacy web sites.
-// RFC 2396 warns that interpreting Userinfo this way
-// ``is NOT RECOMMENDED, because the passing of authentication
-// information in clear text (such as URI) has proven to be a
-// security risk in almost every case where it has been used.''
-func EscapeUserinfo(user, password string) string {
- raw := escape(user, encodeUserPassword)
- if password != "" {
- raw += ":" + escape(password, encodeUserPassword)
- }
- return raw
-}
-
-// A URL represents a parsed URL (technically, a URI reference).
-// The general form represented is:
-// scheme://[userinfo@]host/path[?query][#fragment]
-// The Raw, RawAuthority, RawPath, and RawQuery fields are in "wire format"
-// (special characters must be hex-escaped if not meant to have special meaning).
-// All other fields are logical values; '+' or '%' represent themselves.
-//
-// The various Raw values are supplied in wire format because
-// clients typically have to split them into pieces before further
-// decoding.
-type URL struct {
- Raw string // the original string
- Scheme string // scheme
- RawAuthority string // [userinfo@]host
- RawUserinfo string // userinfo
- Host string // host
- RawPath string // /path[?query][#fragment]
- Path string // /path
- OpaquePath bool // path is opaque (unrooted when scheme is present)
- RawQuery string // query
- Fragment string // fragment
-}
-
-// Maybe rawurl is of the form scheme:path.
-// (Scheme must be [a-zA-Z][a-zA-Z0-9+-.]*)
-// If so, return scheme, path; else return "", rawurl.
-func getscheme(rawurl string) (scheme, path string, err error) {
- for i := 0; i < len(rawurl); i++ {
- c := rawurl[i]
- switch {
- case 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z':
- // do nothing
- case '0' <= c && c <= '9' || c == '+' || c == '-' || c == '.':
- if i == 0 {
- return "", rawurl, nil
- }
- case c == ':':
- if i == 0 {
- return "", "", errors.New("missing protocol scheme")
- }
- return rawurl[0:i], rawurl[i+1:], nil
- default:
- // we have encountered an invalid character,
- // so there is no valid scheme
- return "", rawurl, nil
- }
- }
- return "", rawurl, nil
-}
-
-// Maybe s is of the form t c u.
-// If so, return t, c u (or t, u if cutc == true).
-// If not, return s, "".
-func split(s string, c byte, cutc bool) (string, string) {
- for i := 0; i < len(s); i++ {
- if s[i] == c {
- if cutc {
- return s[0:i], s[i+1:]
- }
- return s[0:i], s[i:]
- }
- }
- return s, ""
-}
-
-// Parse parses rawurl into a URL structure.
-// The string rawurl is assumed not to have a #fragment suffix.
-// (Web browsers strip #fragment before sending the URL to a web server.)
-// The rawurl may be relative or absolute.
-func Parse(rawurl string) (url *URL, err error) {
- return parse(rawurl, false)
-}
-
-// ParseRequest parses rawurl into a URL structure. It assumes that
-// rawurl was received from an HTTP request, so the rawurl is interpreted
-// only as an absolute URI or an absolute path.
-// The string rawurl is assumed not to have a #fragment suffix.
-// (Web browsers strip #fragment before sending the URL to a web server.)
-func ParseRequest(rawurl string) (url *URL, err error) {
- return parse(rawurl, true)
-}
-
-// parse parses a URL from a string in one of two contexts. If
-// viaRequest is true, the URL is assumed to have arrived via an HTTP request,
-// in which case only absolute URLs or path-absolute relative URLs are allowed.
-// If viaRequest is false, all forms of relative URLs are allowed.
-func parse(rawurl string, viaRequest bool) (url *URL, err error) {
- var (
- leadingSlash bool
- path string
- )
-
- if rawurl == "" {
- err = errors.New("empty url")
- goto Error
- }
- url = new(URL)
- url.Raw = rawurl
-
- // Split off possible leading "http:", "mailto:", etc.
- // Cannot contain escaped characters.
- if url.Scheme, path, err = getscheme(rawurl); err != nil {
- goto Error
- }
- leadingSlash = strings.HasPrefix(path, "/")
-
- if url.Scheme != "" && !leadingSlash {
- // RFC 2396:
- // Absolute URI (has scheme) with non-rooted path
- // is uninterpreted. It doesn't even have a ?query.
- // This is the case that handles mailto:name@example.com.
- url.RawPath = path
-
- if url.Path, err = unescape(path, encodeOpaque); err != nil {
- goto Error
- }
- url.OpaquePath = true
- } else {
- if viaRequest && !leadingSlash {
- err = errors.New("invalid URI for request")
- goto Error
- }
-
- // Split off query before parsing path further.
- url.RawPath = path
- path, query := split(path, '?', false)
- if len(query) > 1 {
- url.RawQuery = query[1:]
- }
-
- // Maybe path is //authority/path
- if (url.Scheme != "" || !viaRequest) &&
- strings.HasPrefix(path, "//") && !strings.HasPrefix(path, "///") {
- url.RawAuthority, path = split(path[2:], '/', false)
- url.RawPath = url.RawPath[2+len(url.RawAuthority):]
- }
-
- // Split authority into userinfo@host.
- // If there's no @, split's default is wrong. Check explicitly.
- var rawHost string
- if strings.Index(url.RawAuthority, "@") < 0 {
- rawHost = url.RawAuthority
- } else {
- url.RawUserinfo, rawHost = split(url.RawAuthority, '@', true)
- }
-
- // We leave RawAuthority only in raw form because clients
- // of common protocols should be using Userinfo and Host
- // instead. Clients that wish to use RawAuthority will have to
- // interpret it themselves: RFC 2396 does not define the meaning.
-
- if strings.Contains(rawHost, "%") {
- // Host cannot contain escaped characters.
- err = errors.New("hexadecimal escape in host")
- goto Error
- }
- url.Host = rawHost
-
- if url.Path, err = unescape(path, encodePath); err != nil {
- goto Error
- }
- }
- return url, nil
-
-Error:
- return nil, &Error{"parse", rawurl, err}
-
-}
-
-// ParseWithReference is like Parse but allows a trailing #fragment.
-func ParseWithReference(rawurlref string) (url *URL, err error) {
- // Cut off #frag.
- rawurl, frag := split(rawurlref, '#', false)
- if url, err = Parse(rawurl); err != nil {
- return nil, err
- }
- url.Raw += frag
- url.RawPath += frag
- if len(frag) > 1 {
- frag = frag[1:]
- if url.Fragment, err = unescape(frag, encodeFragment); err != nil {
- return nil, &Error{"parse", rawurl, err}
- }
- }
- return url, nil
-}
-
-// String reassembles url into a valid URL string.
-//
-// There are redundant fields stored in the URL structure:
-// the String method consults Scheme, Path, Host, RawUserinfo,
-// RawQuery, and Fragment, but not Raw, RawPath or RawAuthority.
-func (url *URL) String() string {
- result := ""
- if url.Scheme != "" {
- result += url.Scheme + ":"
- }
- if url.Host != "" || url.RawUserinfo != "" {
- result += "//"
- if url.RawUserinfo != "" {
- // hide the password, if any
- info := url.RawUserinfo
- if i := strings.Index(info, ":"); i >= 0 {
- info = info[0:i] + ":******"
- }
- result += info + "@"
- }
- result += url.Host
- }
- if url.OpaquePath {
- path := url.Path
- if strings.HasPrefix(path, "/") {
- result += "%2f"
- path = path[1:]
- }
- result += escape(path, encodeOpaque)
- } else {
- result += escape(url.Path, encodePath)
- }
- if url.RawQuery != "" {
- result += "?" + url.RawQuery
- }
- if url.Fragment != "" {
- result += "#" + escape(url.Fragment, encodeFragment)
- }
- return result
-}
-
-// Values maps a string key to a list of values.
-// It is typically used for query parameters and form values.
-// Unlike in the http.Header map, the keys in a Values map
-// are case-sensitive.
-type Values map[string][]string
-
-// Get gets the first value associated with the given key.
-// If there are no values associated with the key, Get returns
-// the empty string. To access multiple values, use the map
-// directly.
-func (v Values) Get(key string) string {
- if v == nil {
- return ""
- }
- vs, ok := v[key]
- if !ok || len(vs) == 0 {
- return ""
- }
- return vs[0]
-}
-
-// Set sets the key to value. It replaces any existing
-// values.
-func (v Values) Set(key, value string) {
- v[key] = []string{value}
-}
-
-// Add adds the key to value. It appends to any existing
-// values associated with key.
-func (v Values) Add(key, value string) {
- v[key] = append(v[key], value)
-}
-
-// Del deletes the values associated with key.
-func (v Values) Del(key string) {
- delete(v, key)
-}
-
-// ParseQuery parses the URL-encoded query string and returns
-// a map listing the values specified for each key.
-// ParseQuery always returns a non-nil map containing all the
-// valid query parameters found; err describes the first decoding error
-// encountered, if any.
-func ParseQuery(query string) (m Values, err error) {
- m = make(Values)
- err = parseQuery(m, query)
- return
-}
-
-func parseQuery(m Values, query string) (err error) {
- for query != "" {
- key := query
- if i := strings.IndexAny(key, "&;"); i >= 0 {
- key, query = key[:i], key[i+1:]
- } else {
- query = ""
- }
- if key == "" {
- continue
- }
- value := ""
- if i := strings.Index(key, "="); i >= 0 {
- key, value = key[:i], key[i+1:]
- }
- key, err1 := QueryUnescape(key)
- if err1 != nil {
- err = err1
- continue
- }
- value, err1 = QueryUnescape(value)
- if err1 != nil {
- err = err1
- continue
- }
- m[key] = append(m[key], value)
- }
- return err
-}
-
-// Encode encodes the values into ``URL encoded'' form.
-// e.g. "foo=bar&bar=baz"
-func (v Values) Encode() string {
- if v == nil {
- return ""
- }
- parts := make([]string, 0, len(v)) // will be large enough for most uses
- for k, vs := range v {
- prefix := QueryEscape(k) + "="
- for _, v := range vs {
- parts = append(parts, prefix+QueryEscape(v))
- }
- }
- return strings.Join(parts, "&")
-}
-
-// resolvePath applies special path segments from refs and applies
-// them to base, per RFC 2396.
-func resolvePath(basepath string, refpath string) string {
- base := strings.Split(basepath, "/")
- refs := strings.Split(refpath, "/")
- if len(base) == 0 {
- base = []string{""}
- }
- for idx, ref := range refs {
- switch {
- case ref == ".":
- base[len(base)-1] = ""
- case ref == "..":
- newLen := len(base) - 1
- if newLen < 1 {
- newLen = 1
- }
- base = base[0:newLen]
- base[len(base)-1] = ""
- default:
- if idx == 0 || base[len(base)-1] == "" {
- base[len(base)-1] = ref
- } else {
- base = append(base, ref)
- }
- }
- }
- return strings.Join(base, "/")
-}
-
-// IsAbs returns true if the URL is absolute.
-func (url *URL) IsAbs() bool {
- return url.Scheme != ""
-}
-
-// Parse parses a URL in the context of a base URL. The URL in ref
-// may be relative or absolute. Parse returns nil, err on parse
-// failure, otherwise its return value is the same as ResolveReference.
-func (base *URL) Parse(ref string) (*URL, error) {
- refurl, err := Parse(ref)
- if err != nil {
- return nil, err
- }
- return base.ResolveReference(refurl), nil
-}
-
-// ResolveReference resolves a URI reference to an absolute URI from
-// an absolute base URI, per RFC 2396 Section 5.2. The URI reference
-// may be relative or absolute. ResolveReference always returns a new
-// URL instance, even if the returned URL is identical to either the
-// base or reference. If ref is an absolute URL, then ResolveReference
-// ignores base and returns a copy of ref.
-func (base *URL) ResolveReference(ref *URL) *URL {
- url := new(URL)
- switch {
- case ref.IsAbs():
- *url = *ref
- default:
- // relativeURI = ( net_path | abs_path | rel_path ) [ "?" query ]
- *url = *base
- if ref.RawAuthority != "" {
- // The "net_path" case.
- url.RawAuthority = ref.RawAuthority
- url.Host = ref.Host
- url.RawUserinfo = ref.RawUserinfo
- }
- switch {
- case url.OpaquePath:
- url.Path = ref.Path
- url.RawPath = ref.RawPath
- url.RawQuery = ref.RawQuery
- case strings.HasPrefix(ref.Path, "/"):
- // The "abs_path" case.
- url.Path = ref.Path
- url.RawPath = ref.RawPath
- url.RawQuery = ref.RawQuery
- default:
- // The "rel_path" case.
- path := resolvePath(base.Path, ref.Path)
- if !strings.HasPrefix(path, "/") {
- path = "/" + path
- }
- url.Path = path
- url.RawPath = url.Path
- url.RawQuery = ref.RawQuery
- if ref.RawQuery != "" {
- url.RawPath += "?" + url.RawQuery
- }
- }
-
- url.Fragment = ref.Fragment
- }
- url.Raw = url.String()
- return url
-}
-
-// Query parses RawQuery and returns the corresponding values.
-func (u *URL) Query() Values {
- v, _ := ParseQuery(u.RawQuery)
- return v
-}
-
-// EncodedPath returns the URL's path in "URL path encoded" form.
-func (u *URL) EncodedPath() string {
- return escape(u.Path, encodePath)
-}
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package url
-
-import (
- "fmt"
- "reflect"
- "testing"
-)
-
-type URLTest struct {
- in string
- out *URL
- roundtrip string // expected result of reserializing the URL; empty means same as "in".
-}
-
-var urltests = []URLTest{
- // no path
- {
- "http://www.google.com",
- &URL{
- Raw: "http://www.google.com",
- Scheme: "http",
- RawAuthority: "www.google.com",
- Host: "www.google.com",
- },
- "",
- },
- // path
- {
- "http://www.google.com/",
- &URL{
- Raw: "http://www.google.com/",
- Scheme: "http",
- RawAuthority: "www.google.com",
- Host: "www.google.com",
- RawPath: "/",
- Path: "/",
- },
- "",
- },
- // path with hex escaping
- {
- "http://www.google.com/file%20one%26two",
- &URL{
- Raw: "http://www.google.com/file%20one%26two",
- Scheme: "http",
- RawAuthority: "www.google.com",
- Host: "www.google.com",
- RawPath: "/file%20one%26two",
- Path: "/file one&two",
- },
- "http://www.google.com/file%20one&two",
- },
- // user
- {
- "ftp://webmaster@www.google.com/",
- &URL{
- Raw: "ftp://webmaster@www.google.com/",
- Scheme: "ftp",
- RawAuthority: "webmaster@www.google.com",
- RawUserinfo: "webmaster",
- Host: "www.google.com",
- RawPath: "/",
- Path: "/",
- },
- "",
- },
- // escape sequence in username
- {
- "ftp://john%20doe@www.google.com/",
- &URL{
- Raw: "ftp://john%20doe@www.google.com/",
- Scheme: "ftp",
- RawAuthority: "john%20doe@www.google.com",
- RawUserinfo: "john%20doe",
- Host: "www.google.com",
- RawPath: "/",
- Path: "/",
- },
- "ftp://john%20doe@www.google.com/",
- },
- // query
- {
- "http://www.google.com/?q=go+language",
- &URL{
- Raw: "http://www.google.com/?q=go+language",
- Scheme: "http",
- RawAuthority: "www.google.com",
- Host: "www.google.com",
- RawPath: "/?q=go+language",
- Path: "/",
- RawQuery: "q=go+language",
- },
- "",
- },
- // query with hex escaping: NOT parsed
- {
- "http://www.google.com/?q=go%20language",
- &URL{
- Raw: "http://www.google.com/?q=go%20language",
- Scheme: "http",
- RawAuthority: "www.google.com",
- Host: "www.google.com",
- RawPath: "/?q=go%20language",
- Path: "/",
- RawQuery: "q=go%20language",
- },
- "",
- },
- // %20 outside query
- {
- "http://www.google.com/a%20b?q=c+d",
- &URL{
- Raw: "http://www.google.com/a%20b?q=c+d",
- Scheme: "http",
- RawAuthority: "www.google.com",
- Host: "www.google.com",
- RawPath: "/a%20b?q=c+d",
- Path: "/a b",
- RawQuery: "q=c+d",
- },
- "",
- },
- // path without leading /, so no query parsing
- {
- "http:www.google.com/?q=go+language",
- &URL{
- Raw: "http:www.google.com/?q=go+language",
- Scheme: "http",
- RawPath: "www.google.com/?q=go+language",
- Path: "www.google.com/?q=go+language",
- OpaquePath: true,
- },
- "http:www.google.com/?q=go+language",
- },
- // path without leading /, so no query parsing
- {
- "http:%2f%2fwww.google.com/?q=go+language",
- &URL{
- Raw: "http:%2f%2fwww.google.com/?q=go+language",
- Scheme: "http",
- RawPath: "%2f%2fwww.google.com/?q=go+language",
- Path: "//www.google.com/?q=go+language",
- OpaquePath: true,
- },
- "http:%2f/www.google.com/?q=go+language",
- },
- // non-authority
- {
- "mailto:/webmaster@golang.org",
- &URL{
- Raw: "mailto:/webmaster@golang.org",
- Scheme: "mailto",
- RawPath: "/webmaster@golang.org",
- Path: "/webmaster@golang.org",
- },
- "",
- },
- // non-authority
- {
- "mailto:webmaster@golang.org",
- &URL{
- Raw: "mailto:webmaster@golang.org",
- Scheme: "mailto",
- RawPath: "webmaster@golang.org",
- Path: "webmaster@golang.org",
- OpaquePath: true,
- },
- "",
- },
- // unescaped :// in query should not create a scheme
- {
- "/foo?query=http://bad",
- &URL{
- Raw: "/foo?query=http://bad",
- RawPath: "/foo?query=http://bad",
- Path: "/foo",
- RawQuery: "query=http://bad",
- },
- "",
- },
- // leading // without scheme should create an authority
- {
- "//foo",
- &URL{
- RawAuthority: "foo",
- Raw: "//foo",
- Host: "foo",
- Scheme: "",
- RawPath: "",
- Path: "",
- },
- "",
- },
- // leading // without scheme, with userinfo, path, and query
- {
- "//user@foo/path?a=b",
- &URL{
- Raw: "//user@foo/path?a=b",
- RawAuthority: "user@foo",
- RawUserinfo: "user",
- Scheme: "",
- RawPath: "/path?a=b",
- Path: "/path",
- RawQuery: "a=b",
- Host: "foo",
- },
- "",
- },
- // Three leading slashes isn't an authority, but doesn't return an error.
- // (We can't return an error, as this code is also used via
- // ServeHTTP -> ReadRequest -> Parse, which is arguably a
- // different URL parsing context, but currently shares the
- // same codepath)
- {
- "///threeslashes",
- &URL{
- RawAuthority: "",
- Raw: "///threeslashes",
- Host: "",
- Scheme: "",
- RawPath: "///threeslashes",
- Path: "///threeslashes",
- },
- "",
- },
- {
- "http://user:password@google.com",
- &URL{
- Raw: "http://user:password@google.com",
- Scheme: "http",
- RawAuthority: "user:password@google.com",
- RawUserinfo: "user:password",
- Host: "google.com",
- },
- "http://user:******@google.com",
- },
- {
- "http://user:longerpass@google.com",
- &URL{
- Raw: "http://user:longerpass@google.com",
- Scheme: "http",
- RawAuthority: "user:longerpass@google.com",
- RawUserinfo: "user:longerpass",
- Host: "google.com",
- },
- "http://user:******@google.com",
- },
-}
-
-var urlnofragtests = []URLTest{
- {
- "http://www.google.com/?q=go+language#foo",
- &URL{
- Raw: "http://www.google.com/?q=go+language#foo",
- Scheme: "http",
- RawAuthority: "www.google.com",
- Host: "www.google.com",
- RawPath: "/?q=go+language#foo",
- Path: "/",
- RawQuery: "q=go+language#foo",
- },
- "",
- },
-}
-
-var urlfragtests = []URLTest{
- {
- "http://www.google.com/?q=go+language#foo",
- &URL{
- Raw: "http://www.google.com/?q=go+language#foo",
- Scheme: "http",
- RawAuthority: "www.google.com",
- Host: "www.google.com",
- RawPath: "/?q=go+language#foo",
- Path: "/",
- RawQuery: "q=go+language",
- Fragment: "foo",
- },
- "",
- },
- {
- "http://www.google.com/?q=go+language#foo%26bar",
- &URL{
- Raw: "http://www.google.com/?q=go+language#foo%26bar",
- Scheme: "http",
- RawAuthority: "www.google.com",
- Host: "www.google.com",
- RawPath: "/?q=go+language#foo%26bar",
- Path: "/",
- RawQuery: "q=go+language",
- Fragment: "foo&bar",
- },
- "http://www.google.com/?q=go+language#foo&bar",
- },
-}
-
-// more useful string for debugging than fmt's struct printer
-func ufmt(u *URL) string {
- return fmt.Sprintf("raw=%q, scheme=%q, rawpath=%q, auth=%q, userinfo=%q, host=%q, path=%q, rawq=%q, frag=%q",
- u.Raw, u.Scheme, u.RawPath, u.RawAuthority, u.RawUserinfo,
- u.Host, u.Path, u.RawQuery, u.Fragment)
-}
-
-func DoTest(t *testing.T, parse func(string) (*URL, error), name string, tests []URLTest) {
- for _, tt := range tests {
- u, err := parse(tt.in)
- if err != nil {
- t.Errorf("%s(%q) returned error %s", name, tt.in, err)
- continue
- }
- if !reflect.DeepEqual(u, tt.out) {
- t.Errorf("%s(%q):\n\thave %v\n\twant %v\n",
- name, tt.in, ufmt(u), ufmt(tt.out))
- }
- }
-}
-
-func TestParse(t *testing.T) {
- DoTest(t, Parse, "Parse", urltests)
- DoTest(t, Parse, "Parse", urlnofragtests)
-}
-
-func TestParseWithReference(t *testing.T) {
- DoTest(t, ParseWithReference, "ParseWithReference", urltests)
- DoTest(t, ParseWithReference, "ParseWithReference", urlfragtests)
-}
-
-const pathThatLooksSchemeRelative = "//not.a.user@not.a.host/just/a/path"
-
-var parseRequestUrlTests = []struct {
- url string
- expectedValid bool
-}{
- {"http://foo.com", true},
- {"http://foo.com/", true},
- {"http://foo.com/path", true},
- {"/", true},
- {pathThatLooksSchemeRelative, true},
- {"//not.a.user@%66%6f%6f.com/just/a/path/also", true},
- {"foo.html", false},
- {"../dir/", false},
-}
-
-func TestParseRequest(t *testing.T) {
- for _, test := range parseRequestUrlTests {
- _, err := ParseRequest(test.url)
- valid := err == nil
- if valid != test.expectedValid {
- t.Errorf("Expected valid=%v for %q; got %v", test.expectedValid, test.url, valid)
- }
- }
-
- url, err := ParseRequest(pathThatLooksSchemeRelative)
- if err != nil {
- t.Fatalf("Unexpected error %v", err)
- }
- if url.Path != pathThatLooksSchemeRelative {
- t.Errorf("Expected path %q; got %q", pathThatLooksSchemeRelative, url.Path)
- }
-}
-
-func DoTestString(t *testing.T, parse func(string) (*URL, error), name string, tests []URLTest) {
- for _, tt := range tests {
- u, err := parse(tt.in)
- if err != nil {
- t.Errorf("%s(%q) returned error %s", name, tt.in, err)
- continue
- }
- s := u.String()
- expected := tt.in
- if len(tt.roundtrip) > 0 {
- expected = tt.roundtrip
- }
- if s != expected {
- t.Errorf("%s(%q).String() == %q (expected %q)", name, tt.in, s, expected)
- }
- }
-}
-
-func TestURLString(t *testing.T) {
- DoTestString(t, Parse, "Parse", urltests)
- DoTestString(t, Parse, "Parse", urlnofragtests)
- DoTestString(t, ParseWithReference, "ParseWithReference", urltests)
- DoTestString(t, ParseWithReference, "ParseWithReference", urlfragtests)
-}
-
-type EscapeTest struct {
- in string
- out string
- err error
-}
-
-var unescapeTests = []EscapeTest{
- {
- "",
- "",
- nil,
- },
- {
- "abc",
- "abc",
- nil,
- },
- {
- "1%41",
- "1A",
- nil,
- },
- {
- "1%41%42%43",
- "1ABC",
- nil,
- },
- {
- "%4a",
- "J",
- nil,
- },
- {
- "%6F",
- "o",
- nil,
- },
- {
- "%", // not enough characters after %
- "",
- EscapeError("%"),
- },
- {
- "%a", // not enough characters after %
- "",
- EscapeError("%a"),
- },
- {
- "%1", // not enough characters after %
- "",
- EscapeError("%1"),
- },
- {
- "123%45%6", // not enough characters after %
- "",
- EscapeError("%6"),
- },
- {
- "%zzzzz", // invalid hex digits
- "",
- EscapeError("%zz"),
- },
-}
-
-func TestUnescape(t *testing.T) {
- for _, tt := range unescapeTests {
- actual, err := QueryUnescape(tt.in)
- if actual != tt.out || (err != nil) != (tt.err != nil) {
- t.Errorf("QueryUnescape(%q) = %q, %s; want %q, %s", tt.in, actual, err, tt.out, tt.err)
- }
- }
-}
-
-var escapeTests = []EscapeTest{
- {
- "",
- "",
- nil,
- },
- {
- "abc",
- "abc",
- nil,
- },
- {
- "one two",
- "one+two",
- nil,
- },
- {
- "10%",
- "10%25",
- nil,
- },
- {
- " ?&=#+%!<>#\"{}|\\^[]`☺\t",
- "+%3F%26%3D%23%2B%25!%3C%3E%23%22%7B%7D%7C%5C%5E%5B%5D%60%E2%98%BA%09",
- nil,
- },
-}
-
-func TestEscape(t *testing.T) {
- for _, tt := range escapeTests {
- actual := QueryEscape(tt.in)
- if tt.out != actual {
- t.Errorf("QueryEscape(%q) = %q, want %q", tt.in, actual, tt.out)
- }
-
- // for bonus points, verify that escape:unescape is an identity.
- roundtrip, err := QueryUnescape(actual)
- if roundtrip != tt.in || err != nil {
- t.Errorf("QueryUnescape(%q) = %q, %s; want %q, %s", actual, roundtrip, err, tt.in, "[no error]")
- }
- }
-}
-
-type UserinfoTest struct {
- User string
- Password string
- Raw string
-}
-
-var userinfoTests = []UserinfoTest{
- {"user", "password", "user:password"},
- {"foo:bar", "~!@#$%^&*()_+{}|[]\\-=`:;'\"<>?,./",
- "foo%3Abar:~!%40%23$%25%5E&*()_+%7B%7D%7C%5B%5D%5C-=%60%3A;'%22%3C%3E?,.%2F"},
-}
-
-func TestEscapeUserinfo(t *testing.T) {
- for _, tt := range userinfoTests {
- if raw := EscapeUserinfo(tt.User, tt.Password); raw != tt.Raw {
- t.Errorf("EscapeUserinfo(%q, %q) = %q, want %q", tt.User, tt.Password, raw, tt.Raw)
- }
- }
-}
-
-func TestUnescapeUserinfo(t *testing.T) {
- for _, tt := range userinfoTests {
- if user, pass, err := UnescapeUserinfo(tt.Raw); user != tt.User || pass != tt.Password || err != nil {
- t.Errorf("UnescapeUserinfo(%q) = %q, %q, %v, want %q, %q, nil", tt.Raw, user, pass, err, tt.User, tt.Password)
- }
- }
-}
-
-type EncodeQueryTest struct {
- m Values
- expected string
- expected1 string
-}
-
-var encodeQueryTests = []EncodeQueryTest{
- {nil, "", ""},
- {Values{"q": {"puppies"}, "oe": {"utf8"}}, "q=puppies&oe=utf8", "oe=utf8&q=puppies"},
- {Values{"q": {"dogs", "&", "7"}}, "q=dogs&q=%26&q=7", "q=dogs&q=%26&q=7"},
-}
-
-func TestEncodeQuery(t *testing.T) {
- for _, tt := range encodeQueryTests {
- if q := tt.m.Encode(); q != tt.expected && q != tt.expected1 {
- t.Errorf(`EncodeQuery(%+v) = %q, want %q`, tt.m, q, tt.expected)
- }
- }
-}
-
-var resolvePathTests = []struct {
- base, ref, expected string
-}{
- {"a/b", ".", "a/"},
- {"a/b", "c", "a/c"},
- {"a/b", "..", ""},
- {"a/", "..", ""},
- {"a/", "../..", ""},
- {"a/b/c", "..", "a/"},
- {"a/b/c", "../d", "a/d"},
- {"a/b/c", ".././d", "a/d"},
- {"a/b", "./..", ""},
- {"a/./b", ".", "a/./"},
- {"a/../", ".", "a/../"},
- {"a/.././b", "c", "a/.././c"},
-}
-
-func TestResolvePath(t *testing.T) {
- for _, test := range resolvePathTests {
- got := resolvePath(test.base, test.ref)
- if got != test.expected {
- t.Errorf("For %q + %q got %q; expected %q", test.base, test.ref, got, test.expected)
- }
- }
-}
-
-var resolveReferenceTests = []struct {
- base, rel, expected string
-}{
- // Absolute URL references
- {"http://foo.com?a=b", "https://bar.com/", "https://bar.com/"},
- {"http://foo.com/", "https://bar.com/?a=b", "https://bar.com/?a=b"},
- {"http://foo.com/bar", "mailto:foo@example.com", "mailto:foo@example.com"},
-
- // Path-absolute references
- {"http://foo.com/bar", "/baz", "http://foo.com/baz"},
- {"http://foo.com/bar?a=b#f", "/baz", "http://foo.com/baz"},
- {"http://foo.com/bar?a=b", "/baz?c=d", "http://foo.com/baz?c=d"},
-
- // Scheme-relative
- {"https://foo.com/bar?a=b", "//bar.com/quux", "https://bar.com/quux"},
-
- // Path-relative references:
-
- // ... current directory
- {"http://foo.com", ".", "http://foo.com/"},
- {"http://foo.com/bar", ".", "http://foo.com/"},
- {"http://foo.com/bar/", ".", "http://foo.com/bar/"},
-
- // ... going down
- {"http://foo.com", "bar", "http://foo.com/bar"},
- {"http://foo.com/", "bar", "http://foo.com/bar"},
- {"http://foo.com/bar/baz", "quux", "http://foo.com/bar/quux"},
-
- // ... going up
- {"http://foo.com/bar/baz", "../quux", "http://foo.com/quux"},
- {"http://foo.com/bar/baz", "../../../../../quux", "http://foo.com/quux"},
- {"http://foo.com/bar", "..", "http://foo.com/"},
- {"http://foo.com/bar/baz", "./..", "http://foo.com/"},
-
- // "." and ".." in the base aren't special
- {"http://foo.com/dot/./dotdot/../foo/bar", "../baz", "http://foo.com/dot/./dotdot/../baz"},
-
- // Triple dot isn't special
- {"http://foo.com/bar", "...", "http://foo.com/..."},
-
- // Fragment
- {"http://foo.com/bar", ".#frag", "http://foo.com/#frag"},
-}
-
-func TestResolveReference(t *testing.T) {
- mustParse := func(url string) *URL {
- u, err := ParseWithReference(url)
- if err != nil {
- t.Fatalf("Expected URL to parse: %q, got error: %v", url, err)
- }
- return u
- }
- for _, test := range resolveReferenceTests {
- base := mustParse(test.base)
- rel := mustParse(test.rel)
- url := base.ResolveReference(rel)
- urlStr := url.String()
- if urlStr != test.expected {
- t.Errorf("Resolving %q + %q != %q; got %q", test.base, test.rel, test.expected, urlStr)
- }
- }
-
- // Test that new instances are returned.
- base := mustParse("http://foo.com/")
- abs := base.ResolveReference(mustParse("."))
- if base == abs {
- t.Errorf("Expected no-op reference to return new URL instance.")
- }
- barRef := mustParse("http://bar.com/")
- abs = base.ResolveReference(barRef)
- if abs == barRef {
- t.Errorf("Expected resolution of absolute reference to return new URL instance.")
- }
-
- // Test the convenience wrapper too
- base = mustParse("http://foo.com/path/one/")
- abs, _ = base.Parse("../two")
- expected := "http://foo.com/path/two"
- if abs.String() != expected {
- t.Errorf("Parse wrapper got %q; expected %q", abs.String(), expected)
- }
- _, err := base.Parse("")
- if err == nil {
- t.Errorf("Expected an error from Parse wrapper parsing an empty string.")
- }
-
-}
-
-func TestQueryValues(t *testing.T) {
- u, _ := Parse("http://x.com?foo=bar&bar=1&bar=2")
- v := u.Query()
- if len(v) != 2 {
- t.Errorf("got %d keys in Query values, want 2", len(v))
- }
- if g, e := v.Get("foo"), "bar"; g != e {
- t.Errorf("Get(foo) = %q, want %q", g, e)
- }
- // Case sensitive:
- if g, e := v.Get("Foo"), ""; g != e {
- t.Errorf("Get(Foo) = %q, want %q", g, e)
- }
- if g, e := v.Get("bar"), "1"; g != e {
- t.Errorf("Get(bar) = %q, want %q", g, e)
- }
- if g, e := v.Get("baz"), ""; g != e {
- t.Errorf("Get(baz) = %q, want %q", g, e)
- }
- v.Del("bar")
- if g, e := v.Get("bar"), ""; g != e {
- t.Errorf("second Get(bar) = %q, want %q", g, e)
- }
-}
-
-type parseTest struct {
- query string
- out Values
-}
-
-var parseTests = []parseTest{
- {
- query: "a=1&b=2",
- out: Values{"a": []string{"1"}, "b": []string{"2"}},
- },
- {
- query: "a=1&a=2&a=banana",
- out: Values{"a": []string{"1", "2", "banana"}},
- },
- {
- query: "ascii=%3Ckey%3A+0x90%3E",
- out: Values{"ascii": []string{"<key: 0x90>"}},
- },
- {
- query: "a=1;b=2",
- out: Values{"a": []string{"1"}, "b": []string{"2"}},
- },
- {
- query: "a=1&a=2;a=banana",
- out: Values{"a": []string{"1", "2", "banana"}},
- },
-}
-
-func TestParseQuery(t *testing.T) {
- for i, test := range parseTests {
- form, err := ParseQuery(test.query)
- if err != nil {
- t.Errorf("test %d: Unexpected error: %v", i, err)
- continue
- }
- if len(form) != len(test.out) {
- t.Errorf("test %d: len(form) = %d, want %d", i, len(form), len(test.out))
- }
- for k, evs := range test.out {
- vs, ok := form[k]
- if !ok {
- t.Errorf("test %d: Missing key %q", i, k)
- continue
- }
- if len(vs) != len(evs) {
- t.Errorf("test %d: len(form[%q]) = %d, want %d", i, k, len(vs), len(evs))
- continue
- }
- for j, ev := range evs {
- if v := vs[j]; v != ev {
- t.Errorf("test %d: form[%q][%d] = %q, want %q", i, k, j, v, ev)
- }
- }
- }
- }
-}
+++ /dev/null
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package utf16 implements encoding and decoding of UTF-16 sequences.
-package utf16
-
-import "unicode"
-
-const (
- // 0xd800-0xdc00 encodes the high 10 bits of a pair.
- // 0xdc00-0xe000 encodes the low 10 bits of a pair.
- // the value is those 20 bits plus 0x10000.
- surr1 = 0xd800
- surr2 = 0xdc00
- surr3 = 0xe000
-
- surrSelf = 0x10000
-)
-
-// IsSurrogate returns true if the specified Unicode code point
-// can appear in a surrogate pair.
-func IsSurrogate(r rune) bool {
- return surr1 <= r && r < surr3
-}
-
-// DecodeRune returns the UTF-16 decoding of a surrogate pair.
-// If the pair is not a valid UTF-16 surrogate pair, DecodeRune returns
-// the Unicode replacement code point U+FFFD.
-func DecodeRune(r1, r2 rune) rune {
- if surr1 <= r1 && r1 < surr2 && surr2 <= r2 && r2 < surr3 {
- return (rune(r1)-surr1)<<10 | (rune(r2) - surr2) + 0x10000
- }
- return unicode.ReplacementChar
-}
-
-// EncodeRune returns the UTF-16 surrogate pair r1, r2 for the given rune.
-// If the rune is not a valid Unicode code point or does not need encoding,
-// EncodeRune returns U+FFFD, U+FFFD.
-func EncodeRune(r rune) (r1, r2 rune) {
- if r < surrSelf || r > unicode.MaxRune || IsSurrogate(r) {
- return unicode.ReplacementChar, unicode.ReplacementChar
- }
- r -= surrSelf
- return surr1 + (r>>10)&0x3ff, surr2 + r&0x3ff
-}
-
-// Encode returns the UTF-16 encoding of the Unicode code point sequence s.
-func Encode(s []rune) []uint16 {
- n := len(s)
- for _, v := range s {
- if v >= surrSelf {
- n++
- }
- }
-
- a := make([]uint16, n)
- n = 0
- for _, v := range s {
- switch {
- case v < 0, surr1 <= v && v < surr3, v > unicode.MaxRune:
- v = unicode.ReplacementChar
- fallthrough
- case v < surrSelf:
- a[n] = uint16(v)
- n++
- default:
- r1, r2 := EncodeRune(v)
- a[n] = uint16(r1)
- a[n+1] = uint16(r2)
- n += 2
- }
- }
- return a[0:n]
-}
-
-// Decode returns the Unicode code point sequence represented
-// by the UTF-16 encoding s.
-func Decode(s []uint16) []rune {
- a := make([]rune, len(s))
- n := 0
- for i := 0; i < len(s); i++ {
- switch r := s[i]; {
- case surr1 <= r && r < surr2 && i+1 < len(s) &&
- surr2 <= s[i+1] && s[i+1] < surr3:
- // valid surrogate sequence
- a[n] = DecodeRune(rune(r), rune(s[i+1]))
- i++
- n++
- case surr1 <= r && r < surr3:
- // invalid surrogate sequence
- a[n] = unicode.ReplacementChar
- n++
- default:
- // normal rune
- a[n] = rune(r)
- n++
- }
- }
- return a[0:n]
-}
+++ /dev/null
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package utf16_test
-
-import (
- "reflect"
- "testing"
- "unicode"
- . "utf16"
-)
-
-type encodeTest struct {
- in []rune
- out []uint16
-}
-
-var encodeTests = []encodeTest{
- {[]rune{1, 2, 3, 4}, []uint16{1, 2, 3, 4}},
- {[]rune{0xffff, 0x10000, 0x10001, 0x12345, 0x10ffff},
- []uint16{0xffff, 0xd800, 0xdc00, 0xd800, 0xdc01, 0xd808, 0xdf45, 0xdbff, 0xdfff}},
- {[]rune{'a', 'b', 0xd7ff, 0xd800, 0xdfff, 0xe000, 0x110000, -1},
- []uint16{'a', 'b', 0xd7ff, 0xfffd, 0xfffd, 0xe000, 0xfffd, 0xfffd}},
-}
-
-func TestEncode(t *testing.T) {
- for _, tt := range encodeTests {
- out := Encode(tt.in)
- if !reflect.DeepEqual(out, tt.out) {
- t.Errorf("Encode(%x) = %x; want %x", tt.in, out, tt.out)
- }
- }
-}
-
-func TestEncodeRune(t *testing.T) {
- for i, tt := range encodeTests {
- j := 0
- for _, r := range tt.in {
- r1, r2 := EncodeRune(r)
- if r < 0x10000 || r > unicode.MaxRune {
- if j >= len(tt.out) {
- t.Errorf("#%d: ran out of tt.out", i)
- break
- }
- if r1 != unicode.ReplacementChar || r2 != unicode.ReplacementChar {
- t.Errorf("EncodeRune(%#x) = %#x, %#x; want 0xfffd, 0xfffd", r, r1, r2)
- }
- j++
- } else {
- if j+1 >= len(tt.out) {
- t.Errorf("#%d: ran out of tt.out", i)
- break
- }
- if r1 != rune(tt.out[j]) || r2 != rune(tt.out[j+1]) {
- t.Errorf("EncodeRune(%#x) = %#x, %#x; want %#x, %#x", r, r1, r2, tt.out[j], tt.out[j+1])
- }
- j += 2
- dec := DecodeRune(r1, r2)
- if dec != r {
- t.Errorf("DecodeRune(%#x, %#x) = %#x; want %#x", r1, r2, dec, r)
- }
- }
- }
- if j != len(tt.out) {
- t.Errorf("#%d: EncodeRune didn't generate enough output", i)
- }
- }
-}
-
-type decodeTest struct {
- in []uint16
- out []rune
-}
-
-var decodeTests = []decodeTest{
- {[]uint16{1, 2, 3, 4}, []rune{1, 2, 3, 4}},
- {[]uint16{0xffff, 0xd800, 0xdc00, 0xd800, 0xdc01, 0xd808, 0xdf45, 0xdbff, 0xdfff},
- []rune{0xffff, 0x10000, 0x10001, 0x12345, 0x10ffff}},
- {[]uint16{0xd800, 'a'}, []rune{0xfffd, 'a'}},
- {[]uint16{0xdfff}, []rune{0xfffd}},
-}
-
-func TestDecode(t *testing.T) {
- for _, tt := range decodeTests {
- out := Decode(tt.in)
- if !reflect.DeepEqual(out, tt.out) {
- t.Errorf("Decode(%x) = %x; want %x", tt.in, out, tt.out)
- }
- }
-}
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package utf8
-
-import "errors"
-
-// String wraps a regular string with a small structure that provides more
-// efficient indexing by code point index, as opposed to byte index.
-// Scanning incrementally forwards or backwards is O(1) per index operation
-// (although not as fast a range clause going forwards). Random access is
-// O(N) in the length of the string, but the overhead is less than always
-// scanning from the beginning.
-// If the string is ASCII, random access is O(1).
-// Unlike the built-in string type, String has internal mutable state and
-// is not thread-safe.
-type String struct {
- str string
- numRunes int
- // If width > 0, the rune at runePos starts at bytePos and has the specified width.
- width int
- bytePos int
- runePos int
- nonASCII int // byte index of the first non-ASCII rune.
-}
-
-// NewString returns a new UTF-8 string with the provided contents.
-func NewString(contents string) *String {
- return new(String).Init(contents)
-}
-
-// Init initializes an existing String to hold the provided contents.
-// It returns a pointer to the initialized String.
-func (s *String) Init(contents string) *String {
- s.str = contents
- s.bytePos = 0
- s.runePos = 0
- for i := 0; i < len(contents); i++ {
- if contents[i] >= RuneSelf {
- // Not ASCII.
- s.numRunes = RuneCountInString(contents)
- _, s.width = DecodeRuneInString(contents)
- s.nonASCII = i
- return s
- }
- }
- // ASCII is simple. Also, the empty string is ASCII.
- s.numRunes = len(contents)
- s.width = 0
- s.nonASCII = len(contents)
- return s
-}
-
-// String returns the contents of the String. This method also means the
-// String is directly printable by fmt.Print.
-func (s *String) String() string {
- return s.str
-}
-
-// RuneCount returns the number of runes (Unicode code points) in the String.
-func (s *String) RuneCount() int {
- return s.numRunes
-}
-
-// IsASCII returns a boolean indicating whether the String contains only ASCII bytes.
-func (s *String) IsASCII() bool {
- return s.width == 0
-}
-
-// Slice returns the string sliced at rune positions [i:j].
-func (s *String) Slice(i, j int) string {
- // ASCII is easy. Let the compiler catch the indexing error if there is one.
- if j < s.nonASCII {
- return s.str[i:j]
- }
- if i < 0 || j > s.numRunes || i > j {
- panic(sliceOutOfRange)
- }
- if i == j {
- return ""
- }
- // For non-ASCII, after At(i), bytePos is always the position of the indexed character.
- var low, high int
- switch {
- case i < s.nonASCII:
- low = i
- case i == s.numRunes:
- low = len(s.str)
- default:
- s.At(i)
- low = s.bytePos
- }
- switch {
- case j == s.numRunes:
- high = len(s.str)
- default:
- s.At(j)
- high = s.bytePos
- }
- return s.str[low:high]
-}
-
-// At returns the rune with index i in the String. The sequence of runes is the same
-// as iterating over the contents with a "for range" clause.
-func (s *String) At(i int) rune {
- // ASCII is easy. Let the compiler catch the indexing error if there is one.
- if i < s.nonASCII {
- return rune(s.str[i])
- }
-
- // Now we do need to know the index is valid.
- if i < 0 || i >= s.numRunes {
- panic(outOfRange)
- }
-
- var r rune
-
- // Five easy common cases: within 1 spot of bytePos/runePos, or the beginning, or the end.
- // With these cases, all scans from beginning or end work in O(1) time per rune.
- switch {
-
- case i == s.runePos-1: // backing up one rune
- r, s.width = DecodeLastRuneInString(s.str[0:s.bytePos])
- s.runePos = i
- s.bytePos -= s.width
- return r
- case i == s.runePos+1: // moving ahead one rune
- s.runePos = i
- s.bytePos += s.width
- fallthrough
- case i == s.runePos:
- r, s.width = DecodeRuneInString(s.str[s.bytePos:])
- return r
- case i == 0: // start of string
- r, s.width = DecodeRuneInString(s.str)
- s.runePos = 0
- s.bytePos = 0
- return r
-
- case i == s.numRunes-1: // last rune in string
- r, s.width = DecodeLastRuneInString(s.str)
- s.runePos = i
- s.bytePos = len(s.str) - s.width
- return r
- }
-
- // We need to do a linear scan. There are three places to start from:
- // 1) The beginning
- // 2) bytePos/runePos.
- // 3) The end
- // Choose the closest in rune count, scanning backwards if necessary.
- forward := true
- if i < s.runePos {
- // Between beginning and pos. Which is closer?
- // Since both i and runePos are guaranteed >= nonASCII, that's the
- // lowest location we need to start from.
- if i < (s.runePos-s.nonASCII)/2 {
- // Scan forward from beginning
- s.bytePos, s.runePos = s.nonASCII, s.nonASCII
- } else {
- // Scan backwards from where we are
- forward = false
- }
- } else {
- // Between pos and end. Which is closer?
- if i-s.runePos < (s.numRunes-s.runePos)/2 {
- // Scan forward from pos
- } else {
- // Scan backwards from end
- s.bytePos, s.runePos = len(s.str), s.numRunes
- forward = false
- }
- }
- if forward {
- // TODO: Is it much faster to use a range loop for this scan?
- for {
- r, s.width = DecodeRuneInString(s.str[s.bytePos:])
- if s.runePos == i {
- break
- }
- s.runePos++
- s.bytePos += s.width
- }
- } else {
- for {
- r, s.width = DecodeLastRuneInString(s.str[0:s.bytePos])
- s.runePos--
- s.bytePos -= s.width
- if s.runePos == i {
- break
- }
- }
- }
- return r
-}
-
-var outOfRange = errors.New("utf8.String: index out of range")
-var sliceOutOfRange = errors.New("utf8.String: slice index out of range")
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package utf8_test
-
-import (
- "rand"
- "testing"
- . "utf8"
-)
-
-func TestScanForwards(t *testing.T) {
- for _, s := range testStrings {
- runes := []rune(s)
- str := NewString(s)
- if str.RuneCount() != len(runes) {
- t.Errorf("%s: expected %d runes; got %d", s, len(runes), str.RuneCount())
- break
- }
- for i, expect := range runes {
- got := str.At(i)
- if got != expect {
- t.Errorf("%s[%d]: expected %c (%U); got %c (%U)", s, i, expect, expect, got, got)
- }
- }
- }
-}
-
-func TestScanBackwards(t *testing.T) {
- for _, s := range testStrings {
- runes := []rune(s)
- str := NewString(s)
- if str.RuneCount() != len(runes) {
- t.Errorf("%s: expected %d runes; got %d", s, len(runes), str.RuneCount())
- break
- }
- for i := len(runes) - 1; i >= 0; i-- {
- expect := runes[i]
- got := str.At(i)
- if got != expect {
- t.Errorf("%s[%d]: expected %c (%U); got %c (%U)", s, i, expect, expect, got, got)
- }
- }
- }
-}
-
-func randCount() int {
- if testing.Short() {
- return 100
- }
- return 100000
-}
-
-func TestRandomAccess(t *testing.T) {
- for _, s := range testStrings {
- if len(s) == 0 {
- continue
- }
- runes := []rune(s)
- str := NewString(s)
- if str.RuneCount() != len(runes) {
- t.Errorf("%s: expected %d runes; got %d", s, len(runes), str.RuneCount())
- break
- }
- for j := 0; j < randCount(); j++ {
- i := rand.Intn(len(runes))
- expect := runes[i]
- got := str.At(i)
- if got != expect {
- t.Errorf("%s[%d]: expected %c (%U); got %c (%U)", s, i, expect, expect, got, got)
- }
- }
- }
-}
-
-func TestRandomSliceAccess(t *testing.T) {
- for _, s := range testStrings {
- if len(s) == 0 || s[0] == '\x80' { // the bad-UTF-8 string fools this simple test
- continue
- }
- runes := []rune(s)
- str := NewString(s)
- if str.RuneCount() != len(runes) {
- t.Errorf("%s: expected %d runes; got %d", s, len(runes), str.RuneCount())
- break
- }
- for k := 0; k < randCount(); k++ {
- i := rand.Intn(len(runes))
- j := rand.Intn(len(runes) + 1)
- if i > j { // include empty strings
- continue
- }
- expect := string(runes[i:j])
- got := str.Slice(i, j)
- if got != expect {
- t.Errorf("%s[%d:%d]: expected %q got %q", s, i, j, expect, got)
- }
- }
- }
-}
-
-func TestLimitSliceAccess(t *testing.T) {
- for _, s := range testStrings {
- str := NewString(s)
- if str.Slice(0, 0) != "" {
- t.Error("failure with empty slice at beginning")
- }
- nr := RuneCountInString(s)
- if str.Slice(nr, nr) != "" {
- t.Error("failure with empty slice at end")
- }
- }
-}
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package utf8 implements functions and constants to support text encoded in
-// UTF-8. This package calls a Unicode character a rune for brevity.
-package utf8
-
-import "unicode" // only needed for a couple of constants
-
-// Numbers fundamental to the encoding.
-const (
- RuneError = unicode.ReplacementChar // the "error" Rune or "replacement character".
- RuneSelf = 0x80 // characters below Runeself are represented as themselves in a single byte.
- UTFMax = 4 // maximum number of bytes of a UTF-8 encoded Unicode character.
-)
-
-const (
- t1 = 0x00 // 0000 0000
- tx = 0x80 // 1000 0000
- t2 = 0xC0 // 1100 0000
- t3 = 0xE0 // 1110 0000
- t4 = 0xF0 // 1111 0000
- t5 = 0xF8 // 1111 1000
-
- maskx = 0x3F // 0011 1111
- mask2 = 0x1F // 0001 1111
- mask3 = 0x0F // 0000 1111
- mask4 = 0x07 // 0000 0111
-
- rune1Max = 1<<7 - 1
- rune2Max = 1<<11 - 1
- rune3Max = 1<<16 - 1
- rune4Max = 1<<21 - 1
-)
-
-func decodeRuneInternal(p []byte) (r rune, size int, short bool) {
- n := len(p)
- if n < 1 {
- return RuneError, 0, true
- }
- c0 := p[0]
-
- // 1-byte, 7-bit sequence?
- if c0 < tx {
- return rune(c0), 1, false
- }
-
- // unexpected continuation byte?
- if c0 < t2 {
- return RuneError, 1, false
- }
-
- // need first continuation byte
- if n < 2 {
- return RuneError, 1, true
- }
- c1 := p[1]
- if c1 < tx || t2 <= c1 {
- return RuneError, 1, false
- }
-
- // 2-byte, 11-bit sequence?
- if c0 < t3 {
- r = rune(c0&mask2)<<6 | rune(c1&maskx)
- if r <= rune1Max {
- return RuneError, 1, false
- }
- return r, 2, false
- }
-
- // need second continuation byte
- if n < 3 {
- return RuneError, 1, true
- }
- c2 := p[2]
- if c2 < tx || t2 <= c2 {
- return RuneError, 1, false
- }
-
- // 3-byte, 16-bit sequence?
- if c0 < t4 {
- r = rune(c0&mask3)<<12 | rune(c1&maskx)<<6 | rune(c2&maskx)
- if r <= rune2Max {
- return RuneError, 1, false
- }
- return r, 3, false
- }
-
- // need third continuation byte
- if n < 4 {
- return RuneError, 1, true
- }
- c3 := p[3]
- if c3 < tx || t2 <= c3 {
- return RuneError, 1, false
- }
-
- // 4-byte, 21-bit sequence?
- if c0 < t5 {
- r = rune(c0&mask4)<<18 | rune(c1&maskx)<<12 | rune(c2&maskx)<<6 | rune(c3&maskx)
- if r <= rune3Max {
- return RuneError, 1, false
- }
- return r, 4, false
- }
-
- // error
- return RuneError, 1, false
-}
-
-func decodeRuneInStringInternal(s string) (r rune, size int, short bool) {
- n := len(s)
- if n < 1 {
- return RuneError, 0, true
- }
- c0 := s[0]
-
- // 1-byte, 7-bit sequence?
- if c0 < tx {
- return rune(c0), 1, false
- }
-
- // unexpected continuation byte?
- if c0 < t2 {
- return RuneError, 1, false
- }
-
- // need first continuation byte
- if n < 2 {
- return RuneError, 1, true
- }
- c1 := s[1]
- if c1 < tx || t2 <= c1 {
- return RuneError, 1, false
- }
-
- // 2-byte, 11-bit sequence?
- if c0 < t3 {
- r = rune(c0&mask2)<<6 | rune(c1&maskx)
- if r <= rune1Max {
- return RuneError, 1, false
- }
- return r, 2, false
- }
-
- // need second continuation byte
- if n < 3 {
- return RuneError, 1, true
- }
- c2 := s[2]
- if c2 < tx || t2 <= c2 {
- return RuneError, 1, false
- }
-
- // 3-byte, 16-bit sequence?
- if c0 < t4 {
- r = rune(c0&mask3)<<12 | rune(c1&maskx)<<6 | rune(c2&maskx)
- if r <= rune2Max {
- return RuneError, 1, false
- }
- return r, 3, false
- }
-
- // need third continuation byte
- if n < 4 {
- return RuneError, 1, true
- }
- c3 := s[3]
- if c3 < tx || t2 <= c3 {
- return RuneError, 1, false
- }
-
- // 4-byte, 21-bit sequence?
- if c0 < t5 {
- r = rune(c0&mask4)<<18 | rune(c1&maskx)<<12 | rune(c2&maskx)<<6 | rune(c3&maskx)
- if r <= rune3Max {
- return RuneError, 1, false
- }
- return r, 4, false
- }
-
- // error
- return RuneError, 1, false
-}
-
-// FullRune reports whether the bytes in p begin with a full UTF-8 encoding of a rune.
-// An invalid encoding is considered a full Rune since it will convert as a width-1 error rune.
-func FullRune(p []byte) bool {
- _, _, short := decodeRuneInternal(p)
- return !short
-}
-
-// FullRuneInString is like FullRune but its input is a string.
-func FullRuneInString(s string) bool {
- _, _, short := decodeRuneInStringInternal(s)
- return !short
-}
-
-// DecodeRune unpacks the first UTF-8 encoding in p and returns the rune and its width in bytes.
-func DecodeRune(p []byte) (r rune, size int) {
- r, size, _ = decodeRuneInternal(p)
- return
-}
-
-// DecodeRuneInString is like DecodeRune but its input is a string.
-func DecodeRuneInString(s string) (r rune, size int) {
- r, size, _ = decodeRuneInStringInternal(s)
- return
-}
-
-// DecodeLastRune unpacks the last UTF-8 encoding in p
-// and returns the rune and its width in bytes.
-func DecodeLastRune(p []byte) (r rune, size int) {
- end := len(p)
- if end == 0 {
- return RuneError, 0
- }
- start := end - 1
- r = rune(p[start])
- if r < RuneSelf {
- return r, 1
- }
- // guard against O(n^2) behavior when traversing
- // backwards through strings with long sequences of
- // invalid UTF-8.
- lim := end - UTFMax
- if lim < 0 {
- lim = 0
- }
- for start--; start >= lim; start-- {
- if RuneStart(p[start]) {
- break
- }
- }
- if start < 0 {
- start = 0
- }
- r, size = DecodeRune(p[start:end])
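- // The decoded sequence must end exactly at end; otherwise the trailing
- // bytes do not form a valid encoding and the error rune is returned.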
- if start+size != end {
- return RuneError, 1
- }
- return r, size
-}
-
-// DecodeLastRuneInString is like DecodeLastRune but its input is a string.
-func DecodeLastRuneInString(s string) (r rune, size int) {
- end := len(s)
- if end == 0 {
- return RuneError, 0
- }
- start := end - 1
- r = rune(s[start])
- if r < RuneSelf {
- return r, 1
- }
- // guard against O(n^2) behavior when traversing
- // backwards through strings with long sequences of
- // invalid UTF-8.
- lim := end - UTFMax
- if lim < 0 {
- lim = 0
- }
- for start--; start >= lim; start-- {
- if RuneStart(s[start]) {
- break
- }
- }
- if start < 0 {
- start = 0
- }
- r, size = DecodeRuneInString(s[start:end])
- if start+size != end {
- return RuneError, 1
- }
- return r, size
-}
-
-// RuneLen returns the number of bytes required to encode the rune.
-// It returns -1 if the rune does not fit in a four-byte UTF-8 encoding.
-func RuneLen(r rune) int {
- switch {
- case r <= rune1Max:
- return 1
- case r <= rune2Max:
- return 2
- case r <= rune3Max:
- return 3
- case r <= rune4Max:
- return 4
- }
- return -1
-}
-
-// EncodeRune writes into p (which must be large enough) the UTF-8 encoding of the rune.
-// It returns the number of bytes written.
-func EncodeRune(p []byte, r rune) int {
- // Negative values are erroneous. Making it unsigned addresses the problem.
- if uint32(r) <= rune1Max {
- p[0] = byte(r)
- return 1
- }
-
- if uint32(r) <= rune2Max {
- p[0] = t2 | byte(r>>6)
- p[1] = tx | byte(r)&maskx
- return 2
- }
-
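- // Runes above unicode.MaxRune cannot be encoded legally;
- // substitute the replacement character instead.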
- if uint32(r) > unicode.MaxRune {
- r = RuneError
- }
-
- if uint32(r) <= rune3Max {
- p[0] = t3 | byte(r>>12)
- p[1] = tx | byte(r>>6)&maskx
- p[2] = tx | byte(r)&maskx
- return 3
- }
-
- p[0] = t4 | byte(r>>18)
- p[1] = tx | byte(r>>12)&maskx
- p[2] = tx | byte(r>>6)&maskx
- p[3] = tx | byte(r)&maskx
- return 4
-}
-
-// RuneCount returns the number of runes in p. Erroneous and short
-// encodings are treated as single runes of width 1 byte.
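-// For example, RuneCount([]byte("héllo")) is 5 even though the slice holds
-// 6 bytes, because 'é' is encoded in two bytes.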
-func RuneCount(p []byte) int {
- i := 0
- var n int
- for n = 0; i < len(p); n++ {
- if p[i] < RuneSelf {
- i++
- } else {
- _, size := DecodeRune(p[i:])
- i += size
- }
- }
- return n
-}
-
-// RuneCountInString is like RuneCount but its input is a string.
-func RuneCountInString(s string) (n int) {
- for _ = range s {
- n++
- }
- return
-}
-
-// RuneStart reports whether the byte could be the first byte of
-// an encoded rune. Second and subsequent bytes always have the top
-// two bits set to 10.
-func RuneStart(b byte) bool { return b&0xC0 != 0x80 }
-
-// Valid reports whether p consists entirely of valid UTF-8-encoded runes.
-func Valid(p []byte) bool {
- i := 0
- for i < len(p) {
- if p[i] < RuneSelf {
- i++
- } else {
- _, size := DecodeRune(p[i:])
- if size == 1 {
- // All valid runes of size 1 (those
- // below RuneSelf) were handled above.
- // This must be a RuneError.
- return false
- }
- i += size
- }
- }
- return true
-}
-
-// ValidString reports whether s consists entirely of valid UTF-8-encoded runes.
-func ValidString(s string) bool {
- for i, r := range s {
- if r == RuneError {
- // The RuneError value can be an error
- // sentinel value (if its size is 1) or the same
- // value encoded properly. Decode it to see if
- // it's the 1 byte sentinel value.
- _, size := DecodeRuneInString(s[i:])
- if size == 1 {
- return false
- }
- }
- }
- return true
-}
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package utf8_test
-
-import (
- "bytes"
- "testing"
- . "utf8"
-)
-
-type Utf8Map struct {
- r rune
- str string
-}
-
-var utf8map = []Utf8Map{
- {0x0000, "\x00"},
- {0x0001, "\x01"},
- {0x007e, "\x7e"},
- {0x007f, "\x7f"},
- {0x0080, "\xc2\x80"},
- {0x0081, "\xc2\x81"},
- {0x00bf, "\xc2\xbf"},
- {0x00c0, "\xc3\x80"},
- {0x00c1, "\xc3\x81"},
- {0x00c8, "\xc3\x88"},
- {0x00d0, "\xc3\x90"},
- {0x00e0, "\xc3\xa0"},
- {0x00f0, "\xc3\xb0"},
- {0x00f8, "\xc3\xb8"},
- {0x00ff, "\xc3\xbf"},
- {0x0100, "\xc4\x80"},
- {0x07ff, "\xdf\xbf"},
- {0x0800, "\xe0\xa0\x80"},
- {0x0801, "\xe0\xa0\x81"},
- {0xfffe, "\xef\xbf\xbe"},
- {0xffff, "\xef\xbf\xbf"},
- {0x10000, "\xf0\x90\x80\x80"},
- {0x10001, "\xf0\x90\x80\x81"},
- {0x10fffe, "\xf4\x8f\xbf\xbe"},
- {0x10ffff, "\xf4\x8f\xbf\xbf"},
- {0xFFFD, "\xef\xbf\xbd"},
-}
-
-var testStrings = []string{
- "",
- "abcd",
- "☺☻☹",
- "日a本b語ç日ð本Ê語þ日¥本¼語i日©",
- "日a本b語ç日ð本Ê語þ日¥本¼語i日©日a本b語ç日ð本Ê語þ日¥本¼語i日©日a本b語ç日ð本Ê語þ日¥本¼語i日©",
- "\x80\x80\x80\x80",
-}
-
-func TestFullRune(t *testing.T) {
- for i := 0; i < len(utf8map); i++ {
- m := utf8map[i]
- b := []byte(m.str)
- if !FullRune(b) {
- t.Errorf("FullRune(%q) (%U) = false, want true", b, m.r)
- }
- s := m.str
- if !FullRuneInString(s) {
- t.Errorf("FullRuneInString(%q) (%U) = false, want true", s, m.r)
- }
- b1 := b[0 : len(b)-1]
- if FullRune(b1) {
- t.Errorf("FullRune(%q) = true, want false", b1)
- }
- s1 := string(b1)
- if FullRuneInString(s1) {
- t.Errorf("FullRune(%q) = true, want false", s1)
- }
- }
-}
-
-func TestEncodeRune(t *testing.T) {
- for i := 0; i < len(utf8map); i++ {
- m := utf8map[i]
- b := []byte(m.str)
- var buf [10]byte
- n := EncodeRune(buf[0:], m.r)
- b1 := buf[0:n]
- if !bytes.Equal(b, b1) {
- t.Errorf("EncodeRune(%#04x) = %q want %q", m.r, b1, b)
- }
- }
-}
-
-func TestDecodeRune(t *testing.T) {
- for i := 0; i < len(utf8map); i++ {
- m := utf8map[i]
- b := []byte(m.str)
- r, size := DecodeRune(b)
- if r != m.r || size != len(b) {
- t.Errorf("DecodeRune(%q) = %#04x, %d want %#04x, %d", b, r, size, m.r, len(b))
- }
- s := m.str
- r, size = DecodeRuneInString(s)
- if r != m.r || size != len(b) {
- t.Errorf("DecodeRune(%q) = %#04x, %d want %#04x, %d", s, r, size, m.r, len(b))
- }
-
- // the []byte conversion may leave extra capacity behind - make sure a trailing byte doesn't break decoding
- r, size = DecodeRune(b[0:cap(b)])
- if r != m.r || size != len(b) {
- t.Errorf("DecodeRune(%q) = %#04x, %d want %#04x, %d", b, r, size, m.r, len(b))
- }
- s = m.str + "\x00"
- r, size = DecodeRuneInString(s)
- if r != m.r || size != len(b) {
- t.Errorf("DecodeRuneInString(%q) = %#04x, %d want %#04x, %d", s, r, size, m.r, len(b))
- }
-
- // make sure missing bytes fail
- wantsize := 1
- if wantsize >= len(b) {
- wantsize = 0
- }
- r, size = DecodeRune(b[0 : len(b)-1])
- if r != RuneError || size != wantsize {
- t.Errorf("DecodeRune(%q) = %#04x, %d want %#04x, %d", b[0:len(b)-1], r, size, RuneError, wantsize)
- }
- s = m.str[0 : len(m.str)-1]
- r, size = DecodeRuneInString(s)
- if r != RuneError || size != wantsize {
- t.Errorf("DecodeRuneInString(%q) = %#04x, %d want %#04x, %d", s, r, size, RuneError, wantsize)
- }
-
- // make sure bad sequences fail
- if len(b) == 1 {
- b[0] = 0x80
- } else {
- b[len(b)-1] = 0x7F
- }
- r, size = DecodeRune(b)
- if r != RuneError || size != 1 {
- t.Errorf("DecodeRune(%q) = %#04x, %d want %#04x, %d", b, r, size, RuneError, 1)
- }
- s = string(b)
- r, size = DecodeRuneInString(s)
- if r != RuneError || size != 1 {
- t.Errorf("DecodeRuneInString(%q) = %#04x, %d want %#04x, %d", s, r, size, RuneError, 1)
- }
-
- }
-}
-
-// Check that DecodeRune and DecodeLastRune correspond to
-// the equivalent range loop.
-func TestSequencing(t *testing.T) {
- for _, ts := range testStrings {
- for _, m := range utf8map {
- for _, s := range []string{ts + m.str, m.str + ts, ts + m.str + ts} {
- testSequence(t, s)
- }
- }
- }
-}
-
-// Check that a range loop and a []rune conversion visit the same runes.
-// Not really a test of this package, but the assumption is used here and
-// it's good to verify.
-func TestIntConversion(t *testing.T) {
- for _, ts := range testStrings {
- runes := []rune(ts)
- if RuneCountInString(ts) != len(runes) {
- t.Errorf("%q: expected %d runes; got %d", ts, len(runes), RuneCountInString(ts))
- break
- }
- i := 0
- for _, r := range ts {
- if r != runes[i] {
- t.Errorf("%q[%d]: expected %c (%U); got %c (%U)", ts, i, runes[i], runes[i], r, r)
- }
- i++
- }
- }
-}
-
-func testSequence(t *testing.T, s string) {
- type info struct {
- index int
- r rune
- }
- index := make([]info, len(s))
- b := []byte(s)
- si := 0
- j := 0
- for i, r := range s {
- if si != i {
- t.Errorf("Sequence(%q) mismatched index %d, want %d", s, si, i)
- return
- }
- index[j] = info{i, r}
- j++
- r1, size1 := DecodeRune(b[i:])
- if r != r1 {
- t.Errorf("DecodeRune(%q) = %#04x, want %#04x", s[i:], r1, r)
- return
- }
- r2, size2 := DecodeRuneInString(s[i:])
- if r != r2 {
- t.Errorf("DecodeRuneInString(%q) = %#04x, want %#04x", s[i:], r2, r)
- return
- }
- if size1 != size2 {
- t.Errorf("DecodeRune/DecodeRuneInString(%q) size mismatch %d/%d", s[i:], size1, size2)
- return
- }
- si += size1
- }
- j--
- for si = len(s); si > 0; {
- r1, size1 := DecodeLastRune(b[0:si])
- r2, size2 := DecodeLastRuneInString(s[0:si])
- if size1 != size2 {
- t.Errorf("DecodeLastRune/DecodeLastRuneInString(%q, %d) size mismatch %d/%d", s, si, size1, size2)
- return
- }
- if r1 != index[j].r {
- t.Errorf("DecodeLastRune(%q, %d) = %#04x, want %#04x", s, si, r1, index[j].r)
- return
- }
- if r2 != index[j].r {
- t.Errorf("DecodeLastRuneInString(%q, %d) = %#04x, want %#04x", s, si, r2, index[j].r)
- return
- }
- si -= size1
- if si != index[j].index {
- t.Errorf("DecodeLastRune(%q) index mismatch at %d, want %d", s, si, index[j].index)
- return
- }
- j--
- }
- if si != 0 {
- t.Errorf("DecodeLastRune(%q) finished at %d, not 0", s, si)
- }
-}
-
-// Check that negative runes encode as U+FFFD.
-func TestNegativeRune(t *testing.T) {
- errorbuf := make([]byte, UTFMax)
- errorbuf = errorbuf[0:EncodeRune(errorbuf, RuneError)]
- buf := make([]byte, UTFMax)
- buf = buf[0:EncodeRune(buf, -1)]
- if !bytes.Equal(buf, errorbuf) {
- t.Errorf("incorrect encoding [% x] for -1; expected [% x]", buf, errorbuf)
- }
-}
-
-type RuneCountTest struct {
- in string
- out int
-}
-
-var runecounttests = []RuneCountTest{
- {"abcd", 4},
- {"☺☻☹", 3},
- {"1,2,3,4", 7},
- {"\xe2\x00", 2},
-}
-
-func TestRuneCount(t *testing.T) {
- for i := 0; i < len(runecounttests); i++ {
- tt := runecounttests[i]
- if out := RuneCountInString(tt.in); out != tt.out {
- t.Errorf("RuneCountInString(%q) = %d, want %d", tt.in, out, tt.out)
- }
- if out := RuneCount([]byte(tt.in)); out != tt.out {
- t.Errorf("RuneCount(%q) = %d, want %d", tt.in, out, tt.out)
- }
- }
-}
-
-type ValidTest struct {
- in string
- out bool
-}
-
-var validTests = []ValidTest{
- {"", true},
- {"a", true},
- {"abc", true},
- {"Ж", true},
- {"ЖЖ", true},
- {"брэд-ЛГТМ", true},
- {"☺☻☹", true},
- {string([]byte{66, 250}), false},
- {string([]byte{66, 250, 67}), false},
- {"a\uFFFDb", true},
-}
-
-func TestValid(t *testing.T) {
- for i, tt := range validTests {
- if Valid([]byte(tt.in)) != tt.out {
- t.Errorf("%d. Valid(%q) = %v; want %v", i, tt.in, !tt.out, tt.out)
- }
- if ValidString(tt.in) != tt.out {
- t.Errorf("%d. ValidString(%q) = %v; want %v", i, tt.in, !tt.out, tt.out)
- }
- }
-}
-
-func BenchmarkRuneCountTenASCIIChars(b *testing.B) {
- for i := 0; i < b.N; i++ {
- RuneCountInString("0123456789")
- }
-}
-
-func BenchmarkRuneCountTenJapaneseChars(b *testing.B) {
- for i := 0; i < b.N; i++ {
- RuneCountInString("日本語日本語日本語日")
- }
-}
-
-func BenchmarkEncodeASCIIRune(b *testing.B) {
- buf := make([]byte, UTFMax)
- for i := 0; i < b.N; i++ {
- EncodeRune(buf, 'a')
- }
-}
-
-func BenchmarkEncodeJapaneseRune(b *testing.B) {
- buf := make([]byte, UTFMax)
- for i := 0; i < b.N; i++ {
- EncodeRune(buf, '本')
- }
-}
-
-func BenchmarkDecodeASCIIRune(b *testing.B) {
- a := []byte{'a'}
- for i := 0; i < b.N; i++ {
- DecodeRune(a)
- }
-}
-
-func BenchmarkDecodeJapaneseRune(b *testing.B) {
- nihon := []byte("本")
- for i := 0; i < b.N; i++ {
- DecodeRune(nihon)
- }
-}
"crypto/tls"
"io"
"net"
- "url"
+ "net/url"
)
// DialError is an error that occurs while dialling a websocket server.
"crypto/md5"
"encoding/binary"
"fmt"
- "http"
"io"
"io/ioutil"
- "rand"
+ "math/rand"
+ "net/http"
+ "net/url"
"strconv"
"strings"
- "url"
)
// An array of characters to be randomly inserted to construct Sec-WebSocket-Key
"bufio"
"bytes"
"fmt"
- "http"
"io"
+ "net/http"
+ "net/url"
"strings"
"testing"
- "url"
)
// Test the getChallengeResponse function with values from section
"encoding/base64"
"encoding/binary"
"fmt"
- "http"
"io"
"io/ioutil"
+ "net/http"
+ "net/url"
"strings"
- "url"
)
const (
"bufio"
"bytes"
"fmt"
- "http"
"io"
+ "net/http"
+ "net/url"
"strings"
"testing"
- "url"
)
// Test the getNonceAccept function with values in
import (
"bufio"
"fmt"
- "http"
"io"
+ "net/http"
)
func newServerConn(rwc io.ReadWriteCloser, buf *bufio.ReadWriter, req *http.Request) (conn *Conn, err error) {
fmt.Fprintf(buf, "Sec-WebSocket-Version: %s\r\n", SupportedProtocolVersion)
buf.WriteString("\r\n")
buf.WriteString(err.Error())
+ buf.Flush()
return
}
if err != nil {
fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code))
buf.WriteString("\r\n")
buf.WriteString(err.Error())
+ buf.Flush()
return
}
config.Protocol = nil
err = hs.AcceptHandshake(buf.Writer)
if err != nil {
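+ // The handshake was rejected: reply with 400 Bad Request and flush the
+ // buffered response so the client sees a proper HTTP error.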
+ code = http.StatusBadRequest
+ fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code))
+ buf.WriteString("\r\n")
+ buf.Flush()
return
}
conn = hs.NewServerConn(buf, rwc, req)
import (
"bufio"
"crypto/tls"
- "http"
+ "encoding/json"
"io"
"io/ioutil"
- "json"
"net"
+ "net/http"
+ "net/url"
"os"
"sync"
- "url"
)
const (
import (
"bytes"
"fmt"
- "http"
- "http/httptest"
"io"
"log"
"net"
+ "net/http"
+ "net/http/httptest"
+ "net/url"
"strings"
"sync"
"testing"
- "url"
)
var serverAddr string
once.Do(startServer)
// If the client did not send a handshake that matches the protocol
- // specification, the server should abort the WebSocket connection.
- _, err := http.Get(fmt.Sprintf("http://%s/echo", serverAddr))
- if err == nil {
- t.Error("Get: unexpected success")
+ // specification, the server MUST return an HTTP response with an
+ // appropriate error code (such as 400 Bad Request).
+ resp, err := http.Get(fmt.Sprintf("http://%s/echo", serverAddr))
+ if err != nil {
+ t.Errorf("Get: error %#v", err)
return
}
- urlerr, ok := err.(*url.Error)
- if !ok {
- t.Errorf("Get: not url.Error %#v", err)
+ if resp == nil {
+ t.Error("Get: resp is null")
return
}
- if urlerr.Err != io.ErrUnexpectedEOF {
- t.Errorf("Get: error %#v", err)
- return
+ if resp.StatusCode != http.StatusBadRequest {
+ t.Errorf("Get: expected %q got %q", http.StatusBadRequest, resp.StatusCode)
}
}
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package xml
-
-var atomValue = &Feed{
- Title: "Example Feed",
- Link: []Link{{Href: "http://example.org/"}},
- Updated: ParseTime("2003-12-13T18:30:02Z"),
- Author: Person{Name: "John Doe"},
- Id: "urn:uuid:60a76c80-d399-11d9-b93C-0003939e0af6",
-
- Entry: []Entry{
- {
- Title: "Atom-Powered Robots Run Amok",
- Link: []Link{{Href: "http://example.org/2003/12/13/atom03"}},
- Id: "urn:uuid:1225c695-cfb8-4ebb-aaaa-80da344efa6a",
- Updated: ParseTime("2003-12-13T18:30:02Z"),
- Summary: NewText("Some text."),
- },
- },
-}
-
-var atomXml = `` +
- `<feed xmlns="http://www.w3.org/2005/Atom">` +
- `<Title>Example Feed</Title>` +
- `<Id>urn:uuid:60a76c80-d399-11d9-b93C-0003939e0af6</Id>` +
- `<Link href="http://example.org/"></Link>` +
- `<Updated>2003-12-13T18:30:02Z</Updated>` +
- `<Author><Name>John Doe</Name><URI></URI><Email></Email></Author>` +
- `<Entry>` +
- `<Title>Atom-Powered Robots Run Amok</Title>` +
- `<Id>urn:uuid:1225c695-cfb8-4ebb-aaaa-80da344efa6a</Id>` +
- `<Link href="http://example.org/2003/12/13/atom03"></Link>` +
- `<Updated>2003-12-13T18:30:02Z</Updated>` +
- `<Author><Name></Name><URI></URI><Email></Email></Author>` +
- `<Summary>Some text.</Summary>` +
- `</Entry>` +
- `</feed>`
-
-func ParseTime(str string) Time {
- return Time(str)
-}
-
-func NewText(text string) Text {
- return Text{
- Body: text,
- }
-}
+++ /dev/null
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package xml
-
-import "testing"
-
-type C struct {
- Name string
- Open bool
-}
-
-type A struct {
- XMLName Name `xml:"http://domain a"`
- C
- B B
- FieldA string
-}
-
-type B struct {
- XMLName Name `xml:"b"`
- C
- FieldB string
-}
-
-const _1a = `
-<?xml version="1.0" encoding="UTF-8"?>
-<a xmlns="http://domain">
- <name>KmlFile</name>
- <open>1</open>
- <b>
- <name>Absolute</name>
- <open>0</open>
- <fieldb>bar</fieldb>
- </b>
- <fielda>foo</fielda>
-</a>
-`
-
-// Tests that embedded structs are unmarshalled.
-func TestEmbedded1(t *testing.T) {
- var a A
- if e := Unmarshal(StringReader(_1a), &a); e != nil {
- t.Fatalf("Unmarshal: %s", e)
- }
- if a.FieldA != "foo" {
- t.Fatalf("Unmarshal: expected 'foo' but found '%s'", a.FieldA)
- }
- if a.Name != "KmlFile" {
- t.Fatalf("Unmarshal: expected 'KmlFile' but found '%s'", a.Name)
- }
- if !a.Open {
- t.Fatal("Unmarshal: expected 'true' but found otherwise")
- }
- if a.B.FieldB != "bar" {
- t.Fatalf("Unmarshal: expected 'bar' but found '%s'", a.B.FieldB)
- }
- if a.B.Name != "Absolute" {
- t.Fatalf("Unmarshal: expected 'Absolute' but found '%s'", a.B.Name)
- }
- if a.B.Open {
- t.Fatal("Unmarshal: expected 'false' but found otherwise")
- }
-}
-
-type A2 struct {
- XMLName Name `xml:"http://domain a"`
- XY string
- Xy string
-}
-
-const _2a = `
-<?xml version="1.0" encoding="UTF-8"?>
-<a xmlns="http://domain">
- <xy>foo</xy>
-</a>
-`
-
-// Tests that conflicting field names get excluded.
-func TestEmbedded2(t *testing.T) {
- var a A2
- if e := Unmarshal(StringReader(_2a), &a); e != nil {
- t.Fatalf("Unmarshal: %s", e)
- }
- if a.XY != "" {
- t.Fatalf("Unmarshal: expected empty string but found '%s'", a.XY)
- }
- if a.Xy != "" {
- t.Fatalf("Unmarshal: expected empty string but found '%s'", a.Xy)
- }
-}
-
-type A3 struct {
- XMLName Name `xml:"http://domain a"`
- xy string
-}
-
-// Tests that private fields are not set.
-func TestEmbedded3(t *testing.T) {
- var a A3
- if e := Unmarshal(StringReader(_2a), &a); e != nil {
- t.Fatalf("Unmarshal: %s", e)
- }
- if a.xy != "" {
- t.Fatalf("Unmarshal: expected empty string but found '%s'", a.xy)
- }
-}
-
-type A4 struct {
- XMLName Name `xml:"http://domain a"`
- Any string
-}
-
-// Tests that unmatched sub-elements land in the field named Any.
-func TestEmbedded4(t *testing.T) {
- var a A4
- if e := Unmarshal(StringReader(_2a), &a); e != nil {
- t.Fatalf("Unmarshal: %s", e)
- }
- if a.Any != "foo" {
- t.Fatalf("Unmarshal: expected 'foo' but found '%s'", a.Any)
- }
-}
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package xml
-
-import (
- "bufio"
- "io"
- "reflect"
- "strconv"
- "strings"
-)
-
-const (
- // A generic XML header suitable for use with the output of Marshal and
- // MarshalIndent. This is not automatically added to any output of this
- // package; it is provided as a convenience.
- Header = `<?xml version="1.0" encoding="UTF-8"?>` + "\n"
-)
-
-// A Marshaler can produce well-formatted XML representing its internal state.
-// It is used by both Marshal and MarshalIndent.
-type Marshaler interface {
- MarshalXML() ([]byte, error)
-}
-
-type printer struct {
- *bufio.Writer
-}
-
-// Marshal writes an XML-formatted representation of v to w.
-//
-// If v implements Marshaler, then Marshal calls its MarshalXML method.
-// Otherwise, Marshal uses the following procedure to create the XML.
-//
-// Marshal handles an array or slice by marshalling each of the elements.
-// Marshal handles a pointer by marshalling the value it points at or, if the
-// pointer is nil, by writing nothing. Marshal handles an interface value by
-// marshalling the value it contains or, if the interface value is nil, by
-// writing nothing. Marshal handles all other data by writing one or more XML
-// elements containing the data.
-//
-// The name for the XML elements is taken from, in order of preference:
-// - the tag on an XMLName field, if the data is a struct
-// - the value of an XMLName field of type xml.Name
-// - the tag of the struct field used to obtain the data
-// - the name of the struct field used to obtain the data
-// - the name '???'.
-//
-// The XML element for a struct contains marshalled elements for each of the
-// exported fields of the struct, with these exceptions:
-// - the XMLName field, described above, is omitted.
-// - a field with tag "attr" becomes an attribute in the XML element.
-// - a field with tag "chardata" is written as character data,
-// not as an XML element.
-// - a field with tag "innerxml" is written verbatim,
-// not subject to the usual marshalling procedure.
-//
-// If a field uses a tag "a>b>c", then the element c will be nested inside
-// parent elements a and b. Fields that appear next to each other that name
-// the same parent will be enclosed in one XML element. For example:
-//
-// type Result struct {
-// XMLName xml.Name `xml:"result"`
-// FirstName string `xml:"person>name>first"`
-// LastName string `xml:"person>name>last"`
-// Age int `xml:"person>age"`
-// }
-//
-// xml.Marshal(w, &Result{FirstName: "John", LastName: "Doe", Age: 42})
-//
-// would be marshalled as:
-//
-// <result>
-// <person>
-// <name>
-// <first>John</first>
-// <last>Doe</last>
-// </name>
-// <age>42</age>
-// </person>
-// </result>
-//
-// Marshal will return an error if asked to marshal a channel, function, or map.
-func Marshal(w io.Writer, v interface{}) (err error) {
- p := &printer{bufio.NewWriter(w)}
- err = p.marshalValue(reflect.ValueOf(v), "???")
- p.Flush()
- return err
-}
-
-func (p *printer) marshalValue(val reflect.Value, name string) error {
- if !val.IsValid() {
- return nil
- }
-
- kind := val.Kind()
- typ := val.Type()
-
- // Try Marshaler
- if typ.NumMethod() > 0 {
- if marshaler, ok := val.Interface().(Marshaler); ok {
- bytes, err := marshaler.MarshalXML()
- if err != nil {
- return err
- }
- p.Write(bytes)
- return nil
- }
- }
-
- // Drill into pointers/interfaces
- if kind == reflect.Ptr || kind == reflect.Interface {
- if val.IsNil() {
- return nil
- }
- return p.marshalValue(val.Elem(), name)
- }
-
- // Slices and arrays iterate over the elements. They do not have an enclosing tag.
- if (kind == reflect.Slice || kind == reflect.Array) && typ.Elem().Kind() != reflect.Uint8 {
- for i, n := 0, val.Len(); i < n; i++ {
- if err := p.marshalValue(val.Index(i), name); err != nil {
- return err
- }
- }
- return nil
- }
-
- // Find XML name
- xmlns := ""
- if kind == reflect.Struct {
- if f, ok := typ.FieldByName("XMLName"); ok {
- if tag := f.Tag.Get("xml"); tag != "" {
- if i := strings.Index(tag, " "); i >= 0 {
- xmlns, name = tag[:i], tag[i+1:]
- } else {
- name = tag
- }
- } else if v, ok := val.FieldByIndex(f.Index).Interface().(Name); ok && v.Local != "" {
- xmlns, name = v.Space, v.Local
- }
- }
- }
-
- p.WriteByte('<')
- p.WriteString(name)
-
- // Attributes
- if kind == reflect.Struct {
- if len(xmlns) > 0 {
- p.WriteString(` xmlns="`)
- Escape(p, []byte(xmlns))
- p.WriteByte('"')
- }
-
- for i, n := 0, typ.NumField(); i < n; i++ {
- if f := typ.Field(i); f.PkgPath == "" && f.Tag.Get("xml") == "attr" {
- if f.Type.Kind() == reflect.String {
- if str := val.Field(i).String(); str != "" {
- p.WriteByte(' ')
- p.WriteString(strings.ToLower(f.Name))
- p.WriteString(`="`)
- Escape(p, []byte(str))
- p.WriteByte('"')
- }
- }
- }
- }
- }
- p.WriteByte('>')
-
- switch k := val.Kind(); k {
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- p.WriteString(strconv.Itoa64(val.Int()))
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- p.WriteString(strconv.Uitoa64(val.Uint()))
- case reflect.Float32, reflect.Float64:
- p.WriteString(strconv.Ftoa64(val.Float(), 'g', -1))
- case reflect.String:
- Escape(p, []byte(val.String()))
- case reflect.Bool:
- p.WriteString(strconv.Btoa(val.Bool()))
- case reflect.Array:
- // will be [...]byte
- bytes := make([]byte, val.Len())
- for i := range bytes {
- bytes[i] = val.Index(i).Interface().(byte)
- }
- Escape(p, bytes)
- case reflect.Slice:
- // will be []byte
- bytes := val.Interface().([]byte)
- Escape(p, bytes)
- case reflect.Struct:
- s := parentStack{printer: p}
- for i, n := 0, val.NumField(); i < n; i++ {
- if f := typ.Field(i); f.Name != "XMLName" && f.PkgPath == "" {
- name := f.Name
- vf := val.Field(i)
- switch tag := f.Tag.Get("xml"); tag {
- case "":
- s.trim(nil)
- case "chardata":
- if tk := f.Type.Kind(); tk == reflect.String {
- Escape(p, []byte(vf.String()))
- } else if tk == reflect.Slice {
- if elem, ok := vf.Interface().([]byte); ok {
- Escape(p, elem)
- }
- }
- continue
- case "innerxml":
- iface := vf.Interface()
- switch raw := iface.(type) {
- case []byte:
- p.Write(raw)
- continue
- case string:
- p.WriteString(raw)
- continue
- }
- case "attr":
- continue
- default:
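- // A tag of the form "a>b>c" nests the field inside parent
- // elements a and b; keep the last component as the element
- // name and open/close the parents via the parent stack.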
- parents := strings.Split(tag, ">")
- if len(parents) == 1 {
- parents, name = nil, tag
- } else {
- parents, name = parents[:len(parents)-1], parents[len(parents)-1]
- if parents[0] == "" {
- parents[0] = f.Name
- }
- }
-
- s.trim(parents)
- if !(vf.Kind() == reflect.Ptr || vf.Kind() == reflect.Interface) || !vf.IsNil() {
- s.push(parents[len(s.stack):])
- }
- }
-
- if err := p.marshalValue(vf, name); err != nil {
- return err
- }
- }
- }
- s.trim(nil)
- default:
- return &UnsupportedTypeError{typ}
- }
-
- p.WriteByte('<')
- p.WriteByte('/')
- p.WriteString(name)
- p.WriteByte('>')
-
- return nil
-}
-
-type parentStack struct {
- *printer
- stack []string
-}
-
-// trim updates the XML context to match the longest common prefix of the stack
-// and the given parents. A closing tag will be written for every parent
-// popped. Passing a zero slice or nil will close all the elements.
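-// For example, with stack [a b c] and parents [a x], trim writes </c></b>
-// and leaves the stack as [a].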
-func (s *parentStack) trim(parents []string) {
- split := 0
- for ; split < len(parents) && split < len(s.stack); split++ {
- if parents[split] != s.stack[split] {
- break
- }
- }
-
- for i := len(s.stack) - 1; i >= split; i-- {
- s.WriteString("</")
- s.WriteString(s.stack[i])
- s.WriteByte('>')
- }
-
- s.stack = parents[:split]
-}
-
-// push adds parent elements to the stack and writes open tags.
-func (s *parentStack) push(parents []string) {
- for i := 0; i < len(parents); i++ {
- s.WriteString("<")
- s.WriteString(parents[i])
- s.WriteByte('>')
- }
- s.stack = append(s.stack, parents...)
-}
-
-// A MarshalXMLError is returned when Marshal or MarshalIndent encounter a type
-// that cannot be converted into XML.
-type UnsupportedTypeError struct {
- Type reflect.Type
-}
-
-func (e *UnsupportedTypeError) Error() string {
- return "xml: unsupported type: " + e.Type.String()
-}
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package xml
-
-import (
- "bytes"
- "reflect"
- "strconv"
- "strings"
- "testing"
-)
-
-type DriveType int
-
-const (
- HyperDrive DriveType = iota
- ImprobabilityDrive
-)
-
-type Passenger struct {
- Name []string `xml:"name"`
- Weight float32 `xml:"weight"`
-}
-
-type Ship struct {
- XMLName Name `xml:"spaceship"`
-
- Name string `xml:"attr"`
- Pilot string `xml:"attr"`
- Drive DriveType `xml:"drive"`
- Age uint `xml:"age"`
- Passenger []*Passenger `xml:"passenger"`
- secret string
-}
-
-type RawXML string
-
-func (rx RawXML) MarshalXML() ([]byte, error) {
- return []byte(rx), nil
-}
-
-type NamedType string
-
-type Port struct {
- XMLName Name `xml:"port"`
- Type string `xml:"attr"`
- Number string `xml:"chardata"`
-}
-
-type Domain struct {
- XMLName Name `xml:"domain"`
- Country string `xml:"attr"`
- Name []byte `xml:"chardata"`
-}
-
-type Book struct {
- XMLName Name `xml:"book"`
- Title string `xml:"chardata"`
-}
-
-type SecretAgent struct {
- XMLName Name `xml:"agent"`
- Handle string `xml:"attr"`
- Identity string
- Obfuscate string `xml:"innerxml"`
-}
-
-type NestedItems struct {
- XMLName Name `xml:"result"`
- Items []string `xml:">item"`
- Item1 []string `xml:"Items>item1"`
-}
-
-type NestedOrder struct {
- XMLName Name `xml:"result"`
- Field1 string `xml:"parent>c"`
- Field2 string `xml:"parent>b"`
- Field3 string `xml:"parent>a"`
-}
-
-type MixedNested struct {
- XMLName Name `xml:"result"`
- A string `xml:"parent1>a"`
- B string `xml:"b"`
- C string `xml:"parent1>parent2>c"`
- D string `xml:"parent1>d"`
-}
-
-type NilTest struct {
- A interface{} `xml:"parent1>parent2>a"`
- B interface{} `xml:"parent1>b"`
- C interface{} `xml:"parent1>parent2>c"`
-}
-
-type Service struct {
- XMLName Name `xml:"service"`
- Domain *Domain `xml:"host>domain"`
- Port *Port `xml:"host>port"`
- Extra1 interface{}
- Extra2 interface{} `xml:"host>extra2"`
-}
-
-var nilStruct *Ship
-
-var marshalTests = []struct {
- Value interface{}
- ExpectXML string
-}{
- // Test nil marshals to nothing
- {Value: nil, ExpectXML: ``},
- {Value: nilStruct, ExpectXML: ``},
-
- // Test value types (no tag name, so ???)
- {Value: true, ExpectXML: `<???>true</???>`},
- {Value: int(42), ExpectXML: `<???>42</???>`},
- {Value: int8(42), ExpectXML: `<???>42</???>`},
- {Value: int16(42), ExpectXML: `<???>42</???>`},
- {Value: int32(42), ExpectXML: `<???>42</???>`},
- {Value: uint(42), ExpectXML: `<???>42</???>`},
- {Value: uint8(42), ExpectXML: `<???>42</???>`},
- {Value: uint16(42), ExpectXML: `<???>42</???>`},
- {Value: uint32(42), ExpectXML: `<???>42</???>`},
- {Value: float32(1.25), ExpectXML: `<???>1.25</???>`},
- {Value: float64(1.25), ExpectXML: `<???>1.25</???>`},
- {Value: uintptr(0xFFDD), ExpectXML: `<???>65501</???>`},
- {Value: "gopher", ExpectXML: `<???>gopher</???>`},
- {Value: []byte("gopher"), ExpectXML: `<???>gopher</???>`},
- {Value: "</>", ExpectXML: `<???></></???>`},
- {Value: []byte("</>"), ExpectXML: `<???></></???>`},
- {Value: [3]byte{'<', '/', '>'}, ExpectXML: `<???></></???>`},
- {Value: NamedType("potato"), ExpectXML: `<???>potato</???>`},
- {Value: []int{1, 2, 3}, ExpectXML: `<???>1</???><???>2</???><???>3</???>`},
- {Value: [3]int{1, 2, 3}, ExpectXML: `<???>1</???><???>2</???><???>3</???>`},
-
- // Test innerxml
- {Value: RawXML("</>"), ExpectXML: `</>`},
- {
- Value: &SecretAgent{
- Handle: "007",
- Identity: "James Bond",
- Obfuscate: "<redacted/>",
- },
- //ExpectXML: `<agent handle="007"><redacted/></agent>`,
- ExpectXML: `<agent handle="007"><Identity>James Bond</Identity><redacted/></agent>`,
- },
-
- // Test structs
- {Value: &Port{Type: "ssl", Number: "443"}, ExpectXML: `<port type="ssl">443</port>`},
- {Value: &Port{Number: "443"}, ExpectXML: `<port>443</port>`},
- {Value: &Port{Type: "<unix>"}, ExpectXML: `<port type="<unix>"></port>`},
- {Value: &Domain{Name: []byte("google.com&friends")}, ExpectXML: `<domain>google.com&friends</domain>`},
- {Value: &Book{Title: "Pride & Prejudice"}, ExpectXML: `<book>Pride & Prejudice</book>`},
- {Value: atomValue, ExpectXML: atomXml},
- {
- Value: &Ship{
- Name: "Heart of Gold",
- Pilot: "Computer",
- Age: 1,
- Drive: ImprobabilityDrive,
- Passenger: []*Passenger{
- &Passenger{
- Name: []string{"Zaphod", "Beeblebrox"},
- Weight: 7.25,
- },
- &Passenger{
- Name: []string{"Trisha", "McMillen"},
- Weight: 5.5,
- },
- &Passenger{
- Name: []string{"Ford", "Prefect"},
- Weight: 7,
- },
- &Passenger{
- Name: []string{"Arthur", "Dent"},
- Weight: 6.75,
- },
- },
- },
- ExpectXML: `<spaceship name="Heart of Gold" pilot="Computer">` +
- `<drive>` + strconv.Itoa(int(ImprobabilityDrive)) + `</drive>` +
- `<age>1</age>` +
- `<passenger>` +
- `<name>Zaphod</name>` +
- `<name>Beeblebrox</name>` +
- `<weight>7.25</weight>` +
- `</passenger>` +
- `<passenger>` +
- `<name>Trisha</name>` +
- `<name>McMillen</name>` +
- `<weight>5.5</weight>` +
- `</passenger>` +
- `<passenger>` +
- `<name>Ford</name>` +
- `<name>Prefect</name>` +
- `<weight>7</weight>` +
- `</passenger>` +
- `<passenger>` +
- `<name>Arthur</name>` +
- `<name>Dent</name>` +
- `<weight>6.75</weight>` +
- `</passenger>` +
- `</spaceship>`,
- },
- // Test a>b
- {
- Value: NestedItems{Items: []string{}, Item1: []string{}},
- ExpectXML: `<result>` +
- `<Items>` +
- `</Items>` +
- `</result>`,
- },
- {
- Value: NestedItems{Items: []string{}, Item1: []string{"A"}},
- ExpectXML: `<result>` +
- `<Items>` +
- `<item1>A</item1>` +
- `</Items>` +
- `</result>`,
- },
- {
- Value: NestedItems{Items: []string{"A", "B"}, Item1: []string{}},
- ExpectXML: `<result>` +
- `<Items>` +
- `<item>A</item>` +
- `<item>B</item>` +
- `</Items>` +
- `</result>`,
- },
- {
- Value: NestedItems{Items: []string{"A", "B"}, Item1: []string{"C"}},
- ExpectXML: `<result>` +
- `<Items>` +
- `<item>A</item>` +
- `<item>B</item>` +
- `<item1>C</item1>` +
- `</Items>` +
- `</result>`,
- },
- {
- Value: NestedOrder{Field1: "C", Field2: "B", Field3: "A"},
- ExpectXML: `<result>` +
- `<parent>` +
- `<c>C</c>` +
- `<b>B</b>` +
- `<a>A</a>` +
- `</parent>` +
- `</result>`,
- },
- {
- Value: NilTest{A: "A", B: nil, C: "C"},
- ExpectXML: `<???>` +
- `<parent1>` +
- `<parent2><a>A</a></parent2>` +
- `<parent2><c>C</c></parent2>` +
- `</parent1>` +
- `</???>`,
- },
- {
- Value: MixedNested{A: "A", B: "B", C: "C", D: "D"},
- ExpectXML: `<result>` +
- `<parent1><a>A</a></parent1>` +
- `<b>B</b>` +
- `<parent1>` +
- `<parent2><c>C</c></parent2>` +
- `<d>D</d>` +
- `</parent1>` +
- `</result>`,
- },
- {
- Value: Service{Port: &Port{Number: "80"}},
- ExpectXML: `<service><host><port>80</port></host></service>`,
- },
- {
- Value: Service{},
- ExpectXML: `<service></service>`,
- },
- {
- Value: Service{Port: &Port{Number: "80"}, Extra1: "A", Extra2: "B"},
- ExpectXML: `<service>` +
- `<host><port>80</port></host>` +
- `<Extra1>A</Extra1>` +
- `<host><extra2>B</extra2></host>` +
- `</service>`,
- },
- {
- Value: Service{Port: &Port{Number: "80"}, Extra2: "example"},
- ExpectXML: `<service>` +
- `<host><port>80</port></host>` +
- `<host><extra2>example</extra2></host>` +
- `</service>`,
- },
-}
-
-func TestMarshal(t *testing.T) {
- for idx, test := range marshalTests {
- buf := bytes.NewBuffer(nil)
- err := Marshal(buf, test.Value)
- if err != nil {
- t.Errorf("#%d: Error: %s", idx, err)
- continue
- }
- if got, want := buf.String(), test.ExpectXML; got != want {
- if strings.Contains(want, "\n") {
- t.Errorf("#%d: marshal(%#v) - GOT:\n%s\nWANT:\n%s", idx, test.Value, got, want)
- } else {
- t.Errorf("#%d: marshal(%#v) = %#q want %#q", idx, test.Value, got, want)
- }
- }
- }
-}
-
-var marshalErrorTests = []struct {
- Value interface{}
- Err string
- Kind reflect.Kind
-}{
- {
- Value: make(chan bool),
- Err: "xml: unsupported type: chan bool",
- Kind: reflect.Chan,
- },
- {
- Value: map[string]string{
- "question": "What do you get when you multiply six by nine?",
- "answer": "42",
- },
- Err: "xml: unsupported type: map[string] string",
- Kind: reflect.Map,
- },
- {
- Value: map[*Ship]bool{nil: false},
- Err: "xml: unsupported type: map[*xml.Ship] bool",
- Kind: reflect.Map,
- },
-}
-
-func TestMarshalErrors(t *testing.T) {
- for idx, test := range marshalErrorTests {
- buf := bytes.NewBuffer(nil)
- err := Marshal(buf, test.Value)
- if err == nil || err.Error() != test.Err {
- t.Errorf("#%d: marshal(%#v) = [error] %q, want %q", idx, test.Value, err, test.Err)
- }
- if kind := err.(*UnsupportedTypeError).Type.Kind(); kind != test.Kind {
- t.Errorf("#%d: marshal(%#v) = [error kind] %s, want %s", idx, test.Value, kind, test.Kind)
- }
- }
-}
-
-// Do invertibility testing on the various structures that we test
-func TestUnmarshal(t *testing.T) {
- for i, test := range marshalTests {
- // Skip the nil pointers
- if i <= 1 {
- continue
- }
-
- var dest interface{}
-
- switch test.Value.(type) {
- case *Ship, Ship:
- dest = &Ship{}
- case *Port, Port:
- dest = &Port{}
- case *Domain, Domain:
- dest = &Domain{}
- case *Feed, Feed:
- dest = &Feed{}
- default:
- continue
- }
-
- buffer := bytes.NewBufferString(test.ExpectXML)
- err := Unmarshal(buffer, dest)
-
- // Don't compare XMLNames
- switch fix := dest.(type) {
- case *Ship:
- fix.XMLName = Name{}
- case *Port:
- fix.XMLName = Name{}
- case *Domain:
- fix.XMLName = Name{}
- case *Feed:
- fix.XMLName = Name{}
- fix.Author.InnerXML = ""
- for i := range fix.Entry {
- fix.Entry[i].Author.InnerXML = ""
- }
- }
-
- if err != nil {
- t.Errorf("#%d: unexpected error: %#v", i, err)
- } else if got, want := dest, test.Value; !reflect.DeepEqual(got, want) {
- t.Errorf("#%d: unmarshal(%#s) = %#v, want %#v", i, test.ExpectXML, got, want)
- }
- }
-}
-
-func BenchmarkMarshal(b *testing.B) {
- idx := len(marshalTests) - 1
- test := marshalTests[idx]
-
- buf := bytes.NewBuffer(nil)
- for i := 0; i < b.N; i++ {
- Marshal(buf, test.Value)
- buf.Truncate(0)
- }
-}
-
-func BenchmarkUnmarshal(b *testing.B) {
- idx := len(marshalTests) - 1
- test := marshalTests[idx]
- sm := &Ship{}
- xml := []byte(test.ExpectXML)
-
- for i := 0; i < b.N; i++ {
- buffer := bytes.NewBuffer(xml)
- Unmarshal(buffer, sm)
- }
-}
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package xml
-
-import (
- "bytes"
- "errors"
- "fmt"
- "io"
- "reflect"
- "strconv"
- "strings"
- "unicode"
- "utf8"
-)
-
-// BUG(rsc): Mapping between XML elements and data structures is inherently flawed:
-// an XML element is an order-dependent collection of anonymous
-// values, while a data structure is an order-independent collection
-// of named values.
-// See package json for a textual representation more suitable
-// to data structures.
-
-// Unmarshal parses an XML element from r and uses the
-// reflect library to fill in an arbitrary struct, slice, or string
-// pointed at by val. Well-formed data that does not fit
-// into val is discarded.
-//
-// For example, given these definitions:
-//
-// type Email struct {
-// Where string `xml:"attr"`
-// Addr string
-// }
-//
-// type Result struct {
-// XMLName xml.Name `xml:"result"`
-// Name string
-// Phone string
-// Email []Email
-// Groups []string `xml:"group>value"`
-// }
-//
-// result := Result{Name: "name", Phone: "phone", Email: nil}
-//
-// unmarshalling the XML input
-//
-// <result>
-// <email where="home">
-// <addr>gre@example.com</addr>
-// </email>
-// <email where='work'>
-// <addr>gre@work.com</addr>
-// </email>
-// <name>Grace R. Emlin</name>
-// <group>
-// <value>Friends</value>
-// <value>Squash</value>
-// </group>
-// <address>123 Main Street</address>
-// </result>
-//
-// via Unmarshal(r, &result) is equivalent to assigning
-//
-// result = Result{xml.Name{"", "result"},
-// "Grace R. Emlin", // name
-// "phone", // no phone given
-// []Email{
-// Email{"home", "gre@example.com"},
-// Email{"work", "gre@work.com"},
-// },
-// []string{"Friends", "Squash"},
-// }
-//
-// Note that the field result.Phone has not been modified and
-// that the XML <address> element was discarded. Also, the field
-// Groups was assigned considering the element path provided in the
-// field tag.
-//
-// Because Unmarshal uses the reflect package, it can only assign
-// to exported (upper case) fields. Unmarshal uses a case-insensitive
-// comparison to match XML element names to struct field names.
-//
-// Unmarshal maps an XML element to a struct using the following rules.
-// In the rules, the tag of a field refers to the value associated with the
-// key 'xml' in the struct field's tag (see the example above).
-//
-// * If the struct has a field of type []byte or string with tag "innerxml",
-// Unmarshal accumulates the raw XML nested inside the element
-// in that field. The rest of the rules still apply.
-//
-// * If the struct has a field named XMLName of type xml.Name,
-// Unmarshal records the element name in that field.
-//
-// * If the XMLName field has an associated tag of the form
-// "name" or "namespace-URL name", the XML element must have
-// the given name (and, optionally, name space) or else Unmarshal
-// returns an error.
-//
-// * If the XML element has an attribute whose name matches a
-// struct field of type string with tag "attr", Unmarshal records
-// the attribute value in that field.
-//
-// * If the XML element contains character data, that data is
-// accumulated in the first struct field that has tag "chardata".
-// The struct field may have type []byte or string.
-// If there is no such field, the character data is discarded.
-//
-// * If the XML element contains comments, they are accumulated in
-// the first struct field that has tag "comments". The struct
-// field may have type []byte or string. If there is no such
-// field, the comments are discarded.
-//
-// * If the XML element contains a sub-element whose name matches
-// the prefix of a tag formatted as "a>b>c", unmarshal
-// will descend into the XML structure looking for elements with the
-// given names, and will map the innermost elements to that struct field.
-// A tag starting with ">" is equivalent to one starting
-// with the field name followed by ">".
-//
-// * If the XML element contains a sub-element whose name
-// matches a field whose tag is neither "attr" nor "chardata",
-// Unmarshal maps the sub-element to that struct field.
-// Otherwise, if the struct has a field named Any, unmarshal
-// maps the sub-element to that struct field.
-//
-// Unmarshal maps an XML element to a string or []byte by saving the
-// concatenation of that element's character data in the string or
-// []byte.
-//
-// Unmarshal maps an attribute value to a string or []byte by saving
-// the value in the string or slice.
-//
-// Unmarshal maps an XML element to a slice by extending the length of
-// the slice and mapping the element to the newly created value.
-//
-// Unmarshal maps an XML element or attribute value to a bool by
-// setting it to the boolean value represented by the string.
-//
-// Unmarshal maps an XML element or attribute value to an integer or
-// floating-point field by setting the field to the result of
-// interpreting the string value in decimal. There is no check for
-// overflow.
-//
-// Unmarshal maps an XML element to an xml.Name by recording the
-// element name.
-//
-// Unmarshal maps an XML element to a pointer by setting the pointer
-// to a freshly allocated value and then mapping the element to that value.
-//
-func Unmarshal(r io.Reader, val interface{}) error {
- v := reflect.ValueOf(val)
- if v.Kind() != reflect.Ptr {
- return errors.New("non-pointer passed to Unmarshal")
- }
- p := NewParser(r)
- elem := v.Elem()
- err := p.unmarshal(elem, nil)
- if err != nil {
- return err
- }
- return nil
-}
-
-// An UnmarshalError represents an error in the unmarshalling process.
-type UnmarshalError string
-
-func (e UnmarshalError) Error() string { return string(e) }
-
-// A TagPathError represents an error in the unmarshalling process
-// caused by the use of field tags with conflicting paths.
-type TagPathError struct {
- Struct reflect.Type
- Field1, Tag1 string
- Field2, Tag2 string
-}
-
-func (e *TagPathError) Error() string {
- return fmt.Sprintf("%s field %q with tag %q conflicts with field %q with tag %q", e.Struct, e.Field1, e.Tag1, e.Field2, e.Tag2)
-}
-
-// The Parser's Unmarshal method is like xml.Unmarshal
-// except that it can be passed a pointer to the initial start element,
-// useful when a client reads some raw XML tokens itself
-// but also defers to Unmarshal for some elements.
-// Passing a nil start element indicates that Unmarshal should
-// read the token stream to find the start element.
-func (p *Parser) Unmarshal(val interface{}, start *StartElement) error {
- v := reflect.ValueOf(val)
- if v.Kind() != reflect.Ptr {
- return errors.New("non-pointer passed to Unmarshal")
- }
- return p.unmarshal(v.Elem(), start)
-}
-
-// fieldName strips invalid characters from an XML name
-// to create a valid Go struct name. It also converts the
-// name to lower case letters.
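-// For example, fieldName("_Sec-WebSocket-Key") returns "secwebsocketkey".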
-func fieldName(original string) string {
-
- var i int
- // remove leading underscores
- for i = 0; i < len(original) && original[i] == '_'; i++ {
- }
-
- return strings.Map(
- func(x rune) rune {
- if x == '_' || unicode.IsDigit(x) || unicode.IsLetter(x) {
- return unicode.ToLower(x)
- }
- return -1
- },
- original[i:])
-}
-
-// Unmarshal a single XML element into val.
-func (p *Parser) unmarshal(val reflect.Value, start *StartElement) error {
- // Find start element if we need it.
- if start == nil {
- for {
- tok, err := p.Token()
- if err != nil {
- return err
- }
- if t, ok := tok.(StartElement); ok {
- start = &t
- break
- }
- }
- }
-
- if pv := val; pv.Kind() == reflect.Ptr {
- if pv.IsNil() {
- pv.Set(reflect.New(pv.Type().Elem()))
- }
- val = pv.Elem()
- }
-
- var (
- data []byte
- saveData reflect.Value
- comment []byte
- saveComment reflect.Value
- saveXML reflect.Value
- saveXMLIndex int
- saveXMLData []byte
- sv reflect.Value
- styp reflect.Type
- fieldPaths map[string]pathInfo
- )
-
- switch v := val; v.Kind() {
- default:
- return errors.New("unknown type " + v.Type().String())
-
- case reflect.Slice:
- typ := v.Type()
- if typ.Elem().Kind() == reflect.Uint8 {
- // []byte
- saveData = v
- break
- }
-
- // Slice of element values.
- // Grow slice.
- n := v.Len()
- if n >= v.Cap() {
- ncap := 2 * n
- if ncap < 4 {
- ncap = 4
- }
- new := reflect.MakeSlice(typ, n, ncap)
- reflect.Copy(new, v)
- v.Set(new)
- }
- v.SetLen(n + 1)
-
- // Recur to read element into slice.
- if err := p.unmarshal(v.Index(n), start); err != nil {
- v.SetLen(n)
- return err
- }
- return nil
-
- case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, reflect.String:
- saveData = v
-
- case reflect.Struct:
- if _, ok := v.Interface().(Name); ok {
- v.Set(reflect.ValueOf(start.Name))
- break
- }
-
- sv = v
- typ := sv.Type()
- styp = typ
- // Assign name.
- if f, ok := typ.FieldByName("XMLName"); ok {
- // Validate element name.
- if tag := f.Tag.Get("xml"); tag != "" {
- ns := ""
- i := strings.LastIndex(tag, " ")
- if i >= 0 {
- ns, tag = tag[0:i], tag[i+1:]
- }
- if tag != start.Name.Local {
- return UnmarshalError("expected element type <" + tag + "> but have <" + start.Name.Local + ">")
- }
- if ns != "" && ns != start.Name.Space {
- e := "expected element <" + tag + "> in name space " + ns + " but have "
- if start.Name.Space == "" {
- e += "no name space"
- } else {
- e += start.Name.Space
- }
- return UnmarshalError(e)
- }
- }
-
- // Save
- v := sv.FieldByIndex(f.Index)
- if _, ok := v.Interface().(Name); ok {
- v.Set(reflect.ValueOf(start.Name))
- }
- }
-
- // Assign attributes.
- // Also, determine whether we need to save character data or comments.
- for i, n := 0, typ.NumField(); i < n; i++ {
- f := typ.Field(i)
- switch f.Tag.Get("xml") {
- case "attr":
- strv := sv.FieldByIndex(f.Index)
- // Look for attribute.
- val := ""
- k := strings.ToLower(f.Name)
- for _, a := range start.Attr {
- if fieldName(a.Name.Local) == k {
- val = a.Value
- break
- }
- }
- copyValue(strv, []byte(val))
-
- case "comment":
- if !saveComment.IsValid() {
- saveComment = sv.FieldByIndex(f.Index)
- }
-
- case "chardata":
- if !saveData.IsValid() {
- saveData = sv.FieldByIndex(f.Index)
- }
-
- case "innerxml":
- if !saveXML.IsValid() {
- saveXML = sv.FieldByIndex(f.Index)
- if p.saved == nil {
- saveXMLIndex = 0
- p.saved = new(bytes.Buffer)
- } else {
- saveXMLIndex = p.savedOffset()
- }
- }
-
- default:
- if tag := f.Tag.Get("xml"); strings.Contains(tag, ">") {
- if fieldPaths == nil {
- fieldPaths = make(map[string]pathInfo)
- }
- path := strings.ToLower(tag)
- if strings.HasPrefix(tag, ">") {
- path = strings.ToLower(f.Name) + path
- }
- if strings.HasSuffix(tag, ">") {
- path = path[:len(path)-1]
- }
- err := addFieldPath(sv, fieldPaths, path, f.Index)
- if err != nil {
- return err
- }
- }
- }
- }
- }
-
- // Find end element.
- // Process sub-elements along the way.
-Loop:
- for {
- var savedOffset int
- if saveXML.IsValid() {
- savedOffset = p.savedOffset()
- }
- tok, err := p.Token()
- if err != nil {
- return err
- }
- switch t := tok.(type) {
- case StartElement:
- // Sub-element.
- // Look up by tag name.
- if sv.IsValid() {
- k := fieldName(t.Name.Local)
-
- if fieldPaths != nil {
- if _, found := fieldPaths[k]; found {
- if err := p.unmarshalPaths(sv, fieldPaths, k, &t); err != nil {
- return err
- }
- continue Loop
- }
- }
-
- match := func(s string) bool {
- // check if the name matches ignoring case
- if strings.ToLower(s) != k {
- return false
- }
- // now check that it's public
- c, _ := utf8.DecodeRuneInString(s)
- return unicode.IsUpper(c)
- }
-
- f, found := styp.FieldByNameFunc(match)
- if !found { // fall back to mop-up field named "Any"
- f, found = styp.FieldByName("Any")
- }
- if found {
- if err := p.unmarshal(sv.FieldByIndex(f.Index), &t); err != nil {
- return err
- }
- continue Loop
- }
- }
- // Not saving sub-element but still have to skip over it.
- if err := p.Skip(); err != nil {
- return err
- }
-
- case EndElement:
- if saveXML.IsValid() {
- saveXMLData = p.saved.Bytes()[saveXMLIndex:savedOffset]
- if saveXMLIndex == 0 {
- p.saved = nil
- }
- }
- break Loop
-
- case CharData:
- if saveData.IsValid() {
- data = append(data, t...)
- }
-
- case Comment:
- if saveComment.IsValid() {
- comment = append(comment, t...)
- }
- }
- }
-
- if err := copyValue(saveData, data); err != nil {
- return err
- }
-
- switch t := saveComment; t.Kind() {
- case reflect.String:
- t.SetString(string(comment))
- case reflect.Slice:
- t.Set(reflect.ValueOf(comment))
- }
-
- switch t := saveXML; t.Kind() {
- case reflect.String:
- t.SetString(string(saveXMLData))
- case reflect.Slice:
- t.Set(reflect.ValueOf(saveXMLData))
- }
-
- return nil
-}
-
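-// copyValue stores the accumulated character data src into dst, converting
-// the text to dst's kind: integer, unsigned, float, bool, string, or []byte.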
-func copyValue(dst reflect.Value, src []byte) (err error) {
- // Helper functions for integer and unsigned integer conversions
- var itmp int64
- getInt64 := func() bool {
- itmp, err = strconv.Atoi64(string(src))
- // TODO: should check sizes
- return err == nil
- }
- var utmp uint64
- getUint64 := func() bool {
- utmp, err = strconv.Atoui64(string(src))
- // TODO: check for overflow?
- return err == nil
- }
- var ftmp float64
- getFloat64 := func() bool {
- ftmp, err = strconv.Atof64(string(src))
- // TODO: check for overflow?
- return err == nil
- }
-
- // Save accumulated data and comments
- switch t := dst; t.Kind() {
- case reflect.Invalid:
- // Probably a comment, handled below
- default:
- return errors.New("cannot happen: unknown type " + t.Type().String())
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- if !getInt64() {
- return err
- }
- t.SetInt(itmp)
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- if !getUint64() {
- return err
- }
- t.SetUint(utmp)
- case reflect.Float32, reflect.Float64:
- if !getFloat64() {
- return err
- }
- t.SetFloat(ftmp)
- case reflect.Bool:
- value, err := strconv.Atob(strings.TrimSpace(string(src)))
- if err != nil {
- return err
- }
- t.SetBool(value)
- case reflect.String:
- t.SetString(string(src))
- case reflect.Slice:
- t.Set(reflect.ValueOf(src))
- }
- return nil
-}
-
-type pathInfo struct {
- fieldIdx []int
- complete bool
-}
-
-// addFieldPath takes an element path such as "a>b>c" and fills the
-// paths map with all paths leading to it ("a", "a>b", and "a>b>c").
-// It is okay for paths to share a common, shorter prefix but not ok
-// for one path to itself be a prefix of another.
-func addFieldPath(sv reflect.Value, paths map[string]pathInfo, path string, fieldIdx []int) error {
- if info, found := paths[path]; found {
- return tagError(sv, info.fieldIdx, fieldIdx)
- }
- paths[path] = pathInfo{fieldIdx, true}
- for {
- i := strings.LastIndex(path, ">")
- if i < 0 {
- break
- }
- path = path[:i]
- if info, found := paths[path]; found {
- if info.complete {
- return tagError(sv, info.fieldIdx, fieldIdx)
- }
- } else {
- paths[path] = pathInfo{fieldIdx, false}
- }
- }
- return nil
-
-}
-
-func tagError(sv reflect.Value, idx1 []int, idx2 []int) error {
- t := sv.Type()
- f1 := t.FieldByIndex(idx1)
- f2 := t.FieldByIndex(idx2)
- return &TagPathError{t, f1.Name, f1.Tag.Get("xml"), f2.Name, f2.Tag.Get("xml")}
-}
-
-// unmarshalPaths walks down an XML structure looking for
-// wanted paths, and calls unmarshal on them.
-func (p *Parser) unmarshalPaths(sv reflect.Value, paths map[string]pathInfo, path string, start *StartElement) error {
- if info, _ := paths[path]; info.complete {
- return p.unmarshal(sv.FieldByIndex(info.fieldIdx), start)
- }
- for {
- tok, err := p.Token()
- if err != nil {
- return err
- }
- switch t := tok.(type) {
- case StartElement:
- k := path + ">" + fieldName(t.Name.Local)
- if _, found := paths[k]; found {
- if err := p.unmarshalPaths(sv, paths, k, &t); err != nil {
- return err
- }
- continue
- }
- if err := p.Skip(); err != nil {
- return err
- }
- case EndElement:
- return nil
- }
- }
- panic("unreachable")
-}
-
-// Have already read a start element.
-// Read tokens until we find the end element.
-// Token is taking care of making sure the
-// end element matches the start element we saw.
-func (p *Parser) Skip() error {
- for {
- tok, err := p.Token()
- if err != nil {
- return err
- }
- switch t := tok.(type) {
- case StartElement:
- if err := p.Skip(); err != nil {
- return err
- }
- case EndElement:
- return nil
- }
- }
- panic("unreachable")
-}
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package xml
-
-import (
- "reflect"
- "testing"
-)
-
-// Stripped down Atom feed data structures.
-
-func TestUnmarshalFeed(t *testing.T) {
- var f Feed
- if err := Unmarshal(StringReader(atomFeedString), &f); err != nil {
- t.Fatalf("Unmarshal: %s", err)
- }
- if !reflect.DeepEqual(f, atomFeed) {
- t.Fatalf("have %#v\nwant %#v", f, atomFeed)
- }
-}
-
-// hget http://codereview.appspot.com/rss/mine/rsc
-const atomFeedString = `
-<?xml version="1.0" encoding="utf-8"?>
-<feed xmlns="http://www.w3.org/2005/Atom" xml:lang="en-us"><title>Code Review - My issues</title><link href="http://codereview.appspot.com/" rel="alternate"></link><li-nk href="http://codereview.appspot.com/rss/mine/rsc" rel="self"></li-nk><id>http://codereview.appspot.com/</id><updated>2009-10-04T01:35:58+00:00</updated><author><name>rietveld&lt;&gt;</name></author><entry><title>rietveld: an attempt at pubsubhubbub
-</title><link hre-f="http://codereview.appspot.com/126085" rel="alternate"></link><updated>2009-10-04T01:35:58+00:00</updated><author><name>email-address-removed</name></author><id>urn:md5:134d9179c41f806be79b3a5f7877d19a</id><summary type="html">
- An attempt at adding pubsubhubbub support to Rietveld.
-http://code.google.com/p/pubsubhubbub
-http://code.google.com/p/rietveld/issues/detail?id=155
-
-The server side of the protocol is trivial:
- 1. add a &lt;link rel=&quot;hub&quot; href=&quot;hub-server&quot;&gt; tag to all
- feeds that will be pubsubhubbubbed.
- 2. every time one of those feeds changes, tell the hub
- with a simple POST request.
-
-I have tested this by adding debug prints to a local hub
-server and checking that the server got the right publish
-requests.
-
-I can&#39;t quite get the server to work, but I think the bug
-is not in my code. I think that the server expects to be
-able to grab the feed and see the feed&#39;s actual URL in
-the link rel=&quot;self&quot;, but the default value for that drops
-the :port from the URL, and I cannot for the life of me
-figure out how to get the Atom generator deep inside
-django not to do that, or even where it is doing that,
-or even what code is running to generate the Atom feed.
-(I thought I knew but I added some assert False statements
-and it kept running!)
-
-Ignoring that particular problem, I would appreciate
-feedback on the right way to get the two values at
-the top of feeds.py marked NOTE(rsc).
-
-
-</summary></entry><entry><title>rietveld: correct tab handling
-</title><link href="http://codereview.appspot.com/124106" rel="alternate"></link><updated>2009-10-03T23:02:17+00:00</updated><author><name>email-address-removed</name></author><id>urn:md5:0a2a4f19bb815101f0ba2904aed7c35a</id><summary type="html">
- This fixes the buggy tab rendering that can be seen at
-http://codereview.appspot.com/116075/diff/1/2
-
-The fundamental problem was that the tab code was
-not being told what column the text began in, so it
-didn&#39;t know where to put the tab stops. Another problem
-was that some of the code assumed that string byte
-offsets were the same as column offsets, which is only
-true if there are no tabs.
-
-In the process of fixing this, I cleaned up the arguments
-to Fold and ExpandTabs and renamed them Break and
-_ExpandTabs so that I could be sure that I found all the
-call sites. I also wanted to verify that ExpandTabs was
-not being used from outside intra_region_diff.py.
-
-
-</summary></entry></feed> `
-
-type Feed struct {
- XMLName Name `xml:"http://www.w3.org/2005/Atom feed"`
- Title string
- Id string
- Link []Link
- Updated Time
- Author Person
- Entry []Entry
-}
-
-type Entry struct {
- Title string
- Id string
- Link []Link
- Updated Time
- Author Person
- Summary Text
-}
-
-type Link struct {
- Rel string `xml:"attr"`
- Href string `xml:"attr"`
-}
-
-type Person struct {
- Name string
- URI string
- Email string
- InnerXML string `xml:"innerxml"`
-}
-
-type Text struct {
- Type string `xml:"attr"`
- Body string `xml:"chardata"`
-}
-
-type Time string
-
-var atomFeed = Feed{
- XMLName: Name{"http://www.w3.org/2005/Atom", "feed"},
- Title: "Code Review - My issues",
- Link: []Link{
- {Rel: "alternate", Href: "http://codereview.appspot.com/"},
- {Rel: "self", Href: "http://codereview.appspot.com/rss/mine/rsc"},
- },
- Id: "http://codereview.appspot.com/",
- Updated: "2009-10-04T01:35:58+00:00",
- Author: Person{
- Name: "rietveld<>",
- InnerXML: "<name>rietveld&lt;&gt;</name>",
- },
- Entry: []Entry{
- {
- Title: "rietveld: an attempt at pubsubhubbub\n",
- Link: []Link{
- {Rel: "alternate", Href: "http://codereview.appspot.com/126085"},
- },
- Updated: "2009-10-04T01:35:58+00:00",
- Author: Person{
- Name: "email-address-removed",
- InnerXML: "<name>email-address-removed</name>",
- },
- Id: "urn:md5:134d9179c41f806be79b3a5f7877d19a",
- Summary: Text{
- Type: "html",
- Body: `
- An attempt at adding pubsubhubbub support to Rietveld.
-http://code.google.com/p/pubsubhubbub
-http://code.google.com/p/rietveld/issues/detail?id=155
-
-The server side of the protocol is trivial:
- 1. add a <link rel="hub" href="hub-server"> tag to all
- feeds that will be pubsubhubbubbed.
- 2. every time one of those feeds changes, tell the hub
- with a simple POST request.
-
-I have tested this by adding debug prints to a local hub
-server and checking that the server got the right publish
-requests.
-
-I can't quite get the server to work, but I think the bug
-is not in my code. I think that the server expects to be
-able to grab the feed and see the feed's actual URL in
-the link rel="self", but the default value for that drops
-the :port from the URL, and I cannot for the life of me
-figure out how to get the Atom generator deep inside
-django not to do that, or even where it is doing that,
-or even what code is running to generate the Atom feed.
-(I thought I knew but I added some assert False statements
-and it kept running!)
-
-Ignoring that particular problem, I would appreciate
-feedback on the right way to get the two values at
-the top of feeds.py marked NOTE(rsc).
-
-
-`,
- },
- },
- {
- Title: "rietveld: correct tab handling\n",
- Link: []Link{
- {Rel: "alternate", Href: "http://codereview.appspot.com/124106"},
- },
- Updated: "2009-10-03T23:02:17+00:00",
- Author: Person{
- Name: "email-address-removed",
- InnerXML: "<name>email-address-removed</name>",
- },
- Id: "urn:md5:0a2a4f19bb815101f0ba2904aed7c35a",
- Summary: Text{
- Type: "html",
- Body: `
- This fixes the buggy tab rendering that can be seen at
-http://codereview.appspot.com/116075/diff/1/2
-
-The fundamental problem was that the tab code was
-not being told what column the text began in, so it
-didn't know where to put the tab stops. Another problem
-was that some of the code assumed that string byte
-offsets were the same as column offsets, which is only
-true if there are no tabs.
-
-In the process of fixing this, I cleaned up the arguments
-to Fold and ExpandTabs and renamed them Break and
-_ExpandTabs so that I could be sure that I found all the
-call sites. I also wanted to verify that ExpandTabs was
-not being used from outside intra_region_diff.py.
-
-
-`,
- },
- },
- },
-}
-
-type FieldNameTest struct {
- in, out string
-}
-
-var FieldNameTests = []FieldNameTest{
- {"Profile-Image", "profileimage"},
- {"_score", "score"},
-}
-
-func TestFieldName(t *testing.T) {
- for _, tt := range FieldNameTests {
- a := fieldName(tt.in)
- if a != tt.out {
- t.Fatalf("have %#v\nwant %#v\n\n", a, tt.out)
- }
- }
-}
-
-const pathTestString = `
-<result>
- <before>1</before>
- <items>
- <item1>
- <value>A</value>
- </item1>
- <item2>
- <value>B</value>
- </item2>
- <Item1>
- <Value>C</Value>
- <Value>D</Value>
- </Item1>
- </items>
- <after>2</after>
-</result>
-`
-
-type PathTestItem struct {
- Value string
-}
-
-type PathTestA struct {
- Items []PathTestItem `xml:">item1"`
- Before, After string
-}
-
-type PathTestB struct {
- Other []PathTestItem `xml:"items>Item1"`
- Before, After string
-}
-
-type PathTestC struct {
- Values1 []string `xml:"items>item1>value"`
- Values2 []string `xml:"items>item2>value"`
- Before, After string
-}
-
-type PathTestSet struct {
- Item1 []PathTestItem
-}
-
-type PathTestD struct {
- Other PathTestSet `xml:"items>"`
- Before, After string
-}
-
-var pathTests = []interface{}{
- &PathTestA{Items: []PathTestItem{{"A"}, {"D"}}, Before: "1", After: "2"},
- &PathTestB{Other: []PathTestItem{{"A"}, {"D"}}, Before: "1", After: "2"},
- &PathTestC{Values1: []string{"A", "C", "D"}, Values2: []string{"B"}, Before: "1", After: "2"},
- &PathTestD{Other: PathTestSet{Item1: []PathTestItem{{"A"}, {"D"}}}, Before: "1", After: "2"},
-}
-
-func TestUnmarshalPaths(t *testing.T) {
- for _, pt := range pathTests {
- v := reflect.New(reflect.TypeOf(pt).Elem()).Interface()
- if err := Unmarshal(StringReader(pathTestString), v); err != nil {
- t.Fatalf("Unmarshal: %s", err)
- }
- if !reflect.DeepEqual(v, pt) {
- t.Fatalf("have %#v\nwant %#v", v, pt)
- }
- }
-}
-
-type BadPathTestA struct {
- First string `xml:"items>item1"`
- Other string `xml:"items>item2"`
- Second string `xml:"items>"`
-}
-
-type BadPathTestB struct {
- Other string `xml:"items>item2>value"`
- First string `xml:"items>item1"`
- Second string `xml:"items>item1>value"`
-}
-
-var badPathTests = []struct {
- v, e interface{}
-}{
- {&BadPathTestA{}, &TagPathError{reflect.TypeOf(BadPathTestA{}), "First", "items>item1", "Second", "items>"}},
- {&BadPathTestB{}, &TagPathError{reflect.TypeOf(BadPathTestB{}), "First", "items>item1", "Second", "items>item1>value"}},
-}
-
-func TestUnmarshalBadPaths(t *testing.T) {
- for _, tt := range badPathTests {
- err := Unmarshal(StringReader(pathTestString), tt.v)
- if !reflect.DeepEqual(err, tt.e) {
- t.Fatalf("Unmarshal with %#v didn't fail properly: %#v", tt.v, err)
- }
- }
-}
-
-func TestUnmarshalAttrs(t *testing.T) {
- var f AttrTest
- if err := Unmarshal(StringReader(attrString), &f); err != nil {
- t.Fatalf("Unmarshal: %s", err)
- }
- if !reflect.DeepEqual(f, attrStruct) {
- t.Fatalf("have %#v\nwant %#v", f, attrStruct)
- }
-}
-
-type AttrTest struct {
- Test1 Test1
- Test2 Test2
-}
-
-type Test1 struct {
- Int int `xml:"attr"`
- Float float64 `xml:"attr"`
- Uint8 uint8 `xml:"attr"`
-}
-
-type Test2 struct {
- Bool bool `xml:"attr"`
-}
-
-const attrString = `
-<?xml version="1.0" charset="utf-8"?>
-<attrtest>
- <test1 int="8" float="23.5" uint8="255"/>
- <test2 bool="true"/>
-</attrtest>
-`
-
-var attrStruct = AttrTest{
- Test1: Test1{
- Int: 8,
- Float: 23.5,
- Uint8: 255,
- },
- Test2: Test2{
- Bool: true,
- },
-}
-
-// test data for TestUnmarshalWithoutNameType
-
-const OK = "OK"
-const withoutNameTypeData = `
-<?xml version="1.0" charset="utf-8"?>
-<Test3 attr="OK" />`
-
-type TestThree struct {
- XMLName bool `xml:"Test3"` // XMLName field without an xml.Name type
- Attr string `xml:"attr"`
-}
-
-func TestUnmarshalWithoutNameType(t *testing.T) {
- var x TestThree
- if err := Unmarshal(StringReader(withoutNameTypeData), &x); err != nil {
- t.Fatalf("Unmarshal: %s", err)
- }
- if x.Attr != OK {
- t.Fatalf("have %v\nwant %v", x.Attr, OK)
- }
-}
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package xml implements a simple XML 1.0 parser that
-// understands XML name spaces.
-package xml
-
-// References:
-// Annotated XML spec: http://www.xml.com/axml/testaxml.htm
-// XML name spaces: http://www.w3.org/TR/REC-xml-names/
-
-// TODO(rsc):
-// Test error handling.
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "io"
- "strconv"
- "strings"
- "unicode"
- "utf8"
-)
-
-// A SyntaxError represents a syntax error in the XML input stream.
-type SyntaxError struct {
- Msg string
- Line int
-}
-
-func (e *SyntaxError) Error() string {
- return "XML syntax error on line " + strconv.Itoa(e.Line) + ": " + e.Msg
-}
-
-// A Name represents an XML name (Local) annotated
-// with a name space identifier (Space).
-// In tokens returned by Parser.Token, the Space identifier
-// is given as a canonical URL, not the short prefix used
-// in the document being parsed.
-type Name struct {
- Space, Local string
-}
-
-// An Attr represents an attribute in an XML element (Name=Value).
-type Attr struct {
- Name Name
- Value string
-}
-
-// A Token is an interface holding one of the token types:
-// StartElement, EndElement, CharData, Comment, ProcInst, or Directive.
-type Token interface{}
-
-// A StartElement represents an XML start element.
-type StartElement struct {
- Name Name
- Attr []Attr
-}
-
-func (e StartElement) Copy() StartElement {
- attrs := make([]Attr, len(e.Attr))
- copy(attrs, e.Attr)
- e.Attr = attrs
- return e
-}
-
-// An EndElement represents an XML end element.
-type EndElement struct {
- Name Name
-}
-
-// A CharData represents XML character data (raw text),
-// in which XML escape sequences have been replaced by
-// the characters they represent.
-type CharData []byte
-
-func makeCopy(b []byte) []byte {
- b1 := make([]byte, len(b))
- copy(b1, b)
- return b1
-}
-
-func (c CharData) Copy() CharData { return CharData(makeCopy(c)) }
-
-// A Comment represents an XML comment of the form <!--comment-->.
-// The bytes do not include the <!-- and --> comment markers.
-type Comment []byte
-
-func (c Comment) Copy() Comment { return Comment(makeCopy(c)) }
-
-// A ProcInst represents an XML processing instruction of the form <?target inst?>
-type ProcInst struct {
- Target string
- Inst []byte
-}
-
-func (p ProcInst) Copy() ProcInst {
- p.Inst = makeCopy(p.Inst)
- return p
-}
-
-// A Directive represents an XML directive of the form <!text>.
-// The bytes do not include the <! and > markers.
-type Directive []byte
-
-func (d Directive) Copy() Directive { return Directive(makeCopy(d)) }
-
-// CopyToken returns a copy of a Token.
-func CopyToken(t Token) Token {
- switch v := t.(type) {
- case CharData:
- return v.Copy()
- case Comment:
- return v.Copy()
- case Directive:
- return v.Copy()
- case ProcInst:
- return v.Copy()
- case StartElement:
- return v.Copy()
- }
- return t
-}
-
-// A Parser represents an XML parser reading a particular input stream.
-// The parser assumes that its input is encoded in UTF-8.
-type Parser struct {
- // Strict defaults to true, enforcing the requirements
- // of the XML specification.
- // If set to false, the parser allows input containing common
- // mistakes:
- // * If an element is missing an end tag, the parser invents
- // end tags as necessary to keep the return values from Token
- // properly balanced.
- // * In attribute values and character data, unknown or malformed
- // character entities (sequences beginning with &) are left alone.
- //
- // Setting:
- //
- // p.Strict = false;
- // p.AutoClose = HTMLAutoClose;
- // p.Entity = HTMLEntity
- //
- // creates a parser that can handle typical HTML.
- Strict bool
-
- // When Strict == false, AutoClose indicates a set of elements to
- // consider closed immediately after they are opened, regardless
- // of whether an end element is present.
- AutoClose []string
-
- // Entity can be used to map non-standard entity names to string replacements.
- // The parser behaves as if these standard mappings are present in the map,
- // regardless of the actual map content:
- //
- // "lt": "<",
- // "gt": ">",
- // "amp": "&",
- // "apos": "'",
- // "quot": `"`,
- Entity map[string]string
-
- // CharsetReader, if non-nil, defines a function to generate
- // charset-conversion readers, converting from the provided
- // non-UTF-8 charset into UTF-8. If CharsetReader is nil or
- // returns an error, parsing stops with an error. One of the
- // CharsetReader's result values must be non-nil.
- CharsetReader func(charset string, input io.Reader) (io.Reader, error)
-
- r io.ByteReader
- buf bytes.Buffer
- saved *bytes.Buffer
- stk *stack
- free *stack
- needClose bool
- toClose Name
- nextToken Token
- nextByte int
- ns map[string]string
- err error
- line int
- tmp [32]byte
-}
-
-// NewParser creates a new XML parser reading from r.
-func NewParser(r io.Reader) *Parser {
- p := &Parser{
- ns: make(map[string]string),
- nextByte: -1,
- line: 1,
- Strict: true,
- }
- p.switchToReader(r)
- return p
-}
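For context on the Strict, AutoClose, and Entity fields documented above, a hedged sketch of the lenient-HTML configuration that doc comment describes (not part of the deleted file; the helper name and input are made up, and fmt and strings are already imported by this file):

func lenientHTMLDemo() {
	// Strict=false invents missing end tags, AutoClose handles void
	// elements such as <br>, and Entity resolves names like &eacute;.
	p := NewParser(strings.NewReader(`<p>caf&eacute;<br>done</p>`))
	p.Strict = false
	p.AutoClose = HTMLAutoClose
	p.Entity = HTMLEntity
	for {
		tok, err := p.Token()
		if err != nil {
			break // io.EOF once the input is exhausted
		}
		fmt.Printf("%#v\n", tok)
	}
}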
-
-// Token returns the next XML token in the input stream.
-// At the end of the input stream, Token returns nil, io.EOF.
-//
-// Slices of bytes in the returned token data refer to the
-// parser's internal buffer and remain valid only until the next
-// call to Token. To acquire a copy of the bytes, call CopyToken
-// or the token's Copy method.
-//
-// Token expands self-closing elements such as <br/>
-// into separate start and end elements returned by successive calls.
-//
-// Token guarantees that the StartElement and EndElement
-// tokens it returns are properly nested and matched:
-// if Token encounters an unexpected end element,
-// it will return an error.
-//
-// Token implements XML name spaces as described by
-// http://www.w3.org/TR/REC-xml-names/. Each of the
-// Name structures contained in the Token has the Space
-// set to the URL identifying its name space when known.
-// If Token encounters an unrecognized name space prefix,
-// it uses the prefix as the Space rather than report an error.
-func (p *Parser) Token() (t Token, err error) {
- if p.nextToken != nil {
- t = p.nextToken
- p.nextToken = nil
- } else if t, err = p.RawToken(); err != nil {
- return
- }
-
- if !p.Strict {
- if t1, ok := p.autoClose(t); ok {
- p.nextToken = t
- t = t1
- }
- }
- switch t1 := t.(type) {
- case StartElement:
- // In XML name spaces, the translations listed in the
- // attributes apply to the element name and
- // to the other attribute names, so process
- // the translations first.
- for _, a := range t1.Attr {
- if a.Name.Space == "xmlns" {
- v, ok := p.ns[a.Name.Local]
- p.pushNs(a.Name.Local, v, ok)
- p.ns[a.Name.Local] = a.Value
- }
- if a.Name.Space == "" && a.Name.Local == "xmlns" {
- // Default space for untagged names
- v, ok := p.ns[""]
- p.pushNs("", v, ok)
- p.ns[""] = a.Value
- }
- }
-
- p.translate(&t1.Name, true)
- for i := range t1.Attr {
- p.translate(&t1.Attr[i].Name, false)
- }
- p.pushElement(t1.Name)
- t = t1
-
- case EndElement:
- p.translate(&t1.Name, true)
- if !p.popElement(&t1) {
- return nil, p.err
- }
- t = t1
- }
- return
-}
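A hedged usage sketch for the Token contract above (not part of the deleted file; the helper name and input are made up): note the Copy call, since CharData aliases the parser's internal buffer.

func tokenLoopDemo() error {
	p := NewParser(strings.NewReader("<a><b>text</b></a>"))
	var saved []CharData
	for {
		tok, err := p.Token()
		if err == io.EOF {
			break
		}
		if err != nil {
			return err
		}
		switch t := tok.(type) {
		case StartElement:
			fmt.Println("start:", t.Name.Local)
		case EndElement:
			fmt.Println("end:", t.Name.Local)
		case CharData:
			// Copy before the next Token call, as documented above.
			saved = append(saved, t.Copy())
		}
	}
	fmt.Println("kept", len(saved), "character-data tokens")
	return nil
}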
-
-// Apply name space translation to name n.
-// The default name space (for Space=="")
-// applies only to element names, not to attribute names.
-func (p *Parser) translate(n *Name, isElementName bool) {
- switch {
- case n.Space == "xmlns":
- return
- case n.Space == "" && !isElementName:
- return
- case n.Space == "" && n.Local == "xmlns":
- return
- }
- if v, ok := p.ns[n.Space]; ok {
- n.Space = v
- }
-}
-
-func (p *Parser) switchToReader(r io.Reader) {
- // Get efficient byte at a time reader.
- // Assume that if reader has its own
- // ReadByte, it's efficient enough.
- // Otherwise, use bufio.
- if rb, ok := r.(io.ByteReader); ok {
- p.r = rb
- } else {
- p.r = bufio.NewReader(r)
- }
-}
-
-// Parsing state - stack holds old name space translations
-// and the current set of open elements. The translations to pop when
-// ending a given tag are *below* it on the stack, which is
-// more work but forced on us by XML.
-type stack struct {
- next *stack
- kind int
- name Name
- ok bool
-}
-
-const (
- stkStart = iota
- stkNs
-)
-
-func (p *Parser) push(kind int) *stack {
- s := p.free
- if s != nil {
- p.free = s.next
- } else {
- s = new(stack)
- }
- s.next = p.stk
- s.kind = kind
- p.stk = s
- return s
-}
-
-func (p *Parser) pop() *stack {
- s := p.stk
- if s != nil {
- p.stk = s.next
- s.next = p.free
- p.free = s
- }
- return s
-}
-
-// Record that we are starting an element with the given name.
-func (p *Parser) pushElement(name Name) {
- s := p.push(stkStart)
- s.name = name
-}
-
-// Record that we are changing the value of ns[local].
-// The old value is url, ok.
-func (p *Parser) pushNs(local string, url string, ok bool) {
- s := p.push(stkNs)
- s.name.Local = local
- s.name.Space = url
- s.ok = ok
-}
-
-// Creates a SyntaxError with the current line number.
-func (p *Parser) syntaxError(msg string) error {
- return &SyntaxError{Msg: msg, Line: p.line}
-}
-
-// Record that we are ending an element with the given name.
-// The name must match the record at the top of the stack,
-// which must be a pushElement record.
-// After popping the element, apply any undo records from
-// the stack to restore the name translations that existed
-// before we saw this element.
-func (p *Parser) popElement(t *EndElement) bool {
- s := p.pop()
- name := t.Name
- switch {
- case s == nil || s.kind != stkStart:
- p.err = p.syntaxError("unexpected end element </" + name.Local + ">")
- return false
- case s.name.Local != name.Local:
- if !p.Strict {
- p.needClose = true
- p.toClose = t.Name
- t.Name = s.name
- return true
- }
- p.err = p.syntaxError("element <" + s.name.Local + "> closed by </" + name.Local + ">")
- return false
- case s.name.Space != name.Space:
- p.err = p.syntaxError("element <" + s.name.Local + "> in space " + s.name.Space +
- "closed by </" + name.Local + "> in space " + name.Space)
- return false
- }
-
- // Pop stack until a Start is on the top, undoing the
- // translations that were associated with the element we just closed.
- for p.stk != nil && p.stk.kind != stkStart {
- s := p.pop()
- if s.ok {
- p.ns[s.name.Local] = s.name.Space
- } else {
- delete(p.ns, s.name.Local)
- }
- }
-
- return true
-}
-
-// If the top element on the stack is autoclosing and
-// t is not the end tag, invent the end tag.
-func (p *Parser) autoClose(t Token) (Token, bool) {
- if p.stk == nil || p.stk.kind != stkStart {
- return nil, false
- }
- name := strings.ToLower(p.stk.name.Local)
- for _, s := range p.AutoClose {
- if strings.ToLower(s) == name {
- // This one should be auto closed if t doesn't close it.
- et, ok := t.(EndElement)
- if !ok || et.Name.Local != name {
- return EndElement{p.stk.name}, true
- }
- break
- }
- }
- return nil, false
-}
-
-// RawToken is like Token but does not verify that
-// start and end elements match and does not translate
-// name space prefixes to their corresponding URLs.
-func (p *Parser) RawToken() (Token, error) {
- if p.err != nil {
- return nil, p.err
- }
- if p.needClose {
- // The last element we read was self-closing and
- // we returned just the StartElement half.
- // Return the EndElement half now.
- p.needClose = false
- return EndElement{p.toClose}, nil
- }
-
- b, ok := p.getc()
- if !ok {
- return nil, p.err
- }
-
- if b != '<' {
- // Text section.
- p.ungetc(b)
- data := p.text(-1, false)
- if data == nil {
- return nil, p.err
- }
- return CharData(data), nil
- }
-
- if b, ok = p.mustgetc(); !ok {
- return nil, p.err
- }
- switch b {
- case '/':
- // </: End element
- var name Name
- if name, ok = p.nsname(); !ok {
- if p.err == nil {
- p.err = p.syntaxError("expected element name after </")
- }
- return nil, p.err
- }
- p.space()
- if b, ok = p.mustgetc(); !ok {
- return nil, p.err
- }
- if b != '>' {
- p.err = p.syntaxError("invalid characters between </" + name.Local + " and >")
- return nil, p.err
- }
- return EndElement{name}, nil
-
- case '?':
- // <?: Processing instruction.
- // TODO(rsc): Should parse the <?xml declaration to make sure
- // the version is 1.0 and the encoding is UTF-8.
- var target string
- if target, ok = p.name(); !ok {
- if p.err == nil {
- p.err = p.syntaxError("expected target name after <?")
- }
- return nil, p.err
- }
- p.space()
- p.buf.Reset()
- var b0 byte
- for {
- if b, ok = p.mustgetc(); !ok {
- return nil, p.err
- }
- p.buf.WriteByte(b)
- if b0 == '?' && b == '>' {
- break
- }
- b0 = b
- }
- data := p.buf.Bytes()
- data = data[0 : len(data)-2] // chop ?>
-
- if target == "xml" {
- enc := procInstEncoding(string(data))
- if enc != "" && enc != "utf-8" && enc != "UTF-8" {
- if p.CharsetReader == nil {
- p.err = fmt.Errorf("xml: encoding %q declared but Parser.CharsetReader is nil", enc)
- return nil, p.err
- }
- newr, err := p.CharsetReader(enc, p.r.(io.Reader))
- if err != nil {
- p.err = fmt.Errorf("xml: opening charset %q: %v", enc, err)
- return nil, p.err
- }
- if newr == nil {
- panic("CharsetReader returned a nil Reader for charset " + enc)
- }
- p.switchToReader(newr)
- }
- }
- return ProcInst{target, data}, nil
-
- case '!':
- // <!: Maybe comment, maybe CDATA.
- if b, ok = p.mustgetc(); !ok {
- return nil, p.err
- }
- switch b {
- case '-': // <!-
- // Probably <!-- for a comment.
- if b, ok = p.mustgetc(); !ok {
- return nil, p.err
- }
- if b != '-' {
- p.err = p.syntaxError("invalid sequence <!- not part of <!--")
- return nil, p.err
- }
- // Look for terminator.
- p.buf.Reset()
- var b0, b1 byte
- for {
- if b, ok = p.mustgetc(); !ok {
- return nil, p.err
- }
- p.buf.WriteByte(b)
- if b0 == '-' && b1 == '-' && b == '>' {
- break
- }
- b0, b1 = b1, b
- }
- data := p.buf.Bytes()
- data = data[0 : len(data)-3] // chop -->
- return Comment(data), nil
-
- case '[': // <![
- // Probably <![CDATA[.
- for i := 0; i < 6; i++ {
- if b, ok = p.mustgetc(); !ok {
- return nil, p.err
- }
- if b != "CDATA["[i] {
- p.err = p.syntaxError("invalid <![ sequence")
- return nil, p.err
- }
- }
- // Have <![CDATA[. Read text until ]]>.
- data := p.text(-1, true)
- if data == nil {
- return nil, p.err
- }
- return CharData(data), nil
- }
-
- // Probably a directive: <!DOCTYPE ...>, <!ENTITY ...>, etc.
- // We don't care, but accumulate for caller. Quoted angle
- // brackets do not count for nesting.
- p.buf.Reset()
- p.buf.WriteByte(b)
- inquote := uint8(0)
- depth := 0
- for {
- if b, ok = p.mustgetc(); !ok {
- return nil, p.err
- }
- if inquote == 0 && b == '>' && depth == 0 {
- break
- }
- p.buf.WriteByte(b)
- switch {
- case b == inquote:
- inquote = 0
-
- case inquote != 0:
- // in quotes, no special action
-
- case b == '\'' || b == '"':
- inquote = b
-
- case b == '>' && inquote == 0:
- depth--
-
- case b == '<' && inquote == 0:
- depth++
- }
- }
- return Directive(p.buf.Bytes()), nil
- }
-
- // Must be an open element like <a href="foo">
- p.ungetc(b)
-
- var (
- name Name
- empty bool
- attr []Attr
- )
- if name, ok = p.nsname(); !ok {
- if p.err == nil {
- p.err = p.syntaxError("expected element name after <")
- }
- return nil, p.err
- }
-
- attr = make([]Attr, 0, 4)
- for {
- p.space()
- if b, ok = p.mustgetc(); !ok {
- return nil, p.err
- }
- if b == '/' {
- empty = true
- if b, ok = p.mustgetc(); !ok {
- return nil, p.err
- }
- if b != '>' {
- p.err = p.syntaxError("expected /> in element")
- return nil, p.err
- }
- break
- }
- if b == '>' {
- break
- }
- p.ungetc(b)
-
- n := len(attr)
- if n >= cap(attr) {
- nattr := make([]Attr, n, 2*cap(attr))
- copy(nattr, attr)
- attr = nattr
- }
- attr = attr[0 : n+1]
- a := &attr[n]
- if a.Name, ok = p.nsname(); !ok {
- if p.err == nil {
- p.err = p.syntaxError("expected attribute name in element")
- }
- return nil, p.err
- }
- p.space()
- if b, ok = p.mustgetc(); !ok {
- return nil, p.err
- }
- if b != '=' {
- if p.Strict {
- p.err = p.syntaxError("attribute name without = in element")
- return nil, p.err
- } else {
- p.ungetc(b)
- a.Value = a.Name.Local
- }
- } else {
- p.space()
- data := p.attrval()
- if data == nil {
- return nil, p.err
- }
- a.Value = string(data)
- }
- }
- if empty {
- p.needClose = true
- p.toClose = name
- }
- return StartElement{name, attr}, nil
-}
-
-func (p *Parser) attrval() []byte {
- b, ok := p.mustgetc()
- if !ok {
- return nil
- }
- // Handle quoted attribute values
- if b == '"' || b == '\'' {
- return p.text(int(b), false)
- }
- // Handle unquoted attribute values for strict parsers
- if p.Strict {
- p.err = p.syntaxError("unquoted or missing attribute value in element")
- return nil
- }
- // Handle unquoted attribute values for non-strict parsers
- p.ungetc(b)
- p.buf.Reset()
- for {
- b, ok = p.mustgetc()
- if !ok {
- return nil
- }
- // http://www.w3.org/TR/REC-html40/intro/sgmltut.html#h-3.2.2
- if 'a' <= b && b <= 'z' || 'A' <= b && b <= 'Z' ||
- '0' <= b && b <= '9' || b == '_' || b == ':' || b == '-' {
- p.buf.WriteByte(b)
- } else {
- p.ungetc(b)
- break
- }
- }
- return p.buf.Bytes()
-}
-
-// Skip spaces if any
-func (p *Parser) space() {
- for {
- b, ok := p.getc()
- if !ok {
- return
- }
- switch b {
- case ' ', '\r', '\n', '\t':
- default:
- p.ungetc(b)
- return
- }
- }
-}
-
-// Read a single byte.
-// If there is no byte to read, return ok==false
-// and leave the error in p.err.
-// Maintain line number.
-func (p *Parser) getc() (b byte, ok bool) {
- if p.err != nil {
- return 0, false
- }
- if p.nextByte >= 0 {
- b = byte(p.nextByte)
- p.nextByte = -1
- } else {
- b, p.err = p.r.ReadByte()
- if p.err != nil {
- return 0, false
- }
- if p.saved != nil {
- p.saved.WriteByte(b)
- }
- }
- if b == '\n' {
- p.line++
- }
- return b, true
-}
-
-// Return saved offset.
-// If we did ungetc (nextByte >= 0), have to back up one.
-func (p *Parser) savedOffset() int {
- n := p.saved.Len()
- if p.nextByte >= 0 {
- n--
- }
- return n
-}
-
-// Must read a single byte.
-// If there is no byte to read,
-// set p.err to SyntaxError("unexpected EOF")
-// and return ok==false
-func (p *Parser) mustgetc() (b byte, ok bool) {
- if b, ok = p.getc(); !ok {
- if p.err == io.EOF {
- p.err = p.syntaxError("unexpected EOF")
- }
- }
- return
-}
-
-// Unread a single byte.
-func (p *Parser) ungetc(b byte) {
- if b == '\n' {
- p.line--
- }
- p.nextByte = int(b)
-}
-
-var entity = map[string]int{
- "lt": '<',
- "gt": '>',
- "amp": '&',
- "apos": '\'',
- "quot": '"',
-}
-
-// Read plain text section (XML calls it character data).
-// If quote >= 0, we are in a quoted string and need to find the matching quote.
-// If cdata == true, we are in a <![CDATA[ section and need to find ]]>.
-// On failure return nil and leave the error in p.err.
-func (p *Parser) text(quote int, cdata bool) []byte {
- var b0, b1 byte
- var trunc int
- p.buf.Reset()
-Input:
- for {
- b, ok := p.getc()
- if !ok {
- if cdata {
- if p.err == io.EOF {
- p.err = p.syntaxError("unexpected EOF in CDATA section")
- }
- return nil
- }
- break Input
- }
-
- // <![CDATA[ section ends with ]]>.
- // It is an error for ]]> to appear in ordinary text.
- if b0 == ']' && b1 == ']' && b == '>' {
- if cdata {
- trunc = 2
- break Input
- }
- p.err = p.syntaxError("unescaped ]]> not in CDATA section")
- return nil
- }
-
- // Stop reading text if we see a <.
- if b == '<' && !cdata {
- if quote >= 0 {
- p.err = p.syntaxError("unescaped < inside quoted string")
- return nil
- }
- p.ungetc('<')
- break Input
- }
- if quote >= 0 && b == byte(quote) {
- break Input
- }
- if b == '&' && !cdata {
- // Read escaped character expression up to semicolon.
- // XML in all its glory allows a document to define and use
- // its own character names with <!ENTITY ...> directives.
- // Parsers are required to recognize lt, gt, amp, apos, and quot
- // even if they have not been declared. That's all we allow.
- var i int
- for i = 0; i < len(p.tmp); i++ {
- var ok bool
- p.tmp[i], ok = p.getc()
- if !ok {
- if p.err == io.EOF {
- p.err = p.syntaxError("unexpected EOF")
- }
- return nil
- }
- c := p.tmp[i]
- if c == ';' {
- break
- }
- if 'a' <= c && c <= 'z' ||
- 'A' <= c && c <= 'Z' ||
- '0' <= c && c <= '9' ||
- c == '_' || c == '#' {
- continue
- }
- p.ungetc(c)
- break
- }
- s := string(p.tmp[0:i])
- if i >= len(p.tmp) {
- if !p.Strict {
- b0, b1 = 0, 0
- p.buf.WriteByte('&')
- p.buf.Write(p.tmp[0:i])
- continue Input
- }
- p.err = p.syntaxError("character entity expression &" + s + "... too long")
- return nil
- }
- var haveText bool
- var text string
- if i >= 2 && s[0] == '#' {
- var n uint64
- var err error
- if i >= 3 && s[1] == 'x' {
- n, err = strconv.Btoui64(s[2:], 16)
- } else {
- n, err = strconv.Btoui64(s[1:], 10)
- }
- if err == nil && n <= unicode.MaxRune {
- text = string(n)
- haveText = true
- }
- } else {
- if r, ok := entity[s]; ok {
- text = string(r)
- haveText = true
- } else if p.Entity != nil {
- text, haveText = p.Entity[s]
- }
- }
- if !haveText {
- if !p.Strict {
- b0, b1 = 0, 0
- p.buf.WriteByte('&')
- p.buf.Write(p.tmp[0:i])
- continue Input
- }
- p.err = p.syntaxError("invalid character entity &" + s + ";")
- return nil
- }
- p.buf.Write([]byte(text))
- b0, b1 = 0, 0
- continue Input
- }
- p.buf.WriteByte(b)
- b0, b1 = b1, b
- }
- data := p.buf.Bytes()
- data = data[0 : len(data)-trunc]
-
- // Inspect each rune for being a disallowed character.
- buf := data
- for len(buf) > 0 {
- r, size := utf8.DecodeRune(buf)
- if r == utf8.RuneError && size == 1 {
- p.err = p.syntaxError("invalid UTF-8")
- return nil
- }
- buf = buf[size:]
- if !isInCharacterRange(r) {
- p.err = p.syntaxError(fmt.Sprintf("illegal character code %U", r))
- return nil
- }
- }
-
- // Must rewrite \r and \r\n into \n.
- w := 0
- for r := 0; r < len(data); r++ {
- b := data[r]
- if b == '\r' {
- if r+1 < len(data) && data[r+1] == '\n' {
- continue
- }
- b = '\n'
- }
- data[w] = b
- w++
- }
- return data[0:w]
-}
-
-// Decide whether the given rune is in the XML Character Range, per
-// the Char production of http://www.xml.com/axml/testaxml.htm,
-// Section 2.2 Characters.
-func isInCharacterRange(r rune) (inrange bool) {
- return r == 0x09 ||
- r == 0x0A ||
- r == 0x0D ||
- r >= 0x20 && r <= 0xD7FF ||
- r >= 0xE000 && r <= 0xFFFD ||
- r >= 0x10000 && r <= 0x10FFFF
-}
-
-// Get name space name: name with a : stuck in the middle.
-// The part before the : is the name space identifier.
-func (p *Parser) nsname() (name Name, ok bool) {
- s, ok := p.name()
- if !ok {
- return
- }
- i := strings.Index(s, ":")
- if i < 0 {
- name.Local = s
- } else {
- name.Space = s[0:i]
- name.Local = s[i+1:]
- }
- return name, true
-}
-
-// Get name: /first(first|second)*/
-// Do not set p.err if the name is missing (unless unexpected EOF is received):
-// let the caller provide better context.
-func (p *Parser) name() (s string, ok bool) {
- var b byte
- if b, ok = p.mustgetc(); !ok {
- return
- }
-
- // As a first approximation, we gather the bytes [A-Za-z0-9_:.-\x80-\xFF]*
- if b < utf8.RuneSelf && !isNameByte(b) {
- p.ungetc(b)
- return "", false
- }
- p.buf.Reset()
- p.buf.WriteByte(b)
- for {
- if b, ok = p.mustgetc(); !ok {
- return
- }
- if b < utf8.RuneSelf && !isNameByte(b) {
- p.ungetc(b)
- break
- }
- p.buf.WriteByte(b)
- }
-
- // Then we check the characters.
- s = p.buf.String()
- for i, c := range s {
- if !unicode.Is(first, c) && (i == 0 || !unicode.Is(second, c)) {
- p.err = p.syntaxError("invalid XML name: " + s)
- return "", false
- }
- }
- return s, true
-}
-
-func isNameByte(c byte) bool {
- return 'A' <= c && c <= 'Z' ||
- 'a' <= c && c <= 'z' ||
- '0' <= c && c <= '9' ||
- c == '_' || c == ':' || c == '.' || c == '-'
-}
-
-// These tables were generated by cut and paste from Appendix B of
-// the XML spec at http://www.xml.com/axml/testaxml.htm
-// and then reformatting. First corresponds to (Letter | '_' | ':')
-// and second corresponds to NameChar.
-
-var first = &unicode.RangeTable{
- R16: []unicode.Range16{
- {0x003A, 0x003A, 1},
- {0x0041, 0x005A, 1},
- {0x005F, 0x005F, 1},
- {0x0061, 0x007A, 1},
- {0x00C0, 0x00D6, 1},
- {0x00D8, 0x00F6, 1},
- {0x00F8, 0x00FF, 1},
- {0x0100, 0x0131, 1},
- {0x0134, 0x013E, 1},
- {0x0141, 0x0148, 1},
- {0x014A, 0x017E, 1},
- {0x0180, 0x01C3, 1},
- {0x01CD, 0x01F0, 1},
- {0x01F4, 0x01F5, 1},
- {0x01FA, 0x0217, 1},
- {0x0250, 0x02A8, 1},
- {0x02BB, 0x02C1, 1},
- {0x0386, 0x0386, 1},
- {0x0388, 0x038A, 1},
- {0x038C, 0x038C, 1},
- {0x038E, 0x03A1, 1},
- {0x03A3, 0x03CE, 1},
- {0x03D0, 0x03D6, 1},
- {0x03DA, 0x03E0, 2},
- {0x03E2, 0x03F3, 1},
- {0x0401, 0x040C, 1},
- {0x040E, 0x044F, 1},
- {0x0451, 0x045C, 1},
- {0x045E, 0x0481, 1},
- {0x0490, 0x04C4, 1},
- {0x04C7, 0x04C8, 1},
- {0x04CB, 0x04CC, 1},
- {0x04D0, 0x04EB, 1},
- {0x04EE, 0x04F5, 1},
- {0x04F8, 0x04F9, 1},
- {0x0531, 0x0556, 1},
- {0x0559, 0x0559, 1},
- {0x0561, 0x0586, 1},
- {0x05D0, 0x05EA, 1},
- {0x05F0, 0x05F2, 1},
- {0x0621, 0x063A, 1},
- {0x0641, 0x064A, 1},
- {0x0671, 0x06B7, 1},
- {0x06BA, 0x06BE, 1},
- {0x06C0, 0x06CE, 1},
- {0x06D0, 0x06D3, 1},
- {0x06D5, 0x06D5, 1},
- {0x06E5, 0x06E6, 1},
- {0x0905, 0x0939, 1},
- {0x093D, 0x093D, 1},
- {0x0958, 0x0961, 1},
- {0x0985, 0x098C, 1},
- {0x098F, 0x0990, 1},
- {0x0993, 0x09A8, 1},
- {0x09AA, 0x09B0, 1},
- {0x09B2, 0x09B2, 1},
- {0x09B6, 0x09B9, 1},
- {0x09DC, 0x09DD, 1},
- {0x09DF, 0x09E1, 1},
- {0x09F0, 0x09F1, 1},
- {0x0A05, 0x0A0A, 1},
- {0x0A0F, 0x0A10, 1},
- {0x0A13, 0x0A28, 1},
- {0x0A2A, 0x0A30, 1},
- {0x0A32, 0x0A33, 1},
- {0x0A35, 0x0A36, 1},
- {0x0A38, 0x0A39, 1},
- {0x0A59, 0x0A5C, 1},
- {0x0A5E, 0x0A5E, 1},
- {0x0A72, 0x0A74, 1},
- {0x0A85, 0x0A8B, 1},
- {0x0A8D, 0x0A8D, 1},
- {0x0A8F, 0x0A91, 1},
- {0x0A93, 0x0AA8, 1},
- {0x0AAA, 0x0AB0, 1},
- {0x0AB2, 0x0AB3, 1},
- {0x0AB5, 0x0AB9, 1},
- {0x0ABD, 0x0AE0, 0x23},
- {0x0B05, 0x0B0C, 1},
- {0x0B0F, 0x0B10, 1},
- {0x0B13, 0x0B28, 1},
- {0x0B2A, 0x0B30, 1},
- {0x0B32, 0x0B33, 1},
- {0x0B36, 0x0B39, 1},
- {0x0B3D, 0x0B3D, 1},
- {0x0B5C, 0x0B5D, 1},
- {0x0B5F, 0x0B61, 1},
- {0x0B85, 0x0B8A, 1},
- {0x0B8E, 0x0B90, 1},
- {0x0B92, 0x0B95, 1},
- {0x0B99, 0x0B9A, 1},
- {0x0B9C, 0x0B9C, 1},
- {0x0B9E, 0x0B9F, 1},
- {0x0BA3, 0x0BA4, 1},
- {0x0BA8, 0x0BAA, 1},
- {0x0BAE, 0x0BB5, 1},
- {0x0BB7, 0x0BB9, 1},
- {0x0C05, 0x0C0C, 1},
- {0x0C0E, 0x0C10, 1},
- {0x0C12, 0x0C28, 1},
- {0x0C2A, 0x0C33, 1},
- {0x0C35, 0x0C39, 1},
- {0x0C60, 0x0C61, 1},
- {0x0C85, 0x0C8C, 1},
- {0x0C8E, 0x0C90, 1},
- {0x0C92, 0x0CA8, 1},
- {0x0CAA, 0x0CB3, 1},
- {0x0CB5, 0x0CB9, 1},
- {0x0CDE, 0x0CDE, 1},
- {0x0CE0, 0x0CE1, 1},
- {0x0D05, 0x0D0C, 1},
- {0x0D0E, 0x0D10, 1},
- {0x0D12, 0x0D28, 1},
- {0x0D2A, 0x0D39, 1},
- {0x0D60, 0x0D61, 1},
- {0x0E01, 0x0E2E, 1},
- {0x0E30, 0x0E30, 1},
- {0x0E32, 0x0E33, 1},
- {0x0E40, 0x0E45, 1},
- {0x0E81, 0x0E82, 1},
- {0x0E84, 0x0E84, 1},
- {0x0E87, 0x0E88, 1},
- {0x0E8A, 0x0E8D, 3},
- {0x0E94, 0x0E97, 1},
- {0x0E99, 0x0E9F, 1},
- {0x0EA1, 0x0EA3, 1},
- {0x0EA5, 0x0EA7, 2},
- {0x0EAA, 0x0EAB, 1},
- {0x0EAD, 0x0EAE, 1},
- {0x0EB0, 0x0EB0, 1},
- {0x0EB2, 0x0EB3, 1},
- {0x0EBD, 0x0EBD, 1},
- {0x0EC0, 0x0EC4, 1},
- {0x0F40, 0x0F47, 1},
- {0x0F49, 0x0F69, 1},
- {0x10A0, 0x10C5, 1},
- {0x10D0, 0x10F6, 1},
- {0x1100, 0x1100, 1},
- {0x1102, 0x1103, 1},
- {0x1105, 0x1107, 1},
- {0x1109, 0x1109, 1},
- {0x110B, 0x110C, 1},
- {0x110E, 0x1112, 1},
- {0x113C, 0x1140, 2},
- {0x114C, 0x1150, 2},
- {0x1154, 0x1155, 1},
- {0x1159, 0x1159, 1},
- {0x115F, 0x1161, 1},
- {0x1163, 0x1169, 2},
- {0x116D, 0x116E, 1},
- {0x1172, 0x1173, 1},
- {0x1175, 0x119E, 0x119E - 0x1175},
- {0x11A8, 0x11AB, 0x11AB - 0x11A8},
- {0x11AE, 0x11AF, 1},
- {0x11B7, 0x11B8, 1},
- {0x11BA, 0x11BA, 1},
- {0x11BC, 0x11C2, 1},
- {0x11EB, 0x11F0, 0x11F0 - 0x11EB},
- {0x11F9, 0x11F9, 1},
- {0x1E00, 0x1E9B, 1},
- {0x1EA0, 0x1EF9, 1},
- {0x1F00, 0x1F15, 1},
- {0x1F18, 0x1F1D, 1},
- {0x1F20, 0x1F45, 1},
- {0x1F48, 0x1F4D, 1},
- {0x1F50, 0x1F57, 1},
- {0x1F59, 0x1F5B, 0x1F5B - 0x1F59},
- {0x1F5D, 0x1F5D, 1},
- {0x1F5F, 0x1F7D, 1},
- {0x1F80, 0x1FB4, 1},
- {0x1FB6, 0x1FBC, 1},
- {0x1FBE, 0x1FBE, 1},
- {0x1FC2, 0x1FC4, 1},
- {0x1FC6, 0x1FCC, 1},
- {0x1FD0, 0x1FD3, 1},
- {0x1FD6, 0x1FDB, 1},
- {0x1FE0, 0x1FEC, 1},
- {0x1FF2, 0x1FF4, 1},
- {0x1FF6, 0x1FFC, 1},
- {0x2126, 0x2126, 1},
- {0x212A, 0x212B, 1},
- {0x212E, 0x212E, 1},
- {0x2180, 0x2182, 1},
- {0x3007, 0x3007, 1},
- {0x3021, 0x3029, 1},
- {0x3041, 0x3094, 1},
- {0x30A1, 0x30FA, 1},
- {0x3105, 0x312C, 1},
- {0x4E00, 0x9FA5, 1},
- {0xAC00, 0xD7A3, 1},
- },
-}
-
-var second = &unicode.RangeTable{
- R16: []unicode.Range16{
- {0x002D, 0x002E, 1},
- {0x0030, 0x0039, 1},
- {0x00B7, 0x00B7, 1},
- {0x02D0, 0x02D1, 1},
- {0x0300, 0x0345, 1},
- {0x0360, 0x0361, 1},
- {0x0387, 0x0387, 1},
- {0x0483, 0x0486, 1},
- {0x0591, 0x05A1, 1},
- {0x05A3, 0x05B9, 1},
- {0x05BB, 0x05BD, 1},
- {0x05BF, 0x05BF, 1},
- {0x05C1, 0x05C2, 1},
- {0x05C4, 0x0640, 0x0640 - 0x05C4},
- {0x064B, 0x0652, 1},
- {0x0660, 0x0669, 1},
- {0x0670, 0x0670, 1},
- {0x06D6, 0x06DC, 1},
- {0x06DD, 0x06DF, 1},
- {0x06E0, 0x06E4, 1},
- {0x06E7, 0x06E8, 1},
- {0x06EA, 0x06ED, 1},
- {0x06F0, 0x06F9, 1},
- {0x0901, 0x0903, 1},
- {0x093C, 0x093C, 1},
- {0x093E, 0x094C, 1},
- {0x094D, 0x094D, 1},
- {0x0951, 0x0954, 1},
- {0x0962, 0x0963, 1},
- {0x0966, 0x096F, 1},
- {0x0981, 0x0983, 1},
- {0x09BC, 0x09BC, 1},
- {0x09BE, 0x09BF, 1},
- {0x09C0, 0x09C4, 1},
- {0x09C7, 0x09C8, 1},
- {0x09CB, 0x09CD, 1},
- {0x09D7, 0x09D7, 1},
- {0x09E2, 0x09E3, 1},
- {0x09E6, 0x09EF, 1},
- {0x0A02, 0x0A3C, 0x3A},
- {0x0A3E, 0x0A3F, 1},
- {0x0A40, 0x0A42, 1},
- {0x0A47, 0x0A48, 1},
- {0x0A4B, 0x0A4D, 1},
- {0x0A66, 0x0A6F, 1},
- {0x0A70, 0x0A71, 1},
- {0x0A81, 0x0A83, 1},
- {0x0ABC, 0x0ABC, 1},
- {0x0ABE, 0x0AC5, 1},
- {0x0AC7, 0x0AC9, 1},
- {0x0ACB, 0x0ACD, 1},
- {0x0AE6, 0x0AEF, 1},
- {0x0B01, 0x0B03, 1},
- {0x0B3C, 0x0B3C, 1},
- {0x0B3E, 0x0B43, 1},
- {0x0B47, 0x0B48, 1},
- {0x0B4B, 0x0B4D, 1},
- {0x0B56, 0x0B57, 1},
- {0x0B66, 0x0B6F, 1},
- {0x0B82, 0x0B83, 1},
- {0x0BBE, 0x0BC2, 1},
- {0x0BC6, 0x0BC8, 1},
- {0x0BCA, 0x0BCD, 1},
- {0x0BD7, 0x0BD7, 1},
- {0x0BE7, 0x0BEF, 1},
- {0x0C01, 0x0C03, 1},
- {0x0C3E, 0x0C44, 1},
- {0x0C46, 0x0C48, 1},
- {0x0C4A, 0x0C4D, 1},
- {0x0C55, 0x0C56, 1},
- {0x0C66, 0x0C6F, 1},
- {0x0C82, 0x0C83, 1},
- {0x0CBE, 0x0CC4, 1},
- {0x0CC6, 0x0CC8, 1},
- {0x0CCA, 0x0CCD, 1},
- {0x0CD5, 0x0CD6, 1},
- {0x0CE6, 0x0CEF, 1},
- {0x0D02, 0x0D03, 1},
- {0x0D3E, 0x0D43, 1},
- {0x0D46, 0x0D48, 1},
- {0x0D4A, 0x0D4D, 1},
- {0x0D57, 0x0D57, 1},
- {0x0D66, 0x0D6F, 1},
- {0x0E31, 0x0E31, 1},
- {0x0E34, 0x0E3A, 1},
- {0x0E46, 0x0E46, 1},
- {0x0E47, 0x0E4E, 1},
- {0x0E50, 0x0E59, 1},
- {0x0EB1, 0x0EB1, 1},
- {0x0EB4, 0x0EB9, 1},
- {0x0EBB, 0x0EBC, 1},
- {0x0EC6, 0x0EC6, 1},
- {0x0EC8, 0x0ECD, 1},
- {0x0ED0, 0x0ED9, 1},
- {0x0F18, 0x0F19, 1},
- {0x0F20, 0x0F29, 1},
- {0x0F35, 0x0F39, 2},
- {0x0F3E, 0x0F3F, 1},
- {0x0F71, 0x0F84, 1},
- {0x0F86, 0x0F8B, 1},
- {0x0F90, 0x0F95, 1},
- {0x0F97, 0x0F97, 1},
- {0x0F99, 0x0FAD, 1},
- {0x0FB1, 0x0FB7, 1},
- {0x0FB9, 0x0FB9, 1},
- {0x20D0, 0x20DC, 1},
- {0x20E1, 0x3005, 0x3005 - 0x20E1},
- {0x302A, 0x302F, 1},
- {0x3031, 0x3035, 1},
- {0x3099, 0x309A, 1},
- {0x309D, 0x309E, 1},
- {0x30FC, 0x30FE, 1},
- },
-}
-
-// HTMLEntity is an entity map containing translations for the
-// standard HTML entity characters.
-var HTMLEntity = htmlEntity
-
-var htmlEntity = map[string]string{
- /*
- hget http://www.w3.org/TR/html4/sgml/entities.html |
- ssam '
- ,y /\>/ x/\<(.|\n)+/ s/\n/ /g
- ,x v/^\<!ENTITY/d
- ,s/\<!ENTITY ([^ ]+) .*U\+([0-9A-F][0-9A-F][0-9A-F][0-9A-F]) .+/ "\1": "\\u\2",/g
- '
- */
- "nbsp": "\u00A0",
- "iexcl": "\u00A1",
- "cent": "\u00A2",
- "pound": "\u00A3",
- "curren": "\u00A4",
- "yen": "\u00A5",
- "brvbar": "\u00A6",
- "sect": "\u00A7",
- "uml": "\u00A8",
- "copy": "\u00A9",
- "ordf": "\u00AA",
- "laquo": "\u00AB",
- "not": "\u00AC",
- "shy": "\u00AD",
- "reg": "\u00AE",
- "macr": "\u00AF",
- "deg": "\u00B0",
- "plusmn": "\u00B1",
- "sup2": "\u00B2",
- "sup3": "\u00B3",
- "acute": "\u00B4",
- "micro": "\u00B5",
- "para": "\u00B6",
- "middot": "\u00B7",
- "cedil": "\u00B8",
- "sup1": "\u00B9",
- "ordm": "\u00BA",
- "raquo": "\u00BB",
- "frac14": "\u00BC",
- "frac12": "\u00BD",
- "frac34": "\u00BE",
- "iquest": "\u00BF",
- "Agrave": "\u00C0",
- "Aacute": "\u00C1",
- "Acirc": "\u00C2",
- "Atilde": "\u00C3",
- "Auml": "\u00C4",
- "Aring": "\u00C5",
- "AElig": "\u00C6",
- "Ccedil": "\u00C7",
- "Egrave": "\u00C8",
- "Eacute": "\u00C9",
- "Ecirc": "\u00CA",
- "Euml": "\u00CB",
- "Igrave": "\u00CC",
- "Iacute": "\u00CD",
- "Icirc": "\u00CE",
- "Iuml": "\u00CF",
- "ETH": "\u00D0",
- "Ntilde": "\u00D1",
- "Ograve": "\u00D2",
- "Oacute": "\u00D3",
- "Ocirc": "\u00D4",
- "Otilde": "\u00D5",
- "Ouml": "\u00D6",
- "times": "\u00D7",
- "Oslash": "\u00D8",
- "Ugrave": "\u00D9",
- "Uacute": "\u00DA",
- "Ucirc": "\u00DB",
- "Uuml": "\u00DC",
- "Yacute": "\u00DD",
- "THORN": "\u00DE",
- "szlig": "\u00DF",
- "agrave": "\u00E0",
- "aacute": "\u00E1",
- "acirc": "\u00E2",
- "atilde": "\u00E3",
- "auml": "\u00E4",
- "aring": "\u00E5",
- "aelig": "\u00E6",
- "ccedil": "\u00E7",
- "egrave": "\u00E8",
- "eacute": "\u00E9",
- "ecirc": "\u00EA",
- "euml": "\u00EB",
- "igrave": "\u00EC",
- "iacute": "\u00ED",
- "icirc": "\u00EE",
- "iuml": "\u00EF",
- "eth": "\u00F0",
- "ntilde": "\u00F1",
- "ograve": "\u00F2",
- "oacute": "\u00F3",
- "ocirc": "\u00F4",
- "otilde": "\u00F5",
- "ouml": "\u00F6",
- "divide": "\u00F7",
- "oslash": "\u00F8",
- "ugrave": "\u00F9",
- "uacute": "\u00FA",
- "ucirc": "\u00FB",
- "uuml": "\u00FC",
- "yacute": "\u00FD",
- "thorn": "\u00FE",
- "yuml": "\u00FF",
- "fnof": "\u0192",
- "Alpha": "\u0391",
- "Beta": "\u0392",
- "Gamma": "\u0393",
- "Delta": "\u0394",
- "Epsilon": "\u0395",
- "Zeta": "\u0396",
- "Eta": "\u0397",
- "Theta": "\u0398",
- "Iota": "\u0399",
- "Kappa": "\u039A",
- "Lambda": "\u039B",
- "Mu": "\u039C",
- "Nu": "\u039D",
- "Xi": "\u039E",
- "Omicron": "\u039F",
- "Pi": "\u03A0",
- "Rho": "\u03A1",
- "Sigma": "\u03A3",
- "Tau": "\u03A4",
- "Upsilon": "\u03A5",
- "Phi": "\u03A6",
- "Chi": "\u03A7",
- "Psi": "\u03A8",
- "Omega": "\u03A9",
- "alpha": "\u03B1",
- "beta": "\u03B2",
- "gamma": "\u03B3",
- "delta": "\u03B4",
- "epsilon": "\u03B5",
- "zeta": "\u03B6",
- "eta": "\u03B7",
- "theta": "\u03B8",
- "iota": "\u03B9",
- "kappa": "\u03BA",
- "lambda": "\u03BB",
- "mu": "\u03BC",
- "nu": "\u03BD",
- "xi": "\u03BE",
- "omicron": "\u03BF",
- "pi": "\u03C0",
- "rho": "\u03C1",
- "sigmaf": "\u03C2",
- "sigma": "\u03C3",
- "tau": "\u03C4",
- "upsilon": "\u03C5",
- "phi": "\u03C6",
- "chi": "\u03C7",
- "psi": "\u03C8",
- "omega": "\u03C9",
- "thetasym": "\u03D1",
- "upsih": "\u03D2",
- "piv": "\u03D6",
- "bull": "\u2022",
- "hellip": "\u2026",
- "prime": "\u2032",
- "Prime": "\u2033",
- "oline": "\u203E",
- "frasl": "\u2044",
- "weierp": "\u2118",
- "image": "\u2111",
- "real": "\u211C",
- "trade": "\u2122",
- "alefsym": "\u2135",
- "larr": "\u2190",
- "uarr": "\u2191",
- "rarr": "\u2192",
- "darr": "\u2193",
- "harr": "\u2194",
- "crarr": "\u21B5",
- "lArr": "\u21D0",
- "uArr": "\u21D1",
- "rArr": "\u21D2",
- "dArr": "\u21D3",
- "hArr": "\u21D4",
- "forall": "\u2200",
- "part": "\u2202",
- "exist": "\u2203",
- "empty": "\u2205",
- "nabla": "\u2207",
- "isin": "\u2208",
- "notin": "\u2209",
- "ni": "\u220B",
- "prod": "\u220F",
- "sum": "\u2211",
- "minus": "\u2212",
- "lowast": "\u2217",
- "radic": "\u221A",
- "prop": "\u221D",
- "infin": "\u221E",
- "ang": "\u2220",
- "and": "\u2227",
- "or": "\u2228",
- "cap": "\u2229",
- "cup": "\u222A",
- "int": "\u222B",
- "there4": "\u2234",
- "sim": "\u223C",
- "cong": "\u2245",
- "asymp": "\u2248",
- "ne": "\u2260",
- "equiv": "\u2261",
- "le": "\u2264",
- "ge": "\u2265",
- "sub": "\u2282",
- "sup": "\u2283",
- "nsub": "\u2284",
- "sube": "\u2286",
- "supe": "\u2287",
- "oplus": "\u2295",
- "otimes": "\u2297",
- "perp": "\u22A5",
- "sdot": "\u22C5",
- "lceil": "\u2308",
- "rceil": "\u2309",
- "lfloor": "\u230A",
- "rfloor": "\u230B",
- "lang": "\u2329",
- "rang": "\u232A",
- "loz": "\u25CA",
- "spades": "\u2660",
- "clubs": "\u2663",
- "hearts": "\u2665",
- "diams": "\u2666",
- "quot": "\u0022",
- "amp": "\u0026",
- "lt": "\u003C",
- "gt": "\u003E",
- "OElig": "\u0152",
- "oelig": "\u0153",
- "Scaron": "\u0160",
- "scaron": "\u0161",
- "Yuml": "\u0178",
- "circ": "\u02C6",
- "tilde": "\u02DC",
- "ensp": "\u2002",
- "emsp": "\u2003",
- "thinsp": "\u2009",
- "zwnj": "\u200C",
- "zwj": "\u200D",
- "lrm": "\u200E",
- "rlm": "\u200F",
- "ndash": "\u2013",
- "mdash": "\u2014",
- "lsquo": "\u2018",
- "rsquo": "\u2019",
- "sbquo": "\u201A",
- "ldquo": "\u201C",
- "rdquo": "\u201D",
- "bdquo": "\u201E",
- "dagger": "\u2020",
- "Dagger": "\u2021",
- "permil": "\u2030",
- "lsaquo": "\u2039",
- "rsaquo": "\u203A",
- "euro": "\u20AC",
-}
-
-// HTMLAutoClose is the set of HTML elements that
-// should be considered to close automatically.
-var HTMLAutoClose = htmlAutoClose
-
-var htmlAutoClose = []string{
- /*
- hget http://www.w3.org/TR/html4/loose.dtd |
- 9 sed -n 's/<!ELEMENT (.*) - O EMPTY.+/ "\1",/p' | tr A-Z a-z
- */
- "basefont",
- "br",
- "area",
- "link",
- "img",
- "param",
- "hr",
- "input",
- "col ",
- "frame",
- "isindex",
- "base",
- "meta",
-}
-
-var (
- esc_quot = []byte("&#34;") // shorter than "&quot;"
- esc_apos = []byte("&#39;") // shorter than "&apos;"
- esc_amp = []byte("&amp;")
- esc_lt = []byte("&lt;")
- esc_gt = []byte("&gt;")
-)
-
-// Escape writes to w the properly escaped XML equivalent
-// of the plain text data s.
-func Escape(w io.Writer, s []byte) {
- var esc []byte
- last := 0
- for i, c := range s {
- switch c {
- case '"':
- esc = esc_quot
- case '\'':
- esc = esc_apos
- case '&':
- esc = esc_amp
- case '<':
- esc = esc_lt
- case '>':
- esc = esc_gt
- default:
- continue
- }
- w.Write(s[last:i])
- w.Write(esc)
- last = i + 1
- }
- w.Write(s[last:])
-}
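A hedged usage sketch for Escape (not part of the deleted file; the helper name is made up): the five variables above are the only substitutions it makes.

func escapeDemo() string {
	var buf bytes.Buffer
	Escape(&buf, []byte(`if a < b && c > "x" { say('hi') }`))
	// buf now holds: if a &lt; b &amp;&amp; c &gt; &#34;x&#34; { say(&#39;hi&#39;) }
	return buf.String()
}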
-
-// procInstEncoding parses the `encoding="..."` or `encoding='...'`
-// value out of the provided string, returning "" if not found.
-func procInstEncoding(s string) string {
- // TODO: this parsing is somewhat lame and not exact.
- // It works for all actual cases, though.
- idx := strings.Index(s, "encoding=")
- if idx == -1 {
- return ""
- }
- v := s[idx+len("encoding="):]
- if v == "" {
- return ""
- }
- if v[0] != '\'' && v[0] != '"' {
- return ""
- }
- idx = strings.IndexRune(v[1:], rune(v[0]))
- if idx == -1 {
- return ""
- }
- return v[1 : idx+1]
-}
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package xml
-
-import (
- "bytes"
- "io"
- "os"
- "reflect"
- "strings"
- "testing"
-)
-
-const testInput = `
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
- "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
-<body xmlns:foo="ns1" xmlns="ns2" xmlns:tag="ns3" ` +
- "\r\n\t" + ` >
- <hello lang="en">World &lt;&gt;&apos;&quot; 白鵬翔</hello>
- <goodbye />
- <outer foo:attr="value" xmlns:tag="ns4">
- <inner/>
- </outer>
- <tag:name>
- <![CDATA[Some text here.]]>
- </tag:name>
-</body><!-- missing final newline -->`
-
-var rawTokens = []Token{
- CharData([]byte("\n")),
- ProcInst{"xml", []byte(`version="1.0" encoding="UTF-8"`)},
- CharData([]byte("\n")),
- Directive([]byte(`DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
- "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"`),
- ),
- CharData([]byte("\n")),
- StartElement{Name{"", "body"}, []Attr{{Name{"xmlns", "foo"}, "ns1"}, {Name{"", "xmlns"}, "ns2"}, {Name{"xmlns", "tag"}, "ns3"}}},
- CharData([]byte("\n ")),
- StartElement{Name{"", "hello"}, []Attr{{Name{"", "lang"}, "en"}}},
- CharData([]byte("World <>'\" 白鵬翔")),
- EndElement{Name{"", "hello"}},
- CharData([]byte("\n ")),
- StartElement{Name{"", "goodbye"}, nil},
- EndElement{Name{"", "goodbye"}},
- CharData([]byte("\n ")),
- StartElement{Name{"", "outer"}, []Attr{{Name{"foo", "attr"}, "value"}, {Name{"xmlns", "tag"}, "ns4"}}},
- CharData([]byte("\n ")),
- StartElement{Name{"", "inner"}, nil},
- EndElement{Name{"", "inner"}},
- CharData([]byte("\n ")),
- EndElement{Name{"", "outer"}},
- CharData([]byte("\n ")),
- StartElement{Name{"tag", "name"}, nil},
- CharData([]byte("\n ")),
- CharData([]byte("Some text here.")),
- CharData([]byte("\n ")),
- EndElement{Name{"tag", "name"}},
- CharData([]byte("\n")),
- EndElement{Name{"", "body"}},
- Comment([]byte(" missing final newline ")),
-}
-
-var cookedTokens = []Token{
- CharData([]byte("\n")),
- ProcInst{"xml", []byte(`version="1.0" encoding="UTF-8"`)},
- CharData([]byte("\n")),
- Directive([]byte(`DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
- "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"`),
- ),
- CharData([]byte("\n")),
- StartElement{Name{"ns2", "body"}, []Attr{{Name{"xmlns", "foo"}, "ns1"}, {Name{"", "xmlns"}, "ns2"}, {Name{"xmlns", "tag"}, "ns3"}}},
- CharData([]byte("\n ")),
- StartElement{Name{"ns2", "hello"}, []Attr{{Name{"", "lang"}, "en"}}},
- CharData([]byte("World <>'\" 白鵬翔")),
- EndElement{Name{"ns2", "hello"}},
- CharData([]byte("\n ")),
- StartElement{Name{"ns2", "goodbye"}, nil},
- EndElement{Name{"ns2", "goodbye"}},
- CharData([]byte("\n ")),
- StartElement{Name{"ns2", "outer"}, []Attr{{Name{"ns1", "attr"}, "value"}, {Name{"xmlns", "tag"}, "ns4"}}},
- CharData([]byte("\n ")),
- StartElement{Name{"ns2", "inner"}, nil},
- EndElement{Name{"ns2", "inner"}},
- CharData([]byte("\n ")),
- EndElement{Name{"ns2", "outer"}},
- CharData([]byte("\n ")),
- StartElement{Name{"ns3", "name"}, nil},
- CharData([]byte("\n ")),
- CharData([]byte("Some text here.")),
- CharData([]byte("\n ")),
- EndElement{Name{"ns3", "name"}},
- CharData([]byte("\n")),
- EndElement{Name{"ns2", "body"}},
- Comment([]byte(" missing final newline ")),
-}
-
-const testInputAltEncoding = `
-<?xml version="1.0" encoding="x-testing-uppercase"?>
-<TAG>VALUE</TAG>`
-
-var rawTokensAltEncoding = []Token{
- CharData([]byte("\n")),
- ProcInst{"xml", []byte(`version="1.0" encoding="x-testing-uppercase"`)},
- CharData([]byte("\n")),
- StartElement{Name{"", "tag"}, nil},
- CharData([]byte("value")),
- EndElement{Name{"", "tag"}},
-}
-
-var xmlInput = []string{
- // unexpected EOF cases
- "<",
- "<t",
- "<t ",
- "<t/",
- "<!",
- "<!-",
- "<!--",
- "<!--c-",
- "<!--c--",
- "<!d",
- "<t></",
- "<t></t",
- "<?",
- "<?p",
- "<t a",
- "<t a=",
- "<t a='",
- "<t a=''",
- "<t/><![",
- "<t/><![C",
- "<t/><![CDATA[d",
- "<t/><![CDATA[d]",
- "<t/><![CDATA[d]]",
-
- // other Syntax errors
- "<>",
- "<t/a",
- "<0 />",
- "<?0 >",
- // "<!0 >", // let the Token() caller handle
- "</0>",
- "<t 0=''>",
- "<t a='&'>",
- "<t a='<'>",
- "<t> c;</t>",
- "<t a>",
- "<t a=>",
- "<t a=v>",
- // "<![CDATA[d]]>", // let the Token() caller handle
- "<t></e>",
- "<t></>",
- "<t></t!",
- "<t>cdata]]></t>",
-}
-
-type stringReader struct {
- s string
- off int
-}
-
-func (r *stringReader) Read(b []byte) (n int, err error) {
- if r.off >= len(r.s) {
- return 0, io.EOF
- }
- for r.off < len(r.s) && n < len(b) {
- b[n] = r.s[r.off]
- n++
- r.off++
- }
- return
-}
-
-func (r *stringReader) ReadByte() (b byte, err error) {
- if r.off >= len(r.s) {
- return 0, io.EOF
- }
- b = r.s[r.off]
- r.off++
- return
-}
-
-func StringReader(s string) io.Reader { return &stringReader{s, 0} }
-
-func TestRawToken(t *testing.T) {
- p := NewParser(StringReader(testInput))
- testRawToken(t, p, rawTokens)
-}
-
-type downCaser struct {
- t *testing.T
- r io.ByteReader
-}
-
-func (d *downCaser) ReadByte() (c byte, err error) {
- c, err = d.r.ReadByte()
- if c >= 'A' && c <= 'Z' {
- c += 'a' - 'A'
- }
- return
-}
-
-func (d *downCaser) Read(p []byte) (int, error) {
- d.t.Fatalf("unexpected Read call on downCaser reader")
- return 0, os.EINVAL
-}
-
-func TestRawTokenAltEncoding(t *testing.T) {
- sawEncoding := ""
- p := NewParser(StringReader(testInputAltEncoding))
- p.CharsetReader = func(charset string, input io.Reader) (io.Reader, error) {
- sawEncoding = charset
- if charset != "x-testing-uppercase" {
- t.Fatalf("unexpected charset %q", charset)
- }
- return &downCaser{t, input.(io.ByteReader)}, nil
- }
- testRawToken(t, p, rawTokensAltEncoding)
-}
-
-func TestRawTokenAltEncodingNoConverter(t *testing.T) {
- p := NewParser(StringReader(testInputAltEncoding))
- token, err := p.RawToken()
- if token == nil {
- t.Fatalf("expected a token on first RawToken call")
- }
- if err != nil {
- t.Fatal(err)
- }
- token, err = p.RawToken()
- if token != nil {
- t.Errorf("expected a nil token; got %#v", token)
- }
- if err == nil {
- t.Fatalf("expected an error on second RawToken call")
- }
- const encoding = "x-testing-uppercase"
- if !strings.Contains(err.Error(), encoding) {
- t.Errorf("expected error to contain %q; got error: %v",
- encoding, err)
- }
-}
-
-func testRawToken(t *testing.T, p *Parser, rawTokens []Token) {
- for i, want := range rawTokens {
- have, err := p.RawToken()
- if err != nil {
- t.Fatalf("token %d: unexpected error: %s", i, err)
- }
- if !reflect.DeepEqual(have, want) {
- t.Errorf("token %d = %#v want %#v", i, have, want)
- }
- }
-}
-
-// Ensure that directives (specifically !DOCTYPE) include the complete
-// text of any nested directives, noting that < and > do not change
-// nesting depth if they are in single or double quotes.
-
-var nestedDirectivesInput = `
-<!DOCTYPE [<!ENTITY rdf "http://www.w3.org/1999/02/22-rdf-syntax-ns#">]>
-<!DOCTYPE [<!ENTITY xlt ">">]>
-<!DOCTYPE [<!ENTITY xlt "<">]>
-<!DOCTYPE [<!ENTITY xlt '>'>]>
-<!DOCTYPE [<!ENTITY xlt '<'>]>
-<!DOCTYPE [<!ENTITY xlt '">'>]>
-<!DOCTYPE [<!ENTITY xlt "'<">]>
-`
-
-var nestedDirectivesTokens = []Token{
- CharData([]byte("\n")),
- Directive([]byte(`DOCTYPE [<!ENTITY rdf "http://www.w3.org/1999/02/22-rdf-syntax-ns#">]`)),
- CharData([]byte("\n")),
- Directive([]byte(`DOCTYPE [<!ENTITY xlt ">">]`)),
- CharData([]byte("\n")),
- Directive([]byte(`DOCTYPE [<!ENTITY xlt "<">]`)),
- CharData([]byte("\n")),
- Directive([]byte(`DOCTYPE [<!ENTITY xlt '>'>]`)),
- CharData([]byte("\n")),
- Directive([]byte(`DOCTYPE [<!ENTITY xlt '<'>]`)),
- CharData([]byte("\n")),
- Directive([]byte(`DOCTYPE [<!ENTITY xlt '">'>]`)),
- CharData([]byte("\n")),
- Directive([]byte(`DOCTYPE [<!ENTITY xlt "'<">]`)),
- CharData([]byte("\n")),
-}
-
-func TestNestedDirectives(t *testing.T) {
- p := NewParser(StringReader(nestedDirectivesInput))
-
- for i, want := range nestedDirectivesTokens {
- have, err := p.Token()
- if err != nil {
- t.Fatalf("token %d: unexpected error: %s", i, err)
- }
- if !reflect.DeepEqual(have, want) {
- t.Errorf("token %d = %#v want %#v", i, have, want)
- }
- }
-}
-
-func TestToken(t *testing.T) {
- p := NewParser(StringReader(testInput))
-
- for i, want := range cookedTokens {
- have, err := p.Token()
- if err != nil {
- t.Fatalf("token %d: unexpected error: %s", i, err)
- }
- if !reflect.DeepEqual(have, want) {
- t.Errorf("token %d = %#v want %#v", i, have, want)
- }
- }
-}
-
-func TestSyntax(t *testing.T) {
- for i := range xmlInput {
- p := NewParser(StringReader(xmlInput[i]))
- var err error
- for _, err = p.Token(); err == nil; _, err = p.Token() {
- }
- if _, ok := err.(*SyntaxError); !ok {
- t.Fatalf(`xmlInput "%s": expected SyntaxError not received`, xmlInput[i])
- }
- }
-}
-
-type allScalars struct {
- True1 bool
- True2 bool
- False1 bool
- False2 bool
- Int int
- Int8 int8
- Int16 int16
- Int32 int32
- Int64 int64
- Uint int
- Uint8 uint8
- Uint16 uint16
- Uint32 uint32
- Uint64 uint64
- Uintptr uintptr
- Float32 float32
- Float64 float64
- String string
- PtrString *string
-}
-
-var all = allScalars{
- True1: true,
- True2: true,
- False1: false,
- False2: false,
- Int: 1,
- Int8: -2,
- Int16: 3,
- Int32: -4,
- Int64: 5,
- Uint: 6,
- Uint8: 7,
- Uint16: 8,
- Uint32: 9,
- Uint64: 10,
- Uintptr: 11,
- Float32: 13.0,
- Float64: 14.0,
- String: "15",
- PtrString: &sixteen,
-}
-
-var sixteen = "16"
-
-const testScalarsInput = `<allscalars>
- <true1>true</true1>
- <true2>1</true2>
- <false1>false</false1>
- <false2>0</false2>
- <int>1</int>
- <int8>-2</int8>
- <int16>3</int16>
- <int32>-4</int32>
- <int64>5</int64>
- <uint>6</uint>
- <uint8>7</uint8>
- <uint16>8</uint16>
- <uint32>9</uint32>
- <uint64>10</uint64>
- <uintptr>11</uintptr>
- <float>12.0</float>
- <float32>13.0</float32>
- <float64>14.0</float64>
- <string>15</string>
- <ptrstring>16</ptrstring>
-</allscalars>`
-
-func TestAllScalars(t *testing.T) {
- var a allScalars
- buf := bytes.NewBufferString(testScalarsInput)
- err := Unmarshal(buf, &a)
-
- if err != nil {
- t.Fatal(err)
- }
- if !reflect.DeepEqual(a, all) {
- t.Errorf("have %+v want %+v", a, all)
- }
-}
-
-type item struct {
- Field_a string
-}
-
-func TestIssue569(t *testing.T) {
- data := `<item><field_a>abcd</field_a></item>`
- var i item
- buf := bytes.NewBufferString(data)
- err := Unmarshal(buf, &i)
-
- if err != nil || i.Field_a != "abcd" {
- t.Fatal("Expecting abcd")
- }
-}
-
-func TestUnquotedAttrs(t *testing.T) {
- data := "<tag attr=azAZ09:-_\t>"
- p := NewParser(StringReader(data))
- p.Strict = false
- token, err := p.Token()
- if _, ok := err.(*SyntaxError); ok {
- t.Errorf("Unexpected error: %v", err)
- }
- if token.(StartElement).Name.Local != "tag" {
- t.Errorf("Unexpected tag name: %v", token.(StartElement).Name.Local)
- }
- attr := token.(StartElement).Attr[0]
- if attr.Value != "azAZ09:-_" {
- t.Errorf("Unexpected attribute value: %v", attr.Value)
- }
- if attr.Name.Local != "attr" {
- t.Errorf("Unexpected attribute name: %v", attr.Name.Local)
- }
-}
-
-func TestValuelessAttrs(t *testing.T) {
- tests := [][3]string{
- {"<p nowrap>", "p", "nowrap"},
- {"<p nowrap >", "p", "nowrap"},
- {"<input checked/>", "input", "checked"},
- {"<input checked />", "input", "checked"},
- }
- for _, test := range tests {
- p := NewParser(StringReader(test[0]))
- p.Strict = false
- token, err := p.Token()
- if _, ok := err.(*SyntaxError); ok {
- t.Errorf("Unexpected error: %v", err)
- }
- if token.(StartElement).Name.Local != test[1] {
- t.Errorf("Unexpected tag name: %v", token.(StartElement).Name.Local)
- }
- attr := token.(StartElement).Attr[0]
- if attr.Value != test[2] {
- t.Errorf("Unexpected attribute value: %v", attr.Value)
- }
- if attr.Name.Local != test[2] {
- t.Errorf("Unexpected attribute name: %v", attr.Name.Local)
- }
- }
-}
-
-func TestCopyTokenCharData(t *testing.T) {
- data := []byte("same data")
- var tok1 Token = CharData(data)
- tok2 := CopyToken(tok1)
- if !reflect.DeepEqual(tok1, tok2) {
- t.Error("CopyToken(CharData) != CharData")
- }
- data[1] = 'o'
- if reflect.DeepEqual(tok1, tok2) {
- t.Error("CopyToken(CharData) uses same buffer.")
- }
-}
-
-func TestCopyTokenStartElement(t *testing.T) {
- elt := StartElement{Name{"", "hello"}, []Attr{{Name{"", "lang"}, "en"}}}
- var tok1 Token = elt
- tok2 := CopyToken(tok1)
- if !reflect.DeepEqual(tok1, tok2) {
- t.Error("CopyToken(StartElement) != StartElement")
- }
- elt.Attr[0] = Attr{Name{"", "lang"}, "de"}
- if reflect.DeepEqual(tok1, tok2) {
- t.Error("CopyToken(CharData) uses same buffer.")
- }
-}
-
-func TestSyntaxErrorLineNum(t *testing.T) {
- testInput := "<P>Foo<P>\n\n<P>Bar</>\n"
- p := NewParser(StringReader(testInput))
- var err error
- for _, err = p.Token(); err == nil; _, err = p.Token() {
- }
- synerr, ok := err.(*SyntaxError)
- if !ok {
- t.Error("Expected SyntaxError.")
- }
- if synerr.Line != 3 {
- t.Error("SyntaxError didn't have correct line number.")
- }
-}
-
-func TestTrailingRawToken(t *testing.T) {
- input := `<FOO></FOO> `
- p := NewParser(StringReader(input))
- var err error
- for _, err = p.RawToken(); err == nil; _, err = p.RawToken() {
- }
- if err != io.EOF {
- t.Fatalf("p.RawToken() = _, %v, want _, os.EOF", err)
- }
-}
-
-func TestTrailingToken(t *testing.T) {
- input := `<FOO></FOO> `
- p := NewParser(StringReader(input))
- var err error
- for _, err = p.Token(); err == nil; _, err = p.Token() {
- }
- if err != io.EOF {
- t.Fatalf("p.Token() = _, %v, want _, os.EOF", err)
- }
-}
-
-func TestEntityInsideCDATA(t *testing.T) {
- input := `<test><![CDATA[ &val=foo ]]></test>`
- p := NewParser(StringReader(input))
- var err error
- for _, err = p.Token(); err == nil; _, err = p.Token() {
- }
- if err != io.EOF {
- t.Fatalf("p.Token() = _, %v, want _, os.EOF", err)
- }
-}
-
-// The last three tests (respectively one for characters in attribute
-// names and two for character entities) pass not because of code
-// changed for issue 1259, but instead pass with the given messages
-// from other parts of xml.Parser. I provide these to note the
-// current behavior of situations where one might think that character
-// range checking would detect the error, but it does not in fact.
-
-var characterTests = []struct {
- in string
- err string
-}{
- {"\x12<doc/>", "illegal character code U+0012"},
- {"<?xml version=\"1.0\"?>\x0b<doc/>", "illegal character code U+000B"},
- {"\xef\xbf\xbe<doc/>", "illegal character code U+FFFE"},
- {"<?xml version=\"1.0\"?><doc>\r\n<hiya/>\x07<toots/></doc>", "illegal character code U+0007"},
- {"<?xml version=\"1.0\"?><doc \x12='value'>what's up</doc>", "expected attribute name in element"},
- {"<doc>&\x01;</doc>", "invalid character entity &;"},
- {"<doc>&\xef\xbf\xbe;</doc>", "invalid character entity &;"},
-}
-
-func TestDisallowedCharacters(t *testing.T) {
-
- for i, tt := range characterTests {
- p := NewParser(StringReader(tt.in))
- var err error
-
- for err == nil {
- _, err = p.Token()
- }
- synerr, ok := err.(*SyntaxError)
- if !ok {
- t.Fatalf("input %d p.Token() = _, %v, want _, *SyntaxError", i, err)
- }
- if synerr.Msg != tt.err {
- t.Fatalf("input %d synerr.Msg wrong: want '%s', got '%s'", i, tt.err, synerr.Msg)
- }
- }
-}
-
-type procInstEncodingTest struct {
- expect, got string
-}
-
-var procInstTests = []struct {
- input, expect string
-}{
- {`version="1.0" encoding="utf-8"`, "utf-8"},
- {`version="1.0" encoding='utf-8'`, "utf-8"},
- {`version="1.0" encoding='utf-8' `, "utf-8"},
- {`version="1.0" encoding=utf-8`, ""},
- {`encoding="FOO" `, "FOO"},
-}
-
-func TestProcInstEncoding(t *testing.T) {
- for _, test := range procInstTests {
- got := procInstEncoding(test.input)
- if got != test.expect {
- t.Errorf("procInstEncoding(%q) = %q; want %q", test.input, got, test.expect)
- }
- }
-}
done
done
-runtime="chan.c cpuprof.c goc2c.c lock_futex.c lock_sema.c mcache.c mcentral.c mfinal.c mfixalloc.c mgc0.c mheap.c msize.c proc.c runtime.c runtime.h malloc.h malloc.goc mprof.goc runtime1.goc sema.goc sigqueue.goc string.goc"
+runtime="chan.c cpuprof.c goc2c.c lock_futex.c lock_sema.c mcache.c mcentral.c mfinal.c mfixalloc.c mgc0.c mheap.c msize.c proc.c runtime.c runtime.h malloc.h malloc.goc mprof.goc runtime1.goc sema.goc sigqueue.goc string.goc time.goc"
for f in $runtime; do
merge_c $f $f
done
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Runtime implementations to help package time.
+
+package time
+
+#include "runtime.h"
+
+func Nanoseconds() (ret int64) {
+ ret = runtime_nanotime();
+}
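
The file added above is in the runtime's .goc dialect: goc2c turns the `package time` header plus the C-style body into a function exported to the Go time package, so the Go side only declares the signature and the runtime supplies the body. A minimal sketch of that pairing, assuming the usual bodyless-declaration convention for runtime-backed functions (the doc comment below is illustrative, not copied from the tree):

	package time

	// Nanoseconds reports the current time as nanoseconds since the
	// Unix epoch.  No body here: the implementation is the
	// runtime_nanotime-backed function generated from time.goc above.
	func Nanoseconds() int64

A caller would still use time.Nanoseconds() exactly as before; moving the body into time.goc only changes where the value comes from (runtime_nanotime), not the Go-level API.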