-354b17404643
+9f2be4fbbf69
The first line of this file holds the Mercurial revision number of the
last merge done from the master library sources.
toolexeclibgocryptox509_DATA = \
crypto/x509/pkix.gox
+toolexeclibgodatabasedir = $(toolexeclibgodir)/database
+
+toolexeclibgodatabase_DATA = \
+ database/sql.gox
+
+toolexeclibgodatabasesqldir = $(toolexeclibgodatabasedir)/sql
+
+toolexeclibgodatabasesql_DATA = \
+ database/sql/driver.gox
+
toolexeclibgodebugdir = $(toolexeclibgodir)/debug
toolexeclibgodebug_DATA = \
exp/norm.gox \
exp/proxy.gox \
exp/spdy.gox \
- exp/sql.gox \
exp/ssh.gox \
exp/terminal.gox \
- exp/types.gox
-
-toolexeclibgoexpsqldir = $(toolexeclibgoexpdir)/sql
-
-toolexeclibgoexpsql_DATA = \
- exp/sql/driver.gox
+ exp/types.gox \
+ exp/utf8string.gox
toolexeclibgogodir = $(toolexeclibgodir)/go
go/net/dnsclient_unix.go \
go/net/dnsconfig.go \
go/net/dnsmsg.go \
+ go/net/doc.go \
$(go_net_newpollserver_file) \
go/net/fd.go \
$(go_net_fd_os_file) \
go_os_files = \
$(go_os_dir_file) \
go/os/dir.go \
+ go/os/doc.go \
go/os/env.go \
go/os/error.go \
go/os/error_posix.go \
go_crypto_ecdsa_files = \
go/crypto/ecdsa/ecdsa.go
go_crypto_elliptic_files = \
- go/crypto/elliptic/elliptic.go
+ go/crypto/elliptic/elliptic.go \
+ go/crypto/elliptic/p224.go
go_crypto_hmac_files = \
go/crypto/hmac/hmac.go
go_crypto_md4_files = \
go_crypto_x509_pkix_files = \
go/crypto/x509/pkix/pkix.go
+go_database_sql_files = \
+ go/database/sql/convert.go \
+ go/database/sql/sql.go
+
+go_database_sql_driver_files = \
+ go/database/sql/driver/driver.go \
+ go/database/sql/driver/types.go
+
go_debug_dwarf_files = \
go/debug/dwarf/buf.go \
go/debug/dwarf/const.go \
go/exp/spdy/read.go \
go/exp/spdy/types.go \
go/exp/spdy/write.go
-go_exp_sql_files = \
- go/exp/sql/convert.go \
- go/exp/sql/sql.go
go_exp_ssh_files = \
go/exp/ssh/channel.go \
go/exp/ssh/cipher.go \
go/exp/types/gcimporter.go \
go/exp/types/types.go \
go/exp/types/universe.go
-
-go_exp_sql_driver_files = \
- go/exp/sql/driver/driver.go \
- go/exp/sql/driver/types.go
+go_exp_utf8string_files = \
+ go/exp/utf8string/string.go
go_go_ast_files = \
go/go/ast/ast.go \
go_unicode_utf16_files = \
go/unicode/utf16/utf16.go
go_unicode_utf8_files = \
- go/unicode/utf8/string.go \
go/unicode/utf8/utf8.go
# Define Syscall and Syscall6.
crypto/openpgp/packet.lo \
crypto/openpgp/s2k.lo \
crypto/x509/pkix.lo \
+ database/sql.lo \
+ database/sql/driver.lo \
debug/dwarf.lo \
debug/elf.lo \
debug/gosym.lo \
exp/norm.lo \
exp/proxy.lo \
exp/spdy.lo \
- exp/sql.lo \
exp/ssh.lo \
exp/terminal.lo \
exp/types.lo \
- exp/sql/driver.lo \
+ exp/utf8string.lo \
html/template.lo \
go/ast.lo \
go/build.lo \
@$(CHECK)
.PHONY: crypto/x509/pkix/check
+@go_include@ database/sql.lo.dep
+database/sql.lo.dep: $(go_database_sql_files)
+ $(BUILDDEPS)
+database/sql.lo: $(go_database_sql_files)
+ $(BUILDPACKAGE)
+database/sql/check: $(CHECK_DEPS)
+ @$(MKDIR_P) database/sql
+ @$(CHECK)
+.PHONY: database/sql/check
+
+@go_include@ database/sql/driver.lo.dep
+database/sql/driver.lo.dep: $(go_database_sql_driver_files)
+ $(BUILDDEPS)
+database/sql/driver.lo: $(go_database_sql_driver_files)
+ $(BUILDPACKAGE)
+database/sql/driver/check: $(CHECK_DEPS)
+ @$(MKDIR_P) database/sql/driver
+ @$(CHECK)
+.PHONY: database/sql/driver/check
+
@go_include@ debug/dwarf.lo.dep
debug/dwarf.lo.dep: $(go_debug_dwarf_files)
$(BUILDDEPS)
@$(CHECK)
.PHONY: exp/spdy/check
-@go_include@ exp/sql.lo.dep
-exp/sql.lo.dep: $(go_exp_sql_files)
- $(BUILDDEPS)
-exp/sql.lo: $(go_exp_sql_files)
- $(BUILDPACKAGE)
-exp/sql/check: $(CHECK_DEPS)
- @$(MKDIR_P) exp/sql
- @$(CHECK)
-.PHONY: exp/sql/check
-
@go_include@ exp/ssh.lo.dep
exp/ssh.lo.dep: $(go_exp_ssh_files)
$(BUILDDEPS)
@$(CHECK)
.PHONY: exp/types/check
+@go_include@ exp/utf8string.lo.dep
+exp/utf8string.lo.dep: $(go_exp_utf8string_files)
+ $(BUILDDEPS)
+exp/utf8string.lo: $(go_exp_utf8string_files)
+ $(BUILDPACKAGE)
+exp/utf8string/check: $(CHECK_DEPS)
+ @$(MKDIR_P) exp/utf8string
+ @$(CHECK)
+.PHONY: exp/utf8string/check
+
@go_include@ exp/inotify.lo.dep
exp/inotify.lo.dep: $(go_exp_inotify_files)
$(BUILDDEPS)
@$(CHECK)
.PHONY: exp/inotify/check
-@go_include@ exp/sql/driver.lo.dep
-exp/sql/driver.lo.dep: $(go_exp_sql_driver_files)
- $(BUILDDEPS)
-exp/sql/driver.lo: $(go_exp_sql_driver_files)
- $(BUILDPACKAGE)
-exp/sql/driver/check: $(CHECK_DEPS)
- @$(MKDIR_P) exp/sql/driver
- @$(CHECK)
-.PHONY: exp/sql/driver/check
-
@go_include@ html/template.lo.dep
html/template.lo.dep: $(go_html_template_files)
$(BUILDDEPS)
crypto/x509/pkix.gox: crypto/x509/pkix.lo
$(BUILDGOX)
+database/sql.gox: database/sql.lo
+ $(BUILDGOX)
+
+database/sql/driver.gox: database/sql/driver.lo
+ $(BUILDGOX)
+
debug/dwarf.gox: debug/dwarf.lo
$(BUILDGOX)
debug/elf.gox: debug/elf.lo
$(BUILDGOX)
exp/spdy.gox: exp/spdy.lo
$(BUILDGOX)
-exp/sql.gox: exp/sql.lo
- $(BUILDGOX)
exp/ssh.gox: exp/ssh.lo
$(BUILDGOX)
exp/terminal.gox: exp/terminal.lo
$(BUILDGOX)
exp/types.gox: exp/types.lo
$(BUILDGOX)
-
-exp/sql/driver.gox: exp/sql/driver.lo
+exp/utf8string.gox: exp/utf8string.lo
$(BUILDGOX)
html/template.gox: html/template.lo
crypto/openpgp/elgamal/check \
crypto/openpgp/packet/check \
crypto/openpgp/s2k/check \
+ database/sql/check \
+ database/sql/driver/check \
debug/dwarf/check \
debug/elf/check \
debug/macho/check \
exp/norm/check \
exp/proxy/check \
exp/spdy/check \
- exp/sql/check \
exp/ssh/check \
exp/terminal/check \
+ exp/utf8string/check \
html/template/check \
go/ast/check \
$(go_build_check_omitted_since_it_calls_6g) \
"$(DESTDIR)$(toolexeclibgocryptodir)" \
"$(DESTDIR)$(toolexeclibgocryptoopenpgpdir)" \
"$(DESTDIR)$(toolexeclibgocryptox509dir)" \
+ "$(DESTDIR)$(toolexeclibgodatabasedir)" \
+ "$(DESTDIR)$(toolexeclibgodatabasesqldir)" \
"$(DESTDIR)$(toolexeclibgodebugdir)" \
"$(DESTDIR)$(toolexeclibgoencodingdir)" \
"$(DESTDIR)$(toolexeclibgoexpdir)" \
- "$(DESTDIR)$(toolexeclibgoexpsqldir)" \
"$(DESTDIR)$(toolexeclibgogodir)" \
"$(DESTDIR)$(toolexeclibgohashdir)" \
"$(DESTDIR)$(toolexeclibgohtmldir)" \
crypto/tls.lo crypto/twofish.lo crypto/x509.lo crypto/xtea.lo \
crypto/openpgp/armor.lo crypto/openpgp/elgamal.lo \
crypto/openpgp/errors.lo crypto/openpgp/packet.lo \
- crypto/openpgp/s2k.lo crypto/x509/pkix.lo debug/dwarf.lo \
- debug/elf.lo debug/gosym.lo debug/macho.lo debug/pe.lo \
- encoding/ascii85.lo encoding/asn1.lo encoding/base32.lo \
- encoding/base64.lo encoding/binary.lo encoding/csv.lo \
- encoding/git85.lo encoding/gob.lo encoding/hex.lo \
- encoding/json.lo encoding/pem.lo encoding/xml.lo exp/ebnf.lo \
- exp/norm.lo exp/proxy.lo exp/spdy.lo exp/sql.lo exp/ssh.lo \
- exp/terminal.lo exp/types.lo exp/sql/driver.lo \
- html/template.lo go/ast.lo go/build.lo go/doc.lo go/parser.lo \
- go/printer.lo go/scanner.lo go/token.lo hash/adler32.lo \
- hash/crc32.lo hash/crc64.lo hash/fnv.lo net/http/cgi.lo \
- net/http/fcgi.lo net/http/httptest.lo net/http/httputil.lo \
- net/http/pprof.lo image/bmp.lo image/color.lo image/draw.lo \
- image/gif.lo image/jpeg.lo image/png.lo image/tiff.lo \
- index/suffixarray.lo io/ioutil.lo log/syslog.lo \
- log/syslog/syslog_c.lo math/big.lo math/cmplx.lo math/rand.lo \
- mime/mime.lo mime/multipart.lo net/dict.lo net/http.lo \
- net/mail.lo net/rpc.lo net/smtp.lo net/textproto.lo net/url.lo \
- old/netchan.lo old/regexp.lo old/template.lo \
- $(am__DEPENDENCIES_1) os/user.lo os/signal.lo path/filepath.lo \
- regexp/syntax.lo net/rpc/jsonrpc.lo runtime/debug.lo \
- runtime/pprof.lo sync/atomic.lo sync/atomic_c.lo \
- syscall/syscall.lo syscall/errno.lo syscall/wait.lo \
- text/scanner.lo text/tabwriter.lo text/template.lo \
- text/template/parse.lo testing/testing.lo testing/iotest.lo \
- testing/quick.lo testing/script.lo unicode/utf16.lo \
- unicode/utf8.lo
+ crypto/openpgp/s2k.lo crypto/x509/pkix.lo database/sql.lo \
+ database/sql/driver.lo debug/dwarf.lo debug/elf.lo \
+ debug/gosym.lo debug/macho.lo debug/pe.lo encoding/ascii85.lo \
+ encoding/asn1.lo encoding/base32.lo encoding/base64.lo \
+ encoding/binary.lo encoding/csv.lo encoding/git85.lo \
+ encoding/gob.lo encoding/hex.lo encoding/json.lo \
+ encoding/pem.lo encoding/xml.lo exp/ebnf.lo exp/norm.lo \
+ exp/proxy.lo exp/spdy.lo exp/ssh.lo exp/terminal.lo \
+ exp/types.lo exp/utf8string.lo html/template.lo go/ast.lo \
+ go/build.lo go/doc.lo go/parser.lo go/printer.lo go/scanner.lo \
+ go/token.lo hash/adler32.lo hash/crc32.lo hash/crc64.lo \
+ hash/fnv.lo net/http/cgi.lo net/http/fcgi.lo \
+ net/http/httptest.lo net/http/httputil.lo net/http/pprof.lo \
+ image/bmp.lo image/color.lo image/draw.lo image/gif.lo \
+ image/jpeg.lo image/png.lo image/tiff.lo index/suffixarray.lo \
+ io/ioutil.lo log/syslog.lo log/syslog/syslog_c.lo math/big.lo \
+ math/cmplx.lo math/rand.lo mime/mime.lo mime/multipart.lo \
+ net/dict.lo net/http.lo net/mail.lo net/rpc.lo net/smtp.lo \
+ net/textproto.lo net/url.lo old/netchan.lo old/regexp.lo \
+ old/template.lo $(am__DEPENDENCIES_1) os/user.lo os/signal.lo \
+ path/filepath.lo regexp/syntax.lo net/rpc/jsonrpc.lo \
+ runtime/debug.lo runtime/pprof.lo sync/atomic.lo \
+ sync/atomic_c.lo syscall/syscall.lo syscall/errno.lo \
+ syscall/wait.lo text/scanner.lo text/tabwriter.lo \
+ text/template.lo text/template/parse.lo testing/testing.lo \
+ testing/iotest.lo testing/quick.lo testing/script.lo \
+ unicode/utf16.lo unicode/utf8.lo
libgo_la_DEPENDENCIES = $(am__DEPENDENCIES_2) $(am__DEPENDENCIES_1) \
$(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \
$(am__DEPENDENCIES_1)
DATA = $(toolexeclibgo_DATA) $(toolexeclibgoarchive_DATA) \
$(toolexeclibgocompress_DATA) $(toolexeclibgocontainer_DATA) \
$(toolexeclibgocrypto_DATA) $(toolexeclibgocryptoopenpgp_DATA) \
- $(toolexeclibgocryptox509_DATA) $(toolexeclibgodebug_DATA) \
+ $(toolexeclibgocryptox509_DATA) $(toolexeclibgodatabase_DATA) \
+ $(toolexeclibgodatabasesql_DATA) $(toolexeclibgodebug_DATA) \
$(toolexeclibgoencoding_DATA) $(toolexeclibgoexp_DATA) \
- $(toolexeclibgoexpsql_DATA) $(toolexeclibgogo_DATA) \
- $(toolexeclibgohash_DATA) $(toolexeclibgohtml_DATA) \
- $(toolexeclibgoimage_DATA) $(toolexeclibgoindex_DATA) \
- $(toolexeclibgoio_DATA) $(toolexeclibgolog_DATA) \
- $(toolexeclibgomath_DATA) $(toolexeclibgomime_DATA) \
- $(toolexeclibgonet_DATA) $(toolexeclibgonethttp_DATA) \
- $(toolexeclibgonetrpc_DATA) $(toolexeclibgoold_DATA) \
- $(toolexeclibgoos_DATA) $(toolexeclibgopath_DATA) \
- $(toolexeclibgoregexp_DATA) $(toolexeclibgoruntime_DATA) \
- $(toolexeclibgosync_DATA) $(toolexeclibgotesting_DATA) \
- $(toolexeclibgotext_DATA) $(toolexeclibgotexttemplate_DATA) \
- $(toolexeclibgounicode_DATA)
+ $(toolexeclibgogo_DATA) $(toolexeclibgohash_DATA) \
+ $(toolexeclibgohtml_DATA) $(toolexeclibgoimage_DATA) \
+ $(toolexeclibgoindex_DATA) $(toolexeclibgoio_DATA) \
+ $(toolexeclibgolog_DATA) $(toolexeclibgomath_DATA) \
+ $(toolexeclibgomime_DATA) $(toolexeclibgonet_DATA) \
+ $(toolexeclibgonethttp_DATA) $(toolexeclibgonetrpc_DATA) \
+ $(toolexeclibgoold_DATA) $(toolexeclibgoos_DATA) \
+ $(toolexeclibgopath_DATA) $(toolexeclibgoregexp_DATA) \
+ $(toolexeclibgoruntime_DATA) $(toolexeclibgosync_DATA) \
+ $(toolexeclibgotesting_DATA) $(toolexeclibgotext_DATA) \
+ $(toolexeclibgotexttemplate_DATA) $(toolexeclibgounicode_DATA)
RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \
distclean-recursive maintainer-clean-recursive
AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \
toolexeclibgocryptox509_DATA = \
crypto/x509/pkix.gox
+toolexeclibgodatabasedir = $(toolexeclibgodir)/database
+toolexeclibgodatabase_DATA = \
+ database/sql.gox
+
+toolexeclibgodatabasesqldir = $(toolexeclibgodatabasedir)/sql
+toolexeclibgodatabasesql_DATA = \
+ database/sql/driver.gox
+
toolexeclibgodebugdir = $(toolexeclibgodir)/debug
toolexeclibgodebug_DATA = \
debug/dwarf.gox \
exp/norm.gox \
exp/proxy.gox \
exp/spdy.gox \
- exp/sql.gox \
exp/ssh.gox \
exp/terminal.gox \
- exp/types.gox
-
-toolexeclibgoexpsqldir = $(toolexeclibgoexpdir)/sql
-toolexeclibgoexpsql_DATA = \
- exp/sql/driver.gox
+ exp/types.gox \
+ exp/utf8string.gox
toolexeclibgogodir = $(toolexeclibgodir)/go
toolexeclibgogo_DATA = \
go/net/dnsclient_unix.go \
go/net/dnsconfig.go \
go/net/dnsmsg.go \
+ go/net/doc.go \
$(go_net_newpollserver_file) \
go/net/fd.go \
$(go_net_fd_os_file) \
go_os_files = \
$(go_os_dir_file) \
go/os/dir.go \
+ go/os/doc.go \
go/os/env.go \
go/os/error.go \
go/os/error_posix.go \
go/crypto/ecdsa/ecdsa.go
go_crypto_elliptic_files = \
- go/crypto/elliptic/elliptic.go
+ go/crypto/elliptic/elliptic.go \
+ go/crypto/elliptic/p224.go
go_crypto_hmac_files = \
go/crypto/hmac/hmac.go
go_crypto_x509_pkix_files = \
go/crypto/x509/pkix/pkix.go
+go_database_sql_files = \
+ go/database/sql/convert.go \
+ go/database/sql/sql.go
+
+go_database_sql_driver_files = \
+ go/database/sql/driver/driver.go \
+ go/database/sql/driver/types.go
+
go_debug_dwarf_files = \
go/debug/dwarf/buf.go \
go/debug/dwarf/const.go \
go/exp/spdy/types.go \
go/exp/spdy/write.go
-go_exp_sql_files = \
- go/exp/sql/convert.go \
- go/exp/sql/sql.go
-
go_exp_ssh_files = \
go/exp/ssh/channel.go \
go/exp/ssh/cipher.go \
go/exp/types/types.go \
go/exp/types/universe.go
-go_exp_sql_driver_files = \
- go/exp/sql/driver/driver.go \
- go/exp/sql/driver/types.go
+go_exp_utf8string_files = \
+ go/exp/utf8string/string.go
go_go_ast_files = \
go/go/ast/ast.go \
go/unicode/utf16/utf16.go
go_unicode_utf8_files = \
- go/unicode/utf8/string.go \
go/unicode/utf8/utf8.go
@LIBGO_IS_RTEMS_FALSE@syscall_syscall_file = go/syscall/syscall_unix.go
crypto/openpgp/packet.lo \
crypto/openpgp/s2k.lo \
crypto/x509/pkix.lo \
+ database/sql.lo \
+ database/sql/driver.lo \
debug/dwarf.lo \
debug/elf.lo \
debug/gosym.lo \
exp/norm.lo \
exp/proxy.lo \
exp/spdy.lo \
- exp/sql.lo \
exp/ssh.lo \
exp/terminal.lo \
exp/types.lo \
- exp/sql/driver.lo \
+ exp/utf8string.lo \
html/template.lo \
go/ast.lo \
go/build.lo \
crypto/openpgp/elgamal/check \
crypto/openpgp/packet/check \
crypto/openpgp/s2k/check \
+ database/sql/check \
+ database/sql/driver/check \
debug/dwarf/check \
debug/elf/check \
debug/macho/check \
exp/norm/check \
exp/proxy/check \
exp/spdy/check \
- exp/sql/check \
exp/ssh/check \
exp/terminal/check \
+ exp/utf8string/check \
html/template/check \
go/ast/check \
$(go_build_check_omitted_since_it_calls_6g) \
test -n "$$files" || exit 0; \
echo " ( cd '$(DESTDIR)$(toolexeclibgocryptox509dir)' && rm -f" $$files ")"; \
cd "$(DESTDIR)$(toolexeclibgocryptox509dir)" && rm -f $$files
+install-toolexeclibgodatabaseDATA: $(toolexeclibgodatabase_DATA)
+ @$(NORMAL_INSTALL)
+ test -z "$(toolexeclibgodatabasedir)" || $(MKDIR_P) "$(DESTDIR)$(toolexeclibgodatabasedir)"
+ @list='$(toolexeclibgodatabase_DATA)'; test -n "$(toolexeclibgodatabasedir)" || list=; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(toolexeclibgodatabasedir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(toolexeclibgodatabasedir)" || exit $$?; \
+ done
+
+uninstall-toolexeclibgodatabaseDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(toolexeclibgodatabase_DATA)'; test -n "$(toolexeclibgodatabasedir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ test -n "$$files" || exit 0; \
+ echo " ( cd '$(DESTDIR)$(toolexeclibgodatabasedir)' && rm -f" $$files ")"; \
+ cd "$(DESTDIR)$(toolexeclibgodatabasedir)" && rm -f $$files
+install-toolexeclibgodatabasesqlDATA: $(toolexeclibgodatabasesql_DATA)
+ @$(NORMAL_INSTALL)
+ test -z "$(toolexeclibgodatabasesqldir)" || $(MKDIR_P) "$(DESTDIR)$(toolexeclibgodatabasesqldir)"
+ @list='$(toolexeclibgodatabasesql_DATA)'; test -n "$(toolexeclibgodatabasesqldir)" || list=; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(toolexeclibgodatabasesqldir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(toolexeclibgodatabasesqldir)" || exit $$?; \
+ done
+
+uninstall-toolexeclibgodatabasesqlDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(toolexeclibgodatabasesql_DATA)'; test -n "$(toolexeclibgodatabasesqldir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ test -n "$$files" || exit 0; \
+ echo " ( cd '$(DESTDIR)$(toolexeclibgodatabasesqldir)' && rm -f" $$files ")"; \
+ cd "$(DESTDIR)$(toolexeclibgodatabasesqldir)" && rm -f $$files
install-toolexeclibgodebugDATA: $(toolexeclibgodebug_DATA)
@$(NORMAL_INSTALL)
test -z "$(toolexeclibgodebugdir)" || $(MKDIR_P) "$(DESTDIR)$(toolexeclibgodebugdir)"
test -n "$$files" || exit 0; \
echo " ( cd '$(DESTDIR)$(toolexeclibgoexpdir)' && rm -f" $$files ")"; \
cd "$(DESTDIR)$(toolexeclibgoexpdir)" && rm -f $$files
-install-toolexeclibgoexpsqlDATA: $(toolexeclibgoexpsql_DATA)
- @$(NORMAL_INSTALL)
- test -z "$(toolexeclibgoexpsqldir)" || $(MKDIR_P) "$(DESTDIR)$(toolexeclibgoexpsqldir)"
- @list='$(toolexeclibgoexpsql_DATA)'; test -n "$(toolexeclibgoexpsqldir)" || list=; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(toolexeclibgoexpsqldir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(toolexeclibgoexpsqldir)" || exit $$?; \
- done
-
-uninstall-toolexeclibgoexpsqlDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(toolexeclibgoexpsql_DATA)'; test -n "$(toolexeclibgoexpsqldir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- test -n "$$files" || exit 0; \
- echo " ( cd '$(DESTDIR)$(toolexeclibgoexpsqldir)' && rm -f" $$files ")"; \
- cd "$(DESTDIR)$(toolexeclibgoexpsqldir)" && rm -f $$files
install-toolexeclibgogoDATA: $(toolexeclibgogo_DATA)
@$(NORMAL_INSTALL)
test -z "$(toolexeclibgogodir)" || $(MKDIR_P) "$(DESTDIR)$(toolexeclibgogodir)"
config.h
installdirs: installdirs-recursive
installdirs-am:
- for dir in "$(DESTDIR)$(toolexeclibdir)" "$(DESTDIR)$(toolexeclibdir)" "$(DESTDIR)$(toolexeclibgodir)" "$(DESTDIR)$(toolexeclibgoarchivedir)" "$(DESTDIR)$(toolexeclibgocompressdir)" "$(DESTDIR)$(toolexeclibgocontainerdir)" "$(DESTDIR)$(toolexeclibgocryptodir)" "$(DESTDIR)$(toolexeclibgocryptoopenpgpdir)" "$(DESTDIR)$(toolexeclibgocryptox509dir)" "$(DESTDIR)$(toolexeclibgodebugdir)" "$(DESTDIR)$(toolexeclibgoencodingdir)" "$(DESTDIR)$(toolexeclibgoexpdir)" "$(DESTDIR)$(toolexeclibgoexpsqldir)" "$(DESTDIR)$(toolexeclibgogodir)" "$(DESTDIR)$(toolexeclibgohashdir)" "$(DESTDIR)$(toolexeclibgohtmldir)" "$(DESTDIR)$(toolexeclibgoimagedir)" "$(DESTDIR)$(toolexeclibgoindexdir)" "$(DESTDIR)$(toolexeclibgoiodir)" "$(DESTDIR)$(toolexeclibgologdir)" "$(DESTDIR)$(toolexeclibgomathdir)" "$(DESTDIR)$(toolexeclibgomimedir)" "$(DESTDIR)$(toolexeclibgonetdir)" "$(DESTDIR)$(toolexeclibgonethttpdir)" "$(DESTDIR)$(toolexeclibgonetrpcdir)" "$(DESTDIR)$(toolexeclibgoolddir)" "$(DESTDIR)$(toolexeclibgoosdir)" "$(DESTDIR)$(toolexeclibgopathdir)" "$(DESTDIR)$(toolexeclibgoregexpdir)" "$(DESTDIR)$(toolexeclibgoruntimedir)" "$(DESTDIR)$(toolexeclibgosyncdir)" "$(DESTDIR)$(toolexeclibgotestingdir)" "$(DESTDIR)$(toolexeclibgotextdir)" "$(DESTDIR)$(toolexeclibgotexttemplatedir)" "$(DESTDIR)$(toolexeclibgounicodedir)"; do \
+ for dir in "$(DESTDIR)$(toolexeclibdir)" "$(DESTDIR)$(toolexeclibdir)" "$(DESTDIR)$(toolexeclibgodir)" "$(DESTDIR)$(toolexeclibgoarchivedir)" "$(DESTDIR)$(toolexeclibgocompressdir)" "$(DESTDIR)$(toolexeclibgocontainerdir)" "$(DESTDIR)$(toolexeclibgocryptodir)" "$(DESTDIR)$(toolexeclibgocryptoopenpgpdir)" "$(DESTDIR)$(toolexeclibgocryptox509dir)" "$(DESTDIR)$(toolexeclibgodatabasedir)" "$(DESTDIR)$(toolexeclibgodatabasesqldir)" "$(DESTDIR)$(toolexeclibgodebugdir)" "$(DESTDIR)$(toolexeclibgoencodingdir)" "$(DESTDIR)$(toolexeclibgoexpdir)" "$(DESTDIR)$(toolexeclibgogodir)" "$(DESTDIR)$(toolexeclibgohashdir)" "$(DESTDIR)$(toolexeclibgohtmldir)" "$(DESTDIR)$(toolexeclibgoimagedir)" "$(DESTDIR)$(toolexeclibgoindexdir)" "$(DESTDIR)$(toolexeclibgoiodir)" "$(DESTDIR)$(toolexeclibgologdir)" "$(DESTDIR)$(toolexeclibgomathdir)" "$(DESTDIR)$(toolexeclibgomimedir)" "$(DESTDIR)$(toolexeclibgonetdir)" "$(DESTDIR)$(toolexeclibgonethttpdir)" "$(DESTDIR)$(toolexeclibgonetrpcdir)" "$(DESTDIR)$(toolexeclibgoolddir)" "$(DESTDIR)$(toolexeclibgoosdir)" "$(DESTDIR)$(toolexeclibgopathdir)" "$(DESTDIR)$(toolexeclibgoregexpdir)" "$(DESTDIR)$(toolexeclibgoruntimedir)" "$(DESTDIR)$(toolexeclibgosyncdir)" "$(DESTDIR)$(toolexeclibgotestingdir)" "$(DESTDIR)$(toolexeclibgotextdir)" "$(DESTDIR)$(toolexeclibgotexttemplatedir)" "$(DESTDIR)$(toolexeclibgounicodedir)"; do \
test -z "$$dir" || $(MKDIR_P) "$$dir"; \
done
install: install-recursive
install-toolexeclibgocryptoDATA \
install-toolexeclibgocryptoopenpgpDATA \
install-toolexeclibgocryptox509DATA \
+ install-toolexeclibgodatabaseDATA \
+ install-toolexeclibgodatabasesqlDATA \
install-toolexeclibgodebugDATA \
install-toolexeclibgoencodingDATA install-toolexeclibgoexpDATA \
- install-toolexeclibgoexpsqlDATA install-toolexeclibgogoDATA \
- install-toolexeclibgohashDATA install-toolexeclibgohtmlDATA \
- install-toolexeclibgoimageDATA install-toolexeclibgoindexDATA \
- install-toolexeclibgoioDATA install-toolexeclibgologDATA \
- install-toolexeclibgomathDATA install-toolexeclibgomimeDATA \
- install-toolexeclibgonetDATA install-toolexeclibgonethttpDATA \
+ install-toolexeclibgogoDATA install-toolexeclibgohashDATA \
+ install-toolexeclibgohtmlDATA install-toolexeclibgoimageDATA \
+ install-toolexeclibgoindexDATA install-toolexeclibgoioDATA \
+ install-toolexeclibgologDATA install-toolexeclibgomathDATA \
+ install-toolexeclibgomimeDATA install-toolexeclibgonetDATA \
+ install-toolexeclibgonethttpDATA \
install-toolexeclibgonetrpcDATA install-toolexeclibgooldDATA \
install-toolexeclibgoosDATA install-toolexeclibgopathDATA \
install-toolexeclibgoregexpDATA \
uninstall-toolexeclibgocryptoDATA \
uninstall-toolexeclibgocryptoopenpgpDATA \
uninstall-toolexeclibgocryptox509DATA \
+ uninstall-toolexeclibgodatabaseDATA \
+ uninstall-toolexeclibgodatabasesqlDATA \
uninstall-toolexeclibgodebugDATA \
uninstall-toolexeclibgoencodingDATA \
- uninstall-toolexeclibgoexpDATA \
- uninstall-toolexeclibgoexpsqlDATA \
- uninstall-toolexeclibgogoDATA uninstall-toolexeclibgohashDATA \
+ uninstall-toolexeclibgoexpDATA uninstall-toolexeclibgogoDATA \
+ uninstall-toolexeclibgohashDATA \
uninstall-toolexeclibgohtmlDATA \
uninstall-toolexeclibgoimageDATA \
uninstall-toolexeclibgoindexDATA uninstall-toolexeclibgoioDATA \
install-toolexeclibgocryptoDATA \
install-toolexeclibgocryptoopenpgpDATA \
install-toolexeclibgocryptox509DATA \
+ install-toolexeclibgodatabaseDATA \
+ install-toolexeclibgodatabasesqlDATA \
install-toolexeclibgodebugDATA \
install-toolexeclibgoencodingDATA install-toolexeclibgoexpDATA \
- install-toolexeclibgoexpsqlDATA install-toolexeclibgogoDATA \
- install-toolexeclibgohashDATA install-toolexeclibgohtmlDATA \
- install-toolexeclibgoimageDATA install-toolexeclibgoindexDATA \
- install-toolexeclibgoioDATA install-toolexeclibgologDATA \
- install-toolexeclibgomathDATA install-toolexeclibgomimeDATA \
- install-toolexeclibgonetDATA install-toolexeclibgonethttpDATA \
+ install-toolexeclibgogoDATA install-toolexeclibgohashDATA \
+ install-toolexeclibgohtmlDATA install-toolexeclibgoimageDATA \
+ install-toolexeclibgoindexDATA install-toolexeclibgoioDATA \
+ install-toolexeclibgologDATA install-toolexeclibgomathDATA \
+ install-toolexeclibgomimeDATA install-toolexeclibgonetDATA \
+ install-toolexeclibgonethttpDATA \
install-toolexeclibgonetrpcDATA install-toolexeclibgooldDATA \
install-toolexeclibgoosDATA install-toolexeclibgopathDATA \
install-toolexeclibgoregexpDATA \
uninstall-toolexeclibgocryptoDATA \
uninstall-toolexeclibgocryptoopenpgpDATA \
uninstall-toolexeclibgocryptox509DATA \
+ uninstall-toolexeclibgodatabaseDATA \
+ uninstall-toolexeclibgodatabasesqlDATA \
uninstall-toolexeclibgodebugDATA \
uninstall-toolexeclibgoencodingDATA \
- uninstall-toolexeclibgoexpDATA \
- uninstall-toolexeclibgoexpsqlDATA \
- uninstall-toolexeclibgogoDATA uninstall-toolexeclibgohashDATA \
+ uninstall-toolexeclibgoexpDATA uninstall-toolexeclibgogoDATA \
+ uninstall-toolexeclibgohashDATA \
uninstall-toolexeclibgohtmlDATA \
uninstall-toolexeclibgoimageDATA \
uninstall-toolexeclibgoindexDATA uninstall-toolexeclibgoioDATA \
@$(CHECK)
.PHONY: crypto/x509/pkix/check
+@go_include@ database/sql.lo.dep
+database/sql.lo.dep: $(go_database_sql_files)
+ $(BUILDDEPS)
+database/sql.lo: $(go_database_sql_files)
+ $(BUILDPACKAGE)
+database/sql/check: $(CHECK_DEPS)
+ @$(MKDIR_P) database/sql
+ @$(CHECK)
+.PHONY: database/sql/check
+
+@go_include@ database/sql/driver.lo.dep
+database/sql/driver.lo.dep: $(go_database_sql_driver_files)
+ $(BUILDDEPS)
+database/sql/driver.lo: $(go_database_sql_driver_files)
+ $(BUILDPACKAGE)
+database/sql/driver/check: $(CHECK_DEPS)
+ @$(MKDIR_P) database/sql/driver
+ @$(CHECK)
+.PHONY: database/sql/driver/check
+
@go_include@ debug/dwarf.lo.dep
debug/dwarf.lo.dep: $(go_debug_dwarf_files)
$(BUILDDEPS)
@$(CHECK)
.PHONY: exp/spdy/check
-@go_include@ exp/sql.lo.dep
-exp/sql.lo.dep: $(go_exp_sql_files)
- $(BUILDDEPS)
-exp/sql.lo: $(go_exp_sql_files)
- $(BUILDPACKAGE)
-exp/sql/check: $(CHECK_DEPS)
- @$(MKDIR_P) exp/sql
- @$(CHECK)
-.PHONY: exp/sql/check
-
@go_include@ exp/ssh.lo.dep
exp/ssh.lo.dep: $(go_exp_ssh_files)
$(BUILDDEPS)
@$(CHECK)
.PHONY: exp/types/check
+@go_include@ exp/utf8string.lo.dep
+exp/utf8string.lo.dep: $(go_exp_utf8string_files)
+ $(BUILDDEPS)
+exp/utf8string.lo: $(go_exp_utf8string_files)
+ $(BUILDPACKAGE)
+exp/utf8string/check: $(CHECK_DEPS)
+ @$(MKDIR_P) exp/utf8string
+ @$(CHECK)
+.PHONY: exp/utf8string/check
+
@go_include@ exp/inotify.lo.dep
exp/inotify.lo.dep: $(go_exp_inotify_files)
$(BUILDDEPS)
@$(CHECK)
.PHONY: exp/inotify/check
-@go_include@ exp/sql/driver.lo.dep
-exp/sql/driver.lo.dep: $(go_exp_sql_driver_files)
- $(BUILDDEPS)
-exp/sql/driver.lo: $(go_exp_sql_driver_files)
- $(BUILDPACKAGE)
-exp/sql/driver/check: $(CHECK_DEPS)
- @$(MKDIR_P) exp/sql/driver
- @$(CHECK)
-.PHONY: exp/sql/driver/check
-
@go_include@ html/template.lo.dep
html/template.lo.dep: $(go_html_template_files)
$(BUILDDEPS)
crypto/x509/pkix.gox: crypto/x509/pkix.lo
$(BUILDGOX)
+database/sql.gox: database/sql.lo
+ $(BUILDGOX)
+
+database/sql/driver.gox: database/sql/driver.lo
+ $(BUILDGOX)
+
debug/dwarf.gox: debug/dwarf.lo
$(BUILDGOX)
debug/elf.gox: debug/elf.lo
$(BUILDGOX)
exp/spdy.gox: exp/spdy.lo
$(BUILDGOX)
-exp/sql.gox: exp/sql.lo
- $(BUILDGOX)
exp/ssh.gox: exp/ssh.lo
$(BUILDGOX)
exp/terminal.gox: exp/terminal.lo
$(BUILDGOX)
exp/types.gox: exp/types.lo
$(BUILDGOX)
-
-exp/sql/driver.gox: exp/sql/driver.lo
+exp/utf8string.gox: exp/utf8string.lo
$(BUILDGOX)
html/template.gox: html/template.lo
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
package bytes_test
import (
// license that can be found in the LICENSE file.
// Package heap provides heap operations for any type that implements
-// heap.Interface.
+// heap.Interface. A heap is a tree with the property that each node is the
+// minimum-valued node in its subtree.
+//
+// A heap is a common way to implement a priority queue. To build a priority
+// queue, implement the Heap interface with the (negative) priority as the
+// ordering for the Less method, so Push adds items while Pop removes the
+// highest-priority item from the queue.
//
package heap
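A minimal sketch of the priority-queue pattern the new doc comment describes, for illustration only; the pq type, int items, and reversed ordering below are assumptions for this example, not part of the change.

package main

import (
	"container/heap"
	"fmt"
)

// pq is a heap of ints ordered so that the largest value has the highest priority.
type pq []int

func (p pq) Len() int           { return len(p) }
func (p pq) Less(i, j int) bool { return p[i] > p[j] } // reversed ordering: Pop yields the largest value
func (p pq) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }

func (p *pq) Push(x interface{}) { *p = append(*p, x.(int)) }
func (p *pq) Pop() interface{} {
	old := *p
	n := len(old)
	x := old[n-1]
	*p = old[:n-1]
	return x
}

func main() {
	q := &pq{}
	heap.Init(q)
	heap.Push(q, 3)
	heap.Push(q, 7)
	heap.Push(q, 1)
	fmt.Println(heap.Pop(q)) // 7, the highest-priority item
}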
// PublicKey represents an ECDSA public key.
type PublicKey struct {
- *elliptic.Curve
+ elliptic.Curve
X, Y *big.Int
}
// randFieldElement returns a random element of the field underlying the given
// curve using the procedure given in [NSA] A.2.1.
-func randFieldElement(c *elliptic.Curve, rand io.Reader) (k *big.Int, err error) {
- b := make([]byte, c.BitSize/8+8)
+func randFieldElement(c elliptic.Curve, rand io.Reader) (k *big.Int, err error) {
+ params := c.Params()
+ b := make([]byte, params.BitSize/8+8)
_, err = io.ReadFull(rand, b)
if err != nil {
return
}
k = new(big.Int).SetBytes(b)
- n := new(big.Int).Sub(c.N, one)
+ n := new(big.Int).Sub(params.N, one)
k.Mod(k, n)
k.Add(k, one)
return
}
// GenerateKey generates a public&private key pair.
-func GenerateKey(c *elliptic.Curve, rand io.Reader) (priv *PrivateKey, err error) {
+func GenerateKey(c elliptic.Curve, rand io.Reader) (priv *PrivateKey, err error) {
k, err := randFieldElement(c, rand)
if err != nil {
return
// about how this is done. [NSA] suggests that this is done in the obvious
// manner, but [SECG] truncates the hash to the bit-length of the curve order
// first. We follow [SECG] because that's what OpenSSL does.
-func hashToInt(hash []byte, c *elliptic.Curve) *big.Int {
- orderBits := c.N.BitLen()
+func hashToInt(hash []byte, c elliptic.Curve) *big.Int {
+ orderBits := c.Params().N.BitLen()
orderBytes := (orderBits + 7) / 8
if len(hash) > orderBytes {
hash = hash[:orderBytes]
func Sign(rand io.Reader, priv *PrivateKey, hash []byte) (r, s *big.Int, err error) {
// See [NSA] 3.4.1
c := priv.PublicKey.Curve
+ N := c.Params().N
var k, kInv *big.Int
for {
return
}
- kInv = new(big.Int).ModInverse(k, c.N)
+ kInv = new(big.Int).ModInverse(k, N)
r, _ = priv.Curve.ScalarBaseMult(k.Bytes())
- r.Mod(r, priv.Curve.N)
+ r.Mod(r, N)
if r.Sign() != 0 {
break
}
s = new(big.Int).Mul(priv.D, r)
s.Add(s, e)
s.Mul(s, kInv)
- s.Mod(s, priv.PublicKey.Curve.N)
+ s.Mod(s, N)
if s.Sign() != 0 {
break
}
func Verify(pub *PublicKey, hash []byte, r, s *big.Int) bool {
// See [NSA] 3.4.2
c := pub.Curve
+ N := c.Params().N
if r.Sign() == 0 || s.Sign() == 0 {
return false
}
- if r.Cmp(c.N) >= 0 || s.Cmp(c.N) >= 0 {
+ if r.Cmp(N) >= 0 || s.Cmp(N) >= 0 {
return false
}
e := hashToInt(hash, c)
- w := new(big.Int).ModInverse(s, c.N)
+ w := new(big.Int).ModInverse(s, N)
u1 := e.Mul(e, w)
u2 := w.Mul(r, w)
return false
}
x, _ := c.Add(x1, y1, x2, y2)
- x.Mod(x, c.N)
+ x.Mod(x, N)
return x.Cmp(r) == 0
}
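A minimal usage sketch of the updated ecdsa API, for illustration only: after this change curves are plain elliptic.Curve interface values (for example elliptic.P256()) rather than *elliptic.Curve pointers, and the rest of the flow is assumed unchanged.

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"
)

func main() {
	// elliptic.P256() now returns the Curve interface, which is what
	// GenerateKey, Sign and Verify expect after this change.
	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}
	hashed := []byte("testing") // in real use, a hash of the message
	r, s, err := ecdsa.Sign(rand.Reader, priv, hashed)
	if err != nil {
		panic(err)
	}
	fmt.Println(ecdsa.Verify(&priv.PublicKey, hashed, r, s)) // true
}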
"testing"
)
-func testKeyGeneration(t *testing.T, c *elliptic.Curve, tag string) {
+func testKeyGeneration(t *testing.T, c elliptic.Curve, tag string) {
priv, err := GenerateKey(c, rand.Reader)
if err != nil {
t.Errorf("%s: error: %s", tag, err)
testKeyGeneration(t, elliptic.P521(), "p521")
}
-func testSignAndVerify(t *testing.T, c *elliptic.Curve, tag string) {
+func testSignAndVerify(t *testing.T, c elliptic.Curve, tag string) {
priv, _ := GenerateKey(c, rand.Reader)
hashed := []byte("testing")
// A Curve represents a short-form Weierstrass curve with a=-3.
// See http://www.hyperelliptic.org/EFD/g1p/auto-shortw.html
-type Curve struct {
+type Curve interface {
+ // Params returns the parameters for the curve.
+ Params() *CurveParams
+ // IsOnCurve returns true if the given (x,y) lies on the curve.
+ IsOnCurve(x, y *big.Int) bool
+ // Add returns the sum of (x1,y1) and (x2,y2)
+ Add(x1, y1, x2, y2 *big.Int) (x, y *big.Int)
+ // Double returns 2*(x,y)
+ Double(x1, y1 *big.Int) (x, y *big.Int)
+ // ScalarMult returns k*(Bx,By) where k is a number in big-endian form.
+ ScalarMult(x1, y1 *big.Int, scalar []byte) (x, y *big.Int)
+ // ScalarBaseMult returns k*G, where G is the base point of the group and k
+ // is an integer in big-endian form.
+ ScalarBaseMult(scalar []byte) (x, y *big.Int)
+}
+
+// CurveParams contains the parameters of an elliptic curve and also provides
+// a generic, non-constant time implementation of Curve.
+type CurveParams struct {
P *big.Int // the order of the underlying field
N *big.Int // the order of the base point
B *big.Int // the constant of the curve equation
BitSize int // the size of the underlying field
}
-// IsOnCurve returns true if the given (x,y) lies on the curve.
-func (curve *Curve) IsOnCurve(x, y *big.Int) bool {
+func (curve *CurveParams) Params() *CurveParams {
+ return curve
+}
+
+func (curve *CurveParams) IsOnCurve(x, y *big.Int) bool {
// y² = x³ - 3x + b
y2 := new(big.Int).Mul(y, y)
y2.Mod(y2, curve.P)
// affineFromJacobian reverses the Jacobian transform. See the comment at the
// top of the file.
-func (curve *Curve) affineFromJacobian(x, y, z *big.Int) (xOut, yOut *big.Int) {
+func (curve *CurveParams) affineFromJacobian(x, y, z *big.Int) (xOut, yOut *big.Int) {
zinv := new(big.Int).ModInverse(z, curve.P)
zinvsq := new(big.Int).Mul(zinv, zinv)
return
}
-// Add returns the sum of (x1,y1) and (x2,y2)
-func (curve *Curve) Add(x1, y1, x2, y2 *big.Int) (*big.Int, *big.Int) {
+func (curve *CurveParams) Add(x1, y1, x2, y2 *big.Int) (*big.Int, *big.Int) {
z := new(big.Int).SetInt64(1)
return curve.affineFromJacobian(curve.addJacobian(x1, y1, z, x2, y2, z))
}
// addJacobian takes two points in Jacobian coordinates, (x1, y1, z1) and
// (x2, y2, z2) and returns their sum, also in Jacobian form.
-func (curve *Curve) addJacobian(x1, y1, z1, x2, y2, z2 *big.Int) (*big.Int, *big.Int, *big.Int) {
+func (curve *CurveParams) addJacobian(x1, y1, z1, x2, y2, z2 *big.Int) (*big.Int, *big.Int, *big.Int) {
// See http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#addition-add-2007-bl
z1z1 := new(big.Int).Mul(z1, z1)
z1z1.Mod(z1z1, curve.P)
return x3, y3, z3
}
-// Double returns 2*(x,y)
-func (curve *Curve) Double(x1, y1 *big.Int) (*big.Int, *big.Int) {
+func (curve *CurveParams) Double(x1, y1 *big.Int) (*big.Int, *big.Int) {
z1 := new(big.Int).SetInt64(1)
return curve.affineFromJacobian(curve.doubleJacobian(x1, y1, z1))
}
// doubleJacobian takes a point in Jacobian coordinates, (x, y, z), and
// returns its double, also in Jacobian form.
-func (curve *Curve) doubleJacobian(x, y, z *big.Int) (*big.Int, *big.Int, *big.Int) {
+func (curve *CurveParams) doubleJacobian(x, y, z *big.Int) (*big.Int, *big.Int, *big.Int) {
// See http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#doubling-dbl-2001-b
delta := new(big.Int).Mul(z, z)
delta.Mod(delta, curve.P)
return x3, y3, z3
}
-// ScalarMult returns k*(Bx,By) where k is a number in big-endian form.
-func (curve *Curve) ScalarMult(Bx, By *big.Int, k []byte) (*big.Int, *big.Int) {
+func (curve *CurveParams) ScalarMult(Bx, By *big.Int, k []byte) (*big.Int, *big.Int) {
// We have a slight problem in that the identity of the group (the
// point at infinity) cannot be represented in (x, y) form on a finite
// machine. Thus the standard add/double algorithm has to be tweaked
return curve.affineFromJacobian(x, y, z)
}
-// ScalarBaseMult returns k*G, where G is the base point of the group and k is
-// an integer in big-endian form.
-func (curve *Curve) ScalarBaseMult(k []byte) (*big.Int, *big.Int) {
+func (curve *CurveParams) ScalarBaseMult(k []byte) (*big.Int, *big.Int) {
return curve.ScalarMult(curve.Gx, curve.Gy, k)
}
var mask = []byte{0xff, 0x1, 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f}
-// GenerateKey returns a public/private key pair. The private key is generated
-// using the given reader, which must return random data.
-func (curve *Curve) GenerateKey(rand io.Reader) (priv []byte, x, y *big.Int, err error) {
- byteLen := (curve.BitSize + 7) >> 3
+// GenerateKey returns a public/private key pair. The private key is
+// generated using the given reader, which must return random data.
+func GenerateKey(curve Curve, rand io.Reader) (priv []byte, x, y *big.Int, err error) {
+ bitSize := curve.Params().BitSize
+ byteLen := (bitSize + 7) >> 3
priv = make([]byte, byteLen)
for x == nil {
}
// We have to mask off any excess bits in the case that the size of the
// underlying field is not a whole number of bytes.
- priv[0] &= mask[curve.BitSize%8]
+ priv[0] &= mask[bitSize%8]
// This is because, in tests, rand will return all zeros and we don't
// want to get the point at infinity and loop forever.
priv[1] ^= 0x42
return
}
-// Marshal converts a point into the form specified in section 4.3.6 of ANSI
-// X9.62.
-func (curve *Curve) Marshal(x, y *big.Int) []byte {
- byteLen := (curve.BitSize + 7) >> 3
+// Marshal converts a point into the form specified in section 4.3.6 of ANSI X9.62.
+func Marshal(curve Curve, x, y *big.Int) []byte {
+ byteLen := (curve.Params().BitSize + 7) >> 3
ret := make([]byte, 1+2*byteLen)
ret[0] = 4 // uncompressed point
return ret
}
-// Unmarshal converts a point, serialized by Marshal, into an x, y pair. On
-// error, x = nil.
-func (curve *Curve) Unmarshal(data []byte) (x, y *big.Int) {
- byteLen := (curve.BitSize + 7) >> 3
+// Unmarshal converts a point, serialized by Marshal, into an x, y pair. On error, x = nil.
+func Unmarshal(curve Curve, data []byte) (x, y *big.Int) {
+ byteLen := (curve.Params().BitSize + 7) >> 3
if len(data) != 1+2*byteLen {
return
}
}
var initonce sync.Once
-var p224 *Curve
-var p256 *Curve
-var p384 *Curve
-var p521 *Curve
+var p256 *CurveParams
+var p384 *CurveParams
+var p521 *CurveParams
func initAll() {
initP224()
initP521()
}
-func initP224() {
- // See FIPS 186-3, section D.2.2
- p224 = new(Curve)
- p224.P, _ = new(big.Int).SetString("26959946667150639794667015087019630673557916260026308143510066298881", 10)
- p224.N, _ = new(big.Int).SetString("26959946667150639794667015087019625940457807714424391721682722368061", 10)
- p224.B, _ = new(big.Int).SetString("b4050a850c04b3abf54132565044b0b7d7bfd8ba270b39432355ffb4", 16)
- p224.Gx, _ = new(big.Int).SetString("b70e0cbd6bb4bf7f321390b94a03c1d356c21122343280d6115c1d21", 16)
- p224.Gy, _ = new(big.Int).SetString("bd376388b5f723fb4c22dfe6cd4375a05a07476444d5819985007e34", 16)
- p224.BitSize = 224
-}
-
func initP256() {
// See FIPS 186-3, section D.2.3
- p256 = new(Curve)
+ p256 = new(CurveParams)
p256.P, _ = new(big.Int).SetString("115792089210356248762697446949407573530086143415290314195533631308867097853951", 10)
p256.N, _ = new(big.Int).SetString("115792089210356248762697446949407573529996955224135760342422259061068512044369", 10)
p256.B, _ = new(big.Int).SetString("5ac635d8aa3a93e7b3ebbd55769886bc651d06b0cc53b0f63bce3c3e27d2604b", 16)
func initP384() {
// See FIPS 186-3, section D.2.4
- p384 = new(Curve)
+ p384 = new(CurveParams)
p384.P, _ = new(big.Int).SetString("39402006196394479212279040100143613805079739270465446667948293404245721771496870329047266088258938001861606973112319", 10)
p384.N, _ = new(big.Int).SetString("39402006196394479212279040100143613805079739270465446667946905279627659399113263569398956308152294913554433653942643", 10)
p384.B, _ = new(big.Int).SetString("b3312fa7e23ee7e4988e056be3f82d19181d9c6efe8141120314088f5013875ac656398d8a2ed19d2a85c8edd3ec2aef", 16)
func initP521() {
// See FIPS 186-3, section D.2.5
- p521 = new(Curve)
+ p521 = new(CurveParams)
p521.P, _ = new(big.Int).SetString("6864797660130609714981900799081393217269435300143305409394463459185543183397656052122559640661454554977296311391480858037121987999716643812574028291115057151", 10)
p521.N, _ = new(big.Int).SetString("6864797660130609714981900799081393217269435300143305409394463459185543183397655394245057746333217197532963996371363321113864768612440380340372808892707005449", 10)
p521.B, _ = new(big.Int).SetString("051953eb9618e1c9a1f929a21a0b68540eea2da725b99b315f3b8b489918ef109e156193951ec7e937b1652c0bd3bb1bf073573df883d2c34f1ef451fd46b503f00", 16)
p521.BitSize = 521
}
-// P224 returns a Curve which implements P-224 (see FIPS 186-3, section D.2.2)
-func P224() *Curve {
- initonce.Do(initAll)
- return p224
-}
-
// P256 returns a Curve which implements P-256 (see FIPS 186-3, section D.2.3)
-func P256() *Curve {
+func P256() Curve {
initonce.Do(initAll)
return p256
}
// P384 returns a Curve which implements P-384 (see FIPS 186-3, section D.2.4)
-func P384() *Curve {
+func P384() Curve {
initonce.Do(initAll)
return p384
}
// P521 returns a Curve which implements P-521 (see FIPS 186-3, section D.2.5)
-func P521() *Curve {
+func P521() Curve {
initonce.Do(initAll)
return p521
}
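A short sketch of the package-level helpers after this change, for illustration only: GenerateKey, Marshal and Unmarshal now take the Curve as their first argument instead of being methods on *Curve, mirroring what TestMarshal below exercises.

package main

import (
	"crypto/elliptic"
	"crypto/rand"
	"fmt"
)

func main() {
	curve := elliptic.P224() // a Curve interface value, backed by the constant-time p224 code added below
	_, x, y, err := elliptic.GenerateKey(curve, rand.Reader)
	if err != nil {
		panic(err)
	}
	buf := elliptic.Marshal(curve, x, y) // uncompressed point per ANSI X9.62, section 4.3.6
	xx, yy := elliptic.Unmarshal(curve, buf)
	fmt.Println(xx.Cmp(x) == 0 && yy.Cmp(y) == 0) // true: the round trip preserves the point
}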
func TestOnCurve(t *testing.T) {
p224 := P224()
- if !p224.IsOnCurve(p224.Gx, p224.Gy) {
+ if !p224.IsOnCurve(p224.Params().Gx, p224.Params().Gy) {
t.Errorf("FAIL")
}
}
}
x, y := p224.ScalarBaseMult(k.Bytes())
if fmt.Sprintf("%x", x) != e.x || fmt.Sprintf("%x", y) != e.y {
- t.Errorf("%d: bad output for k=%s: got (%x, %s), want (%x, %s)", i, e.k, x, y, e.x, e.y)
+ t.Errorf("%d: bad output for k=%s: got (%x, %x), want (%s, %s)", i, e.k, x, y, e.x, e.y)
+ }
+ if testing.Short() && i > 5 {
+ break
+ }
+ }
+}
+
+func TestGenericBaseMult(t *testing.T) {
+ // We use the P224 CurveParams directly in order to test the generic implementation.
+ p224 := P224().Params()
+ for i, e := range p224BaseMultTests {
+ k, ok := new(big.Int).SetString(e.k, 10)
+ if !ok {
+ t.Errorf("%d: bad value for k: %s", i, e.k)
+ }
+ x, y := p224.ScalarBaseMult(k.Bytes())
+ if fmt.Sprintf("%x", x) != e.x || fmt.Sprintf("%x", y) != e.y {
+ t.Errorf("%d: bad output for k=%s: got (%x, %x), want (%s, %s)", i, e.k, x, y, e.x, e.y)
}
if testing.Short() && i > 5 {
break
func TestMarshal(t *testing.T) {
p224 := P224()
- _, x, y, err := p224.GenerateKey(rand.Reader)
+ _, x, y, err := GenerateKey(p224, rand.Reader)
if err != nil {
t.Error(err)
return
}
- serialized := p224.Marshal(x, y)
- xx, yy := p224.Unmarshal(serialized)
+ serialized := Marshal(p224, x, y)
+ xx, yy := Unmarshal(p224, serialized)
if xx == nil {
t.Error("failed to unmarshal")
return
--- /dev/null
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package elliptic
+
+// This is a constant-time, 32-bit implementation of P224. See FIPS 186-3,
+// section D.2.2.
+//
+// See http://www.imperialviolet.org/2010/12/04/ecc.html ([1]) for background.
+
+import (
+ "math/big"
+)
+
+var p224 p224Curve
+
+type p224Curve struct {
+ *CurveParams
+ gx, gy, b p224FieldElement
+}
+
+func initP224() {
+ // See FIPS 186-3, section D.2.2
+ p224.CurveParams = new(CurveParams)
+ p224.P, _ = new(big.Int).SetString("26959946667150639794667015087019630673557916260026308143510066298881", 10)
+ p224.N, _ = new(big.Int).SetString("26959946667150639794667015087019625940457807714424391721682722368061", 10)
+ p224.B, _ = new(big.Int).SetString("b4050a850c04b3abf54132565044b0b7d7bfd8ba270b39432355ffb4", 16)
+ p224.Gx, _ = new(big.Int).SetString("b70e0cbd6bb4bf7f321390b94a03c1d356c21122343280d6115c1d21", 16)
+ p224.Gy, _ = new(big.Int).SetString("bd376388b5f723fb4c22dfe6cd4375a05a07476444d5819985007e34", 16)
+ p224.BitSize = 224
+
+ p224FromBig(&p224.gx, p224.Gx)
+ p224FromBig(&p224.gy, p224.Gy)
+ p224FromBig(&p224.b, p224.B)
+}
+
+// P224 returns a Curve which implements P-224 (see FIPS 186-3, section D.2.2)
+func P224() Curve {
+ initonce.Do(initAll)
+ return p224
+}
+
+func (curve p224Curve) Params() *CurveParams {
+ return curve.CurveParams
+}
+
+func (curve p224Curve) IsOnCurve(bigX, bigY *big.Int) bool {
+ var x, y p224FieldElement
+ p224FromBig(&x, bigX)
+ p224FromBig(&y, bigY)
+
+ // y² = x³ - 3x + b
+ var tmp p224LargeFieldElement
+ var x3 p224FieldElement
+ p224Square(&x3, &x, &tmp)
+ p224Mul(&x3, &x3, &x, &tmp)
+
+ for i := 0; i < 8; i++ {
+ x[i] *= 3
+ }
+ p224Sub(&x3, &x3, &x)
+ p224Reduce(&x3)
+ p224Add(&x3, &x3, &curve.b)
+ p224Contract(&x3, &x3)
+
+ p224Square(&y, &y, &tmp)
+ p224Contract(&y, &y)
+
+ for i := 0; i < 8; i++ {
+ if y[i] != x3[i] {
+ return false
+ }
+ }
+ return true
+}
+
+func (p224Curve) Add(bigX1, bigY1, bigX2, bigY2 *big.Int) (x, y *big.Int) {
+ var x1, y1, z1, x2, y2, z2, x3, y3, z3 p224FieldElement
+
+ p224FromBig(&x1, bigX1)
+ p224FromBig(&y1, bigY1)
+ z1[0] = 1
+ p224FromBig(&x2, bigX2)
+ p224FromBig(&y2, bigY2)
+ z2[0] = 1
+
+ p224AddJacobian(&x3, &y3, &z3, &x1, &y1, &z1, &x2, &y2, &z2)
+ return p224ToAffine(&x3, &y3, &z3)
+}
+
+func (p224Curve) Double(bigX1, bigY1 *big.Int) (x, y *big.Int) {
+ var x1, y1, z1, x2, y2, z2 p224FieldElement
+
+ p224FromBig(&x1, bigX1)
+ p224FromBig(&y1, bigY1)
+ z1[0] = 1
+
+ p224DoubleJacobian(&x2, &y2, &z2, &x1, &y1, &z1)
+ return p224ToAffine(&x2, &y2, &z2)
+}
+
+func (p224Curve) ScalarMult(bigX1, bigY1 *big.Int, scalar []byte) (x, y *big.Int) {
+ var x1, y1, z1, x2, y2, z2 p224FieldElement
+
+ p224FromBig(&x1, bigX1)
+ p224FromBig(&y1, bigY1)
+ z1[0] = 1
+
+ p224ScalarMult(&x2, &y2, &z2, &x1, &y1, &z1, scalar)
+ return p224ToAffine(&x2, &y2, &z2)
+}
+
+func (curve p224Curve) ScalarBaseMult(scalar []byte) (x, y *big.Int) {
+ var z1, x2, y2, z2 p224FieldElement
+
+ z1[0] = 1
+ p224ScalarMult(&x2, &y2, &z2, &curve.gx, &curve.gy, &z1, scalar)
+ return p224ToAffine(&x2, &y2, &z2)
+}
+
+// Field element functions.
+//
+// The field that we're dealing with is ℤ/pℤ where p = 2**224 - 2**96 + 1.
+//
+// Field elements are represented by a FieldElement, which is a typedef to an
+// array of 8 uint32's. The value of a FieldElement, a, is:
+// a[0] + 2**28·a[1] + 2**56·a[2] + ... + 2**196·a[7]
+//
+// Using 28-bit limbs means that there's only 4 bits of headroom, which is less
+// than we would really like. But it has the useful feature that we hit 2**224
+// exactly, making the reflections during a reduce much nicer.
+type p224FieldElement [8]uint32
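// An illustrative evaluation of the representation above; the helper name
// p224ValueOf is hypothetical, and p224ToBig at the end of this file is the
// conversion actually used.
//
//	func p224ValueOf(a *p224FieldElement) *big.Int {
//		v := new(big.Int)
//		for i := 7; i >= 0; i-- {
//			v.Lsh(v, 28)                                 // shift earlier limbs up 28 bits
//			v.Add(v, new(big.Int).SetInt64(int64(a[i]))) // limb i contributes a[i]·2**(28·i)
//		}
//		return v
//	}
//
// For example, the element [8]uint32{1, 2, 0, 0, 0, 0, 0, 0} has value 1 + 2·2**28.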
+
+// p224Add computes *out = a+b
+//
+// a[i] + b[i] < 2**32
+func p224Add(out, a, b *p224FieldElement) {
+ for i := 0; i < 8; i++ {
+ out[i] = a[i] + b[i]
+ }
+}
+
+const two31p3 = 1<<31 + 1<<3
+const two31m3 = 1<<31 - 1<<3
+const two31m15m3 = 1<<31 - 1<<15 - 1<<3
+
+// p224ZeroModP31 is 0 mod p where bit 31 is set in all limbs so that we can
+// subtract smaller amounts without underflow. See the section "Subtraction" in
+// [1] for reasoning.
+var p224ZeroModP31 = []uint32{two31p3, two31m3, two31m3, two31m15m3, two31m3, two31m3, two31m3, two31m3}
+
+// p224Sub computes *out = a-b
+//
+// a[i], b[i] < 2**30
+// out[i] < 2**32
+func p224Sub(out, a, b *p224FieldElement) {
+ for i := 0; i < 8; i++ {
+ out[i] = a[i] + p224ZeroModP31[i] - b[i]
+ }
+}
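// Note on the bound: every limb of p224ZeroModP31 is at least 2**31 - 2**15 - 2**3,
// which exceeds the 2**30 bound on b[i] above, so a[i] + p224ZeroModP31[i] - b[i]
// never wraps below zero, and since the added vector is 0 mod p the result is
// still congruent to a - b (see the section "Subtraction" in [1]).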
+
+// LargeFieldElement also represents an element of the field. The limbs are
+// still spaced 28-bits apart and in little-endian order. So the limbs are at
+// 0, 28, 56, ..., 392 bits, each 64-bits wide.
+type p224LargeFieldElement [15]uint64
+
+const two63p35 = 1<<63 + 1<<35
+const two63m35 = 1<<63 - 1<<35
+const two63m35m19 = 1<<63 - 1<<35 - 1<<19
+
+// p224ZeroModP63 is 0 mod p where bit 63 is set in all limbs. See the section
+// "Subtraction" in [1] for why.
+var p224ZeroModP63 = [8]uint64{two63p35, two63m35, two63m35, two63m35, two63m35m19, two63m35, two63m35, two63m35}
+
+const bottom12Bits = 0xfff
+const bottom28Bits = 0xfffffff
+
+// p224Mul computes *out = a*b
+//
+// a[i] < 2**29, b[i] < 2**30 (or vice versa)
+// out[i] < 2**29
+func p224Mul(out, a, b *p224FieldElement, tmp *p224LargeFieldElement) {
+ for i := 0; i < 15; i++ {
+ tmp[i] = 0
+ }
+
+ for i := 0; i < 8; i++ {
+ for j := 0; j < 8; j++ {
+ tmp[i+j] += uint64(a[i]) * uint64(b[j])
+ }
+ }
+
+ p224ReduceLarge(out, tmp)
+}
+
+// Square computes *out = a*a
+//
+// a[i] < 2**29
+// out[i] < 2**29
+func p224Square(out, a *p224FieldElement, tmp *p224LargeFieldElement) {
+ for i := 0; i < 15; i++ {
+ tmp[i] = 0
+ }
+
+ for i := 0; i < 8; i++ {
+ for j := 0; j <= i; j++ {
+ r := uint64(a[i]) * uint64(a[j])
+ if i == j {
+ tmp[i+j] += r
+ } else {
+ tmp[i+j] += r << 1
+ }
+ }
+ }
+
+ p224ReduceLarge(out, tmp)
+}
+
+// ReduceLarge converts a p224LargeFieldElement to a p224FieldElement.
+//
+// in[i] < 2**62
+func p224ReduceLarge(out *p224FieldElement, in *p224LargeFieldElement) {
+ for i := 0; i < 8; i++ {
+ in[i] += p224ZeroModP63[i]
+ }
+
+ // Eliminate the coefficients at 2**224 and greater.
+ for i := 14; i >= 8; i-- {
+ in[i-8] -= in[i]
+ in[i-5] += (in[i] & 0xffff) << 12
+ in[i-4] += in[i] >> 16
+ }
+ in[8] = 0
+ // in[0..8] < 2**64
+
+ // As the values become small enough, we start to store them in |out|
+ // and use 32-bit operations.
+ for i := 1; i < 8; i++ {
+ in[i+1] += in[i] >> 28
+ out[i] = uint32(in[i] & bottom28Bits)
+ }
+ in[0] -= in[8]
+ out[3] += uint32(in[8]&0xffff) << 12
+ out[4] += uint32(in[8] >> 16)
+ // in[0] < 2**64
+ // out[3] < 2**29
+ // out[4] < 2**29
+ // out[1,2,5..7] < 2**28
+
+ out[0] = uint32(in[0] & bottom28Bits)
+ out[1] += uint32((in[0] >> 28) & bottom28Bits)
+ out[2] += uint32(in[0] >> 56)
+ // out[0] < 2**28
+ // out[1..4] < 2**29
+ // out[5..7] < 2**28
+}
+
+// Reduce reduces the coefficients of a to smaller bounds.
+//
+// On entry: a[i] < 2**31 + 2**30
+// On exit: a[i] < 2**29
+func p224Reduce(a *p224FieldElement) {
+ for i := 0; i < 7; i++ {
+ a[i+1] += a[i] >> 28
+ a[i] &= bottom28Bits
+ }
+ top := a[7] >> 28
+ a[7] &= bottom28Bits
+
+ // top < 2**4
+ mask := top
+ mask |= mask >> 2
+ mask |= mask >> 1
+ mask <<= 31
+ mask = uint32(int32(mask) >> 31)
+ // Mask is all ones if top != 0, all zero otherwise
+
+ a[0] -= top
+ a[3] += top << 12
+
+ // We may have just made a[0] negative but, if we did, then we must
+ // have added something to a[3], so it's > 2**12. Therefore we can
+ // carry down to a[0].
+ a[3] -= 1 & mask
+ a[2] += mask & (1<<28 - 1)
+ a[1] += mask & (1<<28 - 1)
+ a[0] += mask & (1 << 28)
+}
+
+// p224Invert calculates *out = in**-1 by computing in**(2**224 - 2**96 - 1),
+// i.e. Fermat's little theorem.
+func p224Invert(out, in *p224FieldElement) {
+ var f1, f2, f3, f4 p224FieldElement
+ var c p224LargeFieldElement
+
+ p224Square(&f1, in, &c) // 2
+ p224Mul(&f1, &f1, in, &c) // 2**2 - 1
+ p224Square(&f1, &f1, &c) // 2**3 - 2
+ p224Mul(&f1, &f1, in, &c) // 2**3 - 1
+ p224Square(&f2, &f1, &c) // 2**4 - 2
+ p224Square(&f2, &f2, &c) // 2**5 - 4
+ p224Square(&f2, &f2, &c) // 2**6 - 8
+ p224Mul(&f1, &f1, &f2, &c) // 2**6 - 1
+ p224Square(&f2, &f1, &c) // 2**7 - 2
+ for i := 0; i < 5; i++ { // 2**12 - 2**6
+ p224Square(&f2, &f2, &c)
+ }
+ p224Mul(&f2, &f2, &f1, &c) // 2**12 - 1
+ p224Square(&f3, &f2, &c) // 2**13 - 2
+ for i := 0; i < 11; i++ { // 2**24 - 2**12
+ p224Square(&f3, &f3, &c)
+ }
+ p224Mul(&f2, &f3, &f2, &c) // 2**24 - 1
+ p224Square(&f3, &f2, &c) // 2**25 - 2
+ for i := 0; i < 23; i++ { // 2**48 - 2**24
+ p224Square(&f3, &f3, &c)
+ }
+ p224Mul(&f3, &f3, &f2, &c) // 2**48 - 1
+ p224Square(&f4, &f3, &c) // 2**49 - 2
+ for i := 0; i < 47; i++ { // 2**96 - 2**48
+ p224Square(&f4, &f4, &c)
+ }
+ p224Mul(&f3, &f3, &f4, &c) // 2**96 - 1
+ p224Square(&f4, &f3, &c) // 2**97 - 2
+ for i := 0; i < 23; i++ { // 2**120 - 2**24
+ p224Square(&f4, &f4, &c)
+ }
+ p224Mul(&f2, &f4, &f2, &c) // 2**120 - 1
+ for i := 0; i < 6; i++ { // 2**126 - 2**6
+ p224Square(&f2, &f2, &c)
+ }
+ p224Mul(&f1, &f1, &f2, &c) // 2**126 - 1
+ p224Square(&f1, &f1, &c) // 2**127 - 2
+ p224Mul(&f1, &f1, in, &c) // 2**127 - 1
+ for i := 0; i < 97; i++ { // 2**224 - 2**97
+ p224Square(&f1, &f1, &c)
+ }
+ p224Mul(out, &f1, &f3, &c) // 2**224 - 2**96 - 1
+}
+
+// p224Contract converts a FieldElement to its unique, minimal form.
+//
+// On entry, in[i] < 2**32
+// On exit, in[i] < 2**28
+func p224Contract(out, in *p224FieldElement) {
+ copy(out[:], in[:])
+
+ for i := 0; i < 7; i++ {
+ out[i+1] += out[i] >> 28
+ out[i] &= bottom28Bits
+ }
+ top := out[7] >> 28
+ out[7] &= bottom28Bits
+
+ out[0] -= top
+ out[3] += top << 12
+
+ // We may just have made out[i] negative. So we carry down. If we made
+ // out[0] negative then we know that out[3] is sufficiently positive
+ // because we just added to it.
+ for i := 0; i < 3; i++ {
+ mask := uint32(int32(out[i]) >> 31)
+ out[i] += (1 << 28) & mask
+ out[i+1] -= 1 & mask
+ }
+
+ // Now we see if the value is >= p and, if so, subtract p.
+
+ // First we build a mask from the top four limbs, which must all be
+ // equal to bottom28Bits if the whole value is >= p. If top4AllOnes
+ // ends up with any zero bits in the bottom 28 bits, then this wasn't
+ // true.
+ top4AllOnes := uint32(0xffffffff)
+ for i := 4; i < 8; i++ {
+ top4AllOnes &= (out[i] & bottom28Bits) - 1
+ }
+ top4AllOnes |= 0xf0000000
+ // Now we replicate any zero bits to all the bits in top4AllOnes.
+ top4AllOnes &= top4AllOnes >> 16
+ top4AllOnes &= top4AllOnes >> 8
+ top4AllOnes &= top4AllOnes >> 4
+ top4AllOnes &= top4AllOnes >> 2
+ top4AllOnes &= top4AllOnes >> 1
+ top4AllOnes = uint32(int32(top4AllOnes<<31) >> 31)
+
+ // Now we test whether the bottom three limbs are non-zero.
+ bottom3NonZero := out[0] | out[1] | out[2]
+ bottom3NonZero |= bottom3NonZero >> 16
+ bottom3NonZero |= bottom3NonZero >> 8
+ bottom3NonZero |= bottom3NonZero >> 4
+ bottom3NonZero |= bottom3NonZero >> 2
+ bottom3NonZero |= bottom3NonZero >> 1
+ bottom3NonZero = uint32(int32(bottom3NonZero<<31) >> 31)
+
+ // Everything depends on the value of out[3].
+ // If it's > 0xffff000 and top4AllOnes != 0 then the whole value is >= p
+ // If it's = 0xffff000 and top4AllOnes != 0 and bottom3NonZero != 0,
+ // then the whole value is >= p
+ // If it's < 0xffff000, then the whole value is < p
+ n := out[3] - 0xffff000
+ out3Equal := n
+ out3Equal |= out3Equal >> 16
+ out3Equal |= out3Equal >> 8
+ out3Equal |= out3Equal >> 4
+ out3Equal |= out3Equal >> 2
+ out3Equal |= out3Equal >> 1
+ out3Equal = ^uint32(int32(out3Equal<<31) >> 31)
+
+ // If out[3] > 0xffff000 then n's MSB will be zero.
+ out3GT := ^uint32(int32(n<<31) >> 31)
+
+ mask := top4AllOnes & ((out3Equal & bottom3NonZero) | out3GT)
+ out[0] -= 1 & mask
+ out[3] -= 0xffff000 & mask
+ out[4] -= 0xfffffff & mask
+ out[5] -= 0xfffffff & mask
+ out[6] -= 0xfffffff & mask
+ out[7] -= 0xfffffff & mask
+}
+
+// Group element functions.
+//
+// These functions deal with group elements. The group is an elliptic curve
+// group with a = -3 defined in FIPS 186-3, section D.2.2.
+
+// p224AddJacobian computes *out = a+b where a != b.
+func p224AddJacobian(x3, y3, z3, x1, y1, z1, x2, y2, z2 *p224FieldElement) {
+ // See http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#addition-p224Add-2007-bl
+ var z1z1, z2z2, u1, u2, s1, s2, h, i, j, r, v p224FieldElement
+ var c p224LargeFieldElement
+
+ // Z1Z1 = Z1²
+ p224Square(&z1z1, z1, &c)
+ // Z2Z2 = Z2²
+ p224Square(&z2z2, z2, &c)
+ // U1 = X1*Z2Z2
+ p224Mul(&u1, x1, &z2z2, &c)
+ // U2 = X2*Z1Z1
+ p224Mul(&u2, x2, &z1z1, &c)
+ // S1 = Y1*Z2*Z2Z2
+ p224Mul(&s1, z2, &z2z2, &c)
+ p224Mul(&s1, y1, &s1, &c)
+ // S2 = Y2*Z1*Z1Z1
+ p224Mul(&s2, z1, &z1z1, &c)
+ p224Mul(&s2, y2, &s2, &c)
+ // H = U2-U1
+ p224Sub(&h, &u2, &u1)
+ p224Reduce(&h)
+ // I = (2*H)²
+ for j := 0; j < 8; j++ {
+ i[j] = h[j] << 1
+ }
+ p224Reduce(&i)
+ p224Square(&i, &i, &c)
+ // J = H*I
+ p224Mul(&j, &h, &i, &c)
+ // r = 2*(S2-S1)
+ p224Sub(&r, &s2, &s1)
+ p224Reduce(&r)
+ for i := 0; i < 8; i++ {
+ r[i] <<= 1
+ }
+ p224Reduce(&r)
+ // V = U1*I
+ p224Mul(&v, &u1, &i, &c)
+ // Z3 = ((Z1+Z2)²-Z1Z1-Z2Z2)*H
+ p224Add(&z1z1, &z1z1, &z2z2)
+ p224Add(&z2z2, z1, z2)
+ p224Reduce(&z2z2)
+ p224Square(&z2z2, &z2z2, &c)
+ p224Sub(z3, &z2z2, &z1z1)
+ p224Reduce(z3)
+ p224Mul(z3, z3, &h, &c)
+ // X3 = r²-J-2*V
+ for i := 0; i < 8; i++ {
+ z1z1[i] = v[i] << 1
+ }
+ p224Add(&z1z1, &j, &z1z1)
+ p224Reduce(&z1z1)
+ p224Square(x3, &r, &c)
+ p224Sub(x3, x3, &z1z1)
+ p224Reduce(x3)
+ // Y3 = r*(V-X3)-2*S1*J
+ for i := 0; i < 8; i++ {
+ s1[i] <<= 1
+ }
+ p224Mul(&s1, &s1, &j, &c)
+ p224Sub(&z1z1, &v, x3)
+ p224Reduce(&z1z1)
+ p224Mul(&z1z1, &z1z1, &r, &c)
+ p224Sub(y3, &z1z1, &s1)
+ p224Reduce(y3)
+}
+
+// p224DoubleJacobian computes *out = a+a.
+func p224DoubleJacobian(x3, y3, z3, x1, y1, z1 *p224FieldElement) {
+ var delta, gamma, beta, alpha, t p224FieldElement
+ var c p224LargeFieldElement
+
+ p224Square(&delta, z1, &c)
+ p224Square(&gamma, y1, &c)
+ p224Mul(&beta, x1, &gamma, &c)
+
+ // alpha = 3*(X1-delta)*(X1+delta)
+ p224Add(&t, x1, &delta)
+ for i := 0; i < 8; i++ {
+ t[i] += t[i] << 1
+ }
+ p224Reduce(&t)
+ p224Sub(&alpha, x1, &delta)
+ p224Reduce(&alpha)
+ p224Mul(&alpha, &alpha, &t, &c)
+
+ // Z3 = (Y1+Z1)²-gamma-delta
+ p224Add(z3, y1, z1)
+ p224Reduce(z3)
+ p224Square(z3, z3, &c)
+ p224Sub(z3, z3, &gamma)
+ p224Reduce(z3)
+ p224Sub(z3, z3, &delta)
+ p224Reduce(z3)
+
+ // X3 = alpha²-8*beta
+ for i := 0; i < 8; i++ {
+ delta[i] = beta[i] << 3
+ }
+ p224Reduce(&delta)
+ p224Square(x3, &alpha, &c)
+ p224Sub(x3, x3, &delta)
+ p224Reduce(x3)
+
+ // Y3 = alpha*(4*beta-X3)-8*gamma²
+ for i := 0; i < 8; i++ {
+ beta[i] <<= 2
+ }
+ p224Sub(&beta, &beta, x3)
+ p224Reduce(&beta)
+ p224Square(&gamma, &gamma, &c)
+ for i := 0; i < 8; i++ {
+ gamma[i] <<= 3
+ }
+ p224Reduce(&gamma)
+ p224Mul(y3, &alpha, &beta, &c)
+ p224Sub(y3, y3, &gamma)
+ p224Reduce(y3)
+}
+
+// p224CopyConditional sets *out = *in iff the least-significant-bit of control
+// is true, and it runs in constant time.
+func p224CopyConditional(out, in *p224FieldElement, control uint32) {
+ control <<= 31
+ control = uint32(int32(control) >> 31)
+
+ for i := 0; i < 8; i++ {
+ out[i] ^= (out[i] ^ in[i]) & control
+ }
+}
+
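// p224ScalarMult computes scalar·(inX, inY, inZ) into (outX, outY, outZ) with
// a double-and-add loop whose conditional copies are masked rather than
// branched. firstBit stays 1 until the first set bit of the scalar has been
// handled, so at that bit the accumulator is copied from the input point
// instead of being added to; this avoids ever representing the point at
// infinity, the same issue noted for the generic ScalarMult above.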
+func p224ScalarMult(outX, outY, outZ, inX, inY, inZ *p224FieldElement, scalar []byte) {
+ var xx, yy, zz p224FieldElement
+ for i := 0; i < 8; i++ {
+ outZ[i] = 0
+ }
+
+ firstBit := uint32(1)
+ for _, byte := range scalar {
+ for bitNum := uint(0); bitNum < 8; bitNum++ {
+ p224DoubleJacobian(outX, outY, outZ, outX, outY, outZ)
+ bit := uint32((byte >> (7 - bitNum)) & 1)
+ p224AddJacobian(&xx, &yy, &zz, inX, inY, inZ, outX, outY, outZ)
+ p224CopyConditional(outX, inX, firstBit&bit)
+ p224CopyConditional(outY, inY, firstBit&bit)
+ p224CopyConditional(outZ, inZ, firstBit&bit)
+ p224CopyConditional(outX, &xx, ^firstBit&bit)
+ p224CopyConditional(outY, &yy, ^firstBit&bit)
+ p224CopyConditional(outZ, &zz, ^firstBit&bit)
+ firstBit = firstBit & ^bit
+ }
+ }
+}
+
+// p224ToAffine converts from Jacobian to affine form.
+func p224ToAffine(x, y, z *p224FieldElement) (*big.Int, *big.Int) {
+ var zinv, zinvsq, outx, outy p224FieldElement
+ var tmp p224LargeFieldElement
+
+ isPointAtInfinity := true
+ for i := 0; i < 8; i++ {
+ if z[i] != 0 {
+ isPointAtInfinity = false
+ break
+ }
+ }
+
+ if isPointAtInfinity {
+ return nil, nil
+ }
+
+ p224Invert(&zinv, z)
+ p224Square(&zinvsq, &zinv, &tmp)
+ p224Mul(x, x, &zinvsq, &tmp)
+ p224Mul(&zinvsq, &zinvsq, &zinv, &tmp)
+ p224Mul(y, y, &zinvsq, &tmp)
+
+ p224Contract(&outx, x)
+ p224Contract(&outy, y)
+ return p224ToBig(&outx), p224ToBig(&outy)
+}
+
+// get28BitsFromEnd returns the least-significant 28 bits from buf>>shift,
+// where buf is interpreted as a big-endian number.
+func get28BitsFromEnd(buf []byte, shift uint) (uint32, []byte) {
+ var ret uint32
+
+ for i := uint(0); i < 4; i++ {
+ var b byte
+ if l := len(buf); l > 0 {
+ b = buf[l-1]
+ // We don't remove the byte if we're about to return and we're not
+ // reading all of it.
+ if i != 3 || shift == 4 {
+ buf = buf[:l-1]
+ }
+ }
+ ret |= uint32(b) << (8 * i) >> shift
+ }
+ ret &= bottom28Bits
+ return ret, buf
+}
+
+// p224FromBig sets *out = *in.
+func p224FromBig(out *p224FieldElement, in *big.Int) {
+ bytes := in.Bytes()
+ out[0], bytes = get28BitsFromEnd(bytes, 0)
+ out[1], bytes = get28BitsFromEnd(bytes, 4)
+ out[2], bytes = get28BitsFromEnd(bytes, 0)
+ out[3], bytes = get28BitsFromEnd(bytes, 4)
+ out[4], bytes = get28BitsFromEnd(bytes, 0)
+ out[5], bytes = get28BitsFromEnd(bytes, 4)
+ out[6], bytes = get28BitsFromEnd(bytes, 0)
+ out[7], bytes = get28BitsFromEnd(bytes, 4)
+}
+
+// p224ToBig returns in as a big.Int.
+func p224ToBig(in *p224FieldElement) *big.Int {
+ var buf [28]byte
+ buf[27] = byte(in[0])
+ buf[26] = byte(in[0] >> 8)
+ buf[25] = byte(in[0] >> 16)
+ buf[24] = byte(((in[0] >> 24) & 0x0f) | (in[1]<<4)&0xf0)
+
+ buf[23] = byte(in[1] >> 4)
+ buf[22] = byte(in[1] >> 12)
+ buf[21] = byte(in[1] >> 20)
+
+ buf[20] = byte(in[2])
+ buf[19] = byte(in[2] >> 8)
+ buf[18] = byte(in[2] >> 16)
+ buf[17] = byte(((in[2] >> 24) & 0x0f) | (in[3]<<4)&0xf0)
+
+ buf[16] = byte(in[3] >> 4)
+ buf[15] = byte(in[3] >> 12)
+ buf[14] = byte(in[3] >> 20)
+
+ buf[13] = byte(in[4])
+ buf[12] = byte(in[4] >> 8)
+ buf[11] = byte(in[4] >> 16)
+ buf[10] = byte(((in[4] >> 24) & 0x0f) | (in[5]<<4)&0xf0)
+
+ buf[9] = byte(in[5] >> 4)
+ buf[8] = byte(in[5] >> 12)
+ buf[7] = byte(in[5] >> 20)
+
+ buf[6] = byte(in[6])
+ buf[5] = byte(in[6] >> 8)
+ buf[4] = byte(in[6] >> 16)
+ buf[3] = byte(((in[6] >> 24) & 0x0f) | (in[7]<<4)&0xf0)
+
+ buf[2] = byte(in[7] >> 4)
+ buf[1] = byte(in[7] >> 12)
+ buf[0] = byte(in[7] >> 20)
+
+ return new(big.Int).SetBytes(buf[:])
+}
--- /dev/null
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package elliptic
+
+import (
+ "math/big"
+ "testing"
+)
+
+var toFromBigTests = []string{
+ "0",
+ "1",
+ "23",
+ "b70e0cb46bb4bf7f321390b94a03c1d356c01122343280d6105c1d21",
+ "706a46d476dcb76798e6046d89474788d164c18032d268fd10704fa6",
+}
+
+func p224AlternativeToBig(in *p224FieldElement) *big.Int {
+ ret := new(big.Int)
+ tmp := new(big.Int)
+
+ for i := uint(0); i < 8; i++ {
+ tmp.SetInt64(int64(in[i]))
+ tmp.Lsh(tmp, 28*i)
+ ret.Add(ret, tmp)
+ }
+ ret.Mod(ret, p224.P)
+ return ret
+}
+
+func TestToFromBig(t *testing.T) {
+ for i, test := range toFromBigTests {
+ n, _ := new(big.Int).SetString(test, 16)
+ var x p224FieldElement
+ p224FromBig(&x, n)
+ m := p224ToBig(&x)
+ if n.Cmp(m) != 0 {
+ t.Errorf("#%d: %x != %x", i, n, m)
+ }
+ q := p224AlternativeToBig(&x)
+ if n.Cmp(q) != 0 {
+ t.Errorf("#%d: %x != %x (alternative)", i, n, m)
+ }
+ }
+}
package hmac
import (
- "crypto/md5"
- "crypto/sha1"
- "crypto/sha256"
"hash"
)
// FIPS 198:
// http://csrc.nist.gov/publications/fips/fips198/fips-198a.pdf
-// key is zero padded to 64 bytes
-// ipad = 0x36 byte repeated to 64 bytes
-// opad = 0x5c byte repeated to 64 bytes
+// key is zero padded to the block size of the hash function
+// ipad = 0x36 byte repeated for key length
+// opad = 0x5c byte repeated for key length
// hmac = H([key ^ opad] H([key ^ ipad] text))
-const (
- // NOTE(rsc): This constant is actually the
- // underlying hash function's block size.
- // HMAC is only conventionally used with
- // MD5 and SHA1, and both use 64-byte blocks.
- // The hash.Hash interface doesn't provide a
- // way to find out the block size.
- padSize = 64
-)
-
type hmac struct {
size int
+ blocksize int
key, tmp []byte
outer, inner hash.Hash
}
for i, k := range h.key {
h.tmp[i] = xor ^ k
}
- for i := len(h.key); i < padSize; i++ {
+ for i := len(h.key); i < h.blocksize; i++ {
h.tmp[i] = xor
}
}
origLen := len(in)
in = h.inner.Sum(in)
h.tmpPad(0x5c)
- copy(h.tmp[padSize:], in[origLen:])
+ copy(h.tmp[h.blocksize:], in[origLen:])
h.outer.Reset()
h.outer.Write(h.tmp)
return h.outer.Sum(in[:origLen])
func (h *hmac) Size() int { return h.size }
+func (h *hmac) BlockSize() int { return h.blocksize }
+
func (h *hmac) Reset() {
h.inner.Reset()
h.tmpPad(0x36)
- h.inner.Write(h.tmp[0:padSize])
+ h.inner.Write(h.tmp[0:h.blocksize])
}
-// New returns a new HMAC hash using the given hash generator and key.
+// New returns a new HMAC hash using the given hash.Hash type and key.
func New(h func() hash.Hash, key []byte) hash.Hash {
hm := new(hmac)
hm.outer = h()
hm.inner = h()
hm.size = hm.inner.Size()
- hm.tmp = make([]byte, padSize+hm.size)
- if len(key) > padSize {
+ hm.blocksize = hm.inner.BlockSize()
+ hm.tmp = make([]byte, hm.blocksize+hm.size)
+ if len(key) > hm.blocksize {
// If key is too big, hash it.
hm.outer.Write(key)
key = hm.outer.Sum(nil)
hm.Reset()
return hm
}
-
-// NewMD5 returns a new HMAC-MD5 hash using the given key.
-func NewMD5(key []byte) hash.Hash { return New(md5.New, key) }
-
-// NewSHA1 returns a new HMAC-SHA1 hash using the given key.
-func NewSHA1(key []byte) hash.Hash { return New(sha1.New, key) }
-
-// NewSHA256 returns a new HMAC-SHA256 hash using the given key.
-func NewSHA256(key []byte) hash.Hash { return New(sha256.New, key) }
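
With the per-algorithm constructors gone, callers pass the hash constructor to New themselves. A minimal sketch of the replacement pattern (key and message are placeholders):

package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

func main() {
	key := []byte("placeholder key")
	mac := hmac.New(sha256.New, key) // previously hmac.NewSHA256(key)
	mac.Write([]byte("placeholder message"))
	fmt.Println(hex.EncodeToString(mac.Sum(nil)))
}
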
package hmac
import (
+ "crypto/md5"
+ "crypto/sha1"
+ "crypto/sha256"
+ "crypto/sha512"
"fmt"
"hash"
"testing"
)
type hmacTest struct {
- hash func([]byte) hash.Hash
+ hash func() hash.Hash
key []byte
in []byte
out string
// Tests from US FIPS 198
// http://csrc.nist.gov/publications/fips/fips198/fips-198a.pdf
{
- NewSHA1,
+ sha1.New,
[]byte{
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
"4f4ca3d5d68ba7cc0a1208c9c61e9c5da0403c0a",
},
{
- NewSHA1,
+ sha1.New,
[]byte{
0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
"0922d3405faa3d194f82a45830737d5cc6c75d24",
},
{
- NewSHA1,
+ sha1.New,
[]byte{
0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
// Test from Plan 9.
{
- NewMD5,
+ md5.New,
[]byte("Jefe"),
[]byte("what do ya want for nothing?"),
"750c783e6ab0b503eaa86e310a5db738",
// Tests from RFC 4231
{
- NewSHA256,
+ sha256.New,
[]byte{
0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
"b0344c61d8db38535ca8afceaf0bf12b881dc200c9833da726e9376c2e32cff7",
},
{
- NewSHA256,
+ sha256.New,
[]byte("Jefe"),
[]byte("what do ya want for nothing?"),
"5bdcc146bf60754e6a042426089575c75a003f089d2739839dec58b964ec3843",
},
{
- NewSHA256,
+ sha256.New,
[]byte{
0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
"773ea91e36800e46854db8ebd09181a72959098b3ef8c122d9635514ced565fe",
},
{
- NewSHA256,
+ sha256.New,
[]byte{
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10,
"82558a389a443c0ea4cc819899f2083a85f0faa3e578f8077a2e3ff46729665b",
},
{
- NewSHA256,
+ sha256.New,
[]byte{
0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
"60e431591ee0b67f0d8a26aacbf5b77f8e0bc6213728c5140546040f0ee37f54",
},
{
- NewSHA256,
+ sha256.New,
[]byte{
0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
"be hashed before being used by the HMAC algorithm."),
"9b09ffa71b942fcb27635fbcd5b0e944bfdc63644f0713938a7f51535c3a35e2",
},
+
+ // Tests from http://csrc.nist.gov/groups/ST/toolkit/examples.html
+ // (truncated tag tests are left out)
+ {
+ sha1.New,
+ []byte{
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ },
+ []byte("Sample message for keylen=blocklen"),
+ "5fd596ee78d5553c8ff4e72d266dfd192366da29",
+ },
+ {
+ sha1.New,
+ []byte{
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13,
+ },
+ []byte("Sample message for keylen<blocklen"),
+ "4c99ff0cb1b31bd33f8431dbaf4d17fcd356a807",
+ },
+ {
+ sha1.New,
+ []byte{
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
+ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
+ 0x60, 0x61, 0x62, 0x63,
+ },
+ []byte("Sample message for keylen=blocklen"),
+ "2d51b2f7750e410584662e38f133435f4c4fd42a",
+ },
+ {
+ sha256.New224,
+ []byte{
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ },
+ []byte("Sample message for keylen=blocklen"),
+ "c7405e3ae058e8cd30b08b4140248581ed174cb34e1224bcc1efc81b",
+ },
+ {
+ sha256.New224,
+ []byte{
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b,
+ },
+ []byte("Sample message for keylen<blocklen"),
+ "e3d249a8cfb67ef8b7a169e9a0a599714a2cecba65999a51beb8fbbe",
+ },
+ {
+ sha256.New224,
+ []byte{
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
+ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
+ 0x60, 0x61, 0x62, 0x63,
+ },
+ []byte("Sample message for keylen=blocklen"),
+ "91c52509e5af8531601ae6230099d90bef88aaefb961f4080abc014d",
+ },
+ {
+ sha256.New,
+ []byte{
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ },
+ []byte("Sample message for keylen=blocklen"),
+ "8bb9a1db9806f20df7f77b82138c7914d174d59e13dc4d0169c9057b133e1d62",
+ },
+ {
+ sha256.New,
+ []byte{
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ },
+ []byte("Sample message for keylen<blocklen"),
+ "a28cf43130ee696a98f14a37678b56bcfcbdd9e5cf69717fecf5480f0ebdf790",
+ },
+ {
+ sha256.New,
+ []byte{
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
+ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
+ 0x60, 0x61, 0x62, 0x63,
+ },
+ []byte("Sample message for keylen=blocklen"),
+ "bdccb6c72ddeadb500ae768386cb38cc41c63dbb0878ddb9c7a38a431b78378d",
+ },
+ {
+ sha512.New384,
+ []byte{
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
+ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
+ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
+ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
+ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
+ },
+ []byte("Sample message for keylen=blocklen"),
+ "63c5daa5e651847ca897c95814ab830bededc7d25e83eef9195cd45857a37f448947858f5af50cc2b1b730ddf29671a9",
+ },
+ {
+ sha512.New384,
+ []byte{
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ },
+ []byte("Sample message for keylen<blocklen"),
+ "6eb242bdbb582ca17bebfa481b1e23211464d2b7f8c20b9ff2201637b93646af5ae9ac316e98db45d9cae773675eeed0",
+ },
+ {
+ sha512.New384,
+ []byte{
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
+ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
+ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
+ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
+ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
+ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
+ 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf,
+ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7,
+ 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf,
+ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
+ },
+ []byte("Sample message for keylen=blocklen"),
+ "5b664436df69b0ca22551231a3f0a3d5b4f97991713cfa84bff4d0792eff96c27dccbbb6f79b65d548b40e8564cef594",
+ },
+ {
+ sha512.New,
+ []byte{
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
+ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
+ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
+ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
+ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
+ },
+ []byte("Sample message for keylen=blocklen"),
+ "fc25e240658ca785b7a811a8d3f7b4ca" +
+ "48cfa26a8a366bf2cd1f836b05fcb024bd36853081811d6c" +
+ "ea4216ebad79da1cfcb95ea4586b8a0ce356596a55fb1347",
+ },
+ {
+ sha512.New,
+ []byte{
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ },
+ []byte("Sample message for keylen<blocklen"),
+ "fd44c18bda0bb0a6ce0e82b031bf2818" +
+ "f6539bd56ec00bdc10a8a2d730b3634de2545d639b0f2cf7" +
+ "10d0692c72a1896f1f211c2b922d1a96c392e07e7ea9fedc",
+ },
+ {
+ sha512.New,
+ []byte{
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
+ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
+ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
+ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
+ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
+ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
+ 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf,
+ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7,
+ 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf,
+ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
+ },
+ []byte("Sample message for keylen=blocklen"),
+ "d93ec8d2de1ad2a9957cb9b83f14e76a" +
+ "d6b5e0cce285079a127d3b14bccb7aa7286d4ac0d4ce6421" +
+ "5f2bc9e6870b33d97438be4aaa20cda5c5a912b48b8e27f3",
+ },
}
func TestHMAC(t *testing.T) {
for i, tt := range hmacTests {
- h := tt.hash(tt.key)
+ h := New(tt.hash, tt.key)
for j := 0; j < 2; j++ {
n, err := h.Write(tt.in)
if n != len(tt.in) || err != nil {
// The size of an MD4 checksum in bytes.
const Size = 16
+// The blocksize of MD4 in bytes.
+const BlockSize = 64
+
const (
_Chunk = 64
_Init0 = 0x67452301
func (d *digest) Size() int { return Size }
+func (d *digest) BlockSize() int { return BlockSize }
+
func (d *digest) Write(p []byte) (nn int, err error) {
nn = len(p)
d.len += uint64(nn)
// The size of an MD5 checksum in bytes.
const Size = 16
+// The blocksize of MD5 in bytes.
+const BlockSize = 64
+
const (
_Chunk = 64
_Init0 = 0x67452301
func (d *digest) Size() int { return Size }
+func (d *digest) BlockSize() int { return BlockSize }
+
func (d *digest) Write(p []byte) (nn int, err error) {
nn = len(p)
d.len += uint64(nn)
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
package ocsp
import (
func (cth *canonicalTextHash) Size() int {
return cth.h.Size()
}
+
+func (cth *canonicalTextHash) BlockSize() int {
+ return cth.h.BlockSize()
+}
panic("shouldn't be called")
}
+func (r recordingHash) BlockSize() int {
+ panic("shouldn't be called")
+}
+
func testCanonicalText(t *testing.T, input, expected string) {
r := recordingHash{bytes.NewBuffer(nil)}
c := NewCanonicalTextHash(r)
func (d *digest) Size() int { return Size }
+func (d *digest) BlockSize() int { return BlockSize }
+
func (d *digest) Write(p []byte) (nn int, err error) {
nn = len(p)
d.tc += uint64(nn)
// The size of a SHA1 checksum in bytes.
const Size = 20
+// The blocksize of SHA1 in bytes.
+const BlockSize = 64
+
const (
_Chunk = 64
_Init0 = 0x67452301
func (d *digest) Size() int { return Size }
+func (d *digest) BlockSize() int { return BlockSize }
+
func (d *digest) Write(p []byte) (nn int, err error) {
nn = len(p)
d.len += uint64(nn)
// The size of a SHA224 checksum in bytes.
const Size224 = 28
+// The blocksize of SHA256 and SHA224 in bytes.
+const BlockSize = 64
+
const (
_Chunk = 64
_Init0 = 0x6A09E667
return Size224
}
+func (d *digest) BlockSize() int { return BlockSize }
+
func (d *digest) Write(p []byte) (nn int, err error) {
nn = len(p)
d.len += uint64(nn)
// The size of a SHA384 checksum in bytes.
const Size384 = 48
+// The blocksize of SHA512 and SHA384 in bytes.
+const BlockSize = 128
+
const (
_Chunk = 128
_Init0 = 0x6a09e667f3bcc908
return Size384
}
+func (d *digest) BlockSize() int { return BlockSize }
+
func (d *digest) Write(p []byte) (nn int, err error) {
nn = len(p)
d.len += uint64(nn)
copy(mac.key, key)
return mac
}
- return tls10MAC{hmac.NewSHA1(key)}
+ return tls10MAC{hmac.New(sha1.New, key)}
}
type macFunction interface {
"io"
"net"
"sync"
+ "time"
)
// A Conn represents a secured connection.
return c.conn.RemoteAddr()
}
-// SetTimeout sets the read deadline associated with the connection.
+// SetDeadline sets the read deadline associated with the connection.
// There is no write deadline.
-func (c *Conn) SetTimeout(nsec int64) error {
- return c.conn.SetTimeout(nsec)
+// A zero value for t means Read will not time out.
+func (c *Conn) SetDeadline(t time.Time) error {
+ return c.conn.SetDeadline(t)
}
-// SetReadTimeout sets the time (in nanoseconds) that
-// Read will wait for data before returning a net.Error
-// with Timeout() == true.
-// Setting nsec == 0 (the default) disables the deadline.
-func (c *Conn) SetReadTimeout(nsec int64) error {
- return c.conn.SetReadTimeout(nsec)
+// SetReadDeadline sets the read deadline on the underlying connection.
+// A zero value for t means Read will not time out.
+func (c *Conn) SetReadDeadline(t time.Time) error {
+ return c.conn.SetReadDeadline(t)
}
-// SetWriteTimeout exists to satisfy the net.Conn interface
+// SetWriteDeadline exists to satisfy the net.Conn interface
// but is not implemented by TLS. It always returns an error.
-func (c *Conn) SetWriteTimeout(nsec int64) error {
- return errors.New("TLS does not support SetWriteTimeout")
+func (c *Conn) SetWriteDeadline(t time.Time) error {
+ return errors.New("TLS does not support SetWriteDeadline")
}
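
Callers migrate from nanosecond timeouts to absolute deadlines. A small sketch of the new pattern, assuming imports of crypto/tls and time (the helper is hypothetical):

// setShortReadDeadline shows the move from SetReadTimeout(nsec int64)
// to SetReadDeadline(t time.Time); the zero time.Time disables it.
func setShortReadDeadline(conn *tls.Conn) error {
	return conn.SetReadDeadline(time.Now().Add(5 * time.Second))
}
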
// A halfConn represents one direction of the record layer
}
// Read can be made to time out and return a net.Error with Timeout() == true
-// after a fixed time limit; see SetTimeout and SetReadTimeout.
+// after a fixed time limit; see SetDeadline and SetReadDeadline.
func (c *Conn) Read(b []byte) (n int, err error) {
if err = c.Handshake(); err != nil {
return
// pre-master secret is then calculated using ECDH.
type ecdheRSAKeyAgreement struct {
privateKey []byte
- curve *elliptic.Curve
+ curve elliptic.Curve
x, y *big.Int
}
var x, y *big.Int
var err error
- ka.privateKey, x, y, err = ka.curve.GenerateKey(config.rand())
+ ka.privateKey, x, y, err = elliptic.GenerateKey(ka.curve, config.rand())
if err != nil {
return nil, err
}
- ecdhePublic := ka.curve.Marshal(x, y)
+ ecdhePublic := elliptic.Marshal(ka.curve, x, y)
// http://tools.ietf.org/html/rfc4492#section-5.4
serverECDHParams := make([]byte, 1+2+1+len(ecdhePublic))
if len(ckx.ciphertext) == 0 || int(ckx.ciphertext[0]) != len(ckx.ciphertext)-1 {
return nil, errors.New("bad ClientKeyExchange")
}
- x, y := ka.curve.Unmarshal(ckx.ciphertext[1:])
+ x, y := elliptic.Unmarshal(ka.curve, ckx.ciphertext[1:])
if x == nil {
return nil, errors.New("bad ClientKeyExchange")
}
x, _ = ka.curve.ScalarMult(x, y, ka.privateKey)
- preMasterSecret := make([]byte, (ka.curve.BitSize+7)>>3)
+ preMasterSecret := make([]byte, (ka.curve.Params().BitSize+7)>>3)
xBytes := x.Bytes()
copy(preMasterSecret[len(preMasterSecret)-len(xBytes):], xBytes)
if publicLen+4 > len(skx.key) {
return errServerKeyExchange
}
- ka.x, ka.y = ka.curve.Unmarshal(skx.key[4 : 4+publicLen])
+ ka.x, ka.y = elliptic.Unmarshal(ka.curve, skx.key[4:4+publicLen])
if ka.x == nil {
return errServerKeyExchange
}
if ka.curve == nil {
return nil, nil, errors.New("missing ServerKeyExchange message")
}
- priv, mx, my, err := ka.curve.GenerateKey(config.rand())
+ priv, mx, my, err := elliptic.GenerateKey(ka.curve, config.rand())
if err != nil {
return nil, nil, err
}
x, _ := ka.curve.ScalarMult(ka.x, ka.y, priv)
- preMasterSecret := make([]byte, (ka.curve.BitSize+7)>>3)
+ preMasterSecret := make([]byte, (ka.curve.Params().BitSize+7)>>3)
xBytes := x.Bytes()
copy(preMasterSecret[len(preMasterSecret)-len(xBytes):], xBytes)
- serialized := ka.curve.Marshal(mx, my)
+ serialized := elliptic.Marshal(ka.curve, mx, my)
ckx := new(clientKeyExchangeMsg)
ckx.ciphertext = make([]byte, 1+len(serialized))
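
With curve held as an elliptic.Curve interface value, key generation and point encoding go through the package-level GenerateKey, Marshal and Unmarshal helpers rather than methods on *elliptic.Curve. A rough round-trip sketch, assuming imports of crypto/elliptic, crypto/rand and errors:

// ecdhRoundTrip is a hypothetical sketch of the package-level helpers.
func ecdhRoundTrip() error {
	curve := elliptic.P256()
	priv, x, y, err := elliptic.GenerateKey(curve, rand.Reader)
	if err != nil {
		return err
	}
	buf := elliptic.Marshal(curve, x, y) // uncompressed point encoding
	x2, y2 := elliptic.Unmarshal(curve, buf)
	if x2 == nil {
		return errors.New("malformed point")
	}
	sharedX, _ := curve.ScalarMult(x2, y2, priv) // ECDH-style multiply
	_ = sharedX
	return nil
}
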
// Possible certificate files; stop after finding one.
var certFiles = []string{
- "/etc/ssl/certs/ca-certificates.crt", // Linux etc
- "/etc/pki/tls/certs/ca-bundle.crt", // Fedora/RHEL
- "/etc/ssl/ca-bundle.pem", // OpenSUSE
- "/etc/ssl/cert.pem", // OpenBSD
+ "/etc/ssl/certs/ca-certificates.crt", // Linux etc
+ "/etc/pki/tls/certs/ca-bundle.crt", // Fedora/RHEL
+ "/etc/ssl/ca-bundle.pem", // OpenSUSE
+ "/etc/ssl/cert.pem", // OpenBSD
+ "/usr/local/share/certs/ca-root-nss.crt", // FreeBSD
}
func initDefaultRoots() {
oidRSA = []int{1, 2, 840, 113549, 1, 1, 1}
)
+func subjectBytes(cert *Certificate) ([]byte, error) {
+ if len(cert.RawSubject) > 0 {
+ return cert.RawSubject, nil
+ }
+
+ return asn1.Marshal(cert.Subject.ToRDNSequence())
+}
+
// CreateCertificate creates a new certificate based on a template. The
// following members of template are used: SerialNumber, Subject, NotBefore,
// NotAfter, KeyUsage, BasicConstraintsValid, IsCA, MaxPathLen, SubjectKeyId,
// signee and priv is the private key of the signer.
//
// The returned slice is the certificate in DER encoding.
-func CreateCertificate(rand io.Reader, template, parent *Certificate, pub *rsa.PublicKey, priv *rsa.PrivateKey) (cert []byte, err error) {
+//
+// The only supported key type is RSA (*rsa.PublicKey for pub, *rsa.PrivateKey
+// for priv).
+func CreateCertificate(rand io.Reader, template, parent *Certificate, pub interface{}, priv interface{}) (cert []byte, err error) {
+ rsaPub, ok := pub.(*rsa.PublicKey)
+ if !ok {
+ return nil, errors.New("x509: non-RSA public keys not supported")
+ }
+
+ rsaPriv, ok := priv.(*rsa.PrivateKey)
+ if !ok {
+ return nil, errors.New("x509: non-RSA private keys not supported")
+ }
+
asn1PublicKey, err := asn1.Marshal(rsaPublicKey{
- N: pub.N,
- E: pub.E,
+ N: rsaPub.N,
+ E: rsaPub.E,
})
if err != nil {
return
return
}
- var asn1Issuer []byte
- if len(parent.RawSubject) > 0 {
- asn1Issuer = parent.RawSubject
- } else {
- if asn1Issuer, err = asn1.Marshal(parent.Subject.ToRDNSequence()); err != nil {
- return
- }
+ asn1Issuer, err := subjectBytes(parent)
+ if err != nil {
+ return
}
- asn1Subject, err := asn1.Marshal(template.Subject.ToRDNSequence())
+ asn1Subject, err := subjectBytes(template)
if err != nil {
return
}
h.Write(tbsCertContents)
digest := h.Sum(nil)
- signature, err := rsa.SignPKCS1v15(rand, priv, crypto.SHA1, digest)
+ signature, err := rsa.SignPKCS1v15(rand, rsaPriv, crypto.SHA1, digest)
if err != nil {
return
}
// CreateCRL returns a DER encoded CRL, signed by this Certificate, that
// contains the given list of revoked certificates.
-func (c *Certificate) CreateCRL(rand io.Reader, priv *rsa.PrivateKey, revokedCerts []pkix.RevokedCertificate, now, expiry time.Time) (crlBytes []byte, err error) {
+//
+// The only supported key type is RSA (*rsa.PrivateKey for priv).
+func (c *Certificate) CreateCRL(rand io.Reader, priv interface{}, revokedCerts []pkix.RevokedCertificate, now, expiry time.Time) (crlBytes []byte, err error) {
+ rsaPriv, ok := priv.(*rsa.PrivateKey)
+ if !ok {
+ return nil, errors.New("x509: non-RSA private keys not supported")
+ }
tbsCertList := pkix.TBSCertificateList{
Version: 2,
Signature: pkix.AlgorithmIdentifier{
h.Write(tbsCertListContents)
digest := h.Sum(nil)
- signature, err := rsa.SignPKCS1v15(rand, priv, crypto.SHA1, digest)
+ signature, err := rsa.SignPKCS1v15(rand, rsaPriv, crypto.SHA1, digest)
if err != nil {
return
}
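
A minimal self-signed sketch of the widened CreateCertificate signature, with placeholder subject and key size, assuming imports of crypto/rand, crypto/rsa, crypto/x509, crypto/x509/pkix, math/big and time:

// selfSignedSketch is hypothetical illustration code.
func selfSignedSketch() ([]byte, error) {
	priv, err := rsa.GenerateKey(rand.Reader, 1024) // placeholder key size
	if err != nil {
		return nil, err
	}
	tmpl := &x509.Certificate{
		SerialNumber: big.NewInt(1),
		Subject:      pkix.Name{CommonName: "example"}, // placeholder name
		NotBefore:    time.Now(),
		NotAfter:     time.Now().Add(24 * time.Hour),
	}
	// pub and priv are interface{} now, but only *rsa.PublicKey and
	// *rsa.PrivateKey are accepted; other types return an error.
	return x509.CreateCertificate(rand.Reader, tmpl, tmpl, &priv.PublicKey, priv)
}
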
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Type conversions for Scan.
+
+package sql
+
+import (
+ "database/sql/driver"
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+)
+
+// subsetTypeArgs takes a slice of arguments from callers of the sql
+// package and converts them into a slice of the driver package's
+// "subset types".
+func subsetTypeArgs(args []interface{}) ([]interface{}, error) {
+ out := make([]interface{}, len(args))
+ for n, arg := range args {
+ var err error
+ out[n], err = driver.DefaultParameterConverter.ConvertValue(arg)
+ if err != nil {
+ return nil, fmt.Errorf("sql: converting argument #%d's type: %v", n+1, err)
+ }
+ }
+ return out, nil
+}
+
+// convertAssign copies to dest the value in src, converting it if possible.
+// An error is returned if the copy would result in loss of information.
+// dest should be a pointer type.
+func convertAssign(dest, src interface{}) error {
+ // Common cases, without reflect. Fall through.
+ switch s := src.(type) {
+ case string:
+ switch d := dest.(type) {
+ case *string:
+ *d = s
+ return nil
+ }
+ case []byte:
+ switch d := dest.(type) {
+ case *string:
+ *d = string(s)
+ return nil
+ case *[]byte:
+ *d = s
+ return nil
+ }
+ }
+
+ var sv reflect.Value
+
+ switch d := dest.(type) {
+ case *string:
+ sv = reflect.ValueOf(src)
+ switch sv.Kind() {
+ case reflect.Bool,
+ reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+ reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
+ reflect.Float32, reflect.Float64:
+ *d = fmt.Sprintf("%v", src)
+ return nil
+ }
+ case *bool:
+ bv, err := driver.Bool.ConvertValue(src)
+ if err == nil {
+ *d = bv.(bool)
+ }
+ return err
+ }
+
+ if scanner, ok := dest.(ScannerInto); ok {
+ return scanner.ScanInto(src)
+ }
+
+ dpv := reflect.ValueOf(dest)
+ if dpv.Kind() != reflect.Ptr {
+ return errors.New("destination not a pointer")
+ }
+
+ if !sv.IsValid() {
+ sv = reflect.ValueOf(src)
+ }
+
+ dv := reflect.Indirect(dpv)
+ if dv.Kind() == sv.Kind() {
+ dv.Set(sv)
+ return nil
+ }
+
+ switch dv.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ s := asString(src)
+ i64, err := strconv.ParseInt(s, 10, dv.Type().Bits())
+ if err != nil {
+ return fmt.Errorf("converting string %q to a %s: %v", s, dv.Kind(), err)
+ }
+ dv.SetInt(i64)
+ return nil
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ s := asString(src)
+ u64, err := strconv.ParseUint(s, 10, dv.Type().Bits())
+ if err != nil {
+ return fmt.Errorf("converting string %q to a %s: %v", s, dv.Kind(), err)
+ }
+ dv.SetUint(u64)
+ return nil
+ case reflect.Float32, reflect.Float64:
+ s := asString(src)
+ f64, err := strconv.ParseFloat(s, dv.Type().Bits())
+ if err != nil {
+ return fmt.Errorf("converting string %q to a %s: %v", s, dv.Kind(), err)
+ }
+ dv.SetFloat(f64)
+ return nil
+ }
+
+ return fmt.Errorf("unsupported driver -> Scan pair: %T -> %T", src, dest)
+}
+
+func asString(src interface{}) string {
+ switch v := src.(type) {
+ case string:
+ return v
+ case []byte:
+ return string(v)
+ }
+ return fmt.Sprintf("%v", src)
+}
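
convertAssign is what Rows.Scan uses to copy driver values into the caller's destinations, so subset-typed column data (int64, []byte, ...) can be scanned into plain Go variables. A sketch with a placeholder table and query, assuming database/sql is imported and a driver is registered:

// lookupPerson is hypothetical illustration code.
func lookupPerson(db *sql.DB) (int, string, error) {
	var id int
	var name string
	err := db.QueryRow("SELECT id, name FROM people WHERE id = ?", 1).Scan(&id, &name)
	return id, name, err
}
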
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sql
+
+import (
+ "database/sql/driver"
+ "fmt"
+ "reflect"
+ "testing"
+ "time"
+)
+
+var someTime = time.Unix(123, 0)
+
+type conversionTest struct {
+ s, d interface{} // source and destination
+
+ // following are used if they're non-zero
+ wantint int64
+ wantuint uint64
+ wantstr string
+ wantf32 float32
+ wantf64 float64
+ wanttime time.Time
+ wantbool bool // used if d is of type *bool
+ wanterr string
+}
+
+// Target variables for scanning into.
+var (
+ scanstr string
+ scanint int
+ scanint8 int8
+ scanint16 int16
+ scanint32 int32
+ scanuint8 uint8
+ scanuint16 uint16
+ scanbool bool
+ scanf32 float32
+ scanf64 float64
+ scantime time.Time
+)
+
+var conversionTests = []conversionTest{
+ // Exact conversions (destination pointer type matches source type)
+ {s: "foo", d: &scanstr, wantstr: "foo"},
+ {s: 123, d: &scanint, wantint: 123},
+ {s: someTime, d: &scantime, wanttime: someTime},
+
+ // To strings
+ {s: []byte("byteslice"), d: &scanstr, wantstr: "byteslice"},
+ {s: 123, d: &scanstr, wantstr: "123"},
+ {s: int8(123), d: &scanstr, wantstr: "123"},
+ {s: int64(123), d: &scanstr, wantstr: "123"},
+ {s: uint8(123), d: &scanstr, wantstr: "123"},
+ {s: uint16(123), d: &scanstr, wantstr: "123"},
+ {s: uint32(123), d: &scanstr, wantstr: "123"},
+ {s: uint64(123), d: &scanstr, wantstr: "123"},
+ {s: 1.5, d: &scanstr, wantstr: "1.5"},
+
+ // Strings to integers
+ {s: "255", d: &scanuint8, wantuint: 255},
+ {s: "256", d: &scanuint8, wanterr: `converting string "256" to a uint8: strconv.ParseUint: parsing "256": value out of range`},
+ {s: "256", d: &scanuint16, wantuint: 256},
+ {s: "-1", d: &scanint, wantint: -1},
+ {s: "foo", d: &scanint, wanterr: `converting string "foo" to a int: strconv.ParseInt: parsing "foo": invalid syntax`},
+
+ // True bools
+ {s: true, d: &scanbool, wantbool: true},
+ {s: "True", d: &scanbool, wantbool: true},
+ {s: "TRUE", d: &scanbool, wantbool: true},
+ {s: "1", d: &scanbool, wantbool: true},
+ {s: 1, d: &scanbool, wantbool: true},
+ {s: int64(1), d: &scanbool, wantbool: true},
+ {s: uint16(1), d: &scanbool, wantbool: true},
+
+ // False bools
+ {s: false, d: &scanbool, wantbool: false},
+ {s: "false", d: &scanbool, wantbool: false},
+ {s: "FALSE", d: &scanbool, wantbool: false},
+ {s: "0", d: &scanbool, wantbool: false},
+ {s: 0, d: &scanbool, wantbool: false},
+ {s: int64(0), d: &scanbool, wantbool: false},
+ {s: uint16(0), d: &scanbool, wantbool: false},
+
+ // Not bools
+ {s: "yup", d: &scanbool, wanterr: `sql/driver: couldn't convert "yup" into type bool`},
+ {s: 2, d: &scanbool, wanterr: `sql/driver: couldn't convert 2 into type bool`},
+
+ // Floats
+ {s: float64(1.5), d: &scanf64, wantf64: float64(1.5)},
+ {s: int64(1), d: &scanf64, wantf64: float64(1)},
+ {s: float64(1.5), d: &scanf32, wantf32: float32(1.5)},
+ {s: "1.5", d: &scanf32, wantf32: float32(1.5)},
+ {s: "1.5", d: &scanf64, wantf64: float64(1.5)},
+}
+
+func intValue(intptr interface{}) int64 {
+ return reflect.Indirect(reflect.ValueOf(intptr)).Int()
+}
+
+func uintValue(intptr interface{}) uint64 {
+ return reflect.Indirect(reflect.ValueOf(intptr)).Uint()
+}
+
+func float64Value(ptr interface{}) float64 {
+ return *(ptr.(*float64))
+}
+
+func float32Value(ptr interface{}) float32 {
+ return *(ptr.(*float32))
+}
+
+func timeValue(ptr interface{}) time.Time {
+ return *(ptr.(*time.Time))
+}
+
+func TestConversions(t *testing.T) {
+ for n, ct := range conversionTests {
+ err := convertAssign(ct.d, ct.s)
+ errstr := ""
+ if err != nil {
+ errstr = err.Error()
+ }
+ errf := func(format string, args ...interface{}) {
+ base := fmt.Sprintf("convertAssign #%d: for %v (%T) -> %T, ", n, ct.s, ct.s, ct.d)
+ t.Errorf(base+format, args...)
+ }
+ if errstr != ct.wanterr {
+ errf("got error %q, want error %q", errstr, ct.wanterr)
+ }
+ if ct.wantstr != "" && ct.wantstr != scanstr {
+ errf("want string %q, got %q", ct.wantstr, scanstr)
+ }
+ if ct.wantint != 0 && ct.wantint != intValue(ct.d) {
+ errf("want int %d, got %d", ct.wantint, intValue(ct.d))
+ }
+ if ct.wantuint != 0 && ct.wantuint != uintValue(ct.d) {
+ errf("want uint %d, got %d", ct.wantuint, uintValue(ct.d))
+ }
+ if ct.wantf32 != 0 && ct.wantf32 != float32Value(ct.d) {
+ errf("want float32 %v, got %v", ct.wantf32, float32Value(ct.d))
+ }
+ if ct.wantf64 != 0 && ct.wantf64 != float64Value(ct.d) {
+ errf("want float32 %v, got %v", ct.wantf64, float64Value(ct.d))
+ }
+ if bp, boolTest := ct.d.(*bool); boolTest && *bp != ct.wantbool && ct.wanterr == "" {
+ errf("want bool %v, got %v", ct.wantbool, *bp)
+ }
+ if !ct.wanttime.IsZero() && !ct.wanttime.Equal(timeValue(ct.d)) {
+ errf("want time %v, got %v", ct.wanttime, timeValue(ct.d))
+ }
+ }
+}
+
+func TestNullString(t *testing.T) {
+ var ns NullString
+ convertAssign(&ns, []byte("foo"))
+ if !ns.Valid {
+ t.Errorf("expecting not null")
+ }
+ if ns.String != "foo" {
+ t.Errorf("expecting foo; got %q", ns.String)
+ }
+ convertAssign(&ns, nil)
+ if ns.Valid {
+ t.Errorf("expecting null on nil")
+ }
+ if ns.String != "" {
+ t.Errorf("expecting blank on nil; got %q", ns.String)
+ }
+}
+
+type valueConverterTest struct {
+ c driver.ValueConverter
+ in, out interface{}
+ err string
+}
+
+var valueConverterTests = []valueConverterTest{
+ {driver.DefaultParameterConverter, NullString{"hi", true}, "hi", ""},
+ {driver.DefaultParameterConverter, NullString{"", false}, nil, ""},
+}
+
+func TestValueConverters(t *testing.T) {
+ for i, tt := range valueConverterTests {
+ out, err := tt.c.ConvertValue(tt.in)
+ goterr := ""
+ if err != nil {
+ goterr = err.Error()
+ }
+ if goterr != tt.err {
+ t.Errorf("test %d: %s(%T(%v)) error = %q; want error = %q",
+ i, tt.c, tt.in, tt.in, goterr, tt.err)
+ }
+ if tt.err != "" {
+ continue
+ }
+ if !reflect.DeepEqual(out, tt.out) {
+ t.Errorf("test %d: %s(%T(%v)) = %v (%T); want %v (%T)",
+ i, tt.c, tt.in, tt.in, out, out, tt.out, tt.out)
+ }
+ }
+}
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package driver defines interfaces to be implemented by database
+// drivers as used by package sql.
+//
+// Code simply using databases should use package sql.
+//
+// Drivers only need to be aware of a subset of Go's types. The sql package
+// will convert all types into one of the following:
+//
+// int64
+// float64
+// bool
+// nil
+// []byte
+// string [*] everywhere except from Rows.Next.
+// time.Time
+//
+package driver
+
+import "errors"
+
+// Driver is the interface that must be implemented by a database
+// driver.
+type Driver interface {
+ // Open returns a new connection to the database.
+ // The name is a string in a driver-specific format.
+ //
+ // Open may return a cached connection (one previously
+ // closed), but doing so is unnecessary; the sql package
+ // maintains a pool of idle connections for efficient re-use.
+ //
+ // The returned connection is only used by one goroutine at a
+ // time.
+ Open(name string) (Conn, error)
+}
+
+// ErrSkip may be returned by some optional interfaces' methods to
+// indicate at runtime that the fast path is unavailable and the sql
+// package should continue as if the optional interface was not
+// implemented. ErrSkip is only supported where explicitly
+// documented.
+var ErrSkip = errors.New("driver: skip fast-path; continue as if unimplemented")
+
+// Execer is an optional interface that may be implemented by a Conn.
+//
+// If a Conn does not implement Execer, the sql package's DB.Exec will
+// first prepare a query, execute the statement, and then close the
+// statement.
+//
+// All arguments are of a subset type as defined in the package docs.
+//
+// Exec may return ErrSkip.
+type Execer interface {
+ Exec(query string, args []interface{}) (Result, error)
+}
+
+// Conn is a connection to a database. It is not used concurrently
+// by multiple goroutines.
+//
+// Conn is assumed to be stateful.
+type Conn interface {
+ // Prepare returns a prepared statement, bound to this connection.
+ Prepare(query string) (Stmt, error)
+
+ // Close invalidates and potentially stops any current
+ // prepared statements and transactions, marking this
+ // connection as no longer in use.
+ //
+ // Because the sql package maintains a free pool of
+ // connections and only calls Close when there's a surplus of
+ // idle connections, it shouldn't be necessary for drivers to
+ // do their own connection caching.
+ Close() error
+
+ // Begin starts and returns a new transaction.
+ Begin() (Tx, error)
+}
+
+// Result is the result of a query execution.
+type Result interface {
+ // LastInsertId returns the database's auto-generated ID
+ // after, for example, an INSERT into a table with primary
+ // key.
+ LastInsertId() (int64, error)
+
+ // RowsAffected returns the number of rows affected by the
+ // query.
+ RowsAffected() (int64, error)
+}
+
+// Stmt is a prepared statement. It is bound to a Conn and not
+// used by multiple goroutines concurrently.
+type Stmt interface {
+ // Close closes the statement.
+ //
+ // Closing a statement should not interrupt any outstanding
+ // query created from that statement. That is, the following
+ // order of operations is valid:
+ //
+ // * create a driver statement
+ // * call Query on statement, returning Rows
+ // * close the statement
+ // * read from Rows
+ //
+ // If closing a statement invalidates currently-running
+ // queries, the final step above will incorrectly fail.
+ //
+ // TODO(bradfitz): possibly remove the restriction above, if
+ // enough driver authors object and find it complicates their
+ // code too much. The sql package could be smarter about
+ // refcounting the statement and closing it at the appropriate
+ // time.
+ Close() error
+
+ // NumInput returns the number of placeholder parameters.
+ //
+ // If NumInput returns >= 0, the sql package will sanity check
+ // argument counts from callers and return errors to the caller
+ // before the statement's Exec or Query methods are called.
+ //
+ // NumInput may also return -1, if the driver doesn't know
+ // its number of placeholders. In that case, the sql package
+ // will not sanity check Exec or Query argument counts.
+ NumInput() int
+
+ // Exec executes a query that doesn't return rows, such
+ // as an INSERT or UPDATE. The args are all of a subset
+ // type as defined above.
+ Exec(args []interface{}) (Result, error)
+
+ // Query executes a query that may return rows, such as a
+ // SELECT. The args are all of a subset type as defined above.
+ Query(args []interface{}) (Rows, error)
+}
+
+// ColumnConverter may be optionally implemented by Stmt if the
+// statement is aware of its own columns' types and can
+// convert from any type to a driver subset type.
+type ColumnConverter interface {
+ // ColumnConverter returns a ValueConverter for the provided
+ // column index. If the type of a specific column isn't known
+ // or shouldn't be handled specially, DefaultParameterConverter
+ // can be returned.
+ ColumnConverter(idx int) ValueConverter
+}
+
+// Rows is an iterator over an executed query's results.
+type Rows interface {
+ // Columns returns the names of the columns. The number of
+ // columns of the result is inferred from the length of the
+ // slice. If a particular column name isn't known, an empty
+ // string should be returned for that entry.
+ Columns() []string
+
+ // Close closes the rows iterator.
+ Close() error
+
+ // Next is called to populate the next row of data into
+ // the provided slice. The provided slice will be the same
+ // size as the Columns() are wide.
+ //
+ // The dest slice may be populated only with values
+ // of subset types defined above, but excluding string.
+ // All string values must be converted to []byte.
+ //
+ // Next should return io.EOF when there are no more rows.
+ Next(dest []interface{}) error
+}
+
+// Tx is a transaction.
+type Tx interface {
+ Commit() error
+ Rollback() error
+}
+
+// RowsAffected implements Result for an INSERT or UPDATE operation
+// which mutates a number of rows.
+type RowsAffected int64
+
+var _ Result = RowsAffected(0)
+
+func (RowsAffected) LastInsertId() (int64, error) {
+ return 0, errors.New("no LastInsertId available")
+}
+
+func (v RowsAffected) RowsAffected() (int64, error) {
+ return int64(v), nil
+}
+
+// DDLSuccess is a pre-defined Result for drivers to return when a DDL
+// command succeeds.
+var DDLSuccess ddlSuccess
+
+type ddlSuccess struct{}
+
+var _ Result = ddlSuccess{}
+
+func (ddlSuccess) LastInsertId() (int64, error) {
+ return 0, errors.New("no LastInsertId available after DDL statement")
+}
+
+func (ddlSuccess) RowsAffected() (int64, error) {
+ return 0, errors.New("no RowsAffected available after DDL statement")
+}
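
Drivers typically return one of the Result helpers above from Stmt.Exec. A sketch with a hypothetical statement type in a third-party driver package that imports database/sql/driver:

// myStmt is a hypothetical driver statement type.
type myStmt struct{}

func (s *myStmt) Exec(args []interface{}) (driver.Result, error) {
	// A DDL command would return driver.DDLSuccess instead; an ordinary
	// write reports how many rows it touched.
	return driver.RowsAffected(1), nil
}
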
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package driver
+
+import (
+ "fmt"
+ "reflect"
+ "strconv"
+ "time"
+)
+
+// ValueConverter is the interface providing the ConvertValue method.
+//
+// Various implementations of ValueConverter are provided by the
+// driver package to provide consistent implementations of conversions
+// between drivers. The ValueConverters have several uses:
+//
+// * converting from the subset types as provided by the sql package
+// into a database table's specific column type and making sure it
+// fits, such as making sure a particular int64 fits in a
+// table's uint16 column.
+//
+// * converting a value as given from the database into one of the
+// subset types.
+//
+// * by the sql package, for converting from a driver's subset type
+// to a user's type in a scan.
+type ValueConverter interface {
+ // ConvertValue converts a value to a restricted subset type.
+ ConvertValue(v interface{}) (interface{}, error)
+}
+
+// SubsetValuer is the interface providing the SubsetValue method.
+//
+// Types implementing SubsetValuer interface are able to convert
+// themselves to one of the driver's allowed subset values.
+type SubsetValuer interface {
+ // SubsetValue returns a driver parameter subset value.
+ SubsetValue() (interface{}, error)
+}
+
+// Bool is a ValueConverter that converts input values to bools.
+//
+// The conversion rules are:
+// - booleans are returned unchanged
+// - for integer types,
+// 1 is true
+// 0 is false,
+// other integers are an error
+// - for strings and []byte, same rules as strconv.ParseBool
+// - all other types are an error
+var Bool boolType
+
+type boolType struct{}
+
+var _ ValueConverter = boolType{}
+
+func (boolType) String() string { return "Bool" }
+
+func (boolType) ConvertValue(src interface{}) (interface{}, error) {
+ switch s := src.(type) {
+ case bool:
+ return s, nil
+ case string:
+ b, err := strconv.ParseBool(s)
+ if err != nil {
+ return nil, fmt.Errorf("sql/driver: couldn't convert %q into type bool", s)
+ }
+ return b, nil
+ case []byte:
+ b, err := strconv.ParseBool(string(s))
+ if err != nil {
+ return nil, fmt.Errorf("sql/driver: couldn't convert %q into type bool", s)
+ }
+ return b, nil
+ }
+
+ sv := reflect.ValueOf(src)
+ switch sv.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ iv := sv.Int()
+ if iv == 1 || iv == 0 {
+ return iv == 1, nil
+ }
+ return nil, fmt.Errorf("sql/driver: couldn't convert %d into type bool", iv)
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ uv := sv.Uint()
+ if uv == 1 || uv == 0 {
+ return uv == 1, nil
+ }
+ return nil, fmt.Errorf("sql/driver: couldn't convert %d into type bool", uv)
+ }
+
+ return nil, fmt.Errorf("sql/driver: couldn't convert %v (%T) into type bool", src, src)
+}
+
+// Int32 is a ValueConverter that converts input values to int64,
+// respecting the limits of an int32 value.
+var Int32 int32Type
+
+type int32Type struct{}
+
+var _ ValueConverter = int32Type{}
+
+func (int32Type) ConvertValue(v interface{}) (interface{}, error) {
+ rv := reflect.ValueOf(v)
+ switch rv.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ i64 := rv.Int()
+ if i64 > (1<<31)-1 || i64 < -(1<<31) {
+ return nil, fmt.Errorf("sql/driver: value %d overflows int32", v)
+ }
+ return i64, nil
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ u64 := rv.Uint()
+ if u64 > (1<<31)-1 {
+ return nil, fmt.Errorf("sql/driver: value %d overflows int32", v)
+ }
+ return int64(u64), nil
+ case reflect.String:
+ i, err := strconv.Atoi(rv.String())
+ if err != nil {
+ return nil, fmt.Errorf("sql/driver: value %q can't be converted to int32", v)
+ }
+ return int64(i), nil
+ }
+ return nil, fmt.Errorf("sql/driver: unsupported value %v (type %T) converting to int32", v, v)
+}
+
+// String is a ValueConverter that converts its input to a string.
+// If the value is already a string or []byte, it's unchanged.
+// If the value is of another type, conversion to string is done
+// with fmt.Sprintf("%v", v).
+var String stringType
+
+type stringType struct{}
+
+func (stringType) ConvertValue(v interface{}) (interface{}, error) {
+ switch v.(type) {
+ case string, []byte:
+ return v, nil
+ }
+ return fmt.Sprintf("%v", v), nil
+}
+
+// Null is a type that implements ValueConverter by allowing nil
+// values but otherwise delegating to another ValueConverter.
+type Null struct {
+ Converter ValueConverter
+}
+
+func (n Null) ConvertValue(v interface{}) (interface{}, error) {
+ if v == nil {
+ return nil, nil
+ }
+ return n.Converter.ConvertValue(v)
+}
+
+// NotNull is a type that implements ValueConverter by disallowing nil
+// values but otherwise delegating to another ValueConverter.
+type NotNull struct {
+ Converter ValueConverter
+}
+
+func (n NotNull) ConvertValue(v interface{}) (interface{}, error) {
+ if v == nil {
+ return nil, fmt.Errorf("nil value not allowed")
+ }
+ return n.Converter.ConvertValue(v)
+}
+
+// IsParameterSubsetType reports whether v is of a valid type for a
+// parameter. These types are:
+//
+// int64
+// float64
+// bool
+// nil
+// []byte
+// time.Time
+// string
+//
+// This is the same list as IsScanSubsetType, with the addition of
+// string.
+func IsParameterSubsetType(v interface{}) bool {
+ if IsScanSubsetType(v) {
+ return true
+ }
+ if _, ok := v.(string); ok {
+ return true
+ }
+ return false
+}
+
+// IsScanSubsetType reports whether v is of a valid type for a
+// value populated by Rows.Next. These types are:
+//
+// int64
+// float64
+// bool
+// nil
+// []byte
+// time.Time
+//
+// This is the same list as IsParameterSubsetType, without string.
+func IsScanSubsetType(v interface{}) bool {
+ if v == nil {
+ return true
+ }
+ switch v.(type) {
+ case int64, float64, []byte, bool, time.Time:
+ return true
+ }
+ return false
+}
+
+// DefaultParameterConverter is the default implementation of
+// ValueConverter that's used when a Stmt doesn't implement
+// ColumnConverter.
+//
+// DefaultParameterConverter returns the given value directly if
+// IsParameterSubsetType(value). Otherwise integer types are
+// converted to int64 and floats to float64. Other types are
+// an error.
+var DefaultParameterConverter defaultConverter
+
+type defaultConverter struct{}
+
+var _ ValueConverter = defaultConverter{}
+
+func (defaultConverter) ConvertValue(v interface{}) (interface{}, error) {
+ if IsParameterSubsetType(v) {
+ return v, nil
+ }
+
+ if svi, ok := v.(SubsetValuer); ok {
+ sv, err := svi.SubsetValue()
+ if err != nil {
+ return nil, err
+ }
+ if !IsParameterSubsetType(sv) {
+ return nil, fmt.Errorf("non-subset type %T returned from SubsetValue", sv)
+ }
+ return sv, nil
+ }
+
+ rv := reflect.ValueOf(v)
+ switch rv.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return rv.Int(), nil
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32:
+ return int64(rv.Uint()), nil
+ case reflect.Uint64:
+ u64 := rv.Uint()
+ if u64 >= 1<<63 {
+ return nil, fmt.Errorf("uint64 values with high bit set are not supported")
+ }
+ return int64(u64), nil
+ case reflect.Float32, reflect.Float64:
+ return rv.Float(), nil
+ }
+ return nil, fmt.Errorf("unsupported type %T, a %s", v, rv.Kind())
+}
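
A parameter type can opt into DefaultParameterConverter by implementing SubsetValuer. A sketch with a hypothetical type:

// NullableName is a hypothetical parameter type.
type NullableName struct {
	Name  string
	Valid bool
}

// SubsetValue lets DefaultParameterConverter accept NullableName as a
// query parameter: nil when invalid, a plain string otherwise.
func (n NullableName) SubsetValue() (interface{}, error) {
	if !n.Valid {
		return nil, nil
	}
	return n.Name, nil
}
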
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package driver
+
+import (
+ "reflect"
+ "testing"
+ "time"
+)
+
+type valueConverterTest struct {
+ c ValueConverter
+ in interface{}
+ out interface{}
+ err string
+}
+
+var now = time.Now()
+
+var valueConverterTests = []valueConverterTest{
+ {Bool, "true", true, ""},
+ {Bool, "True", true, ""},
+ {Bool, []byte("t"), true, ""},
+ {Bool, true, true, ""},
+ {Bool, "1", true, ""},
+ {Bool, 1, true, ""},
+ {Bool, int64(1), true, ""},
+ {Bool, uint16(1), true, ""},
+ {Bool, "false", false, ""},
+ {Bool, false, false, ""},
+ {Bool, "0", false, ""},
+ {Bool, 0, false, ""},
+ {Bool, int64(0), false, ""},
+ {Bool, uint16(0), false, ""},
+ {c: Bool, in: "foo", err: "sql/driver: couldn't convert \"foo\" into type bool"},
+ {c: Bool, in: 2, err: "sql/driver: couldn't convert 2 into type bool"},
+ {DefaultParameterConverter, now, now, ""},
+}
+
+func TestValueConverters(t *testing.T) {
+ for i, tt := range valueConverterTests {
+ out, err := tt.c.ConvertValue(tt.in)
+ goterr := ""
+ if err != nil {
+ goterr = err.Error()
+ }
+ if goterr != tt.err {
+ t.Errorf("test %d: %s(%T(%v)) error = %q; want error = %q",
+ i, tt.c, tt.in, tt.in, goterr, tt.err)
+ }
+ if tt.err != "" {
+ continue
+ }
+ if !reflect.DeepEqual(out, tt.out) {
+ t.Errorf("test %d: %s(%T(%v)) = %v (%T); want %v (%T)",
+ i, tt.c, tt.in, tt.in, out, out, tt.out, tt.out)
+ }
+ }
+}
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sql
+
+import (
+ "database/sql/driver"
+ "errors"
+ "fmt"
+ "io"
+ "log"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+)
+
+var _ = log.Printf
+
+// fakeDriver is a fake database that implements Go's driver.Driver
+// interface, just for testing.
+//
+// It speaks a query language that's semantically similar to but
+// syntactically different and simpler than SQL. The syntax is as
+// follows:
+//
+// WIPE
+// CREATE|<tablename>|<col>=<type>,<col>=<type>,...
+// where types are: "string", [u]int{8,16,32,64}, "bool"
+// INSERT|<tablename>|col=val,col2=val2,col3=?
+// SELECT|<tablename>|projectcol1,projectcol2|filtercol=?,filtercol2=?
+//
+// When opening a fakeDriver's database, it starts empty with no
+// tables. All tables and data are stored in memory only.
+type fakeDriver struct {
+ mu sync.Mutex
+ openCount int
+ dbs map[string]*fakeDB
+}
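
The query language above is driven through the ordinary database/sql entry points. A sketch of how a test might exercise it, with placeholder table and column names:

// fakeDBSketch is hypothetical illustration code.
func fakeDBSketch() error {
	db, err := sql.Open("test", "foo")
	if err != nil {
		return err
	}
	if _, err := db.Exec("CREATE|people|name=string,age=int32"); err != nil {
		return err
	}
	if _, err := db.Exec("INSERT|people|name=?,age=?", "Alice", 1); err != nil {
		return err
	}
	_, err = db.Query("SELECT|people|name|age=?", 1)
	return err
}
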
+
+type fakeDB struct {
+ name string
+
+ mu sync.Mutex
+ free []*fakeConn
+ tables map[string]*table
+}
+
+type table struct {
+ mu sync.Mutex
+ colname []string
+ coltype []string
+ rows []*row
+}
+
+func (t *table) columnIndex(name string) int {
+ for n, nname := range t.colname {
+ if name == nname {
+ return n
+ }
+ }
+ return -1
+}
+
+type row struct {
+ cols []interface{} // must be same size as its table colname + coltype
+}
+
+func (r *row) clone() *row {
+ nrow := &row{cols: make([]interface{}, len(r.cols))}
+ copy(nrow.cols, r.cols)
+ return nrow
+}
+
+type fakeConn struct {
+ db *fakeDB // where to return ourselves to
+
+ currTx *fakeTx
+
+ // Stats for tests:
+ mu sync.Mutex
+ stmtsMade int
+ stmtsClosed int
+}
+
+func (c *fakeConn) incrStat(v *int) {
+ c.mu.Lock()
+ *v++
+ c.mu.Unlock()
+}
+
+type fakeTx struct {
+ c *fakeConn
+}
+
+type fakeStmt struct {
+ c *fakeConn
+ q string // just for debugging
+
+ cmd string
+ table string
+
+ closed bool
+
+ colName []string // used by CREATE, INSERT, SELECT (selected columns)
+ colType []string // used by CREATE
+ colValue []interface{} // used by INSERT (mix of strings and "?" for bound params)
+ placeholders int // used by INSERT/SELECT: number of ? params
+
+ whereCol []string // used by SELECT (all placeholders)
+
+ placeholderConverter []driver.ValueConverter // used by INSERT
+}
+
+var fdriver driver.Driver = &fakeDriver{}
+
+func init() {
+ Register("test", fdriver)
+}
+
+// Supports dsn forms:
+// <dbname>
+// <dbname>;<opts>  (no options are currently supported)
+func (d *fakeDriver) Open(dsn string) (driver.Conn, error) {
+ parts := strings.Split(dsn, ";")
+ if len(parts) < 1 {
+ return nil, errors.New("fakedb: no database name")
+ }
+ name := parts[0]
+
+ db := d.getDB(name)
+
+ d.mu.Lock()
+ d.openCount++
+ d.mu.Unlock()
+ return &fakeConn{db: db}, nil
+}
+
+func (d *fakeDriver) getDB(name string) *fakeDB {
+ d.mu.Lock()
+ defer d.mu.Unlock()
+ if d.dbs == nil {
+ d.dbs = make(map[string]*fakeDB)
+ }
+ db, ok := d.dbs[name]
+ if !ok {
+ db = &fakeDB{name: name}
+ d.dbs[name] = db
+ }
+ return db
+}
+
+func (db *fakeDB) wipe() {
+ db.mu.Lock()
+ defer db.mu.Unlock()
+ db.tables = nil
+}
+
+func (db *fakeDB) createTable(name string, columnNames, columnTypes []string) error {
+ db.mu.Lock()
+ defer db.mu.Unlock()
+ if db.tables == nil {
+ db.tables = make(map[string]*table)
+ }
+ if _, exist := db.tables[name]; exist {
+ return fmt.Errorf("table %q already exists", name)
+ }
+ if len(columnNames) != len(columnTypes) {
+ return fmt.Errorf("create table of %q len(names) != len(types): %d vs %d",
+ name, len(columnNames), len(columnTypes))
+ }
+ db.tables[name] = &table{colname: columnNames, coltype: columnTypes}
+ return nil
+}
+
+// must be called with db.mu lock held
+func (db *fakeDB) table(table string) (*table, bool) {
+ if db.tables == nil {
+ return nil, false
+ }
+ t, ok := db.tables[table]
+ return t, ok
+}
+
+func (db *fakeDB) columnType(table, column string) (typ string, ok bool) {
+ db.mu.Lock()
+ defer db.mu.Unlock()
+ t, ok := db.table(table)
+ if !ok {
+ return
+ }
+ for n, cname := range t.colname {
+ if cname == column {
+ return t.coltype[n], true
+ }
+ }
+ return "", false
+}
+
+func (c *fakeConn) Begin() (driver.Tx, error) {
+ if c.currTx != nil {
+ return nil, errors.New("already in a transaction")
+ }
+ c.currTx = &fakeTx{c: c}
+ return c.currTx, nil
+}
+
+func (c *fakeConn) Close() error {
+ if c.currTx != nil {
+ return errors.New("can't close; in a Transaction")
+ }
+ if c.db == nil {
+ return errors.New("can't close; already closed")
+ }
+ c.db = nil
+ return nil
+}
+
+func checkSubsetTypes(args []interface{}) error {
+ for n, arg := range args {
+ switch arg.(type) {
+ case int64, float64, bool, nil, []byte, string, time.Time:
+ default:
+ return fmt.Errorf("fakedb_test: invalid argument #%d: %v, type %T", n+1, arg, arg)
+ }
+ }
+ return nil
+}
+
+func (c *fakeConn) Exec(query string, args []interface{}) (driver.Result, error) {
+ // This is an optional interface, but it's implemented here
+	// just to check that all the args are of the proper types.
+ // ErrSkip is returned so the caller acts as if we didn't
+ // implement this at all.
+ err := checkSubsetTypes(args)
+ if err != nil {
+ return nil, err
+ }
+ return nil, driver.ErrSkip
+}
+
+func errf(msg string, args ...interface{}) error {
+ return errors.New("fakedb: " + fmt.Sprintf(msg, args...))
+}
+
+// parts are table|selectCol1,selectCol2|whereCol=?,whereCol2=?
+// (note that the where columns must always contain ? marks;
+// this is just a limitation of fakedb)
+func (c *fakeConn) prepareSelect(stmt *fakeStmt, parts []string) (driver.Stmt, error) {
+ if len(parts) != 3 {
+ return nil, errf("invalid SELECT syntax with %d parts; want 3", len(parts))
+ }
+ stmt.table = parts[0]
+ stmt.colName = strings.Split(parts[1], ",")
+ for n, colspec := range strings.Split(parts[2], ",") {
+ if colspec == "" {
+ continue
+ }
+ nameVal := strings.Split(colspec, "=")
+ if len(nameVal) != 2 {
+ return nil, errf("SELECT on table %q has invalid column spec of %q (index %d)", stmt.table, colspec, n)
+ }
+ column, value := nameVal[0], nameVal[1]
+ _, ok := c.db.columnType(stmt.table, column)
+ if !ok {
+ return nil, errf("SELECT on table %q references non-existent column %q", stmt.table, column)
+ }
+ if value != "?" {
+ return nil, errf("SELECT on table %q has pre-bound value for where column %q; need a question mark",
+ stmt.table, column)
+ }
+ stmt.whereCol = append(stmt.whereCol, column)
+ stmt.placeholders++
+ }
+ return stmt, nil
+}
+
+// parts are table|col=type,col2=type2
+func (c *fakeConn) prepareCreate(stmt *fakeStmt, parts []string) (driver.Stmt, error) {
+ if len(parts) != 2 {
+ return nil, errf("invalid CREATE syntax with %d parts; want 2", len(parts))
+ }
+ stmt.table = parts[0]
+ for n, colspec := range strings.Split(parts[1], ",") {
+ nameType := strings.Split(colspec, "=")
+ if len(nameType) != 2 {
+ return nil, errf("CREATE table %q has invalid column spec of %q (index %d)", stmt.table, colspec, n)
+ }
+ stmt.colName = append(stmt.colName, nameType[0])
+ stmt.colType = append(stmt.colType, nameType[1])
+ }
+ return stmt, nil
+}
+
+// parts are table|col=?,col2=val
+func (c *fakeConn) prepareInsert(stmt *fakeStmt, parts []string) (driver.Stmt, error) {
+ if len(parts) != 2 {
+ return nil, errf("invalid INSERT syntax with %d parts; want 2", len(parts))
+ }
+ stmt.table = parts[0]
+ for n, colspec := range strings.Split(parts[1], ",") {
+ nameVal := strings.Split(colspec, "=")
+ if len(nameVal) != 2 {
+ return nil, errf("INSERT table %q has invalid column spec of %q (index %d)", stmt.table, colspec, n)
+ }
+ column, value := nameVal[0], nameVal[1]
+ ctype, ok := c.db.columnType(stmt.table, column)
+ if !ok {
+ return nil, errf("INSERT table %q references non-existent column %q", stmt.table, column)
+ }
+ stmt.colName = append(stmt.colName, column)
+
+ if value != "?" {
+ var subsetVal interface{}
+ // Convert to driver subset type
+ switch ctype {
+ case "string":
+ subsetVal = []byte(value)
+ case "blob":
+ subsetVal = []byte(value)
+ case "int32":
+ i, err := strconv.Atoi(value)
+ if err != nil {
+ return nil, errf("invalid conversion to int32 from %q", value)
+ }
+ subsetVal = int64(i) // int64 is a subset type, but not int32
+ default:
+ return nil, errf("unsupported conversion for pre-bound parameter %q to type %q", value, ctype)
+ }
+ stmt.colValue = append(stmt.colValue, subsetVal)
+ } else {
+ stmt.placeholders++
+ stmt.placeholderConverter = append(stmt.placeholderConverter, converterForType(ctype))
+ stmt.colValue = append(stmt.colValue, "?")
+ }
+ }
+ return stmt, nil
+}
+
+func (c *fakeConn) Prepare(query string) (driver.Stmt, error) {
+ if c.db == nil {
+ panic("nil c.db; conn = " + fmt.Sprintf("%#v", c))
+ }
+ parts := strings.Split(query, "|")
+ if len(parts) < 1 {
+ return nil, errf("empty query")
+ }
+ cmd := parts[0]
+ parts = parts[1:]
+ stmt := &fakeStmt{q: query, c: c, cmd: cmd}
+ c.incrStat(&c.stmtsMade)
+ switch cmd {
+ case "WIPE":
+ // Nothing
+ case "SELECT":
+ return c.prepareSelect(stmt, parts)
+ case "CREATE":
+ return c.prepareCreate(stmt, parts)
+ case "INSERT":
+ return c.prepareInsert(stmt, parts)
+ default:
+ return nil, errf("unsupported command type %q", cmd)
+ }
+ return stmt, nil
+}
+
+func (s *fakeStmt) ColumnConverter(idx int) driver.ValueConverter {
+ return s.placeholderConverter[idx]
+}
+
+func (s *fakeStmt) Close() error {
+ if !s.closed {
+ s.c.incrStat(&s.c.stmtsClosed)
+ s.closed = true
+ }
+ return nil
+}
+
+var errClosed = errors.New("fakedb: statement has been closed")
+
+func (s *fakeStmt) Exec(args []interface{}) (driver.Result, error) {
+ if s.closed {
+ return nil, errClosed
+ }
+ err := checkSubsetTypes(args)
+ if err != nil {
+ return nil, err
+ }
+
+ db := s.c.db
+ switch s.cmd {
+ case "WIPE":
+ db.wipe()
+ return driver.DDLSuccess, nil
+ case "CREATE":
+ if err := db.createTable(s.table, s.colName, s.colType); err != nil {
+ return nil, err
+ }
+ return driver.DDLSuccess, nil
+ case "INSERT":
+ return s.execInsert(args)
+ }
+ fmt.Printf("EXEC statement, cmd=%q: %#v\n", s.cmd, s)
+ return nil, fmt.Errorf("unimplemented statement Exec command type of %q", s.cmd)
+}
+
+func (s *fakeStmt) execInsert(args []interface{}) (driver.Result, error) {
+ db := s.c.db
+ if len(args) != s.placeholders {
+ panic("error in pkg db; should only get here if size is correct")
+ }
+ db.mu.Lock()
+ t, ok := db.table(s.table)
+ db.mu.Unlock()
+ if !ok {
+ return nil, fmt.Errorf("fakedb: table %q doesn't exist", s.table)
+ }
+
+ t.mu.Lock()
+ defer t.mu.Unlock()
+
+ cols := make([]interface{}, len(t.colname))
+ argPos := 0
+ for n, colname := range s.colName {
+ colidx := t.columnIndex(colname)
+ if colidx == -1 {
+ return nil, fmt.Errorf("fakedb: column %q doesn't exist or dropped since prepared statement was created", colname)
+ }
+ var val interface{}
+ if strvalue, ok := s.colValue[n].(string); ok && strvalue == "?" {
+ val = args[argPos]
+ argPos++
+ } else {
+ val = s.colValue[n]
+ }
+ cols[colidx] = val
+ }
+
+ t.rows = append(t.rows, &row{cols: cols})
+ return driver.RowsAffected(1), nil
+}
+
+func (s *fakeStmt) Query(args []interface{}) (driver.Rows, error) {
+ if s.closed {
+ return nil, errClosed
+ }
+ err := checkSubsetTypes(args)
+ if err != nil {
+ return nil, err
+ }
+
+ db := s.c.db
+ if len(args) != s.placeholders {
+ panic("error in pkg db; should only get here if size is correct")
+ }
+
+ db.mu.Lock()
+ t, ok := db.table(s.table)
+ db.mu.Unlock()
+ if !ok {
+ return nil, fmt.Errorf("fakedb: table %q doesn't exist", s.table)
+ }
+ t.mu.Lock()
+ defer t.mu.Unlock()
+
+ colIdx := make(map[string]int) // select column name -> column index in table
+ for _, name := range s.colName {
+ idx := t.columnIndex(name)
+ if idx == -1 {
+ return nil, fmt.Errorf("fakedb: unknown column name %q", name)
+ }
+ colIdx[name] = idx
+ }
+
+ mrows := []*row{}
+rows:
+ for _, trow := range t.rows {
+ // Process the where clause, skipping non-match rows. This is lazy
+ // and just uses fmt.Sprintf("%v") to test equality. Good enough
+ // for test code.
+ for widx, wcol := range s.whereCol {
+ idx := t.columnIndex(wcol)
+ if idx == -1 {
+ return nil, fmt.Errorf("db: invalid where clause column %q", wcol)
+ }
+ tcol := trow.cols[idx]
+ if bs, ok := tcol.([]byte); ok {
+ // lazy hack to avoid sprintf %v on a []byte
+ tcol = string(bs)
+ }
+ if fmt.Sprintf("%v", tcol) != fmt.Sprintf("%v", args[widx]) {
+ continue rows
+ }
+ }
+ mrow := &row{cols: make([]interface{}, len(s.colName))}
+ for seli, name := range s.colName {
+ mrow.cols[seli] = trow.cols[colIdx[name]]
+ }
+ mrows = append(mrows, mrow)
+ }
+
+ cursor := &rowsCursor{
+ pos: -1,
+ rows: mrows,
+ cols: s.colName,
+ }
+ return cursor, nil
+}
+
+func (s *fakeStmt) NumInput() int {
+ return s.placeholders
+}
+
+func (tx *fakeTx) Commit() error {
+ tx.c.currTx = nil
+ return nil
+}
+
+func (tx *fakeTx) Rollback() error {
+ tx.c.currTx = nil
+ return nil
+}
+
+type rowsCursor struct {
+ cols []string
+ pos int
+ rows []*row
+ closed bool
+
+ // a clone of slices to give out to clients, indexed by the
+	// original slice's first byte address. We clone them
+ // just so we're able to corrupt them on close.
+ bytesClone map[*byte][]byte
+}
+
+func (rc *rowsCursor) Close() error {
+ if !rc.closed {
+ for _, bs := range rc.bytesClone {
+ bs[0] = 255 // first byte corrupted
+ }
+ }
+ rc.closed = true
+ return nil
+}
+
+func (rc *rowsCursor) Columns() []string {
+ return rc.cols
+}
+
+func (rc *rowsCursor) Next(dest []interface{}) error {
+ if rc.closed {
+ return errors.New("fakedb: cursor is closed")
+ }
+ rc.pos++
+ if rc.pos >= len(rc.rows) {
+ return io.EOF // per interface spec
+ }
+ for i, v := range rc.rows[rc.pos].cols {
+ // TODO(bradfitz): convert to subset types? naah, I
+ // think the subset types should only be input to
+ // driver, but the sql package should be able to handle
+ // a wider range of types coming out of drivers. all
+ // for ease of drivers, and to prevent drivers from
+ // messing up conversions or doing them differently.
+ dest[i] = v
+
+ if bs, ok := v.([]byte); ok {
+ if rc.bytesClone == nil {
+ rc.bytesClone = make(map[*byte][]byte)
+ }
+ clone, ok := rc.bytesClone[&bs[0]]
+ if !ok {
+ clone = make([]byte, len(bs))
+ copy(clone, bs)
+ rc.bytesClone[&bs[0]] = clone
+ }
+ dest[i] = clone
+ }
+ }
+ return nil
+}
+
+func converterForType(typ string) driver.ValueConverter {
+ switch typ {
+ case "bool":
+ return driver.Bool
+ case "int32":
+ return driver.Int32
+ case "string":
+ return driver.NotNull{driver.String}
+ case "nullstring":
+ return driver.Null{driver.String}
+ case "datetime":
+ return driver.DefaultParameterConverter
+ }
+ panic("invalid fakedb column type of " + typ)
+}
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package sql provides a generic interface around SQL (or SQL-like)
+// databases.
+package sql
+
+import (
+ "database/sql/driver"
+ "errors"
+ "fmt"
+ "io"
+ "sync"
+)
+
+var drivers = make(map[string]driver.Driver)
+
+// Register makes a database driver available by the provided name.
+// If Register is called twice with the same name or if driver is nil,
+// it panics.
+func Register(name string, driver driver.Driver) {
+ if driver == nil {
+ panic("sql: Register driver is nil")
+ }
+ if _, dup := drivers[name]; dup {
+ panic("sql: Register called twice for driver " + name)
+ }
+ drivers[name] = driver
+}
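+
+// A minimal sketch of how a driver registers itself, typically from an init
+// function in the driver's own package. The package name and driver name
+// below are hypothetical, not real packages:
+//
+//	package postgresdriver // hypothetical
+//
+//	import "database/sql"
+//
+//	func init() {
+//		sql.Register("postgres", &Driver{}) // Driver implements driver.Driver
+//	}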
+
+// RawBytes is a byte slice that holds a reference to memory owned by
+// the database itself. After a Scan into a RawBytes, the slice is only
+// valid until the next call to Next, Scan, or Close.
+type RawBytes []byte
+
+// NullString represents a string that may be null.
+// NullString implements the ScannerInto interface so
+// it can be used as a scan destination:
+//
+// var s NullString
+// err := db.QueryRow("SELECT name FROM foo WHERE id=?", id).Scan(&s)
+// ...
+// if s.Valid {
+// // use s.String
+// } else {
+// // NULL value
+// }
+//
+// TODO(bradfitz): add other types.
+type NullString struct {
+ String string
+ Valid bool // Valid is true if String is not NULL
+}
+
+// ScanInto implements the ScannerInto interface.
+func (ns *NullString) ScanInto(value interface{}) error {
+ if value == nil {
+ ns.String, ns.Valid = "", false
+ return nil
+ }
+ ns.Valid = true
+ return convertAssign(&ns.String, value)
+}
+
+// SubsetValue implements the driver SubsetValuer interface.
+func (ns NullString) SubsetValue() (interface{}, error) {
+ if !ns.Valid {
+ return nil, nil
+ }
+ return ns.String, nil
+}
+
+// ScannerInto is an interface used by Scan.
+type ScannerInto interface {
+ // ScanInto assigns a value from a database driver.
+ //
+	// The value will be one of the following restricted
+	// set of types:
+ //
+ // int64
+ // float64
+ // bool
+ // []byte
+ // nil - for NULL values
+ //
+ // An error should be returned if the value can not be stored
+ // without loss of information.
+ ScanInto(value interface{}) error
+}
+
+// ErrNoRows is returned by Scan when QueryRow doesn't return a
+// row. In such a case, QueryRow returns a placeholder *Row value that
+// defers this error until a Scan.
+var ErrNoRows = errors.New("sql: no rows in result set")
+
+// DB is a database handle. It's safe for concurrent use by multiple
+// goroutines.
+type DB struct {
+ driver driver.Driver
+ dsn string
+
+ mu sync.Mutex // protects freeConn and closed
+ freeConn []driver.Conn
+ closed bool
+}
+
+// Open opens a database specified by its database driver name and a
+// driver-specific data source name, usually consisting of at least a
+// database name and connection information.
+//
+// Most users will open a database via a driver-specific connection
+// helper function that returns a *DB.
+func Open(driverName, dataSourceName string) (*DB, error) {
+ driver, ok := drivers[driverName]
+ if !ok {
+ return nil, fmt.Errorf("sql: unknown driver %q (forgotten import?)", driverName)
+ }
+ return &DB{driver: driver, dsn: dataSourceName}, nil
+}
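+
+// A usage sketch, assuming a driver registered under the hypothetical name
+// "postgres"; the data source name format is driver-specific:
+//
+//	db, err := sql.Open("postgres", "user=pqtest dbname=pqtest sslmode=verify-full")
+//	...
+//
+// A driver-specific connection helper, as mentioned above, would simply wrap
+// this call and return the *DB.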
+
+// Close closes the database, releasing any open resources.
+func (db *DB) Close() error {
+ db.mu.Lock()
+ defer db.mu.Unlock()
+ var err error
+ for _, c := range db.freeConn {
+ err1 := c.Close()
+ if err1 != nil {
+ err = err1
+ }
+ }
+ db.freeConn = nil
+ db.closed = true
+ return err
+}
+
+func (db *DB) maxIdleConns() int {
+ const defaultMaxIdleConns = 2
+ // TODO(bradfitz): ask driver, if supported, for its default preference
+ // TODO(bradfitz): let users override?
+ return defaultMaxIdleConns
+}
+
+// conn returns a newly-opened or cached driver.Conn
+func (db *DB) conn() (driver.Conn, error) {
+ db.mu.Lock()
+ if db.closed {
+ db.mu.Unlock()
+ return nil, errors.New("sql: database is closed")
+ }
+ if n := len(db.freeConn); n > 0 {
+ conn := db.freeConn[n-1]
+ db.freeConn = db.freeConn[:n-1]
+ db.mu.Unlock()
+ return conn, nil
+ }
+ db.mu.Unlock()
+ return db.driver.Open(db.dsn)
+}
+
+func (db *DB) connIfFree(wanted driver.Conn) (conn driver.Conn, ok bool) {
+ db.mu.Lock()
+ defer db.mu.Unlock()
+ for n, conn := range db.freeConn {
+ if conn == wanted {
+ db.freeConn[n] = db.freeConn[len(db.freeConn)-1]
+ db.freeConn = db.freeConn[:len(db.freeConn)-1]
+ return wanted, true
+ }
+ }
+ return nil, false
+}
+
+func (db *DB) putConn(c driver.Conn) {
+ db.mu.Lock()
+ defer db.mu.Unlock()
+ if n := len(db.freeConn); !db.closed && n < db.maxIdleConns() {
+ db.freeConn = append(db.freeConn, c)
+ return
+ }
+ db.closeConn(c) // TODO(bradfitz): release lock before calling this?
+}
+
+func (db *DB) closeConn(c driver.Conn) {
+ // TODO: check to see if we need this Conn for any prepared statements
+ // that are active.
+ c.Close()
+}
+
+// Prepare creates a prepared statement for later execution.
+func (db *DB) Prepare(query string) (*Stmt, error) {
+ // TODO: check if db.driver supports an optional
+ // driver.Preparer interface and call that instead, if so,
+ // otherwise we make a prepared statement that's bound
+ // to a connection, and to execute this prepared statement
+ // we either need to use this connection (if it's free), else
+ // get a new connection + re-prepare + execute on that one.
+ ci, err := db.conn()
+ if err != nil {
+ return nil, err
+ }
+ defer db.putConn(ci)
+ si, err := ci.Prepare(query)
+ if err != nil {
+ return nil, err
+ }
+ stmt := &Stmt{
+ db: db,
+ query: query,
+ css: []connStmt{{ci, si}},
+ }
+ return stmt, nil
+}
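+
+// A usage sketch for Prepare; the SQL text, placeholder style, and column
+// names are hypothetical and driver-dependent:
+//
+//	stmt, err := db.Prepare("INSERT INTO users(name, age) VALUES(?, ?)")
+//	...
+//	defer stmt.Close()
+//	res, err := stmt.Exec("Alice", 30)
+//	...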
+
+// Exec executes a query without returning any rows.
+func (db *DB) Exec(query string, args ...interface{}) (Result, error) {
+ sargs, err := subsetTypeArgs(args)
+ if err != nil {
+ return nil, err
+ }
+
+ ci, err := db.conn()
+ if err != nil {
+ return nil, err
+ }
+ defer db.putConn(ci)
+
+ if execer, ok := ci.(driver.Execer); ok {
+ resi, err := execer.Exec(query, sargs)
+ if err != driver.ErrSkip {
+ if err != nil {
+ return nil, err
+ }
+ return result{resi}, nil
+ }
+ }
+
+ sti, err := ci.Prepare(query)
+ if err != nil {
+ return nil, err
+ }
+ defer sti.Close()
+
+ resi, err := sti.Exec(sargs)
+ if err != nil {
+ return nil, err
+ }
+ return result{resi}, nil
+}
+
+// Query executes a query that returns rows, typically a SELECT.
+func (db *DB) Query(query string, args ...interface{}) (*Rows, error) {
+ stmt, err := db.Prepare(query)
+ if err != nil {
+ return nil, err
+ }
+ rows, err := stmt.Query(args...)
+ if err != nil {
+ stmt.Close()
+ return nil, err
+ }
+ rows.closeStmt = stmt
+ return rows, nil
+}
+
+// QueryRow executes a query that is expected to return at most one row.
+// QueryRow always returns a non-nil value. Errors are deferred until
+// Row's Scan method is called.
+func (db *DB) QueryRow(query string, args ...interface{}) *Row {
+ rows, err := db.Query(query, args...)
+ return &Row{rows: rows, err: err}
+}
+
+// Begin starts a transaction. The isolation level is dependent on
+// the driver.
+func (db *DB) Begin() (*Tx, error) {
+ ci, err := db.conn()
+ if err != nil {
+ return nil, err
+ }
+ txi, err := ci.Begin()
+ if err != nil {
+ db.putConn(ci)
+ return nil, fmt.Errorf("sql: failed to Begin transaction: %v", err)
+ }
+ return &Tx{
+ db: db,
+ ci: ci,
+ txi: txi,
+ }, nil
+}
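+
+// A sketch of a typical transaction; the SQL text is hypothetical:
+//
+//	tx, err := db.Begin()
+//	...
+//	_, err = tx.Exec("UPDATE balance SET money=money-? WHERE id=?", 10, 1)
+//	if err != nil {
+//		tx.Rollback()
+//		...
+//	}
+//	err = tx.Commit()
+//	...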
+
+// Driver returns the database's underlying driver.
+func (db *DB) Driver() driver.Driver {
+ return db.driver
+}
+
+// Tx is an in-progress database transaction.
+//
+// A transaction must end with a call to Commit or Rollback.
+//
+// After a call to Commit or Rollback, all operations on the
+// transaction fail with ErrTransactionFinished.
+type Tx struct {
+ db *DB
+
+ // ci is owned exclusively until Commit or Rollback, at which point
+ // it's returned with putConn.
+ ci driver.Conn
+ txi driver.Tx
+
+ // cimu is held while somebody is using ci (between grabConn
+ // and releaseConn)
+ cimu sync.Mutex
+
+ // done transitions from false to true exactly once, on Commit
+	// or Rollback. Once done, all operations fail with
+ // ErrTransactionFinished.
+ done bool
+}
+
+var ErrTransactionFinished = errors.New("sql: Transaction has already been committed or rolled back")
+
+func (tx *Tx) close() {
+ if tx.done {
+ panic("double close") // internal error
+ }
+ tx.done = true
+ tx.db.putConn(tx.ci)
+ tx.ci = nil
+ tx.txi = nil
+}
+
+func (tx *Tx) grabConn() (driver.Conn, error) {
+ if tx.done {
+ return nil, ErrTransactionFinished
+ }
+ tx.cimu.Lock()
+ return tx.ci, nil
+}
+
+func (tx *Tx) releaseConn() {
+ tx.cimu.Unlock()
+}
+
+// Commit commits the transaction.
+func (tx *Tx) Commit() error {
+ if tx.done {
+ return ErrTransactionFinished
+ }
+ defer tx.close()
+ return tx.txi.Commit()
+}
+
+// Rollback aborts the transaction.
+func (tx *Tx) Rollback() error {
+ if tx.done {
+ return ErrTransactionFinished
+ }
+ defer tx.close()
+ return tx.txi.Rollback()
+}
+
+// Prepare creates a prepared statement for use within a transaction.
+//
+// The returned statement operates within the transaction and can no longer
+// be used once the transaction has been committed or rolled back.
+//
+// To use an existing prepared statement on this transaction, see Tx.Stmt.
+func (tx *Tx) Prepare(query string) (*Stmt, error) {
+ // TODO(bradfitz): We could be more efficient here and either
+ // provide a method to take an existing Stmt (created on
+ // perhaps a different Conn), and re-create it on this Conn if
+ // necessary. Or, better: keep a map in DB of query string to
+ // Stmts, and have Stmt.Execute do the right thing and
+ // re-prepare if the Conn in use doesn't have that prepared
+ // statement. But we'll want to avoid caching the statement
+ // in the case where we only call conn.Prepare implicitly
+ // (such as in db.Exec or tx.Exec), but the caller package
+ // can't be holding a reference to the returned statement.
+ // Perhaps just looking at the reference count (by noting
+ // Stmt.Close) would be enough. We might also want a finalizer
+ // on Stmt to drop the reference count.
+ ci, err := tx.grabConn()
+ if err != nil {
+ return nil, err
+ }
+ defer tx.releaseConn()
+
+ si, err := ci.Prepare(query)
+ if err != nil {
+ return nil, err
+ }
+
+ stmt := &Stmt{
+ db: tx.db,
+ tx: tx,
+ txsi: si,
+ query: query,
+ }
+ return stmt, nil
+}
+
+// Stmt returns a transaction-specific prepared statement from
+// an existing statement.
+//
+// Example:
+// updateMoney, err := db.Prepare("UPDATE balance SET money=money+? WHERE id=?")
+// ...
+// tx, err := db.Begin()
+// ...
+// res, err := tx.Stmt(updateMoney).Exec(123.45, 98293203)
+func (tx *Tx) Stmt(stmt *Stmt) *Stmt {
+ // TODO(bradfitz): optimize this. Currently this re-prepares
+ // each time. This is fine for now to illustrate the API but
+ // we should really cache already-prepared statements
+ // per-Conn. See also the big comment in Tx.Prepare.
+
+ if tx.db != stmt.db {
+ return &Stmt{stickyErr: errors.New("sql: Tx.Stmt: statement from different database used")}
+ }
+ ci, err := tx.grabConn()
+ if err != nil {
+ return &Stmt{stickyErr: err}
+ }
+ defer tx.releaseConn()
+ si, err := ci.Prepare(stmt.query)
+ return &Stmt{
+ db: tx.db,
+ tx: tx,
+ txsi: si,
+ query: stmt.query,
+ stickyErr: err,
+ }
+}
+
+// Exec executes a query that doesn't return rows.
+// For example: an INSERT or UPDATE.
+func (tx *Tx) Exec(query string, args ...interface{}) (Result, error) {
+ ci, err := tx.grabConn()
+ if err != nil {
+ return nil, err
+ }
+ defer tx.releaseConn()
+
+	sargs, err := subsetTypeArgs(args)
+	if err != nil {
+		return nil, err
+	}
+
+	if execer, ok := ci.(driver.Execer); ok {
+		// Like DB.Exec: use the optional Execer fast path if the
+		// driver provides it; driver.ErrSkip means fall through to
+		// the generic prepare/exec path below.
+		resi, err := execer.Exec(query, sargs)
+		if err != driver.ErrSkip {
+			if err != nil {
+				return nil, err
+			}
+			return result{resi}, nil
+		}
+	}
+
+	sti, err := ci.Prepare(query)
+	if err != nil {
+		return nil, err
+	}
+	defer sti.Close()
+
+ resi, err := sti.Exec(sargs)
+ if err != nil {
+ return nil, err
+ }
+ return result{resi}, nil
+}
+
+// Query executes a query that returns rows, typically a SELECT.
+func (tx *Tx) Query(query string, args ...interface{}) (*Rows, error) {
+ if tx.done {
+ return nil, ErrTransactionFinished
+ }
+ stmt, err := tx.Prepare(query)
+ if err != nil {
+ return nil, err
+ }
+ defer stmt.Close()
+ return stmt.Query(args...)
+}
+
+// QueryRow executes a query that is expected to return at most one row.
+// QueryRow always returns a non-nil value. Errors are deferred until
+// Row's Scan method is called.
+func (tx *Tx) QueryRow(query string, args ...interface{}) *Row {
+ rows, err := tx.Query(query, args...)
+ return &Row{rows: rows, err: err}
+}
+
+// connStmt is a prepared statement on a particular connection.
+type connStmt struct {
+ ci driver.Conn
+ si driver.Stmt
+}
+
+// Stmt is a prepared statement. Stmt is safe for concurrent use by multiple goroutines.
+type Stmt struct {
+ // Immutable:
+ db *DB // where we came from
+ query string // that created the Stmt
+ stickyErr error // if non-nil, this error is returned for all operations
+
+ // If in a transaction, else both nil:
+ tx *Tx
+ txsi driver.Stmt
+
+ mu sync.Mutex // protects the rest of the fields
+ closed bool
+
+ // css is a list of underlying driver statement interfaces
+ // that are valid on particular connections. This is only
+ // used if tx == nil and one is found that has idle
+ // connections. If tx != nil, txsi is always used.
+ css []connStmt
+}
+
+// Exec executes a prepared statement with the given arguments and
+// returns a Result summarizing the effect of the statement.
+func (s *Stmt) Exec(args ...interface{}) (Result, error) {
+ _, releaseConn, si, err := s.connStmt()
+ if err != nil {
+ return nil, err
+ }
+ defer releaseConn()
+
+ // -1 means the driver doesn't know how to count the number of
+ // placeholders, so we won't sanity check input here and instead let the
+ // driver deal with errors.
+ if want := si.NumInput(); want != -1 && len(args) != want {
+ return nil, fmt.Errorf("sql: expected %d arguments, got %d", want, len(args))
+ }
+
+ // Convert args to subset types.
+ if cc, ok := si.(driver.ColumnConverter); ok {
+ for n, arg := range args {
+ // First, see if the value itself knows how to convert
+ // itself to a driver type. For example, a NullString
+ // struct changing into a string or nil.
+ if svi, ok := arg.(driver.SubsetValuer); ok {
+ sv, err := svi.SubsetValue()
+ if err != nil {
+ return nil, fmt.Errorf("sql: argument index %d from SubsetValue: %v", n, err)
+ }
+ if !driver.IsParameterSubsetType(sv) {
+ return nil, fmt.Errorf("sql: argument index %d: non-subset type %T returned from SubsetValue", n, sv)
+ }
+ arg = sv
+ }
+
+ // Second, ask the column to sanity check itself. For
+ // example, drivers might use this to make sure that
+	// an int64 value being inserted into a 16-bit
+ // integer field is in range (before getting
+ // truncated), or that a nil can't go into a NOT NULL
+ // column before going across the network to get the
+ // same error.
+ args[n], err = cc.ColumnConverter(n).ConvertValue(arg)
+ if err != nil {
+ return nil, fmt.Errorf("sql: converting Exec argument #%d's type: %v", n, err)
+ }
+ if !driver.IsParameterSubsetType(args[n]) {
+ return nil, fmt.Errorf("sql: driver ColumnConverter error converted %T to unsupported type %T",
+ arg, args[n])
+ }
+ }
+ } else {
+ for n, arg := range args {
+ args[n], err = driver.DefaultParameterConverter.ConvertValue(arg)
+ if err != nil {
+ return nil, fmt.Errorf("sql: converting Exec argument #%d's type: %v", n, err)
+ }
+ }
+ }
+
+ resi, err := si.Exec(args)
+ if err != nil {
+ return nil, err
+ }
+ return result{resi}, nil
+}
+
+// connStmt returns a free driver connection on which to execute the
+// statement, a function to call to release the connection, and a
+// statement bound to that connection.
+func (s *Stmt) connStmt() (ci driver.Conn, releaseConn func(), si driver.Stmt, err error) {
+ if err = s.stickyErr; err != nil {
+ return
+ }
+ s.mu.Lock()
+ if s.closed {
+ s.mu.Unlock()
+ err = errors.New("sql: statement is closed")
+ return
+ }
+
+ // In a transaction, we always use the connection that the
+ // transaction was created on.
+ if s.tx != nil {
+ s.mu.Unlock()
+ ci, err = s.tx.grabConn() // blocks, waiting for the connection.
+ if err != nil {
+ return
+ }
+ releaseConn = func() { s.tx.releaseConn() }
+ return ci, releaseConn, s.txsi, nil
+ }
+
+ var cs connStmt
+ match := false
+ for _, v := range s.css {
+ // TODO(bradfitz): lazily clean up entries in this
+ // list with dead conns while enumerating
+		if _, match = s.db.connIfFree(v.ci); match {
+ cs = v
+ break
+ }
+ }
+ s.mu.Unlock()
+
+ // Make a new conn if all are busy.
+ // TODO(bradfitz): or wait for one? make configurable later?
+ if !match {
+ ci, err := s.db.conn()
+ if err != nil {
+ return nil, nil, nil, err
+ }
+ si, err := ci.Prepare(s.query)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+ s.mu.Lock()
+ cs = connStmt{ci, si}
+ s.css = append(s.css, cs)
+ s.mu.Unlock()
+ }
+
+ conn := cs.ci
+ releaseConn = func() { s.db.putConn(conn) }
+ return conn, releaseConn, cs.si, nil
+}
+
+// Query executes a prepared query statement with the given arguments
+// and returns the query results as a *Rows.
+func (s *Stmt) Query(args ...interface{}) (*Rows, error) {
+ ci, releaseConn, si, err := s.connStmt()
+ if err != nil {
+ return nil, err
+ }
+
+ // -1 means the driver doesn't know how to count the number of
+ // placeholders, so we won't sanity check input here and instead let the
+ // driver deal with errors.
+ if want := si.NumInput(); want != -1 && len(args) != want {
+ return nil, fmt.Errorf("sql: statement expects %d inputs; got %d", si.NumInput(), len(args))
+ }
+ sargs, err := subsetTypeArgs(args)
+ if err != nil {
+ return nil, err
+ }
+ rowsi, err := si.Query(sargs)
+ if err != nil {
+ s.db.putConn(ci)
+ return nil, err
+ }
+ // Note: ownership of ci passes to the *Rows, to be freed
+ // with releaseConn.
+ rows := &Rows{
+ db: s.db,
+ ci: ci,
+ releaseConn: releaseConn,
+ rowsi: rowsi,
+ }
+ return rows, nil
+}
+
+// QueryRow executes a prepared query statement with the given arguments.
+// If an error occurs during the execution of the statement, that error will
+// be returned by a call to Scan on the returned *Row, which is always non-nil.
+// If the query selects no rows, the *Row's Scan will return ErrNoRows.
+// Otherwise, the *Row's Scan scans the first selected row and discards
+// the rest.
+//
+// Example usage:
+//
+// var name string
+// err := nameByUseridStmt.QueryRow(id).Scan(&name)
+func (s *Stmt) QueryRow(args ...interface{}) *Row {
+ rows, err := s.Query(args...)
+ if err != nil {
+ return &Row{err: err}
+ }
+ return &Row{rows: rows}
+}
+
+// Close closes the statement.
+func (s *Stmt) Close() error {
+ if s.stickyErr != nil {
+ return s.stickyErr
+ }
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if s.closed {
+ return nil
+ }
+ s.closed = true
+
+ if s.tx != nil {
+ s.txsi.Close()
+ } else {
+ for _, v := range s.css {
+ if ci, match := s.db.connIfFree(v.ci); match {
+ v.si.Close()
+ s.db.putConn(ci)
+ } else {
+ // TODO(bradfitz): care that we can't close
+ // this statement because the statement's
+ // connection is in use?
+ }
+ }
+ }
+ return nil
+}
+
+// Rows is the result of a query. Its cursor starts before the first row
+// of the result set. Use Next to advance through the rows:
+//
+// rows, err := db.Query("SELECT ...")
+// ...
+// for rows.Next() {
+// var id int
+// var name string
+// err = rows.Scan(&id, &name)
+// ...
+// }
+// err = rows.Err() // get any error encountered during iteration
+// ...
+type Rows struct {
+ db *DB
+ ci driver.Conn // owned; must call putconn when closed to release
+ releaseConn func()
+ rowsi driver.Rows
+
+ closed bool
+ lastcols []interface{}
+ lasterr error
+ closeStmt *Stmt // if non-nil, statement to Close on close
+}
+
+// Next prepares the next result row for reading with the Scan method.
+// It returns true on success, false if there is no next result row.
+// Every call to Scan, even the first one, must be preceded by a call
+// to Next.
+func (rs *Rows) Next() bool {
+ if rs.closed {
+ return false
+ }
+ if rs.lasterr != nil {
+ return false
+ }
+ if rs.lastcols == nil {
+ rs.lastcols = make([]interface{}, len(rs.rowsi.Columns()))
+ }
+ rs.lasterr = rs.rowsi.Next(rs.lastcols)
+ if rs.lasterr == io.EOF {
+ rs.Close()
+ }
+ return rs.lasterr == nil
+}
+
+// Err returns the error, if any, that was encountered during iteration.
+func (rs *Rows) Err() error {
+ if rs.lasterr == io.EOF {
+ return nil
+ }
+ return rs.lasterr
+}
+
+// Columns returns the column names.
+// Columns returns an error if the rows are closed, or if the rows
+// are from QueryRow and there was a deferred error.
+func (rs *Rows) Columns() ([]string, error) {
+ if rs.closed {
+ return nil, errors.New("sql: Rows are closed")
+ }
+ if rs.rowsi == nil {
+ return nil, errors.New("sql: no Rows available")
+ }
+ return rs.rowsi.Columns(), nil
+}
+
+// Scan copies the columns in the current row into the values pointed
+// at by dest.
+//
+// If an argument has type *[]byte, Scan saves in that argument a copy
+// of the corresponding data. The copy is owned by the caller and can
+// be modified and held indefinitely. The copy can be avoided by using
+// an argument of type *RawBytes instead; see the documentation for
+// RawBytes for restrictions on its use.
+func (rs *Rows) Scan(dest ...interface{}) error {
+ if rs.closed {
+ return errors.New("sql: Rows closed")
+ }
+ if rs.lasterr != nil {
+ return rs.lasterr
+ }
+ if rs.lastcols == nil {
+ return errors.New("sql: Scan called without calling Next")
+ }
+ if len(dest) != len(rs.lastcols) {
+ return fmt.Errorf("sql: expected %d destination arguments in Scan, not %d", len(rs.lastcols), len(dest))
+ }
+ for i, sv := range rs.lastcols {
+ err := convertAssign(dest[i], sv)
+ if err != nil {
+ return fmt.Errorf("sql: Scan error on column index %d: %v", i, err)
+ }
+ }
+ for _, dp := range dest {
+ b, ok := dp.(*[]byte)
+ if !ok {
+ continue
+ }
+ if _, ok = dp.(*RawBytes); ok {
+ continue
+ }
+ clone := make([]byte, len(*b))
+ copy(clone, *b)
+ *b = clone
+ }
+ return nil
+}
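+
+// A sketch contrasting the two byte-slice destinations described above,
+// mirroring the people/photo table used by the tests in this change:
+//
+//	var name []byte    // Scan stores a copy; it remains valid after Next or Close
+//	var photo RawBytes // Scan stores a reference; valid only until the next Next, Scan, or Close
+//	for rows.Next() {
+//		err := rows.Scan(&name, &photo)
+//		...
+//	}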
+
+// Close closes the Rows, preventing further enumeration. If the
+// end is encountered, the Rows are closed automatically. Close
+// is idempotent.
+func (rs *Rows) Close() error {
+ if rs.closed {
+ return nil
+ }
+ rs.closed = true
+ err := rs.rowsi.Close()
+ rs.releaseConn()
+ if rs.closeStmt != nil {
+ rs.closeStmt.Close()
+ }
+ return err
+}
+
+// Row is the result of calling QueryRow to select a single row.
+type Row struct {
+ // One of these two will be non-nil:
+ err error // deferred error for easy chaining
+ rows *Rows
+}
+
+// Scan copies the columns from the matched row into the values
+// pointed at by dest. If more than one row matches the query,
+// Scan uses the first row and discards the rest. If no row matches
+// the query, Scan returns ErrNoRows.
+func (r *Row) Scan(dest ...interface{}) error {
+ if r.err != nil {
+ return r.err
+ }
+ defer r.rows.Close()
+ if !r.rows.Next() {
+ return ErrNoRows
+ }
+ err := r.rows.Scan(dest...)
+ if err != nil {
+ return err
+ }
+
+ // TODO(bradfitz): for now we need to defensively clone all
+ // []byte that the driver returned, since we're about to close
+ // the Rows in our defer, when we return from this function.
+ // the contract with the driver.Next(...) interface is that it
+ // can return slices into read-only temporary memory that's
+ // only valid until the next Scan/Close. But the TODO is that
+ // for a lot of drivers, this copy will be unnecessary. We
+ // should provide an optional interface for drivers to
+ // implement to say, "don't worry, the []bytes that I return
+ // from Next will not be modified again." (for instance, if
+ // they were obtained from the network anyway) But for now we
+ // don't care.
+ for _, dp := range dest {
+ if _, ok := dp.(*RawBytes); ok {
+ return errors.New("sql: RawBytes isn't allowed on Row.Scan")
+ }
+ b, ok := dp.(*[]byte)
+ if !ok {
+ continue
+ }
+ clone := make([]byte, len(*b))
+ copy(clone, *b)
+ *b = clone
+ }
+ return nil
+}
+
+// A Result summarizes an executed SQL command.
+type Result interface {
+ LastInsertId() (int64, error)
+ RowsAffected() (int64, error)
+}
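+
+// A sketch of inspecting a Result returned by Exec; whether LastInsertId is
+// meaningful depends on the driver and the statement executed:
+//
+//	res, err := db.Exec("DELETE FROM users WHERE age < ?", 18)
+//	...
+//	n, err := res.RowsAffected()
+//	...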
+
+type result struct {
+ driver.Result
+}
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sql
+
+import (
+ "reflect"
+ "strings"
+ "testing"
+ "time"
+)
+
+const fakeDBName = "foo"
+
+var chrisBirthday = time.Unix(123456789, 0)
+
+func newTestDB(t *testing.T, name string) *DB {
+ db, err := Open("test", fakeDBName)
+ if err != nil {
+ t.Fatalf("Open: %v", err)
+ }
+ if _, err := db.Exec("WIPE"); err != nil {
+ t.Fatalf("exec wipe: %v", err)
+ }
+ if name == "people" {
+ exec(t, db, "CREATE|people|name=string,age=int32,photo=blob,dead=bool,bdate=datetime")
+ exec(t, db, "INSERT|people|name=Alice,age=?,photo=APHOTO", 1)
+ exec(t, db, "INSERT|people|name=Bob,age=?,photo=BPHOTO", 2)
+ exec(t, db, "INSERT|people|name=Chris,age=?,photo=CPHOTO,bdate=?", 3, chrisBirthday)
+ }
+ return db
+}
+
+func exec(t *testing.T, db *DB, query string, args ...interface{}) {
+ _, err := db.Exec(query, args...)
+ if err != nil {
+ t.Fatalf("Exec of %q: %v", query, err)
+ }
+}
+
+func closeDB(t *testing.T, db *DB) {
+ err := db.Close()
+ if err != nil {
+ t.Fatalf("error closing DB: %v", err)
+ }
+}
+
+func TestQuery(t *testing.T) {
+ db := newTestDB(t, "people")
+ defer closeDB(t, db)
+ rows, err := db.Query("SELECT|people|age,name|")
+ if err != nil {
+ t.Fatalf("Query: %v", err)
+ }
+ type row struct {
+ age int
+ name string
+ }
+ got := []row{}
+ for rows.Next() {
+ var r row
+ err = rows.Scan(&r.age, &r.name)
+ if err != nil {
+ t.Fatalf("Scan: %v", err)
+ }
+ got = append(got, r)
+ }
+ err = rows.Err()
+ if err != nil {
+ t.Fatalf("Err: %v", err)
+ }
+ want := []row{
+ {age: 1, name: "Alice"},
+ {age: 2, name: "Bob"},
+ {age: 3, name: "Chris"},
+ }
+ if !reflect.DeepEqual(got, want) {
+ t.Errorf("mismatch.\n got: %#v\nwant: %#v", got, want)
+ }
+
+	// And verify that the final rows.Next() call, which hit EOF,
+	// also released the Rows' connection back to the free pool.
+ if n := len(db.freeConn); n != 1 {
+ t.Errorf("free conns after query hitting EOF = %d; want 1", n)
+ }
+}
+
+func TestByteOwnership(t *testing.T) {
+ db := newTestDB(t, "people")
+ defer closeDB(t, db)
+ rows, err := db.Query("SELECT|people|name,photo|")
+ if err != nil {
+ t.Fatalf("Query: %v", err)
+ }
+ type row struct {
+ name []byte
+ photo RawBytes
+ }
+ got := []row{}
+ for rows.Next() {
+ var r row
+ err = rows.Scan(&r.name, &r.photo)
+ if err != nil {
+ t.Fatalf("Scan: %v", err)
+ }
+ got = append(got, r)
+ }
+ corruptMemory := []byte("\xffPHOTO")
+ want := []row{
+ {name: []byte("Alice"), photo: corruptMemory},
+ {name: []byte("Bob"), photo: corruptMemory},
+ {name: []byte("Chris"), photo: corruptMemory},
+ }
+ if !reflect.DeepEqual(got, want) {
+ t.Errorf("mismatch.\n got: %#v\nwant: %#v", got, want)
+ }
+
+ var photo RawBytes
+ err = db.QueryRow("SELECT|people|photo|name=?", "Alice").Scan(&photo)
+ if err == nil {
+ t.Error("want error scanning into RawBytes from QueryRow")
+ }
+}
+
+func TestRowsColumns(t *testing.T) {
+ db := newTestDB(t, "people")
+ defer closeDB(t, db)
+ rows, err := db.Query("SELECT|people|age,name|")
+ if err != nil {
+ t.Fatalf("Query: %v", err)
+ }
+ cols, err := rows.Columns()
+ if err != nil {
+ t.Fatalf("Columns: %v", err)
+ }
+ want := []string{"age", "name"}
+ if !reflect.DeepEqual(cols, want) {
+ t.Errorf("got %#v; want %#v", cols, want)
+ }
+}
+
+func TestQueryRow(t *testing.T) {
+ db := newTestDB(t, "people")
+ defer closeDB(t, db)
+ var name string
+ var age int
+ var birthday time.Time
+
+ err := db.QueryRow("SELECT|people|age,name|age=?", 3).Scan(&age)
+ if err == nil || !strings.Contains(err.Error(), "expected 2 destination arguments") {
+ t.Errorf("expected error from wrong number of arguments; actually got: %v", err)
+ }
+
+ err = db.QueryRow("SELECT|people|bdate|age=?", 3).Scan(&birthday)
+ if err != nil || !birthday.Equal(chrisBirthday) {
+ t.Errorf("chris birthday = %v, err = %v; want %v", birthday, err, chrisBirthday)
+ }
+
+ err = db.QueryRow("SELECT|people|age,name|age=?", 2).Scan(&age, &name)
+ if err != nil {
+ t.Fatalf("age QueryRow+Scan: %v", err)
+ }
+ if name != "Bob" {
+ t.Errorf("expected name Bob, got %q", name)
+ }
+ if age != 2 {
+ t.Errorf("expected age 2, got %d", age)
+ }
+
+ err = db.QueryRow("SELECT|people|age,name|name=?", "Alice").Scan(&age, &name)
+ if err != nil {
+ t.Fatalf("name QueryRow+Scan: %v", err)
+ }
+ if name != "Alice" {
+ t.Errorf("expected name Alice, got %q", name)
+ }
+ if age != 1 {
+ t.Errorf("expected age 1, got %d", age)
+ }
+
+ var photo []byte
+ err = db.QueryRow("SELECT|people|photo|name=?", "Alice").Scan(&photo)
+ if err != nil {
+ t.Fatalf("photo QueryRow+Scan: %v", err)
+ }
+ want := []byte("APHOTO")
+ if !reflect.DeepEqual(photo, want) {
+ t.Errorf("photo = %q; want %q", photo, want)
+ }
+}
+
+func TestStatementErrorAfterClose(t *testing.T) {
+ db := newTestDB(t, "people")
+ defer closeDB(t, db)
+ stmt, err := db.Prepare("SELECT|people|age|name=?")
+ if err != nil {
+ t.Fatalf("Prepare: %v", err)
+ }
+ err = stmt.Close()
+ if err != nil {
+ t.Fatalf("Close: %v", err)
+ }
+ var name string
+ err = stmt.QueryRow("foo").Scan(&name)
+ if err == nil {
+ t.Errorf("expected error from QueryRow.Scan after Stmt.Close")
+ }
+}
+
+func TestStatementQueryRow(t *testing.T) {
+ db := newTestDB(t, "people")
+ defer closeDB(t, db)
+ stmt, err := db.Prepare("SELECT|people|age|name=?")
+ if err != nil {
+ t.Fatalf("Prepare: %v", err)
+ }
+ var age int
+ for n, tt := range []struct {
+ name string
+ want int
+ }{
+ {"Alice", 1},
+ {"Bob", 2},
+ {"Chris", 3},
+ } {
+ if err := stmt.QueryRow(tt.name).Scan(&age); err != nil {
+ t.Errorf("%d: on %q, QueryRow/Scan: %v", n, tt.name, err)
+ } else if age != tt.want {
+ t.Errorf("%d: age=%d, want %d", n, age, tt.want)
+ }
+ }
+
+}
+
+// just a test of fakedb itself
+func TestBogusPreboundParameters(t *testing.T) {
+ db := newTestDB(t, "foo")
+ defer closeDB(t, db)
+ exec(t, db, "CREATE|t1|name=string,age=int32,dead=bool")
+ _, err := db.Prepare("INSERT|t1|name=?,age=bogusconversion")
+ if err == nil {
+ t.Fatalf("expected error")
+ }
+ if err.Error() != `fakedb: invalid conversion to int32 from "bogusconversion"` {
+ t.Errorf("unexpected error: %v", err)
+ }
+}
+
+func TestExec(t *testing.T) {
+ db := newTestDB(t, "foo")
+ defer closeDB(t, db)
+ exec(t, db, "CREATE|t1|name=string,age=int32,dead=bool")
+ stmt, err := db.Prepare("INSERT|t1|name=?,age=?")
+ if err != nil {
+ t.Errorf("Stmt, err = %v, %v", stmt, err)
+ }
+
+ type execTest struct {
+ args []interface{}
+ wantErr string
+ }
+ execTests := []execTest{
+ // Okay:
+ {[]interface{}{"Brad", 31}, ""},
+ {[]interface{}{"Brad", int64(31)}, ""},
+ {[]interface{}{"Bob", "32"}, ""},
+ {[]interface{}{7, 9}, ""},
+
+ // Invalid conversions:
+ {[]interface{}{"Brad", int64(0xFFFFFFFF)}, "sql: converting Exec argument #1's type: sql/driver: value 4294967295 overflows int32"},
+ {[]interface{}{"Brad", "strconv fail"}, "sql: converting Exec argument #1's type: sql/driver: value \"strconv fail\" can't be converted to int32"},
+
+ // Wrong number of args:
+ {[]interface{}{}, "sql: expected 2 arguments, got 0"},
+ {[]interface{}{1, 2, 3}, "sql: expected 2 arguments, got 3"},
+ }
+ for n, et := range execTests {
+ _, err := stmt.Exec(et.args...)
+ errStr := ""
+ if err != nil {
+ errStr = err.Error()
+ }
+ if errStr != et.wantErr {
+ t.Errorf("stmt.Execute #%d: for %v, got error %q, want error %q",
+ n, et.args, errStr, et.wantErr)
+ }
+ }
+}
+
+func TestTxStmt(t *testing.T) {
+ db := newTestDB(t, "")
+ defer closeDB(t, db)
+ exec(t, db, "CREATE|t1|name=string,age=int32,dead=bool")
+ stmt, err := db.Prepare("INSERT|t1|name=?,age=?")
+ if err != nil {
+ t.Fatalf("Stmt, err = %v, %v", stmt, err)
+ }
+ tx, err := db.Begin()
+ if err != nil {
+ t.Fatalf("Begin = %v", err)
+ }
+ _, err = tx.Stmt(stmt).Exec("Bobby", 7)
+ if err != nil {
+ t.Fatalf("Exec = %v", err)
+ }
+ err = tx.Commit()
+ if err != nil {
+ t.Fatalf("Commit = %v", err)
+ }
+}
+
+// Tests fix for issue 2542, that we release a lock when querying on
+// a closed connection.
+func TestIssue2542Deadlock(t *testing.T) {
+ db := newTestDB(t, "people")
+ closeDB(t, db)
+ for i := 0; i < 2; i++ {
+ _, err := db.Query("SELECT|people|age,name|")
+ if err == nil {
+ t.Fatalf("expected error")
+ }
+ }
+}
+
+func TestQueryRowClosingStmt(t *testing.T) {
+ db := newTestDB(t, "people")
+ defer closeDB(t, db)
+ var name string
+ var age int
+ err := db.QueryRow("SELECT|people|age,name|age=?", 3).Scan(&age, &name)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(db.freeConn) != 1 {
+ t.Fatalf("expected 1 free conn")
+ }
+ fakeConn := db.freeConn[0].(*fakeConn)
+ if made, closed := fakeConn.stmtsMade, fakeConn.stmtsClosed; made != closed {
+ t.Errorf("statement close mismatch: made %d, closed %d", made, closed)
+ }
+}
+
+func TestNullStringParam(t *testing.T) {
+ db := newTestDB(t, "")
+ defer closeDB(t, db)
+ exec(t, db, "CREATE|t|id=int32,name=string,favcolor=nullstring")
+
+ // Inserts with db.Exec:
+ exec(t, db, "INSERT|t|id=?,name=?,favcolor=?", 1, "alice", NullString{"aqua", true})
+ exec(t, db, "INSERT|t|id=?,name=?,favcolor=?", 2, "bob", NullString{"brown", false})
+
+ _, err := db.Exec("INSERT|t|id=?,name=?,favcolor=?", 999, nil, nil)
+ if err == nil {
+ // TODO: this test fails, but it's just because
+ // fakeConn implements the optional Execer interface,
+ // so arguably this is the correct behavior. But
+ // maybe I should flesh out the fakeConn.Exec
+ // implementation so this properly fails.
+ // t.Errorf("expected error inserting nil name with Exec")
+ }
+
+ // Inserts with a prepared statement:
+ stmt, err := db.Prepare("INSERT|t|id=?,name=?,favcolor=?")
+ if err != nil {
+ t.Fatalf("prepare: %v", err)
+ }
+ if _, err := stmt.Exec(3, "chris", "chartreuse"); err != nil {
+ t.Errorf("exec insert chris: %v", err)
+ }
+ if _, err := stmt.Exec(4, "dave", NullString{"darkred", true}); err != nil {
+ t.Errorf("exec insert dave: %v", err)
+ }
+ if _, err := stmt.Exec(5, "eleanor", NullString{"eel", false}); err != nil {
+ t.Errorf("exec insert dave: %v", err)
+ }
+
+	// Can't put null name into non-nullstring column.
+ if _, err := stmt.Exec(5, NullString{"", false}, nil); err == nil {
+ t.Errorf("expected error inserting nil name with prepared statement Exec")
+ }
+
+ type nameColor struct {
+ name string
+ favColor NullString
+ }
+
+ wantMap := map[int]nameColor{
+ 1: nameColor{"alice", NullString{"aqua", true}},
+ 2: nameColor{"bob", NullString{"", false}},
+ 3: nameColor{"chris", NullString{"chartreuse", true}},
+ 4: nameColor{"dave", NullString{"darkred", true}},
+ 5: nameColor{"eleanor", NullString{"", false}},
+ }
+ for id, want := range wantMap {
+ var got nameColor
+ if err := db.QueryRow("SELECT|t|name,favcolor|id=?", id).Scan(&got.name, &got.favColor); err != nil {
+ t.Errorf("id=%d Scan: %v", id, err)
+ }
+ if got != want {
+ t.Errorf("id=%d got %#v, want %#v", id, got, want)
+ }
+ }
+}
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
package gob
// This file is not normally included in the gob package. Used only for debugging the package itself.
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
package main
// Need to compile package gob with debug.go to build this program.
//
// String values encode as JSON strings, with each invalid UTF-8 sequence
// replaced by the encoding of the Unicode replacement character U+FFFD.
+// The angle brackets "<" and ">" are escaped to "\u003c" and "\u003e"
+// to keep some browsers from misinterpreting JSON output as HTML.
//
// Array and slice values encode as JSON arrays, except that
// []byte encodes as a base64-encoded string.
// Int64String int64 `json:",string"`
//
// The key name will be used if it's a non-empty string consisting of
-// only Unicode letters, digits, dollar signs, hyphens, and underscores.
+// only Unicode letters, digits, dollar signs, percent signs, hyphens,
+// underscores and slashes.
//
// Map values encode as JSON objects.
// The map's key type must be string; the object keys are used directly
return false
}
for _, c := range s {
- if c != '$' && c != '-' && c != '_' && !unicode.IsLetter(c) && !unicode.IsDigit(c) {
- return false
+ switch c {
+ case '$', '-', '_', '/', '%':
+ // Acceptable
+ default:
+ if !unicode.IsLetter(c) && !unicode.IsDigit(c) {
+ return false
+ }
}
}
return true
)
type basicLatin2xTag struct {
- V string `json:"$-"`
+ V string `json:"$%-/"`
}
type basicLatin3xTag struct {
V string `json:"色は匂へど"`
}
+type percentSlashTag struct {
+ V string `json:"text/html%"` // http://golang.org/issue/2718
+}
+
type emptyTag struct {
W string
}
}
type badCodeTag struct {
- Z string `json:" !\"#%&'()*+,./"`
+ Z string `json:" !\"#&'()*+,."`
}
var structTagObjectKeyTests = []struct {
value string
key string
}{
- {basicLatin2xTag{"2x"}, "2x", "$-"},
+ {basicLatin2xTag{"2x"}, "2x", "$%-/"},
{basicLatin3xTag{"3x"}, "3x", "0123456789"},
{basicLatin4xTag{"4x"}, "4x", "ABCDEFGHIJKLMO"},
{basicLatin5xTag{"5x"}, "5x", "PQRSTUVWXYZ_"},
{misnamedTag{"Animal Kingdom"}, "Animal Kingdom", "X"},
{badFormatTag{"Orfevre"}, "Orfevre", "Y"},
{badCodeTag{"Reliable Man"}, "Reliable Man", "Z"},
+ {percentSlashTag{"brut"}, "brut", "text/html%"},
}
func TestStructTagObjectKey(t *testing.T) {
t.Fatalf("Unexpected value: %#q, want %v", s, tt.value)
}
default:
- t.Fatalf("Unexpected key: %#q", i)
+ t.Fatalf("Unexpected key: %#q, from %#q", i, b)
}
}
}
Value string ",chardata"
}
+type NameInField struct {
+ Foo Name `xml:"ns foo"`
+}
+
type AttrTest struct {
Int int `xml:",attr"`
Lower int `xml:"int,attr"`
UnmarshalOnly: true,
},
+ // xml.Name works in a plain field as well.
+ {
+ Value: &NameInField{Name{Space: "ns", Local: "foo"}},
+ ExpectXML: `<NameInField><foo xmlns="ns"></foo></NameInField>`,
+ },
+
+ // Marshaling zero xml.Name uses the tag or field name.
+ {
+ Value: &NameInField{},
+ ExpectXML: `<NameInField><foo xmlns="ns"></foo></NameInField>`,
+ MarshalOnly: true,
+ },
+
// Test attributes
{
Value: &AttrTest{
case reflect.Struct:
sv = v
typ := sv.Type()
+ if typ == nameType {
+ v.Set(reflect.ValueOf(start.Name))
+ break
+ }
tinfo, err = getTypeInfo(typ)
if err != nil {
return err
var tinfoMap = make(map[reflect.Type]*typeInfo)
var tinfoLock sync.RWMutex
+var nameType = reflect.TypeOf(Name{})
+
// getTypeInfo returns the typeInfo structure with details necessary
// for marshalling and unmarshalling typ.
func getTypeInfo(typ reflect.Type) (*typeInfo, error) {
return tinfo, nil
}
tinfo = &typeInfo{}
- if typ.Kind() == reflect.Struct {
+ if typ.Kind() == reflect.Struct && typ != nameType {
n := typ.NumField()
for i := 0; i < n; i++ {
f := typ.Field(i)
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
package p
func _() {
watcher, _ := NewWatcher()
watcher.Close()
- done := false
+ done := make(chan bool)
go func() {
watcher.Close()
- done = true
+ done <- true
}()
- time.Sleep(50 * time.Millisecond)
- if !done {
+ select {
+ case <-done:
+ case <-time.After(50 * time.Millisecond):
t.Fatal("double Close() test failed: second Close() call didn't return")
}
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
package norm
import (
"net"
"net/url"
"os"
- "strings"
)
// A Dialer is a means to establish a connection.
// Dialer for it to make network requests.
func FromURL(u *url.URL, forward Dialer) (Dialer, error) {
var auth *Auth
- if len(u.RawUserinfo) > 0 {
+ if u.User != nil {
auth = new(Auth)
- parts := strings.SplitN(u.RawUserinfo, ":", 1)
- if len(parts) == 1 {
- auth.User = parts[0]
- } else if len(parts) >= 2 {
- auth.User = parts[0]
- auth.Password = parts[1]
+ auth.User = u.User.Username()
+ if p, ok := u.User.Password(); ok {
+ auth.Password = p
}
}
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Type conversions for Scan.
-
-package sql
-
-import (
- "errors"
- "exp/sql/driver"
- "fmt"
- "reflect"
- "strconv"
-)
-
-// subsetTypeArgs takes a slice of arguments from callers of the sql
-// package and converts them into a slice of the driver package's
-// "subset types".
-func subsetTypeArgs(args []interface{}) ([]interface{}, error) {
- out := make([]interface{}, len(args))
- for n, arg := range args {
- var err error
- out[n], err = driver.DefaultParameterConverter.ConvertValue(arg)
- if err != nil {
- return nil, fmt.Errorf("sql: converting argument #%d's type: %v", n+1, err)
- }
- }
- return out, nil
-}
-
-// convertAssign copies to dest the value in src, converting it if possible.
-// An error is returned if the copy would result in loss of information.
-// dest should be a pointer type.
-func convertAssign(dest, src interface{}) error {
- // Common cases, without reflect. Fall through.
- switch s := src.(type) {
- case string:
- switch d := dest.(type) {
- case *string:
- *d = s
- return nil
- }
- case []byte:
- switch d := dest.(type) {
- case *string:
- *d = string(s)
- return nil
- case *[]byte:
- *d = s
- return nil
- }
- }
-
- var sv reflect.Value
-
- switch d := dest.(type) {
- case *string:
- sv = reflect.ValueOf(src)
- switch sv.Kind() {
- case reflect.Bool,
- reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
- reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
- reflect.Float32, reflect.Float64:
- *d = fmt.Sprintf("%v", src)
- return nil
- }
- case *bool:
- bv, err := driver.Bool.ConvertValue(src)
- if err == nil {
- *d = bv.(bool)
- }
- return err
- }
-
- if scanner, ok := dest.(ScannerInto); ok {
- return scanner.ScanInto(src)
- }
-
- dpv := reflect.ValueOf(dest)
- if dpv.Kind() != reflect.Ptr {
- return errors.New("destination not a pointer")
- }
-
- if !sv.IsValid() {
- sv = reflect.ValueOf(src)
- }
-
- dv := reflect.Indirect(dpv)
- if dv.Kind() == sv.Kind() {
- dv.Set(sv)
- return nil
- }
-
- switch dv.Kind() {
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- s := asString(src)
- i64, err := strconv.ParseInt(s, 10, dv.Type().Bits())
- if err != nil {
- return fmt.Errorf("converting string %q to a %s: %v", s, dv.Kind(), err)
- }
- dv.SetInt(i64)
- return nil
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
- s := asString(src)
- u64, err := strconv.ParseUint(s, 10, dv.Type().Bits())
- if err != nil {
- return fmt.Errorf("converting string %q to a %s: %v", s, dv.Kind(), err)
- }
- dv.SetUint(u64)
- return nil
- case reflect.Float32, reflect.Float64:
- s := asString(src)
- f64, err := strconv.ParseFloat(s, dv.Type().Bits())
- if err != nil {
- return fmt.Errorf("converting string %q to a %s: %v", s, dv.Kind(), err)
- }
- dv.SetFloat(f64)
- return nil
- }
-
- return fmt.Errorf("unsupported driver -> Scan pair: %T -> %T", src, dest)
-}
-
-func asString(src interface{}) string {
- switch v := src.(type) {
- case string:
- return v
- case []byte:
- return string(v)
- }
- return fmt.Sprintf("%v", src)
-}
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package sql
-
-import (
- "fmt"
- "reflect"
- "testing"
- "time"
-)
-
-var someTime = time.Unix(123, 0)
-
-type conversionTest struct {
- s, d interface{} // source and destination
-
- // following are used if they're non-zero
- wantint int64
- wantuint uint64
- wantstr string
- wantf32 float32
- wantf64 float64
- wanttime time.Time
- wantbool bool // used if d is of type *bool
- wanterr string
-}
-
-// Target variables for scanning into.
-var (
- scanstr string
- scanint int
- scanint8 int8
- scanint16 int16
- scanint32 int32
- scanuint8 uint8
- scanuint16 uint16
- scanbool bool
- scanf32 float32
- scanf64 float64
- scantime time.Time
-)
-
-var conversionTests = []conversionTest{
- // Exact conversions (destination pointer type matches source type)
- {s: "foo", d: &scanstr, wantstr: "foo"},
- {s: 123, d: &scanint, wantint: 123},
- {s: someTime, d: &scantime, wanttime: someTime},
-
- // To strings
- {s: []byte("byteslice"), d: &scanstr, wantstr: "byteslice"},
- {s: 123, d: &scanstr, wantstr: "123"},
- {s: int8(123), d: &scanstr, wantstr: "123"},
- {s: int64(123), d: &scanstr, wantstr: "123"},
- {s: uint8(123), d: &scanstr, wantstr: "123"},
- {s: uint16(123), d: &scanstr, wantstr: "123"},
- {s: uint32(123), d: &scanstr, wantstr: "123"},
- {s: uint64(123), d: &scanstr, wantstr: "123"},
- {s: 1.5, d: &scanstr, wantstr: "1.5"},
-
- // Strings to integers
- {s: "255", d: &scanuint8, wantuint: 255},
- {s: "256", d: &scanuint8, wanterr: `converting string "256" to a uint8: strconv.ParseUint: parsing "256": value out of range`},
- {s: "256", d: &scanuint16, wantuint: 256},
- {s: "-1", d: &scanint, wantint: -1},
- {s: "foo", d: &scanint, wanterr: `converting string "foo" to a int: strconv.ParseInt: parsing "foo": invalid syntax`},
-
- // True bools
- {s: true, d: &scanbool, wantbool: true},
- {s: "True", d: &scanbool, wantbool: true},
- {s: "TRUE", d: &scanbool, wantbool: true},
- {s: "1", d: &scanbool, wantbool: true},
- {s: 1, d: &scanbool, wantbool: true},
- {s: int64(1), d: &scanbool, wantbool: true},
- {s: uint16(1), d: &scanbool, wantbool: true},
-
- // False bools
- {s: false, d: &scanbool, wantbool: false},
- {s: "false", d: &scanbool, wantbool: false},
- {s: "FALSE", d: &scanbool, wantbool: false},
- {s: "0", d: &scanbool, wantbool: false},
- {s: 0, d: &scanbool, wantbool: false},
- {s: int64(0), d: &scanbool, wantbool: false},
- {s: uint16(0), d: &scanbool, wantbool: false},
-
- // Not bools
- {s: "yup", d: &scanbool, wanterr: `sql/driver: couldn't convert "yup" into type bool`},
- {s: 2, d: &scanbool, wanterr: `sql/driver: couldn't convert 2 into type bool`},
-
- // Floats
- {s: float64(1.5), d: &scanf64, wantf64: float64(1.5)},
- {s: int64(1), d: &scanf64, wantf64: float64(1)},
- {s: float64(1.5), d: &scanf32, wantf32: float32(1.5)},
- {s: "1.5", d: &scanf32, wantf32: float32(1.5)},
- {s: "1.5", d: &scanf64, wantf64: float64(1.5)},
-}
-
-func intValue(intptr interface{}) int64 {
- return reflect.Indirect(reflect.ValueOf(intptr)).Int()
-}
-
-func uintValue(intptr interface{}) uint64 {
- return reflect.Indirect(reflect.ValueOf(intptr)).Uint()
-}
-
-func float64Value(ptr interface{}) float64 {
- return *(ptr.(*float64))
-}
-
-func float32Value(ptr interface{}) float32 {
- return *(ptr.(*float32))
-}
-
-func timeValue(ptr interface{}) time.Time {
- return *(ptr.(*time.Time))
-}
-
-func TestConversions(t *testing.T) {
- for n, ct := range conversionTests {
- err := convertAssign(ct.d, ct.s)
- errstr := ""
- if err != nil {
- errstr = err.Error()
- }
- errf := func(format string, args ...interface{}) {
- base := fmt.Sprintf("convertAssign #%d: for %v (%T) -> %T, ", n, ct.s, ct.s, ct.d)
- t.Errorf(base+format, args...)
- }
- if errstr != ct.wanterr {
- errf("got error %q, want error %q", errstr, ct.wanterr)
- }
- if ct.wantstr != "" && ct.wantstr != scanstr {
- errf("want string %q, got %q", ct.wantstr, scanstr)
- }
- if ct.wantint != 0 && ct.wantint != intValue(ct.d) {
- errf("want int %d, got %d", ct.wantint, intValue(ct.d))
- }
- if ct.wantuint != 0 && ct.wantuint != uintValue(ct.d) {
- errf("want uint %d, got %d", ct.wantuint, uintValue(ct.d))
- }
- if ct.wantf32 != 0 && ct.wantf32 != float32Value(ct.d) {
- errf("want float32 %v, got %v", ct.wantf32, float32Value(ct.d))
- }
- if ct.wantf64 != 0 && ct.wantf64 != float64Value(ct.d) {
- errf("want float32 %v, got %v", ct.wantf64, float64Value(ct.d))
- }
- if bp, boolTest := ct.d.(*bool); boolTest && *bp != ct.wantbool && ct.wanterr == "" {
- errf("want bool %v, got %v", ct.wantbool, *bp)
- }
- if !ct.wanttime.IsZero() && !ct.wanttime.Equal(timeValue(ct.d)) {
- errf("want time %v, got %v", ct.wanttime, timeValue(ct.d))
- }
- }
-}
-
-func TestNullableString(t *testing.T) {
- var ns NullableString
- convertAssign(&ns, []byte("foo"))
- if !ns.Valid {
- t.Errorf("expecting not null")
- }
- if ns.String != "foo" {
- t.Errorf("expecting foo; got %q", ns.String)
- }
- convertAssign(&ns, nil)
- if ns.Valid {
- t.Errorf("expecting null on nil")
- }
- if ns.String != "" {
- t.Errorf("expecting blank on nil; got %q", ns.String)
- }
-}
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package driver defines interfaces to be implemented by database
-// drivers as used by package sql.
-//
-// Code simply using databases should use package sql.
-//
-// Drivers only need to be aware of a subset of Go's types. The sql package
-// will convert all types into one of the following:
-//
-// int64
-// float64
-// bool
-// nil
-// []byte
-// string [*] everywhere except from Rows.Next.
-// time.Time
-//
-package driver
-
-import "errors"
-
-// Driver is the interface that must be implemented by a database
-// driver.
-type Driver interface {
- // Open returns a new connection to the database.
- // The name is a string in a driver-specific format.
- //
- // Open may return a cached connection (one previously
- // closed), but doing so is unnecessary; the sql package
- // maintains a pool of idle connections for efficient re-use.
- //
- // The returned connection is only used by one goroutine at a
- // time.
- Open(name string) (Conn, error)
-}
-
-// ErrSkip may be returned by some optional interfaces' methods to
-// indicate at runtime that the fast path is unavailable and the sql
-// package should continue as if the optional interface was not
-// implemented. ErrSkip is only supported where explicitly
-// documented.
-var ErrSkip = errors.New("driver: skip fast-path; continue as if unimplemented")
-
-// Execer is an optional interface that may be implemented by a Conn.
-//
-// If a Conn does not implement Execer, the sql package's DB.Exec will
-// first prepare a query, execute the statement, and then close the
-// statement.
-//
-// All arguments are of a subset type as defined in the package docs.
-//
-// Exec may return ErrSkip.
-type Execer interface {
- Exec(query string, args []interface{}) (Result, error)
-}
-
-// Conn is a connection to a database. It is not used concurrently
-// by multiple goroutines.
-//
-// Conn is assumed to be stateful.
-type Conn interface {
- // Prepare returns a prepared statement, bound to this connection.
- Prepare(query string) (Stmt, error)
-
- // Close invalidates and potentially stops any current
- // prepared statements and transactions, marking this
- // connection as no longer in use.
- //
- // Because the sql package maintains a free pool of
- // connections and only calls Close when there's a surplus of
- // idle connections, it shouldn't be necessary for drivers to
- // do their own connection caching.
- Close() error
-
- // Begin starts and returns a new transaction.
- Begin() (Tx, error)
-}
-
-// Result is the result of a query execution.
-type Result interface {
- // LastInsertId returns the database's auto-generated ID
- // after, for example, an INSERT into a table with primary
- // key.
- LastInsertId() (int64, error)
-
- // RowsAffected returns the number of rows affected by the
- // query.
- RowsAffected() (int64, error)
-}
-
-// Stmt is a prepared statement. It is bound to a Conn and not
-// used by multiple goroutines concurrently.
-type Stmt interface {
- // Close closes the statement.
- //
- // Closing a statement should not interrupt any outstanding
- // query created from that statement. That is, the following
- // order of operations is valid:
- //
- // * create a driver statement
- // * call Query on statement, returning Rows
- // * close the statement
- // * read from Rows
- //
- // If closing a statement invalidates currently-running
- // queries, the final step above will incorrectly fail.
- //
- // TODO(bradfitz): possibly remove the restriction above, if
- // enough driver authors object and find it complicates their
- // code too much. The sql package could be smarter about
- // refcounting the statement and closing it at the appropriate
- // time.
- Close() error
-
- // NumInput returns the number of placeholder parameters.
- //
- // If NumInput returns >= 0, the sql package will sanity check
- // argument counts from callers and return errors to the caller
- // before the statement's Exec or Query methods are called.
- //
- // NumInput may also return -1, if the driver doesn't know
- // its number of placeholders. In that case, the sql package
- // will not sanity check Exec or Query argument counts.
- NumInput() int
-
- // Exec executes a query that doesn't return rows, such
- // as an INSERT or UPDATE. The args are all of a subset
- // type as defined above.
- Exec(args []interface{}) (Result, error)
-
-	// Query executes a query that may return rows, such as a
-	// SELECT. The args are all of a subset type as defined above.
- Query(args []interface{}) (Rows, error)
-}
-
-// ColumnConverter may be optionally implemented by Stmt if the
-// statement is aware of its own columns' types and can
-// convert from any type to a driver subset type.
-type ColumnConverter interface {
- // ColumnConverter returns a ValueConverter for the provided
- // column index. If the type of a specific column isn't known
-	// or shouldn't be handled specially, DefaultParameterConverter
-	// can be returned.
- ColumnConverter(idx int) ValueConverter
-}
-
-// Rows is an iterator over an executed query's results.
-type Rows interface {
- // Columns returns the names of the columns. The number of
- // columns of the result is inferred from the length of the
- // slice. If a particular column name isn't known, an empty
- // string should be returned for that entry.
- Columns() []string
-
- // Close closes the rows iterator.
- Close() error
-
- // Next is called to populate the next row of data into
- // the provided slice. The provided slice will be the same
- // size as the Columns() are wide.
- //
-	// The dest slice may be populated only with values of the
-	// subset types defined above, excluding string; all string
-	// values must be converted to []byte.
- //
- // Next should return io.EOF when there are no more rows.
- Next(dest []interface{}) error
-}
-
-// Tx is a transaction.
-type Tx interface {
- Commit() error
- Rollback() error
-}
-
-// RowsAffected implements Result for an INSERT or UPDATE operation
-// which mutates a number of rows.
-type RowsAffected int64
-
-var _ Result = RowsAffected(0)
-
-func (RowsAffected) LastInsertId() (int64, error) {
- return 0, errors.New("no LastInsertId available")
-}
-
-func (v RowsAffected) RowsAffected() (int64, error) {
- return int64(v), nil
-}
-
-// DDLSuccess is a pre-defined Result for drivers to return when a DDL
-// command succeeds.
-var DDLSuccess ddlSuccess
-
-type ddlSuccess struct{}
-
-var _ Result = ddlSuccess{}
-
-func (ddlSuccess) LastInsertId() (int64, error) {
- return 0, errors.New("no LastInsertId available after DDL statement")
-}
-
-func (ddlSuccess) RowsAffected() (int64, error) {
- return 0, errors.New("no RowsAffected available after DDL statement")
-}
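
To make the contract above concrete, here is a hedged sketch of about the smallest driver that satisfies these interfaces. Every identifier in it (package dummy, dummyDriver, dummyConn, dummyStmt, dummyRows) is hypothetical and exists only for illustration; it is not part of this patch.

package dummy

import (
	"errors"
	"io"

	"exp/sql/driver"
)

type dummyDriver struct{}

// Open ignores the data source name and hands back a fresh connection.
func (dummyDriver) Open(name string) (driver.Conn, error) { return &dummyConn{}, nil }

type dummyConn struct{}

func (*dummyConn) Prepare(query string) (driver.Stmt, error) { return &dummyStmt{}, nil }
func (*dummyConn) Close() error                              { return nil }
func (*dummyConn) Begin() (driver.Tx, error) {
	return nil, errors.New("dummy: transactions are not supported")
}

type dummyStmt struct{}

func (*dummyStmt) Close() error  { return nil }
func (*dummyStmt) NumInput() int { return 0 } // no placeholders, so the sql package checks arg counts
func (*dummyStmt) Exec(args []interface{}) (driver.Result, error) {
	return driver.RowsAffected(0), nil
}
func (*dummyStmt) Query(args []interface{}) (driver.Rows, error) {
	return &dummyRows{}, nil
}

type dummyRows struct{ done bool }

func (*dummyRows) Columns() []string { return []string{"answer"} }
func (*dummyRows) Close() error      { return nil }
func (r *dummyRows) Next(dest []interface{}) error {
	if r.done {
		return io.EOF // end of the single fake row, per the Rows contract above
	}
	r.done = true
	dest[0] = int64(42) // only subset types here; strings would have to go out as []byte
	return nil
}

Registering it, e.g. with Register("dummy", dummyDriver{}) from an init function, would make it reachable through the sql package's Open.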
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package driver
-
-import (
- "fmt"
- "reflect"
- "strconv"
- "time"
-)
-
-// ValueConverter is the interface providing the ConvertValue method.
-//
-// Various implementations of ValueConverter are provided by the
-// driver package to provide consistent implementations of conversions
-// between drivers. The ValueConverters have several uses:
-//
-// * converting from the subset types as provided by the sql package
-// into a database table's specific column type and making sure it
-// fits, such as making sure a particular int64 fits in a
-// table's uint16 column.
-//
-// * converting a value as given from the database into one of the
-// subset types.
-//
-// * by the sql package, for converting from a driver's subset type
-// to a user's type in a scan.
-type ValueConverter interface {
- // ConvertValue converts a value to a restricted subset type.
- ConvertValue(v interface{}) (interface{}, error)
-}
-
-// Bool is a ValueConverter that converts input values to bools.
-//
-// The conversion rules are:
-// - booleans are returned unchanged
-// - for integer types,
-// 1 is true
-// 0 is false,
-// other integers are an error
-// - for strings and []byte, same rules as strconv.ParseBool
-// - all other types are an error
-var Bool boolType
-
-type boolType struct{}
-
-var _ ValueConverter = boolType{}
-
-func (boolType) String() string { return "Bool" }
-
-func (boolType) ConvertValue(src interface{}) (interface{}, error) {
- switch s := src.(type) {
- case bool:
- return s, nil
- case string:
- b, err := strconv.ParseBool(s)
- if err != nil {
- return nil, fmt.Errorf("sql/driver: couldn't convert %q into type bool", s)
- }
- return b, nil
- case []byte:
- b, err := strconv.ParseBool(string(s))
- if err != nil {
- return nil, fmt.Errorf("sql/driver: couldn't convert %q into type bool", s)
- }
- return b, nil
- }
-
- sv := reflect.ValueOf(src)
- switch sv.Kind() {
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- iv := sv.Int()
- if iv == 1 || iv == 0 {
- return iv == 1, nil
- }
- return nil, fmt.Errorf("sql/driver: couldn't convert %d into type bool", iv)
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
- uv := sv.Uint()
- if uv == 1 || uv == 0 {
- return uv == 1, nil
- }
- return nil, fmt.Errorf("sql/driver: couldn't convert %d into type bool", uv)
- }
-
- return nil, fmt.Errorf("sql/driver: couldn't convert %v (%T) into type bool", src, src)
-}
-
-// Int32 is a ValueConverter that converts input values to int64,
-// respecting the limits of an int32 value.
-var Int32 int32Type
-
-type int32Type struct{}
-
-var _ ValueConverter = int32Type{}
-
-func (int32Type) ConvertValue(v interface{}) (interface{}, error) {
- rv := reflect.ValueOf(v)
- switch rv.Kind() {
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- i64 := rv.Int()
- if i64 > (1<<31)-1 || i64 < -(1<<31) {
- return nil, fmt.Errorf("sql/driver: value %d overflows int32", v)
- }
- return i64, nil
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
- u64 := rv.Uint()
- if u64 > (1<<31)-1 {
- return nil, fmt.Errorf("sql/driver: value %d overflows int32", v)
- }
- return int64(u64), nil
- case reflect.String:
- i, err := strconv.Atoi(rv.String())
- if err != nil {
- return nil, fmt.Errorf("sql/driver: value %q can't be converted to int32", v)
- }
- return int64(i), nil
- }
- return nil, fmt.Errorf("sql/driver: unsupported value %v (type %T) converting to int32", v, v)
-}
-
-// String is a ValueConverter that converts its input to a string.
-// If the value is already a string or []byte, it's unchanged.
-// If the value is of another type, conversion to string is done
-// with fmt.Sprintf("%v", v).
-var String stringType
-
-type stringType struct{}
-
-func (stringType) ConvertValue(v interface{}) (interface{}, error) {
- switch v.(type) {
- case string, []byte:
- return v, nil
- }
- return fmt.Sprintf("%v", v), nil
-}
-
-// IsParameterSubsetType reports whether v is of a valid type for a
-// parameter. These types are:
-//
-// int64
-// float64
-// bool
-// nil
-// []byte
-// time.Time
-// string
-//
-// This is the same list as IsScanSubsetType, with the addition of
-// string.
-func IsParameterSubsetType(v interface{}) bool {
- if IsScanSubsetType(v) {
- return true
- }
- if _, ok := v.(string); ok {
- return true
- }
- return false
-}
-
-// IsScanSubsetType reports whether v is of a valid type for a
-// value populated by Rows.Next. These types are:
-//
-// int64
-// float64
-// bool
-// nil
-// []byte
-// time.Time
-//
-// This is the same list as IsParameterSubsetType, without string.
-func IsScanSubsetType(v interface{}) bool {
- if v == nil {
- return true
- }
- switch v.(type) {
- case int64, float64, []byte, bool, time.Time:
- return true
- }
- return false
-}
-
-// DefaultParameterConverter is the default implementation of
-// ValueConverter that's used when a Stmt doesn't implement
-// ColumnConverter.
-//
-// DefaultParameterConverter returns its argument directly if
-// IsParameterSubsetType(arg) is true. Otherwise integer types are
-// converted to int64 and floats to float64. Other types result in
-// an error.
-var DefaultParameterConverter defaultConverter
-
-type defaultConverter struct{}
-
-var _ ValueConverter = defaultConverter{}
-
-func (defaultConverter) ConvertValue(v interface{}) (interface{}, error) {
- if IsParameterSubsetType(v) {
- return v, nil
- }
-
- rv := reflect.ValueOf(v)
- switch rv.Kind() {
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return rv.Int(), nil
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32:
- return int64(rv.Uint()), nil
- case reflect.Uint64:
- u64 := rv.Uint()
- if u64 >= 1<<63 {
- return nil, fmt.Errorf("uint64 values with high bit set are not supported")
- }
- return int64(u64), nil
- case reflect.Float32, reflect.Float64:
- return rv.Float(), nil
- }
- return nil, fmt.Errorf("unsupported type %s", rv.Kind())
-}
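
A brief, hedged illustration of how these converters behave; the program below is not part of the patch and only exercises the exported converters defined above:

package main

import (
	"fmt"

	"exp/sql/driver"
)

func main() {
	// Bool accepts bools, 0/1 integers, and strconv.ParseBool-style strings.
	fmt.Println(driver.Bool.ConvertValue("1"))       // true <nil>
	fmt.Println(driver.Bool.ConvertValue(uint16(0))) // false <nil>

	// Int32 range-checks its input but still reports it as an int64 subset value.
	fmt.Println(driver.Int32.ConvertValue(int64(1 << 40))) // error: overflows int32

	// DefaultParameterConverter widens small integer kinds to int64.
	fmt.Println(driver.DefaultParameterConverter.ConvertValue(int8(3))) // 3 <nil>
}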
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package driver
-
-import (
- "reflect"
- "testing"
- "time"
-)
-
-type valueConverterTest struct {
- c ValueConverter
- in interface{}
- out interface{}
- err string
-}
-
-var now = time.Now()
-
-var valueConverterTests = []valueConverterTest{
- {Bool, "true", true, ""},
- {Bool, "True", true, ""},
- {Bool, []byte("t"), true, ""},
- {Bool, true, true, ""},
- {Bool, "1", true, ""},
- {Bool, 1, true, ""},
- {Bool, int64(1), true, ""},
- {Bool, uint16(1), true, ""},
- {Bool, "false", false, ""},
- {Bool, false, false, ""},
- {Bool, "0", false, ""},
- {Bool, 0, false, ""},
- {Bool, int64(0), false, ""},
- {Bool, uint16(0), false, ""},
- {c: Bool, in: "foo", err: "sql/driver: couldn't convert \"foo\" into type bool"},
- {c: Bool, in: 2, err: "sql/driver: couldn't convert 2 into type bool"},
- {DefaultParameterConverter, now, now, ""},
-}
-
-func TestValueConverters(t *testing.T) {
- for i, tt := range valueConverterTests {
- out, err := tt.c.ConvertValue(tt.in)
- goterr := ""
- if err != nil {
- goterr = err.Error()
- }
- if goterr != tt.err {
- t.Errorf("test %d: %s(%T(%v)) error = %q; want error = %q",
- i, tt.c, tt.in, tt.in, goterr, tt.err)
- }
- if tt.err != "" {
- continue
- }
- if !reflect.DeepEqual(out, tt.out) {
- t.Errorf("test %d: %s(%T(%v)) = %v (%T); want %v (%T)",
- i, tt.c, tt.in, tt.in, out, out, tt.out, tt.out)
- }
- }
-}
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package sql
-
-import (
- "errors"
- "fmt"
- "io"
- "log"
- "strconv"
- "strings"
- "sync"
- "time"
-
- "exp/sql/driver"
-)
-
-var _ = log.Printf
-
-// fakeDriver is a fake database that implements Go's driver.Driver
-// interface, just for testing.
-//
-// It speaks a query language that's semantically similar to but
-// syntactically different and simpler than SQL. The syntax is as
-// follows:
-//
-// WIPE
-// CREATE|<tablename>|<col>=<type>,<col>=<type>,...
-// where types are: "string", [u]int{8,16,32,64}, "bool"
-// INSERT|<tablename>|col=val,col2=val2,col3=?
-// SELECT|<tablename>|projectcol1,projectcol2|filtercol=?,filtercol2=?
-//
-// When opening a fakeDriver's database, it starts empty with no
-// tables. All tables and data are stored in memory only.
-type fakeDriver struct {
- mu sync.Mutex
- openCount int
- dbs map[string]*fakeDB
-}
-
-type fakeDB struct {
- name string
-
- mu sync.Mutex
- free []*fakeConn
- tables map[string]*table
-}
-
-type table struct {
- mu sync.Mutex
- colname []string
- coltype []string
- rows []*row
-}
-
-func (t *table) columnIndex(name string) int {
- for n, nname := range t.colname {
- if name == nname {
- return n
- }
- }
- return -1
-}
-
-type row struct {
- cols []interface{} // must be same size as its table colname + coltype
-}
-
-func (r *row) clone() *row {
- nrow := &row{cols: make([]interface{}, len(r.cols))}
- copy(nrow.cols, r.cols)
- return nrow
-}
-
-type fakeConn struct {
- db *fakeDB // where to return ourselves to
-
- currTx *fakeTx
-
- // Stats for tests:
- mu sync.Mutex
- stmtsMade int
- stmtsClosed int
-}
-
-func (c *fakeConn) incrStat(v *int) {
- c.mu.Lock()
- *v++
- c.mu.Unlock()
-}
-
-type fakeTx struct {
- c *fakeConn
-}
-
-type fakeStmt struct {
- c *fakeConn
- q string // just for debugging
-
- cmd string
- table string
-
- closed bool
-
- colName []string // used by CREATE, INSERT, SELECT (selected columns)
- colType []string // used by CREATE
- colValue []interface{} // used by INSERT (mix of strings and "?" for bound params)
- placeholders int // used by INSERT/SELECT: number of ? params
-
- whereCol []string // used by SELECT (all placeholders)
-
- placeholderConverter []driver.ValueConverter // used by INSERT
-}
-
-var fdriver driver.Driver = &fakeDriver{}
-
-func init() {
- Register("test", fdriver)
-}
-
-// Supports dsn forms:
-// <dbname>
-//	<dbname>;<opts> (no options are currently supported)
-func (d *fakeDriver) Open(dsn string) (driver.Conn, error) {
- parts := strings.Split(dsn, ";")
- if len(parts) < 1 {
- return nil, errors.New("fakedb: no database name")
- }
- name := parts[0]
-
- db := d.getDB(name)
-
- d.mu.Lock()
- d.openCount++
- d.mu.Unlock()
- return &fakeConn{db: db}, nil
-}
-
-func (d *fakeDriver) getDB(name string) *fakeDB {
- d.mu.Lock()
- defer d.mu.Unlock()
- if d.dbs == nil {
- d.dbs = make(map[string]*fakeDB)
- }
- db, ok := d.dbs[name]
- if !ok {
- db = &fakeDB{name: name}
- d.dbs[name] = db
- }
- return db
-}
-
-func (db *fakeDB) wipe() {
- db.mu.Lock()
- defer db.mu.Unlock()
- db.tables = nil
-}
-
-func (db *fakeDB) createTable(name string, columnNames, columnTypes []string) error {
- db.mu.Lock()
- defer db.mu.Unlock()
- if db.tables == nil {
- db.tables = make(map[string]*table)
- }
- if _, exist := db.tables[name]; exist {
- return fmt.Errorf("table %q already exists", name)
- }
- if len(columnNames) != len(columnTypes) {
- return fmt.Errorf("create table of %q len(names) != len(types): %d vs %d",
- name, len(columnNames), len(columnTypes))
- }
- db.tables[name] = &table{colname: columnNames, coltype: columnTypes}
- return nil
-}
-
-// must be called with db.mu lock held
-func (db *fakeDB) table(table string) (*table, bool) {
- if db.tables == nil {
- return nil, false
- }
- t, ok := db.tables[table]
- return t, ok
-}
-
-func (db *fakeDB) columnType(table, column string) (typ string, ok bool) {
- db.mu.Lock()
- defer db.mu.Unlock()
- t, ok := db.table(table)
- if !ok {
- return
- }
- for n, cname := range t.colname {
- if cname == column {
- return t.coltype[n], true
- }
- }
- return "", false
-}
-
-func (c *fakeConn) Begin() (driver.Tx, error) {
- if c.currTx != nil {
- return nil, errors.New("already in a transaction")
- }
- c.currTx = &fakeTx{c: c}
- return c.currTx, nil
-}
-
-func (c *fakeConn) Close() error {
- if c.currTx != nil {
- return errors.New("can't close; in a Transaction")
- }
- if c.db == nil {
- return errors.New("can't close; already closed")
- }
- c.db = nil
- return nil
-}
-
-func checkSubsetTypes(args []interface{}) error {
- for n, arg := range args {
- switch arg.(type) {
- case int64, float64, bool, nil, []byte, string, time.Time:
- default:
- return fmt.Errorf("fakedb_test: invalid argument #%d: %v, type %T", n+1, arg, arg)
- }
- }
- return nil
-}
-
-func (c *fakeConn) Exec(query string, args []interface{}) (driver.Result, error) {
- // This is an optional interface, but it's implemented here
-	// just to check that all the args are of the proper types.
- // ErrSkip is returned so the caller acts as if we didn't
- // implement this at all.
- err := checkSubsetTypes(args)
- if err != nil {
- return nil, err
- }
- return nil, driver.ErrSkip
-}
-
-func errf(msg string, args ...interface{}) error {
- return errors.New("fakedb: " + fmt.Sprintf(msg, args...))
-}
-
-// parts are table|selectCol1,selectCol2|whereCol=?,whereCol2=?
-// (note that where columns must always contain ? marks;
-// this is just a limitation of fakedb)
-func (c *fakeConn) prepareSelect(stmt *fakeStmt, parts []string) (driver.Stmt, error) {
- if len(parts) != 3 {
- return nil, errf("invalid SELECT syntax with %d parts; want 3", len(parts))
- }
- stmt.table = parts[0]
- stmt.colName = strings.Split(parts[1], ",")
- for n, colspec := range strings.Split(parts[2], ",") {
- if colspec == "" {
- continue
- }
- nameVal := strings.Split(colspec, "=")
- if len(nameVal) != 2 {
- return nil, errf("SELECT on table %q has invalid column spec of %q (index %d)", stmt.table, colspec, n)
- }
- column, value := nameVal[0], nameVal[1]
- _, ok := c.db.columnType(stmt.table, column)
- if !ok {
- return nil, errf("SELECT on table %q references non-existent column %q", stmt.table, column)
- }
- if value != "?" {
- return nil, errf("SELECT on table %q has pre-bound value for where column %q; need a question mark",
- stmt.table, column)
- }
- stmt.whereCol = append(stmt.whereCol, column)
- stmt.placeholders++
- }
- return stmt, nil
-}
-
-// parts are table|col=type,col2=type2
-func (c *fakeConn) prepareCreate(stmt *fakeStmt, parts []string) (driver.Stmt, error) {
- if len(parts) != 2 {
- return nil, errf("invalid CREATE syntax with %d parts; want 2", len(parts))
- }
- stmt.table = parts[0]
- for n, colspec := range strings.Split(parts[1], ",") {
- nameType := strings.Split(colspec, "=")
- if len(nameType) != 2 {
- return nil, errf("CREATE table %q has invalid column spec of %q (index %d)", stmt.table, colspec, n)
- }
- stmt.colName = append(stmt.colName, nameType[0])
- stmt.colType = append(stmt.colType, nameType[1])
- }
- return stmt, nil
-}
-
-// parts are table|col=?,col2=val
-func (c *fakeConn) prepareInsert(stmt *fakeStmt, parts []string) (driver.Stmt, error) {
- if len(parts) != 2 {
- return nil, errf("invalid INSERT syntax with %d parts; want 2", len(parts))
- }
- stmt.table = parts[0]
- for n, colspec := range strings.Split(parts[1], ",") {
- nameVal := strings.Split(colspec, "=")
- if len(nameVal) != 2 {
- return nil, errf("INSERT table %q has invalid column spec of %q (index %d)", stmt.table, colspec, n)
- }
- column, value := nameVal[0], nameVal[1]
- ctype, ok := c.db.columnType(stmt.table, column)
- if !ok {
- return nil, errf("INSERT table %q references non-existent column %q", stmt.table, column)
- }
- stmt.colName = append(stmt.colName, column)
-
- if value != "?" {
- var subsetVal interface{}
- // Convert to driver subset type
- switch ctype {
- case "string":
- subsetVal = []byte(value)
- case "blob":
- subsetVal = []byte(value)
- case "int32":
- i, err := strconv.Atoi(value)
- if err != nil {
- return nil, errf("invalid conversion to int32 from %q", value)
- }
- subsetVal = int64(i) // int64 is a subset type, but not int32
- default:
- return nil, errf("unsupported conversion for pre-bound parameter %q to type %q", value, ctype)
- }
- stmt.colValue = append(stmt.colValue, subsetVal)
- } else {
- stmt.placeholders++
- stmt.placeholderConverter = append(stmt.placeholderConverter, converterForType(ctype))
- stmt.colValue = append(stmt.colValue, "?")
- }
- }
- return stmt, nil
-}
-
-func (c *fakeConn) Prepare(query string) (driver.Stmt, error) {
- if c.db == nil {
- panic("nil c.db; conn = " + fmt.Sprintf("%#v", c))
- }
- parts := strings.Split(query, "|")
- if len(parts) < 1 {
- return nil, errf("empty query")
- }
- cmd := parts[0]
- parts = parts[1:]
- stmt := &fakeStmt{q: query, c: c, cmd: cmd}
- c.incrStat(&c.stmtsMade)
- switch cmd {
- case "WIPE":
- // Nothing
- case "SELECT":
- return c.prepareSelect(stmt, parts)
- case "CREATE":
- return c.prepareCreate(stmt, parts)
- case "INSERT":
- return c.prepareInsert(stmt, parts)
- default:
- return nil, errf("unsupported command type %q", cmd)
- }
- return stmt, nil
-}
-
-func (s *fakeStmt) ColumnConverter(idx int) driver.ValueConverter {
- return s.placeholderConverter[idx]
-}
-
-func (s *fakeStmt) Close() error {
- if !s.closed {
- s.c.incrStat(&s.c.stmtsClosed)
- s.closed = true
- }
- return nil
-}
-
-var errClosed = errors.New("fakedb: statement has been closed")
-
-func (s *fakeStmt) Exec(args []interface{}) (driver.Result, error) {
- if s.closed {
- return nil, errClosed
- }
- err := checkSubsetTypes(args)
- if err != nil {
- return nil, err
- }
-
- db := s.c.db
- switch s.cmd {
- case "WIPE":
- db.wipe()
- return driver.DDLSuccess, nil
- case "CREATE":
- if err := db.createTable(s.table, s.colName, s.colType); err != nil {
- return nil, err
- }
- return driver.DDLSuccess, nil
- case "INSERT":
- return s.execInsert(args)
- }
- fmt.Printf("EXEC statement, cmd=%q: %#v\n", s.cmd, s)
- return nil, fmt.Errorf("unimplemented statement Exec command type of %q", s.cmd)
-}
-
-func (s *fakeStmt) execInsert(args []interface{}) (driver.Result, error) {
- db := s.c.db
- if len(args) != s.placeholders {
- panic("error in pkg db; should only get here if size is correct")
- }
- db.mu.Lock()
- t, ok := db.table(s.table)
- db.mu.Unlock()
- if !ok {
- return nil, fmt.Errorf("fakedb: table %q doesn't exist", s.table)
- }
-
- t.mu.Lock()
- defer t.mu.Unlock()
-
- cols := make([]interface{}, len(t.colname))
- argPos := 0
- for n, colname := range s.colName {
- colidx := t.columnIndex(colname)
- if colidx == -1 {
- return nil, fmt.Errorf("fakedb: column %q doesn't exist or dropped since prepared statement was created", colname)
- }
- var val interface{}
- if strvalue, ok := s.colValue[n].(string); ok && strvalue == "?" {
- val = args[argPos]
- argPos++
- } else {
- val = s.colValue[n]
- }
- cols[colidx] = val
- }
-
- t.rows = append(t.rows, &row{cols: cols})
- return driver.RowsAffected(1), nil
-}
-
-func (s *fakeStmt) Query(args []interface{}) (driver.Rows, error) {
- if s.closed {
- return nil, errClosed
- }
- err := checkSubsetTypes(args)
- if err != nil {
- return nil, err
- }
-
- db := s.c.db
- if len(args) != s.placeholders {
- panic("error in pkg db; should only get here if size is correct")
- }
-
- db.mu.Lock()
- t, ok := db.table(s.table)
- db.mu.Unlock()
- if !ok {
- return nil, fmt.Errorf("fakedb: table %q doesn't exist", s.table)
- }
- t.mu.Lock()
- defer t.mu.Unlock()
-
- colIdx := make(map[string]int) // select column name -> column index in table
- for _, name := range s.colName {
- idx := t.columnIndex(name)
- if idx == -1 {
- return nil, fmt.Errorf("fakedb: unknown column name %q", name)
- }
- colIdx[name] = idx
- }
-
- mrows := []*row{}
-rows:
- for _, trow := range t.rows {
- // Process the where clause, skipping non-match rows. This is lazy
- // and just uses fmt.Sprintf("%v") to test equality. Good enough
- // for test code.
- for widx, wcol := range s.whereCol {
- idx := t.columnIndex(wcol)
- if idx == -1 {
- return nil, fmt.Errorf("db: invalid where clause column %q", wcol)
- }
- tcol := trow.cols[idx]
- if bs, ok := tcol.([]byte); ok {
- // lazy hack to avoid sprintf %v on a []byte
- tcol = string(bs)
- }
- if fmt.Sprintf("%v", tcol) != fmt.Sprintf("%v", args[widx]) {
- continue rows
- }
- }
- mrow := &row{cols: make([]interface{}, len(s.colName))}
- for seli, name := range s.colName {
- mrow.cols[seli] = trow.cols[colIdx[name]]
- }
- mrows = append(mrows, mrow)
- }
-
- cursor := &rowsCursor{
- pos: -1,
- rows: mrows,
- cols: s.colName,
- }
- return cursor, nil
-}
-
-func (s *fakeStmt) NumInput() int {
- return s.placeholders
-}
-
-func (tx *fakeTx) Commit() error {
- tx.c.currTx = nil
- return nil
-}
-
-func (tx *fakeTx) Rollback() error {
- tx.c.currTx = nil
- return nil
-}
-
-type rowsCursor struct {
- cols []string
- pos int
- rows []*row
- closed bool
-
-	// a clone of slices to give out to clients, indexed by the
-	// original slice's first byte address. We clone them
- // just so we're able to corrupt them on close.
- bytesClone map[*byte][]byte
-}
-
-func (rc *rowsCursor) Close() error {
- if !rc.closed {
- for _, bs := range rc.bytesClone {
- bs[0] = 255 // first byte corrupted
- }
- }
- rc.closed = true
- return nil
-}
-
-func (rc *rowsCursor) Columns() []string {
- return rc.cols
-}
-
-func (rc *rowsCursor) Next(dest []interface{}) error {
- if rc.closed {
- return errors.New("fakedb: cursor is closed")
- }
- rc.pos++
- if rc.pos >= len(rc.rows) {
- return io.EOF // per interface spec
- }
- for i, v := range rc.rows[rc.pos].cols {
- // TODO(bradfitz): convert to subset types? naah, I
- // think the subset types should only be input to
- // driver, but the sql package should be able to handle
- // a wider range of types coming out of drivers. all
- // for ease of drivers, and to prevent drivers from
- // messing up conversions or doing them differently.
- dest[i] = v
-
- if bs, ok := v.([]byte); ok {
- if rc.bytesClone == nil {
- rc.bytesClone = make(map[*byte][]byte)
- }
- clone, ok := rc.bytesClone[&bs[0]]
- if !ok {
- clone = make([]byte, len(bs))
- copy(clone, bs)
- rc.bytesClone[&bs[0]] = clone
- }
- dest[i] = clone
- }
- }
- return nil
-}
-
-func converterForType(typ string) driver.ValueConverter {
- switch typ {
- case "bool":
- return driver.Bool
- case "int32":
- return driver.Int32
- case "string":
- return driver.String
- case "datetime":
- return driver.DefaultParameterConverter
- }
- panic("invalid fakedb column type of " + typ)
-}
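
For orientation, a hedged sketch of how this fake driver's query language is driven through the sql API (essentially what the tests below do). The helper name and the table are made up for illustration, and the sketch only works inside the package's test binary, where the "test" driver is registered by the init function above:

package sql

// fakedbSmoke is a hypothetical in-package test helper showing the fake
// driver's WIPE/CREATE/INSERT/SELECT grammar end to end.
func fakedbSmoke() error {
	db, err := Open("test", "smoke")
	if err != nil {
		return err
	}
	defer db.Close()
	for _, q := range []string{
		"WIPE",
		"CREATE|pets|name=string,age=int32",
	} {
		if _, err := db.Exec(q); err != nil {
			return err
		}
	}
	// Bound arguments are first converted to driver subset types.
	if _, err := db.Exec("INSERT|pets|name=?,age=?", "Rex", 3); err != nil {
		return err
	}
	var name string
	// Where columns must be bound with ?, as prepareSelect enforces.
	return db.QueryRow("SELECT|pets|name|age=?", 3).Scan(&name) // name == "Rex"
}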
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package sql provides a generic interface around SQL (or SQL-like)
-// databases.
-package sql
-
-import (
- "errors"
- "fmt"
- "io"
- "sync"
-
- "exp/sql/driver"
-)
-
-var drivers = make(map[string]driver.Driver)
-
-// Register makes a database driver available by the provided name.
-// If Register is called twice with the same name or if driver is nil,
-// it panics.
-func Register(name string, driver driver.Driver) {
- if driver == nil {
- panic("sql: Register driver is nil")
- }
- if _, dup := drivers[name]; dup {
- panic("sql: Register called twice for driver " + name)
- }
- drivers[name] = driver
-}
-
-// NullableString represents a string that may be null.
-// NullableString implements the ScannerInto interface so
-// it can be used as a scan destination:
-//
-// var s NullableString
-// err := db.QueryRow("SELECT name FROM foo WHERE id=?", id).Scan(&s)
-// ...
-// if s.Valid {
-// // use s.String
-// } else {
-// // NULL value
-// }
-//
-// TODO(bradfitz): add other types.
-type NullableString struct {
- String string
- Valid bool // Valid is true if String is not NULL
-}
-
-// ScanInto implements the ScannerInto interface.
-func (ms *NullableString) ScanInto(value interface{}) error {
- if value == nil {
- ms.String, ms.Valid = "", false
- return nil
- }
- ms.Valid = true
- return convertAssign(&ms.String, value)
-}
-
-// ScannerInto is an interface used by Scan.
-type ScannerInto interface {
- // ScanInto assigns a value from a database driver.
- //
- // The value will be of one of the following restricted
- // set of types:
- //
- // int64
- // float64
- // bool
- // []byte
- // nil - for NULL values
- //
- // An error should be returned if the value can not be stored
- // without loss of information.
- ScanInto(value interface{}) error
-}
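
As a hedged illustration of this interface beyond NullableString, a nullable int64 wrapper might look like the sketch below. NullableInt64 is hypothetical (the TODO above only notes that more such types are still to be added) and is written in-package because it reuses the unexported convertAssign:

// NullableInt64 is a hypothetical companion to NullableString,
// shown only to illustrate implementing ScannerInto.
type NullableInt64 struct {
	Int64 int64
	Valid bool // Valid is true if Int64 is not NULL
}

// ScanInto implements the ScannerInto interface.
func (n *NullableInt64) ScanInto(value interface{}) error {
	if value == nil {
		n.Int64, n.Valid = 0, false
		return nil
	}
	n.Valid = true
	return convertAssign(&n.Int64, value)
}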
-
-// ErrNoRows is returned by Scan when QueryRow doesn't return a
-// row. In such a case, QueryRow returns a placeholder *Row value that
-// defers this error until a Scan.
-var ErrNoRows = errors.New("sql: no rows in result set")
-
-// DB is a database handle. It's safe for concurrent use by multiple
-// goroutines.
-type DB struct {
- driver driver.Driver
- dsn string
-
- mu sync.Mutex // protects freeConn and closed
- freeConn []driver.Conn
- closed bool
-}
-
-// Open opens a database specified by its database driver name and a
-// driver-specific data source name, usually consisting of at least a
-// database name and connection information.
-//
-// Most users will open a database via a driver-specific connection
-// helper function that returns a *DB.
-func Open(driverName, dataSourceName string) (*DB, error) {
- driver, ok := drivers[driverName]
- if !ok {
- return nil, fmt.Errorf("sql: unknown driver %q (forgotten import?)", driverName)
- }
- return &DB{driver: driver, dsn: dataSourceName}, nil
-}
-
-// Close closes the database, releasing any open resources.
-func (db *DB) Close() error {
- db.mu.Lock()
- defer db.mu.Unlock()
- var err error
- for _, c := range db.freeConn {
- err1 := c.Close()
- if err1 != nil {
- err = err1
- }
- }
- db.freeConn = nil
- db.closed = true
- return err
-}
-
-func (db *DB) maxIdleConns() int {
- const defaultMaxIdleConns = 2
- // TODO(bradfitz): ask driver, if supported, for its default preference
- // TODO(bradfitz): let users override?
- return defaultMaxIdleConns
-}
-
-// conn returns a newly-opened or cached driver.Conn
-func (db *DB) conn() (driver.Conn, error) {
- db.mu.Lock()
- if db.closed {
- db.mu.Unlock()
- return nil, errors.New("sql: database is closed")
- }
- if n := len(db.freeConn); n > 0 {
- conn := db.freeConn[n-1]
- db.freeConn = db.freeConn[:n-1]
- db.mu.Unlock()
- return conn, nil
- }
- db.mu.Unlock()
- return db.driver.Open(db.dsn)
-}
-
-func (db *DB) connIfFree(wanted driver.Conn) (conn driver.Conn, ok bool) {
- db.mu.Lock()
- defer db.mu.Unlock()
- for n, conn := range db.freeConn {
- if conn == wanted {
- db.freeConn[n] = db.freeConn[len(db.freeConn)-1]
- db.freeConn = db.freeConn[:len(db.freeConn)-1]
- return wanted, true
- }
- }
- return nil, false
-}
-
-func (db *DB) putConn(c driver.Conn) {
- db.mu.Lock()
- defer db.mu.Unlock()
- if n := len(db.freeConn); !db.closed && n < db.maxIdleConns() {
- db.freeConn = append(db.freeConn, c)
- return
- }
- db.closeConn(c) // TODO(bradfitz): release lock before calling this?
-}
-
-func (db *DB) closeConn(c driver.Conn) {
- // TODO: check to see if we need this Conn for any prepared statements
- // that are active.
- c.Close()
-}
-
-// Prepare creates a prepared statement for later execution.
-func (db *DB) Prepare(query string) (*Stmt, error) {
- // TODO: check if db.driver supports an optional
- // driver.Preparer interface and call that instead, if so,
- // otherwise we make a prepared statement that's bound
- // to a connection, and to execute this prepared statement
- // we either need to use this connection (if it's free), else
- // get a new connection + re-prepare + execute on that one.
- ci, err := db.conn()
- if err != nil {
- return nil, err
- }
- defer db.putConn(ci)
- si, err := ci.Prepare(query)
- if err != nil {
- return nil, err
- }
- stmt := &Stmt{
- db: db,
- query: query,
- css: []connStmt{{ci, si}},
- }
- return stmt, nil
-}
-
-// Exec executes a query without returning any rows.
-func (db *DB) Exec(query string, args ...interface{}) (Result, error) {
- sargs, err := subsetTypeArgs(args)
- if err != nil {
- return nil, err
- }
-
- ci, err := db.conn()
- if err != nil {
- return nil, err
- }
- defer db.putConn(ci)
-
- if execer, ok := ci.(driver.Execer); ok {
- resi, err := execer.Exec(query, sargs)
- if err != driver.ErrSkip {
- if err != nil {
- return nil, err
- }
- return result{resi}, nil
- }
- }
-
- sti, err := ci.Prepare(query)
- if err != nil {
- return nil, err
- }
- defer sti.Close()
-
- resi, err := sti.Exec(sargs)
- if err != nil {
- return nil, err
- }
- return result{resi}, nil
-}
-
-// Query executes a query that returns rows, typically a SELECT.
-func (db *DB) Query(query string, args ...interface{}) (*Rows, error) {
- stmt, err := db.Prepare(query)
- if err != nil {
- return nil, err
- }
- rows, err := stmt.Query(args...)
- if err != nil {
- stmt.Close()
- return nil, err
- }
- rows.closeStmt = stmt
- return rows, nil
-}
-
-// QueryRow executes a query that is expected to return at most one row.
-// QueryRow always returns a non-nil value. Errors are deferred until
-// Row's Scan method is called.
-func (db *DB) QueryRow(query string, args ...interface{}) *Row {
- rows, err := db.Query(query, args...)
- return &Row{rows: rows, err: err}
-}
-
-// Begin starts a transaction. The isolation level is dependent on
-// the driver.
-func (db *DB) Begin() (*Tx, error) {
- ci, err := db.conn()
- if err != nil {
- return nil, err
- }
- txi, err := ci.Begin()
- if err != nil {
- db.putConn(ci)
- return nil, fmt.Errorf("sql: failed to Begin transaction: %v", err)
- }
- return &Tx{
- db: db,
- ci: ci,
- txi: txi,
- }, nil
-}
-
-// Driver returns the database's underlying driver.
-func (db *DB) Driver() driver.Driver {
- return db.driver
-}
-
-// Tx is an in-progress database transaction.
-//
-// A transaction must end with a call to Commit or Rollback.
-//
-// After a call to Commit or Rollback, all operations on the
-// transaction fail with ErrTransactionFinished.
-type Tx struct {
- db *DB
-
- // ci is owned exclusively until Commit or Rollback, at which point
- // it's returned with putConn.
- ci driver.Conn
- txi driver.Tx
-
- // cimu is held while somebody is using ci (between grabConn
- // and releaseConn)
- cimu sync.Mutex
-
- // done transitions from false to true exactly once, on Commit
- // or Rollback. once done, all operations fail with
- // ErrTransactionFinished.
- done bool
-}
-
-var ErrTransactionFinished = errors.New("sql: Transaction has already been committed or rolled back")
-
-func (tx *Tx) close() {
- if tx.done {
- panic("double close") // internal error
- }
- tx.done = true
- tx.db.putConn(tx.ci)
- tx.ci = nil
- tx.txi = nil
-}
-
-func (tx *Tx) grabConn() (driver.Conn, error) {
- if tx.done {
- return nil, ErrTransactionFinished
- }
- tx.cimu.Lock()
- return tx.ci, nil
-}
-
-func (tx *Tx) releaseConn() {
- tx.cimu.Unlock()
-}
-
-// Commit commits the transaction.
-func (tx *Tx) Commit() error {
- if tx.done {
- return ErrTransactionFinished
- }
- defer tx.close()
- return tx.txi.Commit()
-}
-
-// Rollback aborts the transaction.
-func (tx *Tx) Rollback() error {
- if tx.done {
- return ErrTransactionFinished
- }
- defer tx.close()
- return tx.txi.Rollback()
-}
-
-// Prepare creates a prepared statement for use within a transaction.
-//
-// The returned statement operates within the transaction and can no longer
-// be used once the transaction has been committed or rolled back.
-//
-// To use an existing prepared statement on this transaction, see Tx.Stmt.
-func (tx *Tx) Prepare(query string) (*Stmt, error) {
- // TODO(bradfitz): We could be more efficient here and either
- // provide a method to take an existing Stmt (created on
- // perhaps a different Conn), and re-create it on this Conn if
- // necessary. Or, better: keep a map in DB of query string to
- // Stmts, and have Stmt.Execute do the right thing and
- // re-prepare if the Conn in use doesn't have that prepared
- // statement. But we'll want to avoid caching the statement
- // in the case where we only call conn.Prepare implicitly
- // (such as in db.Exec or tx.Exec), but the caller package
- // can't be holding a reference to the returned statement.
- // Perhaps just looking at the reference count (by noting
- // Stmt.Close) would be enough. We might also want a finalizer
- // on Stmt to drop the reference count.
- ci, err := tx.grabConn()
- if err != nil {
- return nil, err
- }
- defer tx.releaseConn()
-
- si, err := ci.Prepare(query)
- if err != nil {
- return nil, err
- }
-
- stmt := &Stmt{
- db: tx.db,
- tx: tx,
- txsi: si,
- query: query,
- }
- return stmt, nil
-}
-
-// Stmt returns a transaction-specific prepared statement from
-// an existing statement.
-//
-// Example:
-// updateMoney, err := db.Prepare("UPDATE balance SET money=money+? WHERE id=?")
-// ...
-// tx, err := db.Begin()
-// ...
-// res, err := tx.Stmt(updateMoney).Exec(123.45, 98293203)
-func (tx *Tx) Stmt(stmt *Stmt) *Stmt {
- // TODO(bradfitz): optimize this. Currently this re-prepares
- // each time. This is fine for now to illustrate the API but
- // we should really cache already-prepared statements
- // per-Conn. See also the big comment in Tx.Prepare.
-
- if tx.db != stmt.db {
- return &Stmt{stickyErr: errors.New("sql: Tx.Stmt: statement from different database used")}
- }
- ci, err := tx.grabConn()
- if err != nil {
- return &Stmt{stickyErr: err}
- }
- defer tx.releaseConn()
- si, err := ci.Prepare(stmt.query)
- return &Stmt{
- db: tx.db,
- tx: tx,
- txsi: si,
- query: stmt.query,
- stickyErr: err,
- }
-}
-
-// Exec executes a query that doesn't return rows.
-// For example: an INSERT or UPDATE.
-func (tx *Tx) Exec(query string, args ...interface{}) (Result, error) {
- ci, err := tx.grabConn()
- if err != nil {
- return nil, err
- }
- defer tx.releaseConn()
-
- if execer, ok := ci.(driver.Execer); ok {
- resi, err := execer.Exec(query, args)
- if err != nil {
- return nil, err
- }
- return result{resi}, nil
- }
-
- sti, err := ci.Prepare(query)
- if err != nil {
- return nil, err
- }
- defer sti.Close()
-
- sargs, err := subsetTypeArgs(args)
- if err != nil {
- return nil, err
- }
-
- resi, err := sti.Exec(sargs)
- if err != nil {
- return nil, err
- }
- return result{resi}, nil
-}
-
-// Query executes a query that returns rows, typically a SELECT.
-func (tx *Tx) Query(query string, args ...interface{}) (*Rows, error) {
- if tx.done {
- return nil, ErrTransactionFinished
- }
- stmt, err := tx.Prepare(query)
- if err != nil {
- return nil, err
- }
- defer stmt.Close()
- return stmt.Query(args...)
-}
-
-// QueryRow executes a query that is expected to return at most one row.
-// QueryRow always returns a non-nil value. Errors are deferred until
-// Row's Scan method is called.
-func (tx *Tx) QueryRow(query string, args ...interface{}) *Row {
- rows, err := tx.Query(query, args...)
- return &Row{rows: rows, err: err}
-}
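
Taken together, Begin, Exec on the Tx, and Commit/Rollback compose as in the following hedged consumer sketch. The schema and the driver behind db are hypothetical, and the import path simply matches the package shown in this hunk:

package main

import "exp/sql"

// transfer moves cents from one account to another inside a single
// transaction; any failure rolls the whole thing back.
func transfer(db *sql.DB, from, to string, cents int64) error {
	tx, err := db.Begin()
	if err != nil {
		return err
	}
	if _, err := tx.Exec("UPDATE balance SET money=money-? WHERE id=?", cents, from); err != nil {
		tx.Rollback() // the connection is handed back to the pool via putConn
		return err
	}
	if _, err := tx.Exec("UPDATE balance SET money=money+? WHERE id=?", cents, to); err != nil {
		tx.Rollback()
		return err
	}
	return tx.Commit()
}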
-
-// connStmt is a prepared statement on a particular connection.
-type connStmt struct {
- ci driver.Conn
- si driver.Stmt
-}
-
-// Stmt is a prepared statement. Stmt is safe for concurrent use by multiple goroutines.
-type Stmt struct {
- // Immutable:
- db *DB // where we came from
- query string // that created the Stmt
- stickyErr error // if non-nil, this error is returned for all operations
-
- // If in a transaction, else both nil:
- tx *Tx
- txsi driver.Stmt
-
- mu sync.Mutex // protects the rest of the fields
- closed bool
-
- // css is a list of underlying driver statement interfaces
- // that are valid on particular connections. This is only
- // used if tx == nil and one is found that has idle
- // connections. If tx != nil, txsi is always used.
- css []connStmt
-}
-
-// Exec executes a prepared statement with the given arguments and
-// returns a Result summarizing the effect of the statement.
-func (s *Stmt) Exec(args ...interface{}) (Result, error) {
- _, releaseConn, si, err := s.connStmt()
- if err != nil {
- return nil, err
- }
- defer releaseConn()
-
- // -1 means the driver doesn't know how to count the number of
- // placeholders, so we won't sanity check input here and instead let the
- // driver deal with errors.
- if want := si.NumInput(); want != -1 && len(args) != want {
- return nil, fmt.Errorf("sql: expected %d arguments, got %d", want, len(args))
- }
-
- // Convert args to subset types.
- if cc, ok := si.(driver.ColumnConverter); ok {
- for n, arg := range args {
- args[n], err = cc.ColumnConverter(n).ConvertValue(arg)
- if err != nil {
- return nil, fmt.Errorf("sql: converting Exec argument #%d's type: %v", n, err)
- }
- if !driver.IsParameterSubsetType(args[n]) {
- return nil, fmt.Errorf("sql: driver ColumnConverter error converted %T to unsupported type %T",
- arg, args[n])
- }
- }
- } else {
- for n, arg := range args {
- args[n], err = driver.DefaultParameterConverter.ConvertValue(arg)
- if err != nil {
- return nil, fmt.Errorf("sql: converting Exec argument #%d's type: %v", n, err)
- }
- }
- }
-
- resi, err := si.Exec(args)
- if err != nil {
- return nil, err
- }
- return result{resi}, nil
-}
-
-// connStmt returns a free driver connection on which to execute the
-// statement, a function to call to release the connection, and a
-// statement bound to that connection.
-func (s *Stmt) connStmt() (ci driver.Conn, releaseConn func(), si driver.Stmt, err error) {
- if err = s.stickyErr; err != nil {
- return
- }
- s.mu.Lock()
- if s.closed {
- s.mu.Unlock()
- err = errors.New("sql: statement is closed")
- return
- }
-
- // In a transaction, we always use the connection that the
- // transaction was created on.
- if s.tx != nil {
- s.mu.Unlock()
- ci, err = s.tx.grabConn() // blocks, waiting for the connection.
- if err != nil {
- return
- }
- releaseConn = func() { s.tx.releaseConn() }
- return ci, releaseConn, s.txsi, nil
- }
-
- var cs connStmt
- match := false
- for _, v := range s.css {
- // TODO(bradfitz): lazily clean up entries in this
- // list with dead conns while enumerating
-		if _, match = s.db.connIfFree(v.ci); match {
- cs = v
- break
- }
- }
- s.mu.Unlock()
-
- // Make a new conn if all are busy.
- // TODO(bradfitz): or wait for one? make configurable later?
- if !match {
- ci, err := s.db.conn()
- if err != nil {
- return nil, nil, nil, err
- }
- si, err := ci.Prepare(s.query)
- if err != nil {
- return nil, nil, nil, err
- }
- s.mu.Lock()
- cs = connStmt{ci, si}
- s.css = append(s.css, cs)
- s.mu.Unlock()
- }
-
- conn := cs.ci
- releaseConn = func() { s.db.putConn(conn) }
- return conn, releaseConn, cs.si, nil
-}
-
-// Query executes a prepared query statement with the given arguments
-// and returns the query results as a *Rows.
-func (s *Stmt) Query(args ...interface{}) (*Rows, error) {
- ci, releaseConn, si, err := s.connStmt()
- if err != nil {
- return nil, err
- }
-
- // -1 means the driver doesn't know how to count the number of
- // placeholders, so we won't sanity check input here and instead let the
- // driver deal with errors.
- if want := si.NumInput(); want != -1 && len(args) != want {
- return nil, fmt.Errorf("sql: statement expects %d inputs; got %d", si.NumInput(), len(args))
- }
- sargs, err := subsetTypeArgs(args)
- if err != nil {
- return nil, err
- }
- rowsi, err := si.Query(sargs)
- if err != nil {
- s.db.putConn(ci)
- return nil, err
- }
- // Note: ownership of ci passes to the *Rows, to be freed
- // with releaseConn.
- rows := &Rows{
- db: s.db,
- ci: ci,
- releaseConn: releaseConn,
- rowsi: rowsi,
- }
- return rows, nil
-}
-
-// QueryRow executes a prepared query statement with the given arguments.
-// If an error occurs during the execution of the statement, that error will
-// be returned by a call to Scan on the returned *Row, which is always non-nil.
-// If the query selects no rows, the *Row's Scan will return ErrNoRows.
-// Otherwise, the *Row's Scan scans the first selected row and discards
-// the rest.
-//
-// Example usage:
-//
-// var name string
-//	err := nameByUseridStmt.QueryRow(id).Scan(&name)
-func (s *Stmt) QueryRow(args ...interface{}) *Row {
- rows, err := s.Query(args...)
- if err != nil {
- return &Row{err: err}
- }
- return &Row{rows: rows}
-}
-
-// Close closes the statement.
-func (s *Stmt) Close() error {
- if s.stickyErr != nil {
- return s.stickyErr
- }
- s.mu.Lock()
- defer s.mu.Unlock()
- if s.closed {
- return nil
- }
- s.closed = true
-
- if s.tx != nil {
- s.txsi.Close()
- } else {
- for _, v := range s.css {
- if ci, match := s.db.connIfFree(v.ci); match {
- v.si.Close()
- s.db.putConn(ci)
- } else {
- // TODO(bradfitz): care that we can't close
- // this statement because the statement's
- // connection is in use?
- }
- }
- }
- return nil
-}
-
-// Rows is the result of a query. Its cursor starts before the first row
-// of the result set. Use Next to advance through the rows:
-//
-// rows, err := db.Query("SELECT ...")
-// ...
-// for rows.Next() {
-// var id int
-// var name string
-// err = rows.Scan(&id, &name)
-// ...
-// }
-// err = rows.Err() // get any error encountered during iteration
-// ...
-type Rows struct {
- db *DB
- ci driver.Conn // owned; must call putconn when closed to release
- releaseConn func()
- rowsi driver.Rows
-
- closed bool
- lastcols []interface{}
- lasterr error
- closeStmt *Stmt // if non-nil, statement to Close on close
-}
-
-// Next prepares the next result row for reading with the Scan method.
-// It returns true on success, false if there is no next result row.
-// Every call to Scan, even the first one, must be preceded by a call
-// to Next.
-func (rs *Rows) Next() bool {
- if rs.closed {
- return false
- }
- if rs.lasterr != nil {
- return false
- }
- if rs.lastcols == nil {
- rs.lastcols = make([]interface{}, len(rs.rowsi.Columns()))
- }
- rs.lasterr = rs.rowsi.Next(rs.lastcols)
- if rs.lasterr == io.EOF {
- rs.Close()
- }
- return rs.lasterr == nil
-}
-
-// Err returns the error, if any, that was encountered during iteration.
-func (rs *Rows) Err() error {
- if rs.lasterr == io.EOF {
- return nil
- }
- return rs.lasterr
-}
-
-// Columns returns the column names.
-// Columns returns an error if the rows are closed, or if the rows
-// are from QueryRow and there was a deferred error.
-func (rs *Rows) Columns() ([]string, error) {
- if rs.closed {
- return nil, errors.New("sql: Rows are closed")
- }
- if rs.rowsi == nil {
- return nil, errors.New("sql: no Rows available")
- }
- return rs.rowsi.Columns(), nil
-}
-
-// Scan copies the columns in the current row into the values pointed
-// at by dest. If dest contains pointers to []byte, the slices should
-// not be modified and should only be considered valid until the next
-// call to Next or Scan.
-func (rs *Rows) Scan(dest ...interface{}) error {
- if rs.closed {
- return errors.New("sql: Rows closed")
- }
- if rs.lasterr != nil {
- return rs.lasterr
- }
- if rs.lastcols == nil {
- return errors.New("sql: Scan called without calling Next")
- }
- if len(dest) != len(rs.lastcols) {
- return fmt.Errorf("sql: expected %d destination arguments in Scan, not %d", len(rs.lastcols), len(dest))
- }
- for i, sv := range rs.lastcols {
- err := convertAssign(dest[i], sv)
- if err != nil {
- return fmt.Errorf("sql: Scan error on column index %d: %v", i, err)
- }
- }
- return nil
-}
-
-// Close closes the Rows, preventing further enumeration. If the
-// end is encountered, the Rows are closed automatically. Close
-// is idempotent.
-func (rs *Rows) Close() error {
- if rs.closed {
- return nil
- }
- rs.closed = true
- err := rs.rowsi.Close()
- rs.releaseConn()
- if rs.closeStmt != nil {
- rs.closeStmt.Close()
- }
- return err
-}
-
-// Row is the result of calling QueryRow to select a single row.
-type Row struct {
- // One of these two will be non-nil:
- err error // deferred error for easy chaining
- rows *Rows
-}
-
-// Scan copies the columns from the matched row into the values
-// pointed at by dest. If more than one row matches the query,
-// Scan uses the first row and discards the rest. If no row matches
-// the query, Scan returns ErrNoRows.
-func (r *Row) Scan(dest ...interface{}) error {
- if r.err != nil {
- return r.err
- }
- defer r.rows.Close()
- if !r.rows.Next() {
- return ErrNoRows
- }
- err := r.rows.Scan(dest...)
- if err != nil {
- return err
- }
-
- // TODO(bradfitz): for now we need to defensively clone all
- // []byte that the driver returned, since we're about to close
- // the Rows in our defer, when we return from this function.
- // the contract with the driver.Next(...) interface is that it
- // can return slices into read-only temporary memory that's
- // only valid until the next Scan/Close. But the TODO is that
- // for a lot of drivers, this copy will be unnecessary. We
- // should provide an optional interface for drivers to
- // implement to say, "don't worry, the []bytes that I return
- // from Next will not be modified again." (for instance, if
- // they were obtained from the network anyway) But for now we
- // don't care.
- for _, dp := range dest {
- b, ok := dp.(*[]byte)
- if !ok {
- continue
- }
- clone := make([]byte, len(*b))
- copy(clone, *b)
- *b = clone
- }
- return nil
-}
-
-// A Result summarizes an executed SQL command.
-type Result interface {
- LastInsertId() (int64, error)
- RowsAffected() (int64, error)
-}
-
-type result struct {
- driver.Result
-}
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package sql
-
-import (
- "reflect"
- "strings"
- "testing"
- "time"
-)
-
-const fakeDBName = "foo"
-
-var chrisBirthday = time.Unix(123456789, 0)
-
-func newTestDB(t *testing.T, name string) *DB {
- db, err := Open("test", fakeDBName)
- if err != nil {
- t.Fatalf("Open: %v", err)
- }
- if _, err := db.Exec("WIPE"); err != nil {
- t.Fatalf("exec wipe: %v", err)
- }
- if name == "people" {
- exec(t, db, "CREATE|people|name=string,age=int32,photo=blob,dead=bool,bdate=datetime")
- exec(t, db, "INSERT|people|name=Alice,age=?,photo=APHOTO", 1)
- exec(t, db, "INSERT|people|name=Bob,age=?,photo=BPHOTO", 2)
- exec(t, db, "INSERT|people|name=Chris,age=?,photo=CPHOTO,bdate=?", 3, chrisBirthday)
- }
- return db
-}
-
-func exec(t *testing.T, db *DB, query string, args ...interface{}) {
- _, err := db.Exec(query, args...)
- if err != nil {
- t.Fatalf("Exec of %q: %v", query, err)
- }
-}
-
-func closeDB(t *testing.T, db *DB) {
- err := db.Close()
- if err != nil {
- t.Fatalf("error closing DB: %v", err)
- }
-}
-
-func TestQuery(t *testing.T) {
- db := newTestDB(t, "people")
- defer closeDB(t, db)
- rows, err := db.Query("SELECT|people|age,name|")
- if err != nil {
- t.Fatalf("Query: %v", err)
- }
- type row struct {
- age int
- name string
- }
- got := []row{}
- for rows.Next() {
- var r row
- err = rows.Scan(&r.age, &r.name)
- if err != nil {
- t.Fatalf("Scan: %v", err)
- }
- got = append(got, r)
- }
- err = rows.Err()
- if err != nil {
- t.Fatalf("Err: %v", err)
- }
- want := []row{
- {age: 1, name: "Alice"},
- {age: 2, name: "Bob"},
- {age: 3, name: "Chris"},
- }
- if !reflect.DeepEqual(got, want) {
- t.Logf(" got: %#v\nwant: %#v", got, want)
- }
-
- // And verify that the final rows.Next() call, which hit EOF,
- // also closed the rows connection.
- if n := len(db.freeConn); n != 1 {
- t.Errorf("free conns after query hitting EOF = %d; want 1", n)
- }
-}
-
-func TestRowsColumns(t *testing.T) {
- db := newTestDB(t, "people")
- defer closeDB(t, db)
- rows, err := db.Query("SELECT|people|age,name|")
- if err != nil {
- t.Fatalf("Query: %v", err)
- }
- cols, err := rows.Columns()
- if err != nil {
- t.Fatalf("Columns: %v", err)
- }
- want := []string{"age", "name"}
- if !reflect.DeepEqual(cols, want) {
- t.Errorf("got %#v; want %#v", cols, want)
- }
-}
-
-func TestQueryRow(t *testing.T) {
- db := newTestDB(t, "people")
- defer closeDB(t, db)
- var name string
- var age int
- var birthday time.Time
-
- err := db.QueryRow("SELECT|people|age,name|age=?", 3).Scan(&age)
- if err == nil || !strings.Contains(err.Error(), "expected 2 destination arguments") {
- t.Errorf("expected error from wrong number of arguments; actually got: %v", err)
- }
-
- err = db.QueryRow("SELECT|people|bdate|age=?", 3).Scan(&birthday)
- if err != nil || !birthday.Equal(chrisBirthday) {
- t.Errorf("chris birthday = %v, err = %v; want %v", birthday, err, chrisBirthday)
- }
-
- err = db.QueryRow("SELECT|people|age,name|age=?", 2).Scan(&age, &name)
- if err != nil {
- t.Fatalf("age QueryRow+Scan: %v", err)
- }
- if name != "Bob" {
- t.Errorf("expected name Bob, got %q", name)
- }
- if age != 2 {
- t.Errorf("expected age 2, got %d", age)
- }
-
- err = db.QueryRow("SELECT|people|age,name|name=?", "Alice").Scan(&age, &name)
- if err != nil {
- t.Fatalf("name QueryRow+Scan: %v", err)
- }
- if name != "Alice" {
- t.Errorf("expected name Alice, got %q", name)
- }
- if age != 1 {
- t.Errorf("expected age 1, got %d", age)
- }
-
- var photo []byte
- err = db.QueryRow("SELECT|people|photo|name=?", "Alice").Scan(&photo)
- if err != nil {
- t.Fatalf("photo QueryRow+Scan: %v", err)
- }
- want := []byte("APHOTO")
- if !reflect.DeepEqual(photo, want) {
- t.Errorf("photo = %q; want %q", photo, want)
- }
-}
-
-func TestStatementErrorAfterClose(t *testing.T) {
- db := newTestDB(t, "people")
- defer closeDB(t, db)
- stmt, err := db.Prepare("SELECT|people|age|name=?")
- if err != nil {
- t.Fatalf("Prepare: %v", err)
- }
- err = stmt.Close()
- if err != nil {
- t.Fatalf("Close: %v", err)
- }
- var name string
- err = stmt.QueryRow("foo").Scan(&name)
- if err == nil {
- t.Errorf("expected error from QueryRow.Scan after Stmt.Close")
- }
-}
-
-func TestStatementQueryRow(t *testing.T) {
- db := newTestDB(t, "people")
- defer closeDB(t, db)
- stmt, err := db.Prepare("SELECT|people|age|name=?")
- if err != nil {
- t.Fatalf("Prepare: %v", err)
- }
- var age int
- for n, tt := range []struct {
- name string
- want int
- }{
- {"Alice", 1},
- {"Bob", 2},
- {"Chris", 3},
- } {
- if err := stmt.QueryRow(tt.name).Scan(&age); err != nil {
- t.Errorf("%d: on %q, QueryRow/Scan: %v", n, tt.name, err)
- } else if age != tt.want {
- t.Errorf("%d: age=%d, want %d", n, age, tt.want)
- }
- }
-
-}
-
-// just a test of fakedb itself
-func TestBogusPreboundParameters(t *testing.T) {
- db := newTestDB(t, "foo")
- defer closeDB(t, db)
- exec(t, db, "CREATE|t1|name=string,age=int32,dead=bool")
- _, err := db.Prepare("INSERT|t1|name=?,age=bogusconversion")
- if err == nil {
- t.Fatalf("expected error")
- }
- if err.Error() != `fakedb: invalid conversion to int32 from "bogusconversion"` {
- t.Errorf("unexpected error: %v", err)
- }
-}
-
-func TestExec(t *testing.T) {
- db := newTestDB(t, "foo")
- defer closeDB(t, db)
- exec(t, db, "CREATE|t1|name=string,age=int32,dead=bool")
- stmt, err := db.Prepare("INSERT|t1|name=?,age=?")
- if err != nil {
- t.Errorf("Stmt, err = %v, %v", stmt, err)
- }
-
- type execTest struct {
- args []interface{}
- wantErr string
- }
- execTests := []execTest{
- // Okay:
- {[]interface{}{"Brad", 31}, ""},
- {[]interface{}{"Brad", int64(31)}, ""},
- {[]interface{}{"Bob", "32"}, ""},
- {[]interface{}{7, 9}, ""},
-
- // Invalid conversions:
- {[]interface{}{"Brad", int64(0xFFFFFFFF)}, "sql: converting Exec argument #1's type: sql/driver: value 4294967295 overflows int32"},
- {[]interface{}{"Brad", "strconv fail"}, "sql: converting Exec argument #1's type: sql/driver: value \"strconv fail\" can't be converted to int32"},
-
- // Wrong number of args:
- {[]interface{}{}, "sql: expected 2 arguments, got 0"},
- {[]interface{}{1, 2, 3}, "sql: expected 2 arguments, got 3"},
- }
- for n, et := range execTests {
- _, err := stmt.Exec(et.args...)
- errStr := ""
- if err != nil {
- errStr = err.Error()
- }
- if errStr != et.wantErr {
- t.Errorf("stmt.Execute #%d: for %v, got error %q, want error %q",
- n, et.args, errStr, et.wantErr)
- }
- }
-}
-
-func TestTxStmt(t *testing.T) {
- db := newTestDB(t, "")
- defer closeDB(t, db)
- exec(t, db, "CREATE|t1|name=string,age=int32,dead=bool")
- stmt, err := db.Prepare("INSERT|t1|name=?,age=?")
- if err != nil {
- t.Fatalf("Stmt, err = %v, %v", stmt, err)
- }
- tx, err := db.Begin()
- if err != nil {
- t.Fatalf("Begin = %v", err)
- }
- _, err = tx.Stmt(stmt).Exec("Bobby", 7)
- if err != nil {
- t.Fatalf("Exec = %v", err)
- }
- err = tx.Commit()
- if err != nil {
- t.Fatalf("Commit = %v", err)
- }
-}
-
-// Tests fix for issue 2542, that we release a lock when querying on
-// a closed connection.
-func TestIssue2542Deadlock(t *testing.T) {
- db := newTestDB(t, "people")
- closeDB(t, db)
- for i := 0; i < 2; i++ {
- _, err := db.Query("SELECT|people|age,name|")
- if err == nil {
- t.Fatalf("expected error")
- }
- }
-}
-
-func TestQueryRowClosingStmt(t *testing.T) {
- db := newTestDB(t, "people")
- defer closeDB(t, db)
- var name string
- var age int
- err := db.QueryRow("SELECT|people|age,name|age=?", 3).Scan(&age, &name)
- if err != nil {
- t.Fatal(err)
- }
- if len(db.freeConn) != 1 {
- t.Fatalf("expected 1 free conn")
- }
- fakeConn := db.freeConn[0].(*fakeConn)
- if made, closed := fakeConn.stmtsMade, fakeConn.stmtsClosed; made != closed {
- t.Logf("statement close mismatch: made %d, closed %d", made, closed)
- }
-}
stdout *chanReader // receives the payload of channelData messages
stderr *chanReader // receives the payload of channelExtendedData messages
msg chan interface{} // incoming messages
-
- theyClosed bool // indicates the close msg has been received from the remote side
- weClosed bool // incidates the close msg has been sent from our side
+ theyClosed bool // indicates the close msg has been received from the remote side
+ weClosed bool // indicates the close msg has been sent from our side
}
// newClientChan returns a partially constructed *clientChan
return length
}
+func marshalUint32(to []byte, n uint32) []byte {
+ to[0] = byte(n >> 24)
+ to[1] = byte(n >> 16)
+ to[2] = byte(n >> 8)
+ to[3] = byte(n)
+ return to[4:]
+}
+
+func marshalUint64(to []byte, n uint64) []byte {
+ to[0] = byte(n >> 56)
+ to[1] = byte(n >> 48)
+ to[2] = byte(n >> 40)
+ to[3] = byte(n >> 32)
+ to[4] = byte(n >> 24)
+ to[5] = byte(n >> 16)
+ to[6] = byte(n >> 8)
+ to[7] = byte(n)
+ return to[8:]
+}
+
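For comparison, a standalone sketch (hypothetical names, using encoding/binary) of the layout produced by the two helpers above: big-endian bytes written at the front of the buffer, with the remaining tail returned so encoders can chain writes.

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	buf := make([]byte, 12)
	rest := buf
	// marshalUint32 is equivalent to PutUint32 followed by advancing the slice.
	binary.BigEndian.PutUint32(rest, 0x11223344)
	rest = rest[4:]
	// marshalUint64 likewise writes eight big-endian bytes and returns the tail.
	binary.BigEndian.PutUint64(rest, 0x5566778899aabbcc)
	fmt.Printf("% x\n", buf) // 11 22 33 44 55 66 77 88 99 aa bb cc
}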
func marshalInt(to []byte, n *big.Int) []byte {
lengthBytes := to
to = to[4:]
started bool // true once Start, Run or Shell is invoked.
copyFuncs []func() error
- errch chan error // one send per copyFunc
+ errors chan error // one send per copyFunc
// true if pipe method is active
stdinpipe, stdoutpipe, stderrpipe bool
setupFd(s)
}
- s.errch = make(chan error, len(s.copyFuncs))
+ s.errors = make(chan error, len(s.copyFuncs))
for _, fn := range s.copyFuncs {
go func(fn func() error) {
- s.errch <- fn()
+ s.errors <- fn()
}(fn)
}
return nil
var copyError error
for _ = range s.copyFuncs {
- if err := <-s.errch; err != nil && copyError == nil {
+ if err := <-s.errors; err != nil && copyError == nil {
copyError = err
}
}
"fmt"
"io"
"net"
+ "time"
)
// Dial initiates a connection to the addr from the remote host.
return t.raddr
}
-// SetTimeout sets the read and write deadlines associated
+// SetDeadline sets the read and write deadlines associated
// with the connection.
-func (t *tcpchanconn) SetTimeout(nsec int64) error {
- if err := t.SetReadTimeout(nsec); err != nil {
+func (t *tcpchanconn) SetDeadline(deadline time.Time) error {
+ if err := t.SetReadDeadline(deadline); err != nil {
return err
}
- return t.SetWriteTimeout(nsec)
+ return t.SetWriteDeadline(deadline)
}
-// SetReadTimeout sets the time (in nanoseconds) that
-// Read will wait for data before returning an error with Timeout() == true.
-// Setting nsec == 0 (the default) disables the deadline.
-func (t *tcpchanconn) SetReadTimeout(nsec int64) error {
- return errors.New("ssh: tcpchan: timeout not supported")
+// SetReadDeadline sets the read deadline.
+// A zero value for the deadline means Read will not time out.
+// After the deadline, the error from Read will implement net.Error
+// with Timeout() == true.
+func (t *tcpchanconn) SetReadDeadline(deadline time.Time) error {
+ return errors.New("ssh: tcpchan: deadline not supported")
}
-// SetWriteTimeout sets the time (in nanoseconds) that
-// Write will wait to send its data before returning an error with Timeout() == true.
-// Setting nsec == 0 (the default) disables the deadline.
-// Even if write times out, it may return n > 0, indicating that
-// some of the data was successfully written.
-func (t *tcpchanconn) SetWriteTimeout(nsec int64) error {
- return errors.New("ssh: tcpchan: timeout not supported")
+// SetWriteDeadline exists to satisfy the net.Conn interface
+// but is not implemented by this type. It always returns an error.
+func (t *tcpchanconn) SetWriteDeadline(deadline time.Time) error {
+ return errors.New("ssh: tcpchan: deadline not supported")
}
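A hedged sketch of how a caller would exercise the net.Conn deadline methods above; the package and helper names are hypothetical. Since this type reports deadlines as unsupported, the helper has to treat the error as fatal for the timed read.

package example

import (
	"net"
	"time"
)

// ReadWithDeadline asks the connection for a five-second read deadline and
// falls through to Read only if deadlines are supported (tcpchanconn above
// is not, and returns an error instead).
func ReadWithDeadline(c net.Conn, buf []byte) (int, error) {
	if err := c.SetReadDeadline(time.Now().Add(5 * time.Second)); err != nil {
		return 0, err
	}
	return c.Read(buf)
}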
"crypto"
"crypto/cipher"
"crypto/hmac"
+ "crypto/sha1"
"crypto/subtle"
"errors"
"hash"
generateKeyMaterial(key, d.keyTag, K, H, sessionId, h)
generateKeyMaterial(macKey, d.macKeyTag, K, H, sessionId, h)
- c.mac = truncatingMAC{12, hmac.NewSHA1(macKey)}
+ c.mac = truncatingMAC{12, hmac.New(sha1.New, macKey)}
cipher, err := cipherMode.createCipher(key, iv)
if err != nil {
return t.length
}
+func (t truncatingMAC) BlockSize() int { return t.hmac.BlockSize() }
+
// maxVersionStringBytes is the maximum number of bytes that we'll accept as a
// version string. In the event that the client is talking a different protocol
// we need to set a limit otherwise we will keep using more and more memory
// Read version string as specified by RFC 4253, section 4.2.
func readVersion(r io.Reader) ([]byte, error) {
versionString := make([]byte, 0, 64)
- var ok, seenCR bool
+ var ok bool
var buf [1]byte
forEachByte:
for len(versionString) < maxVersionStringBytes {
if err != nil {
return nil, err
}
- b := buf[0]
-
- if !seenCR {
- if b == '\r' {
- seenCR = true
- }
- } else {
- if b == '\n' {
- ok = true
- break forEachByte
- } else {
- seenCR = false
- }
+ // The RFC says that the version should be terminated with \r\n
+ // but several SSH servers actually only send a \n.
+ if buf[0] == '\n' {
+ ok = true
+ break forEachByte
}
- versionString = append(versionString, b)
+ versionString = append(versionString, buf[0])
}
if !ok {
- return nil, errors.New("failed to read version string")
+ return nil, errors.New("ssh: failed to read version string")
}
- // We need to remove the CR from versionString
- return versionString[:len(versionString)-1], nil
+ // There might be a '\r' on the end which we should remove.
+ if len(versionString) > 0 && versionString[len(versionString)-1] == '\r' {
+ versionString = versionString[:len(versionString)-1]
+ }
+ return versionString, nil
}
)
func TestReadVersion(t *testing.T) {
- buf := []byte(serverVersion)
+ buf := serverVersion
result, err := readVersion(bufio.NewReader(bytes.NewBuffer(buf)))
if err != nil {
t.Errorf("readVersion didn't read version correctly: %s", err)
}
}
+func TestReadVersionWithJustLF(t *testing.T) {
+ var buf []byte
+ buf = append(buf, serverVersion...)
+ buf = buf[:len(buf)-1]
+ buf[len(buf)-1] = '\n'
+ result, err := readVersion(bufio.NewReader(bytes.NewBuffer(buf)))
+ if err != nil {
+ t.Error("readVersion failed to handle just a \n")
+ }
+ if !bytes.Equal(buf[:len(buf)-1], result) {
+ t.Errorf("version read did not match expected: got %x, want %x", result, buf[:len(buf)-1])
+ }
+}
+
func TestReadVersionTooLong(t *testing.T) {
buf := make([]byte, maxVersionStringBytes+1)
if _, err := readVersion(bufio.NewReader(bytes.NewBuffer(buf))); err == nil {
}
func TestReadVersionWithoutCRLF(t *testing.T) {
- buf := []byte(serverVersion)
+ buf := serverVersion
buf = buf[:len(buf)-1]
if _, err := readVersion(bufio.NewReader(bytes.NewBuffer(buf))); err == nil {
t.Error("readVersion did not notice \\n was missing")
var fset = token.NewFileSet()
-// TODO(gri) This functionality should be in token.Fileset.
-func getFile(filename string) *token.File {
- for f := range fset.Files() {
+func getFile(filename string) (file *token.File) {
+ fset.Iterate(func(f *token.File) bool {
if f.Name() == filename {
- return f
+ file = f
+ return false // end iteration
}
- }
- return nil
+ return true
+ })
+ return file
}
-// TODO(gri) This functionality should be in token.Fileset.
func getPos(filename string, offset int) token.Pos {
if f := getFile(filename); f != nil {
return f.Pos(offset)
return token.NoPos
}
-// TODO(gri) Need to revisit parser interface. We should be able to use parser.ParseFiles
-// or a similar function instead.
func parseFiles(t *testing.T, testname string, filenames []string) (map[string]*ast.File, error) {
files := make(map[string]*ast.File)
var errors scanner.ErrorList
for _, error := range errors.(scanner.ErrorList) {
// error.Pos is a token.Position, but we want
// a token.Pos so we can do a map lookup
- // TODO(gri) Need to move scanner.Errors over
- // to use token.Pos and file set info.
pos := getPos(error.Pos.Filename, error.Pos.Offset)
if msg, found := expected[pos]; found {
// we expect a message at pos; check if it matches
return &Func{Params: params, Results: results, IsVariadic: isVariadic}
}
-// MethodSpec = ( identifier | ExportedName ) Signature .
+// MethodOrEmbedSpec = Name [ Signature ] .
//
-func (p *gcParser) parseMethodSpec() *ast.Object {
- if p.tok == scanner.Ident {
- p.expect(scanner.Ident)
- } else {
- p.parseExportedName()
+func (p *gcParser) parseMethodOrEmbedSpec() *ast.Object {
+ p.parseName()
+ if p.tok == '(' {
+ p.parseSignature()
+ // TODO(gri) compute method object
+ return ast.NewObj(ast.Fun, "_")
}
- p.parseSignature()
-
- // TODO(gri) compute method object
- return ast.NewObj(ast.Fun, "_")
+ // TODO lookup name and return that type
+ return ast.NewObj(ast.Typ, "_")
}
-// InterfaceType = "interface" "{" [ MethodList ] "}" .
-// MethodList = MethodSpec { ";" MethodSpec } .
+// InterfaceType = "interface" "{" [ MethodOrEmbedList ] "}" .
+// MethodOrEmbedList = MethodOrEmbedSpec { ";" MethodOrEmbedSpec } .
//
func (p *gcParser) parseInterfaceType() Type {
var methods ObjList
parseMethod := func() {
- meth := p.parseMethodSpec()
- methods = append(methods, meth)
+ switch m := p.parseMethodOrEmbedSpec(); m.Kind {
+ case ast.Typ:
+ // TODO expand embedded methods
+ case ast.Fun:
+ methods = append(methods, m)
+ }
}
p.expectKeyword("interface")
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package utf8string provides an efficient way to index strings by rune rather than by byte.
+package utf8string
+
+import (
+ "errors"
+ "unicode/utf8"
+)
+
+// String wraps a regular string with a small structure that provides more
+// efficient indexing by code point index, as opposed to byte index.
+// Scanning incrementally forwards or backwards is O(1) per index operation
+// (although not as fast as a range clause going forwards). Random access is
+// O(N) in the length of the string, but the overhead is less than always
+// scanning from the beginning.
+// If the string is ASCII, random access is O(1).
+// Unlike the built-in string type, String has internal mutable state and
+// is not thread-safe.
+type String struct {
+ str string
+ numRunes int
+ // If width > 0, the rune at runePos starts at bytePos and has the specified width.
+ width int
+ bytePos int
+ runePos int
+ nonASCII int // byte index of the first non-ASCII rune.
+}
+
+// NewString returns a new UTF-8 string with the provided contents.
+func NewString(contents string) *String {
+ return new(String).Init(contents)
+}
+
+// Init initializes an existing String to hold the provided contents.
+// It returns a pointer to the initialized String.
+func (s *String) Init(contents string) *String {
+ s.str = contents
+ s.bytePos = 0
+ s.runePos = 0
+ for i := 0; i < len(contents); i++ {
+ if contents[i] >= utf8.RuneSelf {
+ // Not ASCII.
+ s.numRunes = utf8.RuneCountInString(contents)
+ _, s.width = utf8.DecodeRuneInString(contents)
+ s.nonASCII = i
+ return s
+ }
+ }
+ // ASCII is simple. Also, the empty string is ASCII.
+ s.numRunes = len(contents)
+ s.width = 0
+ s.nonASCII = len(contents)
+ return s
+}
+
+// String returns the contents of the String. This method also means the
+// String is directly printable by fmt.Print.
+func (s *String) String() string {
+ return s.str
+}
+
+// RuneCount returns the number of runes (Unicode code points) in the String.
+func (s *String) RuneCount() int {
+ return s.numRunes
+}
+
+// IsASCII returns a boolean indicating whether the String contains only ASCII bytes.
+func (s *String) IsASCII() bool {
+ return s.width == 0
+}
+
+// Slice returns the string sliced at rune positions [i:j].
+func (s *String) Slice(i, j int) string {
+ // ASCII is easy. Let the compiler catch the indexing error if there is one.
+ if j < s.nonASCII {
+ return s.str[i:j]
+ }
+ if i < 0 || j > s.numRunes || i > j {
+ panic(sliceOutOfRange)
+ }
+ if i == j {
+ return ""
+ }
+ // For non-ASCII, after At(i), bytePos is always the position of the indexed character.
+ var low, high int
+ switch {
+ case i < s.nonASCII:
+ low = i
+ case i == s.numRunes:
+ low = len(s.str)
+ default:
+ s.At(i)
+ low = s.bytePos
+ }
+ switch {
+ case j == s.numRunes:
+ high = len(s.str)
+ default:
+ s.At(j)
+ high = s.bytePos
+ }
+ return s.str[low:high]
+}
+
+// At returns the rune with index i in the String. The sequence of runes is the same
+// as iterating over the contents with a "for range" clause.
+func (s *String) At(i int) rune {
+ // ASCII is easy. Let the compiler catch the indexing error if there is one.
+ if i < s.nonASCII {
+ return rune(s.str[i])
+ }
+
+ // Now we do need to know the index is valid.
+ if i < 0 || i >= s.numRunes {
+ panic(outOfRange)
+ }
+
+ var r rune
+
+ // Five easy common cases: within 1 spot of bytePos/runePos, or the beginning, or the end.
+ // With these cases, all scans from beginning or end work in O(1) time per rune.
+ switch {
+
+ case i == s.runePos-1: // backing up one rune
+ r, s.width = utf8.DecodeLastRuneInString(s.str[0:s.bytePos])
+ s.runePos = i
+ s.bytePos -= s.width
+ return r
+ case i == s.runePos+1: // moving ahead one rune
+ s.runePos = i
+ s.bytePos += s.width
+ fallthrough
+ case i == s.runePos:
+ r, s.width = utf8.DecodeRuneInString(s.str[s.bytePos:])
+ return r
+ case i == 0: // start of string
+ r, s.width = utf8.DecodeRuneInString(s.str)
+ s.runePos = 0
+ s.bytePos = 0
+ return r
+
+ case i == s.numRunes-1: // last rune in string
+ r, s.width = utf8.DecodeLastRuneInString(s.str)
+ s.runePos = i
+ s.bytePos = len(s.str) - s.width
+ return r
+ }
+
+ // We need to do a linear scan. There are three places to start from:
+ // 1) The beginning
+ // 2) bytePos/runePos.
+ // 3) The end
+ // Choose the closest in rune count, scanning backwards if necessary.
+ forward := true
+ if i < s.runePos {
+ // Between beginning and pos. Which is closer?
+ // Since both i and runePos are guaranteed >= nonASCII, that's the
+ // lowest location we need to start from.
+ if i < (s.runePos-s.nonASCII)/2 {
+ // Scan forward from beginning
+ s.bytePos, s.runePos = s.nonASCII, s.nonASCII
+ } else {
+ // Scan backwards from where we are
+ forward = false
+ }
+ } else {
+ // Between pos and end. Which is closer?
+ if i-s.runePos < (s.numRunes-s.runePos)/2 {
+ // Scan forward from pos
+ } else {
+ // Scan backwards from end
+ s.bytePos, s.runePos = len(s.str), s.numRunes
+ forward = false
+ }
+ }
+ if forward {
+ // TODO: Is it much faster to use a range loop for this scan?
+ for {
+ r, s.width = utf8.DecodeRuneInString(s.str[s.bytePos:])
+ if s.runePos == i {
+ break
+ }
+ s.runePos++
+ s.bytePos += s.width
+ }
+ } else {
+ for {
+ r, s.width = utf8.DecodeLastRuneInString(s.str[0:s.bytePos])
+ s.runePos--
+ s.bytePos -= s.width
+ if s.runePos == i {
+ break
+ }
+ }
+ }
+ return r
+}
+
+var outOfRange = errors.New("utf8.String: index out of range")
+var sliceOutOfRange = errors.New("utf8.String: slice index out of range")
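A small usage sketch for the package above; the sample text is illustrative and the exp/utf8string import path follows this tree's layout.

package main

import (
	"fmt"

	"exp/utf8string"
)

func main() {
	s := utf8string.NewString("日a本b語")
	fmt.Println(s.RuneCount())  // 5
	fmt.Printf("%c\n", s.At(2)) // 本
	fmt.Println(s.Slice(1, 4))  // a本b
}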
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package utf8string
+
+import (
+ "math/rand"
+ "testing"
+ "unicode/utf8"
+)
+
+var testStrings = []string{
+ "",
+ "abcd",
+ "☺☻☹",
+ "日a本b語ç日ð本Ê語þ日¥本¼語i日©",
+ "日a本b語ç日ð本Ê語þ日¥本¼語i日©日a本b語ç日ð本Ê語þ日¥本¼語i日©日a本b語ç日ð本Ê語þ日¥本¼語i日©",
+ "\x80\x80\x80\x80",
+}
+
+func TestScanForwards(t *testing.T) {
+ for _, s := range testStrings {
+ runes := []rune(s)
+ str := NewString(s)
+ if str.RuneCount() != len(runes) {
+ t.Errorf("%s: expected %d runes; got %d", s, len(runes), str.RuneCount())
+ break
+ }
+ for i, expect := range runes {
+ got := str.At(i)
+ if got != expect {
+ t.Errorf("%s[%d]: expected %c (%U); got %c (%U)", s, i, expect, expect, got, got)
+ }
+ }
+ }
+}
+
+func TestScanBackwards(t *testing.T) {
+ for _, s := range testStrings {
+ runes := []rune(s)
+ str := NewString(s)
+ if str.RuneCount() != len(runes) {
+ t.Errorf("%s: expected %d runes; got %d", s, len(runes), str.RuneCount())
+ break
+ }
+ for i := len(runes) - 1; i >= 0; i-- {
+ expect := runes[i]
+ got := str.At(i)
+ if got != expect {
+ t.Errorf("%s[%d]: expected %c (%U); got %c (%U)", s, i, expect, expect, got, got)
+ }
+ }
+ }
+}
+
+func randCount() int {
+ if testing.Short() {
+ return 100
+ }
+ return 100000
+}
+
+func TestRandomAccess(t *testing.T) {
+ for _, s := range testStrings {
+ if len(s) == 0 {
+ continue
+ }
+ runes := []rune(s)
+ str := NewString(s)
+ if str.RuneCount() != len(runes) {
+ t.Errorf("%s: expected %d runes; got %d", s, len(runes), str.RuneCount())
+ break
+ }
+ for j := 0; j < randCount(); j++ {
+ i := rand.Intn(len(runes))
+ expect := runes[i]
+ got := str.At(i)
+ if got != expect {
+ t.Errorf("%s[%d]: expected %c (%U); got %c (%U)", s, i, expect, expect, got, got)
+ }
+ }
+ }
+}
+
+func TestRandomSliceAccess(t *testing.T) {
+ for _, s := range testStrings {
+ if len(s) == 0 || s[0] == '\x80' { // the bad-UTF-8 string fools this simple test
+ continue
+ }
+ runes := []rune(s)
+ str := NewString(s)
+ if str.RuneCount() != len(runes) {
+ t.Errorf("%s: expected %d runes; got %d", s, len(runes), str.RuneCount())
+ break
+ }
+ for k := 0; k < randCount(); k++ {
+ i := rand.Intn(len(runes))
+ j := rand.Intn(len(runes) + 1)
+ if i > j { // include empty strings
+ continue
+ }
+ expect := string(runes[i:j])
+ got := str.Slice(i, j)
+ if got != expect {
+ t.Errorf("%s[%d:%d]: expected %q got %q", s, i, j, expect, got)
+ }
+ }
+ }
+}
+
+func TestLimitSliceAccess(t *testing.T) {
+ for _, s := range testStrings {
+ str := NewString(s)
+ if str.Slice(0, 0) != "" {
+ t.Error("failure with empty slice at beginning")
+ }
+ nr := utf8.RuneCountInString(s)
+ if str.Slice(nr, nr) != "" {
+ t.Error("failure with empty slice at end")
+ }
+ }
+}
var mallocBuf bytes.Buffer
+// gccgo numbers are different because gccgo does not have escape
+// analysis yet.
var mallocTest = []struct {
count int
desc string
fn func()
}{
- {0, `Sprintf("")`, func() { Sprintf("") }},
- {1, `Sprintf("xxx")`, func() { Sprintf("xxx") }},
- {1, `Sprintf("%x")`, func() { Sprintf("%x", 7) }},
- {2, `Sprintf("%s")`, func() { Sprintf("%s", "hello") }},
- {1, `Sprintf("%x %x")`, func() { Sprintf("%x %x", 7, 112) }},
- {1, `Sprintf("%g")`, func() { Sprintf("%g", 3.14159) }},
- {0, `Fprintf(buf, "%x %x %x")`, func() { mallocBuf.Reset(); Fprintf(&mallocBuf, "%x %x %x", 7, 8, 9) }},
- {1, `Fprintf(buf, "%s")`, func() { mallocBuf.Reset(); Fprintf(&mallocBuf, "%s", "hello") }},
+ {5, `Sprintf("")`, func() { Sprintf("") }},
+ {5, `Sprintf("xxx")`, func() { Sprintf("xxx") }},
+ {5, `Sprintf("%x")`, func() { Sprintf("%x", 7) }},
+ {5, `Sprintf("%s")`, func() { Sprintf("%s", "hello") }},
+ {5, `Sprintf("%x %x")`, func() { Sprintf("%x %x", 7, 112) }},
+ // For %g we use a float32, not float64, to guarantee passing the argument
+ // does not need to allocate memory to store the result in a pointer-sized word.
+ {20, `Sprintf("%g")`, func() { Sprintf("%g", float32(3.14159)) }},
+ {5, `Fprintf(buf, "%x %x %x")`, func() { mallocBuf.Reset(); Fprintf(&mallocBuf, "%x %x %x", 7, 8, 9) }},
+ {5, `Fprintf(buf, "%s")`, func() { mallocBuf.Reset(); Fprintf(&mallocBuf, "%s", "hello") }},
}
var _ bytes.Buffer
func TestCountMallocs(t *testing.T) {
- if testing.Short() {
- return
- }
for _, mt := range mallocTest {
const N = 100
runtime.UpdateMemStats()
}
runtime.UpdateMemStats()
mallocs += runtime.MemStats.Mallocs
- if mallocs/N != uint64(mt.count) {
+ if mallocs/N > uint64(mt.count) {
t.Errorf("%s: expected %d mallocs, got %d", mt.desc, mt.count, mallocs/N)
}
}
continue
}
tree = t
- pkg = path[len(tpath):]
+ pkg = filepath.ToSlash(path[len(tpath):])
return
}
err = fmt.Errorf("path %q not inside a GOPATH", path)
return
}
tree = defaultTree
- pkg = path
+ pkg = filepath.ToSlash(path)
for _, t := range Path {
if t.HasSrc(pkg) {
tree = t
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
package pkgtest
import "fmt"
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
package pkgtest_test
import "pkgtest"
html_endp = []byte("</p>\n")
html_pre = []byte("<pre>")
html_endpre = []byte("</pre>\n")
- html_h = []byte("<h3>")
+ html_h = []byte(`<h3 id="`)
+ html_hq = []byte(`">`)
html_endh = []byte("</h3>\n")
)
lines []string
}
+var nonAlphaNumRx = regexp.MustCompile(`[^a-zA-Z0-9]`)
+
+func anchorID(line string) string {
+ return nonAlphaNumRx.ReplaceAllString(line, "_")
+}
+
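A standalone sketch of the id generation above (anchorID itself is unexported): every non-alphanumeric byte of the heading becomes an underscore, and that string is what lands in the generated <h3 id="..."> attribute.

package main

import (
	"fmt"
	"regexp"
)

func main() {
	nonAlphaNum := regexp.MustCompile(`[^a-zA-Z0-9]`)
	fmt.Println(nonAlphaNum.ReplaceAllString("Heading with spaces & symbols", "_"))
	// Output: Heading_with_spaces___symbols
}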
// ToHTML converts comment text to formatted HTML.
// The comment was prepared by DocReader,
// so it is known not to have leading, trailing blank lines
w.Write(html_endp)
case opHead:
w.Write(html_h)
+ id := ""
for _, line := range b.lines {
+ if id == "" {
+ id = anchorID(line)
+ w.Write([]byte(id))
+ w.Write(html_hq)
+ }
commentEscape(w, line, true)
}
+ if id == "" {
+ w.Write(html_hq)
+ }
w.Write(html_endh)
case opPre:
w.Write(html_pre)
Doc string
Name string
ImportPath string
- Imports []string // TODO(gri) this field is not computed at the moment
+ Imports []string
Filenames []string
Consts []*Value
Types []*Type
type Method struct {
*Func
// TODO(gri) The following fields are not set at the moment.
- Recv *Type // original receiver base type
- Level int // embedding level; 0 means Func is not embedded
+ Origin *Type // original receiver base type
+ Level int // embedding level; 0 means Func is not embedded
}
// Type is the documentation for type declaration.
import (
"bytes"
- "fmt"
- "go/ast"
+ "flag"
"go/parser"
+ "go/printer"
"go/token"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
"testing"
"text/template"
)
-type sources map[string]string // filename -> file contents
+var update = flag.Bool("update", false, "update golden (.out) files")
-type testCase struct {
- name string
- importPath string
- mode Mode
- srcs sources
- doc string
+const dataDir = "testdata"
+
+var templateTxt = readTemplate("template.txt")
+
+func readTemplate(filename string) *template.Template {
+ t := template.New(filename)
+ t.Funcs(template.FuncMap{
+ "node": nodeFmt,
+ "synopsis": synopsisFmt,
+ })
+ return template.Must(t.ParseFiles(filepath.Join(dataDir, filename)))
}
-var tests = make(map[string]*testCase)
-
-// To register a new test case, use the pattern:
-//
-// var _ = register(&testCase{ ... })
-//
-// (The result value of register is always 0 and only present to enable the pattern.)
-//
-func register(test *testCase) int {
- if _, found := tests[test.name]; found {
- panic(fmt.Sprintf("registration failed: test case %q already exists", test.name))
- }
- tests[test.name] = test
- return 0
+func nodeFmt(node interface{}, fset *token.FileSet) string {
+ var buf bytes.Buffer
+ printer.Fprint(&buf, fset, node)
+ return strings.Replace(strings.TrimSpace(buf.String()), "\n", "\n\t", -1)
}
-func runTest(t *testing.T, test *testCase) {
- // create AST
- fset := token.NewFileSet()
- var pkg ast.Package
- pkg.Files = make(map[string]*ast.File)
- for filename, src := range test.srcs {
- file, err := parser.ParseFile(fset, filename, src, parser.ParseComments)
- if err != nil {
- t.Errorf("test %s: %v", test.name, err)
- return
- }
- switch {
- case pkg.Name == "":
- pkg.Name = file.Name.Name
- case pkg.Name != file.Name.Name:
- t.Errorf("test %s: different package names in test files", test.name)
- return
+func synopsisFmt(s string) string {
+ const n = 64
+ if len(s) > n {
+ // cut off excess text and go back to a word boundary
+ s = s[0:n]
+ if i := strings.LastIndexAny(s, "\t\n "); i >= 0 {
+ s = s[0:i]
}
- pkg.Files[filename] = file
+ s = strings.TrimSpace(s) + " ..."
}
+ return "// " + strings.Replace(s, "\n", " ", -1)
+}
- doc := New(&pkg, test.importPath, test.mode).String()
- if doc != test.doc {
- //TODO(gri) Enable this once the sorting issue of comments is fixed
- //t.Errorf("test %s\n\tgot : %s\n\twant: %s", test.name, doc, test.doc)
- }
+func isGoFile(fi os.FileInfo) bool {
+ name := fi.Name()
+ return !fi.IsDir() &&
+ len(name) > 0 && name[0] != '.' && // ignore .files
+ filepath.Ext(name) == ".go"
+}
+
+type bundle struct {
+ *Package
+ FSet *token.FileSet
}
func Test(t *testing.T) {
- for _, test := range tests {
- runTest(t, test)
+ // get all packages
+ fset := token.NewFileSet()
+ pkgs, err := parser.ParseDir(fset, dataDir, isGoFile, parser.ParseComments)
+ if err != nil {
+ t.Fatal(err)
}
-}
-// ----------------------------------------------------------------------------
-// Printing support
+ // test all packages
+ for _, pkg := range pkgs {
+ importpath := dataDir + "/" + pkg.Name
+ doc := New(pkg, importpath, 0)
-func (pkg *Package) String() string {
- var buf bytes.Buffer
- docText.Execute(&buf, pkg) // ignore error - test will fail w/ incorrect output
- return buf.String()
-}
+ // golden files always use / in filenames - canonicalize them
+ for i, filename := range doc.Filenames {
+ doc.Filenames[i] = filepath.ToSlash(filename)
+ }
+
+ // print documentation
+ var buf bytes.Buffer
+ if err := templateTxt.Execute(&buf, bundle{doc, fset}); err != nil {
+ t.Error(err)
+ continue
+ }
+ got := buf.Bytes()
+
+ // update golden file if necessary
+ golden := filepath.Join(dataDir, pkg.Name+".out")
+ if *update {
+ err := ioutil.WriteFile(golden, got, 0644)
+ if err != nil {
+ t.Error(err)
+ }
+ continue
+ }
-// TODO(gri) complete template
-var docText = template.Must(template.New("docText").Parse(
- `
-PACKAGE {{.Name}}
-DOC {{printf "%q" .Doc}}
-IMPORTPATH {{.ImportPath}}
-FILENAMES {{.Filenames}}
-`))
-
-// ----------------------------------------------------------------------------
-// Test cases
-
-// Test that all package comments and bugs are collected,
-// and that the importPath is correctly set.
-//
-var _ = register(&testCase{
- name: "p",
- importPath: "p",
- srcs: sources{
- "p1.go": "// comment 1\npackage p\n//BUG(uid): bug1",
- "p0.go": "// comment 0\npackage p\n// BUG(uid): bug0",
- },
- doc: `
-PACKAGE p
-DOC "comment 0\n\ncomment 1\n"
-IMPORTPATH p
-FILENAMES [p0.go p1.go]
-`,
-})
-
-// Test basic functionality.
-//
-var _ = register(&testCase{
- name: "p1",
- importPath: "p",
- srcs: sources{
- "p.go": `
-package p
-import "a"
-const pi = 3.14 // pi
-type T struct{} // T
-var V T // v
-func F(x int) int {} // F
-`,
- },
- doc: `
-PACKAGE p
-DOC ""
-IMPORTPATH p
-FILENAMES [p.go]
-`,
-})
+ // get golden file
+ want, err := ioutil.ReadFile(golden)
+ if err != nil {
+ t.Error(err)
+ continue
+ }
+
+ // compare
+ if bytes.Compare(got, want) != 0 {
+ t.Errorf("package %s\n\tgot:\n%s\n\twant:\n%s", pkg.Name, got, want)
+ }
+ }
+}
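Note that running these tests with the -update flag defined above rewrites the .out golden files under testdata from the freshly generated output instead of comparing against them; without the flag, the generated text must match the golden file byte for byte.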
func (doc *docReader) filterSpec(spec ast.Spec) bool {
switch s := spec.(type) {
+ case *ast.ImportSpec:
+ // always keep imports so we can collect them
+ return true
case *ast.ValueSpec:
s.Names = filterIdentList(s.Names)
if len(s.Names) > 0 {
"go/token"
"regexp"
"sort"
+ "strconv"
)
// ----------------------------------------------------------------------------
doc *ast.CommentGroup // package documentation, if any
pkgName string
mode Mode
+ imports map[string]int
values []*ast.GenDecl // consts and vars
types map[string]*typeInfo
embedded map[string]*typeInfo // embedded types, possibly not exported
func (doc *docReader) init(pkgName string, mode Mode) {
doc.pkgName = pkgName
doc.mode = mode
+ doc.imports = make(map[string]int)
doc.types = make(map[string]*typeInfo)
doc.embedded = make(map[string]*typeInfo)
doc.funcs = make(map[string]*ast.FuncDecl)
case *ast.GenDecl:
if len(d.Specs) > 0 {
switch d.Tok {
+ case token.IMPORT:
+ // imports are handled individually
+ for _, spec := range d.Specs {
+ if import_, err := strconv.Unquote(spec.(*ast.ImportSpec).Path.Value); err == nil {
+ doc.imports[import_] = 1
+ }
+ }
case token.CONST, token.VAR:
// constants and variables are always handled as a group
doc.addValue(d)
// ----------------------------------------------------------------------------
// Conversion to external representation
+func (doc *docReader) makeImports() []string {
+ list := make([]string, len(doc.imports))
+ i := 0
+ for import_ := range doc.imports {
+ list[i] = import_
+ i++
+ }
+ sort.Strings(list)
+ return list
+}
+
type sortValue []*Value
func (p sortValue) Len() int { return len(p) }
// doc.funcs and thus must be called before any other
// function consuming those lists
p.Types = doc.makeTypes(doc.types)
+ p.Imports = doc.makeImports()
p.Consts = makeValues(doc.values, token.CONST)
p.Vars = makeValues(doc.values, token.VAR)
p.Funcs = makeFuncs(doc.funcs)
--- /dev/null
+// comment 0 comment 1
+PACKAGE a
+
+IMPORTPATH
+ testdata/a
+
+FILENAMES
+ testdata/a0.go
+ testdata/a1.go
+
+BUGS
+ // bug0
+ // bug1
--- /dev/null
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// comment 0
+package a
+
+//BUG(uid): bug0
--- /dev/null
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// comment 1
+package a
+
+//BUG(uid): bug1
--- /dev/null
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package b
+
+import "a"
+
+const Pi = 3.14 // Pi
+var MaxInt int // MaxInt
+type T struct{} // T
+var V T // v
+func F(x int) int {} // F
--- /dev/null
+//
+PACKAGE b
+
+IMPORTPATH
+ testdata/b
+
+IMPORTS
+ a
+
+FILENAMES
+ testdata/b.go
+
+CONSTANTS
+ //
+ const Pi = 3.14 // Pi
+
+
+VARIABLES
+ //
+ var MaxInt int // MaxInt
+
+
+FUNCTIONS
+ //
+ func F(x int) int
+
+
+TYPES
+ //
+ type T struct{} // T
+
+ //
+ var V T // v
+
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package testing
+
+import (
+ "flag"
+ "fmt"
+ "os"
+ "runtime"
+ "time"
+)
+
+var matchBenchmarks = flag.String("test.bench", "", "regular expression to select benchmarks to run")
+var benchTime = flag.Float64("test.benchtime", 1, "approximate run time for each benchmark, in seconds")
+
+// An internal type but exported because it is cross-package; part of the implementation
+// of gotest.
+type InternalBenchmark struct {
+ Name string
+ F func(b *B)
+}
+
+// B is a type passed to Benchmark functions to manage benchmark
+// timing and to specify the number of iterations to run.
+type B struct {
+ common
+ N int
+ benchmark InternalBenchmark
+ bytes int64
+ timerOn bool
+ result BenchmarkResult
+}
+
+// StartTimer starts timing a test. This function is called automatically
+// before a benchmark starts, but it can also be used to resume timing after
+// a call to StopTimer.
+func (b *B) StartTimer() {
+ if !b.timerOn {
+ b.start = time.Now()
+ b.timerOn = true
+ }
+}
+
+// StopTimer stops timing a test. This can be used to pause the timer
+// while performing complex initialization that you don't
+// want to measure.
+func (b *B) StopTimer() {
+ if b.timerOn {
+ b.duration += time.Now().Sub(b.start)
+ b.timerOn = false
+ }
+}
+
+// ResetTimer sets the elapsed benchmark time to zero.
+// It does not affect whether the timer is running.
+func (b *B) ResetTimer() {
+ if b.timerOn {
+ b.start = time.Now()
+ }
+ b.duration = 0
+}
+
+// SetBytes records the number of bytes processed in a single operation.
+// If this is called, the benchmark will report ns/op and MB/s.
+func (b *B) SetBytes(n int64) { b.bytes = n }
+
+func (b *B) nsPerOp() int64 {
+ if b.N <= 0 {
+ return 0
+ }
+ return b.duration.Nanoseconds() / int64(b.N)
+}
+
+// runN runs a single benchmark for the specified number of iterations.
+func (b *B) runN(n int) {
+ // Try to get a comparable environment for each run
+ // by clearing garbage from previous runs.
+ runtime.GC()
+ b.N = n
+ b.ResetTimer()
+ b.StartTimer()
+ b.benchmark.F(b)
+ b.StopTimer()
+}
+
+func min(x, y int) int {
+ if x > y {
+ return y
+ }
+ return x
+}
+
+func max(x, y int) int {
+ if x < y {
+ return y
+ }
+ return x
+}
+
+// roundDown10 rounds a number down to the nearest power of 10.
+func roundDown10(n int) int {
+ var tens = 0
+ // tens = floor(log_10(n))
+ for n > 10 {
+ n = n / 10
+ tens++
+ }
+ // result = 10^tens
+ result := 1
+ for i := 0; i < tens; i++ {
+ result *= 10
+ }
+ return result
+}
+
+// roundUp rounds x up to a number of the form [1eX, 2eX, 5eX].
+func roundUp(n int) int {
+ base := roundDown10(n)
+ if n < (2 * base) {
+ return 2 * base
+ }
+ if n < (5 * base) {
+ return 5 * base
+ }
+ return 10 * base
+}
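Two worked examples of the rounding helpers above, using illustrative inputs: roundDown10(137) is 100 (two divisions by ten), so roundUp(137) returns 200; roundDown10(4500) is 1000, so roundUp(4500) returns 5000.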
+
+// run times the benchmark function in a separate goroutine.
+func (b *B) run() BenchmarkResult {
+ go b.launch()
+ <-b.signal
+ return b.result
+}
+
+// launch launches the benchmark function. It gradually increases the number
+// of benchmark iterations until the benchmark runs for a second in order
+// to get a reasonable measurement. It prints timing information in this form
+// testing.BenchmarkHello 100000 19 ns/op
+// launch is run by the run function as a separate goroutine.
+func (b *B) launch() {
+ // Run the benchmark for a single iteration in case it's expensive.
+ n := 1
+
+ // Signal that we're done whether we return normally
+ // or by FailNow's runtime.Goexit.
+ defer func() {
+ b.signal <- b
+ }()
+
+ b.runN(n)
+ // Run the benchmark for at least the specified amount of time.
+ d := time.Duration(*benchTime * float64(time.Second))
+ for !b.failed && b.duration < d && n < 1e9 {
+ last := n
+ // Predict iterations/sec.
+ if b.nsPerOp() == 0 {
+ n = 1e9
+ } else {
+ n = int(d.Nanoseconds() / b.nsPerOp())
+ }
+ // Run more iterations than we think we'll need for a second (1.5x).
+ // Don't grow too fast in case we had timing errors previously.
+ // Be sure to run at least one more than last time.
+ n = max(min(n+n/2, 100*last), last+1)
+ // Round up to something easy to read.
+ n = roundUp(n)
+ b.runN(n)
+ }
+ b.result = BenchmarkResult{b.N, b.duration, b.bytes}
+}
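As a worked example of the growth schedule (timings illustrative): with the default one-second benchtime and roughly 2000 ns/op measured from the single-iteration run, the prediction is 1e9/2000 = 500000 iterations, but the cap max(min(n+n/2, 100*last), last+1) limits the next run to 100 iterations; the round after that is capped at 100*100 = 10000, so the count grows by at most 100x per round until a run lasts at least the requested second.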
+
+// The results of a benchmark run.
+type BenchmarkResult struct {
+ N int // The number of iterations.
+ T time.Duration // The total time taken.
+ Bytes int64 // Bytes processed in one iteration.
+}
+
+func (r BenchmarkResult) NsPerOp() int64 {
+ if r.N <= 0 {
+ return 0
+ }
+ return r.T.Nanoseconds() / int64(r.N)
+}
+
+func (r BenchmarkResult) mbPerSec() float64 {
+ if r.Bytes <= 0 || r.T <= 0 || r.N <= 0 {
+ return 0
+ }
+ return (float64(r.Bytes) * float64(r.N) / 1e6) / r.T.Seconds()
+}
+
+func (r BenchmarkResult) String() string {
+ mbs := r.mbPerSec()
+ mb := ""
+ if mbs != 0 {
+ mb = fmt.Sprintf("\t%7.2f MB/s", mbs)
+ }
+ nsop := r.NsPerOp()
+ ns := fmt.Sprintf("%10d ns/op", nsop)
+ if r.N > 0 && nsop < 100 {
+ // The format specifiers here make sure that
+ // the ones digits line up for all three possible formats.
+ if nsop < 10 {
+ ns = fmt.Sprintf("%13.2f ns/op", float64(r.T.Nanoseconds())/float64(r.N))
+ } else {
+ ns = fmt.Sprintf("%12.1f ns/op", float64(r.T.Nanoseconds())/float64(r.N))
+ }
+ }
+ return fmt.Sprintf("%8d\t%s%s", r.N, ns, mb)
+}
+
+// An internal function but exported because it is cross-package; part of the implementation
+// of gotest.
+func RunBenchmarks(matchString func(pat, str string) (bool, error), benchmarks []InternalBenchmark) {
+ // If no flag was specified, don't run benchmarks.
+ if len(*matchBenchmarks) == 0 {
+ return
+ }
+ for _, Benchmark := range benchmarks {
+ matched, err := matchString(*matchBenchmarks, Benchmark.Name)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "testing: invalid regexp for -test.bench: %s\n", err)
+ os.Exit(1)
+ }
+ if !matched {
+ continue
+ }
+ for _, procs := range cpuList {
+ runtime.GOMAXPROCS(procs)
+ b := &B{
+ common: common{
+ signal: make(chan interface{}),
+ },
+ benchmark: Benchmark,
+ }
+ benchName := Benchmark.Name
+ if procs != 1 {
+ benchName = fmt.Sprintf("%s-%d", Benchmark.Name, procs)
+ }
+ fmt.Printf("%s\t", benchName)
+ r := b.run()
+ if b.failed {
+ // The output could be very long here, but probably isn't.
+ // We print it all, regardless, because we don't want to trim the reason
+ // the benchmark failed.
+ fmt.Printf("--- FAIL: %s\n%s", benchName, b.output)
+ continue
+ }
+ fmt.Printf("%v\n", r)
+ // Unlike with tests, we ignore the -chatty flag and always print output for
+ // benchmarks since the output generation time will skew the results.
+ if len(b.output) > 0 {
+ b.trimOutput()
+ fmt.Printf("--- BENCH: %s\n%s", benchName, b.output)
+ }
+ if p := runtime.GOMAXPROCS(-1); p != procs {
+ fmt.Fprintf(os.Stderr, "testing: %s left GOMAXPROCS set to %d\n", benchName, p)
+ }
+ }
+ }
+}
+
+// trimOutput shortens the output from a benchmark, which can be very long.
+func (b *B) trimOutput() {
+ // The output is likely to appear multiple times because the benchmark
+ // is run multiple times, but at least it will be seen. This is not a big deal
+ // because benchmarks rarely print, but just in case, we trim it if it's too long.
+ const maxNewlines = 10
+ for nlCount, j := 0, 0; j < len(b.output); j++ {
+ if b.output[j] == '\n' {
+ nlCount++
+ if nlCount >= maxNewlines {
+ b.output = append(b.output[:j], "\n\t... [output truncated]\n"...)
+ break
+ }
+ }
+ }
+}
+
+// Benchmark benchmarks a single function. Useful for creating
+// custom benchmarks that do not use gotest.
+func Benchmark(f func(b *B)) BenchmarkResult {
+ b := &B{
+ common: common{
+ signal: make(chan interface{}),
+ },
+ benchmark: InternalBenchmark{"", f},
+ }
+ return b.run()
+}
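A hedged usage sketch for Benchmark above; the body being timed is illustrative only and mirrors the package documentation's sample.

package main

import (
	"fmt"
	"testing"
)

func main() {
	r := testing.Benchmark(func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			fmt.Sprintf("hello")
		}
	})
	fmt.Println(r) // prints the iteration count and ns/op
}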
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package testing
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+ "time"
+)
+
+type InternalExample struct {
+ Name string
+ F func()
+ Output string
+}
+
+func RunExamples(examples []InternalExample) (ok bool) {
+ ok = true
+
+ var eg InternalExample
+
+ stdout, stderr := os.Stdout, os.Stderr
+ defer func() {
+ os.Stdout, os.Stderr = stdout, stderr
+ if e := recover(); e != nil {
+ fmt.Printf("--- FAIL: %s\npanic: %v\n", eg.Name, e)
+ os.Exit(1)
+ }
+ }()
+
+ for _, eg = range examples {
+ if *chatty {
+ fmt.Printf("=== RUN: %s\n", eg.Name)
+ }
+
+ // capture stdout and stderr
+ r, w, err := os.Pipe()
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(1)
+ }
+ os.Stdout, os.Stderr = w, w
+ outC := make(chan string)
+ go func() {
+ buf := new(bytes.Buffer)
+ _, err := io.Copy(buf, r)
+ if err != nil {
+ fmt.Fprintf(stderr, "testing: copying pipe: %v\n", err)
+ os.Exit(1)
+ }
+ outC <- buf.String()
+ }()
+
+ // run example
+ t0 := time.Now()
+ eg.F()
+ dt := time.Now().Sub(t0)
+
+ // close pipe, restore stdout/stderr, get output
+ w.Close()
+ os.Stdout, os.Stderr = stdout, stderr
+ out := <-outC
+
+ // report any errors
+ tstr := fmt.Sprintf("(%.2f seconds)", dt.Seconds())
+ if g, e := strings.TrimSpace(out), strings.TrimSpace(eg.Output); g != e {
+ fmt.Printf("--- FAIL: %s %s\ngot:\n%s\nwant:\n%s\n",
+ eg.Name, tstr, g, e)
+ ok = false
+ } else if *chatty {
+ fmt.Printf("--- PASS: %s %s\n", eg.Name, tstr)
+ }
+ }
+
+ return
+}
--- /dev/null
+{{synopsis .Doc}}
+PACKAGE {{.Name}}
+
+IMPORTPATH
+ {{.ImportPath}}
+
+{{with .Imports}}IMPORTS
+{{range .}} {{.}}
+{{end}}
+{{end}}{{/*
+
+*/}}FILENAMES
+{{range .Filenames}} {{.}}
+{{end}}{{/*
+
+*/}}{{with .Consts}}
+CONSTANTS
+{{range .}} {{synopsis .Doc}}
+ {{node .Decl $.FSet}}
+
+{{end}}{{end}}{{/*
+
+*/}}{{with .Vars}}
+VARIABLES
+{{range .}} {{synopsis .Doc}}
+ {{node .Decl $.FSet}}
+
+{{end}}{{end}}{{/*
+
+*/}}{{with .Funcs}}
+FUNCTIONS
+{{range .}} {{synopsis .Doc}}
+ {{node .Decl $.FSet}}
+
+{{end}}{{end}}{{/*
+
+*/}}{{with .Types}}
+TYPES
+{{range .}} {{synopsis .Doc}}
+ {{node .Decl $.FSet}}
+
+{{range .Consts}} {{synopsis .Doc}}
+ {{node .Decl $.FSet}}
+
+{{end}}{{/*
+
+*/}}{{range .Vars}} {{synopsis .Doc}}
+ {{node .Decl $.FSet}}
+
+{{end}}{{/*
+
+*/}}{{range .Funcs}} {{synopsis .Doc}}
+ {{node .Decl $.FSet}}
+
+{{end}}{{/*
+
+*/}}{{range .Methods}} {{synopsis .Doc}}
+ {{node .Decl $.FSet}}
+
+{{end}}{{end}}{{end}}{{/*
+
+*/}}{{with .Bugs}}
+BUGS
+{{range .}} {{synopsis .}}
+{{end}}{{end}}
\ No newline at end of file
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package testing provides support for automated testing of Go packages.
+// It is intended to be used in concert with the ``gotest'' utility, which automates
+// execution of any function of the form
+// func TestXxx(*testing.T)
+// where Xxx can be any alphanumeric string (but the first letter must not be in
+// [a-z]) and serves to identify the test routine.
+// These TestXxx routines should be declared within the package they are testing.
+//
+// Functions of the form
+// func BenchmarkXxx(*testing.B)
+// are considered benchmarks, and are executed by gotest when the -test.bench
+// flag is provided.
+//
+// A sample benchmark function looks like this:
+// func BenchmarkHello(b *testing.B) {
+// for i := 0; i < b.N; i++ {
+// fmt.Sprintf("hello")
+// }
+// }
+// The benchmark package will vary b.N until the benchmark function lasts
+// long enough to be timed reliably. The output
+// testing.BenchmarkHello 10000000 282 ns/op
+// means that the loop ran 10000000 times at a speed of 282 ns per loop.
+//
+// If a benchmark needs some expensive setup before running, the timer
+// may be stopped:
+// func BenchmarkBigLen(b *testing.B) {
+// b.StopTimer()
+// big := NewBig()
+// b.StartTimer()
+// for i := 0; i < b.N; i++ {
+// big.Len()
+// }
+// }
+package testing
+
+import (
+ "flag"
+ "fmt"
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "strconv"
+ "strings"
+ "time"
+)
+
+var (
+ // The short flag requests that tests run more quickly, but its functionality
+ // is provided by test writers themselves. The testing package is just its
+ // home. The all.bash installation script sets it to make installation more
+ // efficient, but by default the flag is off so a plain "gotest" will do a
+ // full test of the package.
+ short = flag.Bool("test.short", false, "run smaller test suite to save time")
+
+ // Report as tests are run; default is silent for success.
+ chatty = flag.Bool("test.v", false, "verbose: print additional output")
+ match = flag.String("test.run", "", "regular expression to select tests to run")
+ memProfile = flag.String("test.memprofile", "", "write a memory profile to the named file after execution")
+ memProfileRate = flag.Int("test.memprofilerate", 0, "if >=0, sets runtime.MemProfileRate")
+ cpuProfile = flag.String("test.cpuprofile", "", "write a cpu profile to the named file during execution")
+ timeout = flag.Duration("test.timeout", 0, "if positive, sets an aggregate time limit for all tests")
+ cpuListStr = flag.String("test.cpu", "", "comma-separated list of number of CPUs to use for each test")
+ parallel = flag.Int("test.parallel", runtime.GOMAXPROCS(0), "maximum test parallelism")
+
+ cpuList []int
+)
+
+// common holds the elements common between T and B and
+// captures common methods such as Errorf.
+type common struct {
+ output []byte // Output generated by test or benchmark.
+ failed bool // Test or benchmark has failed.
+ start time.Time // Time test or benchmark started
+ duration time.Duration
+ self interface{} // To be sent on signal channel when done.
+ signal chan interface{} // Output for serial tests.
+}
+
+// Short reports whether the -test.short flag is set.
+func Short() bool {
+ return *short
+}
+
+// decorate inserts the final newline if needed and indentation tabs for formatting.
+// If addFileLine is true, it also prefixes the string with the file and line of the call site.
+func decorate(s string, addFileLine bool) string {
+ if addFileLine {
+ _, file, line, ok := runtime.Caller(3) // decorate + log + public function.
+ if ok {
+ // Truncate file name at last file name separator.
+ if index := strings.LastIndex(file, "/"); index >= 0 {
+ file = file[index+1:]
+ } else if index = strings.LastIndex(file, "\\"); index >= 0 {
+ file = file[index+1:]
+ }
+ } else {
+ file = "???"
+ line = 1
+ }
+ s = fmt.Sprintf("%s:%d: %s", file, line, s)
+ }
+ s = "\t" + s // Every line is indented at least one tab.
+ n := len(s)
+ if n > 0 && s[n-1] != '\n' {
+ s += "\n"
+ n++
+ }
+ for i := 0; i < n-1; i++ { // -1 to avoid final newline
+ if s[i] == '\n' {
+ // Second and subsequent lines are indented an extra tab.
+ return s[0:i+1] + "\t" + decorate(s[i+1:n], false)
+ }
+ }
+ return s
+}
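For example, t.Errorf("oops") called from, say, line 17 of foo_test.go is recorded as "\tfoo_test.go:17: oops\n": the file:line prefix comes from runtime.Caller three frames up, every entry is indented by one tab, and any further lines inside the message get an extra tab.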
+
+// T is a type passed to Test functions to manage test state and support formatted test logs.
+// Logs are accumulated during execution and dumped to standard error when done.
+type T struct {
+ common
+ name string // Name of test.
+ startParallel chan bool // Parallel tests will wait on this.
+}
+
+// Fail marks the function as having failed but continues execution.
+func (c *common) Fail() { c.failed = true }
+
+// Failed returns whether the function has failed.
+func (c *common) Failed() bool { return c.failed }
+
+// FailNow marks the function as having failed and stops its execution.
+// Execution will continue at the next Test.
+func (c *common) FailNow() {
+ c.Fail()
+
+ // Calling runtime.Goexit will exit the goroutine, which
+ // will run the deferred functions in this goroutine,
+ // which will eventually run the deferred lines in tRunner,
+ // which will signal to the test loop that this test is done.
+ //
+ // A previous version of this code said:
+ //
+ // c.duration = ...
+ // c.signal <- c.self
+ // runtime.Goexit()
+ //
+ // This previous version duplicated code (those lines are in
+ // tRunner no matter what), but worse the goroutine teardown
+ // implicit in runtime.Goexit was not guaranteed to complete
+ // before the test exited. If a test deferred an important cleanup
+ // function (like removing temporary files), there was no guarantee
+ // it would run on a test failure. Because we send on c.signal during
+ // a top-of-stack deferred function now, we know that the send
+ // only happens after any other stacked defers have completed.
+ runtime.Goexit()
+}
+
+// log generates the output. It's always at the same stack depth.
+func (c *common) log(s string) {
+ c.output = append(c.output, decorate(s, true)...)
+}
+
+// Log formats its arguments using default formatting, analogous to Println(),
+// and records the text in the error log.
+func (c *common) Log(args ...interface{}) { c.log(fmt.Sprintln(args...)) }
+
+// Logf formats its arguments according to the format, analogous to Printf(),
+// and records the text in the error log.
+func (c *common) Logf(format string, args ...interface{}) { c.log(fmt.Sprintf(format, args...)) }
+
+// Error is equivalent to Log() followed by Fail().
+func (c *common) Error(args ...interface{}) {
+ c.log(fmt.Sprintln(args...))
+ c.Fail()
+}
+
+// Errorf is equivalent to Logf() followed by Fail().
+func (c *common) Errorf(format string, args ...interface{}) {
+ c.log(fmt.Sprintf(format, args...))
+ c.Fail()
+}
+
+// Fatal is equivalent to Log() followed by FailNow().
+func (c *common) Fatal(args ...interface{}) {
+ c.log(fmt.Sprintln(args...))
+ c.FailNow()
+}
+
+// Fatalf is equivalent to Logf() followed by FailNow().
+func (c *common) Fatalf(format string, args ...interface{}) {
+ c.log(fmt.Sprintf(format, args...))
+ c.FailNow()
+}
+
+// Parallel signals that this test is to be run in parallel with (and only with)
+// other parallel tests in this CPU group.
+func (t *T) Parallel() {
+ t.signal <- (*T)(nil) // Release main testing loop
+ <-t.startParallel // Wait for serial tests to finish
+}
+
+// An internal type but exported because it is cross-package; part of the implementation
+// of gotest.
+type InternalTest struct {
+ Name string
+ F func(*T)
+}
+
+func tRunner(t *T, test *InternalTest) {
+ t.start = time.Now()
+
+ // When this goroutine is done, either because test.F(t)
+ // returned normally or because a test failure triggered
+ // a call to runtime.Goexit, record the duration and send
+ // a signal saying that the test is done.
+ defer func() {
+ t.duration = time.Now().Sub(t.start)
+ t.signal <- t
+ }()
+
+ test.F(t)
+}
+
+// An internal function but exported because it is cross-package; part of the implementation
+// of gotest.
+func Main(matchString func(pat, str string) (bool, error), tests []InternalTest, benchmarks []InternalBenchmark, examples []InternalExample) {
+ flag.Parse()
+ parseCpuList()
+
+ before()
+ startAlarm()
+ testOk := RunTests(matchString, tests)
+ exampleOk := RunExamples(examples)
+ if !testOk || !exampleOk {
+ fmt.Println("FAIL")
+ os.Exit(1)
+ }
+ fmt.Println("PASS")
+ stopAlarm()
+ RunBenchmarks(matchString, benchmarks)
+ after()
+}
+
+func (t *T) report() {
+ tstr := fmt.Sprintf("(%.2f seconds)", t.duration.Seconds())
+ format := "--- %s: %s %s\n%s"
+ if t.failed {
+ fmt.Printf(format, "FAIL", t.name, tstr, t.output)
+ } else if *chatty {
+ fmt.Printf(format, "PASS", t.name, tstr, t.output)
+ }
+}
+
+func RunTests(matchString func(pat, str string) (bool, error), tests []InternalTest) (ok bool) {
+ ok = true
+ if len(tests) == 0 {
+ fmt.Fprintln(os.Stderr, "testing: warning: no tests to run")
+ return
+ }
+ for _, procs := range cpuList {
+ runtime.GOMAXPROCS(procs)
+ // We build a new channel tree for each run of the loop.
+ // collector merges in one channel all the upstream signals from parallel tests.
+ // If all tests pump to the same channel, a bug can occur where a test
+ // kicks off a goroutine that Fails, yet the test still delivers a completion signal,
+ // which skews the counting.
+ var collector = make(chan interface{})
+
+ numParallel := 0
+ startParallel := make(chan bool)
+
+ for i := 0; i < len(tests); i++ {
+ matched, err := matchString(*match, tests[i].Name)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "testing: invalid regexp for -test.run: %s\n", err)
+ os.Exit(1)
+ }
+ if !matched {
+ continue
+ }
+ testName := tests[i].Name
+ if procs != 1 {
+ testName = fmt.Sprintf("%s-%d", tests[i].Name, procs)
+ }
+ t := &T{
+ common: common{
+ signal: make(chan interface{}),
+ },
+ name: testName,
+ startParallel: startParallel,
+ }
+ t.self = t
+ if *chatty {
+ fmt.Printf("=== RUN %s\n", t.name)
+ }
+ go tRunner(t, &tests[i])
+ out := (<-t.signal).(*T)
+ if out == nil { // Parallel run.
+ go func() {
+ collector <- <-t.signal
+ }()
+ numParallel++
+ continue
+ }
+ t.report()
+ ok = ok && !out.failed
+ }
+
+ running := 0
+ for numParallel+running > 0 {
+ if running < *parallel && numParallel > 0 {
+ startParallel <- true
+ running++
+ numParallel--
+ continue
+ }
+ t := (<-collector).(*T)
+ t.report()
+ ok = ok && !t.failed
+ running--
+ }
+ }
+ return
+}
+
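The per-run collector channel is a fan-in: every parallel test forwards its completion signal through a small goroutine into one channel that the loop above drains. A standalone sketch of just that pattern (not the testing package's own code):

	package main

	import "fmt"

	func main() {
		collector := make(chan int)
		for i := 0; i < 3; i++ {
			done := make(chan int, 1)
			done <- i                           // pretend worker i has finished
			go func() { collector <- <-done }() // forward into the shared collector
		}
		for i := 0; i < 3; i++ {
			fmt.Println("collected", <-collector)
		}
	}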
+// before runs before all testing.
+func before() {
+ if *memProfileRate > 0 {
+ runtime.MemProfileRate = *memProfileRate
+ }
+ if *cpuProfile != "" {
+ f, err := os.Create(*cpuProfile)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "testing: %s", err)
+ return
+ }
+ if err := pprof.StartCPUProfile(f); err != nil {
+ fmt.Fprintf(os.Stderr, "testing: can't start cpu profile: %s", err)
+ f.Close()
+ return
+ }
+ // Could save f so after can call f.Close; not worth the effort.
+ }
+
+}
+
+// after runs after all testing.
+func after() {
+ if *cpuProfile != "" {
+ pprof.StopCPUProfile() // flushes profile to disk
+ }
+ if *memProfile != "" {
+ f, err := os.Create(*memProfile)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "testing: %s", err)
+ return
+ }
+ if err = pprof.WriteHeapProfile(f); err != nil {
+ fmt.Fprintf(os.Stderr, "testing: can't write %s: %s", *memProfile, err)
+ }
+ f.Close()
+ }
+}
+
+var timer *time.Timer
+
+// startAlarm starts an alarm if requested.
+func startAlarm() {
+ if *timeout > 0 {
+ timer = time.AfterFunc(*timeout, alarm)
+ }
+}
+
+// stopAlarm turns off the alarm.
+func stopAlarm() {
+ if *timeout > 0 {
+ timer.Stop()
+ }
+}
+
+// alarm is called if the timeout expires.
+func alarm() {
+ panic("test timed out")
+}
+
+func parseCpuList() {
+ if len(*cpuListStr) == 0 {
+ cpuList = append(cpuList, runtime.GOMAXPROCS(-1))
+ } else {
+ for _, val := range strings.Split(*cpuListStr, ",") {
+ cpu, err := strconv.Atoi(val)
+ if err != nil || cpu <= 0 {
+ fmt.Fprintf(os.Stderr, "testing: invalid value %q for -test.cpu", val)
+ os.Exit(1)
+ }
+ cpuList = append(cpuList, cpu)
+ }
+ }
+}
--- /dev/null
+// Package testing provides support for automated testing of Go ...
+PACKAGE testing
+
+IMPORTPATH
+ testdata/testing
+
+IMPORTS
+ bytes
+ flag
+ fmt
+ io
+ os
+ runtime
+ runtime/pprof
+ strconv
+ strings
+ time
+
+FILENAMES
+ testdata/benchmark.go
+ testdata/example.go
+ testdata/testing.go
+
+FUNCTIONS
+ // An internal function but exported because it is cross-package; ...
+ func Main(matchString func(pat, str string) (bool, error), tests []InternalTest, benchmarks []InternalBenchmark, examples []InternalExample)
+
+ // An internal function but exported because it is cross-package; ...
+ func RunBenchmarks(matchString func(pat, str string) (bool, error), benchmarks []InternalBenchmark)
+
+ //
+ func RunExamples(examples []InternalExample) (ok bool)
+
+ //
+ func RunTests(matchString func(pat, str string) (bool, error), tests []InternalTest) (ok bool)
+
+ // Short reports whether the -test.short flag is set.
+ func Short() bool
+
+
+TYPES
+ // B is a type passed to Benchmark functions to manage benchmark ...
+ type B struct {
+ N int
+ // contains filtered or unexported fields
+ }
+
+ // Error is equivalent to Log() followed by Fail().
+ func (c *B) Error(args ...interface{})
+
+ // Errorf is equivalent to Logf() followed by Fail().
+ func (c *B) Errorf(format string, args ...interface{})
+
+ // Fail marks the function as having failed but continues ...
+ func (c *B) Fail()
+
+ // FailNow marks the function as having failed and stops its ...
+ func (c *B) FailNow()
+
+ // Failed returns whether the function has failed.
+ func (c *B) Failed() bool
+
+ // Fatal is equivalent to Log() followed by FailNow().
+ func (c *B) Fatal(args ...interface{})
+
+ // Fatalf is equivalent to Logf() followed by FailNow().
+ func (c *B) Fatalf(format string, args ...interface{})
+
+ // Log formats its arguments using default formatting, analogous ...
+ func (c *B) Log(args ...interface{})
+
+ // Logf formats its arguments according to the format, analogous ...
+ func (c *B) Logf(format string, args ...interface{})
+
+ // ResetTimer sets the elapsed benchmark time to zero. It does not ...
+ func (b *B) ResetTimer()
+
+ // SetBytes records the number of bytes processed in a single ...
+ func (b *B) SetBytes(n int64)
+
+ // StartTimer starts timing a test. This function is called ...
+ func (b *B) StartTimer()
+
+ // StopTimer stops timing a test. This can be used to pause the ...
+ func (b *B) StopTimer()
+
+ // The results of a benchmark run.
+ type BenchmarkResult struct {
+ N int // The number of iterations.
+ T time.Duration // The total time taken.
+ Bytes int64 // Bytes processed in one iteration.
+ }
+
+ // Benchmark benchmarks a single function. Useful for creating ...
+ func Benchmark(f func(b *B)) BenchmarkResult
+
+ //
+ func (r BenchmarkResult) NsPerOp() int64
+
+ //
+ func (r BenchmarkResult) String() string
+
+ // An internal type but exported because it is cross-package; part ...
+ type InternalBenchmark struct {
+ Name string
+ F func(b *B)
+ }
+
+ //
+ type InternalExample struct {
+ Name string
+ F func()
+ Output string
+ }
+
+ // An internal type but exported because it is cross-package; part ...
+ type InternalTest struct {
+ Name string
+ F func(*T)
+ }
+
+ // T is a type passed to Test functions to manage test state and ...
+ type T struct {
+ // contains filtered or unexported fields
+ }
+
+ // Error is equivalent to Log() followed by Fail().
+ func (c *T) Error(args ...interface{})
+
+ // Errorf is equivalent to Logf() followed by Fail().
+ func (c *T) Errorf(format string, args ...interface{})
+
+ // Fail marks the function as having failed but continues ...
+ func (c *T) Fail()
+
+ // FailNow marks the function as having failed and stops its ...
+ func (c *T) FailNow()
+
+ // Failed returns whether the function has failed.
+ func (c *T) Failed() bool
+
+ // Fatal is equivalent to Log() followed by FailNow().
+ func (c *T) Fatal(args ...interface{})
+
+ // Fatalf is equivalent to Logf() followed by FailNow().
+ func (c *T) Fatalf(format string, args ...interface{})
+
+ // Log formats its arguments using default formatting, analogous ...
+ func (c *T) Log(args ...interface{})
+
+ // Logf formats its arguments according to the format, analogous ...
+ func (c *T) Logf(format string, args ...interface{})
+
+ // Parallel signals that this test is to be run in parallel with ...
+ func (t *T) Parallel()
+
//
func ParseExpr(x string) (ast.Expr, error) {
// parse x within the context of a complete package for correct scopes;
- // use //line directive for correct positions in error messages
- file, err := ParseFile(token.NewFileSet(), "", "package p;func _(){_=\n//line :1\n"+x+";}", 0)
+ // use //line directive for correct positions in error messages and put
+ // x alone on a separate line (handles line comments), followed by a ';'
+ // to force an error if the expression is incomplete
+ file, err := ParseFile(token.NewFileSet(), "", "package p;func _(){_=\n//line :1\n"+x+"\n;}", 0)
if err != nil {
return nil, err
}
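For context, callers never see the wrapping: ParseExpr takes the bare expression string, and the extra newline above is what lets a trailing line comment through. A hedged sketch:

	package main

	import (
		"fmt"
		"go/ast"
		"go/parser"
	)

	func main() {
		// The trailing line comment no longer breaks parsing, because the
		// expression is placed on its own line inside the synthetic function.
		expr, err := parser.ParseExpr("a + b*c // comment")
		if err != nil {
			fmt.Println(err)
			return
		}
		ast.Print(nil, expr) // dump the resulting expression tree
	}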
// source which can then be tokenized through repeated calls to the Scan
// function. Typical use:
//
-// var s Scanner
+// var s scanner.Scanner
// fset := token.NewFileSet() // position information is relative to fset
// file := fset.AddFile(filename, fset.Base(), len(src)) // register file
// s.Init(file, src, nil /* no error handler */, 0)
"sync"
)
+// -----------------------------------------------------------------------------
+// Positions
+
// Position describes an arbitrary source position
// including the file, line, and column location.
// A Position is valid if the line number is > 0.
return p != NoPos
}
-func searchFiles(a []*File, x int) int {
- return sort.Search(len(a), func(i int) bool { return a[i].base > x }) - 1
-}
-
-func (s *FileSet) file(p Pos) *File {
- // common case: p is in last file touched
- if f := s.last; f != nil && f.base <= int(p) && int(p) <= f.base+f.size {
- return f
- }
- // p is not in last file touched - search all files
- if i := searchFiles(s.files, int(p)); i >= 0 {
- f := s.files[i]
- // f.base <= int(p) by definition of searchFiles
- if int(p) <= f.base+f.size {
- s.last = f
- return f
- }
- }
- return nil
-}
-
-// File returns the file which contains the position p.
-// If no such file is found (for instance for p == NoPos),
-// the result is nil.
-//
-func (s *FileSet) File(p Pos) (f *File) {
- if p != NoPos {
- s.mutex.RLock()
- f = s.file(p)
- s.mutex.RUnlock()
- }
- return
-}
-
-func (f *File) position(p Pos) (pos Position) {
- offset := int(p) - f.base
- pos.Offset = offset
- pos.Filename, pos.Line, pos.Column = f.info(offset)
- return
-}
-
-// Position converts a Pos in the fileset into a general Position.
-func (s *FileSet) Position(p Pos) (pos Position) {
- if p != NoPos {
- s.mutex.RLock()
- if f := s.file(p); f != nil {
- pos = f.position(p)
- }
- s.mutex.RUnlock()
- }
- return
-}
-
-// A lineInfo object describes alternative file and line number
-// information (such as provided via a //line comment in a .go
-// file) for a given file offset.
-type lineInfo struct {
- // fields are exported to make them accessible to gob
- Offset int
- Filename string
- Line int
-}
-
-// AddLineInfo adds alternative file and line number information for
-// a given file offset. The offset must be larger than the offset for
-// the previously added alternative line info and smaller than the
-// file size; otherwise the information is ignored.
-//
-// AddLineInfo is typically used to register alternative position
-// information for //line filename:line comments in source files.
-//
-func (f *File) AddLineInfo(offset int, filename string, line int) {
- f.set.mutex.Lock()
- if i := len(f.infos); i == 0 || f.infos[i-1].Offset < offset && offset < f.size {
- f.infos = append(f.infos, lineInfo{offset, filename, line})
- }
- f.set.mutex.Unlock()
-}
+// -----------------------------------------------------------------------------
+// File
// A File is a handle for a file belonging to a FileSet.
// A File has a name, size, and line offset table.
f.set.mutex.Unlock()
}
+// A lineInfo object describes alternative file and line number
+// information (such as provided via a //line comment in a .go
+// file) for a given file offset.
+type lineInfo struct {
+ // fields are exported to make them accessible to gob
+ Offset int
+ Filename string
+ Line int
+}
+
+// AddLineInfo adds alternative file and line number information for
+// a given file offset. The offset must be larger than the offset for
+// the previously added alternative line info and smaller than the
+// file size; otherwise the information is ignored.
+//
+// AddLineInfo is typically used to register alternative position
+// information for //line filename:line comments in source files.
+//
+func (f *File) AddLineInfo(offset int, filename string, line int) {
+ f.set.mutex.Lock()
+ if i := len(f.infos); i == 0 || f.infos[i-1].Offset < offset && offset < f.size {
+ f.infos = append(f.infos, lineInfo{offset, filename, line})
+ }
+ f.set.mutex.Unlock()
+}
+
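A hedged sketch of registering //line information by hand; the file name, offsets, and line number below are illustrative only:

	package main

	import (
		"fmt"
		"go/token"
	)

	func main() {
		src := "package p\n//line generated.go:100\nvar x int\n"
		fset := token.NewFileSet()
		f := fset.AddFile("p.go", fset.Base(), len(src))
		f.SetLinesForContent([]byte(src))

		// Record that positions from this offset on should be reported as
		// coming from generated.go, starting at line 100 (what a parser does
		// when it sees the //line comment).
		f.AddLineInfo(len("package p\n//line generated.go:100\n"), "generated.go", 100)

		pos := f.Pos(len(src) - 2) // a position on the "var x int" line
		fmt.Println(fset.Position(pos))
	}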
// Pos returns the Pos value for the given file offset;
// the offset must be <= f.Size().
// f.Pos(f.Offset(p)) == p.
return f.Position(p).Line
}
-// Position returns the Position value for the given file position p;
-// p must be a Pos value in that file or NoPos.
-//
-func (f *File) Position(p Pos) (pos Position) {
- if p != NoPos {
- if int(p) < f.base || int(p) > f.base+f.size {
- panic("illegal Pos value")
- }
- pos = f.position(p)
- }
- return
-}
-
-func searchInts(a []int, x int) int {
- // This function body is a manually inlined version of:
- //
- // return sort.Search(len(a), func(i int) bool { return a[i] > x }) - 1
- //
- // With better compiler optimizations, this may not be needed in the
- // future, but at the moment this change improves the go/printer
- // benchmark performance by ~30%. This has a direct impact on the
- // speed of gofmt and thus seems worthwhile (2011-04-29).
- i, j := 0, len(a)
- for i < j {
- h := i + (j-i)/2 // avoid overflow when computing h
- // i ≤ h < j
- if a[h] <= x {
- i = h + 1
- } else {
- j = h
- }
- }
- return i - 1
-}
-
func searchLineInfos(a []lineInfo, x int) int {
return sort.Search(len(a), func(i int) bool { return a[i].Offset > x }) - 1
}
return
}
+func (f *File) position(p Pos) (pos Position) {
+ offset := int(p) - f.base
+ pos.Offset = offset
+ pos.Filename, pos.Line, pos.Column = f.info(offset)
+ return
+}
+
+// Position returns the Position value for the given file position p;
+// p must be a Pos value in that file or NoPos.
+//
+func (f *File) Position(p Pos) (pos Position) {
+ if p != NoPos {
+ if int(p) < f.base || int(p) > f.base+f.size {
+ panic("illegal Pos value")
+ }
+ pos = f.position(p)
+ }
+ return
+}
+
+// -----------------------------------------------------------------------------
+// FileSet
+
// A FileSet represents a set of source files.
// Methods of file sets are synchronized; multiple goroutines
// may invoke them concurrently.
return f
}
-// Files returns the files added to the file set.
-func (s *FileSet) Files() <-chan *File {
- ch := make(chan *File)
- go func() {
- for i := 0; ; i++ {
- var f *File
- s.mutex.RLock()
- if i < len(s.files) {
- f = s.files[i]
- }
- s.mutex.RUnlock()
- if f == nil {
- break
- }
- ch <- f
+// Iterate calls f for the files in the file set in the order they were added
+// until f returns false.
+//
+func (s *FileSet) Iterate(f func(*File) bool) {
+ for i := 0; ; i++ {
+ var file *File
+ s.mutex.RLock()
+ if i < len(s.files) {
+ file = s.files[i]
+ }
+ s.mutex.RUnlock()
+ if file == nil || !f(file) {
+ break
}
- close(ch)
- }()
- return ch
+ }
+}
+
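A hedged sketch of the callback-based replacement for the old channel iterator; the file names and sizes are arbitrary:

	package main

	import (
		"fmt"
		"go/token"
	)

	func main() {
		fset := token.NewFileSet()
		fset.AddFile("a.go", fset.Base(), 100)
		fset.AddFile("b.go", fset.Base(), 200)

		fset.Iterate(func(f *token.File) bool {
			fmt.Println(f.Name(), f.Size())
			return true // return false to stop early
		})
	}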
+func searchFiles(a []*File, x int) int {
+ return sort.Search(len(a), func(i int) bool { return a[i].base > x }) - 1
+}
+
+func (s *FileSet) file(p Pos) *File {
+ // common case: p is in last file
+ if f := s.last; f != nil && f.base <= int(p) && int(p) <= f.base+f.size {
+ return f
+ }
+ // p is not in last file - search all files
+ if i := searchFiles(s.files, int(p)); i >= 0 {
+ f := s.files[i]
+ // f.base <= int(p) by definition of searchFiles
+ if int(p) <= f.base+f.size {
+ s.last = f
+ return f
+ }
+ }
+ return nil
+}
+
+// File returns the file that contains the position p.
+// If no such file is found (for instance for p == NoPos),
+// the result is nil.
+//
+func (s *FileSet) File(p Pos) (f *File) {
+ if p != NoPos {
+ s.mutex.RLock()
+ f = s.file(p)
+ s.mutex.RUnlock()
+ }
+ return
+}
+
+// Position converts a Pos in the fileset into a general Position.
+func (s *FileSet) Position(p Pos) (pos Position) {
+ if p != NoPos {
+ s.mutex.RLock()
+ if f := s.file(p); f != nil {
+ pos = f.position(p)
+ }
+ s.mutex.RUnlock()
+ }
+ return
+}
+
+// -----------------------------------------------------------------------------
+// Helper functions
+
+func searchInts(a []int, x int) int {
+ // This function body is a manually inlined version of:
+ //
+ // return sort.Search(len(a), func(i int) bool { return a[i] > x }) - 1
+ //
+ // With better compiler optimizations, this may not be needed in the
+ // future, but at the moment this change improves the go/printer
+ // benchmark performance by ~30%. This has a direct impact on the
+ // speed of gofmt and thus seems worthwhile (2011-04-29).
+ // TODO(gri): Remove this when compilers have caught up.
+ i, j := 0, len(a)
+ for i < j {
+ h := i + (j-i)/2 // avoid overflow when computing h
+ // i ≤ h < j
+ if a[h] <= x {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ return i - 1
}
for i, test := range tests {
fset.AddFile(test.filename, fset.Base(), test.size)
j := 0
- for g := range fset.Files() {
- if g.Name() != tests[j].filename {
- t.Errorf("expected filename = %s; got %s", tests[j].filename, g.Name())
+ fset.Iterate(func(f *File) bool {
+ if f.Name() != tests[j].filename {
+ t.Errorf("expected filename = %s; got %s", tests[j].filename, f.Name())
}
j++
- }
+ return true
+ })
if j != i+1 {
t.Errorf("expected %d files; got %d", i+1, j)
}
func (d *digest) Size() int { return Size }
+func (d *digest) BlockSize() int { return 1 }
+
// Add p to the running checksum a, b.
func update(a, b uint32, p []byte) (aa, bb uint32) {
for _, pi := range p {
func (d *digest) Size() int { return Size }
+func (d *digest) BlockSize() int { return 1 }
+
func (d *digest) Reset() { d.crc = 0 }
func update(crc uint32, tab *Table, p []byte) uint32 {
func (d *digest) Size() int { return Size }
+func (d *digest) BlockSize() int { return 1 }
+
func (d *digest) Reset() { d.crc = 0 }
func update(crc uint64, tab *Table, p []byte) uint64 {
func (s *sum64) Size() int { return 8 }
func (s *sum64a) Size() int { return 8 }
+func (s *sum32) BlockSize() int { return 1 }
+func (s *sum32a) BlockSize() int { return 1 }
+func (s *sum64) BlockSize() int { return 1 }
+func (s *sum64a) BlockSize() int { return 1 }
+
func (s *sum32) Sum(in []byte) []byte {
v := uint32(*s)
in = append(in, byte(v>>24))
// Size returns the number of bytes Sum will return.
Size() int
+
+ // BlockSize returns the hash's underlying block size.
+ // The Write method must be able to accept any amount
+ // of data, but it may operate more efficiently if all writes
+ // are a multiple of the block size.
+ BlockSize() int
}
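Every hash.Hash implementation now has to report a block size as well. A toy sketch (an XOR checksum invented for illustration, not a real hash):

	package main

	import (
		"fmt"
		"hash"
	)

	// xorHash XORs all written bytes together; it exists only to show the interface.
	type xorHash struct{ sum byte }

	func (h *xorHash) Write(p []byte) (int, error) {
		for _, b := range p {
			h.sum ^= b
		}
		return len(p), nil
	}
	func (h *xorHash) Sum(in []byte) []byte { return append(in, h.sum) }
	func (h *xorHash) Reset()               { h.sum = 0 }
	func (h *xorHash) Size() int            { return 1 }
	func (h *xorHash) BlockSize() int       { return 1 } // byte-oriented, like the checksum packages above

	func main() {
		var h hash.Hash = &xorHash{}
		h.Write([]byte("hello"))
		fmt.Printf("%x\n", h.Sum(nil))
	}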
// Hash32 is the common interface implemented by all 32-bit hash functions.
}
if breakout[p.tok.Data] {
for i := len(p.oe) - 1; i >= 0; i-- {
- // TODO: HTML, MathML integration points.
- if p.oe[i].Namespace == "" {
+ // TODO: MathML integration points.
+ if p.oe[i].Namespace == "" || htmlIntegrationPoint(p.oe[i]) {
p.oe = p.oe[:i+1]
break
}
{"tests4.dat", -1},
{"tests5.dat", -1},
{"tests6.dat", -1},
- {"tests10.dat", 33},
+ {"tests10.dat", 35},
}
for _, tf := range testFiles {
f, err := os.Open("testdata/webkit/" + tf.filename)
import "text/template"
...
- t, err := (&template.Set{}).Parse(`{{define "T"}}Hello, {{.}}!{{end}}`)
- err = t.Execute(out, "T", "<script>alert('you have been pwned')</script>")
+ t, err := template.New("foo").Parse(`{{define "T"}}Hello, {{.}}!{{end}}`)
+ err = t.ExecuteTemplate(out, "T", "<script>alert('you have been pwned')</script>")
produces
import "html/template"
...
- t, err := (&template.Set{}).Parse(`{{define "T"}}Hello, {{.}}!{{end}}`)
- err = t.Execute(out, "T", "<script>alert('you have been pwned')</script>")
+ t, err := template.New("foo").Parse(`{{define "T"}}Hello, {{.}}!{{end}}`)
+ err = t.ExecuteTemplate(out, "T", "<script>alert('you have been pwned')</script>")
produces safe, escaped HTML output
- Hello, &lt;script&gt;alert('you have been pwned')&lt;/script&gt;!
+ Hello, &lt;script&gt;alert(&#39;you have been pwned&#39;)&lt;/script&gt;!
Contexts
<a href="/search?q={{.}}">{{.}}</a>
-At parse time each {{.}} is overwritten to add escaping functions as necessary,
-in this case,
+At parse time each {{.}} is overwritten to add escaping functions as necessary.
+In this case it becomes
<a href="/search?q={{. | urlquery}}">{{. | html}}</a>
},
{
`<a href="{{if .F}}/foo?a={{else}}/bar/{{end}}{{.H}}">`,
- "z:1: (action: [(command: [F=[H]])]) appears in an ambiguous URL context",
+ "z:1: {{.H}} appears in an ambiguous URL context",
},
{
`<a onclick="alert('Hello \`,
}{
{
"{{.X}}",
- "[(command: [F=[X]])]",
+ ".X",
[]string{},
},
{
"{{.X | html}}",
- "[(command: [F=[X]]) (command: [I=html])]",
+ ".X | html",
[]string{},
},
{
"{{.X}}",
- "[(command: [F=[X]]) (command: [I=html])]",
+ ".X | html",
[]string{"html"},
},
{
"{{.X | html}}",
- "[(command: [F=[X]]) (command: [I=html]) (command: [I=urlquery])]",
+ ".X | html | urlquery",
[]string{"urlquery"},
},
{
"{{.X | html | urlquery}}",
- "[(command: [F=[X]]) (command: [I=html]) (command: [I=urlquery])]",
+ ".X | html | urlquery",
[]string{"urlquery"},
},
{
"{{.X | html | urlquery}}",
- "[(command: [F=[X]]) (command: [I=html]) (command: [I=urlquery])]",
+ ".X | html | urlquery",
[]string{"html", "urlquery"},
},
{
"{{.X | html | urlquery}}",
- "[(command: [F=[X]]) (command: [I=html]) (command: [I=urlquery])]",
+ ".X | html | urlquery",
[]string{"html"},
},
{
"{{.X | urlquery}}",
- "[(command: [F=[X]]) (command: [I=html]) (command: [I=urlquery])]",
+ ".X | html | urlquery",
[]string{"html", "urlquery"},
},
{
"{{.X | html | print}}",
- "[(command: [F=[X]]) (command: [I=urlquery]) (command: [I=html]) (command: [I=print])]",
+ ".X | urlquery | html | print",
[]string{"urlquery", "html"},
},
}
- for _, test := range tests {
+ for i, test := range tests {
tmpl := template.Must(template.New("test").Parse(test.input))
action, ok := (tmpl.Tree.Root.Nodes[0].(*parse.ActionNode))
if !ok {
- t.Errorf("First node is not an action: %s", test.input)
+ t.Errorf("#%d: First node is not an action: %s", i, test.input)
continue
}
pipe := action.Pipe
ensurePipelineContains(pipe, test.ids)
got := pipe.String()
if got != test.output {
- t.Errorf("%s, %v: want\n\t%s\ngot\n\t%s", test.input, test.ids, test.output, got)
+ t.Errorf("#%d: %s, %v: want\n\t%s\ngot\n\t%s", i, test.input, test.ids, test.output, got)
}
}
}
return m.f(c)
}
-// RGBAModel is the Model for RGBA colors.
-var RGBAModel Model = ModelFunc(func(c Color) Color {
+// Models for the standard color types.
+var (
+ RGBAModel Model = ModelFunc(rgbaModel)
+ RGBA64Model Model = ModelFunc(rgba64Model)
+ NRGBAModel Model = ModelFunc(nrgbaModel)
+ NRGBA64Model Model = ModelFunc(nrgba64Model)
+ AlphaModel Model = ModelFunc(alphaModel)
+ Alpha16Model Model = ModelFunc(alpha16Model)
+ GrayModel Model = ModelFunc(grayModel)
+ Gray16Model Model = ModelFunc(gray16Model)
+)
+
+func rgbaModel(c Color) Color {
if _, ok := c.(RGBA); ok {
return c
}
r, g, b, a := c.RGBA()
return RGBA{uint8(r >> 8), uint8(g >> 8), uint8(b >> 8), uint8(a >> 8)}
-})
+}
-// RGBAModel is the Model for RGBA64 colors.
-var RGBA64Model Model = ModelFunc(func(c Color) Color {
+func rgba64Model(c Color) Color {
if _, ok := c.(RGBA64); ok {
return c
}
r, g, b, a := c.RGBA()
return RGBA64{uint16(r), uint16(g), uint16(b), uint16(a)}
-})
+}
-// NRGBAModel is the Model for NRGBA colors.
-var NRGBAModel Model = ModelFunc(func(c Color) Color {
+func nrgbaModel(c Color) Color {
if _, ok := c.(NRGBA); ok {
return c
}
g = (g * 0xffff) / a
b = (b * 0xffff) / a
return NRGBA{uint8(r >> 8), uint8(g >> 8), uint8(b >> 8), uint8(a >> 8)}
-})
+}
-// NRGBAModel is the Model for NRGBA64 colors.
-var NRGBA64Model Model = ModelFunc(func(c Color) Color {
+func nrgba64Model(c Color) Color {
if _, ok := c.(NRGBA64); ok {
return c
}
g = (g * 0xffff) / a
b = (b * 0xffff) / a
return NRGBA64{uint16(r), uint16(g), uint16(b), uint16(a)}
-})
+}
-// AlphaModel is the Model for Alpha colors.
-var AlphaModel Model = ModelFunc(func(c Color) Color {
+func alphaModel(c Color) Color {
if _, ok := c.(Alpha); ok {
return c
}
_, _, _, a := c.RGBA()
return Alpha{uint8(a >> 8)}
-})
+}
-// Alpha16Model is the Model for Alpha16 colors.
-var Alpha16Model Model = ModelFunc(func(c Color) Color {
+func alpha16Model(c Color) Color {
if _, ok := c.(Alpha16); ok {
return c
}
_, _, _, a := c.RGBA()
return Alpha16{uint16(a)}
-})
+}
-// GrayModel is the Model for Gray colors.
-var GrayModel Model = ModelFunc(func(c Color) Color {
+func grayModel(c Color) Color {
if _, ok := c.(Gray); ok {
return c
}
r, g, b, _ := c.RGBA()
y := (299*r + 587*g + 114*b + 500) / 1000
return Gray{uint8(y >> 8)}
-})
+}
-// Gray16Model is the Model for Gray16 colors.
-var Gray16Model Model = ModelFunc(func(c Color) Color {
+func gray16Model(c Color) Color {
if _, ok := c.(Gray16); ok {
return c
}
r, g, b, _ := c.RGBA()
y := (299*r + 587*g + 114*b + 500) / 1000
return Gray16{uint16(y)}
-})
+}
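ModelFunc remains the hook for conversions defined outside the package. A hedged sketch of a custom model (the 1-bit threshold is arbitrary):

	package main

	import (
		"fmt"
		"image/color"
	)

	// bwModel converts any color to pure black or pure white.
	var bwModel color.Model = color.ModelFunc(func(c color.Color) color.Color {
		g := color.GrayModel.Convert(c).(color.Gray)
		if g.Y < 0x80 {
			return color.Gray{Y: 0x00}
		}
		return color.Gray{Y: 0xff}
	})

	func main() {
		fmt.Println(bwModel.Convert(color.RGBA{R: 200, G: 10, B: 10, A: 255}))
	}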
// Palette is a palette of colors.
type Palette []Color
return ret
}
+// Standard colors.
var (
- // Black is an opaque black Color.
- Black = Gray16{0}
- // White is an opaque white Color.
- White = Gray16{0xffff}
- // Transparent is a fully transparent Color.
+ Black = Gray16{0}
+ White = Gray16{0xffff}
Transparent = Alpha16{0}
- // Opaque is a fully opaque Color.
- Opaque = Alpha16{0xffff}
+ Opaque = Alpha16{0xffff}
)
package color
-// RGBToYCbCr converts an RGB triple to a Y'CbCr triple. All components lie
-// within the range [0, 255].
+// RGBToYCbCr converts an RGB triple to a Y'CbCr triple.
func RGBToYCbCr(r, g, b uint8) (uint8, uint8, uint8) {
// The JFIF specification says:
// Y' = 0.2990*R + 0.5870*G + 0.1140*B
return uint8(yy), uint8(cb), uint8(cr)
}
-// YCbCrToRGB converts a Y'CbCr triple to an RGB triple. All components lie
-// within the range [0, 255].
+// YCbCrToRGB converts a Y'CbCr triple to an RGB triple.
func YCbCrToRGB(y, cb, cr uint8) (uint8, uint8, uint8) {
// The JFIF specification says:
// R = Y' + 1.40200*(Cr-128)
}
// YCbCrModel is the Model for Y'CbCr colors.
-var YCbCrModel Model = ModelFunc(func(c Color) Color {
+var YCbCrModel Model = ModelFunc(yCbCrModel)
+
+func yCbCrModel(c Color) Color {
if _, ok := c.(YCbCr); ok {
return c
}
r, g, b, _ := c.RGBA()
y, u, v := RGBToYCbCr(uint8(r>>8), uint8(g>>8), uint8(b>>8))
return YCbCr{y, u, v}
-})
+}
sr, sg, sb, sa := src.RGBA()
// The 0x101 is here for the same reason as in drawRGBA.
a := (m - sa) * 0x101
- i0 := (r.Min.Y-dst.Rect.Min.Y)*dst.Stride + (r.Min.X-dst.Rect.Min.X)*4
+ i0 := dst.PixOffset(r.Min.X, r.Min.Y)
i1 := i0 + r.Dx()*4
for y := r.Min.Y; y != r.Max.Y; y++ {
for i := i0; i < i1; i += 4 {
// The built-in copy function is faster than a straightforward for loop to fill the destination with
// the color, but copy requires a slice source. We therefore use a for loop to fill the first row, and
// then use the first row as the slice source for the remaining rows.
- i0 := (r.Min.Y-dst.Rect.Min.Y)*dst.Stride + (r.Min.X-dst.Rect.Min.X)*4
+ i0 := dst.PixOffset(r.Min.X, r.Min.Y)
i1 := i0 + r.Dx()*4
for i := i0; i < i1; i += 4 {
dst.Pix[i+0] = uint8(sr >> 8)
func drawCopyOver(dst *image.RGBA, r image.Rectangle, src *image.RGBA, sp image.Point) {
dx, dy := r.Dx(), r.Dy()
- d0 := (r.Min.Y-dst.Rect.Min.Y)*dst.Stride + (r.Min.X-dst.Rect.Min.X)*4
- s0 := (sp.Y-src.Rect.Min.Y)*src.Stride + (sp.X-src.Rect.Min.X)*4
+ d0 := dst.PixOffset(r.Min.X, r.Min.Y)
+ s0 := src.PixOffset(sp.X, sp.Y)
var (
ddelta, sdelta int
i0, i1, idelta int
func drawCopySrc(dst *image.RGBA, r image.Rectangle, src *image.RGBA, sp image.Point) {
n, dy := 4*r.Dx(), r.Dy()
- d0 := (r.Min.Y-dst.Rect.Min.Y)*dst.Stride + (r.Min.X-dst.Rect.Min.X)*4
- s0 := (sp.Y-src.Rect.Min.Y)*src.Stride + (sp.X-src.Rect.Min.X)*4
+ d0 := dst.PixOffset(r.Min.X, r.Min.Y)
+ s0 := src.PixOffset(sp.X, sp.Y)
var ddelta, sdelta int
if r.Min.Y <= sp.Y {
ddelta = dst.Stride
func drawYCbCr(dst *image.RGBA, r image.Rectangle, src *image.YCbCr, sp image.Point) {
// An image.YCbCr is always fully opaque, and so if the mask is implicitly nil
// (i.e. fully opaque) then the op is effectively always Src.
- var (
- yy, cb, cr uint8
- )
x0 := (r.Min.X - dst.Rect.Min.X) * 4
x1 := (r.Max.X - dst.Rect.Min.X) * 4
y0 := r.Min.Y - dst.Rect.Min.Y
case image.YCbCrSubsampleRatio422:
for y, sy := y0, sp.Y; y != y1; y, sy = y+1, sy+1 {
dpix := dst.Pix[y*dst.Stride:]
- for x, sx := x0, sp.X; x != x1; x, sx = x+4, sx+1 {
- i := sx / 2
- yy = src.Y[sy*src.YStride+sx]
- cb = src.Cb[sy*src.CStride+i]
- cr = src.Cr[sy*src.CStride+i]
- rr, gg, bb := color.YCbCrToRGB(yy, cb, cr)
+ yi := (sy-src.Rect.Min.Y)*src.YStride + (sp.X - src.Rect.Min.X)
+ ciBase := (sy-src.Rect.Min.Y)*src.CStride - src.Rect.Min.X/2
+ for x, sx := x0, sp.X; x != x1; x, sx, yi = x+4, sx+1, yi+1 {
+ ci := ciBase + sx/2
+ rr, gg, bb := color.YCbCrToRGB(src.Y[yi], src.Cb[ci], src.Cr[ci])
dpix[x+0] = rr
dpix[x+1] = gg
dpix[x+2] = bb
case image.YCbCrSubsampleRatio420:
for y, sy := y0, sp.Y; y != y1; y, sy = y+1, sy+1 {
dpix := dst.Pix[y*dst.Stride:]
- for x, sx := x0, sp.X; x != x1; x, sx = x+4, sx+1 {
- i, j := sx/2, sy/2
- yy = src.Y[sy*src.YStride+sx]
- cb = src.Cb[j*src.CStride+i]
- cr = src.Cr[j*src.CStride+i]
- rr, gg, bb := color.YCbCrToRGB(yy, cb, cr)
+ yi := (sy-src.Rect.Min.Y)*src.YStride + (sp.X - src.Rect.Min.X)
+ ciBase := (sy/2-src.Rect.Min.Y/2)*src.CStride - src.Rect.Min.X/2
+ for x, sx := x0, sp.X; x != x1; x, sx, yi = x+4, sx+1, yi+1 {
+ ci := ciBase + sx/2
+ rr, gg, bb := color.YCbCrToRGB(src.Y[yi], src.Cb[ci], src.Cr[ci])
dpix[x+0] = rr
dpix[x+1] = gg
dpix[x+2] = bb
// Default to 4:4:4 subsampling.
for y, sy := y0, sp.Y; y != y1; y, sy = y+1, sy+1 {
dpix := dst.Pix[y*dst.Stride:]
- for x, sx := x0, sp.X; x != x1; x, sx = x+4, sx+1 {
- yy = src.Y[sy*src.YStride+sx]
- cb = src.Cb[sy*src.CStride+sx]
- cr = src.Cr[sy*src.CStride+sx]
- rr, gg, bb := color.YCbCrToRGB(yy, cb, cr)
+ yi := (sy-src.Rect.Min.Y)*src.YStride + (sp.X - src.Rect.Min.X)
+ ci := (sy-src.Rect.Min.Y)*src.CStride + (sp.X - src.Rect.Min.X)
+ for x := x0; x != x1; x, yi, ci = x+4, yi+1, ci+1 {
+ rr, gg, bb := color.YCbCrToRGB(src.Y[yi], src.Cb[ci], src.Cr[ci])
dpix[x+0] = rr
dpix[x+1] = gg
dpix[x+2] = bb
}
func drawGlyphOver(dst *image.RGBA, r image.Rectangle, src *image.Uniform, mask *image.Alpha, mp image.Point) {
- i0 := (r.Min.Y-dst.Rect.Min.Y)*dst.Stride + (r.Min.X-dst.Rect.Min.X)*4
+ i0 := dst.PixOffset(r.Min.X, r.Min.Y)
i1 := i0 + r.Dx()*4
- mi0 := (mp.Y-mask.Rect.Min.Y)*mask.Stride + mp.X - mask.Rect.Min.X
+ mi0 := mask.PixOffset(mp.X, mp.Y)
sr, sg, sb, sa := src.RGBA()
for y, my := r.Min.Y, mp.Y; y != r.Max.Y; y, my = y+1, my+1 {
for i, mi := i0, mi0; i < i1; i, mi = i+4, mi+1 {
sx0 := sp.X + x0 - r.Min.X
mx0 := mp.X + x0 - r.Min.X
sx1 := sx0 + (x1 - x0)
- i0 := (y0-dst.Rect.Min.Y)*dst.Stride + (x0-dst.Rect.Min.X)*4
+ i0 := dst.PixOffset(x0, y0)
di := dx * 4
for y := y0; y != y1; y, sy, my = y+dy, sy+dy, my+dy {
for i, sx, mx := i0, sx0, mx0; sx != sx1; i, sx, mx = i+di, sx+dx, mx+dx {
}
}
-// Add returns the rectangle r translated by -p.
+// Sub returns the rectangle r translated by -p.
func (r Rectangle) Sub(p Point) Rectangle {
return Rectangle{
Point{r.Min.X - p.X, r.Min.Y - p.Y},
if !(Point{x, y}.In(p.Rect)) {
return color.RGBA{}
}
- i := (y-p.Rect.Min.Y)*p.Stride + (x-p.Rect.Min.X)*4
+ i := p.PixOffset(x, y)
return color.RGBA{p.Pix[i+0], p.Pix[i+1], p.Pix[i+2], p.Pix[i+3]}
}
+// PixOffset returns the index of the first element of Pix that corresponds to
+// the pixel at (x, y).
+func (p *RGBA) PixOffset(x, y int) int {
+ return (y-p.Rect.Min.Y)*p.Stride + (x-p.Rect.Min.X)*4
+}
+
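PixOffset makes manual walks over Pix straightforward. A hedged sketch that writes one row of an RGBA image directly (image size and coordinates are arbitrary):

	package main

	import (
		"fmt"
		"image"
		"image/color"
	)

	func main() {
		m := image.NewRGBA(image.Rect(0, 0, 4, 4))
		i := m.PixOffset(0, 2) // first byte of the pixel at (0, 2)
		for x := 0; x < 4; x++ {
			m.Pix[i+0] = 0xff // R
			m.Pix[i+3] = 0xff // A; G and B stay zero
			i += 4            // 4 bytes per RGBA pixel
		}
		fmt.Println(m.At(1, 2) == color.RGBA{R: 0xff, A: 0xff})
	}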
func (p *RGBA) Set(x, y int, c color.Color) {
if !(Point{x, y}.In(p.Rect)) {
return
}
- i := (y-p.Rect.Min.Y)*p.Stride + (x-p.Rect.Min.X)*4
+ i := p.PixOffset(x, y)
c1 := color.RGBAModel.Convert(c).(color.RGBA)
p.Pix[i+0] = c1.R
p.Pix[i+1] = c1.G
if !(Point{x, y}.In(p.Rect)) {
return
}
- i := (y-p.Rect.Min.Y)*p.Stride + (x-p.Rect.Min.X)*4
+ i := p.PixOffset(x, y)
p.Pix[i+0] = c.R
p.Pix[i+1] = c.G
p.Pix[i+2] = c.B
if r.Empty() {
return &RGBA{}
}
- i := (r.Min.Y-p.Rect.Min.Y)*p.Stride + (r.Min.X-p.Rect.Min.X)*4
+ i := p.PixOffset(r.Min.X, r.Min.Y)
return &RGBA{
Pix: p.Pix[i:],
Stride: p.Stride,
return true
}
-// NewRGBA returns a new RGBA with the given width and height.
+// NewRGBA returns a new RGBA with the given bounds.
func NewRGBA(r Rectangle) *RGBA {
w, h := r.Dx(), r.Dy()
buf := make([]uint8, 4*w*h)
if !(Point{x, y}.In(p.Rect)) {
return color.RGBA64{}
}
- i := (y-p.Rect.Min.Y)*p.Stride + (x-p.Rect.Min.X)*8
+ i := p.PixOffset(x, y)
return color.RGBA64{
uint16(p.Pix[i+0])<<8 | uint16(p.Pix[i+1]),
uint16(p.Pix[i+2])<<8 | uint16(p.Pix[i+3]),
}
}
+// PixOffset returns the index of the first element of Pix that corresponds to
+// the pixel at (x, y).
+func (p *RGBA64) PixOffset(x, y int) int {
+ return (y-p.Rect.Min.Y)*p.Stride + (x-p.Rect.Min.X)*8
+}
+
func (p *RGBA64) Set(x, y int, c color.Color) {
if !(Point{x, y}.In(p.Rect)) {
return
}
- i := (y-p.Rect.Min.Y)*p.Stride + (x-p.Rect.Min.X)*8
+ i := p.PixOffset(x, y)
c1 := color.RGBA64Model.Convert(c).(color.RGBA64)
p.Pix[i+0] = uint8(c1.R >> 8)
p.Pix[i+1] = uint8(c1.R)
if !(Point{x, y}.In(p.Rect)) {
return
}
- i := (y-p.Rect.Min.Y)*p.Stride + (x-p.Rect.Min.X)*8
+ i := p.PixOffset(x, y)
p.Pix[i+0] = uint8(c.R >> 8)
p.Pix[i+1] = uint8(c.R)
p.Pix[i+2] = uint8(c.G >> 8)
if r.Empty() {
return &RGBA64{}
}
- i := (r.Min.Y-p.Rect.Min.Y)*p.Stride + (r.Min.X-p.Rect.Min.X)*8
+ i := p.PixOffset(r.Min.X, r.Min.Y)
return &RGBA64{
Pix: p.Pix[i:],
Stride: p.Stride,
return true
}
-// NewRGBA64 returns a new RGBA64 with the given width and height.
+// NewRGBA64 returns a new RGBA64 with the given bounds.
func NewRGBA64(r Rectangle) *RGBA64 {
w, h := r.Dx(), r.Dy()
pix := make([]uint8, 8*w*h)
if !(Point{x, y}.In(p.Rect)) {
return color.NRGBA{}
}
- i := (y-p.Rect.Min.Y)*p.Stride + (x-p.Rect.Min.X)*4
+ i := p.PixOffset(x, y)
return color.NRGBA{p.Pix[i+0], p.Pix[i+1], p.Pix[i+2], p.Pix[i+3]}
}
+// PixOffset returns the index of the first element of Pix that corresponds to
+// the pixel at (x, y).
+func (p *NRGBA) PixOffset(x, y int) int {
+ return (y-p.Rect.Min.Y)*p.Stride + (x-p.Rect.Min.X)*4
+}
+
func (p *NRGBA) Set(x, y int, c color.Color) {
if !(Point{x, y}.In(p.Rect)) {
return
}
- i := (y-p.Rect.Min.Y)*p.Stride + (x-p.Rect.Min.X)*4
+ i := p.PixOffset(x, y)
c1 := color.NRGBAModel.Convert(c).(color.NRGBA)
p.Pix[i+0] = c1.R
p.Pix[i+1] = c1.G
if !(Point{x, y}.In(p.Rect)) {
return
}
- i := (y-p.Rect.Min.Y)*p.Stride + (x-p.Rect.Min.X)*4
+ i := p.PixOffset(x, y)
p.Pix[i+0] = c.R
p.Pix[i+1] = c.G
p.Pix[i+2] = c.B
if r.Empty() {
return &NRGBA{}
}
- i := (r.Min.Y-p.Rect.Min.Y)*p.Stride + (r.Min.X-p.Rect.Min.X)*4
+ i := p.PixOffset(r.Min.X, r.Min.Y)
return &NRGBA{
Pix: p.Pix[i:],
Stride: p.Stride,
return true
}
-// NewNRGBA returns a new NRGBA with the given width and height.
+// NewNRGBA returns a new NRGBA with the given bounds.
func NewNRGBA(r Rectangle) *NRGBA {
w, h := r.Dx(), r.Dy()
pix := make([]uint8, 4*w*h)
if !(Point{x, y}.In(p.Rect)) {
return color.NRGBA64{}
}
- i := (y-p.Rect.Min.Y)*p.Stride + (x-p.Rect.Min.X)*8
+ i := p.PixOffset(x, y)
return color.NRGBA64{
uint16(p.Pix[i+0])<<8 | uint16(p.Pix[i+1]),
uint16(p.Pix[i+2])<<8 | uint16(p.Pix[i+3]),
}
}
+// PixOffset returns the index of the first element of Pix that corresponds to
+// the pixel at (x, y).
+func (p *NRGBA64) PixOffset(x, y int) int {
+ return (y-p.Rect.Min.Y)*p.Stride + (x-p.Rect.Min.X)*8
+}
+
func (p *NRGBA64) Set(x, y int, c color.Color) {
if !(Point{x, y}.In(p.Rect)) {
return
}
- i := (y-p.Rect.Min.Y)*p.Stride + (x-p.Rect.Min.X)*8
+ i := p.PixOffset(x, y)
c1 := color.NRGBA64Model.Convert(c).(color.NRGBA64)
p.Pix[i+0] = uint8(c1.R >> 8)
p.Pix[i+1] = uint8(c1.R)
if !(Point{x, y}.In(p.Rect)) {
return
}
- i := (y-p.Rect.Min.Y)*p.Stride + (x-p.Rect.Min.X)*8
+ i := p.PixOffset(x, y)
p.Pix[i+0] = uint8(c.R >> 8)
p.Pix[i+1] = uint8(c.R)
p.Pix[i+2] = uint8(c.G >> 8)
if r.Empty() {
return &NRGBA64{}
}
- i := (r.Min.Y-p.Rect.Min.Y)*p.Stride + (r.Min.X-p.Rect.Min.X)*8
+ i := p.PixOffset(r.Min.X, r.Min.Y)
return &NRGBA64{
Pix: p.Pix[i:],
Stride: p.Stride,
return true
}
-// NewNRGBA64 returns a new NRGBA64 with the given width and height.
+// NewNRGBA64 returns a new NRGBA64 with the given bounds.
func NewNRGBA64(r Rectangle) *NRGBA64 {
w, h := r.Dx(), r.Dy()
pix := make([]uint8, 8*w*h)
if !(Point{x, y}.In(p.Rect)) {
return color.Alpha{}
}
- i := (y-p.Rect.Min.Y)*p.Stride + (x - p.Rect.Min.X)
+ i := p.PixOffset(x, y)
return color.Alpha{p.Pix[i]}
}
+// PixOffset returns the index of the first element of Pix that corresponds to
+// the pixel at (x, y).
+func (p *Alpha) PixOffset(x, y int) int {
+ return (y-p.Rect.Min.Y)*p.Stride + (x-p.Rect.Min.X)*1
+}
+
func (p *Alpha) Set(x, y int, c color.Color) {
if !(Point{x, y}.In(p.Rect)) {
return
}
- i := (y-p.Rect.Min.Y)*p.Stride + (x - p.Rect.Min.X)
+ i := p.PixOffset(x, y)
p.Pix[i] = color.AlphaModel.Convert(c).(color.Alpha).A
}
if !(Point{x, y}.In(p.Rect)) {
return
}
- i := (y-p.Rect.Min.Y)*p.Stride + (x - p.Rect.Min.X)
+ i := p.PixOffset(x, y)
p.Pix[i] = c.A
}
if r.Empty() {
return &Alpha{}
}
- i := (r.Min.Y-p.Rect.Min.Y)*p.Stride + (r.Min.X-p.Rect.Min.X)*1
+ i := p.PixOffset(r.Min.X, r.Min.Y)
return &Alpha{
Pix: p.Pix[i:],
Stride: p.Stride,
return true
}
-// NewAlpha returns a new Alpha with the given width and height.
+// NewAlpha returns a new Alpha with the given bounds.
func NewAlpha(r Rectangle) *Alpha {
w, h := r.Dx(), r.Dy()
pix := make([]uint8, 1*w*h)
if !(Point{x, y}.In(p.Rect)) {
return color.Alpha16{}
}
- i := (y-p.Rect.Min.Y)*p.Stride + (x-p.Rect.Min.X)*2
+ i := p.PixOffset(x, y)
return color.Alpha16{uint16(p.Pix[i+0])<<8 | uint16(p.Pix[i+1])}
}
+// PixOffset returns the index of the first element of Pix that corresponds to
+// the pixel at (x, y).
+func (p *Alpha16) PixOffset(x, y int) int {
+ return (y-p.Rect.Min.Y)*p.Stride + (x-p.Rect.Min.X)*2
+}
+
func (p *Alpha16) Set(x, y int, c color.Color) {
if !(Point{x, y}.In(p.Rect)) {
return
}
- i := (y-p.Rect.Min.Y)*p.Stride + (x-p.Rect.Min.X)*2
+ i := p.PixOffset(x, y)
c1 := color.Alpha16Model.Convert(c).(color.Alpha16)
p.Pix[i+0] = uint8(c1.A >> 8)
p.Pix[i+1] = uint8(c1.A)
if !(Point{x, y}.In(p.Rect)) {
return
}
- i := (y-p.Rect.Min.Y)*p.Stride + (x-p.Rect.Min.X)*2
+ i := p.PixOffset(x, y)
p.Pix[i+0] = uint8(c.A >> 8)
p.Pix[i+1] = uint8(c.A)
}
if r.Empty() {
return &Alpha16{}
}
- i := (r.Min.Y-p.Rect.Min.Y)*p.Stride + (r.Min.X-p.Rect.Min.X)*2
+ i := p.PixOffset(r.Min.X, r.Min.Y)
return &Alpha16{
Pix: p.Pix[i:],
Stride: p.Stride,
return true
}
-// NewAlpha16 returns a new Alpha16 with the given width and height.
+// NewAlpha16 returns a new Alpha16 with the given bounds.
func NewAlpha16(r Rectangle) *Alpha16 {
w, h := r.Dx(), r.Dy()
pix := make([]uint8, 2*w*h)
if !(Point{x, y}.In(p.Rect)) {
return color.Gray{}
}
- i := (y-p.Rect.Min.Y)*p.Stride + (x - p.Rect.Min.X)
+ i := p.PixOffset(x, y)
return color.Gray{p.Pix[i]}
}
+// PixOffset returns the index of the first element of Pix that corresponds to
+// the pixel at (x, y).
+func (p *Gray) PixOffset(x, y int) int {
+ return (y-p.Rect.Min.Y)*p.Stride + (x-p.Rect.Min.X)*1
+}
+
func (p *Gray) Set(x, y int, c color.Color) {
if !(Point{x, y}.In(p.Rect)) {
return
}
- i := (y-p.Rect.Min.Y)*p.Stride + (x - p.Rect.Min.X)
+ i := p.PixOffset(x, y)
p.Pix[i] = color.GrayModel.Convert(c).(color.Gray).Y
}
if !(Point{x, y}.In(p.Rect)) {
return
}
- i := (y-p.Rect.Min.Y)*p.Stride + (x - p.Rect.Min.X)
+ i := p.PixOffset(x, y)
p.Pix[i] = c.Y
}
if r.Empty() {
return &Gray{}
}
- i := (r.Min.Y-p.Rect.Min.Y)*p.Stride + (r.Min.X-p.Rect.Min.X)*1
+ i := p.PixOffset(r.Min.X, r.Min.Y)
return &Gray{
Pix: p.Pix[i:],
Stride: p.Stride,
return true
}
-// NewGray returns a new Gray with the given width and height.
+// NewGray returns a new Gray with the given bounds.
func NewGray(r Rectangle) *Gray {
w, h := r.Dx(), r.Dy()
pix := make([]uint8, 1*w*h)
if !(Point{x, y}.In(p.Rect)) {
return color.Gray16{}
}
- i := (y-p.Rect.Min.Y)*p.Stride + (x-p.Rect.Min.X)*2
+ i := p.PixOffset(x, y)
return color.Gray16{uint16(p.Pix[i+0])<<8 | uint16(p.Pix[i+1])}
}
+// PixOffset returns the index of the first element of Pix that corresponds to
+// the pixel at (x, y).
+func (p *Gray16) PixOffset(x, y int) int {
+ return (y-p.Rect.Min.Y)*p.Stride + (x-p.Rect.Min.X)*2
+}
+
func (p *Gray16) Set(x, y int, c color.Color) {
if !(Point{x, y}.In(p.Rect)) {
return
}
- i := (y-p.Rect.Min.Y)*p.Stride + (x-p.Rect.Min.X)*2
+ i := p.PixOffset(x, y)
c1 := color.Gray16Model.Convert(c).(color.Gray16)
p.Pix[i+0] = uint8(c1.Y >> 8)
p.Pix[i+1] = uint8(c1.Y)
if !(Point{x, y}.In(p.Rect)) {
return
}
- i := (y-p.Rect.Min.Y)*p.Stride + (x-p.Rect.Min.X)*2
+ i := p.PixOffset(x, y)
p.Pix[i+0] = uint8(c.Y >> 8)
p.Pix[i+1] = uint8(c.Y)
}
if r.Empty() {
return &Gray16{}
}
- i := (r.Min.Y-p.Rect.Min.Y)*p.Stride + (r.Min.X-p.Rect.Min.X)*2
+ i := p.PixOffset(r.Min.X, r.Min.Y)
return &Gray16{
Pix: p.Pix[i:],
Stride: p.Stride,
return true
}
-// NewGray16 returns a new Gray16 with the given width and height.
+// NewGray16 returns a new Gray16 with the given bounds.
func NewGray16(r Rectangle) *Gray16 {
w, h := r.Dx(), r.Dy()
pix := make([]uint8, 2*w*h)
if !(Point{x, y}.In(p.Rect)) {
return p.Palette[0]
}
- i := (y-p.Rect.Min.Y)*p.Stride + (x - p.Rect.Min.X)
+ i := p.PixOffset(x, y)
return p.Palette[p.Pix[i]]
}
+// PixOffset returns the index of the first element of Pix that corresponds to
+// the pixel at (x, y).
+func (p *Paletted) PixOffset(x, y int) int {
+ return (y-p.Rect.Min.Y)*p.Stride + (x-p.Rect.Min.X)*1
+}
+
func (p *Paletted) Set(x, y int, c color.Color) {
if !(Point{x, y}.In(p.Rect)) {
return
}
- i := (y-p.Rect.Min.Y)*p.Stride + (x - p.Rect.Min.X)
+ i := p.PixOffset(x, y)
p.Pix[i] = uint8(p.Palette.Index(c))
}
if !(Point{x, y}.In(p.Rect)) {
return 0
}
- i := (y-p.Rect.Min.Y)*p.Stride + (x - p.Rect.Min.X)
+ i := p.PixOffset(x, y)
return p.Pix[i]
}
if !(Point{x, y}.In(p.Rect)) {
return
}
- i := (y-p.Rect.Min.Y)*p.Stride + (x - p.Rect.Min.X)
+ i := p.PixOffset(x, y)
p.Pix[i] = index
}
Palette: p.Palette,
}
}
- i := (r.Min.Y-p.Rect.Min.Y)*p.Stride + (r.Min.X-p.Rect.Min.X)*1
+ i := p.PixOffset(r.Min.X, r.Min.Y)
return &Paletted{
Pix: p.Pix[i:],
Stride: p.Stride,
return
}
var subsampleRatio image.YCbCrSubsampleRatio
- n := h0 * v0
- switch n {
+ switch h0 * v0 {
case 1:
subsampleRatio = image.YCbCrSubsampleRatio444
case 2:
default:
panic("unreachable")
}
- b := make([]byte, mxx*myy*(1*8*8*n+2*8*8))
- d.img3 = &image.YCbCr{
- Y: b[mxx*myy*(0*8*8*n+0*8*8) : mxx*myy*(1*8*8*n+0*8*8)],
- Cb: b[mxx*myy*(1*8*8*n+0*8*8) : mxx*myy*(1*8*8*n+1*8*8)],
- Cr: b[mxx*myy*(1*8*8*n+1*8*8) : mxx*myy*(1*8*8*n+2*8*8)],
- SubsampleRatio: subsampleRatio,
- YStride: mxx * 8 * h0,
- CStride: mxx * 8,
- Rect: image.Rect(0, 0, d.width, d.height),
- }
+ m := image.NewYCbCr(image.Rect(0, 0, 8*h0*mxx, 8*v0*myy), subsampleRatio)
+ d.img3 = m.SubImage(image.Rect(0, 0, d.width, d.height)).(*image.YCbCr)
}
// Specified in section B.2.3.
}
case mRGB:
img := dst.(*image.RGBA)
- min := (ymin-img.Rect.Min.Y)*img.Stride - img.Rect.Min.X*4
- max := (ymax-img.Rect.Min.Y)*img.Stride - img.Rect.Min.X*4
+ min := img.PixOffset(0, ymin)
+ max := img.PixOffset(0, ymax)
var off int
for i := min; i < max; i += 4 {
img.Pix[i+0] = d.buf[off+0]
}
case mNRGBA:
img := dst.(*image.NRGBA)
- min := (ymin-img.Rect.Min.Y)*img.Stride - img.Rect.Min.X*4
- max := (ymax-img.Rect.Min.Y)*img.Stride - img.Rect.Min.X*4
+ min := img.PixOffset(0, ymin)
+ max := img.PixOffset(0, ymax)
if len(d.buf) != max-min {
return FormatError("short data strip")
}
copy(img.Pix[min:max], d.buf)
case mRGBA:
img := dst.(*image.RGBA)
- min := (ymin-img.Rect.Min.Y)*img.Stride - img.Rect.Min.X*4
- max := (ymax-img.Rect.Min.Y)*img.Stride - img.Rect.Min.X*4
+ min := img.PixOffset(0, ymin)
+ max := img.PixOffset(0, ymax)
if len(d.buf) != max-min {
return FormatError("short data strip")
}
YCbCrSubsampleRatio420
)
+func (s YCbCrSubsampleRatio) String() string {
+ switch s {
+ case YCbCrSubsampleRatio444:
+ return "YCbCrSubsampleRatio444"
+ case YCbCrSubsampleRatio422:
+ return "YCbCrSubsampleRatio422"
+ case YCbCrSubsampleRatio420:
+ return "YCbCrSubsampleRatio420"
+ }
+ return "YCbCrSubsampleRatioUnknown"
+}
+
// YCbCr is an in-memory image of Y'CbCr colors. There is one Y sample per
// pixel, but each Cb and Cr sample can span one or more pixels.
// YStride is the Y slice index delta between vertically adjacent pixels.
// For 4:2:2, CStride == YStride/2 && len(Cb) == len(Cr) == len(Y)/2.
// For 4:2:0, CStride == YStride/2 && len(Cb) == len(Cr) == len(Y)/4.
type YCbCr struct {
- Y []uint8
- Cb []uint8
- Cr []uint8
+ Y, Cb, Cr []uint8
YStride int
CStride int
SubsampleRatio YCbCrSubsampleRatio
if !(Point{x, y}.In(p.Rect)) {
return color.YCbCr{}
}
+ yi := p.YOffset(x, y)
+ ci := p.COffset(x, y)
+ return color.YCbCr{
+ p.Y[yi],
+ p.Cb[ci],
+ p.Cr[ci],
+ }
+}
+
+// YOffset returns the index of the first element of Y that corresponds to
+// the pixel at (x, y).
+func (p *YCbCr) YOffset(x, y int) int {
+ return (y-p.Rect.Min.Y)*p.YStride + (x - p.Rect.Min.X)
+}
+
+// COffset returns the index of the first element of Cb or Cr that corresponds
+// to the pixel at (x, y).
+func (p *YCbCr) COffset(x, y int) int {
switch p.SubsampleRatio {
case YCbCrSubsampleRatio422:
- i := x / 2
- return color.YCbCr{
- p.Y[y*p.YStride+x],
- p.Cb[y*p.CStride+i],
- p.Cr[y*p.CStride+i],
- }
+ return (y-p.Rect.Min.Y)*p.CStride + (x/2 - p.Rect.Min.X/2)
case YCbCrSubsampleRatio420:
- i, j := x/2, y/2
- return color.YCbCr{
- p.Y[y*p.YStride+x],
- p.Cb[j*p.CStride+i],
- p.Cr[j*p.CStride+i],
- }
+ return (y/2-p.Rect.Min.Y/2)*p.CStride + (x/2 - p.Rect.Min.X/2)
}
// Default to 4:4:4 subsampling.
- return color.YCbCr{
- p.Y[y*p.YStride+x],
- p.Cb[y*p.CStride+x],
- p.Cr[y*p.CStride+x],
- }
+ return (y-p.Rect.Min.Y)*p.CStride + (x - p.Rect.Min.X)
}
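A hedged sketch of addressing samples through the new offset helpers (bounds and subsample ratio chosen arbitrarily):

	package main

	import (
		"fmt"
		"image"
		"image/color"
	)

	func main() {
		m := image.NewYCbCr(image.Rect(0, 0, 16, 16), image.YCbCrSubsampleRatio420)
		yi, ci := m.YOffset(3, 5), m.COffset(3, 5)
		m.Y[yi], m.Cb[ci], m.Cr[ci] = 0x80, 0x40, 0xc0
		fmt.Println(m.At(3, 5) == color.YCbCr{Y: 0x80, Cb: 0x40, Cr: 0xc0})
	}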
// SubImage returns an image representing the portion of the image p visible
// through r. The returned value shares pixels with the original image.
func (p *YCbCr) SubImage(r Rectangle) Image {
- q := new(YCbCr)
- *q = *p
- q.Rect = q.Rect.Intersect(r)
- return q
+ r = r.Intersect(p.Rect)
+ // If r1 and r2 are Rectangles, r1.Intersect(r2) is not guaranteed to be inside
+ // either r1 or r2 if the intersection is empty. Without explicitly checking for
+ // this, the Pix[i:] expression below can panic.
+ if r.Empty() {
+ return &YCbCr{
+ SubsampleRatio: p.SubsampleRatio,
+ }
+ }
+ yi := p.YOffset(r.Min.X, r.Min.Y)
+ ci := p.COffset(r.Min.X, r.Min.Y)
+ return &YCbCr{
+ Y: p.Y[yi:],
+ Cb: p.Cb[ci:],
+ Cr: p.Cr[ci:],
+ SubsampleRatio: p.SubsampleRatio,
+ YStride: p.YStride,
+ CStride: p.CStride,
+ Rect: r,
+ }
}
func (p *YCbCr) Opaque() bool {
return true
}
+
+// NewYCbCr returns a new YCbCr with the given bounds and subsample ratio.
+func NewYCbCr(r Rectangle, subsampleRatio YCbCrSubsampleRatio) *YCbCr {
+ w, h, cw, ch := r.Dx(), r.Dy(), 0, 0
+ switch subsampleRatio {
+ case YCbCrSubsampleRatio422:
+ cw = (r.Max.X+1)/2 - r.Min.X/2
+ ch = h
+ case YCbCrSubsampleRatio420:
+ cw = (r.Max.X+1)/2 - r.Min.X/2
+ ch = (r.Max.Y+1)/2 - r.Min.Y/2
+ default:
+ // Default to 4:4:4 subsampling.
+ cw = w
+ ch = h
+ }
+ b := make([]byte, w*h+2*cw*ch)
+ return &YCbCr{
+ Y: b[:w*h],
+ Cb: b[w*h+0*cw*ch : w*h+1*cw*ch],
+ Cr: b[w*h+1*cw*ch : w*h+2*cw*ch],
+ SubsampleRatio: subsampleRatio,
+ YStride: w,
+ CStride: cw,
+ Rect: r,
+ }
+}
--- /dev/null
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package image_test
+
+import (
+ . "image"
+ "image/color"
+ "testing"
+)
+
+func TestYCbCr(t *testing.T) {
+ rects := []Rectangle{
+ Rect(0, 0, 16, 16),
+ Rect(1, 0, 16, 16),
+ Rect(0, 1, 16, 16),
+ Rect(1, 1, 16, 16),
+ Rect(1, 1, 15, 16),
+ Rect(1, 1, 16, 15),
+ Rect(1, 1, 15, 15),
+ Rect(2, 3, 14, 15),
+ Rect(7, 0, 7, 16),
+ Rect(0, 8, 16, 8),
+ Rect(0, 0, 10, 11),
+ Rect(5, 6, 16, 16),
+ Rect(7, 7, 8, 8),
+ Rect(7, 8, 8, 9),
+ Rect(8, 7, 9, 8),
+ Rect(8, 8, 9, 9),
+ Rect(7, 7, 17, 17),
+ Rect(8, 8, 17, 17),
+ Rect(9, 9, 17, 17),
+ Rect(10, 10, 17, 17),
+ }
+ subsampleRatios := []YCbCrSubsampleRatio{
+ YCbCrSubsampleRatio444,
+ YCbCrSubsampleRatio422,
+ YCbCrSubsampleRatio420,
+ }
+ deltas := []Point{
+ Pt(0, 0),
+ Pt(1000, 1001),
+ Pt(5001, -400),
+ Pt(-701, -801),
+ }
+ for _, r := range rects {
+ for _, subsampleRatio := range subsampleRatios {
+ for _, delta := range deltas {
+ testYCbCr(t, r, subsampleRatio, delta)
+ }
+ }
+ }
+}
+
+func testYCbCr(t *testing.T, r Rectangle, subsampleRatio YCbCrSubsampleRatio, delta Point) {
+ // Create a YCbCr image m, whose bounds are r translated by (delta.X, delta.Y).
+ r1 := r.Add(delta)
+ m := NewYCbCr(r1, subsampleRatio)
+
+ // Test that the image buffer is reasonably small even if (delta.X, delta.Y) is far from the origin.
+ if len(m.Y) > 100*100 {
+ t.Errorf("r=%v, subsampleRatio=%v, delta=%v: image buffer is too large",
+ r, subsampleRatio, delta)
+ return
+ }
+
+ // Initialize m's pixels. For 422 and 420 subsampling, some of the Cb and Cr elements
+ // will be set multiple times. That's OK. We just want to avoid a uniform image.
+ for y := r1.Min.Y; y < r1.Max.Y; y++ {
+ for x := r1.Min.X; x < r1.Max.X; x++ {
+ yi := m.YOffset(x, y)
+ ci := m.COffset(x, y)
+ m.Y[yi] = uint8(16*y + x)
+ m.Cb[ci] = uint8(y + 16*x)
+ m.Cr[ci] = uint8(y + 16*x)
+ }
+ }
+
+ // Make various sub-images of m.
+ for y0 := delta.Y + 3; y0 < delta.Y+7; y0++ {
+ for y1 := delta.Y + 8; y1 < delta.Y+13; y1++ {
+ for x0 := delta.X + 3; x0 < delta.X+7; x0++ {
+ for x1 := delta.X + 8; x1 < delta.X+13; x1++ {
+ subRect := Rect(x0, y0, x1, y1)
+ sub := m.SubImage(subRect).(*YCbCr)
+
+ // For each point in the sub-image's bounds, check that m.At(x, y) equals sub.At(x, y).
+ for y := sub.Rect.Min.Y; y < sub.Rect.Max.Y; y++ {
+ for x := sub.Rect.Min.X; x < sub.Rect.Max.X; x++ {
+ color0 := m.At(x, y).(color.YCbCr)
+ color1 := sub.At(x, y).(color.YCbCr)
+ if color0 != color1 {
+ t.Errorf("r=%v, subsampleRatio=%v, delta=%v, x=%d, y=%d, color0=%v, color1=%v",
+ r, subsampleRatio, delta, x, y, color0, color1)
+ return
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
// WriteFile writes data to a file named by filename.
// If the file does not exist, WriteFile creates it with permissions perm;
// otherwise WriteFile truncates it before writing.
-func WriteFile(filename string, data []byte, perm uint32) error {
+func WriteFile(filename string, data []byte, perm os.FileMode) error {
f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
if err != nil {
return err
"log"
"net"
"testing"
+ "time"
)
var serverAddr string
log.Fatalf("net.ListenPacket failed udp :0 %v", e)
}
serverAddr = c.LocalAddr().String()
- c.SetReadTimeout(100e6) // 100ms
+ c.SetReadDeadline(time.Now().Add(100 * time.Millisecond))
go runSyslog(c, done)
}
var globalRand = New(&lockedSource{src: NewSource(1)})
-// Seed uses the provided seed value to initialize the generator to a deterministic state.
+// Seed uses the provided seed value to initialize the generator to a
+// deterministic state. If Seed is not called, the generator behaves as
+// if seeded by Seed(1).
func Seed(seed int64) { globalRand.Seed(seed) }
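The usual way to get a different sequence on each run is to seed from the clock; a hedged example of that idiom:

	package main

	import (
		"fmt"
		"math/rand"
		"time"
	)

	func main() {
		// Without this call the package-level generator behaves as if Seed(1) had been used.
		rand.Seed(time.Now().UnixNano())
		fmt.Println(rand.Int63())
	}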
// Int63 returns a non-negative pseudo-random 63-bit integer as an int64.
}
func TestNonStandardNormalValues(t *testing.T) {
- for sd := 0.5; sd < 1000; sd *= 2 {
- for m := 0.5; m < 1000; m *= 2 {
+ sdmax := 1000.0
+ mmax := 1000.0
+ if testing.Short() {
+ sdmax = 5
+ mmax = 5
+ }
+ for sd := 0.5; sd < sdmax; sd *= 2 {
+ for m := 0.5; m < mmax; m *= 2 {
for _, seed := range testSeeds {
testNormalDistribution(t, numTestSamples, m, sd, seed)
}
"unicode"
)
-// FormatMediaType serializes type t, subtype sub and the paramaters
-// param as a media type conform RFC 2045 and RFC 2616.
-// The type, subtype, and parameter names are written in lower-case.
+// FormatMediaType serializes mediatype t and the parameters
+// param as a media type conforming to RFC 2045 and RFC 2616.
+// The type and parameter names are written in lower-case.
// When any of the arguments result in a standard violation then
// FormatMediaType returns the empty string.
-func FormatMediaType(t, sub string, param map[string]string) string {
- if !(IsToken(t) && IsToken(sub)) {
+func FormatMediaType(t string, param map[string]string) string {
+ slash := strings.Index(t, "/")
+ if slash == -1 {
+ return ""
+ }
+ major, sub := t[:slash], t[slash+1:]
+ if !IsToken(major) || !IsToken(sub) {
return ""
}
var b bytes.Buffer
- b.WriteString(strings.ToLower(t))
+ b.WriteString(strings.ToLower(major))
b.WriteByte('/')
b.WriteString(strings.ToLower(sub))
t.Errorf("expected invalid media parameter; got error %q", err)
}
}
+
+type formatTest struct {
+ typ string
+ params map[string]string
+ want string
+}
+
+var formatTests = []formatTest{
+ {"noslash", nil, ""},
+ {"foo/BAR", nil, "foo/bar"},
+ {"foo/BAR", map[string]string{"X": "Y"}, "foo/bar; x=Y"},
+}
+
+func TestFormatMediaType(t *testing.T) {
+ for i, tt := range formatTests {
+ got := FormatMediaType(tt.typ, tt.params)
+ if got != tt.want {
+ t.Errorf("%d. FormatMediaType(%q, %v) = %q; want %q", i, tt.typ, tt.params, got, tt.want)
+ }
+ }
+}
}
func setExtensionType(extension, mimeType string) error {
- full, param, err := ParseMediaType(mimeType)
+ _, param, err := ParseMediaType(mimeType)
if err != nil {
return err
}
- if split := strings.Index(full, "/"); split < 0 {
- return fmt.Errorf(`mime: malformed MIME type "%s"`, mimeType)
- } else {
- main := full[:split]
- sub := full[split+1:]
- if main == "text" && param["charset"] == "" {
- param["charset"] = "utf-8"
- }
- mimeType = FormatMediaType(main, sub, param)
+ if strings.HasPrefix(mimeType, "text/") && param["charset"] == "" {
+ param["charset"] = "utf-8"
+ mimeType = FormatMediaType(mimeType, param)
}
-
mimeLock.Lock()
mimeTypes[extension] = mimeType
mimeLock.Unlock()
return nil, err
}
- c.SetReadTimeout(int64(cfg.timeout) * 1e9) // nanoseconds
+ if cfg.timeout == 0 {
+ c.SetReadDeadline(time.Time{})
+ } else {
+ c.SetReadDeadline(time.Now().Add(time.Duration(cfg.timeout) * time.Second))
+ }
buf := make([]byte, 2000) // More than enough.
n, err = c.Read(buf)
--- /dev/null
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package net
+
+// LookupHost looks up the given host using the local resolver.
+// It returns an array of that host's addresses.
+func LookupHost(host string) (addrs []string, err error) {
+ return lookupHost(host)
+}
+
+// LookupIP looks up host using the local resolver.
+// It returns an array of that host's IPv4 and IPv6 addresses.
+func LookupIP(host string) (addrs []IP, err error) {
+ return lookupIP(host)
+}
+
+// LookupPort looks up the port for the given network and service.
+func LookupPort(network, service string) (port int, err error) {
+ return lookupPort(network, service)
+}
+
+// LookupCNAME returns the canonical DNS host for the given name.
+// Callers that do not care about the canonical name can call
+// LookupHost or LookupIP directly; both take care of resolving
+// the canonical name as part of the lookup.
+func LookupCNAME(name string) (cname string, err error) {
+ return lookupCNAME(name)
+}
+
+// LookupSRV tries to resolve an SRV query of the given service,
+// protocol, and domain name. The proto is "tcp" or "udp".
+// The returned records are sorted by priority and randomized
+// by weight within a priority.
+//
+// LookupSRV constructs the DNS name to look up following RFC 2782.
+// That is, it looks up _service._proto.name. To accommodate services
+// publishing SRV records under non-standard names, if both service
+// and proto are empty strings, LookupSRV looks up name directly.
+func LookupSRV(service, proto, name string) (cname string, addrs []*SRV, err error) {
+ return lookupSRV(service, proto, name)
+}
+
+// LookupMX returns the DNS MX records for the given domain name sorted by preference.
+func LookupMX(name string) (mx []*MX, err error) {
+ return lookupMX(name)
+}
+
+// LookupTXT returns the DNS TXT records for the given domain name.
+func LookupTXT(name string) (txt []string, err error) {
+ return lookupTXT(name)
+}
+
+// LookupAddr performs a reverse lookup for the given address, returning a list
+// of names mapping to that address.
+func LookupAddr(addr string) (name []string, err error) {
+ return lookupAddr(addr)
+}
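A hedged usage sketch of the SRV lookup; the service, protocol, and domain below are placeholders, and the call needs a working resolver to succeed:

	package main

	import (
		"fmt"
		"net"
	)

	func main() {
		// Resolves _xmpp-server._tcp.example.com per RFC 2782.
		cname, srvs, err := net.LookupSRV("xmpp-server", "tcp", "example.com")
		if err != nil {
			fmt.Println("lookup failed:", err)
			return
		}
		fmt.Println("canonical name:", cname)
		for _, srv := range srvs {
			fmt.Printf("%s:%d (priority %d, weight %d)\n", srv.Target, srv.Port, srv.Priority, srv.Weight)
		}
	}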
// immutable until Close
sysfd int
family int
- proto int
+ sotype int
sysfile *os.File
cr chan bool
cw chan bool
raddr Addr
// owned by client
- rdeadline_delta int64
- rdeadline int64
- rio sync.Mutex
- wdeadline_delta int64
- wdeadline int64
- wio sync.Mutex
+ rdeadline int64
+ rio sync.Mutex
+ wdeadline int64
+ wio sync.Mutex
// owned by fd wait server
ncr, ncw int
pollserver = p
}
-func newFD(fd, family, proto int, net string) (f *netFD, err error) {
+func newFD(fd, family, sotype int, net string) (f *netFD, err error) {
onceStartServer.Do(startServer)
if e := syscall.SetNonblock(fd, true); e != nil {
return nil, e
f = &netFD{
sysfd: fd,
family: family,
- proto: proto,
+ sotype: sotype,
net: net,
}
f.cr = make(chan bool, 1)
if fd.sysfile == nil {
return 0, os.EINVAL
}
- if fd.rdeadline_delta > 0 {
- fd.rdeadline = pollserver.Now() + fd.rdeadline_delta
- } else {
- fd.rdeadline = 0
- }
for {
n, err = syscall.Read(fd.sysfile.Fd(), p)
if err == syscall.EAGAIN {
}
if err != nil {
n = 0
- } else if n == 0 && err == nil && fd.proto != syscall.SOCK_DGRAM {
+ } else if n == 0 && err == nil && fd.sotype != syscall.SOCK_DGRAM {
err = io.EOF
}
break
defer fd.rio.Unlock()
fd.incref()
defer fd.decref()
- if fd.rdeadline_delta > 0 {
- fd.rdeadline = pollserver.Now() + fd.rdeadline_delta
- } else {
- fd.rdeadline = 0
- }
for {
n, sa, err = syscall.Recvfrom(fd.sysfd, p, 0)
if err == syscall.EAGAIN {
defer fd.rio.Unlock()
fd.incref()
defer fd.decref()
- if fd.rdeadline_delta > 0 {
- fd.rdeadline = pollserver.Now() + fd.rdeadline_delta
- } else {
- fd.rdeadline = 0
- }
for {
n, oobn, flags, sa, err = syscall.Recvmsg(fd.sysfd, p, oob, 0)
if err == syscall.EAGAIN {
if fd.sysfile == nil {
return 0, os.EINVAL
}
- if fd.wdeadline_delta > 0 {
- fd.wdeadline = pollserver.Now() + fd.wdeadline_delta
- } else {
- fd.wdeadline = 0
- }
nn := 0
for {
defer fd.wio.Unlock()
fd.incref()
defer fd.decref()
- if fd.wdeadline_delta > 0 {
- fd.wdeadline = pollserver.Now() + fd.wdeadline_delta
- } else {
- fd.wdeadline = 0
- }
for {
err = syscall.Sendto(fd.sysfd, p, 0, sa)
if err == syscall.EAGAIN {
defer fd.wio.Unlock()
fd.incref()
defer fd.decref()
- if fd.wdeadline_delta > 0 {
- fd.wdeadline = pollserver.Now() + fd.wdeadline_delta
- } else {
- fd.wdeadline = 0
- }
for {
err = syscall.Sendmsg(fd.sysfd, p, oob, sa, 0)
if err == syscall.EAGAIN {
fd.incref()
defer fd.decref()
- if fd.rdeadline_delta > 0 {
- fd.rdeadline = pollserver.Now() + fd.rdeadline_delta
- } else {
- fd.rdeadline = 0
- }
// See ../syscall/exec.go for description of ForkLock.
// It is okay to hold the lock across syscall.Accept
syscall.CloseOnExec(s)
syscall.ForkLock.RUnlock()
- if nfd, err = newFD(s, fd.family, fd.proto, fd.net); err != nil {
+ if nfd, err = newFD(s, fd.family, fd.sotype, fd.net); err != nil {
syscall.Close(s)
return nil, err
}
}
// ExecIO executes a single io operation. It either executes it
-// inline, or, if timeouts are employed, passes the request onto
+// inline, or, if a deadline is employed, passes the request onto
// a special goroutine and waits for completion or cancels request.
-func (s *ioSrv) ExecIO(oi anOpIface, deadline_delta int64) (n int, err error) {
+// deadline is unix nanos.
+func (s *ioSrv) ExecIO(oi anOpIface, deadline int64) (n int, err error) {
var e error
o := oi.Op()
- if deadline_delta > 0 {
+ if deadline != 0 {
// Send request to a special dedicated thread,
// so it can stop the io with CancelIO later.
s.submchan <- oi
return 0, &OpError{oi.Name(), o.fd.net, o.fd.laddr, e}
}
// Wait for our request to complete.
- // TODO(rsc): This should stop the timer.
var r ioResult
- if deadline_delta > 0 {
+ if deadline != 0 {
+ dt := deadline - time.Now().UnixNano()
+ if dt < 1 {
+ dt = 1
+ }
+ timer := time.NewTimer(time.Duration(dt) * time.Nanosecond)
+ defer timer.Stop()
select {
case r = <-o.resultc:
- case <-time.After(time.Duration(deadline_delta) * time.Nanosecond):
+ case <-timer.C:
s.canchan <- oi
<-o.errnoc
r = <-o.resultc
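The pattern introduced here, an absolute deadline in Unix nanoseconds turned into a one-shot timer that is always stopped, can be shown in isolation. This is only a sketch of the idea, not the library code above; the channel and values are hypothetical.

package main

import (
	"fmt"
	"time"
)

// waitWithDeadline waits for a value or until the absolute deadline
// (Unix nanoseconds) passes. A zero deadline means wait forever.
func waitWithDeadline(resultc <-chan int, deadline int64) (int, bool) {
	if deadline == 0 {
		return <-resultc, true
	}
	dt := deadline - time.Now().UnixNano()
	if dt < 1 {
		dt = 1 // deadline already passed; still give the channel one chance
	}
	timer := time.NewTimer(time.Duration(dt))
	defer timer.Stop() // release the timer on the success path too
	select {
	case r := <-resultc:
		return r, true
	case <-timer.C:
		return 0, false // timed out
	}
}

func main() {
	c := make(chan int, 1)
	c <- 42
	r, ok := waitWithDeadline(c, time.Now().Add(time.Second).UnixNano())
	fmt.Println(r, ok) // 42 true
}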
// immutable until Close
sysfd syscall.Handle
family int
- proto int
+ sotype int
net string
laddr Addr
raddr Addr
errnoc [2]chan error // read/write submit or cancel operation errors
// owned by client
- rdeadline_delta int64
- rdeadline int64
- rio sync.Mutex
- wdeadline_delta int64
- wdeadline int64
- wio sync.Mutex
+ rdeadline int64
+ rio sync.Mutex
+ wdeadline int64
+ wio sync.Mutex
}
-func allocFD(fd syscall.Handle, family, proto int, net string) (f *netFD) {
+func allocFD(fd syscall.Handle, family, sotype int, net string) (f *netFD) {
f = &netFD{
sysfd: fd,
family: family,
- proto: proto,
+ sotype: sotype,
net: net,
}
runtime.SetFinalizer(f, (*netFD).Close)
}
var o readOp
o.Init(fd, buf, 'r')
- n, err = iosrv.ExecIO(&o, fd.rdeadline_delta)
+ n, err = iosrv.ExecIO(&o, fd.rdeadline)
if err == nil && n == 0 {
err = io.EOF
}
var o readFromOp
o.Init(fd, buf, 'r')
o.rsan = int32(unsafe.Sizeof(o.rsa))
- n, err = iosrv.ExecIO(&o, fd.rdeadline_delta)
+ n, err = iosrv.ExecIO(&o, fd.rdeadline)
if err != nil {
return 0, nil, err
}
}
var o writeOp
o.Init(fd, buf, 'w')
- return iosrv.ExecIO(&o, fd.wdeadline_delta)
+ return iosrv.ExecIO(&o, fd.wdeadline)
}
// WriteTo to network.
var o writeToOp
o.Init(fd, buf, 'w')
o.sa = sa
- return iosrv.ExecIO(&o, fd.wdeadline_delta)
+ return iosrv.ExecIO(&o, fd.wdeadline)
}
// Accept new network connections.
// Get new socket.
// See ../syscall/exec.go for description of ForkLock.
syscall.ForkLock.RLock()
- s, e := syscall.Socket(fd.family, fd.proto, 0)
+ s, e := syscall.Socket(fd.family, fd.sotype, 0)
if e != nil {
syscall.ForkLock.RUnlock()
return nil, e
lsa, _ := lrsa.Sockaddr()
rsa, _ := rrsa.Sockaddr()
- nfd = allocFD(s, fd.family, fd.proto, fd.net)
+ nfd = allocFD(s, fd.family, fd.sotype, fd.net)
nfd.setAddr(toAddr(lsa), toAddr(rsa))
return nfd, nil
}
"GATEWAY_INTERFACE=CGI/1.1",
"REQUEST_METHOD=" + req.Method,
"QUERY_STRING=" + req.URL.RawQuery,
- "REQUEST_URI=" + req.URL.RawPath,
+ "REQUEST_URI=" + req.URL.RequestURI(),
"PATH_INFO=" + pathInfo,
"SCRIPT_NAME=" + root,
"SCRIPT_FILENAME=" + h.Path,
conn.Close()
tries := 0
- for tries < 15 && childRunning() {
+ for tries < 25 && childRunning() {
time.Sleep(50 * time.Millisecond * time.Duration(tries))
tries++
}
// The Client's Transport typically has internal state (cached
// TCP connections), so Clients should be reused instead of created as
// needed. Clients are safe for concurrent use by multiple goroutines.
-//
-// Client is not yet very configurable.
type Client struct {
- Transport RoundTripper // if nil, DefaultTransport is used
+ // Transport specifies the mechanism by which individual
+ // HTTP requests are made.
+ // If nil, DefaultTransport is used.
+ Transport RoundTripper
+ // CheckRedirect specifies the policy for handling redirects.
// If CheckRedirect is not nil, the client calls it before
// following an HTTP redirect. The arguments req and via
// are the upcoming request and the requests made already,
req.Header = make(Header)
}
- info := req.URL.RawUserinfo
- if len(info) > 0 {
- req.Header.Set("Authorization", "Basic "+base64.URLEncoding.EncodeToString([]byte(info)))
+ if u := req.URL.User; u != nil {
+ req.Header.Set("Authorization", "Basic "+base64.URLEncoding.EncodeToString([]byte(u.String())))
}
return t.RoundTrip(req)
}
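Since RawUserinfo is gone, credentials now travel as a *url.Userinfo on the request URL and the transport encodes them into the Authorization header as shown above. A hedged caller-side sketch; the host and credentials are placeholders.

package main

import (
	"log"
	"net/http"
	"net/url"
)

func main() {
	req, err := http.NewRequest("GET", "http://example.com/private", nil)
	if err != nil {
		log.Fatal(err)
	}
	// The transport turns req.URL.User into an "Authorization: Basic ..." header.
	req.URL.User = url.UserPassword("gopher", "secret")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	log.Println(resp.Status)
}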
break
}
}
- for _, cookie := range jar.Cookies(req.URL) {
- req.AddCookie(cookie)
- }
}
+ for _, cookie := range jar.Cookies(req.URL) {
+ req.AddCookie(cookie)
+ }
urlStr = req.URL.String()
if r, err = send(req, c.Transport); err != nil {
break
"net/url"
"strconv"
"strings"
+ "sync"
"testing"
)
}
}
+var expectedCookies = []*Cookie{
+ &Cookie{Name: "ChocolateChip", Value: "tasty"},
+ &Cookie{Name: "First", Value: "Hit"},
+ &Cookie{Name: "Second", Value: "Hit"},
+}
+
+var echoCookiesRedirectHandler = HandlerFunc(func(w ResponseWriter, r *Request) {
+ for _, cookie := range r.Cookies() {
+ SetCookie(w, cookie)
+ }
+ if r.URL.Path == "/" {
+ SetCookie(w, expectedCookies[1])
+ Redirect(w, r, "/second", StatusMovedPermanently)
+ } else {
+ SetCookie(w, expectedCookies[2])
+ w.Write([]byte("hello"))
+ }
+})
+
+// Just enough correctness for our redirect tests. Uses the URL.Host as the
+// scope of all cookies.
+type TestJar struct {
+ m sync.Mutex
+ perURL map[string][]*Cookie
+}
+
+func (j *TestJar) SetCookies(u *url.URL, cookies []*Cookie) {
+ j.m.Lock()
+ defer j.m.Unlock()
+ j.perURL[u.Host] = cookies
+}
+
+func (j *TestJar) Cookies(u *url.URL) []*Cookie {
+ j.m.Lock()
+ defer j.m.Unlock()
+ return j.perURL[u.Host]
+}
+
+func TestRedirectCookiesOnRequest(t *testing.T) {
+ var ts *httptest.Server
+ ts = httptest.NewServer(echoCookiesRedirectHandler)
+ defer ts.Close()
+ c := &Client{}
+ req, _ := NewRequest("GET", ts.URL, nil)
+ req.AddCookie(expectedCookies[0])
+ // TODO: Uncomment when an implementation of a RFC6265 cookie jar lands.
+ _ = c
+ // resp, _ := c.Do(req)
+ // matchReturnedCookies(t, expectedCookies, resp.Cookies())
+
+ req, _ = NewRequest("GET", ts.URL, nil)
+ // resp, _ = c.Do(req)
+ // matchReturnedCookies(t, expectedCookies[1:], resp.Cookies())
+}
+
+func TestRedirectCookiesJar(t *testing.T) {
+ var ts *httptest.Server
+ ts = httptest.NewServer(echoCookiesRedirectHandler)
+ defer ts.Close()
+ c := &Client{}
+ c.Jar = &TestJar{perURL: make(map[string][]*Cookie)}
+ u, _ := url.Parse(ts.URL)
+ c.Jar.SetCookies(u, []*Cookie{expectedCookies[0]})
+ resp, _ := c.Get(ts.URL)
+ matchReturnedCookies(t, expectedCookies, resp.Cookies())
+}
+
+func matchReturnedCookies(t *testing.T, expected, given []*Cookie) {
+ t.Logf("Received cookies: %v", given)
+ if len(given) != len(expected) {
+ t.Errorf("Expected %d cookies, got %d", len(expected), len(given))
+ }
+ for _, ec := range expected {
+ foundC := false
+ for _, c := range given {
+ if ec.Name == c.Name && ec.Value == c.Value {
+ foundC = true
+ break
+ }
+ }
+ if !foundC {
+ t.Errorf("Missing cookie %v", ec)
+ }
+ }
+}
+
func TestStreamingGet(t *testing.T) {
say := make(chan string)
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
resp, err := client.Get("http://example.com")
// ...
- req := http.NewRequest("GET", "http://example.com", nil)
+ req, err := http.NewRequest("GET", "http://example.com", nil)
+ // ...
req.Header.Add("If-None-Match", `W/"wyzzy"`)
resp, err := client.Do(req)
// ...
func TestServeFileContentType(t *testing.T) {
const ctype = "icecream/chocolate"
- override := false
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
- if override {
+ if r.FormValue("override") == "1" {
w.Header().Set("Content-Type", ctype)
}
ServeFile(w, r, "testdata/file")
}))
defer ts.Close()
- get := func(want string) {
- resp, err := Get(ts.URL)
+ get := func(override, want string) {
+ resp, err := Get(ts.URL + "?override=" + override)
if err != nil {
t.Fatal(err)
}
t.Errorf("Content-Type mismatch: got %q, want %q", h, want)
}
}
- get("text/plain; charset=utf-8")
- override = true
- get(ctype)
+ get("0", "text/plain; charset=utf-8")
+ get("1", ctype)
}
func TestServeFileMimeType(t *testing.T) {
"net"
"net/http"
"strings"
+ "time"
)
// One of the copies, say from b to r2, could be avoided by using a more
io.Reader
}
-func (c *dumpConn) Close() error { return nil }
-func (c *dumpConn) LocalAddr() net.Addr { return nil }
-func (c *dumpConn) RemoteAddr() net.Addr { return nil }
-func (c *dumpConn) SetTimeout(nsec int64) error { return nil }
-func (c *dumpConn) SetReadTimeout(nsec int64) error { return nil }
-func (c *dumpConn) SetWriteTimeout(nsec int64) error { return nil }
+func (c *dumpConn) Close() error { return nil }
+func (c *dumpConn) LocalAddr() net.Addr { return nil }
+func (c *dumpConn) RemoteAddr() net.Addr { return nil }
+func (c *dumpConn) SetDeadline(t time.Time) error { return nil }
+func (c *dumpConn) SetReadDeadline(t time.Time) error { return nil }
+func (c *dumpConn) SetWriteDeadline(t time.Time) error { return nil }
// DumpRequestOut is like DumpRequest but includes
// headers that the standard http.Transport adds,
var b bytes.Buffer
- urlStr := req.URL.Raw
- if urlStr == "" {
- urlStr = valueOrDefault(req.URL.EncodedPath(), "/")
- if req.URL.RawQuery != "" {
- urlStr += "?" + req.URL.RawQuery
- }
- }
-
- fmt.Fprintf(&b, "%s %s HTTP/%d.%d\r\n", valueOrDefault(req.Method, "GET"), urlStr,
- req.ProtoMajor, req.ProtoMinor)
+ fmt.Fprintf(&b, "%s %s HTTP/%d.%d\r\n", valueOrDefault(req.Method, "GET"),
+ req.URL.RequestURI(), req.ProtoMajor, req.ProtoMinor)
host := req.Host
if host == "" && req.URL != nil {
req.URL.Scheme = target.Scheme
req.URL.Host = target.Host
req.URL.Path = singleJoiningSlash(target.Path, req.URL.Path)
- if q := req.URL.RawQuery; q != "" {
- req.URL.RawPath = req.URL.Path + "?" + q
- } else {
- req.URL.RawPath = req.URL.Path
- }
req.URL.RawQuery = target.RawQuery
}
return &ReverseProxy{Director: director}
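With the Raw fields removed, the director above only rewrites Scheme, Host, Path and RawQuery. A hedged usage sketch, assuming the Go 1 net/http/httputil package layout; the backend address and listen port are placeholders.

package main

import (
	"log"
	"net/http"
	"net/http/httputil"
	"net/url"
)

func main() {
	target, err := url.Parse("http://127.0.0.1:9000") // backend to forward to
	if err != nil {
		log.Fatal(err)
	}
	// Every request to :8080 is rewritten by the director and sent to target.
	proxy := httputil.NewSingleHostReverseProxy(target)
	log.Fatal(http.ListenAndServe(":8080", proxy))
}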
&Request{
Method: "GET",
URL: &url.URL{
- Raw: "http://www.techcrunch.com/",
- Scheme: "http",
- RawPath: "/",
- RawAuthority: "www.techcrunch.com",
- RawUserinfo: "",
- Host: "www.techcrunch.com",
- Path: "/",
- RawQuery: "",
- Fragment: "",
+ Scheme: "http",
+ Host: "www.techcrunch.com",
+ Path: "/",
},
Proto: "HTTP/1.1",
ProtoMajor: 1,
&Request{
Method: "GET",
URL: &url.URL{
- Raw: "/",
- Path: "/",
- RawPath: "/",
+ Path: "/",
},
Proto: "HTTP/1.1",
ProtoMajor: 1,
&Request{
Method: "GET",
URL: &url.URL{
- Raw: "//user@host/is/actually/a/path/",
- Scheme: "",
- RawPath: "//user@host/is/actually/a/path/",
- RawAuthority: "",
- RawUserinfo: "",
- Host: "",
- Path: "//user@host/is/actually/a/path/",
- RawQuery: "",
- Fragment: "",
+ Path: "//user@host/is/actually/a/path/",
},
Proto: "HTTP/1.1",
ProtoMajor: 1,
&Request{
Method: "POST",
URL: &url.URL{
- Raw: "/",
- Path: "/",
- RawPath: "/",
+ Path: "/",
},
TransferEncoding: []string{"chunked"},
Proto: "HTTP/1.1",
host = req.URL.Host
}
- urlStr := req.URL.RawPath
- if strings.HasPrefix(urlStr, "?") {
- urlStr = "/" + urlStr // Issue 2344
- }
- if urlStr == "" {
- urlStr = valueOrDefault(req.URL.RawPath, valueOrDefault(req.URL.EncodedPath(), "/"))
- if req.URL.RawQuery != "" {
- urlStr += "?" + req.URL.RawQuery
- }
- if usingProxy {
- if urlStr == "" || urlStr[0] != '/' {
- urlStr = "/" + urlStr
- }
- urlStr = req.URL.Scheme + "://" + host + urlStr
- }
+ ruri := req.URL.RequestURI()
+ if usingProxy && req.URL.Scheme != "" && req.URL.Opaque == "" {
+ ruri = req.URL.Scheme + "://" + host + ruri
}
- // TODO(bradfitz): escape at least newlines in urlStr?
+ // TODO(bradfitz): escape at least newlines in ruri?
bw := bufio.NewWriter(w)
- fmt.Fprintf(bw, "%s %s HTTP/1.1\r\n", valueOrDefault(req.Method, "GET"), urlStr)
+ fmt.Fprintf(bw, "%s %s HTTP/1.1\r\n", valueOrDefault(req.Method, "GET"), ruri)
// Header lines
fmt.Fprintf(bw, "Host: %s\r\n", host)
Req: Request{
Method: "GET",
URL: &url.URL{
- Raw: "http://www.techcrunch.com/",
- Scheme: "http",
- RawPath: "http://www.techcrunch.com/",
- RawAuthority: "www.techcrunch.com",
- RawUserinfo: "",
- Host: "www.techcrunch.com",
- Path: "/",
- RawQuery: "",
- Fragment: "",
+ Scheme: "http",
+ Host: "www.techcrunch.com",
+ Path: "/",
},
Proto: "HTTP/1.1",
ProtoMajor: 1,
Form: map[string][]string{},
},
- WantWrite: "GET http://www.techcrunch.com/ HTTP/1.1\r\n" +
+ WantWrite: "GET / HTTP/1.1\r\n" +
"Host: www.techcrunch.com\r\n" +
"User-Agent: Fake\r\n" +
"Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\n" +
"\r\n" +
"abcdef",
- WantProxy: "POST / HTTP/1.1\r\n" +
+ WantProxy: "POST http://example.com/ HTTP/1.1\r\n" +
"Host: example.com\r\n" +
"User-Agent: Go http package\r\n" +
"Content-Length: 6\r\n" +
return dummyAddr("remote-addr")
}
-func (c *testConn) SetTimeout(nsec int64) error {
+func (c *testConn) SetDeadline(t time.Time) error {
return nil
}
-func (c *testConn) SetReadTimeout(nsec int64) error {
+func (c *testConn) SetReadDeadline(t time.Time) error {
return nil
}
-func (c *testConn) SetWriteTimeout(nsec int64) error {
+func (c *testConn) SetWriteDeadline(t time.Time) error {
return nil
}
 // Note: not using r.FormValue("readbody") because for POST
 // requests that would read from r.Body, which we only
 // conditionally want to do.
- if strings.Contains(r.URL.RawPath, "readbody=true") {
+ if strings.Contains(r.URL.RawQuery, "readbody=true") {
ioutil.ReadAll(r.Body)
w.Write([]byte("Hi"))
} else {
panic("intentional death for testing")
}))
defer ts.Close()
- _, err := Get(ts.URL)
- if err == nil {
- t.Logf("expected an error")
- }
// Do a blocking read on the log output pipe so its logging
// doesn't bleed into the next test. But wait only 5 seconds
// for it.
- done := make(chan bool)
+ done := make(chan bool, 1)
go func() {
- buf := make([]byte, 1024)
+ buf := make([]byte, 4<<10)
_, err := pr.Read(buf)
pr.Close()
if err != nil {
}
done <- true
}()
+
+ _, err := Get(ts.URL)
+ if err == nil {
+ t.Logf("expected an error")
+ }
+
select {
case <-done:
return
if err == nil {
return
}
- if c.rwc != nil { // may be nil if connection hijacked
- c.rwc.Close()
- }
var buf bytes.Buffer
fmt.Fprintf(&buf, "http: panic serving %v: %v\n", c.remoteAddr, err)
buf.Write(debug.Stack())
log.Print(buf.String())
+
+ if c.rwc != nil { // may be nil if connection hijacked
+ c.rwc.Close()
+ }
}()
if tlsConn, ok := c.rwc.(*tls.Conn); ok {
type Server struct {
Addr string // TCP address to listen on, ":http" if empty
Handler Handler // handler to invoke, http.DefaultServeMux if nil
- ReadTimeout time.Duration // the net.Conn.SetReadTimeout value for new connections
- WriteTimeout time.Duration // the net.Conn.SetWriteTimeout value for new connections
+ ReadTimeout time.Duration // maximum duration before timing out read of the request
+ WriteTimeout time.Duration // maximum duration before timing out write of the response
MaxHeaderBytes int // maximum size of request headers, DefaultMaxHeaderBytes if 0
}
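With ReadTimeout and WriteTimeout now interpreted as maximum durations applied through deadlines, server configuration looks like the sketch below; the handler and address are placeholders.

package main

import (
	"io"
	"log"
	"net/http"
	"time"
)

func main() {
	srv := &http.Server{
		Addr: ":8080",
		Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			io.WriteString(w, "hello\n")
		}),
		ReadTimeout:  10 * time.Second, // deadline set on the connection before reading the request
		WriteTimeout: 10 * time.Second, // deadline set before writing the response
	}
	log.Fatal(srv.ListenAndServe())
}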
return e
}
if srv.ReadTimeout != 0 {
- rw.SetReadTimeout(srv.ReadTimeout.Nanoseconds())
+ rw.SetReadDeadline(time.Now().Add(srv.ReadTimeout))
}
if srv.WriteTimeout != 0 {
- rw.SetWriteTimeout(srv.WriteTimeout.Nanoseconds())
+ rw.SetWriteDeadline(time.Now().Add(srv.WriteTimeout))
}
c, err := srv.newConn(rw)
if err != nil {
if cm.proxyURL == nil {
return ""
}
- proxyInfo := cm.proxyURL.RawUserinfo
- if proxyInfo != "" {
- return "Basic " + base64.URLEncoding.EncodeToString([]byte(proxyInfo))
+ if u := cm.proxyURL.User; u != nil {
+ return "Basic " + base64.URLEncoding.EncodeToString([]byte(u.String()))
}
return ""
}
case cm.targetScheme == "https":
connectReq := &Request{
Method: "CONNECT",
- URL: &url.URL{RawPath: cm.targetAddr},
+ URL: &url.URL{Opaque: cm.targetAddr},
Host: cm.targetAddr,
Header: make(Header),
}
"flag"
"os"
"testing"
+ "time"
)
const ICMP_ECHO_REQUEST = 8
t.Fatalf(`net.WriteToIP(..., %v) = %v, %v`, raddr, n, err)
}
- c.SetTimeout(100e6)
+ c.SetDeadline(time.Now().Add(100 * time.Millisecond))
resp := make([]byte, 1024)
for {
n, from, err := c.ReadFrom(resp)
import (
"os"
+ "time"
)
// IPConn is the implementation of the Conn and PacketConn
// interfaces for IP network connections.
type IPConn bool
+// SetDeadline implements the net.Conn SetDeadline method.
+func (c *IPConn) SetDeadline(t time.Time) error {
+ return os.EPLAN9
+}
+
+// SetReadDeadline implements the net.Conn SetReadDeadline method.
+func (c *IPConn) SetReadDeadline(t time.Time) error {
+ return os.EPLAN9
+}
+
+// SetWriteDeadline implements the net.Conn SetWriteDeadline method.
+func (c *IPConn) SetWriteDeadline(t time.Time) error {
+ return os.EPLAN9
+}
+
// Implementation of the Conn interface - see Conn for documentation.
// Read implements the net.Conn Read method.
return nil
}
-// SetTimeout implements the net.Conn SetTimeout method.
-func (c *IPConn) SetTimeout(nsec int64) error {
- return os.EPLAN9
-}
-
-// SetReadTimeout implements the net.Conn SetReadTimeout method.
-func (c *IPConn) SetReadTimeout(nsec int64) error {
- return os.EPLAN9
-}
-
-// SetWriteTimeout implements the net.Conn SetWriteTimeout method.
-func (c *IPConn) SetWriteTimeout(nsec int64) error {
- return os.EPLAN9
-}
-
// IP-specific methods.
// ReadFrom implements the net.PacketConn ReadFrom method.
//
// WriteToIP can be made to time out and return
// an error with Timeout() == true after a fixed time limit;
-// see SetTimeout and SetWriteTimeout.
+// see SetDeadline and SetWriteDeadline.
// On packet-oriented connections, write timeouts are rare.
func (c *IPConn) WriteToIP(b []byte, addr *IPAddr) (n int, err error) {
return 0, os.EPLAN9
"errors"
"os"
"syscall"
+ "time"
)
func sockaddrToIP(sa syscall.Sockaddr) Addr {
return c.fd.raddr
}
-// SetTimeout implements the net.Conn SetTimeout method.
-func (c *IPConn) SetTimeout(nsec int64) error {
+// SetDeadline implements the net.Conn SetDeadline method.
+func (c *IPConn) SetDeadline(t time.Time) error {
if !c.ok() {
return os.EINVAL
}
- return setTimeout(c.fd, nsec)
+ return setDeadline(c.fd, t)
}
-// SetReadTimeout implements the net.Conn SetReadTimeout method.
-func (c *IPConn) SetReadTimeout(nsec int64) error {
+// SetReadDeadline implements the net.Conn SetReadDeadline method.
+func (c *IPConn) SetReadDeadline(t time.Time) error {
if !c.ok() {
return os.EINVAL
}
- return setReadTimeout(c.fd, nsec)
+ return setReadDeadline(c.fd, t)
}
-// SetWriteTimeout implements the net.Conn SetWriteTimeout method.
-func (c *IPConn) SetWriteTimeout(nsec int64) error {
+// SetWriteDeadline implements the net.Conn SetWriteDeadline method.
+func (c *IPConn) SetWriteDeadline(t time.Time) error {
if !c.ok() {
return os.EINVAL
}
- return setWriteTimeout(c.fd, nsec)
+ return setWriteDeadline(c.fd, t)
}
// SetReadBuffer sets the size of the operating system's
// that was on the packet.
//
// ReadFromIP can be made to time out and return an error with
-// Timeout() == true after a fixed time limit; see SetTimeout and
-// SetReadTimeout.
+// Timeout() == true after a fixed time limit; see SetDeadline and
+// SetReadDeadline.
func (c *IPConn) ReadFromIP(b []byte) (n int, addr *IPAddr, err error) {
if !c.ok() {
return 0, nil, os.EINVAL
//
// WriteToIP can be made to time out and return
// an error with Timeout() == true after a fixed time limit;
-// see SetTimeout and SetWriteTimeout.
+// see SetDeadline and SetWriteDeadline.
// On packet-oriented connections, write timeouts are rare.
func (c *IPConn) WriteToIP(b []byte, addr *IPAddr) (n int, err error) {
if !c.ok() {
"errors"
"io"
"os"
+ "time"
)
// probeIPv6Stack returns two boolean values. If the first boolean value is
return c.raddr
}
-// SetTimeout implements the net.Conn SetTimeout method.
-func (c *plan9Conn) SetTimeout(nsec int64) error {
- if !c.ok() {
- return os.EINVAL
- }
+// SetDeadline implements the net.Conn SetDeadline method.
+func (c *plan9Conn) SetDeadline(t time.Time) error {
return os.EPLAN9
}
-// SetReadTimeout implements the net.Conn SetReadTimeout method.
-func (c *plan9Conn) SetReadTimeout(nsec int64) error {
- if !c.ok() {
- return os.EINVAL
- }
+// SetReadDeadline implements the net.Conn SetReadDeadline method.
+func (c *plan9Conn) SetReadDeadline(t time.Time) error {
return os.EPLAN9
}
-// SetWriteTimeout implements the net.Conn SetWriteTimeout method.
-func (c *plan9Conn) SetWriteTimeout(nsec int64) error {
- if !c.ok() {
- return os.EINVAL
- }
+// SetWriteDeadline implements the net.Conn SetWriteDeadline method.
+func (c *plan9Conn) SetWriteDeadline(t time.Time) error {
return os.EPLAN9
}
family() int
}
-func internetSocket(net string, laddr, raddr sockaddr, socktype, proto int, mode string, toAddr func(syscall.Sockaddr) Addr) (fd *netFD, err error) {
+func internetSocket(net string, laddr, raddr sockaddr, sotype, proto int, mode string, toAddr func(syscall.Sockaddr) Addr) (fd *netFD, err error) {
var oserr error
var la, ra syscall.Sockaddr
family := favoriteAddrFamily(net, raddr, laddr, mode)
goto Error
}
}
- fd, oserr = socket(net, family, socktype, proto, la, ra, toAddr)
+ fd, oserr = socket(net, family, sotype, proto, la, ra, toAddr)
if oserr != nil {
goto Error
}
return query("/net/dns", addr+" "+typ, 1024)
}
-// LookupHost looks up the given host using the local resolver.
-// It returns an array of that host's addresses.
-func LookupHost(host string) (addrs []string, err error) {
+func lookupHost(host string) (addrs []string, err error) {
 // Use /net/cs instead of /net/dns because cs knows about
// host names in local network (e.g. from /lib/ndb/local)
lines, err := queryCS("tcp", host, "1")
return
}
-// LookupIP looks up host using the local resolver.
-// It returns an array of that host's IPv4 and IPv6 addresses.
-func LookupIP(host string) (ips []IP, err error) {
+func lookupIP(host string) (ips []IP, err error) {
addrs, err := LookupHost(host)
if err != nil {
return
return
}
-// LookupPort looks up the port for the given network and service.
-func LookupPort(network, service string) (port int, err error) {
+func lookupPort(network, service string) (port int, err error) {
switch network {
case "tcp4", "tcp6":
network = "tcp"
return 0, unknownPortError
}
-// LookupCNAME returns the canonical DNS host for the given name.
-// Callers that do not care about the canonical name can call
-// LookupHost or LookupIP directly; both take care of resolving
-// the canonical name as part of the lookup.
-func LookupCNAME(name string) (cname string, err error) {
+func lookupCNAME(name string) (cname string, err error) {
lines, err := queryDNS(name, "cname")
if err != nil {
return
return "", errors.New("net: bad response from ndb/dns")
}
-// LookupSRV tries to resolve an SRV query of the given service,
-// protocol, and domain name. The proto is "tcp" or "udp".
-// The returned records are sorted by priority and randomized
-// by weight within a priority.
-//
-// LookupSRV constructs the DNS name to look up following RFC 2782.
-// That is, it looks up _service._proto.name. To accommodate services
-// publishing SRV records under non-standard names, if both service
-// and proto are empty strings, LookupSRV looks up name directly.
-func LookupSRV(service, proto, name string) (cname string, addrs []*SRV, err error) {
+func lookupSRV(service, proto, name string) (cname string, addrs []*SRV, err error) {
var target string
if service == "" && proto == "" {
target = name
return
}
-// LookupMX returns the DNS MX records for the given domain name sorted by preference.
-func LookupMX(name string) (mx []*MX, err error) {
+func lookupMX(name string) (mx []*MX, err error) {
lines, err := queryDNS(name, "mx")
if err != nil {
return
return
}
-// LookupTXT returns the DNS TXT records for the given domain name.
-func LookupTXT(name string) (txt []string, err error) {
+func lookupTXT(name string) (txt []string, err error) {
lines, err := queryDNS(name, "txt")
if err != nil {
return
return
}
-// LookupAddr performs a reverse lookup for the given address, returning a list
-// of names mapping to that address.
-func LookupAddr(addr string) (name []string, err error) {
+func lookupAddr(addr string) (name []string, err error) {
arpa, err := reverseaddr(addr)
if err != nil {
return
return
}
-// LookupHost looks up the given host using the local resolver.
-// It returns an array of that host's addresses.
-func LookupHost(host string) (addrs []string, err error) {
+func lookupHost(host string) (addrs []string, err error) {
addrs, err, ok := cgoLookupHost(host)
if !ok {
addrs, err = goLookupHost(host)
return
}
-// LookupIP looks up host using the local resolver.
-// It returns an array of that host's IPv4 and IPv6 addresses.
-func LookupIP(host string) (addrs []IP, err error) {
+func lookupIP(host string) (addrs []IP, err error) {
addrs, err, ok := cgoLookupIP(host)
if !ok {
addrs, err = goLookupIP(host)
return
}
-// LookupPort looks up the port for the given network and service.
-func LookupPort(network, service string) (port int, err error) {
+func lookupPort(network, service string) (port int, err error) {
port, err, ok := cgoLookupPort(network, service)
if !ok {
port, err = goLookupPort(network, service)
return
}
-// LookupCNAME returns the canonical DNS host for the given name.
-// Callers that do not care about the canonical name can call
-// LookupHost or LookupIP directly; both take care of resolving
-// the canonical name as part of the lookup.
-func LookupCNAME(name string) (cname string, err error) {
+func lookupCNAME(name string) (cname string, err error) {
cname, err, ok := cgoLookupCNAME(name)
if !ok {
cname, err = goLookupCNAME(name)
return
}
-// LookupSRV tries to resolve an SRV query of the given service,
-// protocol, and domain name. The proto is "tcp" or "udp".
-// The returned records are sorted by priority and randomized
-// by weight within a priority.
-//
-// LookupSRV constructs the DNS name to look up following RFC 2782.
-// That is, it looks up _service._proto.name. To accommodate services
-// publishing SRV records under non-standard names, if both service
-// and proto are empty strings, LookupSRV looks up name directly.
-func LookupSRV(service, proto, name string) (cname string, addrs []*SRV, err error) {
+func lookupSRV(service, proto, name string) (cname string, addrs []*SRV, err error) {
var target string
if service == "" && proto == "" {
target = name
return
}
-// LookupMX returns the DNS MX records for the given domain name sorted by preference.
-func LookupMX(name string) (mx []*MX, err error) {
+func lookupMX(name string) (mx []*MX, err error) {
_, records, err := lookup(name, dnsTypeMX)
if err != nil {
return
return
}
-// LookupTXT returns the DNS TXT records for the given domain name.
-func LookupTXT(name string) (txt []string, err error) {
+func lookupTXT(name string) (txt []string, err error) {
_, records, err := lookup(name, dnsTypeTXT)
if err != nil {
return
return
}
-// LookupAddr performs a reverse lookup for the given address, returning a list
-// of names mapping to that address.
-func LookupAddr(addr string) (name []string, err error) {
+func lookupAddr(addr string) (name []string, err error) {
name = lookupStaticAddr(addr)
if len(name) > 0 {
return
return int(p.Proto), nil
}
-func LookupHost(name string) (addrs []string, err error) {
+func lookupHost(name string) (addrs []string, err error) {
ips, err := LookupIP(name)
if err != nil {
return
return
}
-func LookupIP(name string) (addrs []IP, err error) {
+func lookupIP(name string) (addrs []IP, err error) {
hostentLock.Lock()
defer hostentLock.Unlock()
h, e := syscall.GetHostByName(name)
return addrs, nil
}
-func LookupPort(network, service string) (port int, err error) {
+func lookupPort(network, service string) (port int, err error) {
switch network {
case "tcp4", "tcp6":
network = "tcp"
return int(syscall.Ntohs(s.Port)), nil
}
-func LookupCNAME(name string) (cname string, err error) {
+func lookupCNAME(name string) (cname string, err error) {
var r *syscall.DNSRecord
e := syscall.DnsQuery(name, syscall.DNS_TYPE_CNAME, 0, nil, &r, nil)
if e != nil {
return
}
-// LookupSRV tries to resolve an SRV query of the given service,
-// protocol, and domain name. The proto is "tcp" or "udp".
-// The returned records are sorted by priority and randomized
-// by weight within a priority.
-//
-// LookupSRV constructs the DNS name to look up following RFC 2782.
-// That is, it looks up _service._proto.name. To accommodate services
-// publishing SRV records under non-standard names, if both service
-// and proto are empty strings, LookupSRV looks up name directly.
-func LookupSRV(service, proto, name string) (cname string, addrs []*SRV, err error) {
+func lookupSRV(service, proto, name string) (cname string, addrs []*SRV, err error) {
var target string
if service == "" && proto == "" {
target = name
return name, addrs, nil
}
-func LookupMX(name string) (mx []*MX, err error) {
+func lookupMX(name string) (mx []*MX, err error) {
var r *syscall.DNSRecord
e := syscall.DnsQuery(name, syscall.DNS_TYPE_MX, 0, nil, &r, nil)
if e != nil {
return mx, nil
}
-func LookupTXT(name string) (txt []string, err error) {
+func lookupTXT(name string) (txt []string, err error) {
var r *syscall.DNSRecord
e := syscall.DnsQuery(name, syscall.DNS_TYPE_TEXT, 0, nil, &r, nil)
if e != nil {
return
}
-func LookupAddr(addr string) (name []string, err error) {
+func lookupAddr(addr string) (name []string, err error) {
arpa, err := reverseaddr(addr)
if err != nil {
return nil, err
// TODO(rsc):
// support for raw ethernet sockets
-import "errors"
+import (
+ "errors"
+ "time"
+)
// Addr represents a network end point address.
type Addr interface {
type Conn interface {
// Read reads data from the connection.
// Read can be made to time out and return a net.Error with Timeout() == true
- // after a fixed time limit; see SetTimeout and SetReadTimeout.
+ // after a fixed time limit; see SetDeadline and SetReadDeadline.
Read(b []byte) (n int, err error)
// Write writes data to the connection.
// Write can be made to time out and return a net.Error with Timeout() == true
- // after a fixed time limit; see SetTimeout and SetWriteTimeout.
+ // after a fixed time limit; see SetDeadline and SetWriteDeadline.
Write(b []byte) (n int, err error)
// Close closes the connection.
// RemoteAddr returns the remote network address.
RemoteAddr() Addr
- // SetTimeout sets the read and write deadlines associated
+ // SetDeadline sets the read and write deadlines associated
// with the connection.
- SetTimeout(nsec int64) error
-
- // SetReadTimeout sets the time (in nanoseconds) that
- // Read will wait for data before returning an error with Timeout() == true.
- // Setting nsec == 0 (the default) disables the deadline.
- SetReadTimeout(nsec int64) error
-
- // SetWriteTimeout sets the time (in nanoseconds) that
- // Write will wait to send its data before returning an error with Timeout() == true.
- // Setting nsec == 0 (the default) disables the deadline.
+ SetDeadline(t time.Time) error
+
+ // SetReadDeadline sets the deadline for all Read calls to return.
+ // If the deadline is reached, Read will fail with a timeout
+ // (see type Error) instead of blocking.
+ // A zero value for t means Read will not time out.
+ SetReadDeadline(t time.Time) error
+
+ // SetWriteDeadline sets the deadline for all Write calls to return.
+ // If the deadline is reached, Write will fail with a timeout
+ // (see type Error) instead of blocking.
+ // A zero value for t means Write will not time out.
// Even if write times out, it may return n > 0, indicating that
// some of the data was successfully written.
- SetWriteTimeout(nsec int64) error
+ SetWriteDeadline(t time.Time) error
}
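A short sketch of how a caller typically uses the new deadline methods; the address is a placeholder and the timeout check relies on the net.Error interface described just below.

package main

import (
	"fmt"
	"net"
	"time"
)

// readWithTimeout reads once from c, giving up after the supplied timeout.
// Passing the zero time.Time to SetReadDeadline would disable the limit again.
func readWithTimeout(c net.Conn, timeout time.Duration, buf []byte) (int, error) {
	if err := c.SetReadDeadline(time.Now().Add(timeout)); err != nil {
		return 0, err
	}
	return c.Read(buf)
}

func main() {
	c, err := net.Dial("tcp", "example.com:80") // placeholder address
	if err != nil {
		fmt.Println("dial:", err)
		return
	}
	defer c.Close()
	buf := make([]byte, 512)
	n, err := readWithTimeout(c, 100*time.Millisecond, buf)
	if e, ok := err.(net.Error); ok && e.Timeout() {
		fmt.Println("read timed out") // deadline expired; the connection is still usable
	} else if err == nil {
		fmt.Printf("read %d bytes\n", n)
	}
}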
// An Error represents a network error.
// was on the packet.
// ReadFrom can be made to time out and return
// an error with Timeout() == true after a fixed time limit;
- // see SetTimeout and SetReadTimeout.
+ // see SetDeadline and SetReadDeadline.
ReadFrom(b []byte) (n int, addr Addr, err error)
// WriteTo writes a packet with payload b to addr.
// WriteTo can be made to time out and return
// an error with Timeout() == true after a fixed time limit;
- // see SetTimeout and SetWriteTimeout.
+ // see SetDeadline and SetWriteDeadline.
// On packet-oriented connections, write timeouts are rare.
WriteTo(b []byte, addr Addr) (n int, err error)
// LocalAddr returns the local network address.
LocalAddr() Addr
- // SetTimeout sets the read and write deadlines associated
+ // SetDeadline sets the read and write deadlines associated
// with the connection.
- SetTimeout(nsec int64) error
-
- // SetReadTimeout sets the time (in nanoseconds) that
- // Read will wait for data before returning an error with Timeout() == true.
- // Setting nsec == 0 (the default) disables the deadline.
- SetReadTimeout(nsec int64) error
-
- // SetWriteTimeout sets the time (in nanoseconds) that
- // Write will wait to send its data before returning an error with Timeout() == true.
- // Setting nsec == 0 (the default) disables the deadline.
+ SetDeadline(t time.Time) error
+
+ // SetReadDeadline sets the deadline for all Read calls to return.
+ // If the deadline is reached, Read will fail with a timeout
+ // (see type Error) instead of blocking.
+ // A zero value for t means Read will not time out.
+ SetReadDeadline(t time.Time) error
+
+ // SetWriteDeadline sets the deadline for all Write calls to return.
+ // If the deadline is reached, Write will fail with a timeout
+ // (see type Error) instead of blocking.
+ // A zero value for t means Write will not time out.
// Even if write times out, it may return n > 0, indicating that
// some of the data was successfully written.
- SetWriteTimeout(nsec int64) error
+ SetWriteDeadline(t time.Time) error
}
// A Listener is a generic network listener for stream-oriented protocols.
import (
"errors"
"io"
+ "time"
)
// Pipe creates a synchronous, in-memory, full duplex
return pipeAddr(0)
}
-func (p *pipe) SetTimeout(nsec int64) error {
- return errors.New("net.Pipe does not support timeouts")
+func (p *pipe) SetDeadline(t time.Time) error {
+ return errors.New("net.Pipe does not support deadlines")
}
-func (p *pipe) SetReadTimeout(nsec int64) error {
- return errors.New("net.Pipe does not support timeouts")
+func (p *pipe) SetReadDeadline(t time.Time) error {
+ return errors.New("net.Pipe does not support deadlines")
}
-func (p *pipe) SetWriteTimeout(nsec int64) error {
- return errors.New("net.Pipe does not support timeouts")
+func (p *pipe) SetWriteDeadline(t time.Time) error {
+ return errors.New("net.Pipe does not support deadlines")
}
defer c.wio.Unlock()
c.incref()
defer c.decref()
- if c.wdeadline_delta > 0 {
- // This is a little odd that we're setting the timeout
- // for the entire file but Write has the same issue
- // (if one slurps the whole file into memory and
- // do one large Write). At least they're consistent.
- c.wdeadline = pollserver.Now() + c.wdeadline_delta
- } else {
- c.wdeadline = 0
- }
dst := c.sysfd
src := f.Fd()
"runtime"
"strings"
"testing"
+ "time"
)
// Do not test empty datagrams by default.
if err != nil {
t.Fatalf("net.Dial(%q, %q) = _, %v", network, addr, err)
}
- fd.SetReadTimeout(1e9) // 1s
+ fd.SetReadDeadline(time.Now().Add(1 * time.Second))
var b []byte
if !isEmpty {
}
func doTest(t *testing.T, network, listenaddr, dialaddr string) {
- t.Logf("Test %q %q %q\n", network, listenaddr, dialaddr)
+ t.Logf("Test %q %q %q", network, listenaddr, dialaddr)
switch listenaddr {
case "", "0.0.0.0", "[::]", "[::ffff:0.0.0.0]":
if testing.Short() || avoidMacFirewall {
t.Fatalf("net.ListenPacket(%q, %q) = _, %v", network, addr, err)
}
listening <- c.LocalAddr().String()
- c.SetReadTimeout(10e6) // 10ms
var buf [1000]byte
Run:
for {
+ c.SetReadDeadline(time.Now().Add(10 * time.Millisecond))
n, addr, err := c.ReadFrom(buf[0:])
if e, ok := err.(Error); ok && e.Timeout() {
select {
}
func doTestPacket(t *testing.T, network, listenaddr, dialaddr string, isEmpty bool) {
- t.Logf("TestPacket %s %s %s\n", network, listenaddr, dialaddr)
+ t.Logf("TestPacket %q %q %q", network, listenaddr, dialaddr)
listening := make(chan string)
done := make(chan int)
if network == "udp" {
import (
"crypto/hmac"
+ "crypto/md5"
"errors"
"fmt"
)
func (a *cramMD5Auth) Next(fromServer []byte, more bool) ([]byte, error) {
if more {
- d := hmac.NewMD5([]byte(a.secret))
+ d := hmac.New(md5.New, []byte(a.secret))
d.Write(fromServer)
s := make([]byte, 0, d.Size())
return []byte(fmt.Sprintf("%s %x", a.username, d.Sum(s))), nil
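hmac.NewMD5 is replaced by the generic constructor used above; a minimal sketch of the equivalent standalone call, with a dummy key and challenge.

package main

import (
	"crypto/hmac"
	"crypto/md5"
	"fmt"
)

func main() {
	mac := hmac.New(md5.New, []byte("secret key"))
	mac.Write([]byte("<1896.697170952@postoffice.example.net>"))
	// Sum appends the digest to its argument; passing nil starts from scratch.
	fmt.Printf("%x\n", mac.Sum(nil))
}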
var listenerBacklog = maxListenerBacklog()
// Generic socket creation.
-func socket(net string, f, p, t int, la, ra syscall.Sockaddr, toAddr func(syscall.Sockaddr) Addr) (fd *netFD, err error) {
+func socket(net string, f, t, p int, la, ra syscall.Sockaddr, toAddr func(syscall.Sockaddr) Addr) (fd *netFD, err error) {
// See ../syscall/exec.go for description of ForkLock.
syscall.ForkLock.RLock()
- s, e := syscall.Socket(f, p, t)
+ s, err := syscall.Socket(f, t, p)
if err != nil {
syscall.ForkLock.RUnlock()
return nil, err
syscall.CloseOnExec(s)
syscall.ForkLock.RUnlock()
- setDefaultSockopts(s, f, p)
+ setDefaultSockopts(s, f, t)
if la != nil {
- e = syscall.Bind(s, la)
- if e != nil {
+ err = syscall.Bind(s, la)
+ if err != nil {
closesocket(s)
- return nil, e
+ return nil, err
}
}
- if fd, err = newFD(s, f, p, net); err != nil {
+ if fd, err = newFD(s, f, t, net); err != nil {
closesocket(s)
return nil, err
}
"bytes"
"os"
"syscall"
+ "time"
)
// Boolean to int.
return os.NewSyscallError("setsockopt", syscall.SetsockoptInt(fd.sysfd, syscall.SOL_SOCKET, syscall.SO_SNDBUF, bytes))
}
-func setReadTimeout(fd *netFD, nsec int64) error {
- fd.rdeadline_delta = nsec
+func setReadDeadline(fd *netFD, t time.Time) error {
+ fd.rdeadline = t.UnixNano()
return nil
}
-func setWriteTimeout(fd *netFD, nsec int64) error {
- fd.wdeadline_delta = nsec
+func setWriteDeadline(fd *netFD, t time.Time) error {
+ fd.wdeadline = t.UnixNano()
return nil
}
-func setTimeout(fd *netFD, nsec int64) error {
- if e := setReadTimeout(fd, nsec); e != nil {
+func setDeadline(fd *netFD, t time.Time) error {
+ if e := setReadDeadline(fd, t); e != nil {
return e
}
- return setWriteTimeout(fd, nsec)
+ return setWriteDeadline(fd, t)
}
func setReuseAddr(fd *netFD, reuse bool) error {
"syscall"
)
-func setDefaultSockopts(s, f, p int) {
+func setDefaultSockopts(s, f, t int) {
switch f {
case syscall.AF_INET6:
// Allow both IP versions even if the OS default is otherwise.
syscall.SetsockoptInt(s, syscall.IPPROTO_IPV6, syscall.IPV6_V6ONLY, 0)
}
- if f == syscall.AF_UNIX || p == syscall.IPPROTO_TCP {
+ if f == syscall.AF_UNIX ||
+ (f == syscall.AF_INET || f == syscall.AF_INET6) && t == syscall.SOCK_STREAM {
// Allow reuse of recently-used addresses.
syscall.SetsockoptInt(s, syscall.SOL_SOCKET, syscall.SO_REUSEADDR, 1)
"syscall"
)
-func setDefaultSockopts(s, f, p int) {
+func setDefaultSockopts(s, f, t int) {
switch f {
case syscall.AF_INET6:
// Allow both IP versions even if the OS default is otherwise.
syscall.SetsockoptInt(s, syscall.IPPROTO_IPV6, syscall.IPV6_V6ONLY, 0)
}
- if f == syscall.AF_UNIX || p == syscall.IPPROTO_TCP {
+ if f == syscall.AF_UNIX ||
+ (f == syscall.AF_INET || f == syscall.AF_INET6) && t == syscall.SOCK_STREAM {
// Allow reuse of recently-used addresses.
syscall.SetsockoptInt(s, syscall.SOL_SOCKET, syscall.SO_REUSEADDR, 1)
}
"syscall"
)
-func setDefaultSockopts(s syscall.Handle, f, p int) {
+func setDefaultSockopts(s syscall.Handle, f, t int) {
switch f {
case syscall.AF_INET6:
// Allow both IP versions even if the OS default is otherwise.
import (
"os"
+ "time"
)
// TCPConn is an implementation of the Conn interface
plan9Conn
}
+// SetDeadline implements the net.Conn SetDeadline method.
+func (c *TCPConn) SetDeadline(t time.Time) error {
+ return os.EPLAN9
+}
+
+// SetReadDeadline implements the net.Conn SetReadDeadline method.
+func (c *TCPConn) SetReadDeadline(t time.Time) error {
+ return os.EPLAN9
+}
+
+// SetWriteDeadline implements the net.Conn SetWriteDeadline method.
+func (c *TCPConn) SetWriteDeadline(t time.Time) error {
+ return os.EPLAN9
+}
+
// CloseRead shuts down the reading side of the TCP connection.
// Most callers should just use Close.
func (c *TCPConn) CloseRead() error {
"io"
"os"
"syscall"
+ "time"
)
// BUG(rsc): On OpenBSD, listening on the "tcp" network does not listen for
return c.fd.raddr
}
-// SetTimeout implements the net.Conn SetTimeout method.
-func (c *TCPConn) SetTimeout(nsec int64) error {
+// SetDeadline implements the net.Conn SetDeadline method.
+func (c *TCPConn) SetDeadline(t time.Time) error {
if !c.ok() {
return os.EINVAL
}
- return setTimeout(c.fd, nsec)
+ return setDeadline(c.fd, t)
}
-// SetReadTimeout implements the net.Conn SetReadTimeout method.
-func (c *TCPConn) SetReadTimeout(nsec int64) error {
+// SetReadDeadline implements the net.Conn SetReadDeadline method.
+func (c *TCPConn) SetReadDeadline(t time.Time) error {
if !c.ok() {
return os.EINVAL
}
- return setReadTimeout(c.fd, nsec)
+ return setReadDeadline(c.fd, t)
}
-// SetWriteTimeout implements the net.Conn SetWriteTimeout method.
-func (c *TCPConn) SetWriteTimeout(nsec int64) error {
+// SetWriteDeadline implements the net.Conn SetWriteDeadline method.
+func (c *TCPConn) SetWriteDeadline(t time.Time) error {
if !c.ok() {
return os.EINVAL
}
- return setWriteTimeout(c.fd, nsec)
+ return setWriteDeadline(c.fd, t)
}
// SetReadBuffer sets the size of the operating system's
// Addr returns the listener's network address, a *TCPAddr.
func (l *TCPListener) Addr() Addr { return l.fd.laddr }
-// SetTimeout sets the deadline associated with the listener
-func (l *TCPListener) SetTimeout(nsec int64) error {
+// SetDeadline sets the deadline associated with the listener.
+// A zero time value disables the deadline.
+func (l *TCPListener) SetDeadline(t time.Time) error {
if l == nil || l.fd == nil {
return os.EINVAL
}
- return setTimeout(l.fd, nsec)
+ return setDeadline(l.fd, t)
}
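One common use of a listener deadline is to make Accept return periodically so a serving loop can notice a shutdown signal; a sketch under those assumptions (the stop channel and handler are hypothetical).

package main

import (
	"net"
	"time"
)

// acceptLoop accepts connections until stop is closed, using a listener
// deadline so Accept wakes up about once a second to re-check stop.
func acceptLoop(l *net.TCPListener, stop <-chan struct{}, handle func(net.Conn)) {
	for {
		select {
		case <-stop:
			return
		default:
		}
		l.SetDeadline(time.Now().Add(1 * time.Second))
		c, err := l.Accept()
		if err != nil {
			if e, ok := err.(net.Error); ok && e.Timeout() {
				continue // only the deadline firing; loop and check stop again
			}
			return // real error
		}
		go handle(c)
	}
}

func main() {} // placeholder so the sketch compiles on its own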
// File returns a copy of the underlying os.File, set to blocking mode.
package net
import (
+ "fmt"
"runtime"
"testing"
"time"
return
}
defer fd.Close()
- t0 := time.Now()
- fd.SetReadTimeout(1e8) // 100ms
- var b [100]byte
- var n int
- var err1 error
- if readFrom {
- n, _, err1 = fd.(PacketConn).ReadFrom(b[0:])
- } else {
- n, err1 = fd.Read(b[0:])
- }
- t1 := time.Now()
what := "Read"
if readFrom {
what = "ReadFrom"
}
- if n != 0 || err1 == nil || !err1.(Error).Timeout() {
- t.Errorf("fd.%s on %s %s did not return 0, timeout: %v, %v", what, network, addr, n, err1)
- }
- if dt := t1.Sub(t0); dt < 50*time.Millisecond || dt > 150*time.Millisecond {
- t.Errorf("fd.%s on %s %s took %s, expected 0.1s", what, network, addr, dt)
+
+ errc := make(chan error, 1)
+ go func() {
+ t0 := time.Now()
+ fd.SetReadDeadline(time.Now().Add(100 * time.Millisecond))
+ var b [100]byte
+ var n int
+ var err1 error
+ if readFrom {
+ n, _, err1 = fd.(PacketConn).ReadFrom(b[0:])
+ } else {
+ n, err1 = fd.Read(b[0:])
+ }
+ t1 := time.Now()
+ if n != 0 || err1 == nil || !err1.(Error).Timeout() {
+ errc <- fmt.Errorf("fd.%s on %s %s did not return 0, timeout: %v, %v", what, network, addr, n, err1)
+ return
+ }
+ if dt := t1.Sub(t0); dt < 50*time.Millisecond || dt > 250*time.Millisecond {
+ errc <- fmt.Errorf("fd.%s on %s %s took %s, expected 0.1s", what, network, addr, dt)
+ return
+ }
+ errc <- nil
+ }()
+ select {
+ case err := <-errc:
+ if err != nil {
+ t.Error(err)
+ }
+ case <-time.After(1 * time.Second):
+ t.Errorf("%s on %s %s took over 1 second, expected 0.1s", what, network, addr)
}
}
import (
"errors"
"os"
+ "time"
)
// UDPConn is the implementation of the Conn and PacketConn
plan9Conn
}
+// SetDeadline implements the net.Conn SetDeadline method.
+func (c *UDPConn) SetDeadline(t time.Time) error {
+ return os.EPLAN9
+}
+
+// SetReadDeadline implements the net.Conn SetReadDeadline method.
+func (c *UDPConn) SetReadDeadline(t time.Time) error {
+ return os.EPLAN9
+}
+
+// SetWriteDeadline implements the net.Conn SetWriteDeadline method.
+func (c *UDPConn) SetWriteDeadline(t time.Time) error {
+ return os.EPLAN9
+}
+
// UDP-specific methods.
// ReadFromUDP reads a UDP packet from c, copying the payload into b.
// that was on the packet.
//
// ReadFromUDP can be made to time out and return an error with Timeout() == true
-// after a fixed time limit; see SetTimeout and SetReadTimeout.
+// after a fixed time limit; see SetDeadline and SetReadDeadline.
func (c *UDPConn) ReadFromUDP(b []byte) (n int, addr *UDPAddr, err error) {
if !c.ok() {
return 0, nil, os.EINVAL
//
// WriteToUDP can be made to time out and return
// an error with Timeout() == true after a fixed time limit;
-// see SetTimeout and SetWriteTimeout.
+// see SetDeadline and SetWriteDeadline.
// On packet-oriented connections, write timeouts are rare.
func (c *UDPConn) WriteToUDP(b []byte, addr *UDPAddr) (n int, err error) {
if !c.ok() {
import (
"os"
"syscall"
+ "time"
)
func sockaddrToUDP(sa syscall.Sockaddr) Addr {
return c.fd.raddr
}
-// SetTimeout implements the net.Conn SetTimeout method.
-func (c *UDPConn) SetTimeout(nsec int64) error {
+// SetDeadline implements the net.Conn SetDeadline method.
+func (c *UDPConn) SetDeadline(t time.Time) error {
if !c.ok() {
return os.EINVAL
}
- return setTimeout(c.fd, nsec)
+ return setDeadline(c.fd, t)
}
-// SetReadTimeout implements the net.Conn SetReadTimeout method.
-func (c *UDPConn) SetReadTimeout(nsec int64) error {
+// SetReadDeadline implements the net.Conn SetReadDeadline method.
+func (c *UDPConn) SetReadDeadline(t time.Time) error {
if !c.ok() {
return os.EINVAL
}
- return setReadTimeout(c.fd, nsec)
+ return setReadDeadline(c.fd, t)
}
-// SetWriteTimeout implements the net.Conn SetWriteTimeout method.
-func (c *UDPConn) SetWriteTimeout(nsec int64) error {
+// SetWriteDeadline implements the net.Conn SetWriteDeadline method.
+func (c *UDPConn) SetWriteDeadline(t time.Time) error {
if !c.ok() {
return os.EINVAL
}
- return setWriteTimeout(c.fd, nsec)
+ return setWriteDeadline(c.fd, t)
}
// SetReadBuffer sets the size of the operating system's
// that was on the packet.
//
// ReadFromUDP can be made to time out and return an error with Timeout() == true
-// after a fixed time limit; see SetTimeout and SetReadTimeout.
+// after a fixed time limit; see SetDeadline and SetReadDeadline.
func (c *UDPConn) ReadFromUDP(b []byte) (n int, addr *UDPAddr, err error) {
if !c.ok() {
return 0, nil, os.EINVAL
//
// WriteToUDP can be made to time out and return
// an error with Timeout() == true after a fixed time limit;
-// see SetTimeout and SetWriteTimeout.
+// see SetDeadline and SetWriteDeadline.
// On packet-oriented connections, write timeouts are rare.
func (c *UDPConn) WriteToUDP(b []byte, addr *UDPAddr) (n int, err error) {
if !c.ok() {
package net
import (
+ "io"
"runtime"
"testing"
)
ipv6 bool
packet bool
}{
- {"tcp4", "127.0.0.1:0", false, false},
- {"tcp6", "[::1]:0", true, false},
- {"udp4", "127.0.0.1:0", false, true},
- {"udp6", "[::1]:0", true, true},
+ {net: "tcp4", laddr: "127.0.0.1:0"},
+ {net: "tcp4", laddr: "previous"},
+ {net: "tcp6", laddr: "[::1]:0", ipv6: true},
+ {net: "tcp6", laddr: "previous", ipv6: true},
+ {net: "udp4", laddr: "127.0.0.1:0", packet: true},
+ {net: "udp6", laddr: "[::1]:0", ipv6: true, packet: true},
}
func TestUnicastTCPAndUDP(t *testing.T) {
return
}
+ prevladdr := ""
for _, tt := range unicastTests {
if tt.ipv6 && !supportsIPv6 {
continue
}
- var fd *netFD
+ var (
+ fd *netFD
+ closer io.Closer
+ )
if !tt.packet {
- c, err := Listen(tt.net, tt.laddr)
+ if tt.laddr == "previous" {
+ tt.laddr = prevladdr
+ }
+ l, err := Listen(tt.net, tt.laddr)
if err != nil {
t.Fatalf("Listen failed: %v", err)
}
- defer c.Close()
- fd = c.(*TCPListener).fd
+ prevladdr = l.Addr().String()
+ closer = l
+ fd = l.(*TCPListener).fd
} else {
c, err := ListenPacket(tt.net, tt.laddr)
if err != nil {
t.Fatalf("ListenPacket failed: %v", err)
}
- defer c.Close()
+ closer = c
fd = c.(*UDPConn).fd
}
if !tt.ipv6 {
} else {
testIPv6UnicastSocketOptions(t, fd)
}
+ closer.Close()
}
}
import (
"os"
+ "time"
)
// UnixConn is an implementation of the Conn interface
return nil
}
-// SetTimeout implements the net.Conn SetTimeout method.
-func (c *UnixConn) SetTimeout(nsec int64) error {
+// SetDeadline implements the net.Conn SetDeadline method.
+func (c *UnixConn) SetDeadline(t time.Time) error {
return os.EPLAN9
}
-// SetReadTimeout implements the net.Conn SetReadTimeout method.
-func (c *UnixConn) SetReadTimeout(nsec int64) error {
+// SetReadDeadline implements the net.Conn SetReadDeadline method.
+func (c *UnixConn) SetReadDeadline(t time.Time) error {
return os.EPLAN9
}
-// SetWriteTimeout implements the net.Conn SetWriteTimeout method.
-func (c *UnixConn) SetWriteTimeout(nsec int64) error {
+// SetWriteDeadline implements the net.Conn SetWriteDeadline method.
+func (c *UnixConn) SetWriteDeadline(t time.Time) error {
return os.EPLAN9
}
import (
"os"
"syscall"
+ "time"
)
func unixSocket(net string, laddr, raddr *UnixAddr, mode string) (fd *netFD, err error) {
- var proto int
+ var sotype int
switch net {
default:
return nil, UnknownNetworkError(net)
case "unix":
- proto = syscall.SOCK_STREAM
+ sotype = syscall.SOCK_STREAM
case "unixgram":
- proto = syscall.SOCK_DGRAM
+ sotype = syscall.SOCK_DGRAM
case "unixpacket":
- proto = syscall.SOCK_SEQPACKET
+ sotype = syscall.SOCK_SEQPACKET
}
var la, ra syscall.Sockaddr
}
if raddr != nil {
ra = &syscall.SockaddrUnix{Name: raddr.Name}
- } else if proto != syscall.SOCK_DGRAM || laddr == nil {
+ } else if sotype != syscall.SOCK_DGRAM || laddr == nil {
return nil, &OpError{Op: mode, Net: net, Err: errMissingAddress}
}
}
f := sockaddrToUnix
- if proto == syscall.SOCK_DGRAM {
+ if sotype == syscall.SOCK_DGRAM {
f = sockaddrToUnixgram
- } else if proto == syscall.SOCK_SEQPACKET {
+ } else if sotype == syscall.SOCK_SEQPACKET {
f = sockaddrToUnixpacket
}
- fd, oserr := socket(net, syscall.AF_UNIX, proto, 0, la, ra, f)
+ fd, oserr := socket(net, syscall.AF_UNIX, sotype, 0, la, ra, f)
if oserr != nil {
goto Error
}
return nil
}
-func protoToNet(proto int) string {
- switch proto {
+func sotypeToNet(sotype int) string {
+ switch sotype {
case syscall.SOCK_STREAM:
return "unix"
case syscall.SOCK_SEQPACKET:
case syscall.SOCK_DGRAM:
return "unixgram"
default:
- panic("protoToNet unknown protocol")
+ panic("sotypeToNet unknown socket type")
}
return ""
}
return c.fd.raddr
}
-// SetTimeout implements the net.Conn SetTimeout method.
-func (c *UnixConn) SetTimeout(nsec int64) error {
+// SetDeadline implements the net.Conn SetDeadline method.
+func (c *UnixConn) SetDeadline(t time.Time) error {
if !c.ok() {
return os.EINVAL
}
- return setTimeout(c.fd, nsec)
+ return setDeadline(c.fd, t)
}
-// SetReadTimeout implements the net.Conn SetReadTimeout method.
-func (c *UnixConn) SetReadTimeout(nsec int64) error {
+// SetReadDeadline implements the net.Conn SetReadDeadline method.
+func (c *UnixConn) SetReadDeadline(t time.Time) error {
if !c.ok() {
return os.EINVAL
}
- return setReadTimeout(c.fd, nsec)
+ return setReadDeadline(c.fd, t)
}
-// SetWriteTimeout implements the net.Conn SetWriteTimeout method.
-func (c *UnixConn) SetWriteTimeout(nsec int64) error {
+// SetWriteDeadline implements the net.Conn SetWriteDeadline method.
+func (c *UnixConn) SetWriteDeadline(t time.Time) error {
if !c.ok() {
return os.EINVAL
}
- return setWriteTimeout(c.fd, nsec)
+ return setWriteDeadline(c.fd, t)
}
// SetReadBuffer sets the size of the operating system's
//
// ReadFromUnix can be made to time out and return
// an error with Timeout() == true after a fixed time limit;
-// see SetTimeout and SetReadTimeout.
+// see SetDeadline and SetReadDeadline.
func (c *UnixConn) ReadFromUnix(b []byte) (n int, addr *UnixAddr, err error) {
if !c.ok() {
return 0, nil, os.EINVAL
n, sa, err := c.fd.ReadFrom(b)
switch sa := sa.(type) {
case *syscall.SockaddrUnix:
- addr = &UnixAddr{sa.Name, protoToNet(c.fd.proto)}
+ addr = &UnixAddr{sa.Name, sotypeToNet(c.fd.sotype)}
}
return
}
//
// WriteToUnix can be made to time out and return
// an error with Timeout() == true after a fixed time limit;
-// see SetTimeout and SetWriteTimeout.
+// see SetDeadline and SetWriteDeadline.
// On packet-oriented connections, write timeouts are rare.
func (c *UnixConn) WriteToUnix(b []byte, addr *UnixAddr) (n int, err error) {
if !c.ok() {
return 0, os.EINVAL
}
- if addr.Net != protoToNet(c.fd.proto) {
+ if addr.Net != sotypeToNet(c.fd.sotype) {
return 0, os.EAFNOSUPPORT
}
sa := &syscall.SockaddrUnix{Name: addr.Name}
n, oobn, flags, sa, err := c.fd.ReadMsg(b, oob)
switch sa := sa.(type) {
case *syscall.SockaddrUnix:
- addr = &UnixAddr{sa.Name, protoToNet(c.fd.proto)}
+ addr = &UnixAddr{sa.Name, sotypeToNet(c.fd.sotype)}
}
return
}
return 0, 0, os.EINVAL
}
if addr != nil {
- if addr.Net != protoToNet(c.fd.proto) {
+ if addr.Net != sotypeToNet(c.fd.sotype) {
return 0, 0, os.EAFNOSUPPORT
}
sa := &syscall.SockaddrUnix{Name: addr.Name}
// Addr returns the listener's network address.
func (l *UnixListener) Addr() Addr { return l.fd.laddr }
-// SetTimeout sets the deadline associated wuth the listener
-func (l *UnixListener) SetTimeout(nsec int64) (err error) {
+// SetDeadline sets the deadline associated with the listener.
+// A zero time value disables the deadline.
+func (l *UnixListener) SetDeadline(t time.Time) (err error) {
if l == nil || l.fd == nil {
return os.EINVAL
}
- return setTimeout(l.fd, nsec)
+ return setDeadline(l.fd, t)
}
// File returns a copy of the underlying os.File, set to blocking mode.
encodeUserPassword
encodeQueryComponent
encodeFragment
- encodeOpaque
)
type EscapeError string
if 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' {
return false
}
+ // TODO: Update the character sets after RFC 3986.
switch c {
case '-', '_', '.', '!', '~', '*', '\'', '(', ')': // §2.3 Unreserved characters (mark)
return false
// the reserved characters to appear unescaped.
switch mode {
case encodePath: // §3.3
- // The RFC allows : @ & = + $ , but saves / ; for assigning
- // meaning to individual path segments. This package
+ // The RFC allows : @ & = + $ but saves / ; , for assigning
+ // meaning to individual path segments. This package
// only manipulates the path as a whole, so we allow those
- // last two as well. Clients that need to distinguish between
- // `/foo;y=z/bar` and `/foo%3by=z/bar` will have to re-decode RawPath.
- // That leaves only ? to escape.
+ // last two as well. That leaves only ? to escape.
return c == '?'
case encodeUserPassword: // §3.2.2
// The RFC text is silent but the grammar allows
// everything, so escape nothing.
return false
-
- case encodeOpaque: // §3 opaque_part
- // The RFC allows opaque_part to use all characters
- // except that the leading / must be escaped.
- // (We implement that case in String.)
- return false
}
}
return string(t)
}
-// UnescapeUserinfo parses the RawUserinfo field of a URL
-// as the form user or user:password and unescapes and returns
-// the two halves.
+// A URL represents a parsed URL (technically, a URI reference).
+// The general form represented is:
//
-// This functionality should only be used with legacy web sites.
-// RFC 2396 warns that interpreting Userinfo this way
-// ``is NOT RECOMMENDED, because the passing of authentication
-// information in clear text (such as URI) has proven to be a
-// security risk in almost every case where it has been used.''
-func UnescapeUserinfo(rawUserinfo string) (user, password string, err error) {
- u, p := split(rawUserinfo, ':', true)
- if user, err = unescape(u, encodeUserPassword); err != nil {
- return "", "", err
- }
- if password, err = unescape(p, encodeUserPassword); err != nil {
- return "", "", err
- }
- return
+// scheme://[userinfo@]host/path[?query][#fragment]
+//
+// URLs that do not start with a slash after the scheme are interpreted as:
+//
+// scheme:opaque[?query][#fragment]
+//
+type URL struct {
+ Scheme string
+ Opaque string // encoded opaque data
+ User *Userinfo // username and password information
+ Host string
+ Path string
+ RawQuery string // encoded query values, without '?'
+ Fragment string // fragment for references, without '#'
}
-// EscapeUserinfo combines user and password in the form
-// user:password (or just user if password is empty) and then
-// escapes it for use as the URL.RawUserinfo field.
-//
+// User returns a Userinfo containing the provided username
+// and no password set.
+func User(username string) *Userinfo {
+ return &Userinfo{username, "", false}
+}
+
+// UserPassword returns a Userinfo containing the provided username
+// and password.
// This functionality should only be used with legacy web sites.
// RFC 2396 warns that interpreting Userinfo this way
// ``is NOT RECOMMENDED, because the passing of authentication
// information in clear text (such as URI) has proven to be a
// security risk in almost every case where it has been used.''
-func EscapeUserinfo(user, password string) string {
- raw := escape(user, encodeUserPassword)
- if password != "" {
- raw += ":" + escape(password, encodeUserPassword)
+func UserPassword(username, password string) *Userinfo {
+ return &Userinfo{username, password, true}
+}
+
+// The Userinfo type is an immutable encapsulation of username and
+// password details for a URL. An existing Userinfo value is guaranteed
+// to have a username set (potentially empty, as allowed by RFC 2396),
+// and optionally a password.
+type Userinfo struct {
+ username string
+ password string
+ passwordSet bool
+}
+
+// Username returns the username.
+func (u *Userinfo) Username() string {
+ return u.username
+}
+
+// Password returns the password in case it is set, and whether it is set.
+func (u *Userinfo) Password() (string, bool) {
+ if u.passwordSet {
+ return u.password, true
}
- return raw
+ return "", false
}
-// A URL represents a parsed URL (technically, a URI reference).
-// The general form represented is:
-// scheme://[userinfo@]host/path[?query][#fragment]
-// The Raw, RawAuthority, RawPath, and RawQuery fields are in "wire format"
-// (special characters must be hex-escaped if not meant to have special meaning).
-// All other fields are logical values; '+' or '%' represent themselves.
-//
-// The various Raw values are supplied in wire format because
-// clients typically have to split them into pieces before further
-// decoding.
-type URL struct {
- Raw string // the original string
- Scheme string // scheme
- RawAuthority string // [userinfo@]host
- RawUserinfo string // userinfo
- Host string // host
- RawPath string // /path[?query][#fragment]
- Path string // /path
- OpaquePath bool // path is opaque (unrooted when scheme is present)
- RawQuery string // query
- Fragment string // fragment
+// String returns the encoded userinfo information in the standard form
+// of "username[:password]".
+func (u *Userinfo) String() string {
+ s := escape(u.username, encodeUserPassword)
+ if u.passwordSet {
+ s += ":" + escape(u.password, encodeUserPassword)
+ }
+ return s
}
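For illustration only (not part of this change): a minimal sketch of how the new Userinfo helpers compose with URL.String, assuming this package is imported as net/url; the host and credentials below are placeholders.

	package main

	import (
		"fmt"
		"net/url" // assumed import path for this package
	)

	func main() {
		u := &url.URL{
			Scheme: "ftp",
			User:   url.UserPassword("webmaster", "secret"), // password is included by String
			Host:   "example.com",
			Path:   "/",
		}
		fmt.Println(u)                 // ftp://webmaster:secret@example.com/
		fmt.Println(u.User.Username()) // webmaster
		if pw, ok := u.User.Password(); ok {
			fmt.Println(pw) // secret
		}
	}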
// Maybe rawurl is of the form scheme:path.
// in which case only absolute URLs or path-absolute relative URLs are allowed.
// If viaRequest is false, all forms of relative URLs are allowed.
func parse(rawurl string, viaRequest bool) (url *URL, err error) {
- var (
- leadingSlash bool
- path string
- )
+ var rest string
if rawurl == "" {
err = errors.New("empty url")
goto Error
}
url = new(URL)
- url.Raw = rawurl
// Split off possible leading "http:", "mailto:", etc.
// Cannot contain escaped characters.
- if url.Scheme, path, err = getscheme(rawurl); err != nil {
+ if url.Scheme, rest, err = getscheme(rawurl); err != nil {
goto Error
}
- leadingSlash = strings.HasPrefix(path, "/")
- if url.Scheme != "" && !leadingSlash {
- // RFC 2396:
- // Absolute URI (has scheme) with non-rooted path
- // is uninterpreted. It doesn't even have a ?query.
- // This is the case that handles mailto:name@example.com.
- url.RawPath = path
+ rest, url.RawQuery = split(rest, '?', true)
- if url.Path, err = unescape(path, encodeOpaque); err != nil {
- goto Error
+ if !strings.HasPrefix(rest, "/") {
+ if url.Scheme != "" {
+			// Per RFC 3986, a rootless path in an absolute URI is treated as opaque.
+ url.Opaque = rest
+ return url, nil
}
- url.OpaquePath = true
- } else {
- if viaRequest && !leadingSlash {
+ if viaRequest {
err = errors.New("invalid URI for request")
goto Error
}
+ }
- // Split off query before parsing path further.
- url.RawPath = path
- path, query := split(path, '?', false)
- if len(query) > 1 {
- url.RawQuery = query[1:]
- }
-
- // Maybe path is //authority/path
- if (url.Scheme != "" || !viaRequest) &&
- strings.HasPrefix(path, "//") && !strings.HasPrefix(path, "///") {
- url.RawAuthority, path = split(path[2:], '/', false)
- url.RawPath = url.RawPath[2+len(url.RawAuthority):]
- }
-
- // Split authority into userinfo@host.
- // If there's no @, split's default is wrong. Check explicitly.
- var rawHost string
- if strings.Index(url.RawAuthority, "@") < 0 {
- rawHost = url.RawAuthority
- } else {
- url.RawUserinfo, rawHost = split(url.RawAuthority, '@', true)
- }
-
- // We leave RawAuthority only in raw form because clients
- // of common protocols should be using Userinfo and Host
- // instead. Clients that wish to use RawAuthority will have to
- // interpret it themselves: RFC 2396 does not define the meaning.
-
- if strings.Contains(rawHost, "%") {
- // Host cannot contain escaped characters.
- err = errors.New("hexadecimal escape in host")
+ if (url.Scheme != "" || !viaRequest) && strings.HasPrefix(rest, "//") && !strings.HasPrefix(rest, "///") {
+ var authority string
+ authority, rest = split(rest[2:], '/', false)
+ url.User, url.Host, err = parseAuthority(authority)
+ if err != nil {
goto Error
}
- url.Host = rawHost
-
- if url.Path, err = unescape(path, encodePath); err != nil {
+ if strings.Contains(url.Host, "%") {
+ err = errors.New("hexadecimal escape in host")
goto Error
}
}
+ if url.Path, err = unescape(rest, encodePath); err != nil {
+ goto Error
+ }
return url, nil
Error:
return nil, &Error{"parse", rawurl, err}
+}
+func parseAuthority(authority string) (user *Userinfo, host string, err error) {
+ if strings.Index(authority, "@") < 0 {
+ host = authority
+ return
+ }
+ userinfo, host := split(authority, '@', true)
+ if strings.Index(userinfo, ":") < 0 {
+ if userinfo, err = unescape(userinfo, encodeUserPassword); err != nil {
+ return
+ }
+ user = User(userinfo)
+ } else {
+ username, password := split(userinfo, ':', true)
+ if username, err = unescape(username, encodeUserPassword); err != nil {
+ return
+ }
+ if password, err = unescape(password, encodeUserPassword); err != nil {
+ return
+ }
+ user = UserPassword(username, password)
+ }
+ return
}
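A sketch, not part of the patch, of how the rewritten parse and parseAuthority surface escaped userinfo through the exported API; it assumes the net/url import path, and the host is a placeholder.

	package main

	import (
		"fmt"
		"log"
		"net/url" // assumed import path
	)

	func main() {
		u, err := url.Parse("ftp://john%20doe:secret@www.example.com/path")
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(u.User.Username()) // john doe
		pw, set := u.User.Password()
		fmt.Println(pw, set)           // secret true
		fmt.Println(u.Host, u.Path)    // www.example.com /path
	}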
// ParseWithReference is like Parse but allows a trailing #fragment.
func ParseWithReference(rawurlref string) (url *URL, err error) {
- // Cut off #frag.
- rawurl, frag := split(rawurlref, '#', false)
+ // Cut off #frag
+ rawurl, frag := split(rawurlref, '#', true)
if url, err = Parse(rawurl); err != nil {
return nil, err
}
- url.Raw += frag
- url.RawPath += frag
- if len(frag) > 1 {
- frag = frag[1:]
- if url.Fragment, err = unescape(frag, encodeFragment); err != nil {
- return nil, &Error{"parse", rawurl, err}
- }
+ if frag == "" {
+ return url, nil
+ }
+ if url.Fragment, err = unescape(frag, encodeFragment); err != nil {
+ return nil, &Error{"parse", rawurlref, err}
}
return url, nil
}
// String reassembles url into a valid URL string.
-//
-// There are redundant fields stored in the URL structure:
-// the String method consults Scheme, Path, Host, RawUserinfo,
-// RawQuery, and Fragment, but not Raw, RawPath or RawAuthority.
func (url *URL) String() string {
+ // TODO: Rewrite to use bytes.Buffer
result := ""
if url.Scheme != "" {
result += url.Scheme + ":"
}
- if url.Host != "" || url.RawUserinfo != "" {
- result += "//"
- if url.RawUserinfo != "" {
- // hide the password, if any
- info := url.RawUserinfo
- if i := strings.Index(info, ":"); i >= 0 {
- info = info[0:i] + ":******"
+ if url.Opaque != "" {
+ result += url.Opaque
+ } else {
+ if url.Host != "" || url.User != nil {
+ result += "//"
+ if u := url.User; u != nil {
+ result += u.String() + "@"
}
- result += info + "@"
- }
- result += url.Host
- }
- if url.OpaquePath {
- path := url.Path
- if strings.HasPrefix(path, "/") {
- result += "%2f"
- path = path[1:]
+ result += url.Host
}
- result += escape(path, encodeOpaque)
- } else {
result += escape(url.Path, encodePath)
}
if url.RawQuery != "" {
// base or reference. If ref is an absolute URL, then ResolveReference
// ignores base and returns a copy of ref.
func (base *URL) ResolveReference(ref *URL) *URL {
- url := new(URL)
- switch {
- case ref.IsAbs():
- *url = *ref
- default:
- // relativeURI = ( net_path | abs_path | rel_path ) [ "?" query ]
- *url = *base
- if ref.RawAuthority != "" {
- // The "net_path" case.
- url.RawAuthority = ref.RawAuthority
- url.Host = ref.Host
- url.RawUserinfo = ref.RawUserinfo
- }
- switch {
- case url.OpaquePath:
- url.Path = ref.Path
- url.RawPath = ref.RawPath
- url.RawQuery = ref.RawQuery
- case strings.HasPrefix(ref.Path, "/"):
- // The "abs_path" case.
- url.Path = ref.Path
- url.RawPath = ref.RawPath
- url.RawQuery = ref.RawQuery
- default:
- // The "rel_path" case.
- path := resolvePath(base.Path, ref.Path)
- if !strings.HasPrefix(path, "/") {
- path = "/" + path
- }
- url.Path = path
- url.RawPath = url.Path
- url.RawQuery = ref.RawQuery
- if ref.RawQuery != "" {
- url.RawPath += "?" + url.RawQuery
- }
+ if ref.IsAbs() {
+ url := *ref
+ return &url
+ }
+ // relativeURI = ( net_path | abs_path | rel_path ) [ "?" query ]
+ url := *base
+ url.RawQuery = ref.RawQuery
+ url.Fragment = ref.Fragment
+ if ref.Opaque != "" {
+ url.Opaque = ref.Opaque
+ url.User = nil
+ url.Host = ""
+ url.Path = ""
+ return &url
+ }
+ if ref.Host != "" || ref.User != nil {
+ // The "net_path" case.
+ url.Host = ref.Host
+ url.User = ref.User
+ }
+ if strings.HasPrefix(ref.Path, "/") {
+ // The "abs_path" case.
+ url.Path = ref.Path
+ } else {
+ // The "rel_path" case.
+ path := resolvePath(base.Path, ref.Path)
+ if !strings.HasPrefix(path, "/") {
+ path = "/" + path
}
-
- url.Fragment = ref.Fragment
+ url.Path = path
}
- url.Raw = url.String()
- return url
+ return &url
}
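For reference only: a sketch of the simplified ResolveReference, mirroring the "../two" case exercised by the resolve-reference tests later in this patch; foo.com is a placeholder host and net/url is the assumed import path.

	package main

	import (
		"fmt"
		"net/url" // assumed import path
	)

	func main() {
		base, _ := url.Parse("http://foo.com/path/one/")
		ref, _ := url.Parse("../two")
		fmt.Println(base.ResolveReference(ref)) // http://foo.com/path/two
	}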
// Query parses RawQuery and returns the corresponding values.
return v
}
-// EncodedPath returns the URL's path in "URL path encoded" form.
-func (u *URL) EncodedPath() string {
- return escape(u.Path, encodePath)
+// RequestURI returns the encoded path?query or opaque?query
+// string that would be used in an HTTP request for u.
+func (u *URL) RequestURI() string {
+ result := u.Opaque
+ if result == "" {
+ result = escape(u.Path, encodePath)
+ if result == "" {
+ result = "/"
+ }
+ }
+ if u.RawQuery != "" {
+ result += "?" + u.RawQuery
+ }
+ return result
}
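Illustration only: a short sketch of RequestURI, matching the requritests cases added below; example.com is a placeholder and net/url is the assumed import path.

	package main

	import (
		"fmt"
		"net/url" // assumed import path
	)

	func main() {
		u := &url.URL{
			Scheme:   "http",
			Host:     "example.com",
			Path:     "/a b",
			RawQuery: "q=go+language",
		}
		fmt.Println(u.RequestURI()) // /a%20b?q=go+language
	}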
{
"http://www.google.com",
&URL{
- Raw: "http://www.google.com",
- Scheme: "http",
- RawAuthority: "www.google.com",
- Host: "www.google.com",
+ Scheme: "http",
+ Host: "www.google.com",
},
"",
},
{
"http://www.google.com/",
&URL{
- Raw: "http://www.google.com/",
- Scheme: "http",
- RawAuthority: "www.google.com",
- Host: "www.google.com",
- RawPath: "/",
- Path: "/",
+ Scheme: "http",
+ Host: "www.google.com",
+ Path: "/",
},
"",
},
{
"http://www.google.com/file%20one%26two",
&URL{
- Raw: "http://www.google.com/file%20one%26two",
- Scheme: "http",
- RawAuthority: "www.google.com",
- Host: "www.google.com",
- RawPath: "/file%20one%26two",
- Path: "/file one&two",
+ Scheme: "http",
+ Host: "www.google.com",
+ Path: "/file one&two",
},
"http://www.google.com/file%20one&two",
},
{
"ftp://webmaster@www.google.com/",
&URL{
- Raw: "ftp://webmaster@www.google.com/",
- Scheme: "ftp",
- RawAuthority: "webmaster@www.google.com",
- RawUserinfo: "webmaster",
- Host: "www.google.com",
- RawPath: "/",
- Path: "/",
+ Scheme: "ftp",
+ User: User("webmaster"),
+ Host: "www.google.com",
+ Path: "/",
},
"",
},
{
"ftp://john%20doe@www.google.com/",
&URL{
- Raw: "ftp://john%20doe@www.google.com/",
- Scheme: "ftp",
- RawAuthority: "john%20doe@www.google.com",
- RawUserinfo: "john%20doe",
- Host: "www.google.com",
- RawPath: "/",
- Path: "/",
+ Scheme: "ftp",
+ User: User("john doe"),
+ Host: "www.google.com",
+ Path: "/",
},
"ftp://john%20doe@www.google.com/",
},
{
"http://www.google.com/?q=go+language",
&URL{
- Raw: "http://www.google.com/?q=go+language",
- Scheme: "http",
- RawAuthority: "www.google.com",
- Host: "www.google.com",
- RawPath: "/?q=go+language",
- Path: "/",
- RawQuery: "q=go+language",
+ Scheme: "http",
+ Host: "www.google.com",
+ Path: "/",
+ RawQuery: "q=go+language",
},
"",
},
{
"http://www.google.com/?q=go%20language",
&URL{
- Raw: "http://www.google.com/?q=go%20language",
- Scheme: "http",
- RawAuthority: "www.google.com",
- Host: "www.google.com",
- RawPath: "/?q=go%20language",
- Path: "/",
- RawQuery: "q=go%20language",
+ Scheme: "http",
+ Host: "www.google.com",
+ Path: "/",
+ RawQuery: "q=go%20language",
},
"",
},
{
"http://www.google.com/a%20b?q=c+d",
&URL{
- Raw: "http://www.google.com/a%20b?q=c+d",
- Scheme: "http",
- RawAuthority: "www.google.com",
- Host: "www.google.com",
- RawPath: "/a%20b?q=c+d",
- Path: "/a b",
- RawQuery: "q=c+d",
+ Scheme: "http",
+ Host: "www.google.com",
+ Path: "/a b",
+ RawQuery: "q=c+d",
},
"",
},
- // path without leading /, so no query parsing
+ // path without leading /, so no parsing
{
"http:www.google.com/?q=go+language",
&URL{
- Raw: "http:www.google.com/?q=go+language",
- Scheme: "http",
- RawPath: "www.google.com/?q=go+language",
- Path: "www.google.com/?q=go+language",
- OpaquePath: true,
+ Scheme: "http",
+ Opaque: "www.google.com/",
+ RawQuery: "q=go+language",
},
"http:www.google.com/?q=go+language",
},
- // path without leading /, so no query parsing
+ // path without leading /, so no parsing
{
"http:%2f%2fwww.google.com/?q=go+language",
&URL{
- Raw: "http:%2f%2fwww.google.com/?q=go+language",
- Scheme: "http",
- RawPath: "%2f%2fwww.google.com/?q=go+language",
- Path: "//www.google.com/?q=go+language",
- OpaquePath: true,
+ Scheme: "http",
+ Opaque: "%2f%2fwww.google.com/",
+ RawQuery: "q=go+language",
},
- "http:%2f/www.google.com/?q=go+language",
+ "http:%2f%2fwww.google.com/?q=go+language",
},
// non-authority
{
"mailto:/webmaster@golang.org",
&URL{
- Raw: "mailto:/webmaster@golang.org",
- Scheme: "mailto",
- RawPath: "/webmaster@golang.org",
- Path: "/webmaster@golang.org",
+ Scheme: "mailto",
+ Path: "/webmaster@golang.org",
},
"",
},
{
"mailto:webmaster@golang.org",
&URL{
- Raw: "mailto:webmaster@golang.org",
- Scheme: "mailto",
- RawPath: "webmaster@golang.org",
- Path: "webmaster@golang.org",
- OpaquePath: true,
+ Scheme: "mailto",
+ Opaque: "webmaster@golang.org",
},
"",
},
{
"/foo?query=http://bad",
&URL{
- Raw: "/foo?query=http://bad",
- RawPath: "/foo?query=http://bad",
Path: "/foo",
RawQuery: "query=http://bad",
},
{
"//foo",
&URL{
- RawAuthority: "foo",
- Raw: "//foo",
- Host: "foo",
- Scheme: "",
- RawPath: "",
- Path: "",
+ Host: "foo",
},
"",
},
{
"//user@foo/path?a=b",
&URL{
- Raw: "//user@foo/path?a=b",
- RawAuthority: "user@foo",
- RawUserinfo: "user",
- Scheme: "",
- RawPath: "/path?a=b",
- Path: "/path",
- RawQuery: "a=b",
- Host: "foo",
+ User: User("user"),
+ Host: "foo",
+ Path: "/path",
+ RawQuery: "a=b",
},
"",
},
{
"///threeslashes",
&URL{
- RawAuthority: "",
- Raw: "///threeslashes",
- Host: "",
- Scheme: "",
- RawPath: "///threeslashes",
- Path: "///threeslashes",
+ Path: "///threeslashes",
},
"",
},
{
"http://user:password@google.com",
&URL{
- Raw: "http://user:password@google.com",
- Scheme: "http",
- RawAuthority: "user:password@google.com",
- RawUserinfo: "user:password",
- Host: "google.com",
- },
- "http://user:******@google.com",
- },
- {
- "http://user:longerpass@google.com",
- &URL{
- Raw: "http://user:longerpass@google.com",
- Scheme: "http",
- RawAuthority: "user:longerpass@google.com",
- RawUserinfo: "user:longerpass",
- Host: "google.com",
+ Scheme: "http",
+ User: UserPassword("user", "password"),
+ Host: "google.com",
},
- "http://user:******@google.com",
+ "http://user:password@google.com",
},
}
{
"http://www.google.com/?q=go+language#foo",
&URL{
- Raw: "http://www.google.com/?q=go+language#foo",
- Scheme: "http",
- RawAuthority: "www.google.com",
- Host: "www.google.com",
- RawPath: "/?q=go+language#foo",
- Path: "/",
- RawQuery: "q=go+language#foo",
+ Scheme: "http",
+ Host: "www.google.com",
+ Path: "/",
+ RawQuery: "q=go+language#foo",
},
"",
},
{
"http://www.google.com/?q=go+language#foo",
&URL{
- Raw: "http://www.google.com/?q=go+language#foo",
- Scheme: "http",
- RawAuthority: "www.google.com",
- Host: "www.google.com",
- RawPath: "/?q=go+language#foo",
- Path: "/",
- RawQuery: "q=go+language",
- Fragment: "foo",
+ Scheme: "http",
+ Host: "www.google.com",
+ Path: "/",
+ RawQuery: "q=go+language",
+ Fragment: "foo",
},
"",
},
{
"http://www.google.com/?q=go+language#foo%26bar",
&URL{
- Raw: "http://www.google.com/?q=go+language#foo%26bar",
- Scheme: "http",
- RawAuthority: "www.google.com",
- Host: "www.google.com",
- RawPath: "/?q=go+language#foo%26bar",
- Path: "/",
- RawQuery: "q=go+language",
- Fragment: "foo&bar",
+ Scheme: "http",
+ Host: "www.google.com",
+ Path: "/",
+ RawQuery: "q=go+language",
+ Fragment: "foo&bar",
},
"http://www.google.com/?q=go+language#foo&bar",
},
// more useful string for debugging than fmt's struct printer
func ufmt(u *URL) string {
- return fmt.Sprintf("raw=%q, scheme=%q, rawpath=%q, auth=%q, userinfo=%q, host=%q, path=%q, rawq=%q, frag=%q",
- u.Raw, u.Scheme, u.RawPath, u.RawAuthority, u.RawUserinfo,
- u.Host, u.Path, u.RawQuery, u.Fragment)
+ var user, pass interface{}
+ if u.User != nil {
+ user = u.User.Username()
+ if p, ok := u.User.Password(); ok {
+ pass = p
+ }
+ }
+ return fmt.Sprintf("opaque=%q, scheme=%q, user=%#v, pass=%#v, host=%q, path=%q, rawq=%q, frag=%q",
+ u.Opaque, u.Scheme, user, pass, u.Host, u.Path, u.RawQuery, u.Fragment)
}
func DoTest(t *testing.T, parse func(string) (*URL, error), name string, tests []URLTest) {
t.Errorf("%s(%q) returned error %s", name, tt.in, err)
continue
}
- s := u.String()
expected := tt.in
if len(tt.roundtrip) > 0 {
expected = tt.roundtrip
}
+ s := u.String()
if s != expected {
t.Errorf("%s(%q).String() == %q (expected %q)", name, tt.in, s, expected)
}
}
}
-type UserinfoTest struct {
- User string
- Password string
- Raw string
-}
-
-var userinfoTests = []UserinfoTest{
- {"user", "password", "user:password"},
- {"foo:bar", "~!@#$%^&*()_+{}|[]\\-=`:;'\"<>?,./",
- "foo%3Abar:~!%40%23$%25%5E&*()_+%7B%7D%7C%5B%5D%5C-=%60%3A;'%22%3C%3E?,.%2F"},
-}
-
-func TestEscapeUserinfo(t *testing.T) {
- for _, tt := range userinfoTests {
- if raw := EscapeUserinfo(tt.User, tt.Password); raw != tt.Raw {
- t.Errorf("EscapeUserinfo(%q, %q) = %q, want %q", tt.User, tt.Password, raw, tt.Raw)
- }
- }
-}
-
-func TestUnescapeUserinfo(t *testing.T) {
- for _, tt := range userinfoTests {
- if user, pass, err := UnescapeUserinfo(tt.Raw); user != tt.User || pass != tt.Password || err != nil {
- t.Errorf("UnescapeUserinfo(%q) = %q, %q, %v, want %q, %q, nil", tt.Raw, user, pass, err, tt.User, tt.Password)
- }
- }
-}
+//var userinfoTests = []UserinfoTest{
+// {"user", "password", "user:password"},
+// {"foo:bar", "~!@#$%^&*()_+{}|[]\\-=`:;'\"<>?,./",
+// "foo%3Abar:~!%40%23$%25%5E&*()_+%7B%7D%7C%5B%5D%5C-=%60%3A;'%22%3C%3E?,.%2F"},
+//}
type EncodeQueryTest struct {
m Values
t.Errorf("Expected an error from Parse wrapper parsing an empty string.")
}
+ // Ensure Opaque resets the URL.
+ base = mustParse("scheme://user@foo.com/bar")
+ abs = base.ResolveReference(&URL{Opaque: "opaque"})
+ want := mustParse("scheme:opaque")
+ if *abs != *want {
+		t.Errorf("ResolveReference failed to resolve opaque URL: want %#v, got %#v", want, abs)
+ }
+}
+
+func TestResolveReferenceOpaque(t *testing.T) {
+ mustParse := func(url string) *URL {
+ u, err := ParseWithReference(url)
+ if err != nil {
+ t.Fatalf("Expected URL to parse: %q, got error: %v", url, err)
+ }
+ return u
+ }
+ for _, test := range resolveReferenceTests {
+ base := mustParse(test.base)
+ rel := mustParse(test.rel)
+ url := base.ResolveReference(rel)
+ urlStr := url.String()
+ if urlStr != test.expected {
+ t.Errorf("Resolving %q + %q != %q; got %q", test.base, test.rel, test.expected, urlStr)
+ }
+ }
+
+ // Test that new instances are returned.
+ base := mustParse("http://foo.com/")
+ abs := base.ResolveReference(mustParse("."))
+ if base == abs {
+ t.Errorf("Expected no-op reference to return new URL instance.")
+ }
+ barRef := mustParse("http://bar.com/")
+ abs = base.ResolveReference(barRef)
+ if abs == barRef {
+ t.Errorf("Expected resolution of absolute reference to return new URL instance.")
+ }
+
+ // Test the convenience wrapper too
+ base = mustParse("http://foo.com/path/one/")
+ abs, _ = base.Parse("../two")
+ expected := "http://foo.com/path/two"
+ if abs.String() != expected {
+ t.Errorf("Parse wrapper got %q; expected %q", abs.String(), expected)
+ }
+ _, err := base.Parse("")
+ if err == nil {
+ t.Errorf("Expected an error from Parse wrapper parsing an empty string.")
+ }
}
func TestQueryValues(t *testing.T) {
}
}
}
+
+type RequestURITest struct {
+ url *URL
+ out string
+}
+
+var requritests = []RequestURITest{
+ {
+ &URL{
+ Scheme: "http",
+ Host: "example.com",
+ Path: "",
+ },
+ "/",
+ },
+ {
+ &URL{
+ Scheme: "http",
+ Host: "example.com",
+ Path: "/a b",
+ },
+ "/a%20b",
+ },
+ {
+ &URL{
+ Scheme: "http",
+ Host: "example.com",
+ Path: "/a b",
+ RawQuery: "q=go+language",
+ },
+ "/a%20b?q=go+language",
+ },
+ {
+ &URL{
+ Scheme: "myschema",
+ Opaque: "opaque",
+ },
+ "opaque",
+ },
+ {
+ &URL{
+ Scheme: "myschema",
+ Opaque: "opaque",
+ RawQuery: "q=go+language",
+ },
+ "opaque?q=go+language",
+ },
+}
+
+func TestRequestURI(t *testing.T) {
+ for _, tt := range requritests {
+ s := tt.url.RequestURI()
+ if s != tt.out {
+ t.Errorf("%#v.RequestURI() == %q (expected %q)", tt.url, s, tt.out)
+ }
+ }
+}
deadline := time.Now().Add(timeout)
// seq remembers the clients and their seqNum at point of entry.
seq := make(map[unackedCounter]int64)
+ cs.mu.Lock()
for client := range cs.clients {
seq[client] = client.seq()
}
+ cs.mu.Unlock()
for {
pending := false
cs.mu.Lock()
+// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
var elen int;
-// Readdirnames reads and returns a slice of names from the directory f.
-//
-// If n > 0, Readdirnames returns at most n names. In this case, if
-// Readdirnames returns an empty slice, it will return a non-nil error
-// explaining why. At the end of a directory, the error is os.EOF.
-//
-// If n <= 0, Readdirnames returns all the names from the directory in
-// a single slice. In this case, if Readdirnames succeeds (reads all
-// the way to the end of the directory), it returns the slice and a
-// nil error. If it encounters an error before the end of the
-// directory, Readdirnames returns the names read until that point and
-// a non-nil error.
-func (file *File) Readdirnames(n int) (names []string, err error) {
+func (file *File) readdirnames(n int) (names []string, err error) {
if elen == 0 {
var dummy syscall.Dirent;
elen = (unsafe.Offsetof(dummy.Name) +
"syscall"
)
-// Readdir reads the contents of the directory associated with file and
-// returns an array of up to n FileInfo structures, as would be returned
-// by Lstat, in directory order. Subsequent calls on the same file will yield
-// further FileInfos.
-//
-// If n > 0, Readdir returns at most n FileInfo structures. In this case, if
-// Readdirnames returns an empty slice, it will return a non-nil error
-// explaining why. At the end of a directory, the error is io.EOF.
-//
-// If n <= 0, Readdir returns all the FileInfo from the directory in
-// a single slice. In this case, if Readdir succeeds (reads all
-// the way to the end of the directory), it returns the slice and a
-// nil error. If it encounters an error before the end of the
-// directory, Readdir returns the FileInfo read until that point
-// and a non-nil error.
-func (file *File) Readdir(n int) (fi []FileInfo, err error) {
+func (file *File) readdir(n int) (fi []FileInfo, err error) {
// If this file has no dirinfo, create one.
if file.dirinfo == nil {
file.dirinfo = new(dirInfo)
return result, nil
}
-// Readdirnames reads and returns a slice of names from the directory f.
-//
-// If n > 0, Readdirnames returns at most n names. In this case, if
-// Readdirnames returns an empty slice, it will return a non-nil error
-// explaining why. At the end of a directory, the error is io.EOF.
-//
-// If n <= 0, Readdirnames returns all the names from the directory in
-// a single slice. In this case, if Readdirnames succeeds (reads all
-// the way to the end of the directory), it returns the slice and a
-// nil error. If it encounters an error before the end of the
-// directory, Readdirnames returns the names read until that point and
-// a non-nil error.
-func (file *File) Readdirnames(n int) (names []string, err error) {
+func (file *File) readdirnames(n int) (names []string, err error) {
fi, err := file.Readdir(n)
names = make([]string, len(fi))
for i := range fi {
blockSize = 4096
)
-// Readdirnames reads and returns a slice of names from the directory f.
-//
-// If n > 0, Readdirnames returns at most n names. In this case, if
-// Readdirnames returns an empty slice, it will return a non-nil error
-// explaining why. At the end of a directory, the error is io.EOF.
-//
-// If n <= 0, Readdirnames returns all the names from the directory in
-// a single slice. In this case, if Readdirnames succeeds (reads all
-// the way to the end of the directory), it returns the slice and a
-// nil error. If it encounters an error before the end of the
-// directory, Readdirnames returns the names read until that point and
-// a non-nil error.
-func (f *File) Readdirnames(n int) (names []string, err error) {
+func (f *File) readdirnames(n int) (names []string, err error) {
// If this file has no dirinfo, create one.
if f.dirinfo == nil {
f.dirinfo = new(dirInfo)
--- /dev/null
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package os
+
+// FindProcess looks for a running process by its pid.
+// The Process it returns can be used to obtain information
+// about the underlying operating system process.
+func FindProcess(pid int) (p *Process, err error) {
+ return findProcess(pid)
+}
+
+// Hostname returns the host name reported by the kernel.
+func Hostname() (name string, err error) {
+ return hostname()
+}
+
+// Readdir reads the contents of the directory associated with file and
+// returns an array of up to n FileInfo values, as would be returned
+// by Lstat, in directory order. Subsequent calls on the same file will yield
+// further FileInfos.
+//
+// If n > 0, Readdir returns at most n FileInfo structures. In this case, if
+// Readdir returns an empty slice, it will return a non-nil error
+// explaining why. At the end of a directory, the error is io.EOF.
+//
+// If n <= 0, Readdir returns all the FileInfo from the directory in
+// a single slice. In this case, if Readdir succeeds (reads all
+// the way to the end of the directory), it returns the slice and a
+// nil error. If it encounters an error before the end of the
+// directory, Readdir returns the FileInfo read until that point
+// and a non-nil error.
+func (f *File) Readdir(n int) (fi []FileInfo, err error) {
+ return f.readdir(n)
+}
+
+// Readdirnames reads and returns a slice of names from the directory f.
+//
+// If n > 0, Readdirnames returns at most n names. In this case, if
+// Readdirnames returns an empty slice, it will return a non-nil error
+// explaining why. At the end of a directory, the error is io.EOF.
+//
+// If n <= 0, Readdirnames returns all the names from the directory in
+// a single slice. In this case, if Readdirnames succeeds (reads all
+// the way to the end of the directory), it returns the slice and a
+// nil error. If it encounters an error before the end of the
+// directory, Readdirnames returns the names read until that point and
+// a non-nil error.
+func (f *File) Readdirnames(n int) (names []string, err error) {
+ return f.readdirnames(n)
+}
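Not part of the change itself: reading a directory in batches with the exported wrappers above. The directory "." and the batch size of 10 are arbitrary choices for the sketch.

	package main

	import (
		"fmt"
		"io"
		"log"
		"os"
	)

	func main() {
		f, err := os.Open(".")
		if err != nil {
			log.Fatal(err)
		}
		defer f.Close()
		for {
			names, err := f.Readdirnames(10) // at most 10 names per call
			for _, name := range names {
				fmt.Println(name)
			}
			if err == io.EOF {
				break // end of directory
			}
			if err != nil {
				log.Fatal(err)
			}
		}
	}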
func (e *SyscallError) Error() string { return e.Syscall + ": " + e.Err }
-// Note: If the name of the function NewSyscallError changes,
-// pkg/go/doc/doc.go should be adjusted since it hardwires
-// this name in a heuristic.
-
// NewSyscallError returns, as an error, a new SyscallError
// with the given system call name and error details.
// As a convenience, if err is nil, NewSyscallError returns nil.
func (e *SyscallError) Error() string { return e.Syscall + ": " + e.Errno.Error() }
-// Note: If the name of the function NewSyscallError changes,
-// pkg/go/doc/doc.go should be adjusted since it hardwires
-// this name in a heuristic.
-
// NewSyscallError returns, as an error, a new SyscallError
// with the given system call name and error details.
// As a convenience, if err is nil, NewSyscallError returns nil.
// Unix shell semantics: path element "" means "."
dir = "."
}
- if err := findExecutable(dir + "/" + file); err == nil {
- return dir + "/" + file, nil
+ path := dir + "/" + file
+ if err := findExecutable(path); err == nil {
+ return path, nil
}
}
return "", &Error{file, ErrNotFound}
return ``, os.ENOENT
}
+// LookPath searches for an executable binary named file
+// in the directories named by the PATH environment variable.
+// If file contains a slash, it is tried directly and the PATH is not consulted.
+// LookPath also uses the PATHEXT environment variable to match
+// a suitable candidate.
func LookPath(file string) (f string, err error) {
x := os.Getenv(`PATHEXT`)
if x == `` {
return nil
}
-// FindProcess looks for a running process by its pid.
-// The Process it returns can be used to obtain information
-// about the underlying operating system process.
-func FindProcess(pid int) (p *Process, err error) {
+func findProcess(pid int) (p *Process, err error) {
// NOOP for Plan 9.
return newProcess(pid, 0), nil
}
return nil
}
-// FindProcess looks for a running process by its pid.
-// The Process it returns can be used to obtain information
-// about the underlying operating system process.
-func FindProcess(pid int) (p *Process, err error) {
+func findProcess(pid int) (p *Process, err error) {
// NOOP for unix.
return newProcess(pid, 0), nil
}
"unsafe"
)
+// Wait waits for the Process to exit or stop, and then returns a
+// Waitmsg describing its status and an error, if any.
func (p *Process) Wait(options int) (w *Waitmsg, err error) {
s, e := syscall.WaitForSingleObject(syscall.Handle(p.handle), syscall.INFINITE)
switch s {
return syscall.Errno(syscall.EWINDOWS)
}
+// Release releases any resources associated with the Process.
func (p *Process) Release() error {
if p.handle == -1 {
return EINVAL
return nil
}
-func FindProcess(pid int) (p *Process, err error) {
+func findProcess(pid int) (p *Process, err error) {
const da = syscall.STANDARD_RIGHTS_READ |
syscall.PROCESS_QUERY_INFORMATION | syscall.SYNCHRONIZE
h, e := syscall.OpenProcess(da, false, uint32(pid))
// Mkdir creates a new directory with the specified name and permission bits.
// It returns an error, if any.
-func Mkdir(name string, perm uint32) error {
- e := syscall.Mkdir(name, perm)
+func Mkdir(name string, perm FileMode) error {
+ e := syscall.Mkdir(name, syscallMode(perm))
if e != nil {
return &PathError{"mkdir", name, e}
}
return nil
}
+// syscallMode returns the syscall-specific mode bits from Go's portable mode bits.
+func syscallMode(i FileMode) (o uint32) {
+ o |= uint32(i.Perm())
+ if i&ModeSetuid != 0 {
+ o |= syscall.S_ISUID
+ }
+ if i&ModeSetgid != 0 {
+ o |= syscall.S_ISGID
+ }
+ if i&ModeSticky != 0 {
+ o |= syscall.S_ISVTX
+ }
+ // No mapping for Go's ModeTemporary (plan9 only).
+ return
+}
+
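A sketch, not part of the patch, of the FileMode-based signatures; the path is hypothetical and the example assumes a Unix-like system where the sticky bit is meaningful.

	package main

	import (
		"log"
		"os"
	)

	func main() {
		const dir = "/tmp/example-sticky" // hypothetical path
		// syscallMode maps ModeSticky onto S_ISVTX for the kernel.
		if err := os.Mkdir(dir, 0777|os.ModeSticky); err != nil {
			log.Fatal(err)
		}
		// The same FileMode type is accepted by Chmod.
		if err := os.Chmod(dir, 0755|os.ModeSticky); err != nil {
			log.Fatal(err)
		}
	}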
// Chmod changes the mode of the named file to mode.
// If the file is a symbolic link, it changes the mode of the link's target.
-func Chmod(name string, mode uint32) error {
- if e := syscall.Chmod(name, mode); e != nil {
+func Chmod(name string, mode FileMode) error {
+ if e := syscall.Chmod(name, syscallMode(mode)); e != nil {
return &PathError{"chmod", name, e}
}
return nil
}
// Chmod changes the mode of the file to mode.
-func (f *File) Chmod(mode uint32) error {
- if e := syscall.Fchmod(f.fd, mode); e != nil {
+func (f *File) Chmod(mode FileMode) error {
+ if e := syscall.Fchmod(f.fd, syscallMode(mode)); e != nil {
return &PathError{"chmod", f.name, e}
}
return nil
// (O_RDONLY etc.) and perm, (0666 etc.) if applicable. If successful,
// methods on the returned File can be used for I/O.
// It returns the File and an error, if any.
-func OpenFile(name string, flag int, perm uint32) (file *File, err error) {
- r, e := syscall.Open(name, flag|syscall.O_CLOEXEC, perm)
+func OpenFile(name string, flag int, perm FileMode) (file *File, err error) {
+ r, e := syscall.Open(name, flag|syscall.O_CLOEXEC, syscallMode(perm))
if e != nil {
return nil, &PathError{"open", name, e}
}
return fileInfoFromStat(&stat, name), nil
}
-// Readdir reads the contents of the directory associated with file and
-// returns an array of up to n FileInfo values, as would be returned
-// by Lstat, in directory order. Subsequent calls on the same file will yield
-// further FileInfos.
-//
-// If n > 0, Readdir returns at most n FileInfo structures. In this case, if
-// Readdir returns an empty slice, it will return a non-nil error
-// explaining why. At the end of a directory, the error is io.EOF.
-//
-// If n <= 0, Readdir returns all the FileInfo from the directory in
-// a single slice. In this case, if Readdir succeeds (reads all
-// the way to the end of the directory), it returns the slice and a
-// nil error. If it encounters an error before the end of the
-// directory, Readdir returns the FileInfo read until that point
-// and a non-nil error.
-func (f *File) Readdir(n int) (fi []FileInfo, err error) {
+func (f *File) readdir(n int) (fi []FileInfo, err error) {
dirname := f.name
if dirname == "" {
dirname = "."
// directories that MkdirAll creates.
// If path is already a directory, MkdirAll does nothing
// and returns nil.
-func MkdirAll(path string, perm uint32) error {
+func MkdirAll(path string, perm FileMode) error {
// If path exists, stop with success or error.
dir, err := Stat(path)
if err == nil {
if st.Mode&syscall.S_ISUID != 0 {
fs.mode |= ModeSetuid
}
+ if st.Mode&syscall.S_ISVTX != 0 {
+ fs.mode |= ModeSticky
+ }
return fs
}
import "syscall"
-func Hostname() (name string, err error) {
+func hostname() (name string, err error) {
name, err = syscall.Sysctl("kern.hostname")
if err != nil {
return "", NewSyscallError("sysctl kern.hostname", err)
package os
-// Hostname returns the host name reported by the kernel.
-func Hostname() (name string, err error) {
+func hostname() (name string, err error) {
f, err := Open("/proc/sys/kernel/hostname")
if err != nil {
return "", err
package os
-func Hostname() (name string, err error) {
+func hostname() (name string, err error) {
f, err := Open("#c/sysname")
if err != nil {
return "", err
ModeDir FileMode = 1 << (32 - 1 - iota) // d: is a directory
ModeAppend // a: append-only
ModeExclusive // l: exclusive use
- ModeTemporary // t: temporary file (not backed up)
+ ModeTemporary // T: temporary file (not backed up)
ModeSymlink // L: symbolic link
ModeDevice // D: device file
ModeNamedPipe // p: named pipe (FIFO)
ModeSetuid // u: setuid
ModeSetgid // g: setgid
ModeCharDevice // c: Unix character device, when ModeDevice is set
+ ModeSticky // t: sticky
// Mask for the type bits. For regular files, none will be set.
ModeType = ModeDir | ModeSymlink | ModeNamedPipe | ModeSocket | ModeDevice
)
func (m FileMode) String() string {
- const str = "daltLDpSugc"
+ const str = "dalTLDpSugct"
var buf [20]byte
w := 0
for i, c := range str {
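For illustration only: how the updated mode letters render, using the new 'T' for ModeTemporary and 't' for ModeSticky.

	package main

	import (
		"fmt"
		"os"
	)

	func main() {
		fmt.Println(os.FileMode(0755 | os.ModeDir | os.ModeSticky)) // dtrwxr-xr-x
		fmt.Println(os.FileMode(0644 | os.ModeTemporary))           // Trw-r--r--
	}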
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
package patch
import (
--- /dev/null
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package path
+
+/* Commented out until gccgo has example support.
+
+import (
+ "fmt"
+ "path"
+)
+
+// b
+func ExampleBase() {
+ fmt.Println(path.Base("/a/b"))
+}
+
+// Clean("a/c") = "a/c"
+// Clean("a//c") = "a/c"
+// Clean("a/c/.") = "a/c"
+// Clean("a/c/b/..") = "a/c"
+// Clean("/../a/c") = "/a/c"
+// Clean("/../a/b/../././/c") = "/a/c"
+func ExampleClean() {
+ paths := []string{
+ "a/c",
+ "a//c",
+ "a/c/.",
+ "a/c/b/..",
+ "/../a/c",
+ "/../a/b/../././/c",
+ }
+
+ for _, p := range paths {
+ fmt.Printf("Clean(%q) = %q\n", p, path.Clean(p))
+ }
+}
+
+// /a/b
+func ExampleDir() {
+ fmt.Println(path.Dir("/a/b/c"))
+}
+
+// .css
+func ExampleExt() {
+ fmt.Println(path.Ext("/a/b/c/bar.css"))
+}
+
+// true
+func ExampleIsAbs() {
+ fmt.Println(path.IsAbs("/dev/null"))
+}
+
+// a/b/c
+func ExampleJoin() {
+ fmt.Println(path.Join("a", "b", "c"))
+}
+
+// static/ myfile.css
+func ExampleSplit() {
+ fmt.Println(path.Split("static/myfile.css"))
+}
+
+*/
}
func TestImportPath(t *testing.T) {
- if path := TypeOf(&base64.Encoding{}).Elem().PkgPath(); path != "libgo_encoding.base64" {
- t.Errorf(`TypeOf(&base64.Encoding{}).Elem().PkgPath() = %q, want "libgo_encoding.base64"`, path)
+ tests := []struct {
+ t Type
+ path string
+ }{
+ {TypeOf(&base64.Encoding{}).Elem(), "libgo_encoding.base64"},
+ {TypeOf(uint(0)), ""},
+ {TypeOf(map[string]int{}), ""},
+ {TypeOf((*error)(nil)).Elem(), ""},
+ }
+ for _, test := range tests {
+ if path := test.t.PkgPath(); path != test.path {
+ t.Errorf("%v.PkgPath() = %q, want %q", test.t, path, test.path)
+ }
}
}
// PkgPath returns the type's package path.
// The package path is a full package import path like "encoding/base64".
- // PkgPath returns an empty string for unnamed types.
+ // PkgPath returns an empty string for unnamed or predeclared types.
PkgPath() string
// Size returns the number of bytes needed to store
}
}
-type numSubexpCase struct {
- input string
- expected int
-}
-
-var numSubexpCases = []numSubexpCase{
- {``, 0},
- {`.*`, 0},
- {`abba`, 0},
- {`ab(b)a`, 1},
- {`ab(.*)a`, 1},
- {`(.*)ab(.*)a`, 2},
- {`(.*)(ab)(.*)a`, 3},
- {`(.*)((a)b)(.*)a`, 4},
- {`(.*)(\(ab)(.*)a`, 3},
- {`(.*)(\(a\)b)(.*)a`, 3},
-}
-
-func TestNumSubexp(t *testing.T) {
- for _, c := range numSubexpCases {
+type subexpCase struct {
+ input string
+ num int
+ names []string
+}
+
+var subexpCases = []subexpCase{
+ {``, 0, nil},
+ {`.*`, 0, nil},
+ {`abba`, 0, nil},
+ {`ab(b)a`, 1, []string{"", ""}},
+ {`ab(.*)a`, 1, []string{"", ""}},
+ {`(.*)ab(.*)a`, 2, []string{"", "", ""}},
+ {`(.*)(ab)(.*)a`, 3, []string{"", "", "", ""}},
+ {`(.*)((a)b)(.*)a`, 4, []string{"", "", "", "", ""}},
+ {`(.*)(\(ab)(.*)a`, 3, []string{"", "", "", ""}},
+ {`(.*)(\(a\)b)(.*)a`, 3, []string{"", "", "", ""}},
+ {`(?P<foo>.*)(?P<bar>(a)b)(?P<foo>.*)a`, 4, []string{"", "foo", "bar", "", "foo"}},
+}
+
+func TestSubexp(t *testing.T) {
+ for _, c := range subexpCases {
re := MustCompile(c.input)
n := re.NumSubexp()
- if n != c.expected {
- t.Errorf("NumSubexp for %q returned %d, expected %d", c.input, n, c.expected)
+ if n != c.num {
+ t.Errorf("%q: NumSubexp = %d, want %d", c.input, n, c.num)
+ continue
+ }
+ names := re.SubexpNames()
+ if len(names) != 1+n {
+ t.Errorf("%q: len(SubexpNames) = %d, want %d", c.input, len(names), n)
+ continue
+ }
+ if c.names != nil {
+ for i := 0; i < 1+n; i++ {
+ if names[i] != c.names[i] {
+ t.Errorf("%q: SubexpNames[%d] = %q, want %q", c.input, i, names[i], c.names[i])
+ }
+ }
}
}
}
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
package regexp
import (
+// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
prefixRune rune // first rune in prefix
cond syntax.EmptyOp // empty-width conditions required at start of match
numSubexp int
+ subexpNames []string
longest bool
// cache of machines for running regexp
return nil, err
}
maxCap := re.MaxCap()
+ capNames := re.CapNames()
+
re = re.Simplify()
prog, err := syntax.Compile(re)
if err != nil {
return nil, err
}
regexp := &Regexp{
- expr: expr,
- prog: prog,
- numSubexp: maxCap,
- cond: prog.StartCond(),
- longest: longest,
+ expr: expr,
+ prog: prog,
+ numSubexp: maxCap,
+ subexpNames: capNames,
+ cond: prog.StartCond(),
+ longest: longest,
}
regexp.prefix, regexp.prefixComplete = prog.Prefix()
if regexp.prefix != "" {
return re.numSubexp
}
+// SubexpNames returns the names of the parenthesized subexpressions
+// in this Regexp. The name for the first sub-expression is names[1],
+// so that if m is a match slice, the name for m[i] is SubexpNames()[i].
+// Since the Regexp as a whole cannot be named, names[0] is always
+// the empty string. The slice should not be modified.
+func (re *Regexp) SubexpNames() []string {
+ return re.subexpNames
+}
+
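Not part of the change: a short sketch of SubexpNames with named capture groups.

	package main

	import (
		"fmt"
		"regexp"
	)

	func main() {
		re := regexp.MustCompile(`(?P<first>[A-Za-z]+) (?P<last>[A-Za-z]+)`)
		fmt.Printf("%q\n", re.SubexpNames()) // ["" "first" "last"]
		m := re.FindStringSubmatch("Ada Lovelace")
		for i, name := range re.SubexpNames() {
			if name != "" {
				fmt.Printf("%s: %s\n", name, m[i]) // first: Ada, then last: Lovelace
			}
		}
	}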
const endOfText rune = -1
// input abstracts different representations of the input text. It provides
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
package syntax
import "unicode"
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
package syntax
import (
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
package syntax_test
import (
}
return m
}
+
+// CapNames walks the regexp to find the names of capturing groups.
+func (re *Regexp) CapNames() []string {
+ names := make([]string, re.MaxCap()+1)
+ re.capNames(names)
+ return names
+}
+
+func (re *Regexp) capNames(names []string) {
+ if re.Op == OpCapture {
+ names[re.Cap] = re.Name
+ }
+ for _, sub := range re.Sub {
+ sub.capNames(names)
+ }
+}
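For illustration (not in the patch): CapNames on a parsed syntax tree; index 0 corresponds to the whole expression and unnamed groups yield empty strings.

	package main

	import (
		"fmt"
		"regexp/syntax"
	)

	func main() {
		re, err := syntax.Parse(`(?P<year>\d{4})-(\d{2})`, syntax.Perl)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%q\n", re.CapNames()) // ["" "year" ""]
	}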
// Callers fills the slice pc with the program counters of function invocations
// on the calling goroutine's stack. The argument skip is the number of stack frames
-// to skip before recording in pc, with 0 starting at the caller of Caller.
+// to skip before recording in pc, with 0 starting at the caller of Callers.
// It returns the number of entries written to pc.
func Callers(skip int, pc []uintptr) int
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
package runtime_test
import (
if my != i && his != i {
t.Fatalf("store/load are not sequentially consistent: %d/%d (%d)", my, his, i)
}
- ack[me][(i-1)%3] = -1
+ StoreInt32(&ack[me][(i-1)%3], -1)
}
c <- true
}(p)
if my != i && his != i {
t.Fatalf("store/load are not sequentially consistent: %d/%d (%d)", my, his, i)
}
- ack[me][(i-1)%3] = -1
+ StoreInt64(&ack[me][(i-1)%3], -1)
}
c <- true
}(p)
var dummy *byte
const sizeofPtr uintptr = uintptr(unsafe.Sizeof(dummy))
+
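+// Unix returns the time stored in ts as seconds plus nanoseconds.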
+func (ts *Timespec) Unix() (sec int64, nsec int64) {
+ return int64(ts.Sec), int64(ts.Nsec)
+}
+
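+// Unix returns the time stored in tv as seconds plus nanoseconds.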
+func (tv *Timeval) Unix() (sec int64, nsec int64) {
+ return int64(tv.Sec), int64(tv.Usec) * 1000
+}
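Illustration only, assuming a Unix-like platform where these syscall types are populated (for example from Stat_t fields); the zero value stands in for a real timestamp.

	package main

	import (
		"fmt"
		"syscall"
		"time"
	)

	func main() {
		var ts syscall.Timespec // zero value used as a stand-in
		sec, nsec := ts.Unix()
		fmt.Println(time.Unix(sec, nsec).UTC()) // 1970-01-01 00:00:00 +0000 UTC
	}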
var eg InternalExample
stdout, stderr := os.Stdout, os.Stderr
- defer func() {
- os.Stdout, os.Stderr = stdout, stderr
- if e := recover(); e != nil {
- fmt.Printf("--- FAIL: %s\npanic: %v\n", eg.Name, e)
- os.Exit(1)
- }
- }()
for _, eg = range examples {
if *chatty {
// license that can be found in the LICENSE file.
// Package testing provides support for automated testing of Go packages.
-// It is intended to be used in concert with the ``gotest'' utility, which automates
+// It is intended to be used in concert with the ``go test'' command, which automates
// execution of any function of the form
// func TestXxx(*testing.T)
// where Xxx can be any alphanumeric string (but the first letter must not be in
// fmt.Sprintf("hello")
// }
// }
+//
// The benchmark package will vary b.N until the benchmark function lasts
// long enough to be timed reliably. The output
// testing.BenchmarkHello 10000000 282 ns/op
// big.Len()
// }
// }
+//
+// The package also runs and verifies example code. Example functions
+// include an introductory comment that is compared with the standard output
+// of the function when the tests are run, as in this example of an example:
+//
+// // hello
+// func ExampleHello() {
+// fmt.Println("hello")
+// }
+//
+// Example functions without comments are compiled but not executed.
+//
+// The naming convention to declare examples for a function F, a type T and
+// method M on type T are:
+//
+// func ExampleF() { ... }
+// func ExampleT() { ... }
+// func ExampleT_M() { ... }
+//
+// Multiple example functions for a type/function/method may be provided by
+// appending a distinct suffix to the name. The suffix must start with a
+// lower-case letter.
+//
+// func ExampleF_suffix() { ... }
+// func ExampleT_suffix() { ... }
+// func ExampleT_M_suffix() { ... }
+//
package testing
import (
import (
"bytes"
"fmt"
+ "strings"
"testing"
"text/template/parse"
)
nil},
{"one", `{{define "foo"}} FOO {{end}}`, noError,
[]string{"foo"},
- []string{`[(text: " FOO ")]`}},
+ []string{`" FOO "`}},
{"two", `{{define "foo"}} FOO {{end}}{{define "bar"}} BAR {{end}}`, noError,
[]string{"foo", "bar"},
- []string{`[(text: " FOO ")]`, `[(text: " BAR ")]`}},
+ []string{`" FOO "`, `" BAR "`}},
// errors
{"missing end", `{{define "foo"}} FOO `, hasError,
nil,
t.Errorf("expected %q got %q", "broot", b.String())
}
}
+
+func TestRedefinition(t *testing.T) {
+ var tmpl *Template
+ var err error
+ if tmpl, err = New("tmpl1").Parse(`{{define "test"}}foo{{end}}`); err != nil {
+ t.Fatalf("parse 1: %v", err)
+ }
+ if _, err = tmpl.New("tmpl2").Parse(`{{define "test"}}bar{{end}}`); err == nil {
+ t.Fatal("expected error")
+ }
+ if !strings.Contains(err.Error(), "redefinition") {
+ t.Fatalf("expected redefinition error; got %v", err)
+ }
+}
func (l *ListNode) String() string {
b := new(bytes.Buffer)
- fmt.Fprint(b, "[")
for _, n := range l.Nodes {
fmt.Fprint(b, n)
}
- fmt.Fprint(b, "]")
return b.String()
}
}
func (t *TextNode) String() string {
- return fmt.Sprintf("(text: %q)", t.Text)
+ return fmt.Sprintf("%q", t.Text)
}
// PipeNode holds a pipeline with optional declaration
}
func (p *PipeNode) String() string {
- if p.Decl != nil {
- return fmt.Sprintf("%v := %v", p.Decl, p.Cmds)
+ s := ""
+ if len(p.Decl) > 0 {
+ for i, v := range p.Decl {
+ if i > 0 {
+ s += ", "
+ }
+ s += v.String()
+ }
+ s += " := "
+ }
+ for i, c := range p.Cmds {
+ if i > 0 {
+ s += " | "
+ }
+ s += c.String()
}
- return fmt.Sprintf("%v", p.Cmds)
+ return s
}
// ActionNode holds an action (something bounded by delimiters).
}
func (a *ActionNode) String() string {
- return fmt.Sprintf("(action: %v)", a.Pipe)
+ return fmt.Sprintf("{{%s}}", a.Pipe)
}
// CommandNode holds a command (a pipeline inside an evaluating action).
}
func (c *CommandNode) String() string {
- return fmt.Sprintf("(command: %v)", c.Args)
+ s := ""
+ for i, arg := range c.Args {
+ if i > 0 {
+ s += " "
+ }
+ s += arg.String()
+ }
+ return s
}
// IdentifierNode holds an identifier.
}
func (i *IdentifierNode) String() string {
- return fmt.Sprintf("I=%s", i.Ident)
+ return i.Ident
}
// VariableNode holds a list of variable names. The dollar sign is
}
func (v *VariableNode) String() string {
- return fmt.Sprintf("V=%s", v.Ident)
+ s := ""
+ for i, id := range v.Ident {
+ if i > 0 {
+ s += "."
+ }
+ s += id
+ }
+ return s
}
// DotNode holds the special identifier '.'. It is represented by a nil pointer.
}
func (d *DotNode) String() string {
- return "{{<.>}}"
+ return "."
}
// FieldNode holds a field (identifier starting with '.').
}
func (f *FieldNode) String() string {
- return fmt.Sprintf("F=%s", f.Ident)
+ s := ""
+ for _, id := range f.Ident {
+ s += "." + id
+ }
+ return s
}
// BoolNode holds a boolean constant.
}
func (b *BoolNode) String() string {
- return fmt.Sprintf("B=%t", b.True)
+ if b.True {
+ return "true"
+ }
+ return "false"
}
// NumberNode holds a number: signed or unsigned integer, float, or complex.
}
func (n *NumberNode) String() string {
- return fmt.Sprintf("N=%s", n.Text)
+ return n.Text
}
// StringNode holds a string constant. The value has been "unquoted".
}
func (s *StringNode) String() string {
- return fmt.Sprintf("S=%#q", s.Text)
+ return s.Quoted
}
// endNode represents an {{end}} action. It is represented by a nil pointer.
panic("unknown branch type")
}
if b.ElseList != nil {
- return fmt.Sprintf("({{%s %s}} %s {{else}} %s)", name, b.Pipe, b.List, b.ElseList)
+ return fmt.Sprintf("{{%s %s}}%s{{else}}%s{{end}}", name, b.Pipe, b.List, b.ElseList)
}
- return fmt.Sprintf("({{%s %s}} %s)", name, b.Pipe, b.List)
+ return fmt.Sprintf("{{%s %s}}%s{{end}}", name, b.Pipe, b.List)
}
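Not part of the patch: with the node String methods now printing template syntax, a parsed tree renders close to its source (text nodes appear quoted), as the parse tests below expect.

	package main

	import (
		"fmt"
		"text/template"
	)

	func main() {
		t := template.Must(template.New("t").Parse(`{{if .X}}hello{{else}}bye{{end}}`))
		fmt.Println(t.Root.String()) // {{if .X}}"hello"{{else}}"bye"{{end}}
	}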
// IfNode represents an {{if}} action and its commands.
name string
input string
ok bool
- result string
+ result string // what the user would see in an error message.
}
const (
var parseTests = []parseTest{
{"empty", "", noError,
- `[]`},
+ ``},
{"comment", "{{/*\n\n\n*/}}", noError,
- `[]`},
+ ``},
{"spaces", " \t\n", noError,
- `[(text: " \t\n")]`},
+ `" \t\n"`},
{"text", "some text", noError,
- `[(text: "some text")]`},
+ `"some text"`},
{"emptyAction", "{{}}", hasError,
- `[(action: [])]`},
+ `{{}}`},
{"field", "{{.X}}", noError,
- `[(action: [(command: [F=[X]])])]`},
+ `{{.X}}`},
{"simple command", "{{printf}}", noError,
- `[(action: [(command: [I=printf])])]`},
+ `{{printf}}`},
{"$ invocation", "{{$}}", noError,
- "[(action: [(command: [V=[$]])])]"},
+ "{{$}}"},
{"variable invocation", "{{with $x := 3}}{{$x 23}}{{end}}", noError,
- "[({{with [V=[$x]] := [(command: [N=3])]}} [(action: [(command: [V=[$x] N=23])])])]"},
+ "{{with $x := 3}}{{$x 23}}{{end}}"},
{"variable with fields", "{{$.I}}", noError,
- "[(action: [(command: [V=[$ I]])])]"},
+ "{{$.I}}"},
{"multi-word command", "{{printf `%d` 23}}", noError,
- "[(action: [(command: [I=printf S=`%d` N=23])])]"},
+ "{{printf `%d` 23}}"},
{"pipeline", "{{.X|.Y}}", noError,
- `[(action: [(command: [F=[X]]) (command: [F=[Y]])])]`},
+ `{{.X | .Y}}`},
{"pipeline with decl", "{{$x := .X|.Y}}", noError,
- `[(action: [V=[$x]] := [(command: [F=[X]]) (command: [F=[Y]])])]`},
- {"declaration", "{{.X|.Y}}", noError,
- `[(action: [(command: [F=[X]]) (command: [F=[Y]])])]`},
+ `{{$x := .X | .Y}}`},
{"simple if", "{{if .X}}hello{{end}}", noError,
- `[({{if [(command: [F=[X]])]}} [(text: "hello")])]`},
+ `{{if .X}}"hello"{{end}}`},
{"if with else", "{{if .X}}true{{else}}false{{end}}", noError,
- `[({{if [(command: [F=[X]])]}} [(text: "true")] {{else}} [(text: "false")])]`},
+ `{{if .X}}"true"{{else}}"false"{{end}}`},
{"simple range", "{{range .X}}hello{{end}}", noError,
- `[({{range [(command: [F=[X]])]}} [(text: "hello")])]`},
+ `{{range .X}}"hello"{{end}}`},
{"chained field range", "{{range .X.Y.Z}}hello{{end}}", noError,
- `[({{range [(command: [F=[X Y Z]])]}} [(text: "hello")])]`},
+ `{{range .X.Y.Z}}"hello"{{end}}`},
{"nested range", "{{range .X}}hello{{range .Y}}goodbye{{end}}{{end}}", noError,
- `[({{range [(command: [F=[X]])]}} [(text: "hello")({{range [(command: [F=[Y]])]}} [(text: "goodbye")])])]`},
+ `{{range .X}}"hello"{{range .Y}}"goodbye"{{end}}{{end}}`},
{"range with else", "{{range .X}}true{{else}}false{{end}}", noError,
- `[({{range [(command: [F=[X]])]}} [(text: "true")] {{else}} [(text: "false")])]`},
+ `{{range .X}}"true"{{else}}"false"{{end}}`},
{"range over pipeline", "{{range .X|.M}}true{{else}}false{{end}}", noError,
- `[({{range [(command: [F=[X]]) (command: [F=[M]])]}} [(text: "true")] {{else}} [(text: "false")])]`},
+ `{{range .X | .M}}"true"{{else}}"false"{{end}}`},
{"range []int", "{{range .SI}}{{.}}{{end}}", noError,
- `[({{range [(command: [F=[SI]])]}} [(action: [(command: [{{<.>}}])])])]`},
+ `{{range .SI}}{{.}}{{end}}`},
{"constants", "{{range .SI 1 -3.2i true false 'a'}}{{end}}", noError,
- `[({{range [(command: [F=[SI] N=1 N=-3.2i B=true B=false N='a'])]}} [])]`},
+ `{{range .SI 1 -3.2i true false 'a'}}{{end}}`},
{"template", "{{template `x`}}", noError,
- `[{{template "x"}}]`},
+ `{{template "x"}}`},
{"template with arg", "{{template `x` .Y}}", noError,
- `[{{template "x" [(command: [F=[Y]])]}}]`},
+ `{{template "x" .Y}}`},
{"with", "{{with .X}}hello{{end}}", noError,
- `[({{with [(command: [F=[X]])]}} [(text: "hello")])]`},
+ `{{with .X}}"hello"{{end}}`},
{"with with else", "{{with .X}}hello{{else}}goodbye{{end}}", noError,
- `[({{with [(command: [F=[X]])]}} [(text: "hello")] {{else}} [(text: "goodbye")])]`},
+ `{{with .X}}"hello"{{else}}"goodbye"{{end}}`},
// Errors.
{"unclosed action", "hello{{range", hasError, ""},
{"unmatched end", "{{end}}", hasError, ""},
name := new.name
if old := t.tmpl[name]; old != nil {
oldIsEmpty := parse.IsEmptyTree(old.Root)
- newIsEmpty := parse.IsEmptyTree(new.Root)
+ newIsEmpty := new.Tree != nil && parse.IsEmptyTree(new.Root)
if !oldIsEmpty && !newIsEmpty {
return fmt.Errorf("template: redefinition of template %q", name)
}
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
package time
import "errors"
func (d Weekday) String() string { return days[d] }
// Computations on time.
-//
+//
// The zero value for a Time is defined to be
// January 1, year 1, 00:00:00.000000000 UTC
// which (1) looks like a zero, or as close as you can get in a date
// be a suitable "not set" sentinel, unlike Jan 1 1970, and (3) has a
// non-negative year even in time zones west of UTC, unlike 1-1-0
// 00:00:00 UTC, which would be 12-31-(-1) 19:00:00 in New York.
-//
+//
// The zero Time value does not force a specific epoch for the time
// representation. For example, to use the Unix epoch internally, we
// could define that to distinguish a zero value from Jan 1 1970, that
// time would be represented by sec=-1, nsec=1e9. However, it does
// suggest a representation, namely using 1-1-1 00:00:00 UTC as the
// epoch, and that's what we do.
-//
+//
// The Add and Sub computations are oblivious to the choice of epoch.
-//
+//
// The presentation computations - year, month, minute, and so on - all
// rely heavily on division and modulus by positive constants. For
// calendrical calculations we want these divisions to round down, even
// }
//
// everywhere.
-//
+//
// The calendar runs on an exact 400 year cycle: a 400-year calendar
// printed for 1970-2469 will apply as well to 2470-2869. Even the days
// of the week match up. It simplifies the computations to choose the
// is the 100th year, and the missed missed leap year is the 400th year.
// So we'd prefer instead to print a calendar for 2001-2400 and reuse it
// for 2401-2800.
-//
+//
// Finally, it's convenient if the delta between the Unix epoch and
// long-ago epoch is representable by an int64 constant.
-//
+//
// These three considerations—choose an epoch as early as possible, that
// uses a year equal to 1 mod 400, and that is no more than 2⁶³ seconds
// earlier than 1970—bring us to the year -292277022399. We refer to
// this year as the absolute zero year, and to times measured as a uint64
// seconds since this year as absolute times.
-//
+//
// Times measured as an int64 seconds since the year 1—the representation
// used for Time's sec field—are called internal times.
-//
+//
// Times measured as an int64 seconds since the year 1970 are called Unix
// times.
-//
+//
// It is tempting to just use the year 1 as the absolute epoch, defining
// that the routines are only valid for years >= 1. However, the
// routines would then be invalid when displaying the epoch in time zones
// printing the zero time correctly isn't supported in half the time
// zones. By comparison, it's reasonable to mishandle some times in
// the year -292277022399.
-//
+//
// All this is opaque to clients of the API and can be changed if a
// better implementation presents itself.
}
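As a concrete check of the delta described above (illustrative, not part of the patch): counting the days between Jan 1 of year 1 and Jan 1 of 1970 with the 4/100/400 leap rule yields the seconds offset used when converting Unix times to internal times, and it fits easily in an int64.

package main

import "fmt"

func main() {
	// 1969 whole years separate the two epochs; add a leap day every
	// 4th year, drop the 100th-year ones, restore the 400th-year ones.
	days := 1969*365 + 1969/4 - 1969/100 + 1969/400
	fmt.Println(days)                // 719162
	fmt.Println(int64(days) * 86400) // 62135596800 seconds
}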
// ISOWeek returns the ISO 8601 year and week number in which t occurs.
-// Week ranges from 1 to 53. Jan 01 to Jan 03 of year n might belong to
-// week 52 or 53 of year n-1, and Dec 29 to Dec 31 might belong to week 1
+// Week ranges from 1 to 53. Jan 01 to Jan 03 of year n might belong to
+// week 52 or 53 of year n-1, and Dec 29 to Dec 31 might belong to week 1
// of year n+1.
func (t Time) ISOWeek() (year, week int) {
year, month, day, yday := t.date(true)
return Duration(t.sec-u.sec)*Second + Duration(t.nsec-u.nsec)
}
+// Since returns the time elapsed since t.
+// It is shorthand for time.Now().Sub(t).
+func Since(t Time) Duration {
+ return Now().Sub(t)
+}
+
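Typical use of the new helper (illustrative sketch; doWork is a hypothetical stand-in for any workload):

start := time.Now()
doWork()                     // hypothetical workload
elapsed := time.Since(start) // shorthand for time.Now().Sub(start)
fmt.Printf("doWork took %v\n", elapsed)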
// AddDate returns the time corresponding to adding the
// given number of years, months, and days to t.
// For example, AddDate(-1, 2, 3) applied to January 1, 2011
--- go/unicode/utf8/string.go
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package utf8
-
-import "errors"
-
-// String wraps a regular string with a small structure that provides more
-// efficient indexing by code point index, as opposed to byte index.
-// Scanning incrementally forwards or backwards is O(1) per index operation
-// (although not as fast a range clause going forwards). Random access is
-// O(N) in the length of the string, but the overhead is less than always
-// scanning from the beginning.
-// If the string is ASCII, random access is O(1).
-// Unlike the built-in string type, String has internal mutable state and
-// is not thread-safe.
-type String struct {
- str string
- numRunes int
- // If width > 0, the rune at runePos starts at bytePos and has the specified width.
- width int
- bytePos int
- runePos int
- nonASCII int // byte index of the first non-ASCII rune.
-}
-
-// NewString returns a new UTF-8 string with the provided contents.
-func NewString(contents string) *String {
- return new(String).Init(contents)
-}
-
-// Init initializes an existing String to hold the provided contents.
-// It returns a pointer to the initialized String.
-func (s *String) Init(contents string) *String {
- s.str = contents
- s.bytePos = 0
- s.runePos = 0
- for i := 0; i < len(contents); i++ {
- if contents[i] >= RuneSelf {
- // Not ASCII.
- s.numRunes = RuneCountInString(contents)
- _, s.width = DecodeRuneInString(contents)
- s.nonASCII = i
- return s
- }
- }
- // ASCII is simple. Also, the empty string is ASCII.
- s.numRunes = len(contents)
- s.width = 0
- s.nonASCII = len(contents)
- return s
-}
-
-// String returns the contents of the String. This method also means the
-// String is directly printable by fmt.Print.
-func (s *String) String() string {
- return s.str
-}
-
-// RuneCount returns the number of runes (Unicode code points) in the String.
-func (s *String) RuneCount() int {
- return s.numRunes
-}
-
-// IsASCII returns a boolean indicating whether the String contains only ASCII bytes.
-func (s *String) IsASCII() bool {
- return s.width == 0
-}
-
-// Slice returns the string sliced at rune positions [i:j].
-func (s *String) Slice(i, j int) string {
- // ASCII is easy. Let the compiler catch the indexing error if there is one.
- if j < s.nonASCII {
- return s.str[i:j]
- }
- if i < 0 || j > s.numRunes || i > j {
- panic(sliceOutOfRange)
- }
- if i == j {
- return ""
- }
- // For non-ASCII, after At(i), bytePos is always the position of the indexed character.
- var low, high int
- switch {
- case i < s.nonASCII:
- low = i
- case i == s.numRunes:
- low = len(s.str)
- default:
- s.At(i)
- low = s.bytePos
- }
- switch {
- case j == s.numRunes:
- high = len(s.str)
- default:
- s.At(j)
- high = s.bytePos
- }
- return s.str[low:high]
-}
-
-// At returns the rune with index i in the String. The sequence of runes is the same
-// as iterating over the contents with a "for range" clause.
-func (s *String) At(i int) rune {
- // ASCII is easy. Let the compiler catch the indexing error if there is one.
- if i < s.nonASCII {
- return rune(s.str[i])
- }
-
- // Now we do need to know the index is valid.
- if i < 0 || i >= s.numRunes {
- panic(outOfRange)
- }
-
- var r rune
-
- // Five easy common cases: within 1 spot of bytePos/runePos, or the beginning, or the end.
- // With these cases, all scans from beginning or end work in O(1) time per rune.
- switch {
-
- case i == s.runePos-1: // backing up one rune
- r, s.width = DecodeLastRuneInString(s.str[0:s.bytePos])
- s.runePos = i
- s.bytePos -= s.width
- return r
- case i == s.runePos+1: // moving ahead one rune
- s.runePos = i
- s.bytePos += s.width
- fallthrough
- case i == s.runePos:
- r, s.width = DecodeRuneInString(s.str[s.bytePos:])
- return r
- case i == 0: // start of string
- r, s.width = DecodeRuneInString(s.str)
- s.runePos = 0
- s.bytePos = 0
- return r
-
- case i == s.numRunes-1: // last rune in string
- r, s.width = DecodeLastRuneInString(s.str)
- s.runePos = i
- s.bytePos = len(s.str) - s.width
- return r
- }
-
- // We need to do a linear scan. There are three places to start from:
- // 1) The beginning
- // 2) bytePos/runePos.
- // 3) The end
- // Choose the closest in rune count, scanning backwards if necessary.
- forward := true
- if i < s.runePos {
- // Between beginning and pos. Which is closer?
- // Since both i and runePos are guaranteed >= nonASCII, that's the
- // lowest location we need to start from.
- if i < (s.runePos-s.nonASCII)/2 {
- // Scan forward from beginning
- s.bytePos, s.runePos = s.nonASCII, s.nonASCII
- } else {
- // Scan backwards from where we are
- forward = false
- }
- } else {
- // Between pos and end. Which is closer?
- if i-s.runePos < (s.numRunes-s.runePos)/2 {
- // Scan forward from pos
- } else {
- // Scan backwards from end
- s.bytePos, s.runePos = len(s.str), s.numRunes
- forward = false
- }
- }
- if forward {
- // TODO: Is it much faster to use a range loop for this scan?
- for {
- r, s.width = DecodeRuneInString(s.str[s.bytePos:])
- if s.runePos == i {
- break
- }
- s.runePos++
- s.bytePos += s.width
- }
- } else {
- for {
- r, s.width = DecodeLastRuneInString(s.str[0:s.bytePos])
- s.runePos--
- s.bytePos -= s.width
- if s.runePos == i {
- break
- }
- }
- }
- return r
-}
-
-var outOfRange = errors.New("utf8.String: index out of range")
-var sliceOutOfRange = errors.New("utf8.String: slice index out of range")
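This file is not simply dropped: per the Makefile changes earlier in the patch, the String type moves to exp/utf8string. A usage sketch under the new import path (illustrative, assuming the relocated package keeps the same API):

package main

import (
	"exp/utf8string"
	"fmt"
)

func main() {
	s := utf8string.NewString("Hello, 世界")
	fmt.Println(s.RuneCount())   // 9 code points, not 13 bytes
	fmt.Println(string(s.At(7))) // 世
	fmt.Println(s.Slice(7, 9))   // 世界
}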
--- go/unicode/utf8/string_test.go
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package utf8_test
-
-import (
- "math/rand"
- "testing"
- . "unicode/utf8"
-)
-
-func TestScanForwards(t *testing.T) {
- for _, s := range testStrings {
- runes := []rune(s)
- str := NewString(s)
- if str.RuneCount() != len(runes) {
- t.Errorf("%s: expected %d runes; got %d", s, len(runes), str.RuneCount())
- break
- }
- for i, expect := range runes {
- got := str.At(i)
- if got != expect {
- t.Errorf("%s[%d]: expected %c (%U); got %c (%U)", s, i, expect, expect, got, got)
- }
- }
- }
-}
-
-func TestScanBackwards(t *testing.T) {
- for _, s := range testStrings {
- runes := []rune(s)
- str := NewString(s)
- if str.RuneCount() != len(runes) {
- t.Errorf("%s: expected %d runes; got %d", s, len(runes), str.RuneCount())
- break
- }
- for i := len(runes) - 1; i >= 0; i-- {
- expect := runes[i]
- got := str.At(i)
- if got != expect {
- t.Errorf("%s[%d]: expected %c (%U); got %c (%U)", s, i, expect, expect, got, got)
- }
- }
- }
-}
-
-func randCount() int {
- if testing.Short() {
- return 100
- }
- return 100000
-}
-
-func TestRandomAccess(t *testing.T) {
- for _, s := range testStrings {
- if len(s) == 0 {
- continue
- }
- runes := []rune(s)
- str := NewString(s)
- if str.RuneCount() != len(runes) {
- t.Errorf("%s: expected %d runes; got %d", s, len(runes), str.RuneCount())
- break
- }
- for j := 0; j < randCount(); j++ {
- i := rand.Intn(len(runes))
- expect := runes[i]
- got := str.At(i)
- if got != expect {
- t.Errorf("%s[%d]: expected %c (%U); got %c (%U)", s, i, expect, expect, got, got)
- }
- }
- }
-}
-
-func TestRandomSliceAccess(t *testing.T) {
- for _, s := range testStrings {
- if len(s) == 0 || s[0] == '\x80' { // the bad-UTF-8 string fools this simple test
- continue
- }
- runes := []rune(s)
- str := NewString(s)
- if str.RuneCount() != len(runes) {
- t.Errorf("%s: expected %d runes; got %d", s, len(runes), str.RuneCount())
- break
- }
- for k := 0; k < randCount(); k++ {
- i := rand.Intn(len(runes))
- j := rand.Intn(len(runes) + 1)
- if i > j { // include empty strings
- continue
- }
- expect := string(runes[i:j])
- got := str.Slice(i, j)
- if got != expect {
- t.Errorf("%s[%d:%d]: expected %q got %q", s, i, j, expect, got)
- }
- }
- }
-}
-
-func TestLimitSliceAccess(t *testing.T) {
- for _, s := range testStrings {
- str := NewString(s)
- if str.Slice(0, 0) != "" {
- t.Error("failure with empty slice at beginning")
- }
- nr := RuneCountInString(s)
- if str.Slice(nr, nr) != "" {
- t.Error("failure with empty slice at end")
- }
- }
-}
}
// 4.1. Opening handshake.
// Step 5. send a request line.
- bw.WriteString("GET " + config.Location.RawPath + " HTTP/1.1\r\n")
+ bw.WriteString("GET " + config.Location.RequestURI() + " HTTP/1.1\r\n")
// Step 6-14. push request headers in fields.
fields := []string{
if config.Version != ProtocolVersionHixie75 {
panic("wrong protocol version.")
}
- bw.WriteString("GET " + config.Location.RawPath + " HTTP/1.1\r\n")
+ bw.WriteString("GET " + config.Location.RequestURI() + " HTTP/1.1\r\n")
bw.WriteString("Upgrade: WebSocket\r\n")
bw.WriteString("Connection: Upgrade\r\n")
bw.WriteString("Host: " + config.Location.Host + "\r\n")
} else {
scheme = "ws"
}
- c.Location, err = url.ParseRequest(scheme + "://" + req.Host + req.URL.RawPath)
+ c.Location, err = url.ParseRequest(scheme + "://" + req.Host + req.URL.RequestURI())
if err != nil {
return http.StatusBadRequest, err
}
} else {
scheme = "ws"
}
- c.Location, err = url.ParseRequest(scheme + "://" + req.Host + req.URL.RawPath)
+ c.Location, err = url.ParseRequest(scheme + "://" + req.Host + req.URL.RequestURI())
if err != nil {
return http.StatusBadRequest, err
}
panic("wrong protocol version.")
}
- bw.WriteString("GET " + config.Location.RawPath + " HTTP/1.1\r\n")
+ bw.WriteString("GET " + config.Location.RequestURI() + " HTTP/1.1\r\n")
bw.WriteString("Host: " + config.Location.Host + "\r\n")
bw.WriteString("Upgrade: websocket\r\n")
} else {
scheme = "ws"
}
- c.Location, err = url.ParseRequest(scheme + "://" + req.Host + req.URL.RawPath)
+ c.Location, err = url.ParseRequest(scheme + "://" + req.Host + req.URL.RequestURI())
if err != nil {
return http.StatusBadRequest, err
}
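The switch from RawPath to RequestURI() tracks the net/url rework: the URL struct no longer exposes a raw path field, and RequestURI() also appends the query string when one is present, which is what belongs in the request line. Illustrative sketch:

u, err := url.Parse("ws://example.com/chat?room=go")
if err != nil {
	log.Fatal(err)
}
fmt.Println(u.Path)         // /chat
fmt.Println(u.RequestURI()) // /chat?room=go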
"net/http"
"net/url"
"sync"
+ "time"
)
const (
return &Addr{ws.config.Origin}
}
-var errSetTimeout = errors.New("websocket: cannot set timeout: not using a net.Conn")
+var errSetDeadline = errors.New("websocket: cannot set deadline: not using a net.Conn")
-// SetTimeout sets the connection's network timeout in nanoseconds.
-func (ws *Conn) SetTimeout(nsec int64) error {
+// SetDeadline sets the connection's network read & write deadlines.
+func (ws *Conn) SetDeadline(t time.Time) error {
if conn, ok := ws.rwc.(net.Conn); ok {
- return conn.SetTimeout(nsec)
+ return conn.SetDeadline(t)
}
- return errSetTimeout
+ return errSetDeadline
}
-// SetReadTimeout sets the connection's network read timeout in nanoseconds.
-func (ws *Conn) SetReadTimeout(nsec int64) error {
+// SetReadDeadline sets the connection's network read deadline.
+func (ws *Conn) SetReadDeadline(t time.Time) error {
if conn, ok := ws.rwc.(net.Conn); ok {
- return conn.SetReadTimeout(nsec)
+ return conn.SetReadDeadline(t)
}
- return errSetTimeout
+ return errSetDeadline
}
-// SetWriteTimeout sets the connection's network write timeout in nanoseconds.
-func (ws *Conn) SetWriteTimeout(nsec int64) error {
+// SetWriteDeadline sets the connection's network write deadline.
+func (ws *Conn) SetWriteDeadline(t time.Time) error {
if conn, ok := ws.rwc.(net.Conn); ok {
- return conn.SetWriteTimeout(nsec)
+ return conn.SetWriteDeadline(t)
}
- return errSetTimeout
+ return errSetDeadline
}
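These methods follow net.Conn's move from relative nanosecond timeouts to absolute deadlines. A hedged usage sketch, given a *Conn ws as in the code above:

// Give the next read a ten-second budget; deadlines are absolute
// points in time, so the budget is expressed as "now plus 10s".
if err := ws.SetReadDeadline(time.Now().Add(10 * time.Second)); err != nil {
	// Returned when the underlying rwc is not a net.Conn.
	log.Println("deadline not supported:", err)
}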
// Config returns the WebSocket config.