runtime: copy netpoll code from Go 1.7 runtime
author Ian Lance Taylor <ian@gcc.gnu.org>
Tue, 18 Oct 2016 14:38:29 +0000 (14:38 +0000)
committer Ian Lance Taylor <ian@gcc.gnu.org>
Tue, 18 Oct 2016 14:38:29 +0000 (14:38 +0000)
    Reviewed-on: https://go-review.googlesource.com/31325

From-SVN: r241307

25 files changed:
gcc/go/gofrontend/MERGE
libgo/Makefile.am
libgo/Makefile.in
libgo/config.h.in
libgo/configure
libgo/configure.ac
libgo/go/runtime/netpoll.go [new file with mode: 0644]
libgo/go/runtime/netpoll_epoll.go [new file with mode: 0644]
libgo/go/runtime/netpoll_kqueue.go [new file with mode: 0644]
libgo/go/runtime/netpoll_nacl.go [new file with mode: 0644]
libgo/go/runtime/netpoll_solaris.go [new file with mode: 0644]
libgo/go/runtime/netpoll_stub.go [new file with mode: 0644]
libgo/go/runtime/netpoll_windows.go [new file with mode: 0644]
libgo/go/runtime/stubs.go
libgo/go/runtime/time.go
libgo/mkrsysinfo.sh
libgo/runtime/malloc.h
libgo/runtime/mgc0.c
libgo/runtime/netpoll.goc [deleted file]
libgo/runtime/netpoll_epoll.c [deleted file]
libgo/runtime/netpoll_kqueue.c [deleted file]
libgo/runtime/netpoll_select.c [deleted file]
libgo/runtime/netpoll_stub.c [deleted file]
libgo/runtime/runtime.h
libgo/sysinfo.c
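
For orientation: the platform-specific netpoll_*.go files added below all supply the same small set of hooks that the portable netpoll.go drives. A rough editorial summary of that contract, written as Go comments (a sketch, not text from the patch):

// Each netpoll implementation provides, roughly:
//   func netpollinit()                                // create the epoll/kqueue/event-port/IOCP handle
//   func netpollopen(fd uintptr, pd *pollDesc) int32  // register fd, returning an errno value
//   func netpollclose(fd uintptr) int32               // drop the registration
//   func netpollarm(pd *pollDesc, mode int)           // used only by level-triggered pollers (Solaris)
//   func netpoll(block bool) *g                       // poll once; return goroutines made runnable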

index c13c10dd9536c5732587809fc05819b4325adf98..40f91ced442ec93c2508c6951550f8120e8051b1 100644 (file)
@@ -1,4 +1,4 @@
-0a49b1dadd862215bdd38b9725a6e193b0d8fd0b
+68bb6a9875499037d3eccb79a1f92e1c7a476d58
 
 The first line of this file holds the git revision number of the last
 merge done from the gofrontend repository.
index d6a53eff9ac71bee3c35984ebfa0680ff2aa9e62..1d93deb137016ed01f0cb637cfeedc31a878bed3 100644 (file)
@@ -427,16 +427,6 @@ endif
 endif
 endif
 
-if LIBGO_IS_LINUX
-runtime_netpoll_files = runtime/netpoll_epoll.c
-else
-if LIBGO_IS_SOLARIS
-runtime_netpoll_files = runtime/netpoll_select.c
-else
-runtime_netpoll_files = runtime/netpoll_kqueue.c
-endif
-endif
-
 runtime_files = \
        runtime/go-append.c \
        runtime/go-assert.c \
@@ -500,7 +490,6 @@ runtime_files = \
        runtime/mgc0.c \
        runtime/mheap.c \
        runtime/msize.c \
-       $(runtime_netpoll_files) \
        runtime/panic.c \
        runtime/parfor.c \
        runtime/print.c \
@@ -514,7 +503,6 @@ runtime_files = \
        go-iface.c \
        lfstack.c \
        malloc.c \
-       netpoll.c \
        reflect.c \
        runtime1.c \
        sigqueue.c \
@@ -530,14 +518,6 @@ malloc.c: $(srcdir)/runtime/malloc.goc goc2c
        ./goc2c $< > $@.tmp
        mv -f $@.tmp $@
 
-mprof.c: $(srcdir)/runtime/mprof.goc goc2c
-       ./goc2c $< > $@.tmp
-       mv -f $@.tmp $@
-
-netpoll.c: $(srcdir)/runtime/netpoll.goc goc2c
-       ./goc2c $< > $@.tmp
-       mv -f $@.tmp $@
-
 reflect.c: $(srcdir)/runtime/reflect.goc goc2c
        ./goc2c $< > $@.tmp
        mv -f $@.tmp $@
@@ -546,18 +526,10 @@ runtime1.c: $(srcdir)/runtime/runtime1.goc goc2c
        ./goc2c $< > $@.tmp
        mv -f $@.tmp $@
 
-sema.c: $(srcdir)/runtime/sema.goc goc2c
-       ./goc2c $< > $@.tmp
-       mv -f $@.tmp $@
-
 sigqueue.c: $(srcdir)/runtime/sigqueue.goc goc2c
        ./goc2c --go-pkgpath os_signal $< > $@.tmp
        mv -f $@.tmp $@
 
-time.c: $(srcdir)/runtime/time.goc goc2c
-       ./goc2c $< > $@.tmp
-       mv -f $@.tmp $@
-
 %.c: $(srcdir)/runtime/%.goc goc2c
        ./goc2c $< > $@.tmp
        mv -f $@.tmp $@
index 84d96b3b6ae2ae806493f5254d1d46bbdc6c8181..43c404790e0cd2e7f4f1137eb32dccb9ef392cbb 100644 (file)
@@ -226,21 +226,18 @@ am__DEPENDENCIES_4 = $(am__DEPENDENCIES_2) \
 libgo_llgo_la_DEPENDENCIES = $(am__DEPENDENCIES_4)
 @HAVE_SYS_MMAN_H_FALSE@am__objects_1 = mem_posix_memalign.lo
 @HAVE_SYS_MMAN_H_TRUE@am__objects_1 = mem.lo
-@LIBGO_IS_LINUX_FALSE@@LIBGO_IS_SOLARIS_FALSE@am__objects_2 = netpoll_kqueue.lo
-@LIBGO_IS_LINUX_FALSE@@LIBGO_IS_SOLARIS_TRUE@am__objects_2 = netpoll_select.lo
-@LIBGO_IS_LINUX_TRUE@am__objects_2 = netpoll_epoll.lo
-@LIBGO_IS_LINUX_FALSE@am__objects_3 = thread-sema.lo
-@LIBGO_IS_LINUX_TRUE@am__objects_3 = thread-linux.lo
-@LIBGO_IS_RTEMS_TRUE@am__objects_4 = rtems-task-variable-add.lo
-@LIBGO_IS_DARWIN_FALSE@@LIBGO_IS_FREEBSD_FALSE@@LIBGO_IS_IRIX_FALSE@@LIBGO_IS_LINUX_FALSE@@LIBGO_IS_NETBSD_FALSE@@LIBGO_IS_SOLARIS_FALSE@am__objects_5 = getncpu-none.lo
-@LIBGO_IS_DARWIN_FALSE@@LIBGO_IS_FREEBSD_FALSE@@LIBGO_IS_IRIX_FALSE@@LIBGO_IS_LINUX_FALSE@@LIBGO_IS_NETBSD_TRUE@@LIBGO_IS_SOLARIS_FALSE@am__objects_5 = getncpu-bsd.lo
-@LIBGO_IS_DARWIN_FALSE@@LIBGO_IS_FREEBSD_TRUE@@LIBGO_IS_IRIX_FALSE@@LIBGO_IS_LINUX_FALSE@@LIBGO_IS_SOLARIS_FALSE@am__objects_5 = getncpu-bsd.lo
-@LIBGO_IS_DARWIN_FALSE@@LIBGO_IS_IRIX_FALSE@@LIBGO_IS_LINUX_FALSE@@LIBGO_IS_SOLARIS_TRUE@am__objects_5 = getncpu-solaris.lo
-@LIBGO_IS_DARWIN_FALSE@@LIBGO_IS_IRIX_TRUE@@LIBGO_IS_LINUX_FALSE@am__objects_5 = getncpu-irix.lo
-@LIBGO_IS_DARWIN_TRUE@@LIBGO_IS_LINUX_FALSE@am__objects_5 =  \
+@LIBGO_IS_LINUX_FALSE@am__objects_2 = thread-sema.lo
+@LIBGO_IS_LINUX_TRUE@am__objects_2 = thread-linux.lo
+@LIBGO_IS_RTEMS_TRUE@am__objects_3 = rtems-task-variable-add.lo
+@LIBGO_IS_DARWIN_FALSE@@LIBGO_IS_FREEBSD_FALSE@@LIBGO_IS_IRIX_FALSE@@LIBGO_IS_LINUX_FALSE@@LIBGO_IS_NETBSD_FALSE@@LIBGO_IS_SOLARIS_FALSE@am__objects_4 = getncpu-none.lo
+@LIBGO_IS_DARWIN_FALSE@@LIBGO_IS_FREEBSD_FALSE@@LIBGO_IS_IRIX_FALSE@@LIBGO_IS_LINUX_FALSE@@LIBGO_IS_NETBSD_TRUE@@LIBGO_IS_SOLARIS_FALSE@am__objects_4 = getncpu-bsd.lo
+@LIBGO_IS_DARWIN_FALSE@@LIBGO_IS_FREEBSD_TRUE@@LIBGO_IS_IRIX_FALSE@@LIBGO_IS_LINUX_FALSE@@LIBGO_IS_SOLARIS_FALSE@am__objects_4 = getncpu-bsd.lo
+@LIBGO_IS_DARWIN_FALSE@@LIBGO_IS_IRIX_FALSE@@LIBGO_IS_LINUX_FALSE@@LIBGO_IS_SOLARIS_TRUE@am__objects_4 = getncpu-solaris.lo
+@LIBGO_IS_DARWIN_FALSE@@LIBGO_IS_IRIX_TRUE@@LIBGO_IS_LINUX_FALSE@am__objects_4 = getncpu-irix.lo
+@LIBGO_IS_DARWIN_TRUE@@LIBGO_IS_LINUX_FALSE@am__objects_4 =  \
 @LIBGO_IS_DARWIN_TRUE@@LIBGO_IS_LINUX_FALSE@   getncpu-bsd.lo
-@LIBGO_IS_LINUX_TRUE@am__objects_5 = getncpu-linux.lo
-am__objects_6 = go-append.lo go-assert.lo go-assert-interface.lo \
+@LIBGO_IS_LINUX_TRUE@am__objects_4 = getncpu-linux.lo
+am__objects_5 = go-append.lo go-assert.lo go-assert-interface.lo \
        go-breakpoint.lo go-caller.lo go-callers.lo \
        go-can-convert-interface.lo go-cdiv.lo go-cgo.lo \
        go-check-interface.lo go-construct-map.lo \
@@ -259,18 +256,18 @@ am__objects_6 = go-append.lo go-assert.lo go-assert-interface.lo \
        go-unsafe-pointer.lo go-unsetenv.lo go-unwind.lo go-varargs.lo \
        env_posix.lo heapdump.lo mcache.lo mcentral.lo \
        $(am__objects_1) mfixalloc.lo mgc0.lo mheap.lo msize.lo \
-       $(am__objects_2) panic.lo parfor.lo print.lo proc.lo \
-       runtime.lo signal_unix.lo thread.lo $(am__objects_3) yield.lo \
-       $(am__objects_4) go-iface.lo lfstack.lo malloc.lo netpoll.lo \
-       reflect.lo runtime1.lo sigqueue.lo $(am__objects_5)
-am_libgo_llgo_la_OBJECTS = $(am__objects_6)
+       panic.lo parfor.lo print.lo proc.lo runtime.lo signal_unix.lo \
+       thread.lo $(am__objects_2) yield.lo $(am__objects_3) \
+       go-iface.lo lfstack.lo malloc.lo reflect.lo runtime1.lo \
+       sigqueue.lo $(am__objects_4)
+am_libgo_llgo_la_OBJECTS = $(am__objects_5)
 libgo_llgo_la_OBJECTS = $(am_libgo_llgo_la_OBJECTS)
 libgo_llgo_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \
        $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \
        $(libgo_llgo_la_LDFLAGS) $(LDFLAGS) -o $@
 @GOC_IS_LLGO_TRUE@am_libgo_llgo_la_rpath = -rpath $(toolexeclibdir)
 libgo_la_DEPENDENCIES = $(am__DEPENDENCIES_4)
-am_libgo_la_OBJECTS = $(am__objects_6)
+am_libgo_la_OBJECTS = $(am__objects_5)
 libgo_la_OBJECTS = $(am_libgo_la_OBJECTS)
 libgo_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
        --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(libgo_la_LDFLAGS) \
@@ -832,9 +829,6 @@ toolexeclibgounicode_DATA = \
 @LIBGO_IS_DARWIN_FALSE@@LIBGO_IS_IRIX_TRUE@@LIBGO_IS_LINUX_FALSE@runtime_getncpu_file = runtime/getncpu-irix.c
 @LIBGO_IS_DARWIN_TRUE@@LIBGO_IS_LINUX_FALSE@runtime_getncpu_file = runtime/getncpu-bsd.c
 @LIBGO_IS_LINUX_TRUE@runtime_getncpu_file = runtime/getncpu-linux.c
-@LIBGO_IS_LINUX_FALSE@@LIBGO_IS_SOLARIS_FALSE@runtime_netpoll_files = runtime/netpoll_kqueue.c
-@LIBGO_IS_LINUX_FALSE@@LIBGO_IS_SOLARIS_TRUE@runtime_netpoll_files = runtime/netpoll_select.c
-@LIBGO_IS_LINUX_TRUE@runtime_netpoll_files = runtime/netpoll_epoll.c
 runtime_files = \
        runtime/go-append.c \
        runtime/go-assert.c \
@@ -898,7 +892,6 @@ runtime_files = \
        runtime/mgc0.c \
        runtime/mheap.c \
        runtime/msize.c \
-       $(runtime_netpoll_files) \
        runtime/panic.c \
        runtime/parfor.c \
        runtime/print.c \
@@ -912,7 +905,6 @@ runtime_files = \
        go-iface.c \
        lfstack.c \
        malloc.c \
-       netpoll.c \
        reflect.c \
        runtime1.c \
        sigqueue.c \
@@ -1616,10 +1608,6 @@ distclean-compile:
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mgc0.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mheap.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/msize.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/netpoll.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/netpoll_epoll.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/netpoll_kqueue.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/netpoll_select.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/panic.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/parfor.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/print.Plo@am__quote@
@@ -2139,27 +2127,6 @@ msize.lo: runtime/msize.c
 @AMDEP_TRUE@@am__fastdepCC_FALSE@      DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
 @am__fastdepCC_FALSE@  $(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o msize.lo `test -f 'runtime/msize.c' || echo '$(srcdir)/'`runtime/msize.c
 
-netpoll_kqueue.lo: runtime/netpoll_kqueue.c
-@am__fastdepCC_TRUE@   $(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT netpoll_kqueue.lo -MD -MP -MF $(DEPDIR)/netpoll_kqueue.Tpo -c -o netpoll_kqueue.lo `test -f 'runtime/netpoll_kqueue.c' || echo '$(srcdir)/'`runtime/netpoll_kqueue.c
-@am__fastdepCC_TRUE@   $(am__mv) $(DEPDIR)/netpoll_kqueue.Tpo $(DEPDIR)/netpoll_kqueue.Plo
-@AMDEP_TRUE@@am__fastdepCC_FALSE@      source='runtime/netpoll_kqueue.c' object='netpoll_kqueue.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@      DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@  $(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o netpoll_kqueue.lo `test -f 'runtime/netpoll_kqueue.c' || echo '$(srcdir)/'`runtime/netpoll_kqueue.c
-
-netpoll_select.lo: runtime/netpoll_select.c
-@am__fastdepCC_TRUE@   $(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT netpoll_select.lo -MD -MP -MF $(DEPDIR)/netpoll_select.Tpo -c -o netpoll_select.lo `test -f 'runtime/netpoll_select.c' || echo '$(srcdir)/'`runtime/netpoll_select.c
-@am__fastdepCC_TRUE@   $(am__mv) $(DEPDIR)/netpoll_select.Tpo $(DEPDIR)/netpoll_select.Plo
-@AMDEP_TRUE@@am__fastdepCC_FALSE@      source='runtime/netpoll_select.c' object='netpoll_select.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@      DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@  $(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o netpoll_select.lo `test -f 'runtime/netpoll_select.c' || echo '$(srcdir)/'`runtime/netpoll_select.c
-
-netpoll_epoll.lo: runtime/netpoll_epoll.c
-@am__fastdepCC_TRUE@   $(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT netpoll_epoll.lo -MD -MP -MF $(DEPDIR)/netpoll_epoll.Tpo -c -o netpoll_epoll.lo `test -f 'runtime/netpoll_epoll.c' || echo '$(srcdir)/'`runtime/netpoll_epoll.c
-@am__fastdepCC_TRUE@   $(am__mv) $(DEPDIR)/netpoll_epoll.Tpo $(DEPDIR)/netpoll_epoll.Plo
-@AMDEP_TRUE@@am__fastdepCC_FALSE@      source='runtime/netpoll_epoll.c' object='netpoll_epoll.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@      DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@  $(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o netpoll_epoll.lo `test -f 'runtime/netpoll_epoll.c' || echo '$(srcdir)/'`runtime/netpoll_epoll.c
-
 panic.lo: runtime/panic.c
 @am__fastdepCC_TRUE@   $(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT panic.lo -MD -MP -MF $(DEPDIR)/panic.Tpo -c -o panic.lo `test -f 'runtime/panic.c' || echo '$(srcdir)/'`runtime/panic.c
 @am__fastdepCC_TRUE@   $(am__mv) $(DEPDIR)/panic.Tpo $(DEPDIR)/panic.Plo
@@ -3402,14 +3369,6 @@ malloc.c: $(srcdir)/runtime/malloc.goc goc2c
        ./goc2c $< > $@.tmp
        mv -f $@.tmp $@
 
-mprof.c: $(srcdir)/runtime/mprof.goc goc2c
-       ./goc2c $< > $@.tmp
-       mv -f $@.tmp $@
-
-netpoll.c: $(srcdir)/runtime/netpoll.goc goc2c
-       ./goc2c $< > $@.tmp
-       mv -f $@.tmp $@
-
 reflect.c: $(srcdir)/runtime/reflect.goc goc2c
        ./goc2c $< > $@.tmp
        mv -f $@.tmp $@
@@ -3418,18 +3377,10 @@ runtime1.c: $(srcdir)/runtime/runtime1.goc goc2c
        ./goc2c $< > $@.tmp
        mv -f $@.tmp $@
 
-sema.c: $(srcdir)/runtime/sema.goc goc2c
-       ./goc2c $< > $@.tmp
-       mv -f $@.tmp $@
-
 sigqueue.c: $(srcdir)/runtime/sigqueue.goc goc2c
        ./goc2c --go-pkgpath os_signal $< > $@.tmp
        mv -f $@.tmp $@
 
-time.c: $(srcdir)/runtime/time.goc goc2c
-       ./goc2c $< > $@.tmp
-       mv -f $@.tmp $@
-
 %.c: $(srcdir)/runtime/%.goc goc2c
        ./goc2c $< > $@.tmp
        mv -f $@.tmp $@
index 14938da59c161058cdf62f8edd970f6a2c5db377..d3b3067d32a52c9f65312f803638aac1de7d4553 100644 (file)
 /* Define to 1 if you have the `pipe2' function. */
 #undef HAVE_PIPE2
 
+/* Define to 1 if you have the <port.h> header file. */
+#undef HAVE_PORT_H
+
 /* Define to 1 if you have the `removexattr' function. */
 #undef HAVE_REMOVEXATTR
 
 /* Define to 1 if you have the <sys/epoll.h> header file. */
 #undef HAVE_SYS_EPOLL_H
 
+/* Define to 1 if you have the <sys/event.h> header file. */
+#undef HAVE_SYS_EVENT_H
+
 /* Define to 1 if you have the <sys/file.h> header file. */
 #undef HAVE_SYS_FILE_H
 
index ade92db7da3ff014ea168c008652f1e88ebc4d14..6fd742b50b08c1cd00329687c64315b6a2c71d29 100755 (executable)
@@ -14714,7 +14714,7 @@ $as_echo "#define HAVE_GETIPINFO 1" >>confdefs.h
   fi
 
 
-for ac_header in sched.h semaphore.h sys/file.h sys/mman.h syscall.h sys/epoll.h sys/inotify.h sys/ptrace.h sys/syscall.h sys/user.h sys/utsname.h sys/select.h sys/socket.h net/if.h net/if_arp.h net/route.h netpacket/packet.h sys/prctl.h sys/mount.h sys/vfs.h sys/statfs.h sys/timex.h sys/sysinfo.h utime.h linux/ether.h linux/fs.h linux/reboot.h netinet/icmp6.h netinet/in_syst.h netinet/ip.h netinet/ip_mroute.h netinet/if_ether.h
+for ac_header in port.h sched.h semaphore.h sys/file.h sys/mman.h syscall.h sys/epoll.h sys/event.h sys/inotify.h sys/ptrace.h sys/syscall.h sys/user.h sys/utsname.h sys/select.h sys/socket.h net/if.h net/if_arp.h net/route.h netpacket/packet.h sys/prctl.h sys/mount.h sys/vfs.h sys/statfs.h sys/timex.h sys/sysinfo.h utime.h linux/ether.h linux/fs.h linux/reboot.h netinet/icmp6.h netinet/in_syst.h netinet/ip.h netinet/ip_mroute.h netinet/if_ether.h
 do :
   as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh`
 ac_fn_c_check_header_mongrel "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default"
index e721b0f09e3c4d8dba5fa913a0b9e1ba9db6def6..ef0e700db47c80e09762e3cb2f004372d6193bc9 100644 (file)
@@ -570,7 +570,7 @@ AC_C_BIGENDIAN
 
 GCC_CHECK_UNWIND_GETIPINFO
 
-AC_CHECK_HEADERS(sched.h semaphore.h sys/file.h sys/mman.h syscall.h sys/epoll.h sys/inotify.h sys/ptrace.h sys/syscall.h sys/user.h sys/utsname.h sys/select.h sys/socket.h net/if.h net/if_arp.h net/route.h netpacket/packet.h sys/prctl.h sys/mount.h sys/vfs.h sys/statfs.h sys/timex.h sys/sysinfo.h utime.h linux/ether.h linux/fs.h linux/reboot.h netinet/icmp6.h netinet/in_syst.h netinet/ip.h netinet/ip_mroute.h netinet/if_ether.h)
+AC_CHECK_HEADERS(port.h sched.h semaphore.h sys/file.h sys/mman.h syscall.h sys/epoll.h sys/event.h sys/inotify.h sys/ptrace.h sys/syscall.h sys/user.h sys/utsname.h sys/select.h sys/socket.h net/if.h net/if_arp.h net/route.h netpacket/packet.h sys/prctl.h sys/mount.h sys/vfs.h sys/statfs.h sys/timex.h sys/sysinfo.h utime.h linux/ether.h linux/fs.h linux/reboot.h netinet/icmp6.h netinet/in_syst.h netinet/ip.h netinet/ip_mroute.h netinet/if_ether.h)
 
 AC_CHECK_HEADERS([linux/filter.h linux/if_addr.h linux/if_ether.h linux/if_tun.h linux/netlink.h linux/rtnetlink.h], [], [],
 [#ifdef HAVE_SYS_SOCKET_H
diff --git a/libgo/go/runtime/netpoll.go b/libgo/go/runtime/netpoll.go
new file mode 100644 (file)
index 0000000..729b597
--- /dev/null
@@ -0,0 +1,452 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris windows
+
+package runtime
+
+import (
+       "runtime/internal/atomic"
+       "unsafe"
+)
+
+// Export temporarily for gccgo's C code to call:
+//go:linkname netpoll runtime.netpoll
+
+// Integrated network poller (platform-independent part).
+// A particular implementation (epoll/kqueue) must define the following functions:
+// func netpollinit()                  // to initialize the poller
+// func netpollopen(fd uintptr, pd *pollDesc) int32    // to arm edge-triggered notifications
+// and associate fd with pd.
+// An implementation must call the following function to denote that the pd is ready.
+// func netpollready(gpp **g, pd *pollDesc, mode int32)
+
+// pollDesc contains 2 binary semaphores, rg and wg, to park reader and writer
+// goroutines respectively. The semaphore can be in the following states:
+// pdReady - io readiness notification is pending;
+//           a goroutine consumes the notification by changing the state to nil.
+// pdWait - a goroutine prepares to park on the semaphore, but not yet parked;
+//          the goroutine commits to park by changing the state to G pointer,
+//          or, alternatively, concurrent io notification changes the state to READY,
+//          or, alternatively, concurrent timeout/close changes the state to nil.
+// G pointer - the goroutine is blocked on the semaphore;
+//             io notification or timeout/close changes the state to READY or nil respectively
+//             and unparks the goroutine.
+// nil - nothing of the above.
+const (
+       pdReady uintptr = 1
+       pdWait  uintptr = 2
+)
+
+const pollBlockSize = 4 * 1024
+
+// Network poller descriptor.
+type pollDesc struct {
+       link *pollDesc // in pollcache, protected by pollcache.lock
+
+       // The lock protects pollOpen, pollSetDeadline, pollUnblock and deadlineimpl operations.
+       // This fully covers seq, rt and wt variables. fd is constant throughout the PollDesc lifetime.
+       // pollReset, pollWait, pollWaitCanceled and runtime·netpollready (IO readiness notification)
+       // proceed w/o taking the lock. So closing, rg, rd, wg and wd are manipulated
+       // in a lock-free way by all operations.
+       // NOTE(dvyukov): the following code uses uintptr to store *g (rg/wg),
+       // that will blow up when GC starts moving objects.
+       lock    mutex // protects the following fields
+       fd      uintptr
+       closing bool
+       seq     uintptr // protects from stale timers and ready notifications
+       rg      uintptr // pdReady, pdWait, G waiting for read or nil
+       rt      timer   // read deadline timer (set if rt.f != nil)
+       rd      int64   // read deadline
+       wg      uintptr // pdReady, pdWait, G waiting for write or nil
+       wt      timer   // write deadline timer
+       wd      int64   // write deadline
+       user    uint32  // user settable cookie
+}
+
+type pollCache struct {
+       lock  mutex
+       first *pollDesc
+       // PollDesc objects must be type-stable,
+       // because we can get ready notification from epoll/kqueue
+       // after the descriptor is closed/reused.
+       // Stale notifications are detected using seq variable,
+       // seq is incremented when deadlines are changed or descriptor is reused.
+}
+
+var (
+       netpollInited uint32
+       pollcache     pollCache
+)
+
+//go:linkname net_runtime_pollServerInit net.runtime_pollServerInit
+func net_runtime_pollServerInit() {
+       netpollinit()
+       atomic.Store(&netpollInited, 1)
+}
+
+func netpollinited() bool {
+       return atomic.Load(&netpollInited) != 0
+}
+
+//go:linkname net_runtime_pollOpen net.runtime_pollOpen
+func net_runtime_pollOpen(fd uintptr) (*pollDesc, int) {
+       pd := pollcache.alloc()
+       lock(&pd.lock)
+       if pd.wg != 0 && pd.wg != pdReady {
+               throw("netpollOpen: blocked write on free descriptor")
+       }
+       if pd.rg != 0 && pd.rg != pdReady {
+               throw("netpollOpen: blocked read on free descriptor")
+       }
+       pd.fd = fd
+       pd.closing = false
+       pd.seq++
+       pd.rg = 0
+       pd.rd = 0
+       pd.wg = 0
+       pd.wd = 0
+       unlock(&pd.lock)
+
+       var errno int32
+       errno = netpollopen(fd, pd)
+       return pd, int(errno)
+}
+
+//go:linkname net_runtime_pollClose net.runtime_pollClose
+func net_runtime_pollClose(pd *pollDesc) {
+       if !pd.closing {
+               throw("netpollClose: close w/o unblock")
+       }
+       if pd.wg != 0 && pd.wg != pdReady {
+               throw("netpollClose: blocked write on closing descriptor")
+       }
+       if pd.rg != 0 && pd.rg != pdReady {
+               throw("netpollClose: blocked read on closing descriptor")
+       }
+       netpollclose(pd.fd)
+       pollcache.free(pd)
+}
+
+func (c *pollCache) free(pd *pollDesc) {
+       lock(&c.lock)
+       pd.link = c.first
+       c.first = pd
+       unlock(&c.lock)
+}
+
+//go:linkname net_runtime_pollReset net.runtime_pollReset
+func net_runtime_pollReset(pd *pollDesc, mode int) int {
+       err := netpollcheckerr(pd, int32(mode))
+       if err != 0 {
+               return err
+       }
+       if mode == 'r' {
+               pd.rg = 0
+       } else if mode == 'w' {
+               pd.wg = 0
+       }
+       return 0
+}
+
+//go:linkname net_runtime_pollWait net.runtime_pollWait
+func net_runtime_pollWait(pd *pollDesc, mode int) int {
+       err := netpollcheckerr(pd, int32(mode))
+       if err != 0 {
+               return err
+       }
+       // As for now only Solaris uses level-triggered IO.
+       if GOOS == "solaris" {
+               netpollarm(pd, mode)
+       }
+       for !netpollblock(pd, int32(mode), false) {
+               err = netpollcheckerr(pd, int32(mode))
+               if err != 0 {
+                       return err
+               }
+               // Can happen if timeout has fired and unblocked us,
+               // but before we had a chance to run, timeout has been reset.
+               // Pretend it has not happened and retry.
+       }
+       return 0
+}
+
+//go:linkname net_runtime_pollWaitCanceled net.runtime_pollWaitCanceled
+func net_runtime_pollWaitCanceled(pd *pollDesc, mode int) {
+       // This function is used only on windows after a failed attempt to cancel
+       // a pending async IO operation. Wait for ioready, ignore closing or timeouts.
+       for !netpollblock(pd, int32(mode), true) {
+       }
+}
+
+//go:linkname net_runtime_pollSetDeadline net.runtime_pollSetDeadline
+func net_runtime_pollSetDeadline(pd *pollDesc, d int64, mode int) {
+       lock(&pd.lock)
+       if pd.closing {
+               unlock(&pd.lock)
+               return
+       }
+       pd.seq++ // invalidate current timers
+       // Reset current timers.
+       if pd.rt.f != nil {
+               deltimer(&pd.rt)
+               pd.rt.f = nil
+       }
+       if pd.wt.f != nil {
+               deltimer(&pd.wt)
+               pd.wt.f = nil
+       }
+       // Setup new timers.
+       if d != 0 && d <= nanotime() {
+               d = -1
+       }
+       if mode == 'r' || mode == 'r'+'w' {
+               pd.rd = d
+       }
+       if mode == 'w' || mode == 'r'+'w' {
+               pd.wd = d
+       }
+       if pd.rd > 0 && pd.rd == pd.wd {
+               pd.rt.f = netpollDeadline
+               pd.rt.when = pd.rd
+               // Copy current seq into the timer arg.
+               // Timer func will check the seq against current descriptor seq,
+               // if they differ the descriptor was reused or timers were reset.
+               pd.rt.arg = pd
+               pd.rt.seq = pd.seq
+               addtimer(&pd.rt)
+       } else {
+               if pd.rd > 0 {
+                       pd.rt.f = netpollReadDeadline
+                       pd.rt.when = pd.rd
+                       pd.rt.arg = pd
+                       pd.rt.seq = pd.seq
+                       addtimer(&pd.rt)
+               }
+               if pd.wd > 0 {
+                       pd.wt.f = netpollWriteDeadline
+                       pd.wt.when = pd.wd
+                       pd.wt.arg = pd
+                       pd.wt.seq = pd.seq
+                       addtimer(&pd.wt)
+               }
+       }
+       // If we set the new deadline in the past, unblock currently pending IO if any.
+       var rg, wg *g
+       atomicstorep(unsafe.Pointer(&wg), nil) // full memory barrier between stores to rd/wd and load of rg/wg in netpollunblock
+       if pd.rd < 0 {
+               rg = netpollunblock(pd, 'r', false)
+       }
+       if pd.wd < 0 {
+               wg = netpollunblock(pd, 'w', false)
+       }
+       unlock(&pd.lock)
+       if rg != nil {
+               goready(rg, 3)
+       }
+       if wg != nil {
+               goready(wg, 3)
+       }
+}
+
+//go:linkname net_runtime_pollUnblock net.runtime_pollUnblock
+func net_runtime_pollUnblock(pd *pollDesc) {
+       lock(&pd.lock)
+       if pd.closing {
+               throw("netpollUnblock: already closing")
+       }
+       pd.closing = true
+       pd.seq++
+       var rg, wg *g
+       atomicstorep(unsafe.Pointer(&rg), nil) // full memory barrier between store to closing and read of rg/wg in netpollunblock
+       rg = netpollunblock(pd, 'r', false)
+       wg = netpollunblock(pd, 'w', false)
+       if pd.rt.f != nil {
+               deltimer(&pd.rt)
+               pd.rt.f = nil
+       }
+       if pd.wt.f != nil {
+               deltimer(&pd.wt)
+               pd.wt.f = nil
+       }
+       unlock(&pd.lock)
+       if rg != nil {
+               goready(rg, 3)
+       }
+       if wg != nil {
+               goready(wg, 3)
+       }
+}
+
+// make pd ready, newly runnable goroutines (if any) are returned in rg/wg
+// May run during STW, so write barriers are not allowed.
+//go:nowritebarrier
+func netpollready(gpp *guintptr, pd *pollDesc, mode int32) {
+       var rg, wg guintptr
+       if mode == 'r' || mode == 'r'+'w' {
+               rg.set(netpollunblock(pd, 'r', true))
+       }
+       if mode == 'w' || mode == 'r'+'w' {
+               wg.set(netpollunblock(pd, 'w', true))
+       }
+       if rg != 0 {
+               rg.ptr().schedlink = *gpp
+               *gpp = rg
+       }
+       if wg != 0 {
+               wg.ptr().schedlink = *gpp
+               *gpp = wg
+       }
+}
+
+func netpollcheckerr(pd *pollDesc, mode int32) int {
+       if pd.closing {
+               return 1 // errClosing
+       }
+       if (mode == 'r' && pd.rd < 0) || (mode == 'w' && pd.wd < 0) {
+               return 2 // errTimeout
+       }
+       return 0
+}
+
+func netpollblockcommit(gp *g, gpp unsafe.Pointer) bool {
+       return atomic.Casuintptr((*uintptr)(gpp), pdWait, uintptr(unsafe.Pointer(gp)))
+}
+
+// returns true if IO is ready, or false if timedout or closed
+// waitio - wait only for completed IO, ignore errors
+func netpollblock(pd *pollDesc, mode int32, waitio bool) bool {
+       gpp := &pd.rg
+       if mode == 'w' {
+               gpp = &pd.wg
+       }
+
+       // set the gpp semaphore to WAIT
+       for {
+               old := *gpp
+               if old == pdReady {
+                       *gpp = 0
+                       return true
+               }
+               if old != 0 {
+                       throw("netpollblock: double wait")
+               }
+               if atomic.Casuintptr(gpp, 0, pdWait) {
+                       break
+               }
+       }
+
+       // need to recheck error states after setting gpp to WAIT
+       // this is necessary because runtime_pollUnblock/runtime_pollSetDeadline/deadlineimpl
+       // do the opposite: store to closing/rd/wd, membarrier, load of rg/wg
+       if waitio || netpollcheckerr(pd, mode) == 0 {
+               gopark(netpollblockcommit, unsafe.Pointer(gpp), "IO wait", traceEvGoBlockNet, 5)
+       }
+       // be careful to not lose concurrent READY notification
+       old := atomic.Xchguintptr(gpp, 0)
+       if old > pdWait {
+               throw("netpollblock: corrupted state")
+       }
+       return old == pdReady
+}
+
+func netpollunblock(pd *pollDesc, mode int32, ioready bool) *g {
+       gpp := &pd.rg
+       if mode == 'w' {
+               gpp = &pd.wg
+       }
+
+       for {
+               old := *gpp
+               if old == pdReady {
+                       return nil
+               }
+               if old == 0 && !ioready {
+                       // Only set READY for ioready. runtime_pollWait
+                       // will check for timeout/cancel before waiting.
+                       return nil
+               }
+               var new uintptr
+               if ioready {
+                       new = pdReady
+               }
+               if atomic.Casuintptr(gpp, old, new) {
+                       if old == pdReady || old == pdWait {
+                               old = 0
+                       }
+                       return (*g)(unsafe.Pointer(old))
+               }
+       }
+}
+
+func netpolldeadlineimpl(pd *pollDesc, seq uintptr, read, write bool) {
+       lock(&pd.lock)
+       // Seq arg is seq when the timer was set.
+       // If it's stale, ignore the timer event.
+       if seq != pd.seq {
+               // The descriptor was reused or timers were reset.
+               unlock(&pd.lock)
+               return
+       }
+       var rg *g
+       if read {
+               if pd.rd <= 0 || pd.rt.f == nil {
+                       throw("netpolldeadlineimpl: inconsistent read deadline")
+               }
+               pd.rd = -1
+               atomicstorep(unsafe.Pointer(&pd.rt.f), nil) // full memory barrier between store to rd and load of rg in netpollunblock
+               rg = netpollunblock(pd, 'r', false)
+       }
+       var wg *g
+       if write {
+               if pd.wd <= 0 || pd.wt.f == nil && !read {
+                       throw("netpolldeadlineimpl: inconsistent write deadline")
+               }
+               pd.wd = -1
+               atomicstorep(unsafe.Pointer(&pd.wt.f), nil) // full memory barrier between store to wd and load of wg in netpollunblock
+               wg = netpollunblock(pd, 'w', false)
+       }
+       unlock(&pd.lock)
+       if rg != nil {
+               goready(rg, 0)
+       }
+       if wg != nil {
+               goready(wg, 0)
+       }
+}
+
+func netpollDeadline(arg interface{}, seq uintptr) {
+       netpolldeadlineimpl(arg.(*pollDesc), seq, true, true)
+}
+
+func netpollReadDeadline(arg interface{}, seq uintptr) {
+       netpolldeadlineimpl(arg.(*pollDesc), seq, true, false)
+}
+
+func netpollWriteDeadline(arg interface{}, seq uintptr) {
+       netpolldeadlineimpl(arg.(*pollDesc), seq, false, true)
+}
+
+func (c *pollCache) alloc() *pollDesc {
+       lock(&c.lock)
+       if c.first == nil {
+               const pdSize = unsafe.Sizeof(pollDesc{})
+               n := pollBlockSize / pdSize
+               if n == 0 {
+                       n = 1
+               }
+               // Must be in non-GC memory because can be referenced
+               // only from epoll/kqueue internals.
+               mem := persistentalloc(n*pdSize, 0, &memstats.other_sys)
+               for i := uintptr(0); i < n; i++ {
+                       pd := (*pollDesc)(add(mem, i*pdSize))
+                       pd.link = c.first
+                       c.first = pd
+               }
+       }
+       pd := c.first
+       c.first = pd.link
+       unlock(&c.lock)
+       return pd
+}
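
The rg/wg fields documented at the top of netpoll.go behave as a three-state, CAS-driven semaphore (nil, pdReady, pdWait, or a parked goroutine). A minimal standalone sketch of that state machine, using a plain uintptr and sync/atomic in place of the runtime internals (hypothetical illustration, not code from the patch):

package main

import (
	"fmt"
	"sync/atomic"
)

const (
	pdReady uintptr = 1
	pdWait  uintptr = 2
)

// post mimics netpollunblock with ioready=true: it marks the semaphore
// ready and returns the identity of a parked waiter to wake, or 0.
func post(sem *uintptr) uintptr {
	for {
		old := atomic.LoadUintptr(sem)
		if old == pdReady {
			return 0 // a notification is already pending
		}
		if atomic.CompareAndSwapUintptr(sem, old, pdReady) {
			if old == pdWait {
				old = 0 // waiter had not finished committing to park
			}
			return old // 0, or the parked waiter's identity
		}
	}
}

// consume mimics the fast path of netpollblock: it eats a pending
// notification, returning true if one was there.
func consume(sem *uintptr) bool {
	return atomic.CompareAndSwapUintptr(sem, pdReady, 0)
}

func main() {
	var sem uintptr
	atomic.StoreUintptr(&sem, 42) // pretend waiter "42" is parked
	fmt.Println(post(&sem))       // 42: wake that waiter
	fmt.Println(consume(&sem))    // true: the pdReady state is consumed
}
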
diff --git a/libgo/go/runtime/netpoll_epoll.go b/libgo/go/runtime/netpoll_epoll.go
new file mode 100644 (file)
index 0000000..777150e
--- /dev/null
@@ -0,0 +1,116 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+
+package runtime
+
+import "unsafe"
+
+//extern epoll_create
+func epollcreate(size int32) int32
+
+//extern epoll_create1
+func epollcreate1(flags int32) int32
+
+//go:noescape
+//extern epoll_ctl
+func epollctl(epfd, op, fd int32, ev *epollevent) int32
+
+//go:noescape
+//extern epoll_wait
+func epollwait(epfd int32, ev *epollevent, nev, timeout int32) int32
+
+//extern __go_fcntl_uintptr
+func fcntlUintptr(fd, cmd, arg uintptr) (uintptr, uintptr)
+
+func closeonexec(fd int32) {
+       fcntlUintptr(uintptr(fd), _F_SETFD, _FD_CLOEXEC)
+}
+
+var (
+       epfd int32 = -1 // epoll descriptor
+)
+
+func netpollinit() {
+       epfd = epollcreate1(_EPOLL_CLOEXEC)
+       if epfd >= 0 {
+               return
+       }
+       epfd = epollcreate(1024)
+       if epfd >= 0 {
+               closeonexec(epfd)
+               return
+       }
+       println("netpollinit: failed to create epoll descriptor", errno())
+       throw("netpollinit: failed to create descriptor")
+}
+
+func netpollopen(fd uintptr, pd *pollDesc) int32 {
+       var ev epollevent
+       ev.events = _EPOLLIN | _EPOLLOUT | _EPOLLRDHUP | _EPOLLET
+       *(**pollDesc)(unsafe.Pointer(&ev.data)) = pd
+       if epollctl(epfd, _EPOLL_CTL_ADD, int32(fd), &ev) < 0 {
+               return int32(errno())
+       }
+       return 0
+}
+
+func netpollclose(fd uintptr) int32 {
+       var ev epollevent
+       if epollctl(epfd, _EPOLL_CTL_DEL, int32(fd), &ev) < 0 {
+               return int32(errno())
+       }
+       return 0
+}
+
+func netpollarm(pd *pollDesc, mode int) {
+       throw("unused")
+}
+
+// polls for ready network connections
+// returns list of goroutines that become runnable
+func netpoll(block bool) *g {
+       if epfd == -1 {
+               return nil
+       }
+       waitms := int32(-1)
+       if !block {
+               waitms = 0
+       }
+       var events [128]epollevent
+retry:
+       n := epollwait(epfd, &events[0], int32(len(events)), waitms)
+       if n < 0 {
+               e := errno()
+               if e != _EINTR {
+                       println("runtime: epollwait on fd", epfd, "failed with", e)
+                       throw("epollwait failed")
+               }
+               goto retry
+       }
+       var gp guintptr
+       for i := int32(0); i < n; i++ {
+               ev := &events[i]
+               if ev.events == 0 {
+                       continue
+               }
+               var mode int32
+               if ev.events&(_EPOLLIN|_EPOLLRDHUP|_EPOLLHUP|_EPOLLERR) != 0 {
+                       mode += 'r'
+               }
+               if ev.events&(_EPOLLOUT|_EPOLLHUP|_EPOLLERR) != 0 {
+                       mode += 'w'
+               }
+               if mode != 0 {
+                       pd := *(**pollDesc)(unsafe.Pointer(&ev.data))
+
+                       netpollready(&gp, pd, mode)
+               }
+       }
+       if block && gp == 0 {
+               goto retry
+       }
+       return gp.ptr()
+}
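
The registration in netpollopen above (EPOLLIN|EPOLLOUT|EPOLLRDHUP|EPOLLET, with the pollDesc pointer stashed in the event's data field) is the standard edge-triggered epoll pattern. A small userspace sketch of the same pattern with package syscall on Linux (hypothetical illustration; the unsigned _EPOLLET constant and the pipe are stand-ins, not part of the patch):

package main

import (
	"fmt"
	"syscall"
)

const _EPOLLET = 1 << 31 // EPOLLET as an unsigned constant, so the expression fits in uint32

func main() {
	epfd, err := syscall.EpollCreate1(syscall.EPOLL_CLOEXEC)
	if err != nil {
		panic(err)
	}
	var p [2]int
	if err := syscall.Pipe(p[:]); err != nil {
		panic(err)
	}
	// Register the pipe's read end for edge-triggered readiness,
	// as the runtime does for network descriptors.
	ev := syscall.EpollEvent{
		Events: syscall.EPOLLIN | syscall.EPOLLRDHUP | _EPOLLET,
		Fd:     int32(p[0]),
	}
	if err := syscall.EpollCtl(epfd, syscall.EPOLL_CTL_ADD, p[0], &ev); err != nil {
		panic(err)
	}
	syscall.Write(p[1], []byte("x")) // make the read end ready

	// Block until readiness, like the runtime's epollwait loop with waitms = -1.
	events := make([]syscall.EpollEvent, 8)
	n, err := syscall.EpollWait(epfd, events, -1)
	if err != nil {
		panic(err)
	}
	for i := 0; i < n; i++ {
		fmt.Printf("fd %d ready, events %#x\n", events[i].Fd, events[i].Events)
	}
}
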
diff --git a/libgo/go/runtime/netpoll_kqueue.go b/libgo/go/runtime/netpoll_kqueue.go
new file mode 100644 (file)
index 0000000..eae4f21
--- /dev/null
@@ -0,0 +1,110 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd netbsd openbsd
+
+package runtime
+
+// Integrated network poller (kqueue-based implementation).
+
+import "unsafe"
+
+//extern kqueue
+func kqueue() int32
+
+//go:noescape
+//extern kevent
+func kevent(kq int32, ch *keventt, nch uintptr, ev *keventt, nev uintptr, ts *timespec) int32
+
+//extern __go_fcntl_uintptr
+func fcntlUintptr(fd, cmd, arg uintptr) (uintptr, uintptr)
+
+func closeonexec(fd int32) {
+       fcntlUintptr(uintptr(fd), _F_SETFD, _FD_CLOEXEC)
+}
+
+var (
+       kq int32 = -1
+)
+
+func netpollinit() {
+       kq = kqueue()
+       if kq < 0 {
+               println("netpollinit: kqueue failed with", errno())
+               throw("netpollinit: kqueue failed")
+       }
+       closeonexec(kq)
+}
+
+func netpollopen(fd uintptr, pd *pollDesc) int32 {
+       // Arm both EVFILT_READ and EVFILT_WRITE in edge-triggered mode (EV_CLEAR)
+       // for the whole fd lifetime. The notifications are automatically unregistered
+       // when fd is closed.
+       var ev [2]keventt
+       *(*uintptr)(unsafe.Pointer(&ev[0].ident)) = fd
+       ev[0].filter = _EVFILT_READ
+       ev[0].flags = _EV_ADD | _EV_CLEAR
+       ev[0].fflags = 0
+       ev[0].data = 0
+       ev[0].udata = (*byte)(unsafe.Pointer(pd))
+       ev[1] = ev[0]
+       ev[1].filter = _EVFILT_WRITE
+       n := kevent(kq, &ev[0], 2, nil, 0, nil)
+       if n < 0 {
+               return int32(errno())
+       }
+       return 0
+}
+
+func netpollclose(fd uintptr) int32 {
+       // Don't need to unregister because calling close()
+       // on fd will remove any kevents that reference the descriptor.
+       return 0
+}
+
+func netpollarm(pd *pollDesc, mode int) {
+       throw("unused")
+}
+
+// Polls for ready network connections.
+// Returns list of goroutines that become runnable.
+func netpoll(block bool) *g {
+       if kq == -1 {
+               return nil
+       }
+       var tp *timespec
+       var ts timespec
+       if !block {
+               tp = &ts
+       }
+       var events [64]keventt
+retry:
+       n := kevent(kq, nil, 0, &events[0], uintptr(len(events)), tp)
+       if n < 0 {
+               e := errno()
+               if e != _EINTR {
+                       println("runtime: kevent on fd", kq, "failed with", e)
+                       throw("kevent failed")
+               }
+               goto retry
+       }
+       var gp guintptr
+       for i := 0; i < int(n); i++ {
+               ev := &events[i]
+               var mode int32
+               if ev.filter == _EVFILT_READ {
+                       mode += 'r'
+               }
+               if ev.filter == _EVFILT_WRITE {
+                       mode += 'w'
+               }
+               if mode != 0 {
+                       netpollready(&gp, (*pollDesc)(unsafe.Pointer(ev.udata)), mode)
+               }
+       }
+       if block && gp == 0 {
+               goto retry
+       }
+       return gp.ptr()
+}
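
The same pattern on the BSDs: netpollopen arms EVFILT_READ and EVFILT_WRITE with EV_CLEAR for edge-triggered behaviour. A userspace sketch using package syscall (hypothetical illustration for darwin/*BSD, not code from the patch):

package main

import (
	"fmt"
	"syscall"
)

func main() {
	kq, err := syscall.Kqueue()
	if err != nil {
		panic(err)
	}
	var p [2]int
	if err := syscall.Pipe(p[:]); err != nil {
		panic(err)
	}
	// Arm EVFILT_READ with EV_CLEAR (edge-triggered), as netpollopen does.
	var ev syscall.Kevent_t
	syscall.SetKevent(&ev, p[0], syscall.EVFILT_READ, syscall.EV_ADD|syscall.EV_CLEAR)
	if _, err := syscall.Kevent(kq, []syscall.Kevent_t{ev}, nil, nil); err != nil {
		panic(err)
	}
	syscall.Write(p[1], []byte("x")) // make the read end ready

	// A nil timeout blocks, like the runtime's kevent call with tp == nil.
	events := make([]syscall.Kevent_t, 8)
	n, err := syscall.Kevent(kq, nil, events, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println("ready kevents:", n)
}
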
diff --git a/libgo/go/runtime/netpoll_nacl.go b/libgo/go/runtime/netpoll_nacl.go
new file mode 100644 (file)
index 0000000..5cbc300
--- /dev/null
@@ -0,0 +1,26 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Fake network poller for NaCl.
+// Should never be used, because NaCl network connections do not honor "SetNonblock".
+
+package runtime
+
+func netpollinit() {
+}
+
+func netpollopen(fd uintptr, pd *pollDesc) int32 {
+       return 0
+}
+
+func netpollclose(fd uintptr) int32 {
+       return 0
+}
+
+func netpollarm(pd *pollDesc, mode int) {
+}
+
+func netpoll(block bool) *g {
+       return nil
+}
diff --git a/libgo/go/runtime/netpoll_solaris.go b/libgo/go/runtime/netpoll_solaris.go
new file mode 100644 (file)
index 0000000..cc6754c
--- /dev/null
@@ -0,0 +1,225 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import "unsafe"
+
+// Solaris runtime-integrated network poller.
+//
+// Solaris uses event ports for scalable network I/O. Event
+// ports are level-triggered, unlike epoll and kqueue which
+// can be configured in both level-triggered and edge-triggered
+// mode. Level triggering means we have to keep track of a few things
+// ourselves. After we receive an event for a file descriptor,
+// it's our responsibility to ask again to be notified for future
+// events for that descriptor. When doing this we must keep track of
+// what kind of events the goroutines are currently interested in,
+// for example a fd may be open both for reading and writing.
+//
+// A description of the high level operation of this code
+// follows. Networking code will get a file descriptor by some means
+// and will register it with the netpolling mechanism by a code path
+// that eventually calls runtime·netpollopen. runtime·netpollopen
+// calls port_associate with an empty event set. That means that we
+// will not receive any events at this point. The association needs
+// to be done at this early point because we need to process the I/O
+// readiness notification at some point in the future. If I/O becomes
+// ready when nobody is listening, when we finally care about it,
+// nobody will tell us anymore.
+//
+// Beside calling runtime·netpollopen, the networking code paths
+// will call runtime·netpollarm each time goroutines are interested
+// in doing network I/O. Because now we know what kind of I/O we
+// are interested in (reading/writing), we can call port_associate
+// passing the correct type of event set (POLLIN/POLLOUT). As we made
+// sure to have already associated the file descriptor with the port,
+// when we now call port_associate, we will unblock the main poller
+// loop (in runtime·netpoll) right away if the socket is actually
+// ready for I/O.
+//
+// The main poller loop runs in its own thread waiting for events
+// using port_getn. When an event happens, it will tell the scheduler
+// about it using runtime·netpollready. Besides doing this, it must
+// also re-associate the events that were not part of this current
+// notification with the file descriptor. Failing to do this would
+// mean each notification will prevent concurrent code using the
+// same file descriptor in parallel.
+//
+// The logic dealing with re-associations is encapsulated in
+// runtime·netpollupdate. This function takes care to associate the
+// descriptor only with the subset of events that were previously
+// part of the association, except the one that just happened. We
+// can't re-associate with that right away, because event ports
+// are level triggered so it would cause a busy loop. Instead, that
+// association is effected only by the runtime·netpollarm code path,
+// when Go code actually asks for I/O.
+//
+// The open and arming mechanisms are serialized using the lock
+// inside PollDesc. This is required because the netpoll loop runs
+// asynchronously in respect to other Go code and by the time we get
+// to call port_associate to update the association in the loop, the
+// file descriptor might have been closed and reopened already. The
+// lock allows runtime·netpollupdate to be called synchronously from
+// the loop thread while preventing other threads operating to the
+// same PollDesc, so once we unblock in the main loop, until we loop
+// again we know for sure we are always talking about the same file
+// descriptor and can safely access the data we want (the event set).
+
+//extern __go_fcntl_uintptr
+func fcntlUintptr(fd, cmd, arg uintptr) (uintptr, uintptr)
+
+func fcntl(fd, cmd int32, arg uintptr) int32 {
+       r, _ := fcntlUintptr(uintptr(fd), uintptr(cmd), arg)
+       return int32(r)
+}
+
+//extern port_create
+func port_create() int32
+
+//extern port_associate
+func port_associate(port, source int32, object uintptr, events uint32, user uintptr) int32
+
+//extern port_dissociate
+func port_dissociate(port, source int32, object uintptr) int32
+
+//extern port_getn
+func port_getn(port int32, evs *portevent, max uint32, nget *uint32, timeout *timespec) int32
+
+var portfd int32 = -1
+
+func netpollinit() {
+       portfd = port_create()
+       if portfd >= 0 {
+               fcntl(portfd, _F_SETFD, _FD_CLOEXEC)
+               return
+       }
+
+       print("netpollinit: failed to create port (", errno(), ")\n")
+       throw("netpollinit: failed to create port")
+}
+
+func netpollopen(fd uintptr, pd *pollDesc) int32 {
+       lock(&pd.lock)
+       // We don't register for any specific type of events yet, that's
+       // netpollarm's job. We merely ensure we call port_associate before
+       // asynchronous connect/accept completes, so when we actually want
+       // to do any I/O, the call to port_associate (from netpollarm,
+       // with the interested event set) will unblock port_getn right away
+       // because of the I/O readiness notification.
+       pd.user = 0
+       r := port_associate(portfd, _PORT_SOURCE_FD, fd, 0, uintptr(unsafe.Pointer(pd)))
+       unlock(&pd.lock)
+       if r < 0 {
+               return int32(errno())
+       }
+       return 0
+}
+
+func netpollclose(fd uintptr) int32 {
+       if port_dissociate(portfd, _PORT_SOURCE_FD, fd) < 0 {
+               return int32(errno())
+       }
+       return 0
+}
+
+// Updates the association with a new set of interested events. After
+// this call, port_getn will return one and only one event for that
+// particular descriptor, so this function needs to be called again.
+func netpollupdate(pd *pollDesc, set, clear uint32) {
+       if pd.closing {
+               return
+       }
+
+       old := pd.user
+       events := (old & ^clear) | set
+       if old == events {
+               return
+       }
+
+       if events != 0 && port_associate(portfd, _PORT_SOURCE_FD, pd.fd, events, uintptr(unsafe.Pointer(pd))) != 0 {
+               print("netpollupdate: failed to associate (", errno(), ")\n")
+               throw("netpollupdate: failed to associate")
+       }
+       pd.user = events
+}
+
+// subscribe the fd to the port such that port_getn will return one event.
+func netpollarm(pd *pollDesc, mode int) {
+       lock(&pd.lock)
+       switch mode {
+       case 'r':
+               netpollupdate(pd, _POLLIN, 0)
+       case 'w':
+               netpollupdate(pd, _POLLOUT, 0)
+       default:
+               throw("netpollarm: bad mode")
+       }
+       unlock(&pd.lock)
+}
+
+// polls for ready network connections
+// returns list of goroutines that become runnable
+func netpoll(block bool) *g {
+       if portfd == -1 {
+               return nil
+       }
+
+       var wait *timespec
+       var zero timespec
+       if !block {
+               wait = &zero
+       }
+
+       var events [128]portevent
+retry:
+       var n uint32 = 1
+       if port_getn(portfd, &events[0], uint32(len(events)), &n, wait) < 0 {
+               if e := errno(); e != _EINTR {
+                       print("runtime: port_getn on fd ", portfd, " failed with ", e, "\n")
+                       throw("port_getn failed")
+               }
+               goto retry
+       }
+
+       var gp guintptr
+       for i := 0; i < int(n); i++ {
+               ev := &events[i]
+
+               if ev.portev_events == 0 {
+                       continue
+               }
+               pd := (*pollDesc)(unsafe.Pointer(ev.portev_user))
+
+               var mode, clear int32
+               if (ev.portev_events & (_POLLIN | _POLLHUP | _POLLERR)) != 0 {
+                       mode += 'r'
+                       clear |= _POLLIN
+               }
+               if (ev.portev_events & (_POLLOUT | _POLLHUP | _POLLERR)) != 0 {
+                       mode += 'w'
+                       clear |= _POLLOUT
+               }
+               // To effect edge-triggered events, we need to be sure to
+               // update our association with whatever events were not
+               // set with the event. For example if we are registered
+               // for POLLIN|POLLOUT, and we get POLLIN, besides waking
+               // the goroutine interested in POLLIN we have to not forget
+               // about the one interested in POLLOUT.
+               if clear != 0 {
+                       lock(&pd.lock)
+                       netpollupdate(pd, 0, uint32(clear))
+                       unlock(&pd.lock)
+               }
+
+               if mode != 0 {
+                       netpollready(&gp, pd, mode)
+               }
+       }
+
+       if block && gp == 0 {
+               goto retry
+       }
+       return gp.ptr()
+}
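
The re-association bookkeeping described in the long comment above boils down to bitmask arithmetic over the event set stored in pd.user: when an event fires it is cleared from the association, and netpollarm adds it back only when Go code asks for I/O again. A tiny illustration (hypothetical helper, not code from the patch):

// nextAssociation computes the event set to re-register with port_associate,
// mirroring netpollupdate's events := (old &^ clear) | set.
func nextAssociation(current, set, clear uint32) uint32 {
	return (current &^ clear) | set
}

// Example: registered for POLLIN|POLLOUT and POLLIN fires; the poller clears
// POLLIN but must keep POLLOUT armed so a waiting writer is not starved:
//   nextAssociation(_POLLIN|_POLLOUT, 0, _POLLIN) == _POLLOUT
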
diff --git a/libgo/go/runtime/netpoll_stub.go b/libgo/go/runtime/netpoll_stub.go
new file mode 100644 (file)
index 0000000..09f64ad
--- /dev/null
@@ -0,0 +1,19 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build plan9
+
+package runtime
+
+// Polls for ready network connections.
+// Returns list of goroutines that become runnable.
+func netpoll(block bool) (gp *g) {
+       // Implementation for platforms that do not support
+       // integrated network poller.
+       return
+}
+
+func netpollinited() bool {
+       return false
+}
diff --git a/libgo/go/runtime/netpoll_windows.go b/libgo/go/runtime/netpoll_windows.go
new file mode 100644 (file)
index 0000000..7ad1158
--- /dev/null
@@ -0,0 +1,145 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import (
+       "unsafe"
+)
+
+const _DWORD_MAX = 0xffffffff
+
+const _INVALID_HANDLE_VALUE = ^uintptr(0)
+
+// net_op must be the same as beginning of net.operation. Keep these in sync.
+type net_op struct {
+       // used by windows
+       o overlapped
+       // used by netpoll
+       pd    *pollDesc
+       mode  int32
+       errno int32
+       qty   uint32
+}
+
+type overlappedEntry struct {
+       key      uintptr
+       op       *net_op // In reality it's *overlapped, but we cast it to *net_op anyway.
+       internal uintptr
+       qty      uint32
+}
+
+var iocphandle uintptr = _INVALID_HANDLE_VALUE // completion port io handle
+
+func netpollinit() {
+       iocphandle = stdcall4(_CreateIoCompletionPort, _INVALID_HANDLE_VALUE, 0, 0, _DWORD_MAX)
+       if iocphandle == 0 {
+               println("netpoll: failed to create iocp handle (errno=", getlasterror(), ")")
+               throw("netpoll: failed to create iocp handle")
+       }
+}
+
+func netpollopen(fd uintptr, pd *pollDesc) int32 {
+       if stdcall4(_CreateIoCompletionPort, fd, iocphandle, 0, 0) == 0 {
+               return -int32(getlasterror())
+       }
+       return 0
+}
+
+func netpollclose(fd uintptr) int32 {
+       // nothing to do
+       return 0
+}
+
+func netpollarm(pd *pollDesc, mode int) {
+       throw("unused")
+}
+
+// Polls for completed network IO.
+// Returns list of goroutines that become runnable.
+func netpoll(block bool) *g {
+       var entries [64]overlappedEntry
+       var wait, qty, key, flags, n, i uint32
+       var errno int32
+       var op *net_op
+       var gp guintptr
+
+       mp := getg().m
+
+       if iocphandle == _INVALID_HANDLE_VALUE {
+               return nil
+       }
+       wait = 0
+       if block {
+               wait = _INFINITE
+       }
+retry:
+       if _GetQueuedCompletionStatusEx != nil {
+               n = uint32(len(entries) / int(gomaxprocs))
+               if n < 8 {
+                       n = 8
+               }
+               if block {
+                       mp.blocked = true
+               }
+               if stdcall6(_GetQueuedCompletionStatusEx, iocphandle, uintptr(unsafe.Pointer(&entries[0])), uintptr(n), uintptr(unsafe.Pointer(&n)), uintptr(wait), 0) == 0 {
+                       mp.blocked = false
+                       errno = int32(getlasterror())
+                       if !block && errno == _WAIT_TIMEOUT {
+                               return nil
+                       }
+                       println("netpoll: GetQueuedCompletionStatusEx failed (errno=", errno, ")")
+                       throw("netpoll: GetQueuedCompletionStatusEx failed")
+               }
+               mp.blocked = false
+               for i = 0; i < n; i++ {
+                       op = entries[i].op
+                       errno = 0
+                       qty = 0
+                       if stdcall5(_WSAGetOverlappedResult, op.pd.fd, uintptr(unsafe.Pointer(op)), uintptr(unsafe.Pointer(&qty)), 0, uintptr(unsafe.Pointer(&flags))) == 0 {
+                               errno = int32(getlasterror())
+                       }
+                       handlecompletion(&gp, op, errno, qty)
+               }
+       } else {
+               op = nil
+               errno = 0
+               qty = 0
+               if block {
+                       mp.blocked = true
+               }
+               if stdcall5(_GetQueuedCompletionStatus, iocphandle, uintptr(unsafe.Pointer(&qty)), uintptr(unsafe.Pointer(&key)), uintptr(unsafe.Pointer(&op)), uintptr(wait)) == 0 {
+                       mp.blocked = false
+                       errno = int32(getlasterror())
+                       if !block && errno == _WAIT_TIMEOUT {
+                               return nil
+                       }
+                       if op == nil {
+                               println("netpoll: GetQueuedCompletionStatus failed (errno=", errno, ")")
+                               throw("netpoll: GetQueuedCompletionStatus failed")
+                       }
+                       // dequeued failed IO packet, so report that
+               }
+               mp.blocked = false
+               handlecompletion(&gp, op, errno, qty)
+       }
+       if block && gp == 0 {
+               goto retry
+       }
+       return gp.ptr()
+}
+
+func handlecompletion(gpp *guintptr, op *net_op, errno int32, qty uint32) {
+       if op == nil {
+               throw("netpoll: GetQueuedCompletionStatus returned op == nil")
+       }
+       mode := op.mode
+       if mode != 'r' && mode != 'w' {
+               println("netpoll: GetQueuedCompletionStatus returned invalid mode=", mode)
+               throw("netpoll: GetQueuedCompletionStatus returned invalid mode")
+       }
+       op.errno = errno
+       op.qty = qty
+       netpollready(gpp, op.pd, mode)
+}
index 3db8fea62a1d38375cb4a2390aa8a32f342dab82..083710d0b15aff8af390331c342de366c01eb07b 100644 (file)
@@ -296,7 +296,7 @@ func casp(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool {
 func lock(l *mutex)
 func unlock(l *mutex)
 
-// Here for gccgo for Solaris.
+// Here for gccgo for netpoll and Solaris.
 func errno() int
 
 // Temporary for gccgo until we port proc.go.
@@ -460,3 +460,9 @@ func setmaxthreads(int) int
 func setMaxThreads(in int) (out int) {
        return setmaxthreads(in)
 }
+
+// Temporary for gccgo until we port atomic_pointer.go.
+//go:nosplit
+func atomicstorep(ptr unsafe.Pointer, new unsafe.Pointer) {
+       atomic.StorepNoWB(noescape(ptr), new)
+}
index d9a1d59a414d23d32829b63e72def00704677108..8df185dc8fd6056d2c9986fdbbcb5d6bbab5e01e 100644 (file)
@@ -8,10 +8,6 @@ package runtime
 
 import "unsafe"
 
-// Export temporarily for gccgo's C code to call:
-//go:linkname addtimer runtime.addtimer
-//go:linkname deltimer runtime.deltimer
-
 // Package time knows the layout of this structure.
 // If this struct changes, adjust ../time/sleep.go:/runtimeTimer.
 // For GOOS=nacl, package syscall knows the layout of this structure.
index b5c2c709c20765a7f6e072155c9a8addd6b1816e..d05e5ecaffb6630a354891c6cf19aee8f274d3f0 100755 (executable)
@@ -64,6 +64,27 @@ echo "func (ts *timespec) set_nsec(x int32) {" >> ${OUT}
 echo " ts.tv_nsec = timespec_nsec_t(x)" >> ${OUT}
 echo "}" >> ${OUT}
 
+# Define the epollevent struct.  This needs special attention because
+# the C definition uses a union and is sometimes packed.
+if grep '^const _epoll_data_offset ' ${OUT} >/dev/null 2>&1; then
+  val=`grep '^const _epoll_data_offset ' ${OUT} | sed -e 's/const _epoll_data_offset = \(.*\)$/\1/'`
+  if test "$val" = "4"; then
+      echo 'type epollevent struct { events uint32; data [8]byte }' >> ${OUT}
+  elif test "$val" = "8"; then
+      echo 'type epollevent struct { events uint32; pad [4]byte; data [8]byte }' >> ${OUT}
+  else
+      echo 1>&2 "unknown epoll data offset value ${val}"
+      exit 1
+  fi
+fi
+# Make sure EPOLLRDHUP and EPOLL_CLOEXEC are defined.
+if ! grep '^const _EPOLLRDHUP' ${OUT} >/dev/null 2>&1; then
+  echo "const _EPOLLRDHUP = 0x2000" >> ${OUT}
+fi
+if ! grep '^const _EPOLL_CLOEXEC' ${OUT} >/dev/null 2>&1; then
+  echo "const _EPOLL_CLOEXEC = 02000000" >> ${OUT}
+fi
+
 # The semt structure, for Solaris.
 grep '^type _sem_t ' gen-sysinfo.go | \
     sed -e 's/_sem_t/semt/' >> ${OUT}
@@ -101,3 +122,14 @@ grep '^type _mac_ipaddr_t ' gen-sysinfo.go | \
 grep '^type _mactun_info_t ' gen-sysinfo.go | \
     sed -e 's/_in6_addr_t/[16]byte/g' \
     >> ${OUT}
+
+# The Solaris port_event_t struct.
+grep '^type _port_event_t ' gen-sysinfo.go | \
+    sed -e s'/_port_event_t/portevent/' \
+    >> ${OUT}
+
+# The *BSD kevent struct.
+grep '^type _kevent ' gen-sysinfo.go | \
+    sed -e s'/_kevent/keventt/' \
+      -e 's/ udata [^;}]*/ udata *byte/' \
+    >> ${OUT}
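
The generated epollevent, portevent and keventt types above exist so the new Go poller can treat the kernel's union/udata fields as raw bytes. As a sketch only, this is what the 8-byte-offset epollevent layout looks like and how a pointer can be round-tripped through its data bytes, the way the Go poller stores a *pollDesc; the type here is hand-copied for illustration, the real declaration is emitted into the generated sysinfo file.

// Sketch assuming _epoll_data_offset == 8 (the padded layout emitted above).
package main

import (
	"fmt"
	"unsafe"
)

type epollevent struct {
	events uint32
	pad    [4]byte // only present when _epoll_data_offset is 8
	data   [8]byte // union epoll_data, kept as opaque bytes
}

func main() {
	x := 42
	var ev epollevent
	// Stash a pointer in the union bytes...
	*(*unsafe.Pointer)(unsafe.Pointer(&ev.data)) = unsafe.Pointer(&x)
	// ...and read it back out when the event is reported.
	p := *(*unsafe.Pointer)(unsafe.Pointer(&ev.data))
	fmt.Println(*(*int)(p)) // 42
}
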
index f37f206e5f7821f7a985af733b664abd225cdc77..f033aa6efdabf3830c7c8bd9616c12e71c6c13c0 100644 (file)
@@ -544,4 +544,3 @@ int32       runtime_setgcpercent(int32)
 
 struct Workbuf;
 void   runtime_proc_scan(struct Workbuf**, void (*)(struct Workbuf**, Obj));
-void   runtime_netpoll_scan(struct Workbuf**, void (*)(struct Workbuf**, Obj));
index 84353c771ab2a757f0d70b6dadbf8c7526047e98..cd3c55244b56632051eb875eb724c903f53cefe6 100644 (file)
@@ -1277,7 +1277,6 @@ markroot(ParFor *desc, uint32 i)
                enqueue1(&wbuf, (Obj){(byte*)&runtime_allp, sizeof runtime_allp, 0});
                enqueue1(&wbuf, (Obj){(byte*)&work, sizeof work, 0});
                runtime_proc_scan(&wbuf, enqueue1);
-               runtime_netpoll_scan(&wbuf, enqueue1);
                break;
 
        case RootFinalizers:
diff --git a/libgo/runtime/netpoll.goc b/libgo/runtime/netpoll.goc
deleted file mode 100644 (file)
index 9467c02..0000000
+++ /dev/null
@@ -1,467 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris windows
-
-package net
-
-#include "runtime.h"
-#include "defs.h"
-#include "arch.h"
-#include "malloc.h"
-
-// Map gccgo field names to gc field names.
-// Eface aka __go_empty_interface.
-#define type __type_descriptor
-#define data __object
-
-// Integrated network poller (platform-independent part).
-// A particular implementation (epoll/kqueue) must define the following functions:
-// void runtime_netpollinit(void);                     // to initialize the poller
-// int32 runtime_netpollopen(uintptr fd, PollDesc *pd);        // to arm edge-triggered notifications
-                                                       // and associate fd with pd.
-// An implementation must call the following function to denote that the pd is ready.
-// void runtime_netpollready(G **gpp, PollDesc *pd, int32 mode);
-
-// PollDesc contains 2 binary semaphores, rg and wg, to park reader and writer
-// goroutines respectively. The semaphore can be in the following states:
-// READY - io readiness notification is pending;
-//         a goroutine consumes the notification by changing the state to nil.
-// WAIT - a goroutine prepares to park on the semaphore, but not yet parked;
-//        the goroutine commits to park by changing the state to G pointer,
-//        or, alternatively, concurrent io notification changes the state to READY,
-//        or, alternatively, concurrent timeout/close changes the state to nil.
-// G pointer - the goroutine is blocked on the semaphore;
-//             io notification or timeout/close changes the state to READY or nil respectively
-//             and unparks the goroutine.
-// nil - nothing of the above.
-#define READY ((G*)1)
-#define WAIT  ((G*)2)
-
-enum
-{
-       PollBlockSize   = 4*1024,
-};
-
-struct PollDesc
-{
-       PollDesc* link; // in pollcache, protected by pollcache.Lock
-
-       // The lock protects pollOpen, pollSetDeadline, pollUnblock and deadlineimpl operations.
-       // This fully covers seq, rt and wt variables. fd is constant throughout the PollDesc lifetime.
-       // pollReset, pollWait, pollWaitCanceled and runtime_netpollready (IO readiness notification)
-       // proceed w/o taking the lock. So closing, rg, rd, wg and wd are manipulated
-       // in a lock-free way by all operations.
-       Lock;           // protects the following fields
-       uintptr fd;
-       bool    closing;
-       uintptr seq;    // protects from stale timers and ready notifications
-       G*      rg;     // READY, WAIT, G waiting for read or nil
-       Timer   rt;     // read deadline timer (set if rt.fv != nil)
-       int64   rd;     // read deadline
-       G*      wg;     // READY, WAIT, G waiting for write or nil
-       Timer   wt;     // write deadline timer
-       int64   wd;     // write deadline
-       void*   user;   // user settable cookie
-};
-
-static struct
-{
-       Lock;
-       PollDesc*       first;
-       // PollDesc objects must be type-stable,
-       // because we can get ready notification from epoll/kqueue
-       // after the descriptor is closed/reused.
-       // Stale notifications are detected using seq variable,
-       // seq is incremented when deadlines are changed or descriptor is reused.
-} pollcache;
-
-static bool    netpollblock(PollDesc*, int32, bool);
-static G*      netpollunblock(PollDesc*, int32, bool);
-static void    deadline(Eface, uintptr);
-static void    readDeadline(Eface, uintptr);
-static void    writeDeadline(Eface, uintptr);
-static PollDesc*       allocPollDesc(void);
-static intgo   checkerr(PollDesc *pd, int32 mode);
-
-static FuncVal deadlineFn      = {(void(*)(void))deadline};
-static FuncVal readDeadlineFn  = {(void(*)(void))readDeadline};
-static FuncVal writeDeadlineFn = {(void(*)(void))writeDeadline};
-
-func runtime_pollServerInit() {
-       runtime_netpollinit();
-}
-
-func runtime_pollOpen(fd uintptr) (pd *PollDesc, errno int) {
-       pd = allocPollDesc();
-       runtime_lock(pd);
-       if(pd->wg != nil && pd->wg != READY)
-               runtime_throw("runtime_pollOpen: blocked write on free descriptor");
-       if(pd->rg != nil && pd->rg != READY)
-               runtime_throw("runtime_pollOpen: blocked read on free descriptor");
-       pd->fd = fd;
-       pd->closing = false;
-       pd->seq++;
-       pd->rg = nil;
-       pd->rd = 0;
-       pd->wg = nil;
-       pd->wd = 0;
-       runtime_unlock(pd);
-
-       errno = runtime_netpollopen(fd, pd);
-}
-
-func runtime_pollClose(pd *PollDesc) {
-       if(!pd->closing)
-               runtime_throw("runtime_pollClose: close w/o unblock");
-       if(pd->wg != nil && pd->wg != READY)
-               runtime_throw("runtime_pollClose: blocked write on closing descriptor");
-       if(pd->rg != nil && pd->rg != READY)
-               runtime_throw("runtime_pollClose: blocked read on closing descriptor");
-       runtime_netpollclose(pd->fd);
-       runtime_lock(&pollcache);
-       pd->link = pollcache.first;
-       pollcache.first = pd;
-       runtime_unlock(&pollcache);
-}
-
-func runtime_pollReset(pd *PollDesc, mode int) (err int) {
-       err = checkerr(pd, mode);
-       if(err)
-               goto ret;
-       if(mode == 'r')
-               pd->rg = nil;
-       else if(mode == 'w')
-               pd->wg = nil;
-ret:
-}
-
-func runtime_pollWait(pd *PollDesc, mode int) (err int) {
-       err = checkerr(pd, mode);
-       if(err == 0) {
-               // As for now only Solaris uses level-triggered IO.
-               if(Solaris)
-                       runtime_netpollarm(pd, mode);
-               while(!netpollblock(pd, mode, false)) {
-                       err = checkerr(pd, mode);
-                       if(err != 0)
-                               break;
-                       // Can happen if timeout has fired and unblocked us,
-                       // but before we had a chance to run, timeout has been reset.
-                       // Pretend it has not happened and retry.
-               }
-       }
-}
-
-func runtime_pollWaitCanceled(pd *PollDesc, mode int) {
-       // This function is used only on windows after a failed attempt to cancel
-       // a pending async IO operation. Wait for ioready, ignore closing or timeouts.
-       while(!netpollblock(pd, mode, true))
-               ;
-}
-
-func runtime_pollSetDeadline(pd *PollDesc, d int64, mode int) {
-       G *rg, *wg;
-
-       runtime_lock(pd);
-       if(pd->closing) {
-               runtime_unlock(pd);
-               return;
-       }
-       pd->seq++;  // invalidate current timers
-       // Reset current timers.
-       if(pd->rt.f) {
-               runtime_deltimer(&pd->rt);
-               pd->rt.f = nil;
-       }
-       if(pd->wt.f) {
-               runtime_deltimer(&pd->wt);
-               pd->wt.f = nil;
-       }
-       // Setup new timers.
-       if(d != 0 && d <= runtime_nanotime())
-               d = -1;
-       if(mode == 'r' || mode == 'r'+'w')
-               pd->rd = d;
-       if(mode == 'w' || mode == 'r'+'w')
-               pd->wd = d;
-       if(pd->rd > 0 && pd->rd == pd->wd) {
-               pd->rt.f = &deadlineFn;
-               pd->rt.when = pd->rd;
-               // Copy current seq into the timer arg.
-               // Timer func will check the seq against current descriptor seq,
-               // if they differ the descriptor was reused or timers were reset.
-               pd->rt.arg.type = nil; // should be *pollDesc type descriptor.
-               pd->rt.arg.data = pd;
-               pd->rt.seq = pd->seq;
-               runtime_addtimer(&pd->rt);
-       } else {
-               if(pd->rd > 0) {
-                       pd->rt.f = &readDeadlineFn;
-                       pd->rt.when = pd->rd;
-                       pd->rt.arg.type = nil; // should be *pollDesc type descriptor.
-                       pd->rt.arg.data = pd;
-                       pd->rt.seq = pd->seq;
-                       runtime_addtimer(&pd->rt);
-               }
-               if(pd->wd > 0) {
-                       pd->wt.f = &writeDeadlineFn;
-                       pd->wt.when = pd->wd;
-                       pd->wt.arg.type = nil; // should be *pollDesc type descriptor.
-                       pd->wt.arg.data = pd;
-                       pd->wt.seq = pd->seq;
-                       runtime_addtimer(&pd->wt);
-               }
-       }
-       // If we set the new deadline in the past, unblock currently pending IO if any.
-       rg = nil;
-       runtime_atomicstorep(&wg, nil);  // full memory barrier between stores to rd/wd and load of rg/wg in netpollunblock
-       if(pd->rd < 0)
-               rg = netpollunblock(pd, 'r', false);
-       if(pd->wd < 0)
-               wg = netpollunblock(pd, 'w', false);
-       runtime_unlock(pd);
-       if(rg)
-               runtime_ready(rg);
-       if(wg)
-               runtime_ready(wg);
-}
-
-func runtime_pollUnblock(pd *PollDesc) {
-       G *rg, *wg;
-
-       runtime_lock(pd);
-       if(pd->closing)
-               runtime_throw("runtime_pollUnblock: already closing");
-       pd->closing = true;
-       pd->seq++;
-       runtime_atomicstorep(&rg, nil);  // full memory barrier between store to closing and read of rg/wg in netpollunblock
-       rg = netpollunblock(pd, 'r', false);
-       wg = netpollunblock(pd, 'w', false);
-       if(pd->rt.f) {
-               runtime_deltimer(&pd->rt);
-               pd->rt.f = nil;
-       }
-       if(pd->wt.f) {
-               runtime_deltimer(&pd->wt);
-               pd->wt.f = nil;
-       }
-       runtime_unlock(pd);
-       if(rg)
-               runtime_ready(rg);
-       if(wg)
-               runtime_ready(wg);
-}
-
-uintptr
-runtime_netpollfd(PollDesc *pd)
-{
-       return pd->fd;
-}
-
-void**
-runtime_netpolluser(PollDesc *pd)
-{
-       return &pd->user;
-}
-
-bool
-runtime_netpollclosing(PollDesc *pd)
-{
-       return pd->closing;
-}
-
-void
-runtime_netpolllock(PollDesc *pd)
-{
-       runtime_lock(pd);
-}
-
-void
-runtime_netpollunlock(PollDesc *pd)
-{
-       runtime_unlock(pd);
-}
-
-// make pd ready, newly runnable goroutines (if any) are enqueued into gpp list
-void
-runtime_netpollready(G **gpp, PollDesc *pd, int32 mode)
-{
-       G *rg, *wg;
-
-       rg = wg = nil;
-       if(mode == 'r' || mode == 'r'+'w')
-               rg = netpollunblock(pd, 'r', true);
-       if(mode == 'w' || mode == 'r'+'w')
-               wg = netpollunblock(pd, 'w', true);
-       if(rg) {
-               rg->schedlink = (uintptr)*gpp;
-               *gpp = rg;
-       }
-       if(wg) {
-               wg->schedlink = (uintptr)*gpp;
-               *gpp = wg;
-       }
-}
-
-static intgo
-checkerr(PollDesc *pd, int32 mode)
-{
-       if(pd->closing)
-               return 1;  // errClosing
-       if((mode == 'r' && pd->rd < 0) || (mode == 'w' && pd->wd < 0))
-               return 2;  // errTimeout
-       return 0;
-}
-
-static bool
-blockcommit(G *gp, G **gpp)
-{
-       return runtime_casp(gpp, WAIT, gp);
-}
-
-// returns true if IO is ready, or false if timedout or closed
-// waitio - wait only for completed IO, ignore errors
-static bool
-netpollblock(PollDesc *pd, int32 mode, bool waitio)
-{
-       G **gpp, *old;
-
-       gpp = &pd->rg;
-       if(mode == 'w')
-               gpp = &pd->wg;
-
-       // set the gpp semaphore to WAIT
-       for(;;) {
-               old = *gpp;
-               if(old == READY) {
-                       *gpp = nil;
-                       return true;
-               }
-               if(old != nil)
-                       runtime_throw("netpollblock: double wait");
-               if(runtime_casp(gpp, nil, WAIT))
-                       break;
-       }
-
-       // need to recheck error states after setting gpp to WAIT
-       // this is necessary because runtime_pollUnblock/runtime_pollSetDeadline/deadlineimpl
-       // do the opposite: store to closing/rd/wd, membarrier, load of rg/wg
-       if(waitio || checkerr(pd, mode) == 0)
-               runtime_park((bool(*)(G*, void*))blockcommit, gpp, "IO wait");
-       // be careful to not lose concurrent READY notification
-       old = runtime_xchgp(gpp, nil);
-       if(old > WAIT)
-               runtime_throw("netpollblock: corrupted state");
-       return old == READY;
-}
-
-static G*
-netpollunblock(PollDesc *pd, int32 mode, bool ioready)
-{
-       G **gpp, *old, *new;
-
-       gpp = &pd->rg;
-       if(mode == 'w')
-               gpp = &pd->wg;
-
-       for(;;) {
-               old = *gpp;
-               if(old == READY)
-                       return nil;
-               if(old == nil && !ioready) {
-                       // Only set READY for ioready. runtime_pollWait
-                       // will check for timeout/cancel before waiting.
-                       return nil;
-               }
-               new = nil;
-               if(ioready)
-                       new = READY;
-               if(runtime_casp(gpp, old, new))
-                       break;
-       }
-       if(old > WAIT)
-               return old;  // must be G*
-       return nil;
-}
-
-static void
-deadlineimpl(Eface arg, uintptr seq, bool read, bool write)
-{
-       PollDesc *pd;
-       G *rg, *wg;
-
-       pd = (PollDesc*)arg.data;
-       rg = wg = nil;
-       runtime_lock(pd);
-       // Seq arg is seq when the timer was set.
-       // If it's stale, ignore the timer event.
-       if(seq != pd->seq) {
-               // The descriptor was reused or timers were reset.
-               runtime_unlock(pd);
-               return;
-       }
-       if(read) {
-               if(pd->rd <= 0 || pd->rt.f == nil)
-                       runtime_throw("deadlineimpl: inconsistent read deadline");
-               pd->rd = -1;
-               runtime_atomicstorep(&pd->rt.f, nil);  // full memory barrier between store to rd and load of rg in netpollunblock
-               rg = netpollunblock(pd, 'r', false);
-       }
-       if(write) {
-               if(pd->wd <= 0 || (pd->wt.f == nil && !read))
-                       runtime_throw("deadlineimpl: inconsistent write deadline");
-               pd->wd = -1;
-               runtime_atomicstorep(&pd->wt.f, nil);  // full memory barrier between store to wd and load of wg in netpollunblock
-               wg = netpollunblock(pd, 'w', false);
-       }
-       runtime_unlock(pd);
-       if(rg)
-               runtime_ready(rg);
-       if(wg)
-               runtime_ready(wg);
-}
-
-static void
-deadline(Eface arg, uintptr seq)
-{
-       deadlineimpl(arg, seq, true, true);
-}
-
-static void
-readDeadline(Eface arg, uintptr seq)
-{
-       deadlineimpl(arg, seq, true, false);
-}
-
-static void
-writeDeadline(Eface arg, uintptr seq)
-{
-       deadlineimpl(arg, seq, false, true);
-}
-
-static PollDesc*
-allocPollDesc(void)
-{
-       PollDesc *pd;
-       uint32 i, n;
-
-       runtime_lock(&pollcache);
-       if(pollcache.first == nil) {
-               n = PollBlockSize/sizeof(*pd);
-               if(n == 0)
-                       n = 1;
-               // Must be in non-GC memory because can be referenced
-               // only from epoll/kqueue internals.
-               pd = runtime_persistentalloc(n*sizeof(*pd), 0, &mstats()->other_sys);
-               for(i = 0; i < n; i++) {
-                       pd[i].link = pollcache.first;
-                       pollcache.first = &pd[i];
-               }
-       }
-       pd = pollcache.first;
-       pollcache.first = pd->link;
-       runtime_unlock(&pollcache);
-       return pd;
-}
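
The comment block at the top of the deleted file describes the rg/wg binary-semaphore protocol (nil, READY, WAIT, G pointer) that the new netpoll.go re-implements with atomic compare-and-swap. The following standalone Go sketch models only those state transitions with plain integers; the state constants, the block/unblock helpers and the waiter tokens are invented for illustration, and the real code parks and readies goroutines instead of returning tokens.

package main

import (
	"fmt"
	"sync/atomic"
)

const (
	stateNil   uint64 = 0
	stateReady uint64 = 1
	stateWait  uint64 = 2
	// any value above stateWait stands in for a parked-goroutine pointer
)

// block is the consumer side (netpollblock): true means IO was already ready.
func block(sema *uint64, waiter uint64) bool {
	for {
		old := atomic.LoadUint64(sema)
		if old == stateReady {
			if atomic.CompareAndSwapUint64(sema, stateReady, stateNil) {
				return true // consumed a pending readiness notification
			}
			continue
		}
		if old != stateNil {
			panic("double wait on the same semaphore")
		}
		if atomic.CompareAndSwapUint64(sema, stateNil, stateWait) {
			break
		}
	}
	// Commit to waiting by installing the waiter token (a G pointer in the
	// real code). If a concurrent unblock already flipped WAIT, resolve it.
	if !atomic.CompareAndSwapUint64(sema, stateWait, waiter) {
		return atomic.SwapUint64(sema, stateNil) == stateReady
	}
	return false // the caller would park here; the token stays until unblock
}

// unblock is the producer side (netpollunblock): returns a waiter to wake, if any.
func unblock(sema *uint64, ioready bool) uint64 {
	for {
		old := atomic.LoadUint64(sema)
		if old == stateReady {
			return 0
		}
		if old == stateNil && !ioready {
			return 0 // timeout/close never installs READY
		}
		next := stateNil
		if ioready {
			next = stateReady
		}
		if atomic.CompareAndSwapUint64(sema, old, next) {
			if old > stateWait {
				return old // a parked waiter for the caller to wake
			}
			return 0
		}
	}
}

func main() {
	var sema uint64
	fmt.Println(block(&sema, 100))    // false: waiter 100 is now "parked"
	fmt.Println(unblock(&sema, true)) // 100: IO ready, wake the waiter
	fmt.Println(block(&sema, 100))    // true: leftover READY consumed at once
}

Run as-is it prints false, 100, true: the first block installs a waiter, the readiness notification hands that waiter back to be woken, and a later block consumes the pending READY without waiting.
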
diff --git a/libgo/runtime/netpoll_epoll.c b/libgo/runtime/netpoll_epoll.c
deleted file mode 100644 (file)
index 1281f45..0000000
+++ /dev/null
@@ -1,174 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build linux
-
-#include <errno.h>
-#include <unistd.h>
-#include <fcntl.h>
-#include <sys/epoll.h>
-
-#include "runtime.h"
-#include "defs.h"
-#include "malloc.h"
-
-#ifndef EPOLLRDHUP
-#define EPOLLRDHUP 0x2000
-#endif
-
-#ifndef EPOLL_CLOEXEC
-#define EPOLL_CLOEXEC 02000000
-#endif
-
-#ifndef HAVE_EPOLL_CREATE1
-extern int epoll_create1(int __flags);
-#endif
-
-typedef struct epoll_event EpollEvent;
-
-static int32
-runtime_epollcreate(int32 size)
-{
-       int r;
-
-       r = epoll_create(size);
-       if(r >= 0)
-               return r;
-       return - errno;
-}
-
-static int32
-runtime_epollcreate1(int32 flags)
-{
-       int r;
-
-       r = epoll_create1(flags);
-       if(r >= 0)
-               return r;
-       return - errno;
-}
-
-static int32
-runtime_epollctl(int32 epfd, int32 op, int32 fd, EpollEvent *ev)
-{
-       int r;
-
-       r = epoll_ctl(epfd, op, fd, ev);
-       if(r >= 0)
-               return r;
-       return - errno;
-}
-
-static int32
-runtime_epollwait(int32 epfd, EpollEvent *ev, int32 nev, int32 timeout)
-{
-       int r;
-
-       r = epoll_wait(epfd, ev, nev, timeout);
-       if(r >= 0)
-               return r;
-       return - errno;
-}
-
-static void
-runtime_closeonexec(int32 fd)
-{
-       fcntl(fd, F_SETFD, FD_CLOEXEC);
-}
-
-static int32 epfd = -1;  // epoll descriptor
-
-void
-runtime_netpollinit(void)
-{
-       epfd = runtime_epollcreate1(EPOLL_CLOEXEC);
-       if(epfd >= 0)
-               return;
-       epfd = runtime_epollcreate(1024);
-       if(epfd >= 0) {
-               runtime_closeonexec(epfd);
-               return;
-       }
-       runtime_printf("netpollinit: failed to create descriptor (%d)\n", -epfd);
-       runtime_throw("netpollinit: failed to create descriptor");
-}
-
-int32
-runtime_netpollopen(uintptr fd, PollDesc *pd)
-{
-       EpollEvent ev;
-       int32 res;
-
-       ev.events = EPOLLIN|EPOLLOUT|EPOLLRDHUP|EPOLLET;
-       ev.data.ptr = (void*)pd;
-       res = runtime_epollctl(epfd, EPOLL_CTL_ADD, (int32)fd, &ev);
-       return -res;
-}
-
-int32
-runtime_netpollclose(uintptr fd)
-{
-       EpollEvent ev;
-       int32 res;
-
-       res = runtime_epollctl(epfd, EPOLL_CTL_DEL, (int32)fd, &ev);
-       return -res;
-}
-
-void
-runtime_netpollarm(PollDesc* pd, int32 mode)
-{
-       USED(pd);
-       USED(mode);
-       runtime_throw("unused");
-}
-
-// polls for ready network connections
-// returns list of goroutines that become runnable
-G*
-runtime_netpoll(bool block)
-{
-       static int32 lasterr;
-       EpollEvent events[128], *ev;
-       int32 n, i, waitms, mode;
-       G *gp;
-
-       if(epfd == -1)
-               return nil;
-       waitms = -1;
-       if(!block)
-               waitms = 0;
-retry:
-       n = runtime_epollwait(epfd, events, nelem(events), waitms);
-       if(n < 0) {
-               if(n != -EINTR && n != lasterr) {
-                       lasterr = n;
-                       runtime_printf("runtime: epollwait on fd %d failed with %d\n", epfd, -n);
-               }
-               goto retry;
-       }
-       gp = nil;
-       for(i = 0; i < n; i++) {
-               ev = &events[i];
-               if(ev->events == 0)
-                       continue;
-               mode = 0;
-               if(ev->events & (EPOLLIN|EPOLLRDHUP|EPOLLHUP|EPOLLERR))
-                       mode += 'r';
-               if(ev->events & (EPOLLOUT|EPOLLHUP|EPOLLERR))
-                       mode += 'w';
-               if(mode)
-                       runtime_netpollready(&gp, (void*)ev->data.ptr, mode);
-       }
-       if(block && gp == nil)
-               goto retry;
-       return gp;
-}
-
-void
-runtime_netpoll_scan(struct Workbuf** wbufp, void (*enqueue1)(struct Workbuf**, Obj))
-{
-       USED(wbufp);
-       USED(enqueue1);
-}
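
The deleted epoll poller above is replaced by netpoll_epoll.go, but the shape of the design survives: one epoll descriptor created at init, each fd registered once, edge-triggered, for both read and write, and readiness folded into an 'r'/'w' mode. This is a standalone, Linux-only sketch of that pattern using the public syscall package (a pipe stands in for a socket; the runtime uses its own raw wrappers rather than package syscall).

// +build linux

// One epoll fd, edge-triggered (EPOLLET) registration for read and write,
// readiness reported as an "r"/"w" mode string.
package main

import (
	"fmt"
	"syscall"
)

// syscall.EPOLLET is declared as a negative int constant, so spell out the bit.
const epollET = 1 << 31

func main() {
	epfd, err := syscall.EpollCreate1(syscall.EPOLL_CLOEXEC)
	if err != nil {
		panic(err)
	}
	defer syscall.Close(epfd)

	// A pipe stands in for a network descriptor.
	var p [2]int
	if err := syscall.Pipe(p[:]); err != nil {
		panic(err)
	}

	ev := syscall.EpollEvent{
		Events: syscall.EPOLLIN | syscall.EPOLLOUT | syscall.EPOLLRDHUP | epollET,
		Fd:     int32(p[0]),
	}
	if err := syscall.EpollCtl(epfd, syscall.EPOLL_CTL_ADD, p[0], &ev); err != nil {
		panic(err)
	}

	syscall.Write(p[1], []byte("x")) // make the read end readable

	events := make([]syscall.EpollEvent, 128)
	n, err := syscall.EpollWait(epfd, events, -1)
	if err != nil {
		panic(err)
	}
	for i := 0; i < n; i++ {
		mode := ""
		if events[i].Events&(syscall.EPOLLIN|syscall.EPOLLRDHUP|syscall.EPOLLHUP|syscall.EPOLLERR) != 0 {
			mode += "r"
		}
		if events[i].Events&(syscall.EPOLLOUT|syscall.EPOLLHUP|syscall.EPOLLERR) != 0 {
			mode += "w"
		}
		fmt.Printf("fd %d ready, mode %q\n", events[i].Fd, mode)
	}
}
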
diff --git a/libgo/runtime/netpoll_kqueue.c b/libgo/runtime/netpoll_kqueue.c
deleted file mode 100644 (file)
index 5144a87..0000000
+++ /dev/null
@@ -1,118 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build darwin dragonfly freebsd netbsd openbsd
-
-#include "runtime.h"
-#include "defs.h"
-#include "malloc.h"
-
-// Integrated network poller (kqueue-based implementation).
-
-int32  runtime_kqueue(void);
-int32  runtime_kevent(int32, Kevent*, int32, Kevent*, int32, Timespec*);
-void   runtime_closeonexec(int32);
-
-static int32 kq = -1;
-
-void
-runtime_netpollinit(void)
-{
-       kq = runtime_kqueue();
-       if(kq < 0) {
-               runtime_printf("netpollinit: kqueue failed with %d\n", -kq);
-               runtime_throw("netpollinit: kqueue failed");
-       }
-       runtime_closeonexec(kq);
-}
-
-int32
-runtime_netpollopen(uintptr fd, PollDesc *pd)
-{
-       Kevent ev[2];
-       int32 n;
-
-       // Arm both EVFILT_READ and EVFILT_WRITE in edge-triggered mode (EV_CLEAR)
-       // for the whole fd lifetime.  The notifications are automatically unregistered
-       // when fd is closed.
-       ev[0].ident = (uint32)fd;
-       ev[0].filter = EVFILT_READ;
-       ev[0].flags = EV_ADD|EV_CLEAR;
-       ev[0].fflags = 0;
-       ev[0].data = 0;
-       ev[0].udata = (kevent_udata)pd;
-       ev[1] = ev[0];
-       ev[1].filter = EVFILT_WRITE;
-       n = runtime_kevent(kq, ev, 2, nil, 0, nil);
-       if(n < 0)
-               return -n;
-       return 0;
-}
-
-int32
-runtime_netpollclose(uintptr fd)
-{
-       // Don't need to unregister because calling close()
-       // on fd will remove any kevents that reference the descriptor.
-       USED(fd);
-       return 0;
-}
-
-void
-runtime_netpollarm(PollDesc* pd, int32 mode)
-{
-       USED(pd, mode);
-       runtime_throw("unused");
-}
-
-// Polls for ready network connections.
-// Returns list of goroutines that become runnable.
-G*
-runtime_netpoll(bool block)
-{
-       static int32 lasterr;
-       Kevent events[64], *ev;
-       Timespec ts, *tp;
-       int32 n, i, mode;
-       G *gp;
-
-       if(kq == -1)
-               return nil;
-       tp = nil;
-       if(!block) {
-               ts.tv_sec = 0;
-               ts.tv_nsec = 0;
-               tp = &ts;
-       }
-       gp = nil;
-retry:
-       n = runtime_kevent(kq, nil, 0, events, nelem(events), tp);
-       if(n < 0) {
-               if(n != -EINTR && n != lasterr) {
-                       lasterr = n;
-                       runtime_printf("runtime: kevent on fd %d failed with %d\n", kq, -n);
-               }
-               goto retry;
-       }
-       for(i = 0; i < n; i++) {
-               ev = &events[i];
-               mode = 0;
-               if(ev->filter == EVFILT_READ)
-                       mode += 'r';
-               if(ev->filter == EVFILT_WRITE)
-                       mode += 'w';
-               if(mode)
-                       runtime_netpollready(&gp, (PollDesc*)ev->udata, mode);
-       }
-       if(block && gp == nil)
-               goto retry;
-       return gp;
-}
-
-void
-runtime_netpoll_scan(struct Workbuf** wbufp, void (*enqueue1)(struct Workbuf**, Obj))
-{
-       USED(wbufp);
-       USED(enqueue1);
-}
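
The kqueue variant follows the same one-time, edge-triggered registration idea: EVFILT_READ and EVFILT_WRITE are both added with EV_ADD|EV_CLEAR and never re-armed. A hedged standalone sketch using the public syscall package on the BSDs and macOS (a Unix socket pair stands in for a network fd; the runtime version now lives in netpoll_kqueue.go).

// +build darwin dragonfly freebsd netbsd openbsd

// Register read and write filters once per fd with EV_ADD|EV_CLEAR, then
// collect whichever filters fire.
package main

import (
	"fmt"
	"syscall"
)

func main() {
	kq, err := syscall.Kqueue()
	if err != nil {
		panic(err)
	}
	defer syscall.Close(kq)

	// A Unix-domain socket pair stands in for a network descriptor.
	fds, err := syscall.Socketpair(syscall.AF_UNIX, syscall.SOCK_STREAM, 0)
	if err != nil {
		panic(err)
	}

	var changes [2]syscall.Kevent_t
	syscall.SetKevent(&changes[0], fds[0], syscall.EVFILT_READ, syscall.EV_ADD|syscall.EV_CLEAR)
	syscall.SetKevent(&changes[1], fds[0], syscall.EVFILT_WRITE, syscall.EV_ADD|syscall.EV_CLEAR)
	if _, err := syscall.Kevent(kq, changes[:], nil, nil); err != nil {
		panic(err)
	}

	syscall.Write(fds[1], []byte("x")) // make fds[0] readable

	events := make([]syscall.Kevent_t, 64)
	n, err := syscall.Kevent(kq, nil, events, nil)
	if err != nil {
		panic(err)
	}
	for i := 0; i < n; i++ {
		switch events[i].Filter {
		case syscall.EVFILT_READ:
			fmt.Println("fd ready for read:", events[i].Ident)
		case syscall.EVFILT_WRITE:
			fmt.Println("fd ready for write:", events[i].Ident)
		}
	}
}
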
diff --git a/libgo/runtime/netpoll_select.c b/libgo/runtime/netpoll_select.c
deleted file mode 100644 (file)
index b32a1d5..0000000
+++ /dev/null
@@ -1,256 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build solaris
-
-#include "config.h"
-
-#include <errno.h>
-#include <sys/times.h>
-#include <sys/types.h>
-#include <unistd.h>
-#include <fcntl.h>
-
-#ifdef HAVE_SYS_SELECT_H
-#include <sys/select.h>
-#endif
-
-#include "runtime.h"
-#include "malloc.h"
-
-static Lock selectlock;
-static int rdwake;
-static int wrwake;
-static fd_set fds;
-static PollDesc **data;
-static int allocated;
-
-void
-runtime_netpollinit(void)
-{
-       int p[2];
-       int fl;
-
-       FD_ZERO(&fds);
-       allocated = 128;
-       data = runtime_mallocgc(allocated * sizeof(PollDesc *), 0,
-                               FlagNoScan|FlagNoProfiling|FlagNoInvokeGC);
-
-       if(pipe(p) < 0)
-               runtime_throw("netpollinit: failed to create pipe");
-       rdwake = p[0];
-       wrwake = p[1];
-
-       fl = fcntl(rdwake, F_GETFL);
-       if(fl < 0)
-               runtime_throw("netpollinit: fcntl failed");
-       fl |= O_NONBLOCK;
-       if(fcntl(rdwake, F_SETFL, fl))
-                runtime_throw("netpollinit: fcntl failed");
-       fcntl(rdwake, F_SETFD, FD_CLOEXEC);
-
-       fl = fcntl(wrwake, F_GETFL);
-       if(fl < 0)
-               runtime_throw("netpollinit: fcntl failed");
-       fl |= O_NONBLOCK;
-       if(fcntl(wrwake, F_SETFL, fl))
-                runtime_throw("netpollinit: fcntl failed");
-       fcntl(wrwake, F_SETFD, FD_CLOEXEC);
-
-       FD_SET(rdwake, &fds);
-}
-
-int32
-runtime_netpollopen(uintptr fd, PollDesc *pd)
-{
-       byte b;
-
-       runtime_lock(&selectlock);
-
-       if((int)fd >= allocated) {
-               int c;
-               PollDesc **n;
-
-               c = allocated;
-
-               runtime_unlock(&selectlock);
-
-               while((int)fd >= c)
-                       c *= 2;
-               n = runtime_mallocgc(c * sizeof(PollDesc *), 0,
-                                    FlagNoScan|FlagNoProfiling|FlagNoInvokeGC);
-
-               runtime_lock(&selectlock);
-
-               if(c > allocated) {
-                       __builtin_memcpy(n, data, allocated * sizeof(PollDesc *));
-                       allocated = c;
-                       data = n;
-               }
-       }
-       FD_SET(fd, &fds);
-       data[fd] = pd;
-
-       runtime_unlock(&selectlock);
-
-       b = 0;
-       write(wrwake, &b, sizeof b);
-
-       return 0;
-}
-
-int32
-runtime_netpollclose(uintptr fd)
-{
-       byte b;
-
-       runtime_lock(&selectlock);
-
-       FD_CLR(fd, &fds);
-       data[fd] = nil;
-
-       runtime_unlock(&selectlock);
-
-       b = 0;
-       write(wrwake, &b, sizeof b);
-
-       return 0;
-}
-
-/* Used to avoid using too much stack memory.  */
-static bool inuse;
-static fd_set grfds, gwfds, gefds, gtfds;
-
-G*
-runtime_netpoll(bool block)
-{
-       fd_set *prfds, *pwfds, *pefds, *ptfds;
-       bool allocatedfds;
-       struct timeval timeout;
-       struct timeval *pt;
-       int max, c, i;
-       G *gp;
-       int32 mode;
-       byte b;
-       struct stat st;
-
-       allocatedfds = false;
-
- retry:
-       runtime_lock(&selectlock);
-
-       max = allocated;
-
-       if(max == 0) {
-               runtime_unlock(&selectlock);
-               return nil;
-       }
-
-       if(inuse) {
-               if(!allocatedfds) {
-                       prfds = runtime_SysAlloc(4 * sizeof fds, &mstats()->other_sys);
-                       pwfds = prfds + 1;
-                       pefds = pwfds + 1;
-                       ptfds = pefds + 1;
-                       allocatedfds = true;
-               }
-       } else {
-               prfds = &grfds;
-               pwfds = &gwfds;
-               pefds = &gefds;
-               ptfds = &gtfds;
-               inuse = true;
-               allocatedfds = false;
-       }
-
-       __builtin_memcpy(prfds, &fds, sizeof fds);
-
-       runtime_unlock(&selectlock);
-
-       __builtin_memcpy(pwfds, prfds, sizeof fds);
-       FD_CLR(rdwake, pwfds);
-       __builtin_memcpy(pefds, pwfds, sizeof fds);
-
-       __builtin_memcpy(ptfds, pwfds, sizeof fds);
-
-       __builtin_memset(&timeout, 0, sizeof timeout);
-       pt = &timeout;
-       if(block)
-               pt = nil;
-
-       c = select(max, prfds, pwfds, pefds, pt);
-       if(c < 0) {
-               if(errno == EBADF) {
-                       // Some file descriptor has been closed.
-                       // Check each one, and treat each closed
-                       // descriptor as ready for read/write.
-                       c = 0;
-                       FD_ZERO(prfds);
-                       FD_ZERO(pwfds);
-                       FD_ZERO(pefds);
-                       for(i = 0; i < max; i++) {
-                               if(FD_ISSET(i, ptfds)
-                                  && fstat(i, &st) < 0
-                                  && errno == EBADF) {
-                                       FD_SET(i, prfds);
-                                       FD_SET(i, pwfds);
-                                       c += 2;
-                               }
-                       }
-               }
-               else {
-                       if(errno != EINTR)
-                               runtime_printf("runtime: select failed with %d\n", errno);
-                       goto retry;
-               }
-       }
-       gp = nil;
-       for(i = 0; i < max && c > 0; i++) {
-               mode = 0;
-               if(FD_ISSET(i, prfds)) {
-                       mode += 'r';
-                       --c;
-               }
-               if(FD_ISSET(i, pwfds)) {
-                       mode += 'w';
-                       --c;
-               }
-               if(FD_ISSET(i, pefds)) {
-                       mode = 'r' + 'w';
-                       --c;
-               }
-               if(i == rdwake && mode != 0) {
-                       while(read(rdwake, &b, sizeof b) > 0)
-                               ;
-                       continue;
-               }
-               if(mode) {
-                       PollDesc *pd;
-
-                       runtime_lock(&selectlock);
-                       pd = data[i];
-                       runtime_unlock(&selectlock);
-                       if(pd != nil)
-                               runtime_netpollready(&gp, pd, mode);
-               }
-       }
-       if(block && gp == nil)
-               goto retry;
-
-       if(allocatedfds) {
-               runtime_SysFree(prfds, 4 * sizeof fds, &mstats()->other_sys);
-       } else {
-               runtime_lock(&selectlock);
-               inuse = false;
-               runtime_unlock(&selectlock);
-       }
-
-       return gp;
-}
-
-void
-runtime_netpoll_scan(struct Workbuf** wbufp, void (*enqueue1)(struct Workbuf**, Obj))
-{
-       enqueue1(wbufp, (Obj){(byte*)&data, sizeof data, 0});
-}
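
The select()-based fallback deleted above is built around a self-pipe: rdwake/wrwake form a non-blocking pipe that is always part of the fd set, so a thread that registers or closes a descriptor can wake a poller blocked in select() by writing one byte. The new Solaris code uses event ports instead. A minimal Go illustration of just that wakeup mechanism (os.Pipe and the goroutine standing in for the blocked select() call are illustrative, not the runtime code).

// A byte written to the pipe wakes a "poller" blocked on its read end,
// the same way runtime_netpollopen/runtime_netpollclose poke wrwake above.
package main

import (
	"fmt"
	"os"
)

func main() {
	r, w, err := os.Pipe()
	if err != nil {
		panic(err)
	}

	woken := make(chan struct{})
	go func() {
		// Stands in for the poller blocked in select() with rdwake in its set.
		buf := make([]byte, 16)
		n, _ := r.Read(buf) // blocks until the wakeup byte arrives
		fmt.Printf("poller woken, drained %d byte(s)\n", n)
		close(woken)
	}()

	w.Write([]byte{0}) // another thread changed the fd set: wake the poller
	<-woken
}
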
diff --git a/libgo/runtime/netpoll_stub.c b/libgo/runtime/netpoll_stub.c
deleted file mode 100644 (file)
index 468a610..0000000
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build plan9
-
-#include "runtime.h"
-#include "malloc.h"
-
-// Polls for ready network connections.
-// Returns list of goroutines that become runnable.
-G*
-runtime_netpoll(bool block)
-{
-       // Implementation for platforms that do not support
-       // integrated network poller.
-       USED(block);
-       return nil;
-}
-
-void
-runtime_netpoll_scan(struct Workbuf** wbufp, void (*enqueue1)(struct Workbuf**, Obj))
-{
-       USED(wbufp);
-       USED(enqueue1);
-}
index dedc57452f941d813d65bb817492dab7110c0abb..e60eaed63744cfe7ae6040f50d966c4ed0a75df4 100644 (file)
@@ -391,21 +391,8 @@ int64      runtime_tickspersecond(void)
      __asm__ (GOSYM_PREFIX "runtime.tickspersecond");
 void   runtime_blockevent(int64, int32);
 extern int64 runtime_blockprofilerate;
-void   runtime_addtimer(Timer*)
-  __asm__ (GOSYM_PREFIX "runtime.addtimer");
-bool   runtime_deltimer(Timer*)
-  __asm__ (GOSYM_PREFIX "runtime.deltimer");
-G*     runtime_netpoll(bool);
-void   runtime_netpollinit(void);
-int32  runtime_netpollopen(uintptr, PollDesc*);
-int32   runtime_netpollclose(uintptr);
-void   runtime_netpollready(G**, PollDesc*, int32);
-uintptr        runtime_netpollfd(PollDesc*);
-void   runtime_netpollarm(PollDesc*, int32);
-void** runtime_netpolluser(PollDesc*);
-bool   runtime_netpollclosing(PollDesc*);
-void   runtime_netpolllock(PollDesc*);
-void   runtime_netpollunlock(PollDesc*);
+G*     runtime_netpoll(bool)
+  __asm__ (GOSYM_PREFIX "runtime.netpoll");
 void   runtime_crash(void);
 void   runtime_parsedebugvars(void)
   __asm__(GOSYM_PREFIX "runtime.parsedebugvars");
index 09c0f496a6e635439be5a3931ede4189f21287a5..56790c604159468c049da3bfa35644e86b1758d6 100644 (file)
@@ -9,6 +9,7 @@
 
 #include "config.h"
 
+#include <stddef.h>
 #include <sys/types.h>
 #include <dirent.h>
 #include <errno.h>
@@ -49,6 +50,9 @@
 #if defined(HAVE_SYS_EPOLL_H)
 #include <sys/epoll.h>
 #endif
+#if defined(HAVE_SYS_EVENT_H)
+#include <sys/event.h>
+#endif
 #if defined(HAVE_SYS_FILE_H)
 #include <sys/file.h>
 #endif
 #if defined(HAVE_SEMAPHORE_H)
 #include <semaphore.h>
 #endif
+#if defined(HAVE_PORT_H)
+#include <port.h>
+#endif
 
 /* Constants that may only be defined as expressions on some systems,
    expressions too complex for -fdump-go-spec to handle.  These are
@@ -260,3 +267,9 @@ enum {
   NLA_HDRLEN_val = NLA_HDRLEN,
 #endif
 };
+
+#if defined(HAVE_SYS_EPOLL_H)
+enum {
+  epoll_data_offset = offsetof(struct epoll_event, data)
+};
+#endif