-50c8fc924389
+63484e8b6b76
The first line of this file holds the Mercurial revision number of the
last merge done from the master library sources.
runtime/go-eface-compare.c \
runtime/go-eface-val-compare.c \
runtime/go-fieldtrack.c \
- runtime/go-getgoroot.c \
runtime/go-int-array-to-string.c \
runtime/go-int-to-string.c \
runtime/go-interface-compare.c \
runtime/go-type-interface.c \
runtime/go-type-string.c \
runtime/go-typedesc-equal.c \
- runtime/go-typestring.c \
runtime/go-unsafe-new.c \
runtime/go-unsafe-newarray.c \
runtime/go-unsafe-pointer.c \
runtime/go-unwind.c \
runtime/go-varargs.c \
- runtime/chan.c \
- runtime/cpuprof.c \
runtime/env_posix.c \
- runtime/lfstack.c \
$(runtime_lock_files) \
runtime/mcache.c \
runtime/mcentral.c \
runtime/thread.c \
runtime/yield.c \
$(rtems_task_variable_add_file) \
+ chan.c \
+ cpuprof.c \
go-iface.c \
+ lfstack.c \
malloc.c \
map.c \
mprof.c \
netpoll.c \
+ rdebug.c \
reflect.c \
runtime1.c \
sema.c \
go-check-interface.lo go-construct-map.lo \
go-convert-interface.lo go-copy.lo go-defer.lo \
go-deferred-recover.lo go-eface-compare.lo \
- go-eface-val-compare.lo go-fieldtrack.lo go-getgoroot.lo \
+ go-eface-val-compare.lo go-fieldtrack.lo \
go-int-array-to-string.lo go-int-to-string.lo \
go-interface-compare.lo go-interface-eface-compare.lo \
go-interface-val-compare.lo go-make-slice.lo go-map-delete.lo \
go-strplus.lo go-strslice.lo go-traceback.lo \
go-type-complex.lo go-type-eface.lo go-type-error.lo \
go-type-float.lo go-type-identity.lo go-type-interface.lo \
- go-type-string.lo go-typedesc-equal.lo go-typestring.lo \
- go-unsafe-new.lo go-unsafe-newarray.lo go-unsafe-pointer.lo \
- go-unwind.lo go-varargs.lo chan.lo cpuprof.lo env_posix.lo \
- lfstack.lo $(am__objects_1) mcache.lo mcentral.lo \
- $(am__objects_2) mfixalloc.lo mgc0.lo mheap.lo msize.lo \
- $(am__objects_3) panic.lo parfor.lo print.lo proc.lo \
+ go-type-string.lo go-typedesc-equal.lo go-unsafe-new.lo \
+ go-unsafe-newarray.lo go-unsafe-pointer.lo go-unwind.lo \
+ go-varargs.lo env_posix.lo $(am__objects_1) mcache.lo \
+ mcentral.lo $(am__objects_2) mfixalloc.lo mgc0.lo mheap.lo \
+ msize.lo $(am__objects_3) panic.lo parfor.lo print.lo proc.lo \
runtime.lo signal_unix.lo thread.lo yield.lo $(am__objects_4) \
- go-iface.lo malloc.lo map.lo mprof.lo netpoll.lo reflect.lo \
- runtime1.lo sema.lo sigqueue.lo string.lo time.lo \
- $(am__objects_5)
+ chan.lo cpuprof.lo go-iface.lo lfstack.lo malloc.lo map.lo \
+ mprof.lo netpoll.lo rdebug.lo reflect.lo runtime1.lo sema.lo \
+ sigqueue.lo string.lo time.lo $(am__objects_5)
am_libgo_la_OBJECTS = $(am__objects_6)
libgo_la_OBJECTS = $(am_libgo_la_OBJECTS)
libgo_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
runtime/go-eface-compare.c \
runtime/go-eface-val-compare.c \
runtime/go-fieldtrack.c \
- runtime/go-getgoroot.c \
runtime/go-int-array-to-string.c \
runtime/go-int-to-string.c \
runtime/go-interface-compare.c \
runtime/go-type-interface.c \
runtime/go-type-string.c \
runtime/go-typedesc-equal.c \
- runtime/go-typestring.c \
runtime/go-unsafe-new.c \
runtime/go-unsafe-newarray.c \
runtime/go-unsafe-pointer.c \
runtime/go-unwind.c \
runtime/go-varargs.c \
- runtime/chan.c \
- runtime/cpuprof.c \
runtime/env_posix.c \
- runtime/lfstack.c \
$(runtime_lock_files) \
runtime/mcache.c \
runtime/mcentral.c \
runtime/thread.c \
runtime/yield.c \
$(rtems_task_variable_add_file) \
+ chan.c \
+ cpuprof.c \
go-iface.c \
+ lfstack.c \
malloc.c \
map.c \
mprof.c \
netpoll.c \
+ rdebug.c \
reflect.c \
runtime1.c \
sema.c \
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-eface-compare.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-eface-val-compare.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-fieldtrack.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-getgoroot.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-iface.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-int-array-to-string.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-int-to-string.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-type-interface.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-type-string.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-typedesc-equal.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-typestring.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-unsafe-new.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-unsafe-newarray.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-unsafe-pointer.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/parfor.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/print.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/proc.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/rdebug.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/reflect.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/rtems-task-variable-add.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/runtime.Plo@am__quote@
@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o go-fieldtrack.lo `test -f 'runtime/go-fieldtrack.c' || echo '$(srcdir)/'`runtime/go-fieldtrack.c
-go-getgoroot.lo: runtime/go-getgoroot.c
-@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT go-getgoroot.lo -MD -MP -MF $(DEPDIR)/go-getgoroot.Tpo -c -o go-getgoroot.lo `test -f 'runtime/go-getgoroot.c' || echo '$(srcdir)/'`runtime/go-getgoroot.c
-@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/go-getgoroot.Tpo $(DEPDIR)/go-getgoroot.Plo
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='runtime/go-getgoroot.c' object='go-getgoroot.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o go-getgoroot.lo `test -f 'runtime/go-getgoroot.c' || echo '$(srcdir)/'`runtime/go-getgoroot.c
-
go-int-array-to-string.lo: runtime/go-int-array-to-string.c
@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT go-int-array-to-string.lo -MD -MP -MF $(DEPDIR)/go-int-array-to-string.Tpo -c -o go-int-array-to-string.lo `test -f 'runtime/go-int-array-to-string.c' || echo '$(srcdir)/'`runtime/go-int-array-to-string.c
@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/go-int-array-to-string.Tpo $(DEPDIR)/go-int-array-to-string.Plo
@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o go-typedesc-equal.lo `test -f 'runtime/go-typedesc-equal.c' || echo '$(srcdir)/'`runtime/go-typedesc-equal.c
-go-typestring.lo: runtime/go-typestring.c
-@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT go-typestring.lo -MD -MP -MF $(DEPDIR)/go-typestring.Tpo -c -o go-typestring.lo `test -f 'runtime/go-typestring.c' || echo '$(srcdir)/'`runtime/go-typestring.c
-@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/go-typestring.Tpo $(DEPDIR)/go-typestring.Plo
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='runtime/go-typestring.c' object='go-typestring.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o go-typestring.lo `test -f 'runtime/go-typestring.c' || echo '$(srcdir)/'`runtime/go-typestring.c
-
go-unsafe-new.lo: runtime/go-unsafe-new.c
@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT go-unsafe-new.lo -MD -MP -MF $(DEPDIR)/go-unsafe-new.Tpo -c -o go-unsafe-new.lo `test -f 'runtime/go-unsafe-new.c' || echo '$(srcdir)/'`runtime/go-unsafe-new.c
@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/go-unsafe-new.Tpo $(DEPDIR)/go-unsafe-new.Plo
@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o go-varargs.lo `test -f 'runtime/go-varargs.c' || echo '$(srcdir)/'`runtime/go-varargs.c
-chan.lo: runtime/chan.c
-@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT chan.lo -MD -MP -MF $(DEPDIR)/chan.Tpo -c -o chan.lo `test -f 'runtime/chan.c' || echo '$(srcdir)/'`runtime/chan.c
-@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/chan.Tpo $(DEPDIR)/chan.Plo
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='runtime/chan.c' object='chan.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o chan.lo `test -f 'runtime/chan.c' || echo '$(srcdir)/'`runtime/chan.c
-
-cpuprof.lo: runtime/cpuprof.c
-@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT cpuprof.lo -MD -MP -MF $(DEPDIR)/cpuprof.Tpo -c -o cpuprof.lo `test -f 'runtime/cpuprof.c' || echo '$(srcdir)/'`runtime/cpuprof.c
-@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/cpuprof.Tpo $(DEPDIR)/cpuprof.Plo
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='runtime/cpuprof.c' object='cpuprof.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o cpuprof.lo `test -f 'runtime/cpuprof.c' || echo '$(srcdir)/'`runtime/cpuprof.c
-
env_posix.lo: runtime/env_posix.c
@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT env_posix.lo -MD -MP -MF $(DEPDIR)/env_posix.Tpo -c -o env_posix.lo `test -f 'runtime/env_posix.c' || echo '$(srcdir)/'`runtime/env_posix.c
@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/env_posix.Tpo $(DEPDIR)/env_posix.Plo
@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o env_posix.lo `test -f 'runtime/env_posix.c' || echo '$(srcdir)/'`runtime/env_posix.c
-lfstack.lo: runtime/lfstack.c
-@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT lfstack.lo -MD -MP -MF $(DEPDIR)/lfstack.Tpo -c -o lfstack.lo `test -f 'runtime/lfstack.c' || echo '$(srcdir)/'`runtime/lfstack.c
-@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/lfstack.Tpo $(DEPDIR)/lfstack.Plo
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='runtime/lfstack.c' object='lfstack.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o lfstack.lo `test -f 'runtime/lfstack.c' || echo '$(srcdir)/'`runtime/lfstack.c
-
lock_sema.lo: runtime/lock_sema.c
@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT lock_sema.lo -MD -MP -MF $(DEPDIR)/lock_sema.Tpo -c -o lock_sema.lo `test -f 'runtime/lock_sema.c' || echo '$(srcdir)/'`runtime/lock_sema.c
@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/lock_sema.Tpo $(DEPDIR)/lock_sema.Plo
Pushcnt uintptr
}
-func lfstackpush(head *uint64, node *LFNode)
-func lfstackpop2(head *uint64) *LFNode
+func lfstackpush_go(head *uint64, node *LFNode)
+func lfstackpop_go(head *uint64) *LFNode
-var LFStackPush = lfstackpush
-var LFStackPop = lfstackpop2
+var LFStackPush = lfstackpush_go
+var LFStackPop = lfstackpop_go
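For context, these exported hooks let the runtime tests drive the lock-free stack from Go. A minimal sketch of their use, along the lines of runtime/lfstack_test.go (the MyNode type and the unsafe conversions here are assumptions for illustration; the stack links LFNodes, so LFNode must be the first field):

	type MyNode struct {
		LFNode
		data int
	}

	stack := new(uint64) // an empty lock-free stack is a zeroed uint64
	node := &MyNode{data: 42}
	LFStackPush(stack, (*LFNode)(unsafe.Pointer(node)))
	got := (*MyNode)(unsafe.Pointer(LFStackPop(stack)))
	if got == nil || got.data != 42 {
		panic("lfstack: push/pop mismatch")
	}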
type ParFor struct {
body *byte
wait bool
}
-func parforalloc2(nthrmax uint32) *ParFor
-func parforsetup2(desc *ParFor, nthr, n uint32, ctx *byte, wait bool, body func(*ParFor, uint32))
-func parfordo(desc *ParFor)
-func parforiters(desc *ParFor, tid uintptr) (uintptr, uintptr)
+func newParFor(nthrmax uint32) *ParFor
+func parForSetup(desc *ParFor, nthr, n uint32, ctx *byte, wait bool, body func(*ParFor, uint32))
+func parForDo(desc *ParFor)
+func parForIters(desc *ParFor, tid uintptr) (uintptr, uintptr)
-var NewParFor = parforalloc2
-var ParForSetup = parforsetup2
-var ParForDo = parfordo
+var NewParFor = newParFor
+var ParForSetup = parForSetup
+var ParForDo = parForDo
func ParForIters(desc *ParFor, tid uint32) (uint32, uint32) {
- begin, end := parforiters(desc, uintptr(tid))
+ begin, end := parForIters(desc, uintptr(tid))
return uint32(begin), uint32(end)
}
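These ParFor hooks are exercised by the parallel-for tests; a minimal sketch of the call sequence, following runtime/parfor_test.go (P and N are the test's own constants):

	const P, N = 1, 64
	data := make([]uint64, N)
	for i := range data {
		data[i] = uint64(i)
	}
	desc := NewParFor(P) // allocate a descriptor for up to P threads
	// nthr=P, n=N iterations, no context pointer, wait=true,
	// body invoked once per iteration index.
	ParForSetup(desc, P, N, nil, true, func(desc *ParFor, i uint32) {
		data[i] = data[i]*data[i] + 1
	})
	ParForDo(desc) // runs all N iterations on the calling thread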
// var Int32Hash = int32Hash
// var Int64Hash = int64Hash
-// func GogoBytes() int32
-
var hashLoad float64 // declared in hashmap.c
var HashLoad = &hashLoad
func memclrBytes(b []byte)
var MemclrBytes = memclrBytes
+
+// func gogoBytes() int32
+
+// var GogoBytes = gogoBytes
tests := [...]TestCase{
{"chan recv", blockChanRecv, `
[0-9]+ [0-9]+ @ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+
-# 0x[0-9,a-f]+ runtime\.chanrecv1\+0x[0-9,a-f]+ .*/src/pkg/runtime/chan.c:[0-9]+
+# 0x[0-9,a-f]+ runtime\.chanrecv1\+0x[0-9,a-f]+ .*/src/pkg/runtime/chan.goc:[0-9]+
# 0x[0-9,a-f]+ runtime/pprof_test\.blockChanRecv\+0x[0-9,a-f]+ .*/src/pkg/runtime/pprof/pprof_test.go:[0-9]+
# 0x[0-9,a-f]+ runtime/pprof_test\.TestBlockProfile\+0x[0-9,a-f]+ .*/src/pkg/runtime/pprof/pprof_test.go:[0-9]+
`},
{"chan send", blockChanSend, `
[0-9]+ [0-9]+ @ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+
-# 0x[0-9,a-f]+ runtime\.chansend1\+0x[0-9,a-f]+ .*/src/pkg/runtime/chan.c:[0-9]+
+# 0x[0-9,a-f]+ runtime\.chansend1\+0x[0-9,a-f]+ .*/src/pkg/runtime/chan.goc:[0-9]+
# 0x[0-9,a-f]+ runtime/pprof_test\.blockChanSend\+0x[0-9,a-f]+ .*/src/pkg/runtime/pprof/pprof_test.go:[0-9]+
# 0x[0-9,a-f]+ runtime/pprof_test\.TestBlockProfile\+0x[0-9,a-f]+ .*/src/pkg/runtime/pprof/pprof_test.go:[0-9]+
`},
{"chan close", blockChanClose, `
[0-9]+ [0-9]+ @ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+
-# 0x[0-9,a-f]+ runtime\.chanrecv1\+0x[0-9,a-f]+ .*/src/pkg/runtime/chan.c:[0-9]+
+# 0x[0-9,a-f]+ runtime\.chanrecv1\+0x[0-9,a-f]+ .*/src/pkg/runtime/chan.goc:[0-9]+
# 0x[0-9,a-f]+ runtime/pprof_test\.blockChanClose\+0x[0-9,a-f]+ .*/src/pkg/runtime/pprof/pprof_test.go:[0-9]+
# 0x[0-9,a-f]+ runtime/pprof_test\.TestBlockProfile\+0x[0-9,a-f]+ .*/src/pkg/runtime/pprof/pprof_test.go:[0-9]+
`},
{"select recv async", blockSelectRecvAsync, `
[0-9]+ [0-9]+ @ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+
-# 0x[0-9,a-f]+ runtime\.selectgo\+0x[0-9,a-f]+ .*/src/pkg/runtime/chan.c:[0-9]+
+# 0x[0-9,a-f]+ runtime\.selectgo\+0x[0-9,a-f]+ .*/src/pkg/runtime/chan.goc:[0-9]+
# 0x[0-9,a-f]+ runtime/pprof_test\.blockSelectRecvAsync\+0x[0-9,a-f]+ .*/src/pkg/runtime/pprof/pprof_test.go:[0-9]+
# 0x[0-9,a-f]+ runtime/pprof_test\.TestBlockProfile\+0x[0-9,a-f]+ .*/src/pkg/runtime/pprof/pprof_test.go:[0-9]+
`},
{"select send sync", blockSelectSendSync, `
[0-9]+ [0-9]+ @ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+
-# 0x[0-9,a-f]+ runtime\.selectgo\+0x[0-9,a-f]+ .*/src/pkg/runtime/chan.c:[0-9]+
+# 0x[0-9,a-f]+ runtime\.selectgo\+0x[0-9,a-f]+ .*/src/pkg/runtime/chan.goc:[0-9]+
# 0x[0-9,a-f]+ runtime/pprof_test\.blockSelectSendSync\+0x[0-9,a-f]+ .*/src/pkg/runtime/pprof/pprof_test.go:[0-9]+
# 0x[0-9,a-f]+ runtime/pprof_test\.TestBlockProfile\+0x[0-9,a-f]+ .*/src/pkg/runtime/pprof/pprof_test.go:[0-9]+
`},
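The expected frames change from chan.c to chan.goc because the channel and select implementation moves in this patch. Each test case parks in the named runtime function via a small helper; for example, blockChanRecv is essentially (a sketch; blockDelay is the test's short fixed sleep):

	func blockChanRecv() {
		c := make(chan bool)
		go func() {
			time.Sleep(blockDelay)
			c <- true
		}()
		<-c // parks in runtime.chanrecv1 until the send arrives
	}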
done
done
-runtime="chan.c cpuprof.c env_posix.c lock_futex.c lock_sema.c mcache.c mcentral.c mfixalloc.c mgc0.c mgc0.h mheap.c msize.c netpoll.goc netpoll_epoll.c netpoll_kqueue.c netpoll_stub.c panic.c print.c proc.c race.h runtime.c runtime.h signal_unix.c signal_unix.h malloc.h malloc.goc mprof.goc parfor.c runtime1.goc sema.goc sigqueue.goc string.goc time.goc"
+runtime="chan.goc chan.h cpuprof.goc env_posix.c lock_futex.c lfstack.goc lock_sema.c mcache.c mcentral.c mfixalloc.c mgc0.c mgc0.h mheap.c msize.c netpoll.goc netpoll_epoll.c netpoll_kqueue.c netpoll_stub.c panic.c print.c proc.c race.h rdebug.goc runtime.c runtime.h signal_unix.c signal_unix.h malloc.h malloc.goc mprof.goc parfor.c runtime1.goc sema.goc sigqueue.goc string.goc time.goc"
for f in $runtime; do
merge_c $f $f
done
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#include "runtime.h"
-#include "arch.h"
-#include "go-type.h"
-#include "race.h"
-#include "malloc.h"
-
-typedef struct WaitQ WaitQ;
-typedef struct SudoG SudoG;
-typedef struct Select Select;
-typedef struct Scase Scase;
-
-typedef struct __go_type_descriptor Type;
-typedef struct __go_channel_type ChanType;
-
-struct SudoG
-{
- G* g;
- uint32* selectdone;
- SudoG* link;
- int64 releasetime;
- byte* elem; // data element
-};
-
-struct WaitQ
-{
- SudoG* first;
- SudoG* last;
-};
-
-// The garbage collector is assuming that Hchan can only contain pointers into the stack
-// and cannot contain pointers into the heap.
-struct Hchan
-{
- uintgo qcount; // total data in the q
- uintgo dataqsiz; // size of the circular q
- uint16 elemsize;
- uint8 elemalign;
- uint8 pad; // ensures proper alignment of the buffer that follows Hchan in memory
- bool closed;
- const Type* elemtype; // element type
- uintgo sendx; // send index
- uintgo recvx; // receive index
- WaitQ recvq; // list of recv waiters
- WaitQ sendq; // list of send waiters
- Lock;
-};
-
-uint32 runtime_Hchansize = sizeof(Hchan);
-
-// Buffer follows Hchan immediately in memory.
-// chanbuf(c, i) is pointer to the i'th slot in the buffer.
-#define chanbuf(c, i) ((byte*)((c)+1)+(uintptr)(c)->elemsize*(i))
-
-enum
-{
- debug = 0,
-
- // Scase.kind
- CaseRecv,
- CaseSend,
- CaseDefault,
-};
-
-struct Scase
-{
- SudoG sg; // must be first member (cast to Scase)
- Hchan* chan; // chan
- uint16 kind;
- uint16 index; // index to return
- bool* receivedp; // pointer to received bool (recv2)
-};
-
-struct Select
-{
- uint16 tcase; // total count of scase[]
- uint16 ncase; // currently filled scase[]
- uint16* pollorder; // case poll order
- Hchan** lockorder; // channel lock order
- Scase scase[1]; // one per case (in order of appearance)
-};
-
-static void dequeueg(WaitQ*);
-static SudoG* dequeue(WaitQ*);
-static void enqueue(WaitQ*, SudoG*);
-static void racesync(Hchan*, SudoG*);
-
-static Hchan*
-makechan(ChanType *t, int64 hint)
-{
- Hchan *c;
- uintptr n;
- const Type *elem;
-
- elem = t->__element_type;
-
- // compiler checks this but be safe.
- if(elem->__size >= (1<<16))
- runtime_throw("makechan: invalid channel element type");
-
- if(hint < 0 || (intgo)hint != hint || (elem->__size > 0 && (uintptr)hint > (MaxMem - sizeof(*c)) / elem->__size))
- runtime_panicstring("makechan: size out of range");
-
- n = sizeof(*c);
- n = ROUND(n, elem->__align);
-
- // allocate memory in one call
- c = (Hchan*)runtime_mallocgc(sizeof(*c) + hint*elem->__size, (uintptr)t | TypeInfo_Chan, 0);
- c->elemsize = elem->__size;
- c->elemtype = elem;
- c->dataqsiz = hint;
-
- if(debug)
- runtime_printf("makechan: chan=%p; elemsize=%D; dataqsiz=%D\n",
- c, (int64)elem->__size, (int64)c->dataqsiz);
-
- return c;
-}
-
-// For reflect
-// func makechan(typ *ChanType, size uint64) (chan)
-Hchan *reflect_makechan(ChanType *, uint64)
- __asm__ (GOSYM_PREFIX "reflect.makechan");
-
-Hchan *
-reflect_makechan(ChanType *t, uint64 size)
-{
- Hchan *c;
-
- c = makechan(t, size);
- return c;
-}
-
-// makechan(t *ChanType, hint int64) (hchan *chan any);
-Hchan*
-__go_new_channel(ChanType *t, uintptr hint)
-{
- return makechan(t, hint);
-}
-
-Hchan*
-__go_new_channel_big(ChanType *t, uint64 hint)
-{
- return makechan(t, hint);
-}
-
-/*
- * generic single channel send/recv
- * if the bool pointer is nil,
- * then the full exchange will
- * occur. if pres is not nil,
- * then the protocol will not
- * sleep but return if it could
- * not complete.
- *
- * sleep can wake up with g->param == nil
- * when a channel involved in the sleep has
- * been closed. it is easiest to loop and re-run
- * the operation; we'll see that it's now closed.
- */
-static bool
-chansend(ChanType *t, Hchan *c, byte *ep, bool block, void *pc)
-{
- SudoG *sg;
- SudoG mysg;
- G* gp;
- int64 t0;
- G* g;
-
- g = runtime_g();
-
- if(raceenabled)
- runtime_racereadobjectpc(ep, t->__element_type, runtime_getcallerpc(&t), chansend);
-
- if(c == nil) {
- USED(t);
- if(!block)
- return false;
- runtime_park(nil, nil, "chan send (nil chan)");
- return false; // not reached
- }
-
- if(runtime_gcwaiting())
- runtime_gosched();
-
- if(debug) {
- runtime_printf("chansend: chan=%p\n", c);
- }
-
- t0 = 0;
- mysg.releasetime = 0;
- if(runtime_blockprofilerate > 0) {
- t0 = runtime_cputicks();
- mysg.releasetime = -1;
- }
-
- runtime_lock(c);
- if(raceenabled)
- runtime_racereadpc(c, pc, chansend);
- if(c->closed)
- goto closed;
-
- if(c->dataqsiz > 0)
- goto asynch;
-
- sg = dequeue(&c->recvq);
- if(sg != nil) {
- if(raceenabled)
- racesync(c, sg);
- runtime_unlock(c);
-
- gp = sg->g;
- gp->param = sg;
- if(sg->elem != nil)
- runtime_memmove(sg->elem, ep, c->elemsize);
- if(sg->releasetime)
- sg->releasetime = runtime_cputicks();
- runtime_ready(gp);
- return true;
- }
-
- if(!block) {
- runtime_unlock(c);
- return false;
- }
-
- mysg.elem = ep;
- mysg.g = g;
- mysg.selectdone = nil;
- g->param = nil;
- enqueue(&c->sendq, &mysg);
- runtime_parkunlock(c, "chan send");
-
- if(g->param == nil) {
- runtime_lock(c);
- if(!c->closed)
- runtime_throw("chansend: spurious wakeup");
- goto closed;
- }
-
- if(mysg.releasetime > 0)
- runtime_blockevent(mysg.releasetime - t0, 2);
-
- return true;
-
-asynch:
- if(c->closed)
- goto closed;
-
- if(c->qcount >= c->dataqsiz) {
- if(!block) {
- runtime_unlock(c);
- return false;
- }
- mysg.g = g;
- mysg.elem = nil;
- mysg.selectdone = nil;
- enqueue(&c->sendq, &mysg);
- runtime_parkunlock(c, "chan send");
-
- runtime_lock(c);
- goto asynch;
- }
-
- if(raceenabled)
- runtime_racerelease(chanbuf(c, c->sendx));
-
- runtime_memmove(chanbuf(c, c->sendx), ep, c->elemsize);
- if(++c->sendx == c->dataqsiz)
- c->sendx = 0;
- c->qcount++;
-
- sg = dequeue(&c->recvq);
- if(sg != nil) {
- gp = sg->g;
- runtime_unlock(c);
- if(sg->releasetime)
- sg->releasetime = runtime_cputicks();
- runtime_ready(gp);
- } else
- runtime_unlock(c);
- if(mysg.releasetime > 0)
- runtime_blockevent(mysg.releasetime - t0, 2);
- return true;
-
-closed:
- runtime_unlock(c);
- runtime_panicstring("send on closed channel");
- return false; // not reached
-}
-
-
-static bool
-chanrecv(ChanType *t, Hchan* c, byte *ep, bool block, bool *received)
-{
- SudoG *sg;
- SudoG mysg;
- G *gp;
- int64 t0;
- G *g;
-
- if(runtime_gcwaiting())
- runtime_gosched();
-
- // raceenabled: don't need to check ep, as it is always on the stack.
-
- if(debug)
- runtime_printf("chanrecv: chan=%p\n", c);
-
- g = runtime_g();
-
- if(c == nil) {
- USED(t);
- if(!block)
- return false;
- runtime_park(nil, nil, "chan receive (nil chan)");
- return false; // not reached
- }
-
- t0 = 0;
- mysg.releasetime = 0;
- if(runtime_blockprofilerate > 0) {
- t0 = runtime_cputicks();
- mysg.releasetime = -1;
- }
-
- runtime_lock(c);
- if(c->dataqsiz > 0)
- goto asynch;
-
- if(c->closed)
- goto closed;
-
- sg = dequeue(&c->sendq);
- if(sg != nil) {
- if(raceenabled)
- racesync(c, sg);
- runtime_unlock(c);
-
- if(ep != nil)
- runtime_memmove(ep, sg->elem, c->elemsize);
- gp = sg->g;
- gp->param = sg;
- if(sg->releasetime)
- sg->releasetime = runtime_cputicks();
- runtime_ready(gp);
-
- if(received != nil)
- *received = true;
- return true;
- }
-
- if(!block) {
- runtime_unlock(c);
- return false;
- }
-
- mysg.elem = ep;
- mysg.g = g;
- mysg.selectdone = nil;
- g->param = nil;
- enqueue(&c->recvq, &mysg);
- runtime_parkunlock(c, "chan receive");
-
- if(g->param == nil) {
- runtime_lock(c);
- if(!c->closed)
- runtime_throw("chanrecv: spurious wakeup");
- goto closed;
- }
-
- if(received != nil)
- *received = true;
- if(mysg.releasetime > 0)
- runtime_blockevent(mysg.releasetime - t0, 2);
- return true;
-
-asynch:
- if(c->qcount <= 0) {
- if(c->closed)
- goto closed;
-
- if(!block) {
- runtime_unlock(c);
- if(received != nil)
- *received = false;
- return false;
- }
- mysg.g = g;
- mysg.elem = nil;
- mysg.selectdone = nil;
- enqueue(&c->recvq, &mysg);
- runtime_parkunlock(c, "chan receive");
-
- runtime_lock(c);
- goto asynch;
- }
-
- if(raceenabled)
- runtime_raceacquire(chanbuf(c, c->recvx));
-
- if(ep != nil)
- runtime_memmove(ep, chanbuf(c, c->recvx), c->elemsize);
- runtime_memclr(chanbuf(c, c->recvx), c->elemsize);
- if(++c->recvx == c->dataqsiz)
- c->recvx = 0;
- c->qcount--;
-
- sg = dequeue(&c->sendq);
- if(sg != nil) {
- gp = sg->g;
- runtime_unlock(c);
- if(sg->releasetime)
- sg->releasetime = runtime_cputicks();
- runtime_ready(gp);
- } else
- runtime_unlock(c);
-
- if(received != nil)
- *received = true;
- if(mysg.releasetime > 0)
- runtime_blockevent(mysg.releasetime - t0, 2);
- return true;
-
-closed:
- if(ep != nil)
- runtime_memclr(ep, c->elemsize);
- if(received != nil)
- *received = false;
- if(raceenabled)
- runtime_raceacquire(c);
- runtime_unlock(c);
- if(mysg.releasetime > 0)
- runtime_blockevent(mysg.releasetime - t0, 2);
- return true;
-}
-
-// The compiler generates a call to __go_send_small to send a value 8
-// bytes or smaller.
-void
-__go_send_small(ChanType *t, Hchan* c, uint64 val)
-{
- union
- {
- byte b[sizeof(uint64)];
- uint64 v;
- } u;
- byte *v;
-
- u.v = val;
-#ifndef WORDS_BIGENDIAN
- v = u.b;
-#else
- v = u.b + sizeof(uint64) - t->__element_type->__size;
-#endif
- chansend(t, c, v, true, runtime_getcallerpc(&t));
-}
-
-// The compiler generates a call to __go_send_big to send a value
-// larger than 8 bytes or smaller.
-void
-__go_send_big(ChanType *t, Hchan* c, byte* v)
-{
- chansend(t, c, v, true, runtime_getcallerpc(&t));
-}
-
-// The compiler generates a call to __go_receive to receive a
-// value from a channel.
-void
-__go_receive(ChanType *t, Hchan* c, byte* v)
-{
- chanrecv(t, c, v, true, nil);
-}
-
-_Bool runtime_chanrecv2(ChanType *t, Hchan* c, byte* v)
- __asm__ (GOSYM_PREFIX "runtime.chanrecv2");
-
-_Bool
-runtime_chanrecv2(ChanType *t, Hchan* c, byte* v)
-{
- bool received = false;
-
- chanrecv(t, c, v, true, &received);
- return received;
-}
-
-// func selectnbsend(c chan any, elem *any) bool
-//
-// compiler implements
-//
-// select {
-// case c <- v:
-// ... foo
-// default:
-// ... bar
-// }
-//
-// as
-//
-// if selectnbsend(c, v) {
-// ... foo
-// } else {
-// ... bar
-// }
-//
-_Bool
-runtime_selectnbsend(ChanType *t, Hchan *c, byte *val)
-{
- bool res;
-
- res = chansend(t, c, val, false, runtime_getcallerpc(&t));
- return (_Bool)res;
-}
-
-// func selectnbrecv(elem *any, c chan any) bool
-//
-// compiler implements
-//
-// select {
-// case v = <-c:
-// ... foo
-// default:
-// ... bar
-// }
-//
-// as
-//
-// if selectnbrecv(&v, c) {
-// ... foo
-// } else {
-// ... bar
-// }
-//
-_Bool
-runtime_selectnbrecv(ChanType *t, byte *v, Hchan *c)
-{
- bool selected;
-
- selected = chanrecv(t, c, v, false, nil);
- return (_Bool)selected;
-}
-
-// func selectnbrecv2(elem *any, ok *bool, c chan any) bool
-//
-// compiler implements
-//
-// select {
-// case v, ok = <-c:
-// ... foo
-// default:
-// ... bar
-// }
-//
-// as
-//
-// if c != nil && selectnbrecv2(&v, &ok, c) {
-// ... foo
-// } else {
-// ... bar
-// }
-//
-_Bool
-runtime_selectnbrecv2(ChanType *t, byte *v, _Bool *received, Hchan *c)
-{
- bool selected;
- bool r;
-
- r = false;
- selected = chanrecv(t, c, v, false, received == nil ? nil : &r);
- if(received != nil)
- *received = r;
- return selected;
-}
-
-// For reflect:
-// func chansend(c chan, val *any, nb bool) (selected bool)
-// where val points to the data to be sent.
-//
-// The "uintptr selected" is really "bool selected" but saying
-// uintptr gets us the right alignment for the output parameter block.
-
-_Bool reflect_chansend(ChanType *, Hchan *, byte *, _Bool)
- __asm__ (GOSYM_PREFIX "reflect.chansend");
-
-_Bool
-reflect_chansend(ChanType *t, Hchan *c, byte *val, _Bool nb)
-{
- bool selected;
-
- selected = chansend(t, c, val, !nb, runtime_getcallerpc(&t));
- return (_Bool)selected;
-}
-
-// For reflect:
-// func chanrecv(c chan, nb bool, val *any) (selected, received bool)
-// where val points to a data area that will be filled in with the
-// received value. val must have the size and type of the channel element type.
-
-struct chanrecv_ret
-{
- _Bool selected;
- _Bool received;
-};
-
-struct chanrecv_ret reflect_chanrecv(ChanType *, Hchan *, _Bool, byte *val)
- __asm__ (GOSYM_PREFIX "reflect.chanrecv");
-
-struct chanrecv_ret
-reflect_chanrecv(ChanType *t, Hchan *c, _Bool nb, byte *val)
-{
- struct chanrecv_ret ret;
- bool selected;
- bool received;
-
- received = false;
- selected = chanrecv(t, c, val, !nb, &received);
- ret.selected = (_Bool)selected;
- ret.received = (_Bool)received;
- return ret;
-}
-
-static Select* newselect(int32);
-
-// newselect(size uint32) (sel *byte);
-
-void* runtime_newselect(int32) __asm__ (GOSYM_PREFIX "runtime.newselect");
-
-void*
-runtime_newselect(int32 size)
-{
- return (void*)newselect(size);
-}
-
-static Select*
-newselect(int32 size)
-{
- int32 n;
- Select *sel;
-
- n = 0;
- if(size > 1)
- n = size-1;
-
- // allocate all the memory we need in a single allocation
- // start with Select with size cases
- // then lockorder with size entries
- // then pollorder with size entries
- sel = runtime_mal(sizeof(*sel) +
- n*sizeof(sel->scase[0]) +
- size*sizeof(sel->lockorder[0]) +
- size*sizeof(sel->pollorder[0]));
-
- sel->tcase = size;
- sel->ncase = 0;
- sel->lockorder = (void*)(sel->scase + size);
- sel->pollorder = (void*)(sel->lockorder + size);
-
- if(debug)
- runtime_printf("newselect s=%p size=%d\n", sel, size);
- return sel;
-}
-
-// cut in half to give stack a chance to split
-static void selectsend(Select *sel, Hchan *c, int index, void *elem);
-
-// selectsend(sel *byte, hchan *chan any, elem *any) (selected bool);
-
-void runtime_selectsend(Select *, Hchan *, void *, int32)
- __asm__ (GOSYM_PREFIX "runtime.selectsend");
-
-void
-runtime_selectsend(Select *sel, Hchan *c, void *elem, int32 index)
-{
- // nil cases do not compete
- if(c == nil)
- return;
-
- selectsend(sel, c, index, elem);
-}
-
-static void
-selectsend(Select *sel, Hchan *c, int index, void *elem)
-{
- int32 i;
- Scase *cas;
-
- i = sel->ncase;
- if(i >= sel->tcase)
- runtime_throw("selectsend: too many cases");
- sel->ncase = i+1;
- cas = &sel->scase[i];
-
- cas->index = index;
- cas->chan = c;
- cas->kind = CaseSend;
- cas->sg.elem = elem;
-
- if(debug)
- runtime_printf("selectsend s=%p index=%d chan=%p\n",
- sel, cas->index, cas->chan);
-}
-
-// cut in half to give stack a chance to split
-static void selectrecv(Select *sel, Hchan *c, int index, void *elem, bool*);
-
-// selectrecv(sel *byte, hchan *chan any, elem *any) (selected bool);
-
-void runtime_selectrecv(Select *, Hchan *, void *, int32)
- __asm__ (GOSYM_PREFIX "runtime.selectrecv");
-
-void
-runtime_selectrecv(Select *sel, Hchan *c, void *elem, int32 index)
-{
- // nil cases do not compete
- if(c == nil)
- return;
-
- selectrecv(sel, c, index, elem, nil);
-}
-
-// selectrecv2(sel *byte, hchan *chan any, elem *any, received *bool) (selected bool);
-
-void runtime_selectrecv2(Select *, Hchan *, void *, bool *, int32)
- __asm__ (GOSYM_PREFIX "runtime.selectrecv2");
-
-void
-runtime_selectrecv2(Select *sel, Hchan *c, void *elem, bool *received, int32 index)
-{
- // nil cases do not compete
- if(c == nil)
- return;
-
- selectrecv(sel, c, index, elem, received);
-}
-
-static void
-selectrecv(Select *sel, Hchan *c, int index, void *elem, bool *received)
-{
- int32 i;
- Scase *cas;
-
- i = sel->ncase;
- if(i >= sel->tcase)
- runtime_throw("selectrecv: too many cases");
- sel->ncase = i+1;
- cas = &sel->scase[i];
- cas->index = index;
- cas->chan = c;
-
- cas->kind = CaseRecv;
- cas->sg.elem = elem;
- cas->receivedp = received;
-
- if(debug)
- runtime_printf("selectrecv s=%p index=%d chan=%p\n",
- sel, cas->index, cas->chan);
-}
-
-// cut in half to give stack a chance to split
-static void selectdefault(Select*, int);
-
-// selectdefault(sel *byte) (selected bool);
-
-void runtime_selectdefault(Select *, int32) __asm__ (GOSYM_PREFIX "runtime.selectdefault");
-
-void
-runtime_selectdefault(Select *sel, int32 index)
-{
- selectdefault(sel, index);
-}
-
-static void
-selectdefault(Select *sel, int32 index)
-{
- int32 i;
- Scase *cas;
-
- i = sel->ncase;
- if(i >= sel->tcase)
- runtime_throw("selectdefault: too many cases");
- sel->ncase = i+1;
- cas = &sel->scase[i];
- cas->index = index;
- cas->chan = nil;
-
- cas->kind = CaseDefault;
-
- if(debug)
- runtime_printf("selectdefault s=%p index=%d\n",
- sel, cas->index);
-}
-
-static void
-sellock(Select *sel)
-{
- uint32 i;
- Hchan *c, *c0;
-
- c = nil;
- for(i=0; i<sel->ncase; i++) {
- c0 = sel->lockorder[i];
- if(c0 && c0 != c) {
- c = sel->lockorder[i];
- runtime_lock(c);
- }
- }
-}
-
-static void
-selunlock(Select *sel)
-{
- int32 i, n, r;
- Hchan *c;
-
- // We must be very careful here to not touch sel after we have unlocked
- // the last lock, because sel can be freed right after the last unlock.
- // Consider the following situation.
- // First M calls runtime_park() in runtime_selectgo() passing the sel.
- // Once runtime_park() has unlocked the last lock, another M makes
- // the G that calls select runnable again and schedules it for execution.
- // When the G runs on another M, it locks all the locks and frees sel.
- // Now if the first M touches sel, it will access freed memory.
- n = (int32)sel->ncase;
- r = 0;
- // skip the default case
- if(n>0 && sel->lockorder[0] == nil)
- r = 1;
- for(i = n-1; i >= r; i--) {
- c = sel->lockorder[i];
- if(i>0 && sel->lockorder[i-1] == c)
- continue; // will unlock it on the next iteration
- runtime_unlock(c);
- }
-}
-
-static bool
-selparkcommit(G *gp, void *sel)
-{
- USED(gp);
- selunlock(sel);
- return true;
-}
-
-void
-runtime_block(void)
-{
- runtime_park(nil, nil, "select (no cases)"); // forever
-}
-
-static int selectgo(Select**);
-
-// selectgo(sel *byte);
-
-int runtime_selectgo(Select *) __asm__ (GOSYM_PREFIX "runtime.selectgo");
-
-int
-runtime_selectgo(Select *sel)
-{
- return selectgo(&sel);
-}
-
-static int
-selectgo(Select **selp)
-{
- Select *sel;
- uint32 o, i, j, k, done;
- int64 t0;
- Scase *cas, *dfl;
- Hchan *c;
- SudoG *sg;
- G *gp;
- int index;
- G *g;
-
- sel = *selp;
- if(runtime_gcwaiting())
- runtime_gosched();
-
- if(debug)
- runtime_printf("select: sel=%p\n", sel);
-
- g = runtime_g();
-
- t0 = 0;
- if(runtime_blockprofilerate > 0) {
- t0 = runtime_cputicks();
- for(i=0; i<sel->ncase; i++)
- sel->scase[i].sg.releasetime = -1;
- }
-
- // The compiler rewrites selects that statically have
- // only 0 or 1 cases plus default into simpler constructs.
- // The only way we can end up with such small sel->ncase
- // values here is for a larger select in which most channels
- // have been nilled out. The general code handles those
- // cases correctly, and they are rare enough not to bother
- // optimizing (and needing to test).
-
- // generate permuted order
- for(i=0; i<sel->ncase; i++)
- sel->pollorder[i] = i;
- for(i=1; i<sel->ncase; i++) {
- o = sel->pollorder[i];
- j = runtime_fastrand1()%(i+1);
- sel->pollorder[i] = sel->pollorder[j];
- sel->pollorder[j] = o;
- }
-
- // sort the cases by Hchan address to get the locking order.
- // simple heap sort, to guarantee n log n time and constant stack footprint.
- for(i=0; i<sel->ncase; i++) {
- j = i;
- c = sel->scase[j].chan;
- while(j > 0 && sel->lockorder[k=(j-1)/2] < c) {
- sel->lockorder[j] = sel->lockorder[k];
- j = k;
- }
- sel->lockorder[j] = c;
- }
- for(i=sel->ncase; i-->0; ) {
- c = sel->lockorder[i];
- sel->lockorder[i] = sel->lockorder[0];
- j = 0;
- for(;;) {
- k = j*2+1;
- if(k >= i)
- break;
- if(k+1 < i && sel->lockorder[k] < sel->lockorder[k+1])
- k++;
- if(c < sel->lockorder[k]) {
- sel->lockorder[j] = sel->lockorder[k];
- j = k;
- continue;
- }
- break;
- }
- sel->lockorder[j] = c;
- }
- /*
- for(i=0; i+1<sel->ncase; i++)
- if(sel->lockorder[i] > sel->lockorder[i+1]) {
- runtime_printf("i=%d %p %p\n", i, sel->lockorder[i], sel->lockorder[i+1]);
- runtime_throw("select: broken sort");
- }
- */
- sellock(sel);
-
-loop:
- // pass 1 - look for something already waiting
- dfl = nil;
- for(i=0; i<sel->ncase; i++) {
- o = sel->pollorder[i];
- cas = &sel->scase[o];
- c = cas->chan;
-
- switch(cas->kind) {
- case CaseRecv:
- if(c->dataqsiz > 0) {
- if(c->qcount > 0)
- goto asyncrecv;
- } else {
- sg = dequeue(&c->sendq);
- if(sg != nil)
- goto syncrecv;
- }
- if(c->closed)
- goto rclose;
- break;
-
- case CaseSend:
- if(raceenabled)
- runtime_racereadpc(c, runtime_selectgo, chansend);
- if(c->closed)
- goto sclose;
- if(c->dataqsiz > 0) {
- if(c->qcount < c->dataqsiz)
- goto asyncsend;
- } else {
- sg = dequeue(&c->recvq);
- if(sg != nil)
- goto syncsend;
- }
- break;
-
- case CaseDefault:
- dfl = cas;
- break;
- }
- }
-
- if(dfl != nil) {
- selunlock(sel);
- cas = dfl;
- goto retc;
- }
-
-
- // pass 2 - enqueue on all chans
- done = 0;
- for(i=0; i<sel->ncase; i++) {
- o = sel->pollorder[i];
- cas = &sel->scase[o];
- c = cas->chan;
- sg = &cas->sg;
- sg->g = g;
- sg->selectdone = &done;
-
- switch(cas->kind) {
- case CaseRecv:
- enqueue(&c->recvq, sg);
- break;
-
- case CaseSend:
- enqueue(&c->sendq, sg);
- break;
- }
- }
-
- g->param = nil;
- runtime_park(selparkcommit, sel, "select");
-
- sellock(sel);
- sg = g->param;
-
- // pass 3 - dequeue from unsuccessful chans
- // otherwise they stack up on quiet channels
- for(i=0; i<sel->ncase; i++) {
- cas = &sel->scase[i];
- if(cas != (Scase*)sg) {
- c = cas->chan;
- if(cas->kind == CaseSend)
- dequeueg(&c->sendq);
- else
- dequeueg(&c->recvq);
- }
- }
-
- if(sg == nil)
- goto loop;
-
- cas = (Scase*)sg;
- c = cas->chan;
-
- if(c->dataqsiz > 0)
- runtime_throw("selectgo: shouldn't happen");
-
- if(debug)
- runtime_printf("wait-return: sel=%p c=%p cas=%p kind=%d\n",
- sel, c, cas, cas->kind);
-
- if(cas->kind == CaseRecv) {
- if(cas->receivedp != nil)
- *cas->receivedp = true;
- }
-
- if(raceenabled) {
- if(cas->kind == CaseRecv && cas->sg.elem != nil)
- runtime_racewriteobjectpc(cas->sg.elem, c->elemtype, selectgo, chanrecv);
- else if(cas->kind == CaseSend)
- runtime_racereadobjectpc(cas->sg.elem, c->elemtype, selectgo, chansend);
- }
-
- selunlock(sel);
- goto retc;
-
-asyncrecv:
- // can receive from buffer
- if(raceenabled) {
- if(cas->sg.elem != nil)
- runtime_racewriteobjectpc(cas->sg.elem, c->elemtype, selectgo, chanrecv);
- runtime_raceacquire(chanbuf(c, c->recvx));
- }
- if(cas->receivedp != nil)
- *cas->receivedp = true;
- if(cas->sg.elem != nil)
- runtime_memmove(cas->sg.elem, chanbuf(c, c->recvx), c->elemsize);
- runtime_memclr(chanbuf(c, c->recvx), c->elemsize);
- if(++c->recvx == c->dataqsiz)
- c->recvx = 0;
- c->qcount--;
- sg = dequeue(&c->sendq);
- if(sg != nil) {
- gp = sg->g;
- selunlock(sel);
- if(sg->releasetime)
- sg->releasetime = runtime_cputicks();
- runtime_ready(gp);
- } else {
- selunlock(sel);
- }
- goto retc;
-
-asyncsend:
- // can send to buffer
- if(raceenabled) {
- runtime_racerelease(chanbuf(c, c->sendx));
- runtime_racereadobjectpc(cas->sg.elem, c->elemtype, selectgo, chansend);
- }
- runtime_memmove(chanbuf(c, c->sendx), cas->sg.elem, c->elemsize);
- if(++c->sendx == c->dataqsiz)
- c->sendx = 0;
- c->qcount++;
- sg = dequeue(&c->recvq);
- if(sg != nil) {
- gp = sg->g;
- selunlock(sel);
- if(sg->releasetime)
- sg->releasetime = runtime_cputicks();
- runtime_ready(gp);
- } else {
- selunlock(sel);
- }
- goto retc;
-
-syncrecv:
- // can receive from sleeping sender (sg)
- if(raceenabled) {
- if(cas->sg.elem != nil)
- runtime_racewriteobjectpc(cas->sg.elem, c->elemtype, selectgo, chanrecv);
- racesync(c, sg);
- }
- selunlock(sel);
- if(debug)
- runtime_printf("syncrecv: sel=%p c=%p o=%d\n", sel, c, o);
- if(cas->receivedp != nil)
- *cas->receivedp = true;
- if(cas->sg.elem != nil)
- runtime_memmove(cas->sg.elem, sg->elem, c->elemsize);
- gp = sg->g;
- gp->param = sg;
- if(sg->releasetime)
- sg->releasetime = runtime_cputicks();
- runtime_ready(gp);
- goto retc;
-
-rclose:
- // read at end of closed channel
- selunlock(sel);
- if(cas->receivedp != nil)
- *cas->receivedp = false;
- if(cas->sg.elem != nil)
- runtime_memclr(cas->sg.elem, c->elemsize);
- if(raceenabled)
- runtime_raceacquire(c);
- goto retc;
-
-syncsend:
- // can send to sleeping receiver (sg)
- if(raceenabled) {
- runtime_racereadobjectpc(cas->sg.elem, c->elemtype, selectgo, chansend);
- racesync(c, sg);
- }
- selunlock(sel);
- if(debug)
- runtime_printf("syncsend: sel=%p c=%p o=%d\n", sel, c, o);
- if(sg->elem != nil)
- runtime_memmove(sg->elem, cas->sg.elem, c->elemsize);
- gp = sg->g;
- gp->param = sg;
- if(sg->releasetime)
- sg->releasetime = runtime_cputicks();
- runtime_ready(gp);
-
-retc:
- // return index corresponding to chosen case
- index = cas->index;
- if(cas->sg.releasetime > 0)
- runtime_blockevent(cas->sg.releasetime - t0, 2);
- runtime_free(sel);
- return index;
-
-sclose:
- // send on closed channel
- selunlock(sel);
- runtime_panicstring("send on closed channel");
- return 0; // not reached
-}
-
-// This struct must match ../reflect/value.go:/runtimeSelect.
-typedef struct runtimeSelect runtimeSelect;
-struct runtimeSelect
-{
- uintptr dir;
- ChanType *typ;
- Hchan *ch;
- byte *val;
-};
-
-// This enum must match ../reflect/value.go:/SelectDir.
-enum SelectDir {
- SelectSend = 1,
- SelectRecv,
- SelectDefault,
-};
-
-// func rselect(cases []runtimeSelect) (chosen int, recvOK bool)
-
-struct rselect_ret {
- intgo chosen;
- _Bool recvOK;
-};
-
-struct rselect_ret reflect_rselect(Slice)
- __asm__ (GOSYM_PREFIX "reflect.rselect");
-
-struct rselect_ret
-reflect_rselect(Slice cases)
-{
- struct rselect_ret ret;
- intgo chosen;
- bool recvOK;
- int32 i;
- Select *sel;
- runtimeSelect* rcase, *rc;
-
- chosen = -1;
- recvOK = false;
-
- rcase = (runtimeSelect*)cases.__values;
-
- sel = newselect(cases.__count);
- for(i=0; i<cases.__count; i++) {
- rc = &rcase[i];
- switch(rc->dir) {
- case SelectDefault:
- selectdefault(sel, i);
- break;
- case SelectSend:
- if(rc->ch == nil)
- break;
- selectsend(sel, rc->ch, i, rc->val);
- break;
- case SelectRecv:
- if(rc->ch == nil)
- break;
- selectrecv(sel, rc->ch, i, rc->val, &recvOK);
- break;
- }
- }
-
- chosen = (intgo)(uintptr)selectgo(&sel);
-
- ret.chosen = chosen;
- ret.recvOK = (_Bool)recvOK;
- return ret;
-}
-
-static void closechan(Hchan *c, void *pc);
-
-// closechan(sel *byte);
-void
-runtime_closechan(Hchan *c)
-{
- closechan(c, runtime_getcallerpc(&c));
-}
-
-// For reflect
-// func chanclose(c chan)
-
-void reflect_chanclose(Hchan *) __asm__ (GOSYM_PREFIX "reflect.chanclose");
-
-void
-reflect_chanclose(Hchan *c)
-{
- closechan(c, runtime_getcallerpc(&c));
-}
-
-static void
-closechan(Hchan *c, void *pc)
-{
- SudoG *sg;
- G* gp;
-
- if(c == nil)
- runtime_panicstring("close of nil channel");
-
- if(runtime_gcwaiting())
- runtime_gosched();
-
- runtime_lock(c);
- if(c->closed) {
- runtime_unlock(c);
- runtime_panicstring("close of closed channel");
- }
-
- if(raceenabled) {
- runtime_racewritepc(c, pc, runtime_closechan);
- runtime_racerelease(c);
- }
-
- c->closed = true;
-
- // release all readers
- for(;;) {
- sg = dequeue(&c->recvq);
- if(sg == nil)
- break;
- gp = sg->g;
- gp->param = nil;
- if(sg->releasetime)
- sg->releasetime = runtime_cputicks();
- runtime_ready(gp);
- }
-
- // release all writers
- for(;;) {
- sg = dequeue(&c->sendq);
- if(sg == nil)
- break;
- gp = sg->g;
- gp->param = nil;
- if(sg->releasetime)
- sg->releasetime = runtime_cputicks();
- runtime_ready(gp);
- }
-
- runtime_unlock(c);
-}
-
-void
-__go_builtin_close(Hchan *c)
-{
- runtime_closechan(c);
-}
-
-// For reflect
-// func chanlen(c chan) (len int)
-
-intgo reflect_chanlen(Hchan *) __asm__ (GOSYM_PREFIX "reflect.chanlen");
-
-intgo
-reflect_chanlen(Hchan *c)
-{
- intgo len;
-
- if(c == nil)
- len = 0;
- else
- len = c->qcount;
- return len;
-}
-
-intgo
-__go_chan_len(Hchan *c)
-{
- return reflect_chanlen(c);
-}
-
-// For reflect
-// func chancap(c chan) int
-
-intgo reflect_chancap(Hchan *) __asm__ (GOSYM_PREFIX "reflect.chancap");
-
-intgo
-reflect_chancap(Hchan *c)
-{
- intgo cap;
-
- if(c == nil)
- cap = 0;
- else
- cap = c->dataqsiz;
- return cap;
-}
-
-intgo
-__go_chan_cap(Hchan *c)
-{
- return reflect_chancap(c);
-}
-
-static SudoG*
-dequeue(WaitQ *q)
-{
- SudoG *sgp;
-
-loop:
- sgp = q->first;
- if(sgp == nil)
- return nil;
- q->first = sgp->link;
-
- // if sgp participates in a select and is already signaled, ignore it
- if(sgp->selectdone != nil) {
- // claim the right to signal
- if(*sgp->selectdone != 0 || !runtime_cas(sgp->selectdone, 0, 1))
- goto loop;
- }
-
- return sgp;
-}
-
-static void
-dequeueg(WaitQ *q)
-{
- SudoG **l, *sgp, *prevsgp;
- G *g;
-
- g = runtime_g();
- prevsgp = nil;
- for(l=&q->first; (sgp=*l) != nil; l=&sgp->link, prevsgp=sgp) {
- if(sgp->g == g) {
- *l = sgp->link;
- if(q->last == sgp)
- q->last = prevsgp;
- break;
- }
- }
-}
-
-static void
-enqueue(WaitQ *q, SudoG *sgp)
-{
- sgp->link = nil;
- if(q->first == nil) {
- q->first = sgp;
- q->last = sgp;
- return;
- }
- q->last->link = sgp;
- q->last = sgp;
-}
-
-static void
-racesync(Hchan *c, SudoG *sg)
-{
- runtime_racerelease(chanbuf(c, 0));
- runtime_raceacquireg(sg->g, chanbuf(c, 0));
- runtime_racereleaseg(sg->g, chanbuf(c, 0));
- runtime_raceacquire(chanbuf(c, 0));
-}
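The C deleted above, and the chan.goc that replaces it below, implement Go's ordinary channel operations. For orientation, a minimal sketch of the language-level behavior this code backs (plain Go, nothing patch-specific):

	c := make(chan int, 1) // makechan: one-slot buffered channel
	c <- 1                 // chansend: fits in the buffer, does not block
	v := <-c               // chanrecv: drains the buffer; v == 1
	select {
	case c <- 2: // lowered to selectnbsend: succeeds while the buffer has room
	default: // taken only if the send would block
	}
	close(c)     // closechan: wakes any parked senders and receivers
	x, ok := <-c // buffered 2 is still delivered: ok == true
	_, ok = <-c  // now drained and closed: zero value, ok == false
	_, _ = v, x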
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+#include "runtime.h"
+#include "arch.h"
+#include "go-type.h"
+#include "race.h"
+#include "malloc.h"
+#include "chan.h"
+
+uint32 runtime_Hchansize = sizeof(Hchan);
+
+static void dequeueg(WaitQ*);
+static SudoG* dequeue(WaitQ*);
+static void enqueue(WaitQ*, SudoG*);
+static void racesync(Hchan*, SudoG*);
+
+static Hchan*
+makechan(ChanType *t, int64 hint)
+{
+ Hchan *c;
+ uintptr n;
+ const Type *elem;
+
+ elem = t->__element_type;
+
+ // compiler checks this but be safe.
+ if(elem->__size >= (1<<16))
+ runtime_throw("makechan: invalid channel element type");
+
+ if(hint < 0 || (intgo)hint != hint || (elem->__size > 0 && (uintptr)hint > (MaxMem - sizeof(*c)) / elem->__size))
+ runtime_panicstring("makechan: size out of range");
+
+ n = sizeof(*c);
+ n = ROUND(n, elem->__align);
+
+ // allocate memory in one call
+ c = (Hchan*)runtime_mallocgc(sizeof(*c) + hint*elem->__size, (uintptr)t | TypeInfo_Chan, 0);
+ c->elemsize = elem->__size;
+ c->elemtype = elem;
+ c->dataqsiz = hint;
+
+ if(debug)
+ runtime_printf("makechan: chan=%p; elemsize=%D; dataqsiz=%D\n",
+ c, (int64)elem->__size, (int64)c->dataqsiz);
+
+ return c;
+}
+
+func reflect.makechan(t *ChanType, size uint64) (c *Hchan) {
+ c = makechan(t, size);
+}
+
+Hchan*
+__go_new_channel(ChanType *t, uintptr hint)
+{
+ return makechan(t, hint);
+}
+
+Hchan*
+__go_new_channel_big(ChanType *t, uint64 hint)
+{
+ return makechan(t, hint);
+}
+
+/*
+ * generic single channel send/recv
+ * if block is true,
+ * then the full exchange will
+ * occur. if block is false,
+ * then the protocol will not
+ * sleep but return if it could
+ * not complete.
+ *
+ * sleep can wake up with g->param == nil
+ * when a channel involved in the sleep has
+ * been closed. it is easiest to loop and re-run
+ * the operation; we'll see that it's now closed.
+ */
+static bool
+chansend(ChanType *t, Hchan *c, byte *ep, bool block, void *pc)
+{
+ SudoG *sg;
+ SudoG mysg;
+ G* gp;
+ int64 t0;
+ G* g;
+
+ g = runtime_g();
+
+ if(raceenabled)
+ runtime_racereadobjectpc(ep, t->__element_type, runtime_getcallerpc(&t), chansend);
+
+ if(c == nil) {
+ USED(t);
+ if(!block)
+ return false;
+ runtime_park(nil, nil, "chan send (nil chan)");
+ return false; // not reached
+ }
+
+ if(runtime_gcwaiting())
+ runtime_gosched();
+
+ if(debug) {
+ runtime_printf("chansend: chan=%p\n", c);
+ }
+
+ t0 = 0;
+ mysg.releasetime = 0;
+ if(runtime_blockprofilerate > 0) {
+ t0 = runtime_cputicks();
+ mysg.releasetime = -1;
+ }
+
+ runtime_lock(c);
+ if(raceenabled)
+ runtime_racereadpc(c, pc, chansend);
+ if(c->closed)
+ goto closed;
+
+ if(c->dataqsiz > 0)
+ goto asynch;
+
+ sg = dequeue(&c->recvq);
+ if(sg != nil) {
+ if(raceenabled)
+ racesync(c, sg);
+ runtime_unlock(c);
+
+ gp = sg->g;
+ gp->param = sg;
+ if(sg->elem != nil)
+ runtime_memmove(sg->elem, ep, c->elemsize);
+ if(sg->releasetime)
+ sg->releasetime = runtime_cputicks();
+ runtime_ready(gp);
+ return true;
+ }
+
+ if(!block) {
+ runtime_unlock(c);
+ return false;
+ }
+
+ mysg.elem = ep;
+ mysg.g = g;
+ mysg.selectdone = nil;
+ g->param = nil;
+ enqueue(&c->sendq, &mysg);
+ runtime_parkunlock(c, "chan send");
+
+ if(g->param == nil) {
+ runtime_lock(c);
+ if(!c->closed)
+ runtime_throw("chansend: spurious wakeup");
+ goto closed;
+ }
+
+ if(mysg.releasetime > 0)
+ runtime_blockevent(mysg.releasetime - t0, 2);
+
+ return true;
+
+asynch:
+ if(c->closed)
+ goto closed;
+
+ if(c->qcount >= c->dataqsiz) {
+ if(!block) {
+ runtime_unlock(c);
+ return false;
+ }
+ mysg.g = g;
+ mysg.elem = nil;
+ mysg.selectdone = nil;
+ enqueue(&c->sendq, &mysg);
+ runtime_parkunlock(c, "chan send");
+
+ runtime_lock(c);
+ goto asynch;
+ }
+
+ if(raceenabled)
+ runtime_racerelease(chanbuf(c, c->sendx));
+
+ runtime_memmove(chanbuf(c, c->sendx), ep, c->elemsize);
+ if(++c->sendx == c->dataqsiz)
+ c->sendx = 0;
+ c->qcount++;
+
+ sg = dequeue(&c->recvq);
+ if(sg != nil) {
+ gp = sg->g;
+ runtime_unlock(c);
+ if(sg->releasetime)
+ sg->releasetime = runtime_cputicks();
+ runtime_ready(gp);
+ } else
+ runtime_unlock(c);
+ if(mysg.releasetime > 0)
+ runtime_blockevent(mysg.releasetime - t0, 2);
+ return true;
+
+closed:
+ runtime_unlock(c);
+ runtime_panicstring("send on closed channel");
+ return false; // not reached
+}
+
+
+static bool
+chanrecv(ChanType *t, Hchan* c, byte *ep, bool block, bool *received)
+{
+ SudoG *sg;
+ SudoG mysg;
+ G *gp;
+ int64 t0;
+ G *g;
+
+ if(runtime_gcwaiting())
+ runtime_gosched();
+
+ // raceenabled: don't need to check ep, as it is always on the stack.
+
+ if(debug)
+ runtime_printf("chanrecv: chan=%p\n", c);
+
+ g = runtime_g();
+
+ if(c == nil) {
+ USED(t);
+ if(!block)
+ return false;
+ runtime_park(nil, nil, "chan receive (nil chan)");
+ return false; // not reached
+ }
+
+ t0 = 0;
+ mysg.releasetime = 0;
+ if(runtime_blockprofilerate > 0) {
+ t0 = runtime_cputicks();
+ mysg.releasetime = -1;
+ }
+
+ runtime_lock(c);
+ if(c->dataqsiz > 0)
+ goto asynch;
+
+ if(c->closed)
+ goto closed;
+
+ sg = dequeue(&c->sendq);
+ if(sg != nil) {
+ if(raceenabled)
+ racesync(c, sg);
+ runtime_unlock(c);
+
+ if(ep != nil)
+ runtime_memmove(ep, sg->elem, c->elemsize);
+ gp = sg->g;
+ gp->param = sg;
+ if(sg->releasetime)
+ sg->releasetime = runtime_cputicks();
+ runtime_ready(gp);
+
+ if(received != nil)
+ *received = true;
+ return true;
+ }
+
+ if(!block) {
+ runtime_unlock(c);
+ return false;
+ }
+
+ mysg.elem = ep;
+ mysg.g = g;
+ mysg.selectdone = nil;
+ g->param = nil;
+ enqueue(&c->recvq, &mysg);
+ runtime_parkunlock(c, "chan receive");
+
+ if(g->param == nil) {
+ runtime_lock(c);
+ if(!c->closed)
+ runtime_throw("chanrecv: spurious wakeup");
+ goto closed;
+ }
+
+ if(received != nil)
+ *received = true;
+ if(mysg.releasetime > 0)
+ runtime_blockevent(mysg.releasetime - t0, 2);
+ return true;
+
+asynch:
+ if(c->qcount <= 0) {
+ if(c->closed)
+ goto closed;
+
+ if(!block) {
+ runtime_unlock(c);
+ if(received != nil)
+ *received = false;
+ return false;
+ }
+ mysg.g = g;
+ mysg.elem = nil;
+ mysg.selectdone = nil;
+ enqueue(&c->recvq, &mysg);
+ runtime_parkunlock(c, "chan receive");
+
+ runtime_lock(c);
+ goto asynch;
+ }
+
+ if(raceenabled)
+ runtime_raceacquire(chanbuf(c, c->recvx));
+
+ if(ep != nil)
+ runtime_memmove(ep, chanbuf(c, c->recvx), c->elemsize);
+ runtime_memclr(chanbuf(c, c->recvx), c->elemsize);
+ if(++c->recvx == c->dataqsiz)
+ c->recvx = 0;
+ c->qcount--;
+
+ sg = dequeue(&c->sendq);
+ if(sg != nil) {
+ gp = sg->g;
+ runtime_unlock(c);
+ if(sg->releasetime)
+ sg->releasetime = runtime_cputicks();
+ runtime_ready(gp);
+ } else
+ runtime_unlock(c);
+
+ if(received != nil)
+ *received = true;
+ if(mysg.releasetime > 0)
+ runtime_blockevent(mysg.releasetime - t0, 2);
+ return true;
+
+closed:
+ if(ep != nil)
+ runtime_memclr(ep, c->elemsize);
+ if(received != nil)
+ *received = false;
+ if(raceenabled)
+ runtime_raceacquire(c);
+ runtime_unlock(c);
+ if(mysg.releasetime > 0)
+ runtime_blockevent(mysg.releasetime - t0, 2);
+ return true;
+}
+
+// The compiler generates a call to __go_send_small to send a value 8
+// bytes or smaller.
+void
+__go_send_small(ChanType *t, Hchan* c, uint64 val)
+{
+ union
+ {
+ byte b[sizeof(uint64)];
+ uint64 v;
+ } u;
+ byte *v;
+
+ u.v = val;
+#ifndef WORDS_BIGENDIAN
+ v = u.b;
+#else
+ v = u.b + sizeof(uint64) - t->__element_type->__size;
+#endif
+ chansend(t, c, v, true, runtime_getcallerpc(&t));
+}
+
+// The compiler generates a call to __go_send_big to send a value
+// larger than 8 bytes.
+void
+__go_send_big(ChanType *t, Hchan* c, byte* v)
+{
+ chansend(t, c, v, true, runtime_getcallerpc(&t));
+}
+
+// The compiler generates a call to __go_receive to receive a
+// value from a channel.
+void
+__go_receive(ChanType *t, Hchan* c, byte* v)
+{
+ chanrecv(t, c, v, true, nil);
+}
+
+_Bool runtime_chanrecv2(ChanType *t, Hchan* c, byte* v)
+ __asm__ (GOSYM_PREFIX "runtime.chanrecv2");
+
+_Bool
+runtime_chanrecv2(ChanType *t, Hchan* c, byte* v)
+{
+ bool received = false;
+
+ chanrecv(t, c, v, true, &received);
+ return received;
+}
+
+// compiler implements
+//
+// select {
+// case c <- v:
+// ... foo
+// default:
+// ... bar
+// }
+//
+// as
+//
+// if selectnbsend(c, v) {
+// ... foo
+// } else {
+// ... bar
+// }
+//
+func selectnbsend(t *ChanType, c *Hchan, elem *byte) (selected bool) {
+ selected = chansend(t, c, elem, false, runtime_getcallerpc(&t));
+}
+
+// compiler implements
+//
+// select {
+// case v = <-c:
+// ... foo
+// default:
+// ... bar
+// }
+//
+// as
+//
+// if selectnbrecv(&v, c) {
+// ... foo
+// } else {
+// ... bar
+// }
+//
+func selectnbrecv(t *ChanType, elem *byte, c *Hchan) (selected bool) {
+ selected = chanrecv(t, c, elem, false, nil);
+}
+
+// compiler implements
+//
+// select {
+// case v, ok = <-c:
+// ... foo
+// default:
+// ... bar
+// }
+//
+// as
+//
+// if c != nil && selectnbrecv2(&v, &ok, c) {
+// ... foo
+// } else {
+// ... bar
+// }
+//
+func selectnbrecv2(t *ChanType, elem *byte, received *bool, c *Hchan) (selected bool) {
+ bool r;
+
+ selected = chanrecv(t, c, elem, false, received == nil ? nil : &r);
+ if(received != nil)
+ *received = r;
+}
+
+func reflect.chansend(t *ChanType, c *Hchan, elem *byte, nb bool) (selected bool) {
+ selected = chansend(t, c, elem, !nb, runtime_getcallerpc(&t));
+}
+
+func reflect.chanrecv(t *ChanType, c *Hchan, nb bool, elem *byte) (selected bool, received bool) {
+ received = false;
+ selected = chanrecv(t, c, elem, !nb, &received);
+}
+
+static Select* newselect(int32);
+
+func newselect(size int32) (sel *byte) {
+ sel = (byte*)newselect(size);
+}
+
+static Select*
+newselect(int32 size)
+{
+ int32 n;
+ Select *sel;
+
+ n = 0;
+ if(size > 1)
+ n = size-1;
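+	// (a Select already embeds one Scase, so only size-1 extras are needed)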
+
+ // allocate all the memory we need in a single allocation
+ // start with Select with size cases
+ // then lockorder with size entries
+ // then pollorder with size entries
+ sel = runtime_mal(sizeof(*sel) +
+ n*sizeof(sel->scase[0]) +
+ size*sizeof(sel->lockorder[0]) +
+ size*sizeof(sel->pollorder[0]));
+
+ sel->tcase = size;
+ sel->ncase = 0;
+ sel->lockorder = (void*)(sel->scase + size);
+ sel->pollorder = (void*)(sel->lockorder + size);
+
+ if(debug)
+ runtime_printf("newselect s=%p size=%d\n", sel, size);
+ return sel;
+}
+
+// cut in half to give stack a chance to split
+static void selectsend(Select *sel, Hchan *c, int index, void *elem);
+
+func selectsend(sel *Select, c *Hchan, elem *byte, index int32) {
+ // nil cases do not compete
+ if(c != nil)
+ selectsend(sel, c, index, elem);
+}
+
+static void
+selectsend(Select *sel, Hchan *c, int index, void *elem)
+{
+ int32 i;
+ Scase *cas;
+
+ i = sel->ncase;
+ if(i >= sel->tcase)
+ runtime_throw("selectsend: too many cases");
+ sel->ncase = i+1;
+ cas = &sel->scase[i];
+
+ cas->index = index;
+ cas->chan = c;
+ cas->kind = CaseSend;
+ cas->sg.elem = elem;
+
+ if(debug)
+ runtime_printf("selectsend s=%p index=%d chan=%p\n",
+ sel, cas->index, cas->chan);
+}
+
+// cut in half to give stack a chance to split
+static void selectrecv(Select *sel, Hchan *c, int index, void *elem, bool*);
+
+func selectrecv(sel *Select, c *Hchan, elem *byte, index int32) {
+ // nil cases do not compete
+ if(c != nil)
+ selectrecv(sel, c, index, elem, nil);
+}
+
+func selectrecv2(sel *Select, c *Hchan, elem *byte, received *bool, index int32) {
+ // nil cases do not compete
+ if(c != nil)
+ selectrecv(sel, c, index, elem, received);
+}
+
+static void
+selectrecv(Select *sel, Hchan *c, int index, void *elem, bool *received)
+{
+ int32 i;
+ Scase *cas;
+
+ i = sel->ncase;
+ if(i >= sel->tcase)
+ runtime_throw("selectrecv: too many cases");
+ sel->ncase = i+1;
+ cas = &sel->scase[i];
+ cas->index = index;
+ cas->chan = c;
+
+ cas->kind = CaseRecv;
+ cas->sg.elem = elem;
+ cas->receivedp = received;
+
+ if(debug)
+ runtime_printf("selectrecv s=%p index=%d chan=%p\n",
+ sel, cas->index, cas->chan);
+}
+
+// cut in half to give stack a chance to split
+static void selectdefault(Select*, int);
+
+func selectdefault(sel *Select, index int32) {
+ selectdefault(sel, index);
+}
+
+static void
+selectdefault(Select *sel, int32 index)
+{
+ int32 i;
+ Scase *cas;
+
+ i = sel->ncase;
+ if(i >= sel->tcase)
+ runtime_throw("selectdefault: too many cases");
+ sel->ncase = i+1;
+ cas = &sel->scase[i];
+ cas->index = index;
+ cas->chan = nil;
+
+ cas->kind = CaseDefault;
+
+ if(debug)
+ runtime_printf("selectdefault s=%p index=%d\n",
+ sel, cas->index);
+}
+
+static void
+sellock(Select *sel)
+{
+ uint32 i;
+ Hchan *c, *c0;
+
+ c = nil;
+ for(i=0; i<sel->ncase; i++) {
+ c0 = sel->lockorder[i];
+ if(c0 && c0 != c) {
+ c = sel->lockorder[i];
+ runtime_lock(c);
+ }
+ }
+}
+
+static void
+selunlock(Select *sel)
+{
+ int32 i, n, r;
+ Hchan *c;
+
+ // We must be very careful here to not touch sel after we have unlocked
+ // the last lock, because sel can be freed right after the last unlock.
+ // Consider the following situation.
+ // First M calls runtime_park() in runtime_selectgo() passing the sel.
+ // Once runtime_park() has unlocked the last lock, another M makes
+ // the G that calls select runnable again and schedules it for execution.
+ // When the G runs on another M, it locks all the locks and frees sel.
+ // Now if the first M touches sel, it will access freed memory.
+ n = (int32)sel->ncase;
+ r = 0;
+ // skip the default case
+ if(n>0 && sel->lockorder[0] == nil)
+ r = 1;
+ for(i = n-1; i >= r; i--) {
+ c = sel->lockorder[i];
+ if(i>0 && sel->lockorder[i-1] == c)
+ continue; // will unlock it on the next iteration
+ runtime_unlock(c);
+ }
+}
+
+static bool
+selparkcommit(G *gp, void *sel)
+{
+ USED(gp);
+ selunlock(sel);
+ return true;
+}
+
+func block() {
+ runtime_park(nil, nil, "select (no cases)"); // forever
+}
+
+static int selectgo(Select**);
+
+// selectgo(sel *byte);  (the signature as the compiler sees it; sel is really a *Select)
+
+func selectgo(sel *Select) (ret int32) {
+ return selectgo(&sel);
+}
+
+static int
+selectgo(Select **selp)
+{
+ Select *sel;
+ uint32 o, i, j, k, done;
+ int64 t0;
+ Scase *cas, *dfl;
+ Hchan *c;
+ SudoG *sg;
+ G *gp;
+ int index;
+ G *g;
+
+ sel = *selp;
+ if(runtime_gcwaiting())
+ runtime_gosched();
+
+ if(debug)
+ runtime_printf("select: sel=%p\n", sel);
+
+ g = runtime_g();
+
+ t0 = 0;
+ if(runtime_blockprofilerate > 0) {
+ t0 = runtime_cputicks();
+ for(i=0; i<sel->ncase; i++)
+ sel->scase[i].sg.releasetime = -1;
+ }
+
+ // The compiler rewrites selects that statically have
+ // only 0 or 1 cases plus default into simpler constructs.
+ // The only way we can end up with such small sel->ncase
+ // values here is for a larger select in which most channels
+ // have been nilled out. The general code handles those
+ // cases correctly, and they are rare enough not to bother
+ // optimizing (and needing to test).
+
+ // generate permuted order
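+	// (a Fisher-Yates shuffle, driven by the runtime's fast RNG)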
+ for(i=0; i<sel->ncase; i++)
+ sel->pollorder[i] = i;
+ for(i=1; i<sel->ncase; i++) {
+ o = sel->pollorder[i];
+ j = runtime_fastrand1()%(i+1);
+ sel->pollorder[i] = sel->pollorder[j];
+ sel->pollorder[j] = o;
+ }
+
+ // sort the cases by Hchan address to get the locking order.
+ // simple heap sort, to guarantee n log n time and constant stack footprint.
+ for(i=0; i<sel->ncase; i++) {
+ j = i;
+ c = sel->scase[j].chan;
+ while(j > 0 && sel->lockorder[k=(j-1)/2] < c) {
+ sel->lockorder[j] = sel->lockorder[k];
+ j = k;
+ }
+ sel->lockorder[j] = c;
+ }
+ for(i=sel->ncase; i-->0; ) {
+ c = sel->lockorder[i];
+ sel->lockorder[i] = sel->lockorder[0];
+ j = 0;
+ for(;;) {
+ k = j*2+1;
+ if(k >= i)
+ break;
+ if(k+1 < i && sel->lockorder[k] < sel->lockorder[k+1])
+ k++;
+ if(c < sel->lockorder[k]) {
+ sel->lockorder[j] = sel->lockorder[k];
+ j = k;
+ continue;
+ }
+ break;
+ }
+ sel->lockorder[j] = c;
+ }
+ /*
+ for(i=0; i+1<sel->ncase; i++)
+ if(sel->lockorder[i] > sel->lockorder[i+1]) {
+ runtime_printf("i=%d %p %p\n", i, sel->lockorder[i], sel->lockorder[i+1]);
+ runtime_throw("select: broken sort");
+ }
+ */
+ sellock(sel);
+
+loop:
+ // pass 1 - look for something already waiting
+ dfl = nil;
+ for(i=0; i<sel->ncase; i++) {
+ o = sel->pollorder[i];
+ cas = &sel->scase[o];
+ c = cas->chan;
+
+ switch(cas->kind) {
+ case CaseRecv:
+ if(c->dataqsiz > 0) {
+ if(c->qcount > 0)
+ goto asyncrecv;
+ } else {
+ sg = dequeue(&c->sendq);
+ if(sg != nil)
+ goto syncrecv;
+ }
+ if(c->closed)
+ goto rclose;
+ break;
+
+ case CaseSend:
+ if(raceenabled)
+ runtime_racereadpc(c, runtime_selectgo, chansend);
+ if(c->closed)
+ goto sclose;
+ if(c->dataqsiz > 0) {
+ if(c->qcount < c->dataqsiz)
+ goto asyncsend;
+ } else {
+ sg = dequeue(&c->recvq);
+ if(sg != nil)
+ goto syncsend;
+ }
+ break;
+
+ case CaseDefault:
+ dfl = cas;
+ break;
+ }
+ }
+
+ if(dfl != nil) {
+ selunlock(sel);
+ cas = dfl;
+ goto retc;
+ }
+
+ // pass 2 - enqueue on all chans
+ done = 0;
+ for(i=0; i<sel->ncase; i++) {
+ o = sel->pollorder[i];
+ cas = &sel->scase[o];
+ c = cas->chan;
+ sg = &cas->sg;
+ sg->g = g;
+ sg->selectdone = &done;
+
+ switch(cas->kind) {
+ case CaseRecv:
+ enqueue(&c->recvq, sg);
+ break;
+
+ case CaseSend:
+ enqueue(&c->sendq, sg);
+ break;
+ }
+ }
+
+ g->param = nil;
+ runtime_park(selparkcommit, sel, "select");
+
+ sellock(sel);
+ sg = g->param;
+
+ // pass 3 - dequeue from unsuccessful chans
+ // otherwise they stack up on quiet channels
+ for(i=0; i<sel->ncase; i++) {
+ cas = &sel->scase[i];
+ if(cas != (Scase*)sg) {
+ c = cas->chan;
+ if(cas->kind == CaseSend)
+ dequeueg(&c->sendq);
+ else
+ dequeueg(&c->recvq);
+ }
+ }
+
+ if(sg == nil)
+ goto loop;
+
+ cas = (Scase*)sg;
+ c = cas->chan;
+
+ if(c->dataqsiz > 0)
+ runtime_throw("selectgo: shouldn't happen");
+
+ if(debug)
+ runtime_printf("wait-return: sel=%p c=%p cas=%p kind=%d\n",
+ sel, c, cas, cas->kind);
+
+ if(cas->kind == CaseRecv) {
+ if(cas->receivedp != nil)
+ *cas->receivedp = true;
+ }
+
+ if(raceenabled) {
+ if(cas->kind == CaseRecv && cas->sg.elem != nil)
+ runtime_racewriteobjectpc(cas->sg.elem, c->elemtype, selectgo, chanrecv);
+ else if(cas->kind == CaseSend)
+ runtime_racereadobjectpc(cas->sg.elem, c->elemtype, selectgo, chansend);
+ }
+
+ selunlock(sel);
+ goto retc;
+
+asyncrecv:
+ // can receive from buffer
+ if(raceenabled) {
+ if(cas->sg.elem != nil)
+ runtime_racewriteobjectpc(cas->sg.elem, c->elemtype, selectgo, chanrecv);
+ runtime_raceacquire(chanbuf(c, c->recvx));
+ }
+ if(cas->receivedp != nil)
+ *cas->receivedp = true;
+ if(cas->sg.elem != nil)
+ runtime_memmove(cas->sg.elem, chanbuf(c, c->recvx), c->elemsize);
+ runtime_memclr(chanbuf(c, c->recvx), c->elemsize);
+ if(++c->recvx == c->dataqsiz)
+ c->recvx = 0;
+ c->qcount--;
+ sg = dequeue(&c->sendq);
+ if(sg != nil) {
+ gp = sg->g;
+ selunlock(sel);
+ if(sg->releasetime)
+ sg->releasetime = runtime_cputicks();
+ runtime_ready(gp);
+ } else {
+ selunlock(sel);
+ }
+ goto retc;
+
+asyncsend:
+ // can send to buffer
+ if(raceenabled) {
+ runtime_racerelease(chanbuf(c, c->sendx));
+ runtime_racereadobjectpc(cas->sg.elem, c->elemtype, selectgo, chansend);
+ }
+ runtime_memmove(chanbuf(c, c->sendx), cas->sg.elem, c->elemsize);
+ if(++c->sendx == c->dataqsiz)
+ c->sendx = 0;
+ c->qcount++;
+ sg = dequeue(&c->recvq);
+ if(sg != nil) {
+ gp = sg->g;
+ selunlock(sel);
+ if(sg->releasetime)
+ sg->releasetime = runtime_cputicks();
+ runtime_ready(gp);
+ } else {
+ selunlock(sel);
+ }
+ goto retc;
+
+syncrecv:
+ // can receive from sleeping sender (sg)
+ if(raceenabled) {
+ if(cas->sg.elem != nil)
+ runtime_racewriteobjectpc(cas->sg.elem, c->elemtype, selectgo, chanrecv);
+ racesync(c, sg);
+ }
+ selunlock(sel);
+ if(debug)
+ runtime_printf("syncrecv: sel=%p c=%p o=%d\n", sel, c, o);
+ if(cas->receivedp != nil)
+ *cas->receivedp = true;
+ if(cas->sg.elem != nil)
+ runtime_memmove(cas->sg.elem, sg->elem, c->elemsize);
+ gp = sg->g;
+ gp->param = sg;
+ if(sg->releasetime)
+ sg->releasetime = runtime_cputicks();
+ runtime_ready(gp);
+ goto retc;
+
+rclose:
+ // read at end of closed channel
+ selunlock(sel);
+ if(cas->receivedp != nil)
+ *cas->receivedp = false;
+ if(cas->sg.elem != nil)
+ runtime_memclr(cas->sg.elem, c->elemsize);
+ if(raceenabled)
+ runtime_raceacquire(c);
+ goto retc;
+
+syncsend:
+ // can send to sleeping receiver (sg)
+ if(raceenabled) {
+ runtime_racereadobjectpc(cas->sg.elem, c->elemtype, selectgo, chansend);
+ racesync(c, sg);
+ }
+ selunlock(sel);
+ if(debug)
+ runtime_printf("syncsend: sel=%p c=%p o=%d\n", sel, c, o);
+ if(sg->elem != nil)
+ runtime_memmove(sg->elem, cas->sg.elem, c->elemsize);
+ gp = sg->g;
+ gp->param = sg;
+ if(sg->releasetime)
+ sg->releasetime = runtime_cputicks();
+ runtime_ready(gp);
+
+retc:
+ // return index corresponding to chosen case
+ index = cas->index;
+ if(cas->sg.releasetime > 0)
+ runtime_blockevent(cas->sg.releasetime - t0, 2);
+ runtime_free(sel);
+ return index;
+
+sclose:
+ // send on closed channel
+ selunlock(sel);
+ runtime_panicstring("send on closed channel");
+ return 0; // not reached
+}
+
+// This struct must match ../reflect/value.go:/runtimeSelect.
+typedef struct runtimeSelect runtimeSelect;
+struct runtimeSelect
+{
+ uintptr dir;
+ ChanType *typ;
+ Hchan *ch;
+ byte *val;
+};
+
+// This enum must match ../reflect/value.go:/SelectDir.
+enum SelectDir {
+ SelectSend = 1,
+ SelectRecv,
+ SelectDefault,
+};
+
+func reflect.rselect(cases Slice) (chosen int, recvOK bool) {
+ int32 i;
+ Select *sel;
+ runtimeSelect* rcase, *rc;
+
+ chosen = -1;
+ recvOK = false;
+
+ rcase = (runtimeSelect*)cases.__values;
+
+ sel = newselect(cases.__count);
+ for(i=0; i<cases.__count; i++) {
+ rc = &rcase[i];
+ switch(rc->dir) {
+ case SelectDefault:
+ selectdefault(sel, i);
+ break;
+ case SelectSend:
+ if(rc->ch == nil)
+ break;
+ selectsend(sel, rc->ch, i, rc->val);
+ break;
+ case SelectRecv:
+ if(rc->ch == nil)
+ break;
+ selectrecv(sel, rc->ch, i, rc->val, &recvOK);
+ break;
+ }
+ }
+
+ chosen = (intgo)(uintptr)selectgo(&sel);
+}
+
+static void closechan(Hchan *c, void *pc);
+
+func closechan(c *Hchan) {
+ closechan(c, runtime_getcallerpc(&c));
+}
+
+func reflect.chanclose(c *Hchan) {
+ closechan(c, runtime_getcallerpc(&c));
+}
+
+static void
+closechan(Hchan *c, void *pc)
+{
+ SudoG *sg;
+ G* gp;
+
+ if(c == nil)
+ runtime_panicstring("close of nil channel");
+
+ if(runtime_gcwaiting())
+ runtime_gosched();
+
+ runtime_lock(c);
+ if(c->closed) {
+ runtime_unlock(c);
+ runtime_panicstring("close of closed channel");
+ }
+
+ if(raceenabled) {
+ runtime_racewritepc(c, pc, runtime_closechan);
+ runtime_racerelease(c);
+ }
+
+ c->closed = true;
+
+ // release all readers
+ for(;;) {
+ sg = dequeue(&c->recvq);
+ if(sg == nil)
+ break;
+ gp = sg->g;
+ gp->param = nil;
+ if(sg->releasetime)
+ sg->releasetime = runtime_cputicks();
+ runtime_ready(gp);
+ }
+
+ // release all writers
+ for(;;) {
+ sg = dequeue(&c->sendq);
+ if(sg == nil)
+ break;
+ gp = sg->g;
+ gp->param = nil;
+ if(sg->releasetime)
+ sg->releasetime = runtime_cputicks();
+ runtime_ready(gp);
+ }
+
+ runtime_unlock(c);
+}
+
+void
+__go_builtin_close(Hchan *c)
+{
+ runtime_closechan(c);
+}
+
+func reflect.chanlen(c *Hchan) (len int) {
+ if(c == nil)
+ len = 0;
+ else
+ len = c->qcount;
+}
+
+intgo
+__go_chan_len(Hchan *c)
+{
+ return reflect_chanlen(c);
+}
+
+func reflect.chancap(c *Hchan) (cap int) {
+ if(c == nil)
+ cap = 0;
+ else
+ cap = c->dataqsiz;
+}
+
+intgo
+__go_chan_cap(Hchan *c)
+{
+ return reflect_chancap(c);
+}
+
+static SudoG*
+dequeue(WaitQ *q)
+{
+ SudoG *sgp;
+
+loop:
+ sgp = q->first;
+ if(sgp == nil)
+ return nil;
+ q->first = sgp->link;
+
+ // if sgp participates in a select and is already signaled, ignore it
+ if(sgp->selectdone != nil) {
+ // claim the right to signal
+ if(*sgp->selectdone != 0 || !runtime_cas(sgp->selectdone, 0, 1))
+ goto loop;
+ }
+
+ return sgp;
+}
+
+static void
+dequeueg(WaitQ *q)
+{
+ SudoG **l, *sgp, *prevsgp;
+ G *g;
+
+ g = runtime_g();
+ prevsgp = nil;
+ for(l=&q->first; (sgp=*l) != nil; l=&sgp->link, prevsgp=sgp) {
+ if(sgp->g == g) {
+ *l = sgp->link;
+ if(q->last == sgp)
+ q->last = prevsgp;
+ break;
+ }
+ }
+}
+
+static void
+enqueue(WaitQ *q, SudoG *sgp)
+{
+ sgp->link = nil;
+ if(q->first == nil) {
+ q->first = sgp;
+ q->last = sgp;
+ return;
+ }
+ q->last->link = sgp;
+ q->last = sgp;
+}
+
+static void
+racesync(Hchan *c, SudoG *sg)
+{
+ runtime_racerelease(chanbuf(c, 0));
+ runtime_raceacquireg(sg->g, chanbuf(c, 0));
+ runtime_racereleaseg(sg->g, chanbuf(c, 0));
+ runtime_raceacquire(chanbuf(c, 0));
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+typedef struct WaitQ WaitQ;
+typedef struct SudoG SudoG;
+typedef struct Select Select;
+typedef struct Scase Scase;
+
+typedef struct __go_type_descriptor Type;
+typedef struct __go_channel_type ChanType;
+
+struct SudoG
+{
+ G* g;
+ uint32* selectdone;
+ SudoG* link;
+ int64 releasetime;
+ byte* elem; // data element
+};
+
+struct WaitQ
+{
+ SudoG* first;
+ SudoG* last;
+};
+
+// The garbage collector is assuming that Hchan can only contain pointers into the stack
+// and cannot contain pointers into the heap.
+struct Hchan
+{
+ uintgo qcount; // total data in the q
+ uintgo dataqsiz; // size of the circular q
+ uint16 elemsize;
+ uint16 pad; // ensures proper alignment of the buffer that follows Hchan in memory
+ bool closed;
+ const Type* elemtype; // element type
+ uintgo sendx; // send index
+ uintgo recvx; // receive index
+ WaitQ recvq; // list of recv waiters
+ WaitQ sendq; // list of send waiters
+ Lock;
+};
+
+// Buffer follows Hchan immediately in memory.
+// chanbuf(c, i) is pointer to the i'th slot in the buffer.
+#define chanbuf(c, i) ((byte*)((c)+1)+(uintptr)(c)->elemsize*(i))
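+// For example, chanbuf(c, c->recvx) is the slot the next receive reads
+// and chanbuf(c, c->sendx) is the slot the next send fills.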
+
+enum
+{
+ debug = 0,
+
+ // Scase.kind
+ CaseRecv,
+ CaseSend,
+ CaseDefault,
+};
+
+struct Scase
+{
+ SudoG sg; // must be first member (cast to Scase)
+ Hchan* chan; // chan
+ uint16 kind;
+ uint16 index; // index to return
+ bool* receivedp; // pointer to received bool (recv2)
+};
+
+struct Select
+{
+ uint16 tcase; // total count of scase[]
+ uint16 ncase; // currently filled scase[]
+ uint16* pollorder; // case poll order
+ Hchan** lockorder; // channel lock order
+ Scase scase[1]; // one per case (in order of appearance)
+};
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// CPU profiling.
-// Based on algorithms and data structures used in
-// http://code.google.com/p/google-perftools/.
-//
-// The main difference between this code and the google-perftools
-// code is that this code is written to allow copying the profile data
-// to an arbitrary io.Writer, while the google-perftools code always
-// writes to an operating system file.
-//
-// The signal handler for the profiling clock tick adds a new stack trace
-// to a hash table tracking counts for recent traces. Most clock ticks
-// hit in the cache. In the event of a cache miss, an entry must be
-// evicted from the hash table, copied to a log that will eventually be
-// written as profile data. The google-perftools code flushed the
-// log itself during the signal handler. This code cannot do that, because
-// the io.Writer might block or need system calls or locks that are not
-// safe to use from within the signal handler. Instead, we split the log
-// into two halves and let the signal handler fill one half while a goroutine
-// is writing out the other half. When the signal handler fills its half, it
-// offers to swap with the goroutine. If the writer is not done with its half,
-// we lose the stack trace for this clock tick (and record that loss).
-// The goroutine interacts with the signal handler by calling getprofile() to
-// get the next log piece to write, implicitly handing back the last log
-// piece it obtained.
-//
-// The state of this dance between the signal handler and the goroutine
-// is encoded in the Profile.handoff field. If handoff == 0, then the goroutine
-// is not using either log half and is waiting (or will soon be waiting) for
-// a new piece by calling notesleep(&p->wait). If the signal handler
-// changes handoff from 0 to non-zero, it must call notewakeup(&p->wait)
-// to wake the goroutine. The value indicates the number of entries in the
-// log half being handed off. The goroutine leaves the non-zero value in
-// place until it has finished processing the log half and then flips the number
-// back to zero. Setting the high bit in handoff means that the profiling is over,
-// and the goroutine is now in charge of flushing the data left in the hash table
-// to the log and returning that data.
-//
-// The handoff field is manipulated using atomic operations.
-// For the most part, the manipulation of handoff is orderly: if handoff == 0
-// then the signal handler owns it and can change it to non-zero.
-// If handoff != 0 then the goroutine owns it and can change it to zero.
-// If that were the end of the story then we would not need to manipulate
-// handoff using atomic operations. The operations are needed, however,
-// in order to let the log closer set the high bit to indicate "EOF" safely
-// in the situation when normally the goroutine "owns" handoff.
-
-#include "runtime.h"
-#include "arch.h"
-#include "malloc.h"
-
-#include "array.h"
-typedef struct __go_open_array Slice;
-#define array __values
-#define len __count
-#define cap __capacity
-
-enum
-{
- HashSize = 1<<10,
- LogSize = 1<<17,
- Assoc = 4,
- MaxStack = 64,
-};
-
-typedef struct Profile Profile;
-typedef struct Bucket Bucket;
-typedef struct Entry Entry;
-
-struct Entry {
- uintptr count;
- uintptr depth;
- uintptr stack[MaxStack];
-};
-
-struct Bucket {
- Entry entry[Assoc];
-};
-
-struct Profile {
- bool on; // profiling is on
- Note wait; // goroutine waits here
- uintptr count; // tick count
- uintptr evicts; // eviction count
- uintptr lost; // lost ticks that need to be logged
- uintptr totallost; // total lost ticks
-
- // Active recent stack traces.
- Bucket hash[HashSize];
-
- // Log of traces evicted from hash.
- // Signal handler has filled log[toggle][:nlog].
- // Goroutine is writing log[1-toggle][:handoff].
- uintptr log[2][LogSize/2];
- uintptr nlog;
- int32 toggle;
- uint32 handoff;
-
- // Writer state.
- // Writer maintains its own toggle to avoid races
- // looking at signal handler's toggle.
- uint32 wtoggle;
- bool wholding; // holding & need to release a log half
- bool flushing; // flushing hash table - profile is over
- bool eod_sent; // special end-of-data record sent; => flushing
-};
-
-static Lock lk;
-static Profile *prof;
-
-static void tick(uintptr*, int32);
-static void add(Profile*, uintptr*, int32);
-static bool evict(Profile*, Entry*);
-static bool flushlog(Profile*);
-
-static uintptr eod[3] = {0, 1, 0};
-
-// LostProfileData is a no-op function used in profiles
-// to mark the number of profiling stack traces that were
-// discarded due to slow data writers.
-static void
-LostProfileData(void)
-{
-}
-
-extern void runtime_SetCPUProfileRate(intgo)
- __asm__ (GOSYM_PREFIX "runtime.SetCPUProfileRate");
-
-// SetCPUProfileRate sets the CPU profiling rate.
-// The user documentation is in debug.go.
-void
-runtime_SetCPUProfileRate(intgo hz)
-{
- uintptr *p;
- uintptr n;
-
- // Clamp hz to something reasonable.
- if(hz < 0)
- hz = 0;
- if(hz > 1000000)
- hz = 1000000;
-
- runtime_lock(&lk);
- if(hz > 0) {
- if(prof == nil) {
- prof = runtime_SysAlloc(sizeof *prof, &mstats.other_sys);
- if(prof == nil) {
- runtime_printf("runtime: cpu profiling cannot allocate memory\n");
- runtime_unlock(&lk);
- return;
- }
- }
- if(prof->on || prof->handoff != 0) {
- runtime_printf("runtime: cannot set cpu profile rate until previous profile has finished.\n");
- runtime_unlock(&lk);
- return;
- }
-
- prof->on = true;
- p = prof->log[0];
- // pprof binary header format.
- // http://code.google.com/p/google-perftools/source/browse/trunk/src/profiledata.cc#117
- *p++ = 0; // count for header
- *p++ = 3; // depth for header
- *p++ = 0; // version number
- *p++ = 1000000 / hz; // period (microseconds)
- *p++ = 0;
- prof->nlog = p - prof->log[0];
- prof->toggle = 0;
- prof->wholding = false;
- prof->wtoggle = 0;
- prof->flushing = false;
- prof->eod_sent = false;
- runtime_noteclear(&prof->wait);
-
- runtime_setcpuprofilerate(tick, hz);
- } else if(prof != nil && prof->on) {
- runtime_setcpuprofilerate(nil, 0);
- prof->on = false;
-
- // Now add is not running anymore, and getprofile owns the entire log.
- // Set the high bit in prof->handoff to tell getprofile.
- for(;;) {
- n = prof->handoff;
- if(n&0x80000000)
- runtime_printf("runtime: setcpuprofile(off) twice");
- if(runtime_cas(&prof->handoff, n, n|0x80000000))
- break;
- }
- if(n == 0) {
- // we did the transition from 0 -> nonzero so we wake getprofile
- runtime_notewakeup(&prof->wait);
- }
- }
- runtime_unlock(&lk);
-}
-
-static void
-tick(uintptr *pc, int32 n)
-{
- add(prof, pc, n);
-}
-
-// add adds the stack trace to the profile.
-// It is called from signal handlers and other limited environments
-// and cannot allocate memory or acquire locks that might be
-// held at the time of the signal, nor can it use substantial amounts
-// of stack. It is allowed to call evict.
-static void
-add(Profile *p, uintptr *pc, int32 n)
-{
- int32 i, j;
- uintptr h, x;
- Bucket *b;
- Entry *e;
-
- if(n > MaxStack)
- n = MaxStack;
-
- // Compute hash.
- h = 0;
- for(i=0; i<n; i++) {
- h = h<<8 | (h>>(8*(sizeof(h)-1)));
- x = pc[i];
- h += x*31 + x*7 + x*3;
- }
- p->count++;
-
- // Add to entry count if already present in table.
- b = &p->hash[h%HashSize];
- for(i=0; i<Assoc; i++) {
- e = &b->entry[i];
- if(e->depth != (uintptr)n)
- continue;
- for(j=0; j<n; j++)
- if(e->stack[j] != pc[j])
- goto ContinueAssoc;
- e->count++;
- return;
- ContinueAssoc:;
- }
-
- // Evict entry with smallest count.
- e = &b->entry[0];
- for(i=1; i<Assoc; i++)
- if(b->entry[i].count < e->count)
- e = &b->entry[i];
- if(e->count > 0) {
- if(!evict(p, e)) {
- // Could not evict entry. Record lost stack.
- p->lost++;
- p->totallost++;
- return;
- }
- p->evicts++;
- }
-
- // Reuse the newly evicted entry.
- e->depth = n;
- e->count = 1;
- for(i=0; i<n; i++)
- e->stack[i] = pc[i];
-}
-
-// evict copies the given entry's data into the log, so that
-// the entry can be reused. evict is called from add, which
-// is called from the profiling signal handler, so it must not
-// allocate memory or block. It is safe to call flushLog.
-// evict returns true if the entry was copied to the log,
-// false if there was no room available.
-static bool
-evict(Profile *p, Entry *e)
-{
- int32 i, d, nslot;
- uintptr *log, *q;
-
- d = e->depth;
- nslot = d+2;
- log = p->log[p->toggle];
- if(p->nlog+nslot > nelem(p->log[0])) {
- if(!flushlog(p))
- return false;
- log = p->log[p->toggle];
- }
-
- q = log+p->nlog;
- *q++ = e->count;
- *q++ = d;
- for(i=0; i<d; i++)
- *q++ = e->stack[i];
- p->nlog = q - log;
- e->count = 0;
- return true;
-}
-
-// flushlog tries to flush the current log and switch to the other one.
-// flushlog is called from evict, called from add, called from the signal handler,
-// so it cannot allocate memory or block. It can try to swap logs with
-// the writing goroutine, as explained in the comment at the top of this file.
-static bool
-flushlog(Profile *p)
-{
- uintptr *log, *q;
-
- if(!runtime_cas(&p->handoff, 0, p->nlog))
- return false;
- runtime_notewakeup(&p->wait);
-
- p->toggle = 1 - p->toggle;
- log = p->log[p->toggle];
- q = log;
- if(p->lost > 0) {
- *q++ = p->lost;
- *q++ = 1;
- *q++ = (uintptr)LostProfileData;
- }
- p->nlog = q - log;
- return true;
-}
-
-// getprofile blocks until the next block of profiling data is available
-// and returns it as a []byte. It is called from the writing goroutine.
-Slice
-getprofile(Profile *p)
-{
- uint32 i, j, n;
- Slice ret;
- Bucket *b;
- Entry *e;
-
- ret.array = nil;
- ret.len = 0;
- ret.cap = 0;
-
- if(p == nil)
- return ret;
-
- if(p->wholding) {
- // Release previous log to signal handling side.
- // Loop because we are racing against SetCPUProfileRate(0).
- for(;;) {
- n = p->handoff;
- if(n == 0) {
- runtime_printf("runtime: phase error during cpu profile handoff\n");
- return ret;
- }
- if(n & 0x80000000) {
- p->wtoggle = 1 - p->wtoggle;
- p->wholding = false;
- p->flushing = true;
- goto flush;
- }
- if(runtime_cas(&p->handoff, n, 0))
- break;
- }
- p->wtoggle = 1 - p->wtoggle;
- p->wholding = false;
- }
-
- if(p->flushing)
- goto flush;
-
- if(!p->on && p->handoff == 0)
- return ret;
-
- // Wait for new log.
- runtime_notetsleepg(&p->wait, -1);
- runtime_noteclear(&p->wait);
-
- n = p->handoff;
- if(n == 0) {
- runtime_printf("runtime: phase error during cpu profile wait\n");
- return ret;
- }
- if(n == 0x80000000) {
- p->flushing = true;
- goto flush;
- }
- n &= ~0x80000000;
-
- // Return new log to caller.
- p->wholding = true;
-
- ret.array = (byte*)p->log[p->wtoggle];
- ret.len = n*sizeof(uintptr);
- ret.cap = ret.len;
- return ret;
-
-flush:
- // In flush mode.
- // Add is no longer being called. We own the log.
- // Also, p->handoff is non-zero, so flushlog will return false.
- // Evict the hash table into the log and return it.
- for(i=0; i<HashSize; i++) {
- b = &p->hash[i];
- for(j=0; j<Assoc; j++) {
- e = &b->entry[j];
- if(e->count > 0 && !evict(p, e)) {
- // Filled the log. Stop the loop and return what we've got.
- goto breakflush;
- }
- }
- }
-breakflush:
-
- // Return pending log data.
- if(p->nlog > 0) {
- // Note that we're using toggle now, not wtoggle,
- // because we're working on the log directly.
- ret.array = (byte*)p->log[p->toggle];
- ret.len = p->nlog*sizeof(uintptr);
- ret.cap = ret.len;
- p->nlog = 0;
- return ret;
- }
-
- // Made it through the table without finding anything to log.
- if(!p->eod_sent) {
- // We may not have space to append this to the partial log buf,
- // so we always return a new slice for the end-of-data marker.
- p->eod_sent = true;
- ret.array = (byte*)eod;
- ret.len = sizeof eod;
- ret.cap = ret.len;
- return ret;
- }
-
- // Finally done. Clean up and return nil.
- p->flushing = false;
- if(!runtime_cas(&p->handoff, p->handoff, 0))
- runtime_printf("runtime: profile flush racing with something\n");
- return ret; // set to nil at top of function
-}
-
-extern Slice runtime_CPUProfile(void)
- __asm__ (GOSYM_PREFIX "runtime.CPUProfile");
-
-// CPUProfile returns the next cpu profile block as a []byte.
-// The user documentation is in debug.go.
-Slice
-runtime_CPUProfile(void)
-{
- return getprofile(prof);
-}
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// CPU profiling.
+// Based on algorithms and data structures used in
+// http://code.google.com/p/google-perftools/.
+//
+// The main difference between this code and the google-perftools
+// code is that this code is written to allow copying the profile data
+// to an arbitrary io.Writer, while the google-perftools code always
+// writes to an operating system file.
+//
+// The signal handler for the profiling clock tick adds a new stack trace
+// to a hash table tracking counts for recent traces. Most clock ticks
+// hit in the cache. In the event of a cache miss, an entry must be
+// evicted from the hash table, copied to a log that will eventually be
+// written as profile data. The google-perftools code flushed the
+// log itself during the signal handler. This code cannot do that, because
+// the io.Writer might block or need system calls or locks that are not
+// safe to use from within the signal handler. Instead, we split the log
+// into two halves and let the signal handler fill one half while a goroutine
+// is writing out the other half. When the signal handler fills its half, it
+// offers to swap with the goroutine. If the writer is not done with its half,
+// we lose the stack trace for this clock tick (and record that loss).
+// The goroutine interacts with the signal handler by calling getprofile() to
+// get the next log piece to write, implicitly handing back the last log
+// piece it obtained.
+//
+// The state of this dance between the signal handler and the goroutine
+// is encoded in the Profile.handoff field. If handoff == 0, then the goroutine
+// is not using either log half and is waiting (or will soon be waiting) for
+// a new piece by calling notesleep(&p->wait). If the signal handler
+// changes handoff from 0 to non-zero, it must call notewakeup(&p->wait)
+// to wake the goroutine. The value indicates the number of entries in the
+// log half being handed off. The goroutine leaves the non-zero value in
+// place until it has finished processing the log half and then flips the number
+// back to zero. Setting the high bit in handoff means that the profiling is over,
+// and the goroutine is now in charge of flushing the data left in the hash table
+// to the log and returning that data.
+//
+// The handoff field is manipulated using atomic operations.
+// For the most part, the manipulation of handoff is orderly: if handoff == 0
+// then the signal handler owns it and can change it to non-zero.
+// If handoff != 0 then the goroutine owns it and can change it to zero.
+// If that were the end of the story then we would not need to manipulate
+// handoff using atomic operations. The operations are needed, however,
+// in order to let the log closer set the high bit to indicate "EOF" safely
+// in the situation when normally the goroutine "owns" handoff.
+
+package runtime
+#include "runtime.h"
+#include "arch.h"
+#include "malloc.h"
+
+#include "array.h"
+typedef struct __go_open_array Slice;
+#define array __values
+#define len __count
+#define cap __capacity
+
+enum
+{
+ HashSize = 1<<10,
+ LogSize = 1<<17,
+ Assoc = 4,
+ MaxStack = 64,
+};
+
+typedef struct Profile Profile;
+typedef struct Bucket Bucket;
+typedef struct Entry Entry;
+
+struct Entry {
+ uintptr count;
+ uintptr depth;
+ uintptr stack[MaxStack];
+};
+
+struct Bucket {
+ Entry entry[Assoc];
+};
+
+struct Profile {
+ bool on; // profiling is on
+ Note wait; // goroutine waits here
+ uintptr count; // tick count
+ uintptr evicts; // eviction count
+ uintptr lost; // lost ticks that need to be logged
+ uintptr totallost; // total lost ticks
+
+ // Active recent stack traces.
+ Bucket hash[HashSize];
+
+ // Log of traces evicted from hash.
+ // Signal handler has filled log[toggle][:nlog].
+ // Goroutine is writing log[1-toggle][:handoff].
+ uintptr log[2][LogSize/2];
+ uintptr nlog;
+ int32 toggle;
+ uint32 handoff;
+
+ // Writer state.
+ // Writer maintains its own toggle to avoid races
+ // looking at signal handler's toggle.
+ uint32 wtoggle;
+ bool wholding; // holding & need to release a log half
+ bool flushing; // flushing hash table - profile is over
+ bool eod_sent; // special end-of-data record sent; => flushing
+};
+
+static Lock lk;
+static Profile *prof;
+
+static void tick(uintptr*, int32);
+static void add(Profile*, uintptr*, int32);
+static bool evict(Profile*, Entry*);
+static bool flushlog(Profile*);
+
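+// eod is the pprof end-of-data marker: a record with count 0, depth 1,
+// and a single zero PC.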
+static uintptr eod[3] = {0, 1, 0};
+
+// LostProfileData is a no-op function used in profiles
+// to mark the number of profiling stack traces that were
+// discarded due to slow data writers.
+static void
+LostProfileData(void)
+{
+}
+
+extern void runtime_SetCPUProfileRate(intgo)
+ __asm__ (GOSYM_PREFIX "runtime.SetCPUProfileRate");
+
+// SetCPUProfileRate sets the CPU profiling rate.
+// The user documentation is in debug.go.
+void
+runtime_SetCPUProfileRate(intgo hz)
+{
+ uintptr *p;
+ uintptr n;
+
+ // Clamp hz to something reasonable.
+ if(hz < 0)
+ hz = 0;
+ if(hz > 1000000)
+ hz = 1000000;
+
+ runtime_lock(&lk);
+ if(hz > 0) {
+ if(prof == nil) {
+ prof = runtime_SysAlloc(sizeof *prof, &mstats.other_sys);
+ if(prof == nil) {
+ runtime_printf("runtime: cpu profiling cannot allocate memory\n");
+ runtime_unlock(&lk);
+ return;
+ }
+ }
+ if(prof->on || prof->handoff != 0) {
+ runtime_printf("runtime: cannot set cpu profile rate until previous profile has finished.\n");
+ runtime_unlock(&lk);
+ return;
+ }
+
+ prof->on = true;
+ p = prof->log[0];
+ // pprof binary header format.
+ // http://code.google.com/p/google-perftools/source/browse/trunk/src/profiledata.cc#117
+ *p++ = 0; // count for header
+ *p++ = 3; // depth for header
+ *p++ = 0; // version number
+ *p++ = 1000000 / hz; // period (microseconds)
+ *p++ = 0;
+ prof->nlog = p - prof->log[0];
+ prof->toggle = 0;
+ prof->wholding = false;
+ prof->wtoggle = 0;
+ prof->flushing = false;
+ prof->eod_sent = false;
+ runtime_noteclear(&prof->wait);
+
+ runtime_setcpuprofilerate(tick, hz);
+ } else if(prof != nil && prof->on) {
+ runtime_setcpuprofilerate(nil, 0);
+ prof->on = false;
+
+ // Now add is not running anymore, and getprofile owns the entire log.
+ // Set the high bit in prof->handoff to tell getprofile.
+ for(;;) {
+ n = prof->handoff;
+ if(n&0x80000000)
+				runtime_printf("runtime: setcpuprofile(off) twice\n");
+ if(runtime_cas(&prof->handoff, n, n|0x80000000))
+ break;
+ }
+ if(n == 0) {
+ // we did the transition from 0 -> nonzero so we wake getprofile
+ runtime_notewakeup(&prof->wait);
+ }
+ }
+ runtime_unlock(&lk);
+}
+
+static void
+tick(uintptr *pc, int32 n)
+{
+ add(prof, pc, n);
+}
+
+// add adds the stack trace to the profile.
+// It is called from signal handlers and other limited environments
+// and cannot allocate memory or acquire locks that might be
+// held at the time of the signal, nor can it use substantial amounts
+// of stack. It is allowed to call evict.
+static void
+add(Profile *p, uintptr *pc, int32 n)
+{
+ int32 i, j;
+ uintptr h, x;
+ Bucket *b;
+ Entry *e;
+
+ if(n > MaxStack)
+ n = MaxStack;
+
+ // Compute hash.
+ h = 0;
+ for(i=0; i<n; i++) {
+ h = h<<8 | (h>>(8*(sizeof(h)-1)));
+ x = pc[i];
+ h += x*31 + x*7 + x*3;
+ }
+ p->count++;
+
+ // Add to entry count if already present in table.
+ b = &p->hash[h%HashSize];
+ for(i=0; i<Assoc; i++) {
+ e = &b->entry[i];
+ if(e->depth != (uintptr)n)
+ continue;
+ for(j=0; j<n; j++)
+ if(e->stack[j] != pc[j])
+ goto ContinueAssoc;
+ e->count++;
+ return;
+ ContinueAssoc:;
+ }
+
+ // Evict entry with smallest count.
+ e = &b->entry[0];
+ for(i=1; i<Assoc; i++)
+ if(b->entry[i].count < e->count)
+ e = &b->entry[i];
+ if(e->count > 0) {
+ if(!evict(p, e)) {
+ // Could not evict entry. Record lost stack.
+ p->lost++;
+ p->totallost++;
+ return;
+ }
+ p->evicts++;
+ }
+
+ // Reuse the newly evicted entry.
+ e->depth = n;
+ e->count = 1;
+ for(i=0; i<n; i++)
+ e->stack[i] = pc[i];
+}
+
+// evict copies the given entry's data into the log, so that
+// the entry can be reused. evict is called from add, which
+// is called from the profiling signal handler, so it must not
+// allocate memory or block. It is safe to call flushLog.
+// evict returns true if the entry was copied to the log,
+// false if there was no room available.
+static bool
+evict(Profile *p, Entry *e)
+{
+ int32 i, d, nslot;
+ uintptr *log, *q;
+
+ d = e->depth;
+ nslot = d+2;
+ log = p->log[p->toggle];
+ if(p->nlog+nslot > nelem(p->log[0])) {
+ if(!flushlog(p))
+ return false;
+ log = p->log[p->toggle];
+ }
+
+ q = log+p->nlog;
+ *q++ = e->count;
+ *q++ = d;
+ for(i=0; i<d; i++)
+ *q++ = e->stack[i];
+ p->nlog = q - log;
+ e->count = 0;
+ return true;
+}
+
+// flushlog tries to flush the current log and switch to the other one.
+// flushlog is called from evict, called from add, called from the signal handler,
+// so it cannot allocate memory or block. It can try to swap logs with
+// the writing goroutine, as explained in the comment at the top of this file.
+static bool
+flushlog(Profile *p)
+{
+ uintptr *log, *q;
+
+ if(!runtime_cas(&p->handoff, 0, p->nlog))
+ return false;
+ runtime_notewakeup(&p->wait);
+
+ p->toggle = 1 - p->toggle;
+ log = p->log[p->toggle];
+ q = log;
+ if(p->lost > 0) {
+ *q++ = p->lost;
+ *q++ = 1;
+ *q++ = (uintptr)LostProfileData;
+ }
+ p->nlog = q - log;
+ return true;
+}
+
+// getprofile blocks until the next block of profiling data is available
+// and returns it as a []byte. It is called from the writing goroutine.
+Slice
+getprofile(Profile *p)
+{
+ uint32 i, j, n;
+ Slice ret;
+ Bucket *b;
+ Entry *e;
+
+ ret.array = nil;
+ ret.len = 0;
+ ret.cap = 0;
+
+ if(p == nil)
+ return ret;
+
+ if(p->wholding) {
+ // Release previous log to signal handling side.
+ // Loop because we are racing against SetCPUProfileRate(0).
+ for(;;) {
+ n = p->handoff;
+ if(n == 0) {
+ runtime_printf("runtime: phase error during cpu profile handoff\n");
+ return ret;
+ }
+ if(n & 0x80000000) {
+ p->wtoggle = 1 - p->wtoggle;
+ p->wholding = false;
+ p->flushing = true;
+ goto flush;
+ }
+ if(runtime_cas(&p->handoff, n, 0))
+ break;
+ }
+ p->wtoggle = 1 - p->wtoggle;
+ p->wholding = false;
+ }
+
+ if(p->flushing)
+ goto flush;
+
+ if(!p->on && p->handoff == 0)
+ return ret;
+
+ // Wait for new log.
+ runtime_notetsleepg(&p->wait, -1);
+ runtime_noteclear(&p->wait);
+
+ n = p->handoff;
+ if(n == 0) {
+ runtime_printf("runtime: phase error during cpu profile wait\n");
+ return ret;
+ }
+ if(n == 0x80000000) {
+ p->flushing = true;
+ goto flush;
+ }
+ n &= ~0x80000000;
+
+ // Return new log to caller.
+ p->wholding = true;
+
+ ret.array = (byte*)p->log[p->wtoggle];
+ ret.len = n*sizeof(uintptr);
+ ret.cap = ret.len;
+ return ret;
+
+flush:
+ // In flush mode.
+ // Add is no longer being called. We own the log.
+ // Also, p->handoff is non-zero, so flushlog will return false.
+ // Evict the hash table into the log and return it.
+ for(i=0; i<HashSize; i++) {
+ b = &p->hash[i];
+ for(j=0; j<Assoc; j++) {
+ e = &b->entry[j];
+ if(e->count > 0 && !evict(p, e)) {
+ // Filled the log. Stop the loop and return what we've got.
+ goto breakflush;
+ }
+ }
+ }
+breakflush:
+
+ // Return pending log data.
+ if(p->nlog > 0) {
+ // Note that we're using toggle now, not wtoggle,
+ // because we're working on the log directly.
+ ret.array = (byte*)p->log[p->toggle];
+ ret.len = p->nlog*sizeof(uintptr);
+ ret.cap = ret.len;
+ p->nlog = 0;
+ return ret;
+ }
+
+ // Made it through the table without finding anything to log.
+ if(!p->eod_sent) {
+ // We may not have space to append this to the partial log buf,
+ // so we always return a new slice for the end-of-data marker.
+ p->eod_sent = true;
+ ret.array = (byte*)eod;
+ ret.len = sizeof eod;
+ ret.cap = ret.len;
+ return ret;
+ }
+
+ // Finally done. Clean up and return nil.
+ p->flushing = false;
+ if(!runtime_cas(&p->handoff, p->handoff, 0))
+ runtime_printf("runtime: profile flush racing with something\n");
+ return ret; // set to nil at top of function
+}
+
+// CPUProfile returns the next cpu profile block as a []byte.
+// The user documentation is in debug.go.
+func CPUProfile() (ret Slice) {
+ ret = getprofile(prof);
+}
__go_panic (e);
}
-
-/* Return the number of CGO calls. */
-
-int64 runtime_NumCgoCall (void) __asm__ (GOSYM_PREFIX "runtime.NumCgoCall");
-
-int64
-runtime_NumCgoCall (void)
-{
- int64 ret;
- M* m;
-
- ret = 0;
- for (m = runtime_atomicloadp (&runtime_allm); m != NULL; m = m->alllink)
- ret += m->ncgocall;
- return ret;
-}
+++ /dev/null
-/* go-getgoroot.c -- getgoroot function for runtime package.
-
- Copyright 2010 The Go Authors. All rights reserved.
- Use of this source code is governed by a BSD-style
- license that can be found in the LICENSE file. */
-
-#include <stdlib.h>
-
-#include "runtime.h"
-
-String getgoroot (void) __asm__ (GOSYM_PREFIX "runtime.getgoroot");
-
-String
-getgoroot ()
-{
- const char *p;
- String ret;
-
- p = getenv ("GOROOT");
- ret.str = (const byte *) p;
- if (ret.str == NULL)
- ret.len = 0;
- else
- ret.len = __builtin_strlen (p);
- return ret;
-}
+++ /dev/null
-/* go-typestring.c -- the runtime.typestring function.
-
- Copyright 2010 The Go Authors. All rights reserved.
- Use of this source code is governed by a BSD-style
- license that can be found in the LICENSE file. */
-
-#include "runtime.h"
-#include "interface.h"
-#include "go-type.h"
-
-String typestring(struct __go_empty_interface) __asm__ (GOSYM_PREFIX "runtime.typestring");
-
-String
-typestring (struct __go_empty_interface e)
-{
- return *e.__type_descriptor->__reflection;
-}
package = read_package();
read_preprocessor_lines();
while (read_func_header(&name, ¶ms, &rets)) {
- write_func_header(package, name, params, rets);
+ char *p;
+ char *pkg;
+ char *nm;
+
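+	/* A name of the form "pkg.sym" carries its own package prefix;
+	   split it so the generated C function is emitted for that
+	   package rather than the file's. */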
+ p = strchr(name, '.');
+ if (p == NULL) {
+ pkg = package;
+ nm = name;
+ } else {
+ pkg = name;
+ nm = p + 1;
+ *p = '\0';
+ }
+ write_func_header(pkg, nm, params, rets);
copy_body();
- write_func_trailer(package, name, rets);
+ write_func_trailer(pkg, nm, rets);
free(name);
free_params(params);
free_params(rets);
+++ /dev/null
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Lock-free stack.
-
-#include "runtime.h"
-#include "arch.h"
-
-#if __SIZEOF_POINTER__ == 8
-// Amd64 uses 48-bit virtual addresses, 47-th bit is used as kernel/user flag.
-// So we use 17msb of pointers as ABA counter.
-# define PTR_BITS 47
-#else
-# define PTR_BITS 32
-#endif
-#define PTR_MASK ((1ull<<PTR_BITS)-1)
-#define CNT_MASK (0ull-1)
-
-#if __SIZEOF_POINTER__ == 8 && (defined(__sparc__) || (defined(__sun__) && defined(__amd64__)))
-// SPARC64 and Solaris on AMD64 uses all 64 bits of virtual addresses.
-// Use low-order three bits as ABA counter.
-// http://docs.oracle.com/cd/E19120-01/open.solaris/816-5138/6mba6ua5p/index.html
-#undef PTR_BITS
-#undef CNT_MASK
-#undef PTR_MASK
-#define PTR_BITS 0
-#define CNT_MASK 7
-#define PTR_MASK ((0ull-1)<<3)
-#endif
-
-void
-runtime_lfstackpush(uint64 *head, LFNode *node)
-{
- uint64 old, new;
-
- if((uintptr)node != ((uintptr)node&PTR_MASK)) {
- runtime_printf("p=%p\n", node);
- runtime_throw("runtime_lfstackpush: invalid pointer");
- }
-
- node->pushcnt++;
- new = (uint64)(uintptr)node|(((uint64)node->pushcnt&CNT_MASK)<<PTR_BITS);
- for(;;) {
- old = runtime_atomicload64(head);
- node->next = (LFNode*)(uintptr)(old&PTR_MASK);
- if(runtime_cas64(head, old, new))
- break;
- }
-}
-
-LFNode*
-runtime_lfstackpop(uint64 *head)
-{
- LFNode *node, *node2;
- uint64 old, new;
-
- for(;;) {
- old = runtime_atomicload64(head);
- if(old == 0)
- return nil;
- node = (LFNode*)(uintptr)(old&PTR_MASK);
- node2 = runtime_atomicloadp(&node->next);
- new = 0;
- if(node2 != nil)
- new = (uint64)(uintptr)node2|(((uint64)node2->pushcnt&CNT_MASK)<<PTR_BITS);
- if(runtime_cas64(head, old, new))
- return node;
- }
-}
-
-LFNode* runtime_lfstackpop2(uint64*)
- __asm__ (GOSYM_PREFIX "runtime.lfstackpop2");
-
-LFNode*
-runtime_lfstackpop2(uint64 *head)
-{
- return runtime_lfstackpop(head);
-}
--- /dev/null
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Lock-free stack.
+
+package runtime
+#include "runtime.h"
+#include "arch.h"
+
+#if __SIZEOF_POINTER__ == 8
+// Amd64 uses 48-bit virtual addresses; the 47th bit is used as the kernel/user flag.
+// So we use the 17 most-significant bits of pointers as the ABA counter.
+# define PTR_BITS 47
+#else
+# define PTR_BITS 32
+#endif
+#define PTR_MASK ((1ull<<PTR_BITS)-1)
+#define CNT_MASK (0ull-1)
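+// A packed head word is ((node->pushcnt&CNT_MASK)<<PTR_BITS) | (uint64)(uintptr)node,
+// so with PTR_BITS == 47 the top 17 bits carry the ABA counter.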
+
+#if __SIZEOF_POINTER__ == 8 && (defined(__sparc__) || (defined(__sun__) && defined(__amd64__)))
+// SPARC64 and Solaris on AMD64 use all 64 bits of virtual addresses.
+// Use low-order three bits as ABA counter.
+// http://docs.oracle.com/cd/E19120-01/open.solaris/816-5138/6mba6ua5p/index.html
+#undef PTR_BITS
+#undef CNT_MASK
+#undef PTR_MASK
+#define PTR_BITS 0
+#define CNT_MASK 7
+#define PTR_MASK ((0ull-1)<<3)
+#endif
+
+void
+runtime_lfstackpush(uint64 *head, LFNode *node)
+{
+ uint64 old, new;
+
+ if((uintptr)node != ((uintptr)node&PTR_MASK)) {
+ runtime_printf("p=%p\n", node);
+ runtime_throw("runtime_lfstackpush: invalid pointer");
+ }
+
+ node->pushcnt++;
+ new = (uint64)(uintptr)node|(((uint64)node->pushcnt&CNT_MASK)<<PTR_BITS);
+ for(;;) {
+ old = runtime_atomicload64(head);
+ node->next = (LFNode*)(uintptr)(old&PTR_MASK);
+ if(runtime_cas64(head, old, new))
+ break;
+ }
+}
+
+LFNode*
+runtime_lfstackpop(uint64 *head)
+{
+ LFNode *node, *node2;
+ uint64 old, new;
+
+ for(;;) {
+ old = runtime_atomicload64(head);
+ if(old == 0)
+ return nil;
+ node = (LFNode*)(uintptr)(old&PTR_MASK);
+ node2 = runtime_atomicloadp(&node->next);
+ new = 0;
+ if(node2 != nil)
+ new = (uint64)(uintptr)node2|(((uint64)node2->pushcnt&CNT_MASK)<<PTR_BITS);
+ if(runtime_cas64(head, old, new))
+ return node;
+ }
+}
+
+func lfstackpush_go(head *uint64, node *LFNode) {
+ runtime_lfstackpush(head, node);
+}
+
+func lfstackpop_go(head *uint64) (node *LFNode) {
+ node = runtime_lfstackpop(head);
+}
return runtime_mallocgc(n, 0, 0);
}
-void *
-runtime_new(const Type *typ)
-{
- return runtime_mallocgc(typ->__size, (uintptr)typ | TypeInfo_SingleObject, typ->kind&KindNoPointers ? FlagNoScan : 0);
+func new(typ *Type) (ret *uint8) {
+ ret = runtime_mallocgc(typ->__size, (uintptr)typ | TypeInfo_SingleObject, typ->kind&KindNoPointers ? FlagNoScan : 0);
}
static void*
void runtime_gc_itab_ptr(Eface*);
void runtime_memorydump(void);
+int32 runtime_setgcpercent(int32);
void runtime_proc_scan(struct Workbuf**, void (*)(struct Workbuf**, Obj));
void runtime_time_scan(struct Workbuf**, void (*)(struct Workbuf**, Obj));
#endif
default:
+ runtime_printf("runtime: invalid GC instruction %p at %p\n", pc[0], pc);
runtime_throw("scanblock: invalid GC instruction");
return;
}
pauses->__count = n+3;
}
-intgo runtime_debug_setGCPercent(intgo)
- __asm__("runtime_debug.setGCPercent");
-
-intgo
-runtime_debug_setGCPercent(intgo in)
-{
- intgo out;
+int32
+runtime_setgcpercent(int32 in)
+{
+ int32 out;
runtime_lock(&runtime_mheap);
if(gcpercent == GcpercentUnknown)
return desc;
}
-// For testing from Go
-// func parforalloc2(nthrmax uint32) *ParFor
-
-ParFor *runtime_parforalloc2(uint32)
- __asm__ (GOSYM_PREFIX "runtime.parforalloc2");
-
-ParFor *
-runtime_parforalloc2(uint32 nthrmax)
-{
- return runtime_parforalloc(nthrmax);
-}
-
void
runtime_parforsetup(ParFor *desc, uint32 nthr, uint32 n, void *ctx, bool wait, void (*body)(ParFor*, uint32))
{
}
}
-// For testing from Go
-// func parforsetup2(desc *ParFor, nthr, n uint32, ctx *byte, wait bool, body func(*ParFor, uint32))
-
-void runtime_parforsetup2(ParFor *, uint32, uint32, void *, bool, void *)
- __asm__ (GOSYM_PREFIX "runtime.parforsetup2");
-
-void
-runtime_parforsetup2(ParFor *desc, uint32 nthr, uint32 n, void *ctx, bool wait, void *body)
-{
- runtime_parforsetup(desc, nthr, n, ctx, wait, *(void(**)(ParFor*, uint32))body);
-}
-
void
runtime_parfordo(ParFor *desc)
{
me->nsleep = 0;
}
-// For testing from Go
-// func parforiters(desc *ParFor, tid uintptr) (uintptr, uintptr)
-
-struct parforiters_ret {
- uintptr start;
- uintptr end;
-};
-
-struct parforiters_ret runtime_parforiters(ParFor *, uintptr)
- __asm__ (GOSYM_PREFIX "runtime.parforiters");
-
-struct parforiters_ret
-runtime_parforiters(ParFor *desc, uintptr tid)
+// For testing from Go.
+void
+runtime_parforiters(ParFor *desc, uintptr tid, uintptr *start, uintptr *end)
{
- struct parforiters_ret ret;
-
- ret.start = (uint32)desc->thr[tid].pos;
- ret.end = (uint32)(desc->thr[tid].pos>>32);
- return ret;
+ *start = (uint32)desc->thr[tid].pos;
+ *end = (uint32)(desc->thr[tid].pos>>32);
}
void
runtime_printpointer(void *p)
{
- runtime_printhex((uint64)(uintptr)p);
+ runtime_printhex((uintptr)p);
}
void
return g->lockedm != nil && m->lockedg != nil;
}
-// for testing of callbacks
-
-_Bool runtime_golockedOSThread(void)
- __asm__ (GOSYM_PREFIX "runtime.golockedOSThread");
-
-_Bool
-runtime_golockedOSThread(void)
-{
- return runtime_lockedOSThread();
-}
-
-intgo runtime_NumGoroutine (void)
- __asm__ (GOSYM_PREFIX "runtime.NumGoroutine");
-
-intgo
-runtime_NumGoroutine()
-{
- return runtime_gcount();
-}
-
int32
runtime_gcount(void)
{
}
}
-intgo runtime_debug_setMaxThreads(intgo)
- __asm__(GOSYM_PREFIX "runtime_debug.setMaxThreads");
-
-intgo
-runtime_debug_setMaxThreads(intgo in)
+int32
+runtime_setmaxthreads(int32 in)
{
- intgo out;
+ int32 out;
runtime_lock(&runtime_sched);
out = runtime_sched.maxmcount;
{
return runtime_sched.gcwaiting;
}
-
-// func runtime_procPin() int
-
-intgo sync_runtime_procPin(void)
- __asm__(GOSYM_PREFIX "sync.runtime_procPin");
-
-intgo
-sync_runtime_procPin()
-{
- M *mp;
-
- mp = m;
- // Disable preemption.
- mp->locks++;
- return mp->p->id;
-}
-
-// func runtime_procUnpin()
-
-void sync_runtime_procUnpin(void)
- __asm__ (GOSYM_PREFIX "sync.runtime_procUnpin");
-
-void
-sync_runtime_procUnpin(void)
-{
- m->locks--;
-}
--- /dev/null
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime_debug
+#include "runtime.h"
+#include "arch.h"
+#include "malloc.h"
+
+func setMaxStack(in int) (out int) {
+ out = runtime_maxstacksize;
+ runtime_maxstacksize = in;
+}
+
+func setGCPercent(in int) (out int) {
+ out = runtime_setgcpercent(in);
+}
+
+func setMaxThreads(in int) (out int) {
+ out = runtime_setmaxthreads(in);
+}
#include "runtime.h"
#include "array.h"
-#include "go-panic.h"
// The GOTRACEBACK environment variable controls the
// behavior of a Go program that is crashing and exiting.
return res;
}
-int64 runtime_pprof_runtime_cyclesPerSecond(void)
- __asm__ (GOSYM_PREFIX "runtime_pprof.runtime_cyclesPerSecond");
-
-int64
-runtime_pprof_runtime_cyclesPerSecond(void)
-{
- return runtime_tickspersecond();
-}
-
// Called to initialize a new m (including the bootstrap m).
// Called on the parent thread (main thread in case of bootstrap), can allocate memory.
void
uintptr runtime_maxstacksize = 1<<20; // enough until runtime.main sets it for real
-intgo runtime_debug_setMaxStack(intgo)
- __asm__ (GOSYM_PREFIX "runtime_debug.setMaxStack");
-
-intgo
-runtime_debug_setMaxStack(intgo in)
-{
- intgo out;
-
- out = runtime_maxstacksize;
- runtime_maxstacksize = in;
- return out;
-}
-
void memclrBytes(Slice)
__asm__ (GOSYM_PREFIX "runtime.memclrBytes");
#define nelem(x) (sizeof(x)/sizeof((x)[0]))
#define nil ((void*)0)
#define USED(v) ((void) v)
-#define ROUND(x, n) (((x)+(n)-1)&~((n)-1)) /* all-caps to mark as macro: it evaluates n twice */
+#define ROUND(x, n) (((x)+(n)-1)&~(uintptr)((n)-1)) /* all-caps to mark as macro: it evaluates n twice */
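+/* e.g. ROUND(13, 8) == 16; the uintptr cast keeps ~((n)-1) full-width
+   instead of zero-extending when n is a 32-bit unsigned expression */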
byte* runtime_startup_random_data;
uint32 runtime_startup_random_data_len;
extern int32 runtime_ncpu;
extern void (*runtime_sysargs)(int32, uint8**);
extern DebugVars runtime_debug;
+extern uintptr runtime_maxstacksize;
/*
* common functions and data
intgo runtime_findnullw(const uint16*);
void runtime_dump(byte*, int32);
-/*
- * very low level c-called
- */
void runtime_gogo(G*);
struct __go_func_type;
void runtime_args(int32, byte**);
void runtime_parsedebugvars(void);
void _rt0_go(void);
void* runtime_funcdata(Func*, int32);
+int32 runtime_setmaxthreads(int32);
void runtime_stoptheworld(void);
void runtime_starttheworld(void);
*/
ParFor* runtime_parforalloc(uint32 nthrmax);
void runtime_parforsetup(ParFor *desc, uint32 nthr, uint32 n, void *ctx, bool wait, void (*body)(ParFor*, uint32));
-void runtime_parfordo(ParFor *desc) __asm__ (GOSYM_PREFIX "runtime.parfordo");
+void runtime_parfordo(ParFor *desc);
+void runtime_parforiters(ParFor*, uintptr, uintptr*, uintptr*);
/*
* low level C-called
void runtime_osyield(void);
void runtime_lockOSThread(void);
void runtime_unlockOSThread(void);
+bool runtime_lockedOSThread(void);
bool runtime_showframe(String, bool);
void runtime_printcreatedby(G*);
package runtime
#include "runtime.h"
+#include "arch.h"
+#include "go-type.h"
func GOMAXPROCS(n int) (ret int) {
ret = runtime_gomaxprocsfunc(n);
func NumCPU() (ret int) {
ret = runtime_ncpu;
}
+
+func NumCgoCall() (ret int64) {
+ M *mp;
+
+ ret = 0;
+ for(mp=runtime_atomicloadp(&runtime_allm); mp; mp=mp->alllink)
+ ret += mp->ncgocall;
+}
+
+func newParFor(nthrmax uint32) (desc *ParFor) {
+ desc = runtime_parforalloc(nthrmax);
+}
+
+func parForSetup(desc *ParFor, nthr uint32, n uint32, ctx *byte, wait bool, body *byte) {
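+	// body is a Go func value: a pointer whose target's first word is
+	// the C function pointer, hence the dereferencing cast.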
+ runtime_parforsetup(desc, nthr, n, ctx, wait, *(void(**)(ParFor*, uint32))body);
+}
+
+func parForDo(desc *ParFor) {
+ runtime_parfordo(desc);
+}
+
+func parForIters(desc *ParFor, tid uintptr) (start uintptr, end uintptr) {
+ runtime_parforiters(desc, tid, &start, &end);
+}
+
+func typestring(e Eface) (s String) {
+ s = *e.__type_descriptor->__reflection;
+}
+
+func golockedOSThread() (ret bool) {
+ ret = runtime_lockedOSThread();
+}
+
+func NumGoroutine() (ret int) {
+ ret = runtime_gcount();
+}
+
+func getgoroot() (out String) {
+ const byte *p;
+
+ p = runtime_getenv("GOROOT");
+ out = runtime_gostringnocopy(p);
+}
+
+func runtime_pprof.runtime_cyclesPerSecond() (res int64) {
+ res = runtime_tickspersecond();
+}
+
+func sync.runtime_procPin() (p int) {
+ M *mp;
+
+ mp = runtime_m();
+ // Disable preemption.
+ mp->locks++;
+ p = mp->p->id;
+}
+
+func sync.runtime_procUnpin() {
+ runtime_m()->locks--;
+}
return s;
}
-String runtime_cstringToGo(byte*)
- __asm__ (GOSYM_PREFIX "runtime.cstringToGo");
-
-String
-runtime_cstringToGo(byte *str)
-{
- return runtime_gostringnocopy(str);
+func cstringToGo(str *byte) (s String) {
+ s = runtime_gostringnocopy(str);
}
enum