-2431267d513804a3b1aa71adde9aefba9e3c3c59
+9401e714d690e3907a64ac5c8cd5aed9e28f511b
The first line of this file holds the git revision number of the last
merge done from the gofrontend repository.
break;
case Runtime::MAKECHAN:
- case Runtime::MAKECHANBIG:
case Runtime::MAKEMAP:
case Runtime::MAKESLICE1:
case Runtime::MAKESLICE2:
break;
case Runtime::MAKECHAN:
- case Runtime::MAKECHANBIG:
case Runtime::MAKEMAP:
case Runtime::MAKESLICE1:
case Runtime::MAKESLICE2:
}
case Runtime::MAKECHAN:
- case Runtime::MAKECHANBIG:
case Runtime::MAKEMAP:
case Runtime::MAKESLICE1:
case Runtime::MAKESLICE2:
break;
case Runtime::MAKECHAN:
- case Runtime::MAKECHANBIG:
case Runtime::MAKEMAP:
case Runtime::MAKESLICE1:
case Runtime::MAKESLICE2:
|| et->channel_type() != NULL
|| et->map_type() != NULL
|| et->function_type() != NULL
+ || et->integer_type() != NULL
|| et->is_nil_type());
else if (et->is_unsafe_pointer_type())
go_assert(t->points_to() != NULL);
break;
case BUILTIN_LEN:
+ case BUILTIN_CAP:
Expression_list::iterator pa = this->args()->begin();
if (!(*pa)->is_variable()
&& ((*pa)->type()->map_type() != NULL
Expression::make_nil(loc),
Expression::make_nil(loc));
else if (is_chan)
- call = Runtime::make_call((have_big_args
- ? Runtime::MAKECHANBIG
- : Runtime::MAKECHAN),
- loc, 2, type_arg, len_arg);
+ call = Runtime::make_call(Runtime::MAKECHAN, loc, 2, type_arg, len_arg);
else
go_unreachable();
this->seen_ = false;
}
else if (arg_type->channel_type() != NULL)
- val = Runtime::make_call(Runtime::CHAN_CAP, location, 1, arg);
+ {
+ // The second field is the capacity. If the pointer
+ // is nil, the capacity is zero.
+ Type* uintptr_type = Type::lookup_integer_type("uintptr");
+ Type* pint_type = Type::make_pointer_type(int_type);
+ Expression* parg = Expression::make_unsafe_cast(uintptr_type,
+ arg,
+ location);
+ int off = int_type->integer_type()->bits() / 8;
+ Expression* eoff = Expression::make_integer_ul(off,
+ uintptr_type,
+ location);
+ parg = Expression::make_binary(OPERATOR_PLUS, parg, eoff,
+ location);
+ parg = Expression::make_unsafe_cast(pint_type, parg, location);
+ Expression* nil = Expression::make_nil(location);
+ nil = Expression::make_cast(pint_type, nil, location);
+ Expression* cmp = Expression::make_binary(OPERATOR_EQEQ,
+ arg, nil, location);
+ Expression* zero = Expression::make_integer_ul(0, int_type,
+ location);
+ Expression* indir = Expression::make_unary(OPERATOR_MULT,
+ parg, location);
+ val = Expression::make_conditional(cmp, zero, indir, location);
+ }
else
go_unreachable();
}
Expression* recv_addr =
Expression::make_temporary_reference(this->temp_receiver_, loc);
recv_addr = Expression::make_unary(OPERATOR_AND, recv_addr, loc);
- Expression* recv =
- Runtime::make_call(Runtime::RECEIVE, loc, 3,
- td, this->channel_, recv_addr);
+ Expression* recv = Runtime::make_call(Runtime::CHANRECV1, loc, 3,
+ td, this->channel_, recv_addr);
return Expression::make_compound(recv, recv_ref, loc)->get_backend(context);
}
// Make a channel.
-DEF_GO_RUNTIME(MAKECHAN, "__go_new_channel", P2(TYPE, UINTPTR), R1(CHAN))
-DEF_GO_RUNTIME(MAKECHANBIG, "__go_new_channel_big", P2(TYPE, UINT64), R1(CHAN))
+DEF_GO_RUNTIME(MAKECHAN, "runtime.makechan", P2(TYPE, INT64), R1(CHAN))
-// Get the capacity of a channel (the size of the buffer).
-DEF_GO_RUNTIME(CHAN_CAP, "__go_chan_cap", P1(CHAN), R1(INT))
-
-// Send a small value on a channel.
-DEF_GO_RUNTIME(SEND_SMALL, "__go_send_small", P3(TYPE, CHAN, UINT64), R0())
-
-// Send a big value on a channel.
-DEF_GO_RUNTIME(SEND_BIG, "__go_send_big", P3(TYPE, CHAN, POINTER), R0())
+// Send a value on a channel.
+DEF_GO_RUNTIME(CHANSEND, "runtime.chansend1", P3(TYPE, CHAN, POINTER), R0())
// Receive a value from a channel.
-DEF_GO_RUNTIME(RECEIVE, "__go_receive", P3(TYPE, CHAN, POINTER), R0())
+DEF_GO_RUNTIME(CHANRECV1, "runtime.chanrecv1", P3(TYPE, CHAN, POINTER), R0())
// Receive a value from a channel returning whether it is closed.
DEF_GO_RUNTIME(CHANRECV2, "runtime.chanrecv2", P3(TYPE, CHAN, POINTER),
// Start building a select statement.
-DEF_GO_RUNTIME(NEWSELECT, "runtime.newselect", P1(INT32), R1(POINTER))
+DEF_GO_RUNTIME(NEWSELECT, "runtime.newselect", P3(POINTER, INT64, INT32), R0())
// Add a default clause to a select statement.
DEF_GO_RUNTIME(SELECTDEFAULT, "runtime.selectdefault",
// Close.
-DEF_GO_RUNTIME(CLOSE, "__go_builtin_close", P1(CHAN), R0())
+DEF_GO_RUNTIME(CLOSE, "runtime.closechan", P1(CHAN), R0())
// Copy.
element_type,
this->val_, loc);
- bool is_small;
bool can_take_address;
switch (element_type->base()->classification())
{
case Type::TYPE_POINTER:
case Type::TYPE_MAP:
case Type::TYPE_CHANNEL:
- is_small = true;
- can_take_address = false;
- break;
-
case Type::TYPE_FLOAT:
case Type::TYPE_COMPLEX:
case Type::TYPE_STRING:
case Type::TYPE_INTERFACE:
- is_small = false;
can_take_address = false;
break;
case Type::TYPE_STRUCT:
- is_small = false;
can_take_address = true;
break;
case Type::TYPE_ARRAY:
- is_small = false;
can_take_address = !element_type->is_slice_type();
break;
Expression* td = Expression::make_type_descriptor(this->channel_->type(),
loc);
- Runtime::Function code;
Bstatement* btemp = NULL;
- if (is_small)
- {
- // Type is small enough to handle as uint64.
- code = Runtime::SEND_SMALL;
- val = Expression::make_unsafe_cast(Type::lookup_integer_type("uint64"),
- val, loc);
- }
- else if (can_take_address)
- {
- // Must pass address of value. The function doesn't change the
- // value, so just take its address directly.
- code = Runtime::SEND_BIG;
+ if (can_take_address)
+ {
+ // The function doesn't change the value, so just take its
+ // address directly.
val = Expression::make_unary(OPERATOR_AND, val, loc);
}
else
{
- // Must pass address of value, but the value is small enough
- // that it might be in registers. Copy value into temporary
- // variable to take address.
- code = Runtime::SEND_BIG;
+ // The value is not in a variable, or is small enough that it
+ // might be in a register, and taking the address would push it
+ // on the stack. Copy it into a temporary variable to take the
+ // address.
Temporary_statement* temp = Statement::make_temporary(element_type,
val, loc);
Expression* ref = Expression::make_temporary_reference(temp, loc);
btemp = temp->get_backend(context);
}
- Expression* call = Runtime::make_call(code, loc, 3, td, this->channel_, val);
+ Expression* call = Runtime::make_call(Runtime::CHANSEND, loc, 3, td,
+ this->channel_, val);
context->gogo()->lower_expression(context->function(), NULL, &call);
Bexpression* bcall = call->get_backend(context);
Location loc = this->location_;
Expression* selref = Expression::make_temporary_reference(sel, loc);
+ selref = Expression::make_unary(OPERATOR_AND, selref, loc);
Expression* index_expr = Expression::make_integer_ul(this->index_, NULL,
loc);
}
Expression* selref = Expression::make_temporary_reference(sel, location);
+ selref = Expression::make_unary(OPERATOR_AND, selref, location);
Expression* call = Runtime::make_call(Runtime::SELECTGO, location, 1,
selref);
context->gogo()->lower_expression(context->function(), NULL, &call);
go_assert(this->sel_ == NULL);
- Expression* size_expr = Expression::make_integer_ul(this->clauses_->size(),
- NULL, loc);
- Expression* call = Runtime::make_call(Runtime::NEWSELECT, loc, 1, size_expr);
-
- this->sel_ = Statement::make_temporary(NULL, call, loc);
+ int ncases = this->clauses_->size();
+ Type* selstruct_type = Channel_type::select_type(ncases);
+ this->sel_ = Statement::make_temporary(selstruct_type, NULL, loc);
b->add_statement(this->sel_);
+ int64_t selstruct_size;
+ if (!selstruct_type->backend_type_size(gogo, &selstruct_size))
+ {
+ go_assert(saw_errors());
+ return Statement::make_error_statement(loc);
+ }
+
+ Expression* ref = Expression::make_temporary_reference(this->sel_, loc);
+ ref = Expression::make_unary(OPERATOR_AND, ref, loc);
+ Expression* selstruct_size_expr =
+ Expression::make_integer_int64(selstruct_size, NULL, loc);
+ Expression* size_expr = Expression::make_integer_ul(ncases, NULL, loc);
+ Expression* call = Runtime::make_call(Runtime::NEWSELECT, loc, 3,
+ ref, selstruct_size_expr, size_expr);
+ b->add_statement(Statement::make_statement(call, true));
+
this->clauses_->lower(gogo, function, b, this->sel_);
this->is_lowered_ = true;
b->add_statement(this);
return Type::make_channel_type(may_send, may_receive, element_type);
}
+// Return the type to manage a select statement with ncases case
+// statements. A value of this type is allocated on the stack. This
+// must match the type hselect in libgo/go/runtime/select.go.
+
+Type*
+Channel_type::select_type(int ncases)
+{
+ Type* unsafe_pointer_type = Type::make_pointer_type(Type::make_void_type());
+ Type* uint16_type = Type::lookup_integer_type("uint16");
+
+ static Struct_type* scase_type;
+ if (scase_type == NULL)
+ {
+ Type* uintptr_type = Type::lookup_integer_type("uintptr");
+ Type* uint64_type = Type::lookup_integer_type("uint64");
+ scase_type =
+ Type::make_builtin_struct_type(7,
+ "elem", unsafe_pointer_type,
+ "chan", unsafe_pointer_type,
+ "pc", uintptr_type,
+ "kind", uint16_type,
+ "index", uint16_type,
+ "receivedp", unsafe_pointer_type,
+ "releasetime", uint64_type);
+ scase_type->set_is_struct_incomparable();
+ }
+
+ Expression* ncases_expr =
+ Expression::make_integer_ul(ncases, NULL, Linemap::predeclared_location());
+ Array_type* scases = Type::make_array_type(scase_type, ncases_expr);
+ scases->set_is_array_incomparable();
+ Array_type* order = Type::make_array_type(uint16_type, ncases_expr);
+ order->set_is_array_incomparable();
+
+ Struct_type* ret =
+ Type::make_builtin_struct_type(7,
+ "tcase", uint16_type,
+ "ncase", uint16_type,
+ "pollorder", unsafe_pointer_type,
+ "lockorder", unsafe_pointer_type,
+ "scase", scases,
+ "lockorderarr", order,
+ "pollorderarr", order);
+ ret->set_is_struct_incomparable();
+ return ret;
+}
+
// Make a new channel type.
Channel_type*
static Type*
make_chan_type_descriptor_type();
+ static Type*
+ select_type(int ncases);
+
protected:
int
do_traverse(Traverse* traverse)
$(runtime_thread_files) \
runtime/yield.c \
$(rtems_task_variable_add_file) \
- chan.c \
cpuprof.c \
go-iface.c \
lfstack.c \
$(am__objects_1) mfixalloc.lo mgc0.lo mheap.lo msize.lo \
$(am__objects_2) panic.lo parfor.lo print.lo proc.lo \
runtime.lo signal_unix.lo thread.lo $(am__objects_3) yield.lo \
- $(am__objects_4) chan.lo cpuprof.lo go-iface.lo lfstack.lo \
- malloc.lo mprof.lo netpoll.lo rdebug.lo reflect.lo runtime1.lo \
- sema.lo sigqueue.lo string.lo time.lo $(am__objects_5)
+ $(am__objects_4) cpuprof.lo go-iface.lo lfstack.lo malloc.lo \
+ mprof.lo netpoll.lo rdebug.lo reflect.lo runtime1.lo sema.lo \
+ sigqueue.lo string.lo time.lo $(am__objects_5)
am_libgo_llgo_la_OBJECTS = $(am__objects_6)
libgo_llgo_la_OBJECTS = $(am_libgo_llgo_la_OBJECTS)
libgo_llgo_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \
$(runtime_thread_files) \
runtime/yield.c \
$(rtems_task_variable_add_file) \
- chan.c \
cpuprof.c \
go-iface.c \
lfstack.c \
distclean-compile:
-rm -f *.tab.c
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/chan.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/cpuprof.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/env_posix.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/getncpu-bsd.Plo@am__quote@
<-c.Done()
},
limit: 8,
- gccgoLimit: 15,
+ gccgoLimit: 18,
},
{
desc: "WithCancel(bg)",
--- /dev/null
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+// This file contains the implementation of Go channels.
+
+// Invariants:
+// At least one of c.sendq and c.recvq is empty.
+// For buffered channels, also:
+// c.qcount > 0 implies that c.recvq is empty.
+// c.qcount < c.dataqsiz implies that c.sendq is empty.
+import (
+ "runtime/internal/atomic"
+ "unsafe"
+)
+
+// For gccgo, use go:linkname to rename compiler-called functions to
+// themselves, so that the compiler will export them.
+//
+//go:linkname makechan runtime.makechan
+//go:linkname chansend1 runtime.chansend1
+//go:linkname chanrecv1 runtime.chanrecv1
+//go:linkname chanrecv2 runtime.chanrecv2
+//go:linkname closechan runtime.closechan
+
+const (
+ maxAlign = 8
+ hchanSize = unsafe.Sizeof(hchan{}) + uintptr(-int(unsafe.Sizeof(hchan{}))&(maxAlign-1))
+ debugChan = false
+)
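+
+// hchanSize is unsafe.Sizeof(hchan{}) rounded up to a multiple of maxAlign:
+// -n&(maxAlign-1) is the number of bytes needed to reach the next multiple of
+// maxAlign (for example, if unsafe.Sizeof(hchan{}) were 92, -92&7 == 4 and
+// hchanSize would be 96).  This keeps the element buffer that makechan places
+// directly after the header suitably aligned.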
+
+type hchan struct {
+ qcount uint // total data in the queue
+ dataqsiz uint // size of the circular queue
+ buf unsafe.Pointer // points to an array of dataqsiz elements
+ elemsize uint16
+ closed uint32
+ elemtype *_type // element type
+ sendx uint // send index
+ recvx uint // receive index
+ recvq waitq // list of recv waiters
+ sendq waitq // list of send waiters
+
+ // lock protects all fields in hchan, as well as several
+ // fields in sudogs blocked on this channel.
+ //
+ // Do not change another G's status while holding this lock
+ // (in particular, do not ready a G), as this can deadlock
+ // with stack shrinking.
+ lock mutex
+}
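+
+// Note that the gofrontend reads qcount and dataqsiz directly from this
+// header when lowering len(c) and cap(c) on channels (see the BUILTIN_CAP
+// change in this patch), guarded by a nil-channel check, so qcount and
+// dataqsiz must remain the first two fields.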
+
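+// waitq is a FIFO list of sudogs waiting to send on or receive from a
+// channel, linked through sudog.next/prev and protected by hchan.lock.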
+type waitq struct {
+ first *sudog
+ last *sudog
+}
+
+//go:linkname reflect_makechan reflect.makechan
+func reflect_makechan(t *chantype, size int64) *hchan {
+ return makechan(t, size)
+}
+
+func makechan(t *chantype, size int64) *hchan {
+ elem := t.elem
+
+ // compiler checks this but be safe.
+ if elem.size >= 1<<16 {
+ throw("makechan: invalid channel element type")
+ }
+ if hchanSize%maxAlign != 0 || elem.align > maxAlign {
+ throw("makechan: bad alignment")
+ }
+ if size < 0 || int64(uintptr(size)) != size || (elem.size > 0 && uintptr(size) > (_MaxMem-hchanSize)/elem.size) {
+ panic(plainError("makechan: size out of range"))
+ }
+
+ var c *hchan
+ if elem.kind&kindNoPointers != 0 || size == 0 {
+ // Allocate memory in one call.
+ // Hchan does not contain pointers interesting for GC in this case:
+ // buf points into the same allocation, elemtype is persistent.
+ // SudoG's are referenced from their owning thread so they can't be collected.
+ // TODO(dvyukov,rlh): Rethink when collector can move allocated objects.
+ c = (*hchan)(mallocgc(hchanSize+uintptr(size)*elem.size, nil, true))
+ if size > 0 && elem.size != 0 {
+ c.buf = add(unsafe.Pointer(c), hchanSize)
+ } else {
+ // race detector uses this location for synchronization
+ // Also prevents us from pointing beyond the allocation (see issue 9401).
+ c.buf = unsafe.Pointer(c)
+ }
+ } else {
+ c = new(hchan)
+ c.buf = newarray(elem, int(size))
+ }
+ c.elemsize = uint16(elem.size)
+ c.elemtype = elem
+ c.dataqsiz = uint(size)
+
+ if debugChan {
+ print("makechan: chan=", c, "; elemsize=", elem.size, "; dataqsiz=", size, "\n")
+ }
+ return c
+}
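+
+// With this change the gofrontend lowers make(chan T, n) to a single call to
+// runtime.makechan, passing the length as an int64; the old
+// __go_new_channel/__go_new_channel_big runtime pair is no longer used.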
+
+// chanbuf(c, i) is pointer to the i'th slot in the buffer.
+func chanbuf(c *hchan, i uint) unsafe.Pointer {
+ return add(c.buf, uintptr(i)*uintptr(c.elemsize))
+}
+
+// entry point for c <- x from compiled code
+//go:nosplit
+func chansend1(t *chantype, c *hchan, elem unsafe.Pointer) {
+ chansend(t, c, elem, true, getcallerpc(unsafe.Pointer(&t)))
+}
+
+/*
+ * generic single channel send/recv
+ * If block is false,
+ * then the protocol will not
+ * sleep but return if it could
+ * not complete.
+ *
+ * sleep can wake up with g.param == nil
+ * when a channel involved in the sleep has
+ * been closed. it is easiest to loop and re-run
+ * the operation; we'll see that it's now closed.
+ */
+func chansend(t *chantype, c *hchan, ep unsafe.Pointer, block bool, callerpc uintptr) bool {
+ if raceenabled {
+ raceReadObjectPC(t.elem, ep, callerpc, funcPC(chansend))
+ }
+ if msanenabled {
+ msanread(ep, t.elem.size)
+ }
+
+ if c == nil {
+ if !block {
+ return false
+ }
+ gopark(nil, nil, "chan send (nil chan)", traceEvGoStop, 2)
+ throw("unreachable")
+ }
+
+ if debugChan {
+ print("chansend: chan=", c, "\n")
+ }
+
+ if raceenabled {
+ racereadpc(unsafe.Pointer(c), callerpc, funcPC(chansend))
+ }
+
+ // Fast path: check for failed non-blocking operation without acquiring the lock.
+ //
+ // After observing that the channel is not closed, we observe that the channel is
+ // not ready for sending. Each of these observations is a single word-sized read
+ // (first c.closed and second c.recvq.first or c.qcount depending on kind of channel).
+ // Because a closed channel cannot transition from 'ready for sending' to
+ // 'not ready for sending', even if the channel is closed between the two observations,
+ // they imply a moment between the two when the channel was both not yet closed
+ // and not ready for sending. We behave as if we observed the channel at that moment,
+ // and report that the send cannot proceed.
+ //
+ // It is okay if the reads are reordered here: if we observe that the channel is not
+ // ready for sending and then observe that it is not closed, that implies that the
+ // channel wasn't closed during the first observation.
+ if !block && c.closed == 0 && ((c.dataqsiz == 0 && c.recvq.first == nil) ||
+ (c.dataqsiz > 0 && c.qcount == c.dataqsiz)) {
+ return false
+ }
+
+ var t0 int64
+ if blockprofilerate > 0 {
+ t0 = cputicks()
+ }
+
+ lock(&c.lock)
+
+ if c.closed != 0 {
+ unlock(&c.lock)
+ panic(plainError("send on closed channel"))
+ }
+
+ if sg := c.recvq.dequeue(); sg != nil {
+ // Found a waiting receiver. We pass the value we want to send
+ // directly to the receiver, bypassing the channel buffer (if any).
+ send(c, sg, ep, func() { unlock(&c.lock) })
+ return true
+ }
+
+ if c.qcount < c.dataqsiz {
+ // Space is available in the channel buffer. Enqueue the element to send.
+ qp := chanbuf(c, c.sendx)
+ if raceenabled {
+ raceacquire(qp)
+ racerelease(qp)
+ }
+ typedmemmove(c.elemtype, qp, ep)
+ c.sendx++
+ if c.sendx == c.dataqsiz {
+ c.sendx = 0
+ }
+ c.qcount++
+ unlock(&c.lock)
+ return true
+ }
+
+ if !block {
+ unlock(&c.lock)
+ return false
+ }
+
+ // Block on the channel. Some receiver will complete our operation for us.
+ gp := getg()
+ mysg := acquireSudog()
+ mysg.releasetime = 0
+ if t0 != 0 {
+ mysg.releasetime = -1
+ }
+ // No stack splits between assigning elem and enqueuing mysg
+ // on gp.waiting where copystack can find it.
+ mysg.elem = ep
+ mysg.waitlink = nil
+ mysg.g = gp
+ mysg.selectdone = nil
+ mysg.c = c
+ gp.waiting = mysg
+ gp.param = nil
+ c.sendq.enqueue(mysg)
+ goparkunlock(&c.lock, "chan send", traceEvGoBlockSend, 3)
+
+ // someone woke us up.
+ if mysg != gp.waiting {
+ throw("G waiting list is corrupted")
+ }
+ gp.waiting = nil
+ if gp.param == nil {
+ if c.closed == 0 {
+ throw("chansend: spurious wakeup")
+ }
+ panic(plainError("send on closed channel"))
+ }
+ gp.param = nil
+ if mysg.releasetime > 0 {
+ blockevent(mysg.releasetime-t0, 2)
+ }
+ mysg.c = nil
+ releaseSudog(mysg)
+ return true
+}
+
+// send processes a send operation on an empty channel c.
+// The value ep sent by the sender is copied to the receiver sg.
+// The receiver is then woken up to go on its merry way.
+// Channel c must be empty and locked. send unlocks c with unlockf.
+// sg must already be dequeued from c.
+// ep must be non-nil and point to the heap or the caller's stack.
+func send(c *hchan, sg *sudog, ep unsafe.Pointer, unlockf func()) {
+ if raceenabled {
+ if c.dataqsiz == 0 {
+ racesync(c, sg)
+ } else {
+ // Pretend we go through the buffer, even though
+ // we copy directly. Note that we need to increment
+ // the head/tail locations only when raceenabled.
+ qp := chanbuf(c, c.recvx)
+ raceacquire(qp)
+ racerelease(qp)
+ raceacquireg(sg.g, qp)
+ racereleaseg(sg.g, qp)
+ c.recvx++
+ if c.recvx == c.dataqsiz {
+ c.recvx = 0
+ }
+ c.sendx = c.recvx // c.sendx = (c.sendx+1) % c.dataqsiz
+ }
+ }
+ if sg.elem != nil {
+ sendDirect(c.elemtype, sg, ep)
+ sg.elem = nil
+ }
+ gp := sg.g
+ unlockf()
+ gp.param = unsafe.Pointer(sg)
+ if sg.releasetime != 0 {
+ sg.releasetime = cputicks()
+ }
+ goready(gp, 4)
+}
+
+func sendDirect(t *_type, sg *sudog, src unsafe.Pointer) {
+ // Send on an unbuffered or empty-buffered channel is the only operation
+ // in the entire runtime where one goroutine
+ // writes to the stack of another goroutine. The GC assumes that
+ // stack writes only happen when the goroutine is running and are
+ // only done by that goroutine. Using a write barrier is sufficient to
+ // make up for violating that assumption, but the write barrier has to work.
+ // typedmemmove will call heapBitsBulkBarrier, but the target bytes
+ // are not in the heap, so that will not help. We arrange to call
+ // memmove and typeBitsBulkBarrier instead.
+
+ // Once we read sg.elem out of sg, it will no longer
+ // be updated if the destination's stack gets copied (shrunk).
+ // So make sure that no preemption points can happen between read & use.
+ dst := sg.elem
+ memmove(dst, src, t.size)
+ typeBitsBulkBarrier(t, uintptr(dst), t.size)
+}
+
+func closechan(c *hchan) {
+ if c == nil {
+ panic(plainError("close of nil channel"))
+ }
+
+ lock(&c.lock)
+ if c.closed != 0 {
+ unlock(&c.lock)
+ panic(plainError("close of closed channel"))
+ }
+
+ if raceenabled {
+ callerpc := getcallerpc(unsafe.Pointer(&c))
+ racewritepc(unsafe.Pointer(c), callerpc, funcPC(closechan))
+ racerelease(unsafe.Pointer(c))
+ }
+
+ c.closed = 1
+
+ var glist *g
+
+ // release all readers
+ for {
+ sg := c.recvq.dequeue()
+ if sg == nil {
+ break
+ }
+ if sg.elem != nil {
+ memclr(sg.elem, uintptr(c.elemsize))
+ sg.elem = nil
+ }
+ if sg.releasetime != 0 {
+ sg.releasetime = cputicks()
+ }
+ gp := sg.g
+ gp.param = nil
+ if raceenabled {
+ raceacquireg(gp, unsafe.Pointer(c))
+ }
+ gp.schedlink.set(glist)
+ glist = gp
+ }
+
+ // release all writers (they will panic)
+ for {
+ sg := c.sendq.dequeue()
+ if sg == nil {
+ break
+ }
+ sg.elem = nil
+ if sg.releasetime != 0 {
+ sg.releasetime = cputicks()
+ }
+ gp := sg.g
+ gp.param = nil
+ if raceenabled {
+ raceacquireg(gp, unsafe.Pointer(c))
+ }
+ gp.schedlink.set(glist)
+ glist = gp
+ }
+ unlock(&c.lock)
+
+ // Ready all Gs now that we've dropped the channel lock.
+ for glist != nil {
+ gp := glist
+ glist = glist.schedlink.ptr()
+ gp.schedlink = 0
+ goready(gp, 3)
+ }
+}
+
+// entry points for <- c from compiled code
+//go:nosplit
+func chanrecv1(t *chantype, c *hchan, elem unsafe.Pointer) {
+ chanrecv(t, c, elem, true)
+}
+
+//go:nosplit
+func chanrecv2(t *chantype, c *hchan, elem unsafe.Pointer) (received bool) {
+ _, received = chanrecv(t, c, elem, true)
+ return
+}
+
+// chanrecv receives on channel c and writes the received data to ep.
+// ep may be nil, in which case received data is ignored.
+// If block == false and no elements are available, returns (false, false).
+// Otherwise, if c is closed, zeros *ep and returns (true, false).
+// Otherwise, fills in *ep with an element and returns (true, true).
+// A non-nil ep must point to the heap or the caller's stack.
+func chanrecv(t *chantype, c *hchan, ep unsafe.Pointer, block bool) (selected, received bool) {
+ // raceenabled: don't need to check ep, as it is always on the stack
+ // or is new memory allocated by reflect.
+
+ if debugChan {
+ print("chanrecv: chan=", c, "\n")
+ }
+
+ if c == nil {
+ if !block {
+ return
+ }
+ gopark(nil, nil, "chan receive (nil chan)", traceEvGoStop, 2)
+ throw("unreachable")
+ }
+
+ // Fast path: check for failed non-blocking operation without acquiring the lock.
+ //
+ // After observing that the channel is not ready for receiving, we observe that the
+ // channel is not closed. Each of these observations is a single word-sized read
+ // (first c.sendq.first or c.qcount, and second c.closed).
+ // Because a channel cannot be reopened, the later observation of the channel
+ // being not closed implies that it was also not closed at the moment of the
+ // first observation. We behave as if we observed the channel at that moment
+ // and report that the receive cannot proceed.
+ //
+ // The order of operations is important here: reversing the operations can lead to
+ // incorrect behavior when racing with a close.
+ if !block && (c.dataqsiz == 0 && c.sendq.first == nil ||
+ c.dataqsiz > 0 && atomic.Loaduint(&c.qcount) == 0) &&
+ atomic.Load(&c.closed) == 0 {
+ return
+ }
+
+ var t0 int64
+ if blockprofilerate > 0 {
+ t0 = cputicks()
+ }
+
+ lock(&c.lock)
+
+ if c.closed != 0 && c.qcount == 0 {
+ if raceenabled {
+ raceacquire(unsafe.Pointer(c))
+ }
+ unlock(&c.lock)
+ if ep != nil {
+ memclr(ep, uintptr(c.elemsize))
+ }
+ return true, false
+ }
+
+ if sg := c.sendq.dequeue(); sg != nil {
+ // Found a waiting sender. If buffer is size 0, receive value
+ // directly from sender. Otherwise, receive from head of queue
+ // and add sender's value to the tail of the queue (both map to
+ // the same buffer slot because the queue is full).
+ recv(c, sg, ep, func() { unlock(&c.lock) })
+ return true, true
+ }
+
+ if c.qcount > 0 {
+ // Receive directly from queue
+ qp := chanbuf(c, c.recvx)
+ if raceenabled {
+ raceacquire(qp)
+ racerelease(qp)
+ }
+ if ep != nil {
+ typedmemmove(c.elemtype, ep, qp)
+ }
+ memclr(qp, uintptr(c.elemsize))
+ c.recvx++
+ if c.recvx == c.dataqsiz {
+ c.recvx = 0
+ }
+ c.qcount--
+ unlock(&c.lock)
+ return true, true
+ }
+
+ if !block {
+ unlock(&c.lock)
+ return false, false
+ }
+
+ // no sender available: block on this channel.
+ gp := getg()
+ mysg := acquireSudog()
+ mysg.releasetime = 0
+ if t0 != 0 {
+ mysg.releasetime = -1
+ }
+ // No stack splits between assigning elem and enqueuing mysg
+ // on gp.waiting where copystack can find it.
+ mysg.elem = ep
+ mysg.waitlink = nil
+ gp.waiting = mysg
+ mysg.g = gp
+ mysg.selectdone = nil
+ mysg.c = c
+ gp.param = nil
+ c.recvq.enqueue(mysg)
+ goparkunlock(&c.lock, "chan receive", traceEvGoBlockRecv, 3)
+
+ // someone woke us up
+ if mysg != gp.waiting {
+ throw("G waiting list is corrupted")
+ }
+ gp.waiting = nil
+ if mysg.releasetime > 0 {
+ blockevent(mysg.releasetime-t0, 2)
+ }
+ closed := gp.param == nil
+ gp.param = nil
+ mysg.c = nil
+ releaseSudog(mysg)
+ return true, !closed
+}
+
+// recv processes a receive operation on a full channel c.
+// There are 2 parts:
+// 1) The value sent by the sender sg is put into the channel
+// and the sender is woken up to go on its merry way.
+// 2) The value received by the receiver (the current G) is
+// written to ep.
+// For synchronous channels, both values are the same.
+// For asynchronous channels, the receiver gets its data from
+// the channel buffer and the sender's data is put in the
+// channel buffer.
+// Channel c must be full and locked. recv unlocks c with unlockf.
+// sg must already be dequeued from c.
+// A non-nil ep must point to the heap or the caller's stack.
+func recv(c *hchan, sg *sudog, ep unsafe.Pointer, unlockf func()) {
+ if c.dataqsiz == 0 {
+ if raceenabled {
+ racesync(c, sg)
+ }
+ if ep != nil {
+ // copy data from sender
+ // ep points to our own stack or heap, so nothing
+ // special (ala sendDirect) needed here.
+ typedmemmove(c.elemtype, ep, sg.elem)
+ }
+ } else {
+ // Queue is full. Take the item at the
+ // head of the queue. Make the sender enqueue
+ // its item at the tail of the queue. Since the
+ // queue is full, those are both the same slot.
+ qp := chanbuf(c, c.recvx)
+ if raceenabled {
+ raceacquire(qp)
+ racerelease(qp)
+ raceacquireg(sg.g, qp)
+ racereleaseg(sg.g, qp)
+ }
+ // copy data from queue to receiver
+ if ep != nil {
+ typedmemmove(c.elemtype, ep, qp)
+ }
+ // copy data from sender to queue
+ typedmemmove(c.elemtype, qp, sg.elem)
+ c.recvx++
+ if c.recvx == c.dataqsiz {
+ c.recvx = 0
+ }
+ c.sendx = c.recvx // c.sendx = (c.sendx+1) % c.dataqsiz
+ }
+ sg.elem = nil
+ gp := sg.g
+ unlockf()
+ gp.param = unsafe.Pointer(sg)
+ if sg.releasetime != 0 {
+ sg.releasetime = cputicks()
+ }
+ goready(gp, 4)
+}
+
+// compiler implements
+//
+// select {
+// case c <- v:
+// ... foo
+// default:
+// ... bar
+// }
+//
+// as
+//
+// if selectnbsend(c, v) {
+// ... foo
+// } else {
+// ... bar
+// }
+//
+func selectnbsend(t *chantype, c *hchan, elem unsafe.Pointer) (selected bool) {
+ return chansend(t, c, elem, false, getcallerpc(unsafe.Pointer(&t)))
+}
+
+// compiler implements
+//
+// select {
+// case v = <-c:
+// ... foo
+// default:
+// ... bar
+// }
+//
+// as
+//
+// if selectnbrecv(&v, c) {
+// ... foo
+// } else {
+// ... bar
+// }
+//
+func selectnbrecv(t *chantype, elem unsafe.Pointer, c *hchan) (selected bool) {
+ selected, _ = chanrecv(t, c, elem, false)
+ return
+}
+
+// compiler implements
+//
+// select {
+// case v, ok = <-c:
+// ... foo
+// default:
+// ... bar
+// }
+//
+// as
+//
+// if c != nil && selectnbrecv2(&v, &ok, c) {
+// ... foo
+// } else {
+// ... bar
+// }
+//
+func selectnbrecv2(t *chantype, elem unsafe.Pointer, received *bool, c *hchan) (selected bool) {
+ // TODO(khr): just return 2 values from this function, now that it is in Go.
+ selected, *received = chanrecv(t, c, elem, false)
+ return
+}
+
+//go:linkname reflect_chansend reflect.chansend
+func reflect_chansend(t *chantype, c *hchan, elem unsafe.Pointer, nb bool) (selected bool) {
+ return chansend(t, c, elem, !nb, getcallerpc(unsafe.Pointer(&t)))
+}
+
+//go:linkname reflect_chanrecv reflect.chanrecv
+func reflect_chanrecv(t *chantype, c *hchan, nb bool, elem unsafe.Pointer) (selected bool, received bool) {
+ return chanrecv(t, c, elem, !nb)
+}
+
+//go:linkname reflect_chanlen reflect.chanlen
+func reflect_chanlen(c *hchan) int {
+ if c == nil {
+ return 0
+ }
+ return int(c.qcount)
+}
+
+//go:linkname reflect_chancap reflect.chancap
+func reflect_chancap(c *hchan) int {
+ if c == nil {
+ return 0
+ }
+ return int(c.dataqsiz)
+}
+
+//go:linkname reflect_chanclose reflect.chanclose
+func reflect_chanclose(c *hchan) {
+ closechan(c)
+}
+
+func (q *waitq) enqueue(sgp *sudog) {
+ sgp.next = nil
+ x := q.last
+ if x == nil {
+ sgp.prev = nil
+ q.first = sgp
+ q.last = sgp
+ return
+ }
+ sgp.prev = x
+ x.next = sgp
+ q.last = sgp
+}
+
+func (q *waitq) dequeue() *sudog {
+ for {
+ sgp := q.first
+ if sgp == nil {
+ return nil
+ }
+ y := sgp.next
+ if y == nil {
+ q.first = nil
+ q.last = nil
+ } else {
+ y.prev = nil
+ q.first = y
+ sgp.next = nil // mark as removed (see dequeueSudog)
+ }
+
+ // if sgp participates in a select and is already signaled, ignore it
+ if sgp.selectdone != nil {
+ // claim the right to signal
+ if *sgp.selectdone != 0 || !atomic.Cas(sgp.selectdone, 0, 1) {
+ continue
+ }
+ }
+
+ return sgp
+ }
+}
+
+func racesync(c *hchan, sg *sudog) {
+ racerelease(chanbuf(c, 0))
+ raceacquireg(sg.g, chanbuf(c, 0))
+ racereleaseg(sg.g, chanbuf(c, 0))
+ raceacquire(chanbuf(c, 0))
+}
//
// sudogs are allocated from a special pool. Use acquireSudog and
// releaseSudog to allocate and free them.
-/*
-Commented out for gccgo for now.
-
type sudog struct {
// The following fields are protected by the hchan.lock of the
// channel this sudog is blocking on. shrinkstack depends on
waitlink *sudog // g.waiting list
c *hchan // channel
}
-*/
type gcstats struct {
// the struct must consist of only uint64's,
gopc uintptr // pc of go statement that created this goroutine
startpc uintptr // pc of goroutine function
racectx uintptr
- // Not for gccgo for now: waiting *sudog // sudog structures this g is waiting on (that have a valid elem ptr); in lock order
+ waiting *sudog // sudog structures this g is waiting on (that have a valid elem ptr); in lock order
// Not for gccgo: cgoCtxt []uintptr // cgo traceback context
// Per-G GC state
gfree *g
gfreecnt int32
- // Not for gccgo for now: sudogcache []*sudog
+ sudogcache []*sudog
// Not for gccgo for now: sudogbuf [128]*sudog
// Not for gccgo for now: tracebuf traceBufPtr
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+// This file contains the implementation of Go select statements.
+
+import (
+ "runtime/internal/sys"
+ "unsafe"
+)
+
+// For gccgo, use go:linkname to rename compiler-called functions to
+// themselves, so that the compiler will export them.
+//
+//go:linkname newselect runtime.newselect
+//go:linkname selectdefault runtime.selectdefault
+//go:linkname selectsend runtime.selectsend
+//go:linkname selectrecv runtime.selectrecv
+//go:linkname selectrecv2 runtime.selectrecv2
+//go:linkname selectgo runtime.selectgo
+
+const (
+ debugSelect = false
+
+ // scase.kind
+ caseRecv = iota
+ caseSend
+ caseDefault
+)
+
+// Select statement header.
+// Known to compiler.
+// Changes here must also be made in Channel_type::select_type in the
+// gofrontend's types.cc.
+type hselect struct {
+ tcase uint16 // total count of scase[]
+ ncase uint16 // currently filled scase[]
+ pollorder *uint16 // case poll order
+ lockorder *uint16 // channel lock order
+ scase [1]scase // one per case (in order of appearance)
+}
+
+// Select case descriptor.
+// Known to compiler.
+// Changes here must also be made in Channel_type::select_type in the
+// gofrontend's types.cc.
+type scase struct {
+ elem unsafe.Pointer // data element
+ c *hchan // chan
+ pc uintptr // return pc
+ kind uint16
+ index uint16 // case index
+ receivedp *bool // pointer to received bool (recv2)
+ releasetime int64
+}
+
+var (
+ chansendpc = funcPC(chansend)
+ chanrecvpc = funcPC(chanrecv)
+)
+
+func selectsize(size uintptr) uintptr {
+ selsize := unsafe.Sizeof(hselect{}) +
+ (size-1)*unsafe.Sizeof(hselect{}.scase[0]) +
+ size*unsafe.Sizeof(*hselect{}.lockorder) +
+ size*unsafe.Sizeof(*hselect{}.pollorder)
+ return round(selsize, sys.Int64Align)
+}
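+
+// As a worked example, on a 64-bit target where unsafe.Sizeof(scase{}) is 48
+// and unsafe.Sizeof(hselect{}) is 72, selectsize(3) is
+// 72 + 2*48 + 3*2 + 3*2 = 180, rounded up to 184.  The gofrontend's
+// Channel_type::select_type describes a struct of the same size, and
+// newselect checks that the two agree.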
+
+func newselect(sel *hselect, selsize int64, size int32) {
+ if selsize != int64(selectsize(uintptr(size))) {
+ print("runtime: bad select size ", selsize, ", want ", selectsize(uintptr(size)), "\n")
+ throw("bad select size")
+ }
+ if size != int32(uint16(size)) {
+ throw("select size too large")
+ }
+ sel.tcase = uint16(size)
+ sel.ncase = 0
+ sel.lockorder = (*uint16)(add(unsafe.Pointer(&sel.scase), uintptr(size)*unsafe.Sizeof(hselect{}.scase[0])))
+ sel.pollorder = (*uint16)(add(unsafe.Pointer(sel.lockorder), uintptr(size)*unsafe.Sizeof(*hselect{}.lockorder)))
+
+ // For gccgo the temporary variable will not have been zeroed.
+ memclr(unsafe.Pointer(&sel.scase), uintptr(size)*unsafe.Sizeof(hselect{}.scase[0])+uintptr(size)*unsafe.Sizeof(*hselect{}.lockorder)+uintptr(size)*unsafe.Sizeof(*hselect{}.pollorder))
+
+ if debugSelect {
+ print("newselect s=", sel, " size=", size, "\n")
+ }
+}
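+
+// For a two-case select the gofrontend now emits, as a rough sketch rather
+// than the literal generated code:
+//
+//	var sel <struct built by Channel_type::select_type(2)> // stack temporary
+//	newselect(&sel, <backend size of that struct>, 2)
+//	selectsend(&sel, c1, &v, 0)
+//	selectrecv(&sel, c2, &x, 1)
+//	switch selectgo(&sel) {
+//	case 0: // c1 <- v was chosen
+//	case 1: // x = <-c2 was chosen
+//	}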
+
+func selectsend(sel *hselect, c *hchan, elem unsafe.Pointer, index int32) {
+ // nil cases do not compete
+ if c != nil {
+ selectsendImpl(sel, c, getcallerpc(unsafe.Pointer(&sel)), elem, index)
+ }
+ return
+}
+
+// cut in half to give stack a chance to split
+func selectsendImpl(sel *hselect, c *hchan, pc uintptr, elem unsafe.Pointer, index int32) {
+ i := sel.ncase
+ if i >= sel.tcase {
+ throw("selectsend: too many cases")
+ }
+ sel.ncase = i + 1
+ cas := (*scase)(add(unsafe.Pointer(&sel.scase), uintptr(i)*unsafe.Sizeof(sel.scase[0])))
+
+ cas.pc = pc
+ cas.c = c
+ cas.index = uint16(index)
+ cas.kind = caseSend
+ cas.elem = elem
+
+ if debugSelect {
+ print("selectsend s=", sel, " pc=", hex(cas.pc), " chan=", cas.c, " index=", cas.index, "\n")
+ }
+}
+
+func selectrecv(sel *hselect, c *hchan, elem unsafe.Pointer, index int32) {
+ // nil cases do not compete
+ if c != nil {
+ selectrecvImpl(sel, c, getcallerpc(unsafe.Pointer(&sel)), elem, nil, index)
+ }
+ return
+}
+
+func selectrecv2(sel *hselect, c *hchan, elem unsafe.Pointer, received *bool, index int32) {
+ // nil cases do not compete
+ if c != nil {
+ selectrecvImpl(sel, c, getcallerpc(unsafe.Pointer(&sel)), elem, received, index)
+ }
+ return
+}
+
+func selectrecvImpl(sel *hselect, c *hchan, pc uintptr, elem unsafe.Pointer, received *bool, index int32) {
+ i := sel.ncase
+ if i >= sel.tcase {
+ throw("selectrecv: too many cases")
+ }
+ sel.ncase = i + 1
+ cas := (*scase)(add(unsafe.Pointer(&sel.scase), uintptr(i)*unsafe.Sizeof(sel.scase[0])))
+ cas.pc = pc
+ cas.c = c
+ cas.index = uint16(index)
+ cas.kind = caseRecv
+ cas.elem = elem
+ cas.receivedp = received
+
+ if debugSelect {
+ print("selectrecv s=", sel, " pc=", hex(cas.pc), " chan=", cas.c, " index=", cas.index, "\n")
+ }
+}
+
+func selectdefault(sel *hselect, index int32) {
+ selectdefaultImpl(sel, getcallerpc(unsafe.Pointer(&sel)), index)
+ return
+}
+
+func selectdefaultImpl(sel *hselect, callerpc uintptr, index int32) {
+ i := sel.ncase
+ if i >= sel.tcase {
+ throw("selectdefault: too many cases")
+ }
+ sel.ncase = i + 1
+ cas := (*scase)(add(unsafe.Pointer(&sel.scase), uintptr(i)*unsafe.Sizeof(sel.scase[0])))
+ cas.pc = callerpc
+ cas.c = nil
+ cas.index = uint16(index)
+ cas.kind = caseDefault
+
+ if debugSelect {
+ print("selectdefault s=", sel, " pc=", hex(cas.pc), " index=", cas.index, "\n")
+ }
+}
+
+func sellock(scases []scase, lockorder []uint16) {
+ var c *hchan
+ for _, o := range lockorder {
+ c0 := scases[o].c
+ if c0 != nil && c0 != c {
+ c = c0
+ lock(&c.lock)
+ }
+ }
+}
+
+func selunlock(scases []scase, lockorder []uint16) {
+ // We must be very careful here to not touch sel after we have unlocked
+ // the last lock, because sel can be freed right after the last unlock.
+ // Consider the following situation.
+ // First M calls runtime·park() in runtime·selectgo() passing the sel.
+ // Once runtime·park() has unlocked the last lock, another M makes
+ // the G that calls select runnable again and schedules it for execution.
+ // When the G runs on another M, it locks all the locks and frees sel.
+ // Now if the first M touches sel, it will access freed memory.
+ n := len(scases)
+ r := 0
+ // skip the default case
+ if n > 0 && scases[lockorder[0]].c == nil {
+ r = 1
+ }
+ for i := n - 1; i >= r; i-- {
+ c := scases[lockorder[i]].c
+ if i > 0 && c == scases[lockorder[i-1]].c {
+ continue // will unlock it on the next iteration
+ }
+ unlock(&c.lock)
+ }
+}
+
+func selparkcommit(gp *g, _ unsafe.Pointer) bool {
+ // This must not access gp's stack (see gopark). In
+ // particular, it must not access the *hselect. That's okay,
+ // because by the time this is called, gp.waiting has all
+ // channels in lock order.
+ var lastc *hchan
+ for sg := gp.waiting; sg != nil; sg = sg.waitlink {
+ if sg.c != lastc && lastc != nil {
+ // As soon as we unlock the channel, fields in
+ // any sudog with that channel may change,
+ // including c and waitlink. Since multiple
+ // sudogs may have the same channel, we unlock
+ // only after we've passed the last instance
+ // of a channel.
+ unlock(&lastc.lock)
+ }
+ lastc = sg.c
+ }
+ if lastc != nil {
+ unlock(&lastc.lock)
+ }
+ return true
+}
+
+func block() {
+ gopark(nil, nil, "select (no cases)", traceEvGoStop, 1) // forever
+}
+
+// selectgo implements the select statement.
+//
+// *sel is on the current goroutine's stack (regardless of any
+// escaping in selectgo).
+//
+// selectgo does not return. Instead, it overwrites its return PC and
+// returns directly to the triggered select case. Because of this, it
+// cannot appear at the top of a split stack.
+func selectgo(sel *hselect) int32 {
+ _, index := selectgoImpl(sel)
+ return int32(index)
+}
+
+// selectgoImpl returns scase.pc and scase.index for the select
+// case which fired.
+func selectgoImpl(sel *hselect) (uintptr, uint16) {
+ if debugSelect {
+ print("select: sel=", sel, "\n")
+ }
+
+ scaseslice := slice{unsafe.Pointer(&sel.scase), int(sel.ncase), int(sel.ncase)}
+ scases := *(*[]scase)(unsafe.Pointer(&scaseslice))
+
+ var t0 int64
+ if blockprofilerate > 0 {
+ t0 = cputicks()
+ for i := 0; i < int(sel.ncase); i++ {
+ scases[i].releasetime = -1
+ }
+ }
+
+ // The compiler rewrites selects that statically have
+ // only 0 or 1 cases plus default into simpler constructs.
+ // The only way we can end up with such small sel.ncase
+ // values here is for a larger select in which most channels
+ // have been nilled out. The general code handles those
+ // cases correctly, and they are rare enough not to bother
+ // optimizing (and needing to test).
+
+ // generate permuted order
+ pollslice := slice{unsafe.Pointer(sel.pollorder), int(sel.ncase), int(sel.ncase)}
+ pollorder := *(*[]uint16)(unsafe.Pointer(&pollslice))
+ for i := 1; i < int(sel.ncase); i++ {
+ j := int(fastrand1()) % (i + 1)
+ pollorder[i] = pollorder[j]
+ pollorder[j] = uint16(i)
+ }
+
+ // sort the cases by Hchan address to get the locking order.
+ // simple heap sort, to guarantee n log n time and constant stack footprint.
+ lockslice := slice{unsafe.Pointer(sel.lockorder), int(sel.ncase), int(sel.ncase)}
+ lockorder := *(*[]uint16)(unsafe.Pointer(&lockslice))
+ for i := 0; i < int(sel.ncase); i++ {
+ j := i
+ // Start with the pollorder to permute cases on the same channel.
+ c := scases[pollorder[i]].c
+ for j > 0 && scases[lockorder[(j-1)/2]].c.sortkey() < c.sortkey() {
+ k := (j - 1) / 2
+ lockorder[j] = lockorder[k]
+ j = k
+ }
+ lockorder[j] = pollorder[i]
+ }
+ for i := int(sel.ncase) - 1; i >= 0; i-- {
+ o := lockorder[i]
+ c := scases[o].c
+ lockorder[i] = lockorder[0]
+ j := 0
+ for {
+ k := j*2 + 1
+ if k >= i {
+ break
+ }
+ if k+1 < i && scases[lockorder[k]].c.sortkey() < scases[lockorder[k+1]].c.sortkey() {
+ k++
+ }
+ if c.sortkey() < scases[lockorder[k]].c.sortkey() {
+ lockorder[j] = lockorder[k]
+ j = k
+ continue
+ }
+ break
+ }
+ lockorder[j] = o
+ }
+ /*
+ for i := 0; i+1 < int(sel.ncase); i++ {
+ if scases[lockorder[i]].c.sortkey() > scases[lockorder[i+1]].c.sortkey() {
+ print("i=", i, " x=", lockorder[i], " y=", lockorder[i+1], "\n")
+ throw("select: broken sort")
+ }
+ }
+ */
+
+ // lock all the channels involved in the select
+ sellock(scases, lockorder)
+
+ var (
+ gp *g
+ done uint32
+ sg *sudog
+ c *hchan
+ k *scase
+ sglist *sudog
+ sgnext *sudog
+ qp unsafe.Pointer
+ nextp **sudog
+ )
+
+loop:
+ // pass 1 - look for something already waiting
+ var dfl *scase
+ var cas *scase
+ for i := 0; i < int(sel.ncase); i++ {
+ cas = &scases[pollorder[i]]
+ c = cas.c
+
+ switch cas.kind {
+ case caseRecv:
+ sg = c.sendq.dequeue()
+ if sg != nil {
+ goto recv
+ }
+ if c.qcount > 0 {
+ goto bufrecv
+ }
+ if c.closed != 0 {
+ goto rclose
+ }
+
+ case caseSend:
+ if raceenabled {
+ racereadpc(unsafe.Pointer(c), cas.pc, chansendpc)
+ }
+ if c.closed != 0 {
+ goto sclose
+ }
+ sg = c.recvq.dequeue()
+ if sg != nil {
+ goto send
+ }
+ if c.qcount < c.dataqsiz {
+ goto bufsend
+ }
+
+ case caseDefault:
+ dfl = cas
+ }
+ }
+
+ if dfl != nil {
+ selunlock(scases, lockorder)
+ cas = dfl
+ goto retc
+ }
+
+ // pass 2 - enqueue on all chans
+ gp = getg()
+ done = 0
+ if gp.waiting != nil {
+ throw("gp.waiting != nil")
+ }
+ nextp = &gp.waiting
+ for _, casei := range lockorder {
+ cas = &scases[casei]
+ c = cas.c
+ sg := acquireSudog()
+ sg.g = gp
+ // Note: selectdone is adjusted for stack copies in stack1.go:adjustsudogs
+ sg.selectdone = (*uint32)(noescape(unsafe.Pointer(&done)))
+ // No stack splits between assigning elem and enqueuing
+ // sg on gp.waiting where copystack can find it.
+ sg.elem = cas.elem
+ sg.releasetime = 0
+ if t0 != 0 {
+ sg.releasetime = -1
+ }
+ sg.c = c
+ // Construct waiting list in lock order.
+ *nextp = sg
+ nextp = &sg.waitlink
+
+ switch cas.kind {
+ case caseRecv:
+ c.recvq.enqueue(sg)
+
+ case caseSend:
+ c.sendq.enqueue(sg)
+ }
+ }
+
+ // wait for someone to wake us up
+ gp.param = nil
+ gopark(selparkcommit, nil, "select", traceEvGoBlockSelect, 2)
+
+ // someone woke us up
+ sellock(scases, lockorder)
+ sg = (*sudog)(gp.param)
+ gp.param = nil
+
+ // pass 3 - dequeue from unsuccessful chans
+ // otherwise they stack up on quiet channels
+ // record the successful case, if any.
+ // We singly-linked up the SudoGs in lock order.
+ cas = nil
+ sglist = gp.waiting
+ // Clear all elem before unlinking from gp.waiting.
+ for sg1 := gp.waiting; sg1 != nil; sg1 = sg1.waitlink {
+ sg1.selectdone = nil
+ sg1.elem = nil
+ sg1.c = nil
+ }
+ gp.waiting = nil
+
+ for _, casei := range lockorder {
+ k = &scases[casei]
+ if sglist.releasetime > 0 {
+ k.releasetime = sglist.releasetime
+ }
+ if sg == sglist {
+ // sg has already been dequeued by the G that woke us up.
+ cas = k
+ } else {
+ c = k.c
+ if k.kind == caseSend {
+ c.sendq.dequeueSudoG(sglist)
+ } else {
+ c.recvq.dequeueSudoG(sglist)
+ }
+ }
+ sgnext = sglist.waitlink
+ sglist.waitlink = nil
+ releaseSudog(sglist)
+ sglist = sgnext
+ }
+
+ if cas == nil {
+ // This can happen if we were woken up by a close().
+ // TODO: figure that out explicitly so we don't need this loop.
+ goto loop
+ }
+
+ c = cas.c
+
+ if debugSelect {
+ print("wait-return: sel=", sel, " c=", c, " cas=", cas, " kind=", cas.kind, "\n")
+ }
+
+ if cas.kind == caseRecv {
+ if cas.receivedp != nil {
+ *cas.receivedp = true
+ }
+ }
+
+ if raceenabled {
+ if cas.kind == caseRecv && cas.elem != nil {
+ raceWriteObjectPC(c.elemtype, cas.elem, cas.pc, chanrecvpc)
+ } else if cas.kind == caseSend {
+ raceReadObjectPC(c.elemtype, cas.elem, cas.pc, chansendpc)
+ }
+ }
+ if msanenabled {
+ if cas.kind == caseRecv && cas.elem != nil {
+ msanwrite(cas.elem, c.elemtype.size)
+ } else if cas.kind == caseSend {
+ msanread(cas.elem, c.elemtype.size)
+ }
+ }
+
+ selunlock(scases, lockorder)
+ goto retc
+
+bufrecv:
+ // can receive from buffer
+ if raceenabled {
+ if cas.elem != nil {
+ raceWriteObjectPC(c.elemtype, cas.elem, cas.pc, chanrecvpc)
+ }
+ raceacquire(chanbuf(c, c.recvx))
+ racerelease(chanbuf(c, c.recvx))
+ }
+ if msanenabled && cas.elem != nil {
+ msanwrite(cas.elem, c.elemtype.size)
+ }
+ if cas.receivedp != nil {
+ *cas.receivedp = true
+ }
+ qp = chanbuf(c, c.recvx)
+ if cas.elem != nil {
+ typedmemmove(c.elemtype, cas.elem, qp)
+ }
+ memclr(qp, uintptr(c.elemsize))
+ c.recvx++
+ if c.recvx == c.dataqsiz {
+ c.recvx = 0
+ }
+ c.qcount--
+ selunlock(scases, lockorder)
+ goto retc
+
+bufsend:
+ // can send to buffer
+ if raceenabled {
+ raceacquire(chanbuf(c, c.sendx))
+ racerelease(chanbuf(c, c.sendx))
+ raceReadObjectPC(c.elemtype, cas.elem, cas.pc, chansendpc)
+ }
+ if msanenabled {
+ msanread(cas.elem, c.elemtype.size)
+ }
+ typedmemmove(c.elemtype, chanbuf(c, c.sendx), cas.elem)
+ c.sendx++
+ if c.sendx == c.dataqsiz {
+ c.sendx = 0
+ }
+ c.qcount++
+ selunlock(scases, lockorder)
+ goto retc
+
+recv:
+ // can receive from sleeping sender (sg)
+ recv(c, sg, cas.elem, func() { selunlock(scases, lockorder) })
+ if debugSelect {
+ print("syncrecv: sel=", sel, " c=", c, "\n")
+ }
+ if cas.receivedp != nil {
+ *cas.receivedp = true
+ }
+ goto retc
+
+rclose:
+ // read at end of closed channel
+ selunlock(scases, lockorder)
+ if cas.receivedp != nil {
+ *cas.receivedp = false
+ }
+ if cas.elem != nil {
+ memclr(cas.elem, uintptr(c.elemsize))
+ }
+ if raceenabled {
+ raceacquire(unsafe.Pointer(c))
+ }
+ goto retc
+
+send:
+ // can send to a sleeping receiver (sg)
+ if raceenabled {
+ raceReadObjectPC(c.elemtype, cas.elem, cas.pc, chansendpc)
+ }
+ if msanenabled {
+ msanread(cas.elem, c.elemtype.size)
+ }
+ send(c, sg, cas.elem, func() { selunlock(scases, lockorder) })
+ if debugSelect {
+ print("syncsend: sel=", sel, " c=", c, "\n")
+ }
+ goto retc
+
+retc:
+ if cas.releasetime > 0 {
+ blockevent(cas.releasetime-t0, 2)
+ }
+ return cas.pc, cas.index
+
+sclose:
+ // send on closed channel
+ selunlock(scases, lockorder)
+ panic(plainError("send on closed channel"))
+}
+
+func (c *hchan) sortkey() uintptr {
+ // TODO(khr): if we have a moving garbage collector, we'll need to
+ // change this function.
+ return uintptr(unsafe.Pointer(c))
+}
+
+// A runtimeSelect is a single case passed to rselect.
+// This must match ../reflect/value.go:/runtimeSelect
+type runtimeSelect struct {
+ dir selectDir
+ typ unsafe.Pointer // channel type (not used here)
+ ch *hchan // channel
+ val unsafe.Pointer // ptr to data (SendDir) or ptr to receive buffer (RecvDir)
+}
+
+// These values must match ../reflect/value.go:/SelectDir.
+type selectDir int
+
+const (
+ _ selectDir = iota
+ selectSend // case Chan <- Send
+ selectRecv // case <-Chan:
+ selectDefault // default
+)
+
+//go:linkname reflect_rselect reflect.rselect
+func reflect_rselect(cases []runtimeSelect) (chosen int, recvOK bool) {
+ // flagNoScan is safe here, because all objects are also referenced from cases.
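+	// Note that the case index is passed through the pc argument of the
+	// ...Impl helpers below and comes back as the first result of
+	// selectgoImpl; the index argument is unused on this path.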
+ size := selectsize(uintptr(len(cases)))
+ sel := (*hselect)(mallocgc(size, nil, true))
+ newselect(sel, int64(size), int32(len(cases)))
+ r := new(bool)
+ for i := range cases {
+ rc := &cases[i]
+ switch rc.dir {
+ case selectDefault:
+ selectdefaultImpl(sel, uintptr(i), 0)
+ case selectSend:
+ if rc.ch == nil {
+ break
+ }
+ selectsendImpl(sel, rc.ch, uintptr(i), rc.val, 0)
+ case selectRecv:
+ if rc.ch == nil {
+ break
+ }
+ selectrecvImpl(sel, rc.ch, uintptr(i), rc.val, r, 0)
+ }
+ }
+
+ pc, _ := selectgoImpl(sel)
+ chosen = int(pc)
+ recvOK = *r
+ return
+}
+
+func (q *waitq) dequeueSudoG(sgp *sudog) {
+ x := sgp.prev
+ y := sgp.next
+ if x != nil {
+ if y != nil {
+ // middle of queue
+ x.next = y
+ y.prev = x
+ sgp.next = nil
+ sgp.prev = nil
+ return
+ }
+ // end of queue
+ x.next = nil
+ q.last = x
+ sgp.prev = nil
+ return
+ }
+ if y != nil {
+ // start of queue
+ y.prev = nil
+ q.first = y
+ sgp.next = nil
+ return
+ }
+
+ // x==y==nil. Either sgp is the only element in the queue,
+ // or it has already been removed. Use q.first to disambiguate.
+ if q.first == sgp {
+ q.first = nil
+ q.last = nil
+ }
+}
func entersyscall(int32)
func entersyscallblock(int32)
func exitsyscall(int32)
+func gopark(func(*g, unsafe.Pointer) bool, unsafe.Pointer, string, byte, int)
+func goparkunlock(*mutex, string, byte, int)
+func goready(*g, int)
+
+// Temporary for gccgo until we port mprof.go.
+var blockprofilerate uint64
+
+func blockevent(cycles int64, skip int) {}
+
+// Temporary hack for gccgo until we port proc.go.
+//go:nosplit
+func acquireSudog() *sudog {
+ mp := acquirem()
+ pp := mp.p.ptr()
+ if len(pp.sudogcache) == 0 {
+ pp.sudogcache = append(pp.sudogcache, new(sudog))
+ }
+ n := len(pp.sudogcache)
+ s := pp.sudogcache[n-1]
+ pp.sudogcache[n-1] = nil
+ pp.sudogcache = pp.sudogcache[:n-1]
+ if s.elem != nil {
+ throw("acquireSudog: found s.elem != nil in cache")
+ }
+ releasem(mp)
+ return s
+}
+
+// Temporary hack for gccgo until we port proc.go.
+//go:nosplit
+func releaseSudog(s *sudog) {
+ if s.elem != nil {
+ throw("runtime: sudog with non-nil elem")
+ }
+ if s.selectdone != nil {
+ throw("runtime: sudog with non-nil selectdone")
+ }
+ if s.next != nil {
+ throw("runtime: sudog with non-nil next")
+ }
+ if s.prev != nil {
+ throw("runtime: sudog with non-nil prev")
+ }
+ if s.waitlink != nil {
+ throw("runtime: sudog with non-nil waitlink")
+ }
+ if s.c != nil {
+ throw("runtime: sudog with non-nil c")
+ }
+ gp := getg()
+ if gp.param != nil {
+ throw("runtime: releaseSudog with non-nil gp.param")
+ }
+ mp := acquirem() // avoid rescheduling to another P
+ pp := mp.p.ptr()
+ pp.sudogcache = append(pp.sudogcache, s)
+ releasem(mp)
+}
+
+// Temporary hack for gccgo until we port the garbage collector.
+func typeBitsBulkBarrier(typ *_type, p, size uintptr) {}
+
+// Temporary for gccgo until we port print.go.
+type hex uint64
--- /dev/null
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Go execution tracer.
+// The tracer captures a wide range of execution events like goroutine
+// creation/blocking/unblocking, syscall enter/exit/block, GC-related events,
+// changes of heap size, processor start/stop, etc and writes them to a buffer
+// in a compact form. A precise nanosecond-precision timestamp and a stack
+// trace is captured for most events.
+// See https://golang.org/s/go15trace for more info.
+
+package runtime
+
+import (
+ "runtime/internal/sys"
+ "unsafe"
+)
+
+// Event types in the trace, args are given in square brackets.
+const (
+ traceEvNone = 0 // unused
+ traceEvBatch = 1 // start of per-P batch of events [pid, timestamp]
+ traceEvFrequency = 2 // contains tracer timer frequency [frequency (ticks per second)]
+ traceEvStack = 3 // stack [stack id, number of PCs, array of {PC, func string ID, file string ID, line}]
+ traceEvGomaxprocs = 4 // current value of GOMAXPROCS [timestamp, GOMAXPROCS, stack id]
+ traceEvProcStart = 5 // start of P [timestamp, thread id]
+ traceEvProcStop = 6 // stop of P [timestamp]
+ traceEvGCStart = 7 // GC start [timestamp, seq, stack id]
+ traceEvGCDone = 8 // GC done [timestamp]
+ traceEvGCScanStart = 9 // GC scan start [timestamp]
+ traceEvGCScanDone = 10 // GC scan done [timestamp]
+ traceEvGCSweepStart = 11 // GC sweep start [timestamp, stack id]
+ traceEvGCSweepDone = 12 // GC sweep done [timestamp]
+ traceEvGoCreate = 13 // goroutine creation [timestamp, new goroutine id, new stack id, stack id]
+ traceEvGoStart = 14 // goroutine starts running [timestamp, goroutine id, seq]
+ traceEvGoEnd = 15 // goroutine ends [timestamp]
+ traceEvGoStop = 16 // goroutine stops (like in select{}) [timestamp, stack]
+ traceEvGoSched = 17 // goroutine calls Gosched [timestamp, stack]
+ traceEvGoPreempt = 18 // goroutine is preempted [timestamp, stack]
+ traceEvGoSleep = 19 // goroutine calls Sleep [timestamp, stack]
+ traceEvGoBlock = 20 // goroutine blocks [timestamp, stack]
+ traceEvGoUnblock = 21 // goroutine is unblocked [timestamp, goroutine id, seq, stack]
+ traceEvGoBlockSend = 22 // goroutine blocks on chan send [timestamp, stack]
+ traceEvGoBlockRecv = 23 // goroutine blocks on chan recv [timestamp, stack]
+ traceEvGoBlockSelect = 24 // goroutine blocks on select [timestamp, stack]
+ traceEvGoBlockSync = 25 // goroutine blocks on Mutex/RWMutex [timestamp, stack]
+ traceEvGoBlockCond = 26 // goroutine blocks on Cond [timestamp, stack]
+ traceEvGoBlockNet = 27 // goroutine blocks on network [timestamp, stack]
+ traceEvGoSysCall = 28 // syscall enter [timestamp, stack]
+ traceEvGoSysExit = 29 // syscall exit [timestamp, goroutine id, seq, real timestamp]
+ traceEvGoSysBlock = 30 // syscall blocks [timestamp]
+ traceEvGoWaiting = 31 // denotes that goroutine is blocked when tracing starts [timestamp, goroutine id]
+ traceEvGoInSyscall = 32 // denotes that goroutine is in syscall when tracing starts [timestamp, goroutine id]
+ traceEvHeapAlloc = 33 // memstats.heap_live change [timestamp, heap_alloc]
+ traceEvNextGC = 34 // memstats.next_gc change [timestamp, next_gc]
+ traceEvTimerGoroutine = 35 // denotes timer goroutine [timer goroutine id]
+ traceEvFutileWakeup = 36 // denotes that the previous wakeup of this goroutine was futile [timestamp]
+ traceEvString = 37 // string dictionary entry [ID, length, string]
+ traceEvGoStartLocal = 38 // goroutine starts running on the same P as the last event [timestamp, goroutine id]
+ traceEvGoUnblockLocal = 39 // goroutine is unblocked on the same P as the last event [timestamp, goroutine id, stack]
+ traceEvGoSysExitLocal = 40 // syscall exit on the same P as the last event [timestamp, goroutine id, real timestamp]
+ traceEvCount = 41
+)
+
+const (
+ // Timestamps in trace are cputicks/traceTickDiv.
+ // This makes absolute values of timestamp diffs smaller,
+	// and so they are encoded in fewer bytes.
+ // 64 on x86 is somewhat arbitrary (one tick is ~20ns on a 3GHz machine).
+ // The suggested increment frequency for PowerPC's time base register is
+ // 512 MHz according to Power ISA v2.07 section 6.2, so we use 16 on ppc64
+ // and ppc64le.
+ // Tracing won't work reliably for architectures where cputicks is emulated
+ // by nanotime, so the value doesn't matter for those architectures.
+ traceTickDiv = 16 + 48*(sys.Goarch386|sys.GoarchAmd64|sys.GoarchAmd64p32)
+ // Maximum number of PCs in a single stack trace.
+ // Since events contain only stack id rather than whole stack trace,
+ // we can allow quite large values here.
+ traceStackSize = 128
+ // Identifier of a fake P that is used when we trace without a real P.
+ traceGlobProc = -1
+ // Maximum number of bytes to encode uint64 in base-128.
+ traceBytesPerNumber = 10
+ // Shift of the number of arguments in the first event byte.
+ traceArgCountShift = 6
+ // Flag passed to traceGoPark to denote that the previous wakeup of this
+ // goroutine was futile. For example, a goroutine was unblocked on a mutex,
+ // but another goroutine got ahead and acquired the mutex before the first
+ // goroutine is scheduled, so the first goroutine has to block again.
+ // Such wakeups happen on buffered channels and sync.Mutex,
+	// but are generally not interesting for the end user.
+ traceFutileWakeup byte = 128
+)
+
+// trace is global tracing context.
+var trace struct {
+ lock mutex // protects the following members
+	lockOwner *g // to avoid deadlocks during recursive acquisition of lock
+ enabled bool // when set runtime traces events
+ shutdown bool // set when we are waiting for trace reader to finish after setting enabled to false
+ headerWritten bool // whether ReadTrace has emitted trace header
+ footerWritten bool // whether ReadTrace has emitted trace footer
+ shutdownSema uint32 // used to wait for ReadTrace completion
+ seqStart uint64 // sequence number when tracing was started
+ ticksStart int64 // cputicks when tracing was started
+ ticksEnd int64 // cputicks when tracing was stopped
+ timeStart int64 // nanotime when tracing was started
+ timeEnd int64 // nanotime when tracing was stopped
+ seqGC uint64 // GC start/done sequencer
+ reading traceBufPtr // buffer currently handed off to user
+ empty traceBufPtr // stack of empty buffers
+ fullHead traceBufPtr // queue of full buffers
+ fullTail traceBufPtr
+ reader *g // goroutine that called ReadTrace, or nil
+ stackTab traceStackTable // maps stack traces to unique ids
+
+ // Dictionary for traceEvString.
+ // Currently this is used only for func/file:line info after the tracing session,
+ // so we assume single-threaded access.
+ strings map[string]uint64
+ stringSeq uint64
+
+ bufLock mutex // protects buf
+ buf traceBufPtr // global trace buffer, used when running without a p
+}
+
+// traceBufHeader is per-P tracing buffer.
+type traceBufHeader struct {
+ link traceBufPtr // in trace.empty/full
+ lastTicks uint64 // when we wrote the last event
+ pos int // next write offset in arr
+ stk [traceStackSize]uintptr // scratch buffer for traceback
+}
+
+// traceBuf is per-P tracing buffer.
+type traceBuf struct {
+ traceBufHeader
+ arr [64<<10 - unsafe.Sizeof(traceBufHeader{})]byte // underlying storage for the event data
+}
+
+// traceBufPtr is a *traceBuf that is not traced by the garbage
+// collector and doesn't have write barriers. traceBufs are not
+// allocated from the GC'd heap, so this is safe, and are often
+// manipulated in contexts where write barriers are not allowed, so
+// this is necessary.
+type traceBufPtr uintptr
+
+func (tp traceBufPtr) ptr() *traceBuf { return (*traceBuf)(unsafe.Pointer(tp)) }
+func (tp *traceBufPtr) set(b *traceBuf) { *tp = traceBufPtr(unsafe.Pointer(b)) }
+func traceBufPtrOf(b *traceBuf) traceBufPtr {
+ return traceBufPtr(unsafe.Pointer(b))
+}
+
+/*
+Commented out for gccgo for now.
+
+// StartTrace enables tracing for the current process.
+// While tracing, the data will be buffered and available via ReadTrace.
+// StartTrace returns an error if tracing is already enabled.
+// Most clients should use the runtime/trace package or the testing package's
+// -test.trace flag instead of calling StartTrace directly.
+func StartTrace() error {
+ // Stop the world, so that we can take a consistent snapshot
+ // of all goroutines at the beginning of the trace.
+ stopTheWorld("start tracing")
+
+ // We are in stop-the-world, but syscalls can finish and write to trace concurrently.
+ // Exitsyscall could check trace.enabled long before and then suddenly wake up
+ // and decide to write to trace at a random point in time.
+ // However, such a syscall will use the global trace.buf buffer, because we've
+ // acquired all p's by doing stop-the-world. So this protects us from such races.
+ lock(&trace.bufLock)
+
+ if trace.enabled || trace.shutdown {
+ unlock(&trace.bufLock)
+ startTheWorld()
+ return errorString("tracing is already enabled")
+ }
+
+ // Can't set trace.enabled yet. While the world is stopped, exitsyscall could
+ // already emit a delayed event (see exitTicks in exitsyscall) if we set trace.enabled here.
+ // That would lead to an inconsistent trace:
+ // - either GoSysExit appears before EvGoInSyscall,
+ // - or GoSysExit appears for a goroutine for which we don't emit EvGoInSyscall below.
+ // To instruct traceEvent that it must not ignore events below, we set startingtrace.
+ // trace.enabled is set afterwards once we have emitted all preliminary events.
+ _g_ := getg()
+ _g_.m.startingtrace = true
+ for _, gp := range allgs {
+ status := readgstatus(gp)
+ if status != _Gdead {
+ traceGoCreate(gp, gp.startpc) // also resets gp.traceseq/tracelastp
+ }
+ if status == _Gwaiting {
+ // traceEvGoWaiting is implied to have seq=1.
+ gp.traceseq++
+ traceEvent(traceEvGoWaiting, -1, uint64(gp.goid))
+ }
+ if status == _Gsyscall {
+ gp.traceseq++
+ traceEvent(traceEvGoInSyscall, -1, uint64(gp.goid))
+ } else {
+ gp.sysblocktraced = false
+ }
+ }
+ traceProcStart()
+ traceGoStart()
+ // Note: ticksStart needs to be set after we emit traceEvGoInSyscall events.
+ // If we do it the other way around, it is possible that exitsyscall will
+ // query sysexitticks after ticksStart but before traceEvGoInSyscall timestamp.
+ // It will lead to a false conclusion that cputicks is broken.
+ trace.ticksStart = cputicks()
+ trace.timeStart = nanotime()
+ trace.headerWritten = false
+ trace.footerWritten = false
+ trace.strings = make(map[string]uint64)
+ trace.stringSeq = 0
+ trace.seqGC = 0
+ _g_.m.startingtrace = false
+ trace.enabled = true
+
+ unlock(&trace.bufLock)
+
+ startTheWorld()
+ return nil
+}
+
+// StopTrace stops tracing, if it was previously enabled.
+// StopTrace only returns after all the reads for the trace have completed.
+func StopTrace() {
+ // Stop the world so that we can collect the trace buffers from all p's below,
+ // and also to avoid races with traceEvent.
+ stopTheWorld("stop tracing")
+
+ // See the comment in StartTrace.
+ lock(&trace.bufLock)
+
+ if !trace.enabled {
+ unlock(&trace.bufLock)
+ startTheWorld()
+ return
+ }
+
+ traceGoSched()
+
+ for _, p := range &allp {
+ if p == nil {
+ break
+ }
+ buf := p.tracebuf
+ if buf != 0 {
+ traceFullQueue(buf)
+ p.tracebuf = 0
+ }
+ }
+ if trace.buf != 0 && trace.buf.ptr().pos != 0 {
+ buf := trace.buf
+ trace.buf = 0
+ traceFullQueue(buf)
+ }
+
+ for {
+ trace.ticksEnd = cputicks()
+ trace.timeEnd = nanotime()
+ // Windows time can tick only every 15ms, wait for at least one tick.
+ if trace.timeEnd != trace.timeStart {
+ break
+ }
+ osyield()
+ }
+
+ trace.enabled = false
+ trace.shutdown = true
+ unlock(&trace.bufLock)
+
+ startTheWorld()
+
+ // The world is started but we've set trace.shutdown, so new tracing can't start.
+ // Wait for the trace reader to flush pending buffers and stop.
+ semacquire(&trace.shutdownSema, false)
+ if raceenabled {
+ raceacquire(unsafe.Pointer(&trace.shutdownSema))
+ }
+
+ // The lock protects us from races with StartTrace/StopTrace because they do stop-the-world.
+ lock(&trace.lock)
+ for _, p := range &allp {
+ if p == nil {
+ break
+ }
+ if p.tracebuf != 0 {
+ throw("trace: non-empty trace buffer in proc")
+ }
+ }
+ if trace.buf != 0 {
+ throw("trace: non-empty global trace buffer")
+ }
+ if trace.fullHead != 0 || trace.fullTail != 0 {
+ throw("trace: non-empty full trace buffer")
+ }
+ if trace.reading != 0 || trace.reader != nil {
+ throw("trace: reading after shutdown")
+ }
+ for trace.empty != 0 {
+ buf := trace.empty
+ trace.empty = buf.ptr().link
+ sysFree(unsafe.Pointer(buf), unsafe.Sizeof(*buf.ptr()), &memstats.other_sys)
+ }
+ trace.strings = nil
+ trace.shutdown = false
+ unlock(&trace.lock)
+}
+
+// ReadTrace returns the next chunk of binary tracing data, blocking until data
+// is available. If tracing is turned off and all the data accumulated while it
+// was on has been returned, ReadTrace returns nil. The caller must copy the
+// returned data before calling ReadTrace again.
+// ReadTrace must be called from one goroutine at a time.
+func ReadTrace() []byte {
+ // This function may need to lock trace.lock recursively
+ // (goparkunlock -> traceGoPark -> traceEvent -> traceFlush).
+ // To allow this we use trace.lockOwner.
+ // Also this function must not allocate while holding trace.lock:
+ // allocation can invoke the heap allocator, which will try to emit a trace
+ // event while holding the heap lock.
+ lock(&trace.lock)
+ trace.lockOwner = getg()
+
+ if trace.reader != nil {
+ // More than one goroutine reads trace. This is bad.
+ // But we would rather not crash the program because of tracing,
+ // since tracing can be enabled at run time on production servers.
+ trace.lockOwner = nil
+ unlock(&trace.lock)
+ println("runtime: ReadTrace called from multiple goroutines simultaneously")
+ return nil
+ }
+ // Recycle the old buffer.
+ if buf := trace.reading; buf != 0 {
+ buf.ptr().link = trace.empty
+ trace.empty = buf
+ trace.reading = 0
+ }
+ // Write trace header.
+ if !trace.headerWritten {
+ trace.headerWritten = true
+ trace.lockOwner = nil
+ unlock(&trace.lock)
+ return []byte("go 1.7 trace\x00\x00\x00\x00")
+ }
+ // Wait for new data.
+ if trace.fullHead == 0 && !trace.shutdown {
+ trace.reader = getg()
+ goparkunlock(&trace.lock, "trace reader (blocked)", traceEvGoBlock, 2)
+ lock(&trace.lock)
+ }
+ // Write a buffer.
+ if trace.fullHead != 0 {
+ buf := traceFullDequeue()
+ trace.reading = buf
+ trace.lockOwner = nil
+ unlock(&trace.lock)
+ return buf.ptr().arr[:buf.ptr().pos]
+ }
+ // Write footer with timer frequency.
+ if !trace.footerWritten {
+ trace.footerWritten = true
+ // Use float64 because (trace.ticksEnd - trace.ticksStart) * 1e9 can overflow int64.
+ freq := float64(trace.ticksEnd-trace.ticksStart) * 1e9 / float64(trace.timeEnd-trace.timeStart) / traceTickDiv
+ trace.lockOwner = nil
+ unlock(&trace.lock)
+ var data []byte
+ data = append(data, traceEvFrequency|0<<traceArgCountShift)
+ data = traceAppend(data, uint64(freq))
+ if timers.gp != nil {
+ data = append(data, traceEvTimerGoroutine|0<<traceArgCountShift)
+ data = traceAppend(data, uint64(timers.gp.goid))
+ }
+ // This will emit a bunch of full buffers, we will pick them up
+ // on the next iteration.
+ trace.stackTab.dump()
+ return data
+ }
+ // Done.
+ if trace.shutdown {
+ trace.lockOwner = nil
+ unlock(&trace.lock)
+ if raceenabled {
+ // Model synchronization on trace.shutdownSema, which race
+ // detector does not see. This is required to avoid false
+ // race reports on writer passed to trace.Start.
+ racerelease(unsafe.Pointer(&trace.shutdownSema))
+ }
+ // trace.enabled is already reset, so we can call traceable functions.
+ semrelease(&trace.shutdownSema)
+ return nil
+ }
+ // Also bad, but see the comment above.
+ trace.lockOwner = nil
+ unlock(&trace.lock)
+ println("runtime: spurious wakeup of trace reader")
+ return nil
+}
+
+// traceReader returns the trace reader that should be woken up, if any.
+func traceReader() *g {
+ if trace.reader == nil || (trace.fullHead == 0 && !trace.shutdown) {
+ return nil
+ }
+ lock(&trace.lock)
+ if trace.reader == nil || (trace.fullHead == 0 && !trace.shutdown) {
+ unlock(&trace.lock)
+ return nil
+ }
+ gp := trace.reader
+ trace.reader = nil
+ unlock(&trace.lock)
+ return gp
+}
+
+// traceProcFree frees trace buffer associated with pp.
+func traceProcFree(pp *p) {
+ buf := pp.tracebuf
+ pp.tracebuf = 0
+ if buf == 0 {
+ return
+ }
+ lock(&trace.lock)
+ traceFullQueue(buf)
+ unlock(&trace.lock)
+}
+
+// traceFullQueue queues buf into queue of full buffers.
+func traceFullQueue(buf traceBufPtr) {
+ buf.ptr().link = 0
+ if trace.fullHead == 0 {
+ trace.fullHead = buf
+ } else {
+ trace.fullTail.ptr().link = buf
+ }
+ trace.fullTail = buf
+}
+
+// traceFullDequeue dequeues from queue of full buffers.
+func traceFullDequeue() traceBufPtr {
+ buf := trace.fullHead
+ if buf == 0 {
+ return 0
+ }
+ trace.fullHead = buf.ptr().link
+ if trace.fullHead == 0 {
+ trace.fullTail = 0
+ }
+ buf.ptr().link = 0
+ return buf
+}
+
+// traceEvent writes a single event to trace buffer, flushing the buffer if necessary.
+// ev is event type.
+// If skip > 0, write current stack id as the last argument (skipping skip top frames).
+// If skip = 0, this event type should contain a stack, but we don't want
+// to collect and remember it for this particular call.
+func traceEvent(ev byte, skip int, args ...uint64) {
+ mp, pid, bufp := traceAcquireBuffer()
+ // Double-check trace.enabled now that we've done m.locks++ and acquired bufLock.
+ // This protects from races between traceEvent and StartTrace/StopTrace.
+
+ // The caller checked that trace.enabled == true, but trace.enabled might have been
+ // turned off between the check and now. Check again. traceAcquireBuffer did mp.locks++,
+ // StopTrace does stopTheWorld, and stopTheWorld waits for mp.locks to go back to zero,
+ // so if we see trace.enabled == true now, we know it's true for the rest of the function.
+ // Exitsyscall can run even during stopTheWorld. The race with StartTrace/StopTrace
+ // during tracing in exitsyscall is resolved by locking trace.bufLock in traceAcquireBuffer.
+ if !trace.enabled && !mp.startingtrace {
+ traceReleaseBuffer(pid)
+ return
+ }
+ buf := (*bufp).ptr()
+ const maxSize = 2 + 5*traceBytesPerNumber // event type, length, sequence, timestamp, stack id and two additional params
+ if buf == nil || len(buf.arr)-buf.pos < maxSize {
+ buf = traceFlush(traceBufPtrOf(buf)).ptr()
+ (*bufp).set(buf)
+ }
+
+ ticks := uint64(cputicks()) / traceTickDiv
+ tickDiff := ticks - buf.lastTicks
+ if buf.pos == 0 {
+ buf.byte(traceEvBatch | 1<<traceArgCountShift)
+ buf.varint(uint64(pid))
+ buf.varint(ticks)
+ tickDiff = 0
+ }
+ buf.lastTicks = ticks
+ narg := byte(len(args))
+ if skip >= 0 {
+ narg++
+ }
+ // We have only 2 bits for the number of arguments.
+ // If the number is >= 3, the event type is followed by the event length in bytes.
+ if narg > 3 {
+ narg = 3
+ }
+ startPos := buf.pos
+ buf.byte(ev | narg<<traceArgCountShift)
+ var lenp *byte
+ if narg == 3 {
+ // Reserve the byte for length assuming that length < 128.
+ buf.varint(0)
+ lenp = &buf.arr[buf.pos-1]
+ }
+ buf.varint(tickDiff)
+ for _, a := range args {
+ buf.varint(a)
+ }
+ if skip == 0 {
+ buf.varint(0)
+ } else if skip > 0 {
+ _g_ := getg()
+ gp := mp.curg
+ var nstk int
+ if gp == _g_ {
+ nstk = callers(skip, buf.stk[:])
+ } else if gp != nil {
+ gp = mp.curg
+ // This may happen when tracing a system call,
+ // so we must lock the stack.
+ if gcTryLockStackBarriers(gp) {
+ nstk = gcallers(gp, skip, buf.stk[:])
+ gcUnlockStackBarriers(gp)
+ }
+ }
+ if nstk > 0 {
+ nstk-- // skip runtime.goexit
+ }
+ if nstk > 0 && gp.goid == 1 {
+ nstk-- // skip runtime.main
+ }
+ id := trace.stackTab.put(buf.stk[:nstk])
+ buf.varint(uint64(id))
+ }
+ evSize := buf.pos - startPos
+ if evSize > maxSize {
+ throw("invalid length of trace event")
+ }
+ if lenp != nil {
+ // Fill in actual length.
+ *lenp = byte(evSize - 2)
+ }
+ traceReleaseBuffer(pid)
+}
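
To make the first-byte layout written above concrete: the low six bits carry the event type and the top two bits carry the argument count, capped at 3 (in which case an explicit length byte follows). A minimal standalone sketch of that packing, using traceEvGoSysCall from the event table above purely as an example value (illustration only, not part of the patch):

package main

import "fmt"

const traceArgCountShift = 6 // as in the constants above

// packEventByte combines an event type with a capped argument count, the way
// traceEvent builds the first byte of every record.
func packEventByte(ev byte, nargs int) byte {
	narg := byte(nargs)
	if narg > 3 {
		narg = 3 // only 2 bits; >= 3 means "length byte follows"
	}
	return ev | narg<<traceArgCountShift
}

func main() {
	const traceEvGoSysCall = 28 // from the event table above
	b := packEventByte(traceEvGoSysCall, 1)
	ev := b & ((1 << traceArgCountShift) - 1)
	n := b >> traceArgCountShift
	fmt.Printf("byte=0x%02x type=%d args=%d\n", b, ev, n) // byte=0x5c type=28 args=1
}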
+
+// traceAcquireBuffer returns trace buffer to use and, if necessary, locks it.
+func traceAcquireBuffer() (mp *m, pid int32, bufp *traceBufPtr) {
+ mp = acquirem()
+ if p := mp.p.ptr(); p != nil {
+ return mp, p.id, &p.tracebuf
+ }
+ lock(&trace.bufLock)
+ return mp, traceGlobProc, &trace.buf
+}
+
+// traceReleaseBuffer releases a buffer previously acquired with traceAcquireBuffer.
+func traceReleaseBuffer(pid int32) {
+ if pid == traceGlobProc {
+ unlock(&trace.bufLock)
+ }
+ releasem(getg().m)
+}
+
+ // traceFlush puts buf onto the queue of full buffers and returns an empty buffer.
+func traceFlush(buf traceBufPtr) traceBufPtr {
+ owner := trace.lockOwner
+ dolock := owner == nil || owner != getg().m.curg
+ if dolock {
+ lock(&trace.lock)
+ }
+ if buf != 0 {
+ traceFullQueue(buf)
+ }
+ if trace.empty != 0 {
+ buf = trace.empty
+ trace.empty = buf.ptr().link
+ } else {
+ buf = traceBufPtr(sysAlloc(unsafe.Sizeof(traceBuf{}), &memstats.other_sys))
+ if buf == 0 {
+ throw("trace: out of memory")
+ }
+ }
+ bufp := buf.ptr()
+ bufp.link.set(nil)
+ bufp.pos = 0
+ bufp.lastTicks = 0
+ if dolock {
+ unlock(&trace.lock)
+ }
+ return buf
+}
+
+func traceString(buf *traceBuf, s string) (uint64, *traceBuf) {
+ if s == "" {
+ return 0, buf
+ }
+ if id, ok := trace.strings[s]; ok {
+ return id, buf
+ }
+
+ trace.stringSeq++
+ id := trace.stringSeq
+ trace.strings[s] = id
+
+ size := 1 + 2*traceBytesPerNumber + len(s)
+ if len(buf.arr)-buf.pos < size {
+ buf = traceFlush(traceBufPtrOf(buf)).ptr()
+ }
+ buf.byte(traceEvString)
+ buf.varint(id)
+ buf.varint(uint64(len(s)))
+ buf.pos += copy(buf.arr[buf.pos:], s)
+ return id, buf
+}
+
+// traceAppend appends v to buf in little-endian-base-128 encoding.
+func traceAppend(buf []byte, v uint64) []byte {
+ for ; v >= 0x80; v >>= 7 {
+ buf = append(buf, 0x80|byte(v))
+ }
+ buf = append(buf, byte(v))
+ return buf
+}
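
traceAppend and varint below implement plain unsigned LEB128: seven payload bits per byte, with the 0x80 continuation bit set on every byte except the last. A standalone sketch with a matching decoder, for illustration only:

package main

import "fmt"

// appendVarint reproduces the encoding used by traceAppend/varint.
func appendVarint(buf []byte, v uint64) []byte {
	for ; v >= 0x80; v >>= 7 {
		buf = append(buf, 0x80|byte(v))
	}
	return append(buf, byte(v))
}

// readVarint is the inverse: it stops at the first byte without the
// continuation bit and reports how many bytes were consumed.
func readVarint(buf []byte) (v uint64, n int) {
	for shift := uint(0); ; shift += 7 {
		b := buf[n]
		n++
		v |= uint64(b&0x7f) << shift
		if b < 0x80 {
			return v, n
		}
	}
}

func main() {
	enc := appendVarint(nil, 300)
	fmt.Printf("% x\n", enc) // ac 02 (300 = 0b10_0101100 -> 0101100, then 10)
	v, _ := readVarint(enc)
	fmt.Println(v) // 300
}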
+
+// varint appends v to buf in little-endian-base-128 encoding.
+func (buf *traceBuf) varint(v uint64) {
+ pos := buf.pos
+ for ; v >= 0x80; v >>= 7 {
+ buf.arr[pos] = 0x80 | byte(v)
+ pos++
+ }
+ buf.arr[pos] = byte(v)
+ pos++
+ buf.pos = pos
+}
+
+// byte appends v to buf.
+func (buf *traceBuf) byte(v byte) {
+ buf.arr[buf.pos] = v
+ buf.pos++
+}
+
+*/
+
+ // traceStackTable maps stack traces (arrays of PCs) to unique uint32 ids.
+// It is lock-free for reading.
+type traceStackTable struct {
+ lock mutex
+ seq uint32
+ mem traceAlloc
+ tab [1 << 13]traceStackPtr
+}
+
+// traceStack is a single stack in traceStackTable.
+type traceStack struct {
+ link traceStackPtr
+ hash uintptr
+ id uint32
+ n int
+ stk [0]uintptr // real type [n]uintptr
+}
+
+type traceStackPtr uintptr
+
+/*
+Commented out for gccgo for now.
+
+func (tp traceStackPtr) ptr() *traceStack { return (*traceStack)(unsafe.Pointer(tp)) }
+
+// stack returns slice of PCs.
+func (ts *traceStack) stack() []uintptr {
+ return (*[traceStackSize]uintptr)(unsafe.Pointer(&ts.stk))[:ts.n]
+}
+
+ // put returns a unique id for the stack trace pcs, caching it in the table
+ // if it sees the trace for the first time.
+func (tab *traceStackTable) put(pcs []uintptr) uint32 {
+ if len(pcs) == 0 {
+ return 0
+ }
+ hash := memhash(unsafe.Pointer(&pcs[0]), 0, uintptr(len(pcs))*unsafe.Sizeof(pcs[0]))
+ // First, search the hashtable w/o the mutex.
+ if id := tab.find(pcs, hash); id != 0 {
+ return id
+ }
+ // Now, double check under the mutex.
+ lock(&tab.lock)
+ if id := tab.find(pcs, hash); id != 0 {
+ unlock(&tab.lock)
+ return id
+ }
+ // Create new record.
+ tab.seq++
+ stk := tab.newStack(len(pcs))
+ stk.hash = hash
+ stk.id = tab.seq
+ stk.n = len(pcs)
+ stkpc := stk.stack()
+ for i, pc := range pcs {
+ stkpc[i] = pc
+ }
+ part := int(hash % uintptr(len(tab.tab)))
+ stk.link = tab.tab[part]
+ atomicstorep(unsafe.Pointer(&tab.tab[part]), unsafe.Pointer(stk))
+ unlock(&tab.lock)
+ return stk.id
+}
+
+// find checks if the stack trace pcs is already present in the table.
+func (tab *traceStackTable) find(pcs []uintptr, hash uintptr) uint32 {
+ part := int(hash % uintptr(len(tab.tab)))
+Search:
+ for stk := tab.tab[part].ptr(); stk != nil; stk = stk.link.ptr() {
+ if stk.hash == hash && stk.n == len(pcs) {
+ for i, stkpc := range stk.stack() {
+ if stkpc != pcs[i] {
+ continue Search
+ }
+ }
+ return stk.id
+ }
+ }
+ return 0
+}
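
put and find above follow the usual lock-free-read, locked-insert discipline: probe the table without the mutex, take the mutex, re-check, then publish a fully initialized record with an atomic store so concurrent readers never observe a partial entry. A minimal standalone sketch of the same discipline (using sync/atomic instead of the runtime's atomicstorep; the key type and hashing are placeholders):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// node is one entry in a bucket's singly linked list; next is set before the
// node is published and never changed afterwards, so readers need no lock.
type node struct {
	key  string
	id   uint32
	next *node
}

// table mimics the put/find discipline of traceStackTable with a toy key type.
type table struct {
	mu      sync.Mutex
	seq     uint32
	buckets [64]atomic.Pointer[node]
}

func (t *table) find(b int, key string) uint32 {
	for n := t.buckets[b].Load(); n != nil; n = n.next {
		if n.key == key {
			return n.id
		}
	}
	return 0
}

func (t *table) put(key string) uint32 {
	b := len(key) % len(t.buckets) // stand-in for a real hash
	if id := t.find(b, key); id != 0 {
		return id // fast path: lock-free lookup
	}
	t.mu.Lock()
	defer t.mu.Unlock()
	if id := t.find(b, key); id != 0 {
		return id // double-check under the mutex
	}
	t.seq++
	n := &node{key: key, id: t.seq, next: t.buckets[b].Load()}
	t.buckets[b].Store(n) // publish the fully built node
	return n.id
}

func main() {
	var t table
	fmt.Println(t.put("runtime.main"), t.put("runtime.main")) // 1 1
}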
+
+// newStack allocates a new stack of size n.
+func (tab *traceStackTable) newStack(n int) *traceStack {
+ return (*traceStack)(tab.mem.alloc(unsafe.Sizeof(traceStack{}) + uintptr(n)*sys.PtrSize))
+}
+
+// dump writes all previously cached stacks to trace buffers,
+// releases all memory and resets state.
+func (tab *traceStackTable) dump() {
+ frames := make(map[uintptr]traceFrame)
+ var tmp [(2 + 4*traceStackSize) * traceBytesPerNumber]byte
+ buf := traceFlush(0).ptr()
+ for _, stk := range tab.tab {
+ stk := stk.ptr()
+ for ; stk != nil; stk = stk.link.ptr() {
+ tmpbuf := tmp[:0]
+ tmpbuf = traceAppend(tmpbuf, uint64(stk.id))
+ tmpbuf = traceAppend(tmpbuf, uint64(stk.n))
+ for _, pc := range stk.stack() {
+ var frame traceFrame
+ frame, buf = traceFrameForPC(buf, frames, pc)
+ tmpbuf = traceAppend(tmpbuf, uint64(pc))
+ tmpbuf = traceAppend(tmpbuf, uint64(frame.funcID))
+ tmpbuf = traceAppend(tmpbuf, uint64(frame.fileID))
+ tmpbuf = traceAppend(tmpbuf, uint64(frame.line))
+ }
+ // Now copy to the buffer.
+ size := 1 + traceBytesPerNumber + len(tmpbuf)
+ if len(buf.arr)-buf.pos < size {
+ buf = traceFlush(traceBufPtrOf(buf)).ptr()
+ }
+ buf.byte(traceEvStack | 3<<traceArgCountShift)
+ buf.varint(uint64(len(tmpbuf)))
+ buf.pos += copy(buf.arr[buf.pos:], tmpbuf)
+ }
+ }
+
+ lock(&trace.lock)
+ traceFullQueue(traceBufPtrOf(buf))
+ unlock(&trace.lock)
+
+ tab.mem.drop()
+ *tab = traceStackTable{}
+}
+
+type traceFrame struct {
+ funcID uint64
+ fileID uint64
+ line uint64
+}
+
+func traceFrameForPC(buf *traceBuf, frames map[uintptr]traceFrame, pc uintptr) (traceFrame, *traceBuf) {
+ if frame, ok := frames[pc]; ok {
+ return frame, buf
+ }
+
+ var frame traceFrame
+ f := findfunc(pc)
+ if f == nil {
+ frames[pc] = frame
+ return frame, buf
+ }
+
+ fn := funcname(f)
+ const maxLen = 1 << 10
+ if len(fn) > maxLen {
+ fn = fn[len(fn)-maxLen:]
+ }
+ frame.funcID, buf = traceString(buf, fn)
+ file, line := funcline(f, pc-sys.PCQuantum)
+ frame.line = uint64(line)
+ if len(file) > maxLen {
+ file = file[len(file)-maxLen:]
+ }
+ frame.fileID, buf = traceString(buf, file)
+ return frame, buf
+}
+
+*/
+
+// traceAlloc is a non-thread-safe region allocator.
+// It holds a linked list of traceAllocBlock.
+type traceAlloc struct {
+ head traceAllocBlockPtr
+ off uintptr
+}
+
+// traceAllocBlock is a block in traceAlloc.
+//
+// traceAllocBlock is allocated from non-GC'd memory, so it must not
+// contain heap pointers. Writes to pointers to traceAllocBlocks do
+// not need write barriers.
+type traceAllocBlock struct {
+ next traceAllocBlockPtr
+ data [64<<10 - sys.PtrSize]byte
+}
+
+type traceAllocBlockPtr uintptr
+
+func (p traceAllocBlockPtr) ptr() *traceAllocBlock { return (*traceAllocBlock)(unsafe.Pointer(p)) }
+func (p *traceAllocBlockPtr) set(x *traceAllocBlock) { *p = traceAllocBlockPtr(unsafe.Pointer(x)) }
+
+/*
+Commented out for gccgo for now.
+
+// alloc allocates n-byte block.
+func (a *traceAlloc) alloc(n uintptr) unsafe.Pointer {
+ n = round(n, sys.PtrSize)
+ if a.head == 0 || a.off+n > uintptr(len(a.head.ptr().data)) {
+ if n > uintptr(len(a.head.ptr().data)) {
+ throw("trace: alloc too large")
+ }
+ block := (*traceAllocBlock)(sysAlloc(unsafe.Sizeof(traceAllocBlock{}), &memstats.other_sys))
+ if block == nil {
+ throw("trace: out of memory")
+ }
+ block.next.set(a.head.ptr())
+ a.head.set(block)
+ a.off = 0
+ }
+ p := &a.head.ptr().data[a.off]
+ a.off += n
+ return unsafe.Pointer(p)
+}
+
+// drop frees all previously allocated memory and resets the allocator.
+func (a *traceAlloc) drop() {
+ for a.head != 0 {
+ block := a.head.ptr()
+ a.head.set(block.next.ptr())
+ sysFree(unsafe.Pointer(block), unsafe.Sizeof(traceAllocBlock{}), &memstats.other_sys)
+ }
+}
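
alloc and drop above are a textbook region (arena) allocator: bump an offset inside a fixed-size block, chain a new block when the current one fills up, and release everything at once. A standalone sketch of the same idea on ordinary heap slices (block size and rounding roughly mirror the code above; this is an illustration, not the runtime allocator):

package main

import "fmt"

const blockSize = 64 << 10 // roughly the 64 KiB traceAllocBlock payload

// arena hands out chunks from the current block and starts a new block when
// the current one cannot satisfy the request.
type arena struct {
	blocks [][]byte
	off    int
}

func (a *arena) alloc(n int) []byte {
	n = (n + 7) &^ 7 // round up to 8 bytes, like round(n, sys.PtrSize)
	if len(a.blocks) == 0 || a.off+n > blockSize {
		if n > blockSize {
			panic("arena: alloc too large")
		}
		a.blocks = append(a.blocks, make([]byte, blockSize))
		a.off = 0
	}
	b := a.blocks[len(a.blocks)-1]
	p := b[a.off : a.off+n]
	a.off += n
	return p
}

// drop releases every block at once, like traceAlloc.drop.
func (a *arena) drop() { a.blocks, a.off = nil, 0 }

func main() {
	var a arena
	fmt.Println(len(a.alloc(13))) // 16 after rounding
	a.drop()
}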
+
+// The following functions write specific events to trace.
+
+func traceGomaxprocs(procs int32) {
+ traceEvent(traceEvGomaxprocs, 1, uint64(procs))
+}
+
+func traceProcStart() {
+ traceEvent(traceEvProcStart, -1, uint64(getg().m.id))
+}
+
+func traceProcStop(pp *p) {
+ // Sysmon and stopTheWorld can stop Ps blocked in syscalls;
+ // to handle this we temporarily employ the P.
+ mp := acquirem()
+ oldp := mp.p
+ mp.p.set(pp)
+ traceEvent(traceEvProcStop, -1)
+ mp.p = oldp
+ releasem(mp)
+}
+
+func traceGCStart() {
+ traceEvent(traceEvGCStart, 3, trace.seqGC)
+ trace.seqGC++
+}
+
+func traceGCDone() {
+ traceEvent(traceEvGCDone, -1)
+}
+
+func traceGCScanStart() {
+ traceEvent(traceEvGCScanStart, -1)
+}
+
+func traceGCScanDone() {
+ traceEvent(traceEvGCScanDone, -1)
+}
+
+func traceGCSweepStart() {
+ traceEvent(traceEvGCSweepStart, 1)
+}
+
+func traceGCSweepDone() {
+ traceEvent(traceEvGCSweepDone, -1)
+}
+
+func traceGoCreate(newg *g, pc uintptr) {
+ newg.traceseq = 0
+ newg.tracelastp = getg().m.p
+ // +PCQuantum because traceFrameForPC expects return PCs and subtracts PCQuantum.
+ id := trace.stackTab.put([]uintptr{pc + sys.PCQuantum})
+ traceEvent(traceEvGoCreate, 2, uint64(newg.goid), uint64(id))
+}
+
+func traceGoStart() {
+ _g_ := getg().m.curg
+ _p_ := _g_.m.p
+ _g_.traceseq++
+ if _g_.tracelastp == _p_ {
+ traceEvent(traceEvGoStartLocal, -1, uint64(_g_.goid))
+ } else {
+ _g_.tracelastp = _p_
+ traceEvent(traceEvGoStart, -1, uint64(_g_.goid), _g_.traceseq)
+ }
+}
+
+func traceGoEnd() {
+ traceEvent(traceEvGoEnd, -1)
+}
+
+func traceGoSched() {
+ _g_ := getg()
+ _g_.tracelastp = _g_.m.p
+ traceEvent(traceEvGoSched, 1)
+}
+
+func traceGoPreempt() {
+ _g_ := getg()
+ _g_.tracelastp = _g_.m.p
+ traceEvent(traceEvGoPreempt, 1)
+}
+
+func traceGoPark(traceEv byte, skip int, gp *g) {
+ if traceEv&traceFutileWakeup != 0 {
+ traceEvent(traceEvFutileWakeup, -1)
+ }
+ traceEvent(traceEv & ^traceFutileWakeup, skip)
+}
+
+func traceGoUnpark(gp *g, skip int) {
+ _p_ := getg().m.p
+ gp.traceseq++
+ if gp.tracelastp == _p_ {
+ traceEvent(traceEvGoUnblockLocal, skip, uint64(gp.goid))
+ } else {
+ gp.tracelastp = _p_
+ traceEvent(traceEvGoUnblock, skip, uint64(gp.goid), gp.traceseq)
+ }
+}
+
+func traceGoSysCall() {
+ traceEvent(traceEvGoSysCall, 1)
+}
+
+func traceGoSysExit(ts int64) {
+ if ts != 0 && ts < trace.ticksStart {
+ // There is a race between the code that initializes sysexitticks
+ // (in exitsyscall, which runs without a P, and therefore is not
+ // stopped with the rest of the world) and the code that initializes
+ // a new trace. The recorded sysexitticks must therefore be treated
+ // as "best effort". If they are valid for this trace, then great,
+ // use them for greater accuracy. But if they're not valid for this
+ // trace, assume that the trace was started after the actual syscall
+ // exit (but before we actually managed to start the goroutine,
+ // aka right now), and assign a fresh time stamp to keep the log consistent.
+ ts = 0
+ }
+ _g_ := getg().m.curg
+ _g_.traceseq++
+ _g_.tracelastp = _g_.m.p
+ traceEvent(traceEvGoSysExit, -1, uint64(_g_.goid), _g_.traceseq, uint64(ts)/traceTickDiv)
+}
+
+func traceGoSysBlock(pp *p) {
+ // Sysmon and stopTheWorld can declare syscalls running on remote Ps as blocked;
+ // to handle this we temporarily employ the P.
+ mp := acquirem()
+ oldp := mp.p
+ mp.p.set(pp)
+ traceEvent(traceEvGoSysBlock, -1)
+ mp.p = oldp
+ releasem(mp)
+}
+
+func traceHeapAlloc() {
+ traceEvent(traceEvHeapAlloc, -1, memstats.heap_live)
+}
+
+func traceNextGC() {
+ traceEvent(traceEvNextGC, -1, memstats.next_gc)
+}
+
+*/
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-#include "runtime.h"
-#include "arch.h"
-#include "go-type.h"
-#include "malloc.h"
-#include "chan.h"
-
-uint32 runtime_Hchansize = sizeof(Hchan);
-
-static void dequeueg(WaitQ*);
-static SudoG* dequeue(WaitQ*);
-static void enqueue(WaitQ*, SudoG*);
-
-static Hchan*
-makechan(ChanType *t, int64 hint)
-{
- Hchan *c;
- uintptr n;
- const Type *elem;
-
- elem = t->__element_type;
-
- // compiler checks this but be safe.
- if(elem->__size >= (1<<16))
- runtime_throw("makechan: invalid channel element type");
-
- if(hint < 0 || (intgo)hint != hint || (elem->__size > 0 && (uintptr)hint > (MaxMem - sizeof(*c)) / elem->__size))
- runtime_panicstring("makechan: size out of range");
-
- n = sizeof(*c);
- n = ROUND(n, elem->__align);
-
- // allocate memory in one call
- c = (Hchan*)runtime_mallocgc(sizeof(*c) + hint*elem->__size, (uintptr)t | TypeInfo_Chan, 0);
- c->elemsize = elem->__size;
- c->elemtype = elem;
- c->dataqsiz = hint;
-
- if(debug)
- runtime_printf("makechan: chan=%p; elemsize=%D; dataqsiz=%D\n",
- c, (int64)elem->__size, (int64)c->dataqsiz);
-
- return c;
-}
-
-func reflect.makechan(t *ChanType, size uint64) (c *Hchan) {
- c = makechan(t, size);
-}
-
-Hchan*
-__go_new_channel(ChanType *t, uintptr hint)
-{
- return makechan(t, hint);
-}
-
-Hchan*
-__go_new_channel_big(ChanType *t, uint64 hint)
-{
- return makechan(t, hint);
-}
-
-/*
- * generic single channel send/recv
- * if the bool pointer is nil,
- * then the full exchange will
- * occur. if pres is not nil,
- * then the protocol will not
- * sleep but return if it could
- * not complete.
- *
- * sleep can wake up with g->param == nil
- * when a channel involved in the sleep has
- * been closed. it is easiest to loop and re-run
- * the operation; we'll see that it's now closed.
- */
-static bool
-chansend(ChanType *t, Hchan *c, byte *ep, bool block, void *pc)
-{
- USED(pc);
- SudoG *sg;
- SudoG mysg;
- G* gp;
- int64 t0;
- G* g;
-
- g = runtime_g();
-
- if(c == nil) {
- USED(t);
- if(!block)
- return false;
- runtime_park(nil, nil, "chan send (nil chan)");
- return false; // not reached
- }
-
- if(runtime_gcwaiting())
- runtime_gosched();
-
- if(debug) {
- runtime_printf("chansend: chan=%p\n", c);
- }
-
- t0 = 0;
- mysg.releasetime = 0;
- if(runtime_blockprofilerate > 0) {
- t0 = runtime_cputicks();
- mysg.releasetime = -1;
- }
-
- runtime_lock(c);
- if(c->closed)
- goto closed;
-
- if(c->dataqsiz > 0)
- goto asynch;
-
- sg = dequeue(&c->recvq);
- if(sg != nil) {
- runtime_unlock(c);
-
- gp = sg->g;
- gp->param = sg;
- if(sg->elem != nil)
- runtime_memmove(sg->elem, ep, c->elemsize);
- if(sg->releasetime)
- sg->releasetime = runtime_cputicks();
- runtime_ready(gp);
- return true;
- }
-
- if(!block) {
- runtime_unlock(c);
- return false;
- }
-
- mysg.elem = ep;
- mysg.g = g;
- mysg.selectdone = nil;
- g->param = nil;
- enqueue(&c->sendq, &mysg);
- runtime_parkunlock(c, "chan send");
-
- if(g->param == nil) {
- runtime_lock(c);
- if(!c->closed)
- runtime_throw("chansend: spurious wakeup");
- goto closed;
- }
-
- if(mysg.releasetime > 0)
- runtime_blockevent(mysg.releasetime - t0, 2);
-
- return true;
-
-asynch:
- if(c->closed)
- goto closed;
-
- if(c->qcount >= c->dataqsiz) {
- if(!block) {
- runtime_unlock(c);
- return false;
- }
- mysg.g = g;
- mysg.elem = nil;
- mysg.selectdone = nil;
- enqueue(&c->sendq, &mysg);
- runtime_parkunlock(c, "chan send");
-
- runtime_lock(c);
- goto asynch;
- }
-
- runtime_memmove(chanbuf(c, c->sendx), ep, c->elemsize);
- if(++c->sendx == c->dataqsiz)
- c->sendx = 0;
- c->qcount++;
-
- sg = dequeue(&c->recvq);
- if(sg != nil) {
- gp = sg->g;
- runtime_unlock(c);
- if(sg->releasetime)
- sg->releasetime = runtime_cputicks();
- runtime_ready(gp);
- } else
- runtime_unlock(c);
- if(mysg.releasetime > 0)
- runtime_blockevent(mysg.releasetime - t0, 2);
- return true;
-
-closed:
- runtime_unlock(c);
- runtime_panicstring("send on closed channel");
- return false; // not reached
-}
-
-
-static bool
-chanrecv(ChanType *t, Hchan* c, byte *ep, bool block, bool *received)
-{
- SudoG *sg;
- SudoG mysg;
- G *gp;
- int64 t0;
- G *g;
-
- if(runtime_gcwaiting())
- runtime_gosched();
-
- if(debug)
- runtime_printf("chanrecv: chan=%p\n", c);
-
- g = runtime_g();
-
- if(c == nil) {
- USED(t);
- if(!block)
- return false;
- runtime_park(nil, nil, "chan receive (nil chan)");
- return false; // not reached
- }
-
- t0 = 0;
- mysg.releasetime = 0;
- if(runtime_blockprofilerate > 0) {
- t0 = runtime_cputicks();
- mysg.releasetime = -1;
- }
-
- runtime_lock(c);
- if(c->dataqsiz > 0)
- goto asynch;
-
- if(c->closed)
- goto closed;
-
- sg = dequeue(&c->sendq);
- if(sg != nil) {
- runtime_unlock(c);
-
- if(ep != nil)
- runtime_memmove(ep, sg->elem, c->elemsize);
- gp = sg->g;
- gp->param = sg;
- if(sg->releasetime)
- sg->releasetime = runtime_cputicks();
- runtime_ready(gp);
-
- if(received != nil)
- *received = true;
- return true;
- }
-
- if(!block) {
- runtime_unlock(c);
- return false;
- }
-
- mysg.elem = ep;
- mysg.g = g;
- mysg.selectdone = nil;
- g->param = nil;
- enqueue(&c->recvq, &mysg);
- runtime_parkunlock(c, "chan receive");
-
- if(g->param == nil) {
- runtime_lock(c);
- if(!c->closed)
- runtime_throw("chanrecv: spurious wakeup");
- goto closed;
- }
-
- if(received != nil)
- *received = true;
- if(mysg.releasetime > 0)
- runtime_blockevent(mysg.releasetime - t0, 2);
- return true;
-
-asynch:
- if(c->qcount <= 0) {
- if(c->closed)
- goto closed;
-
- if(!block) {
- runtime_unlock(c);
- if(received != nil)
- *received = false;
- return false;
- }
- mysg.g = g;
- mysg.elem = nil;
- mysg.selectdone = nil;
- enqueue(&c->recvq, &mysg);
- runtime_parkunlock(c, "chan receive");
-
- runtime_lock(c);
- goto asynch;
- }
-
- if(ep != nil)
- runtime_memmove(ep, chanbuf(c, c->recvx), c->elemsize);
- runtime_memclr(chanbuf(c, c->recvx), c->elemsize);
- if(++c->recvx == c->dataqsiz)
- c->recvx = 0;
- c->qcount--;
-
- sg = dequeue(&c->sendq);
- if(sg != nil) {
- gp = sg->g;
- runtime_unlock(c);
- if(sg->releasetime)
- sg->releasetime = runtime_cputicks();
- runtime_ready(gp);
- } else
- runtime_unlock(c);
-
- if(received != nil)
- *received = true;
- if(mysg.releasetime > 0)
- runtime_blockevent(mysg.releasetime - t0, 2);
- return true;
-
-closed:
- if(ep != nil)
- runtime_memclr(ep, c->elemsize);
- if(received != nil)
- *received = false;
- runtime_unlock(c);
- if(mysg.releasetime > 0)
- runtime_blockevent(mysg.releasetime - t0, 2);
- return true;
-}
-
-// The compiler generates a call to __go_send_small to send a value 8
-// bytes or smaller.
-void
-__go_send_small(ChanType *t, Hchan* c, uint64 val)
-{
- union
- {
- byte b[sizeof(uint64)];
- uint64 v;
- } u;
- byte *v;
-
- u.v = val;
-#ifndef WORDS_BIGENDIAN
- v = u.b;
-#else
- v = u.b + sizeof(uint64) - t->__element_type->__size;
-#endif
- chansend(t, c, v, true, runtime_getcallerpc(&t));
-}
-
-// The compiler generates a call to __go_send_big to send a value
-// larger than 8 bytes.
-void
-__go_send_big(ChanType *t, Hchan* c, byte* v)
-{
- chansend(t, c, v, true, runtime_getcallerpc(&t));
-}
-
-// The compiler generates a call to __go_receive to receive a
-// value from a channel.
-void
-__go_receive(ChanType *t, Hchan* c, byte* v)
-{
- chanrecv(t, c, v, true, nil);
-}
-
-_Bool runtime_chanrecv2(ChanType *t, Hchan* c, byte* v)
- __asm__ (GOSYM_PREFIX "runtime.chanrecv2");
-
-_Bool
-runtime_chanrecv2(ChanType *t, Hchan* c, byte* v)
-{
- bool received = false;
-
- chanrecv(t, c, v, true, &received);
- return received;
-}
-
-// compiler implements
-//
-// select {
-// case c <- v:
-// ... foo
-// default:
-// ... bar
-// }
-//
-// as
-//
-// if selectnbsend(c, v) {
-// ... foo
-// } else {
-// ... bar
-// }
-//
-func selectnbsend(t *ChanType, c *Hchan, elem *byte) (selected bool) {
- selected = chansend(t, c, elem, false, runtime_getcallerpc(&t));
-}
-
-// compiler implements
-//
-// select {
-// case v = <-c:
-// ... foo
-// default:
-// ... bar
-// }
-//
-// as
-//
-// if selectnbrecv(&v, c) {
-// ... foo
-// } else {
-// ... bar
-// }
-//
-func selectnbrecv(t *ChanType, elem *byte, c *Hchan) (selected bool) {
- selected = chanrecv(t, c, elem, false, nil);
-}
-
-// compiler implements
-//
-// select {
-// case v, ok = <-c:
-// ... foo
-// default:
-// ... bar
-// }
-//
-// as
-//
-// if c != nil && selectnbrecv2(&v, &ok, c) {
-// ... foo
-// } else {
-// ... bar
-// }
-//
-func selectnbrecv2(t *ChanType, elem *byte, received *bool, c *Hchan) (selected bool) {
- bool r;
-
- selected = chanrecv(t, c, elem, false, received == nil ? nil : &r);
- if(received != nil)
- *received = r;
-}
-
-func reflect.chansend(t *ChanType, c *Hchan, elem *byte, nb bool) (selected bool) {
- selected = chansend(t, c, elem, !nb, runtime_getcallerpc(&t));
-}
-
-func reflect.chanrecv(t *ChanType, c *Hchan, nb bool, elem *byte) (selected bool, received bool) {
- received = false;
- selected = chanrecv(t, c, elem, !nb, &received);
-}
-
-static Select* newselect(int32);
-
-func newselect(size int32) (sel *byte) {
- sel = (byte*)newselect(size);
-}
-
-static Select*
-newselect(int32 size)
-{
- int32 n;
- Select *sel;
-
- n = 0;
- if(size > 1)
- n = size-1;
-
- // allocate all the memory we need in a single allocation
- // start with Select with size cases
- // then lockorder with size entries
- // then pollorder with size entries
- sel = runtime_mal(sizeof(*sel) +
- n*sizeof(sel->scase[0]) +
- size*sizeof(sel->lockorder[0]) +
- size*sizeof(sel->pollorder[0]));
-
- sel->tcase = size;
- sel->ncase = 0;
- sel->lockorder = (void*)(sel->scase + size);
- sel->pollorder = (void*)(sel->lockorder + size);
-
- if(debug)
- runtime_printf("newselect s=%p size=%d\n", sel, size);
- return sel;
-}
-
-// cut in half to give stack a chance to split
-static void selectsend(Select *sel, Hchan *c, int index, void *elem);
-
-func selectsend(sel *Select, c *Hchan, elem *byte, index int32) {
- // nil cases do not compete
- if(c != nil)
- selectsend(sel, c, index, elem);
-}
-
-static void
-selectsend(Select *sel, Hchan *c, int index, void *elem)
-{
- int32 i;
- Scase *cas;
-
- i = sel->ncase;
- if(i >= sel->tcase)
- runtime_throw("selectsend: too many cases");
- sel->ncase = i+1;
- cas = &sel->scase[i];
-
- cas->index = index;
- cas->chan = c;
- cas->kind = CaseSend;
- cas->sg.elem = elem;
-
- if(debug)
- runtime_printf("selectsend s=%p index=%d chan=%p\n",
- sel, cas->index, cas->chan);
-}
-
-// cut in half to give stack a chance to split
-static void selectrecv(Select *sel, Hchan *c, int index, void *elem, bool*);
-
-func selectrecv(sel *Select, c *Hchan, elem *byte, index int32) {
- // nil cases do not compete
- if(c != nil)
- selectrecv(sel, c, index, elem, nil);
-}
-
-func selectrecv2(sel *Select, c *Hchan, elem *byte, received *bool, index int32) {
- // nil cases do not compete
- if(c != nil)
- selectrecv(sel, c, index, elem, received);
-}
-
-static void
-selectrecv(Select *sel, Hchan *c, int index, void *elem, bool *received)
-{
- int32 i;
- Scase *cas;
-
- i = sel->ncase;
- if(i >= sel->tcase)
- runtime_throw("selectrecv: too many cases");
- sel->ncase = i+1;
- cas = &sel->scase[i];
- cas->index = index;
- cas->chan = c;
-
- cas->kind = CaseRecv;
- cas->sg.elem = elem;
- cas->receivedp = received;
-
- if(debug)
- runtime_printf("selectrecv s=%p index=%d chan=%p\n",
- sel, cas->index, cas->chan);
-}
-
-// cut in half to give stack a chance to split
-static void selectdefault(Select*, int);
-
-func selectdefault(sel *Select, index int32) {
- selectdefault(sel, index);
-}
-
-static void
-selectdefault(Select *sel, int32 index)
-{
- int32 i;
- Scase *cas;
-
- i = sel->ncase;
- if(i >= sel->tcase)
- runtime_throw("selectdefault: too many cases");
- sel->ncase = i+1;
- cas = &sel->scase[i];
- cas->index = index;
- cas->chan = nil;
-
- cas->kind = CaseDefault;
-
- if(debug)
- runtime_printf("selectdefault s=%p index=%d\n",
- sel, cas->index);
-}
-
-static void
-sellock(Select *sel)
-{
- uint32 i;
- Hchan *c, *c0;
-
- c = nil;
- for(i=0; i<sel->ncase; i++) {
- c0 = sel->lockorder[i];
- if(c0 && c0 != c) {
- c = sel->lockorder[i];
- runtime_lock(c);
- }
- }
-}
-
-static void
-selunlock(Select *sel)
-{
- int32 i, n, r;
- Hchan *c;
-
- // We must be very careful here to not touch sel after we have unlocked
- // the last lock, because sel can be freed right after the last unlock.
- // Consider the following situation.
- // First M calls runtime_park() in runtime_selectgo() passing the sel.
- // Once runtime_park() has unlocked the last lock, another M makes
- // the G that calls select runnable again and schedules it for execution.
- // When the G runs on another M, it locks all the locks and frees sel.
- // Now if the first M touches sel, it will access freed memory.
- n = (int32)sel->ncase;
- r = 0;
- // skip the default case
- if(n>0 && sel->lockorder[0] == nil)
- r = 1;
- for(i = n-1; i >= r; i--) {
- c = sel->lockorder[i];
- if(i>0 && sel->lockorder[i-1] == c)
- continue; // will unlock it on the next iteration
- runtime_unlock(c);
- }
-}
-
-static bool
-selparkcommit(G *gp, void *sel)
-{
- USED(gp);
- selunlock(sel);
- return true;
-}
-
-func block() {
- runtime_park(nil, nil, "select (no cases)"); // forever
-}
-
-static int selectgo(Select**);
-
-// selectgo(sel *byte);
-
-func selectgo(sel *Select) (ret int32) {
- return selectgo(&sel);
-}
-
-static int
-selectgo(Select **selp)
-{
- Select *sel;
- uint32 o, i, j, k, done;
- int64 t0;
- Scase *cas, *dfl;
- Hchan *c;
- SudoG *sg;
- G *gp;
- int index;
- G *g;
-
- sel = *selp;
- if(runtime_gcwaiting())
- runtime_gosched();
-
- if(debug)
- runtime_printf("select: sel=%p\n", sel);
-
- g = runtime_g();
-
- t0 = 0;
- if(runtime_blockprofilerate > 0) {
- t0 = runtime_cputicks();
- for(i=0; i<sel->ncase; i++)
- sel->scase[i].sg.releasetime = -1;
- }
-
- // The compiler rewrites selects that statically have
- // only 0 or 1 cases plus default into simpler constructs.
- // The only way we can end up with such small sel->ncase
- // values here is for a larger select in which most channels
- // have been nilled out. The general code handles those
- // cases correctly, and they are rare enough not to bother
- // optimizing (and needing to test).
-
- // generate permuted order
- for(i=0; i<sel->ncase; i++)
- sel->pollorder[i] = i;
- for(i=1; i<sel->ncase; i++) {
- o = sel->pollorder[i];
- j = runtime_fastrand1()%(i+1);
- sel->pollorder[i] = sel->pollorder[j];
- sel->pollorder[j] = o;
- }
-
- // sort the cases by Hchan address to get the locking order.
- // simple heap sort, to guarantee n log n time and constant stack footprint.
- for(i=0; i<sel->ncase; i++) {
- j = i;
- c = sel->scase[j].chan;
- while(j > 0 && sel->lockorder[k=(j-1)/2] < c) {
- sel->lockorder[j] = sel->lockorder[k];
- j = k;
- }
- sel->lockorder[j] = c;
- }
- for(i=sel->ncase; i-->0; ) {
- c = sel->lockorder[i];
- sel->lockorder[i] = sel->lockorder[0];
- j = 0;
- for(;;) {
- k = j*2+1;
- if(k >= i)
- break;
- if(k+1 < i && sel->lockorder[k] < sel->lockorder[k+1])
- k++;
- if(c < sel->lockorder[k]) {
- sel->lockorder[j] = sel->lockorder[k];
- j = k;
- continue;
- }
- break;
- }
- sel->lockorder[j] = c;
- }
- /*
- for(i=0; i+1<sel->ncase; i++)
- if(sel->lockorder[i] > sel->lockorder[i+1]) {
- runtime_printf("i=%d %p %p\n", i, sel->lockorder[i], sel->lockorder[i+1]);
- runtime_throw("select: broken sort");
- }
- */
- sellock(sel);
-
-loop:
- // pass 1 - look for something already waiting
- dfl = nil;
- for(i=0; i<sel->ncase; i++) {
- o = sel->pollorder[i];
- cas = &sel->scase[o];
- c = cas->chan;
-
- switch(cas->kind) {
- case CaseRecv:
- if(c->dataqsiz > 0) {
- if(c->qcount > 0)
- goto asyncrecv;
- } else {
- sg = dequeue(&c->sendq);
- if(sg != nil)
- goto syncrecv;
- }
- if(c->closed)
- goto rclose;
- break;
-
- case CaseSend:
- if(c->closed)
- goto sclose;
- if(c->dataqsiz > 0) {
- if(c->qcount < c->dataqsiz)
- goto asyncsend;
- } else {
- sg = dequeue(&c->recvq);
- if(sg != nil)
- goto syncsend;
- }
- break;
-
- case CaseDefault:
- dfl = cas;
- break;
- }
- }
-
- if(dfl != nil) {
- selunlock(sel);
- cas = dfl;
- goto retc;
- }
-
-
- // pass 2 - enqueue on all chans
- done = 0;
- for(i=0; i<sel->ncase; i++) {
- o = sel->pollorder[i];
- cas = &sel->scase[o];
- c = cas->chan;
- sg = &cas->sg;
- sg->g = g;
- sg->selectdone = &done;
-
- switch(cas->kind) {
- case CaseRecv:
- enqueue(&c->recvq, sg);
- break;
-
- case CaseSend:
- enqueue(&c->sendq, sg);
- break;
- }
- }
-
- g->param = nil;
- runtime_park(selparkcommit, sel, "select");
-
- sellock(sel);
- sg = g->param;
-
- // pass 3 - dequeue from unsuccessful chans
- // otherwise they stack up on quiet channels
- for(i=0; i<sel->ncase; i++) {
- cas = &sel->scase[i];
- if(cas != (Scase*)sg) {
- c = cas->chan;
- if(cas->kind == CaseSend)
- dequeueg(&c->sendq);
- else
- dequeueg(&c->recvq);
- }
- }
-
- if(sg == nil)
- goto loop;
-
- cas = (Scase*)sg;
- c = cas->chan;
-
- if(c->dataqsiz > 0)
- runtime_throw("selectgo: shouldn't happen");
-
- if(debug)
- runtime_printf("wait-return: sel=%p c=%p cas=%p kind=%d\n",
- sel, c, cas, cas->kind);
-
- if(cas->kind == CaseRecv) {
- if(cas->receivedp != nil)
- *cas->receivedp = true;
- }
-
- selunlock(sel);
- goto retc;
-
-asyncrecv:
- // can receive from buffer
- if(cas->receivedp != nil)
- *cas->receivedp = true;
- if(cas->sg.elem != nil)
- runtime_memmove(cas->sg.elem, chanbuf(c, c->recvx), c->elemsize);
- runtime_memclr(chanbuf(c, c->recvx), c->elemsize);
- if(++c->recvx == c->dataqsiz)
- c->recvx = 0;
- c->qcount--;
- sg = dequeue(&c->sendq);
- if(sg != nil) {
- gp = sg->g;
- selunlock(sel);
- if(sg->releasetime)
- sg->releasetime = runtime_cputicks();
- runtime_ready(gp);
- } else {
- selunlock(sel);
- }
- goto retc;
-
-asyncsend:
- // can send to buffer
- runtime_memmove(chanbuf(c, c->sendx), cas->sg.elem, c->elemsize);
- if(++c->sendx == c->dataqsiz)
- c->sendx = 0;
- c->qcount++;
- sg = dequeue(&c->recvq);
- if(sg != nil) {
- gp = sg->g;
- selunlock(sel);
- if(sg->releasetime)
- sg->releasetime = runtime_cputicks();
- runtime_ready(gp);
- } else {
- selunlock(sel);
- }
- goto retc;
-
-syncrecv:
- // can receive from sleeping sender (sg)
- selunlock(sel);
- if(debug)
- runtime_printf("syncrecv: sel=%p c=%p o=%d\n", sel, c, o);
- if(cas->receivedp != nil)
- *cas->receivedp = true;
- if(cas->sg.elem != nil)
- runtime_memmove(cas->sg.elem, sg->elem, c->elemsize);
- gp = sg->g;
- gp->param = sg;
- if(sg->releasetime)
- sg->releasetime = runtime_cputicks();
- runtime_ready(gp);
- goto retc;
-
-rclose:
- // read at end of closed channel
- selunlock(sel);
- if(cas->receivedp != nil)
- *cas->receivedp = false;
- if(cas->sg.elem != nil)
- runtime_memclr(cas->sg.elem, c->elemsize);
- goto retc;
-
-syncsend:
- // can send to sleeping receiver (sg)
- selunlock(sel);
- if(debug)
- runtime_printf("syncsend: sel=%p c=%p o=%d\n", sel, c, o);
- if(sg->elem != nil)
- runtime_memmove(sg->elem, cas->sg.elem, c->elemsize);
- gp = sg->g;
- gp->param = sg;
- if(sg->releasetime)
- sg->releasetime = runtime_cputicks();
- runtime_ready(gp);
-
-retc:
- // return index corresponding to chosen case
- index = cas->index;
- if(cas->sg.releasetime > 0)
- runtime_blockevent(cas->sg.releasetime - t0, 2);
- runtime_free(sel);
- return index;
-
-sclose:
- // send on closed channel
- selunlock(sel);
- runtime_panicstring("send on closed channel");
- return 0; // not reached
-}
-
-// This struct must match ../reflect/value.go:/runtimeSelect.
-typedef struct runtimeSelect runtimeSelect;
-struct runtimeSelect
-{
- intgo dir;
- ChanType *typ;
- Hchan *ch;
- byte *val;
-};
-
-// This enum must match ../reflect/value.go:/SelectDir.
-enum SelectDir {
- SelectSend = 1,
- SelectRecv,
- SelectDefault,
-};
-
-func reflect.rselect(cases Slice) (chosen int, recvOK bool) {
- int32 i;
- Select *sel;
- runtimeSelect* rcase, *rc;
-
- chosen = -1;
- recvOK = false;
-
- rcase = (runtimeSelect*)cases.__values;
-
- sel = newselect(cases.__count);
- for(i=0; i<cases.__count; i++) {
- rc = &rcase[i];
- switch(rc->dir) {
- case SelectDefault:
- selectdefault(sel, i);
- break;
- case SelectSend:
- if(rc->ch == nil)
- break;
- selectsend(sel, rc->ch, i, rc->val);
- break;
- case SelectRecv:
- if(rc->ch == nil)
- break;
- selectrecv(sel, rc->ch, i, rc->val, &recvOK);
- break;
- }
- }
-
- chosen = (intgo)(uintptr)selectgo(&sel);
-}
-
-static void closechan(Hchan *c, void *pc);
-
-func closechan(c *Hchan) {
- closechan(c, runtime_getcallerpc(&c));
-}
-
-func reflect.chanclose(c *Hchan) {
- closechan(c, runtime_getcallerpc(&c));
-}
-
-static void
-closechan(Hchan *c, void *pc)
-{
- USED(pc);
- SudoG *sg;
- G* gp;
-
- if(c == nil)
- runtime_panicstring("close of nil channel");
-
- if(runtime_gcwaiting())
- runtime_gosched();
-
- runtime_lock(c);
- if(c->closed) {
- runtime_unlock(c);
- runtime_panicstring("close of closed channel");
- }
- c->closed = true;
-
- // release all readers
- for(;;) {
- sg = dequeue(&c->recvq);
- if(sg == nil)
- break;
- gp = sg->g;
- gp->param = nil;
- if(sg->releasetime)
- sg->releasetime = runtime_cputicks();
- runtime_ready(gp);
- }
-
- // release all writers
- for(;;) {
- sg = dequeue(&c->sendq);
- if(sg == nil)
- break;
- gp = sg->g;
- gp->param = nil;
- if(sg->releasetime)
- sg->releasetime = runtime_cputicks();
- runtime_ready(gp);
- }
-
- runtime_unlock(c);
-}
-
-void
-__go_builtin_close(Hchan *c)
-{
- runtime_closechan(c);
-}
-
-func reflect.chanlen(c *Hchan) (len int) {
- if(c == nil)
- len = 0;
- else
- len = c->qcount;
-}
-
-func reflect.chancap(c *Hchan) (cap int) {
- if(c == nil)
- cap = 0;
- else
- cap = c->dataqsiz;
-}
-
-intgo
-__go_chan_cap(Hchan *c)
-{
- return reflect_chancap(c);
-}
-
-static SudoG*
-dequeue(WaitQ *q)
-{
- SudoG *sgp;
-
-loop:
- sgp = q->first;
- if(sgp == nil)
- return nil;
- q->first = sgp->link;
-
- // if sgp participates in a select and is already signaled, ignore it
- if(sgp->selectdone != nil) {
- // claim the right to signal
- if(*sgp->selectdone != 0 || !runtime_cas(sgp->selectdone, 0, 1))
- goto loop;
- }
-
- return sgp;
-}
-
-static void
-dequeueg(WaitQ *q)
-{
- SudoG **l, *sgp, *prevsgp;
- G *g;
-
- g = runtime_g();
- prevsgp = nil;
- for(l=&q->first; (sgp=*l) != nil; l=&sgp->link, prevsgp=sgp) {
- if(sgp->g == g) {
- *l = sgp->link;
- if(q->last == sgp)
- q->last = prevsgp;
- break;
- }
- }
-}
-
-static void
-enqueue(WaitQ *q, SudoG *sgp)
-{
- sgp->link = nil;
- if(q->first == nil) {
- q->first = sgp;
- q->last = sgp;
- return;
- }
- q->last->link = sgp;
- q->last = sgp;
-}
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-typedef struct WaitQ WaitQ;
-typedef struct SudoG SudoG;
-typedef struct Select Select;
-typedef struct Scase Scase;
-
-typedef struct __go_type_descriptor Type;
-typedef struct __go_channel_type ChanType;
-
-struct SudoG
-{
- G* g;
- uint32* selectdone;
- SudoG* link;
- int64 releasetime;
- byte* elem; // data element
- uint32 ticket;
-};
-
-struct WaitQ
-{
- SudoG* first;
- SudoG* last;
-};
-
-// The garbage collector is assuming that Hchan can only contain pointers into the stack
-// and cannot contain pointers into the heap.
-struct Hchan
-{
- uintgo qcount; // total data in the q
- uintgo dataqsiz; // size of the circular q
- uint16 elemsize;
- uint16 pad; // ensures proper alignment of the buffer that follows Hchan in memory
- bool closed;
- const Type* elemtype; // element type
- uintgo sendx; // send index
- uintgo recvx; // receive index
- WaitQ recvq; // list of recv waiters
- WaitQ sendq; // list of send waiters
- Lock;
-};
-
-// Buffer follows Hchan immediately in memory.
-// chanbuf(c, i) is pointer to the i'th slot in the buffer.
-#define chanbuf(c, i) ((byte*)((c)+1)+(uintptr)(c)->elemsize*(i))
-
-enum
-{
- debug = 0,
-
- // Scase.kind
- CaseRecv,
- CaseSend,
- CaseDefault,
-};
-
-struct Scase
-{
- SudoG sg; // must be first member (cast to Scase)
- Hchan* chan; // chan
- uint16 kind;
- uint16 index; // index to return
- bool* receivedp; // pointer to received bool (recv2)
-};
-
-struct Select
-{
- uint16 tcase; // total count of scase[]
- uint16 ncase; // currently filled scase[]
- uint16* pollorder; // case poll order
- Hchan** lockorder; // channel lock order
- Scase scase[1]; // one per case (in order of appearance)
-};
#include "go-panic.h"
#include "go-type.h"
-extern void __go_receive (ChanType *, Hchan *, byte *);
+extern void chanrecv1 (ChanType *, Hchan *, void *)
+ __asm__ (GOSYM_PREFIX "runtime.chanrecv1");
/* Prepare to call from code written in Go to code written in C or
C++. This takes the current goroutine out of the Go scheduler, as
Go. In the case of -buildmode=c-archive or c-shared, this
call may be coming in before package initialization is
complete. Wait until it is. */
- __go_receive (NULL, runtime_main_init_done, NULL);
+ chanrecv1 (NULL, runtime_main_init_done, NULL);
}
mp = runtime_m ();
else
dumpbool(true); // big-endian ptrs
dumpint(PtrSize);
- dumpint(runtime_Hchansize);
+ dumpint(hchanSize);
dumpint((uintptr)runtime_mheap.arena_start);
dumpint((uintptr)runtime_mheap.arena_used);
dumpint(0);
case TypeInfo_Chan:
if(type->__size == 0) // channels may have zero-sized objects in them
break;
- for(i = runtime_Hchansize; i <= size - type->__size; i += type->__size) {
+ for(i = hchanSize; i <= size - type->__size; i += type->__size) {
//playgcprog(i, (uintptr*)type->gc + 1, dumpeface_callback, obj);
}
break;
#include "arch.h"
#include "malloc.h"
#include "mgc0.h"
-#include "chan.h"
#include "go-type.h"
// Map gccgo field names to gc field names.
// There are no heap pointers in struct Hchan,
// so we can ignore the leading sizeof(Hchan) bytes.
if(!(chantype->elem->__code & kindNoPointers)) {
- // Channel's buffer follows Hchan immediately in memory.
- // Size of buffer (cap(c)) is second int in the chan struct.
- chancap = ((uintgo*)chan)[1];
- if(chancap > 0) {
+ chancap = chan->dataqsiz;
+ if(chancap > 0 && markonly(chan->buf)) {
// TODO(atom): split into two chunks so that only the
// in-use part of the circular buffer is scanned.
// (Channel routines zero the unused part, so the current
// code does not lead to leaks, it's just a little inefficient.)
- *sbuf.obj.pos++ = (Obj){(byte*)chan+runtime_Hchansize, chancap*chantype->elem->__size,
+ *sbuf.obj.pos++ = (Obj){chan->buf, chancap*chantype->elem->__size,
(uintptr)chantype->elem->__gc | PRECISE | LOOP};
if(sbuf.obj.pos == sbuf.obj.end)
flushobjbuf(&sbuf);
CHANNEL_BOTH_DIR
};
-extern Hchan *__go_new_channel (ChanType *, uintptr);
+extern Hchan *makechan (ChanType *, int64)
+ __asm__ (GOSYM_PREFIX "runtime.makechan");
extern void closechan(Hchan *) __asm__ (GOSYM_PREFIX "runtime.closechan");
static void
runtime_throw("runtime_main not on m0");
__go_go(runtime_MHeap_Scavenger, nil);
- runtime_main_init_done = __go_new_channel(&chan_bool_type_descriptor, 0);
+ runtime_main_init_done = makechan(&chan_bool_type_descriptor, 0);
_cgo_notify_runtime_init_done();
g->m->locks--;
}
+void goready(G*, int) __asm__ (GOSYM_PREFIX "runtime.goready");
+
+void
+goready(G* gp, int traceskip __attribute__ ((unused)))
+{
+ runtime_ready(gp);
+}
+
int32
runtime_gcprocs(void)
{
runtime_mcall(park0);
}
+void gopark(FuncVal *, void *, String, byte, int)
+ __asm__ ("runtime.gopark");
+
+void
+gopark(FuncVal *unlockf, void *lock, String reason,
+ byte traceEv __attribute__ ((unused)),
+ int traceskip __attribute__ ((unused)))
+{
+ if(g->atomicstatus != _Grunning)
+ runtime_throw("bad g status");
+ g->m->waitlock = lock;
+ g->m->waitunlockf = unlockf == nil ? nil : (void*)unlockf->fn;
+ g->waitreason = reason;
+ runtime_mcall(park0);
+}
+
static bool
parkunlock(G *gp, void *lock)
{
runtime_park(parkunlock, lock, reason);
}
+void goparkunlock(Lock *, String, byte, int)
+ __asm__ (GOSYM_PREFIX "runtime.goparkunlock");
+
+void
+goparkunlock(Lock *lock, String reason, byte traceEv __attribute__ ((unused)),
+ int traceskip __attribute__ ((unused)))
+{
+ if(g->atomicstatus != _Grunning)
+ runtime_throw("bad g status");
+ g->m->waitlock = lock;
+ g->m->waitunlockf = parkunlock;
+ g->waitreason = reason;
+ runtime_mcall(park0);
+}
+
// runtime_park continuation on g0.
static void
park0(G *gp)
typedef struct SigTab SigTab;
typedef struct mcache MCache;
typedef struct FixAlloc FixAlloc;
-typedef struct Hchan Hchan;
+typedef struct hchan Hchan;
typedef struct Timers Timers;
typedef struct Timer Timer;
typedef struct gcstats GCStats;
typedef struct ParForThread ParForThread;
typedef struct cgoMal CgoMal;
typedef struct PollDesc PollDesc;
+typedef struct sudog SudoG;
typedef struct __go_open_array Slice;
typedef struct __go_interface Iface;
extern int8* runtime_goos;
extern int32 runtime_ncpu;
extern void (*runtime_sysargs)(int32, uint8**);
-extern uint32 runtime_Hchansize;
extern struct debugVars runtime_debug;
extern uintptr runtime_maxstacksize;
package sync
#include "runtime.h"
-#include "chan.h"
#include "arch.h"
typedef struct SemaWaiter SemaWaiter;
if (l->tail == nil) {
l->head = &s;
} else {
- l->tail->link = &s;
+ l->tail->next = &s;
}
l->tail = &s;
runtime_parkunlock(&l->lock, "semacquire");
// Go through the local list and ready all waiters.
while (s != nil) {
- SudoG* next = s->link;
- s->link = nil;
+ SudoG* next = s->next;
+ s->next = nil;
readyWithTime(s, 4);
s = next;
}
// needs to be notified. If it hasn't made it to the list yet we won't
// find it, but it won't park itself once it sees the new notify number.
runtime_atomicstore(&l->notify, t+1);
- for (p = nil, s = l->head; s != nil; p = s, s = s->link) {
+ for (p = nil, s = l->head; s != nil; p = s, s = s->next) {
if (s->ticket == t) {
- SudoG *n = s->link;
+ SudoG *n = s->next;
if (p != nil) {
- p->link = n;
+ p->next = n;
} else {
l->head = n;
}
l->tail = p;
}
runtime_unlock(&l->lock);
- s->link = nil;
+ s->next = nil;
readyWithTime(s, 4);
return;
}