+2016-11-22 Ian Lance Taylor <iant@google.com>
+
+ * go-gcc.cc (Gcc_backend::Gcc_backend): Add builtin function
+ __builtin_extract_return_addr.
+
2016-10-25 David Malcolm <dmalcolm@redhat.com>
* go-lang.c (go_langhook_type_for_mode): Remove redundant cast
this->define_builtin(BUILT_IN_FRAME_ADDRESS, "__builtin_frame_address",
NULL, t, false, false);
+ // The runtime calls __builtin_extract_return_addr when recording
+ // the address to which a function returns.
+ this->define_builtin(BUILT_IN_EXTRACT_RETURN_ADDR,
+ "__builtin_extract_return_addr", NULL,
+ build_function_type_list(ptr_type_node,
+ ptr_type_node,
+ NULL_TREE),
+ false, false);
+
// The compiler uses __builtin_trap for some exception handling
// cases.
this->define_builtin(BUILT_IN_TRAP, "__builtin_trap", NULL,
-bf4762823c4543229867436399be3ae30b4d13bb
+7593cc83a03999331c5e2dc65a9306c5fe57dfd0
The first line of this file holds the git revision number of the last
merge done from the gofrontend repository.
// Create a statement that runs all deferred calls for FUNCTION. This should
// be a statement that looks like this in C++:
// finish:
- // try { UNDEFER; } catch { CHECK_DEFER; goto finish; }
+ // try { DEFER_RETURN; } catch { CHECK_DEFER; goto finish; }
virtual Bstatement*
function_defer_statement(Bfunction* function, Bexpression* undefer,
Bexpression* check_defer, Location) = 0;
{
switch (e->func_expression()->runtime_code())
{
- case Runtime::PANIC:
+ case Runtime::GOPANIC:
op << "panic";
break;
op << "make";
break;
- case Runtime::DEFER:
+ case Runtime::DEFERPROC:
op << "defer";
break;
- case Runtime::RECOVER:
+ case Runtime::GORECOVER:
op << "recover";
break;
{
switch (fe->runtime_code())
{
- case Runtime::PANIC:
+ case Runtime::GOPANIC:
{
// Argument could leak through recover.
Node* panic_arg = Node::make_node(call->args()->front());
arg = Expression::convert_for_assignment(gogo, empty, arg, location);
Expression* panic =
- Runtime::make_call(Runtime::PANIC, location, 1, arg);
+ Runtime::make_call(Runtime::GOPANIC, location, 1, arg);
return panic->get_backend(context);
}
// because it changes whether it can recover a panic or not.
// See test7 in test/recover1.go.
Expression* recover = Runtime::make_call((this->is_deferred()
- ? Runtime::DEFERRED_RECOVER
- : Runtime::RECOVER),
+ ? Runtime::DEFERREDRECOVER
+ : Runtime::GORECOVER),
location, 0);
Expression* cond =
Expression::make_conditional(arg, recover, nil, location);
// Return the expression to pass for the .can_recover parameter to the
// new function. This indicates whether a call to recover may return
-// non-nil. The expression is
-// __go_can_recover(__builtin_return_address()).
+// non-nil. The expression is runtime.canrecover(__builtin_return_address()).
Expression*
Build_recover_thunks::can_recover_arg(Location location)
results->push_back(Typed_identifier("", boolean_type, bloc));
Function_type* fntype = Type::make_function_type(NULL, param_types,
results, bloc);
- can_recover = Named_object::make_function_declaration("__go_can_recover",
- NULL, fntype,
- bloc);
- can_recover->func_declaration_value()->set_asm_name("__go_can_recover");
+ can_recover =
+ Named_object::make_function_declaration("runtime_canrecover",
+ NULL, fntype, bloc);
+ can_recover->func_declaration_value()->set_asm_name("runtime.canrecover");
}
Expression* fn = Expression::make_func_reference(builtin_return_address,
// function with an extra parameter, which is whether a call to
// recover can succeed. We then move the body of this function to
// that one. We then turn this function into a thunk which calls the
-// new one, passing the value of
-// __go_can_recover(__builtin_return_address()). The function will be
-// marked as not splitting the stack. This will cooperate with the
-// implementation of defer to make recover do the right thing.
+// new one, passing the value of runtime.canrecover(__builtin_return_address()).
+// The function will be marked as not splitting the stack. This will
+// cooperate with the implementation of defer to make recover do the
+// right thing.
void
Gogo::build_recover_thunks()
// libgo/runtime/go-unwind.c.
std::vector<Bstatement*> stmts;
- Expression* call = Runtime::make_call(Runtime::CHECK_DEFER, end_loc, 1,
+ Expression* call = Runtime::make_call(Runtime::CHECKDEFER, end_loc, 1,
this->defer_stack(end_loc));
Translate_context context(gogo, named_function, NULL, NULL);
Bexpression* defer = call->get_backend(&context);
go_assert(*except == NULL);
*except = gogo->backend()->statement_list(stmts);
- call = Runtime::make_call(Runtime::CHECK_DEFER, end_loc, 1,
+ call = Runtime::make_call(Runtime::CHECKDEFER, end_loc, 1,
this->defer_stack(end_loc));
defer = call->get_backend(&context);
- call = Runtime::make_call(Runtime::UNDEFER, end_loc, 1,
+ call = Runtime::make_call(Runtime::DEFERRETURN, end_loc, 1,
this->defer_stack(end_loc));
Bexpression* undefer = call->get_backend(&context);
Bstatement* function_defer =
// Panic.
-DEF_GO_RUNTIME(PANIC, "__go_panic", P1(EFACE), R0())
+DEF_GO_RUNTIME(GOPANIC, "runtime.gopanic", P1(EFACE), R0())
// Recover.
-DEF_GO_RUNTIME(RECOVER, "__go_recover", P0(), R1(EFACE))
+DEF_GO_RUNTIME(GORECOVER, "runtime.gorecover", P0(), R1(EFACE))
// Recover when called directly from defer.
-DEF_GO_RUNTIME(DEFERRED_RECOVER, "__go_deferred_recover", P0(), R1(EFACE))
+DEF_GO_RUNTIME(DEFERREDRECOVER, "runtime.deferredrecover", P0(), R1(EFACE))
// Decide whether this function can call recover.
-DEF_GO_RUNTIME(CAN_RECOVER, "__go_can_recover", P1(POINTER), R1(BOOL))
-
-// Get the return address of the function.
-DEF_GO_RUNTIME(RETURN_ADDRESS, "__go_return_address", P1(INT), R1(POINTER))
+DEF_GO_RUNTIME(CANRECOVER, "runtime.canrecover", P1(POINTER), R1(BOOL))
// Set the return address for defer in a defer thunk.
-DEF_GO_RUNTIME(SET_DEFER_RETADDR, "__go_set_defer_retaddr", P1(POINTER),
+DEF_GO_RUNTIME(SETDEFERRETADDR, "runtime.setdeferretaddr", P1(POINTER),
R1(BOOL))
// Check for a deferred function in an exception handler.
-DEF_GO_RUNTIME(CHECK_DEFER, "__go_check_defer", P1(BOOLPTR), R0())
+DEF_GO_RUNTIME(CHECKDEFER, "runtime.checkdefer", P1(BOOLPTR), R0())
// Run deferred functions.
-DEF_GO_RUNTIME(UNDEFER, "__go_undefer", P1(BOOLPTR), R0())
+DEF_GO_RUNTIME(DEFERRETURN, "runtime.deferreturn", P1(BOOLPTR), R0())
// Panic with a runtime error.
DEF_GO_RUNTIME(RUNTIME_ERROR, "__go_runtime_error", P1(INT32), R0())
DEF_GO_RUNTIME(GO, "__go_go", P2(FUNC_PTR, POINTER), R0())
// Defer a function.
-DEF_GO_RUNTIME(DEFER, "__go_defer", P3(BOOLPTR, FUNC_PTR, POINTER), R0())
+DEF_GO_RUNTIME(DEFERPROC, "runtime.deferproc", P3(BOOLPTR, FUNC_PTR, POINTER),
+ R0())
// Convert an empty interface to an empty interface, returning ok.
{
retaddr_label = gogo->add_label_reference("retaddr", location, false);
Expression* arg = Expression::make_label_addr(retaddr_label, location);
- Expression* call = Runtime::make_call(Runtime::SET_DEFER_RETADDR,
+ Expression* call = Runtime::make_call(Runtime::SETDEFERRETADDR,
location, 1, arg);
// This is a hack to prevent the middle-end from deleting the
Location loc = this->location();
Expression* ds = context->function()->func_value()->defer_stack(loc);
- Expression* call = Runtime::make_call(Runtime::DEFER, loc, 3,
+ Expression* call = Runtime::make_call(Runtime::DEFERPROC, loc, 3,
ds, fn, arg);
Bexpression* bcall = call->get_backend(context);
return context->backend()->expression_statement(bcall);
runtime/go-cdiv.c \
runtime/go-cgo.c \
runtime/go-construct-map.c \
- runtime/go-defer.c \
- runtime/go-deferred-recover.c \
runtime/go-ffi.c \
runtime/go-fieldtrack.c \
runtime/go-matherr.c \
runtime/go-now.c \
runtime/go-new.c \
runtime/go-nosys.c \
- runtime/go-panic.c \
- runtime/go-recover.c \
runtime/go-reflect-call.c \
runtime/go-runtime-error.c \
runtime/go-setenv.c \
@LIBGO_IS_LINUX_TRUE@am__objects_4 = getncpu-linux.lo
am__objects_5 = go-assert.lo go-breakpoint.lo go-caller.lo \
go-callers.lo go-cdiv.lo go-cgo.lo go-construct-map.lo \
- go-defer.lo go-deferred-recover.lo go-ffi.lo go-fieldtrack.lo \
- go-matherr.lo go-memclr.lo go-memcmp.lo go-memequal.lo \
- go-memmove.lo go-nanotime.lo go-now.lo go-new.lo go-nosys.lo \
- go-panic.lo go-recover.lo go-reflect-call.lo \
+ go-ffi.lo go-fieldtrack.lo go-matherr.lo go-memclr.lo \
+ go-memcmp.lo go-memequal.lo go-memmove.lo go-nanotime.lo \
+ go-now.lo go-new.lo go-nosys.lo go-reflect-call.lo \
go-runtime-error.lo go-setenv.lo go-signal.lo go-strslice.lo \
go-type-complex.lo go-type-float.lo go-type-identity.lo \
go-type-string.lo go-typedesc-equal.lo go-unsafe-new.lo \
runtime/go-cdiv.c \
runtime/go-cgo.c \
runtime/go-construct-map.c \
- runtime/go-defer.c \
- runtime/go-deferred-recover.c \
runtime/go-ffi.c \
runtime/go-fieldtrack.c \
runtime/go-matherr.c \
runtime/go-now.c \
runtime/go-new.c \
runtime/go-nosys.c \
- runtime/go-panic.c \
- runtime/go-recover.c \
runtime/go-reflect-call.c \
runtime/go-runtime-error.c \
runtime/go-setenv.c \
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-cdiv.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-cgo.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-construct-map.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-defer.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-deferred-recover.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-ffi.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-fieldtrack.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-matherr.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-new.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-nosys.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-now.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-panic.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-recover.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-reflect-call.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-runtime-error.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-setenv.Plo@am__quote@
@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o go-construct-map.lo `test -f 'runtime/go-construct-map.c' || echo '$(srcdir)/'`runtime/go-construct-map.c
-go-defer.lo: runtime/go-defer.c
-@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT go-defer.lo -MD -MP -MF $(DEPDIR)/go-defer.Tpo -c -o go-defer.lo `test -f 'runtime/go-defer.c' || echo '$(srcdir)/'`runtime/go-defer.c
-@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/go-defer.Tpo $(DEPDIR)/go-defer.Plo
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='runtime/go-defer.c' object='go-defer.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o go-defer.lo `test -f 'runtime/go-defer.c' || echo '$(srcdir)/'`runtime/go-defer.c
-
-go-deferred-recover.lo: runtime/go-deferred-recover.c
-@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT go-deferred-recover.lo -MD -MP -MF $(DEPDIR)/go-deferred-recover.Tpo -c -o go-deferred-recover.lo `test -f 'runtime/go-deferred-recover.c' || echo '$(srcdir)/'`runtime/go-deferred-recover.c
-@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/go-deferred-recover.Tpo $(DEPDIR)/go-deferred-recover.Plo
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='runtime/go-deferred-recover.c' object='go-deferred-recover.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o go-deferred-recover.lo `test -f 'runtime/go-deferred-recover.c' || echo '$(srcdir)/'`runtime/go-deferred-recover.c
-
go-ffi.lo: runtime/go-ffi.c
@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT go-ffi.lo -MD -MP -MF $(DEPDIR)/go-ffi.Tpo -c -o go-ffi.lo `test -f 'runtime/go-ffi.c' || echo '$(srcdir)/'`runtime/go-ffi.c
@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/go-ffi.Tpo $(DEPDIR)/go-ffi.Plo
@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o go-nosys.lo `test -f 'runtime/go-nosys.c' || echo '$(srcdir)/'`runtime/go-nosys.c
-go-panic.lo: runtime/go-panic.c
-@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT go-panic.lo -MD -MP -MF $(DEPDIR)/go-panic.Tpo -c -o go-panic.lo `test -f 'runtime/go-panic.c' || echo '$(srcdir)/'`runtime/go-panic.c
-@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/go-panic.Tpo $(DEPDIR)/go-panic.Plo
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='runtime/go-panic.c' object='go-panic.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o go-panic.lo `test -f 'runtime/go-panic.c' || echo '$(srcdir)/'`runtime/go-panic.c
-
-go-recover.lo: runtime/go-recover.c
-@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT go-recover.lo -MD -MP -MF $(DEPDIR)/go-recover.Tpo -c -o go-recover.lo `test -f 'runtime/go-recover.c' || echo '$(srcdir)/'`runtime/go-recover.c
-@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/go-recover.Tpo $(DEPDIR)/go-recover.Plo
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='runtime/go-recover.c' object='go-recover.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o go-recover.lo `test -f 'runtime/go-recover.c' || echo '$(srcdir)/'`runtime/go-recover.c
-
go-reflect-call.lo: runtime/go-reflect-call.c
@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT go-reflect-call.lo -MD -MP -MF $(DEPDIR)/go-reflect-call.Tpo -c -o go-reflect-call.lo `test -f 'runtime/go-reflect-call.c' || echo '$(srcdir)/'`runtime/go-reflect-call.c
@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/go-reflect-call.Tpo $(DEPDIR)/go-reflect-call.Plo
#include "runtime.h"
#include "go-type.h"
-#include "go-panic.h"
#ifdef USE_LIBFFI
function ffiCall with the pointer to the arguments, the results area,
and the closure structure. */
-void FFICallbackGo(void *result, void **args, ffi_go_closure *closure)
+extern void FFICallbackGo(void *result, void **args, ffi_go_closure *closure)
__asm__ (GOSYM_PREFIX "reflect.FFICallbackGo");
+extern void makefuncfficanrecover(Slice)
+ __asm__ (GOSYM_PREFIX "runtime.makefuncfficanrecover");
+
+extern void makefuncreturning(void)
+ __asm__ (GOSYM_PREFIX "runtime.makefuncreturning");
+
static void ffi_callback (ffi_cif *, void *, void **, void *)
__asm__ ("reflect.ffi_callback");
break;
}
if (i < n)
- __go_makefunc_ffi_can_recover (locs + i, n - i);
+ {
+ Slice s;
+
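+ /* Build a Go slice header covering locs[i..n); the __values,
+    __count, and __capacity fields mirror libgo's slice layout.  */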
+ s.__values = (void *) &locs[i];
+ s.__count = n - i;
+ s.__capacity = n - i;
+ makefuncfficanrecover (s);
+ }
FFICallbackGo(results, args, closure);
if (i < n)
- __go_makefunc_returning ();
+ makefuncreturning ();
}
/* Allocate an FFI closure and arrange to call ffi_callback. */
// Prints an argument passed to panic.
// There's room for arbitrary complexity here, but we keep it
// simple and handle just a few important cases: int, string, and Stringer.
-func Printany(i interface{}) {
+func printany(i interface{}) {
switch v := i.(type) {
case nil:
print("nil")
// suspend the current goroutine, so execution resumes automatically.
func Gosched()
-// Goexit terminates the goroutine that calls it. No other goroutine is affected.
-// Goexit runs all deferred calls before terminating the goroutine.
-//
-// Calling Goexit from the main goroutine terminates that goroutine
-// without func main returning. Since func main has not returned,
-// the program continues execution of other goroutines.
-// If all other goroutines exit, the program crashes.
-func Goexit()
-
// Caller reports file and line number information about function invocations on
// the calling goroutine's stack. The argument skip is the number of stack frames
// to ascend, with 0 identifying the caller of Caller. (For historical reasons the
package runtime
+import (
+ "runtime/internal/atomic"
+ "unsafe"
+)
+
+// For gccgo, use go:linkname to rename compiler-called functions to
+// themselves, so that the compiler will export them.
+//
+//go:linkname deferproc runtime.deferproc
+//go:linkname deferreturn runtime.deferreturn
+//go:linkname setdeferretaddr runtime.setdeferretaddr
+//go:linkname checkdefer runtime.checkdefer
+//go:linkname gopanic runtime.gopanic
+//go:linkname canrecover runtime.canrecover
+//go:linkname makefuncfficanrecover runtime.makefuncfficanrecover
+//go:linkname makefuncreturning runtime.makefuncreturning
+//go:linkname gorecover runtime.gorecover
+//go:linkname deferredrecover runtime.deferredrecover
+// Temporary for C code to call:
+//go:linkname throw runtime.throw
+
// Calling panic with one of the errors below will call errorString.Error
// which will call mallocgc to concatenate strings. That will fail if
// malloc is locked, causing a confusing error message. Throw a better
throw("recursive call during initialization - linker skew")
}
+// deferproc creates a new deferred function.
+// The compiler turns a defer statement into a call to this.
+// frame points into the stack frame; it is used to determine which
+// deferred functions are for the current stack frame, and whether we
+// have already deferred functions for this frame.
+// pfn is a C function pointer.
+// arg is a value to pass to pfn.
+func deferproc(frame *bool, pfn uintptr, arg unsafe.Pointer) {
+ n := newdefer()
+ n.frame = frame
+ n._panic = getg()._panic
+ n.pfn = pfn
+ n.arg = arg
+ n.retaddr = 0
+ n.makefunccanrecover = false
+ n.special = false
+}
+
+// Allocate a Defer, usually using per-P pool.
+// Each defer must be released with freedefer.
+func newdefer() *_defer {
+ var d *_defer
+ mp := acquirem()
+ pp := mp.p.ptr()
+ if len(pp.deferpool) == 0 && sched.deferpool != nil {
+ lock(&sched.deferlock)
+ for len(pp.deferpool) < cap(pp.deferpool)/2 && sched.deferpool != nil {
+ d := sched.deferpool
+ sched.deferpool = d.link
+ d.link = nil
+ pp.deferpool = append(pp.deferpool, d)
+ }
+ unlock(&sched.deferlock)
+ }
+ if n := len(pp.deferpool); n > 0 {
+ d = pp.deferpool[n-1]
+ pp.deferpool[n-1] = nil
+ pp.deferpool = pp.deferpool[:n-1]
+ }
+ if d == nil {
+ d = new(_defer)
+ }
+ gp := mp.curg
+ d.link = gp._defer
+ gp._defer = d
+ releasem(mp)
+ return d
+}
+
+// Free the given defer.
+// The defer cannot be used after this call.
+func freedefer(d *_defer) {
+ if d.special {
+ return
+ }
+ mp := acquirem()
+ pp := mp.p.ptr()
+ if len(pp.deferpool) == cap(pp.deferpool) {
+ // Transfer half of local cache to the central cache.
+ var first, last *_defer
+ for len(pp.deferpool) > cap(pp.deferpool)/2 {
+ n := len(pp.deferpool)
+ d := pp.deferpool[n-1]
+ pp.deferpool[n-1] = nil
+ pp.deferpool = pp.deferpool[:n-1]
+ if first == nil {
+ first = d
+ } else {
+ last.link = d
+ }
+ last = d
+ }
+ lock(&sched.deferlock)
+ last.link = sched.deferpool
+ sched.deferpool = first
+ unlock(&sched.deferlock)
+ }
+ *d = _defer{}
+ pp.deferpool = append(pp.deferpool, d)
+ releasem(mp)
+}
+
+// deferreturn is called to undefer the stack.
+// The compiler inserts a call to this function as a finally clause
+// wrapped around the body of any function that calls defer.
+// The frame argument points to the stack frame of the function.
+func deferreturn(frame *bool) {
+ gp := getg()
+ for gp._defer != nil && gp._defer.frame == frame {
+ d := gp._defer
+ pfn := d.pfn
+ d.pfn = 0
+
+ if pfn != 0 {
+ // This is rather awkward.
+ // The gc compiler does this using assembler
+ // code in jmpdefer.
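+ // Here we construct a Go func value by hand: gccgo represents
+ // a func value as a pointer to a block whose first word is the
+ // code address, so pointing fn at pfn makes calling fn call pfn.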
+ var fn func(unsafe.Pointer)
+ *(**uintptr)(unsafe.Pointer(&fn)) = &pfn
+ fn(d.arg)
+ }
+
+ gp._defer = d.link
+
+ freedefer(d)
+
+ // Since we are executing a defer function now, we
+ // know that we are returning from the calling
+ // function. If the calling function, or one of its
+ // callees, panicked, then the defer functions would
+ // be executed by panic.
+ *frame = true
+ }
+}
+
+// __builtin_extract_return_addr is a GCC intrinsic that converts an
+// address returned by __builtin_return_address(0) to a real address.
+// On most architectures this is a nop.
+//extern __builtin_extract_return_addr
+func __builtin_extract_return_addr(uintptr) uintptr
+
+// setdeferretaddr records the address to which the deferred function
+// returns. This is checked by canrecover. The frontend relies on this
+// function returning false.
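+// (The frontend branches on the result in the generated thunk, which
+// keeps the reference to the return-address label from being deleted.)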
+func setdeferretaddr(retaddr uintptr) bool {
+ gp := getg()
+ if gp._defer != nil {
+ gp._defer.retaddr = __builtin_extract_return_addr(retaddr)
+ }
+ return false
+}
+
+// checkdefer is called by exception handlers used when unwinding the
+// stack after a recovered panic. The exception handler is simply
+// checkdefer(frame)
+// return;
+// If we have not yet reached the frame we are looking for, we
+// continue unwinding.
+func checkdefer(frame *bool) {
+ gp := getg()
+ if gp == nil {
+ // We should never wind up here. Even if some other
+ // language throws an exception, the cgo code
+ // should ensure that g is set.
+ throw("no g in checkdefer")
+ } else if gp.isforeign {
+ // Some other language has thrown an exception.
+ // We need to run the local defer handlers.
+ // If they call recover, we stop unwinding here.
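+ // Push a placeholder panic entry so that a recover call in
+ // those handlers has something to mark as recovered.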
+ var p _panic
+ p.isforeign = true
+ p.link = gp._panic
+ gp._panic = &p
+ for {
+ d := gp._defer
+ if d == nil || d.frame != frame || d.pfn == 0 {
+ break
+ }
+
+ pfn := d.pfn
+ gp._defer = d.link
+
+ var fn func(unsafe.Pointer)
+ *(**uintptr)(unsafe.Pointer(&fn)) = &pfn
+ fn(d.arg)
+
+ freedefer(d)
+
+ if p.recovered {
+ // The recover function caught the panic
+ // thrown by some other language.
+ break
+ }
+ }
+
+ recovered := p.recovered
+ gp._panic = p.link
+
+ if recovered {
+ // Just return and continue executing Go code.
+ *frame = true
+ return
+ }
+
+ // We are panicking through this function.
+ *frame = false
+ } else if gp._defer != nil && gp._defer.pfn == 0 && gp._defer.frame == frame {
+ // This is the defer function that called recover.
+ // Simply return to stop the stack unwind, and let the
+ // Go code continue to execute.
+ d := gp._defer
+ gp._defer = d.link
+ freedefer(d)
+
+ // We are returning from this function.
+ *frame = true
+
+ return
+ }
+
+ // This is some other defer function. It was already run by
+ // the call to panic, or just above. Rethrow the exception.
+ rethrowException()
+ throw("rethrowException returned")
+}
+
+// unwindStack starts unwinding the stack for a panic. We unwind
+// function calls until we reach the one which used a defer function
+// which called recover. Each function which uses a defer statement
+// will have an exception handler, as shown above for checkdefer.
+func unwindStack() {
+ // Allocate the exception type used by the unwind ABI.
+ // It would be nice to define it in runtime_sysinfo.go,
+ // but current definitions don't work because the required
+ // alignment is larger than can be represented in Go.
+ // The type never contains any Go pointers.
+ size := unwindExceptionSize()
+ usize := uintptr(unsafe.Sizeof(uintptr(0)))
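+ // Round up to a whole number of uintptr-sized words so that
+ // the backing array covers at least size bytes.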
+ c := (size + usize - 1) / usize
+ s := make([]uintptr, c)
+ getg().exception = unsafe.Pointer(&s[0])
+ throwException()
+}
+
+// Goexit terminates the goroutine that calls it. No other goroutine is affected.
+// Goexit runs all deferred calls before terminating the goroutine. Because Goexit
+// is not a panic, any recover calls in those deferred functions will return nil.
+//
+// Calling Goexit from the main goroutine terminates that goroutine
+// without func main returning. Since func main has not returned,
+// the program continues execution of other goroutines.
+// If all other goroutines exit, the program crashes.
+func Goexit() {
+ // Run all deferred functions for the current goroutine.
+ // This code is similar to gopanic, see that implementation
+ // for detailed comments.
+ gp := getg()
+ for {
+ d := gp._defer
+ if d == nil {
+ break
+ }
+ gp._defer = d.link
+
+ pfn := d.pfn
+ d.pfn = 0
+
+ if pfn != 0 {
+ var fn func(unsafe.Pointer)
+ *(**uintptr)(unsafe.Pointer(&fn)) = &pfn
+ fn(d.arg)
+ }
+
+ freedefer(d)
+ // Note: we ignore recovers here because Goexit isn't a panic
+ }
+ goexit1()
+}
+
+// Call all Error and String methods before freezing the world.
+// Used when crashing with panicking.
+// This must match types handled by printany.
+func preprintpanics(p *_panic) {
+ for p != nil {
+ switch v := p.arg.(type) {
+ case error:
+ p.arg = v.Error()
+ case stringer:
+ p.arg = v.String()
+ }
+ p = p.link
+ }
+}
+
+// Print all currently active panics. Used when crashing.
+func printpanics(p *_panic) {
+ if p.link != nil {
+ printpanics(p.link)
+ print("\t")
+ }
+ print("panic: ")
+ printany(p.arg)
+ if p.recovered {
+ print(" [recovered]")
+ }
+ print("\n")
+}
+
+// The implementation of the predeclared function panic.
+func gopanic(e interface{}) {
+ gp := getg()
+ if gp.m.curg != gp {
+ print("panic: ")
+ printany(e)
+ print("\n")
+ throw("panic on system stack")
+ }
+
+ if gp.m.mallocing != 0 {
+ print("panic: ")
+ printany(e)
+ print("\n")
+ throw("panic during malloc")
+ }
+ if gp.m.preemptoff != "" {
+ print("panic: ")
+ printany(e)
+ print("\n")
+ print("preempt off reason: ")
+ print(gp.m.preemptoff)
+ print("\n")
+ throw("panic during preemptoff")
+ }
+ if gp.m.locks != 0 {
+ print("panic: ")
+ printany(e)
+ print("\n")
+ throw("panic holding locks")
+ }
+
+ var p _panic
+ p.arg = e
+ p.link = gp._panic
+ gp._panic = (*_panic)(noescape(unsafe.Pointer(&p)))
+
+ for {
+ d := gp._defer
+ if d == nil {
+ break
+ }
+
+ pfn := d.pfn
+ d.pfn = 0
+
+ if pfn != 0 {
+ var fn func(unsafe.Pointer)
+ *(**uintptr)(unsafe.Pointer(&fn)) = &pfn
+ fn(d.arg)
+
+ if p.recovered {
+ // Some deferred function called recover.
+ // Stop running this panic.
+ gp._panic = p.link
+
+ // Unwind the stack by throwing an exception.
+ // The compiler has arranged to create
+ // exception handlers in each function
+ // that uses a defer statement. These
+ // exception handlers will check whether
+ // the entry on the top of the defer stack
+ // is from the current function. If it is,
+ // we have unwound the stack far enough.
+ unwindStack()
+
+ throw("unwindStack returned")
+ }
+
+ // Because we executed that defer function by a panic,
+ // and it did not call recover, we know that we are
+ // not returning from the calling function--we are
+ // panicking through it.
+ *d.frame = false
+ }
+
+ gp._defer = d.link
+ freedefer(d)
+ }
+
+ // ran out of deferred calls - old-school panic now
+ // Because it is unsafe to call arbitrary user code after freezing
+ // the world, we call preprintpanics to invoke all necessary Error
+ // and String methods to prepare the panic strings before startpanic.
+ preprintpanics(gp._panic)
+ startpanic()
+ printpanics(gp._panic)
+ dopanic(0) // should not return
+ *(*int)(nil) = 0 // not reached
+}
+
+// currentDefer returns the top of the defer stack if it can be recovered.
+// Otherwise it returns nil.
+func currentDefer() *_defer {
+ gp := getg()
+ d := gp._defer
+ if d == nil {
+ return nil
+ }
+
+ // The panic that would be recovered is the one on the top of
+ // the panic stack. We do not want to recover it if that panic
+ // was on the top of the panic stack when this function was
+ // deferred.
+ if d._panic == gp._panic {
+ return nil
+ }
+
+ // The deferred thunk will call setdeferretaddr. If this has
+ // not happened, then we have not been called via defer, and
+ // we can not recover.
+ if d.retaddr == 0 {
+ return nil
+ }
+
+ return d
+}
+
+// canrecover is called by a thunk to see if the real function would
+// be permitted to recover a panic value. Recovering a value is
+// permitted if the thunk was called directly by defer. retaddr is the
+// return address of the function that is calling canrecover--that is,
+// the thunk.
+func canrecover(retaddr uintptr) bool {
+ d := currentDefer()
+ if d == nil {
+ return false
+ }
+
+ ret := __builtin_extract_return_addr(retaddr)
+ dret := d.retaddr
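+ // d.retaddr is the label recorded by setdeferretaddr just after
+ // the thunk's call to the deferred function, so if it lies at or
+ // within a few bytes after our caller's return address, we were
+ // called directly from the thunk.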
+ if ret <= dret && ret+16 >= dret {
+ return true
+ }
+
+ // On some systems, in some cases, the return address does not
+ // work reliably. See http://gcc.gnu.org/PR60406. If we are
+ // permitted to call recover, the call stack will look like this:
+ // runtime.gopanic, runtime.deferreturn, etc.
+ // thunk to call deferred function (calls __go_set_defer_retaddr)
+ // function that calls __go_can_recover (passing return address)
+ // runtime.canrecover
+ // Calling callers will skip the thunks. So if our caller's
+ // caller starts with "runtime.", then we are permitted to
+ // call recover.
+ var locs [16]location
+ if callers(2, locs[:2]) < 2 {
+ return false
+ }
+
+ name := locs[1].function
+ if hasprefix(name, "runtime.") {
+ return true
+ }
+
+ // If the function calling recover was created by reflect.MakeFunc,
+ // then makefuncfficanrecover will have set makefunccanrecover.
+ if !d.makefunccanrecover {
+ return false
+ }
+
+ // We look up the stack, ignoring libffi functions and
+ // functions in the reflect package, until we find
+ // reflect.makeFuncStub or reflect.ffi_callback called by FFI
+ // functions. Then we check the caller of that function.
+
+ n := callers(3, locs[:])
+ foundFFICallback := false
+ i := 0
+ for ; i < n; i++ {
+ name = locs[i].function
+ if name == "" {
+ // No function name means this caller isn't Go code.
+ // Assume that this is libffi.
+ continue
+ }
+
+ // Ignore function in libffi.
+ if hasprefix(name, "ffi_") {
+ continue
+ }
+
+ if foundFFICallback {
+ break
+ }
+
+ if name == "reflect.ffi_callback" {
+ foundFFICallback = true
+ continue
+ }
+
+ // Ignore other functions in the reflect package.
+ if hasprefix(name, "reflect.") {
+ continue
+ }
+
+ // We should now be looking at the real caller.
+ break
+ }
+
+ if i < n {
+ name = locs[i].function
+ if hasprefix(name, "runtime.") {
+ return true
+ }
+ }
+
+ return false
+}
+
+// This function is called when code is about to enter a function
+// created by the libffi version of reflect.MakeFunc. This function is
+// passed the names of the callers of the libffi code that called the
+// stub. It uses them to decide whether it is permitted to call
+// recover, and sets d.makefunccanrecover so that gorecover can make
+// the same decision.
+func makefuncfficanrecover(loc []location) {
+ d := currentDefer()
+ if d == nil {
+ return
+ }
+
+ // If we are already in a call stack of MakeFunc functions,
+ // there is nothing we can usefully check here.
+ if d.makefunccanrecover {
+ return
+ }
+
+ // loc starts with the caller of our caller. That will be a thunk.
+ // If its caller was a runtime function, then it was called
+ // directly by defer.
+ if len(loc) < 2 {
+ return
+ }
+
+ name := loc[1].function
+ if hasprefix(name, "runtime.") {
+ d.makefunccanrecover = true
+ }
+}
+
+// makefuncreturning is called when code is about to exit a function
+// created by reflect.MakeFunc. It is called by the function stub used
+// by reflect.MakeFunc. It clears the makefunccanrecover field. It's
+// OK to always clear this field, because canrecover will only be
+// called by a stub created for a function that calls recover. That
+// stub will not call a function created by reflect.MakeFunc, so by
+// the time we get here any caller higher up on the call stack no
+// longer needs the information.
+func makefuncreturning() {
+ d := getg()._defer
+ if d != nil {
+ d.makefunccanrecover = false
+ }
+}
+
+// The implementation of the predeclared function recover.
+func gorecover() interface{} {
+ gp := getg()
+ p := gp._panic
+ if p != nil && !p.recovered {
+ p.recovered = true
+ return p.arg
+ }
+ return nil
+}
+
+// deferredrecover is called when a call to recover is deferred. That
+// is, something like
+// defer recover()
+//
+// We need to handle this specially. In gc, the recover function
+// looks up the stack frame. In particular, that means that a deferred
+// recover will not recover a panic thrown in the same function that
+// defers the recover. It will only recover a panic thrown in a
+// function that defers the deferred call to recover.
+//
+// In other words:
+//
+// func f1() {
+// defer recover() // does not stop panic
+// panic(0)
+// }
+//
+// func f2() {
+// defer func() {
+// defer recover() // stops panic(0)
+// }()
+// panic(0)
+// }
+//
+// func f3() {
+// defer func() {
+// defer recover() // does not stop panic
+// panic(0)
+// }()
+// panic(1)
+// }
+//
+// func f4() {
+// defer func() {
+// defer func() {
+// defer recover() // stops panic(0)
+// }()
+// panic(0)
+// }()
+// panic(1)
+// }
+//
+// The interesting case here is f3. As can be seen from f2, the
+// deferred recover could pick up panic(1). However, this does not
+// happen because it is blocked by the panic(0).
+//
+// When a function calls recover, then when we invoke it we pass a
+// hidden parameter indicating whether it should recover something.
+// This parameter is set based on whether the function is being
+// invoked directly from defer. The parameter winds up determining
+// whether gorecover or deferredrecover is called at all.
+//
+// In the case of a deferred recover, the hidden parameter that
+// controls the call is actually the one set up for the function that
+// runs the defer recover() statement. That is the right thing in all
+// the cases above except for f3. In f3 the function is permitted to
+// call recover, but the deferred recover call is not. We address that
+// here by checking for that specific case before calling recover. If
+// this function was deferred when there is already a panic on the
+// panic stack, then we can only recover that panic, not any other.
+
+// Note that we can get away with using a special function here
+// because you are not permitted to take the address of a predeclared
+// function like recover.
+func deferredrecover() interface{} {
+ gp := getg()
+ if gp._defer == nil || gp._defer._panic != gp._panic {
+ return nil
+ }
+ return gorecover()
+}
+
+//go:nosplit
+func throw(s string) {
+ print("fatal error: ", s, "\n")
+ gp := getg()
+ if gp.m.throwing == 0 {
+ gp.m.throwing = 1
+ }
+ startpanic()
+ dopanic(0)
+ *(*int)(nil) = 0 // not reached
+}
+
+//uint32 runtime·panicking;
+var paniclk mutex
+
+func startpanic() {
+ _g_ := getg()
+ // Uncomment when mheap_ is in Go.
+ // if mheap_.cachealloc.size == 0 { // very early
+ // print("runtime: panic before malloc heap initialized\n")
+ // _g_.m.mallocing = 1 // tell rest of panic not to try to malloc
+ // } else
+ if _g_.m.mcache == nil { // can happen if called from signal handler or throw
+ _g_.m.mcache = allocmcache()
+ }
+
+ switch _g_.m.dying {
+ case 0:
+ _g_.m.dying = 1
+ _g_.writebuf = nil
+ atomic.Xadd(&panicking, 1)
+ lock(&paniclk)
+ if debug.schedtrace > 0 || debug.scheddetail > 0 {
+ schedtrace(true)
+ }
+ freezetheworld()
+ return
+ case 1:
+ // Something failed while panicking, probably the print of the
+ // argument to panic(). Just print a stack trace and exit.
+ _g_.m.dying = 2
+ print("panic during panic\n")
+ dopanic(0)
+ exit(3)
+ fallthrough
+ case 2:
+ // This is a genuine bug in the runtime, we couldn't even
+ // print the stack trace successfully.
+ _g_.m.dying = 3
+ print("stack trace unavailable\n")
+ exit(4)
+ fallthrough
+ default:
+ // Can't even print! Just exit.
+ exit(5)
+ }
+}
+
+var didothers bool
+var deadlock mutex
+
+func dopanic(unused int) {
+ gp := getg()
+ if gp.sig != 0 {
+ signame := signame(gp.sig)
+ if signame != "" {
+ print("[signal ", signame)
+ } else {
+ print("[signal ", hex(gp.sig))
+ }
+ print(" code=", hex(gp.sigcode0), " addr=", hex(gp.sigcode1), " pc=", hex(gp.sigpc), "]\n")
+ }
+
+ level, all, docrash := gotraceback()
+ _g_ := getg()
+ if level > 0 {
+ if gp != gp.m.curg {
+ all = true
+ }
+ if gp != gp.m.g0 {
+ print("\n")
+ goroutineheader(gp)
+ traceback(0)
+ } else if level >= 2 || _g_.m.throwing > 0 {
+ print("\nruntime stack:\n")
+ traceback(0)
+ }
+ if !didothers && all {
+ didothers = true
+ tracebackothers(gp)
+ }
+ }
+ unlock(&paniclk)
+
+ if atomic.Xadd(&panicking, -1) != 0 {
+ // Some other m is panicking too.
+ // Let it print what it needs to print.
+ // Wait forever without chewing up cpu.
+ // It will exit when it's done.
+ lock(&deadlock)
+ lock(&deadlock)
+ }
+
+ if docrash {
+ crash()
+ }
+
+ exit(2)
+}
+
//go:nosplit
func canpanic(gp *g) bool {
// Note that g is m->gsignal, different from gp.
mcache *mcache
// Not for gccgo: racectx uintptr
- // Not for gccgo yet: deferpool [5][]*_defer // pool of available defer structs of different sizes (see panic.go)
- // Not for gccgo yet: deferpoolbuf [5][32]*_defer
- // Temporary gccgo type for deferpool field.
- deferpool *_defer
+ // gccgo has only one size of defer.
+ deferpool []*_defer
+ deferpoolbuf [32]*_defer
// Cache of goroutine ids, amortizes accesses to runtime·sched.goidgen.
goidcache uint64
// This is the gccgo version.
type _defer struct {
// The next entry in the stack.
- next *_defer
+ link *_defer
// The stack variable for the function which called this defer
// statement. This is set to true if we are returning from
// This is the gccgo version.
type _panic struct {
// The next entry in the stack.
- next *_panic
+ link *_panic
// The value associated with this panic.
arg interface{}
// allm *m
// allp [_MaxGomaxprocs + 1]*p
// gomaxprocs int32
- // panicking uint32
- ncpu int32
+ panicking uint32
+ ncpu int32
// forcegc forcegcstate
//extern syscall
func syscall(trap uintptr, a1, a2, a3, a4, a5, a6 uintptr) uintptr
-// throw crashes the program.
-// For gccgo unless and until we port panic.go.
-func throw(string)
-
// newobject allocates a new object.
// For gccgo unless and until we port malloc.go.
func newobject(*_type) unsafe.Pointer
func sigprof()
func mcount() int32
func gcount() int32
+func goexit1()
+func schedtrace(bool)
+func freezetheworld()
// Signal trampoline, written in C.
func sigtramp()
// Implemented in C for gccgo.
func dumpregs(*_siginfo_t, unsafe.Pointer)
-// Temporary for gccgo until we port panic.go.
-func startpanic()
-
// Temporary for gccgo until we port proc.go.
//go:linkname getsched runtime.getsched
func getsched() *schedt {
return &sched
}
+
+// Throw and rethrow an exception.
+func throwException()
+func rethrowException()
+
+// Fetch the size and required alignment of the _Unwind_Exception type
+// used by the stack unwinder.
+func unwindExceptionSize() uintptr
+
+// Temporary for gccgo until C code no longer needs it.
+//go:nosplit
+//go:linkname getPanicking runtime.getPanicking
+func getPanicking() uint32 {
+ return panicking
+}
+
+// Temporary for gccgo until we port mcache.go.
+func allocmcache() *mcache
#include "runtime.h"
#include "go-alloc.h"
-#include "go-panic.h"
#include "go-type.h"
extern void chanrecv1 (ChanType *, Hchan *, void *)
handle this by calling runtime_entersyscall in the personality
function in go-unwind.c. FIXME. */
- __go_panic (e);
+ runtime_panic (e);
}
/* Used for _cgo_wait_runtime_init_done. This is based on code in
+++ /dev/null
-/* go-defer.c -- manage the defer stack.
-
- Copyright 2009 The Go Authors. All rights reserved.
- Use of this source code is governed by a BSD-style
- license that can be found in the LICENSE file. */
-
-#include <stddef.h>
-
-#include "runtime.h"
-#include "go-alloc.h"
-#include "go-panic.h"
-
-/* This function is called each time we need to defer a call. */
-
-void
-__go_defer (_Bool *frame, void (*pfn) (void *), void *arg)
-{
- G *g;
- Defer *n;
-
- g = runtime_g ();
- n = runtime_newdefer ();
- n->next = g->_defer;
- n->frame = frame;
- n->_panic = g->_panic;
- n->pfn = (uintptr) pfn;
- n->arg = arg;
- n->retaddr = 0;
- n->makefunccanrecover = 0;
- n->special = 0;
- g->_defer = n;
-}
-
-/* This function is called when we want to undefer the stack. */
-
-void
-__go_undefer (_Bool *frame)
-{
- G *g;
-
- g = runtime_g ();
- while (g->_defer != NULL && g->_defer->frame == frame)
- {
- Defer *d;
- void (*pfn) (void *);
-
- d = g->_defer;
- pfn = (void (*) (void *)) d->pfn;
- d->pfn = 0;
-
- if (pfn != NULL)
- (*pfn) (d->arg);
-
- g->_defer = d->next;
-
- /* This may be called by a cgo callback routine to defer the
- call to syscall.CgocallBackDone, in which case we will not
- have a memory context. Don't try to free anything in that
- case--the GC will release it later. */
- if (runtime_m () != NULL)
- runtime_freedefer (d);
-
- /* Since we are executing a defer function here, we know we are
- returning from the calling function. If the calling
- function, or one of its callees, paniced, then the defer
- functions would be executed by __go_panic. */
- *frame = 1;
- }
-}
-
-/* This function is called to record the address to which the deferred
- function returns. This may in turn be checked by __go_can_recover.
- The frontend relies on this function returning false. */
-
-_Bool
-__go_set_defer_retaddr (void *retaddr)
-{
- G *g;
-
- g = runtime_g ();
- if (g->_defer != NULL)
- g->_defer->retaddr = (uintptr) __builtin_extract_return_addr (retaddr);
- return 0;
-}
+++ /dev/null
-/* go-deferred-recover.c -- support for a deferred recover function.
-
- Copyright 2010 The Go Authors. All rights reserved.
- Use of this source code is governed by a BSD-style
- license that can be found in the LICENSE file. */
-
-#include <stddef.h>
-
-#include "runtime.h"
-#include "go-panic.h"
-
-/* This is called when a call to recover is deferred. That is,
- something like
- defer recover()
-
- We need to handle this specially. In 6g/8g, the recover function
- looks up the stack frame. In particular, that means that a
- deferred recover will not recover a panic thrown in the same
- function that defers the recover. It will only recover a panic
- thrown in a function that defers the deferred call to recover.
-
- In other words:
-
- func f1() {
- defer recover() // does not stop panic
- panic(0)
- }
-
- func f2() {
- defer func() {
- defer recover() // stops panic(0)
- }()
- panic(0)
- }
-
- func f3() {
- defer func() {
- defer recover() // does not stop panic
- panic(0)
- }()
- panic(1)
- }
-
- func f4() {
- defer func() {
- defer func() {
- defer recover() // stops panic(0)
- }()
- panic(0)
- }()
- panic(1)
- }
-
- The interesting case here is f3. As can be seen from f2, the
- deferred recover could pick up panic(1). However, this does not
- happen because it is blocked by the panic(0).
-
- When a function calls recover, then when we invoke it we pass a
- hidden parameter indicating whether it should recover something.
- This parameter is set based on whether the function is being
- invoked directly from defer. The parameter winds up determining
- whether __go_recover or __go_deferred_recover is called at all.
-
- In the case of a deferred recover, the hidden parameter which
- controls the call is actually the one set up for the function which
- runs the defer recover() statement. That is the right thing in all
- the cases above except for f3. In f3 the function is permitted to
- call recover, but the deferred recover call is not. We address
- that here by checking for that specific case before calling
- recover. If this function was deferred when there is already a
- panic on the panic stack, then we can only recover that panic, not
- any other.
-
- Note that we can get away with using a special function here
- because you are not permitted to take the address of a predeclared
- function like recover. */
-
-Eface
-__go_deferred_recover ()
-{
- G *g;
-
- g = runtime_g ();
- if (g->_defer == NULL || g->_defer->_panic != g->_panic)
- {
- Eface ret;
-
- ret._type = NULL;
- ret.data = NULL;
- return ret;
- }
- return __go_recover ();
-}
+++ /dev/null
-/* go-panic.c -- support for the go panic function.
-
- Copyright 2009 The Go Authors. All rights reserved.
- Use of this source code is governed by a BSD-style
- license that can be found in the LICENSE file. */
-
-#include <stdio.h>
-#include <stdlib.h>
-
-#include "runtime.h"
-#include "arch.h"
-#include "malloc.h"
-#include "go-alloc.h"
-#include "go-panic.h"
-
-/* Print the panic stack. This is used when there is no recover. */
-
-static void
-__printpanics (Panic *p)
-{
- if (p->next != NULL)
- {
- __printpanics (p->next);
- runtime_printf ("\t");
- }
- runtime_printf ("panic: ");
- runtime_printany (p->arg);
- if (p->recovered)
- runtime_printf (" [recovered]");
- runtime_printf ("\n");
-}
-
-/* This implements __go_panic which is used for the panic
- function. */
-
-void
-__go_panic (Eface arg)
-{
- G *g;
- Panic *n;
-
- g = runtime_g ();
-
- n = (Panic *) __go_alloc (sizeof (Panic));
- n->arg = arg;
- n->next = g->_panic;
- g->_panic = n;
-
- /* Run all the defer functions. */
-
- while (1)
- {
- Defer *d;
- void (*pfn) (void *);
-
- d = g->_defer;
- if (d == NULL)
- break;
-
- pfn = (void (*) (void *)) d->pfn;
- d->pfn = 0;
-
- if (pfn != NULL)
- {
- (*pfn) (d->arg);
-
- if (n->recovered)
- {
- /* Some defer function called recover. That means that
- we should stop running this panic. */
-
- g->_panic = n->next;
- __go_free (n);
-
- /* Now unwind the stack by throwing an exception. The
- compiler has arranged to create exception handlers in
- each function which uses a defer statement. These
- exception handlers will check whether the entry on
- the top of the defer stack is from the current
- function. If it is, we have unwound the stack far
- enough. */
- __go_unwind_stack ();
-
- /* __go_unwind_stack should not return. */
- abort ();
- }
-
- /* Because we executed that defer function by a panic, and
- it did not call recover, we know that we are not
- returning from the calling function--we are panicing
- through it. */
- *d->frame = 0;
- }
-
- g->_defer = d->next;
-
- /* This may be called by a cgo callback routine to defer the
- call to syscall.CgocallBackDone, in which case we will not
- have a memory context. Don't try to free anything in that
- case--the GC will release it later. */
- if (runtime_m () != NULL)
- runtime_freedefer (d);
- }
-
- /* The panic was not recovered. */
-
- runtime_startpanic ();
- __printpanics (g->_panic);
- runtime_dopanic (0);
-}
+++ /dev/null
-/* go-panic.h -- declare the go panic functions.
-
- Copyright 2009 The Go Authors. All rights reserved.
- Use of this source code is governed by a BSD-style
- license that can be found in the LICENSE file. */
-
-#ifndef LIBGO_GO_PANIC_H
-#define LIBGO_GO_PANIC_H
-
-extern void __go_panic (Eface)
- __attribute__ ((noreturn));
-
-extern void __go_print_string (String);
-
-extern Eface __go_recover (void);
-
-extern _Bool __go_can_recover (void *);
-
-extern void __go_makefunc_can_recover (void *retaddr);
-
-extern void __go_makefunc_ffi_can_recover (Location*, int);
-
-extern void __go_makefunc_returning (void);
-
-extern void __go_unwind_stack (void);
-
-#endif /* !defined(LIBGO_GO_PANIC_H) */
+++ /dev/null
-/* go-recover.c -- support for the go recover function.
-
- Copyright 2010 The Go Authors. All rights reserved.
- Use of this source code is governed by a BSD-style
- license that can be found in the LICENSE file. */
-
-#include "runtime.h"
-#include "go-panic.h"
-
-/* If the top of the defer stack can be recovered, then return it.
- Otherwise return NULL. */
-
-static Defer *
-current_defer ()
-{
- G *g;
- Defer *d;
-
- g = runtime_g ();
-
- d = g->_defer;
- if (d == NULL)
- return NULL;
-
- /* The panic which would be recovered is the one on the top of the
- panic stack. We do not want to recover it if that panic was on
- the top of the panic stack when this function was deferred. */
- if (d->_panic == g->_panic)
- return NULL;
-
- /* The deferred thunk will call _go_set_defer_retaddr. If this has
- not happened, then we have not been called via defer, and we can
- not recover. */
- if (d->retaddr == 0)
- return NULL;
-
- return d;
-}
-
-/* This is called by a thunk to see if the real function should be
- permitted to recover a panic value. Recovering a value is
- permitted if the thunk was called directly by defer. RETADDR is
- the return address of the function which is calling
- __go_can_recover--this is, the thunk. */
-
-_Bool
-__go_can_recover (void *retaddr)
-{
- Defer *d;
- const char* ret;
- const char* dret;
- Location locs[16];
- const byte *name;
- intgo len;
- int n;
- int i;
- _Bool found_ffi_callback;
-
- d = current_defer ();
- if (d == NULL)
- return 0;
-
- ret = (const char *) __builtin_extract_return_addr (retaddr);
-
- dret = (const char *) (uintptr) d->retaddr;
- if (ret <= dret && ret + 16 >= dret)
- return 1;
-
- /* On some systems, in some cases, the return address does not work
- reliably. See http://gcc.gnu.org/PR60406. If we are permitted
- to call recover, the call stack will look like this:
- __go_panic, __go_undefer, etc.
- thunk to call deferred function (calls __go_set_defer_retaddr)
- function that calls __go_can_recover (passing return address)
- __go_can_recover
- Calling runtime_callers will skip the thunks. So if our caller's
- caller starts with __go, then we are permitted to call
- recover. */
-
- if (runtime_callers (1, &locs[0], 2, false) < 2)
- return 0;
-
- name = locs[1].function.str;
- len = locs[1].function.len;
-
- /* Although locs[1].function is a Go string, we know it is
- NUL-terminated. */
- if (len > 4
- && __builtin_strchr ((const char *) name, '.') == NULL
- && __builtin_strncmp ((const char *) name, "__go_", 4) == 0)
- return 1;
-
- /* If we are called from __go_makefunc_can_recover, then we need to
- look one level higher. */
- if (locs[0].function.len > 0
- && __builtin_strcmp ((const char *) locs[0].function.str,
- "__go_makefunc_can_recover") == 0)
- {
- if (runtime_callers (3, &locs[0], 1, false) < 1)
- return 0;
- name = locs[0].function.str;
- len = locs[0].function.len;
- if (len > 4
- && __builtin_strchr ((const char *) name, '.') == NULL
- && __builtin_strncmp ((const char *) name, "__go_", 4) == 0)
- return 1;
- }
-
- /* If the function calling recover was created by reflect.MakeFunc,
- then __go_makefunc_can_recover or __go_makefunc_ffi_can_recover
- will have set the __makefunc_can_recover field. */
- if (!d->makefunccanrecover)
- return 0;
-
- /* We look up the stack, ignoring libffi functions and functions in
- the reflect package, until we find reflect.makeFuncStub or
- reflect.ffi_callback called by FFI functions. Then we check the
- caller of that function. */
-
- n = runtime_callers (2, &locs[0], sizeof locs / sizeof locs[0], false);
- found_ffi_callback = 0;
- for (i = 0; i < n; i++)
- {
- const byte *name;
-
- if (locs[i].function.len == 0)
- {
- /* No function name means this caller isn't Go code. Assume
- that this is libffi. */
- continue;
- }
-
- /* Ignore functions in libffi. */
- name = locs[i].function.str;
- if (__builtin_strncmp ((const char *) name, "ffi_", 4) == 0)
- continue;
-
- if (found_ffi_callback)
- break;
-
- if (__builtin_strcmp ((const char *) name, "reflect.ffi_callback") == 0)
- {
- found_ffi_callback = 1;
- continue;
- }
-
- if (__builtin_strcmp ((const char *) name, "reflect.makeFuncStub") == 0)
- {
- i++;
- break;
- }
-
- /* Ignore other functions in the reflect package. */
- if (__builtin_strncmp ((const char *) name, "reflect.", 8) == 0)
- continue;
-
- /* We should now be looking at the real caller. */
- break;
- }
-
- if (i < n && locs[i].function.len > 0)
- {
- name = locs[i].function.str;
- if (__builtin_strncmp ((const char *) name, "__go_", 4) == 0)
- return 1;
- }
-
- return 0;
-}
-
-/* This function is called when code is about to enter a function
- created by reflect.MakeFunc. It is called by the function stub
- used by MakeFunc. If the stub is permitted to call recover, then a
- real MakeFunc function is permitted to call recover. */
-
-void
-__go_makefunc_can_recover (void *retaddr)
-{
- Defer *d;
-
- d = current_defer ();
- if (d == NULL)
- return;
-
- /* If we are already in a call stack of MakeFunc functions, there is
- nothing we can usefully check here. */
- if (d->makefunccanrecover)
- return;
-
- if (__go_can_recover (retaddr))
- d->makefunccanrecover = 1;
-}
-
-/* This function is called when code is about to enter a function
- created by the libffi version of reflect.MakeFunc. This function
- is passed the names of the callers of the libffi code that called
- the stub. It uses to decide whether it is permitted to call
- recover, and sets d->makefunccanrecover so that __go_recover can
- make the same decision. */
-
-void
-__go_makefunc_ffi_can_recover (Location *loc, int n)
-{
- Defer *d;
- const byte *name;
- intgo len;
-
- d = current_defer ();
- if (d == NULL)
- return;
-
- /* If we are already in a call stack of MakeFunc functions, there is
- nothing we can usefully check here. */
- if (d->makefunccanrecover)
- return;
-
- /* LOC points to the caller of our caller. That will be a thunk.
- If its caller was a runtime function, then it was called directly
- by defer. */
-
- if (n < 2)
- return;
-
- name = (loc + 1)->function.str;
- len = (loc + 1)->function.len;
- if (len > 4
- && __builtin_strchr ((const char *) name, '.') == NULL
- && __builtin_strncmp ((const char *) name, "__go_", 4) == 0)
- d->makefunccanrecover = 1;
-}
-
-/* This function is called when code is about to exit a function
- created by reflect.MakeFunc. It is called by the function stub
- used by MakeFunc. It clears the makefunccanrecover field. It's OK
- to always clear this field, because __go_can_recover will only be
- called by a stub created for a function that calls recover. That
- stub will not call a function created by reflect.MakeFunc, so by
- the time we get here any caller higher up on the call stack no
- longer needs the information. */
-
-void
-__go_makefunc_returning (void)
-{
- Defer *d;
-
- d = runtime_g ()->_defer;
- if (d != NULL)
- d->makefunccanrecover = 0;
-}
-
-/* This is only called when it is valid for the caller to recover the
- value on top of the panic stack, if there is one. */
-
-Eface
-__go_recover ()
-{
- G *g;
- Panic *p;
-
- g = runtime_g ();
-
- if (g->_panic == NULL || g->_panic->recovered)
- {
- Eface ret;
-
- ret._type = NULL;
- ret.data = NULL;
- return ret;
- }
- p = g->_panic;
- p->recovered = 1;
- return p->arg;
-}
#include <ucontext.h>
#include "runtime.h"
-#include "go-assert.h"
-#include "go-panic.h"
#ifndef SA_RESTART
#define SA_RESTART 0
license that can be found in the LICENSE file. */
#include "runtime.h"
-#include "go-panic.h"
-#include "arch.h"
-#include "malloc.h"
String
__go_string_slice (String s, intgo start, intgo end)
#include "runtime.h"
#include "go-alloc.h"
-#include "go-panic.h"
/* The code for a Go exception. */
<< 8 | (_Unwind_Exception_Class) '\0');
#endif
+/* Rethrow an exception. */
-/* This function is called by exception handlers used when unwinding
- the stack after a recovered panic. The exception handler looks
- like this:
- __go_check_defer (frame);
- return;
- If we have not yet reached the frame we are looking for, we
- continue unwinding. */
+void rethrowException (void) __asm__(GOSYM_PREFIX "runtime.rethrowException");
void
-__go_check_defer (_Bool *frame)
+rethrowException ()
{
- G *g;
struct _Unwind_Exception *hdr;
- g = runtime_g ();
-
- if (g == NULL)
- {
- /* Some other language has thrown an exception. We know there
- are no defer handlers, so there is nothing to do. */
- }
- else if (g->isforeign)
- {
- Panic *n;
- _Bool recovered;
-
- /* Some other language has thrown an exception. We need to run
- the local defer handlers. If they call recover, we stop
- unwinding the stack here. */
-
- n = (Panic *) __go_alloc (sizeof (Panic));
-
- n->arg._type = NULL;
- n->arg.data = NULL;
- n->recovered = 0;
- n->isforeign = 1;
- n->next = g->_panic;
- g->_panic = n;
-
- while (1)
- {
- Defer *d;
- void (*pfn) (void *);
-
- d = g->_defer;
- if (d == NULL || d->frame != frame || d->pfn == 0)
- break;
-
- pfn = (void (*) (void *)) d->pfn;
- g->_defer = d->next;
-
- (*pfn) (d->arg);
-
- if (runtime_m () != NULL)
- runtime_freedefer (d);
-
- if (n->recovered)
- {
- /* The recover function caught the panic thrown by some
- other language. */
- break;
- }
- }
-
- recovered = n->recovered;
- g->_panic = n->next;
- __go_free (n);
-
- if (recovered)
- {
- /* Just return and continue executing Go code. */
- *frame = 1;
- return;
- }
-
- /* We are panicing through this function. */
- *frame = 0;
- }
- else if (g->_defer != NULL
- && g->_defer->pfn == 0
- && g->_defer->frame == frame)
- {
- Defer *d;
-
- /* This is the defer function which called recover. Simply
- return to stop the stack unwind, and let the Go code continue
- to execute. */
- d = g->_defer;
- g->_defer = d->next;
-
- if (runtime_m () != NULL)
- runtime_freedefer (d);
-
- /* We are returning from this function. */
- *frame = 1;
-
- return;
- }
-
- /* This is some other defer function. It was already run by the
- call to panic, or just above. Rethrow the exception. */
-
- hdr = (struct _Unwind_Exception *) g->exception;
+ hdr = (struct _Unwind_Exception *) runtime_g()->exception;
#ifdef __USING_SJLJ_EXCEPTIONS__
_Unwind_SjLj_Resume_or_Rethrow (hdr);
abort();
}
-/* Unwind function calls until we reach the one which used a defer
- function which called recover. Each function which uses a defer
- statement will have an exception handler, as shown above. */
+/* Return the size of the type that holds an exception header, so that
+ it can be allocated by Go code. */
+
+uintptr unwindExceptionSize(void)
+ __asm__ (GOSYM_PREFIX "runtime.unwindExceptionSize");
+
+uintptr
+unwindExceptionSize ()
+{
+ uintptr ret, align;
+
+ ret = sizeof (struct _Unwind_Exception);
+ /* Adjust the size to make sure that we can get an aligned value. */
+ align = __alignof__ (struct _Unwind_Exception);
+ if (align > __alignof__ (uintptr))
+ ret += align - __alignof__ (uintptr);
+ return ret;
+}
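
A minimal, self-contained sketch of the arithmetic at work here, under the assumption of a 40-byte exception header with 16-byte alignment and an allocator that only guarantees uintptr (8-byte) alignment; the names and sizes are illustrative, not taken from the runtime:

#include <stdint.h>
#include <stdio.h>

/* Round P up to ALIGN, which must be a power of two; this mirrors the
   expression throwException applies to g->exception below.  */
static uintptr_t
round_up (uintptr_t p, uintptr_t align)
{
  return (p + align - 1) & ~(align - 1);
}

int
main (void)
{
  uintptr_t size = 40;          /* hypothetical sizeof (struct _Unwind_Exception) */
  uintptr_t align = 16;         /* hypothetical __alignof__ of that struct */
  uintptr_t uintptr_align = 8;  /* alignment the Go allocator guarantees */

  /* unwindExceptionSize over-allocates by the difference in alignments.  */
  uintptr_t padded = size + (align - uintptr_align);   /* 48 */

  /* Worst case: the allocation starts only uintptr-aligned.  */
  uintptr_t base = 8;                                   /* not 16-aligned */
  uintptr_t start = round_up (base, align);             /* 16 */

  /* The header occupies [16, 56), which stays inside the padded
     buffer [8, 56), so the rounded-up pointer is always in bounds.  */
  printf ("padded=%lu start=%lu end=%lu buffer_end=%lu\n",
          (unsigned long) padded, (unsigned long) start,
          (unsigned long) (start + size), (unsigned long) (base + padded));
  return 0;
}
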
+
+/* Throw an exception. This is called with g->exception pointing to
+ an uninitialized _Unwind_Exception instance. */
+
+void throwException (void) __asm__(GOSYM_PREFIX "runtime.throwException");
void
-__go_unwind_stack ()
+throwException ()
{
struct _Unwind_Exception *hdr;
+ uintptr align;
+
+ hdr = (struct _Unwind_Exception *)runtime_g ()->exception;
+ /* Make sure the value is correctly aligned. It will be large
+ enough, because of unwindExceptionSize. */
+ align = __alignof__ (struct _Unwind_Exception);
hdr = ((struct _Unwind_Exception *)
- __go_alloc (sizeof (struct _Unwind_Exception)));
+ (((uintptr) hdr + align - 1) &~ (align - 1)));
+
__builtin_memcpy (&hdr->exception_class, &__go_exception_class,
sizeof hdr->exception_class);
hdr->exception_cleanup = NULL;
- runtime_g ()->exception = hdr;
-
#ifdef __USING_SJLJ_EXCEPTIONS__
_Unwind_SjLj_RaiseException (hdr);
#else
#include "malloc.h"
#include "mgc0.h"
#include "go-type.h"
-#include "go-panic.h"
#define hash __hash
#define KindNoPointers GO_NO_POINTERS
// runtime_gentraceback(pc, sp, lr, gp, 0, nil, 0x7fffffff, dumpframe, &child, false);
// dump defer & panic records
- for(d = gp->_defer; d != nil; d = d->next) {
+ for(d = gp->_defer; d != nil; d = d->link) {
dumpint(TagDefer);
dumpint((uintptr)d);
dumpint((uintptr)gp);
dumpint((uintptr)d->frame);
dumpint((uintptr)d->pfn);
dumpint((uintptr)0);
- dumpint((uintptr)d->next);
+ dumpint((uintptr)d->link);
}
- for (p = gp->_panic; p != nil; p = p->next) {
+ for (p = gp->_panic; p != nil; p = p->link) {
dumpint(TagPanic);
dumpint((uintptr)p);
dumpint((uintptr)gp);
dumpint((uintptr)p->arg._type);
dumpint((uintptr)p->arg.data);
dumpint((uintptr)0);
- dumpint((uintptr)p->next);
+ dumpint((uintptr)p->link);
}
}
{
P *p, **pp;
MCache *c;
+ Defer *d, *dlink;
// clear sync.Pool's
if(poolcleanup != nil) {
c->tiny = nil;
c->tinysize = 0;
}
- // clear defer pools
- p->deferpool = nil;
}
+
+ // Clear central defer pools.
+ // Leave per-P pools alone, they have strictly bounded size.
+ runtime_lock(&runtime_sched->deferlock);
+ for(d = runtime_sched->deferpool; d != nil; d = dlink) {
+ dlink = d->link;
+ d->link = nil;
+ }
+ runtime_sched->deferpool = nil;
+ runtime_unlock(&runtime_sched->deferlock);
}
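
For reference, the unlink-then-drop pattern used above (sever each node's link before discarding the head, so freed entries do not keep one another reachable) looks like this in isolation; Node, pool_head and the lock comments are hypothetical stand-ins for Defer, runtime_sched->deferpool and deferlock:

#include <stddef.h>

struct Node { struct Node *link; };

static struct Node *pool_head;

static void
flush_pool (void)
{
  struct Node *n, *next;

  /* lock (&pool_lock);  -- the real code holds deferlock here */
  for (n = pool_head; n != NULL; n = next)
    {
      next = n->link;
      n->link = NULL;    /* break the chain so each node can be freed alone */
    }
  pool_head = NULL;
  /* unlock (&pool_lock); */
}
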
typedef struct Workbuf Workbuf;
// without a lock will do the gc instead.
m = runtime_m();
pmstats = mstats();
- if(!pmstats->enablegc || runtime_g() == m->g0 || m->locks > 0 || runtime_panicking || m->preemptoff.len > 0)
+ if(!pmstats->enablegc || runtime_g() == m->g0 || m->locks > 0 || runtime_panicking() || m->preemptoff.len > 0)
return;
if(gcpercent == GcpercentUnknown) { // first time through
// license that can be found in the LICENSE file.
#include "runtime.h"
-#include "malloc.h"
-#include "go-panic.h"
-// Code related to defer, panic and recover.
-
-uint32 runtime_panicking;
-static Lock paniclk;
-
-// Allocate a Defer, usually using per-P pool.
-// Each defer must be released with freedefer.
-Defer*
-runtime_newdefer()
-{
- Defer *d;
- P *p;
-
- d = nil;
- p = (P*)runtime_m()->p;
- d = p->deferpool;
- if(d)
- p->deferpool = d->next;
- if(d == nil) {
- // deferpool is empty
- d = runtime_malloc(sizeof(Defer));
- }
- return d;
-}
-
-// Free the given defer.
-// The defer cannot be used after this call.
-void
-runtime_freedefer(Defer *d)
-{
- P *p;
-
- if(d->special)
- return;
- p = (P*)runtime_m()->p;
- d->next = p->deferpool;
- p->deferpool = d;
- // No need to wipe out pointers in argp/pc/fn/args,
- // because we empty the pool before GC.
-}
-
-// Run all deferred functions for the current goroutine.
-// This is noinline for go_can_recover.
-static void __go_rundefer (void) __attribute__ ((noinline));
-static void
-__go_rundefer(void)
-{
- G *g;
- Defer *d;
-
- g = runtime_g();
- while((d = g->_defer) != nil) {
- void (*pfn)(void*);
-
- g->_defer = d->next;
- pfn = (void (*) (void *))d->pfn;
- d->pfn = 0;
- if (pfn != nil)
- (*pfn)(d->arg);
- runtime_freedefer(d);
- }
-}
-
-void
-runtime_startpanic(void)
-{
- G *g;
- M *m;
-
- g = runtime_g();
- m = g->m;
- if(runtime_mheap.cachealloc.size == 0) { // very early
- runtime_printf("runtime: panic before malloc heap initialized\n");
- m->mallocing = 1; // tell rest of panic not to try to malloc
- } else if(m->mcache == nil) // can happen if called from signal handler or throw
- m->mcache = runtime_allocmcache();
- switch(m->dying) {
- case 0:
- m->dying = 1;
- g->writebuf.__values = nil;
- g->writebuf.__count = 0;
- g->writebuf.__capacity = 0;
- runtime_xadd(&runtime_panicking, 1);
- runtime_lock(&paniclk);
- if(runtime_debug.schedtrace > 0 || runtime_debug.scheddetail > 0)
- runtime_schedtrace(true);
- runtime_freezetheworld();
- return;
- case 1:
- // Something failed while panicing, probably the print of the
- // argument to panic(). Just print a stack trace and exit.
- m->dying = 2;
- runtime_printf("panic during panic\n");
- runtime_dopanic(0);
- runtime_exit(3);
- case 2:
- // This is a genuine bug in the runtime, we couldn't even
- // print the stack trace successfully.
- m->dying = 3;
- runtime_printf("stack trace unavailable\n");
- runtime_exit(4);
- default:
- // Can't even print! Just exit.
- runtime_exit(5);
- }
-}
-
-void
-runtime_dopanic(int32 unused __attribute__ ((unused)))
-{
- G *g;
- static bool didothers;
- bool crash;
- int32 t;
-
- g = runtime_g();
- if(g->sig != 0) {
- runtime_printf("[signal %x code=%p addr=%p",
- g->sig, (void*)g->sigcode0, (void*)g->sigcode1);
- if (g->sigpc != 0)
- runtime_printf(" pc=%p", g->sigpc);
- runtime_printf("]\n");
- }
-
- if((t = runtime_gotraceback(&crash)) > 0){
- if(g != runtime_m()->g0) {
- runtime_printf("\n");
- runtime_goroutineheader(g);
- runtime_traceback(0);
- runtime_printcreatedby(g);
- } else if(t >= 2 || runtime_m()->throwing > 0) {
- runtime_printf("\nruntime stack:\n");
- runtime_traceback(0);
- }
- if(!didothers) {
- didothers = true;
- runtime_tracebackothers(g);
- }
- }
- runtime_unlock(&paniclk);
- if(runtime_xadd(&runtime_panicking, -1) != 0) {
- // Some other m is panicking too.
- // Let it print what it needs to print.
- // Wait forever without chewing up cpu.
- // It will exit when it's done.
- static Lock deadlock;
- runtime_lock(&deadlock);
- runtime_lock(&deadlock);
- }
-
- if(crash)
- runtime_crash();
-
- runtime_exit(2);
-}
-
-bool
-runtime_canpanic(G *gp)
-{
- M *m = runtime_m();
- byte g;
-
- USED(&g); // don't use global g, it points to gsignal
-
- // Is it okay for gp to panic instead of crashing the program?
- // Yes, as long as it is running Go code, not runtime code,
- // and not stuck in a system call.
- if(gp == nil || gp != m->curg)
- return false;
- if(m->locks-m->softfloat != 0 || m->mallocing != 0 || m->throwing != 0 || m->gcing != 0 || m->dying != 0)
- return false;
- if(gp->atomicstatus != _Grunning)
- return false;
-#ifdef GOOS_windows
- if(m->libcallsp != 0)
- return false;
-#endif
- return true;
-}
+extern void gothrow(String) __attribute__((noreturn));
+extern void gothrow(String) __asm__(GOSYM_PREFIX "runtime.throw");
void
runtime_throw(const char *s)
{
- M *mp;
-
- mp = runtime_m();
- if(mp->throwing == 0)
- mp->throwing = 1;
- runtime_startpanic();
- runtime_printf("fatal error: %s\n", s);
- runtime_dopanic(0);
- *(int32*)0 = 0; // not reached
- runtime_exit(1); // even more not reached
-}
-
-void throw(String) __asm__ (GOSYM_PREFIX "runtime.throw");
-void
-throw(String s)
-{
- M *mp;
-
- mp = runtime_m();
- if(mp->throwing == 0)
- mp->throwing = 1;
- runtime_startpanic();
- runtime_printf("fatal error: %S\n", s);
- runtime_dopanic(0);
- *(int32*)0 = 0; // not reached
- runtime_exit(1); // even more not reached
+ gothrow(runtime_gostringnocopy((const byte *)s));
}
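
runtime_throw is now a thin wrapper: it converts the C string to a Go String and forwards to the Go-implemented runtime.throw through the gothrow asm alias. A hedged sketch of that aliasing pattern, with purely hypothetical names and an assumed empty symbol prefix:

/* Sketch only: bind a C declaration to a Go function's assembler symbol.
   MY_PREFIX and mypkg.fail are assumptions, not real runtime symbols.  */
#define MY_PREFIX ""

struct my_string { const unsigned char *str; long len; };

extern void my_fail (struct my_string)
  __asm__ (MY_PREFIX "mypkg.fail") __attribute__ ((noreturn));

void
report_fatal (const char *msg, long len)
{
  struct my_string s = { (const unsigned char *) msg, len };
  my_fail (s);    /* control never returns; the Go side aborts the process */
}
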
void
runtime_newErrorCString(s, &err);
runtime_panic(err);
}
-
-void runtime_Goexit (void) __asm__ (GOSYM_PREFIX "runtime.Goexit");
-
-void
-runtime_Goexit(void)
-{
- __go_rundefer();
- runtime_goexit();
-}
-
-void
-runtime_panicdivide(void)
-{
- runtime_panicstring("integer divide by zero");
-}
param = g->param;
g->param = nil;
fn(param);
- runtime_goexit();
+ runtime_goexit1();
}
// Switch context to a different goroutine. This is like longjmp.
//
// Design doc at http://golang.org/s/go11sched.
-typedef struct schedt Sched;
-
enum
{
// Number of goroutine ids to grab from runtime_sched->goidgen to local per-P cache at once.
extern Sched* runtime_getsched() __asm__ (GOSYM_PREFIX "runtime.getsched");
-static Sched* runtime_sched;
+Sched* runtime_sched;
int32 runtime_gomaxprocs;
uint32 runtime_needextram = 1;
M runtime_m0;
// Defer unlock so that runtime.Goexit during init does the unlock too.
d.pfn = (uintptr)(void*)initDone;
- d.next = g->_defer;
+ d.link = g->_defer;
d.arg = (void*)-1;
d._panic = g->_panic;
d.retaddr = 0;
if(g->_defer != &d || (void*)d.pfn != initDone)
runtime_throw("runtime: bad defer entry after init");
- g->_defer = d.next;
+ g->_defer = d.link;
runtime_unlockOSThread();
// For gccgo we have to wait until after main is initialized
// another goroutine at the same time as main returns,
// let the other goroutine finish printing the panic trace.
// Once it does, it will exit. See issue 3934.
- if(runtime_panicking)
+ if(runtime_panicking())
runtime_park(nil, nil, "panicwait");
runtime_exit(0);
// Need to mark it as nosplit, because it runs with sp > stackbase (as runtime_lessstack).
// Since it does not return it does not matter. But if it is preempted
// at the split stack check, GC will complain about inconsistent sp.
-void runtime_goexit(void) __attribute__ ((noinline));
+void runtime_goexit1(void) __attribute__ ((noinline));
void
-runtime_goexit(void)
+runtime_goexit1(void)
{
if(g->atomicstatus != _Grunning)
runtime_throw("bad g status");
runtime_mcall(goexit0);
}
-// runtime_goexit continuation on g0.
+// runtime_goexit1 continuation on g0.
static void
goexit0(G *gp)
{
bool pempty;
G *gp;
P *p;
+ intgo j;
old = runtime_gomaxprocs;
if(old < 0 || old > _MaxGomaxprocs || new <= 0 || new >_MaxGomaxprocs)
p = (P*)runtime_mallocgc(sizeof(*p), 0, FlagNoInvokeGC);
p->id = i;
p->status = _Pgcstop;
+ p->deferpool.__values = &p->deferpoolbuf[0];
+ p->deferpool.__count = 0;
+ p->deferpool.__capacity = nelem(p->deferpoolbuf);
runtime_atomicstorep(&runtime_allp[i], p);
}
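
The per-P defer pool is now a Go slice backed by a fixed-size buffer embedded in the P structure, so initialization just points the slice header at that buffer. A minimal sketch of the same setup with illustrative struct, field types, and buffer length:

/* Illustrative slice-over-buffer setup; the field names mirror the
   __go_open_array layout used above, the element type is a stand-in.  */
struct my_open_array
{
  void *__values;
  long  __count;
  long  __capacity;
};

#define POOLBUF_LEN 32

struct my_p
{
  struct my_open_array deferpool;
  void *deferpoolbuf[POOLBUF_LEN];
};

static void
my_p_init (struct my_p *p)
{
  p->deferpool.__values = &p->deferpoolbuf[0];
  p->deferpool.__count = 0;                 /* pool starts empty */
  p->deferpool.__capacity = POOLBUF_LEN;    /* bounded, never grows */
}
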
if(p->mcache == nil) {
// free unused P's
for(i = new; i < old; i++) {
p = runtime_allp[i];
+ for(j = 0; j < p->deferpool.__count; j++) {
+ ((struct _defer**)p->deferpool.__values)[j] = nil;
+ }
+ p->deferpool.__count = 0;
runtime_freemcache(p->mcache);
p->mcache = nil;
gfpurge(p);
// freezetheworld will cause all running threads to block.
// And runtime will essentially enter into deadlock state,
// except that there is a thread that will call runtime_exit soon.
- if(runtime_panicking > 0)
+ if(runtime_panicking() > 0)
return;
if(run < 0) {
runtime_printf("runtime: checkdead: nmidle=%d nmidlelocked=%d mcount=%d\n",
typedef struct cgoMal CgoMal;
typedef struct PollDesc PollDesc;
typedef struct sudog SudoG;
+typedef struct schedt Sched;
typedef struct __go_open_array Slice;
typedef struct iface Iface;
extern G* runtime_lastg;
extern M* runtime_allm;
extern P** runtime_allp;
+extern Sched* runtime_sched;
extern int32 runtime_gomaxprocs;
extern uint32 runtime_needextram;
-extern uint32 runtime_panicking;
+extern uint32 runtime_panicking(void)
+ __asm__ (GOSYM_PREFIX "runtime.getPanicking");
extern int8* runtime_goos;
extern int32 runtime_ncpu;
extern void (*runtime_sysargs)(int32, uint8**);
void runtime_dropm(void)
__asm__ (GOSYM_PREFIX "runtime.dropm");
void runtime_signalstack(byte*, int32);
-MCache* runtime_allocmcache(void);
+MCache* runtime_allocmcache(void)
+ __asm__ (GOSYM_PREFIX "runtime.allocmcache");
void runtime_freemcache(MCache*);
void runtime_mallocinit(void);
void runtime_mprofinit(void);
#define runtime_breakpoint() __builtin_trap()
void runtime_gosched(void);
void runtime_gosched0(G*);
-void runtime_schedtrace(bool);
+void runtime_schedtrace(bool)
+ __asm__ (GOSYM_PREFIX "runtime.schedtrace");
void runtime_park(bool(*)(G*, void*), void*, const char*);
void runtime_parkunlock(Lock*, const char*);
void runtime_tsleep(int64, const char*);
M* runtime_newm(void);
-void runtime_goexit(void);
+void runtime_goexit1(void)
+ __asm__ (GOSYM_PREFIX "runtime.goexit1");
void runtime_entersyscall(int32)
__asm__ (GOSYM_PREFIX "runtime.entersyscall");
void runtime_entersyscallblock(int32)
void runtime_dopanic(int32) __attribute__ ((noreturn));
void runtime_startpanic(void)
__asm__ (GOSYM_PREFIX "runtime.startpanic");
-void runtime_freezetheworld(void);
+void runtime_freezetheworld(void)
+ __asm__ (GOSYM_PREFIX "runtime.freezetheworld");
void runtime_unwindstack(G*, byte*);
void runtime_sigprof()
__asm__ (GOSYM_PREFIX "runtime.sigprof");
void reflect_call(const struct __go_func_type *, FuncVal *, _Bool, _Bool,
void **, void **)
__asm__ (GOSYM_PREFIX "reflect.call");
-#define runtime_panic __go_panic
+void runtime_panic(Eface)
+ __asm__ (GOSYM_PREFIX "runtime.gopanic");
+void runtime_panic(Eface)
+ __attribute__ ((noreturn));
/*
* runtime c-called (but written in Go)
*/
-void runtime_printany(Eface)
- __asm__ (GOSYM_PREFIX "runtime.Printany");
void runtime_newTypeAssertionError(const String*, const String*, const String*, const String*, Eface*)
__asm__ (GOSYM_PREFIX "runtime.NewTypeAssertionError");
void runtime_newErrorCString(const char*, Eface*)