hash uintptr
size uintptr
nstk uintptr
+ skip int // skip count to apply in fixupStack, after inline frames are expanded
}
// A memRecord is the bucket data for a bucket of type memProfile,
// part of the memory profile.
}
-// newBucket allocates a bucket with the given type and number of stack entries.
+// newBucket allocates a bucket with the given type, number of stack entries,
+// and skip count to record in the bucket.
-func newBucket(typ bucketType, nstk int) *bucket {
+func newBucket(typ bucketType, nstk int, skipCount int) *bucket {
size := payloadOffset(typ, uintptr(nstk))
switch typ {
default:
bucketmem += size
b.typ = typ
b.nstk = uintptr(nstk)
+ b.skip = skipCount
return b
}
}
-// Return the bucket for stk[0:nstk], allocating new bucket if needed.
+// Return the bucket for stk[0:nstk], allocating a new bucket if needed.
+// The skip count is recorded in the bucket for later use by fixupStack.
-func stkbucket(typ bucketType, size uintptr, stk []uintptr, alloc bool) *bucket {
+func stkbucket(typ bucketType, size uintptr, skip int, stk []uintptr, alloc bool) *bucket {
if buckhash == nil {
buckhash = (*[buckHashSize]*bucket)(sysAlloc(unsafe.Sizeof(*buckhash), &memstats.buckhash_sys))
if buckhash == nil {
}
// Create new bucket.
- b := newBucket(typ, len(stk))
+ b := newBucket(typ, len(stk), skip)
copy(b.stk(), stk)
b.hash = h
b.size = size
// Called by malloc to record a profiled block.
func mProf_Malloc(p unsafe.Pointer, size uintptr) {
var stk [maxStack]uintptr
- nstk := callersRaw(1, stk[:])
+ nstk := callersRaw(stk[:])
lock(&proflock)
- b := stkbucket(memProfile, size, stk[:nstk], true)
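+ // Record the skip count in the bucket rather than applying it here;
+ // it is applied later, in fixupStack, once inline frames have been expanded.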
+ skip := 1
+ b := stkbucket(memProfile, size, skip, stk[:nstk], true)
c := mProf.cycle
mp := b.mp()
mpc := &mp.future[(c+2)%uint32(len(mp.future))]
var nstk int
var stk [maxStack]uintptr
if gp.m.curg == nil || gp.m.curg == gp {
- nstk = callersRaw(skip, stk[:])
+ nstk = callersRaw(stk[:])
} else {
// FIXME: This should get a traceback of gp.m.curg.
// nstk = gcallers(gp.m.curg, skip, stk[:])
- nstk = callersRaw(skip, stk[:])
+ nstk = callersRaw(stk[:])
}
lock(&proflock)
- b := stkbucket(which, 0, stk[:nstk], true)
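+ // As in mProf_Malloc, the skip count is stored in the bucket and
+ // applied in fixupStack after inline frames are expanded.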
+ b := stkbucket(which, 0, skip, stk[:nstk], true)
b.bp().count++
b.bp().cycles += cycles
unlock(&proflock)
// later. Note: there is code in go-callers.c's backtrace_full callback()
// function that performs very similar fixups; these two code paths
// should be kept in sync.
-func fixupStack(stk []uintptr, canonStack *[maxStack]uintptr, size uintptr) int {
+func fixupStack(stk []uintptr, skip int, canonStack *[maxStack]uintptr, size uintptr) int {
var cidx int
var termTrace bool
+ // Increase the skip count to take into account the frames corresponding
+ // to runtime.callersRaw and to the C routine that it invokes.
+ skip += 2
for _, pc := range stk {
// Subtract 1 from PC to undo the 1 we added in callback in
// go-callers.c.
break
}
}
+
+ // Apply skip count. Needs to be done after expanding inline frames.
+ if skip != 0 {
+ if skip >= cidx {
+ return 0
+ }
+ copy(canonStack[:cidx-skip], canonStack[skip:])
+ return cidx - skip
+ }
+
return cidx
}
// the new bucket.
func fixupBucket(b *bucket) {
var canonStack [maxStack]uintptr
- frames := fixupStack(b.stk(), &canonStack, b.size)
- cb := stkbucket(prunedProfile, b.size, canonStack[:frames], true)
+ frames := fixupStack(b.stk(), b.skip, &canonStack, b.size)
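+ // fixupStack has already applied the skip count, so the pruned
+ // bucket is created with a skip of zero.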
+ cb := stkbucket(prunedProfile, b.size, 0, canonStack[:frames], true)
switch b.typ {
default:
throw("invalid profile bucket type")
struct callersRaw_data
{
uintptr* pcbuf;
- int skip;
int index;
int max;
};
{
struct callersRaw_data *arg = (struct callersRaw_data *) data;
- if (arg->skip > 0)
- {
- --arg->skip;
- return 0;
- }
-
/* On the call to backtrace_simple the pc value was most likely
decremented if there was a normal call, since the pc referred to
the instruction where the call returned and not the call itself.
/* runtime_callersRaw is similar to runtime_callers() above, but
it returns raw PC values as opposed to file/func/line locations. */
int32
-runtime_callersRaw (int32 skip, uintptr *pcbuf, int32 m)
+runtime_callersRaw (uintptr *pcbuf, int32 m)
{
struct callersRaw_data data;
struct backtrace_state* state;
data.pcbuf = pcbuf;
- data.skip = skip + 1;
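+ /* Frame skipping is now handled on the Go side, in fixupStack,
+    after inline frames have been expanded.  */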
data.index = 0;
data.max = m;
runtime_xadd (&__go_runtime_in_callers, 1);