-6c9070324d5b7c8483bc7c17b0a8faaa1fb1ae30
+681580a3afc687ba3ff9ef240c67e8630e4306e6
The first line of this file holds the git revision number of the last
merge done from the gofrontend repository.
Struct_type* st = closure_var->var_value()->type()->deref()->struct_type();
Expression* cv = Expression::make_struct_composite_literal(st, initializer,
location);
+
+ // When compiling the runtime, closures do not escape. When escape
+ // analysis becomes the default, and applies to closures, this
+ // should be changed to make it an error if a closure escapes.
+ if (this->gogo_->compiling_runtime()
+ && this->gogo_->package_name() == "runtime")
+ {
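+ // Build the closure in a temporary and return its address, marking
+ // the address-of expression as non-escaping so the temporary can
+ // stay on the stack.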
+ Temporary_statement* ctemp = Statement::make_temporary(st, cv, location);
+ this->gogo_->add_statement(ctemp);
+ Expression* ref = Expression::make_temporary_reference(ctemp, location);
+ Expression* addr = Expression::make_unary(OPERATOR_AND, ref, location);
+ addr->unary_expression()->set_does_not_escape();
+ return addr;
+ }
+
return Expression::make_heap_expression(cv, location);
}
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-import "unsafe"
-
-// Note: the MemStats struct should be kept in sync with
-// struct MStats in malloc.h
-
-// A MemStats records statistics about the memory allocator.
-type MemStats struct {
- // General statistics.
- Alloc uint64 // bytes allocated and still in use
- TotalAlloc uint64 // bytes allocated (even if freed)
- Sys uint64 // bytes obtained from system (sum of XxxSys below)
- Lookups uint64 // number of pointer lookups
- Mallocs uint64 // number of mallocs
- Frees uint64 // number of frees
-
- // Main allocation heap statistics.
- HeapAlloc uint64 // bytes allocated and still in use
- HeapSys uint64 // bytes obtained from system
- HeapIdle uint64 // bytes in idle spans
- HeapInuse uint64 // bytes in non-idle span
- HeapReleased uint64 // bytes released to the OS
- HeapObjects uint64 // total number of allocated objects
-
- // Low-level fixed-size structure allocator statistics.
- // Inuse is bytes used now.
- // Sys is bytes obtained from system.
- StackInuse uint64 // bootstrap stacks
- StackSys uint64
- MSpanInuse uint64 // mspan structures
- MSpanSys uint64
- MCacheInuse uint64 // mcache structures
- MCacheSys uint64
- BuckHashSys uint64 // profiling bucket hash table
- GCSys uint64 // GC metadata
- OtherSys uint64 // other system allocations
-
- // Garbage collector statistics.
- NextGC uint64 // next run in HeapAlloc time (bytes)
- LastGC uint64 // last run in absolute time (ns)
- PauseTotalNs uint64
- PauseNs [256]uint64 // circular buffer of recent GC pause times, most recent at [(NumGC+255)%256]
- PauseEnd [256]uint64 // circular buffer of recent GC pause end times
- NumGC uint32
- GCCPUFraction float64 // fraction of CPU time used by GC
- EnableGC bool
- DebugGC bool
-
- // Per-size allocation statistics.
- // 61 is NumSizeClasses in the C code.
- BySize [61]struct {
- Size uint32
- Mallocs uint64
- Frees uint64
- }
-}
-
-var Sizeof_C_MStats uintptr // filled in by malloc.goc
-
-func init() {
- var memStats MemStats
- if Sizeof_C_MStats != unsafe.Sizeof(memStats) {
- println(Sizeof_C_MStats, unsafe.Sizeof(memStats))
- panic("MStats vs MemStatsType size mismatch")
- }
-}
-
-// ReadMemStats populates m with memory allocator statistics.
-func ReadMemStats(m *MemStats)
-
-// GC runs a garbage collection.
-func GC()
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Memory statistics
+
+package runtime
+
+import (
+ "runtime/internal/atomic"
+ "runtime/internal/sys"
+ "unsafe"
+)
+
+// Statistics.
+// If you edit this structure, also edit type MemStats below.
+type mstats struct {
+ // General statistics.
+ alloc uint64 // bytes allocated and not yet freed
+ total_alloc uint64 // bytes allocated (even if freed)
+ sys uint64 // bytes obtained from system (should be sum of xxx_sys below, no locking, approximate)
+ nlookup uint64 // number of pointer lookups
+ nmalloc uint64 // number of mallocs
+ nfree uint64 // number of frees
+
+ // Statistics about malloc heap.
+ // protected by mheap.lock
+ heap_alloc uint64 // bytes allocated and not yet freed (same as alloc above)
+ heap_sys uint64 // bytes obtained from system
+ heap_idle uint64 // bytes in idle spans
+ heap_inuse uint64 // bytes in non-idle spans
+ heap_released uint64 // bytes released to the OS
+ heap_objects uint64 // total number of allocated objects
+
+ // Statistics about allocation of low-level fixed-size structures.
+ // Protected by FixAlloc locks.
+ stacks_inuse uint64 // this number is included in heap_inuse above
+ stacks_sys uint64 // always 0 in mstats
+ mspan_inuse uint64 // mspan structures
+ mspan_sys uint64
+ mcache_inuse uint64 // mcache structures
+ mcache_sys uint64
+ buckhash_sys uint64 // profiling bucket hash table
+ gc_sys uint64
+ other_sys uint64
+
+ // Statistics about garbage collector.
+ // Protected by mheap or stopping the world during GC.
+ next_gc uint64 // next gc (in heap_live time)
+ last_gc uint64 // last gc (in absolute time)
+ pause_total_ns uint64
+ pause_ns [256]uint64 // circular buffer of recent gc pause lengths
+ pause_end [256]uint64 // circular buffer of recent gc end times (nanoseconds since 1970)
+ numgc uint32
+ gc_cpu_fraction float64 // fraction of CPU time used by GC
+ enablegc bool
+ debuggc bool
+
+ // Statistics about allocation size classes.
+
+ by_size [_NumSizeClasses]struct {
+ size uint32
+ nmalloc uint64
+ nfree uint64
+ }
+
+ // Statistics below here are not exported to Go directly.
+
+ tinyallocs uint64 // number of tiny allocations that didn't cause actual allocation; not exported to Go directly
+
+ // heap_live is the number of bytes considered live by the GC.
+ // That is: retained by the most recent GC plus allocated
+ // since then. heap_live <= heap_alloc, since heap_alloc
+ // includes unmarked objects that have not yet been swept (and
+ // hence goes up as we allocate and down as we sweep) while
+ // heap_live excludes these objects (and hence only goes up
+ // between GCs).
+ //
+ // This is updated atomically without locking. To reduce
+ // contention, this is updated only when obtaining a span from
+ // an mcentral and at this point it counts all of the
+ // unallocated slots in that span (which will be allocated
+ // before that mcache obtains another span from that
+ // mcentral). Hence, it slightly overestimates the "true" live
+ // heap size. It's better to overestimate than to
+ // underestimate because 1) this triggers the GC earlier than
+ // necessary rather than potentially too late and 2) this
+ // leads to a conservative GC rate rather than a GC rate that
+ // is potentially too low.
+ //
+ // Whenever this is updated, call traceHeapAlloc() and
+ // gcController.revise().
+ heap_live uint64
+
+ // heap_scan is the number of bytes of "scannable" heap. This
+ // is the live heap (as counted by heap_live), but omitting
+ // no-scan objects and no-scan tails of objects.
+ //
+ // Whenever this is updated, call gcController.revise().
+ heap_scan uint64
+
+ // heap_marked is the number of bytes marked by the previous
+ // GC. After mark termination, heap_live == heap_marked, but
+ // unlike heap_live, heap_marked does not change until the
+ // next mark termination.
+ heap_marked uint64
+
+ // heap_reachable is an estimate of the reachable heap bytes
+ // at the end of the previous GC.
+ heap_reachable uint64
+}
+
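+// memstats is shared with the C runtime code via getMstats, defined below.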
+var memstats mstats
+
+// A MemStats records statistics about the memory allocator.
+type MemStats struct {
+ // General statistics.
+ Alloc uint64 // bytes allocated and not yet freed
+ TotalAlloc uint64 // bytes allocated (even if freed)
+ Sys uint64 // bytes obtained from system (sum of XxxSys below)
+ Lookups uint64 // number of pointer lookups
+ Mallocs uint64 // number of mallocs
+ Frees uint64 // number of frees
+
+ // Main allocation heap statistics.
+ HeapAlloc uint64 // bytes allocated and not yet freed (same as Alloc above)
+ HeapSys uint64 // bytes obtained from system
+ HeapIdle uint64 // bytes in idle spans
+ HeapInuse uint64 // bytes in non-idle spans
+ HeapReleased uint64 // bytes released to the OS
+ HeapObjects uint64 // total number of allocated objects
+
+ // Low-level fixed-size structure allocator statistics.
+ // Inuse is bytes used now.
+ // Sys is bytes obtained from system.
+ StackInuse uint64 // bytes used by stack allocator
+ StackSys uint64
+ MSpanInuse uint64 // mspan structures
+ MSpanSys uint64
+ MCacheInuse uint64 // mcache structures
+ MCacheSys uint64
+ BuckHashSys uint64 // profiling bucket hash table
+ GCSys uint64 // GC metadata
+ OtherSys uint64 // other system allocations
+
+ // Garbage collector statistics.
+ NextGC uint64 // next collection will happen when HeapAlloc ≥ this amount
+ LastGC uint64 // end time of last collection (nanoseconds since 1970)
+ PauseTotalNs uint64
+ PauseNs [256]uint64 // circular buffer of recent GC pause durations, most recent at [(NumGC+255)%256]
+ PauseEnd [256]uint64 // circular buffer of recent GC pause end times
+ NumGC uint32
+ GCCPUFraction float64 // fraction of CPU time used by GC
+ EnableGC bool
+ DebugGC bool
+
+ // Per-size allocation statistics.
+ // 61 is NumSizeClasses in the C code.
+ BySize [61]struct {
+ Size uint32
+ Mallocs uint64
+ Frees uint64
+ }
+}
+
+// Size of the trailing by_size array differs between Go and C,
+// and all data after by_size is local to runtime, not exported.
+// NumSizeClasses was changed, but we cannot change the Go struct because of backward compatibility.
+// sizeof_C_MStats is the size of the Go struct as the C code sees it.
+var sizeof_C_MStats = unsafe.Offsetof(memstats.by_size) + 61*unsafe.Sizeof(memstats.by_size[0])
+
+func init() {
+ var memStats MemStats
+ if sizeof_C_MStats != unsafe.Sizeof(memStats) {
+ println(sizeof_C_MStats, unsafe.Sizeof(memStats))
+ throw("MStats vs MemStatsType size mismatch")
+ }
+}
+
+// ReadMemStats populates m with memory allocator statistics.
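+// It stops the world so that the statistics form a consistent snapshot.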
+func ReadMemStats(m *MemStats) {
+ stopTheWorld("read mem stats")
+
+ systemstack(func() {
+ readmemstats_m(m)
+ })
+
+ startTheWorld()
+}
+
+func readmemstats_m(stats *MemStats) {
+ updatememstats(nil)
+
+ // The size of the trailing by_size array differs between Go and C:
+ // NumSizeClasses was changed, but we cannot change the Go struct because of backward compatibility.
+ memmove(unsafe.Pointer(stats), unsafe.Pointer(&memstats), sizeof_C_MStats)
+
+ // Stack numbers are part of the heap numbers; separate those out for user consumption.
+ stats.StackSys += stats.StackInuse
+ stats.HeapInuse -= stats.StackInuse
+ stats.HeapSys -= stats.StackInuse
+}
+
+// For gccgo this is in runtime/mgc0.c.
+func updatememstats(stats *gcstats)
+
+/*
+For gccgo these are still in runtime/mgc0.c.
+
+//go:linkname readGCStats runtime/debug.readGCStats
+func readGCStats(pauses *[]uint64) {
+ systemstack(func() {
+ readGCStats_m(pauses)
+ })
+}
+
+func readGCStats_m(pauses *[]uint64) {
+ p := *pauses
+ // Calling code in runtime/debug should make the slice large enough.
+ if cap(p) < len(memstats.pause_ns)+3 {
+ throw("short slice passed to readGCStats")
+ }
+
+ // Pass back: pauses, pause ends, last gc (absolute time), number of gc, total pause ns.
+ lock(&mheap_.lock)
+
+ n := memstats.numgc
+ if n > uint32(len(memstats.pause_ns)) {
+ n = uint32(len(memstats.pause_ns))
+ }
+
+ // The pause buffer is circular. The most recent pause is at
+ // pause_ns[(numgc-1)%len(pause_ns)], and then backward
+ // from there to go back farther in time. We deliver the times
+ // most recent first (in p[0]).
+ p = p[:cap(p)]
+ for i := uint32(0); i < n; i++ {
+ j := (memstats.numgc - 1 - i) % uint32(len(memstats.pause_ns))
+ p[i] = memstats.pause_ns[j]
+ p[n+i] = memstats.pause_end[j]
+ }
+
+ p[n+n] = memstats.last_gc
+ p[n+n+1] = uint64(memstats.numgc)
+ p[n+n+2] = memstats.pause_total_ns
+ unlock(&mheap_.lock)
+ *pauses = p[:n+n+3]
+}
+
+//go:nowritebarrier
+func updatememstats(stats *gcstats) {
+ if stats != nil {
+ *stats = gcstats{}
+ }
+ for mp := allm; mp != nil; mp = mp.alllink {
+ if stats != nil {
+ src := (*[unsafe.Sizeof(gcstats{}) / 8]uint64)(unsafe.Pointer(&mp.gcstats))
+ dst := (*[unsafe.Sizeof(gcstats{}) / 8]uint64)(unsafe.Pointer(stats))
+ for i, v := range src {
+ dst[i] += v
+ }
+ mp.gcstats = gcstats{}
+ }
+ }
+
+ memstats.mcache_inuse = uint64(mheap_.cachealloc.inuse)
+ memstats.mspan_inuse = uint64(mheap_.spanalloc.inuse)
+ memstats.sys = memstats.heap_sys + memstats.stacks_sys + memstats.mspan_sys +
+ memstats.mcache_sys + memstats.buckhash_sys + memstats.gc_sys + memstats.other_sys
+
+ // Calculate memory allocator stats.
+ // During program execution we only count number of frees and amount of freed memory.
+ // Current number of alive object in the heap and amount of alive heap memory
+ // are calculated by scanning all spans.
+ // Total number of mallocs is calculated as number of frees plus number of alive objects.
+ // Similarly, total amount of allocated memory is calculated as amount of freed memory
+ // plus amount of alive heap memory.
+ memstats.alloc = 0
+ memstats.total_alloc = 0
+ memstats.nmalloc = 0
+ memstats.nfree = 0
+ for i := 0; i < len(memstats.by_size); i++ {
+ memstats.by_size[i].nmalloc = 0
+ memstats.by_size[i].nfree = 0
+ }
+
+ // Flush MCache's to MCentral.
+ systemstack(flushallmcaches)
+
+ // Aggregate local stats.
+ cachestats()
+
+ // Scan all spans and count number of alive objects.
+ lock(&mheap_.lock)
+ for i := uint32(0); i < mheap_.nspan; i++ {
+ s := h_allspans[i]
+ if s.state != mSpanInUse {
+ continue
+ }
+ if s.sizeclass == 0 {
+ memstats.nmalloc++
+ memstats.alloc += uint64(s.elemsize)
+ } else {
+ memstats.nmalloc += uint64(s.allocCount)
+ memstats.by_size[s.sizeclass].nmalloc += uint64(s.allocCount)
+ memstats.alloc += uint64(s.allocCount) * uint64(s.elemsize)
+ }
+ }
+ unlock(&mheap_.lock)
+
+ // Aggregate by size class.
+ smallfree := uint64(0)
+ memstats.nfree = mheap_.nlargefree
+ for i := 0; i < len(memstats.by_size); i++ {
+ memstats.nfree += mheap_.nsmallfree[i]
+ memstats.by_size[i].nfree = mheap_.nsmallfree[i]
+ memstats.by_size[i].nmalloc += mheap_.nsmallfree[i]
+ smallfree += mheap_.nsmallfree[i] * uint64(class_to_size[i])
+ }
+ memstats.nfree += memstats.tinyallocs
+ memstats.nmalloc += memstats.nfree
+
+ // Calculate derived stats.
+ memstats.total_alloc = memstats.alloc + mheap_.largefree + smallfree
+ memstats.heap_alloc = memstats.alloc
+ memstats.heap_objects = memstats.nmalloc - memstats.nfree
+}
+
+//go:nowritebarrier
+func cachestats() {
+ for i := 0; ; i++ {
+ p := allp[i]
+ if p == nil {
+ break
+ }
+ c := p.mcache
+ if c == nil {
+ continue
+ }
+ purgecachedstats(c)
+ }
+}
+
+//go:nowritebarrier
+func flushallmcaches() {
+ for i := 0; ; i++ {
+ p := allp[i]
+ if p == nil {
+ break
+ }
+ c := p.mcache
+ if c == nil {
+ continue
+ }
+ c.releaseAll()
+ stackcache_clear(c)
+ }
+}
+
+//go:nosplit
+func purgecachedstats(c *mcache) {
+ // Protected by either heap or GC lock.
+ h := &mheap_
+ memstats.heap_scan += uint64(c.local_scan)
+ c.local_scan = 0
+ memstats.tinyallocs += uint64(c.local_tinyallocs)
+ c.local_tinyallocs = 0
+ memstats.nlookup += uint64(c.local_nlookup)
+ c.local_nlookup = 0
+ h.largefree += uint64(c.local_largefree)
+ c.local_largefree = 0
+ h.nlargefree += uint64(c.local_nlargefree)
+ c.local_nlargefree = 0
+ for i := 0; i < len(c.local_nsmallfree); i++ {
+ h.nsmallfree[i] += uint64(c.local_nsmallfree[i])
+ c.local_nsmallfree[i] = 0
+ }
+}
+
+*/
+
+// Atomically increases a given *system* memory stat. We are counting on this
+// stat never overflowing a uintptr, so this function must only be used for
+// system memory stats.
+//
+// The current implementation for little endian architectures is based on
+// xadduintptr(), which is less than ideal: xadd64() should really be used.
+// Using xadduintptr() is a stop-gap solution until arm supports xadd64() that
+// doesn't use locks. (Locks are a problem as they require a valid G, which
+// restricts their usability.)
+//
+// A side-effect of using xadduintptr() is that we need to check for
+// overflow errors.
+//go:nosplit
+func mSysStatInc(sysStat *uint64, n uintptr) {
+ if sys.BigEndian != 0 {
+ atomic.Xadd64(sysStat, int64(n))
+ return
+ }
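+ // Xadduintptr returns the updated value; since the stat only holds
+ // non-negative counts, a result smaller than the amount just added
+ // means the addition wrapped around.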
+ if val := atomic.Xadduintptr((*uintptr)(unsafe.Pointer(sysStat)), n); val < n {
+ print("runtime: stat overflow: val ", val, ", n ", n, "\n")
+ exit(2)
+ }
+}
+
+// Atomically decreases a given *system* memory stat. Same comments as
+// mSysStatInc apply.
+//go:nosplit
+func mSysStatDec(sysStat *uint64, n uintptr) {
+ if sys.BigEndian != 0 {
+ atomic.Xadd64(sysStat, -int64(n))
+ return
+ }
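+ // val is the stat after subtracting n; adding n back recovers the old
+ // value, so a result smaller than n means the stat went below zero.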
+ if val := atomic.Xadduintptr((*uintptr)(unsafe.Pointer(sysStat)), uintptr(-int64(n))); val+n < n {
+ print("runtime: stat underflow: val ", val, ", n ", n, "\n")
+ exit(2)
+ }
+}
// Here for gccgo until we port msize.go.
func roundupsize(uintptr) uintptr
+
+// Here for gccgo until we port mgc.go.
+func GC()
+
+// Here for gccgo until we port proc.go.
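+// Holding worldsema grants an M the right to try to stop the world.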
+var worldsema uint32 = 1
+
+func stopTheWorldWithSema()
+func startTheWorldWithSema()
+
+// For gccgo to call from C code.
+//go:linkname acquireWorldsema runtime.acquireWorldsema
+func acquireWorldsema() {
+ semacquire(&worldsema, false)
+}
+
+// For gccgo to call from C code.
+//go:linkname releaseWorldsema runtime.releaseWorldsema
+func releaseWorldsema() {
+ semrelease(&worldsema)
+}
+
+// Here for gccgo until we port proc.go.
+func stopTheWorld(reason string) {
+ semacquire(&worldsema, false)
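+ // Record the reason and mark this M as stopping the world so that
+ // the allocator will not reschedule it or trigger a GC on it in the
+ // meantime (see the m->preemptoff checks added to mallocgc and runtime_gc).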
+ getg().m.preemptoff = reason
+ getg().m.gcing = 1
+ systemstack(stopTheWorldWithSema)
+}
+
+// Here for gccgo until we port proc.go.
+func startTheWorld() {
+ getg().m.gcing = 0
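+ // Incrementing locks keeps this M from being preempted until
+ // worldsema has been released and preemptoff has been cleared.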
+ getg().m.locks++
+ systemstack(startTheWorldWithSema)
+ // worldsema must be held over startTheWorldWithSema to ensure
+ // gomaxprocs cannot change while worldsema is held.
+ semrelease(&worldsema)
+ getg().m.preemptoff = ""
+ getg().m.locks--
+}
+
+// For gccgo to call from C code, so that the C code and the Go code
+// can share the memstats variable for now.
+//go:linkname getMstats runtime.getMstats
+func getMstats() *mstats {
+ return &memstats
+}
runtime_lock(&lk);
if(hz > 0) {
if(prof == nil) {
- prof = runtime_SysAlloc(sizeof *prof, &mstats.other_sys);
+ prof = runtime_SysAlloc(sizeof *prof, &mstats()->other_sys);
if(prof == nil) {
runtime_printf("runtime: cpu profiling cannot allocate memory\n");
runtime_unlock(&lk);
int32 i;
dumpint(TagMemStats);
- dumpint(mstats.alloc);
- dumpint(mstats.total_alloc);
- dumpint(mstats.sys);
- dumpint(mstats.nlookup);
- dumpint(mstats.nmalloc);
- dumpint(mstats.nfree);
- dumpint(mstats.heap_alloc);
- dumpint(mstats.heap_sys);
- dumpint(mstats.heap_idle);
- dumpint(mstats.heap_inuse);
- dumpint(mstats.heap_released);
- dumpint(mstats.heap_objects);
- dumpint(mstats.stacks_inuse);
- dumpint(mstats.stacks_sys);
- dumpint(mstats.mspan_inuse);
- dumpint(mstats.mspan_sys);
- dumpint(mstats.mcache_inuse);
- dumpint(mstats.mcache_sys);
- dumpint(mstats.buckhash_sys);
- dumpint(mstats.gc_sys);
- dumpint(mstats.other_sys);
- dumpint(mstats.next_gc);
- dumpint(mstats.last_gc);
- dumpint(mstats.pause_total_ns);
+ dumpint(mstats()->alloc);
+ dumpint(mstats()->total_alloc);
+ dumpint(mstats()->sys);
+ dumpint(mstats()->nlookup);
+ dumpint(mstats()->nmalloc);
+ dumpint(mstats()->nfree);
+ dumpint(mstats()->heap_alloc);
+ dumpint(mstats()->heap_sys);
+ dumpint(mstats()->heap_idle);
+ dumpint(mstats()->heap_inuse);
+ dumpint(mstats()->heap_released);
+ dumpint(mstats()->heap_objects);
+ dumpint(mstats()->stacks_inuse);
+ dumpint(mstats()->stacks_sys);
+ dumpint(mstats()->mspan_inuse);
+ dumpint(mstats()->mspan_sys);
+ dumpint(mstats()->mcache_inuse);
+ dumpint(mstats()->mcache_sys);
+ dumpint(mstats()->buckhash_sys);
+ dumpint(mstats()->gc_sys);
+ dumpint(mstats()->other_sys);
+ dumpint(mstats()->next_gc);
+ dumpint(mstats()->last_gc);
+ dumpint(mstats()->pause_total_ns);
for(i = 0; i < 256; i++)
- dumpint(mstats.pause_ns[i]);
- dumpint(mstats.numgc);
+ dumpint(mstats()->pause_ns[i]);
+ dumpint(mstats()->numgc);
}
static void
G *g;
// Stop the world.
- runtime_semacquire(&runtime_worldsema, false);
+ runtime_acquireWorldsema();
m = runtime_m();
m->gcing = 1;
m->locks++;
- runtime_stoptheworld();
+ runtime_stopTheWorldWithSema();
// Update stats so we can dump them.
// As a side effect, flushes all the MCaches so the MSpan.freelist
// Start up the world again.
m->gcing = 0;
- runtime_semrelease(&runtime_worldsema);
- runtime_starttheworld();
+ runtime_releaseWorldsema();
+ runtime_startTheWorldWithSema();
m->locks--;
}
// Mark mheap as 'no pointers', it does not contain interesting pointers but occupies ~45K.
MHeap runtime_mheap;
-MStats mstats;
int32 runtime_checking;
-extern MStats mstats; // defined in zruntime_def_$GOOS_$GOARCH.go
-
extern volatile intgo runtime_MemProfileRate
__asm__ (GOSYM_PREFIX "runtime.MemProfileRate");
MLink *v, *next;
byte *tiny;
bool incallback;
+ MStats *pmstats;
if(size == 0) {
// All 0-length allocations use this pointer.
flag |= FlagNoInvokeGC;
}
- if(runtime_gcwaiting() && g != m->g0 && m->locks == 0 && !(flag & FlagNoInvokeGC)) {
+ if(runtime_gcwaiting() && g != m->g0 && m->locks == 0 && !(flag & FlagNoInvokeGC) && m->preemptoff.len == 0) {
runtime_gosched();
m = runtime_m();
}
m->locks--;
- if(!(flag & FlagNoInvokeGC) && mstats.heap_alloc >= mstats.next_gc)
+ pmstats = mstats();
+ if(!(flag & FlagNoInvokeGC) && pmstats->heap_alloc >= pmstats->next_gc)
runtime_gc(0);
if(incallback)
// Protected by either heap or GC lock.
h = &runtime_mheap;
- mstats.heap_alloc += (intptr)c->local_cachealloc;
+ mstats()->heap_alloc += (intptr)c->local_cachealloc;
c->local_cachealloc = 0;
- mstats.nlookup += c->local_nlookup;
+ mstats()->nlookup += c->local_nlookup;
c->local_nlookup = 0;
h->largefree += c->local_largefree;
c->local_largefree = 0;
}
}
-extern uintptr runtime_sizeof_C_MStats
- __asm__ (GOSYM_PREFIX "runtime.Sizeof_C_MStats");
-
-// Size of the trailing by_size array differs between Go and C,
-// _NumSizeClasses was changed, but we can not change Go struct because of backward compatibility.
-// sizeof_C_MStats is what C thinks about size of Go struct.
-
// Initialized in mallocinit because it's defined in go/runtime/mem.go.
#define MaxArena32 (2U<<30)
uint64 i;
bool reserved;
- runtime_sizeof_C_MStats = sizeof(MStats) - (_NumSizeClasses - 61) * sizeof(mstats.by_size[0]);
-
p = nil;
p_size = 0;
arena_size = 0;
if(n <= (uintptr)(h->arena_end - h->arena_used)) {
// Keep taking from our reservation.
p = h->arena_used;
- runtime_SysMap(p, n, h->arena_reserved, &mstats.heap_sys);
+ runtime_SysMap(p, n, h->arena_reserved, &mstats()->heap_sys);
h->arena_used += n;
runtime_MHeap_MapBits(h);
runtime_MHeap_MapSpans(h);
// try to get memory at a location chosen by the OS
// and hope that it is in the range we allocated bitmap for.
p_size = ROUND(n, PageSize) + PageSize;
- p = runtime_SysAlloc(p_size, &mstats.heap_sys);
+ p = runtime_SysAlloc(p_size, &mstats()->heap_sys);
if(p == nil)
return nil;
if(p < h->arena_start || (uintptr)(p+p_size - h->arena_start) >= MaxArena32) {
runtime_printf("runtime: memory allocated by OS (%p) not in usable range [%p,%p)\n",
p, h->arena_start, h->arena_start+MaxArena32);
- runtime_SysFree(p, p_size, &mstats.heap_sys);
+ runtime_SysFree(p, p_size, &mstats()->heap_sys);
return nil;
}
runtime_lock(&persistent);
persistent.pos = (byte*)ROUND((uintptr)persistent.pos, align);
if(persistent.pos + size > persistent.end) {
- persistent.pos = runtime_SysAlloc(PersistentAllocChunk, &mstats.other_sys);
+ persistent.pos = runtime_SysAlloc(PersistentAllocChunk, &mstats()->other_sys);
if(persistent.pos == nil) {
runtime_unlock(&persistent);
runtime_throw("runtime: cannot allocate memory");
p = persistent.pos;
persistent.pos += size;
runtime_unlock(&persistent);
- if(stat != &mstats.other_sys) {
+ if(stat != &mstats()->other_sys) {
// reaccount the allocation against provided stat
runtime_xadd64(stat, size);
- runtime_xadd64(&mstats.other_sys, -(uint64)size);
+ runtime_xadd64(&mstats()->other_sys, -(uint64)size);
}
return p;
}
typedef struct MCentral MCentral;
typedef struct MHeap MHeap;
typedef struct mspan MSpan;
-typedef struct MStats MStats;
+typedef struct mstats MStats;
typedef struct mlink MLink;
typedef struct mtypes MTypes;
typedef struct gcstats GCStats;
void* runtime_FixAlloc_Alloc(FixAlloc *f);
void runtime_FixAlloc_Free(FixAlloc *f, void *p);
-
-// Statistics.
-// Shared with Go: if you edit this structure, also edit type MemStats in mem.go.
-struct MStats
-{
- // General statistics.
- uint64 alloc; // bytes allocated and still in use
- uint64 total_alloc; // bytes allocated (even if freed)
- uint64 sys; // bytes obtained from system (should be sum of xxx_sys below, no locking, approximate)
- uint64 nlookup; // number of pointer lookups
- uint64 nmalloc; // number of mallocs
- uint64 nfree; // number of frees
-
- // Statistics about malloc heap.
- // protected by mheap.Lock
- uint64 heap_alloc; // bytes allocated and still in use
- uint64 heap_sys; // bytes obtained from system
- uint64 heap_idle; // bytes in idle spans
- uint64 heap_inuse; // bytes in non-idle spans
- uint64 heap_released; // bytes released to the OS
- uint64 heap_objects; // total number of allocated objects
-
- // Statistics about allocation of low-level fixed-size structures.
- // Protected by FixAlloc locks.
- uint64 stacks_inuse; // bootstrap stacks
- uint64 stacks_sys;
- uint64 mspan_inuse; // MSpan structures
- uint64 mspan_sys;
- uint64 mcache_inuse; // MCache structures
- uint64 mcache_sys;
- uint64 buckhash_sys; // profiling bucket hash table
- uint64 gc_sys;
- uint64 other_sys;
-
- // Statistics about garbage collector.
- // Protected by mheap or stopping the world during GC.
- uint64 next_gc; // next GC (in heap_alloc time)
- uint64 last_gc; // last GC (in absolute time)
- uint64 pause_total_ns;
- uint64 pause_ns[256];
- uint64 pause_end[256];
- uint32 numgc;
- float64 gc_cpu_fraction;
- bool enablegc;
- bool debuggc;
-
- // Statistics about allocation size classes.
- struct {
- uint32 size;
- uint64 nmalloc;
- uint64 nfree;
- } by_size[_NumSizeClasses];
-};
-
-extern MStats mstats
- __asm__ (GOSYM_PREFIX "runtime.memStats");
-void runtime_updatememstats(GCStats *stats);
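+// mstats() returns a pointer to the memstats variable defined in Go
+// (runtime.getMstats), so the C and Go code share one copy of the stats.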
+extern MStats *mstats(void)
+ __asm__ (GOSYM_PREFIX "runtime.getMstats");
+void runtime_updatememstats(GCStats *stats)
+ __asm__ (GOSYM_PREFIX "runtime.updatememstats");
// Size classes. Computed and initialized by InitSizes.
//
{
void *p;
- mstats.sys += n;
+ mstats()->sys += n;
errno = posix_memalign(&p, PageSize, n);
if (errno > 0) {
perror("posix_memalign");
void
runtime_SysFree(void *v, uintptr n)
{
- mstats.sys -= n;
+ mstats()->sys -= n;
free(v);
}
}
}
-// Holding worldsema grants an M the right to try to stop the world.
-// The procedure is:
-//
-// runtime_semacquire(&runtime_worldsema);
-// m->gcing = 1;
-// runtime_stoptheworld();
-//
-// ... do stuff ...
-//
-// m->gcing = 0;
-// runtime_semrelease(&runtime_worldsema);
-// runtime_starttheworld();
-//
-uint32 runtime_worldsema = 1;
-
typedef struct Workbuf Workbuf;
struct Workbuf
{
runtime_lock(&work);
if(work.nchunk < sizeof *b) {
work.nchunk = 1<<20;
- work.chunk = runtime_SysAlloc(work.nchunk, &mstats.gc_sys);
+ work.chunk = runtime_SysAlloc(work.nchunk, &mstats()->gc_sys);
if(work.chunk == nil)
runtime_throw("runtime: cannot allocate memory");
}
runtime_lock(&finlock);
if(finq == nil || finq->cnt == finq->cap) {
if(finc == nil) {
- finc = runtime_persistentalloc(FinBlockSize, 0, &mstats.gc_sys);
+ finc = runtime_persistentalloc(FinBlockSize, 0, &mstats()->gc_sys);
finc->cap = (FinBlockSize - sizeof(FinBlock)) / sizeof(Finalizer) + 1;
finc->alllink = allfin;
allfin = finc;
runtime_MHeap_Free(&runtime_mheap, s, 1);
c->local_nlargefree++;
c->local_largefree += size;
- runtime_xadd64(&mstats.next_gc, -(uint64)(size * (gcpercent + 100)/100));
+ runtime_xadd64(&mstats()->next_gc, -(uint64)(size * (gcpercent + 100)/100));
res = true;
} else {
// Free small object.
if(nfree > 0) {
c->local_nsmallfree[cl] += nfree;
c->local_cachealloc -= nfree * size;
- runtime_xadd64(&mstats.next_gc, -(uint64)(nfree * size * (gcpercent + 100)/100));
+ runtime_xadd64(&mstats()->next_gc, -(uint64)(nfree * size * (gcpercent + 100)/100));
res = runtime_MCentral_FreeSpan(&runtime_mheap.central[cl], s, nfree, head.next, end);
//MCentral_FreeSpan updates sweepgen
}
uint32 i;
uint64 stacks_inuse, smallfree;
uint64 *src, *dst;
+ MStats *pmstats;
if(stats)
runtime_memclr((byte*)stats, sizeof(*stats));
runtime_memclr((byte*)&mp->gcstats, sizeof(mp->gcstats));
}
}
- mstats.stacks_inuse = stacks_inuse;
- mstats.mcache_inuse = runtime_mheap.cachealloc.inuse;
- mstats.mspan_inuse = runtime_mheap.spanalloc.inuse;
- mstats.sys = mstats.heap_sys + mstats.stacks_sys + mstats.mspan_sys +
- mstats.mcache_sys + mstats.buckhash_sys + mstats.gc_sys + mstats.other_sys;
+ pmstats = mstats();
+ pmstats->stacks_inuse = stacks_inuse;
+ pmstats->mcache_inuse = runtime_mheap.cachealloc.inuse;
+ pmstats->mspan_inuse = runtime_mheap.spanalloc.inuse;
+ pmstats->sys = pmstats->heap_sys + pmstats->stacks_sys + pmstats->mspan_sys +
+ pmstats->mcache_sys + pmstats->buckhash_sys + pmstats->gc_sys + pmstats->other_sys;
// Calculate memory allocator stats.
// During program execution we only count number of frees and amount of freed memory.
// Total number of mallocs is calculated as number of frees plus number of alive objects.
// Similarly, total amount of allocated memory is calculated as amount of freed memory
// plus amount of alive heap memory.
- mstats.alloc = 0;
- mstats.total_alloc = 0;
- mstats.nmalloc = 0;
- mstats.nfree = 0;
- for(i = 0; i < nelem(mstats.by_size); i++) {
- mstats.by_size[i].nmalloc = 0;
- mstats.by_size[i].nfree = 0;
+ pmstats->alloc = 0;
+ pmstats->total_alloc = 0;
+ pmstats->nmalloc = 0;
+ pmstats->nfree = 0;
+ for(i = 0; i < nelem(pmstats->by_size); i++) {
+ pmstats->by_size[i].nmalloc = 0;
+ pmstats->by_size[i].nfree = 0;
}
// Flush MCache's to MCentral.
if(s->state != MSpanInUse)
continue;
if(s->sizeclass == 0) {
- mstats.nmalloc++;
- mstats.alloc += s->elemsize;
+ pmstats->nmalloc++;
+ pmstats->alloc += s->elemsize;
} else {
- mstats.nmalloc += s->ref;
- mstats.by_size[s->sizeclass].nmalloc += s->ref;
- mstats.alloc += s->ref*s->elemsize;
+ pmstats->nmalloc += s->ref;
+ pmstats->by_size[s->sizeclass].nmalloc += s->ref;
+ pmstats->alloc += s->ref*s->elemsize;
}
}
// Aggregate by size class.
smallfree = 0;
- mstats.nfree = runtime_mheap.nlargefree;
- for(i = 0; i < nelem(mstats.by_size); i++) {
- mstats.nfree += runtime_mheap.nsmallfree[i];
- mstats.by_size[i].nfree = runtime_mheap.nsmallfree[i];
- mstats.by_size[i].nmalloc += runtime_mheap.nsmallfree[i];
+ pmstats->nfree = runtime_mheap.nlargefree;
+ for(i = 0; i < nelem(pmstats->by_size); i++) {
+ pmstats->nfree += runtime_mheap.nsmallfree[i];
+ pmstats->by_size[i].nfree = runtime_mheap.nsmallfree[i];
+ pmstats->by_size[i].nmalloc += runtime_mheap.nsmallfree[i];
smallfree += runtime_mheap.nsmallfree[i] * runtime_class_to_size[i];
}
- mstats.nmalloc += mstats.nfree;
+ pmstats->nmalloc += pmstats->nfree;
// Calculate derived stats.
- mstats.total_alloc = mstats.alloc + runtime_mheap.largefree + smallfree;
- mstats.heap_alloc = mstats.alloc;
- mstats.heap_objects = mstats.nmalloc - mstats.nfree;
+ pmstats->total_alloc = pmstats->alloc + runtime_mheap.largefree + smallfree;
+ pmstats->heap_alloc = pmstats->alloc;
+ pmstats->heap_objects = pmstats->nmalloc - pmstats->nfree;
}
// Structure of arguments passed to function gc().
G *g;
struct gc_args a;
int32 i;
+ MStats *pmstats;
// The atomic operations are not atomic if the uint64s
// are not aligned on uint64 boundaries. This has been
// while holding a lock. The next mallocgc
// without a lock will do the gc instead.
m = runtime_m();
- if(!mstats.enablegc || runtime_g() == m->g0 || m->locks > 0 || runtime_panicking)
+ pmstats = mstats();
+ if(!pmstats->enablegc || runtime_g() == m->g0 || m->locks > 0 || runtime_panicking || m->preemptoff.len > 0)
return;
if(gcpercent == GcpercentUnknown) { // first time through
if(gcpercent < 0)
return;
- runtime_semacquire(&runtime_worldsema, false);
- if(force==0 && mstats.heap_alloc < mstats.next_gc) {
+ runtime_acquireWorldsema();
+ if(force==0 && pmstats->heap_alloc < pmstats->next_gc) {
// typically threads which lost the race to grab
// worldsema exit here when gc is done.
- runtime_semrelease(&runtime_worldsema);
+ runtime_releaseWorldsema();
return;
}
a.start_time = runtime_nanotime();
a.eagersweep = force >= 2;
m->gcing = 1;
- runtime_stoptheworld();
+ runtime_stopTheWorldWithSema();
clearpools();
// all done
m->gcing = 0;
m->locks++;
- runtime_semrelease(&runtime_worldsema);
- runtime_starttheworld();
+ runtime_releaseWorldsema();
+ runtime_startTheWorldWithSema();
m->locks--;
// now that gc is done, kick off finalizer thread if needed
uint64 heap0, heap1, obj, ninstr;
GCStats stats;
uint32 i;
+ MStats *pmstats;
// Eface eface;
m = runtime_m();
cachestats();
// next_gc calculation is tricky with concurrent sweep since we don't know size of live heap
// estimate what was live heap size after previous GC (for tracing only)
- heap0 = mstats.next_gc*100/(gcpercent+100);
+ pmstats = mstats();
+ heap0 = pmstats->next_gc*100/(gcpercent+100);
// conservatively set next_gc to high value assuming that everything is live
// concurrent/lazy sweep will reduce this number while discovering new garbage
- mstats.next_gc = mstats.heap_alloc+(mstats.heap_alloc-runtime_stacks_sys)*gcpercent/100;
+ pmstats->next_gc = pmstats->heap_alloc+(pmstats->heap_alloc-runtime_stacks_sys)*gcpercent/100;
tm4 = runtime_nanotime();
- mstats.last_gc = runtime_unixnanotime(); // must be Unix time to make sense to user
- mstats.pause_ns[mstats.numgc%nelem(mstats.pause_ns)] = tm4 - tm0;
- mstats.pause_end[mstats.numgc%nelem(mstats.pause_end)] = mstats.last_gc;
- mstats.pause_total_ns += tm4 - tm0;
- mstats.numgc++;
- if(mstats.debuggc)
+ pmstats->last_gc = runtime_unixnanotime(); // must be Unix time to make sense to user
+ pmstats->pause_ns[pmstats->numgc%nelem(pmstats->pause_ns)] = tm4 - tm0;
+ pmstats->pause_end[pmstats->numgc%nelem(pmstats->pause_end)] = pmstats->last_gc;
+ pmstats->pause_total_ns += tm4 - tm0;
+ pmstats->numgc++;
+ if(pmstats->debuggc)
runtime_printf("pause %D\n", tm4-tm0);
if(runtime_debug.gctrace) {
- heap1 = mstats.heap_alloc;
+ heap1 = pmstats->heap_alloc;
runtime_updatememstats(&stats);
- if(heap1 != mstats.heap_alloc) {
- runtime_printf("runtime: mstats skew: heap=%D/%D\n", heap1, mstats.heap_alloc);
+ if(heap1 != pmstats->heap_alloc) {
+ runtime_printf("runtime: mstats skew: heap=%D/%D\n", heap1, pmstats->heap_alloc);
runtime_throw("mstats skew");
}
- obj = mstats.nmalloc - mstats.nfree;
+ obj = pmstats->nmalloc - pmstats->nfree;
stats.nprocyield += work.markfor->nprocyield;
stats.nosyield += work.markfor->nosyield;
runtime_printf("gc%d(%d): %D+%D+%D+%D us, %D -> %D MB, %D (%D-%D) objects,"
" %d/%d/%d sweeps,"
" %D(%D) handoff, %D(%D) steal, %D/%D/%D yields\n",
- mstats.numgc, work.nproc, (tm1-tm0)/1000, (tm2-tm1)/1000, (tm3-tm2)/1000, (tm4-tm3)/1000,
+ pmstats->numgc, work.nproc, (tm1-tm0)/1000, (tm2-tm1)/1000, (tm3-tm2)/1000, (tm4-tm3)/1000,
heap0>>20, heap1>>20, obj,
- mstats.nmalloc, mstats.nfree,
+ pmstats->nmalloc, pmstats->nfree,
sweep.nspan, gcstats.nbgsweep, gcstats.npausesweep,
stats.nhandoff, stats.nhandoffcnt,
work.markfor->nsteal, work.markfor->nstealcnt,
// Free the old cached array if necessary.
if(sweep.spans && sweep.spans != runtime_mheap.allspans)
- runtime_SysFree(sweep.spans, sweep.nspan*sizeof(sweep.spans[0]), &mstats.other_sys);
+ runtime_SysFree(sweep.spans, sweep.nspan*sizeof(sweep.spans[0]), &pmstats->other_sys);
// Cache the current array.
runtime_mheap.sweepspans = runtime_mheap.allspans;
runtime_mheap.sweepgen += 2;
m->traceback = 0;
}
-extern uintptr runtime_sizeof_C_MStats
- __asm__ (GOSYM_PREFIX "runtime.Sizeof_C_MStats");
-
-void runtime_ReadMemStats(MStats *)
- __asm__ (GOSYM_PREFIX "runtime.ReadMemStats");
-
-void
-runtime_ReadMemStats(MStats *stats)
-{
- M *m;
-
- // Have to acquire worldsema to stop the world,
- // because stoptheworld can only be used by
- // one goroutine at a time, and there might be
- // a pending garbage collection already calling it.
- runtime_semacquire(&runtime_worldsema, false);
- m = runtime_m();
- m->gcing = 1;
- runtime_stoptheworld();
- runtime_updatememstats(nil);
- // Size of the trailing by_size array differs between Go and C,
- // _NumSizeClasses was changed, but we can not change Go struct because of backward compatibility.
- runtime_memmove(stats, &mstats, runtime_sizeof_C_MStats);
- m->gcing = 0;
- m->locks++;
- runtime_semrelease(&runtime_worldsema);
- runtime_starttheworld();
- m->locks--;
-}
-
void runtime_debug_readGCStats(Slice*)
__asm__("runtime_debug.readGCStats");
{
uint64 *p;
uint32 i, n;
+ MStats *pmstats;
// Calling code in runtime/debug should make the slice large enough.
- if((size_t)pauses->cap < nelem(mstats.pause_ns)+3)
+ pmstats = mstats();
+ if((size_t)pauses->cap < nelem(pmstats->pause_ns)+3)
runtime_throw("runtime: short slice passed to readGCStats");
// Pass back: pauses, last gc (absolute time), number of gc, total pause ns.
p = (uint64*)pauses->array;
runtime_lock(&runtime_mheap);
- n = mstats.numgc;
- if(n > nelem(mstats.pause_ns))
- n = nelem(mstats.pause_ns);
+ n = pmstats->numgc;
+ if(n > nelem(pmstats->pause_ns))
+ n = nelem(pmstats->pause_ns);
// The pause buffer is circular. The most recent pause is at
// pause_ns[(numgc-1)%nelem(pause_ns)], and then backward
// from there to go back farther in time. We deliver the times
// most recent first (in p[0]).
for(i=0; i<n; i++)
- p[i] = mstats.pause_ns[(mstats.numgc-1-i)%nelem(mstats.pause_ns)];
+ p[i] = pmstats->pause_ns[(pmstats->numgc-1-i)%nelem(pmstats->pause_ns)];
- p[n] = mstats.last_gc;
- p[n+1] = mstats.numgc;
- p[n+2] = mstats.pause_total_ns;
+ p[n] = pmstats->last_gc;
+ p[n+1] = pmstats->numgc;
+ p[n+2] = pmstats->pause_total_ns;
runtime_unlock(&runtime_mheap);
pauses->__count = n+3;
}
if(h->bitmap_mapped >= n)
return;
- runtime_SysMap(h->arena_start - n, n - h->bitmap_mapped, h->arena_reserved, &mstats.gc_sys);
+ runtime_SysMap(h->arena_start - n, n - h->bitmap_mapped, h->arena_reserved, &mstats()->gc_sys);
h->bitmap_mapped = n;
}
cap = 64*1024/sizeof(all[0]);
if(cap < h->nspancap*3/2)
cap = h->nspancap*3/2;
- all = (MSpan**)runtime_SysAlloc(cap*sizeof(all[0]), &mstats.other_sys);
+ all = (MSpan**)runtime_SysAlloc(cap*sizeof(all[0]), &mstats()->other_sys);
if(all == nil)
runtime_throw("runtime: cannot allocate memory");
if(h->allspans) {
// Don't free the old array if it's referenced by sweep.
// See the comment in mgc0.c.
if(h->allspans != runtime_mheap.sweepspans)
- runtime_SysFree(h->allspans, h->nspancap*sizeof(all[0]), &mstats.other_sys);
+ runtime_SysFree(h->allspans, h->nspancap*sizeof(all[0]), &mstats()->other_sys);
}
h->allspans = all;
h->nspancap = cap;
void
runtime_MHeap_Init(MHeap *h)
{
+ MStats *pmstats;
uint32 i;
- runtime_FixAlloc_Init(&h->spanalloc, sizeof(MSpan), RecordSpan, h, &mstats.mspan_sys);
- runtime_FixAlloc_Init(&h->cachealloc, sizeof(MCache), nil, nil, &mstats.mcache_sys);
- runtime_FixAlloc_Init(&h->specialfinalizeralloc, sizeof(SpecialFinalizer), nil, nil, &mstats.other_sys);
- runtime_FixAlloc_Init(&h->specialprofilealloc, sizeof(SpecialProfile), nil, nil, &mstats.other_sys);
+ pmstats = mstats();
+ runtime_FixAlloc_Init(&h->spanalloc, sizeof(MSpan), RecordSpan, h, &pmstats->mspan_sys);
+ runtime_FixAlloc_Init(&h->cachealloc, sizeof(MCache), nil, nil, &pmstats->mcache_sys);
+ runtime_FixAlloc_Init(&h->specialfinalizeralloc, sizeof(SpecialFinalizer), nil, nil, &pmstats->other_sys);
+ runtime_FixAlloc_Init(&h->specialprofilealloc, sizeof(SpecialProfile), nil, nil, &pmstats->other_sys);
// h->mapcache needs no init
for(i=0; i<nelem(h->free); i++) {
runtime_MSpanList_Init(&h->free[i]);
n = ROUND(n, pagesize);
if(h->spans_mapped >= n)
return;
- runtime_SysMap((byte*)h->spans + h->spans_mapped, n - h->spans_mapped, h->arena_reserved, &mstats.other_sys);
+ runtime_SysMap((byte*)h->spans + h->spans_mapped, n - h->spans_mapped, h->arena_reserved, &mstats()->other_sys);
h->spans_mapped = n;
}
MSpan*
runtime_MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, bool large, bool needzero)
{
+ MStats *pmstats;
MSpan *s;
runtime_lock(h);
- mstats.heap_alloc += (intptr)runtime_m()->mcache->local_cachealloc;
+ pmstats = mstats();
+ pmstats->heap_alloc += (intptr)runtime_m()->mcache->local_cachealloc;
runtime_m()->mcache->local_cachealloc = 0;
s = MHeap_AllocLocked(h, npage, sizeclass);
if(s != nil) {
- mstats.heap_inuse += npage<<PageShift;
+ pmstats->heap_inuse += npage<<PageShift;
if(large) {
- mstats.heap_objects++;
- mstats.heap_alloc += npage<<PageShift;
+ pmstats->heap_objects++;
+ pmstats->heap_alloc += npage<<PageShift;
// Swept spans are at the end of lists.
if(s->npages < nelem(h->free))
runtime_MSpanList_InsertBack(&h->busy[s->npages], s);
runtime_MSpanList_Remove(s);
runtime_atomicstore(&s->sweepgen, h->sweepgen);
s->state = MSpanInUse;
- mstats.heap_idle -= s->npages<<PageShift;
- mstats.heap_released -= s->npreleased<<PageShift;
+ mstats()->heap_idle -= s->npages<<PageShift;
+ mstats()->heap_released -= s->npreleased<<PageShift;
if(s->npreleased > 0)
runtime_SysUsed((void*)(s->start<<PageShift), s->npages<<PageShift);
s->npreleased = 0;
v = runtime_MHeap_SysAlloc(h, ask);
}
if(v == nil) {
- runtime_printf("runtime: out of memory: cannot allocate %D-byte block (%D in use)\n", (uint64)ask, mstats.heap_sys);
+ runtime_printf("runtime: out of memory: cannot allocate %D-byte block (%D in use)\n", (uint64)ask, mstats()->heap_sys);
return false;
}
}
void
runtime_MHeap_Free(MHeap *h, MSpan *s, int32 acct)
{
+ MStats *pmstats;
+
runtime_lock(h);
- mstats.heap_alloc += (intptr)runtime_m()->mcache->local_cachealloc;
+ pmstats = mstats();
+ pmstats->heap_alloc += (intptr)runtime_m()->mcache->local_cachealloc;
runtime_m()->mcache->local_cachealloc = 0;
- mstats.heap_inuse -= s->npages<<PageShift;
+ pmstats->heap_inuse -= s->npages<<PageShift;
if(acct) {
- mstats.heap_alloc -= s->npages<<PageShift;
- mstats.heap_objects--;
+ pmstats->heap_alloc -= s->npages<<PageShift;
+ pmstats->heap_objects--;
}
MHeap_FreeLocked(h, s);
runtime_unlock(h);
s, s->start<<PageShift, s->state, s->ref, s->sweepgen, h->sweepgen);
runtime_throw("MHeap_FreeLocked - invalid free");
}
- mstats.heap_idle += s->npages<<PageShift;
+ mstats()->heap_idle += s->npages<<PageShift;
s->state = MSpanFree;
runtime_MSpanList_Remove(s);
// Stamp newly unused spans. The scavenger will use that
for(s=list->next; s != list; s=s->next) {
if((now - s->unusedsince) > limit && s->npreleased != s->npages) {
released = (s->npages - s->npreleased) << PageShift;
- mstats.heap_released += released;
+ mstats()->heap_released += released;
sumreleased += released;
s->npreleased = s->npages;
if(sumreleased > 0)
runtime_printf("scvg%d: %D MB released\n", k, (uint64)sumreleased>>20);
runtime_printf("scvg%d: inuse: %D, idle: %D, sys: %D, released: %D, consumed: %D (MB)\n",
- k, mstats.heap_inuse>>20, mstats.heap_idle>>20, mstats.heap_sys>>20,
- mstats.heap_released>>20, (mstats.heap_sys - mstats.heap_released)>>20);
+ k, mstats()->heap_inuse>>20, mstats()->heap_idle>>20, mstats()->heap_sys>>20,
+ mstats()->heap_released>>20, (mstats()->heap_sys - mstats()->heap_released)>>20);
}
}
runtime_lock(h);
unixnow = runtime_unixnanotime();
- if(unixnow - mstats.last_gc > forcegc) {
+ if(unixnow - mstats()->last_gc > forcegc) {
runtime_unlock(h);
// The scavenger can not block other goroutines,
// otherwise deadlock detector can fire spuriously.
Bucket *b;
if(buckhash == nil) {
- buckhash = runtime_SysAlloc(BuckHashSize*sizeof buckhash[0], &mstats.buckhash_sys);
+ buckhash = runtime_SysAlloc(BuckHashSize*sizeof buckhash[0], &mstats()->buckhash_sys);
if(buckhash == nil)
runtime_throw("runtime: cannot allocate memory");
}
if(!alloc)
return nil;
- b = runtime_persistentalloc(sizeof *b + nstk*sizeof stk[0], 0, &mstats.buckhash_sys);
+ b = runtime_persistentalloc(sizeof *b + nstk*sizeof stk[0], 0, &mstats()->buckhash_sys);
bucketmem += sizeof *b + nstk*sizeof stk[0];
runtime_memmove(b->stk, stk, nstk*sizeof stk[0]);
b->typ = typ;
pc = (byte*)(uintptr)runtime_getcallerpc(&b);
if(all) {
- runtime_semacquire(&runtime_worldsema, false);
+ runtime_acquireWorldsema();
runtime_m()->gcing = 1;
- runtime_stoptheworld();
- enablegc = mstats.enablegc;
- mstats.enablegc = false;
+ runtime_stopTheWorldWithSema();
+ enablegc = mstats()->enablegc;
+ mstats()->enablegc = false;
}
if(b.__count == 0)
if(all) {
runtime_m()->gcing = 0;
- mstats.enablegc = enablegc;
- runtime_semrelease(&runtime_worldsema);
- runtime_starttheworld();
+ mstats()->enablegc = enablegc;
+ runtime_releaseWorldsema();
+ runtime_startTheWorldWithSema();
}
}
ok = false;
n = runtime_gcount();
if(n <= b.__count) {
- runtime_semacquire(&runtime_worldsema, false);
+ runtime_acquireWorldsema();
runtime_m()->gcing = 1;
- runtime_stoptheworld();
+ runtime_stopTheWorldWithSema();
n = runtime_gcount();
if(n <= b.__count) {
}
runtime_m()->gcing = 0;
- runtime_semrelease(&runtime_worldsema);
- runtime_starttheworld();
+ runtime_releaseWorldsema();
+ runtime_startTheWorldWithSema();
}
}
int32 align, sizeclass, size, nextsize, n;
uint32 i;
uintptr allocsize, npages;
+ MStats *pmstats;
// Initialize the runtime_class_to_size table (and choose class sizes in the process).
runtime_class_to_size[0] = 0;
}
// Copy out for statistics table.
+ pmstats = mstats();
for(i=0; i<nelem(runtime_class_to_size); i++)
- mstats.by_size[i].size = runtime_class_to_size[i];
+ pmstats->by_size[i].size = runtime_class_to_size[i];
return;
dump:
n = 1;
// Must be in non-GC memory because can be referenced
// only from epoll/kqueue internals.
- pd = runtime_persistentalloc(n*sizeof(*pd), 0, &mstats.other_sys);
+ pd = runtime_persistentalloc(n*sizeof(*pd), 0, &mstats()->other_sys);
for(i = 0; i < n; i++) {
pd[i].link = pollcache.first;
pollcache.first = &pd[i];
if(inuse) {
if(!allocatedfds) {
- prfds = runtime_SysAlloc(4 * sizeof fds, &mstats.other_sys);
+ prfds = runtime_SysAlloc(4 * sizeof fds, &mstats()->other_sys);
pwfds = prfds + 1;
pefds = pwfds + 1;
ptfds = pefds + 1;
goto retry;
if(allocatedfds) {
- runtime_SysFree(prfds, 4 * sizeof fds, &mstats.other_sys);
+ runtime_SysFree(prfds, 4 * sizeof fds, &mstats()->other_sys);
} else {
runtime_lock(&selectlock);
inuse = false;
procresize(procs);
// Can not enable GC until all roots are registered.
- // mstats.enablegc = 1;
+ // mstats()->enablegc = 1;
}
extern void main_init(void) __asm__ (GOSYM_PREFIX "__go_init_main");
// For gccgo we have to wait until after main is initialized
// to enable GC, because initializing main registers the GC
// roots.
- mstats.enablegc = 1;
+ mstats()->enablegc = 1;
if(runtime_isarchive) {
// This is not a complete program, but is instead a
}
void
-runtime_stoptheworld(void)
+runtime_stopTheWorldWithSema(void)
{
int32 i;
uint32 s;
}
void
-runtime_starttheworld(void)
+runtime_startTheWorldWithSema(void)
{
P *p, *p1;
M *mp;
mp = (M*)p->m;
p->m = 0;
if(mp->nextp)
- runtime_throw("starttheworld: inconsistent mp->nextp");
+ runtime_throw("startTheWorldWithSema: inconsistent mp->nextp");
mp->nextp = (uintptr)p;
runtime_notewakeup(&mp->park);
} else {
// 32-bit mode, the Go allocation space is all of
// memory anyhow.
if(sizeof(void*) == 8) {
- void *p = runtime_SysAlloc(stacksize, &mstats.other_sys);
+ void *p = runtime_SysAlloc(stacksize, &mstats()->other_sys);
if(p == nil)
runtime_throw("runtime: cannot allocate memory for goroutine stack");
*ret_stack = (byte*)p;
}
runtime_unlock(&runtime_sched);
- runtime_semacquire(&runtime_worldsema, false);
+ runtime_acquireWorldsema();
g->m->gcing = 1;
- runtime_stoptheworld();
+ runtime_stopTheWorldWithSema();
newprocs = n;
g->m->gcing = 0;
- runtime_semrelease(&runtime_worldsema);
- runtime_starttheworld();
+ runtime_releaseWorldsema();
+ runtime_startTheWorldWithSema();
return ret;
}
G* runtime_timejump(void);
void runtime_iterate_finq(void (*callback)(FuncVal*, void*, const FuncType*, const PtrType*));
-void runtime_stoptheworld(void);
-void runtime_starttheworld(void);
-extern uint32 runtime_worldsema;
+void runtime_stopTheWorldWithSema(void)
+ __asm__(GOSYM_PREFIX "runtime.stopTheWorldWithSema");
+void runtime_startTheWorldWithSema(void)
+ __asm__(GOSYM_PREFIX "runtime.startTheWorldWithSema");
+void runtime_acquireWorldsema(void)
+ __asm__(GOSYM_PREFIX "runtime.acquireWorldsema");
+void runtime_releaseWorldsema(void)
+ __asm__(GOSYM_PREFIX "runtime.releaseWorldsema");
/*
* mutual exclusion locks. in the uncontended case,