freeToHeap = true
}
nfreed := s.allocCount - nalloc
-
- // This check is not reliable with gccgo, because of
- // conservative stack scanning. The test boils down to
- // checking that no new bits have been set in gcmarkBits since
- // the span was added to the sweep count. New bits are set by
- // greyobject. Seeing a new bit means that a live pointer has
- // appeared that was not found during the mark phase. That can
- // not happen when pointers are followed strictly. However,
- // with conservative checking, it is possible for a pointer
- // that will never be used to appear live and to cause a mark
- // to be added. That is unfortunate in that it causes this
- // check to be inaccurate, and it will keep an object live
- // unnecessarily, but provided the pointer is not really live
- // it is not otherwise a problem. So we disable the test for gccgo.
- nfreedSigned := int(nfreed)
if nalloc > s.allocCount {
- if usestackmaps {
- print("runtime: nelems=", s.nelems, " nalloc=", nalloc, " previous allocCount=", s.allocCount, " nfreed=", nfreed, "\n")
- throw("sweep increased allocation count")
- }
-
- // For gccgo, adjust the freed count as a signed number.
- nfreedSigned = int(s.allocCount) - int(nalloc)
- if uintptr(nalloc) == s.nelems {
- s.freeindex = s.nelems
- }
+ print("runtime: nelems=", s.nelems, " nalloc=", nalloc, " previous allocCount=", s.allocCount, " nfreed=", nfreed, "\n")
+ throw("sweep increased allocation count")
}
s.allocCount = nalloc
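The restored check rests on a one-way invariant: nalloc is a popcount of gcmarkBits, and with precise scanning a mark bit can only be set for an object that was already allocated, so the live count can never exceed allocCount. A minimal runnable sketch of that invariant, using a toy span type (toySpan, countAlloc, and sweepSpan are illustrative stand-ins, not the runtime's real mspan code):

    package main

    import (
        "fmt"
        "math/bits"
    )

    // toySpan is a simplified stand-in for the runtime's mspan: a fixed
    // number of object slots, the allocation count as of the last sweep,
    // and the mark bitmap produced by the current GC cycle.
    type toySpan struct {
        nelems     uint16
        allocCount uint16
        gcmarkBits []uint64
    }

    // countAlloc counts the set bits in gcmarkBits, i.e. the objects the
    // mark phase found live. This plays the role of nalloc above.
    func (s *toySpan) countAlloc() uint16 {
        n := 0
        for _, w := range s.gcmarkBits {
            n += bits.OnesCount64(w)
        }
        return uint16(n)
    }

    // sweepSpan enforces the invariant: the marked objects are a subset of
    // the previously allocated ones, so nalloc > allocCount means a mark
    // bit appeared for memory that was never allocated -- a fatal GC bug.
    func sweepSpan(s *toySpan) uint16 {
        nalloc := s.countAlloc()
        if nalloc > s.allocCount {
            panic("sweep increased allocation count")
        }
        nfreed := s.allocCount - nalloc
        s.allocCount = nalloc
        return nfreed
    }

    func main() {
        // 5 objects were allocated; the mark phase found 3 still live (0x13).
        s := &toySpan{nelems: 8, allocCount: 5, gcmarkBits: []uint64{0x13}}
        fmt.Println("freed:", sweepSpan(s)) // freed: 2
    }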
wasempty := s.nextFreeIndex() == s.nelems
s.freeindex = 0 // reset allocation index to start of span.
if trace.enabled {
- getg().m.p.ptr().traceReclaimed += uintptr(nfreedSigned) * s.elemsize
+ getg().m.p.ptr().traceReclaimed += uintptr(nfreed) * s.elemsize
}
// gcmarkBits becomes the allocBits.
// But we need to set it before we make the span available for allocation
// (return it to heap or mcentral), because allocation code assumes that a
// span is already swept if available for allocation.
- if freeToHeap || nfreedSigned <= 0 {
+ if freeToHeap || nfreed == 0 {
// The span must be in our exclusive ownership until we update sweepgen;
// check for potential races.
if s.state != mSpanInUse || s.sweepgen != sweepgen-1 {
atomic.Store(&s.sweepgen, sweepgen)
}
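The constraint described above (gcmarkBits must become allocBits before the span is made available) is an initialize-then-publish pattern: every field an allocator may read has to be written before the sweepgen store that makes the span look swept. A hedged sketch of the two-step protocol with sync/atomic (spanToy and finishSweep are made-up names, not the runtime's code):

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    // spanToy models the published state of a swept span: a concurrent
    // allocator treats sweepgen == current as "swept, safe to allocate
    // from", so allocBits must be complete before sweepgen advances.
    type spanToy struct {
        allocBits []byte
        sweepgen  uint32
    }

    func finishSweep(s *spanToy, marks []byte, current uint32) {
        // Step 1: install the new allocation bitmap while the span is
        // still in our exclusive ownership.
        s.allocBits = marks

        // Step 2: publish. After this store another goroutine may observe
        // the span as swept and allocate from it immediately, so nothing
        // past this line may touch allocBits.
        atomic.StoreUint32(&s.sweepgen, current)
    }

    func main() {
        s := &spanToy{sweepgen: 2}
        finishSweep(s, []byte{0x13}, 4)
        fmt.Printf("sweepgen=%d allocBits=%#x\n",
            atomic.LoadUint32(&s.sweepgen), s.allocBits)
    }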
- if spc.sizeclass() != 0 {
- c.local_nsmallfree[spc.sizeclass()] += uintptr(nfreedSigned)
- }
-
- if nfreedSigned > 0 && spc.sizeclass() != 0 {
+ if nfreed > 0 && spc.sizeclass() != 0 {
+ c.local_nsmallfree[spc.sizeclass()] += uintptr(nfreed)
res = mheap_.central[spc].mcentral.freeSpan(s, preserve, wasempty)
// mcentral.freeSpan updates sweepgen
} else if freeToHeap {
@@ ... @@ func parsedebugvars() {
// defaults
debug.cgocheck = 1
- // Unfortunately, because gccgo uses conservative stack scanning,
- // we can not enable invalid pointer checking. It is possible for
- // memory block M1 to point to M2, and for both to be dead.
- // We release M2, causing the entire span to be released.
- // Before we release M1, a stack pointer appears that point into it.
- // This stack pointer is presumably dead, but causes M1 to be marked.
- // We scan M1 and see the pointer to M2 on a released span.
- // At that point, if debug.invalidptr is set, we crash.
- // This is not a problem, assuming that M1 really is dead and
- // the pointer we discovered to it will not be used.
- if usestackmaps {
- debug.invalidptr = 1
- }
+ // Gccgo uses conservative stack scanning, so we cannot check for
+ // invalid pointers on the stack. But we can still enable invalid
+ // pointer checking for heap scanning. When scanning the heap, we
+ // ensure that we only trace allocated heap objects, which should
+ // not contain invalid pointers.
+ debug.invalidptr = 1
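In effect, the check this enables says: during heap scanning, any word that falls in the heap's address range must resolve to an allocated object, and the runtime throws otherwise. A toy model of that rule (the allocated map, range constants, and scanWord are invented for illustration; the real runtime resolves pointers through spans and also handles interior pointers):

    package main

    import "fmt"

    // Toy heap: base addresses of the currently allocated objects.
    var allocated = map[uintptr]bool{0x1000: true, 0x1040: true}

    const heapStart, heapEnd uintptr = 0x1000, 0x2000

    var invalidptr = 1 // plays the role of debug.invalidptr

    // scanWord models the heap-scan check: a value that looks like a heap
    // pointer must be an allocated object. Because only allocated objects
    // are traced, their fields should never point into freed memory.
    func scanWord(p uintptr) {
        if p < heapStart || p >= heapEnd {
            return // not a heap address; ignore it
        }
        if !allocated[p] && invalidptr != 0 {
            panic(fmt.Sprintf("found bad pointer in Go heap: %#x", p))
        }
    }

    func main() {
        scanWord(0x1040) // ok: allocated object
        scanWord(0x0500) // ok: outside the heap range
        scanWord(0x1080) // throws: heap address with no object allocated there
    }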
for p := gogetenv("GODEBUG"); p != ""; {
field := ""