// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Export guts for testing.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

//var F64to32 = f64to32
//var F32to64 = f32to64
//var Fintto64 = fintto64
//var F64toint = f64toint

var Entersyscall = entersyscall
var Exitsyscall = exitsyscall
var LockedOSThread = lockedOSThread
var Xadduintptr = atomic.Xadduintptr

var Fastlog2 = fastlog2

var Nanotime = nanotime
var NetpollBreak = netpollBreak

var PhysPageSize = physPageSize
var PhysHugePageSize = physHugePageSize

var NetpollGenericInit = netpollGenericInit

var ParseRelease = parseRelease

var MemclrNoHeapPointers = memclrNoHeapPointers

const PreemptMSupported = preemptMSupported
func LFStackPush(head *uint64, node *LFNode) {
	(*lfstack)(head).push((*lfnode)(unsafe.Pointer(node)))
}

func LFStackPop(head *uint64) *LFNode {
	return (*LFNode)(unsafe.Pointer((*lfstack)(head).pop()))
}

func Netpoll(delta int64) {

func GCMask(x interface{}) (ret []byte) {

func RunSchedLocalQueueTest() {
	gs := make([]g, len(_p_.runq))
	for i := 0; i < len(_p_.runq); i++ {
		if g, _ := runqget(_p_); g != nil {
			throw("runq is not empty initially")
		for j := 0; j < i; j++ {
			runqput(_p_, &gs[i], false)
		for j := 0; j < i; j++ {
			if g, _ := runqget(_p_); g != &gs[i] {
				print("bad element at iter ", i, "/", j, "\n")
		if g, _ := runqget(_p_); g != nil {
			throw("runq is not empty afterwards")

func RunSchedLocalQueueStealTest() {
	gs := make([]g, len(p1.runq))
	for i := 0; i < len(p1.runq); i++ {
		for j := 0; j < i; j++ {
			runqput(p1, &gs[j], false)
		gp := runqsteal(p2, p1, true)
		for j := 0; j < i; j++ {
			print("bad element ", j, "(", gs[j].sig, ") at iter ", i, "\n")
		if s != i/2 && s != i/2+1 {
			print("bad steal ", s, ", want ", i/2, " or ", i/2+1, ", iter ", i, "\n")
func RunSchedLocalQueueEmptyTest(iters int) {
	// Test that runq is not spuriously reported as empty.
	// Runq emptiness affects scheduling decisions and spurious emptiness
	// can lead to underutilization (both runnable Gs and idle Ps coexist
	// for an arbitrarily long time).
	done := make(chan bool, 1)
	for i := 0; i < iters; i++ {
		next0 := (i & 1) == 0
		next1 := (i & 2) == 0
		runqput(_p_, &gs[0], next0)
		go func(done chan bool, p *p, ready *uint32, next0, next1 bool) {
			for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
			println("next:", next0, next1)
			throw("queue is empty")
		}(done, _p_, ready, next0, next1)
		for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
		runqput(_p_, &gs[1], next1)
var (
	StringHash = stringHash
	BytesHash  = bytesHash
	Int32Hash  = int32Hash
	Int64Hash  = int64Hash
	MemHash32  = memhash32
	MemHash64  = memhash64
	EfaceHash  = efaceHash
	IfaceHash  = ifaceHash
)

var UseAeshash = &useAeshash
func MemclrBytes(b []byte) {
	s := (*slice)(unsafe.Pointer(&b))
	memclrNoHeapPointers(s.array, uintptr(s.len))
}

var HashLoad = &hashLoad

// entry point for testing
//func GostringW(w []uint16) (s string) {
//	s = gostringw(&w[0])

type Uintreg sys.Uintreg

func Envs() []string     { return envs }
func SetEnvs(e []string) { envs = e }

//var BigEndian = sys.BigEndian

func BenchSetType(n int, x interface{}) {
	switch t.kind & kindMask {
		t = (*ptrtype)(unsafe.Pointer(t)).elem
		t = (*slicetype)(unsafe.Pointer(t)).elem
		size = t.size * slice.len
	allocSize := roundupsize(size)
	for i := 0; i < n; i++ {
		heapBitsSetType(uintptr(p), allocSize, size, t)

const PtrSize = sys.PtrSize

var ForceGCPeriod = &forcegcperiod

// SetTracebackEnv is like runtime/debug.SetTraceback, but it raises
// the "environment" traceback level, so later calls to
// debug.SetTraceback (e.g., from testing timeouts) can't lower it.
func SetTracebackEnv(level string) {
	traceback_env = traceback_cache
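
// A hypothetical sketch (not part of the exported test surface) of the
// intended call pattern for SetTracebackEnv: a test harness raises the
// environment level once, early, so that a later debug.SetTraceback call
// (e.g. from a test timeout) cannot lower it. The "system" level is just an
// illustrative choice.
func exampleRaiseTracebackEnv() {
	SetTracebackEnv("system")
}
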
var ReadUnaligned32 = readUnaligned32
var ReadUnaligned64 = readUnaligned64

func CountPagesInUse() (pagesInUse, counted uintptr) {
	stopTheWorld("CountPagesInUse")
	pagesInUse = uintptr(mheap_.pagesInUse)
	for _, s := range mheap_.allspans {
		if s.state.get() == mSpanInUse {

func Fastrand() uint32          { return fastrand() }
func Fastrandn(n uint32) uint32 { return fastrandn(n) }

func NewProfBuf(hdrsize, bufwords, tags int) *ProfBuf {
	return (*ProfBuf)(newProfBuf(hdrsize, bufwords, tags))
}

func (p *ProfBuf) Write(tag *unsafe.Pointer, now int64, hdr []uint64, stk []uintptr) {
	(*profBuf)(p).write(tag, now, hdr, stk)
}

const (
	ProfBufBlocking    = profBufBlocking
	ProfBufNonBlocking = profBufNonBlocking
)

func (p *ProfBuf) Read(mode profBufReadMode) ([]uint64, []unsafe.Pointer, bool) {
	return (*profBuf)(p).read(profBufReadMode(mode))
}

func (p *ProfBuf) Close() {
	(*profBuf)(p).close()
}
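
// A hypothetical sketch (sizes and values are illustrative, not taken from a
// real test) of the profiling-buffer hooks exposed above: create a buffer
// with 2-word headers, write one record, drain it without blocking, and
// close it.
func exampleProfBuf(stk []uintptr) {
	var tag unsafe.Pointer
	b := NewProfBuf(2, 128, 8)
	b.Write(&tag, nanotime(), []uint64{1, 2}, stk)
	data, tags, eof := b.Read(ProfBufNonBlocking)
	_, _, _ = data, tags, eof
	b.Close()
}
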
// ReadMemStatsSlow returns both the runtime-computed MemStats and
// MemStats accumulated by scanning the heap.
func ReadMemStatsSlow() (base, slow MemStats) {
	stopTheWorld("ReadMemStatsSlow")

	// Run on the system stack to avoid stack growth allocation.
	// Make sure stats don't change.
	readmemstats_m(&base)

	// Initialize slow from base and zero the fields we're
	// recomputing.
	slow.HeapReleased = 0
	var bySize [_NumSizeClasses]struct {
		Mallocs, Frees uint64
	}

	// Add up current allocations in spans.
	for _, s := range mheap_.allspans {
		if s.state.get() != mSpanInUse {
		if sizeclass := s.spanclass.sizeclass(); sizeclass == 0 {
			slow.Alloc += uint64(s.elemsize)
		} else {
			slow.Mallocs += uint64(s.allocCount)
			slow.Alloc += uint64(s.allocCount) * uint64(s.elemsize)
			bySize[sizeclass].Mallocs += uint64(s.allocCount)
		}

	// Add in frees. readmemstats_m flushed the cached stats, so
	// these are up-to-date.
	slow.Frees = mheap_.nlargefree
	for i := range mheap_.nsmallfree {
		slow.Frees += mheap_.nsmallfree[i]
		bySize[i].Frees = mheap_.nsmallfree[i]
		bySize[i].Mallocs += mheap_.nsmallfree[i]
		smallFree += mheap_.nsmallfree[i] * uint64(class_to_size[i])
	}
	slow.Frees += memstats.tinyallocs
	slow.Mallocs += slow.Frees

	slow.TotalAlloc = slow.Alloc + mheap_.largefree + smallFree

	for i := range slow.BySize {
		slow.BySize[i].Mallocs = bySize[i].Mallocs
		slow.BySize[i].Frees = bySize[i].Frees
	}

	for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
		pg := mheap_.pages.chunkOf(i).scavenged.popcntRange(0, pallocChunkPages)
		slow.HeapReleased += uint64(pg) * pageSize
	}
	for _, p := range allp {
		pg := sys.OnesCount64(p.pcache.scav)
		slow.HeapReleased += uint64(pg) * pageSize
	}

	// Unused space in the current arena also counts as released space.
	slow.HeapReleased += uint64(mheap_.curArena.end - mheap_.curArena.base)
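
// A hypothetical sketch of how a heap-consistency test is expected to use
// ReadMemStatsSlow: the runtime-maintained view and the heap-scan view should
// agree (only two of the recomputed fields are compared here, for brevity).
func exampleMemStatsAgree() bool {
	base, slow := ReadMemStatsSlow()
	return base.Mallocs == slow.Mallocs && base.Frees == slow.Frees
}
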
// BlockOnSystemStack switches to the system stack, prints "x\n" to
// stderr, and blocks in a stack containing
// "runtime.blockOnSystemStackInternal".
func BlockOnSystemStack() {
	systemstack(blockOnSystemStackInternal)
}

func blockOnSystemStackInternal() {

type RWMutex struct {

func (rw *RWMutex) RLock() {

func (rw *RWMutex) RUnlock() {

func (rw *RWMutex) Lock() {

func (rw *RWMutex) Unlock() {

const RuntimeHmapSize = unsafe.Sizeof(hmap{})

func MapBucketsCount(m map[int]int) int {
	h := *(**hmap)(unsafe.Pointer(&m))

func MapBucketsPointerIsNil(m map[int]int) bool {
	h := *(**hmap)(unsafe.Pointer(&m))
	return h.buckets == nil
}

func LockOSCounts() (external, internal uint32) {
	if g.m.lockedExt+g.m.lockedInt == 0 {
		panic("lockedm on non-locked goroutine")
		panic("nil lockedm on locked goroutine")
	return g.m.lockedExt, g.m.lockedInt

func TracebackSystemstack(stk []uintptr, i int) int {
	return callersRaw(stk)
	n = TracebackSystemstack(stk, i-1)

func KeepNArenaHints(n int) {
	hint := mheap_.arenaHints
	for i := 1; i < n; i++ {

// MapNextArenaHint reserves a page at the next arena growth hint,
// preventing the arena from growing there, and returns the range of
// addresses that are no longer viable.
func MapNextArenaHint() (start, end uintptr) {
	hint := mheap_.arenaHints
		start, end = addr-heapArenaBytes, addr
		start, end = addr, addr+heapArenaBytes
	sysReserve(unsafe.Pointer(addr), physPageSize)

func GetNextArenaHint() uintptr {
	return mheap_.arenaHints.addr
}

func PanicForTesting(b []byte, i int) byte {
	return unexportedPanicForTesting(b, i)
}

func unexportedPanicForTesting(b []byte, i int) byte {

func G0StackOverflow() {

func stackOverflow(x *byte) {
	stackOverflow(&buf[0])

func MapTombstoneCheck(m map[int]int) {
	// Make sure emptyOne and emptyRest are distributed correctly.
	// We should have a series of filled and emptyOne cells, followed by
	// a series of emptyRest cells.
	h := *(**hmap)(unsafe.Pointer(&m))
	t := *(**maptype)(unsafe.Pointer(&i))
	for x := 0; x < 1<<h.B; x++ {
		b0 := (*bmap)(add(h.buckets, uintptr(x)*uintptr(t.bucketsize)))
		for b := b0; b != nil; b = b.overflow(t) {
			for i := 0; i < bucketCnt; i++ {
				if b.tophash[i] != emptyRest {
		for b := b0; b != nil; b = b.overflow(t) {
			for i := 0; i < bucketCnt; i++ {
				if k < n && b.tophash[i] == emptyRest {
					panic("early emptyRest")
				}
				if k >= n && b.tophash[i] != emptyRest {
					panic("late non-emptyRest")
				}
				if k == n-1 && b.tophash[i] == emptyOne {
					panic("last non-emptyRest entry is emptyOne")
				}

func RunGetgThreadSwitchTest() {
	// Test that getg works correctly with thread switch.
	// With gccgo, if we generate getg inlined, the backend
	// may cache the address of the TLS variable, which
	// will become invalid after a thread switch. This test
	// checks that the bad caching doesn't happen.
	go func(ch chan int) {

	// Block on a receive. This is likely to get us a thread
	// switch. If we yield to the sender goroutine, it will
	// lock the thread, forcing us to resume on a different
	// thread.

	// Also test getg after some control flow, as the
	// backend is sensitive to control flow.
const (
	PallocChunkPages = pallocChunkPages
	PageAlloc64Bit   = pageAlloc64Bit
	PallocSumBytes   = pallocSumBytes
)

// Expose pallocSum for testing.
type PallocSum pallocSum

func PackPallocSum(start, max, end uint) PallocSum { return PallocSum(packPallocSum(start, max, end)) }
func (m PallocSum) Start() uint                    { return pallocSum(m).start() }
func (m PallocSum) Max() uint                      { return pallocSum(m).max() }
func (m PallocSum) End() uint                      { return pallocSum(m).end() }
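
// examplePallocSumRoundTrip is a hypothetical sketch (not part of the real
// export surface) of how the accessors above relate to PackPallocSum: for
// small in-range values, packing and then unpacking preserves the
// (start, max, end) triple.
func examplePallocSumRoundTrip() bool {
	s := PackPallocSum(1, 2, 3)
	return s.Start() == 1 && s.Max() == 2 && s.End() == 3
}
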
// Expose pallocBits for testing.
type PallocBits pallocBits

func (b *PallocBits) Find(npages uintptr, searchIdx uint) (uint, uint) {
	return (*pallocBits)(b).find(npages, searchIdx)
}

func (b *PallocBits) AllocRange(i, n uint)       { (*pallocBits)(b).allocRange(i, n) }
func (b *PallocBits) Free(i, n uint)             { (*pallocBits)(b).free(i, n) }
func (b *PallocBits) Summarize() PallocSum       { return PallocSum((*pallocBits)(b).summarize()) }
func (b *PallocBits) PopcntRange(i, n uint) uint { return (*pageBits)(b).popcntRange(i, n) }

// SummarizeSlow is a slow but more obviously correct implementation
// of (*pallocBits).summarize. Used for testing.
func SummarizeSlow(b *PallocBits) PallocSum {
	var start, max, end uint

	const N = uint(len(b)) * 64
	for start < N && (*pageBits)(b).get(start) == 0 {
	for end < N && (*pageBits)(b).get(N-end-1) == 0 {
	for i := uint(0); i < N; i++ {
		if (*pageBits)(b).get(i) == 0 {
	return PackPallocSum(start, max, end)
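
// exampleSummarizeCrossCheck is a hypothetical sketch of the intended use of
// SummarizeSlow: it serves as a reference against which the optimized
// Summarize can be checked for any bitmap.
func exampleSummarizeCrossCheck(b *PallocBits) bool {
	return b.Summarize() == SummarizeSlow(b)
}
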
// Expose non-trivial helpers for testing.
func FindBitRange64(c uint64, n uint) uint { return findBitRange64(c, n) }

// Given two PallocBits, returns a set of bit ranges where
// they differ.
func DiffPallocBits(a, b *PallocBits) []BitRange {
	base, size := uint(0), uint(0)
	for i := uint(0); i < uint(len(ba))*64; i++ {
		if ba.get(i) != bb.get(i) {
			d = append(d, BitRange{base, size})
	d = append(d, BitRange{base, size})
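
// exampleDiffPallocBits is a hypothetical sketch of using DiffPallocBits in a
// test: allocate the first page in a copy of b (assuming page 0 is free in
// b), and the diff against the original should be exactly one 1-bit range.
func exampleDiffPallocBits(b *PallocBits) []BitRange {
	c := *b
	c.AllocRange(0, 1)
	return DiffPallocBits(b, &c) // expect [{I: 0, N: 1}]
}
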
// StringifyPallocBits gets the bits in the bit range r from b,
// and returns a string containing the bits as ASCII 0 and 1
// characters.
func StringifyPallocBits(b *PallocBits, r BitRange) string {
	for j := r.I; j < r.I+r.N; j++ {
		if (*pageBits)(b).get(j) != 0 {

// Expose pallocData for testing.
type PallocData pallocData

func (d *PallocData) FindScavengeCandidate(searchIdx uint, min, max uintptr) (uint, uint) {
	return (*pallocData)(d).findScavengeCandidate(searchIdx, min, max)
}

func (d *PallocData) AllocRange(i, n uint) { (*pallocData)(d).allocRange(i, n) }

func (d *PallocData) ScavengedSetRange(i, n uint) {
	(*pallocData)(d).scavenged.setRange(i, n)
}

func (d *PallocData) PallocBits() *PallocBits {
	return (*PallocBits)(&(*pallocData)(d).pallocBits)
}

func (d *PallocData) Scavenged() *PallocBits {
	return (*PallocBits)(&(*pallocData)(d).scavenged)
}

// Expose fillAligned for testing.
func FillAligned(x uint64, m uint) uint64 { return fillAligned(x, m) }

// Expose pageCache for testing.
type PageCache pageCache

const PageCachePages = pageCachePages

func NewPageCache(base uintptr, cache, scav uint64) PageCache {
	return PageCache(pageCache{base: base, cache: cache, scav: scav})
}

func (c *PageCache) Empty() bool   { return (*pageCache)(c).empty() }
func (c *PageCache) Base() uintptr { return (*pageCache)(c).base }
func (c *PageCache) Cache() uint64 { return (*pageCache)(c).cache }
func (c *PageCache) Scav() uint64  { return (*pageCache)(c).scav }

func (c *PageCache) Alloc(npages uintptr) (uintptr, uintptr) {
	return (*pageCache)(c).alloc(npages)
}

func (c *PageCache) Flush(s *PageAlloc) {
	(*pageCache)(c).flush((*pageAlloc)(s))
}
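
// examplePageCache is a hypothetical sketch of the PageCache helpers above:
// build a cache whose low 8 pages are free (and marked scavenged), then
// allocate a single page from it. The base address is the caller's choice.
func examplePageCache(base uintptr) (uintptr, uintptr) {
	c := NewPageCache(base, 0xff, 0xff)
	return c.Alloc(1) // the page's address and the bytes of scavenged memory used
}
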
// Expose chunk index type.
type ChunkIdx chunkIdx

// Expose pageAlloc for testing. Note that because pageAlloc is
// not in the heap, neither is PageAlloc.
type PageAlloc pageAlloc

func (p *PageAlloc) Alloc(npages uintptr) (uintptr, uintptr) {
	return (*pageAlloc)(p).alloc(npages)
}

func (p *PageAlloc) AllocToCache() PageCache {
	return PageCache((*pageAlloc)(p).allocToCache())
}

func (p *PageAlloc) Free(base, npages uintptr) {
	(*pageAlloc)(p).free(base, npages)
}

func (p *PageAlloc) Bounds() (ChunkIdx, ChunkIdx) {
	return ChunkIdx((*pageAlloc)(p).start), ChunkIdx((*pageAlloc)(p).end)
}

func (p *PageAlloc) Scavenge(nbytes uintptr, mayUnlock bool) (r uintptr) {
	pp := (*pageAlloc)(p)
	r = pp.scavenge(nbytes, mayUnlock)

func (p *PageAlloc) InUse() []AddrRange {
	ranges := make([]AddrRange, 0, len(p.inUse.ranges))
	for _, r := range p.inUse.ranges {
		ranges = append(ranges, AddrRange{
			Limit: r.limit.addr(),

// Returns nil if the PallocData's L2 is missing.
func (p *PageAlloc) PallocData(i ChunkIdx) *PallocData {
	l2 := (*pageAlloc)(p).chunks[ci.l1()]
	return (*PallocData)(&l2[ci.l2()])

// AddrRange represents a range over addresses.
// Specifically, it represents the range [Base, Limit).
type AddrRange struct {

// BitRange represents a range over a bitmap.
type BitRange struct {
	I, N uint // bit index and length in bits
}

// NewPageAlloc creates a new page allocator for testing and
// initializes it with the scav and chunks maps. Each key in these maps
// represents a chunk index and each value is a series of bit ranges to
// set within each bitmap's chunk.
//
// The initialization of the pageAlloc preserves the invariant that if a
// scavenged bit is set the alloc bit is necessarily unset, so some
// of the bits described by scav may be cleared in the final bitmap if
// ranges in chunks overlap with them.
//
// scav is optional, and if nil, the scavenged bitmap will be cleared
// (as opposed to all 1s, which it usually is). Furthermore, every
// chunk index in scav must appear in chunks; ones that do not are
// ignored.
func NewPageAlloc(chunks, scav map[ChunkIdx][]BitRange) *PageAlloc {
	// We've got an entry, so initialize the pageAlloc.
	p.init(new(mutex), nil)
	lockInit(p.mheapLock, lockRankMheap)
	for i, init := range chunks {
		addr := chunkBase(chunkIdx(i))

		// Mark the chunk's existence in the pageAlloc.
		p.grow(addr, pallocChunkBytes)

		// Initialize the bitmap and update pageAlloc metadata.
		chunk := p.chunkOf(chunkIndex(addr))

		// Clear all the scavenged bits that grow just set.
		chunk.scavenged.clearRange(0, pallocChunkPages)

		// Apply scavenge state if applicable.
		if scvg, ok := scav[i]; ok {
			for _, s := range scvg {
				// Ignore the case of s.N == 0. setRange doesn't handle
				// it and it's a no-op anyway.
				if s.N != 0 {
					chunk.scavenged.setRange(s.I, s.N)
				}
			}
		}

		// Apply alloc state.
		for _, s := range init {
			// Ignore the case of s.N == 0. allocRange doesn't handle
			// it and it's a no-op anyway.
			if s.N != 0 {
				chunk.allocRange(s.I, s.N)
			}
		}

		// Update heap metadata for the allocRange calls above.
		p.update(addr, pallocChunkPages, false, false)

	return (*PageAlloc)(p)
// FreePageAlloc releases hard OS resources owned by the pageAlloc. Once this
// is called the pageAlloc may no longer be used. The object itself will be
// collected by the garbage collector once it is no longer live.
func FreePageAlloc(pp *PageAlloc) {
	p := (*pageAlloc)(pp)

	// Free all the mapped space for the summary levels.
	if pageAlloc64Bit != 0 {
		for l := 0; l < summaryLevels; l++ {
			sysFree(unsafe.Pointer(&p.summary[l][0]), uintptr(cap(p.summary[l]))*pallocSumBytes, nil)
		}
	} else {
		resSize := uintptr(0)
		for _, s := range p.summary {
			resSize += uintptr(cap(s)) * pallocSumBytes
		}
		sysFree(unsafe.Pointer(&p.summary[0][0]), alignUp(resSize, physPageSize), nil)
	}

	// Free the mapped space for chunks.
	for i := range p.chunks {
		if x := p.chunks[i]; x != nil {
			// This memory comes from sysAlloc and will always be page-aligned.
			sysFree(unsafe.Pointer(x), unsafe.Sizeof(*p.chunks[0]), nil)
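
// A hypothetical sketch (chunk choice and bit ranges are illustrative) of the
// NewPageAlloc / FreePageAlloc lifecycle used by page-allocator tests: build
// an allocator with one chunk whose first page is already allocated, exercise
// Alloc and Free on it, then release its OS resources.
func exampleNewPageAlloc() {
	p := NewPageAlloc(map[ChunkIdx][]BitRange{
		BaseChunkIdx: {{0, 1}}, // page 0 of the base chunk starts out allocated
	}, nil) // nil scav: the scavenged bitmap starts out clear
	defer FreePageAlloc(p)

	addr, _ := p.Alloc(1) // grab one free page
	p.Free(addr, 1)       // and give it back
}
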
// BaseChunkIdx is a convenient chunkIdx value which works on both
// 64 bit and 32 bit platforms, allowing the tests to share code
// between the two.
//
// This should not be higher than 0x100*pallocChunkBytes to support
// mips and mipsle, which only have 31-bit address spaces.
var BaseChunkIdx = ChunkIdx(chunkIndex(((0xc000*pageAlloc64Bit + 0x100*pageAlloc32Bit) * pallocChunkBytes) + arenaBaseOffset*sys.GoosAix*sys.GoarchPpc64))

// PageBase returns an address given a chunk index and a page index
// relative to that chunk.
func PageBase(c ChunkIdx, pageIdx uint) uintptr {
	return chunkBase(chunkIdx(c)) + uintptr(pageIdx)*pageSize
}
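
// examplePageBase is a hypothetical sketch of the address arithmetic above:
// page 4 of a chunk starts exactly 4*pageSize bytes past page 0 of that chunk.
func examplePageBase(c ChunkIdx) bool {
	return PageBase(c, 4) == PageBase(c, 0)+4*pageSize
}
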
type BitsMismatch struct {

func CheckScavengedBitsCleared(mismatches []BitsMismatch) (n int, ok bool) {
	// Run on the system stack to avoid stack growth allocation.
	// Lock so that we can safely access the bitmap.
	for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
		chunk := mheap_.pages.chunkOf(i)
		for j := 0; j < pallocChunkPages/64; j++ {
			// Run over each 64-bit bitmap section and ensure
			// scavenged is being cleared properly on allocation.
			// If a used bit and scavenged bit are both set, that's
			// an error, and could indicate a larger problem, or
			// an accounting problem.
			want := chunk.scavenged[j] &^ chunk.pallocBits[j]
			got := chunk.scavenged[j]
			if n >= len(mismatches) {
			mismatches[n] = BitsMismatch{
				Base: chunkBase(i) + uintptr(j)*64*pageSize,

func PageCachePagesLeaked() (leaked uintptr) {
	stopTheWorld("PageCachePagesLeaked")

	// Walk over destroyed Ps and look for unflushed caches.
	deadp := allp[len(allp):cap(allp)]
	for _, p := range deadp {
		// Since we're going past len(allp) we may see nil Ps.
		leaked += uintptr(sys.OnesCount64(p.pcache.cache))

var Semacquire = semacquire
var Semrelease1 = semrelease1

func SemNwait(addr *uint32) uint32 {
	root := semroot(addr)
	return atomic.Load(&root.nwait)
}

// MapHashCheck computes the hash of the key k for the map m, twice.
// Method 1 uses the built-in hasher for the map.
// Method 2 uses the typehash function (the one used by reflect).
// Returns the two hash values, which should always be equal.
func MapHashCheck(m interface{}, k interface{}) (uintptr, uintptr) {
	mt := (*maptype)(unsafe.Pointer(efaceOf(&m)._type))
	mh := (*hmap)(efaceOf(&m).data)
	kt := efaceOf(&k)._type
	if isDirectIface(kt) {
		q := efaceOf(&k).data
		p = unsafe.Pointer(&q)

	// Compute the hash functions.
	x := mt.hasher(noescape(p), uintptr(mh.hash0))
	y := typehash(kt, noescape(p), uintptr(mh.hash0))
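
// exampleMapHashCheck is a hypothetical sketch of the property MapHashCheck
// is meant to verify: for any map and key, the map's built-in hasher and
// typehash must agree.
func exampleMapHashCheck() bool {
	m := map[string]int{"a": 1}
	x, y := MapHashCheck(m, "a")
	return x == y
}
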
func MSpanCountAlloc(bits []byte) int {
	s := mspan{
		nelems:     uintptr(len(bits) * 8),
		gcmarkBits: (*gcBits)(unsafe.Pointer(&bits[0])),
	}
	return s.countAlloc()
}

var Pusestackmaps = &usestackmaps