[gcc.git] / libgo / go / runtime / mheap.go
1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
4
5 // Page heap.
6 //
7 // See malloc.go for overview.
8
9 package runtime
10
11 import (
12 "internal/cpu"
13 "runtime/internal/atomic"
14 "runtime/internal/sys"
15 "unsafe"
16 )
17
18 const (
19 // minPhysPageSize is a lower-bound on the physical page size. The
20 // true physical page size may be larger than this. In contrast,
21 // sys.PhysPageSize is an upper-bound on the physical page size.
22 minPhysPageSize = 4096
23
24 // maxPhysPageSize is the maximum page size the runtime supports.
25 maxPhysPageSize = 512 << 10
26
27 // maxPhysHugePageSize sets an upper-bound on the maximum huge page size
28 // that the runtime supports.
29 maxPhysHugePageSize = pallocChunkBytes
30
31 // pagesPerReclaimerChunk indicates how many pages to scan from the
32 // pageInUse bitmap at a time. Used by the page reclaimer.
33 //
34 // Higher values reduce contention on scanning indexes (such as
35 // h.reclaimIndex), but increase the minimum latency of the
36 // operation.
37 //
38 // The time required to scan this many pages can vary a lot depending
39 // on how many spans are actually freed. Experimentally, it can
40 // scan for pages at ~300 GB/ms on a 2.6GHz Core i7, but can only
41 // free spans at ~32 MB/ms. Using 512 pages bounds this at
42 // roughly 100µs.
43 //
44 // Must be a multiple of the pageInUse bitmap element size and
44 // must also evenly divide pagesPerArena.
46 pagesPerReclaimerChunk = 512
47
48 // go115NewMCentralImpl is a feature flag for the new mcentral implementation.
49 //
50 // This flag depends on go115NewMarkrootSpans because the new mcentral
51 // implementation requires that markroot spans no longer rely on mgcsweepbufs.
52 // The definition of this flag helps ensure that if there's a problem with
53 // the new markroot spans implementation and it gets turned off, the new
54 // mcentral implementation also gets turned off so the runtime isn't broken.
55 go115NewMCentralImpl = true && go115NewMarkrootSpans
56 )
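// Illustrative arithmetic only (not part of the original source): a rough
// check of the "roughly 100µs" bound above, assuming the runtime's 8 KiB
// page size. One reclaimer chunk covers 512 pages * 8 KiB = 4 MiB; at the
// ~32 MB/ms span-free rate quoted above, that is about
// 4 MiB / (32 MB/ms) ≈ 0.13 ms, i.e. on the order of 100µs.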
57
58 // Main malloc heap.
59 // The heap itself is the "free" and "scav" treaps,
60 // but all the other global data is here too.
61 //
62 // mheap must not be heap-allocated because it contains mSpanLists,
63 // which must not be heap-allocated.
64 //
65 //go:notinheap
66 type mheap struct {
67 // lock must only be acquired on the system stack, otherwise a g
68 // could self-deadlock if its stack grows with the lock held.
69 lock mutex
70 pages pageAlloc // page allocation data structure
71 sweepgen uint32 // sweep generation, see comment in mspan; written during STW
72 sweepdone uint32 // all spans are swept
73 sweepers uint32 // number of active sweepone calls
74
75 // allspans is a slice of all mspans ever created. Each mspan
76 // appears exactly once.
77 //
78 // The memory for allspans is manually managed and can be
79 // reallocated and moved as the heap grows.
80 //
81 // In general, allspans is protected by mheap_.lock, which
82 // prevents concurrent access as well as freeing the backing
83 // store. Accesses during STW might not hold the lock, but
84 // must ensure that allocation cannot happen around the
85 // access (since that may free the backing store).
86 allspans []*mspan // all spans out there
87
88 // sweepSpans contains two mspan stacks: one of swept in-use
89 // spans, and one of unswept in-use spans. These two trade
90 // roles on each GC cycle. Since the sweepgen increases by 2
91 // on each cycle, this means the swept spans are in
92 // sweepSpans[sweepgen/2%2] and the unswept spans are in
93 // sweepSpans[1-sweepgen/2%2]. Sweeping pops spans from the
94 // unswept stack and pushes spans that are still in-use on the
95 // swept stack. Likewise, allocating an in-use span pushes it
96 // on the swept stack.
97 //
98 // For !go115NewMCentralImpl.
99 sweepSpans [2]gcSweepBuf
100
101 _ uint32 // align uint64 fields on 32-bit for atomics
102
103 // Proportional sweep
104 //
105 // These parameters represent a linear function from heap_live
106 // to page sweep count. The proportional sweep system works to
107 // stay in the black by keeping the current page sweep count
108 // above this line at the current heap_live.
109 //
110 // The line has slope sweepPagesPerByte and passes through a
111 // basis point at (sweepHeapLiveBasis, pagesSweptBasis). At
112 // any given time, the system is at (memstats.heap_live,
113 // pagesSwept) in this space.
114 //
115 // It's important that the line pass through a point we
116 // control rather than simply starting at a (0,0) origin
117 // because that lets us adjust sweep pacing at any time while
118 // accounting for current progress. If we could only adjust
119 // the slope, it would create a discontinuity in debt if any
120 // progress has already been made.
121 pagesInUse uint64 // pages of spans in stats mSpanInUse; updated atomically
122 pagesSwept uint64 // pages swept this cycle; updated atomically
123 pagesSweptBasis uint64 // pagesSwept to use as the origin of the sweep ratio; updated atomically
124 sweepHeapLiveBasis uint64 // value of heap_live to use as the origin of sweep ratio; written with lock, read without
125 sweepPagesPerByte float64 // proportional sweep ratio; written with lock, read without
126 // TODO(austin): pagesInUse should be a uintptr, but the 386
127 // compiler can't 8-byte align fields.
128
129 // scavengeGoal is the amount of total retained heap memory (measured by
130 // heapRetained) that the runtime will try to maintain by returning memory
131 // to the OS.
132 scavengeGoal uint64
133
134 // Page reclaimer state
135
136 // reclaimIndex is the page index in allArenas of the next page to
137 // reclaim. Specifically, it refers to page (i %
138 // pagesPerArena) of arena allArenas[i / pagesPerArena].
139 //
140 // If this is >= 1<<63, the page reclaimer is done scanning
141 // the page marks.
142 //
143 // This is accessed atomically.
144 reclaimIndex uint64
145 // reclaimCredit is spare credit for extra pages swept. Since
146 // the page reclaimer works in large chunks, it may reclaim
147 // more than requested. Any spare pages released go to this
148 // credit pool.
149 //
150 // This is accessed atomically.
151 reclaimCredit uintptr
152
153 // Malloc stats.
154 largealloc uint64 // bytes allocated for large objects
155 nlargealloc uint64 // number of large object allocations
156 largefree uint64 // bytes freed for large objects (>maxsmallsize)
157 nlargefree uint64 // number of frees for large objects (>maxsmallsize)
158 nsmallfree [_NumSizeClasses]uint64 // number of frees for small objects (<=maxsmallsize)
159
160 // arenas is the heap arena map. It points to the metadata for
161 // the heap for every arena frame of the entire usable virtual
162 // address space.
163 //
164 // Use arenaIndex to compute indexes into this array.
165 //
166 // For regions of the address space that are not backed by the
167 // Go heap, the arena map contains nil.
168 //
169 // Modifications are protected by mheap_.lock. Reads can be
170 // performed without locking; however, a given entry can
171 // transition from nil to non-nil at any time when the lock
172 // isn't held. (Entries never transition back to nil.)
173 //
174 // In general, this is a two-level mapping consisting of an L1
175 // map and possibly many L2 maps. This saves space when there
176 // are a huge number of arena frames. However, on many
177 // platforms (even 64-bit), arenaL1Bits is 0, making this
178 // effectively a single-level map. In this case, arenas[0]
179 // will never be nil.
180 arenas [1 << arenaL1Bits]*[1 << arenaL2Bits]*heapArena
181
182 // heapArenaAlloc is pre-reserved space for allocating heapArena
183 // objects. This is only used on 32-bit, where we pre-reserve
184 // this space to avoid interleaving it with the heap itself.
185 heapArenaAlloc linearAlloc
186
187 // arenaHints is a list of addresses at which to attempt to
188 // add more heap arenas. This is initially populated with a
189 // set of general hint addresses, and grown with the bounds of
190 // actual heap arena ranges.
191 arenaHints *arenaHint
192
193 // arena is a pre-reserved space for allocating heap arenas
194 // (the actual arenas). This is only used on 32-bit.
195 arena linearAlloc
196
197 // allArenas is the arenaIndex of every mapped arena. This can
198 // be used to iterate through the address space.
199 //
200 // Access is protected by mheap_.lock. However, since this is
201 // append-only and old backing arrays are never freed, it is
202 // safe to acquire mheap_.lock, copy the slice header, and
203 // then release mheap_.lock.
204 allArenas []arenaIdx
205
206 // sweepArenas is a snapshot of allArenas taken at the
207 // beginning of the sweep cycle. This can be read safely by
208 // simply blocking GC (by disabling preemption).
209 sweepArenas []arenaIdx
210
211 // markArenas is a snapshot of allArenas taken at the beginning
212 // of the mark cycle. Because allArenas is append-only, neither
213 // this slice nor its contents will change during the mark, so
214 // it can be read safely.
215 markArenas []arenaIdx
216
217 // curArena is the arena that the heap is currently growing
218 // into. This should always be physPageSize-aligned.
219 curArena struct {
220 base, end uintptr
221 }
222
223 // _ uint32 // ensure 64-bit alignment of central
224
225 // central free lists for small size classes.
226 // the padding makes sure that the mcentrals are
227 // spaced CacheLinePadSize bytes apart, so that each mcentral.lock
228 // gets its own cache line.
229 // central is indexed by spanClass.
230 central [numSpanClasses]struct {
231 mcentral mcentral
232 pad [cpu.CacheLinePadSize - unsafe.Sizeof(mcentral{})%cpu.CacheLinePadSize]byte
233 }
234
235 spanalloc fixalloc // allocator for span*
236 cachealloc fixalloc // allocator for mcache*
237 specialfinalizeralloc fixalloc // allocator for specialfinalizer*
238 specialprofilealloc fixalloc // allocator for specialprofile*
239 speciallock mutex // lock for special record allocators.
240 arenaHintAlloc fixalloc // allocator for arenaHints
241
242 unused *specialfinalizer // never set, just here to force the specialfinalizer type into DWARF
243 }
244
245 var mheap_ mheap
246
247 // A heapArena stores metadata for a heap arena. heapArenas are stored
248 // outside of the Go heap and accessed via the mheap_.arenas index.
249 //
250 //go:notinheap
251 type heapArena struct {
252 // bitmap stores the pointer/scalar bitmap for the words in
253 // this arena. See mbitmap.go for a description. Use the
254 // heapBits type to access this.
255 bitmap [heapArenaBitmapBytes]byte
256
257 // spans maps from virtual address page ID within this arena to *mspan.
258 // For allocated spans, their pages map to the span itself.
259 // For free spans, only the lowest and highest pages map to the span itself.
260 // Internal pages map to an arbitrary span.
261 // For pages that have never been allocated, spans entries are nil.
262 //
263 // Modifications are protected by mheap.lock. Reads can be
264 // performed without locking, but ONLY from indexes that are
265 // known to contain in-use or stack spans. This means there
266 // must not be a safe-point between establishing that an
267 // address is live and looking it up in the spans array.
268 spans [pagesPerArena]*mspan
269
270 // pageInUse is a bitmap that indicates which spans are in
271 // state mSpanInUse. This bitmap is indexed by page number,
272 // but only the bit corresponding to the first page in each
273 // span is used.
274 //
275 // Reads and writes are atomic.
276 pageInUse [pagesPerArena / 8]uint8
277
278 // pageMarks is a bitmap that indicates which spans have any
279 // marked objects on them. Like pageInUse, only the bit
280 // corresponding to the first page in each span is used.
281 //
282 // Writes are done atomically during marking. Reads are
283 // non-atomic and lock-free since they only occur during
284 // sweeping (and hence never race with writes).
285 //
286 // This is used to quickly find whole spans that can be freed.
287 //
288 // TODO(austin): It would be nice if this was uint64 for
289 // faster scanning, but we don't have 64-bit atomic bit
290 // operations.
291 pageMarks [pagesPerArena / 8]uint8
292
293 // pageSpecials is a bitmap that indicates which spans have
294 // specials (finalizers or other). Like pageInUse, only the bit
295 // corresponding to the first page in each span is used.
296 //
297 // Writes are done atomically whenever a special is added to
298 // a span and whenever the last special is removed from a span.
299 // Reads are done atomically to find spans containing specials
300 // during marking.
301 pageSpecials [pagesPerArena / 8]uint8
302
303 // zeroedBase marks the first byte of the first page in this
304 // arena which hasn't been used yet and is therefore already
305 // zero. zeroedBase is relative to the arena base.
306 // Increases monotonically until it hits heapArenaBytes.
307 //
308 // This field is sufficient to determine if an allocation
309 // needs to be zeroed because the page allocator follows an
310 // address-ordered first-fit policy.
311 //
312 // Read atomically and written with an atomic CAS.
313 zeroedBase uintptr
314 }
315
316 // arenaHint is a hint for where to grow the heap arenas. See
317 // mheap_.arenaHints.
318 //
319 //go:notinheap
320 type arenaHint struct {
321 addr uintptr
322 down bool
323 next *arenaHint
324 }
325
326 // An mspan is a run of pages.
327 //
328 // When a mspan is in the heap free treap, state == mSpanFree
329 // and heapmap(s->start) == span, heapmap(s->start+s->npages-1) == span.
330 // If the mspan is in the heap scav treap, then in addition to the
331 // above scavenged == true. scavenged == false in all other cases.
332 //
333 // When a mspan is allocated, state == mSpanInUse or mSpanManual
334 // and heapmap(i) == span for all s->start <= i < s->start+s->npages.
335
336 // Every mspan is in one doubly-linked list, either in the mheap's
337 // busy list or one of the mcentral's span lists.
338
339 // An mspan representing actual memory has state mSpanInUse,
340 // mSpanManual, or mSpanFree. Transitions between these states are
341 // constrained as follows:
342 //
343 // * A span may transition from free to in-use or manual during any GC
344 // phase.
345 //
346 // * During sweeping (gcphase == _GCoff), a span may transition from
347 // in-use to free (as a result of sweeping) or manual to free (as a
348 // result of stacks being freed).
349 //
350 // * During GC (gcphase != _GCoff), a span *must not* transition from
351 // manual or in-use to free. Because concurrent GC may read a pointer
352 // and then look up its span, the span state must be monotonic.
353 //
354 // Setting mspan.state to mSpanInUse or mSpanManual must be done
355 // atomically and only after all other span fields are valid.
356 // Likewise, if inspecting a span is contingent on it being
357 // mSpanInUse, the state should be loaded atomically and checked
358 // before depending on other fields. This allows the garbage collector
359 // to safely deal with potentially invalid pointers, since resolving
360 // such pointers may race with a span being allocated.
361 type mSpanState uint8
362
363 const (
364 mSpanDead mSpanState = iota
365 mSpanInUse // allocated for garbage collected heap
366 mSpanManual // allocated for manual management (e.g., stack allocator)
367 )
368
369 // mSpanStateNames are the names of the span states, indexed by
370 // mSpanState.
371 var mSpanStateNames = []string{
372 "mSpanDead",
373 "mSpanInUse",
374 "mSpanManual",
375 "mSpanFree",
376 }
377
378 // mSpanStateBox holds an mSpanState and provides atomic operations on
379 // it. This is a separate type to disallow accidental comparison or
380 // assignment with mSpanState.
381 type mSpanStateBox struct {
382 s mSpanState
383 }
384
385 func (b *mSpanStateBox) set(s mSpanState) {
386 atomic.Store8((*uint8)(&b.s), uint8(s))
387 }
388
389 func (b *mSpanStateBox) get() mSpanState {
390 return mSpanState(atomic.Load8((*uint8)(&b.s)))
391 }
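// Illustrative sketch (hypothetical helper, not part of the original
// source): callers that only trust fully initialized spans load the boxed
// state atomically before relying on other span fields, as described in
// the mSpanState comment above.
func exampleSpanLooksInUse(s *mspan) bool {
	return s != nil && s.state.get() == mSpanInUse
}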
392
393 // mSpanList heads a linked list of spans.
394 //
395 //go:notinheap
396 type mSpanList struct {
397 first *mspan // first span in list, or nil if none
398 last *mspan // last span in list, or nil if none
399 }
400
401 //go:notinheap
402 type mspan struct {
403 next *mspan // next span in list, or nil if none
404 prev *mspan // previous span in list, or nil if none
405 list *mSpanList // For debugging. TODO: Remove.
406
407 startAddr uintptr // address of first byte of span aka s.base()
408 npages uintptr // number of pages in span
409
410 manualFreeList gclinkptr // list of free objects in mSpanManual spans
411
412 // freeindex is the slot index between 0 and nelems at which to begin scanning
413 // for the next free object in this span.
414 // Each allocation scans allocBits starting at freeindex until it encounters a 0
415 // indicating a free object. freeindex is then adjusted so that subsequent scans begin
416 // just past the newly discovered free object.
417 //
418 // If freeindex == nelems, this span has no free objects.
419 //
420 // allocBits is a bitmap of objects in this span.
421 // If n >= freeindex and allocBits[n/8] & (1<<(n%8)) is 0
422 // then object n is free;
423 // otherwise, object n is allocated. Bits starting at nelems are
424 // undefined and should never be referenced.
425 //
426 // Object n starts at address n*elemsize + (start << pageShift).
427 freeindex uintptr
428 // TODO: Look up nelems from sizeclass and remove this field if it
429 // helps performance.
430 nelems uintptr // number of objects in the span.
431
432 // Cache of the allocBits at freeindex. allocCache is shifted
433 // such that the lowest bit corresponds to the bit freeindex.
434 // allocCache holds the complement of allocBits, thus allowing
435 // ctz (count trailing zero) to use it directly.
436 // allocCache may contain bits beyond s.nelems; the caller must ignore
437 // these.
438 allocCache uint64
439
440 // allocBits and gcmarkBits hold pointers to a span's mark and
441 // allocation bits. The pointers are 8 byte aligned.
442 // There are three arenas where this data is held.
443 // free: Dirty arenas that are no longer accessed
444 // and can be reused.
445 // next: Holds information to be used in the next GC cycle.
446 // current: Information being used during this GC cycle.
447 // previous: Information being used during the last GC cycle.
448 // A new GC cycle starts with the call to finishsweep_m.
449 // finishsweep_m moves the previous arena to the free arena,
450 // the current arena to the previous arena, and
451 // the next arena to the current arena.
452 // The next arena is populated as the spans request
453 // memory to hold gcmarkBits for the next GC cycle as well
454 // as allocBits for newly allocated spans.
455 //
456 // The pointer arithmetic is done "by hand" instead of using
457 // arrays to avoid bounds checks along critical performance
458 // paths.
459 // The sweep will free the old allocBits and set allocBits to the
460 // gcmarkBits. The gcmarkBits are replaced with a fresh zeroed
461 // out memory.
462 allocBits *gcBits
463 gcmarkBits *gcBits
464
465 // sweep generation:
466 // if sweepgen == h->sweepgen - 2, the span needs sweeping
467 // if sweepgen == h->sweepgen - 1, the span is currently being swept
468 // if sweepgen == h->sweepgen, the span is swept and ready to use
469 // if sweepgen == h->sweepgen + 1, the span was cached before sweep began and is still cached, and needs sweeping
470 // if sweepgen == h->sweepgen + 3, the span was swept and then cached and is still cached
471 // h->sweepgen is incremented by 2 after every GC
472
473 sweepgen uint32
474 divMul uint16 // for divide by elemsize - divMagic.mul
475 baseMask uint16 // if non-0, elemsize is a power of 2, & this will get object allocation base
476 allocCount uint16 // number of allocated objects
477 spanclass spanClass // size class and noscan (uint8)
478 state mSpanStateBox // mSpanInUse etc; accessed atomically (get/set methods)
479 needzero uint8 // needs to be zeroed before allocation
480 divShift uint8 // for divide by elemsize - divMagic.shift
481 divShift2 uint8 // for divide by elemsize - divMagic.shift2
482 elemsize uintptr // computed from sizeclass or from npages
483 limit uintptr // end of data in span
484 speciallock mutex // guards specials list
485 specials *special // linked list of special records sorted by offset.
486 }
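// Illustrative sketch (hypothetical helper, not part of the original
// source): the allocation-bit test described by the freeindex/allocBits
// comment in mspan above, written against a plain byte slice for clarity.
func exampleObjectIsFree(allocBits []uint8, freeindex, nelems, n uintptr) bool {
	if n < freeindex || n >= nelems {
		// Objects below freeindex are treated as allocated; bits at or
		// beyond nelems are undefined and must not be consulted.
		return false
	}
	return allocBits[n/8]&(1<<(n%8)) == 0
}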
487
488 func (s *mspan) base() uintptr {
489 return s.startAddr
490 }
491
492 func (s *mspan) layout() (size, n, total uintptr) {
493 total = s.npages << _PageShift
494 size = s.elemsize
495 if size > 0 {
496 n = total / size
497 }
498 return
499 }
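// Illustrative sketch (hypothetical helper, not part of the original
// source): interpreting a span's sweepgen relative to the heap's sweepgen,
// following the state table in the sweepgen comment inside mspan above.
// Both the sweepgen-2 and sweepgen+1 states indicate a span that still
// needs sweeping.
func exampleSpanNeedsSweeping(s *mspan, heapSweepgen uint32) bool {
	sg := atomic.Load(&s.sweepgen)
	return sg == heapSweepgen-2 || sg == heapSweepgen+1
}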
500
501 // recordspan adds a newly allocated span to h.allspans.
502 //
503 // This only happens the first time a span is allocated from
504 // mheap.spanalloc (it is not called when a span is reused).
505 //
506 // Write barriers are disallowed here because it can be called from
507 // gcWork when allocating new workbufs. However, because it's an
508 // indirect call from the fixalloc initializer, the compiler can't see
509 // this.
510 //
511 //go:nowritebarrierrec
512 func recordspan(vh unsafe.Pointer, p unsafe.Pointer) {
513 h := (*mheap)(vh)
514 s := (*mspan)(p)
515 if len(h.allspans) >= cap(h.allspans) {
516 n := 64 * 1024 / sys.PtrSize
517 if n < cap(h.allspans)*3/2 {
518 n = cap(h.allspans) * 3 / 2
519 }
520 var new []*mspan
521 sp := (*notInHeapSlice)(unsafe.Pointer(&new))
522 sp.array = (*notInHeap)(sysAlloc(uintptr(n)*sys.PtrSize, &memstats.other_sys))
523 if sp.array == nil {
524 throw("runtime: cannot allocate memory")
525 }
526 sp.len = len(h.allspans)
527 sp.cap = n
528 if len(h.allspans) > 0 {
529 copy(new, h.allspans)
530 }
531 oldAllspans := h.allspans
532 *(*notInHeapSlice)(unsafe.Pointer(&h.allspans)) = *(*notInHeapSlice)(unsafe.Pointer(&new))
533 if len(oldAllspans) != 0 {
534 sysFree(unsafe.Pointer(&oldAllspans[0]), uintptr(cap(oldAllspans))*unsafe.Sizeof(oldAllspans[0]), &memstats.other_sys)
535 }
536 }
537 h.allspans = h.allspans[:len(h.allspans)+1]
538 h.allspans[len(h.allspans)-1] = s
539 }
540
541 // A spanClass represents the size class and noscan-ness of a span.
542 //
543 // Each size class has a noscan spanClass and a scan spanClass. The
544 // noscan spanClass contains only noscan objects, which do not contain
545 // pointers and thus do not need to be scanned by the garbage
546 // collector.
547 type spanClass uint8
548
549 const (
550 numSpanClasses = _NumSizeClasses << 1
551 tinySpanClass = spanClass(tinySizeClass<<1 | 1)
552 )
553
554 func makeSpanClass(sizeclass uint8, noscan bool) spanClass {
555 return spanClass(sizeclass<<1) | spanClass(bool2int(noscan))
556 }
557
558 func (sc spanClass) sizeclass() int8 {
559 return int8(sc >> 1)
560 }
561
562 func (sc spanClass) noscan() bool {
563 return sc&1 != 0
564 }
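// Illustrative sketch (hypothetical helper, not part of the original
// source): the spanClass encoding above packs the size class in the upper
// bits and the noscan flag in bit 0, so construction and the accessors
// round-trip.
func exampleSpanClassRoundTrip(sizeclass uint8, noscan bool) (int8, bool) {
	sc := makeSpanClass(sizeclass, noscan)
	return sc.sizeclass(), sc.noscan() // == int8(sizeclass), noscan
}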
565
566 // arenaIndex returns the index into mheap_.arenas of the arena
567 // containing metadata for p. This index combines an index into the
568 // L1 map and an index into the L2 map and should be used as
569 // mheap_.arenas[ai.l1()][ai.l2()].
570 //
571 // If p is outside the range of valid heap addresses, either l1() or
572 // l2() will be out of bounds.
573 //
574 // It is nosplit because it's called by spanOf and several other
575 // nosplit functions.
576 //
577 //go:nosplit
578 func arenaIndex(p uintptr) arenaIdx {
579 return arenaIdx((p - arenaBaseOffset) / heapArenaBytes)
580 }
581
582 // arenaBase returns the low address of the region covered by heap
583 // arena i.
584 func arenaBase(i arenaIdx) uintptr {
585 return uintptr(i)*heapArenaBytes + arenaBaseOffset
586 }
587
588 type arenaIdx uint
589
590 func (i arenaIdx) l1() uint {
591 if arenaL1Bits == 0 {
592 // Let the compiler optimize this away if there's no
593 // L1 map.
594 return 0
595 } else {
596 return uint(i) >> arenaL1Shift
597 }
598 }
599
600 func (i arenaIdx) l2() uint {
601 if arenaL1Bits == 0 {
602 return uint(i)
603 } else {
604 return uint(i) & (1<<arenaL2Bits - 1)
605 }
606 }
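// Illustrative sketch (hypothetical helper, not part of the original
// source): arenaIndex and arenaBase invert each other, so every heap
// address p satisfies the relationship checked here.
func exampleArenaIndexInvariant(p uintptr) bool {
	base := arenaBase(arenaIndex(p))
	return base <= p && p < base+heapArenaBytes
}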
607
608 // inheap reports whether b is a pointer into a (potentially dead) heap object.
609 // It returns false for pointers into mSpanManual spans.
610 // Non-preemptible because it is used by write barriers.
611 //go:nowritebarrier
612 //go:nosplit
613 func inheap(b uintptr) bool {
614 return spanOfHeap(b) != nil
615 }
616
617 // inHeapOrStack is a variant of inheap that returns true for pointers
618 // into any allocated heap span.
619 //
620 //go:nowritebarrier
621 //go:nosplit
622 func inHeapOrStack(b uintptr) bool {
623 s := spanOf(b)
624 if s == nil || b < s.base() {
625 return false
626 }
627 switch s.state.get() {
628 case mSpanInUse, mSpanManual:
629 return b < s.limit
630 default:
631 return false
632 }
633 }
634
635 // spanOf returns the span of p. If p does not point into the heap
636 // arena or no span has ever contained p, spanOf returns nil.
637 //
638 // If p does not point to allocated memory, this may return a non-nil
639 // span that does *not* contain p. If this is a possibility, the
640 // caller should either call spanOfHeap or check the span bounds
641 // explicitly.
642 //
643 // Must be nosplit because it has callers that are nosplit.
644 //
645 //go:nosplit
646 func spanOf(p uintptr) *mspan {
647 // This function looks big, but we use a lot of constant
648 // folding around arenaL1Bits to get it under the inlining
649 // budget. Also, many of the checks here are safety checks
650 // that Go needs to do anyway, so the generated code is quite
651 // short.
652 ri := arenaIndex(p)
653 if arenaL1Bits == 0 {
654 // If there's no L1, then ri.l1() can't be out of bounds but ri.l2() can.
655 if ri.l2() >= uint(len(mheap_.arenas[0])) {
656 return nil
657 }
658 } else {
659 // If there's an L1, then ri.l1() can be out of bounds but ri.l2() can't.
660 if ri.l1() >= uint(len(mheap_.arenas)) {
661 return nil
662 }
663 }
664 l2 := mheap_.arenas[ri.l1()]
665 if arenaL1Bits != 0 && l2 == nil { // Should never happen if there's no L1.
666 return nil
667 }
668 ha := l2[ri.l2()]
669 if ha == nil {
670 return nil
671 }
672 return ha.spans[(p/pageSize)%pagesPerArena]
673 }
674
675 // spanOfUnchecked is equivalent to spanOf, but the caller must ensure
676 // that p points into an allocated heap arena.
677 //
678 // Must be nosplit because it has callers that are nosplit.
679 //
680 //go:nosplit
681 func spanOfUnchecked(p uintptr) *mspan {
682 ai := arenaIndex(p)
683 return mheap_.arenas[ai.l1()][ai.l2()].spans[(p/pageSize)%pagesPerArena]
684 }
685
686 // spanOfHeap is like spanOf, but returns nil if p does not point to a
687 // heap object.
688 //
689 // Must be nosplit because it has callers that are nosplit.
690 //
691 //go:nosplit
692 func spanOfHeap(p uintptr) *mspan {
693 s := spanOf(p)
694 // s is nil if it's never been allocated. Otherwise, we check
695 // its state first because we don't trust this pointer, so we
696 // have to synchronize with span initialization. Then, it's
697 // still possible we picked up a stale span pointer, so we
698 // have to check the span's bounds.
699 if s == nil || s.state.get() != mSpanInUse || p < s.base() || p >= s.limit {
700 return nil
701 }
702 return s
703 }
704
705 // pageIndexOf returns the arena, page index, and page mask for pointer p.
706 // The caller must ensure p is in the heap.
707 func pageIndexOf(p uintptr) (arena *heapArena, pageIdx uintptr, pageMask uint8) {
708 ai := arenaIndex(p)
709 arena = mheap_.arenas[ai.l1()][ai.l2()]
710 pageIdx = ((p / pageSize) / 8) % uintptr(len(arena.pageInUse))
711 pageMask = byte(1 << ((p / pageSize) % 8))
712 return
713 }
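// Illustrative sketch (hypothetical helper, not part of the original
// source): using the triple returned by pageIndexOf to test the in-use
// bit for the page containing p, mirroring how the page reclaimer and
// allocSpan manipulate pageInUse elsewhere in this file.
func examplePageIsInUse(p uintptr) bool {
	arena, pageIdx, pageMask := pageIndexOf(p)
	return atomic.Load8(&arena.pageInUse[pageIdx])&pageMask != 0
}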
714
715 // Initialize the heap.
716 func (h *mheap) init() {
717 lockInit(&h.lock, lockRankMheap)
718 lockInit(&h.sweepSpans[0].spineLock, lockRankSpine)
719 lockInit(&h.sweepSpans[1].spineLock, lockRankSpine)
720 lockInit(&h.speciallock, lockRankMheapSpecial)
721
722 h.spanalloc.init(unsafe.Sizeof(mspan{}), recordspan, unsafe.Pointer(h), &memstats.mspan_sys)
723 h.cachealloc.init(unsafe.Sizeof(mcache{}), nil, nil, &memstats.mcache_sys)
724 h.specialfinalizeralloc.init(unsafe.Sizeof(specialfinalizer{}), nil, nil, &memstats.other_sys)
725 h.specialprofilealloc.init(unsafe.Sizeof(specialprofile{}), nil, nil, &memstats.other_sys)
726 h.arenaHintAlloc.init(unsafe.Sizeof(arenaHint{}), nil, nil, &memstats.other_sys)
727
728 // Don't zero mspan allocations. Background sweeping can
729 // inspect a span concurrently with allocating it, so it's
730 // important that the span's sweepgen survive across freeing
731 // and re-allocating a span to prevent background sweeping
732 // from improperly cas'ing it from 0.
733 //
734 // This is safe because mspan contains no heap pointers.
735 h.spanalloc.zero = false
736
737 // h->mapcache needs no init
738
739 for i := range h.central {
740 h.central[i].mcentral.init(spanClass(i))
741 }
742
743 h.pages.init(&h.lock, &memstats.gc_sys)
744 }
745
746 // reclaim sweeps and reclaims at least npage pages into the heap.
747 // It is called before allocating npage pages to keep growth in check.
748 //
749 // reclaim implements the page-reclaimer half of the sweeper.
750 //
751 // h must NOT be locked.
752 func (h *mheap) reclaim(npage uintptr) {
753 // TODO(austin): Half of the time spent freeing spans is in
754 // locking/unlocking the heap (even with low contention). We
755 // could make the slow path here several times faster by
756 // batching heap frees.
757
758 // Bail early if there's no more reclaim work.
759 if atomic.Load64(&h.reclaimIndex) >= 1<<63 {
760 return
761 }
762
763 // Disable preemption so the GC can't start while we're
764 // sweeping, so we can read h.sweepArenas, and so
765 // traceGCSweepStart/Done pair on the P.
766 mp := acquirem()
767
768 if trace.enabled {
769 traceGCSweepStart()
770 }
771
772 arenas := h.sweepArenas
773 locked := false
774 for npage > 0 {
775 // Pull from accumulated credit first.
776 if credit := atomic.Loaduintptr(&h.reclaimCredit); credit > 0 {
777 take := credit
778 if take > npage {
779 // Take only what we need.
780 take = npage
781 }
782 if atomic.Casuintptr(&h.reclaimCredit, credit, credit-take) {
783 npage -= take
784 }
785 continue
786 }
787
788 // Claim a chunk of work.
789 idx := uintptr(atomic.Xadd64(&h.reclaimIndex, pagesPerReclaimerChunk) - pagesPerReclaimerChunk)
790 if idx/pagesPerArena >= uintptr(len(arenas)) {
791 // Page reclaiming is done.
792 atomic.Store64(&h.reclaimIndex, 1<<63)
793 break
794 }
795
796 if !locked {
797 // Lock the heap for reclaimChunk.
798 lock(&h.lock)
799 locked = true
800 }
801
802 // Scan this chunk.
803 nfound := h.reclaimChunk(arenas, idx, pagesPerReclaimerChunk)
804 if nfound <= npage {
805 npage -= nfound
806 } else {
807 // Put spare pages toward global credit.
808 atomic.Xadduintptr(&h.reclaimCredit, nfound-npage)
809 npage = 0
810 }
811 }
812 if locked {
813 unlock(&h.lock)
814 }
815
816 if trace.enabled {
817 traceGCSweepDone()
818 }
819 releasem(mp)
820 }
821
822 // reclaimChunk sweeps unmarked spans that start at page indexes [pageIdx, pageIdx+n).
823 // It returns the number of pages returned to the heap.
824 //
825 // h.lock must be held and the caller must be non-preemptible. Note: h.lock may be
826 // temporarily unlocked and re-locked in order to do sweeping or if tracing is
827 // enabled.
828 func (h *mheap) reclaimChunk(arenas []arenaIdx, pageIdx, n uintptr) uintptr {
829 // The heap lock must be held because this accesses the
830 // heapArena.spans arrays using potentially non-live pointers.
831 // In particular, if a span were freed and merged concurrently
832 // with this probing heapArena.spans, it would be possible to
833 // observe arbitrary, stale span pointers.
834 n0 := n
835 var nFreed uintptr
836 sg := h.sweepgen
837 for n > 0 {
838 ai := arenas[pageIdx/pagesPerArena]
839 ha := h.arenas[ai.l1()][ai.l2()]
840
841 // Get a chunk of the bitmap to work on.
842 arenaPage := uint(pageIdx % pagesPerArena)
843 inUse := ha.pageInUse[arenaPage/8:]
844 marked := ha.pageMarks[arenaPage/8:]
845 if uintptr(len(inUse)) > n/8 {
846 inUse = inUse[:n/8]
847 marked = marked[:n/8]
848 }
849
850 // Scan this bitmap chunk for spans that are in-use
851 // but have no marked objects on them.
852 for i := range inUse {
853 inUseUnmarked := atomic.Load8(&inUse[i]) &^ marked[i]
854 if inUseUnmarked == 0 {
855 continue
856 }
857
858 for j := uint(0); j < 8; j++ {
859 if inUseUnmarked&(1<<j) != 0 {
860 s := ha.spans[arenaPage+uint(i)*8+j]
861 if atomic.Load(&s.sweepgen) == sg-2 && atomic.Cas(&s.sweepgen, sg-2, sg-1) {
862 npages := s.npages
863 unlock(&h.lock)
864 if s.sweep(false) {
865 nFreed += npages
866 }
867 lock(&h.lock)
868 // Reload inUse. It's possible nearby
869 // spans were freed when we dropped the
870 // lock and we don't want to get stale
871 // pointers from the spans array.
872 inUseUnmarked = atomic.Load8(&inUse[i]) &^ marked[i]
873 }
874 }
875 }
876 }
877
878 // Advance.
879 pageIdx += uintptr(len(inUse) * 8)
880 n -= uintptr(len(inUse) * 8)
881 }
882 if trace.enabled {
883 unlock(&h.lock)
884 // Account for pages scanned but not reclaimed.
885 traceGCSweepSpan((n0 - nFreed) * pageSize)
886 lock(&h.lock)
887 }
888 return nFreed
889 }
890
891 // alloc allocates a new span of npage pages from the GC'd heap.
892 //
893 // spanclass indicates the span's size class and scannability.
894 //
895 // If needzero is true, the memory for the returned span will be zeroed.
896 func (h *mheap) alloc(npages uintptr, spanclass spanClass, needzero bool) *mspan {
897 // Don't do any operations that lock the heap on the G stack.
898 // It might trigger stack growth, and the stack growth code needs
899 // to be able to allocate heap.
900 var s *mspan
901 systemstack(func() {
902 // To prevent excessive heap growth, before allocating n pages
903 // we need to sweep and reclaim at least n pages.
904 if h.sweepdone == 0 {
905 h.reclaim(npages)
906 }
907 s = h.allocSpan(npages, false, spanclass, &memstats.heap_inuse)
908 })
909
910 if s != nil {
911 if needzero && s.needzero != 0 {
912 memclrNoHeapPointers(unsafe.Pointer(s.base()), s.npages<<_PageShift)
913 }
914 s.needzero = 0
915 }
916 return s
917 }
918
919 // allocManual allocates a manually-managed span of npage pages.
920 // allocManual returns nil if allocation fails.
921 //
922 // allocManual adds the bytes used to *stat, which should be a
923 // memstats in-use field. Unlike allocations in the GC'd heap, the
924 // allocation does *not* count toward heap_inuse or heap_sys.
925 //
926 // The memory backing the returned span may not be zeroed if
927 // span.needzero is set.
928 //
929 // allocManual must be called on the system stack because it may
930 // acquire the heap lock via allocSpan. See mheap for details.
931 //
932 //go:systemstack
933 func (h *mheap) allocManual(npages uintptr, stat *uint64) *mspan {
934 return h.allocSpan(npages, true, 0, stat)
935 }
936
937 // setSpans modifies the span map so [spanOf(base), spanOf(base+npage*pageSize))
938 // is s.
939 func (h *mheap) setSpans(base, npage uintptr, s *mspan) {
940 p := base / pageSize
941 ai := arenaIndex(base)
942 ha := h.arenas[ai.l1()][ai.l2()]
943 for n := uintptr(0); n < npage; n++ {
944 i := (p + n) % pagesPerArena
945 if i == 0 {
946 ai = arenaIndex(base + n*pageSize)
947 ha = h.arenas[ai.l1()][ai.l2()]
948 }
949 ha.spans[i] = s
950 }
951 }
952
953 // allocNeedsZero checks if the region of address space [base, base+npage*pageSize),
954 // assumed to be allocated, needs to be zeroed, updating heap arena metadata for
955 // future allocations.
956 //
957 // This must be called each time pages are allocated from the heap, even if the page
958 // allocator can otherwise prove the memory it's allocating is already zero because
959 // the pages are fresh from the operating system. It updates heapArena metadata that is
960 // critical for future page allocations.
961 //
962 // There are no locking constraints on this method.
963 func (h *mheap) allocNeedsZero(base, npage uintptr) (needZero bool) {
964 for npage > 0 {
965 ai := arenaIndex(base)
966 ha := h.arenas[ai.l1()][ai.l2()]
967
968 zeroedBase := atomic.Loaduintptr(&ha.zeroedBase)
969 arenaBase := base % heapArenaBytes
970 if arenaBase < zeroedBase {
971 // We extended into the non-zeroed part of the
972 // arena, so this region needs to be zeroed before use.
973 //
974 // zeroedBase is monotonically increasing, so if we see this now then
975 // we can be sure we need to zero this memory region.
976 //
977 // We still need to update zeroedBase for this arena, and
978 // potentially more arenas.
979 needZero = true
980 }
981 // We may observe arenaBase > zeroedBase if we're racing with one or more
982 // allocations which are acquiring memory directly before us in the address
983 // space. But, because we know no one else is acquiring *this* memory, it's
984 // still safe to not zero.
985
986 // Compute how far we extend into the arena, capped
987 // at heapArenaBytes.
988 arenaLimit := arenaBase + npage*pageSize
989 if arenaLimit > heapArenaBytes {
990 arenaLimit = heapArenaBytes
991 }
992 // Increase ha.zeroedBase so it's >= arenaLimit.
993 // We may be racing with other updates.
994 for arenaLimit > zeroedBase {
995 if atomic.Casuintptr(&ha.zeroedBase, zeroedBase, arenaLimit) {
996 break
997 }
998 zeroedBase = atomic.Loaduintptr(&ha.zeroedBase)
999 // Sanity check zeroedBase.
1000 if zeroedBase <= arenaLimit && zeroedBase > arenaBase {
1001 // The zeroedBase moved into the space we were trying to
1002 // claim. That's very bad, and indicates someone allocated
1003 // the same region we did.
1004 throw("potentially overlapping in-use allocations detected")
1005 }
1006 }
1007
1008 // Move base forward and subtract from npage to move into
1009 // the next arena, or finish.
1010 base += arenaLimit - arenaBase
1011 npage -= (arenaLimit - arenaBase) / pageSize
1012 }
1013 return
1014 }
1015
1016 // tryAllocMSpan attempts to allocate an mspan object from
1017 // the P-local cache, but may fail.
1018 //
1019 // h need not be locked.
1020 //
1021 // The caller must ensure that its P won't change underneath
1022 // it during this function. Currently we enforce this by requiring
1023 // that the function run on the system stack, because that's
1024 // the only place it is used now. In the future, this requirement
1025 // may be relaxed if its use is necessary elsewhere.
1026 //
1027 //go:systemstack
1028 func (h *mheap) tryAllocMSpan() *mspan {
1029 pp := getg().m.p.ptr()
1030 // If we don't have a p or the cache is empty, we can't do
1031 // anything here.
1032 if pp == nil || pp.mspancache.len == 0 {
1033 return nil
1034 }
1035 // Pull off the last entry in the cache.
1036 s := pp.mspancache.buf[pp.mspancache.len-1]
1037 pp.mspancache.len--
1038 return s
1039 }
1040
1041 // allocMSpanLocked allocates an mspan object.
1042 //
1043 // h must be locked.
1044 //
1045 // allocMSpanLocked must be called on the system stack because
1046 // its caller holds the heap lock. See mheap for details.
1047 // Running on the system stack also ensures that we won't
1048 // switch Ps during this function. See tryAllocMSpan for details.
1049 //
1050 //go:systemstack
1051 func (h *mheap) allocMSpanLocked() *mspan {
1052 pp := getg().m.p.ptr()
1053 if pp == nil {
1054 // We don't have a p so just do the normal thing.
1055 return (*mspan)(h.spanalloc.alloc())
1056 }
1057 // Refill the cache if necessary.
1058 if pp.mspancache.len == 0 {
1059 const refillCount = len(pp.mspancache.buf) / 2
1060 for i := 0; i < refillCount; i++ {
1061 pp.mspancache.buf[i] = (*mspan)(h.spanalloc.alloc())
1062 }
1063 pp.mspancache.len = refillCount
1064 }
1065 // Pull off the last entry in the cache.
1066 s := pp.mspancache.buf[pp.mspancache.len-1]
1067 pp.mspancache.len--
1068 return s
1069 }
1070
1071 // freeMSpanLocked frees an mspan object.
1072 //
1073 // h must be locked.
1074 //
1075 // freeMSpanLocked must be called on the system stack because
1076 // its caller holds the heap lock. See mheap for details.
1077 // Running on the system stack also ensures that we won't
1078 // switch Ps during this function. See tryAllocMSpan for details.
1079 //
1080 //go:systemstack
1081 func (h *mheap) freeMSpanLocked(s *mspan) {
1082 pp := getg().m.p.ptr()
1083 // First try to free the mspan directly to the cache.
1084 if pp != nil && pp.mspancache.len < len(pp.mspancache.buf) {
1085 pp.mspancache.buf[pp.mspancache.len] = s
1086 pp.mspancache.len++
1087 return
1088 }
1089 // Failing that (or if we don't have a p), just free it to
1090 // the heap.
1091 h.spanalloc.free(unsafe.Pointer(s))
1092 }
1093
1094 // allocSpan allocates an mspan which owns npages worth of memory.
1095 //
1096 // If manual == false, allocSpan allocates a heap span of class spanclass
1097 // and updates heap accounting. If manual == true, allocSpan allocates a
1098 // manually-managed span (spanclass is ignored), and the caller is
1099 // responsible for any accounting related to its use of the span. Either
1100 // way, allocSpan will atomically add the bytes in the newly allocated
1101 // span to *sysStat.
1102 //
1103 // The returned span is fully initialized.
1104 //
1105 // h must not be locked.
1106 //
1107 // allocSpan must be called on the system stack both because it acquires
1108 // the heap lock and because it must block GC transitions.
1109 //
1110 //go:systemstack
1111 func (h *mheap) allocSpan(npages uintptr, manual bool, spanclass spanClass, sysStat *uint64) (s *mspan) {
1112 // Function-global state.
1113 gp := getg()
1114 base, scav := uintptr(0), uintptr(0)
1115
1116 // If the allocation is small enough, try the page cache!
1117 pp := gp.m.p.ptr()
1118 if pp != nil && npages < pageCachePages/4 {
1119 c := &pp.pcache
1120
1121 // If the cache is empty, refill it.
1122 if c.empty() {
1123 lock(&h.lock)
1124 *c = h.pages.allocToCache()
1125 unlock(&h.lock)
1126 }
1127
1128 // Try to allocate from the cache.
1129 base, scav = c.alloc(npages)
1130 if base != 0 {
1131 s = h.tryAllocMSpan()
1132
1133 if s != nil && gcBlackenEnabled == 0 && (manual || spanclass.sizeclass() != 0) {
1134 goto HaveSpan
1135 }
1136 // We're either running during GC, failed to acquire an mspan,
1137 // or the allocation is for a large object. This means we
1138 // have to lock the heap and do a bunch of extra work,
1139 // so go down the HaveBaseLocked path.
1140 //
1141 // We must do this during GC to avoid skew with heap_scan
1142 // since we flush mcache stats whenever we lock.
1143 //
1144 // TODO(mknyszek): It would be nice to not have to
1145 // lock the heap if it's a large allocation, but
1146 // it's fine for now. The critical section here is
1147 // short and large object allocations are relatively
1148 // infrequent.
1149 }
1150 }
1151
1152 // For one reason or another, we couldn't get the
1153 // whole job done without the heap lock.
1154 lock(&h.lock)
1155
1156 if base == 0 {
1157 // Try to acquire a base address.
1158 base, scav = h.pages.alloc(npages)
1159 if base == 0 {
1160 if !h.grow(npages) {
1161 unlock(&h.lock)
1162 return nil
1163 }
1164 base, scav = h.pages.alloc(npages)
1165 if base == 0 {
1166 throw("grew heap, but no adequate free space found")
1167 }
1168 }
1169 }
1170 if s == nil {
1171 // We failed to get an mspan earlier, so grab
1172 // one now that we have the heap lock.
1173 s = h.allocMSpanLocked()
1174 }
1175 if !manual {
1176 // This is a heap span, so we should do some additional accounting
1177 // which may only be done with the heap locked.
1178
1179 // Transfer stats from mcache to global.
1180 var c *mcache
1181 if gp.m.p != 0 {
1182 c = gp.m.p.ptr().mcache
1183 } else {
1184 // This case occurs while bootstrapping.
1185 // See the similar code in mallocgc.
1186 c = mcache0
1187 if c == nil {
1188 throw("mheap.allocSpan called with no P")
1189 }
1190 }
1191 memstats.heap_scan += uint64(c.local_scan)
1192 c.local_scan = 0
1193 memstats.tinyallocs += uint64(c.local_tinyallocs)
1194 c.local_tinyallocs = 0
1195
1196 // Do some additional accounting if it's a large allocation.
1197 if spanclass.sizeclass() == 0 {
1198 mheap_.largealloc += uint64(npages * pageSize)
1199 mheap_.nlargealloc++
1200 atomic.Xadd64(&memstats.heap_live, int64(npages*pageSize))
1201 }
1202
1203 // Either heap_live or heap_scan could have been updated.
1204 if gcBlackenEnabled != 0 {
1205 gcController.revise()
1206 }
1207 }
1208 unlock(&h.lock)
1209
1210 HaveSpan:
1211 // At this point, both s != nil and base != 0, and the heap
1212 // lock is no longer held. Initialize the span.
1213 s.init(base, npages)
1214 if h.allocNeedsZero(base, npages) {
1215 s.needzero = 1
1216 }
1217 nbytes := npages * pageSize
1218 if manual {
1219 s.manualFreeList = 0
1220 s.nelems = 0
1221 s.limit = s.base() + s.npages*pageSize
1222 // Manually managed memory doesn't count toward heap_sys.
1223 mSysStatDec(&memstats.heap_sys, s.npages*pageSize)
1224 s.state.set(mSpanManual)
1225 } else {
1226 // We must set span properties before the span is published anywhere
1227 // since we're not holding the heap lock.
1228 s.spanclass = spanclass
1229 if sizeclass := spanclass.sizeclass(); sizeclass == 0 {
1230 s.elemsize = nbytes
1231 s.nelems = 1
1232
1233 s.divShift = 0
1234 s.divMul = 0
1235 s.divShift2 = 0
1236 s.baseMask = 0
1237 } else {
1238 s.elemsize = uintptr(class_to_size[sizeclass])
1239 s.nelems = nbytes / s.elemsize
1240
1241 m := &class_to_divmagic[sizeclass]
1242 s.divShift = m.shift
1243 s.divMul = m.mul
1244 s.divShift2 = m.shift2
1245 s.baseMask = m.baseMask
1246 }
1247
1248 // Initialize mark and allocation structures.
1249 s.freeindex = 0
1250 s.allocCache = ^uint64(0) // all 1s indicating all free.
1251 s.gcmarkBits = newMarkBits(s.nelems)
1252 s.allocBits = newAllocBits(s.nelems)
1253
1254 // It's safe to access h.sweepgen without the heap lock because it's
1255 // only ever updated with the world stopped and we run on the
1256 // systemstack which blocks a STW transition.
1257 atomic.Store(&s.sweepgen, h.sweepgen)
1258
1259 // Now that the span is filled in, set its state. This
1260 // is a publication barrier for the other fields in
1261 // the span. While valid pointers into this span
1262 // should never be visible until the span is returned,
1263 // if the garbage collector finds an invalid pointer,
1264 // access to the span may race with initialization of
1265 // the span. We resolve this race by atomically
1266 // setting the state after the span is fully
1267 // initialized, and atomically checking the state in
1268 // any situation where a pointer is suspect.
1269 s.state.set(mSpanInUse)
1270 }
1271
1272 // Commit and account for any scavenged memory that the span now owns.
1273 if scav != 0 {
1274 // sysUsed all the pages that are actually available
1275 // in the span since some of them might be scavenged.
1276 sysUsed(unsafe.Pointer(base), nbytes)
1277 mSysStatDec(&memstats.heap_released, scav)
1278 }
1279 // Update stats.
1280 mSysStatInc(sysStat, nbytes)
1281 mSysStatDec(&memstats.heap_idle, nbytes)
1282
1283 // Publish the span in various locations.
1284
1285 // This is safe to call without the lock held because the slots
1286 // related to this span will only ever be read or modified by
1287 // this thread until pointers into the span are published (and
1288 // we execute a publication barrier at the end of this function
1289 // before that happens) or pageInUse is updated.
1290 h.setSpans(s.base(), npages, s)
1291
1292 if !manual {
1293 if !go115NewMCentralImpl {
1294 // Add to swept in-use list.
1295 //
1296 // This publishes the span to root marking.
1297 //
1298 // h.sweepgen is guaranteed to only change during STW,
1299 // and preemption is disabled in the page allocator.
1300 h.sweepSpans[h.sweepgen/2%2].push(s)
1301 }
1302
1303 // Mark in-use span in arena page bitmap.
1304 //
1305 // This publishes the span to the page sweeper, so
1306 // it's imperative that the span be completely initialized
1307 // prior to this line.
1308 arena, pageIdx, pageMask := pageIndexOf(s.base())
1309 atomic.Or8(&arena.pageInUse[pageIdx], pageMask)
1310
1311 // Update related page sweeper stats.
1312 atomic.Xadd64(&h.pagesInUse, int64(npages))
1313
1314 if trace.enabled {
1315 // Trace that a heap alloc occurred.
1316 traceHeapAlloc()
1317 }
1318 }
1319
1320 // Make sure the newly allocated span will be observed
1321 // by the GC before pointers into the span are published.
1322 publicationBarrier()
1323
1324 return s
1325 }
1326
1327 // Try to add at least npage pages of memory to the heap,
1328 // returning whether it worked.
1329 //
1330 // h must be locked.
1331 func (h *mheap) grow(npage uintptr) bool {
1332 // We must grow the heap in whole palloc chunks.
1333 ask := alignUp(npage, pallocChunkPages) * pageSize
1334
1335 totalGrowth := uintptr(0)
1336 // This may overflow because ask could be very large
1337 // and is otherwise unrelated to h.curArena.base.
1338 end := h.curArena.base + ask
1339 nBase := alignUp(end, physPageSize)
1340 if nBase > h.curArena.end || /* overflow */ end < h.curArena.base {
1341 // Not enough room in the current arena. Allocate more
1342 // arena space. This may not be contiguous with the
1343 // current arena, so we have to request the full ask.
1344 av, asize := h.sysAlloc(ask)
1345 if av == nil {
1346 print("runtime: out of memory: cannot allocate ", ask, "-byte block (", memstats.heap_sys, " in use)\n")
1347 return false
1348 }
1349
1350 if uintptr(av) == h.curArena.end {
1351 // The new space is contiguous with the old
1352 // space, so just extend the current space.
1353 h.curArena.end = uintptr(av) + asize
1354 } else {
1355 // The new space is discontiguous. Track what
1356 // remains of the current space and switch to
1357 // the new space. This should be rare.
1358 if size := h.curArena.end - h.curArena.base; size != 0 {
1359 h.pages.grow(h.curArena.base, size)
1360 totalGrowth += size
1361 }
1362 // Switch to the new space.
1363 h.curArena.base = uintptr(av)
1364 h.curArena.end = uintptr(av) + asize
1365 }
1366
1367 // The memory just allocated counts as both released
1368 // and idle, even though it's not yet backed by spans.
1369 //
1370 // The allocation is always aligned to the heap arena
1371 // size which is always > physPageSize, so it's safe to
1372 // just add directly to heap_released.
1373 mSysStatInc(&memstats.heap_released, asize)
1374 mSysStatInc(&memstats.heap_idle, asize)
1375
1376 // Recalculate nBase.
1377 // We know this won't overflow, because sysAlloc returned
1378 // a valid region starting at h.curArena.base which is at
1379 // least ask bytes in size.
1380 nBase = alignUp(h.curArena.base+ask, physPageSize)
1381 }
1382
1383 // Grow into the current arena.
1384 v := h.curArena.base
1385 h.curArena.base = nBase
1386 h.pages.grow(v, nBase-v)
1387 totalGrowth += nBase - v
1388
1389 // We just caused a heap growth, so scavenge down what will soon be used.
1390 // By scavenging inline we deal with the failure to allocate out of
1391 // memory fragments by scavenging the memory fragments that are least
1392 // likely to be re-used.
1393 if retained := heapRetained(); retained+uint64(totalGrowth) > h.scavengeGoal {
1394 todo := totalGrowth
1395 if overage := uintptr(retained + uint64(totalGrowth) - h.scavengeGoal); todo > overage {
1396 todo = overage
1397 }
1398 h.pages.scavenge(todo, false)
1399 }
1400 return true
1401 }
1402
1403 // Free the span back into the heap.
1404 func (h *mheap) freeSpan(s *mspan) {
1405 systemstack(func() {
1406 c := getg().m.p.ptr().mcache
1407 lock(&h.lock)
1408 memstats.heap_scan += uint64(c.local_scan)
1409 c.local_scan = 0
1410 memstats.tinyallocs += uint64(c.local_tinyallocs)
1411 c.local_tinyallocs = 0
1412 if msanenabled {
1413 // Tell msan that this entire span is no longer in use.
1414 base := unsafe.Pointer(s.base())
1415 bytes := s.npages << _PageShift
1416 msanfree(base, bytes)
1417 }
1418 if gcBlackenEnabled != 0 {
1419 // heap_scan changed.
1420 gcController.revise()
1421 }
1422 h.freeSpanLocked(s, true, true)
1423 unlock(&h.lock)
1424 })
1425 }
1426
1427 // freeManual frees a manually-managed span returned by allocManual.
1428 // stat must be the same as the stat passed to the allocManual that
1429 // allocated s.
1430 //
1431 // This must only be called when gcphase == _GCoff. See mSpanState for
1432 // an explanation.
1433 //
1434 // freeManual must be called on the system stack because it acquires
1435 // the heap lock. See mheap for details.
1436 //
1437 //go:systemstack
1438 func (h *mheap) freeManual(s *mspan, stat *uint64) {
1439 s.needzero = 1
1440 lock(&h.lock)
1441 mSysStatDec(stat, s.npages*pageSize)
1442 mSysStatInc(&memstats.heap_sys, s.npages*pageSize)
1443 h.freeSpanLocked(s, false, true)
1444 unlock(&h.lock)
1445 }
1446
1447 func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool) {
1448 switch s.state.get() {
1449 case mSpanManual:
1450 if s.allocCount != 0 {
1451 throw("mheap.freeSpanLocked - invalid stack free")
1452 }
1453 case mSpanInUse:
1454 if s.allocCount != 0 || s.sweepgen != h.sweepgen {
1455 print("mheap.freeSpanLocked - span ", s, " ptr ", hex(s.base()), " allocCount ", s.allocCount, " sweepgen ", s.sweepgen, "/", h.sweepgen, "\n")
1456 throw("mheap.freeSpanLocked - invalid free")
1457 }
1458 atomic.Xadd64(&h.pagesInUse, -int64(s.npages))
1459
1460 // Clear in-use bit in arena page bitmap.
1461 arena, pageIdx, pageMask := pageIndexOf(s.base())
1462 atomic.And8(&arena.pageInUse[pageIdx], ^pageMask)
1463 default:
1464 throw("mheap.freeSpanLocked - invalid span state")
1465 }
1466
1467 if acctinuse {
1468 mSysStatDec(&memstats.heap_inuse, s.npages*pageSize)
1469 }
1470 if acctidle {
1471 mSysStatInc(&memstats.heap_idle, s.npages*pageSize)
1472 }
1473
1474 // Mark the space as free.
1475 h.pages.free(s.base(), s.npages)
1476
1477 // Free the span structure. We no longer have a use for it.
1478 s.state.set(mSpanDead)
1479 h.freeMSpanLocked(s)
1480 }
1481
1482 // scavengeAll acquires the heap lock (blocking any additional
1483 // manipulation of the page allocator) and iterates over the whole
1484 // heap, scavenging every free page available.
1485 func (h *mheap) scavengeAll() {
1486 // Disallow malloc or panic while holding the heap lock. We do
1487 // this here because this is a non-mallocgc entry-point to
1488 // the mheap API.
1489 gp := getg()
1490 gp.m.mallocing++
1491 lock(&h.lock)
1492 // Start a new scavenge generation so we have a chance to walk
1493 // over the whole heap.
1494 h.pages.scavengeStartGen()
1495 released := h.pages.scavenge(^uintptr(0), false)
1496 gen := h.pages.scav.gen
1497 unlock(&h.lock)
1498 gp.m.mallocing--
1499
1500 if debug.scavtrace > 0 {
1501 printScavTrace(gen, released, true)
1502 }
1503 }
1504
1505 //go:linkname runtime_debug_freeOSMemory runtime..z2fdebug.freeOSMemory
1506 func runtime_debug_freeOSMemory() {
1507 GC()
1508 systemstack(func() { mheap_.scavengeAll() })
1509 }
1510
1511 // Initialize a new span with the given start and npages.
1512 func (span *mspan) init(base uintptr, npages uintptr) {
1513 // span is *not* zeroed.
1514 span.next = nil
1515 span.prev = nil
1516 span.list = nil
1517 span.startAddr = base
1518 span.npages = npages
1519 span.allocCount = 0
1520 span.spanclass = 0
1521 span.elemsize = 0
1522 span.speciallock.key = 0
1523 span.specials = nil
1524 span.needzero = 0
1525 span.freeindex = 0
1526 span.allocBits = nil
1527 span.gcmarkBits = nil
1528 span.state.set(mSpanDead)
1529 lockInit(&span.speciallock, lockRankMspanSpecial)
1530 }
1531
1532 func (span *mspan) inList() bool {
1533 return span.list != nil
1534 }
1535
1536 // Initialize an empty doubly-linked list.
1537 func (list *mSpanList) init() {
1538 list.first = nil
1539 list.last = nil
1540 }
1541
1542 func (list *mSpanList) remove(span *mspan) {
1543 if span.list != list {
1544 print("runtime: failed mSpanList.remove span.npages=", span.npages,
1545 " span=", span, " prev=", span.prev, " span.list=", span.list, " list=", list, "\n")
1546 throw("mSpanList.remove")
1547 }
1548 if list.first == span {
1549 list.first = span.next
1550 } else {
1551 span.prev.next = span.next
1552 }
1553 if list.last == span {
1554 list.last = span.prev
1555 } else {
1556 span.next.prev = span.prev
1557 }
1558 span.next = nil
1559 span.prev = nil
1560 span.list = nil
1561 }
1562
1563 func (list *mSpanList) isEmpty() bool {
1564 return list.first == nil
1565 }
1566
1567 func (list *mSpanList) insert(span *mspan) {
1568 if span.next != nil || span.prev != nil || span.list != nil {
1569 println("runtime: failed mSpanList.insert", span, span.next, span.prev, span.list)
1570 throw("mSpanList.insert")
1571 }
1572 span.next = list.first
1573 if list.first != nil {
1574 // The list contains at least one span; link it in.
1575 // The last span in the list doesn't change.
1576 list.first.prev = span
1577 } else {
1578 // The list contains no spans, so this is also the last span.
1579 list.last = span
1580 }
1581 list.first = span
1582 span.list = list
1583 }
1584
1585 func (list *mSpanList) insertBack(span *mspan) {
1586 if span.next != nil || span.prev != nil || span.list != nil {
1587 println("runtime: failed mSpanList.insertBack", span, span.next, span.prev, span.list)
1588 throw("mSpanList.insertBack")
1589 }
1590 span.prev = list.last
1591 if list.last != nil {
1592 // The list contains at least one span.
1593 list.last.next = span
1594 } else {
1595 // The list contains no spans, so this is also the first span.
1596 list.first = span
1597 }
1598 list.last = span
1599 span.list = list
1600 }
1601
1602 // takeAll removes all spans from other and inserts them at the front
1603 // of list.
1604 func (list *mSpanList) takeAll(other *mSpanList) {
1605 if other.isEmpty() {
1606 return
1607 }
1608
1609 // Reparent everything in other to list.
1610 for s := other.first; s != nil; s = s.next {
1611 s.list = list
1612 }
1613
1614 // Concatenate the lists.
1615 if list.isEmpty() {
1616 *list = *other
1617 } else {
1618 // Neither list is empty. Put other before list.
1619 other.last.next = list.first
1620 list.first.prev = other.last
1621 list.first = other.first
1622 }
1623
1624 other.first, other.last = nil, nil
1625 }
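// A minimal usage sketch for the intrusive mSpanList above (runtime
// internal; shown only to make the invariants concrete). A span can be
// on at most one list at a time, and span.list is the back-pointer that
// insert, insertBack, and remove check and maintain.
//
//	var list mSpanList
//	list.init()
//	list.insert(s)         // s.list == &list; s is the new first element
//	if s.inList() {
//		list.remove(s) // resets s.next, s.prev, and s.list to nil
//	}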
1626
1627 const (
1628 _KindSpecialFinalizer = 1
1629 _KindSpecialProfile = 2
1630 // Note: The finalizer special must be first because if we're freeing
1631 // an object, a finalizer special will cause the freeing operation
1632 // to abort, and we want to keep the other special records around
1633 // if that happens.
1634 )
1635
1636 //go:notinheap
1637 type special struct {
1638 next *special // linked list in span
1639 offset uint16 // span offset of object
1640 kind byte // kind of special
1641 }
1642
1643 // spanHasSpecials marks a span as having specials in the arena bitmap.
1644 func spanHasSpecials(s *mspan) {
1645 arenaPage := (s.base() / pageSize) % pagesPerArena
1646 ai := arenaIndex(s.base())
1647 ha := mheap_.arenas[ai.l1()][ai.l2()]
1648 atomic.Or8(&ha.pageSpecials[arenaPage/8], uint8(1)<<(arenaPage%8))
1649 }
1650
1651 // spanHasNoSpecials marks a span as having no specials in the arena bitmap.
1652 func spanHasNoSpecials(s *mspan) {
1653 arenaPage := (s.base() / pageSize) % pagesPerArena
1654 ai := arenaIndex(s.base())
1655 ha := mheap_.arenas[ai.l1()][ai.l2()]
1656 atomic.And8(&ha.pageSpecials[arenaPage/8], ^(uint8(1) << (arenaPage % 8)))
1657 }
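// A worked example for the indexing above, assuming a typical 64-bit
// Linux configuration (8 KiB pages and 64 MiB heap arenas, so
// pagesPerArena == 8192): a span whose base is page index 10 of its
// arena has arenaPage == 10 and therefore sets or clears bit
// 10%8 == 2 of byte 10/8 == 1 in ha.pageSpecials.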
1658
1659 // Adds the special record s to the list of special records for
1660 // the object p. All fields of s should be filled in except for
1661 // offset & next, which this routine will fill in.
1662 // Returns true if the special was successfully added, false otherwise.
1663 // (The add will fail only if a record with the same p and s.kind
1664 // already exists.)
1665 func addspecial(p unsafe.Pointer, s *special) bool {
1666 span := spanOfHeap(uintptr(p))
1667 if span == nil {
1668 throw("addspecial on invalid pointer")
1669 }
1670
1671 // Ensure that the span is swept.
1672 // Sweeping accesses the specials list w/o locks, so we have
1673 // to synchronize with it. And it's just much safer.
1674 mp := acquirem()
1675 span.ensureSwept()
1676
1677 offset := uintptr(p) - span.base()
1678 kind := s.kind
1679
1680 lock(&span.speciallock)
1681
1682 // Find splice point, check for existing record.
1683 t := &span.specials
1684 for {
1685 x := *t
1686 if x == nil {
1687 break
1688 }
1689 if offset == uintptr(x.offset) && kind == x.kind {
1690 unlock(&span.speciallock)
1691 releasem(mp)
1692 return false // already exists
1693 }
1694 if offset < uintptr(x.offset) || (offset == uintptr(x.offset) && kind < x.kind) {
1695 break
1696 }
1697 t = &x.next
1698 }
1699
1700 // Splice in record, fill in offset.
1701 s.offset = uint16(offset)
1702 s.next = *t
1703 *t = s
1704 if go115NewMarkrootSpans {
1705 spanHasSpecials(span)
1706 }
1707 unlock(&span.speciallock)
1708 releasem(mp)
1709
1710 return true
1711 }
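// Example of the ordering maintained above: if the object at span
// offset 96 has both a finalizer and a profile special, the list
// stores (offset 96, _KindSpecialFinalizer) before
// (offset 96, _KindSpecialProfile), since records are sorted by offset
// and then by kind, and _KindSpecialFinalizer < _KindSpecialProfile.
// That keeps the finalizer special first, as required by the note in
// the kind constants above.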
1712
1713 // Removes the special record of the given kind for the object p.
1714 // Returns the record if it existed, nil otherwise.
1715 // The caller must free the result by returning it to the appropriate fixalloc.
1716 func removespecial(p unsafe.Pointer, kind uint8) *special {
1717 span := spanOfHeap(uintptr(p))
1718 if span == nil {
1719 throw("removespecial on invalid pointer")
1720 }
1721
1722 // Ensure that the span is swept.
1723 // Sweeping accesses the specials list w/o locks, so we have
1724 // to synchronize with it. And it's just much safer.
1725 mp := acquirem()
1726 span.ensureSwept()
1727
1728 offset := uintptr(p) - span.base()
1729
1730 var result *special
1731 lock(&span.speciallock)
1732 t := &span.specials
1733 for {
1734 s := *t
1735 if s == nil {
1736 break
1737 }
1738 // This function is used for finalizers only, so we don't check for
1739 // "interior" specials (p must be exactly equal to s->offset).
1740 if offset == uintptr(s.offset) && kind == s.kind {
1741 *t = s.next
1742 result = s
1743 break
1744 }
1745 t = &s.next
1746 }
1747 if go115NewMarkrootSpans && span.specials == nil {
1748 spanHasNoSpecials(span)
1749 }
1750 unlock(&span.speciallock)
1751 releasem(mp)
1752 return result
1753 }
1754
1755 // The described object has a finalizer set for it.
1756 //
1757 // specialfinalizer is allocated from non-GC'd memory, so any heap
1758 // pointers must be specially handled.
1759 //
1760 //go:notinheap
1761 type specialfinalizer struct {
1762 special special
1763 fn *funcval // May be a heap pointer.
1764 ft *functype // May be a heap pointer, but always live.
1765 ot *ptrtype // May be a heap pointer, but always live.
1766 }
1767
1768 // Adds a finalizer to the object p. Returns true if it succeeded.
1769 func addfinalizer(p unsafe.Pointer, f *funcval, ft *functype, ot *ptrtype) bool {
1770 lock(&mheap_.speciallock)
1771 s := (*specialfinalizer)(mheap_.specialfinalizeralloc.alloc())
1772 unlock(&mheap_.speciallock)
1773 s.special.kind = _KindSpecialFinalizer
1774 s.fn = f
1775 s.ft = ft
1776 s.ot = ot
1777 if addspecial(p, &s.special) {
1778 // This is responsible for maintaining the same
1779 // GC-related invariants as markrootSpans in any
1780 // situation where it's possible that markrootSpans
1781 // has already run but mark termination hasn't yet.
1782 if gcphase != _GCoff {
1783 base, _, _ := findObject(uintptr(p), 0, 0, false)
1784 mp := acquirem()
1785 gcw := &mp.p.ptr().gcw
1786 // Mark everything reachable from the object
1787 // so it's retained for the finalizer.
1788 scanobject(base, gcw)
1789 // Mark the finalizer itself, since the
1790 // special isn't part of the GC'd heap.
1791 scanblock(uintptr(unsafe.Pointer(&s.fn)), sys.PtrSize, &oneptrmask[0], gcw)
1792 releasem(mp)
1793 }
1794 return true
1795 }
1796
1797 // There was an old finalizer, so free the record we just allocated.
1798 lock(&mheap_.speciallock)
1799 mheap_.specialfinalizeralloc.free(unsafe.Pointer(s))
1800 unlock(&mheap_.speciallock)
1801 return false
1802 }
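// A short sketch of the public API that reaches addfinalizer and
// removefinalizer, assuming only the standard runtime package; the
// type and constructor below are illustrative:
//
//	type resource struct{ fd int }
//
//	func newResource(fd int) *resource {
//		r := &resource{fd: fd}
//		runtime.SetFinalizer(r, func(r *resource) {
//			// Runs once r is unreachable; recorded as a
//			// _KindSpecialFinalizer special on r's span.
//		})
//		return r
//	}
//
// Calling runtime.SetFinalizer(r, nil) later removes the finalizer
// through removefinalizer below.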
1803
1804 // Removes the finalizer (if any) from the object p.
1805 func removefinalizer(p unsafe.Pointer) {
1806 s := (*specialfinalizer)(unsafe.Pointer(removespecial(p, _KindSpecialFinalizer)))
1807 if s == nil {
1808 return // there wasn't a finalizer to remove
1809 }
1810 lock(&mheap_.speciallock)
1811 mheap_.specialfinalizeralloc.free(unsafe.Pointer(s))
1812 unlock(&mheap_.speciallock)
1813 }
1814
1815 // The described object is being heap profiled.
1816 //
1817 //go:notinheap
1818 type specialprofile struct {
1819 special special
1820 b *bucket
1821 }
1822
1823 // Set the heap profile bucket associated with p to b.
1824 func setprofilebucket(p unsafe.Pointer, b *bucket) {
1825 lock(&mheap_.speciallock)
1826 s := (*specialprofile)(mheap_.specialprofilealloc.alloc())
1827 unlock(&mheap_.speciallock)
1828 s.special.kind = _KindSpecialProfile
1829 s.b = b
1830 if !addspecial(p, &s.special) {
1831 throw("setprofilebucket: profile already set")
1832 }
1833 }
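// A hedged sketch of how this is reached in practice: heap allocations
// are sampled according to runtime.MemProfileRate, and each sampled
// object gets a _KindSpecialProfile special pointing at its profile
// bucket, which tools such as pprof later report (using the standard
// runtime/pprof and os packages):
//
//	runtime.MemProfileRate = 1 // sample every allocation (costly)
//	// ... allocate ...
//	pprof.Lookup("heap").WriteTo(os.Stdout, 1)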
1834
1835 // Do whatever cleanup needs to be done to deallocate s. It has
1836 // already been unlinked from the mspan specials list.
1837 func freespecial(s *special, p unsafe.Pointer, size uintptr) {
1838 switch s.kind {
1839 case _KindSpecialFinalizer:
1840 sf := (*specialfinalizer)(unsafe.Pointer(s))
1841 queuefinalizer(p, sf.fn, sf.ft, sf.ot)
1842 lock(&mheap_.speciallock)
1843 mheap_.specialfinalizeralloc.free(unsafe.Pointer(sf))
1844 unlock(&mheap_.speciallock)
1845 case _KindSpecialProfile:
1846 sp := (*specialprofile)(unsafe.Pointer(s))
1847 mProf_Free(sp.b, size)
1848 lock(&mheap_.speciallock)
1849 mheap_.specialprofilealloc.free(unsafe.Pointer(sp))
1850 unlock(&mheap_.speciallock)
1851 default:
1852 throw("bad special kind")
1853 panic("not reached")
1854 }
1855 }
1856
1857 // gcBits is an alloc/mark bitmap. This is always used as *gcBits.
1858 //
1859 //go:notinheap
1860 type gcBits uint8
1861
1862 // bytep returns a pointer to the n'th byte of b.
1863 func (b *gcBits) bytep(n uintptr) *uint8 {
1864 return addb((*uint8)(b), n)
1865 }
1866
1867 // bitp returns a pointer to the byte containing bit n and a mask for
1868 // selecting that bit from *bytep.
1869 func (b *gcBits) bitp(n uintptr) (bytep *uint8, mask uint8) {
1870 return b.bytep(n / 8), 1 << (n % 8)
1871 }
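// For example, bitp(11) returns a pointer to the byte at index
// 11/8 == 1 and the mask 1<<(11%8) == 0x08: bit 11 of the bitmap is
// bit 3 of its second byte.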
1872
1873 const gcBitsChunkBytes = uintptr(64 << 10)
1874 const gcBitsHeaderBytes = unsafe.Sizeof(gcBitsHeader{})
1875
1876 type gcBitsHeader struct {
1877 free uintptr // free is the index into bits of the next free byte.
1878 next uintptr // *gcBits triggers recursive type bug. (issue 14620)
1879 }
1880
1881 //go:notinheap
1882 type gcBitsArena struct {
1883 // gcBitsHeader // side step recursive type bug (issue 14620) by including fields by hand.
1884 free uintptr // free is the index into bits of the next free byte; read/write atomically
1885 next *gcBitsArena
1886 bits [gcBitsChunkBytes - gcBitsHeaderBytes]gcBits
1887 }
1888
1889 var gcBitsArenas struct {
1890 lock mutex
1891 free *gcBitsArena
1892 next *gcBitsArena // Read atomically. Write atomically under lock.
1893 current *gcBitsArena
1894 previous *gcBitsArena
1895 }
1896
1897 // tryAlloc allocates from b or returns nil if b does not have enough room.
1898 // This is safe to call concurrently.
1899 func (b *gcBitsArena) tryAlloc(bytes uintptr) *gcBits {
1900 if b == nil || atomic.Loaduintptr(&b.free)+bytes > uintptr(len(b.bits)) {
1901 return nil
1902 }
1903 // Try to allocate from this block.
1904 end := atomic.Xadduintptr(&b.free, bytes)
1905 if end > uintptr(len(b.bits)) {
1906 return nil
1907 }
1908 // There was enough room.
1909 start := end - bytes
1910 return &b.bits[start]
1911 }
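// Note on the pattern above: the initial Loaduintptr is only an
// optimistic check. The Xadduintptr is what actually claims the bytes,
// and the second bounds check covers the case where several callers
// raced past the first check and pushed free beyond len(b.bits); those
// callers simply fall back to the slower, locked path in newMarkBits.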
1912
1913 // newMarkBits returns a pointer to 8-byte-aligned bytes
1914 // to be used for a span's mark bits.
1915 func newMarkBits(nelems uintptr) *gcBits {
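// For example, a span with nelems == 100 objects needs
// (100+63)/64 == 2 uint64 words, i.e. 16 bytes of mark bits.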
1916 blocksNeeded := uintptr((nelems + 63) / 64)
1917 bytesNeeded := blocksNeeded * 8
1918
1919 // Try directly allocating from the current head arena.
1920 head := (*gcBitsArena)(atomic.Loadp(unsafe.Pointer(&gcBitsArenas.next)))
1921 if p := head.tryAlloc(bytesNeeded); p != nil {
1922 return p
1923 }
1924
1925 // There's not enough room in the head arena. We may need to
1926 // allocate a new arena.
1927 lock(&gcBitsArenas.lock)
1928 // Try the head arena again, since it may have changed. Now
1929 // that we hold the lock, the list head can't change, but its
1930 // free position still can.
1931 if p := gcBitsArenas.next.tryAlloc(bytesNeeded); p != nil {
1932 unlock(&gcBitsArenas.lock)
1933 return p
1934 }
1935
1936 // Allocate a new arena. This may temporarily drop the lock.
1937 fresh := newArenaMayUnlock()
1938 // If newArenaMayUnlock dropped the lock, another thread may
1939 // have put a fresh arena on the "next" list. Try allocating
1940 // from next again.
1941 if p := gcBitsArenas.next.tryAlloc(bytesNeeded); p != nil {
1942 // Put fresh back on the free list.
1943 // TODO: Mark it "already zeroed"
1944 fresh.next = gcBitsArenas.free
1945 gcBitsArenas.free = fresh
1946 unlock(&gcBitsArenas.lock)
1947 return p
1948 }
1949
1950 // Allocate from the fresh arena. We haven't linked it in yet, so
1951 // this cannot race and is guaranteed to succeed.
1952 p := fresh.tryAlloc(bytesNeeded)
1953 if p == nil {
1954 throw("markBits overflow")
1955 }
1956
1957 // Add the fresh arena to the "next" list.
1958 fresh.next = gcBitsArenas.next
1959 atomic.StorepNoWB(unsafe.Pointer(&gcBitsArenas.next), unsafe.Pointer(fresh))
1960
1961 unlock(&gcBitsArenas.lock)
1962 return p
1963 }
1964
1965 // newAllocBits returns a pointer to 8-byte-aligned bytes
1966 // to be used for this span's alloc bits.
1967 // newAllocBits is used to provide newly initialized spans with
1968 // allocation bits. For spans that are not being initialized, the
1969 // mark bits are repurposed as allocation bits when
1970 // the span is swept.
1971 func newAllocBits(nelems uintptr) *gcBits {
1972 return newMarkBits(nelems)
1973 }
1974
1975 // nextMarkBitArenaEpoch establishes a new epoch for the arenas
1976 // holding the mark bits. The arenas are named relative to the
1977 // current GC cycle, which is demarcated by the call to finishweep_m.
1978 //
1979 // All current spans have been swept.
1980 // During that sweep each span allocated room for its gcmarkBits in
1981 // the gcBitsArenas.next block. gcBitsArenas.next becomes gcBitsArenas.current,
1982 // where the GC will mark objects; after each span is swept, these bits
1983 // will be used to allocate objects.
1984 // gcBitsArenas.current becomes gcBitsArenas.previous, where each span's
1985 // gcAllocBits live until all the spans have been swept during this GC cycle.
1986 // Sweeping a span extinguishes its references to gcBitsArenas.previous
1987 // by pointing its gcAllocBits into gcBitsArenas.current.
1988 // gcBitsArenas.previous is then released to the gcBitsArenas.free list.
1989 func nextMarkBitArenaEpoch() {
1990 lock(&gcBitsArenas.lock)
1991 if gcBitsArenas.previous != nil {
1992 if gcBitsArenas.free == nil {
1993 gcBitsArenas.free = gcBitsArenas.previous
1994 } else {
1995 // Find end of previous arenas.
1996 last := gcBitsArenas.previous
1997 for ; last.next != nil; last = last.next {
1998 }
1999 last.next = gcBitsArenas.free
2000 gcBitsArenas.free = gcBitsArenas.previous
2001 }
2002 }
2003 gcBitsArenas.previous = gcBitsArenas.current
2004 gcBitsArenas.current = gcBitsArenas.next
2005 atomic.StorepNoWB(unsafe.Pointer(&gcBitsArenas.next), nil) // newMarkBits calls newArena when needed
2006 unlock(&gcBitsArenas.lock)
2007 }
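// In summary, the rotation performed above is:
//
//	free     += previous (its mark bits are no longer referenced)
//	previous  = current
//	current   = next
//	next      = nil      (newMarkBits allocates a fresh arena on demand)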
2008
2009 // newArenaMayUnlock allocates and zeroes a gcBits arena.
2010 // The caller must hold gcBitsArenas.lock. This may temporarily release it.
2011 func newArenaMayUnlock() *gcBitsArena {
2012 var result *gcBitsArena
2013 if gcBitsArenas.free == nil {
2014 unlock(&gcBitsArenas.lock)
2015 result = (*gcBitsArena)(sysAlloc(gcBitsChunkBytes, &memstats.gc_sys))
2016 if result == nil {
2017 throw("runtime: cannot allocate memory")
2018 }
2019 lock(&gcBitsArenas.lock)
2020 } else {
2021 result = gcBitsArenas.free
2022 gcBitsArenas.free = gcBitsArenas.free.next
2023 memclrNoHeapPointers(unsafe.Pointer(result), gcBitsChunkBytes)
2024 }
2025 result.next = nil
2026 // If result.bits is not 8-byte aligned, adjust the index so
2027 // that &result.bits[result.free] is 8-byte aligned.
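// For example, if &result.bits[0] sits at an address whose low three
// bits are 5, result.free becomes 8-5 == 3, so &result.bits[3] is the
// first 8-byte-aligned byte in the arena.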
2028 if uintptr(unsafe.Offsetof(gcBitsArena{}.bits))&7 == 0 {
2029 result.free = 0
2030 } else {
2031 result.free = 8 - (uintptr(unsafe.Pointer(&result.bits[0])) & 7)
2032 }
2033 return result
2034 }