// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Page heap.
//
// See malloc.h for overview.
//
// When an MSpan is in the heap free list, state == MSpanFree
// and heapmap(s->start) == span, heapmap(s->start+s->npages-1) == span.
//
// When an MSpan is allocated, state == MSpanInUse
// and heapmap(i) == span for all s->start <= i < s->start+s->npages.

#include "runtime.h"
#include "arch.h"
#include "malloc.h"
static MSpan *MHeap_AllocLocked(MHeap*, uintptr, int32);
static bool MHeap_Grow(MHeap*, uintptr);
static void MHeap_FreeLocked(MHeap*, MSpan*);
static MSpan *MHeap_AllocLarge(MHeap*, uintptr);
static MSpan *BestFit(MSpan*, uintptr, MSpan*);
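// Note: MHeap_AllocLocked, MHeap_FreeLocked, and the helpers they call
// assume the heap lock is already held; runtime_MHeap_Alloc and
// runtime_MHeap_Free acquire it before delegating to them.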
static void
RecordSpan(void *vh, byte *p)
{
	MHeap *h;
	MSpan *s;
	MSpan **all;
	uint32 cap;

	h = vh;
	s = (MSpan*)p;
	if(h->nspan >= h->nspancap) {
		// Grow allspans: at least 1.5x the old capacity,
		// and no smaller than 64kB worth of entries.
		cap = 64*1024/sizeof(all[0]);
		if(cap < h->nspancap*3/2)
			cap = h->nspancap*3/2;
		all = (MSpan**)runtime_SysAlloc(cap*sizeof(all[0]));
		if(h->allspans) {
			runtime_memmove(all, h->allspans, h->nspancap*sizeof(all[0]));
			runtime_SysFree(h->allspans, h->nspancap*sizeof(all[0]));
		}
		h->allspans = all;
		h->nspancap = cap;
	}
	h->allspans[h->nspan++] = s;
}
// Initialize the heap; fetch memory using alloc.
void
runtime_MHeap_Init(MHeap *h, void *(*alloc)(uintptr))
{
	uint32 i;

	runtime_FixAlloc_Init(&h->spanalloc, sizeof(MSpan), alloc, RecordSpan, h);
	runtime_FixAlloc_Init(&h->cachealloc, sizeof(MCache), alloc, nil, nil);
	// h->mapcache needs no init
	for(i=0; i<nelem(h->free); i++)
		runtime_MSpanList_Init(&h->free[i]);
	runtime_MSpanList_Init(&h->large);
	for(i=0; i<nelem(h->central); i++)
		runtime_MCentral_Init(&h->central[i], i);
}
// Allocate a new span of npage pages from the heap
// and record its size class in the HeapMap and HeapMapCache.
MSpan*
runtime_MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, int32 acct, int32 zeroed)
{
	MSpan *s;

	runtime_lock(h);
	runtime_purgecachedstats(runtime_m()->mcache);
	s = MHeap_AllocLocked(h, npage, sizeclass);
	if(s != nil) {
		mstats.heap_inuse += npage<<PageShift;
		if(acct) {
			mstats.heap_objects++;
			mstats.heap_alloc += npage<<PageShift;
		}
	}
	runtime_unlock(h);
	if(s != nil && *(uintptr*)(s->start<<PageShift) != 0 && zeroed)
		runtime_memclr((byte*)(s->start<<PageShift), s->npages<<PageShift);
	return s;
}
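// A note on the "needs zeroing" checks above and below: the heap reuses
// the first word of a free span's memory as a mark. A nonzero first word
// means the pages may hold stale data, so runtime_MHeap_Alloc clears the
// span before handing it out when the caller asked for zeroed memory.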
static MSpan*
MHeap_AllocLocked(MHeap *h, uintptr npage, int32 sizeclass)
{
	uintptr n;
	MSpan *s, *t;
	PageID p;

	// Try in fixed-size lists up to max.
	for(n=npage; n < nelem(h->free); n++) {
		if(!runtime_MSpanList_IsEmpty(&h->free[n])) {
			s = h->free[n].next;
			goto HaveSpan;
		}
	}

	// Best fit in list of large spans.
	if((s = MHeap_AllocLarge(h, npage)) == nil) {
		if(!MHeap_Grow(h, npage))
			return nil;
		if((s = MHeap_AllocLarge(h, npage)) == nil)
			return nil;
	}

HaveSpan:
	// Mark span in use.
	if(s->state != MSpanFree)
		runtime_throw("MHeap_AllocLocked - MSpan not free");
	if(s->npages < npage)
		runtime_throw("MHeap_AllocLocked - bad npages");
	runtime_MSpanList_Remove(s);
	s->state = MSpanInUse;
	mstats.heap_idle -= s->npages<<PageShift;
	mstats.heap_released -= s->npreleased<<PageShift;
	s->npreleased = 0;

	if(s->npages > npage) {
		// Trim extra and put it back in the heap.
		t = runtime_FixAlloc_Alloc(&h->spanalloc);
		mstats.mspan_inuse = h->spanalloc.inuse;
		mstats.mspan_sys = h->spanalloc.sys;
		runtime_MSpan_Init(t, s->start + npage, s->npages - npage);
		s->npages = npage;
		p = t->start;
		if(sizeof(void*) == 8)
			p -= ((uintptr)h->arena_start>>PageShift);
		if(p > 0)
			h->map[p-1] = s;
		h->map[p] = t;
		h->map[p+t->npages-1] = t;
		*(uintptr*)(t->start<<PageShift) = *(uintptr*)(s->start<<PageShift);  // copy "needs zeroing" mark
		t->state = MSpanInUse;
		MHeap_FreeLocked(h, t);
		t->unusedsince = s->unusedsince; // preserve age
	}
	s->unusedsince = 0;

	// Record span info, because gc needs to be
	// able to map interior pointer to containing span.
	s->sizeclass = sizeclass;
	s->elemsize = (sizeclass==0 ? s->npages<<PageShift : (uintptr)runtime_class_to_size[sizeclass]);
	s->types.compression = MTypes_Empty;
	p = s->start;
	if(sizeof(void*) == 8)
		p -= ((uintptr)h->arena_start>>PageShift);
	for(n=0; n<npage; n++)
		h->map[p+n] = s;
	return s;
}
// Allocate a span of exactly npage pages from the list of large spans.
static MSpan*
MHeap_AllocLarge(MHeap *h, uintptr npage)
{
	return BestFit(&h->large, npage, nil);
}
// Search list for smallest span with >= npage pages.
// If there are multiple smallest spans, take the one
// with the earliest starting address.
static MSpan*
BestFit(MSpan *list, uintptr npage, MSpan *best)
{
	MSpan *s;

	for(s=list->next; s != list; s=s->next) {
		if(s->npages < npage)
			continue;
		if(best == nil
		|| s->npages < best->npages
		|| (s->npages == best->npages && s->start < best->start))
			best = s;
	}
	return best;
}
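// BestFit is a linear scan, so allocating a large span costs time
// proportional to the length of h->large; the heap trades that for
// less wasted space at the tail of big allocations.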
// Try to add at least npage pages of memory to the heap,
// returning whether it worked.
static bool
MHeap_Grow(MHeap *h, uintptr npage)
{
	uintptr ask;
	void *v;
	MSpan *s;
	PageID p;

	// Ask for a big chunk, to reduce the number of mappings
	// the operating system needs to track; also amortizes
	// the overhead of an operating system mapping.
	// Allocate a multiple of 64kB (16 pages).
	npage = (npage+15)&~15;
	ask = npage<<PageShift;
	if(ask < HeapAllocChunk)
		ask = HeapAllocChunk;

	v = runtime_MHeap_SysAlloc(h, ask);
	if(v == nil) {
		// Could not get the big chunk; retry with just the rounded request.
		if(ask > (npage<<PageShift)) {
			ask = npage<<PageShift;
			v = runtime_MHeap_SysAlloc(h, ask);
		}
		if(v == nil) {
			runtime_printf("runtime: out of memory: cannot allocate %D-byte block (%D in use)\n", (uint64)ask, mstats.heap_sys);
			return false;
		}
	}
	mstats.heap_sys += ask;

	// Create a fake "in use" span and free it, so that the
	// right coalescing happens.
	s = runtime_FixAlloc_Alloc(&h->spanalloc);
	mstats.mspan_inuse = h->spanalloc.inuse;
	mstats.mspan_sys = h->spanalloc.sys;
	runtime_MSpan_Init(s, (uintptr)v>>PageShift, ask>>PageShift);
	p = s->start;
	if(sizeof(void*) == 8)
		p -= ((uintptr)h->arena_start>>PageShift);
	h->map[p] = s;
	h->map[p + s->npages - 1] = s;
	s->state = MSpanInUse;
	MHeap_FreeLocked(h, s);
	return true;
}
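// Example of the rounding above: a request to grow by a single page is
// rounded up to 16 pages (64kB), and ask is then raised to HeapAllocChunk
// if that is larger, so even small growth requests map a chunk big enough
// for later allocations to carve up.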
// Look up the span at the given address.
// Address is guaranteed to be in map
// and is guaranteed to be start or end of span.
MSpan*
runtime_MHeap_Lookup(MHeap *h, void *v)
{
	uintptr p;

	p = (uintptr)v;
	if(sizeof(void*) == 8)
		p -= (uintptr)h->arena_start;
	return h->map[p >> PageShift];
}
// Look up the span at the given address.
// Address is *not* guaranteed to be in map
// and may be anywhere in the span.
// Map entries for the middle of a span are only
// valid for allocated spans. Free spans may have
// other garbage in their middles, so we have to
// check for that.
MSpan*
runtime_MHeap_LookupMaybe(MHeap *h, void *v)
{
	MSpan *s;
	PageID p, q;

	if((byte*)v < h->arena_start || (byte*)v >= h->arena_used)
		return nil;
	p = (uintptr)v>>PageShift;
	q = p;
	if(sizeof(void*) == 8)
		q -= (uintptr)h->arena_start >> PageShift;
	s = h->map[q];
	if(s == nil || p < s->start || p - s->start >= s->npages)
		return nil;
	if(s->state != MSpanInUse)
		return nil;
	return s;
}
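// In LookupMaybe, p is the absolute page number of v while q is the same
// page rebased to arena_start, because on 64-bit systems h->map is indexed
// relative to the start of the arena rather than address zero.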
// Free the span back into the heap.
void
runtime_MHeap_Free(MHeap *h, MSpan *s, int32 acct)
{
	runtime_lock(h);
	runtime_purgecachedstats(runtime_m()->mcache);
	mstats.heap_inuse -= s->npages<<PageShift;
	if(acct) {
		mstats.heap_alloc -= s->npages<<PageShift;
		mstats.heap_objects--;
	}
	MHeap_FreeLocked(h, s);
	runtime_unlock(h);
}
static void
MHeap_FreeLocked(MHeap *h, MSpan *s)
{
	uintptr *sp, *tp;
	MSpan *t;
	PageID p;

	if(s->types.sysalloc)
		runtime_settype_sysfree(s);
	s->types.compression = MTypes_Empty;

	if(s->state != MSpanInUse || s->ref != 0) {
		runtime_printf("MHeap_FreeLocked - span %p ptr %p state %d ref %d\n", s, s->start<<PageShift, s->state, s->ref);
		runtime_throw("MHeap_FreeLocked - invalid free");
	}
	mstats.heap_idle += s->npages<<PageShift;
	s->state = MSpanFree;
	runtime_MSpanList_Remove(s);
	sp = (uintptr*)(s->start<<PageShift);
	// Stamp newly unused spans. The scavenger will use that
	// info to potentially give back some pages to the OS.
	s->unusedsince = runtime_nanotime();
	s->npreleased = 0;

	// Coalesce with earlier, later spans.
	p = s->start;
	if(sizeof(void*) == 8)
		p -= (uintptr)h->arena_start >> PageShift;
	if(p > 0 && (t = h->map[p-1]) != nil && t->state != MSpanInUse) {
		tp = (uintptr*)(t->start<<PageShift);
		*tp |= *sp;	// propagate "needs zeroing" mark
		s->start = t->start;
		s->npages += t->npages;
		s->npreleased = t->npreleased; // absorb released pages
		p -= t->npages;
		h->map[p] = s;
		runtime_MSpanList_Remove(t);
		t->state = MSpanDead;
		runtime_FixAlloc_Free(&h->spanalloc, t);
		mstats.mspan_inuse = h->spanalloc.inuse;
		mstats.mspan_sys = h->spanalloc.sys;
	}
	if(p+s->npages < nelem(h->map) && (t = h->map[p+s->npages]) != nil && t->state != MSpanInUse) {
		tp = (uintptr*)(t->start<<PageShift);
		*sp |= *tp;	// propagate "needs zeroing" mark
		s->npages += t->npages;
		s->npreleased += t->npreleased;
		h->map[p + s->npages - 1] = s;
		runtime_MSpanList_Remove(t);
		t->state = MSpanDead;
		runtime_FixAlloc_Free(&h->spanalloc, t);
		mstats.mspan_inuse = h->spanalloc.inuse;
		mstats.mspan_sys = h->spanalloc.sys;
	}

	// Insert s into appropriate list.
	if(s->npages < nelem(h->free))
		runtime_MSpanList_Insert(&h->free[s->npages], s);
	else
		runtime_MSpanList_Insert(&h->large, s);
}
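// Coalescing only rewrites the boundary entries of h->map. That is
// sufficient because free spans are looked up by their first and last
// pages only, per the invariant at the top of this file.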
static void
forcegchelper(void *vnote)
{
	Note *note = (Note*)vnote;

	runtime_gc(1);
	runtime_notewakeup(note);
}
// Release (part of) unused memory to OS.
// Goroutine created at startup.
// Loop forever.
void
runtime_MHeap_Scavenger(void* dummy)
{
	MHeap *h;
	MSpan *s, *list;
	uint64 tick, now, forcegc, limit;
	uint32 k, i;
	uintptr released, sumreleased;
	const byte *env;
	bool trace;
	Note note, *notep;

	USED(dummy);

	// If we go two minutes without a garbage collection, force one to run.
	forcegc = 2*60*1e9;
	// If a span goes unused for 5 minutes after a garbage collection,
	// we hand it back to the operating system.
	limit = 5*60*1e9;
	// Make wake-up period small enough for the sampling to be correct.
	if(forcegc < limit)
		tick = forcegc/2;
	else
		tick = limit/2;

	trace = false;
	env = runtime_getenv("GOGCTRACE");
	if(env != nil)
		trace = runtime_atoi(env) > 0;

	h = &runtime_mheap;
	for(k=0;; k++) {
		runtime_noteclear(&note);
		runtime_entersyscall();
		runtime_notetsleep(&note, tick);
		runtime_exitsyscall();

		runtime_lock(h);
		now = runtime_nanotime();
		if(now - mstats.last_gc > forcegc) {
			runtime_unlock(h);
			// The scavenger can not block other goroutines,
			// otherwise the deadlock detector can fire spuriously.
			// GC blocks other goroutines via the runtime_worldsema.
			runtime_noteclear(&note);
			notep = &note;
			__go_go(forcegchelper, (void*)notep);
			runtime_entersyscall();
			runtime_notesleep(&note);
			runtime_exitsyscall();
			if(trace)
				runtime_printf("scvg%d: GC forced\n", k);
			runtime_lock(h);
			now = runtime_nanotime();
		}
		sumreleased = 0;
		for(i=0; i < nelem(h->free)+1; i++) {
			if(i < nelem(h->free))
				list = &h->free[i];
			else
				list = &h->large;
			if(runtime_MSpanList_IsEmpty(list))
				continue;
			for(s=list->next; s != list; s=s->next) {
				if((now - s->unusedsince) > limit) {
					released = (s->npages - s->npreleased) << PageShift;
					mstats.heap_released += released;
					sumreleased += released;
					s->npreleased = s->npages;
					runtime_SysUnused((void*)(s->start << PageShift), s->npages << PageShift);
				}
			}
		}
		runtime_unlock(h);

		if(trace) {
			if(sumreleased > 0)
				runtime_printf("scvg%d: %p MB released\n", k, sumreleased>>20);
			runtime_printf("scvg%d: inuse: %D, idle: %D, sys: %D, released: %D, consumed: %D (MB)\n",
				k, mstats.heap_inuse>>20, mstats.heap_idle>>20, mstats.heap_sys>>20,
				mstats.heap_released>>20, (mstats.heap_sys - mstats.heap_released)>>20);
		}
	}
}
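// The sleeps in the scavenger loop are bracketed by entersyscall and
// exitsyscall so that the goroutine is treated as blocked in a system
// call while it waits, rather than as running.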
// Initialize a new span with the given start and npages.
void
runtime_MSpan_Init(MSpan *span, PageID start, uintptr npages)
{
	span->next = nil;
	span->prev = nil;
	span->start = start;
	span->npages = npages;
	span->freelist = nil;
	span->ref = 0;
	span->sizeclass = 0;
	span->elemsize = 0;
	span->state = 0;
	span->unusedsince = 0;
	span->npreleased = 0;
	span->types.compression = MTypes_Empty;
}
// Initialize an empty doubly-linked list.
void
runtime_MSpanList_Init(MSpan *list)
{
	list->state = MSpanListHead;
	list->next = list;
	list->prev = list;
}
void
runtime_MSpanList_Remove(MSpan *span)
{
	if(span->prev == nil && span->next == nil)
		return;
	span->prev->next = span->next;
	span->next->prev = span->prev;
	span->prev = nil;
	span->next = nil;
}
bool
runtime_MSpanList_IsEmpty(MSpan *list)
{
	return list->next == list;
}
void
runtime_MSpanList_Insert(MSpan *list, MSpan *span)
{
	if(span->next != nil || span->prev != nil) {
		runtime_printf("failed MSpanList_Insert %p %p %p\n", span, span->next, span->prev);
		runtime_throw("MSpanList_Insert");
	}
	span->next = list->next;
	span->prev = list;
	span->next->prev = span;
	span->prev->next = span;
}