// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// See malloc.h for overview.
//
// TODO(rsc): double-check stats.

#include "go-string.h"
#include "interface.h"

extern MStats mstats;	// defined in extern.go

extern volatile int32 runtime_MemProfileRate
  __asm__ ("runtime.MemProfileRate");
// Allocate an object of at least size bytes.
// Small objects are allocated from the per-thread cache's free lists.
// Large objects (> 32 kB) are allocated straight from the heap.
void*
runtime_mallocgc(uintptr size, uint32 flag, int32 dogc, int32 zeroed)
{
	int32 sizeclass, rate;

	if(g->status == Gsyscall)
		dogc = 0;
	if(runtime_gcwaiting && g != m->g0 && m->locks == 0 && g->status != Gsyscall) {
		runtime_gosched();
		m = runtime_m();
	}
	if(m->mallocing)
		runtime_throw("malloc/free - deadlock");
	m->mallocing = 1;
	c = m->mcache;
	if(size <= MaxSmallSize) {
		// Allocate from mcache free lists.
		sizeclass = runtime_SizeToClass(size);
		size = runtime_class_to_size[sizeclass];
		v = runtime_MCache_Alloc(c, sizeclass, size, zeroed);
		if(v == nil)
			runtime_throw("out of memory");
		c->local_alloc += size;
		c->local_total_alloc += size;
		c->local_by_size[sizeclass].nmalloc++;
	} else {
		// TODO(rsc): Report tracebacks for very large allocations.

		// Allocate directly from heap.
		npages = size >> PageShift;
		if((size & PageMask) != 0)
			npages++;
		s = runtime_MHeap_Alloc(&runtime_mheap, npages, 0, 1);
		if(s == nil)
			runtime_throw("out of memory");
		size = npages<<PageShift;
		c->local_alloc += size;
		c->local_total_alloc += size;
		v = (void*)(s->start << PageShift);

		// setup for mark sweep
		runtime_markspan(v, 0, 0, true);
	}
	if(!(flag & FlagNoGC))
		runtime_markallocated(v, size, (flag&FlagNoPointers) != 0);
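	// markallocated records the new block in the heap bitmap; the
	// FlagNoPointers bit tells the collector that this object contains
	// no pointers, so it never needs to be scanned.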
	if(!(flag & FlagNoProfiling) && (rate = runtime_MemProfileRate) > 0) {
		if(size >= (uint32) rate)
			goto profile;
		if((uint32) m->mcache->next_sample > size)
			m->mcache->next_sample -= size;
		else {
			// pick next profile time
			// If you change this, also change allocmcache.
			if(rate > 0x3fffffff)	// make 2*rate not overflow
				rate = 0x3fffffff;
			m->mcache->next_sample = runtime_fastrand1() % (2*rate);
		profile:
			runtime_setblockspecial(v, true);
			runtime_MProf_Malloc(v, size);
		}
	}
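	// Sampling note: next_sample is drawn uniformly from [0, 2*rate), so the
	// expected distance between sampled allocations is rate bytes
	// (MemProfileRate defaults to 512 kB).  Any single allocation of at
	// least rate bytes is always sampled via the goto profile above.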
	if(dogc && mstats.heap_alloc >= mstats.next_gc)
		runtime_gc(0);
	return v;
}

void*
__go_alloc(uintptr size)
{
	return runtime_mallocgc(size, 0, 0, 1);
}
// Free the object whose base pointer is v.
void
__go_free(void *v)
{
	// If you change this also change mgc0.c:/^sweep,
	// which has a copy of the guts of free.

	if(m->mallocing)
		runtime_throw("malloc/free - deadlock");
	m->mallocing = 1;

	if(!runtime_mlookup(v, nil, nil, &s)) {
		runtime_printf("free %p: not an allocated block\n", v);
		runtime_throw("free runtime_mlookup");
	}
	prof = runtime_blockspecial(v);
	// Find size class for v.
	sizeclass = s->sizeclass;
	c = m->mcache;
	if(sizeclass == 0) {
		// Large object.
		size = s->npages<<PageShift;
		*(uintptr*)(s->start<<PageShift) = 1;	// mark as "needs to be zeroed"
		// Must mark v freed before calling unmarkspan and MHeap_Free:
		// they might coalesce v into other spans and change the bitmap further.
		runtime_markfreed(v, size);
		runtime_unmarkspan(v, 1<<PageShift);
		runtime_MHeap_Free(&runtime_mheap, s, 1);
	} else {
		// Small object.
		size = runtime_class_to_size[sizeclass];
		if(size > sizeof(uintptr))
			((uintptr*)v)[1] = 1;	// mark as "needs to be zeroed"
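		// (Word 0 is left alone because, once the block is on a free list,
		// its first word holds the free-list link; the flag lives in word 1.)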
		// Must mark v freed before calling MCache_Free:
		// it might coalesce v and other blocks into a bigger span
		// and change the bitmap further.
		runtime_markfreed(v, size);
		c->local_by_size[sizeclass].nfree++;
		runtime_MCache_Free(c, v, sizeclass, size);
	}
	c->local_nfree++;
	c->local_alloc -= size;
	if(prof)
		runtime_MProf_Free(v, size);
	m->mallocing = 0;
}
int32
runtime_mlookup(void *v, byte **base, uintptr *size, MSpan **sp)
{
	runtime_m()->mcache->local_nlookup++;
	s = runtime_MHeap_LookupMaybe(&runtime_mheap, v);
	if(s == nil) {
		runtime_checkfreed(v, 1);
		return 0;
	}

	p = (byte*)((uintptr)s->start<<PageShift);
	if(s->sizeclass == 0) {
		// Large object.
		if(size)
			*size = s->npages<<PageShift;
		return 1;
	}

	if((byte*)v >= (byte*)s->limit) {
		// pointers past the last block do not count as pointers.
		return 0;
	}

	n = runtime_class_to_size[s->sizeclass];
	i = ((byte*)v - p)/n;
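	// i is the index of v's block within the span, so p + i*n rounds v
	// down to the block's base address, which is what mlookup reports
	// back through *base.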
MCache*
runtime_allocmcache(void)
{
	runtime_lock(&runtime_mheap);
	c = runtime_FixAlloc_Alloc(&runtime_mheap.cachealloc);
	mstats.mcache_inuse = runtime_mheap.cachealloc.inuse;
	mstats.mcache_sys = runtime_mheap.cachealloc.sys;
	runtime_unlock(&runtime_mheap);

	// Set first allocation sample size.
	rate = runtime_MemProfileRate;
	if(rate > 0x3fffffff)	// make 2*rate not overflow
		rate = 0x3fffffff;
	if(rate != 0)
		c->next_sample = runtime_fastrand1() % (2*rate);

	return c;
}
void
runtime_purgecachedstats(M* m)
{
	MCache *c;

	// Protected by either heap or GC lock.
	c = m->mcache;
	mstats.heap_alloc += c->local_cachealloc;
	c->local_cachealloc = 0;
	mstats.heap_objects += c->local_objects;
	c->local_objects = 0;
	mstats.nmalloc += c->local_nmalloc;
	c->local_nmalloc = 0;
	mstats.nfree += c->local_nfree;
	c->local_nfree = 0;
	mstats.nlookup += c->local_nlookup;
	c->local_nlookup = 0;
	mstats.alloc += c->local_alloc;
	c->local_alloc = 0;
	mstats.total_alloc += c->local_total_alloc;
	c->local_total_alloc = 0;
}
extern uintptr runtime_sizeof_C_MStats
  __asm__ ("runtime.Sizeof_C_MStats");

#define MaxArena32 (2U<<30)

void
runtime_mallocinit(void)
{
	byte *p, *want;
	uintptr arena_size, bitmap_size, limit;
	extern byte end[];

	runtime_sizeof_C_MStats = sizeof(MStats);
	p = nil;
	limit = runtime_memlimit();
	// Set up the allocation arena, a contiguous area of memory where
	// allocated data will be found.  The arena begins with a bitmap large
	// enough to hold 4 bits per allocated word.
	if(sizeof(void*) == 8 && (limit == 0 || limit > (1<<30))) {
		// On a 64-bit machine, allocate from a single contiguous reservation.
		// 16 GB should be big enough for now.
		//
		// The code will work with the reservation at any address, but ask
		// SysReserve to use 0x000000f800000000 if possible.
		// Allocating a 16 GB region takes away 34 bits, and the amd64
		// doesn't let us choose the top 17 bits, so that leaves the 13 bits
		// in the middle of 0x00f8 for us to choose.  Choosing 0x00f8 means
		// that the valid memory addresses will begin 0x00f8, 0x00f9, 0x00fa, 0x00fb.
		// None of the bytes f8 f9 fa fb can appear in valid UTF-8, and
		// they are otherwise as far from ff (likely a common byte) as possible.
		// Choosing 0x00 for the leading 6 bits was more arbitrary, but it
		// is not a common ASCII code point either.  Using 0x11f8 instead
		// caused out of memory errors on OS X during thread allocations.
		// These choices are both for debuggability and to reduce the
		// odds of the conservative garbage collector not collecting memory
		// because some non-pointer block of memory had a bit pattern
		// that matched a memory address.
		//
		// Actually we reserve 17 GB (because the bitmap ends up being 1 GB)
		// but it hardly matters: fc is not valid UTF-8 either, and we have to
		// allocate 15 GB before we get that far.
		//
		// If this fails we fall back to the 32 bit memory mechanism.
		arena_size = (uintptr)(16LL<<30);
		bitmap_size = arena_size / (sizeof(void*)*8/4);
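		// With 8-byte words and 4 bitmap bits per word, the divisor above
		// is 16, so the 16 GB arena gets a 1 GB bitmap -- the "17 GB"
		// reservation mentioned above.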
		p = runtime_SysReserve((void*)(0x00f8ULL<<32), bitmap_size + arena_size);
	}
	if(p == nil) {
		// On a 32-bit machine, we can't typically get away
		// with a giant virtual address space reservation.
		// Instead we map the memory information bitmap
		// immediately after the data segment, large enough
		// to handle another 2GB of mappings (256 MB),
		// along with a reservation for another 512 MB of memory.
		// When that gets used up, we'll start asking the kernel
		// for any memory anywhere and hope it's in the 2GB
		// following the bitmap (presumably the executable begins
		// near the bottom of memory, so we'll have to use up
		// most of memory before the kernel resorts to giving out
		// memory before the beginning of the text segment).
		//
		// Alternatively we could reserve 512 MB bitmap, enough
		// for 4GB of mappings, and then accept any memory the
		// kernel threw at us, but normally that's a waste of 512 MB
		// of address space, which is probably too much in a 32-bit world.
		bitmap_size = MaxArena32 / (sizeof(void*)*8/4);
		arena_size = 512<<20;
		if(limit > 0 && arena_size+bitmap_size > limit) {
			bitmap_size = (limit / 9) & ~((1<<PageShift) - 1);
			arena_size = bitmap_size * 8;
		}
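		// The limit must cover both the bitmap and the arena; on 32-bit
		// the bitmap is 1/8 the size of the arena it describes (4 bitmap
		// bits per 4-byte word), so bitmap+arena = 9*bitmap, hence the
		// limit/9 above (rounded down to a page boundary).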
		// SysReserve treats the address we ask for, end, as a hint,
		// not as an absolute requirement.  If we ask for the end
		// of the data segment but the operating system requires
		// a little more space before we can start allocating, it will
		// give out a slightly higher pointer.  Except QEMU, which
		// is buggy, as usual: it won't adjust the pointer upward.
		// So adjust it upward a little bit ourselves: 1/4 MB to get
		// away from the running binary image and then round up
		// to a MB boundary.
		want = (byte*)(((uintptr)end + (1<<18) + (1<<20) - 1)&~((1<<20)-1));
		if(0xffffffff - (uintptr)want <= bitmap_size + arena_size)
			want = 0;
		p = runtime_SysReserve(want, bitmap_size + arena_size);
		if(p == nil)
			runtime_throw("runtime: cannot reserve arena virtual address space");
		if((uintptr)p & (((uintptr)1<<PageShift)-1))
			runtime_printf("runtime: SysReserve returned unaligned address %p; asked for %p", p, bitmap_size+arena_size);
	}
	if((uintptr)p & (((uintptr)1<<PageShift)-1))
		runtime_throw("runtime: SysReserve returned unaligned address");
	runtime_mheap.bitmap = p;
	runtime_mheap.arena_start = p + bitmap_size;
	runtime_mheap.arena_used = runtime_mheap.arena_start;
	runtime_mheap.arena_end = runtime_mheap.arena_start + arena_size;

	// Initialize the rest of the allocator.
	runtime_MHeap_Init(&runtime_mheap, runtime_SysAlloc);
	runtime_m()->mcache = runtime_allocmcache();

	// See if it works.
	runtime_free(runtime_malloc(1));
}
void*
runtime_MHeap_SysAlloc(MHeap *h, uintptr n)
{
	byte *p;

	if(n > (uintptr)(h->arena_end - h->arena_used)) {
		// We are in 32-bit mode, maybe we didn't use all possible address space yet.
		// Reserve some more space.
		byte *new_end;
		uintptr needed;

		needed = (uintptr)h->arena_used + n - (uintptr)h->arena_end;
		// Round wanted arena size to a multiple of 256MB.
		needed = (needed + (256<<20) - 1) & ~((256<<20)-1);
		new_end = h->arena_end + needed;
		if(new_end <= h->arena_start + MaxArena32) {
			p = runtime_SysReserve(h->arena_end, new_end - h->arena_end);
			if(p == h->arena_end)
				h->arena_end = new_end;
		}
	}
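	// The grown reservation is adopted only when SysReserve honors the hint
	// and returns exactly arena_end; otherwise arena_end is left unchanged
	// and, if the existing reservation still cannot satisfy n, the
	// SysAlloc fallback further down is used instead.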
	if(n <= (uintptr)(h->arena_end - h->arena_used)) {
		// Keep taking from our reservation.
		p = h->arena_used;
		runtime_SysMap(p, n);
		h->arena_used += n;
		runtime_MHeap_MapBits(h);
		return p;
	}
	// If using 64-bit, our reservation is all we have.
	if(sizeof(void*) == 8 && (uintptr)h->bitmap >= 0xffffffffU)
		return nil;

	// On 32-bit, once the reservation is gone we can
	// try to get memory at a location chosen by the OS
	// and hope that it is in the range we allocated bitmap for.
	p = runtime_SysAlloc(n);
	if(p == nil)
		return nil;
	if(p < h->arena_start || (uintptr)(p+n - h->arena_start) >= MaxArena32) {
		runtime_printf("runtime: memory allocated by OS (%p) not in usable range [%p,%p)\n",
			p, h->arena_start, h->arena_start+MaxArena32);
		runtime_SysFree(p, n);
		return nil;
	}

	if(p+n > h->arena_used) {
		h->arena_used = p+n;
		if(h->arena_used > h->arena_end)
			h->arena_end = h->arena_used;
		runtime_MHeap_MapBits(h);
	}

	return p;
}
void*
runtime_mal(uintptr n)
{
	return runtime_mallocgc(n, 0, 1, 1);
}

func new(typ *Type) (ret *uint8) {
	uint32 flag = typ->__code&GO_NO_POINTERS ? FlagNoPointers : 0;
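	// FlagNoPointers marks objects of pointer-free types so that the
	// garbage collector can skip scanning their contents.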
	ret = runtime_mallocgc(typ->__size, flag, 1, 1);
}
func SetFinalizer(obj Eface, finalizer Eface) {
	if(obj.__type_descriptor == nil) {
		runtime_printf("runtime.SetFinalizer: first argument is nil interface\n");
		goto throw;
	}
	if(obj.__type_descriptor->__code != GO_PTR) {
		runtime_printf("runtime.SetFinalizer: first argument is %S, not pointer\n", *obj.__type_descriptor->__reflection);
		goto throw;
	}
	if(!runtime_mlookup(obj.__object, &base, &size, nil) || obj.__object != base) {
		runtime_printf("runtime.SetFinalizer: pointer not at beginning of allocated block\n");
		goto throw;
	}
	if(finalizer.__type_descriptor != nil) {
		if(finalizer.__type_descriptor->__code != GO_FUNC)
			goto badfunc;
		ft = (const FuncType*)finalizer.__type_descriptor;
		if(ft->__dotdotdot || ft->__in.__count != 1 || !__go_type_descriptors_equal(*(Type**)ft->__in.__values, obj.__type_descriptor))
			goto badfunc;
	}
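	// The finalizer must be a plain (non-variadic) function of exactly one
	// argument, and that argument type must equal the object's own pointer
	// type: for a value p of type *T, only a func(*T) is accepted
	// (illustratively, runtime.SetFinalizer(p, func(p *T) { ... })).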
	if(!runtime_addfinalizer(obj.__object, finalizer.__type_descriptor != nil ? *(void**)finalizer.__object : nil, ft)) {
		runtime_printf("runtime.SetFinalizer: finalizer already set\n");
		goto throw;
	}
	return;

badfunc:
	runtime_printf("runtime.SetFinalizer: second argument is %S, not func(%S)\n", *finalizer.__type_descriptor->__reflection, *obj.__type_descriptor->__reflection);
throw:
	runtime_throw("runtime.SetFinalizer");
}