runtime: correct a logic error in hashmap growth.
[gcc.git] / libgo / runtime / malloc.goc
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// See malloc.h for overview.
//
// TODO(rsc): double-check stats.

package runtime
#include <stddef.h>
#include <errno.h>
#include <stdlib.h>
#include "go-alloc.h"
#include "runtime.h"
#include "arch.h"
#include "malloc.h"
#include "go-string.h"
#include "interface.h"
#include "go-type.h"

MHeap runtime_mheap;

extern MStats mstats;	// defined in extern.go

extern volatile int32 runtime_MemProfileRate
  __asm__ ("runtime.MemProfileRate");

// Allocate an object of at least size bytes.
// Small objects are allocated from the per-thread cache's free lists.
// Large objects (> 32 kB) are allocated straight from the heap.
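// The flag bits (FlagNoGC, FlagNoPointers, FlagNoProfiling) control GC
// marking and profiling of the new block, dogc says whether this call may
// trigger a collection, and zeroed says whether the caller requires the
// returned memory to be zeroed.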
void*
runtime_mallocgc(uintptr size, uint32 flag, int32 dogc, int32 zeroed)
{
	M *m;
	G *g;
	int32 sizeclass, rate;
	MCache *c;
	uintptr npages;
	MSpan *s;
	void *v;

	m = runtime_m();
	g = runtime_g();
	if(g->status == Gsyscall)
		dogc = 0;
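	// If a collection is waiting for all goroutines to stop, yield here
	// (unless this is the scheduler goroutine, we hold locks, or we are
	// in a system call) so the collection can proceed before this
	// allocation.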
	if(runtime_gcwaiting && g != m->g0 && m->locks == 0 && g->status != Gsyscall) {
		runtime_gosched();
		m = runtime_m();
	}
	if(m->mallocing)
		runtime_throw("malloc/free - deadlock");
	m->mallocing = 1;
	if(size == 0)
		size = 1;

	c = m->mcache;
	c->local_nmalloc++;
	if(size <= MaxSmallSize) {
		// Allocate from mcache free lists.
		sizeclass = runtime_SizeToClass(size);
		size = runtime_class_to_size[sizeclass];
		v = runtime_MCache_Alloc(c, sizeclass, size, zeroed);
		if(v == nil)
			runtime_throw("out of memory");
		c->local_alloc += size;
		c->local_total_alloc += size;
		c->local_by_size[sizeclass].nmalloc++;
	} else {
		// TODO(rsc): Report tracebacks for very large allocations.

		// Allocate directly from heap.
		npages = size >> PageShift;
		if((size & PageMask) != 0)
			npages++;
		s = runtime_MHeap_Alloc(&runtime_mheap, npages, 0, 1);
		if(s == nil)
			runtime_throw("out of memory");
		size = npages<<PageShift;
		c->local_alloc += size;
		c->local_total_alloc += size;
		v = (void*)(s->start << PageShift);

		// setup for mark sweep
		runtime_markspan(v, 0, 0, true);
	}
	if(!(flag & FlagNoGC))
		runtime_markallocated(v, size, (flag&FlagNoPointers) != 0);

	m->mallocing = 0;

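	// Heap profiling: allocations are sampled roughly every
	// MemProfileRate bytes. next_sample counts down the bytes remaining
	// until the next sampled allocation; blocks at least as large as the
	// rate are always sampled.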
	if(!(flag & FlagNoProfiling) && (rate = runtime_MemProfileRate) > 0) {
		if(size >= (uint32) rate)
			goto profile;
		if((uint32) m->mcache->next_sample > size)
			m->mcache->next_sample -= size;
		else {
			// pick next profile time
			// If you change this, also change allocmcache.
			if(rate > 0x3fffffff)	// make 2*rate not overflow
				rate = 0x3fffffff;
			m->mcache->next_sample = runtime_fastrand1() % (2*rate);
		profile:
			runtime_setblockspecial(v, true);
			runtime_MProf_Malloc(v, size);
		}
	}

	if(dogc && mstats.heap_alloc >= mstats.next_gc)
		runtime_gc(0);
	return v;
}

void*
__go_alloc(uintptr size)
{
	return runtime_mallocgc(size, 0, 0, 1);
}

// Free the object whose base pointer is v.
void
__go_free(void *v)
{
	M *m;
	int32 sizeclass;
	MSpan *s;
	MCache *c;
	uint32 prof;
	uintptr size;

	if(v == nil)
		return;

	// If you change this also change mgc0.c:/^sweep,
	// which has a copy of the guts of free.

	m = runtime_m();
	if(m->mallocing)
		runtime_throw("malloc/free - deadlock");
	m->mallocing = 1;

	if(!runtime_mlookup(v, nil, nil, &s)) {
		runtime_printf("free %p: not an allocated block\n", v);
		runtime_throw("free runtime_mlookup");
	}
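	// Remember whether the profiler sampled this block, so the free
	// can be reported to it below.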
	prof = runtime_blockspecial(v);

	// Find size class for v.
	sizeclass = s->sizeclass;
	c = m->mcache;
	if(sizeclass == 0) {
		// Large object.
		size = s->npages<<PageShift;
		*(uintptr*)(s->start<<PageShift) = 1;	// mark as "needs to be zeroed"
		// Must mark v freed before calling unmarkspan and MHeap_Free:
		// they might coalesce v into other spans and change the bitmap further.
		runtime_markfreed(v, size);
		runtime_unmarkspan(v, 1<<PageShift);
		runtime_MHeap_Free(&runtime_mheap, s, 1);
	} else {
		// Small object.
		size = runtime_class_to_size[sizeclass];
		if(size > sizeof(uintptr))
			((uintptr*)v)[1] = 1;	// mark as "needs to be zeroed"
		// Must mark v freed before calling MCache_Free:
		// it might coalesce v and other blocks into a bigger span
		// and change the bitmap further.
		runtime_markfreed(v, size);
		c->local_by_size[sizeclass].nfree++;
		runtime_MCache_Free(c, v, sizeclass, size);
	}
	c->local_nfree++;
	c->local_alloc -= size;
	if(prof)
		runtime_MProf_Free(v, size);
	m->mallocing = 0;
}

int32
runtime_mlookup(void *v, byte **base, uintptr *size, MSpan **sp)
{
	uintptr n, i;
	byte *p;
	MSpan *s;

	runtime_m()->mcache->local_nlookup++;
	s = runtime_MHeap_LookupMaybe(&runtime_mheap, v);
	if(sp)
		*sp = s;
	if(s == nil) {
		runtime_checkfreed(v, 1);
		if(base)
			*base = nil;
		if(size)
			*size = 0;
		return 0;
	}

	p = (byte*)((uintptr)s->start<<PageShift);
	if(s->sizeclass == 0) {
		// Large object.
		if(base)
			*base = p;
		if(size)
			*size = s->npages<<PageShift;
		return 1;
	}

	if((byte*)v >= (byte*)s->limit) {
		// pointers past the last block do not count as pointers.
		return 0;
	}

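	// Small object: the span is divided into equal-size blocks, so the
	// base of the block containing v is found by rounding v's offset
	// within the span down to a multiple of the block size.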
	n = runtime_class_to_size[s->sizeclass];
	if(base) {
		i = ((byte*)v - p)/n;
		*base = p + i*n;
	}
	if(size)
		*size = n;

	return 1;
}

MCache*
runtime_allocmcache(void)
{
	int32 rate;
	MCache *c;

	runtime_lock(&runtime_mheap);
	c = runtime_FixAlloc_Alloc(&runtime_mheap.cachealloc);
	mstats.mcache_inuse = runtime_mheap.cachealloc.inuse;
	mstats.mcache_sys = runtime_mheap.cachealloc.sys;
	runtime_unlock(&runtime_mheap);

	// Set first allocation sample size.
	rate = runtime_MemProfileRate;
	if(rate > 0x3fffffff)	// make 2*rate not overflow
		rate = 0x3fffffff;
	if(rate != 0)
		c->next_sample = runtime_fastrand1() % (2*rate);

	return c;
}

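// Flush the M's cached allocation statistics into the global mstats
// and reset the local counters.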
void
runtime_purgecachedstats(M* m)
{
	MCache *c;

	// Protected by either heap or GC lock.
	c = m->mcache;
	mstats.heap_alloc += c->local_cachealloc;
	c->local_cachealloc = 0;
	mstats.heap_objects += c->local_objects;
	c->local_objects = 0;
	mstats.nmalloc += c->local_nmalloc;
	c->local_nmalloc = 0;
	mstats.nfree += c->local_nfree;
	c->local_nfree = 0;
	mstats.nlookup += c->local_nlookup;
	c->local_nlookup = 0;
	mstats.alloc += c->local_alloc;
	c->local_alloc = 0;
	mstats.total_alloc += c->local_total_alloc;
	c->local_total_alloc = 0;
}

extern uintptr runtime_sizeof_C_MStats
  __asm__ ("runtime.Sizeof_C_MStats");

#define MaxArena32 (2U<<30)

void
runtime_mallocinit(void)
{
	byte *p;
	uintptr arena_size, bitmap_size;
	extern byte end[];
	byte *want;
	uintptr limit;

	runtime_sizeof_C_MStats = sizeof(MStats);

	p = nil;
	arena_size = 0;
	bitmap_size = 0;

	// for 64-bit build
	USED(p);
	USED(arena_size);
	USED(bitmap_size);

	runtime_InitSizes();

	limit = runtime_memlimit();

	// Set up the allocation arena, a contiguous area of memory where
	// allocated data will be found. The arena begins with a bitmap large
	// enough to hold 4 bits per allocated word.
	if(sizeof(void*) == 8 && (limit == 0 || limit > (1<<30))) {
		// On a 64-bit machine, allocate from a single contiguous reservation.
		// 16 GB should be big enough for now.
		//
		// The code will work with the reservation at any address, but ask
		// SysReserve to use 0x000000f800000000 if possible.
		// Allocating a 16 GB region takes away 36 bits, and the amd64
		// doesn't let us choose the top 17 bits, so that leaves the 11 bits
		// in the middle of 0x00f8 for us to choose. Choosing 0x00f8 means
		// that the valid memory addresses will begin 0x00f8, 0x00f9, 0x00fa, 0x00fb.
		// None of the bytes f8 f9 fa fb can appear in valid UTF-8, and
		// they are otherwise as far from ff (likely a common byte) as possible.
		// Choosing 0x00 for the leading 6 bits was more arbitrary, but it
		// is not a common ASCII code point either. Using 0x11f8 instead
		// caused out of memory errors on OS X during thread allocations.
		// These choices are both for debuggability and to reduce the
		// odds of the conservative garbage collector not collecting memory
		// because some non-pointer block of memory had a bit pattern
		// that matched a memory address.
		//
		// Actually we reserve 17 GB (because the bitmap ends up being 1 GB)
		// but it hardly matters: fc is not valid UTF-8 either, and we have to
		// allocate 15 GB before we get that far.
		//
		// If this fails we fall back to the 32 bit memory mechanism
		arena_size = (uintptr)(16LL<<30);
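		// 4 bitmap bits per 64-bit word: the bitmap is 1/16 of the
		// arena, i.e. 1 GB for the 16 GB arena.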
		bitmap_size = arena_size / (sizeof(void*)*8/4);
		p = runtime_SysReserve((void*)(0x00f8ULL<<32), bitmap_size + arena_size);
	}
	if (p == nil) {
		// On a 32-bit machine, we can't typically get away
		// with a giant virtual address space reservation.
		// Instead we map the memory information bitmap
		// immediately after the data segment, large enough
		// to handle another 2GB of mappings (256 MB),
		// along with a reservation for another 512 MB of memory.
		// When that gets used up, we'll start asking the kernel
		// for any memory anywhere and hope it's in the 2GB
		// following the bitmap (presumably the executable begins
		// near the bottom of memory, so we'll have to use up
		// most of memory before the kernel resorts to giving out
		// memory before the beginning of the text segment).
		//
		// Alternatively we could reserve 512 MB bitmap, enough
		// for 4GB of mappings, and then accept any memory the
		// kernel threw at us, but normally that's a waste of 512 MB
		// of address space, which is probably too much in a 32-bit world.
		bitmap_size = MaxArena32 / (sizeof(void*)*8/4);
		arena_size = 512<<20;
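		// If the OS memory limit is too small for the default sizes,
		// shrink both while keeping arena_size == 8*bitmap_size (the
		// bitmap needs 4 bits per 32-bit word, i.e. 1/8 of the arena),
		// so split the limit roughly 1:8.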
		if(limit > 0 && arena_size+bitmap_size > limit) {
			bitmap_size = (limit / 9) & ~((1<<PageShift) - 1);
			arena_size = bitmap_size * 8;
		}

		// SysReserve treats the address we ask for, end, as a hint,
		// not as an absolute requirement. If we ask for the end
		// of the data segment but the operating system requires
		// a little more space before we can start allocating, it will
		// give out a slightly higher pointer. Except QEMU, which
		// is buggy, as usual: it won't adjust the pointer upward.
		// So adjust it upward a little bit ourselves: 1/4 MB to get
		// away from the running binary image and then round up
		// to a MB boundary.
		want = (byte*)(((uintptr)end + (1<<18) + (1<<20) - 1)&~((1<<20)-1));
		if(0xffffffff - (uintptr)want <= bitmap_size + arena_size)
			want = 0;
		p = runtime_SysReserve(want, bitmap_size + arena_size);
		if(p == nil)
			runtime_throw("runtime: cannot reserve arena virtual address space");
		if((uintptr)p & (((uintptr)1<<PageShift)-1))
			runtime_printf("runtime: SysReserve returned unaligned address %p; asked for %p", p, bitmap_size+arena_size);
	}
	if((uintptr)p & (((uintptr)1<<PageShift)-1))
		runtime_throw("runtime: SysReserve returned unaligned address");

	runtime_mheap.bitmap = p;
	runtime_mheap.arena_start = p + bitmap_size;
	runtime_mheap.arena_used = runtime_mheap.arena_start;
	runtime_mheap.arena_end = runtime_mheap.arena_start + arena_size;

	// Initialize the rest of the allocator.
	runtime_MHeap_Init(&runtime_mheap, runtime_SysAlloc);
	runtime_m()->mcache = runtime_allocmcache();

	// See if it works.
	runtime_free(runtime_malloc(1));
}

void*
runtime_MHeap_SysAlloc(MHeap *h, uintptr n)
{
	byte *p;

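	// Grow the heap mapping: prefer taking pages from the existing
	// reservation, extending it in 256 MB chunks on 32-bit systems when
	// needed; as a last resort on 32-bit, accept whatever address the OS
	// gives and hope it falls inside the range the bitmap covers.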
	if(n > (uintptr)(h->arena_end - h->arena_used)) {
		// We are in 32-bit mode, maybe we didn't use all possible address space yet.
		// Reserve some more space.
		byte *new_end;
		uintptr needed;

		needed = (uintptr)h->arena_used + n - (uintptr)h->arena_end;
		// Round wanted arena size to a multiple of 256MB.
		needed = (needed + (256<<20) - 1) & ~((256<<20)-1);
		new_end = h->arena_end + needed;
		if(new_end <= h->arena_start + MaxArena32) {
			p = runtime_SysReserve(h->arena_end, new_end - h->arena_end);
			if(p == h->arena_end)
				h->arena_end = new_end;
		}
	}
	if(n <= (uintptr)(h->arena_end - h->arena_used)) {
		// Keep taking from our reservation.
		p = h->arena_used;
		runtime_SysMap(p, n);
		h->arena_used += n;
		runtime_MHeap_MapBits(h);
		return p;
	}

	// If using 64-bit, our reservation is all we have.
	if(sizeof(void*) == 8 && (uintptr)h->bitmap >= 0xffffffffU)
		return nil;

	// On 32-bit, once the reservation is gone we can
	// try to get memory at a location chosen by the OS
	// and hope that it is in the range we allocated bitmap for.
	p = runtime_SysAlloc(n);
	if(p == nil)
		return nil;

	if(p < h->arena_start || (uintptr)(p+n - h->arena_start) >= MaxArena32) {
		runtime_printf("runtime: memory allocated by OS (%p) not in usable range [%p,%p)\n",
			p, h->arena_start, h->arena_start+MaxArena32);
		runtime_SysFree(p, n);
		return nil;
	}

	if(p+n > h->arena_used) {
		h->arena_used = p+n;
		if(h->arena_used > h->arena_end)
			h->arena_end = h->arena_used;
		runtime_MHeap_MapBits(h);
	}

	return p;
}

// Runtime stubs.

void*
runtime_mal(uintptr n)
{
	return runtime_mallocgc(n, 0, 1, 1);
}

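// new allocates a zeroed object of the given type. Types with no pointers
// are marked FlagNoPointers so the collector need not scan them.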
func new(typ *Type) (ret *uint8) {
	uint32 flag = typ->__code&GO_NO_POINTERS ? FlagNoPointers : 0;
	ret = runtime_mallocgc(typ->__size, flag, 1, 1);
}

func GC() {
	runtime_gc(1);
}

func SetFinalizer(obj Eface, finalizer Eface) {
	byte *base;
	uintptr size;
	const FuncType *ft;

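	// obj must be a non-nil pointer to the start of an allocated block,
	// and finalizer must be nil or a function taking exactly that
	// pointer type as its single argument.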
	if(obj.__type_descriptor == nil) {
		runtime_printf("runtime.SetFinalizer: first argument is nil interface\n");
		goto throw;
	}
	if(obj.__type_descriptor->__code != GO_PTR) {
		runtime_printf("runtime.SetFinalizer: first argument is %S, not pointer\n", *obj.__type_descriptor->__reflection);
		goto throw;
	}
	if(!runtime_mlookup(obj.__object, &base, &size, nil) || obj.__object != base) {
		runtime_printf("runtime.SetFinalizer: pointer not at beginning of allocated block\n");
		goto throw;
	}
	ft = nil;
	if(finalizer.__type_descriptor != nil) {
		if(finalizer.__type_descriptor->__code != GO_FUNC)
			goto badfunc;
		ft = (const FuncType*)finalizer.__type_descriptor;
		if(ft->__dotdotdot || ft->__in.__count != 1 || !__go_type_descriptors_equal(*(Type**)ft->__in.__values, obj.__type_descriptor))
			goto badfunc;
	}

	if(!runtime_addfinalizer(obj.__object, finalizer.__type_descriptor != nil ? *(void**)finalizer.__object : nil, ft)) {
		runtime_printf("runtime.SetFinalizer: finalizer already set\n");
		goto throw;
	}
	return;

badfunc:
	runtime_printf("runtime.SetFinalizer: second argument is %S, not func(%S)\n", *finalizer.__type_descriptor->__reflection, *obj.__type_descriptor->__reflection);
throw:
	runtime_throw("runtime.SetFinalizer");
}