Imported version 5.0alpha6.
[gcc.git] / boehm-gc / allchblk.c
/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
 * Copyright (c) 1998-1999 by Silicon Graphics. All rights reserved.
 * Copyright (c) 1999 by Hewlett-Packard Company. All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

#define DEBUG
#undef DEBUG
#include <stdio.h>
#include "gc_priv.h"

GC_bool GC_use_entire_heap = 0;

/*
 * Free heap blocks are kept on one of several free lists,
 * depending on the size of the block. Each free list is doubly linked.
 * Adjacent free blocks are coalesced.
 */


# define MAX_BLACK_LIST_ALLOC (2*HBLKSIZE)
                /* largest block we will allocate starting on a black */
                /* listed block. Must be >= HBLKSIZE.                 */


# define UNIQUE_THRESHOLD 32
        /* Sizes up to this many HBLKs each have their own free list */
# define HUGE_THRESHOLD 256
        /* Sizes of at least this many heap blocks are mapped to a   */
        /* single free list.                                          */
# define FL_COMPRESSION 8
        /* In between sizes map this many distinct sizes to a single */
        /* bin.                                                       */

# define N_HBLK_FLS (HUGE_THRESHOLD - UNIQUE_THRESHOLD)/FL_COMPRESSION \
                                 + UNIQUE_THRESHOLD

struct hblk * GC_hblkfreelist[N_HBLK_FLS+1] = { 0 };

/* Map a number of blocks to the appropriate large block free list index. */
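/* For example, with the constants above a request for 40 heap blocks     */
/* maps to index (40 - 32)/8 + 32 == 33, while any request of 256 or more */
/* blocks maps to the last bin, N_HBLK_FLS.                                */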
int GC_hblk_fl_from_blocks(blocks_needed)
word blocks_needed;
{
    if (blocks_needed <= UNIQUE_THRESHOLD) return blocks_needed;
    if (blocks_needed >= HUGE_THRESHOLD) return N_HBLK_FLS;
    return (blocks_needed - UNIQUE_THRESHOLD)/FL_COMPRESSION
                                        + UNIQUE_THRESHOLD;

}

# define HBLK_IS_FREE(hdr) ((hdr) -> hb_map == GC_invalid_map)
# define PHDR(hhdr) HDR(hhdr -> hb_prev)
# define NHDR(hhdr) HDR(hhdr -> hb_next)
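/* A free block is recognized by the GC_invalid_map installed in its   */
/* header (see the GC_invalidate_map calls below); HBLK_IS_FREE tests  */
/* for that, and PHDR/NHDR fetch the headers of a free block's         */
/* neighbors on its free list.                                          */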

# ifdef USE_MUNMAP
#   define IS_MAPPED(hhdr) (((hhdr) -> hb_flags & WAS_UNMAPPED) == 0)
# else  /* !USE_MUNMAP */
#   define IS_MAPPED(hhdr) 1
# endif /* USE_MUNMAP */

# if !defined(NO_DEBUGGING)
void GC_print_hblkfreelist()
{
    struct hblk * h;
    word total_free = 0;
    hdr * hhdr;
    word sz;
    int i;

    for (i = 0; i <= N_HBLK_FLS; ++i) {
        h = GC_hblkfreelist[i];
        if (0 != h) GC_printf1("Free list %ld:\n", (unsigned long)i);
        while (h != 0) {
            hhdr = HDR(h);
            sz = hhdr -> hb_sz;
            GC_printf2("\t0x%lx size %lu ", (unsigned long)h, (unsigned long)sz);
            total_free += sz;
            if (GC_is_black_listed(h, HBLKSIZE) != 0) {
                GC_printf0("start black listed\n");
            } else if (GC_is_black_listed(h, hhdr -> hb_sz) != 0) {
                GC_printf0("partially black listed\n");
            } else {
                GC_printf0("not black listed\n");
            }
            h = hhdr -> hb_next;
        }
    }
    if (total_free != GC_large_free_bytes) {
        GC_printf1("GC_large_free_bytes = %lu (INCONSISTENT!!)\n",
                   (unsigned long) GC_large_free_bytes);
    }
    GC_printf1("Total of %lu bytes on free list\n", (unsigned long)total_free);
}

/* Return the free list index on which the block described by the header */
/* appears, or -1 if it appears nowhere.                                  */
int free_list_index_of(wanted)
hdr * wanted;
{
    struct hblk * h;
    hdr * hhdr;
    int i;

    for (i = 0; i <= N_HBLK_FLS; ++i) {
        h = GC_hblkfreelist[i];
        while (h != 0) {
            hhdr = HDR(h);
            if (hhdr == wanted) return i;
            h = hhdr -> hb_next;
        }
    }
    return -1;
}

void GC_dump_regions()
{
    unsigned i;
    ptr_t start, end;
    ptr_t p;
    size_t bytes;
    hdr *hhdr;
    for (i = 0; i < GC_n_heap_sects; ++i) {
        start = GC_heap_sects[i].hs_start;
        bytes = GC_heap_sects[i].hs_bytes;
        end = start + bytes;
        /* Merge in contiguous sections. */
        while (i+1 < GC_n_heap_sects && GC_heap_sects[i+1].hs_start == end) {
            ++i;
            end = GC_heap_sects[i].hs_start + GC_heap_sects[i].hs_bytes;
        }
        GC_printf2("***Section from 0x%lx to 0x%lx\n", start, end);
        for (p = start; p < end;) {
            hhdr = HDR(p);
            GC_printf1("\t0x%lx ", (unsigned long)p);
            if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
                GC_printf1("Missing header!!\n", hhdr);
                p += HBLKSIZE;
                continue;
            }
            if (HBLK_IS_FREE(hhdr)) {
                int correct_index = GC_hblk_fl_from_blocks(
                                        divHBLKSZ(hhdr -> hb_sz));
                int actual_index;

                GC_printf1("\tfree block of size 0x%lx bytes",
                           (unsigned long)(hhdr -> hb_sz));
                if (IS_MAPPED(hhdr)) {
                    GC_printf0("\n");
                } else {
                    GC_printf0("(unmapped)\n");
                }
                actual_index = free_list_index_of(hhdr);
                if (-1 == actual_index) {
                    GC_printf1("\t\tBlock not on free list %ld!!\n",
                               correct_index);
                } else if (correct_index != actual_index) {
                    GC_printf2("\t\tBlock on list %ld, should be on %ld!!\n",
                               actual_index, correct_index);
                }
                p += hhdr -> hb_sz;
            } else {
                GC_printf1("\tused for blocks of size 0x%lx bytes\n",
                           (unsigned long)WORDS_TO_BYTES(hhdr -> hb_sz));
                p += HBLKSIZE * OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
            }
        }
    }
}

# endif /* NO_DEBUGGING */

/* Initialize hdr for a block containing the indicated size and */
/* kind of objects.                                              */
/* Return FALSE on failure.                                      */
static GC_bool setup_header(hhdr, sz, kind, flags)
register hdr * hhdr;
word sz;        /* object size in words */
int kind;
unsigned char flags;
{
    register word descr;

    /* Add description of valid object pointers */
    if (!GC_add_map_entry(sz)) return(FALSE);
    hhdr -> hb_map = GC_obj_map[sz > MAXOBJSZ? 0 : sz];

    /* Set size, kind and mark proc fields */
    hhdr -> hb_sz = sz;
    hhdr -> hb_obj_kind = kind;
    hhdr -> hb_flags = flags;
    descr = GC_obj_kinds[kind].ok_descriptor;
    if (GC_obj_kinds[kind].ok_relocate_descr) descr += WORDS_TO_BYTES(sz);
    hhdr -> hb_descr = descr;

    /* Clear mark bits */
    GC_clear_hdr_marks(hhdr);

    hhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no;
    return(TRUE);
}

#define FL_UNKNOWN -1
/*
 * Remove hhdr from the appropriate free list.
 * We assume it is on the nth free list, or on the size
 * appropriate free list if n is FL_UNKNOWN.
 */
void GC_remove_from_fl(hhdr, n)
hdr * hhdr;
int n;
{
    GC_ASSERT(((hhdr -> hb_sz) & (HBLKSIZE-1)) == 0);
    if (hhdr -> hb_prev == 0) {
        int index;
        if (FL_UNKNOWN == n) {
            index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
        } else {
            index = n;
        }
        GC_ASSERT(HDR(GC_hblkfreelist[index]) == hhdr);
        GC_hblkfreelist[index] = hhdr -> hb_next;
    } else {
        hdr *phdr;
        GET_HDR(hhdr -> hb_prev, phdr);
        phdr -> hb_next = hhdr -> hb_next;
    }
    if (0 != hhdr -> hb_next) {
        hdr * nhdr;
        GC_ASSERT(!IS_FORWARDING_ADDR_OR_NIL(NHDR(hhdr)));
        GET_HDR(hhdr -> hb_next, nhdr);
        nhdr -> hb_prev = hhdr -> hb_prev;
    }
}

/*
 * Return a pointer to the free block ending just before h, if any.
 */
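/* We first follow any forwarding headers backward from h-1; if that   */
/* does not identify the preceding block, we fall back on              */
/* GC_prev_block and accept the result only if that block is free and  */
/* actually ends at h.                                                  */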
struct hblk * GC_free_block_ending_at(h)
struct hblk *h;
{
    struct hblk * p = h - 1;
    hdr * phdr;

    GET_HDR(p, phdr);
    while (0 != phdr && IS_FORWARDING_ADDR_OR_NIL(phdr)) {
        p = FORWARDED_ADDR(p,phdr);
        phdr = HDR(p);
    }
    if (0 != phdr) {
        if(HBLK_IS_FREE(phdr)) {
            return p;
        } else {
            return 0;
        }
    }
    p = GC_prev_block(h - 1);
    if (0 != p) {
        phdr = HDR(p);
        if (HBLK_IS_FREE(phdr) && (ptr_t)p + phdr -> hb_sz == (ptr_t)h) {
            return p;
        }
    }
    return 0;
}

/*
 * Add hhdr to the appropriate free list.
 * We maintain individual free lists sorted by address.
 */
void GC_add_to_fl(h, hhdr)
struct hblk *h;
hdr * hhdr;
{
    int index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
    struct hblk *second = GC_hblkfreelist[index];
    hdr * second_hdr;
#   ifdef GC_ASSERTIONS
      struct hblk *next = (struct hblk *)((word)h + hhdr -> hb_sz);
      hdr * nexthdr = HDR(next);
      struct hblk *prev = GC_free_block_ending_at(h);
      hdr * prevhdr = HDR(prev);
      GC_ASSERT(nexthdr == 0 || !HBLK_IS_FREE(nexthdr) || !IS_MAPPED(nexthdr));
      GC_ASSERT(prev == 0 || !HBLK_IS_FREE(prevhdr) || !IS_MAPPED(prevhdr));
#   endif
    GC_ASSERT(((hhdr -> hb_sz) & (HBLKSIZE-1)) == 0);
    GC_hblkfreelist[index] = h;
    hhdr -> hb_next = second;
    hhdr -> hb_prev = 0;
    if (0 != second) {
        GET_HDR(second, second_hdr);
        second_hdr -> hb_prev = h;
    }
    GC_invalidate_map(hhdr);
}

#ifdef USE_MUNMAP

/* Unmap blocks that haven't been recently touched. This is the only */
/* way blocks are ever unmapped.                                      */
void GC_unmap_old(void)
{
    struct hblk * h;
    hdr * hhdr;
    word sz;
    unsigned short last_rec, threshold;
    int i;
#   define UNMAP_THRESHOLD 6

    for (i = 0; i <= N_HBLK_FLS; ++i) {
        for (h = GC_hblkfreelist[i]; 0 != h; h = hhdr -> hb_next) {
            hhdr = HDR(h);
            if (!IS_MAPPED(hhdr)) continue;
            threshold = (unsigned short)(GC_gc_no - UNMAP_THRESHOLD);
            last_rec = hhdr -> hb_last_reclaimed;
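            /* last_rec and threshold are unsigned short truncations of  */
            /* GC_gc_no, so the test below must tolerate wrap-around; a  */
            /* block is unmapped only if it has gone at least            */
            /* UNMAP_THRESHOLD collections without being reclaimed.      */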
            if (last_rec > GC_gc_no
                || last_rec < threshold && threshold < GC_gc_no
                   /* not recently wrapped */) {
                sz = hhdr -> hb_sz;
                GC_unmap((ptr_t)h, sz);
                hhdr -> hb_flags |= WAS_UNMAPPED;
            }
        }
    }
}

/* Merge all unmapped blocks that are adjacent to other free        */
/* blocks. This may involve remapping, since all blocks are either  */
/* fully mapped or fully unmapped.                                   */
void GC_merge_unmapped(void)
{
    struct hblk * h, *next;
    hdr * hhdr, *nexthdr;
    word size, nextsize;
    int i;

    for (i = 0; i <= N_HBLK_FLS; ++i) {
        h = GC_hblkfreelist[i];
        while (h != 0) {
            GET_HDR(h, hhdr);
            size = hhdr->hb_sz;
            next = (struct hblk *)((word)h + size);
            GET_HDR(next, nexthdr);
            /* Coalesce with successor, if possible */
            if (0 != nexthdr && HBLK_IS_FREE(nexthdr)) {
                nextsize = nexthdr -> hb_sz;
                if (IS_MAPPED(hhdr)) {
                    GC_ASSERT(!IS_MAPPED(nexthdr));
                    /* make both consistent, so that we can merge */
                    if (size > nextsize) {
                        GC_remap((ptr_t)next, nextsize);
                    } else {
                        GC_unmap((ptr_t)h, size);
                        hhdr -> hb_flags |= WAS_UNMAPPED;
                    }
                } else if (IS_MAPPED(nexthdr)) {
                    GC_ASSERT(!IS_MAPPED(hhdr));
                    if (size > nextsize) {
                        GC_unmap((ptr_t)next, nextsize);
                    } else {
                        GC_remap((ptr_t)h, size);
                        hhdr -> hb_flags &= ~WAS_UNMAPPED;
                    }
                } else {
                    /* Unmap any gap in the middle */
                    GC_unmap_gap((ptr_t)h, size, (ptr_t)next, nexthdr -> hb_sz);
                }
                /* If they are both unmapped, we merge, but leave unmapped. */
                GC_remove_from_fl(hhdr, i);
                GC_remove_from_fl(nexthdr, FL_UNKNOWN);
                hhdr -> hb_sz += nexthdr -> hb_sz;
                GC_remove_header(next);
                GC_add_to_fl(h, hhdr);
                /* Start over at beginning of list */
                h = GC_hblkfreelist[i];
            } else /* not mergable with successor */ {
                h = hhdr -> hb_next;
            }
        } /* while (h != 0) ... */
    } /* for ... */
}

#endif /* USE_MUNMAP */

/*
 * Return a pointer to a block starting at h of length bytes.
 * Memory for the block is mapped.
 * Remove the block from its free list, and return the remainder (if any)
 * to its appropriate free list.
 * May fail by returning 0.
 * The header for the returned block must be set up by the caller.
 * If the return value is not 0, then hhdr is the header for it.
 */
struct hblk * GC_get_first_part(h, hhdr, bytes, index)
struct hblk *h;
hdr * hhdr;
word bytes;
int index;
{
    word total_size = hhdr -> hb_sz;
    struct hblk * rest;
    hdr * rest_hdr;

    GC_ASSERT((total_size & (HBLKSIZE-1)) == 0);
    GC_remove_from_fl(hhdr, index);
    if (total_size == bytes) return h;
    rest = (struct hblk *)((word)h + bytes);
    rest_hdr = GC_install_header(rest);
    if (0 == rest_hdr) return(0);
    rest_hdr -> hb_sz = total_size - bytes;
    rest_hdr -> hb_flags = 0;
#   ifdef GC_ASSERTIONS
      /* Mark h not free, to avoid assertion about adjacent free blocks. */
      hhdr -> hb_map = 0;
#   endif
    GC_add_to_fl(rest, rest_hdr);
    return h;
}

/*
 * H is a free block. N points at an address inside it.
 * A new header for n has already been set up. Fix up h's header
 * to reflect the fact that it is being split, move it to the
 * appropriate free list.
 * N replaces h in the original free list.
 *
 * Nhdr is not completely filled in, since it is about to be allocated.
 * It may in fact end up on the wrong free list for its size.
 * (Hence adding it to a free list is silly. But this path is hopefully
 * rare enough that it doesn't matter. The code is cleaner this way.)
 */
void GC_split_block(h, hhdr, n, nhdr, index)
struct hblk *h;
hdr * hhdr;
struct hblk *n;
hdr * nhdr;
int index;      /* Index of free list */
{
    word total_size = hhdr -> hb_sz;
    word h_size = (word)n - (word)h;
    struct hblk *prev = hhdr -> hb_prev;
    struct hblk *next = hhdr -> hb_next;

    /* Replace h with n on its freelist */
    nhdr -> hb_prev = prev;
    nhdr -> hb_next = next;
    nhdr -> hb_sz = total_size - h_size;
    nhdr -> hb_flags = 0;
    if (0 != prev) {
        HDR(prev) -> hb_next = n;
    } else {
        GC_hblkfreelist[index] = n;
    }
    if (0 != next) {
        HDR(next) -> hb_prev = n;
    }
#   ifdef GC_ASSERTIONS
      nhdr -> hb_map = 0;       /* Don't fail test for consecutive */
                                /* free blocks in GC_add_to_fl.    */
#   endif
#   ifdef USE_MUNMAP
      hhdr -> hb_last_reclaimed = GC_gc_no;
#   endif
    hhdr -> hb_sz = h_size;
    GC_add_to_fl(h, hhdr);
    GC_invalidate_map(nhdr);
}

struct hblk * GC_allochblk_nth();

/*
 * Allocate (and return pointer to) a heap block
 * for objects of size sz words, searching the nth free list.
 *
 * NOTE: We set obj_map field in header correctly.
 * Caller is responsible for building an object freelist in block.
 *
 * We clear the block if it is destined for large objects, and if
 * kind requires that newly allocated objects be cleared.
 */
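/* The search starts at the smallest free list that could satisfy the  */
/* request and proceeds to successively larger ones; GC_allochblk_nth  */
/* does the per-list work, including black-list avoidance and block    */
/* splitting.                                                           */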
struct hblk *
GC_allochblk(sz, kind, flags)
word sz;
int kind;
unsigned char flags;  /* IGNORE_OFF_PAGE or 0 */
{
    int start_list = GC_hblk_fl_from_blocks(OBJ_SZ_TO_BLOCKS(sz));
    int i;
    for (i = start_list; i <= N_HBLK_FLS; ++i) {
        struct hblk * result = GC_allochblk_nth(sz, kind, flags, i);
        if (0 != result) return result;
    }
    return 0;
}
/*
 * The same, but with search restricted to nth free list.
 */
struct hblk *
GC_allochblk_nth(sz, kind, flags, n)
word sz;
int kind;
unsigned char flags;  /* IGNORE_OFF_PAGE or 0 */
int n;
{
    register struct hblk *hbp;
    register hdr * hhdr;                /* Header corr. to hbp */
    register struct hblk *thishbp;
    register hdr * thishdr;             /* Header corr. to thishbp */
    signed_word size_needed;    /* number of bytes in requested objects */
    signed_word size_avail;     /* bytes available in this block */

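    /* sz is an object size in words; OBJ_SZ_TO_BLOCKS yields the number */
    /* of whole heap blocks needed for one such object, so size_needed   */
    /* below is always a multiple of HBLKSIZE.                           */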
    size_needed = HBLKSIZE * OBJ_SZ_TO_BLOCKS(sz);

    /* search for a big enough block in free list */
    hbp = GC_hblkfreelist[n];
    for(; 0 != hbp; hbp = hhdr -> hb_next) {
        GET_HDR(hbp, hhdr);
        size_avail = hhdr->hb_sz;
        if (size_avail < size_needed) continue;
        if (!GC_use_entire_heap) {
            if (size_avail != size_needed
                && USED_HEAP_SIZE >= GC_requested_heapsize
                && !GC_incremental && GC_should_collect()) {
                continue;
            }
        }
        /* If the next heap block is obviously better, go on.       */
        /* This prevents us from disassembling a single large block */
        /* to get tiny blocks.                                       */
        {
            signed_word next_size;

            thishbp = hhdr -> hb_next;
            if (thishbp != 0) {
                GET_HDR(thishbp, thishdr);
                next_size = (signed_word)(thishdr -> hb_sz);
                if (next_size < size_avail
                    && next_size >= size_needed
                    && !GC_is_black_listed(thishbp, (word)size_needed)) {
                    continue;
                }
            }
        }
        if ( !IS_UNCOLLECTABLE(kind) &&
             (kind != PTRFREE || size_needed > MAX_BLACK_LIST_ALLOC)) {
            struct hblk * lasthbp = hbp;
            ptr_t search_end = (ptr_t)hbp + size_avail - size_needed;
            signed_word orig_avail = size_avail;
            signed_word eff_size_needed = ((flags & IGNORE_OFF_PAGE)?
                                                HBLKSIZE
                                                : size_needed);

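            /* Skip forward past any prefix of the block that is black   */
            /* listed for an object of eff_size_needed bytes;            */
            /* GC_is_black_listed returns a later candidate start as     */
            /* long as the current one is black listed, so lasthbp ends  */
            /* up at the first acceptable position (or past search_end). */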
            while ((ptr_t)lasthbp <= search_end
                   && (thishbp = GC_is_black_listed(lasthbp,
                                                    (word)eff_size_needed))) {
                lasthbp = thishbp;
            }
            size_avail -= (ptr_t)lasthbp - (ptr_t)hbp;
            thishbp = lasthbp;
            if (size_avail >= size_needed) {
                if (thishbp != hbp &&
                    0 != (thishdr = GC_install_header(thishbp))) {
                    /* Make sure it's mapped before we mangle it. */
#                   ifdef USE_MUNMAP
                      if (!IS_MAPPED(hhdr)) {
                          GC_remap((ptr_t)hbp, size_avail);
                          hhdr -> hb_flags &= ~WAS_UNMAPPED;
                      }
#                   endif
                    /* Split the block at thishbp */
                    GC_split_block(hbp, hhdr, thishbp, thishdr, n);
                    /* Advance to thishbp */
                    hbp = thishbp;
                    hhdr = thishdr;
                    /* We must now allocate thishbp, since it may */
                    /* be on the wrong free list.                 */
                }
            } else if (size_needed > (signed_word)BL_LIMIT
                       && orig_avail - size_needed
                          > (signed_word)BL_LIMIT) {
                /* Punt, since anything else risks unreasonable heap growth. */
                WARN("Needed to allocate blacklisted block at 0x%lx\n",
                     (word)hbp);
                size_avail = orig_avail;
            } else if (size_avail == 0 && size_needed == HBLKSIZE
                       && IS_MAPPED(hhdr)) {
                if (!GC_find_leak) {
                    static unsigned count = 0;

                    /* The block is completely blacklisted. We need       */
                    /* to drop some such blocks, since otherwise we spend */
                    /* all our time traversing them if pointerfree        */
                    /* blocks are unpopular.                              */
                    /* A dropped block will be reconsidered at next GC.   */
                    if ((++count & 3) == 0) {
                        /* Allocate and drop the block in small chunks, to */
                        /* maximize the chance that we will recover some   */
                        /* later.                                          */
                        word total_size = hhdr -> hb_sz;
                        struct hblk * limit = hbp + divHBLKSZ(total_size);
                        struct hblk * h;
                        struct hblk * prev = hhdr -> hb_prev;

                        GC_words_wasted += total_size;
                        GC_large_free_bytes -= total_size;
                        GC_remove_from_fl(hhdr, n);
                        for (h = hbp; h < limit; h++) {
                            if (h == hbp || 0 != (hhdr = GC_install_header(h))) {
                                (void) setup_header(
                                        hhdr,
                                        BYTES_TO_WORDS(HBLKSIZE - HDR_BYTES),
                                        PTRFREE, 0); /* Can't fail */
                                if (GC_debugging_started) {
                                    BZERO(h + HDR_BYTES, HBLKSIZE - HDR_BYTES);
                                }
                            }
                        }
                        /* Restore hbp to point at free block */
                        hbp = prev;
                        if (0 == hbp) {
                            return GC_allochblk_nth(sz, kind, flags, n);
                        }
                        hhdr = HDR(hbp);
                    }
                }
            }
        }
        if( size_avail >= size_needed ) {
#           ifdef USE_MUNMAP
              if (!IS_MAPPED(hhdr)) {
                  GC_remap((ptr_t)hbp, size_avail);
                  hhdr -> hb_flags &= ~WAS_UNMAPPED;
              }
#           endif
            /* hbp may be on the wrong freelist; the parameter n */
            /* is important.                                      */
            hbp = GC_get_first_part(hbp, hhdr, size_needed, n);
            break;
        }
    }

    if (0 == hbp) return 0;

    /* Notify virtual dirty bit implementation that we are about to write. */
    GC_write_hint(hbp);

    /* Add it to map of valid blocks */
    if (!GC_install_counts(hbp, (word)size_needed)) return(0);
    /* This leaks memory under very rare conditions. */

    /* Set up header */
    if (!setup_header(hhdr, sz, kind, flags)) {
        GC_remove_counts(hbp, (word)size_needed);
        return(0); /* ditto */
    }

    /* Clear block if necessary */
    if (GC_debugging_started
        || sz > MAXOBJSZ && GC_obj_kinds[kind].ok_init) {
        BZERO(hbp + HDR_BYTES, size_needed - HDR_BYTES);
    }

    /* We just successfully allocated a block. Restart count of */
    /* consecutive failures.                                     */
    {
        extern unsigned GC_fail_count;

        GC_fail_count = 0;
    }

    GC_large_free_bytes -= size_needed;

    GC_ASSERT(IS_MAPPED(hhdr));
    return( hbp );
}

struct hblk * GC_freehblk_ptr = 0;  /* Search position hint for GC_freehblk */

/*
 * Free a heap block.
 *
 * Coalesce the block with its neighbors if possible.
 *
 * All mark words are assumed to be cleared.
 */
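/* The block is coalesced first with a free successor and then with a  */
/* free predecessor, provided the neighbor is currently mapped; the    */
/* merged block is then placed on the free list for its new size.      */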
void
GC_freehblk(hbp)
struct hblk *hbp;
{
    struct hblk *next, *prev;
    hdr *hhdr, *prevhdr, *nexthdr;
    signed_word size;


    GET_HDR(hbp, hhdr);
    size = hhdr->hb_sz;
    size = HBLKSIZE * OBJ_SZ_TO_BLOCKS(size);
    GC_remove_counts(hbp, (word)size);
    hhdr->hb_sz = size;

    /* Check for duplicate deallocation in the easy case */
    if (HBLK_IS_FREE(hhdr)) {
        GC_printf1("Duplicate large block deallocation of 0x%lx\n",
                   (unsigned long) hbp);
    }

    GC_ASSERT(IS_MAPPED(hhdr));
    GC_invalidate_map(hhdr);
    next = (struct hblk *)((word)hbp + size);
    GET_HDR(next, nexthdr);
    prev = GC_free_block_ending_at(hbp);
    /* Coalesce with successor, if possible */
    if(0 != nexthdr && HBLK_IS_FREE(nexthdr) && IS_MAPPED(nexthdr)) {
        GC_remove_from_fl(nexthdr, FL_UNKNOWN);
        hhdr -> hb_sz += nexthdr -> hb_sz;
        GC_remove_header(next);
    }
    /* Coalesce with predecessor, if possible. */
    if (0 != prev) {
        prevhdr = HDR(prev);
        if (IS_MAPPED(prevhdr)) {
            GC_remove_from_fl(prevhdr, FL_UNKNOWN);
            prevhdr -> hb_sz += hhdr -> hb_sz;
            GC_remove_header(hbp);
            hbp = prev;
            hhdr = prevhdr;
        }
    }

    GC_large_free_bytes += size;
    GC_add_to_fl(hbp, hhdr);
}