Imported version 6.0alpha7.
[gcc.git] / boehm-gc / finalize.c
/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1996 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
/* Boehm, February 1, 1996 1:19 pm PST */
# define I_HIDE_POINTERS
# include "private/gc_pmark.h"

# ifdef FINALIZE_ON_DEMAND
  int GC_finalize_on_demand = 1;
# else
  int GC_finalize_on_demand = 0;
# endif

# ifdef JAVA_FINALIZATION
  int GC_java_finalization = 1;
# else
  int GC_java_finalization = 0;
# endif
/* Type of mark procedure used for marking from finalizable object.  */
/* This procedure normally does not mark the object, only its        */
/* descendants.                                                       */
typedef void finalization_mark_proc(/* ptr_t finalizable_obj_ptr */);

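/* Hash an address into a table with 1 << log_size buckets.  The low  */
/* address bits are discarded, since they carry little information    */
/* for aligned heap pointers.                                          */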
# define HASH3(addr,size,log_size) \
    ((((word)(addr) >> 3) ^ ((word)(addr) >> (3+(log_size)))) \
    & ((size) - 1))
#define HASH2(addr,log_size) HASH3(addr, 1 << log_size, log_size)

struct hash_chain_entry {
    word hidden_key;
    struct hash_chain_entry * next;
};

unsigned GC_finalization_failures = 0;
        /* Number of finalization requests that failed for lack of memory. */

static struct disappearing_link {
    struct hash_chain_entry prolog;
#   define dl_hidden_link prolog.hidden_key
                                /* Field to be cleared.         */
#   define dl_next(x) (struct disappearing_link *)((x) -> prolog.next)
#   define dl_set_next(x,y) (x) -> prolog.next = (struct hash_chain_entry *)(y)

    word dl_hidden_obj;         /* Pointer to object base       */
} **dl_head = 0;

static signed_word log_dl_table_size = -1;
                        /* Binary log of                                */
                        /* current size of array pointed to by dl_head. */
                        /* -1 ==> size is 0.                            */

word GC_dl_entries = 0; /* Number of entries currently in disappearing  */
                        /* link table.                                  */

static struct finalizable_object {
    struct hash_chain_entry prolog;
#   define fo_hidden_base prolog.hidden_key
                                /* Pointer to object base.      */
                                /* No longer hidden once object */
                                /* is on finalize_now queue.    */
#   define fo_next(x) (struct finalizable_object *)((x) -> prolog.next)
#   define fo_set_next(x,y) (x) -> prolog.next = (struct hash_chain_entry *)(y)
    GC_finalization_proc fo_fn; /* Finalizer.                   */
    ptr_t fo_client_data;
    word fo_object_size;        /* In bytes.                    */
    finalization_mark_proc * fo_mark_proc;      /* Mark-through procedure */
} **fo_head = 0;

struct finalizable_object * GC_finalize_now = 0;
        /* List of objects that should be finalized now.       */

static signed_word log_fo_table_size = -1;

word GC_fo_entries = 0;

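/* Push the roots of the finalization data structures, so that the     */
/* hash tables and their entries survive collections.  The hidden      */
/* keys stored in the entries are deliberately not traced.             */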
void GC_push_finalizer_structures GC_PROTO((void))
{
    GC_push_all((ptr_t)(&dl_head), (ptr_t)(&dl_head) + sizeof(word));
    GC_push_all((ptr_t)(&fo_head), (ptr_t)(&fo_head) + sizeof(word));
    GC_push_all((ptr_t)(&GC_finalize_now),
                (ptr_t)(&GC_finalize_now) + sizeof(word));
}

/* Double the size of a hash table. *log_size_ptr is the log of its    */
/* current size.  May be a no-op.                                       */
/* *table is a pointer to an array of hash headers.  If we succeed, we */
/* update both *table and *log_size_ptr.                                */
/* Lock is held.  Signals are disabled.                                 */
void GC_grow_table(table, log_size_ptr)
struct hash_chain_entry ***table;
signed_word * log_size_ptr;
{
    register word i;
    register struct hash_chain_entry *p;
    int log_old_size = *log_size_ptr;
    register int log_new_size = log_old_size + 1;
    word old_size = ((log_old_size == -1)? 0: (1 << log_old_size));
    register word new_size = 1 << log_new_size;
    struct hash_chain_entry **new_table = (struct hash_chain_entry **)
        GC_INTERNAL_MALLOC_IGNORE_OFF_PAGE(
                (size_t)new_size * sizeof(struct hash_chain_entry *), NORMAL);

    if (new_table == 0) {
        if (*table == 0) {
            ABORT("Insufficient space for initial table allocation");
        } else {
            return;
        }
    }
    for (i = 0; i < old_size; i++) {
      p = (*table)[i];
      while (p != 0) {
        register ptr_t real_key = (ptr_t)REVEAL_POINTER(p -> hidden_key);
        register struct hash_chain_entry *next = p -> next;
        register int new_hash = HASH3(real_key, new_size, log_new_size);

        p -> next = new_table[new_hash];
        new_table[new_hash] = p;
        p = next;
      }
    }
    *log_size_ptr = log_new_size;
    *table = new_table;
}

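/* GC_register_disappearing_link(link) arranges for *link to be        */
/* cleared when the object containing link (GC_base(link)) is found    */
/* to be unreachable.  GC_general_register_disappearing_link(link,     */
/* obj) clears *link when obj first becomes unreachable; see gc.h      */
/* for the full contract.                                               */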
# if defined(__STDC__) || defined(__cplusplus)
    int GC_register_disappearing_link(GC_PTR * link)
# else
    int GC_register_disappearing_link(link)
    GC_PTR * link;
# endif
{
    ptr_t base;

    base = (ptr_t)GC_base((GC_PTR)link);
    if (base == 0)
        ABORT("Bad arg to GC_register_disappearing_link");
    return(GC_general_register_disappearing_link(link, base));
}

# if defined(__STDC__) || defined(__cplusplus)
    int GC_general_register_disappearing_link(GC_PTR * link,
                                              GC_PTR obj)
# else
    int GC_general_register_disappearing_link(link, obj)
    GC_PTR * link;
    GC_PTR obj;
# endif

{
    struct disappearing_link *curr_dl;
    int index;
    struct disappearing_link * new_dl;
    DCL_LOCK_STATE;

    if ((word)link & (ALIGNMENT-1))
        ABORT("Bad arg to GC_general_register_disappearing_link");
#   ifdef THREADS
        DISABLE_SIGNALS();
        LOCK();
#   endif
    if (log_dl_table_size == -1
        || GC_dl_entries > ((word)1 << log_dl_table_size)) {
#       ifndef THREADS
            DISABLE_SIGNALS();
#       endif
        GC_grow_table((struct hash_chain_entry ***)(&dl_head),
                      &log_dl_table_size);
#       ifdef CONDPRINT
          if (GC_print_stats) {
            GC_printf1("Grew dl table to %lu entries\n",
                       (unsigned long)(1 << log_dl_table_size));
          }
#       endif
#       ifndef THREADS
            ENABLE_SIGNALS();
#       endif
    }
    index = HASH2(link, log_dl_table_size);
    for (curr_dl = dl_head[index]; curr_dl != 0; curr_dl = dl_next(curr_dl)) {
        if (curr_dl -> dl_hidden_link == HIDE_POINTER(link)) {
            curr_dl -> dl_hidden_obj = HIDE_POINTER(obj);
#           ifdef THREADS
                UNLOCK();
                ENABLE_SIGNALS();
#           endif
            return(1);
        }
    }
    new_dl = (struct disappearing_link *)
        GC_INTERNAL_MALLOC(sizeof(struct disappearing_link),NORMAL);
    if (new_dl != 0) {
        new_dl -> dl_hidden_obj = HIDE_POINTER(obj);
        new_dl -> dl_hidden_link = HIDE_POINTER(link);
        dl_set_next(new_dl, dl_head[index]);
        dl_head[index] = new_dl;
        GC_dl_entries++;
    } else {
        GC_finalization_failures++;
    }
#   ifdef THREADS
        UNLOCK();
        ENABLE_SIGNALS();
#   endif
    return(0);
}
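
/* Usage sketch (illustrative only; see gc.h for the authoritative     */
/* contract).  A pointer kept in pointer-free (atomic) storage is not  */
/* traced by the collector, so clearing it when its target dies gives  */
/* a simple weak reference.  Here obj is assumed to be the base        */
/* address of a collectable object:                                     */
/*                                                                      */
/*      GC_PTR *slot = (GC_PTR *)GC_MALLOC_ATOMIC(sizeof(GC_PTR));      */
/*      *slot = obj;                                                     */
/*      GC_general_register_disappearing_link(slot, obj);               */
/*      ...                                                              */
/*      if (*slot != 0) { ... obj has not yet been reclaimed ... }       */
/*                                                                      */
/* In a threaded program the test of *slot requires synchronization    */
/* with the collector.                                                   */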

# if defined(__STDC__) || defined(__cplusplus)
    int GC_unregister_disappearing_link(GC_PTR * link)
# else
    int GC_unregister_disappearing_link(link)
    GC_PTR * link;
# endif
{
    struct disappearing_link *curr_dl, *prev_dl;
    int index;
    DCL_LOCK_STATE;

    DISABLE_SIGNALS();
    LOCK();
    index = HASH2(link, log_dl_table_size);
    if (((unsigned long)link & (ALIGNMENT-1))) goto out;
    prev_dl = 0; curr_dl = dl_head[index];
    while (curr_dl != 0) {
        if (curr_dl -> dl_hidden_link == HIDE_POINTER(link)) {
            if (prev_dl == 0) {
                dl_head[index] = dl_next(curr_dl);
            } else {
                dl_set_next(prev_dl, dl_next(curr_dl));
            }
            GC_dl_entries--;
            UNLOCK();
            ENABLE_SIGNALS();
#           ifdef DBG_HDRS_ALL
              dl_next(curr_dl) = 0;
#           else
              GC_free((GC_PTR)curr_dl);
#           endif
            return(1);
        }
        prev_dl = curr_dl;
        curr_dl = dl_next(curr_dl);
    }
out:
    UNLOCK();
    ENABLE_SIGNALS();
    return(0);
}

/* Possible finalization_marker procedures.  Note that mark stack      */
/* overflow is handled by the caller, and is not a disaster.           */
GC_API void GC_normal_finalize_mark_proc(p)
ptr_t p;
{
    hdr * hhdr = HDR(p);

    PUSH_OBJ((word *)p, hhdr, GC_mark_stack_top,
             &(GC_mark_stack[GC_mark_stack_size]));
}

/* This only pays very partial attention to the mark descriptor.       */
/* It does the right thing for normal and atomic objects, and treats   */
/* most others as normal.                                               */
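/* Pointers from the object into itself are deliberately not pushed,   */
/* so self-references do not count for finalization ordering.          */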
GC_API void GC_ignore_self_finalize_mark_proc(p)
ptr_t p;
{
    hdr * hhdr = HDR(p);
    word descr = hhdr -> hb_descr;
    ptr_t q, r;
    ptr_t scan_limit;
    ptr_t target_limit = p + WORDS_TO_BYTES(hhdr -> hb_sz) - 1;

    if ((descr & GC_DS_TAGS) == GC_DS_LENGTH) {
       scan_limit = p + descr - sizeof(word);
    } else {
       scan_limit = target_limit + 1 - sizeof(word);
    }
    for (q = p; q <= scan_limit; q += ALIGNMENT) {
        r = *(ptr_t *)q;
        if (r < p || r > target_limit) {
            GC_PUSH_ONE_HEAP((word)r, q);
        }
    }
}

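/* Marks nothing at all.  Objects registered with this procedure       */
/* (via GC_register_finalizer_no_order) impose no ordering constraint  */
/* on the objects they reference.                                       */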
/*ARGSUSED*/
GC_API void GC_null_finalize_mark_proc(p)
ptr_t p;
{
}


/* Register a finalization function.  See gc.h for details.            */
/* In the nonthreads case, we try to avoid disabling signals,          */
/* since it can be expensive.  Threads packages typically              */
/* make it cheaper.                                                     */
/* The last parameter is a procedure that determines                   */
/* marking for finalization ordering.  Any objects marked              */
/* by that procedure will be guaranteed to not have been               */
/* finalized when this finalizer is invoked.                            */
GC_API void GC_register_finalizer_inner(obj, fn, cd, ofn, ocd, mp)
GC_PTR obj;
GC_finalization_proc fn;
GC_PTR cd;
GC_finalization_proc * ofn;
GC_PTR * ocd;
finalization_mark_proc * mp;
{
    ptr_t base;
    struct finalizable_object * curr_fo, * prev_fo;
    int index;
    struct finalizable_object *new_fo;
    hdr *hhdr;
    DCL_LOCK_STATE;

#   ifdef THREADS
        DISABLE_SIGNALS();
        LOCK();
#   endif
    if (log_fo_table_size == -1
        || GC_fo_entries > ((word)1 << log_fo_table_size)) {
#       ifndef THREADS
            DISABLE_SIGNALS();
#       endif
        GC_grow_table((struct hash_chain_entry ***)(&fo_head),
                      &log_fo_table_size);
#       ifdef CONDPRINT
          if (GC_print_stats) {
            GC_printf1("Grew fo table to %lu entries\n",
                       (unsigned long)(1 << log_fo_table_size));
          }
#       endif
#       ifndef THREADS
            ENABLE_SIGNALS();
#       endif
    }
    /* In the THREADS case signals are disabled and we hold allocation */
    /* lock; otherwise neither is true.  Proceed carefully.            */
    base = (ptr_t)obj;
    index = HASH2(base, log_fo_table_size);
    prev_fo = 0; curr_fo = fo_head[index];
    while (curr_fo != 0) {
        if (curr_fo -> fo_hidden_base == HIDE_POINTER(base)) {
            /* Interruption by a signal in the middle of this   */
            /* should be safe.  The client may see only *ocd    */
            /* updated, but we'll declare that to be his        */
            /* problem.                                          */
            if (ocd) *ocd = (GC_PTR) curr_fo -> fo_client_data;
            if (ofn) *ofn = curr_fo -> fo_fn;
            /* Delete the structure for base. */
            if (prev_fo == 0) {
              fo_head[index] = fo_next(curr_fo);
            } else {
              fo_set_next(prev_fo, fo_next(curr_fo));
            }
            if (fn == 0) {
                GC_fo_entries--;
                /* May not happen if we get a signal.  But a high  */
                /* estimate will only make the table larger than   */
                /* necessary.                                       */
#               if !defined(THREADS) && !defined(DBG_HDRS_ALL)
                  GC_free((GC_PTR)curr_fo);
#               endif
            } else {
                curr_fo -> fo_fn = fn;
                curr_fo -> fo_client_data = (ptr_t)cd;
                curr_fo -> fo_mark_proc = mp;
                /* Reinsert it.  We deleted it first to maintain   */
                /* consistency in the event of a signal.           */
                if (prev_fo == 0) {
                  fo_head[index] = curr_fo;
                } else {
                  fo_set_next(prev_fo, curr_fo);
                }
            }
#           ifdef THREADS
                UNLOCK();
                ENABLE_SIGNALS();
#           endif
            return;
        }
        prev_fo = curr_fo;
        curr_fo = fo_next(curr_fo);
    }
    if (ofn) *ofn = 0;
    if (ocd) *ocd = 0;
    if (fn == 0) {
#       ifdef THREADS
            UNLOCK();
            ENABLE_SIGNALS();
#       endif
        return;
    }
    GET_HDR(base, hhdr);
    if (0 == hhdr) {
      /* We won't collect it, hence finalizer wouldn't be run. */
#     ifdef THREADS
          UNLOCK();
          ENABLE_SIGNALS();
#     endif
      return;
    }
    new_fo = (struct finalizable_object *)
        GC_INTERNAL_MALLOC(sizeof(struct finalizable_object),NORMAL);
    if (new_fo != 0) {
        new_fo -> fo_hidden_base = (word)HIDE_POINTER(base);
        new_fo -> fo_fn = fn;
        new_fo -> fo_client_data = (ptr_t)cd;
        new_fo -> fo_object_size = hhdr -> hb_sz;
        new_fo -> fo_mark_proc = mp;
        fo_set_next(new_fo, fo_head[index]);
        GC_fo_entries++;
        fo_head[index] = new_fo;
    } else {
        GC_finalization_failures++;
    }
#   ifdef THREADS
        UNLOCK();
        ENABLE_SIGNALS();
#   endif
}

# if defined(__STDC__)
    void GC_register_finalizer(void * obj,
                               GC_finalization_proc fn, void * cd,
                               GC_finalization_proc *ofn, void ** ocd)
# else
    void GC_register_finalizer(obj, fn, cd, ofn, ocd)
    GC_PTR obj;
    GC_finalization_proc fn;
    GC_PTR cd;
    GC_finalization_proc * ofn;
    GC_PTR * ocd;
# endif
{
    GC_register_finalizer_inner(obj, fn, cd, ofn,
                                ocd, GC_normal_finalize_mark_proc);
}
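
/* Usage sketch (illustrative only; my_finalizer and its body are      */
/* hypothetical, see gc.h for the full contract):                      */
/*                                                                      */
/*      static void my_finalizer(GC_PTR obj, GC_PTR client_data)        */
/*      {                                                                */
/*          ... release non-memory resources held by obj ...            */
/*      }                                                                */
/*      ...                                                              */
/*      GC_register_finalizer(obj, my_finalizer, 0,                      */
/*                            (GC_finalization_proc *)0, (GC_PTR *)0);   */
/*                                                                      */
/* The finalizer runs, via GC_invoke_finalizers, after obj has been     */
/* found unreachable.  Registering with fn == 0 removes a previously    */
/* installed finalizer.                                                  */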

# if defined(__STDC__)
    void GC_register_finalizer_ignore_self(void * obj,
                               GC_finalization_proc fn, void * cd,
                               GC_finalization_proc *ofn, void ** ocd)
# else
    void GC_register_finalizer_ignore_self(obj, fn, cd, ofn, ocd)
    GC_PTR obj;
    GC_finalization_proc fn;
    GC_PTR cd;
    GC_finalization_proc * ofn;
    GC_PTR * ocd;
# endif
{
    GC_register_finalizer_inner(obj, fn, cd, ofn,
                                ocd, GC_ignore_self_finalize_mark_proc);
}

# if defined(__STDC__)
    void GC_register_finalizer_no_order(void * obj,
                               GC_finalization_proc fn, void * cd,
                               GC_finalization_proc *ofn, void ** ocd)
# else
    void GC_register_finalizer_no_order(obj, fn, cd, ofn, ocd)
    GC_PTR obj;
    GC_finalization_proc fn;
    GC_PTR cd;
    GC_finalization_proc * ofn;
    GC_PTR * ocd;
# endif
{
    GC_register_finalizer_inner(obj, fn, cd, ofn,
                                ocd, GC_null_finalize_mark_proc);
}

#ifndef NO_DEBUGGING
void GC_dump_finalization()
{
    struct disappearing_link * curr_dl;
    struct finalizable_object * curr_fo;
    ptr_t real_ptr, real_link;
    int dl_size = (log_dl_table_size == -1 ) ? 0 : (1 << log_dl_table_size);
    int fo_size = (log_fo_table_size == -1 ) ? 0 : (1 << log_fo_table_size);
    int i;

    GC_printf0("Disappearing links:\n");
    for (i = 0; i < dl_size; i++) {
      for (curr_dl = dl_head[i]; curr_dl != 0; curr_dl = dl_next(curr_dl)) {
        real_ptr = (ptr_t)REVEAL_POINTER(curr_dl -> dl_hidden_obj);
        real_link = (ptr_t)REVEAL_POINTER(curr_dl -> dl_hidden_link);
        GC_printf2("Object: 0x%lx, Link:0x%lx\n", real_ptr, real_link);
      }
    }
    GC_printf0("Finalizers:\n");
    for (i = 0; i < fo_size; i++) {
      for (curr_fo = fo_head[i]; curr_fo != 0; curr_fo = fo_next(curr_fo)) {
        real_ptr = (ptr_t)REVEAL_POINTER(curr_fo -> fo_hidden_base);
        GC_printf1("Finalizable object: 0x%lx\n", real_ptr);
      }
    }
}
#endif

/* Called with world stopped.  Cause disappearing links to disappear   */
/* and unreachable finalizable objects to be enqueued on               */
/* GC_finalize_now.                                                     */
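/* The work proceeds in phases: clear links whose target object is     */
/* now unreachable, mark everything reachable from finalizable         */
/* objects, queue the finalizable objects that remain unreachable,     */
/* and finally drop disappearing links whose link field lives in an    */
/* object that is itself unreachable.                                   */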
void GC_finalize()
{
    struct disappearing_link * curr_dl, * prev_dl, * next_dl;
    struct finalizable_object * curr_fo, * prev_fo, * next_fo;
    ptr_t real_ptr, real_link;
    register int i;
    int dl_size = (log_dl_table_size == -1 ) ? 0 : (1 << log_dl_table_size);
    int fo_size = (log_fo_table_size == -1 ) ? 0 : (1 << log_fo_table_size);

  /* Make disappearing links disappear */
    for (i = 0; i < dl_size; i++) {
      curr_dl = dl_head[i];
      prev_dl = 0;
      while (curr_dl != 0) {
        real_ptr = (ptr_t)REVEAL_POINTER(curr_dl -> dl_hidden_obj);
        real_link = (ptr_t)REVEAL_POINTER(curr_dl -> dl_hidden_link);
        if (!GC_is_marked(real_ptr)) {
            *(word *)real_link = 0;
            next_dl = dl_next(curr_dl);
            if (prev_dl == 0) {
                dl_head[i] = next_dl;
            } else {
                dl_set_next(prev_dl, next_dl);
            }
            GC_clear_mark_bit((ptr_t)curr_dl);
            GC_dl_entries--;
            curr_dl = next_dl;
        } else {
            prev_dl = curr_dl;
            curr_dl = dl_next(curr_dl);
        }
      }
    }
  /* Mark all objects reachable via chains of 1 or more pointers       */
  /* from finalizable objects.                                          */
    GC_ASSERT(GC_mark_state == MS_NONE);
    for (i = 0; i < fo_size; i++) {
      for (curr_fo = fo_head[i]; curr_fo != 0; curr_fo = fo_next(curr_fo)) {
        real_ptr = (ptr_t)REVEAL_POINTER(curr_fo -> fo_hidden_base);
        if (!GC_is_marked(real_ptr)) {
            GC_MARKED_FOR_FINALIZATION(real_ptr);
            GC_MARK_FO(real_ptr, curr_fo -> fo_mark_proc);
            if (GC_is_marked(real_ptr)) {
                WARN("Finalization cycle involving %lx\n", real_ptr);
            }
        }
      }
    }
  /* Enqueue for finalization all objects that are still               */
  /* unreachable.                                                       */
    GC_words_finalized = 0;
    for (i = 0; i < fo_size; i++) {
      curr_fo = fo_head[i];
      prev_fo = 0;
      while (curr_fo != 0) {
        real_ptr = (ptr_t)REVEAL_POINTER(curr_fo -> fo_hidden_base);
        if (!GC_is_marked(real_ptr)) {
            if (!GC_java_finalization) {
              GC_set_mark_bit(real_ptr);
            }
            /* Delete from hash table */
              next_fo = fo_next(curr_fo);
              if (prev_fo == 0) {
                fo_head[i] = next_fo;
              } else {
                fo_set_next(prev_fo, next_fo);
              }
              GC_fo_entries--;
            /* Add to list of objects awaiting finalization.    */
              fo_set_next(curr_fo, GC_finalize_now);
              GC_finalize_now = curr_fo;
              /* unhide object pointer so any future collections will  */
              /* see it.                                                */
              curr_fo -> fo_hidden_base =
                        (word) REVEAL_POINTER(curr_fo -> fo_hidden_base);
              GC_words_finalized +=
                        ALIGNED_WORDS(curr_fo -> fo_object_size)
                        + ALIGNED_WORDS(sizeof(struct finalizable_object));
            GC_ASSERT(GC_is_marked((ptr_t)curr_fo));
            curr_fo = next_fo;
        } else {
            prev_fo = curr_fo;
            curr_fo = fo_next(curr_fo);
        }
      }
    }

  if (GC_java_finalization) {
    /* Make sure we mark everything reachable from objects finalized   */
    /* using the no_order mark_proc.                                    */
      for (curr_fo = GC_finalize_now;
           curr_fo != NULL; curr_fo = fo_next(curr_fo)) {
        real_ptr = (ptr_t)curr_fo -> fo_hidden_base;
        if (!GC_is_marked(real_ptr)) {
            if (curr_fo -> fo_mark_proc == GC_null_finalize_mark_proc) {
                GC_MARK_FO(real_ptr, GC_normal_finalize_mark_proc);
            }
            GC_set_mark_bit(real_ptr);
        }
      }
  }

  /* Remove dangling disappearing links. */
    for (i = 0; i < dl_size; i++) {
      curr_dl = dl_head[i];
      prev_dl = 0;
      while (curr_dl != 0) {
        real_link = GC_base((ptr_t)REVEAL_POINTER(curr_dl -> dl_hidden_link));
        if (real_link != 0 && !GC_is_marked(real_link)) {
            next_dl = dl_next(curr_dl);
            if (prev_dl == 0) {
                dl_head[i] = next_dl;
            } else {
                dl_set_next(prev_dl, next_dl);
            }
            GC_clear_mark_bit((ptr_t)curr_dl);
            GC_dl_entries--;
            curr_dl = next_dl;
        } else {
            prev_dl = curr_dl;
            curr_dl = dl_next(curr_dl);
        }
      }
    }
}

#ifndef JAVA_FINALIZATION_NOT_NEEDED

/* Enqueue all remaining finalizers to be run.  Assumes the lock is
 * held and signals are disabled. */
void GC_enqueue_all_finalizers()
{
    struct finalizable_object * curr_fo, * prev_fo, * next_fo;
    ptr_t real_ptr;
    register int i;
    int fo_size;

    fo_size = (log_fo_table_size == -1 ) ? 0 : (1 << log_fo_table_size);
    GC_words_finalized = 0;
    for (i = 0; i < fo_size; i++) {
      curr_fo = fo_head[i];
      prev_fo = 0;
      while (curr_fo != 0) {
          real_ptr = (ptr_t)REVEAL_POINTER(curr_fo -> fo_hidden_base);
          GC_MARK_FO(real_ptr, GC_normal_finalize_mark_proc);
          GC_set_mark_bit(real_ptr);

          /* Delete from hash table */
          next_fo = fo_next(curr_fo);
          if (prev_fo == 0) {
              fo_head[i] = next_fo;
          } else {
              fo_set_next(prev_fo, next_fo);
          }
          GC_fo_entries--;

          /* Add to list of objects awaiting finalization.     */
          fo_set_next(curr_fo, GC_finalize_now);
          GC_finalize_now = curr_fo;

          /* unhide object pointer so any future collections will      */
          /* see it.                                                    */
          curr_fo -> fo_hidden_base =
                        (word) REVEAL_POINTER(curr_fo -> fo_hidden_base);

          GC_words_finalized +=
                ALIGNED_WORDS(curr_fo -> fo_object_size)
                + ALIGNED_WORDS(sizeof(struct finalizable_object));
          curr_fo = next_fo;
      }
    }

    return;
}

/* Invoke all remaining finalizers that haven't yet been run.
 * This is needed for strict compliance with the Java standard,
 * which can make the runtime guarantee that all finalizers are run.
 * Unfortunately, the Java standard implies we have to keep running
 * finalizers until there are no more left, a potential infinite loop.
 * YUCK.
 * Note that this is even more dangerous than the usual Java
 * finalizers, in that objects reachable from static variables
 * may have been finalized when these finalizers are run.
 * Finalizers run at this point must be prepared to deal with a
 * mostly broken world.
 * This routine is externally callable, so is called without
 * the allocation lock.
 */
GC_API void GC_finalize_all()
{
    DCL_LOCK_STATE;

    DISABLE_SIGNALS();
    LOCK();
    while (GC_fo_entries > 0) {
      GC_enqueue_all_finalizers();
      UNLOCK();
      ENABLE_SIGNALS();
      GC_INVOKE_FINALIZERS();
      DISABLE_SIGNALS();
      LOCK();
    }
    UNLOCK();
    ENABLE_SIGNALS();
}
#endif

/* Returns true if it is worth calling GC_invoke_finalizers. (Useful if */
/* finalizers can only be called from some kind of `safe state' and     */
/* getting into that safe state is expensive.)                           */
int GC_should_invoke_finalizers GC_PROTO((void))
{
    return GC_finalize_now != 0;
}
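
/* A client that must run finalizers from such a safe state might,     */
/* for example, poll as follows (illustrative only):                   */
/*      if (GC_should_invoke_finalizers()) (void)GC_invoke_finalizers(); */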

/* Invoke finalizers for all objects that are ready to be finalized.   */
/* Should be called without allocation lock.                            */
int GC_invoke_finalizers()
{
    register struct finalizable_object * curr_fo;
    register int count = 0;
    DCL_LOCK_STATE;

    while (GC_finalize_now != 0) {
#       ifdef THREADS
            DISABLE_SIGNALS();
            LOCK();
#       endif
        curr_fo = GC_finalize_now;
#       ifdef THREADS
            if (curr_fo != 0) GC_finalize_now = fo_next(curr_fo);
            UNLOCK();
            ENABLE_SIGNALS();
            if (curr_fo == 0) break;
#       else
            GC_finalize_now = fo_next(curr_fo);
#       endif
        fo_set_next(curr_fo, 0);
        (*(curr_fo -> fo_fn))((ptr_t)(curr_fo -> fo_hidden_base),
                              curr_fo -> fo_client_data);
        curr_fo -> fo_client_data = 0;
        ++count;
#       ifdef UNDEFINED
            /* This is probably a bad idea.  It throws off accounting if */
            /* nearly all objects are finalizable.  O.w. it shouldn't    */
            /* matter.                                                   */
            GC_free((GC_PTR)curr_fo);
#       endif
    }
    return count;
}

void (* GC_finalizer_notifier)() = (void (*) GC_PROTO((void)))0;
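        /* Invoked by GC_notify_or_invoke_finalizers when objects are  */
        /* awaiting finalization, GC_finalize_on_demand is set, and no */
        /* notification has yet been issued for the current collection. */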

static GC_word last_finalizer_notification = 0;

void GC_notify_or_invoke_finalizers GC_PROTO((void))
{
    if (GC_finalize_now == 0) return;
    if (!GC_finalize_on_demand) {
        (void) GC_invoke_finalizers();
        GC_ASSERT(GC_finalize_now == 0);
        return;
    }
    if (GC_finalizer_notifier != (void (*) GC_PROTO((void)))0
        && last_finalizer_notification != GC_gc_no) {
        last_finalizer_notification = GC_gc_no;
        GC_finalizer_notifier();
    }
}

# ifdef __STDC__
    GC_PTR GC_call_with_alloc_lock(GC_fn_type fn,
                                   GC_PTR client_data)
# else
    GC_PTR GC_call_with_alloc_lock(fn, client_data)
    GC_fn_type fn;
    GC_PTR client_data;
# endif
{
    GC_PTR result;
    DCL_LOCK_STATE;

#   ifdef THREADS
      DISABLE_SIGNALS();
      LOCK();
      SET_LOCK_HOLDER();
#   endif
    result = (*fn)(client_data);
#   ifdef THREADS
#     ifndef GC_ASSERTIONS
        UNSET_LOCK_HOLDER();
#     endif /* o.w. UNLOCK() does it implicitly */
      UNLOCK();
      ENABLE_SIGNALS();
#   endif
    return(result);
}