1 /* Copyright (C) 2021 Free Software Foundation, Inc.
4 This file is part of GNU Binutils.
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, 51 Franklin Street - Fifth Floor, Boston,
19 MA 02110-1301, USA. */
28 #include "collector.h"
29 #include "libcol_util.h"
30 #include "gp-experiment.h"
/* TprintfT(<level>,...) definitions.  Adjust per module as needed */
#define DBG_LT0 0 // for high-level configuration, unexpected errors/warnings
#define DBG_LT1 1 // for configuration details, warnings
/* NOTE(review): DBG_LT2 and DBG_LT4 are referenced below (Tprintf calls) but
   their #defines were lost in extraction; restored here following the visible
   DBG_LTn == n pattern.  Identical redefinitions are harmless if the original
   lines still exist elsewhere.  */
#define DBG_LT2 2 // for major functions, debug details
#define DBG_LT3 3 // for debug
#define DBG_LT4 4 // for trace level

/*
 * Heap layout (free lists are indexed by the log2 of the object size):
 *   chain[0] - linked list of chunks;
 *   chain[1] - linked list of free 16-byte objects;
 *   chain[2] - linked list of free 32-byte objects;
 *   ...
 *   +------------------+---------+-------------------+--+--+-----+
 *   | Var size object  | ->   <- | Const size objects|  |  |Chunk|
 *   +------------------+---------+-------------------+--+--+-----+
 * Restrictions:
 *   - one var size object per chunk
 *   - can't allocate const size objects larger than 2^MAXCHAIN
 */

#define ALIGNMENT 4 /* 2^ALIGNMENT == minimal size and alignment */
#define ALIGN(x) ((((x) - 1)/(1 << ALIGNMENT) + 1) * (1 << ALIGNMENT))
/* NOTE(review): members of the Heap structure; the enclosing
   "typedef struct Heap { ... }" lines were lost in extraction — confirm
   against the original file.  */
collector_mutex_t lock;       /* master lock serializing all heap operations */
void *chain[MAXCHAIN];        /* chain[0] - chunks */
                              /* chain[i] - free-list of structs of size 2^i */
/* NOTE(review): body fragment of a "not implemented" stub; the function
   header and braces were lost in extraction.  Writes an error event to the
   experiment log.  */
__collector_log_write ("<event kind=\"%s\" id=\"%d\">error memmgr not_implemented()</event>\n",
		       SP_JCMD_CERROR, COL_ERROR_NOZMEM);
/*
 * void __collector_mmgr_init_mutex_locks( Heap *heap )
 *      Initialize mmgr mutex locks.
 */
/* NOTE(review): the return-type line, any NULL-heap guard, and the function
   braces were lost in extraction — confirm against the original file.  */
__collector_mmgr_init_mutex_locks (Heap *heap)
/* trylock failing here means the lock was held when fork() happened.  */
if (__collector_mutex_trylock (&heap->lock))
  /*
   * We are in a child process immediately after the fork().
   * Parent process was in the middle of critical section when the fork() happened.
   * This is a placeholder for the cleanup.
   * See CR 6997020 for details.
   */
  __collector_mutex_init (&heap->lock);
/* Reinitialize the master lock so the heap is usable in the child.  */
__collector_mutex_init (&heap->lock);
/*
 * alloc_chunk( unsigned sz ) allocates a chunk of at least sz bytes.
 * If sz == 0, allocates a chunk of the default size.
 * "log" selects whether a mapping failure is recorded in the experiment log.
 * NOTE(review): the local declarations (ptr, chnk, chunksz), braces, the
 * "chnk->base = ptr;" assignment, and the return statements were lost in
 * extraction — confirm against the original file.
 */
alloc_chunk (unsigned sz, int log)
/* Page size is queried once and cached for all subsequent calls.  */
static long pgsz = 0;
pgsz = CALL_UTIL (sysconf)(_SC_PAGESIZE);
Tprintf (DBG_LT2, "memmgr: pgsz = %ld (0x%lx)\n", pgsz, pgsz);
/* Allocate 2^n >= sz bytes, with room for the Chunk descriptor included */
unsigned nsz = ALIGN (sizeof (Chunk)) + sz;
/* Round up to the next power-of-two multiple of the page size.  */
for (chunksz = pgsz; chunksz < nsz; chunksz *= 2);
Tprintf (DBG_LT2, "alloc_chunk mapping %u, rounded up from %u\n", (unsigned int) chunksz, sz);
/* mmap64 is only in 32-bits; this call goes to mmap in 64-bits */
ptr = (char*) CALL_UTIL (mmap64)(0, chunksz, PROT_READ | PROT_WRITE,
				 MAP_PRIVATE | MAP_ANON, (int) -1, (off64_t) 0);
if (ptr == MAP_FAILED)
/* Mapping failed: report and (when logging is enabled) record the error.  */
Tprintf (0, "alloc_chunk mapping failed COL_ERROR_NOZMEMMAP: %s\n", CALL_UTIL (strerror)(errno));
__collector_log_write ("<event kind=\"%s\" id=\"%d\" ec=\"%d\">%s</event>\n",
		       SP_JCMD_CERROR, COL_ERROR_NOZMEMMAP, errno, "0");
/* Put the chunk descriptor at the end of the chunk */
chnk = (Chunk*) (ptr + chunksz - ALIGN (sizeof (Chunk)));
chnk->size = chunksz;
/* lo/hi delimit the free space between base and the descriptor.  */
chnk->lo = chnk->base;
chnk->hi = (char*) chnk;
chnk->next = (Chunk*) NULL;
Tprintf (DBG_LT2, "memmgr: returning new chunk @%p, chunksx=%ld sz=%ld\n",
	 ptr, (long) chunksz, (long) sz);
/*
 * __collector_newHeap() creates a new Heap, carving the Heap structure
 * itself out of the heap's first chunk.
 * NOTE(review): return-type line, braces, the NULL-check on chnk, and the
 * return statement were lost in extraction — confirm against the original.
 */
__collector_newHeap ()
Tprintf (DBG_LT2, "__collector_newHeap calling alloc_chunk(0)\n");
chnk = alloc_chunk (0, 1);
/* A bit of hackery: allocate heap from its own chunk */
chnk->hi -= ALIGN (sizeof (Heap));
heap = (Heap*) chnk->hi;
/* The first chunk becomes the head of the chunk list.  */
heap->chain[0] = (void*) chnk;
__collector_mutex_init (&heap->lock);
/*
 * __collector_deleteHeap() unmaps every chunk belonging to the heap.
 * NOTE(review): return-type line, braces, and the loop-advance
 * ("chnk = next;") were lost in extraction — confirm against the original.
 */
__collector_deleteHeap (Heap *heap)
/* Note: heap itself is in the last chunk */
for (Chunk *chnk = heap->chain[0]; chnk;)
/* Grab the next pointer before the chunk memory disappears.  */
Chunk *next = chnk->next;
CALL_UTIL (munmap)((void*) chnk->base, chnk->size);
/*
 * __collector_allocCSize() allocates a constant-size object of 2^idx >= sz
 * bytes, first from the per-size free chain, then from an existing chunk,
 * and finally from a freshly mapped chunk.  All signals are blocked and the
 * master lock held for the duration.
 * NOTE(review): return-type line, braces, the idx/nsz sizing loop, the
 * MAXCHAIN overflow check, the "chnk->hi -= nsz;" adjustment, and the return
 * statements were lost in extraction — confirm against the original file.
 */
__collector_allocCSize (Heap *heap, unsigned sz, int log)
/* block all signals and acquire lock */
sigset_t old_mask, new_mask;
CALL_UTIL (sigfillset)(&new_mask);
CALL_UTIL (sigprocmask)(SIG_SETMASK, &new_mask, &old_mask);
__collector_mutex_lock (&heap->lock);
/* Allocate nsz = 2^idx >= sz bytes */
unsigned idx = ALIGNMENT;
unsigned nsz = 1 << idx;
/* Look in the corresponding free chain first */
if (heap->chain[idx] != NULL)
/* Pop the head of the free list; the first word of a free object links to
   the next free object.  */
res = heap->chain[idx];
heap->chain[idx] = *(void**) res;
__collector_mutex_unlock (&heap->lock);
CALL_UTIL (sigprocmask)(SIG_SETMASK, &old_mask, NULL);
Tprintf (DBG_LT2, "memmgr: allocCSize %p sz %d (0x%x) req = 0x%x, from chain idx = %d\n", res, nsz, nsz, sz, idx);
/* NOTE(review): this unlock/unmask pair presumably belongs to an error path
   (e.g. request too large) — confirm against the original file.  */
__collector_mutex_unlock (&heap->lock);
CALL_UTIL (sigprocmask)(SIG_SETMASK, &old_mask, NULL);
/* Chain is empty, allocate from chunks */
for (chnk = (Chunk*) heap->chain[0]; chnk; chnk = chnk->next)
if (chnk->lo + nsz < chnk->hi)
/* Get a new chunk */
Tprintf (DBG_LT2, "__collector_allocCSize (%u) calling alloc_chunk(%u)\n", sz, nsz);
chnk = alloc_chunk (nsz, 1);
/* NOTE(review): this unlock/unmask pair presumably handles alloc_chunk
   failure — confirm against the original file.  */
__collector_mutex_unlock (&heap->lock);
CALL_UTIL (sigprocmask)(SIG_SETMASK, &old_mask, NULL);
/* Link the new chunk at the head of the chunk list.  */
chnk->next = (Chunk*) heap->chain[0];
heap->chain[0] = chnk;
/* Allocate from the chunk (const-size objects are carved from the top).  */
res = (void*) chnk->hi;
__collector_mutex_unlock (&heap->lock);
CALL_UTIL (sigprocmask)(SIG_SETMASK, &old_mask, NULL);
Tprintf (DBG_LT2, "memmgr: allocCSize %p sz %d (0x%x) req = 0x%x, new chunk\n", res, nsz, nsz, sz);
/*
 * __collector_freeCSize() returns a constant-size object of 2^idx >= sz
 * bytes to the per-size free chain.  No memory is ever unmapped here.
 * NOTE(review): return-type line, braces, the return after the NULL guard,
 * and the idx/nsz sizing loop were lost in extraction — confirm against the
 * original file.
 */
__collector_freeCSize (Heap *heap, void *ptr, unsigned sz)
/* freeing NULL (or having no heap) is a no-op */
if (heap == NULL || ptr == NULL)
/* block all signals and acquire lock */
sigset_t old_mask, new_mask;
CALL_UTIL (sigfillset)(&new_mask);
CALL_UTIL (sigprocmask)(SIG_SETMASK, &new_mask, &old_mask);
__collector_mutex_lock (&heap->lock);
/* Free 2^idx >= sz bytes */
unsigned idx = ALIGNMENT;
unsigned nsz = 1 << idx;
/* Push the object on the free list; its first word links to the old head. */
*(void**) ptr = heap->chain[idx];
heap->chain[idx] = ptr;
__collector_mutex_unlock (&heap->lock);
CALL_UTIL (sigprocmask)(SIG_SETMASK, &old_mask, NULL);
Tprintf (DBG_LT4, "memmgr: freeC %p sz %ld\n", ptr, (long) sz);
/*
 * allocVSize_nolock() allocates a variable-size object of sz bytes at the
 * base of a chunk (one var-size object per chunk).  Caller must hold the
 * heap lock.
 * NOTE(review): return-type line, braces, local declarations (chnk, res),
 * the loop break, the NULL-check on alloc_chunk, and the return statement
 * were lost in extraction — confirm against the original file.
 */
allocVSize_nolock (Heap *heap, unsigned sz)
/* Find a good chunk: one whose var-size area is unused and big enough.  */
for (chnk = (Chunk*) heap->chain[0]; chnk; chnk = chnk->next)
if (chnk->lo == chnk->base && chnk->lo + sz < chnk->hi)
/* Get a new chunk */
Tprintf (DBG_LT2, "allocVsize_nolock calling alloc_chunk(%u)\n", sz);
chnk = alloc_chunk (sz, 0);
/* Link the new chunk at the head of the chunk list.  */
chnk->next = (Chunk*) heap->chain[0];
heap->chain[0] = chnk;
/* Reserve [base, base+sz) for the var-size object.  */
chnk->lo = chnk->base + sz;
res = (void*) (chnk->base);
Tprintf (DBG_LT4, "memmgr: allocV %p for %ld\n", res, (long) sz);
/*
 * __collector_allocVSize() is the locking wrapper around allocVSize_nolock():
 * blocks all signals, takes the master lock, allocates, then restores state.
 * NOTE(review): return-type line, braces, the declaration of res, and the
 * return statement were lost in extraction — confirm against the original.
 */
__collector_allocVSize (Heap *heap, unsigned sz)
/* block all signals and acquire lock */
sigset_t old_mask, new_mask;
CALL_UTIL (sigfillset)(&new_mask);
CALL_UTIL (sigprocmask)(SIG_SETMASK, &new_mask, &old_mask);
__collector_mutex_lock (&heap->lock);
res = allocVSize_nolock (heap, sz);
__collector_mutex_unlock (&heap->lock);
CALL_UTIL (sigprocmask)(SIG_SETMASK, &old_mask, NULL);
/*
 * reallocVSize( Heap *heap, void *ptr, unsigned newsz )
 *      Changes the size of memory pointed by ptr to newsz.
 *      If ptr == NULL, allocates new memory of size newsz.
 *      If newsz == 0, frees ptr and returns NULL.
 * NOTE(review): return-type line, braces, the loop break after finding the
 * owning chunk, the copy loop between s2 and s1, and the return statements
 * were lost in extraction — confirm against the original file.
 */
__collector_reallocVSize (Heap *heap, void *ptr, unsigned newsz)
/* NULL ptr degenerates to a plain allocation.  */
return __collector_allocVSize (heap, newsz);
/* block all signals and acquire lock */
sigset_t old_mask, new_mask;
CALL_UTIL (sigfillset)(&new_mask);
CALL_UTIL (sigprocmask)(SIG_SETMASK, &new_mask, &old_mask);
__collector_mutex_lock (&heap->lock);
/* Find the chunk that owns ptr: var-size objects always sit at chunk base. */
for (chnk = (Chunk*) heap->chain[0]; chnk; chnk = chnk->next)
if (ptr == chnk->base)
/* ptr not found in any chunk: memory corruption */
__collector_mutex_unlock (&heap->lock);
CALL_UTIL (sigprocmask)(SIG_SETMASK, &old_mask, NULL);
/* The new size still fits in place: resize without moving.  */
if (chnk->base + newsz < chnk->hi)
chnk->lo = chnk->base + newsz;
/* newsz == 0 means free: the in-place "object" is released, return NULL.  */
res = newsz ? chnk->base : NULL;
__collector_mutex_unlock (&heap->lock);
CALL_UTIL (sigprocmask)(SIG_SETMASK, &old_mask, NULL);
Tprintf (DBG_LT4, "memmgr: reallocV %p for %ld\n", ptr, (long) newsz);
/* Doesn't fit in place: allocate elsewhere and move the data.  */
res = allocVSize_nolock (heap, newsz);
/* Copy to new location */
int size = chnk->lo - chnk->base;
char *s1 = (char*) res;
char *s2 = chnk->base;
/* Release the old var-size area back to its chunk.  */
chnk->lo = chnk->base;
__collector_mutex_unlock (&heap->lock);
CALL_UTIL (sigprocmask)(SIG_SETMASK, &old_mask, NULL);