// https://github.com/32bitmicro/newlib-nano-1.0/blob/master/newlib/libc/machine/xstormy16/tiny-malloc.c

/* A replacement malloc with:
   - Much reduced code size;
   - Smaller RAM footprint;
   - The ability to handle downward-growing heaps;
   but
   - Slower;
   - Probably higher memory fragmentation;
   - Doesn't support threads (but, if it did support threads,
     it wouldn't need a global lock, only a compare-and-swap instruction);
   - Assumes the maximum alignment required is the alignment of a pointer;
   - Assumes that memory is already there and doesn't need to be allocated.

   * Synopsis of public routines

   malloc(size_t n);
     Return a pointer to a newly allocated chunk of at least n bytes, or null
     if no space is available.
   free(void* p);
     Release the chunk of memory pointed to by p, or no effect if p is null.
   realloc(void* p, size_t n);
     Return a pointer to a chunk of size n that contains the same data
     as does chunk p up to the minimum of (n, p's size) bytes, or null
     if no space is available.  The returned pointer may or may not be
     the same as p.  If p is null, equivalent to malloc.  Unless the
     #define REALLOC_ZERO_BYTES_FREES below is set, realloc with a
     size argument of zero (re)allocates a minimum-sized chunk.
   memalign(size_t alignment, size_t n);
     Return a pointer to a newly allocated chunk of n bytes, aligned
     in accord with the alignment argument, which must be a power of
     two.  Will fail if 'alignment' is too large.
   calloc(size_t unit, size_t quantity);
     Returns a pointer to quantity * unit bytes, with all locations
     set to zero.
   cfree(void* p);
     Equivalent to free(p).
   malloc_trim(size_t pad);
     Release all but pad bytes of freed top-most memory back
     to the system.  Return 1 if successful, else 0.
   malloc_usable_size(void* p);
     Report the number of usable allocated bytes associated with allocated
     chunk p.  This may or may not report more bytes than were requested,
     due to alignment and minimum size constraints.
   malloc_stats();
     Prints brief summary statistics on stderr.
   mallinfo()
     Returns (by copy) a struct containing various summary statistics.
   mallopt(int parameter_number, int parameter_value)
     Changes one of the tunable parameters described below.  Returns
     1 if successful in changing the parameter, else 0.  Actually, returns 0
     always, as no parameter can be changed.
*/
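
/* Illustrative usage of the public routines above (a sketch added for
   clarity, not part of the original file; example_usage is a hypothetical
   helper, and the heap symbols __malloc_start/__malloc_end are assumed to
   be provided elsewhere, as this allocator expects).  */
#if 0
static void
example_usage (void)
{
  char *p = malloc (16);            /* at least 16 usable bytes, or NULL */
  if (p != NULL)
    {
      memset (p, 'x', 16);
      char *bigger = realloc (p, 64);   /* may move; contents preserved */
      if (bigger != NULL)
        p = bigger;
    }
  void *q = memalign (32, 100);     /* 32-byte aligned; power of two only */
  void *z = calloc (4, 10);         /* 40 bytes, all zero-filled */
  free (p);                         /* free (NULL) is a no-op */
  free (q);
  free (z);
}
#endif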

#ifdef __xstormy16__
#define MALLOC_DIRECTION -1
#endif

#ifndef MALLOC_DIRECTION
#define MALLOC_DIRECTION 1
#endif

#include <stddef.h>
#include <string.h>   /* memcpy and memset, used by realloc, calloc and mallinfo */

void* malloc(size_t);
void free(void*);
void* realloc(void*, size_t);
void* memalign(size_t, size_t);
void* valloc(size_t);
void* pvalloc(size_t);
void* calloc(size_t, size_t);
void cfree(void*);
int malloc_trim(size_t);
size_t malloc_usable_size(void*);
void malloc_stats(void);
int mallopt(int, int);
struct mallinfo mallinfo(void);

typedef struct freelist_entry {
  size_t size;
  struct freelist_entry *next;
} *fle;

extern void * __malloc_end;
extern fle __malloc_freelist;

/* Return the number of bytes that need to be added to X to make it
   aligned to an ALIGN boundary.  ALIGN must be a power of 2.  */
#define M_ALIGN(x, align) (-(size_t)(x) & ((align) - 1))

/* Return the number of bytes that need to be subtracted from X to make it
   aligned to an ALIGN boundary.  ALIGN must be a power of 2.  */
#define M_ALIGN_SUB(x, align) ((size_t)(x) & ((align) - 1))
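
/* Worked example (an illustration added to the original comments): with
   x == 0x1003 and align == 8, M_ALIGN(x, 8) == 5, since 0x1003 + 5 ==
   0x1008 is 8-aligned, while M_ALIGN_SUB(x, 8) == 3, since 0x1003 - 3 ==
   0x1000 is 8-aligned.  Both evaluate to 0 when x is already aligned.  */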

extern char *__malloc_start;

/* This is the minimum gap allowed between __malloc_end and the top of
   the stack.  This is only checked for when __malloc_end is
   decreased; if instead the stack grows into the heap, silent data
   corruption will result.  */
#define MALLOC_MINIMUM_GAP 32

#ifdef __xstormy16__
register void * stack_pointer asm ("r15");
#define MALLOC_LIMIT stack_pointer
#else
#define MALLOC_LIMIT __builtin_frame_address (0)
#endif

#if MALLOC_DIRECTION < 0
#define CAN_ALLOC_P(required)                           \
  (((size_t) __malloc_end - (size_t)MALLOC_LIMIT        \
    - MALLOC_MINIMUM_GAP) >= (required))
#else
#define CAN_ALLOC_P(required)                           \
  (((size_t)MALLOC_LIMIT - (size_t) __malloc_end        \
    - MALLOC_MINIMUM_GAP) >= (required))
#endif
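
/* Illustration (not in the original source): on an upward-growing heap
   (MALLOC_DIRECTION > 0) with __malloc_end == 0x80001000 and the current
   frame address at 0x80002000, CAN_ALLOC_P(required) asks whether
   0x1000 - 32 >= required, i.e. whether the requested growth still leaves
   at least MALLOC_MINIMUM_GAP bytes between the heap and the stack.  */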

/* real_size is the size we actually have to allocate, allowing for
   overhead and alignment.  */
#define REAL_SIZE(sz)                                           \
  ((sz) < sizeof (struct freelist_entry) - sizeof (size_t)      \
   ? sizeof (struct freelist_entry)                             \
   : sz + sizeof (size_t) + M_ALIGN(sz, sizeof (size_t)))
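
/* Worked example (an illustration, assuming a 32-bit target where
   sizeof (size_t) == 4 and sizeof (struct freelist_entry) == 8):
   REAL_SIZE(1) == 8   -- too small to hold a freelist entry when freed,
                          so it is rounded up to the minimum chunk size;
   REAL_SIZE(5) == 12  -- 5 payload bytes + 4 bytes of size header + 3
                          bytes of padding to keep the next chunk aligned;
   REAL_SIZE(8) == 12  -- 8 + 4 + 0 padding.  */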

#ifdef DEFINE_MALLOC

void * __malloc_end = &__malloc_start;
fle __malloc_freelist;

void *
malloc (size_t sz)
{
  fle *nextfree;
  fle block;

  /* real_size is the size we actually have to allocate, allowing for
     overhead and alignment.  */
  size_t real_size = REAL_SIZE (sz);

  /* Look for the first block on the freelist that is large enough.  */
  for (nextfree = &__malloc_freelist;
       *nextfree;
       nextfree = &(*nextfree)->next)
    {
      block = *nextfree;

      if (block->size >= real_size)
        {
          /* If the block found is just the right size, remove it from
             the free list.  Otherwise, split it.  */
          if (block->size < real_size + sizeof (struct freelist_entry))
            {
              *nextfree = block->next;
              return (void *)&block->next;
            }
          else
            {
              size_t newsize = block->size - real_size;
              fle newnext = block->next;
              *nextfree = (fle)((size_t)block + real_size);
              (*nextfree)->size = newsize;
              (*nextfree)->next = newnext;
              goto done;
            }
        }

      /* If this is the last block on the freelist, and it was too small,
         enlarge it.  */
      if (! block->next
          && __malloc_end == (void *)((size_t)block + block->size))
        {
          size_t moresize = real_size - block->size;
          if (! CAN_ALLOC_P (moresize))
            return NULL;

          *nextfree = NULL;
          if (MALLOC_DIRECTION < 0)
            {
              block = __malloc_end = (void *)((size_t)block - moresize);
            }
          else
            {
              __malloc_end = (void *)((size_t)block + real_size);
            }

          goto done;
        }
    }

  /* No free space at the end of the free list.  Allocate new space
     and use that.  */

  if (! CAN_ALLOC_P (real_size))
    return NULL;

  if (MALLOC_DIRECTION > 0)
    {
      block = __malloc_end;
      __malloc_end = (void *)((size_t)__malloc_end + real_size);
    }
  else
    {
      block = __malloc_end = (void *)((size_t)__malloc_end - real_size);
    }
 done:
  block->size = real_size;
  return (void *)&block->next;
}

#endif
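
/* Sketch (not part of the original file) of the first-fit behaviour above,
   under the same 32-bit assumptions as the REAL_SIZE illustration: a free
   block is only handed out whole when splitting it would leave less than
   sizeof (struct freelist_entry) behind; otherwise its tail stays on the
   freelist.  */
#if 0
static void
malloc_split_example (void)       /* hypothetical helper */
{
  void *a = malloc (32);  /* REAL_SIZE(32) == 36 on the assumed target */
  free (a);               /* the 36-byte chunk goes on the freelist */
  void *c = malloc (8);   /* REAL_SIZE(8) == 12: the free chunk is split,
                             'c' reuses its start, 24 bytes stay free */
  free (c);
}
#endif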

#ifdef DEFINE_FREE

void
free (void *block_p)
{
  fle *nextfree;
  fle block = (fle)((size_t) block_p - offsetof (struct freelist_entry, next));

  if (block_p == NULL)
    return;

  /* Look on the freelist to see if there's a free block just before
     or just after this block.  */
  for (nextfree = &__malloc_freelist;
       *nextfree;
       nextfree = &(*nextfree)->next)
    {
      fle thisblock = *nextfree;
      if ((size_t)thisblock + thisblock->size == (size_t) block)
        {
          thisblock->size += block->size;
          if (MALLOC_DIRECTION > 0
              && thisblock->next
              && (size_t) block + block->size == (size_t) thisblock->next)
            {
              thisblock->size += thisblock->next->size;
              thisblock->next = thisblock->next->next;
            }
          return;
        }
      else if ((size_t) thisblock == (size_t) block + block->size)
        {
          if (MALLOC_DIRECTION < 0
              && thisblock->next
              && (size_t) block == ((size_t) thisblock->next
                                    + thisblock->next->size))
            {
              *nextfree = thisblock->next;
              thisblock->next->size += block->size + thisblock->size;
            }
          else
            {
              block->size += thisblock->size;
              block->next = thisblock->next;
              *nextfree = block;
            }
          return;
        }
      else if ((MALLOC_DIRECTION > 0
                && (size_t) thisblock > (size_t) block)
               || (MALLOC_DIRECTION < 0
                   && (size_t) thisblock < (size_t) block))
        break;
    }

  block->next = *nextfree;
  *nextfree = block;
  return;
}
#endif
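
/* Illustration (not in the original source): free() merges a released
   block with a free neighbour that sits immediately before or after it in
   memory, so freeing adjacent chunks collapses them into one freelist
   entry instead of growing the list.  */
#if 0
static void
free_coalesce_example (void)      /* hypothetical helper */
{
  void *a = malloc (16);
  void *b = malloc (16);
  void *c = malloc (16);  /* three physically adjacent chunks */
  free (a);
  free (b);               /* merges with the free block holding 'a' */
  free (c);               /* merges again: one entry spans a, b and c */
}
#endif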

#ifdef DEFINE_REALLOC
void *
realloc (void *block_p, size_t sz)
{
  fle block = (fle)((size_t) block_p - offsetof (struct freelist_entry, next));
  size_t real_size = REAL_SIZE (sz);
  size_t old_real_size;

  if (block_p == NULL)
    return malloc (sz);

  old_real_size = block->size;

  /* Perhaps we need to allocate more space.  */
  if (old_real_size < real_size)
    {
      void *result;
      size_t old_size = old_real_size - sizeof (size_t);

      /* Need to allocate, copy, and free.  */
      result = malloc (sz);
      if (result == NULL)
        return NULL;
      memcpy (result, block_p, old_size < sz ? old_size : sz);
      free (block_p);
      return result;
    }
  /* Perhaps we can free some space.  */
  if (old_real_size - real_size >= sizeof (struct freelist_entry))
    {
      fle newblock = (fle)((size_t)block + real_size);
      block->size = real_size;
      newblock->size = old_real_size - real_size;
      free (&newblock->next);
    }
  return block_p;
}
#endif
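
/* Sketch (not part of the original file): realloc() grows a block by
   allocating, copying and freeing, but shrinks it in place when the
   saving is at least sizeof (struct freelist_entry), returning the tail
   to the freelist.  */
#if 0
static void
realloc_example (void)            /* hypothetical helper */
{
  char *p = malloc (64);
  if (p != NULL)
    {
      p[0] = 42;
      p = realloc (p, 16);        /* shrink: the same pointer comes back,
                                     p[0] is untouched, and the unused
                                     tail goes back on the freelist */
      free (p);
    }
}
#endif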

#ifdef DEFINE_CALLOC
void *
calloc (size_t n, size_t elem_size)
{
  void *result;
  size_t sz = n * elem_size;
  result = malloc (sz);
  if (result != NULL)
    memset (result, 0, sz);
  return result;
}
#endif

#ifdef DEFINE_CFREE
void
cfree (void *p)
{
  free (p);
}
#endif

#ifdef DEFINE_MEMALIGN
void *
memalign (size_t align, size_t sz)
{
  fle *nextfree;
  fle block;

  /* real_size is the size we actually have to allocate, allowing for
     overhead and alignment.  */
  size_t real_size = REAL_SIZE (sz);

  /* Some sanity checking on 'align'.  */
  if ((align & (align - 1)) != 0
      || align <= 0)
    return NULL;

  /* Look for the first block on the freelist that is large enough.  */
  /* One tricky part is this: We want the result to be a valid pointer
     to free.  That means that there has to be room for a size_t
     before the block.  If there's additional space before the block,
     it should go on the freelist, or it'll be lost---we could add it
     to the size of the block before it in memory, but finding the
     previous block is expensive.  */
  for (nextfree = &__malloc_freelist;
       ;
       nextfree = &(*nextfree)->next)
    {
      size_t before_size;
      size_t old_size;

      /* If we've run out of free blocks, allocate more space.  */
      if (! *nextfree)
        {
          old_size = real_size;
          if (MALLOC_DIRECTION < 0)
            {
              old_size += M_ALIGN_SUB (((size_t)__malloc_end
                                        - old_size + sizeof (size_t)),
                                       align);
              if (! CAN_ALLOC_P (old_size))
                return NULL;
              block = __malloc_end = (void *)((size_t)__malloc_end - old_size);
            }
          else
            {
              block = __malloc_end;
              old_size += M_ALIGN ((size_t)__malloc_end + sizeof (size_t),
                                   align);
              if (! CAN_ALLOC_P (old_size))
                return NULL;
              __malloc_end = (void *)((size_t)__malloc_end + old_size);
            }
          *nextfree = block;
          block->size = old_size;
          block->next = NULL;
        }
      else
        {
          block = *nextfree;
          old_size = block->size;
        }

      before_size = M_ALIGN (&block->next, align);
      if (before_size != 0)
        before_size = sizeof (*block) + M_ALIGN (&(block+1)->next, align);

      /* If this is the last block on the freelist, and it is too small,
         enlarge it.  */
      if (! block->next
          && old_size < real_size + before_size
          && __malloc_end == (void *)((size_t)block + block->size))
        {
          if (MALLOC_DIRECTION < 0)
            {
              size_t moresize = real_size - block->size;
              moresize += M_ALIGN_SUB ((size_t)&block->next - moresize, align);
              if (! CAN_ALLOC_P (moresize))
                return NULL;
              block = __malloc_end = (void *)((size_t)block - moresize);
              block->next = NULL;
              block->size = old_size = old_size + moresize;
              before_size = 0;
            }
          else
            {
              if (! CAN_ALLOC_P (before_size + real_size - block->size))
                return NULL;
              __malloc_end = (void *)((size_t)block + before_size + real_size);
              block->size = old_size = before_size + real_size;
            }

          /* Two out of the four cases below will now be possible; which
             two depends on MALLOC_DIRECTION.  */
        }

      if (old_size >= real_size + before_size)
        {
          /* This block will do.  If there needs to be space before it,
             split the block.  */
          if (before_size != 0)
            {
              fle old_block = block;

              old_block->size = before_size;
              block = (fle)((size_t)block + before_size);

              /* If there's no space after the block, we're now nearly
                 done; just make a note of the size required.
                 Otherwise, we need to create a new free space block.  */
              if (old_size - before_size
                  <= real_size + sizeof (struct freelist_entry))
                {
                  block->size = old_size - before_size;
                  return (void *)&block->next;
                }
              else
                {
                  fle new_block;
                  new_block = (fle)((size_t)block + real_size);
                  new_block->size = old_size - before_size - real_size;
                  if (MALLOC_DIRECTION > 0)
                    {
                      new_block->next = old_block->next;
                      old_block->next = new_block;
                    }
                  else
                    {
                      new_block->next = old_block;
                      *nextfree = new_block;
                    }
                  goto done;
                }
            }
          else
            {
              /* If the block found is just the right size, remove it from
                 the free list.  Otherwise, split it.  */
              if (old_size <= real_size + sizeof (struct freelist_entry))
                {
                  *nextfree = block->next;
                  return (void *)&block->next;
                }
              else
                {
                  size_t newsize = old_size - real_size;
                  fle newnext = block->next;
                  *nextfree = (fle)((size_t)block + real_size);
                  (*nextfree)->size = newsize;
                  (*nextfree)->next = newnext;
                  goto done;
                }
            }
        }
    }

 done:
  block->size = real_size;
  return (void *)&block->next;
}
#endif
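
/* Sketch (not part of the original file): memalign() only accepts
   power-of-two alignments, and the pointer it returns is still a valid
   argument to free() because a size header always sits immediately
   before it.  */
#if 0
static void
memalign_example (void)           /* hypothetical helper */
{
  void *p = memalign (64, 100);   /* 64-byte aligned payload */
  void *bad = memalign (48, 100); /* not a power of two -> NULL */
  if (p != NULL && ((size_t)p & 63) == 0)
    free (p);                     /* freed like any malloc block */
  (void)bad;
}
#endif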

#ifdef DEFINE_VALLOC
void *
valloc (size_t sz)
{
  return memalign (128, sz);
}
#endif
#ifdef DEFINE_PVALLOC
void *
pvalloc (size_t sz)
{
  return memalign (128, sz + M_ALIGN (sz, 128));
}
#endif

#ifdef DEFINE_MALLINFO
#include "malloc.h"

struct mallinfo
mallinfo (void)
{
  struct mallinfo r;
  fle fr;
  size_t free_size;
  size_t total_size;
  size_t free_blocks;

  memset (&r, 0, sizeof (r));

  free_size = 0;
  free_blocks = 0;
  for (fr = __malloc_freelist; fr; fr = fr->next)
    {
      free_size += fr->size;
      free_blocks++;
      if (! fr->next)
        {
          int atend;
          if (MALLOC_DIRECTION > 0)
            atend = (void *)((size_t)fr + fr->size) == __malloc_end;
          else
            atend = (void *)fr == __malloc_end;
          if (atend)
            r.keepcost = fr->size;
        }
    }

  if (MALLOC_DIRECTION > 0)
    total_size = (char *)__malloc_end - (char *)&__malloc_start;
  else
    total_size = (char *)&__malloc_start - (char *)__malloc_end;

#ifdef DEBUG
  /* Fixme: should walk through all the in-use blocks and see if
     they're valid.  */
#endif

  r.arena = total_size;
  r.fordblks = free_size;
  r.uordblks = total_size - free_size;
  r.ordblks = free_blocks;
  return r;
}
#endif

#ifdef DEFINE_MALLOC_STATS
#include "malloc.h"
#include <stdio.h>

void
malloc_stats (void)
{
  struct mallinfo i;
  FILE *fp;

  fp = stderr;
  i = mallinfo ();
  fprintf (fp, "malloc has reserved %u bytes between %p and %p\n",
           i.arena, &__malloc_start, __malloc_end);
  fprintf (fp, "there are %u bytes free in %u chunks\n",
           i.fordblks, i.ordblks);
  fprintf (fp, "of which %u bytes are at the end of the reserved space\n",
           i.keepcost);
  fprintf (fp, "and %u bytes are in use.\n", i.uordblks);
}
#endif

#ifdef DEFINE_MALLOC_USABLE_SIZE
size_t
malloc_usable_size (void *block_p)
{
  fle block = (fle)((size_t) block_p - offsetof (struct freelist_entry, next));
  return block->size - sizeof (size_t);
}
#endif

#ifdef DEFINE_MALLOPT
int
mallopt (int n, int v)
{
  (void)n; (void)v;
  return 0;
}
#endif