1 // https://github.com/32bitmicro/newlib-nano-1.0/blob/master/newlib/libc/machine/xstormy16/tiny-malloc.c
3 /* A replacement malloc with:
4 - Much reduced code size;
5 - Smaller RAM footprint;
6 - The ability to handle downward-growing heaps;
9 - Probably higher memory fragmentation;
10 - Doesn't support threads (but, if it did support threads,
11 it wouldn't need a global lock, only a compare-and-swap instruction);
12 - Assumes the maximum alignment required is the alignment of a pointer;
13 - Assumes that memory is already there and doesn't need to be allocated.
15 * Synopsis of public routines
18 Return a pointer to a newly allocated chunk of at least n bytes, or null
19 if no space is available.
21 Release the chunk of memory pointed to by p, with no effect if p is null.
22 realloc(void* p, size_t n);
23 Return a pointer to a chunk of size n that contains the same data
24 as does chunk p up to the minimum of (n, p's size) bytes, or null
25 if no space is available. The returned pointer may or may not be
26 the same as p. If p is null, equivalent to malloc. Unless the
27 #define REALLOC_ZERO_BYTES_FREES below is set, realloc with a
28 size argument of zero (re)allocates a minimum-sized chunk.
29 memalign(size_t alignment, size_t n);
30 Return a pointer to a newly allocated chunk of n bytes, aligned
31 in accord with the alignment argument, which must be a power of
32 two. Will fail if 'alignment' is too large.
33 calloc(size_t unit, size_t quantity);
34 Returns a pointer to quantity * unit bytes, with all locations
37 Equivalent to free(p).
38 malloc_trim(size_t pad);
39 Release all but pad bytes of freed top-most memory back
40 to the system. Return 1 if successful, else 0.
41 malloc_usable_size(void* p);
42 Report the number of usable allocated bytes associated with allocated
43 chunk p. This may or may not report more bytes than were requested,
44 due to alignment and minimum size constraints.
46 Prints brief summary statistics on stderr.
48 Returns (by copy) a struct containing various summary statistics.
49 mallopt(int parameter_number, int parameter_value)
50 Changes one of the tunable parameters described below. Returns
51 1 if successful in changing the parameter, else 0. Actually, returns 0
52 always, as no parameter can be changed.
/* NOTE(review): this listing is garbled -- each line carries a stray
   decimal line number fused at the front, statements are wrapped
   mid-expression, and many original lines (including the matching
   #else/#endif directives for the conditionals below) are missing.
   Comments describe intent only; confirm every detail against the
   upstream newlib-nano tiny-malloc.c before compiling.  */
/* Heap growth direction: -1 means the heap grows downward (toward
   lower addresses); the #ifndef default below is +1 (upward).
   NOTE(review): the target-specific guard that selects -1 is missing
   from this listing -- verify which definition is actually active.  */
56 #define MALLOC_DIRECTION -1
59 #ifndef MALLOC_DIRECTION
60 #define MALLOC_DIRECTION 1
/* Public allocator entry points (see synopsis at top of file).  */
67 void* realloc(void*, size_t);
68 void* memalign(size_t, size_t);
70 void* pvalloc(size_t);
71 void* calloc(size_t, size_t);
73 int malloc_trim(size_t);
74 size_t malloc_usable_size(void*);
75 void malloc_stats(void);
76 int mallopt(int, int);
77 struct mallinfo
mallinfo(void);
/* Free-list node: each free chunk stores its total size and a link to
   the next free chunk.  An allocated chunk keeps only the size word;
   the user pointer is &entry->next.
   NOTE(review): the 'size' member and the closing '} *fle;' of this
   typedef are missing from the listing -- restore from upstream.  */
79 typedef struct freelist_entry
{
81 struct freelist_entry
*next
;
/* Current end of the allocated arena (moves in MALLOC_DIRECTION).  */
84 extern void * __malloc_end
;
/* Head of the singly-linked list of free chunks.  */
85 extern fle __malloc_freelist
;
87 /* Return the number of bytes that need to be added to X to make it
88 aligned to an ALIGN boundary. ALIGN must be a power of 2. */
89 #define M_ALIGN(x, align) (-(size_t)(x) & ((align) - 1))
91 /* Return the number of bytes that need to be subtracted from X to make it
92 aligned to an ALIGN boundary. ALIGN must be a power of 2. */
93 #define M_ALIGN_SUB(x, align) ((size_t)(x) & ((align) - 1))
/* Base of the heap arena (its address, not its contents, is used).  */
95 extern char *__malloc_start
;
97 /* This is the minimum gap allowed between __malloc_end and the top of
98 the stack. This is only checked for when __malloc_end is
99 decreased; if instead the stack grows into the heap, silent data
100 corruption will result. */
101 #define MALLOC_MINIMUM_GAP 32
/* Stack limit used by CAN_ALLOC_P.  NOTE(review): the two
   MALLOC_LIMIT definitions below come from different branches of a
   conditional whose #ifdef/#else lines are missing -- only one may be
   active; presumably r15 is the stack pointer on the original target
   (xstormy16) and __builtin_frame_address(0) is the portable fallback.  */
104 register void * stack_pointer
asm ("r15");
105 #define MALLOC_LIMIT stack_pointer
107 #define MALLOC_LIMIT __builtin_frame_address (0)
/* CAN_ALLOC_P(required): true iff 'required' more bytes can be taken
   from the arena while keeping MALLOC_MINIMUM_GAP clear of the stack.
   NOTE(review): the #else/#endif between the two definitions is
   missing from the listing.  */
110 #if MALLOC_DIRECTION < 0
111 #define CAN_ALLOC_P(required) \
112 (((size_t) __malloc_end - (size_t)MALLOC_LIMIT \
113 - MALLOC_MINIMUM_GAP) >= (required))
115 #define CAN_ALLOC_P(required) \
116 (((size_t)MALLOC_LIMIT - (size_t) __malloc_end \
117 - MALLOC_MINIMUM_GAP) >= (required))
120 /* real_size is the size we actually have to allocate, allowing for
121 overhead and alignment. */
122 #define REAL_SIZE(sz) \
123 ((sz) < sizeof (struct freelist_entry) - sizeof (size_t) \
124 ? sizeof (struct freelist_entry) \
125 : sz + sizeof (size_t) + M_ALIGN(sz, sizeof (size_t)))
/* Arena starts empty: the end pointer begins at the arena base.  */
129 void * __malloc_end
= &__malloc_start
;
130 fle __malloc_freelist
;
/* Body fragment of malloc(sz): first-fit search of the free list,
   splitting oversized chunks, growing the last chunk in place when it
   abuts __malloc_end, and finally carving fresh space off the arena.
   NOTE(review): this listing is garbled -- the function signature,
   local declarations (nextfree, block), braces, 'else', and several
   statements (e.g. the loop condition '*nextfree' and the NULL
   returns after failed CAN_ALLOC_P checks) are missing.  Restore from
   upstream before compiling.  */
138 /* real_size is the size we actually have to allocate, allowing for
139 overhead and alignment. */
140 size_t real_size
= REAL_SIZE (sz
);
142 /* Look for the first block on the freelist that is large enough. */
143 for (nextfree
= &__malloc_freelist
;
145 nextfree
= &(*nextfree
)->next
)
149 if (block
->size
>= real_size
)
151 /* If the block found is just the right size, remove it from
152 the free list. Otherwise, split it. */
153 if (block
->size
< real_size
+ sizeof (struct freelist_entry
))
155 *nextfree
= block
->next
;
/* User pointer is &block->next: the size word stays just below it.  */
156 return (void *)&block
->next
;
/* Split: the tail of the chunk stays on the free list.  */
160 size_t newsize
= block
->size
- real_size
;
161 fle newnext
= block
->next
;
162 *nextfree
= (fle
)((size_t)block
+ real_size
);
163 (*nextfree
)->size
= newsize
;
164 (*nextfree
)->next
= newnext
;
169 /* If this is the last block on the freelist, and it was too small,
172 && __malloc_end
== (void *)((size_t)block
+ block
->size
))
/* Grow the last free chunk in place by the shortfall.  */
174 size_t moresize
= real_size
- block
->size
;
175 if (! CAN_ALLOC_P (moresize
))
179 if (MALLOC_DIRECTION
< 0)
181 block
= __malloc_end
= (void *)((size_t)block
- moresize
);
185 __malloc_end
= (void *)((size_t)block
+ real_size
);
192 /* No free space at the end of the free list. Allocate new space
195 if (! CAN_ALLOC_P (real_size
))
198 if (MALLOC_DIRECTION
> 0)
200 block
= __malloc_end
;
201 __malloc_end
= (void *)((size_t)__malloc_end
+ real_size
);
205 block
= __malloc_end
= (void *)((size_t)__malloc_end
- real_size
);
/* Record the chunk size and hand back the payload pointer.  */
208 block
->size
= real_size
;
209 return (void *)&block
->next
;
/* Body fragment of free(block_p): recover the chunk header from the
   user pointer, then walk the (address-ordered) free list coalescing
   with the chunk immediately before and/or after, or insert the chunk
   at its sorted position.
   NOTE(review): garbled listing -- the function signature, the NULL
   check on block_p, braces, 'return' statements ending each case, and
   the final insertion's '*nextfree = block;' are missing.  Restore
   from upstream before compiling.  */
220 fle block
= (fle
)((size_t) block_p
- offsetof (struct freelist_entry
, next
));
225 /* Look on the freelist to see if there's a free block just before
226 or just after this block. */
227 for (nextfree
= &__malloc_freelist
;
229 nextfree
= &(*nextfree
)->next
)
231 fle thisblock
= *nextfree
;
/* Case 1: existing free chunk ends exactly where 'block' starts --
   absorb 'block' into it, then possibly also its successor.  */
232 if ((size_t)thisblock
+ thisblock
->size
== (size_t) block
)
234 thisblock
->size
+= block
->size
;
235 if (MALLOC_DIRECTION
> 0
237 && (size_t) block
+ block
->size
== (size_t) thisblock
->next
)
239 thisblock
->size
+= thisblock
->next
->size
;
240 thisblock
->next
= thisblock
->next
->next
;
/* Case 2: existing free chunk starts exactly where 'block' ends --
   'block' absorbs it (and, heap-growing-down, possibly the next
   chunk too).  */
244 else if ((size_t) thisblock
== (size_t) block
+ block
->size
)
246 if (MALLOC_DIRECTION
< 0
248 && (size_t) block
== ((size_t) thisblock
->next
249 + thisblock
->next
->size
))
251 *nextfree
= thisblock
->next
;
252 thisblock
->next
->size
+= block
->size
+ thisblock
->size
;
256 block
->size
+= thisblock
->size
;
257 block
->next
= thisblock
->next
;
/* Case 3: passed the insertion point -- link 'block' in here to keep
   the list sorted in allocation-direction order.  */
262 else if ((MALLOC_DIRECTION
> 0
263 && (size_t) thisblock
> (size_t) block
)
264 || (MALLOC_DIRECTION
< 0
265 && (size_t) thisblock
< (size_t) block
))
269 block
->next
= *nextfree
;
#ifdef DEFINE_REALLOC
/* Fragment of realloc(block_p, sz): grow by malloc+memcpy+free, or
   shrink in place by splitting off and freeing the tail.
   NOTE(review): garbled listing -- the return type line, the p==NULL
   short-circuit to malloc, the result NULL-check, the free(block_p)
   after the copy, the 'return result;'/'return block_p;' statements,
   braces, and the closing #endif are all missing.  Restore from
   upstream before compiling.  */
277 realloc (void *block_p
, size_t sz
)
/* Recover the chunk header from the user pointer.  */
279 fle block
= (fle
)((size_t) block_p
- offsetof (struct freelist_entry
, next
));
280 size_t real_size
= REAL_SIZE (sz
);
281 size_t old_real_size
;
286 old_real_size
= block
->size
;
288 /* Perhaps we need to allocate more space. */
289 if (old_real_size
< real_size
)
/* Usable payload of the old chunk (total minus the size word).  */
292 size_t old_size
= old_real_size
- sizeof (size_t);
294 /* Need to allocate, copy, and free. */
295 result
= malloc (sz
);
298 memcpy (result
, block_p
, old_size
< sz
? old_size
: sz
);
302 /* Perhaps we can free some space. */
303 if (old_real_size
- real_size
>= sizeof (struct freelist_entry
))
/* Split the surplus tail into its own chunk and free it.  */
305 fle newblock
= (fle
)((size_t)block
+ real_size
);
306 block
->size
= real_size
;
307 newblock
->size
= old_real_size
- real_size
;
308 free (&newblock
->next
);
/* Fragment of calloc(n, elem_size): allocate n*elem_size bytes and
   zero them.
   NOTE(review): garbled listing -- return type, braces, the result
   NULL-check before memset, and 'return result;' are missing.
   NOTE(review): n * elem_size is unchecked for overflow here; a
   conforming calloc should fail when the product wraps -- confirm
   against upstream / fix when the file is reconstructed.  */
316 calloc (size_t n
, size_t elem_size
)
319 size_t sz
= n
* elem_size
;
320 result
= malloc (sz
);
322 memset (result
, 0, sz
);
#ifdef DEFINE_MEMALIGN
/* Fragment of memalign(align, sz): like malloc, but the returned
   pointer is aligned to 'align' (a power of two).  The tricky part:
   the result must remain a valid argument to free(), so there must be
   room for a size word immediately before it, and any padding carved
   off the front of a chunk must itself become a free-list chunk.
   NOTE(review): garbled listing -- the return type line, local
   declarations (nextfree, block, old_size, before_size, new_block),
   loop conditions, braces, 'else' keywords, NULL returns after failed
   checks and failed sanity checks, and the closing #endif are all
   missing.  Restore from upstream before compiling.  */
337 memalign (size_t align
, size_t sz
)
342 /* real_size is the size we actually have to allocate, allowing for
343 overhead and alignment. */
344 size_t real_size
= REAL_SIZE (sz
);
346 /* Some sanity checking on 'align'. */
347 if ((align
& (align
- 1)) != 0
351 /* Look for the first block on the freelist that is large enough. */
352 /* One tricky part is this: We want the result to be a valid pointer
353 to free. That means that there has to be room for a size_t
354 before the block. If there's additional space before the block,
355 it should go on the freelist, or it'll be lost---we could add it
356 to the size of the block before it in memory, but finding the
357 previous block is expensive. */
358 for (nextfree
= &__malloc_freelist
;
360 nextfree
= &(*nextfree
)->next
)
365 /* If we've run out of free blocks, allocate more space. */
368 old_size
= real_size
;
369 if (MALLOC_DIRECTION
< 0)
/* Heap grows down: pad so the payload lands on an align boundary.  */
371 old_size
+= M_ALIGN_SUB (((size_t)__malloc_end
372 - old_size
+ sizeof (size_t)),
374 if (! CAN_ALLOC_P (old_size
))
376 block
= __malloc_end
= (void *)((size_t)__malloc_end
- old_size
);
/* Heap grows up: allocate at the current end, padded for alignment.  */
380 block
= __malloc_end
;
381 old_size
+= M_ALIGN ((size_t)__malloc_end
+ sizeof (size_t),
383 if (! CAN_ALLOC_P (old_size
))
385 __malloc_end
= (void *)((size_t)__malloc_end
+ old_size
);
388 block
->size
= old_size
;
394 old_size
= block
->size
;
/* before_size = bytes of padding needed in front of the aligned
   payload; if nonzero it must be at least a whole freelist_entry so
   the padding can live on the free list.  */
398 before_size
= M_ALIGN (&block
->next
, align
);
399 if (before_size
!= 0)
400 before_size
= sizeof (*block
) + M_ALIGN (&(block
+1)->next
, align
);
402 /* If this is the last block on the freelist, and it is too small,
405 && old_size
< real_size
+ before_size
406 && __malloc_end
== (void *)((size_t)block
+ block
->size
))
408 if (MALLOC_DIRECTION
< 0)
/* Extend downward, keeping the payload address aligned.  */
410 size_t moresize
= real_size
- block
->size
;
411 moresize
+= M_ALIGN_SUB ((size_t)&block
->next
- moresize
, align
);
412 if (! CAN_ALLOC_P (moresize
))
414 block
= __malloc_end
= (void *)((size_t)block
- moresize
);
416 block
->size
= old_size
= old_size
+ moresize
;
/* Extend upward to cover padding plus the aligned payload.  */
421 if (! CAN_ALLOC_P (before_size
+ real_size
- block
->size
))
423 __malloc_end
= (void *)((size_t)block
+ before_size
+ real_size
);
424 block
->size
= old_size
= before_size
+ real_size
;
427 /* Two out of the four cases below will now be possible; which
428 two depends on MALLOC_DIRECTION. */
431 if (old_size
>= real_size
+ before_size
)
433 /* This block will do. If there needs to be space before it,
435 if (before_size
!= 0)
/* Carve the front padding off as its own free chunk.  */
437 fle old_block
= block
;
439 old_block
->size
= before_size
;
440 block
= (fle
)((size_t)block
+ before_size
);
442 /* If there's no space after the block, we're now nearly
443 done; just make a note of the size required.
444 Otherwise, we need to create a new free space block. */
445 if (old_size
- before_size
446 <= real_size
+ sizeof (struct freelist_entry
))
448 block
->size
= old_size
- before_size
;
449 return (void *)&block
->next
;
/* Carve the tail off as a new free chunk and link it in.  */
454 new_block
= (fle
)((size_t)block
+ real_size
);
455 new_block
->size
= old_size
- before_size
- real_size
;
456 if (MALLOC_DIRECTION
> 0)
458 new_block
->next
= old_block
->next
;
459 old_block
->next
= new_block
;
463 new_block
->next
= old_block
;
464 *nextfree
= new_block
;
471 /* If the block found is just the right size, remove it from
472 the free list. Otherwise, split it. */
473 if (old_size
<= real_size
+ sizeof (struct freelist_entry
))
475 *nextfree
= block
->next
;
476 return (void *)&block
->next
;
480 size_t newsize
= old_size
- real_size
;
481 fle newnext
= block
->next
;
482 *nextfree
= (fle
)((size_t)block
+ real_size
);
483 (*nextfree
)->size
= newsize
;
484 (*nextfree
)->next
= newnext
;
492 block
->size
= real_size
;
493 return (void *)&block
->next
;
/* Fragments of valloc(sz) and pvalloc(sz): both delegate to memalign
   with a 128-byte "page" size; pvalloc additionally rounds sz up to a
   multiple of 128.
   NOTE(review): garbled listing -- both function signatures, braces,
   and the surrounding #ifdef DEFINE_VALLOC / #endif directives are
   missing.  */
501 return memalign (128, sz
);
504 #ifdef DEFINE_PVALLOC
508 return memalign (128, sz
+ M_ALIGN (sz
, 128));
#ifdef DEFINE_MALLINFO
/* Fragment of mallinfo(): zero a struct mallinfo, walk the free list
   totalling free bytes (and counting blocks / recording the size of a
   chunk at the arena end as keepcost), compute the arena span from
   __malloc_start/__malloc_end, and fill in the summary fields.
   NOTE(review): garbled listing -- the function signature, local
   declarations (r, fr, free_size, free_blocks, atend, total_size),
   braces, the 'return r;', and the closing #endif are missing.  */
524 memset (&r
, 0, sizeof (r
));
528 for (fr
= __malloc_freelist
; fr
; fr
= fr
->next
)
530 free_size
+= fr
->size
;
/* 'atend' = this free chunk touches the movable end of the arena.  */
535 if (MALLOC_DIRECTION
> 0)
536 atend
= (void *)((size_t)fr
+ fr
->size
) == __malloc_end
;
538 atend
= (void *)fr
== __malloc_end
;
540 r
.keepcost
= fr
->size
;
/* Total arena size, measured in whichever direction the heap grows.  */
544 if (MALLOC_DIRECTION
> 0)
545 total_size
= (char *)__malloc_end
- (char *)&__malloc_start
;
547 total_size
= (char *)&__malloc_start
- (char *)__malloc_end
;
550 /* Fixme: should walk through all the in-use blocks and see if
554 r
.arena
= total_size
;
555 r
.fordblks
= free_size
;
556 r
.uordblks
= total_size
- free_size
;
557 r
.ordblks
= free_blocks
;
#ifdef DEFINE_MALLOC_STATS
/* Fragment of malloc_stats(): print the mallinfo summary to a stream.
   NOTE(review): garbled listing -- the function signature, the
   'struct mallinfo i = mallinfo();' setup, the fp argument/stderr
   default, the keepcost argument of the third fprintf, braces, and
   the closing #endif are missing.  The %u specifiers presumably match
   the upstream mallinfo field types -- verify on reconstruction.  */
574 fprintf (fp
, "malloc has reserved %u bytes between %p and %p\n",
575 i
.arena
, &__malloc_start
, __malloc_end
);
576 fprintf (fp
, "there are %u bytes free in %u chunks\n",
577 i
.fordblks
, i
.ordblks
);
578 fprintf (fp
, "of which %u bytes are at the end of the reserved space\n",
580 fprintf (fp
, "and %u bytes are in use.\n", i
.uordblks
);
#ifdef DEFINE_MALLOC_USABLE_SIZE
/* Fragment of malloc_usable_size(block_p): recover the chunk header
   and report the payload size (total chunk size minus the size word).
   NOTE(review): garbled listing -- the size_t return type line,
   braces, and the closing #endif are missing.  */
586 malloc_usable_size (void *block_p
)
588 fle block
= (fle
)((size_t) block_p
- offsetof (struct freelist_entry
, next
));
589 return block
->size
- sizeof (size_t);
#ifdef DEFINE_MALLOPT
/* Fragment of mallopt(n, v): per the synopsis above, no parameter is
   tunable, so the body (missing here) presumably just returns 0.
   NOTE(review): return type, body, braces, and #endif are missing.  */
595 mallopt (int n
, int v
)