--- /dev/null
+/* Memory allocator `malloc'.
+ Copyright 1990, 1991, 1992 Free Software Foundation
+
+ Written May 1989 by Mike Haertel.
+ Heavily modified Mar 1992 by Fred Fish for mmap'd version.
+
+The GNU C Library is free software; you can redistribute it and/or
+modify it under the terms of the GNU Library General Public License as
+published by the Free Software Foundation; either version 2 of the
+License, or (at your option) any later version.
+
+The GNU C Library is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+Library General Public License for more details.
+
+You should have received a copy of the GNU Library General Public
+License along with the GNU C Library; see the file COPYING.LIB. If
+not, write to the Free Software Foundation, Inc., 675 Mass Ave,
+Cambridge, MA 02139, USA.
+
+ The author may be reached (Email) at the address mike@ai.mit.edu,
+ or (US mail) as Mike Haertel c/o Free Software Foundation. */
+
+#include "mmalloc.h"
+
+/* Prototypes for local functions */
+
+static int initialize PARAMS ((struct mdesc *));
+static PTR morecore PARAMS ((struct mdesc *, size_t));
+static PTR align PARAMS ((struct mdesc *, size_t));
+
+/* Aligned allocation. */
+
+static PTR
+align (mdp, size)
+ struct mdesc *mdp;
+ size_t size;
+{
+ PTR result;
+ unsigned long int adj;
+
+ result = mdp -> morecore (mdp, size);
+ adj = RESIDUAL (result, BLOCKSIZE);
+ if (adj != 0)
+ {
+ adj = BLOCKSIZE - adj;
+ (void) mdp -> morecore (mdp, adj);
+ result = (char *) result + adj;
+ }
+ return (result);
+}
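+
+/* Illustration (not part of the original sources): assuming a BLOCKSIZE of
+   4096, if the descriptor's morecore function returns 0x1234, then
+   RESIDUAL (0x1234, 4096) is 0x234, so align requests 4096 - 0x234 = 0xdcc
+   further bytes and returns 0x2000, the next block boundary; the 0xdcc
+   bytes below the aligned address simply go unused. */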
+
+/* Set everything up and remember that we have. */
+
+static int
+initialize (mdp)
+ struct mdesc *mdp;
+{
+ mdp -> heapsize = HEAP / BLOCKSIZE;
+ mdp -> heapinfo = (malloc_info *)
+ align (mdp, mdp -> heapsize * sizeof (malloc_info));
+ if (mdp -> heapinfo == NULL)
+ {
+ return (0);
+ }
+ memset ((PTR)mdp -> heapinfo, 0, mdp -> heapsize * sizeof (malloc_info));
+ mdp -> heapinfo[0].free.size = 0;
+ mdp -> heapinfo[0].free.next = mdp -> heapinfo[0].free.prev = 0;
+ mdp -> heapindex = 0;
+ mdp -> heapbase = (char *) mdp -> heapinfo;
+ mdp -> flags |= MMALLOC_INITIALIZED;
+ return (1);
+}
+
+/* Get neatly aligned memory, initializing or
+ growing the heap info table as necessary. */
+
+static PTR
+morecore (mdp, size)
+ struct mdesc *mdp;
+ size_t size;
+{
+ PTR result;
+ malloc_info *newinfo, *oldinfo;
+ size_t newsize;
+
+ result = align (mdp, size);
+ if (result == NULL)
+ {
+ return (NULL);
+ }
+
+ /* Check if we need to grow the info table. */
+ if ((size_t) BLOCK ((char *) result + size) > mdp -> heapsize)
+ {
+ newsize = mdp -> heapsize;
+ while ((size_t) BLOCK ((char *) result + size) > newsize)
+ {
+ newsize *= 2;
+ }
+ newinfo = (malloc_info *) align (mdp, newsize * sizeof (malloc_info));
+ if (newinfo == NULL)
+ {
+ mdp -> morecore (mdp, -size);
+ return (NULL);
+ }
+ memset ((PTR)newinfo, 0, newsize * sizeof (malloc_info));
+ memcpy ((PTR)newinfo, (PTR)mdp -> heapinfo,
+ mdp -> heapsize * sizeof (malloc_info));
+ oldinfo = mdp -> heapinfo;
+ newinfo[BLOCK (oldinfo)].busy.type = 0;
+ newinfo[BLOCK (oldinfo)].busy.info.size
+ = BLOCKIFY (mdp -> heapsize * sizeof (malloc_info));
+ mdp -> heapinfo = newinfo;
+ __mmalloc_free (mdp, (PTR)oldinfo);
+ mdp -> heapsize = newsize;
+ }
+
+ mdp -> heaplimit = BLOCK ((char *) result + size);
+ return (result);
+}
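+
+/* Illustration (not part of the original sources): on a host where INT_BIT
+   exceeds 16, BLOCKLOG is 12 and HEAP is 4194304, so the initial info table
+   describes 4194304 / 4096 == 1024 blocks.  When a request would extend the
+   heap past block 1024, the code above doubles newsize until the new end
+   block fits, copies the old table into a freshly aligned replacement, and
+   returns the old table's blocks to the heap through __mmalloc_free. */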
+
+/* Allocate memory from the heap. */
+
+PTR
+mmalloc (md, size)
+ PTR md;
+ size_t size;
+{
+ struct mdesc *mdp;
+ PTR result;
+ size_t block, blocks, lastblocks, start;
+ register size_t i;
+ struct list *next;
+ register size_t log;
+
+ if (size == 0)
+ {
+ return (NULL);
+ }
+
+ mdp = MD_TO_MDP (md);
+
+ if (mdp -> mmalloc_hook != NULL)
+ {
+ return ((*mdp -> mmalloc_hook) (md, size));
+ }
+
+ if (!(mdp -> flags & MMALLOC_INITIALIZED))
+ {
+ if (!initialize (mdp))
+ {
+ return (NULL);
+ }
+ }
+
+ if (size < sizeof (struct list))
+ {
+ size = sizeof (struct list);
+ }
+
+ /* Determine the allocation policy based on the request size. */
+ if (size <= BLOCKSIZE / 2)
+ {
+ /* Small allocation to receive a fragment of a block.
+ Determine the logarithm to base two of the fragment size. */
+ log = 1;
+ --size;
+ while ((size /= 2) != 0)
+ {
+ ++log;
+ }
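+
+      /* Illustration (not in the original comments): for a 100 byte request
+         the computation above starts with size == 100, --size makes it 99,
+         and the loop leaves log == 7, so the request will be served from a
+         1 << 7 == 128 byte fragment, the smallest power of two that will
+         hold it. */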
+
+ /* Look in the fragment lists for a
+ free fragment of the desired size. */
+ next = mdp -> fraghead[log].next;
+ if (next != NULL)
+ {
+ /* There are free fragments of this size.
+ Pop a fragment out of the fragment list and return it.
+ Update the block's nfree and first counters. */
+ result = (PTR) next;
+ next -> prev -> next = next -> next;
+ if (next -> next != NULL)
+ {
+ next -> next -> prev = next -> prev;
+ }
+ block = BLOCK (result);
+ if (--mdp -> heapinfo[block].busy.info.frag.nfree != 0)
+ {
+ mdp -> heapinfo[block].busy.info.frag.first =
+ RESIDUAL (next -> next, BLOCKSIZE) >> log;
+ }
+
+ /* Update the statistics. */
+ mdp -> heapstats.chunks_used++;
+ mdp -> heapstats.bytes_used += 1 << log;
+ mdp -> heapstats.chunks_free--;
+ mdp -> heapstats.bytes_free -= 1 << log;
+ }
+ else
+ {
+ /* No free fragments of the desired size, so get a new block
+ and break it into fragments, returning the first. */
+ result = mmalloc (md, BLOCKSIZE);
+ if (result == NULL)
+ {
+ return (NULL);
+ }
+
+ /* Link all fragments but the first into the free list. */
+ for (i = 1; i < (size_t) (BLOCKSIZE >> log); ++i)
+ {
+ next = (struct list *) ((char *) result + (i << log));
+ next -> next = mdp -> fraghead[log].next;
+ next -> prev = &mdp -> fraghead[log];
+ next -> prev -> next = next;
+ if (next -> next != NULL)
+ {
+ next -> next -> prev = next;
+ }
+ }
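+
+          /* Illustration (not in the original comments): assuming BLOCKSIZE
+             is 4096 and log == 7, the new block holds 4096 >> 7 == 32
+             fragments; the loop above links fragments 1 through 31 onto
+             fraghead[7], and fragment 0 is the one returned to the caller. */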
+
+ /* Initialize the nfree and first counters for this block. */
+ block = BLOCK (result);
+ mdp -> heapinfo[block].busy.type = log;
+ mdp -> heapinfo[block].busy.info.frag.nfree = i - 1;
+ mdp -> heapinfo[block].busy.info.frag.first = i - 1;
+
+ mdp -> heapstats.chunks_free += (BLOCKSIZE >> log) - 1;
+ mdp -> heapstats.bytes_free += BLOCKSIZE - (1 << log);
+ mdp -> heapstats.bytes_used -= BLOCKSIZE - (1 << log);
+ }
+ }
+ else
+ {
+ /* Large allocation to receive one or more blocks.
+ Search the free list in a circle starting at the last place visited.
+ If we loop completely around without finding a large enough
+ space we will have to get more memory from the system. */
+ blocks = BLOCKIFY(size);
+ start = block = MALLOC_SEARCH_START;
+ while (mdp -> heapinfo[block].free.size < blocks)
+ {
+ block = mdp -> heapinfo[block].free.next;
+ if (block == start)
+ {
+ /* Need to get more from the system. Check to see if
+ the new core will be contiguous with the final free
+ block; if so we don't need to get as much. */
+ block = mdp -> heapinfo[0].free.prev;
+ lastblocks = mdp -> heapinfo[block].free.size;
+ if (mdp -> heaplimit != 0 &&
+ block + lastblocks == mdp -> heaplimit &&
+ mdp -> morecore (mdp, 0) == ADDRESS(block + lastblocks) &&
+ (morecore (mdp, (blocks - lastblocks) * BLOCKSIZE)) != NULL)
+ {
+ mdp -> heapinfo[block].free.size = blocks;
+ mdp -> heapstats.bytes_free +=
+ (blocks - lastblocks) * BLOCKSIZE;
+ continue;
+ }
+ result = morecore(mdp, blocks * BLOCKSIZE);
+ if (result == NULL)
+ {
+ return (NULL);
+ }
+ block = BLOCK (result);
+ mdp -> heapinfo[block].busy.type = 0;
+ mdp -> heapinfo[block].busy.info.size = blocks;
+ mdp -> heapstats.chunks_used++;
+ mdp -> heapstats.bytes_used += blocks * BLOCKSIZE;
+ return (result);
+ }
+ }
+
+ /* At this point we have found a suitable free list entry.
+ Figure out how to remove what we need from the list. */
+ result = ADDRESS(block);
+ if (mdp -> heapinfo[block].free.size > blocks)
+ {
+ /* The block we found has a bit left over,
+ so relink the tail end back into the free list. */
+ mdp -> heapinfo[block + blocks].free.size
+ = mdp -> heapinfo[block].free.size - blocks;
+ mdp -> heapinfo[block + blocks].free.next
+ = mdp -> heapinfo[block].free.next;
+ mdp -> heapinfo[block + blocks].free.prev
+ = mdp -> heapinfo[block].free.prev;
+ mdp -> heapinfo[mdp -> heapinfo[block].free.prev].free.next
+ = mdp -> heapinfo[mdp -> heapinfo[block].free.next].free.prev
+ = mdp -> heapindex = block + blocks;
+ }
+ else
+ {
+ /* The block exactly matches our requirements,
+ so just remove it from the list. */
+ mdp -> heapinfo[mdp -> heapinfo[block].free.next].free.prev
+ = mdp -> heapinfo[block].free.prev;
+ mdp -> heapinfo[mdp -> heapinfo[block].free.prev].free.next
+ = mdp -> heapindex = mdp -> heapinfo[block].free.next;
+ mdp -> heapstats.chunks_free--;
+ }
+
+ mdp -> heapinfo[block].busy.type = 0;
+ mdp -> heapinfo[block].busy.info.size = blocks;
+ mdp -> heapstats.chunks_used++;
+ mdp -> heapstats.bytes_used += blocks * BLOCKSIZE;
+ mdp -> heapstats.bytes_free -= blocks * BLOCKSIZE;
+ }
+
+ return (result);
+}
+
+/* When using this package, provide a version of malloc/realloc/free built
+   on top of it, so that any "hidden" calls to malloc/realloc/free that the
+   application contains (such as calls from inside a system library) go
+   through this package as well, rather than through a second malloc
+   package competing with it for the default sbrk() region. */
+
+PTR
+malloc (size)
+ size_t size;
+{
+ return (mmalloc ((void *) NULL, size));
+}
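+
+/* Usage sketch (illustrative, not part of the original sources): a caller
+   that is content with the single sbrk() managed region passes a NULL
+   malloc descriptor, exactly as the malloc wrapper above does:
+
+        PTR p = mmalloc ((PTR) NULL, 100);
+        struct mstats s = mmstats ((PTR) NULL);
+        mfree ((PTR) NULL, p);
+
+   To manage a separate region, the caller supplies its own malloc
+   descriptor (a PTR) as the first argument of each call instead. */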
--- /dev/null
+/* Declarations for `mmalloc' and friends.
+ Copyright 1990, 1991, 1992 Free Software Foundation
+
+ Written May 1989 by Mike Haertel.
+ Heavily modified Mar 1992 by Fred Fish (fnf@cygnus.com).
+
+The GNU C Library is free software; you can redistribute it and/or
+modify it under the terms of the GNU Library General Public License as
+published by the Free Software Foundation; either version 2 of the
+License, or (at your option) any later version.
+
+The GNU C Library is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+Library General Public License for more details.
+
+You should have received a copy of the GNU Library General Public
+License along with the GNU C Library; see the file COPYING.LIB. If
+not, write to the Free Software Foundation, Inc., 675 Mass Ave,
+Cambridge, MA 02139, USA.
+
+ The author may be reached (Email) at the address mike@ai.mit.edu,
+ or (US mail) as Mike Haertel c/o Free Software Foundation. */
+
+
+#ifndef __MMALLOC_H
+#define __MMALLOC_H 1
+
+/* Ugly kludge to work around problem with some vendors (Sun for example)
+ that ship the ANSI <stdlib.h> file with non-ANSI compliant declarations
+ for malloc(), realloc(), calloc(), and free(). Since we don't use
+ these functions internally, but simply provide compatible replacements
+ layered on top of the m* equivalents, just hide whatever definitions
+ <stdlib.h> might supply. */
+
+#define malloc hide_malloc
+#define calloc hide_calloc
+#define realloc hide_realloc
+#define free hide_free
+
+#ifdef __STDC__
+# include <stddef.h>
+# include <stdlib.h>
+# define PTR void *
+# define CONST const
+# define PARAMS(paramlist) paramlist
+# include <limits.h>
+# ifndef NULL
+# define NULL (void *) 0
+# endif
+#else
+# undef size_t
+# define size_t unsigned int
+# define CHAR_BIT 8
+# define PTR char *
+# define CONST /* nothing */
+# define PARAMS(paramlist) ()
+# ifndef NULL
+# define NULL 0
+# endif
+#endif
+
+#undef malloc /* Undo the kludge to hide non-ANSI compliant declarations */
+#undef calloc
+#undef realloc
+#undef free
+
+#if defined(USG)
+# include <string.h>
+#endif
+
+#ifndef MIN
+# define MIN(A, B) ((A) < (B) ? (A) : (B))
+#endif
+
+#define MMALLOC_MAGIC "mmalloc" /* Mapped file magic number */
+#define MMALLOC_MAGIC_SIZE 8 /* Size of magic number buf */
+#define MMALLOC_VERSION 1 /* Current mmalloc version */
+#define MMALLOC_KEYS 16 /* Keys for application use */
+
+/* The allocator divides the heap into blocks of fixed size; large
+ requests receive one or more whole blocks, and small requests
+ receive a fragment of a block. Fragment sizes are powers of two,
+ and all fragments of a block are the same size. When all the
+ fragments in a block have been freed, the block itself is freed. */
+
+#define INT_BIT (CHAR_BIT * sizeof(int))
+#define BLOCKLOG (INT_BIT > 16 ? 12 : 9)
+#define BLOCKSIZE ((unsigned int) 1 << BLOCKLOG)
+#define BLOCKIFY(SIZE) (((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)
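+
+/* Illustration (not part of the original sources): on a host where INT_BIT
+   exceeds 16, BLOCKLOG is 12, so for example
+
+        BLOCKSIZE       == 4096
+        BLOCKIFY (1)    == 1
+        BLOCKIFY (4096) == 1
+        BLOCKIFY (5000) == 2
+
+   i.e. BLOCKIFY rounds a byte count up to a whole number of blocks. */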
+
+/* The difference between two pointers is a signed int.  On machines where
+   data addresses have the high bit set, we need to ensure that the
+   difference becomes an unsigned int when we are using the address as an
+   integral value.  In addition, when used with the '%' operator, the sign
+   of the result is machine dependent for negative operands, so force the
+   operand to be treated as an unsigned int. */
+
+#define ADDR2UINT(addr) ((unsigned int) ((char *) (addr) - (char *) NULL))
+#define RESIDUAL(addr,bsize) ((unsigned int) (ADDR2UINT (addr) % (bsize)))
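+
+/* Illustration (not part of the original sources): for an address whose
+   integral value is 0x1234 and a block size of 4096, RESIDUAL (0x1234,
+   4096) is 0x234, the offset of the address within its block; it is zero
+   exactly when the address is block aligned. */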
+
+/* Determine the amount of memory spanned by the initial heap table
+ (not an absolute limit). */
+
+#define HEAP (INT_BIT > 16 ? 4194304 : 65536)
+
+/* Number of contiguous free blocks allowed to build up at the end of
+ memory before they will be returned to the system. */
+
+#define FINAL_FREE_BLOCKS 8
+
+/* Where to start searching the free list when looking for new memory.
+ The two possible values are 0 and heapindex. Starting at 0 seems
+ to reduce total memory usage, while starting at heapindex seems to
+ run faster. */
+
+#define MALLOC_SEARCH_START mdp -> heapindex
+
+/* Address to block number and vice versa. */
+
+#define BLOCK(A) (((char *) (A) - mdp -> heapbase) / BLOCKSIZE + 1)
+
+#define ADDRESS(B) ((PTR) (((B) - 1) * BLOCKSIZE + mdp -> heapbase))
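+
+/* Illustration (not part of the original sources): block numbers start at
+   1, so BLOCK (mdp -> heapbase) == 1 and ADDRESS (1) == mdp -> heapbase,
+   and in general ADDRESS (BLOCK (A)) is the base address of the block
+   containing A.  Index 0 of the info table never maps to a heap address
+   and serves as the free list header. */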
+
+/* Data structure giving per-block information. */
+
+typedef union
+ {
+ /* Heap information for a busy block. */
+ struct
+ {
+ /* Zero for a large block, or positive giving the
+ logarithm to the base two of the fragment size. */
+ int type;
+ union
+ {
+ struct
+ {
+ size_t nfree; /* Free fragments in a fragmented block. */
+ size_t first; /* First free fragment of the block. */
+ } frag;
+ /* Size (in blocks) of a large cluster. */
+ size_t size;
+ } info;
+ } busy;
+ /* Heap information for a free block (that may be the first of
+ a free cluster). */
+ struct
+ {
+ size_t size; /* Size (in blocks) of a free cluster. */
+ size_t next; /* Index of next free cluster. */
+ size_t prev; /* Index of previous free cluster. */
+ } free;
+ } malloc_info;
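+
+/* Illustration (not part of the original sources): for a block split into
+   16 byte fragments the corresponding entry has busy.type == 4 and
+   busy.info.frag counts its free fragments; for an allocation spanning
+   three whole blocks the first block's entry has busy.type == 0 and
+   busy.info.size == 3; for a free cluster of three blocks the first
+   block's entry has free.size == 3 and free.next/free.prev link it into
+   the circular free list headed at index 0. */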
+
+/* List of blocks allocated with `mmemalign' (or `mvalloc'). */
+
+struct alignlist
+ {
+ struct alignlist *next;
+    PTR aligned;		/* The address that mmemalign returned. */
+ PTR exact; /* The address that malloc returned. */
+ };
+
+/* Doubly linked lists of free fragments. */
+
+struct list
+ {
+ struct list *next;
+ struct list *prev;
+ };
+
+/* Statistics available to the user.
+ FIXME: By design, the internals of the malloc package are no longer
+ exported to the user via an include file, so access to this data needs
+ to be via some other mechanism, such as mmstat_<something> where the
+ return value is the <something> the user is interested in. */
+
+struct mstats
+ {
+ size_t bytes_total; /* Total size of the heap. */
+ size_t chunks_used; /* Chunks allocated by the user. */
+ size_t bytes_used; /* Byte total of user-allocated chunks. */
+ size_t chunks_free; /* Chunks in the free list. */
+ size_t bytes_free; /* Byte total of chunks in the free list. */
+ };
+
+/* Internal structure that defines the format of the malloc-descriptor.
+ This gets written to the base address of the region that mmalloc is
+ managing, and thus also becomes the file header for the mapped file,
+ if such a file exists. */
+
+struct mdesc
+{
+ /* The "magic number" for an mmalloc file. */
+
+ char magic[MMALLOC_MAGIC_SIZE];
+
+ /* The size in bytes of this structure, used as a sanity check when reusing
+ a previously created mapped file. */
+
+ unsigned int headersize;
+
+ /* The version number of the mmalloc package that created this file. */
+
+ unsigned char version;
+
+ /* Some flag bits to keep track of various internal things. */
+
+ unsigned int flags;
+
+ /* If a system call made by the mmalloc package fails, the errno is
+ preserved for future examination. */
+
+ int errno;
+
+ /* Pointer to the function that is used to get more core, or return core
+ to the system, for requests using this malloc descriptor. For memory
+ mapped regions, this is the mmap() based routine. There may also be
+ a single malloc descriptor that points to an sbrk() based routine
+ for systems without mmap() or for applications that call the mmalloc()
+ package with a NULL malloc descriptor. */
+
+ PTR (*morecore) PARAMS ((struct mdesc *, ptrdiff_t));
+
+ /* Pointer to the function that causes an abort when the memory checking
+ features are activated. By default this is set to abort(), but can
+ be set to another function by the application using mmalloc(). */
+
+ void (*abortfunc) PARAMS ((void));
+
+ /* Debugging hook for free. */
+
+ void (*mfree_hook) PARAMS ((PTR, PTR));
+
+ /* Debugging hook for `malloc'. */
+
+ PTR (*mmalloc_hook) PARAMS ((PTR, size_t));
+
+ /* Debugging hook for realloc. */
+
+ PTR (*mrealloc_hook) PARAMS ((PTR, PTR, size_t));
+
+ /* Number of info entries. */
+
+ size_t heapsize;
+
+ /* Pointer to first block of the heap (base of the first block). */
+
+ char *heapbase;
+
+  /* Current search index in the info table. */
+
+ size_t heapindex;
+
+ /* Limit of valid info table indices. */
+
+ size_t heaplimit;
+
+  /* Table indexed by block number giving per-block information.
+     Allocated with align/__mmalloc_free (not mmalloc/mfree). */
+
+ malloc_info *heapinfo;
+
+ /* Instrumentation. */
+
+ struct mstats heapstats;
+
+  /* Free list headers for each fragment size. */
+
+ struct list fraghead[BLOCKLOG];
+
+ /* List of blocks allocated by memalign. */
+
+ struct alignlist *aligned_blocks;
+
+ /* The base address of the memory region for this malloc heap. This
+ is the location where the bookkeeping data for mmap and for malloc
+ begins. */
+
+ char *base;
+
+ /* The current location in the memory region for this malloc heap which
+ represents the end of memory in use. */
+
+ char *breakval;
+
+ /* The end of the current memory region for this malloc heap. This is
+ the first location past the end of mapped memory. */
+
+ char *top;
+
+ /* Open file descriptor for the file to which this malloc heap is mapped.
+ This will always be a valid file descriptor, since /dev/zero is used
+ by default if no open file is supplied by the client. Also note that
+ it may change each time the region is mapped and unmapped. */
+
+ int fd;
+
+ /* An array of keys to data within the mapped region, for use by the
+ application. */
+
+ void *keys[MMALLOC_KEYS];
+
+};
+
+/* Bits to look at in the malloc descriptor flags word */
+
+#define MMALLOC_DEVZERO (1 << 0) /* Have mapped to /dev/zero */
+#define MMALLOC_INITIALIZED (1 << 1) /* Initialized mmalloc */
+#define MMALLOC_MMCHECK_USED (1 << 2) /* mmcheck() called already */
+
+/* Allocate SIZE bytes of memory. */
+
+extern PTR mmalloc PARAMS ((PTR, size_t));
+
+/* Re-allocate the previously allocated block in PTR, making the new block
+ SIZE bytes long. */
+
+extern PTR mrealloc PARAMS ((PTR, PTR, size_t));
+
+/* Allocate NMEMB elements of SIZE bytes each, all initialized to 0. */
+
+extern PTR mcalloc PARAMS ((PTR, size_t, size_t));
+
+/* Free a block allocated by `mmalloc', `mrealloc' or `mcalloc'. */
+
+extern void mfree PARAMS ((PTR, PTR));
+
+/* Allocate SIZE bytes aligned to ALIGNMENT bytes. */
+
+extern PTR mmemalign PARAMS ((PTR, size_t, size_t));
+
+/* Allocate SIZE bytes on a page boundary. */
+
+extern PTR mvalloc PARAMS ((PTR, size_t));
+
+/* Activate a standard collection of debugging hooks. */
+
+extern int mmcheck PARAMS ((PTR, void (*) (void)));
+
+/* Pick up the current statistics. (see FIXME elsewhere) */
+
+extern struct mstats mmstats PARAMS ((PTR));
+
+/* Internal version of `mfree' used in `morecore'. */
+
+extern void __mmalloc_free PARAMS ((struct mdesc *, PTR));
+
+/* Hooks for debugging versions. */
+
+extern void (*__mfree_hook) PARAMS ((PTR, PTR));
+extern PTR (*__mmalloc_hook) PARAMS ((PTR, size_t));
+extern PTR (*__mrealloc_hook) PARAMS ((PTR, PTR, size_t));
+
+/* A default malloc descriptor for the single sbrk() managed region. */
+
+extern struct mdesc *__mmalloc_default_mdp;
+
+/* Initialize the first use of the default malloc descriptor, which uses
+ an sbrk() region. */
+
+extern struct mdesc *__mmalloc_sbrk_init PARAMS ((void));
+
+/* Grow or shrink a contiguous region using sbrk(). */
+
+extern PTR __mmalloc_sbrk_morecore PARAMS ((struct mdesc *, int));
+
+/* Grow or shrink a contiguous mapped region using mmap().
+   Works much like sbrk(). */
+
+#if defined(HAVE_MMAP)
+
+extern PTR __mmalloc_mmap_morecore PARAMS ((struct mdesc *, int));
+
+#endif
+
+/* Remap a mmalloc region that was previously mapped. */
+
+extern PTR __mmalloc_remap_core PARAMS ((struct mdesc *));
+
+/* Macro to convert from a user supplied malloc descriptor to pointer to the
+ internal malloc descriptor. If the user supplied descriptor is NULL, then
+ use the default internal version, initializing it if necessary. Otherwise
+ just cast the user supplied version (which is void *) to the proper type
+ (struct mdesc *). */
+
+#define MD_TO_MDP(md) \
+ ((md) == NULL \
+ ? (__mmalloc_default_mdp == NULL \
+ ? __mmalloc_sbrk_init () \
+ : __mmalloc_default_mdp) \
+ : (struct mdesc *) (md))
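+
+/* Illustration (not part of the original sources): the allocation entry
+   points expand the user supplied descriptor with
+
+        struct mdesc *mdp = MD_TO_MDP (md);
+
+   so that a NULL descriptor transparently selects, and on first use
+   initializes, the default sbrk() based region. */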
+
+#endif /* __MMALLOC_H */