// MT-optimized allocator -*- C++ -*-

// Copyright (C) 2003, 2004, 2005 Free Software Foundation, Inc.

// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 2, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING.  If not, write to the Free
// Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
// USA.

// As a special exception, you may use this file as part of a free software
// library without restriction.  Specifically, if other files instantiate
// templates or use macros or inline functions from this file, or you compile
// this file and link it with other files to produce an executable, this
// file does not by itself cause the resulting executable to be covered by
// the GNU General Public License.  This exception does not however
// invalidate any other reasons why the executable file might be covered by
// the GNU General Public License.
/** @file ext/mt_allocator.h
 *  This file is a GNU extension to the Standard C++ Library.
 */
#ifndef _MT_ALLOCATOR_H
#define _MT_ALLOCATOR_H 1

#include <bits/functexcept.h>
#include <bits/gthr.h>
#include <bits/atomicity.h>
_GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)
  typedef void (*__destroy_handler)(void*);
  /// @brief  Base class for pool object.
  struct __pool_base
  {
    // Using short int as type for the binmap implies we are never
    // caching blocks larger than 65535 with this allocator.
    typedef unsigned short int _Binmap_type;
    // Variables used to configure the behavior of the allocator,
    // assigned and explained in detail below.
    struct _Tune
    {
      // Compile time constants for the default _Tune values.
      enum { _S_align = 8 };
      enum { _S_max_bytes = 128 };
      enum { _S_min_bin = 8 };
      enum { _S_chunk_size = 4096 - 4 * sizeof(void*) };
      enum { _S_max_threads = 4096 };
      enum { _S_freelist_headroom = 10 };
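
      // Worked example of the defaults above: with _S_align = 8,
      // _S_min_bin = 8 and _S_max_bytes = 128, pooled requests are
      // rounded up to the power-of-2 bins 8, 16, 32, 64 and 128
      // bytes, while anything larger than 128 bytes is forwarded to
      // a plain operator new call.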
      // NB: In any case must be >= sizeof(_Block_record), that
      // is 4 on 32-bit machines and 8 on 64-bit machines.
      size_t        _M_align;

      // Allocation requests (after round-up to power of 2) below
      // this value will be handled by the allocator.  A raw call to
      // operator new will be used for requests larger than this value.
      size_t        _M_max_bytes;

      // Size in bytes of the smallest bin.
      // NB: Must be a power of 2 and >= _M_align.
      size_t        _M_min_bin;

      // In order to avoid fragmentation and minimize the number of
      // new() calls we always request new memory using this value.
      // Based on previous discussions on the libstdc++ mailing list
      // we have chosen the value below.
      // See http://gcc.gnu.org/ml/libstdc++/2001-07/msg00077.html
      size_t        _M_chunk_size;

      // The maximum number of supported threads.  For
      // single-threaded operation, use one.  Maximum values will
      // vary depending on details of the underlying system.  (For
      // instance, Linux 2.4.18 reports 4070 in
      // /proc/sys/kernel/threads-max, while Linux 2.6.6 reports
      size_t        _M_max_threads;

      // Each time a deallocation occurs in a threaded application
      // we make sure that there are no more than
      // _M_freelist_headroom % of used memory on the freelist.  If
      // the number of additional records is more than
      // _M_freelist_headroom % of the freelist, we move these
      // records back to the global pool.
      size_t        _M_freelist_headroom;
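
      // For example, with the default _M_freelist_headroom of 10 a
      // thread that currently has 1000 blocks of a given size in use
      // keeps roughly 100 free blocks of that size on its own
      // freelist; free blocks beyond that headroom are returned to
      // the global pool on deallocation (illustrative reading of the
      // rule described above).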
      // When set to true, forces all allocations to use new().
      bool          _M_force_new;
      _Tune()
      : _M_align(_S_align), _M_max_bytes(_S_max_bytes), _M_min_bin(_S_min_bin),
        _M_chunk_size(_S_chunk_size), _M_max_threads(_S_max_threads),
        _M_freelist_headroom(_S_freelist_headroom),
        _M_force_new(getenv("GLIBCXX_FORCE_NEW") ? true : false)
      { }
      _Tune(size_t __align, size_t __maxb, size_t __minbin, size_t __chunk,
            size_t __maxthreads, size_t __headroom, bool __force)
      : _M_align(__align), _M_max_bytes(__maxb), _M_min_bin(__minbin),
        _M_chunk_size(__chunk), _M_max_threads(__maxthreads),
        _M_freelist_headroom(__headroom), _M_force_new(__force)
      { }
    };
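
    // A minimal usage sketch (the values are illustrative only):
    // client code can build its own _Tune and install it through the
    // allocator's _M_set_options member before any allocation has
    // taken place, e.g.
    //
    //   typedef __gnu_cxx::__mt_alloc<int> allocator_type;
    //   __gnu_cxx::__pool_base::_Tune
    //     __t(16, 5120, 32, 5120, 20, 10, false);
    //   allocator_type __a;
    //   __a._M_set_options(__t);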
    struct _Block_address
      _Block_address*           _M_next;
    _M_get_options() const
    { return _M_options; }

    _M_set_options(_Tune __t)
    _M_check_threshold(size_t __bytes)
    { return __bytes > _M_options._M_max_bytes || _M_options._M_force_new; }
    _M_get_binmap(size_t __bytes)
    { return _M_binmap[__bytes]; }
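
    // For example, with the default _Tune values (_S_min_bin = 8,
    // _S_max_bytes = 128) the binmap sends request sizes 1-8 to bin 0,
    // 9-16 to bin 1, 17-32 to bin 2, 33-64 to bin 3 and 65-128 to
    // bin 4 (illustrative summary; the map itself is filled in during
    // pool initialization).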
    _M_get_align()
    { return _M_options._M_align; }
    __pool_base()
    : _M_options(_Tune()), _M_binmap(NULL), _M_init(false) { }
    __pool_base(const _Tune& __options)
    : _M_options(__options), _M_binmap(NULL), _M_init(false) { }
    __pool_base(const __pool_base&);

    operator=(const __pool_base&);
    // Configuration options.
    _Tune                       _M_options;

    _Binmap_type*               _M_binmap;

    // Configuration of the pool object via _M_options can happen
    // after construction but before initialization.  After
    // initialization is complete, this variable is set to true.
    bool                        _M_init;
  };
  /**
   *  @brief  Data describing the underlying memory pool, parameterized on
   *  thread support.
   */
  template<bool _Thread>
    class __pool;
  /// Specialization for single thread.
  template<>
    class __pool<false> : public __pool_base

      // Points to the block_record of the next free block.
      _Block_record* volatile         _M_next;
      // An "array" of pointers to the first free block.
      _Block_record** volatile        _M_first;

      // A list of the initial addresses of all allocated blocks.
      _Block_address*                 _M_address;
      if (__builtin_expect(_M_init == false, false))

      _M_destroy() throw();

      _M_reserve_block(size_t __bytes, const size_t __thread_id);

      _M_reclaim_block(char* __p, size_t __bytes);

      _M_get_thread_id() { return 0; }
      _M_get_bin(size_t __which)
      { return _M_bin[__which]; }

      _M_adjust_freelist(const _Bin_record&, _Block_record*, size_t)
      explicit __pool()
      : _M_bin(NULL), _M_bin_size(1) { }

      explicit __pool(const __pool_base::_Tune& __tune)
      : __pool_base(__tune), _M_bin(NULL), _M_bin_size(1) { }
      // An "array" of bin_records each of which represents a specific
      // power of 2 size.  Memory to this "array" is allocated in
      // _M_initialize().
      _Bin_record* volatile     _M_bin;

      // Actual value calculated in _M_initialize().
      size_t                    _M_bin_size;
  /// Specialization for thread enabled, via gthreads.h.
  template<>
    class __pool<true> : public __pool_base

      // Each requesting thread is assigned an id ranging from 1 to
      // _S_max_threads.  Thread id 0 is used as a global memory pool.
      // In order to get constant performance on the thread assignment
      // routine, we keep a list of free ids.  When a thread first
      // requests memory we remove the first record in this list and
      // store its address in a __gthread_key.  When initializing the
      // __gthread_key we specify a destructor.  When this destructor
      // is called (i.e. when the thread dies), we return the thread
      // id to the front of this list.
      struct _Thread_record

        // Points to next free thread id record.  NULL if last record in list.
        _Thread_record* volatile        _M_next;

        // Thread id ranging from 1 to _S_max_threads.
        // Points to the block_record of the next free block.
        _Block_record* volatile         _M_next;
        // The thread id of the thread which has requested this block.
        size_t                          _M_thread_id;
        // An "array" of pointers to the first free block for each
        // thread id.  Memory to this "array" is allocated in
        // _S_initialize() for _S_max_threads + global pool 0.
        _Block_record** volatile        _M_first;

        // A list of the initial addresses of all allocated blocks.
        _Block_address*                 _M_address;
        // An "array" of counters used to keep track of the number of
        // blocks that are on the freelist/used for each thread id.
        // Memory to these "arrays" is allocated in _S_initialize() for
        // _S_max_threads + global pool 0.
        size_t* volatile                _M_free;
        size_t* volatile                _M_used;
        // Each bin has its own mutex which is used to ensure data
        // integrity while changing "ownership" on a block.  The mutex
        // is initialized in _S_initialize().
        __gthread_mutex_t*              _M_mutex;
      // XXX GLIBCXX_ABI Deprecated
      _M_initialize(__destroy_handler);
      if (__builtin_expect(_M_init == false, false))

      _M_destroy() throw();

      _M_reserve_block(size_t __bytes, const size_t __thread_id);

      _M_reclaim_block(char* __p, size_t __bytes);
      _M_get_bin(size_t __which)
      { return _M_bin[__which]; }
      _M_adjust_freelist(const _Bin_record& __bin, _Block_record* __block,
                         size_t __thread_id)
      {
        if (__gthread_active_p())
          {
            __block->_M_thread_id = __thread_id;
            --__bin._M_free[__thread_id];
            ++__bin._M_used[__thread_id];
          }
      }
      // XXX GLIBCXX_ABI Deprecated
      _M_destroy_thread_key(void*);
      explicit __pool()
      : _M_bin(NULL), _M_bin_size(1), _M_thread_freelist(NULL)
      { }

      explicit __pool(const __pool_base::_Tune& __tune)
      : __pool_base(__tune), _M_bin(NULL), _M_bin_size(1),
        _M_thread_freelist(NULL)
      { }
      // An "array" of bin_records each of which represents a specific
      // power of 2 size.  Memory to this "array" is allocated in
      // _M_initialize().
      _Bin_record* volatile     _M_bin;

      // Actual value calculated in _M_initialize().
      size_t                    _M_bin_size;

      _Thread_record*           _M_thread_freelist;
      void*                     _M_thread_freelist_initial;
  template<template <bool> class _PoolTp, bool _Thread>
    struct __common_pool
      typedef _PoolTp<_Thread>          pool_type;

        static pool_type _S_pool;
  template<template <bool> class _PoolTp, bool _Thread>
    struct __common_pool_base;
  template<template <bool> class _PoolTp>
    struct __common_pool_base<_PoolTp, false>
    : public __common_pool<_PoolTp, false>

      using __common_pool<_PoolTp, false>::_S_get_pool;

        if (__builtin_expect(__init == false, false))
            _S_get_pool()._M_initialize_once();
  template<template <bool> class _PoolTp>
    struct __common_pool_base<_PoolTp, true>
    : public __common_pool<_PoolTp, true>

      using __common_pool<_PoolTp, true>::_S_get_pool;

      static void
      _S_initialize()
      { _S_get_pool()._M_initialize_once(); }

        if (__builtin_expect(__init == false, false))
            if (__gthread_active_p())
              {
                // On some platforms, __gthread_once_t is an aggregate.
                static __gthread_once_t __once = __GTHREAD_ONCE_INIT;
                __gthread_once(&__once, _S_initialize);
              }

            // Double check initialization.  May be necessary on some
            // systems for proper construction when not compiling with
            // thread flags.
            _S_get_pool()._M_initialize_once();
  /// @brief  Policy for shared __pool objects.
  template<template <bool> class _PoolTp, bool _Thread>
    struct __common_pool_policy : public __common_pool_base<_PoolTp, _Thread>
    {
      template<typename _Tp1, template <bool> class _PoolTp1 = _PoolTp,
               bool _Thread1 = _Thread>
        struct _M_rebind
        { typedef __common_pool_policy<_PoolTp1, _Thread1> other; };

      using __common_pool_base<_PoolTp, _Thread>::_S_get_pool;
      using __common_pool_base<_PoolTp, _Thread>::_S_initialize_once;
    };
  template<typename _Tp, template <bool> class _PoolTp, bool _Thread>
    struct __per_type_pool

      typedef _Tp                       value_type;
      typedef _PoolTp<_Thread>          pool_type;

        // Sane defaults for the _PoolTp.
        typedef typename pool_type::_Block_record _Block_record;
        const static size_t __a = (__alignof__(_Tp) >= sizeof(_Block_record)
                                   ? __alignof__(_Tp) : sizeof(_Block_record));

        typedef typename __pool_base::_Tune _Tune;
        static _Tune _S_tune(__a, sizeof(_Tp) * 64,
                             sizeof(_Tp) * 2 >= __a ? sizeof(_Tp) * 2 : __a,
                             sizeof(_Tp) * size_t(_Tune::_S_chunk_size),
                             _Tune::_S_max_threads,
                             _Tune::_S_freelist_headroom,
                             getenv("GLIBCXX_FORCE_NEW") ? true : false);
        static pool_type _S_pool(_S_tune);
  template<typename _Tp, template <bool> class _PoolTp, bool _Thread>
    struct __per_type_pool_base;
  template<typename _Tp, template <bool> class _PoolTp>
    struct __per_type_pool_base<_Tp, _PoolTp, false>
    : public __per_type_pool<_Tp, _PoolTp, false>

      using __per_type_pool<_Tp, _PoolTp, false>::_S_get_pool;

        if (__builtin_expect(__init == false, false))
            _S_get_pool()._M_initialize_once();
  template<typename _Tp, template <bool> class _PoolTp>
    struct __per_type_pool_base<_Tp, _PoolTp, true>
    : public __per_type_pool<_Tp, _PoolTp, true>

      using __per_type_pool<_Tp, _PoolTp, true>::_S_get_pool;

      static void
      _S_initialize()
      { _S_get_pool()._M_initialize_once(); }

        if (__builtin_expect(__init == false, false))
            if (__gthread_active_p())
              {
                // On some platforms, __gthread_once_t is an aggregate.
                static __gthread_once_t __once = __GTHREAD_ONCE_INIT;
                __gthread_once(&__once, _S_initialize);
              }

            // Double check initialization.  May be necessary on some
            // systems for proper construction when not compiling with
            // thread flags.
            _S_get_pool()._M_initialize_once();
  /// @brief  Policy for individual __pool objects.
  template<typename _Tp, template <bool> class _PoolTp, bool _Thread>
    struct __per_type_pool_policy
    : public __per_type_pool_base<_Tp, _PoolTp, _Thread>
    {
      template<typename _Tp1, template <bool> class _PoolTp1 = _PoolTp,
               bool _Thread1 = _Thread>
        struct _M_rebind
        { typedef __per_type_pool_policy<_Tp1, _PoolTp1, _Thread1> other; };

      using __per_type_pool_base<_Tp, _PoolTp, _Thread>::_S_get_pool;
      using __per_type_pool_base<_Tp, _PoolTp, _Thread>::_S_initialize_once;
    };
  /// @brief  Base class for _Tp dependent member functions.
  template<typename _Tp>
    class __mt_alloc_base

      typedef size_t                    size_type;
      typedef ptrdiff_t                 difference_type;
      typedef _Tp*                      pointer;
      typedef const _Tp*                const_pointer;
      typedef _Tp&                      reference;
      typedef const _Tp&                const_reference;
      typedef _Tp                       value_type;
      address(reference __x) const

      address(const_reference __x) const

      max_size() const throw()
      { return size_t(-1) / sizeof(_Tp); }
      // _GLIBCXX_RESOLVE_LIB_DEFECTS
      // 402. wrong new expression in [some_] allocator::construct
      construct(pointer __p, const _Tp& __val)
      { ::new(__p) _Tp(__val); }

      destroy(pointer __p) { __p->~_Tp(); }
#ifdef __GTHREADS
#define __thread_default true
#else
#define __thread_default false
#endif
  /**
   *  @brief  This is a fixed size (power of 2) allocator which - when
   *  compiled with thread support - will maintain one freelist per
   *  size per thread plus a "global" one.  Steps are taken to limit
   *  the per thread freelist sizes (by returning excess back to
   *  the "global" list).
   *
   *  http://gcc.gnu.org/onlinedocs/libstdc++/ext/mt_allocator.html
   */
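  // A minimal usage sketch (illustrative): __mt_alloc can be plugged
  // into a standard container like any other allocator, e.g.
  //
  //   #include <vector>
  //   #include <ext/mt_allocator.h>
  //
  //   typedef __gnu_cxx::__mt_alloc<int> int_alloc;
  //   std::vector<int, int_alloc> __v;
  //   __v.push_back(17);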
  template<typename _Tp,
           typename _Poolp = __common_pool_policy<__pool, __thread_default> >
    class __mt_alloc : public __mt_alloc_base<_Tp>

      typedef size_t                            size_type;
      typedef ptrdiff_t                         difference_type;
      typedef _Tp*                              pointer;
      typedef const _Tp*                        const_pointer;
      typedef _Tp&                              reference;
      typedef const _Tp&                        const_reference;
      typedef _Tp                               value_type;
      typedef _Poolp                            __policy_type;
      typedef typename _Poolp::pool_type        __pool_type;
      template<typename _Tp1, typename _Poolp1 = _Poolp>
        struct rebind
        {
          typedef typename _Poolp1::template _M_rebind<_Tp1>::other pol_type;
          typedef __mt_alloc<_Tp1, pol_type> other;
        };
      __mt_alloc() throw() { }

      __mt_alloc(const __mt_alloc&) throw() { }

      template<typename _Tp1, typename _Poolp1>
        __mt_alloc(const __mt_alloc<_Tp1, _Poolp1>&) throw() { }

      ~__mt_alloc() throw() { }
      allocate(size_type __n, const void* = 0);

      deallocate(pointer __p, size_type __n);
      const __pool_base::_Tune
      _M_get_options()
      {
        // Return a copy, not a reference, for external consumption.
        return __policy_type::_S_get_pool()._M_get_options();
      }

      _M_set_options(__pool_base::_Tune __t)
      { __policy_type::_S_get_pool()._M_set_options(__t); }
  template<typename _Tp, typename _Poolp>
    typename __mt_alloc<_Tp, _Poolp>::pointer
    __mt_alloc<_Tp, _Poolp>::
    allocate(size_type __n, const void*)
    {
      if (__builtin_expect(__n > this->max_size(), false))
        std::__throw_bad_alloc();

      __policy_type::_S_initialize_once();

      // Requests larger than _M_max_bytes are handled by operator
      // new/delete directly.
      __pool_type& __pool = __policy_type::_S_get_pool();
      const size_t __bytes = __n * sizeof(_Tp);
      if (__pool._M_check_threshold(__bytes))
        {
          void* __ret = ::operator new(__bytes);
          return static_cast<_Tp*>(__ret);
        }

      // Round up to power of 2 and figure out which bin to use.
      const size_t __which = __pool._M_get_binmap(__bytes);
      const size_t __thread_id = __pool._M_get_thread_id();

      // Find out if we have blocks on our freelist.  If so, go ahead
      // and use them directly without having to lock anything.
      char* __c;
      typedef typename __pool_type::_Bin_record _Bin_record;
      const _Bin_record& __bin = __pool._M_get_bin(__which);
      if (__bin._M_first[__thread_id])
        {
          typedef typename __pool_type::_Block_record _Block_record;
          _Block_record* __block = __bin._M_first[__thread_id];
          __bin._M_first[__thread_id] = __block->_M_next;

          __pool._M_adjust_freelist(__bin, __block, __thread_id);
          __c = reinterpret_cast<char*>(__block) + __pool._M_get_align();
        }
      else
        __c = __pool._M_reserve_block(__bytes, __thread_id);

      return static_cast<_Tp*>(static_cast<void*>(__c));
    }
  template<typename _Tp, typename _Poolp>
    void
    __mt_alloc<_Tp, _Poolp>::
    deallocate(pointer __p, size_type __n)
    {
      if (__builtin_expect(__p != 0, true))
        {
          // Requests larger than _M_max_bytes are handled by
          // operator new/delete directly.
          __pool_type& __pool = __policy_type::_S_get_pool();
          const size_t __bytes = __n * sizeof(_Tp);
          if (__pool._M_check_threshold(__bytes))
            ::operator delete(__p);
          else
            __pool._M_reclaim_block(reinterpret_cast<char*>(__p), __bytes);
        }
    }
  template<typename _Tp, typename _Poolp>
    inline bool
    operator==(const __mt_alloc<_Tp, _Poolp>&, const __mt_alloc<_Tp, _Poolp>&)
    { return true; }

  template<typename _Tp, typename _Poolp>
    inline bool
    operator!=(const __mt_alloc<_Tp, _Poolp>&, const __mt_alloc<_Tp, _Poolp>&)
    { return false; }
#undef __thread_default

_GLIBCXX_END_NAMESPACE

#endif