namespace __gnu_cxx
{
- using std::__throw_bad_alloc;
-
/**
* @if maint
* Uses various allocators to fulfill underlying requests (and makes as
* few requests as possible when in default high-speed pool mode).  In
* the pooled case we allocate an object of exactly the rounded-up
* size, so the client has enough size information that we can return
* the object to the proper free list without permanently losing part
* of the object.
*
- * The template parameter specifies whether more than one thread may use
- * this allocator. It is safe to allocate an object from one instance
- * of the allocator and deallocate it with another one. This effectively
- * transfers its ownership to the second one. This may have undesirable
- * effects on reference locality.
- *
* @endif
* (See @link Allocators allocators info @endlink for more.)
*/
- template<bool __threads>
- struct __pool_base
+ class __pool_base
{
+ protected:
+
enum { _S_align = 8 };
enum { _S_max_bytes = 128 };
- enum { _S_freelists = _S_max_bytes / _S_align };
+ enum { _S_free_list_size = _S_max_bytes / _S_align };
+ // It would be nice to use _STL_auto_lock here. But we need a
+ // test whether threads are in use.
+ struct _Lock
+ {
+ static _STL_mutex_lock _S_lock;
+ _Lock() { _S_lock._M_acquire_lock(); }
+ ~_Lock() { _S_lock._M_release_lock(); }
+ };
+
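The _Lock type above is a scope guard: the constructor acquires the mutex and the destructor releases it, so the lock is dropped on every exit path, including stack unwinding after an exception. A minimal standalone sketch of the same idiom, assuming a POSIX mutex in place of _STL_mutex_lock (the scoped_lock name is illustrative, not part of the library):

    #include <pthread.h>

    class scoped_lock
    {
      pthread_mutex_t& _M_mutex;
    public:
      explicit scoped_lock(pthread_mutex_t& __m) : _M_mutex(__m)
      { pthread_mutex_lock(&_M_mutex); }    // acquire on entry

      ~scoped_lock()
      { pthread_mutex_unlock(&_M_mutex); }  // release on every exit path
    };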
union _Obj
{
union _Obj* _M_free_list_link;
char _M_client_data[1]; // The client sees this.
};
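A block on the free list stores its link pointer in its own first bytes; because every pooled object is at least _S_align (8) bytes, the pointer always fits and no per-object header is needed. A self-contained sketch of pushing and popping such an intrusive free list (all names here are hypothetical, mirroring the _Obj union above):

    union obj
    {
      union obj* free_list_link;
      char       client_data[1];
    };

    static obj* head = 0;

    void push_block(void* __p)
    {
      obj* __o = static_cast<obj*>(__p);
      __o->free_list_link = head;    // reuse the block's own storage
      head = __o;
    }

    void* pop_block()
    {
      obj* __o = head;
      if (__o)
        head = __o->free_list_link;  // unlink; caller overwrites client_data
      return __o;
    }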
- static _Obj* volatile _S_free_list[_S_freelists];
+ static _Obj* volatile _S_free_list[_S_free_list_size];
// Chunk allocation state.
static char* _S_start_free;
static char* _S_end_free;
- static size_t _S_heap_size;
-
- static _STL_mutex_lock _S_lock;
- static _Atomic_word _S_force_new;
+ static size_t _S_heap_size;
- static size_t
- _S_round_up(size_t __bytes)
+ size_t
+ _M_round_up(size_t __bytes)
{ return ((__bytes + (size_t)_S_align - 1) & ~((size_t)_S_align - 1)); }
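The mask expression rounds a request up to the next multiple of _S_align. A free-standing check of the expected values, assuming _S_align == 8 (round_up is a hypothetical copy of the formula):

    #include <cassert>
    #include <cstddef>

    std::size_t round_up(std::size_t __b)
    { return (__b + 8 - 1) & ~std::size_t(8 - 1); }

    int main()
    {
      assert(round_up(1)   == 8);
      assert(round_up(8)   == 8);
      assert(round_up(13)  == 16);
      assert(round_up(128) == 128);
    }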
- static size_t
- _S_freelist_index(size_t __bytes)
- { return ((__bytes + (size_t)_S_align - 1) / (size_t)_S_align - 1); }
+ _Obj* volatile*
+ _M_get_free_list(size_t __bytes);
// Returns an object of size __n, and optionally adds to "size
// __n"'s free list.
- static void*
- _S_refill(size_t __n);
+ void*
+ _M_refill(size_t __n);
// Allocates a chunk for __nobjs objects of size __n.  __nobjs may
// be reduced if it is inconvenient to allocate the requested number.
- static char*
- _S_chunk_alloc(size_t __n, int& __nobjs);
-
- // It would be nice to use _STL_auto_lock here. But we need a
- // test whether threads are in use.
- struct _Lock
- {
- _Lock() { if (__threads) _S_lock._M_acquire_lock(); }
- ~_Lock() { if (__threads) _S_lock._M_release_lock(); }
- } __attribute__ ((__unused__));
- friend struct _Lock;
+ char*
+ _M_allocate_chunk(size_t __n, int& __nobjs);
};
- typedef __pool_base<true> __pool_alloc_base;
template<typename _Tp>
- class __pool_alloc : private __pool_alloc_base
+ class __pool_alloc : private __pool_base
{
+ private:
+ static _Atomic_word _S_force_new;
+
public:
typedef size_t size_type;
typedef ptrdiff_t difference_type;
operator!=(const __pool_alloc<_Tp>&, const __pool_alloc<_Tp>&)
{ return false; }
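Since all of the allocator's state is static, any two __pool_alloc<_Tp> instances are interchangeable: memory obtained from one may be returned through another, which is why the comparison operators can return constants. A hedged usage sketch, assuming the <ext/pool_allocator.h> header included later in this patch:

    #include <list>
    #include <ext/pool_allocator.h>

    int main()
    {
      // Node-sized requests (well under _S_max_bytes) are served
      // from the pool's free lists.
      std::list<int, __gnu_cxx::__pool_alloc<int> > __l;
      for (int __i = 0; __i < 1000; ++__i)
        __l.push_back(__i);
    }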
- // Allocate memory in large chunks in order to avoid fragmenting the
- // heap too much. Assume that __n is properly aligned. We hold
- // the allocation lock.
- template<bool __threads>
- char*
- __pool_base<__threads>::_S_chunk_alloc(size_t __n, int& __nobjs)
- {
- char* __result;
- size_t __total_bytes = __n * __nobjs;
- size_t __bytes_left = _S_end_free - _S_start_free;
-
- if (__bytes_left >= __total_bytes)
- {
- __result = _S_start_free;
- _S_start_free += __total_bytes;
- return __result ;
- }
- else if (__bytes_left >= __n)
- {
- __nobjs = (int)(__bytes_left / __n);
- __total_bytes = __n * __nobjs;
- __result = _S_start_free;
- _S_start_free += __total_bytes;
- return __result;
- }
- else
- {
- size_t __bytes_to_get = (2 * __total_bytes
- + _S_round_up(_S_heap_size >> 4));
- // Try to make use of the left-over piece.
- if (__bytes_left > 0)
- {
- _Obj* volatile* __free_list = (_S_free_list
- + _S_freelist_index(__bytes_left));
-
- ((_Obj*)(void*)_S_start_free)->_M_free_list_link = *__free_list;
- *__free_list = (_Obj*)(void*)_S_start_free;
- }
-
- _S_start_free = static_cast<char*>(::operator new(__bytes_to_get));
- if (_S_start_free == 0)
- {
- size_t __i;
- _Obj* volatile* __free_list;
- _Obj* __p;
- // Try to make do with what we have. That can't hurt. We
- // do not try smaller requests, since that tends to result
- // in disaster on multi-process machines.
- __i = __n;
- for (; __i <= (size_t) _S_max_bytes; __i += (size_t) _S_align)
- {
- __free_list = _S_free_list + _S_freelist_index(__i);
- __p = *__free_list;
- if (__p != 0)
- {
- *__free_list = __p -> _M_free_list_link;
- _S_start_free = (char*)__p;
- _S_end_free = _S_start_free + __i;
- return _S_chunk_alloc(__n, __nobjs);
- // Any leftover piece will eventually make it to the
- // right free list.
- }
- }
- _S_end_free = 0; // In case of exception.
- _S_start_free = static_cast<char*>(::operator new(__bytes_to_get));
- // This should either throw an exception or remedy the situation.
- // Thus we assume it succeeded.
- }
- _S_heap_size += __bytes_to_get;
- _S_end_free = _S_start_free + __bytes_to_get;
- return _S_chunk_alloc(__n, __nobjs);
- }
- }
-
- // Returns an object of size __n, and optionally adds to "size
- // __n"'s free list. We assume that __n is properly aligned. We
- // hold the allocation lock.
- template<bool __threads>
- void*
- __pool_base<__threads>::_S_refill(size_t __n)
- {
- int __nobjs = 20;
- char* __chunk = _S_chunk_alloc(__n, __nobjs);
- _Obj* volatile* __free_list;
- _Obj* __result;
- _Obj* __current_obj;
- _Obj* __next_obj;
- int __i;
-
- if (1 == __nobjs)
- return __chunk;
- __free_list = _S_free_list + _S_freelist_index(__n);
-
- // Build free list in chunk.
- __result = (_Obj*)(void*)__chunk;
- *__free_list = __next_obj = (_Obj*)(void*)(__chunk + __n);
- for (__i = 1; ; __i++)
- {
- __current_obj = __next_obj;
- __next_obj = (_Obj*)(void*)((char*)__next_obj + __n);
- if (__nobjs - 1 == __i)
- {
- __current_obj -> _M_free_list_link = 0;
- break;
- }
- else
- __current_obj -> _M_free_list_link = __next_obj;
- }
- return __result;
- }
+ template<typename _Tp>
+ _Atomic_word
+ __pool_alloc<_Tp>::_S_force_new;
template<typename _Tp>
_Tp*
__pool_alloc<_Tp>::allocate(size_type __n, const void*)
{
_Tp* __ret = 0;
if (__n)
  {
if (__n <= max_size())
{
- const size_t __bytes = __n * sizeof(_Tp);
// If there is a race through here, assume answer from getenv
// will resolve in same direction. Inspired by techniques
// to efficiently support threading found in basic_string.h.
if (_S_force_new == 0)
  {
    if (std::getenv("GLIBCXX_FORCE_NEW"))
      __atomic_add(&_S_force_new, 1);
else
__atomic_add(&_S_force_new, -1);
}
-
- if ((__bytes > (size_t) _S_max_bytes) || (_S_force_new > 0))
+
+ const size_t __bytes = __n * sizeof(_Tp);
+ if (__bytes > size_t(_S_max_bytes) || _S_force_new == 1)
__ret = static_cast<_Tp*>(::operator new(__bytes));
else
{
- _Obj* volatile* __free_list = (_S_free_list
- + _S_freelist_index(__bytes));
+ _Obj* volatile* __free_list = _M_get_free_list(__bytes);
+
// Acquire the lock here with a constructor call. This
// ensures that it is released in exit or during stack
// unwinding.
_Lock __lock_instance;
_Obj* __restrict__ __result = *__free_list;
if (__builtin_expect(__result == 0, 0))
- __ret = static_cast<_Tp*>(_S_refill(_S_round_up(__bytes)));
+ __ret = static_cast<_Tp*>(_M_refill(_M_round_up(__bytes)));
else
{
*__free_list = __result->_M_free_list_link;
__ret = reinterpret_cast<_Tp*>(__result);
}
if (__builtin_expect(__ret == 0, 0))
- __throw_bad_alloc();
+ std::__throw_bad_alloc();
}
}
else
- __throw_bad_alloc();
+ std::__throw_bad_alloc();
}
return __ret;
}

template<typename _Tp>
  void
  __pool_alloc<_Tp>::deallocate(pointer __p, size_type __n)
  {
if (__n)
{
const size_t __bytes = __n * sizeof(_Tp);
- if ((__bytes > (size_t) _S_max_bytes) || (_S_force_new > 0))
+ if (__bytes > static_cast<size_t>(_S_max_bytes) || _S_force_new == 1)
::operator delete(__p);
else
{
- _Obj* volatile* __free_list = (_S_free_list
- + _S_freelist_index(__bytes));
- _Obj* __q = (_Obj*)__p;
+ _Obj* volatile* __free_list = _M_get_free_list(__bytes);
+ _Obj* __q = reinterpret_cast<_Obj*>(__p);
// Acquire the lock here with a constructor call. This
// ensures that it is released in exit or during stack
// unwinding.
_Lock __lock_instance;
- __q -> _M_free_list_link = *__free_list;
+ __q->_M_free_list_link = *__free_list;
*__free_list = __q;
}
}
}
-
- template<bool __threads>
- typename __pool_base<__threads>::_Obj* volatile
- __pool_base<__threads>::_S_free_list[_S_freelists];
-
- template<bool __threads>
- char* __pool_base<__threads>::_S_start_free = 0;
-
- template<bool __threads>
- char* __pool_base<__threads>::_S_end_free = 0;
-
- template<bool __threads>
- size_t __pool_base<__threads>::_S_heap_size = 0;
-
- template<bool __threads>
- _STL_mutex_lock
- __pool_base<__threads>::_S_lock __STL_MUTEX_INITIALIZER;
-
- template<bool __threads>
- _Atomic_word
- __pool_base<__threads>::_S_force_new = 0;
} // namespace __gnu_cxx
#endif
#include <ext/mt_allocator.h>
#include <ext/pool_allocator.h>
-// Explicitly instantiate the static data members of the underlying
-// allocator.
namespace __gnu_cxx
{
+ // Instantiations for __mt_alloc.
template class __mt_alloc<char>;
template class __mt_alloc<wchar_t>;
- // Static members of __pool_alloc.
+ // Definitions and instantiations for __pool_alloc and base class.
+ __pool_base::_Obj* volatile*
+ __pool_base::_M_get_free_list(size_t __bytes)
+ {
+ size_t __i = ((__bytes + (size_t)_S_align - 1) / (size_t)_S_align - 1);
+ return _S_free_list + __i;
+ }
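The division maps each rounded-up size to one of the sixteen free lists: bytes 1..8 go to list 0, 9..16 to list 1, and so on up to 121..128 in list 15. A free-standing check of the formula, assuming _S_align == 8 (index_of is a hypothetical name):

    #include <cassert>
    #include <cstddef>

    std::size_t index_of(std::size_t __b)
    { return (__b + 8 - 1) / 8 - 1; }

    int main()
    {
      assert(index_of(1)   == 0);
      assert(index_of(8)   == 0);
      assert(index_of(9)   == 1);
      assert(index_of(128) == 15);  // _S_free_list_size - 1
    }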
+
+ // Allocate memory in large chunks in order to avoid fragmenting the
+ // heap too much. Assume that __n is properly aligned. We hold the
+ // allocation lock.
+ char*
+ __pool_base::_M_allocate_chunk(size_t __n, int& __nobjs)
+ {
+ char* __result;
+ size_t __total_bytes = __n * __nobjs;
+ size_t __bytes_left = _S_end_free - _S_start_free;
+
+ if (__bytes_left >= __total_bytes)
+ {
+ __result = _S_start_free;
+ _S_start_free += __total_bytes;
+ return __result;
+ }
+ else if (__bytes_left >= __n)
+ {
+ __nobjs = (int)(__bytes_left / __n);
+ __total_bytes = __n * __nobjs;
+ __result = _S_start_free;
+ _S_start_free += __total_bytes;
+ return __result;
+ }
+ else
+ {
+ // Try to make use of the left-over piece.
+ if (__bytes_left > 0)
+ {
+ _Obj* volatile* __free_list = _M_get_free_list(__bytes_left);
+ ((_Obj*)(void*)_S_start_free)->_M_free_list_link = *__free_list;
+ *__free_list = (_Obj*)(void*)_S_start_free;
+ }
+
+ size_t __bytes_to_get = (2 * __total_bytes
+ + _M_round_up(_S_heap_size >> 4));
+ _S_start_free = static_cast<char*>(::operator new(__bytes_to_get));
+ if (_S_start_free == 0)
+ {
+ size_t __i;
+ _Obj* volatile* __free_list;
+ _Obj* __p;
+
+ // Try to make do with what we have. That can't hurt. We
+ // do not try smaller requests, since that tends to result
+ // in disaster on multi-process machines.
+ __i = __n;
+ for (; __i <= (size_t) _S_max_bytes; __i += (size_t) _S_align)
+ {
+ __free_list = _M_get_free_list(__i);
+ __p = *__free_list;
+ if (__p != 0)
+ {
+ *__free_list = __p->_M_free_list_link;
+ _S_start_free = (char*)__p;
+ _S_end_free = _S_start_free + __i;
+ return _M_allocate_chunk(__n, __nobjs);
+ // Any leftover piece will eventually make it to the
+ // right free list.
+ }
+ }
+ _S_end_free = 0; // In case of exception.
+ _S_start_free = static_cast<char*>(::operator new(__bytes_to_get));
+ // This should either throw an exception or remedy the situation.
+ // Thus we assume it succeeded.
+ }
+ _S_heap_size += __bytes_to_get;
+ _S_end_free = _S_start_free + __bytes_to_get;
+ return _M_allocate_chunk(__n, __nobjs);
+ }
+ }
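On a miss the pool requests twice the bytes currently needed plus a sixteenth of everything allocated so far, so chunk sizes grow along with the heap. A numeric model of that policy, assuming __n == 16 and the initial __nobjs of 20 used by _M_refill (free-standing names are hypothetical):

    #include <cstdio>
    #include <cstddef>

    std::size_t round_up(std::size_t __b)
    { return (__b + 7) & ~std::size_t(7); }

    int main()
    {
      std::size_t __heap = 0;
      for (int __i = 0; __i < 4; ++__i)
        {
          std::size_t __get = 2 * 16 * 20 + round_up(__heap >> 4);
          __heap += __get;
          std::printf("chunk %d: %lu bytes, heap now %lu\n", __i,
                      (unsigned long)__get, (unsigned long)__heap);
        }
    }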
+
+ // Returns an object of size __n, and optionally adds to "size
+ // __n"'s free list. We assume that __n is properly aligned. We
+ // hold the allocation lock.
+ void*
+ __pool_base::_M_refill(size_t __n)
+ {
+ int __nobjs = 20;
+ char* __chunk = _M_allocate_chunk(__n, __nobjs);
+ _Obj* volatile* __free_list;
+ _Obj* __result;
+ _Obj* __current_obj;
+ _Obj* __next_obj;
+ int __i;
+
+ if (1 == __nobjs)
+ return __chunk;
+ __free_list = _M_get_free_list(__n);
+
+ // Build free list in chunk.
+ __result = (_Obj*)(void*)__chunk;
+ *__free_list = __next_obj = (_Obj*)(void*)(__chunk + __n);
+ for (__i = 1; ; __i++)
+ {
+ __current_obj = __next_obj;
+ __next_obj = (_Obj*)(void*)((char*)__next_obj + __n);
+ if (__nobjs - 1 == __i)
+ {
+ __current_obj->_M_free_list_link = 0;
+ break;
+ }
+ else
+ __current_obj->_M_free_list_link = __next_obj;
+ }
+ return __result;
+ }
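The loop above carves one chunk into __nobjs equal pieces: the first is returned to the caller and the rest are linked through their leading bytes onto the free list. The same carving as a self-contained sketch (all names hypothetical):

    #include <cstdlib>
    #include <cstddef>

    struct node { node* next; };

    node* thread_chunk(char* __chunk, std::size_t __size, int __n)
    {
      node* __head = reinterpret_cast<node*>(__chunk);
      node* __cur = __head;
      for (int __i = 1; __i < __n; ++__i)
        {
          // Each piece's first bytes hold the link to the next piece.
          node* __next = reinterpret_cast<node*>(__chunk + __i * __size);
          __cur->next = __next;
          __cur = __next;
        }
      __cur->next = 0;  // terminate the list
      return __head;
    }

    int main()
    {
      char* __buf = static_cast<char*>(std::malloc(8 * 20));
      node* __list = thread_chunk(__buf, 8, 20);  // 20 pieces of 8 bytes
      (void) __list;
      std::free(__buf);
    }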
+
+ __pool_base::_Obj* volatile __pool_base::_S_free_list[_S_free_list_size];
+
+ char* __pool_base::_S_start_free = 0;
+
+ char* __pool_base::_S_end_free = 0;
+
+ size_t __pool_base::_S_heap_size = 0;
+
+ _STL_mutex_lock __pool_base::_Lock::_S_lock __STL_MUTEX_INITIALIZER;
+
template class __pool_alloc<char>;
template class __pool_alloc<wchar_t>;
-
- template class __pool_base<true>;
} // namespace __gnu_cxx