From 2832d07bd187c8298675cf1f6f2020033624e0df Mon Sep 17 00:00:00 2001
From: Benjamin Kosnik
Date: Fri, 18 Jun 2004 16:52:42 +0000
Subject: [PATCH] pool_allocator.h: Qualify __throw_bad_alloc.

2004-06-18  Benjamin Kosnik

	* include/ext/pool_allocator.h: Qualify __throw_bad_alloc.
	(__pool_base): Remove unused template parameter.  Add
	protected.  Move lock data into __pool_base::_Lock.  Remove static
	on member functions.
	(__pool_base::_M_get_free_list): New.
	(__pool_alloc): Move _S_force_new here.
	* src/allocator.cc: Move out of line __pool_base definitions here.
	* config/linker-map.gnu: Export bits from __pool_base.

From-SVN: r83355
---
 libstdc++-v3/ChangeLog                    |  11 ++
 libstdc++-v3/config/linker-map.gnu        |   7 +
 libstdc++-v3/include/ext/pool_allocator.h | 222 ++++------------------
 libstdc++-v3/src/allocator.cc             | 132 ++++++++++++-
 4 files changed, 187 insertions(+), 185 deletions(-)

diff --git a/libstdc++-v3/ChangeLog b/libstdc++-v3/ChangeLog
index 3af763107d5..4561e9e27a9 100644
--- a/libstdc++-v3/ChangeLog
+++ b/libstdc++-v3/ChangeLog
@@ -1,3 +1,14 @@
+2004-06-18  Benjamin Kosnik
+
+	* include/ext/pool_allocator.h: Qualify __throw_bad_alloc.
+	(__pool_base): Remove unused template parameter.  Add
+	protected.  Move lock data into __pool_base::_Lock.  Remove static
+	on member functions.
+	(__pool_base::_M_get_free_list): New.
+	(__pool_alloc): Move _S_force_new here.
+	* src/allocator.cc: Move out of line __pool_base definitions here.
+	* config/linker-map.gnu: Export bits from __pool_base.
+
 2004-06-18  Paolo Carlini
 
 	* config/locale/gnu/numeric_members.cc
diff --git a/libstdc++-v3/config/linker-map.gnu b/libstdc++-v3/config/linker-map.gnu
index aac10bb6968..a8f5c02bf6b 100644
--- a/libstdc++-v3/config/linker-map.gnu
+++ b/libstdc++-v3/config/linker-map.gnu
@@ -255,6 +255,13 @@ GLIBCXX_3.4.1 {
 
 } GLIBCXX_3.4;
 
+GLIBCXX_3.4.2 {
+
+  _ZN9__gnu_cxx11__pool_base5_Lock7_S_lockE;
+  _ZN9__gnu_cxx11__pool_base9_M_refillEj;
+  _ZN9__gnu_cxx11__pool_base16_M_get_free_listEj;
+
+} GLIBCXX_3.4.1;
 # Symbols in the support library (libsupc++) have their own tag.
 CXXABI_1.3 {
 
diff --git a/libstdc++-v3/include/ext/pool_allocator.h b/libstdc++-v3/include/ext/pool_allocator.h
index 2f0aec50362..e5efac36f5b 100644
--- a/libstdc++-v3/include/ext/pool_allocator.h
+++ b/libstdc++-v3/include/ext/pool_allocator.h
@@ -55,8 +55,6 @@
 
 namespace __gnu_cxx
 {
-  using std::__throw_bad_alloc;
-
   /**
    *  @if maint
    *  Uses various allocators to fulfill underlying requests (and makes as
@@ -71,71 +69,64 @@ namespace __gnu_cxx
    *  information that we can return the object to the proper free list
    *  without permanently losing part of the object.
    *
-   *  The template parameter specifies whether more than one thread may use
-   *  this allocator.  It is safe to allocate an object from one instance
-   *  of the allocator and deallocate it with another one.  This effectively
-   *  transfers its ownership to the second one.  This may have undesirable
-   *  effects on reference locality.
-   *
    *  @endif
    *  (See @link Allocators allocators info @endlink for more.)
    */
-  template<bool __threads>
-    struct __pool_base
+  class __pool_base
     {
+    protected:
       enum { _S_align = 8 };
       enum { _S_max_bytes = 128 };
-      enum { _S_freelists = _S_max_bytes / _S_align };
+      enum { _S_free_list_size = _S_max_bytes / _S_align };
 
+      // It would be nice to use _STL_auto_lock here.  But we need a
+      // test whether threads are in use.
+      struct _Lock
+      {
+        static _STL_mutex_lock _S_lock;
+        _Lock() { _S_lock._M_acquire_lock(); }
+        ~_Lock() { _S_lock._M_release_lock(); }
+      };
+
       union _Obj
       {
        union _Obj* _M_free_list_link;
        char        _M_client_data[1];    // The client sees this.
       };
 
-      static _Obj* volatile _S_free_list[_S_freelists];
+      static _Obj* volatile _S_free_list[_S_free_list_size];
 
       // Chunk allocation state.
       static char*            _S_start_free;
       static char*            _S_end_free;
-      static size_t           _S_heap_size;
-
-      static _STL_mutex_lock  _S_lock;
-      static _Atomic_word     _S_force_new;
+      static size_t _S_heap_size;
 
-      static size_t
-      _S_round_up(size_t __bytes)
+      size_t
+      _M_round_up(size_t __bytes)
       { return ((__bytes + (size_t)_S_align - 1) & ~((size_t)_S_align - 1)); }
 
-      static size_t
-      _S_freelist_index(size_t __bytes)
-      { return ((__bytes + (size_t)_S_align - 1) / (size_t)_S_align - 1); }
+      _Obj* volatile*
+      _M_get_free_list(size_t __bytes);
 
       // Returns an object of size __n, and optionally adds to size __n
      // free list.
-      static void*
-      _S_refill(size_t __n);
+      void*
+      _M_refill(size_t __n);
 
       // Allocates a chunk for nobjs of size size.  nobjs may be reduced
      // if it is inconvenient to allocate the requested number.
-      static char*
-      _S_chunk_alloc(size_t __n, int& __nobjs);
-
-      // It would be nice to use _STL_auto_lock here.  But we need a
-      // test whether threads are in use.
-      struct _Lock
-      {
-        _Lock() { if (__threads) _S_lock._M_acquire_lock(); }
-        ~_Lock() { if (__threads) _S_lock._M_release_lock(); }
-      } __attribute__ ((__unused__));
-      friend struct _Lock;
+      char*
+      _M_allocate_chunk(size_t __n, int& __nobjs);
     };
 
-  typedef __pool_base<true> __pool_alloc_base;
 
   template<typename _Tp>
-    class __pool_alloc : private __pool_alloc_base
+    class __pool_alloc : private __pool_base
     {
+    private:
+      static _Atomic_word _S_force_new;
+
     public:
       typedef size_t     size_type;
       typedef ptrdiff_t  difference_type;
@@ -194,116 +185,9 @@ namespace __gnu_cxx
     operator!=(const __pool_alloc<_Tp>&, const __pool_alloc<_Tp>&)
     { return false; }
 
-  // Allocate memory in large chunks in order to avoid fragmenting the
-  // heap too much.  Assume that __n is properly aligned.  We hold
-  // the allocation lock.
-  template<bool __threads>
-    char*
-    __pool_base<__threads>::_S_chunk_alloc(size_t __n, int& __nobjs)
-    {
-      char* __result;
-      size_t __total_bytes = __n * __nobjs;
-      size_t __bytes_left = _S_end_free - _S_start_free;
-
-      if (__bytes_left >= __total_bytes)
-        {
-          __result = _S_start_free;
-          _S_start_free += __total_bytes;
-          return __result;
-        }
-      else if (__bytes_left >= __n)
-        {
-          __nobjs = (int)(__bytes_left / __n);
-          __total_bytes = __n * __nobjs;
-          __result = _S_start_free;
-          _S_start_free += __total_bytes;
-          return __result;
-        }
-      else
-        {
-          size_t __bytes_to_get = (2 * __total_bytes
-                                   + _S_round_up(_S_heap_size >> 4));
-          // Try to make use of the left-over piece.
-          if (__bytes_left > 0)
-            {
-              _Obj* volatile* __free_list = (_S_free_list
-                                             + _S_freelist_index(__bytes_left));
-
-              ((_Obj*)(void*)_S_start_free)->_M_free_list_link = *__free_list;
-              *__free_list = (_Obj*)(void*)_S_start_free;
-            }
-
-          _S_start_free = static_cast<char*>(::operator new(__bytes_to_get));
-          if (_S_start_free == 0)
-            {
-              size_t __i;
-              _Obj* volatile* __free_list;
-              _Obj* __p;
-              // Try to make do with what we have.  That can't hurt.  We
-              // do not try smaller requests, since that tends to result
-              // in disaster on multi-process machines.
-              __i = __n;
-              for (; __i <= (size_t) _S_max_bytes; __i += (size_t) _S_align)
-                {
-                  __free_list = _S_free_list + _S_freelist_index(__i);
-                  __p = *__free_list;
-                  if (__p != 0)
-                    {
-                      *__free_list = __p -> _M_free_list_link;
-                      _S_start_free = (char*)__p;
-                      _S_end_free = _S_start_free + __i;
-                      return _S_chunk_alloc(__n, __nobjs);
-                      // Any leftover piece will eventually make it to the
-                      // right free list.
-                    }
-                }
-              _S_end_free = 0;  // In case of exception.
-              _S_start_free = static_cast<char*>(::operator new(__bytes_to_get));
-              // This should either throw an exception or remedy the situation.
-              // Thus we assume it succeeded.
-            }
-          _S_heap_size += __bytes_to_get;
-          _S_end_free = _S_start_free + __bytes_to_get;
-          return _S_chunk_alloc(__n, __nobjs);
-        }
-    }
-
-  // Returns an object of size __n, and optionally adds to "size
-  // __n"'s free list.  We assume that __n is properly aligned.  We
-  // hold the allocation lock.
-  template<bool __threads>
-    void*
-    __pool_base<__threads>::_S_refill(size_t __n)
-    {
-      int __nobjs = 20;
-      char* __chunk = _S_chunk_alloc(__n, __nobjs);
-      _Obj* volatile* __free_list;
-      _Obj* __result;
-      _Obj* __current_obj;
-      _Obj* __next_obj;
-      int __i;
-
-      if (1 == __nobjs)
-        return __chunk;
-      __free_list = _S_free_list + _S_freelist_index(__n);
-
-      // Build free list in chunk.
-      __result = (_Obj*)(void*)__chunk;
-      *__free_list = __next_obj = (_Obj*)(void*)(__chunk + __n);
-      for (__i = 1; ; __i++)
-        {
-          __current_obj = __next_obj;
-          __next_obj = (_Obj*)(void*)((char*)__next_obj + __n);
-          if (__nobjs - 1 == __i)
-            {
-              __current_obj -> _M_free_list_link = 0;
-              break;
-            }
-          else
-            __current_obj -> _M_free_list_link = __next_obj;
-        }
-      return __result;
-    }
+  template<typename _Tp>
+    _Atomic_word
+    __pool_alloc<_Tp>::_S_force_new;
 
   template<typename _Tp>
     _Tp*
@@ -314,7 +198,6 @@ namespace __gnu_cxx
         {
           if (__n <= max_size())
             {
-              const size_t __bytes = __n * sizeof(_Tp);
               // If there is a race through here, assume answer from getenv
               // will resolve in same direction.  Inspired by techniques
               // to efficiently support threading found in basic_string.h.
@@ -325,31 +208,32 @@ namespace __gnu_cxx
                   else
                     __atomic_add(&_S_force_new, -1);
                 }
-
-              if ((__bytes > (size_t) _S_max_bytes) || (_S_force_new > 0))
+
+              const size_t __bytes = __n * sizeof(_Tp);
+              if (__bytes > size_t(_S_max_bytes) || _S_force_new == 1)
                 __ret = static_cast<_Tp*>(::operator new(__bytes));
              else
                 {
-                  _Obj* volatile* __free_list = (_S_free_list
-                                                 + _S_freelist_index(__bytes));
+                  _Obj* volatile* __free_list = _M_get_free_list(__bytes);
+
                   // Acquire the lock here with a constructor call.  This
                   // ensures that it is released in exit or during stack
                   // unwinding.
                  _Lock __lock_instance;
                   _Obj* __restrict__ __result = *__free_list;
                   if (__builtin_expect(__result == 0, 0))
-                    __ret = static_cast<_Tp*>(_S_refill(_S_round_up(__bytes)));
+                    __ret = static_cast<_Tp*>(_M_refill(_M_round_up(__bytes)));
                   else
                     {
                       *__free_list = __result->_M_free_list_link;
                       __ret = reinterpret_cast<_Tp*>(__result);
                     }
                   if (__builtin_expect(__ret == 0, 0))
-                    __throw_bad_alloc();
+                    std::__throw_bad_alloc();
                }
             }
           else
-            __throw_bad_alloc();
+            std::__throw_bad_alloc();
         }
       return __ret;
     }
@@ -361,44 +245,22 @@ namespace __gnu_cxx
       if (__n)
         {
           const size_t __bytes = __n * sizeof(_Tp);
-          if ((__bytes > (size_t) _S_max_bytes) || (_S_force_new > 0))
+          if (__bytes > static_cast<size_t>(_S_max_bytes) || _S_force_new == 1)
             ::operator delete(__p);
          else
            {
-              _Obj* volatile* __free_list = (_S_free_list
-                                             + _S_freelist_index(__bytes));
-              _Obj* __q = (_Obj*)__p;
+              _Obj* volatile* __free_list = _M_get_free_list(__bytes);
+              _Obj* __q = reinterpret_cast<_Obj*>(__p);
 
               // Acquire the lock here with a constructor call.  This
              // ensures that it is released in exit or during stack
              // unwinding.
               _Lock __lock_instance;
-              __q -> _M_free_list_link = *__free_list;
+              __q->_M_free_list_link = *__free_list;
               *__free_list = __q;
             }
         }
     }
-
-  template<bool __threads>
-    typename __pool_base<__threads>::_Obj* volatile
-    __pool_base<__threads>::_S_free_list[_S_freelists];
-
-  template<bool __threads>
-    char* __pool_base<__threads>::_S_start_free = 0;
-
-  template<bool __threads>
-    char* __pool_base<__threads>::_S_end_free = 0;
-
-  template<bool __threads>
-    size_t __pool_base<__threads>::_S_heap_size = 0;
-
-  template<bool __threads>
-    _STL_mutex_lock
-    __pool_base<__threads>::_S_lock __STL_MUTEX_INITIALIZER;
-
-  template<bool __threads>
-    _Atomic_word
-    __pool_base<__threads>::_S_force_new = 0;
 } // namespace __gnu_cxx
 
 #endif
diff --git a/libstdc++-v3/src/allocator.cc b/libstdc++-v3/src/allocator.cc
index 3a0efedbc40..d4928735848 100644
--- a/libstdc++-v3/src/allocator.cc
+++ b/libstdc++-v3/src/allocator.cc
@@ -36,16 +36,138 @@
 #include <ext/mt_allocator.h>
 #include <ext/pool_allocator.h>
 
-// Explicitly instantiate the static data members of the underlying
-// allocator.
 namespace __gnu_cxx
 {
+  // Instantiations for __mt_alloc.
   template class __mt_alloc<char>;
   template class __mt_alloc<wchar_t>;
 
-  // Static members of __pool_alloc.
+  // Definitions and instantiations for __pool_alloc and base class.
+  __pool_base::_Obj* volatile*
+  __pool_base::_M_get_free_list(size_t __bytes)
+  {
+    size_t __i = ((__bytes + (size_t)_S_align - 1) / (size_t)_S_align - 1);
+    return _S_free_list + __i;
+  }
+
+  // Allocate memory in large chunks in order to avoid fragmenting the
+  // heap too much.  Assume that __n is properly aligned.  We hold the
+  // allocation lock.
+  char*
+  __pool_base::_M_allocate_chunk(size_t __n, int& __nobjs)
+  {
+    char* __result;
+    size_t __total_bytes = __n * __nobjs;
+    size_t __bytes_left = _S_end_free - _S_start_free;
+
+    if (__bytes_left >= __total_bytes)
+      {
+        __result = _S_start_free;
+        _S_start_free += __total_bytes;
+        return __result;
+      }
+    else if (__bytes_left >= __n)
+      {
+        __nobjs = (int)(__bytes_left / __n);
+        __total_bytes = __n * __nobjs;
+        __result = _S_start_free;
+        _S_start_free += __total_bytes;
+        return __result;
+      }
+    else
+      {
+        // Try to make use of the left-over piece.
+        if (__bytes_left > 0)
+          {
+            _Obj* volatile* __free_list = _M_get_free_list(__bytes_left);
+            ((_Obj*)(void*)_S_start_free)->_M_free_list_link = *__free_list;
+            *__free_list = (_Obj*)(void*)_S_start_free;
+          }
+
+        size_t __bytes_to_get = (2 * __total_bytes
+                                 + _M_round_up(_S_heap_size >> 4));
+        _S_start_free = static_cast<char*>(::operator new(__bytes_to_get));
+        if (_S_start_free == 0)
+          {
+            size_t __i;
+            _Obj* volatile* __free_list;
+            _Obj* __p;
+
+            // Try to make do with what we have.  That can't hurt.  We
+            // do not try smaller requests, since that tends to result
+            // in disaster on multi-process machines.
+            __i = __n;
+            for (; __i <= (size_t) _S_max_bytes; __i += (size_t) _S_align)
+              {
+                __free_list = _M_get_free_list(__i);
+                __p = *__free_list;
+                if (__p != 0)
+                  {
+                    *__free_list = __p -> _M_free_list_link;
+                    _S_start_free = (char*)__p;
+                    _S_end_free = _S_start_free + __i;
+                    return _M_allocate_chunk(__n, __nobjs);
+                    // Any leftover piece will eventually make it to the
+                    // right free list.
+                  }
+              }
+            _S_end_free = 0;  // In case of exception.
+            _S_start_free = static_cast<char*>(::operator new(__bytes_to_get));
+            // This should either throw an exception or remedy the situation.
+            // Thus we assume it succeeded.
+          }
+        _S_heap_size += __bytes_to_get;
+        _S_end_free = _S_start_free + __bytes_to_get;
+        return _M_allocate_chunk(__n, __nobjs);
+      }
+  }
+
+  // Returns an object of size __n, and optionally adds to "size
+  // __n"'s free list.  We assume that __n is properly aligned.  We
+  // hold the allocation lock.
+  void*
+  __pool_base::_M_refill(size_t __n)
+  {
+    int __nobjs = 20;
+    char* __chunk = _M_allocate_chunk(__n, __nobjs);
+    _Obj* volatile* __free_list;
+    _Obj* __result;
+    _Obj* __current_obj;
+    _Obj* __next_obj;
+    int __i;
+
+    if (1 == __nobjs)
+      return __chunk;
+    __free_list = _M_get_free_list(__n);
+
+    // Build free list in chunk.
+    __result = (_Obj*)(void*)__chunk;
+    *__free_list = __next_obj = (_Obj*)(void*)(__chunk + __n);
+    for (__i = 1; ; __i++)
+      {
+        __current_obj = __next_obj;
+        __next_obj = (_Obj*)(void*)((char*)__next_obj + __n);
+        if (__nobjs - 1 == __i)
+          {
+            __current_obj -> _M_free_list_link = 0;
+            break;
+          }
+        else
+          __current_obj -> _M_free_list_link = __next_obj;
+      }
+    return __result;
+  }
+
+  __pool_base::_Obj* volatile __pool_base::_S_free_list[_S_free_list_size];
+
+  char* __pool_base::_S_start_free = 0;
+
+  char* __pool_base::_S_end_free = 0;
+
+  size_t __pool_base::_S_heap_size = 0;
+
+  _STL_mutex_lock __pool_base::_Lock::_S_lock __STL_MUTEX_INITIALIZER;
+
   template class __pool_alloc<char>;
   template class __pool_alloc<wchar_t>;
-
-  template class __pool_base<true>;
 } // namespace __gnu_cxx
-- 
2.30.2
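
For readers skimming the diff, the size-class arithmetic that the patch centralizes in _M_round_up and _M_get_free_list is easy to check in isolation. The sketch below is not part of the commit; it simply reuses the same constants and formulas (8-byte alignment, 128-byte cutoff, 16 free lists), and the names round_up and free_list_index are local to the example rather than libstdc++ symbols.

```c++
#include <cstdio>
#include <cstddef>

// Same constants as __pool_base: 8-byte alignment and a 128-byte cutoff,
// giving 128 / 8 == 16 free lists that serve sizes 8, 16, ..., 128.
enum { align = 8, max_bytes = 128, free_list_size = max_bytes / align };

// Mirrors __pool_base::_M_round_up: round a request up to a multiple of 8.
std::size_t round_up(std::size_t bytes)
{ return (bytes + (std::size_t)align - 1) & ~((std::size_t)align - 1); }

// Mirrors the index computation inside _M_get_free_list (the old
// _S_freelist_index): which of the 16 buckets serves this request.
std::size_t free_list_index(std::size_t bytes)
{ return (bytes + (std::size_t)align - 1) / (std::size_t)align - 1; }

int main()
{
  const std::size_t requests[] = { 1, 8, 13, 64, 128 };
  for (int i = 0; i < 5; ++i)
    std::printf("request %3lu -> rounded up to %3lu -> free list #%lu\n",
                (unsigned long)requests[i],
                (unsigned long)round_up(requests[i]),
                (unsigned long)free_list_index(requests[i]));
  return 0;
}
```

A 13-byte request, for instance, is rounded up to 16 and served from free list #1, which is exactly the list that _M_refill would restock in 16-byte pieces.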
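As a usage note grounded in the code above: __pool_alloc keeps the standard allocator interface, so it can be plugged into any container, and the getenv("GLIBCXX_FORCE_NEW") check in allocate() lets the pool be bypassed at run time. A minimal sketch, assuming a GCC toolchain that installs <ext/pool_allocator.h>:

```c++
#include <vector>
#include <ext/pool_allocator.h>

int main()
{
  // Allocations of 128 bytes or less come from __pool_base's shared
  // free lists; larger ones fall through to ::operator new.
  std::vector<int, __gnu_cxx::__pool_alloc<int> > v;
  for (int i = 0; i < 1000; ++i)
    v.push_back(i);
  return 0;
}

// Setting GLIBCXX_FORCE_NEW in the environment before the first allocation
// makes __pool_alloc::allocate use plain operator new instead of the pool,
// which is convenient when running under memory debuggers.
```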