mt_allocator (__mt_alloc<>::_Tune): Add _M_align, the alignment requested.
// MT-optimized allocator -*- C++ -*-

// Copyright (C) 2003, 2004 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 2, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING. If not, write to the Free
// Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307,
// USA.

// As a special exception, you may use this file as part of a free software
// library without restriction. Specifically, if other files instantiate
// templates or use macros or inline functions from this file, or you compile
// this file and link it with other files to produce an executable, this
// file does not by itself cause the resulting executable to be covered by
// the GNU General Public License. This exception does not however
// invalidate any other reasons why the executable file might be covered by
// the GNU General Public License.

/** @file ext/mt_allocator.h
 *  This file is a GNU extension to the Standard C++ Library.
 *  You should only include this header if you are using GCC 3 or later.
 */

#ifndef _MT_ALLOCATOR_H
#define _MT_ALLOCATOR_H 1

#include <new>
#include <cstdlib>
#include <bits/functexcept.h>
#include <bits/gthr.h>
#include <bits/atomicity.h>

namespace __gnu_cxx
{
  /**
   *  This is a fixed size (power of 2) allocator which - when
   *  compiled with thread support - will maintain one freelist per
   *  size per thread plus a "global" one. Steps are taken to limit
   *  the per thread freelist sizes (by returning excess back to
   *  "global").
   *
   *  Further details:
   *  http://gcc.gnu.org/onlinedocs/libstdc++/ext/mt_allocator.html
   */
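  // Usage sketch (illustrative only): __mt_alloc meets the standard
  // allocator requirements (rebind, allocate, deallocate, construct,
  // destroy), so it can be named as the allocator argument of any
  // standard container. The container and variable names below are
  // just an example.
  //
  //   #include <vector>
  //   #include <ext/mt_allocator.h>
  //
  //   typedef __gnu_cxx::__mt_alloc<int> int_alloc_type;
  //   std::vector<int, int_alloc_type> __v;
  //   __v.push_back(1);   // small requests are served from the bins
  //   __v.push_back(2);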
  template<typename _Tp>
    class __mt_alloc
    {
    public:
      typedef size_t size_type;
      typedef ptrdiff_t difference_type;
      typedef _Tp* pointer;
      typedef const _Tp* const_pointer;
      typedef _Tp& reference;
      typedef const _Tp& const_reference;
      typedef _Tp value_type;

      template<typename _Tp1>
        struct rebind
        { typedef __mt_alloc<_Tp1> other; };

      __mt_alloc() throw()
      {
        // XXX
      }

      __mt_alloc(const __mt_alloc&) throw()
      {
        // XXX
      }

      template<typename _Tp1>
        __mt_alloc(const __mt_alloc<_Tp1>& obj) throw()
        {
          // XXX
        }

      ~__mt_alloc() throw() { }

      pointer
      address(reference __x) const
      { return &__x; }

      const_pointer
      address(const_reference __x) const
      { return &__x; }

      size_type
      max_size() const throw()
      { return size_t(-1) / sizeof(_Tp); }

      // _GLIBCXX_RESOLVE_LIB_DEFECTS
      // 402. wrong new expression in [some_] allocator::construct
      void
      construct(pointer __p, const _Tp& __val)
      { ::new(__p) _Tp(__val); }

      void
      destroy(pointer __p) { __p->~_Tp(); }

      pointer
      allocate(size_type __n, const void* = 0);

      void
      deallocate(pointer __p, size_type __n);

      // Variables used to configure the behavior of the allocator,
      // assigned and explained in detail below.
      struct _Tune
      {
        // Alignment needed.
        // NB: In any case it must be >= sizeof(_Block_record), that
        // is 4 on 32 bit machines and 8 on 64 bit machines.
        size_t _M_align;

        // Allocation requests (after round-up to power of 2) below
        // this value will be handled by the allocator. A call to
        // new() will be used for requests larger than this value.
        size_t _M_max_bytes;

        // Size in bytes of the smallest bin.
        // NB: Must be a power of 2 and >= _M_align.
        size_t _M_min_bin;

        // In order to avoid fragmenting and minimize the number of
        // new() calls we always request new memory using this
        // value. Based on previous discussions on the libstdc++
        // mailing list we have chosen the value below.
        // See http://gcc.gnu.org/ml/libstdc++/2001-07/msg00077.html
        size_t _M_chunk_size;

        // The maximum number of supported threads. Our Linux 2.4.18
        // reports 4070 in /proc/sys/kernel/threads-max.
        size_t _M_max_threads;

        // Each time a deallocation occurs in a threaded application
        // we make sure that there are no more than
        // _M_freelist_headroom % of used memory on the freelist. If
        // the number of additional records is more than
        // _M_freelist_headroom % of the freelist, we move these
        // records back to the global pool.
        size_t _M_freelist_headroom;

        // When set to true, forces all allocations to use new().
        bool _M_force_new;

        explicit
        _Tune()
        : _M_align(8), _M_max_bytes(128), _M_min_bin(8),
          _M_chunk_size(4096 - 4 * sizeof(void*)),
          _M_max_threads(4096), _M_freelist_headroom(10),
          _M_force_new(getenv("GLIBCXX_FORCE_NEW") ? true : false)
        { }

        explicit
        _Tune(size_t __align, size_t __maxb, size_t __minbin,
              size_t __chunk, size_t __maxthreads, size_t __headroom,
              bool __force)
        : _M_align(__align), _M_max_bytes(__maxb), _M_min_bin(__minbin),
          _M_chunk_size(__chunk), _M_max_threads(__maxthreads),
          _M_freelist_headroom(__headroom), _M_force_new(__force)
        { }
      };
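
      // Worked example of the defaults above: with _M_min_bin == 8 and
      // _M_max_bytes == 128, _S_initialize() sets up five bins of 8,
      // 16, 32, 64 and 128 bytes (_S_bin_size == 5). Larger requests,
      // or any request when the GLIBCXX_FORCE_NEW environment variable
      // is set, go straight to operator new/delete.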

    private:
      // We need to create the initial lists and set up some variables
      // before we can answer to the first request for memory.
#ifdef __GTHREADS
      static __gthread_once_t _S_once;
#endif
      static bool _S_init;

      static void
      _S_initialize();

      // Configuration options.
      static _Tune _S_options;

      static const _Tune
      _S_get_options()
      { return _S_options; }

      static void
      _S_set_options(_Tune __t)
      {
        if (!_S_init)
          _S_options = __t;
      }

      // Using short int as type for the binmap implies we are never
      // caching blocks larger than 65535 with this allocator.
      typedef unsigned short int _Binmap_type;
      static _Binmap_type* _S_binmap;

      // Each requesting thread is assigned an id ranging from 1 to
      // _S_max_threads. Thread id 0 is used as a global memory pool.
      // In order to get constant performance on the thread assignment
      // routine, we keep a list of free ids. When a thread first
      // requests memory we remove the first record in this list and
      // store the address in a __gthread_key. When initializing the
      // __gthread_key we specify a destructor. When this destructor
      // is called (i.e. when the thread dies), we return the thread
      // id to the front of this list.
#ifdef __GTHREADS
      struct _Thread_record
      {
        // Points to next free thread id record. NULL if last record in list.
        _Thread_record* volatile _M_next;

        // Thread id ranging from 1 to _S_max_threads.
        size_t _M_id;
      };

      static _Thread_record* volatile _S_thread_freelist_first;
      static __gthread_mutex_t _S_thread_freelist_mutex;
      static __gthread_key_t _S_thread_key;

      static void
      _S_destroy_thread_key(void* __freelist_pos);
#endif

      static size_t
      _S_get_thread_id();

      union _Block_record
      {
        // Points to the block_record of the next free block.
        _Block_record* volatile _M_next;

#ifdef __GTHREADS
        // The thread id of the thread which has requested this block.
        size_t _M_thread_id;
#endif
      };

      struct _Bin_record
      {
        // An "array" of pointers to the first free block for each
        // thread id. Memory for this "array" is allocated in
        // _S_initialize() for _S_max_threads + global pool 0.
        _Block_record** volatile _M_first;

#ifdef __GTHREADS
        // An "array" of counters used to keep track of the number of
        // blocks that are on the freelist/used for each thread id.
        // Memory for these "arrays" is allocated in _S_initialize()
        // for _S_max_threads + global pool 0.
        size_t* volatile _M_free;
        size_t* volatile _M_used;

        // Each bin has its own mutex which is used to ensure data
        // integrity while changing "ownership" on a block. The mutex
        // is initialized in _S_initialize().
        __gthread_mutex_t* _M_mutex;
#endif
      };

      // An "array" of bin_records each of which represents a specific
      // power of 2 size. Memory for this "array" is allocated in
      // _S_initialize().
      static _Bin_record* volatile _S_bin;

      // Actual value calculated in _S_initialize().
      static size_t _S_bin_size;
    };

  template<typename _Tp>
    typename __mt_alloc<_Tp>::pointer
    __mt_alloc<_Tp>::
    allocate(size_type __n, const void*)
    {
      // Although the test in __gthread_once() would suffice, we wrap
      // the test of the once condition in our own unlocked check. This
      // saves one function call to pthread_once() (which itself only
      // tests for the once value unlocked anyway and immediately
      // returns if it is set).
      if (!_S_init)
        {
#ifdef __GTHREADS
          if (__gthread_active_p())
            __gthread_once(&_S_once, _S_initialize);
#endif
          if (!_S_init)
            _S_initialize();
        }

      // Requests larger than _M_max_bytes are handled by new/delete
      // directly.
      const size_t __bytes = __n * sizeof(_Tp);
      if (__bytes > _S_options._M_max_bytes || _S_options._M_force_new)
        {
          void* __ret = ::operator new(__bytes);
          return static_cast<_Tp*>(__ret);
        }

      // Round up to power of 2 and figure out which bin to use.
      const size_t __which = _S_binmap[__bytes];
      const size_t __thread_id = _S_get_thread_id();

      // Find out if we have blocks on our freelist. If so, go ahead
      // and use them directly without having to lock anything.
      const _Bin_record& __bin = _S_bin[__which];
      _Block_record* __block = NULL;
      if (__bin._M_first[__thread_id] == NULL)
        {
          // NB: For alignment reasons, we can't use the first _M_align
          // bytes, even when sizeof(_Block_record) < _M_align.
          const size_t __bin_size = ((_S_options._M_min_bin << __which)
                                     + _S_options._M_align);
          size_t __block_count = _S_options._M_chunk_size / __bin_size;
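
          // For example, with the default _Tune values on a 64 bit
          // target (sizeof(void*) == 8), the smallest bin (__which == 0)
          // gives __bin_size == (8 << 0) + 8 == 16, so __block_count
          // is (4096 - 32) / 16 == 254 blocks carved out of each chunk.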

          // Are we using threads?
          // - Yes, check if there are free blocks on the global
          //   list. If so, grab up to __block_count blocks in one
          //   lock and change ownership. If the global list is
          //   empty, we allocate a new chunk and add those blocks
          //   directly to our own freelist (with us as owner).
          // - No, all operations are made directly to global pool 0:
          //   no need to lock or change ownership, but we still check
          //   for free blocks on the global list (and add new ones if
          //   there are none) and take the first one.
#ifdef __GTHREADS
          if (__gthread_active_p())
            {
              __gthread_mutex_lock(__bin._M_mutex);
              if (__bin._M_first[0] == NULL)
                {
                  // No need to hold the lock when we are adding a
                  // whole chunk to our own list.
                  __gthread_mutex_unlock(__bin._M_mutex);

                  void* __v = ::operator new(_S_options._M_chunk_size);
                  __bin._M_first[__thread_id] = static_cast<_Block_record*>(__v);
                  __bin._M_free[__thread_id] = __block_count;

                  --__block_count;
                  __block = __bin._M_first[__thread_id];
                  while (__block_count-- > 0)
                    {
                      char* __c = reinterpret_cast<char*>(__block) + __bin_size;
                      __block->_M_next = reinterpret_cast<_Block_record*>(__c);
                      __block = __block->_M_next;
                    }
                  __block->_M_next = NULL;
                }
              else
                {
                  // Is the number of required blocks greater than or
                  // equal to the number that can be provided by the
                  // global free list?
                  __bin._M_first[__thread_id] = __bin._M_first[0];
                  if (__block_count >= __bin._M_free[0])
                    {
                      __bin._M_free[__thread_id] = __bin._M_free[0];
                      __bin._M_free[0] = 0;
                      __bin._M_first[0] = NULL;
                    }
                  else
                    {
                      __bin._M_free[__thread_id] = __block_count;
                      __bin._M_free[0] -= __block_count;
                      --__block_count;
                      __block = __bin._M_first[0];
                      while (__block_count-- > 0)
                        __block = __block->_M_next;
                      __bin._M_first[0] = __block->_M_next;
                      __block->_M_next = NULL;
                    }
                  __gthread_mutex_unlock(__bin._M_mutex);
                }
            }
          else
#endif
            {
              void* __v = ::operator new(_S_options._M_chunk_size);
              __bin._M_first[0] = static_cast<_Block_record*>(__v);

              --__block_count;
              __block = __bin._M_first[0];
              while (__block_count-- > 0)
                {
                  char* __c = reinterpret_cast<char*>(__block) + __bin_size;
                  __block->_M_next = reinterpret_cast<_Block_record*>(__c);
                  __block = __block->_M_next;
                }
              __block->_M_next = NULL;
            }
        }

      __block = __bin._M_first[__thread_id];
      __bin._M_first[__thread_id] = __bin._M_first[__thread_id]->_M_next;
#ifdef __GTHREADS
      if (__gthread_active_p())
        {
          __block->_M_thread_id = __thread_id;
          --__bin._M_free[__thread_id];
          ++__bin._M_used[__thread_id];
        }
#endif

      char* __c = reinterpret_cast<char*>(__block) + _S_options._M_align;
      return static_cast<_Tp*>(static_cast<void*>(__c));
    }

  template<typename _Tp>
    void
    __mt_alloc<_Tp>::
    deallocate(pointer __p, size_type __n)
    {
      // Requests larger than _M_max_bytes are handled by operators
      // new/delete directly.
      const size_t __bytes = __n * sizeof(_Tp);
      if (__bytes > _S_options._M_max_bytes || _S_options._M_force_new)
        {
          ::operator delete(__p);
          return;
        }

      // Round up to power of 2 and figure out which bin to use.
      const size_t __which = _S_binmap[__bytes];
      const _Bin_record& __bin = _S_bin[__which];

      char* __c = reinterpret_cast<char*>(__p) - _S_options._M_align;
      _Block_record* __block = reinterpret_cast<_Block_record*>(__c);

#ifdef __GTHREADS
      if (__gthread_active_p())
        {
          // Calculate the number of records to remove from our freelist:
          // in order to avoid too much contention we wait until the
          // number of records is "high enough".
          const size_t __thread_id = _S_get_thread_id();

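          // Worked example, assuming the default _M_freelist_headroom
          // of 10 and _S_bin_size == 5: in the largest bin (__which ==
          // 4), with 300 free and 50 used blocks on this thread's
          // list, __remove is 300 * 10 - 50 == 2950. That exceeds both
          // 100 * (5 - 4) * 10 == 1000 and 300, so 2950 / 10 == 295
          // blocks are handed back to global pool 0 below.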
          long __remove = ((__bin._M_free[__thread_id]
                            * _S_options._M_freelist_headroom)
                           - __bin._M_used[__thread_id]);
          if (__remove > static_cast<long>(100 * (_S_bin_size - __which)
                                           * _S_options._M_freelist_headroom)
              && __remove > static_cast<long>(__bin._M_free[__thread_id]))
            {
              _Block_record* __tmp = __bin._M_first[__thread_id];
              _Block_record* __first = __tmp;
              __remove /= _S_options._M_freelist_headroom;
              const long __removed = __remove;
              --__remove;
              while (__remove-- > 0)
                __tmp = __tmp->_M_next;
              __bin._M_first[__thread_id] = __tmp->_M_next;
              __bin._M_free[__thread_id] -= __removed;

              __gthread_mutex_lock(__bin._M_mutex);
              __tmp->_M_next = __bin._M_first[0];
              __bin._M_first[0] = __first;
              __bin._M_free[0] += __removed;
              __gthread_mutex_unlock(__bin._M_mutex);
            }

          // Return this block to our list and update counters and
          // owner id as needed.
          --__bin._M_used[__block->_M_thread_id];

          __block->_M_next = __bin._M_first[__thread_id];
          __bin._M_first[__thread_id] = __block;

          ++__bin._M_free[__thread_id];
        }
      else
#endif
        {
          // Single threaded application - return to global pool.
          __block->_M_next = __bin._M_first[0];
          __bin._M_first[0] = __block;
        }
    }

  template<typename _Tp>
    void
    __mt_alloc<_Tp>::
    _S_initialize()
    {
      if (_S_options._M_force_new)
        return;

      // Calculate the number of bins required based on _M_max_bytes.
      // _S_bin_size is statically-initialized to one.
      size_t __bin_size = _S_options._M_min_bin;
      while (_S_options._M_max_bytes > __bin_size)
        {
          __bin_size <<= 1;
          ++_S_bin_size;
        }

      // Setup the bin map for quick lookup of the relevant bin.
      const size_t __j = (_S_options._M_max_bytes + 1) * sizeof(_Binmap_type);
      _S_binmap = static_cast<_Binmap_type*>(::operator new(__j));

      _Binmap_type* __bp = _S_binmap;
      _Binmap_type __bin_max = _S_options._M_min_bin;
      _Binmap_type __bint = 0;
      for (_Binmap_type __ct = 0; __ct <= _S_options._M_max_bytes; ++__ct)
        {
          if (__ct > __bin_max)
            {
              __bin_max <<= 1;
              ++__bint;
            }
          *__bp++ = __bint;
        }
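
      // With the default options this gives _S_binmap[1..8] == 0,
      // _S_binmap[9..16] == 1 and so on, up to _S_binmap[65..128] == 4;
      // e.g. a 24 byte request in allocate() is served from bin 2, the
      // 32 byte bin.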

      // Initialize _S_bin and its members.
      void* __v = ::operator new(sizeof(_Bin_record) * _S_bin_size);
      _S_bin = static_cast<_Bin_record*>(__v);

      // If __gthread_active_p() create and initialize the list of
      // free thread ids. Single threaded applications use thread id 0
      // directly and have no need for this.
#ifdef __GTHREADS
      if (__gthread_active_p())
        {
          const size_t __k = sizeof(_Thread_record) * _S_options._M_max_threads;
          __v = ::operator new(__k);
          _S_thread_freelist_first = static_cast<_Thread_record*>(__v);

          // NOTE! The first assignable thread id is 1 since the
          // global pool uses id 0.
          size_t __i;
          for (__i = 1; __i < _S_options._M_max_threads; ++__i)
            {
              _Thread_record& __tr = _S_thread_freelist_first[__i - 1];
              __tr._M_next = &_S_thread_freelist_first[__i];
              __tr._M_id = __i;
            }

          // Set last record.
          _S_thread_freelist_first[__i - 1]._M_next = NULL;
          _S_thread_freelist_first[__i - 1]._M_id = __i;

          // Make sure this is initialized.
#ifndef __GTHREAD_MUTEX_INIT
          __GTHREAD_MUTEX_INIT_FUNCTION(&_S_thread_freelist_mutex);
#endif
          // Initialize per thread key to hold pointer to
          // _S_thread_freelist.
          __gthread_key_create(&_S_thread_key, _S_destroy_thread_key);

          const size_t __max_threads = _S_options._M_max_threads + 1;
          for (size_t __n = 0; __n < _S_bin_size; ++__n)
            {
              _Bin_record& __bin = _S_bin[__n];
              __v = ::operator new(sizeof(_Block_record*) * __max_threads);
              __bin._M_first = static_cast<_Block_record**>(__v);

              __v = ::operator new(sizeof(size_t) * __max_threads);
              __bin._M_free = static_cast<size_t*>(__v);

              __v = ::operator new(sizeof(size_t) * __max_threads);
              __bin._M_used = static_cast<size_t*>(__v);

              __v = ::operator new(sizeof(__gthread_mutex_t));
              __bin._M_mutex = static_cast<__gthread_mutex_t*>(__v);

#ifdef __GTHREAD_MUTEX_INIT
              {
                // Do not copy a POSIX/gthr mutex once in use.
                __gthread_mutex_t __tmp = __GTHREAD_MUTEX_INIT;
                *__bin._M_mutex = __tmp;
              }
#else
              { __GTHREAD_MUTEX_INIT_FUNCTION(__bin._M_mutex); }
#endif

              for (size_t __threadn = 0; __threadn < __max_threads;
                   ++__threadn)
                {
                  __bin._M_first[__threadn] = NULL;
                  __bin._M_free[__threadn] = 0;
                  __bin._M_used[__threadn] = 0;
                }
            }
        }
      else
#endif
        for (size_t __n = 0; __n < _S_bin_size; ++__n)
          {
            _Bin_record& __bin = _S_bin[__n];
            __v = ::operator new(sizeof(_Block_record*));
            __bin._M_first = static_cast<_Block_record**>(__v);
            __bin._M_first[0] = NULL;
          }

      _S_init = true;
    }


  template<typename _Tp>
    size_t
    __mt_alloc<_Tp>::
    _S_get_thread_id()
    {
#ifdef __GTHREADS
      // If we have thread support and it's active we check the thread
      // key value and return its id, or, if it's not set, we take the
      // first record from _S_thread_freelist, set the key and return
      // its id.
      if (__gthread_active_p())
        {
          _Thread_record* __freelist_pos =
            static_cast<_Thread_record*>(__gthread_getspecific(_S_thread_key));
          if (__freelist_pos == NULL)
            {
              // Since _S_options._M_max_threads must be larger than
              // the theoretical max number of threads of the OS, the
              // list can never be empty.
              __gthread_mutex_lock(&_S_thread_freelist_mutex);
              __freelist_pos = _S_thread_freelist_first;
              _S_thread_freelist_first = _S_thread_freelist_first->_M_next;
              __gthread_mutex_unlock(&_S_thread_freelist_mutex);

              __gthread_setspecific(_S_thread_key,
                                    static_cast<void*>(__freelist_pos));
            }
          return __freelist_pos->_M_id;
        }
#endif
      // Otherwise (no thread support or inactive) all requests are
      // served from the global pool 0.
      return 0;
    }

#ifdef __GTHREADS
  template<typename _Tp>
    void
    __mt_alloc<_Tp>::
    _S_destroy_thread_key(void* __freelist_pos)
    {
      // Return this thread id record to front of thread_freelist.
      __gthread_mutex_lock(&_S_thread_freelist_mutex);
      _Thread_record* __tr = static_cast<_Thread_record*>(__freelist_pos);
      __tr->_M_next = _S_thread_freelist_first;
      _S_thread_freelist_first = __tr;
      __gthread_mutex_unlock(&_S_thread_freelist_mutex);
    }
#endif

  template<typename _Tp>
    inline bool
    operator==(const __mt_alloc<_Tp>&, const __mt_alloc<_Tp>&)
    { return true; }

  template<typename _Tp>
    inline bool
    operator!=(const __mt_alloc<_Tp>&, const __mt_alloc<_Tp>&)
    { return false; }

  template<typename _Tp>
    bool __mt_alloc<_Tp>::_S_init = false;

  template<typename _Tp>
    typename __mt_alloc<_Tp>::_Tune __mt_alloc<_Tp>::_S_options;

  template<typename _Tp>
    typename __mt_alloc<_Tp>::_Binmap_type* __mt_alloc<_Tp>::_S_binmap;

  template<typename _Tp>
    typename __mt_alloc<_Tp>::_Bin_record* volatile __mt_alloc<_Tp>::_S_bin;

  template<typename _Tp>
    size_t __mt_alloc<_Tp>::_S_bin_size = 1;

  // Actual initialization in _S_initialize().
#ifdef __GTHREADS
  template<typename _Tp>
    __gthread_once_t __mt_alloc<_Tp>::_S_once = __GTHREAD_ONCE_INIT;

  template<typename _Tp>
    typename __mt_alloc<_Tp>::_Thread_record*
    volatile __mt_alloc<_Tp>::_S_thread_freelist_first = NULL;

  template<typename _Tp>
    __gthread_key_t __mt_alloc<_Tp>::_S_thread_key;

  template<typename _Tp>
    __gthread_mutex_t
#ifdef __GTHREAD_MUTEX_INIT
    __mt_alloc<_Tp>::_S_thread_freelist_mutex = __GTHREAD_MUTEX_INIT;
#else
    __mt_alloc<_Tp>::_S_thread_freelist_mutex;
#endif
#endif
} // namespace __gnu_cxx

#endif