// -*- C++ -*- header.

// Copyright (C) 2008-2020 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.
/** @file bits/atomic_base.h
 *  This is an internal header file, included by other library headers.
 *  Do not attempt to use it directly. @headername{atomic}
 */

#ifndef _GLIBCXX_ATOMIC_BASE_H
#define _GLIBCXX_ATOMIC_BASE_H 1

#pragma GCC system_header

#include <bits/c++config.h>
#include <stdint.h>
#include <bits/atomic_lockfree_defines.h>
#include <bits/move.h>

#ifndef _GLIBCXX_ALWAYS_INLINE
#define _GLIBCXX_ALWAYS_INLINE inline __attribute__((__always_inline__))
#endif
namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

  /**
   * @defgroup atomics Atomics
   *
   * Components for performing atomic operations.
   * @{
   */
  /// Enumeration for memory_order
#if __cplusplus > 201703L
  enum class memory_order : int
    {
      relaxed,
      consume,
      acquire,
      release,
      acq_rel,
      seq_cst
    };

  inline constexpr memory_order memory_order_relaxed = memory_order::relaxed;
  inline constexpr memory_order memory_order_consume = memory_order::consume;
  inline constexpr memory_order memory_order_acquire = memory_order::acquire;
  inline constexpr memory_order memory_order_release = memory_order::release;
  inline constexpr memory_order memory_order_acq_rel = memory_order::acq_rel;
  inline constexpr memory_order memory_order_seq_cst = memory_order::seq_cst;
#else
  typedef enum memory_order
    {
      memory_order_relaxed,
      memory_order_consume,
      memory_order_acquire,
      memory_order_release,
      memory_order_acq_rel,
      memory_order_seq_cst
    } memory_order;
#endif

  enum __memory_order_modifier
    {
      __memory_order_mask          = 0x0ffff,
      __memory_order_modifier_mask = 0xffff0000,
      __memory_order_hle_acquire   = 0x10000,
      __memory_order_hle_release   = 0x20000
    };
  constexpr memory_order
  operator|(memory_order __m, __memory_order_modifier __mod)
  {
    return memory_order(int(__m) | int(__mod));
  }

  constexpr memory_order
  operator&(memory_order __m, __memory_order_modifier __mod)
  {
    return memory_order(int(__m) & int(__mod));
  }

  // Drop release ordering as per [atomics.types.operations.req]/21
  constexpr memory_order
  __cmpexch_failure_order2(memory_order __m) noexcept
  {
    return __m == memory_order_acq_rel ? memory_order_acquire
      : __m == memory_order_release ? memory_order_relaxed : __m;
  }

  constexpr memory_order
  __cmpexch_failure_order(memory_order __m) noexcept
  {
    return memory_order(__cmpexch_failure_order2(__m & __memory_order_mask)
      | __memory_order_modifier(__m & __memory_order_modifier_mask));
  }
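  // Example (illustrative only, not part of the original header): the failure
  // order derived above drops any release component of the success order,
  // e.g. acq_rel -> acquire and release -> relaxed.  A caller-side sketch
  // using the public std::atomic API from <atomic>:
  //
  //   std::atomic<int> a{0};
  //   int expected = 0;
  //   // Success order acq_rel; the single-order overload uses the acquire
  //   // failure order produced by __cmpexch_failure_order for the reload.
  //   a.compare_exchange_strong(expected, 1, std::memory_order_acq_rel);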
  _GLIBCXX_ALWAYS_INLINE void
  atomic_thread_fence(memory_order __m) noexcept
  { __atomic_thread_fence(int(__m)); }

  _GLIBCXX_ALWAYS_INLINE void
  atomic_signal_fence(memory_order __m) noexcept
  { __atomic_signal_fence(int(__m)); }
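  // Example (illustrative only): a release fence before a relaxed store pairs
  // with an acquire fence after a relaxed load that observes that store,
  // ordering the surrounding non-atomic accesses.  Assumes the public
  // std::atomic API from <atomic> and a shared `flag`/`data` pair:
  //
  //   // producer thread:
  //   data = 42;                                            // non-atomic write
  //   std::atomic_thread_fence(std::memory_order_release);
  //   flag.store(true, std::memory_order_relaxed);
  //
  //   // consumer thread:
  //   while (!flag.load(std::memory_order_relaxed)) { }
  //   std::atomic_thread_fence(std::memory_order_acquire);
  //   assert(data == 42);           // ordered by the fence/fence pairing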
  /// kill_dependency
  template<typename _Tp>
    inline _Tp
    kill_dependency(_Tp __y) noexcept
    {
      _Tp __ret(__y);
      return __ret;
    }

  // Base types for atomics.
  template<typename _IntTp>
    struct __atomic_base;

#define ATOMIC_VAR_INIT(_VI) { _VI }

  template<typename _Tp>
    struct atomic;

  template<typename _Tp>
    struct atomic<_Tp*>;

  /* The target's "set" value for test-and-set may not be exactly 1.  */
#if __GCC_ATOMIC_TEST_AND_SET_TRUEVAL == 1
  typedef bool __atomic_flag_data_type;
#else
  typedef unsigned char __atomic_flag_data_type;
#endif
  /**
   *  @brief Base type for atomic_flag.
   *
   *  Base type is POD with data, allowing atomic_flag to derive from
   *  it and meet the standard layout type requirement. In addition to
   *  compatibility with a C interface, this allows different
   *  implementations of atomic_flag to use the same atomic operation
   *  functions, via a standard conversion to the __atomic_flag_base
   *  argument.
  */
  _GLIBCXX_BEGIN_EXTERN_C

  struct __atomic_flag_base
  {
    __atomic_flag_data_type _M_i;
  };

  _GLIBCXX_END_EXTERN_C

#define ATOMIC_FLAG_INIT { 0 }
  /// atomic_flag
  struct atomic_flag : public __atomic_flag_base
  {
    atomic_flag() noexcept = default;
    ~atomic_flag() noexcept = default;
    atomic_flag(const atomic_flag&) = delete;
    atomic_flag& operator=(const atomic_flag&) = delete;
    atomic_flag& operator=(const atomic_flag&) volatile = delete;

    // Conversion to ATOMIC_FLAG_INIT.
    constexpr atomic_flag(bool __i) noexcept
      : __atomic_flag_base{ _S_init(__i) }
    { }

    _GLIBCXX_ALWAYS_INLINE bool
    test_and_set(memory_order __m = memory_order_seq_cst) noexcept
    {
      return __atomic_test_and_set (&_M_i, int(__m));
    }

    _GLIBCXX_ALWAYS_INLINE bool
    test_and_set(memory_order __m = memory_order_seq_cst) volatile noexcept
    {
      return __atomic_test_and_set (&_M_i, int(__m));
    }

    _GLIBCXX_ALWAYS_INLINE void
    clear(memory_order __m = memory_order_seq_cst) noexcept
    {
      memory_order __b = __m & __memory_order_mask;
      __glibcxx_assert(__b != memory_order_consume);
      __glibcxx_assert(__b != memory_order_acquire);
      __glibcxx_assert(__b != memory_order_acq_rel);

      __atomic_clear (&_M_i, int(__m));
    }

    _GLIBCXX_ALWAYS_INLINE void
    clear(memory_order __m = memory_order_seq_cst) volatile noexcept
    {
      memory_order __b = __m & __memory_order_mask;
      __glibcxx_assert(__b != memory_order_consume);
      __glibcxx_assert(__b != memory_order_acquire);
      __glibcxx_assert(__b != memory_order_acq_rel);

      __atomic_clear (&_M_i, int(__m));
    }

  private:
    static constexpr __atomic_flag_data_type
    _S_init(bool __i)
    { return __i ? __GCC_ATOMIC_TEST_AND_SET_TRUEVAL : 0; }
  };
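  // Example (illustrative only): atomic_flag as a minimal spinlock, using
  // only the members defined above.
  //
  //   std::atomic_flag lock = ATOMIC_FLAG_INIT;
  //
  //   void critical_section()
  //   {
  //     while (lock.test_and_set(std::memory_order_acquire))
  //       ;                              // spin until the flag was clear
  //     // ... exclusive access ...
  //     lock.clear(std::memory_order_release);
  //   }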
  /// Base class for atomic integrals.
  //
  // For each of the integral types, define atomic_[integral type] struct
  //
  // atomic_bool     bool
  // atomic_char     char
  // atomic_schar    signed char
  // atomic_uchar    unsigned char
  // atomic_short    short
  // atomic_ushort   unsigned short
  // atomic_int      int
  // atomic_uint     unsigned int
  // atomic_long     long
  // atomic_ulong    unsigned long
  // atomic_llong    long long
  // atomic_ullong   unsigned long long
  // atomic_char8_t  char8_t
  // atomic_char16_t char16_t
  // atomic_char32_t char32_t
  // atomic_wchar_t  wchar_t
  //
  // NB: Assuming _ITp is an integral scalar type that is 1, 2, 4, or
  // 8 bytes, since that is what GCC built-in functions for atomic
  // memory access expect.
  template<typename _ITp>
    struct __atomic_base
    {
      using value_type = _ITp;
      using difference_type = value_type;

    private:
      typedef _ITp __int_type;

      static constexpr int _S_alignment =
        sizeof(_ITp) > alignof(_ITp) ? sizeof(_ITp) : alignof(_ITp);

      alignas(_S_alignment) __int_type _M_i;

    public:
      __atomic_base() noexcept = default;
      ~__atomic_base() noexcept = default;
      __atomic_base(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) volatile = delete;

      // Requires __int_type convertible to _M_i.
      constexpr __atomic_base(__int_type __i) noexcept : _M_i (__i) { }

      operator __int_type() const noexcept
      { return load(); }

      operator __int_type() const volatile noexcept
      { return load(); }

      __int_type
      operator=(__int_type __i) noexcept
      {
        store(__i);
        return __i;
      }

      __int_type
      operator=(__int_type __i) volatile noexcept
      {
        store(__i);
        return __i;
      }

      __int_type
      operator++(int) noexcept
      { return fetch_add(1); }

      __int_type
      operator++(int) volatile noexcept
      { return fetch_add(1); }

      __int_type
      operator--(int) noexcept
      { return fetch_sub(1); }

      __int_type
      operator--(int) volatile noexcept
      { return fetch_sub(1); }

      __int_type
      operator++() noexcept
      { return __atomic_add_fetch(&_M_i, 1, int(memory_order_seq_cst)); }

      __int_type
      operator++() volatile noexcept
      { return __atomic_add_fetch(&_M_i, 1, int(memory_order_seq_cst)); }

      __int_type
      operator--() noexcept
      { return __atomic_sub_fetch(&_M_i, 1, int(memory_order_seq_cst)); }

      __int_type
      operator--() volatile noexcept
      { return __atomic_sub_fetch(&_M_i, 1, int(memory_order_seq_cst)); }
      __int_type
      operator+=(__int_type __i) noexcept
      { return __atomic_add_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator+=(__int_type __i) volatile noexcept
      { return __atomic_add_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator-=(__int_type __i) noexcept
      { return __atomic_sub_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator-=(__int_type __i) volatile noexcept
      { return __atomic_sub_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator&=(__int_type __i) noexcept
      { return __atomic_and_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator&=(__int_type __i) volatile noexcept
      { return __atomic_and_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator|=(__int_type __i) noexcept
      { return __atomic_or_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator|=(__int_type __i) volatile noexcept
      { return __atomic_or_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator^=(__int_type __i) noexcept
      { return __atomic_xor_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator^=(__int_type __i) volatile noexcept
      { return __atomic_xor_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      bool
      is_lock_free() const noexcept
      {
        // Use a fake, minimally aligned pointer.
        return __atomic_is_lock_free(sizeof(_M_i),
                                     reinterpret_cast<void *>(-_S_alignment));
      }

      bool
      is_lock_free() const volatile noexcept
      {
        // Use a fake, minimally aligned pointer.
        return __atomic_is_lock_free(sizeof(_M_i),
                                     reinterpret_cast<void *>(-_S_alignment));
      }
      _GLIBCXX_ALWAYS_INLINE void
      store(__int_type __i, memory_order __m = memory_order_seq_cst) noexcept
      {
        memory_order __b = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_acquire);
        __glibcxx_assert(__b != memory_order_acq_rel);
        __glibcxx_assert(__b != memory_order_consume);

        __atomic_store_n(&_M_i, __i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE void
      store(__int_type __i,
            memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        memory_order __b = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_acquire);
        __glibcxx_assert(__b != memory_order_acq_rel);
        __glibcxx_assert(__b != memory_order_consume);

        __atomic_store_n(&_M_i, __i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      load(memory_order __m = memory_order_seq_cst) const noexcept
      {
        memory_order __b = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_release);
        __glibcxx_assert(__b != memory_order_acq_rel);

        return __atomic_load_n(&_M_i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      load(memory_order __m = memory_order_seq_cst) const volatile noexcept
      {
        memory_order __b = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_release);
        __glibcxx_assert(__b != memory_order_acq_rel);

        return __atomic_load_n(&_M_i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      exchange(__int_type __i,
               memory_order __m = memory_order_seq_cst) noexcept
      {
        return __atomic_exchange_n(&_M_i, __i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      exchange(__int_type __i,
               memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        return __atomic_exchange_n(&_M_i, __i, int(__m));
      }
      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                            memory_order __m1, memory_order __m2) noexcept
      {
        memory_order __b2 = __m2 & __memory_order_mask;
        memory_order __b1 = __m1 & __memory_order_mask;
        __glibcxx_assert(__b2 != memory_order_release);
        __glibcxx_assert(__b2 != memory_order_acq_rel);
        __glibcxx_assert(__b2 <= __b1);

        return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                            memory_order __m1,
                            memory_order __m2) volatile noexcept
      {
        memory_order __b2 = __m2 & __memory_order_mask;
        memory_order __b1 = __m1 & __memory_order_mask;
        __glibcxx_assert(__b2 != memory_order_release);
        __glibcxx_assert(__b2 != memory_order_acq_rel);
        __glibcxx_assert(__b2 <= __b1);

        return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                            memory_order __m = memory_order_seq_cst) noexcept
      {
        return compare_exchange_weak(__i1, __i2, __m,
                                     __cmpexch_failure_order(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                   memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        return compare_exchange_weak(__i1, __i2, __m,
                                     __cmpexch_failure_order(__m));
      }
      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                              memory_order __m1, memory_order __m2) noexcept
      {
        memory_order __b2 = __m2 & __memory_order_mask;
        memory_order __b1 = __m1 & __memory_order_mask;
        __glibcxx_assert(__b2 != memory_order_release);
        __glibcxx_assert(__b2 != memory_order_acq_rel);
        __glibcxx_assert(__b2 <= __b1);

        return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                              memory_order __m1,
                              memory_order __m2) volatile noexcept
      {
        memory_order __b2 = __m2 & __memory_order_mask;
        memory_order __b1 = __m1 & __memory_order_mask;

        __glibcxx_assert(__b2 != memory_order_release);
        __glibcxx_assert(__b2 != memory_order_acq_rel);
        __glibcxx_assert(__b2 <= __b1);

        return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                              memory_order __m = memory_order_seq_cst) noexcept
      {
        return compare_exchange_strong(__i1, __i2, __m,
                                       __cmpexch_failure_order(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                 memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        return compare_exchange_strong(__i1, __i2, __m,
                                       __cmpexch_failure_order(__m));
      }
      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_add(__int_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_add(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_add(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_add(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_sub(__int_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_sub(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_sub(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_sub(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_and(__int_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_and(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_and(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_and(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_or(__int_type __i,
               memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_or(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_or(__int_type __i,
               memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_or(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_xor(__int_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_xor(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_xor(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_xor(&_M_i, __i, int(__m)); }
    };
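  // Example (illustrative only): std::atomic<int>, defined in <atomic> on top
  // of this base class, exposes the operations above.
  //
  //   std::atomic<int> counter{0};
  //   counter.fetch_add(1, std::memory_order_relaxed);   // atomic increment
  //   int snapshot = counter.load(std::memory_order_acquire);
  //   int expected = snapshot;
  //   // typical compare_exchange_weak retry loop:
  //   while (!counter.compare_exchange_weak(expected, expected * 2))
  //     ;                        // 'expected' is refreshed on each failure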
  /// Partial specialization for pointer types.
  template<typename _PTp>
    struct __atomic_base<_PTp*>
    {
    private:
      typedef _PTp* __pointer_type;

      __pointer_type _M_p;

      // Factored out to facilitate explicit specialization.
      constexpr ptrdiff_t
      _M_type_size(ptrdiff_t __d) const { return __d * sizeof(_PTp); }

      constexpr ptrdiff_t
      _M_type_size(ptrdiff_t __d) const volatile { return __d * sizeof(_PTp); }

    public:
      __atomic_base() noexcept = default;
      ~__atomic_base() noexcept = default;
      __atomic_base(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) volatile = delete;

      // Requires __pointer_type convertible to _M_p.
      constexpr __atomic_base(__pointer_type __p) noexcept : _M_p (__p) { }

      operator __pointer_type() const noexcept
      { return load(); }

      operator __pointer_type() const volatile noexcept
      { return load(); }

      __pointer_type
      operator=(__pointer_type __p) noexcept
      {
        store(__p);
        return __p;
      }

      __pointer_type
      operator=(__pointer_type __p) volatile noexcept
      {
        store(__p);
        return __p;
      }

      __pointer_type
      operator++(int) noexcept
      { return fetch_add(1); }

      __pointer_type
      operator++(int) volatile noexcept
      { return fetch_add(1); }

      __pointer_type
      operator--(int) noexcept
      { return fetch_sub(1); }

      __pointer_type
      operator--(int) volatile noexcept
      { return fetch_sub(1); }

      __pointer_type
      operator++() noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(1),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator++() volatile noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(1),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator--() noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(1),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator--() volatile noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(1),
                                  int(memory_order_seq_cst)); }
      __pointer_type
      operator+=(ptrdiff_t __d) noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(__d),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator+=(ptrdiff_t __d) volatile noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(__d),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator-=(ptrdiff_t __d) noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(__d),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator-=(ptrdiff_t __d) volatile noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(__d),
                                  int(memory_order_seq_cst)); }

      bool
      is_lock_free() const noexcept
      {
        // Produce a fake, minimally aligned pointer.
        return __atomic_is_lock_free(sizeof(_M_p),
                                     reinterpret_cast<void *>(-__alignof(_M_p)));
      }

      bool
      is_lock_free() const volatile noexcept
      {
        // Produce a fake, minimally aligned pointer.
        return __atomic_is_lock_free(sizeof(_M_p),
                                     reinterpret_cast<void *>(-__alignof(_M_p)));
      }
      _GLIBCXX_ALWAYS_INLINE void
      store(__pointer_type __p,
            memory_order __m = memory_order_seq_cst) noexcept
      {
        memory_order __b = __m & __memory_order_mask;

        __glibcxx_assert(__b != memory_order_acquire);
        __glibcxx_assert(__b != memory_order_acq_rel);
        __glibcxx_assert(__b != memory_order_consume);

        __atomic_store_n(&_M_p, __p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE void
      store(__pointer_type __p,
            memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        memory_order __b = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_acquire);
        __glibcxx_assert(__b != memory_order_acq_rel);
        __glibcxx_assert(__b != memory_order_consume);

        __atomic_store_n(&_M_p, __p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      load(memory_order __m = memory_order_seq_cst) const noexcept
      {
        memory_order __b = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_release);
        __glibcxx_assert(__b != memory_order_acq_rel);

        return __atomic_load_n(&_M_p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      load(memory_order __m = memory_order_seq_cst) const volatile noexcept
      {
        memory_order __b = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_release);
        __glibcxx_assert(__b != memory_order_acq_rel);

        return __atomic_load_n(&_M_p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      exchange(__pointer_type __p,
               memory_order __m = memory_order_seq_cst) noexcept
      {
        return __atomic_exchange_n(&_M_p, __p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      exchange(__pointer_type __p,
               memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        return __atomic_exchange_n(&_M_p, __p, int(__m));
      }
      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
                              memory_order __m1,
                              memory_order __m2) noexcept
      {
        memory_order __b2 = __m2 & __memory_order_mask;
        memory_order __b1 = __m1 & __memory_order_mask;
        __glibcxx_assert(__b2 != memory_order_release);
        __glibcxx_assert(__b2 != memory_order_acq_rel);
        __glibcxx_assert(__b2 <= __b1);

        return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
                              memory_order __m1,
                              memory_order __m2) volatile noexcept
      {
        memory_order __b2 = __m2 & __memory_order_mask;
        memory_order __b1 = __m1 & __memory_order_mask;

        __glibcxx_assert(__b2 != memory_order_release);
        __glibcxx_assert(__b2 != memory_order_acq_rel);
        __glibcxx_assert(__b2 <= __b1);

        return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0,
                                           int(__m1), int(__m2));
      }
      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_add(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_add(&_M_p, _M_type_size(__d), int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_add(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_add(&_M_p, _M_type_size(__d), int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_sub(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_sub(&_M_p, _M_type_size(__d), int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_sub(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_sub(&_M_p, _M_type_size(__d), int(__m)); }
    };
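  // Example (illustrative only): std::atomic<T*>, defined in <atomic> on top
  // of this partial specialization, scales arithmetic by sizeof(T) via the
  // _M_type_size helpers above.
  //
  //   int buf[8] = {};
  //   std::atomic<int*> p{buf};
  //   p.fetch_add(2);            // advances by 2 * sizeof(int) bytes
  //   ++p;                       // now points at buf + 3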
#if __cplusplus > 201703L
  // Implementation details of atomic_ref and atomic<floating-point>.
  namespace __atomic_impl
  {
    // Remove volatile and create a non-deduced context for value arguments.
    template<typename _Tp>
      using _Val = remove_volatile_t<_Tp>;

    // As above, but for difference_type arguments.
    template<typename _Tp>
      using _Diff = conditional_t<is_pointer_v<_Tp>, ptrdiff_t, _Val<_Tp>>;

    template<size_t _Size, size_t _Align>
      _GLIBCXX_ALWAYS_INLINE bool
      is_lock_free() noexcept
      {
        // Produce a fake, minimally aligned pointer.
        return __atomic_is_lock_free(_Size, reinterpret_cast<void *>(-_Align));
      }
    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE void
      store(_Tp* __ptr, _Val<_Tp> __t, memory_order __m) noexcept
      { __atomic_store(__ptr, std::__addressof(__t), int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      load(_Tp* __ptr, memory_order __m) noexcept
      {
        alignas(_Tp) unsigned char __buf[sizeof(_Tp)];
        _Tp* __dest = reinterpret_cast<_Tp*>(__buf);
        __atomic_load(__ptr, __dest, int(__m));
        return *__dest;
      }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      exchange(_Tp* __ptr, _Val<_Tp> __desired, memory_order __m) noexcept
      {
        alignas(_Tp) unsigned char __buf[sizeof(_Tp)];
        _Tp* __dest = reinterpret_cast<_Tp*>(__buf);
        __atomic_exchange(__ptr, std::__addressof(__desired), __dest, int(__m));
        return *__dest;
      }
    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(_Tp* __ptr, _Val<_Tp>& __expected,
                            _Val<_Tp> __desired, memory_order __success,
                            memory_order __failure) noexcept
      {
        return __atomic_compare_exchange(__ptr, std::__addressof(__expected),
                                         std::__addressof(__desired), true,
                                         int(__success), int(__failure));
      }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(_Tp* __ptr, _Val<_Tp>& __expected,
                              _Val<_Tp> __desired, memory_order __success,
                              memory_order __failure) noexcept
      {
        return __atomic_compare_exchange(__ptr, std::__addressof(__expected),
                                         std::__addressof(__desired), false,
                                         int(__success), int(__failure));
      }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_add(_Tp* __ptr, _Diff<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_add(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_sub(_Tp* __ptr, _Diff<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_sub(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_and(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_and(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_or(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_or(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_xor(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_xor(__ptr, __i, int(__m)); }
    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __add_fetch(_Tp* __ptr, _Diff<_Tp> __i) noexcept
      { return __atomic_add_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __sub_fetch(_Tp* __ptr, _Diff<_Tp> __i) noexcept
      { return __atomic_sub_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __and_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
      { return __atomic_and_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __or_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
      { return __atomic_or_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __xor_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
      { return __atomic_xor_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
    template<typename _Tp>
      _Tp
      __fetch_add_flt(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      {
        _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
        _Val<_Tp> __newval = __oldval + __i;
        while (!compare_exchange_weak(__ptr, __oldval, __newval, __m,
                                      memory_order_relaxed))
          __newval = __oldval + __i;
        return __oldval;
      }

    template<typename _Tp>
      _Tp
      __fetch_sub_flt(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      {
        _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
        _Val<_Tp> __newval = __oldval - __i;
        while (!compare_exchange_weak(__ptr, __oldval, __newval, __m,
                                      memory_order_relaxed))
          __newval = __oldval - __i;
        return __oldval;
      }
    template<typename _Tp>
      _Tp
      __add_fetch_flt(_Tp* __ptr, _Val<_Tp> __i) noexcept
      {
        _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
        _Val<_Tp> __newval = __oldval + __i;
        while (!compare_exchange_weak(__ptr, __oldval, __newval,
                                      memory_order_seq_cst,
                                      memory_order_relaxed))
          __newval = __oldval + __i;
        return __newval;
      }

    template<typename _Tp>
      _Tp
      __sub_fetch_flt(_Tp* __ptr, _Val<_Tp> __i) noexcept
      {
        _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
        _Val<_Tp> __newval = __oldval - __i;
        while (!compare_exchange_weak(__ptr, __oldval, __newval,
                                      memory_order_seq_cst,
                                      memory_order_relaxed))
          __newval = __oldval - __i;
        return __newval;
      }
  } // namespace __atomic_impl
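  // Note (illustrative only): the __fetch_*_flt helpers above emulate atomic
  // floating-point read-modify-write with a compare-exchange loop, since there
  // is no floating-point __atomic_fetch_add built-in.  Given a hypothetical
  // std::atomic<double> named `ref`, the pattern is equivalent to:
  //
  //   double old = ref.load(std::memory_order_relaxed);
  //   while (!ref.compare_exchange_weak(old, old + x, order,
  //                                     std::memory_order_relaxed))
  //     { /* 'old' was refreshed with the current value; old + x is redone */ }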
  // base class for atomic<floating-point-type>
  template<typename _Fp>
    struct __atomic_float
    {
      static_assert(is_floating_point_v<_Fp>);

      static constexpr size_t _S_alignment = __alignof__(_Fp);

    public:
      using value_type = _Fp;
      using difference_type = value_type;

      static constexpr bool is_always_lock_free
        = __atomic_always_lock_free(sizeof(_Fp), 0);

      __atomic_float() = default;

      constexpr
      __atomic_float(_Fp __t) : _M_fp(__t)
      { }

      __atomic_float(const __atomic_float&) = delete;
      __atomic_float& operator=(const __atomic_float&) = delete;
      __atomic_float& operator=(const __atomic_float&) volatile = delete;

      _Fp
      operator=(_Fp __t) volatile noexcept
      {
        this->store(__t);
        return __t;
      }

      _Fp
      operator=(_Fp __t) noexcept
      {
        this->store(__t);
        return __t;
      }

      bool
      is_lock_free() const volatile noexcept
      { return __atomic_impl::is_lock_free<sizeof(_Fp), _S_alignment>(); }

      bool
      is_lock_free() const noexcept
      { return __atomic_impl::is_lock_free<sizeof(_Fp), _S_alignment>(); }
      void
      store(_Fp __t, memory_order __m = memory_order_seq_cst) volatile noexcept
      { __atomic_impl::store(&_M_fp, __t, __m); }

      void
      store(_Fp __t, memory_order __m = memory_order_seq_cst) noexcept
      { __atomic_impl::store(&_M_fp, __t, __m); }

      _Fp
      load(memory_order __m = memory_order_seq_cst) const volatile noexcept
      { return __atomic_impl::load(&_M_fp, __m); }

      _Fp
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(&_M_fp, __m); }

      operator _Fp() const volatile noexcept { return this->load(); }
      operator _Fp() const noexcept { return this->load(); }

      _Fp
      exchange(_Fp __desired,
               memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_impl::exchange(&_M_fp, __desired, __m); }

      _Fp
      exchange(_Fp __desired,
               memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_impl::exchange(&_M_fp, __desired, __m); }
      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
                            memory_order __success,
                            memory_order __failure) noexcept
      {
        return __atomic_impl::compare_exchange_weak(&_M_fp,
                                                    __expected, __desired,
                                                    __success, __failure);
      }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
                            memory_order __success,
                            memory_order __failure) volatile noexcept
      {
        return __atomic_impl::compare_exchange_weak(&_M_fp,
                                                    __expected, __desired,
                                                    __success, __failure);
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
                              memory_order __success,
                              memory_order __failure) noexcept
      {
        return __atomic_impl::compare_exchange_strong(&_M_fp,
                                                      __expected, __desired,
                                                      __success, __failure);
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
                              memory_order __success,
                              memory_order __failure) volatile noexcept
      {
        return __atomic_impl::compare_exchange_strong(&_M_fp,
                                                      __expected, __desired,
                                                      __success, __failure);
      }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
                            memory_order __order = memory_order_seq_cst)
      noexcept
      {
        return compare_exchange_weak(__expected, __desired, __order,
                                     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
                            memory_order __order = memory_order_seq_cst)
      volatile noexcept
      {
        return compare_exchange_weak(__expected, __desired, __order,
                                     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
                              memory_order __order = memory_order_seq_cst)
      noexcept
      {
        return compare_exchange_strong(__expected, __desired, __order,
                                       __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
                              memory_order __order = memory_order_seq_cst)
      volatile noexcept
      {
        return compare_exchange_strong(__expected, __desired, __order,
                                       __cmpexch_failure_order(__order));
      }
      value_type
      fetch_add(value_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_impl::__fetch_add_flt(&_M_fp, __i, __m); }

      value_type
      fetch_add(value_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_impl::__fetch_add_flt(&_M_fp, __i, __m); }

      value_type
      fetch_sub(value_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_impl::__fetch_sub_flt(&_M_fp, __i, __m); }

      value_type
      fetch_sub(value_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_impl::__fetch_sub_flt(&_M_fp, __i, __m); }

      value_type
      operator+=(value_type __i) noexcept
      { return __atomic_impl::__add_fetch_flt(&_M_fp, __i); }

      value_type
      operator+=(value_type __i) volatile noexcept
      { return __atomic_impl::__add_fetch_flt(&_M_fp, __i); }

      value_type
      operator-=(value_type __i) noexcept
      { return __atomic_impl::__sub_fetch_flt(&_M_fp, __i); }

      value_type
      operator-=(value_type __i) volatile noexcept
      { return __atomic_impl::__sub_fetch_flt(&_M_fp, __i); }

    private:
      alignas(_S_alignment) _Fp _M_fp;
    };
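  // Example (illustrative only): std::atomic<double>, defined in <atomic> on
  // top of __atomic_float, supports fetch_add/fetch_sub and +=/-= but no
  // bitwise operations.
  //
  //   std::atomic<double> total{0.0};
  //   total.fetch_add(1.5);   // CAS loop via __atomic_impl::__fetch_add_flt
  //   total += 2.5;           // likewise __atomic_impl::__add_fetch_flt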
  template<typename _Tp,
           bool = is_integral_v<_Tp>, bool = is_floating_point_v<_Tp>>
    struct __atomic_ref;
  // base class for non-integral, non-floating-point, non-pointer types
  template<typename _Tp>
    struct __atomic_ref<_Tp, false, false>
    {
      static_assert(is_trivially_copyable_v<_Tp>);

      // 1/2/4/8/16-byte types must be aligned to at least their size.
      static constexpr int _S_min_alignment
        = (sizeof(_Tp) & (sizeof(_Tp) - 1)) || sizeof(_Tp) > 16
        ? 0 : sizeof(_Tp);

    public:
      using value_type = _Tp;

      static constexpr bool is_always_lock_free
        = __atomic_always_lock_free(sizeof(_Tp), 0);

      static constexpr size_t required_alignment
        = _S_min_alignment > alignof(_Tp) ? _S_min_alignment : alignof(_Tp);

      __atomic_ref() = delete;
      __atomic_ref& operator=(const __atomic_ref&) = delete;

      explicit
      __atomic_ref(_Tp& __t) : _M_ptr(std::__addressof(__t))
      { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); }

      __atomic_ref(const __atomic_ref&) noexcept = default;

      _Tp
      operator=(_Tp __t) const noexcept
      {
        this->store(__t);
        return __t;
      }

      operator _Tp() const noexcept { return this->load(); }

      bool
      is_lock_free() const noexcept
      { return __atomic_impl::is_lock_free<sizeof(_Tp), required_alignment>(); }
      void
      store(_Tp __t, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::store(_M_ptr, __t, __m); }

      _Tp
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(_M_ptr, __m); }

      _Tp
      exchange(_Tp __desired, memory_order __m = memory_order_seq_cst)
      const noexcept
      { return __atomic_impl::exchange(_M_ptr, __desired, __m); }

      bool
      compare_exchange_weak(_Tp& __expected, _Tp __desired,
                            memory_order __success,
                            memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_weak(_M_ptr,
                                                    __expected, __desired,
                                                    __success, __failure);
      }

      bool
      compare_exchange_strong(_Tp& __expected, _Tp __desired,
                              memory_order __success,
                              memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_strong(_M_ptr,
                                                      __expected, __desired,
                                                      __success, __failure);
      }

      bool
      compare_exchange_weak(_Tp& __expected, _Tp __desired,
                            memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_weak(__expected, __desired, __order,
                                     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Tp& __expected, _Tp __desired,
                              memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_strong(__expected, __desired, __order,
                                       __cmpexch_failure_order(__order));
      }

    private:
      _Tp* _M_ptr;
    };
  // base class for atomic_ref<integral-type>
  template<typename _Tp>
    struct __atomic_ref<_Tp, true, false>
    {
      static_assert(is_integral_v<_Tp>);

    public:
      using value_type = _Tp;
      using difference_type = value_type;

      static constexpr bool is_always_lock_free
        = __atomic_always_lock_free(sizeof(_Tp), 0);

      static constexpr size_t required_alignment
        = sizeof(_Tp) > alignof(_Tp) ? sizeof(_Tp) : alignof(_Tp);

      __atomic_ref() = delete;
      __atomic_ref& operator=(const __atomic_ref&) = delete;

      explicit
      __atomic_ref(_Tp& __t) : _M_ptr(&__t)
      { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); }

      __atomic_ref(const __atomic_ref&) noexcept = default;

      _Tp
      operator=(_Tp __t) const noexcept
      {
        this->store(__t);
        return __t;
      }

      operator _Tp() const noexcept { return this->load(); }

      bool
      is_lock_free() const noexcept
      {
        return __atomic_impl::is_lock_free<sizeof(_Tp), required_alignment>();
      }
      void
      store(_Tp __t, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::store(_M_ptr, __t, __m); }

      _Tp
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(_M_ptr, __m); }

      _Tp
      exchange(_Tp __desired,
               memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::exchange(_M_ptr, __desired, __m); }

      bool
      compare_exchange_weak(_Tp& __expected, _Tp __desired,
                            memory_order __success,
                            memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_weak(_M_ptr,
                                                    __expected, __desired,
                                                    __success, __failure);
      }

      bool
      compare_exchange_strong(_Tp& __expected, _Tp __desired,
                              memory_order __success,
                              memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_strong(_M_ptr,
                                                      __expected, __desired,
                                                      __success, __failure);
      }

      bool
      compare_exchange_weak(_Tp& __expected, _Tp __desired,
                            memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_weak(__expected, __desired, __order,
                                     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Tp& __expected, _Tp __desired,
                              memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_strong(__expected, __desired, __order,
                                       __cmpexch_failure_order(__order));
      }
      value_type
      fetch_add(value_type __i,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_add(_M_ptr, __i, __m); }

      value_type
      fetch_sub(value_type __i,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_sub(_M_ptr, __i, __m); }

      value_type
      fetch_and(value_type __i,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_and(_M_ptr, __i, __m); }

      value_type
      fetch_or(value_type __i,
               memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_or(_M_ptr, __i, __m); }

      value_type
      fetch_xor(value_type __i,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_xor(_M_ptr, __i, __m); }

      _GLIBCXX_ALWAYS_INLINE value_type
      operator++(int) const noexcept
      { return fetch_add(1); }

      _GLIBCXX_ALWAYS_INLINE value_type
      operator--(int) const noexcept
      { return fetch_sub(1); }

      value_type
      operator++() const noexcept
      { return __atomic_impl::__add_fetch(_M_ptr, value_type(1)); }

      value_type
      operator--() const noexcept
      { return __atomic_impl::__sub_fetch(_M_ptr, value_type(1)); }

      value_type
      operator+=(value_type __i) const noexcept
      { return __atomic_impl::__add_fetch(_M_ptr, __i); }

      value_type
      operator-=(value_type __i) const noexcept
      { return __atomic_impl::__sub_fetch(_M_ptr, __i); }

      value_type
      operator&=(value_type __i) const noexcept
      { return __atomic_impl::__and_fetch(_M_ptr, __i); }

      value_type
      operator|=(value_type __i) const noexcept
      { return __atomic_impl::__or_fetch(_M_ptr, __i); }

      value_type
      operator^=(value_type __i) const noexcept
      { return __atomic_impl::__xor_fetch(_M_ptr, __i); }

    private:
      _Tp* _M_ptr;
    };
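  // Example (illustrative only): std::atomic_ref, defined in <atomic> on top
  // of these __atomic_ref base classes, performs atomic operations on an
  // object it does not own.
  //
  //   int counter = 0;                     // plain, suitably aligned int
  //   {
  //     std::atomic_ref<int> ref(counter);
  //     ref.fetch_add(1, std::memory_order_relaxed);
  //   }
  //   // once no atomic_ref refers to it, 'counter' may be used normally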
  // base class for atomic_ref<floating-point-type>
  template<typename _Fp>
    struct __atomic_ref<_Fp, false, true>
    {
      static_assert(is_floating_point_v<_Fp>);

    public:
      using value_type = _Fp;
      using difference_type = value_type;

      static constexpr bool is_always_lock_free
        = __atomic_always_lock_free(sizeof(_Fp), 0);

      static constexpr size_t required_alignment = __alignof__(_Fp);

      __atomic_ref() = delete;
      __atomic_ref& operator=(const __atomic_ref&) = delete;

      explicit
      __atomic_ref(_Fp& __t) : _M_ptr(&__t)
      { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); }

      __atomic_ref(const __atomic_ref&) noexcept = default;

      _Fp
      operator=(_Fp __t) const noexcept
      {
        this->store(__t);
        return __t;
      }

      operator _Fp() const noexcept { return this->load(); }

      bool
      is_lock_free() const noexcept
      {
        return __atomic_impl::is_lock_free<sizeof(_Fp), required_alignment>();
      }
      void
      store(_Fp __t, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::store(_M_ptr, __t, __m); }

      _Fp
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(_M_ptr, __m); }

      _Fp
      exchange(_Fp __desired,
               memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::exchange(_M_ptr, __desired, __m); }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
                            memory_order __success,
                            memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_weak(_M_ptr,
                                                    __expected, __desired,
                                                    __success, __failure);
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
                              memory_order __success,
                              memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_strong(_M_ptr,
                                                      __expected, __desired,
                                                      __success, __failure);
      }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
                            memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_weak(__expected, __desired, __order,
                                     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
                              memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_strong(__expected, __desired, __order,
                                       __cmpexch_failure_order(__order));
      }
      value_type
      fetch_add(value_type __i,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::__fetch_add_flt(_M_ptr, __i, __m); }

      value_type
      fetch_sub(value_type __i,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::__fetch_sub_flt(_M_ptr, __i, __m); }

      value_type
      operator+=(value_type __i) const noexcept
      { return __atomic_impl::__add_fetch_flt(_M_ptr, __i); }

      value_type
      operator-=(value_type __i) const noexcept
      { return __atomic_impl::__sub_fetch_flt(_M_ptr, __i); }

    private:
      _Fp* _M_ptr;
    };
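  // Example (illustrative only): this specialization gives a hypothetical
  // std::atomic_ref<double> fetch_add/fetch_sub and +=/-= but, like
  // atomic<double>, no bitwise operations.
  //
  //   double sum = 0.0;
  //   std::atomic_ref<double>(sum) += 0.5;   // atomic update of plain 'sum'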
  // base class for atomic_ref<pointer-type>
  template<typename _Tp>
    struct __atomic_ref<_Tp*, false, false>
    {
    public:
      using value_type = _Tp*;
      using difference_type = ptrdiff_t;

      static constexpr bool is_always_lock_free = ATOMIC_POINTER_LOCK_FREE == 2;

      static constexpr size_t required_alignment = __alignof__(_Tp*);

      __atomic_ref() = delete;
      __atomic_ref& operator=(const __atomic_ref&) = delete;

      explicit
      __atomic_ref(_Tp*& __t) : _M_ptr(std::__addressof(__t))
      { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); }

      __atomic_ref(const __atomic_ref&) noexcept = default;

      _Tp*
      operator=(_Tp* __t) const noexcept
      {
        this->store(__t);
        return __t;
      }

      operator _Tp*() const noexcept { return this->load(); }

      bool
      is_lock_free() const noexcept
      {
        return __atomic_impl::is_lock_free<sizeof(_Tp*), required_alignment>();
      }
      void
      store(_Tp* __t, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::store(_M_ptr, __t, __m); }

      _Tp*
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(_M_ptr, __m); }

      _Tp*
      exchange(_Tp* __desired,
               memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::exchange(_M_ptr, __desired, __m); }

      bool
      compare_exchange_weak(_Tp*& __expected, _Tp* __desired,
                            memory_order __success,
                            memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_weak(_M_ptr,
                                                    __expected, __desired,
                                                    __success, __failure);
      }

      bool
      compare_exchange_strong(_Tp*& __expected, _Tp* __desired,
                              memory_order __success,
                              memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_strong(_M_ptr,
                                                      __expected, __desired,
                                                      __success, __failure);
      }

      bool
      compare_exchange_weak(_Tp*& __expected, _Tp* __desired,
                            memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_weak(__expected, __desired, __order,
                                     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Tp*& __expected, _Tp* __desired,
                              memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_strong(__expected, __desired, __order,
                                       __cmpexch_failure_order(__order));
      }
      _GLIBCXX_ALWAYS_INLINE value_type
      fetch_add(difference_type __d,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_add(_M_ptr, _S_type_size(__d), __m); }

      _GLIBCXX_ALWAYS_INLINE value_type
      fetch_sub(difference_type __d,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_sub(_M_ptr, _S_type_size(__d), __m); }

      value_type
      operator++(int) const noexcept
      { return fetch_add(1); }

      value_type
      operator--(int) const noexcept
      { return fetch_sub(1); }

      value_type
      operator++() const noexcept
      {
        return __atomic_impl::__add_fetch(_M_ptr, _S_type_size(1));
      }

      value_type
      operator--() const noexcept
      {
        return __atomic_impl::__sub_fetch(_M_ptr, _S_type_size(1));
      }

      value_type
      operator+=(difference_type __d) const noexcept
      {
        return __atomic_impl::__add_fetch(_M_ptr, _S_type_size(__d));
      }

      value_type
      operator-=(difference_type __d) const noexcept
      {
        return __atomic_impl::__sub_fetch(_M_ptr, _S_type_size(__d));
      }

    private:
      static constexpr ptrdiff_t
      _S_type_size(ptrdiff_t __d) noexcept
      {
        static_assert(is_object_v<_Tp>);
        return __d * sizeof(_Tp);
      }

      _Tp** _M_ptr;
    };

#endif // C++2a

  // @} group atomics

_GLIBCXX_END_NAMESPACE_VERSION
} // namespace std

#endif // _GLIBCXX_ATOMIC_BASE_H