Define std::atomic_ref and std::atomic<floating-point> for C++20
author    Jonathan Wakely <jwakely@redhat.com>
          Thu, 11 Jul 2019 19:43:25 +0000 (20:43 +0100)
committer Jonathan Wakely <redi@gcc.gnu.org>
          Thu, 11 Jul 2019 19:43:25 +0000 (20:43 +0100)
This adds the new atomic types from C++2a, as proposed by P0019 and
P0020. To reduce duplication, the calls to the compiler's atomic
built-ins are wrapped in new functions in the __atomic_impl namespace.
These functions are currently only used by std::atomic<floating-point>
and std::atomic_ref but could also be used for all other specializations
of std::atomic.
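
For readers unfamiliar with the new interfaces, here is a minimal usage
sketch (not part of the commit) of what the P0019/P0020 additions enable.
It assumes a compiler and library with this change applied, built with
-std=gnu++2a:

    #include <atomic>
    #include <cassert>

    int main()
    {
      // Floating-point specialization: fetch_add/fetch_sub now exist.
      std::atomic<float> f(1.0f);
      f.fetch_add(0.5f);            // 1.0f + 0.5f is exactly representable
      assert(f.load() == 1.5f);

      // atomic_ref provides atomic operations on a plain, non-atomic object.
      int i = 0;
      {
        std::atomic_ref<int> r(i);  // while r exists, access i only through atomic_refs
        r.fetch_add(2);
        r += 3;
      }
      assert(i == 5);

      // Class template argument deduction also works: std::atomic_ref a(i);
    }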

* include/bits/atomic_base.h (__atomic_impl): New namespace for
wrappers around atomic built-ins.
(__atomic_float, __atomic_ref): New class templates for use as base
classes.
* include/std/atomic (atomic<float>, atomic<double>)
(atomic<long double>): New explicit specializations.
(atomic_ref): New class template.
(__cpp_lib_atomic_ref): Define.
* include/std/version (__cpp_lib_atomic_ref): Define.
* testsuite/29_atomics/atomic/60695.cc: Adjust dg-error.
* testsuite/29_atomics/atomic_float/1.cc: New test.
* testsuite/29_atomics/atomic_float/requirements.cc: New test.
* testsuite/29_atomics/atomic_ref/deduction.cc: New test.
* testsuite/29_atomics/atomic_ref/float.cc: New test.
* testsuite/29_atomics/atomic_ref/generic.cc: New test.
* testsuite/29_atomics/atomic_ref/integral.cc: New test.
* testsuite/29_atomics/atomic_ref/pointer.cc: New test.
* testsuite/29_atomics/atomic_ref/requirements.cc: New test.

From-SVN: r273420

13 files changed:
libstdc++-v3/ChangeLog
libstdc++-v3/include/bits/atomic_base.h
libstdc++-v3/include/std/atomic
libstdc++-v3/include/std/version
libstdc++-v3/testsuite/29_atomics/atomic/60695.cc
libstdc++-v3/testsuite/29_atomics/atomic_float/1.cc [new file with mode: 0644]
libstdc++-v3/testsuite/29_atomics/atomic_float/requirements.cc [new file with mode: 0644]
libstdc++-v3/testsuite/29_atomics/atomic_ref/deduction.cc [new file with mode: 0644]
libstdc++-v3/testsuite/29_atomics/atomic_ref/float.cc [new file with mode: 0644]
libstdc++-v3/testsuite/29_atomics/atomic_ref/generic.cc [new file with mode: 0644]
libstdc++-v3/testsuite/29_atomics/atomic_ref/integral.cc [new file with mode: 0644]
libstdc++-v3/testsuite/29_atomics/atomic_ref/pointer.cc [new file with mode: 0644]
libstdc++-v3/testsuite/29_atomics/atomic_ref/requirements.cc [new file with mode: 0644]

index f595f9d664e9a68f2f84a36f256a808468a7dfd2..688615f48c9285129c81cd62692469f57ff9ff25 100644
@@ -1,3 +1,24 @@
+2019-07-11  Jonathan Wakely  <jwakely@redhat.com>
+
+       * include/bits/atomic_base.h (__atomic_impl): New namespace for
+       wrappers around atomic built-ins.
+       (__atomic_float, __atomic_ref): New class templates for use as base
+       classes.
+       * include/std/atomic (atomic<float>, atomic<double>)
+       (atomic<long double>): New explicit specializations.
+       (atomic_ref): New class template.
+       (__cpp_lib_atomic_ref): Define.
+       * include/std/version (__cpp_lib_atomic_ref): Define.
+       * testsuite/29_atomics/atomic/60695.cc: Adjust dg-error.
+       * testsuite/29_atomics/atomic_float/1.cc: New test.
+       * testsuite/29_atomics/atomic_float/requirements.cc: New test.
+       * testsuite/29_atomics/atomic_ref/deduction.cc: New test.
+       * testsuite/29_atomics/atomic_ref/float.cc: New test.
+       * testsuite/29_atomics/atomic_ref/generic.cc: New test.
+       * testsuite/29_atomics/atomic_ref/integral.cc: New test.
+       * testsuite/29_atomics/atomic_ref/pointer.cc: New test.
+       * testsuite/29_atomics/atomic_ref/requirements.cc: New test.
+
 2019-07-06  Jonathan Wakely  <jwakely@redhat.com>
 
        * include/ext/atomicity.h (__exchange_and_add, __atomic_add): Replace
index e30caef91bf1e08b86eaadba0dec4fd86ba9fc04..146e70a9f2ebb0e7915cb4561f82a638b8e4e648 100644
@@ -35,6 +35,7 @@
 #include <bits/c++config.h>
 #include <stdint.h>
 #include <bits/atomic_lockfree_defines.h>
+#include <bits/move.h>
 
 #ifndef _GLIBCXX_ALWAYS_INLINE
 #define _GLIBCXX_ALWAYS_INLINE inline __attribute__((__always_inline__))
@@ -817,6 +818,876 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       { return __atomic_fetch_sub(&_M_p, _M_type_size(__d), int(__m)); }
     };
 
+#if __cplusplus > 201703L
+  // Implementation details of atomic_ref and atomic<floating-point>.
+  namespace __atomic_impl
+  {
+    // Remove volatile and create a non-deduced context for value arguments.
+    template<typename _Tp>
+      using _Val = remove_volatile_t<_Tp>;
+
+    // As above, but for difference_type arguments.
+    template<typename _Tp>
+      using _Diff = conditional_t<is_pointer_v<_Tp>, ptrdiff_t, _Val<_Tp>>;
+
+    template<size_t _Size, size_t _Align>
+      _GLIBCXX_ALWAYS_INLINE bool
+      is_lock_free() noexcept
+      {
+       // Produce a fake, minimally aligned pointer.
+       return __atomic_is_lock_free(_Size, reinterpret_cast<void *>(-_Align));
+      }
+
+    template<typename _Tp>
+      _GLIBCXX_ALWAYS_INLINE void
+      store(_Tp* __ptr, _Val<_Tp> __t, memory_order __m) noexcept
+      { __atomic_store(__ptr, std::__addressof(__t), int(__m)); }
+
+    template<typename _Tp>
+      _GLIBCXX_ALWAYS_INLINE _Tp
+      load(_Tp* __ptr, memory_order __m) noexcept
+      {
+       alignas(_Tp) unsigned char __buf[sizeof(_Tp)];
+       _Tp* __dest = reinterpret_cast<_Tp*>(__buf);
+       __atomic_load(__ptr, __dest, int(__m));
+       return *__dest;
+      }
+
+    template<typename _Tp>
+      _GLIBCXX_ALWAYS_INLINE _Tp
+      exchange(_Tp* __ptr, _Val<_Tp> __desired, memory_order __m) noexcept
+      {
+        alignas(_Tp) unsigned char __buf[sizeof(_Tp)];
+       _Tp* __dest = reinterpret_cast<_Tp*>(__buf);
+       __atomic_exchange(__ptr, std::__addressof(__desired), __dest, int(__m));
+       return *__dest;
+      }
+
+    template<typename _Tp>
+      _GLIBCXX_ALWAYS_INLINE bool
+      compare_exchange_weak(_Tp* __ptr, _Val<_Tp>& __expected,
+                           _Val<_Tp> __desired, memory_order __success,
+                           memory_order __failure) noexcept
+      {
+       return __atomic_compare_exchange(__ptr, std::__addressof(__expected),
+                                        std::__addressof(__desired), true,
+                                        int(__success), int(__failure));
+      }
+
+    template<typename _Tp>
+      _GLIBCXX_ALWAYS_INLINE bool
+      compare_exchange_strong(_Tp* __ptr, _Val<_Tp>& __expected,
+                             _Val<_Tp> __desired, memory_order __success,
+                             memory_order __failure) noexcept
+      {
+       return __atomic_compare_exchange(__ptr, std::__addressof(__expected),
+                                        std::__addressof(__desired), false,
+                                        int(__success), int(__failure));
+      }
+
+    template<typename _Tp>
+      _GLIBCXX_ALWAYS_INLINE _Tp
+      fetch_add(_Tp* __ptr, _Diff<_Tp> __i, memory_order __m) noexcept
+      { return __atomic_fetch_add(__ptr, __i, int(__m)); }
+
+    template<typename _Tp>
+      _GLIBCXX_ALWAYS_INLINE _Tp
+      fetch_sub(_Tp* __ptr, _Diff<_Tp> __i, memory_order __m) noexcept
+      { return __atomic_fetch_sub(__ptr, __i, int(__m)); }
+
+    template<typename _Tp>
+      _GLIBCXX_ALWAYS_INLINE _Tp
+      fetch_and(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
+      { return __atomic_fetch_and(__ptr, __i, int(__m)); }
+
+    template<typename _Tp>
+      _GLIBCXX_ALWAYS_INLINE _Tp
+      fetch_or(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
+      { return __atomic_fetch_or(__ptr, __i, int(__m)); }
+
+    template<typename _Tp>
+      _GLIBCXX_ALWAYS_INLINE _Tp
+      fetch_xor(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
+      { return __atomic_fetch_xor(__ptr, __i, int(__m)); }
+
+    template<typename _Tp>
+      _GLIBCXX_ALWAYS_INLINE _Tp
+      __add_fetch(_Tp* __ptr, _Diff<_Tp> __i) noexcept
+      { return __atomic_add_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
+
+    template<typename _Tp>
+      _GLIBCXX_ALWAYS_INLINE _Tp
+      __sub_fetch(_Tp* __ptr, _Diff<_Tp> __i) noexcept
+      { return __atomic_sub_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
+
+    template<typename _Tp>
+      _GLIBCXX_ALWAYS_INLINE _Tp
+      __and_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
+      { return __atomic_and_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
+
+    template<typename _Tp>
+      _GLIBCXX_ALWAYS_INLINE _Tp
+      __or_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
+      { return __atomic_or_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
+
+    template<typename _Tp>
+      _GLIBCXX_ALWAYS_INLINE _Tp
+      __xor_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
+      { return __atomic_xor_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
+
+    template<typename _Tp>
+      _Tp
+      __fetch_add_flt(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
+      {
+       _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
+       _Val<_Tp> __newval = __oldval + __i;
+       while (!compare_exchange_weak(__ptr, __oldval, __newval, __m,
+                                     memory_order_relaxed))
+         __newval = __oldval + __i;
+       return __oldval;
+      }
+
+    template<typename _Tp>
+      _Tp
+      __fetch_sub_flt(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
+      {
+       _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
+       _Val<_Tp> __newval = __oldval - __i;
+       while (!compare_exchange_weak(__ptr, __oldval, __newval, __m,
+                                     memory_order_relaxed))
+         __newval = __oldval - __i;
+       return __oldval;
+      }
+
+    template<typename _Tp>
+      _Tp
+      __add_fetch_flt(_Tp* __ptr, _Val<_Tp> __i) noexcept
+      {
+       _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
+       _Val<_Tp> __newval = __oldval + __i;
+       while (!compare_exchange_weak(__ptr, __oldval, __newval,
+                                     memory_order_seq_cst,
+                                     memory_order_relaxed))
+         __newval = __oldval + __i;
+       return __newval;
+      }
+
+    template<typename _Tp>
+      _Tp
+      __sub_fetch_flt(_Tp* __ptr, _Val<_Tp> __i) noexcept
+      {
+       _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
+       _Val<_Tp> __newval = __oldval - __i;
+       while (!compare_exchange_weak(__ptr, __oldval, __newval,
+                                     memory_order_seq_cst,
+                                     memory_order_relaxed))
+         __newval = __oldval - __i;
+       return __newval;
+      }
+  } // namespace __atomic_impl
+
+  // base class for atomic<floating-point-type>
+  template<typename _Fp>
+    struct __atomic_float
+    {
+      static_assert(is_floating_point_v<_Fp>);
+
+      static constexpr size_t _S_alignment = __alignof__(_Fp);
+
+    public:
+      using value_type = _Fp;
+      using difference_type = value_type;
+
+      static constexpr bool is_always_lock_free
+       = __atomic_always_lock_free(sizeof(_Fp), 0);
+
+      __atomic_float() = default;
+
+      constexpr
+      __atomic_float(_Fp __t) : _M_fp(__t)
+      { }
+
+      __atomic_float(const __atomic_float&) = delete;
+      __atomic_float& operator=(const __atomic_float&) = delete;
+      __atomic_float& operator=(const __atomic_float&) volatile = delete;
+
+      _Fp
+      operator=(_Fp __t) volatile noexcept
+      {
+       this->store(__t);
+       return __t;
+      }
+
+      _Fp
+      operator=(_Fp __t) noexcept
+      {
+       this->store(__t);
+       return __t;
+      }
+
+      bool
+      is_lock_free() const volatile noexcept
+      { return __atomic_impl::is_lock_free<sizeof(_Fp), _S_alignment>(); }
+
+      bool
+      is_lock_free() const noexcept
+      { return __atomic_impl::is_lock_free<sizeof(_Fp), _S_alignment>(); }
+
+      void
+      store(_Fp __t, memory_order __m = memory_order_seq_cst) volatile noexcept
+      { __atomic_impl::store(&_M_fp, __t, __m); }
+
+      void
+      store(_Fp __t, memory_order __m = memory_order_seq_cst) noexcept
+      { __atomic_impl::store(&_M_fp, __t, __m); }
+
+      _Fp
+      load(memory_order __m = memory_order_seq_cst) const volatile noexcept
+      { return __atomic_impl::load(&_M_fp, __m); }
+
+      _Fp
+      load(memory_order __m = memory_order_seq_cst) const noexcept
+      { return __atomic_impl::load(&_M_fp, __m); }
+
+      operator _Fp() const volatile noexcept { return this->load(); }
+      operator _Fp() const noexcept { return this->load(); }
+
+      _Fp
+      exchange(_Fp __desired,
+              memory_order __m = memory_order_seq_cst) volatile noexcept
+      { return __atomic_impl::exchange(&_M_fp, __desired, __m); }
+
+      _Fp
+      exchange(_Fp __desired,
+              memory_order __m = memory_order_seq_cst) noexcept
+      { return __atomic_impl::exchange(&_M_fp, __desired, __m); }
+
+      bool
+      compare_exchange_weak(_Fp& __expected, _Fp __desired,
+                           memory_order __success,
+                           memory_order __failure) noexcept
+      {
+       return __atomic_impl::compare_exchange_weak(&_M_fp,
+                                                   __expected, __desired,
+                                                   __success, __failure);
+      }
+
+      bool
+      compare_exchange_weak(_Fp& __expected, _Fp __desired,
+                           memory_order __success,
+                           memory_order __failure) volatile noexcept
+      {
+       return __atomic_impl::compare_exchange_weak(&_M_fp,
+                                                   __expected, __desired,
+                                                   __success, __failure);
+      }
+
+      bool
+      compare_exchange_strong(_Fp& __expected, _Fp __desired,
+                             memory_order __success,
+                             memory_order __failure) noexcept
+      {
+       return __atomic_impl::compare_exchange_strong(&_M_fp,
+                                                     __expected, __desired,
+                                                     __success, __failure);
+      }
+
+      bool
+      compare_exchange_strong(_Fp& __expected, _Fp __desired,
+                             memory_order __success,
+                             memory_order __failure) volatile noexcept
+      {
+       return __atomic_impl::compare_exchange_strong(&_M_fp,
+                                                     __expected, __desired,
+                                                     __success, __failure);
+      }
+
+      bool
+      compare_exchange_weak(_Fp& __expected, _Fp __desired,
+                           memory_order __order = memory_order_seq_cst)
+      noexcept
+      {
+       return compare_exchange_weak(__expected, __desired, __order,
+                                     __cmpexch_failure_order(__order));
+      }
+
+      bool
+      compare_exchange_weak(_Fp& __expected, _Fp __desired,
+                           memory_order __order = memory_order_seq_cst)
+      volatile noexcept
+      {
+       return compare_exchange_weak(__expected, __desired, __order,
+                                     __cmpexch_failure_order(__order));
+      }
+
+      bool
+      compare_exchange_strong(_Fp& __expected, _Fp __desired,
+                             memory_order __order = memory_order_seq_cst)
+      noexcept
+      {
+       return compare_exchange_strong(__expected, __desired, __order,
+                                      __cmpexch_failure_order(__order));
+      }
+
+      bool
+      compare_exchange_strong(_Fp& __expected, _Fp __desired,
+                             memory_order __order = memory_order_seq_cst)
+      volatile noexcept
+      {
+       return compare_exchange_strong(__expected, __desired, __order,
+                                      __cmpexch_failure_order(__order));
+      }
+
+      value_type
+      fetch_add(value_type __i,
+               memory_order __m = memory_order_seq_cst) noexcept
+      { return __atomic_impl::__fetch_add_flt(&_M_fp, __i, __m); }
+
+      value_type
+      fetch_add(value_type __i,
+               memory_order __m = memory_order_seq_cst) volatile noexcept
+      { return __atomic_impl::__fetch_add_flt(&_M_fp, __i, __m); }
+
+      value_type
+      fetch_sub(value_type __i,
+               memory_order __m = memory_order_seq_cst) noexcept
+      { return __atomic_impl::__fetch_sub_flt(&_M_fp, __i, __m); }
+
+      value_type
+      fetch_sub(value_type __i,
+               memory_order __m = memory_order_seq_cst) volatile noexcept
+      { return __atomic_impl::__fetch_sub_flt(&_M_fp, __i, __m); }
+
+      value_type
+      operator+=(value_type __i) noexcept
+      { return __atomic_impl::__add_fetch_flt(&_M_fp, __i); }
+
+      value_type
+      operator+=(value_type __i) volatile noexcept
+      { return __atomic_impl::__add_fetch_flt(&_M_fp, __i); }
+
+      value_type
+      operator-=(value_type __i) noexcept
+      { return __atomic_impl::__sub_fetch_flt(&_M_fp, __i); }
+
+      value_type
+      operator-=(value_type __i) volatile noexcept
+      { return __atomic_impl::__sub_fetch_flt(&_M_fp, __i); }
+
+    private:
+      alignas(_S_alignment) _Fp _M_fp;
+    };
+
+  template<typename _Tp,
+          bool = is_integral_v<_Tp>, bool = is_floating_point_v<_Tp>>
+    struct __atomic_ref;
+
+  // base class for non-integral, non-floating-point, non-pointer types
+  template<typename _Tp>
+    struct __atomic_ref<_Tp, false, false>
+    {
+      static_assert(is_trivially_copyable_v<_Tp>);
+
+      // 1/2/4/8/16-byte types must be aligned to at least their size.
+      static constexpr int _S_min_alignment
+       = (sizeof(_Tp) & (sizeof(_Tp) - 1)) || sizeof(_Tp) > 16
+       ? 0 : sizeof(_Tp);
+
+    public:
+      using value_type = _Tp;
+
+      static constexpr bool is_always_lock_free
+       = __atomic_always_lock_free(sizeof(_Tp), 0);
+
+      static constexpr size_t required_alignment
+       = _S_min_alignment > alignof(_Tp) ? _S_min_alignment : alignof(_Tp);
+
+      __atomic_ref& operator=(const __atomic_ref&) = delete;
+
+      explicit
+      __atomic_ref(_Tp& __t) : _M_ptr(std::__addressof(__t))
+      { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); }
+
+      __atomic_ref(const __atomic_ref&) noexcept = default;
+
+      _Tp
+      operator=(_Tp __t) const noexcept
+      {
+       this->store(__t);
+       return __t;
+      }
+
+      operator _Tp() const noexcept { return this->load(); }
+
+      bool
+      is_lock_free() const noexcept
+      { return __atomic_impl::is_lock_free<sizeof(_Tp), required_alignment>(); }
+
+      void
+      store(_Tp __t, memory_order __m = memory_order_seq_cst) const noexcept
+      { __atomic_impl::store(_M_ptr, __t, __m); }
+
+      _Tp
+      load(memory_order __m = memory_order_seq_cst) const noexcept
+      { return __atomic_impl::load(_M_ptr, __m); }
+
+      _Tp
+      exchange(_Tp __desired, memory_order __m = memory_order_seq_cst)
+      const noexcept
+      { return __atomic_impl::exchange(_M_ptr, __desired, __m); }
+
+      bool
+      compare_exchange_weak(_Tp& __expected, _Tp __desired,
+                           memory_order __success,
+                           memory_order __failure) const noexcept
+      {
+       return __atomic_impl::compare_exchange_weak(_M_ptr,
+                                                   __expected, __desired,
+                                                   __success, __failure);
+      }
+
+      bool
+      compare_exchange_strong(_Tp& __expected, _Tp __desired,
+                           memory_order __success,
+                           memory_order __failure) const noexcept
+      {
+       return __atomic_impl::compare_exchange_strong(_M_ptr,
+                                                     __expected, __desired,
+                                                     __success, __failure);
+      }
+
+      bool
+      compare_exchange_weak(_Tp& __expected, _Tp __desired,
+                           memory_order __order = memory_order_seq_cst)
+      const noexcept
+      {
+       return compare_exchange_weak(__expected, __desired, __order,
+                                     __cmpexch_failure_order(__order));
+      }
+
+      bool
+      compare_exchange_strong(_Tp& __expected, _Tp __desired,
+                             memory_order __order = memory_order_seq_cst)
+      const noexcept
+      {
+       return compare_exchange_strong(__expected, __desired, __order,
+                                      __cmpexch_failure_order(__order));
+      }
+
+    private:
+      _Tp* _M_ptr;
+    };
+
+  // base class for atomic_ref<integral-type>
+  template<typename _Tp>
+    struct __atomic_ref<_Tp, true, false>
+    {
+      static_assert(is_integral_v<_Tp>);
+
+    public:
+      using value_type = _Tp;
+      using difference_type = value_type;
+
+      static constexpr bool is_always_lock_free
+       = __atomic_always_lock_free(sizeof(_Tp), 0);
+
+      static constexpr size_t required_alignment
+       = sizeof(_Tp) > alignof(_Tp) ? sizeof(_Tp) : alignof(_Tp);
+
+      __atomic_ref() = delete;
+      __atomic_ref& operator=(const __atomic_ref&) = delete;
+
+      explicit
+      __atomic_ref(_Tp& __t) : _M_ptr(&__t)
+      { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); }
+
+      __atomic_ref(const __atomic_ref&) noexcept = default;
+
+      _Tp
+      operator=(_Tp __t) const noexcept
+      {
+       this->store(__t);
+       return __t;
+      }
+
+      operator _Tp() const noexcept { return this->load(); }
+
+      bool
+      is_lock_free() const noexcept
+      {
+       return __atomic_impl::is_lock_free<sizeof(_Tp), required_alignment>();
+      }
+
+      void
+      store(_Tp __t, memory_order __m = memory_order_seq_cst) const noexcept
+      { __atomic_impl::store(_M_ptr, __t, __m); }
+
+      _Tp
+      load(memory_order __m = memory_order_seq_cst) const noexcept
+      { return __atomic_impl::load(_M_ptr, __m); }
+
+      _Tp
+      exchange(_Tp __desired,
+              memory_order __m = memory_order_seq_cst) const noexcept
+      { return __atomic_impl::exchange(_M_ptr, __desired, __m); }
+
+      bool
+      compare_exchange_weak(_Tp& __expected, _Tp __desired,
+                           memory_order __success,
+                           memory_order __failure) const noexcept
+      {
+       return __atomic_impl::compare_exchange_weak(_M_ptr,
+                                                   __expected, __desired,
+                                                   __success, __failure);
+      }
+
+      bool
+      compare_exchange_strong(_Tp& __expected, _Tp __desired,
+                             memory_order __success,
+                             memory_order __failure) const noexcept
+      {
+       return __atomic_impl::compare_exchange_strong(_M_ptr,
+                                                     __expected, __desired,
+                                                     __success, __failure);
+      }
+
+      bool
+      compare_exchange_weak(_Tp& __expected, _Tp __desired,
+                           memory_order __order = memory_order_seq_cst)
+      const noexcept
+      {
+       return compare_exchange_weak(__expected, __desired, __order,
+                                     __cmpexch_failure_order(__order));
+      }
+
+      bool
+      compare_exchange_strong(_Tp& __expected, _Tp __desired,
+                             memory_order __order = memory_order_seq_cst)
+      const noexcept
+      {
+       return compare_exchange_strong(__expected, __desired, __order,
+                                      __cmpexch_failure_order(__order));
+      }
+
+      value_type
+      fetch_add(value_type __i,
+               memory_order __m = memory_order_seq_cst) const noexcept
+      { return __atomic_impl::fetch_add(_M_ptr, __i, __m); }
+
+      value_type
+      fetch_sub(value_type __i,
+               memory_order __m = memory_order_seq_cst) const noexcept
+      { return __atomic_impl::fetch_sub(_M_ptr, __i, __m); }
+
+      value_type
+      fetch_and(value_type __i,
+               memory_order __m = memory_order_seq_cst) const noexcept
+      { return __atomic_impl::fetch_and(_M_ptr, __i, __m); }
+
+      value_type
+      fetch_or(value_type __i,
+              memory_order __m = memory_order_seq_cst) const noexcept
+      { return __atomic_impl::fetch_or(_M_ptr, __i, __m); }
+
+      value_type
+      fetch_xor(value_type __i,
+               memory_order __m = memory_order_seq_cst) const noexcept
+      { return __atomic_impl::fetch_xor(_M_ptr, __i, __m); }
+
+      _GLIBCXX_ALWAYS_INLINE value_type
+      operator++(int) const noexcept
+      { return fetch_add(1); }
+
+      _GLIBCXX_ALWAYS_INLINE value_type
+      operator--(int) const noexcept
+      { return fetch_sub(1); }
+
+      value_type
+      operator++() const noexcept
+      { return __atomic_impl::__add_fetch(_M_ptr, value_type(1)); }
+
+      value_type
+      operator--() const noexcept
+      { return __atomic_impl::__sub_fetch(_M_ptr, value_type(1)); }
+
+      value_type
+      operator+=(value_type __i) const noexcept
+      { return __atomic_impl::__add_fetch(_M_ptr, __i); }
+
+      value_type
+      operator-=(value_type __i) const noexcept
+      { return __atomic_impl::__sub_fetch(_M_ptr, __i); }
+
+      value_type
+      operator&=(value_type __i) const noexcept
+      { return __atomic_impl::__and_fetch(_M_ptr, __i); }
+
+      value_type
+      operator|=(value_type __i) const noexcept
+      { return __atomic_impl::__or_fetch(_M_ptr, __i); }
+
+      value_type
+      operator^=(value_type __i) const noexcept
+      { return __atomic_impl::__xor_fetch(_M_ptr, __i); }
+
+    private:
+      _Tp* _M_ptr;
+    };
+
+  // base class for atomic_ref<floating-point-type>
+  template<typename _Fp>
+    struct __atomic_ref<_Fp, false, true>
+    {
+      static_assert(is_floating_point_v<_Fp>);
+
+    public:
+      using value_type = _Fp;
+      using difference_type = value_type;
+
+      static constexpr bool is_always_lock_free
+       = __atomic_always_lock_free(sizeof(_Fp), 0);
+
+      static constexpr size_t required_alignment = __alignof__(_Fp);
+
+      __atomic_ref() = delete;
+      __atomic_ref& operator=(const __atomic_ref&) = delete;
+
+      explicit
+      __atomic_ref(_Fp& __t) : _M_ptr(&__t)
+      { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); }
+
+      __atomic_ref(const __atomic_ref&) noexcept = default;
+
+      _Fp
+      operator=(_Fp __t) const noexcept
+      {
+       this->store(__t);
+       return __t;
+      }
+
+      operator _Fp() const noexcept { return this->load(); }
+
+      bool
+      is_lock_free() const noexcept
+      {
+       return __atomic_impl::is_lock_free<sizeof(_Fp), required_alignment>();
+      }
+
+      void
+      store(_Fp __t, memory_order __m = memory_order_seq_cst) const noexcept
+      { __atomic_impl::store(_M_ptr, __t, __m); }
+
+      _Fp
+      load(memory_order __m = memory_order_seq_cst) const noexcept
+      { return __atomic_impl::load(_M_ptr, __m); }
+
+      _Fp
+      exchange(_Fp __desired,
+              memory_order __m = memory_order_seq_cst) const noexcept
+      { return __atomic_impl::exchange(_M_ptr, __desired, __m); }
+
+      bool
+      compare_exchange_weak(_Fp& __expected, _Fp __desired,
+                           memory_order __success,
+                           memory_order __failure) const noexcept
+      {
+       return __atomic_impl::compare_exchange_weak(_M_ptr,
+                                                   __expected, __desired,
+                                                   __success, __failure);
+      }
+
+      bool
+      compare_exchange_strong(_Fp& __expected, _Fp __desired,
+                           memory_order __success,
+                           memory_order __failure) const noexcept
+      {
+       return __atomic_impl::compare_exchange_strong(_M_ptr,
+                                                     __expected, __desired,
+                                                     __success, __failure);
+      }
+
+      bool
+      compare_exchange_weak(_Fp& __expected, _Fp __desired,
+                           memory_order __order = memory_order_seq_cst)
+      const noexcept
+      {
+       return compare_exchange_weak(__expected, __desired, __order,
+                                     __cmpexch_failure_order(__order));
+      }
+
+      bool
+      compare_exchange_strong(_Fp& __expected, _Fp __desired,
+                             memory_order __order = memory_order_seq_cst)
+      const noexcept
+      {
+       return compare_exchange_strong(__expected, __desired, __order,
+                                      __cmpexch_failure_order(__order));
+      }
+
+      value_type
+      fetch_add(value_type __i,
+               memory_order __m = memory_order_seq_cst) const noexcept
+      { return __atomic_impl::__fetch_add_flt(_M_ptr, __i, __m); }
+
+      value_type
+      fetch_sub(value_type __i,
+               memory_order __m = memory_order_seq_cst) const noexcept
+      { return __atomic_impl::__fetch_sub_flt(_M_ptr, __i, __m); }
+
+      value_type
+      operator+=(value_type __i) const noexcept
+      { return __atomic_impl::__add_fetch_flt(_M_ptr, __i); }
+
+      value_type
+      operator-=(value_type __i) const noexcept
+      { return __atomic_impl::__sub_fetch_flt(_M_ptr, __i); }
+
+    private:
+      _Fp* _M_ptr;
+    };
+
+  // base class for atomic_ref<pointer-type>
+  template<typename _Tp>
+    struct __atomic_ref<_Tp*, false, false>
+    {
+    public:
+      using value_type = _Tp*;
+      using difference_type = ptrdiff_t;
+
+      static constexpr bool is_always_lock_free = ATOMIC_POINTER_LOCK_FREE == 2;
+
+      static constexpr size_t required_alignment = __alignof__(_Tp*);
+
+      __atomic_ref() = delete;
+      __atomic_ref& operator=(const __atomic_ref&) = delete;
+
+      explicit
+      __atomic_ref(_Tp*& __t) : _M_ptr(std::__addressof(__t))
+      { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); }
+
+      __atomic_ref(const __atomic_ref&) noexcept = default;
+
+      _Tp*
+      operator=(_Tp* __t) const noexcept
+      {
+       this->store(__t);
+       return __t;
+      }
+
+      operator _Tp*() const noexcept { return this->load(); }
+
+      bool
+      is_lock_free() const noexcept
+      {
+       return __atomic_impl::is_lock_free<sizeof(_Tp*), required_alignment>();
+      }
+
+      void
+      store(_Tp* __t, memory_order __m = memory_order_seq_cst) const noexcept
+      { __atomic_impl::store(_M_ptr, __t, __m); }
+
+      _Tp*
+      load(memory_order __m = memory_order_seq_cst) const noexcept
+      { return __atomic_impl::load(_M_ptr, __m); }
+
+      _Tp*
+      exchange(_Tp* __desired,
+              memory_order __m = memory_order_seq_cst) const noexcept
+      { return __atomic_impl::exchange(_M_ptr, __desired, __m); }
+
+      bool
+      compare_exchange_weak(_Tp*& __expected, _Tp* __desired,
+                           memory_order __success,
+                           memory_order __failure) const noexcept
+      {
+       return __atomic_impl::compare_exchange_weak(_M_ptr,
+                                                   __expected, __desired,
+                                                   __success, __failure);
+      }
+
+      bool
+      compare_exchange_strong(_Tp*& __expected, _Tp* __desired,
+                           memory_order __success,
+                           memory_order __failure) const noexcept
+      {
+       return __atomic_impl::compare_exchange_strong(_M_ptr,
+                                                     __expected, __desired,
+                                                     __success, __failure);
+      }
+
+      bool
+      compare_exchange_weak(_Tp*& __expected, _Tp* __desired,
+                           memory_order __order = memory_order_seq_cst)
+      const noexcept
+      {
+       return compare_exchange_weak(__expected, __desired, __order,
+                                     __cmpexch_failure_order(__order));
+      }
+
+      bool
+      compare_exchange_strong(_Tp*& __expected, _Tp* __desired,
+                             memory_order __order = memory_order_seq_cst)
+      const noexcept
+      {
+       return compare_exchange_strong(__expected, __desired, __order,
+                                      __cmpexch_failure_order(__order));
+      }
+
+      _GLIBCXX_ALWAYS_INLINE value_type
+      fetch_add(difference_type __d,
+               memory_order __m = memory_order_seq_cst) const noexcept
+      { return __atomic_impl::fetch_add(_M_ptr, _S_type_size(__d), __m); }
+
+      _GLIBCXX_ALWAYS_INLINE value_type
+      fetch_sub(difference_type __d,
+               memory_order __m = memory_order_seq_cst) const noexcept
+      { return __atomic_impl::fetch_sub(_M_ptr, _S_type_size(__d), __m); }
+
+      value_type
+      operator++(int) const noexcept
+      { return fetch_add(1); }
+
+      value_type
+      operator--(int) const noexcept
+      { return fetch_sub(1); }
+
+      value_type
+      operator++() const noexcept
+      {
+       return __atomic_impl::__add_fetch(_M_ptr, _S_type_size(1));
+      }
+
+      value_type
+      operator--() const noexcept
+      {
+       return __atomic_impl::__sub_fetch(_M_ptr, _S_type_size(1));
+      }
+
+      value_type
+      operator+=(difference_type __d) const noexcept
+      {
+       return __atomic_impl::__add_fetch(_M_ptr, _S_type_size(__d));
+      }
+
+      value_type
+      operator-=(difference_type __d) const noexcept
+      {
+       return __atomic_impl::__sub_fetch(_M_ptr, _S_type_size(__d));
+      }
+
+    private:
+      static constexpr ptrdiff_t
+      _S_type_size(ptrdiff_t __d) noexcept
+      {
+       static_assert(is_object_v<_Tp>);
+       return __d * sizeof(_Tp);
+      }
+
+      _Tp** _M_ptr;
+    };
+
+#endif // C++2a
+
   // @} group atomics
 
 _GLIBCXX_END_NAMESPACE_VERSION
index 699431e97271959380620455411dd1a90b8746b1..26d8d3946da34b0b5c51f7554f19d6e54e9e7565 100644
@@ -39,7 +39,6 @@
 #else
 
 #include <bits/atomic_base.h>
-#include <bits/move.h>
 
 namespace std _GLIBCXX_VISIBILITY(default)
 {
@@ -1472,6 +1471,71 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
                     __atomic_val_t<_ITp> __i) noexcept
     { return atomic_fetch_xor_explicit(__a, __i, memory_order_seq_cst); }
 
+#if __cplusplus > 201703L
+  template<>
+    struct atomic<float> : __atomic_float<float>
+    {
+      atomic() noexcept = default;
+
+      constexpr
+      atomic(float __fp) noexcept : __atomic_float<float>(__fp)
+      { }
+
+      atomic& operator=(const atomic&) volatile = delete;
+      atomic& operator=(const atomic&) = delete;
+
+      using __atomic_float<float>::operator=;
+    };
+
+  template<>
+    struct atomic<double> : __atomic_float<double>
+    {
+      atomic() noexcept = default;
+
+      constexpr
+      atomic(double __fp) noexcept : __atomic_float<double>(__fp)
+      { }
+
+      atomic& operator=(const atomic&) volatile = delete;
+      atomic& operator=(const atomic&) = delete;
+
+      using __atomic_float<double>::operator=;
+    };
+
+  template<>
+    struct atomic<long double> : __atomic_float<long double>
+    {
+      atomic() noexcept = default;
+
+      constexpr
+      atomic(long double __fp) noexcept : __atomic_float<long double>(__fp)
+      { }
+
+      atomic& operator=(const atomic&) volatile = delete;
+      atomic& operator=(const atomic&) = delete;
+
+      using __atomic_float<long double>::operator=;
+    };
+
+#define __cpp_lib_atomic_ref 201806L
+
+  /// Class template to provide atomic operations on a non-atomic variable.
+  template<typename _Tp>
+    struct atomic_ref : __atomic_ref<_Tp>
+    {
+      explicit
+      atomic_ref(_Tp& __t) noexcept : __atomic_ref<_Tp>(__t)
+      { }
+
+      atomic_ref& operator=(const atomic_ref&) = delete;
+
+      atomic_ref(const atomic_ref&) = default;
+
+      using __atomic_ref<_Tp>::operator=;
+    };
+
+#endif // C++2a
+
   // @} group atomics
 
 _GLIBCXX_END_NAMESPACE_VERSION
index e300fc38bc774f87a240273c61f14587d9d8288a..d134f7fde01d9261177c7be39790013b37e192c3 100644
 
 #if __cplusplus > 201703L
 // c++2a
+#define __cpp_lib_atomic_ref 201806L
 #define __cpp_lib_bind_front 201902L
 #define __cpp_lib_bounded_array_traits 201902L
 #if __cpp_impl_destroying_delete
index 58d554cefc1cfd8463318c380ce425c5788a54de..5065730dd91d7df87432124709ed67789e5c0ade 100644
@@ -27,4 +27,4 @@ struct X {
   char stuff[0]; // GNU extension, type has zero size
 };
 
-std::atomic<X> a;  // { dg-error "not supported" "" { target *-*-* } 194 }
+std::atomic<X> a;  // { dg-error "zero-sized types" "" { target *-*-* } 0 }
diff --git a/libstdc++-v3/testsuite/29_atomics/atomic_float/1.cc b/libstdc++-v3/testsuite/29_atomics/atomic_float/1.cc
new file mode 100644
index 0000000..bd0e353
--- /dev/null
@@ -0,0 +1,573 @@
+// Copyright (C) 2019 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library.  This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING3.  If not see
+// <http://www.gnu.org/licenses/>.
+
+// { dg-options "-std=gnu++2a" }
+// { dg-do run { target c++2a } }
+
+#include <atomic>
+#include <testsuite_hooks.h>
+
+void
+test01()
+{
+  const auto mo = std::memory_order_relaxed;
+  bool ok;
+  float expected;
+
+  if constexpr (std::atomic<float>::is_always_lock_free)
+  {
+    std::atomic<float> a0;
+    std::atomic<float> a1(1.0f);
+    ok = a0.is_lock_free();
+    a0 = a1.load();
+    VERIFY( a0.load() == a1.load() );
+    VERIFY( a0.load(mo) == a0.load() );
+    a0.store(0.5f);
+    a1.store(0.5f, mo);
+    VERIFY( a0.load() == a1.load() );
+    auto f0 = a0.exchange(12.5f);
+    auto f1 = a1.exchange(12.5f, mo);
+    VERIFY( a0 == 12.5f );
+    VERIFY( a0.load() == a1.load() );
+    VERIFY( f0 == 0.5f );
+    VERIFY( f0 == f1 );
+
+    expected = 12.5f;
+    while (!a0.compare_exchange_weak(expected, 1.6f, mo, mo))
+    { /* weak form can fail spuriously */ }
+    VERIFY( a0.load() == 1.6f );
+    VERIFY( expected == 12.5f );
+    expected = 1.5f;
+    ok = a1.compare_exchange_weak(expected, 1.6f, mo, mo);
+    VERIFY( !ok && a1.load() == 12.5f && expected == 12.5f );
+    VERIFY( expected == 12.5f );
+    expected = 1.6f;
+    ok = a0.compare_exchange_strong(expected, 3.2f, mo, mo);
+    VERIFY( ok && a0.load() == 3.2f );
+    VERIFY( expected == 1.6f );
+    expected = 1.5f;
+    ok = a1.compare_exchange_strong(expected, 3.2f, mo, mo);
+    VERIFY( !ok && a1.load() == 12.5f && expected == 12.5f );
+
+    expected = 3.2f;
+    while (!a0.compare_exchange_weak(expected, .64f))
+    { /* weak form can fail spuriously */ }
+    VERIFY( a0.load() == .64f );
+    expected = 12.5f;
+    while (!a1.compare_exchange_weak(expected, 1.6f, mo))
+    { /* weak form can fail spuriously */ }
+    VERIFY( a1.load() == 1.6f );
+    expected = 0.5f;
+    ok = a0.compare_exchange_weak(expected, 3.2f);
+    VERIFY( !ok && a0.load() == .64f && expected == .64f );
+    expected = 0.5f;
+    ok = a1.compare_exchange_weak(expected, 3.2f, mo);
+    VERIFY( !ok && a1.load() == 1.6f && expected == 1.6f );
+
+    expected = .64f;
+    ok = a0.compare_exchange_strong(expected, 12.8f);
+    VERIFY( ok && a0.load() == 12.8f );
+    expected = 1.6f;
+    ok = a1.compare_exchange_strong(expected, 2.56f, mo);
+    VERIFY( ok && a1.load() == 2.56f );
+    expected = 0.5f;
+    ok = a0.compare_exchange_strong(expected, 3.2f);
+    VERIFY( !ok && a0.load() == 12.8f && expected == 12.8f );
+    expected = 0.5f;
+    ok = a1.compare_exchange_strong(expected, 3.2f, mo);
+    VERIFY( !ok && a1.load() == 2.56f && expected == 2.56f );
+
+    f0 = a0.fetch_add(1.2f);
+    VERIFY( f0 == 12.8f );
+    VERIFY( a0 == 14.0f );
+    f1 = a1.fetch_add(2.4f, mo);
+    VERIFY( f1 == 2.56f );
+    VERIFY( a1 == 4.96f );
+
+    f0 = a0.fetch_sub(1.2f);
+    VERIFY( f0 == 14.0f );
+    VERIFY( a0 == 12.8f );
+    f1 = a1.fetch_sub(3.5f, mo);
+    VERIFY( f1 == 4.96f );
+    VERIFY( a1 == 1.46f );
+
+    f0 = a0 += 1.2f;
+    VERIFY( f0 == 14.0f );
+    VERIFY( a0 == 14.0f );
+
+    f0 = a0 -= 0.8f;
+    VERIFY( f0 == 13.2f );
+    VERIFY( a0 == 13.2f );
+  }
+
+  // Repeat for volatile std::atomic<float>
+  if constexpr (std::atomic<float>::is_always_lock_free)
+  {
+    volatile std::atomic<float> a0;
+    volatile std::atomic<float> a1(1.0f);
+    ok = a0.is_lock_free();
+    a0 = a1.load();
+    VERIFY( a0.load() == a1.load() );
+    VERIFY( a0.load(mo) == a0.load() );
+    a0.store(0.5f);
+    a1.store(0.5f, mo);
+    VERIFY( a0.load() == a1.load() );
+    auto f0 = a0.exchange(12.5f);
+    auto f1 = a1.exchange(12.5f, mo);
+    VERIFY( a0 == 12.5f );
+    VERIFY( a0.load() == a1.load() );
+    VERIFY( f0 == 0.5f );
+    VERIFY( f0 == f1 );
+
+    expected = 12.5f;
+    while (!a0.compare_exchange_weak(expected, 1.6f, mo, mo))
+    { /* weak form can fail spuriously */ }
+    VERIFY( a0.load() == 1.6f );
+    VERIFY( expected == 12.5f );
+    expected = 1.5f;
+    ok = a1.compare_exchange_weak(expected, 1.6f, mo, mo);
+    VERIFY( !ok && a1.load() == 12.5f && expected == 12.5f );
+    VERIFY( expected == 12.5f );
+    expected = 1.6f;
+    ok = a0.compare_exchange_strong(expected, 3.2f, mo, mo);
+    VERIFY( ok && a0.load() == 3.2f );
+    VERIFY( expected == 1.6f );
+    expected = 1.5f;
+    ok = a1.compare_exchange_strong(expected, 3.2f, mo, mo);
+    VERIFY( !ok && a1.load() == 12.5f && expected == 12.5f );
+
+    expected = 3.2f;
+    while (!a0.compare_exchange_weak(expected, .64f))
+    { /* weak form can fail spuriously */ }
+    VERIFY( a0.load() == .64f );
+    expected = 12.5f;
+    while (!a1.compare_exchange_weak(expected, 1.6f, mo))
+    { /* weak form can fail spuriously */ }
+    VERIFY( a1.load() == 1.6f );
+    expected = 0.5f;
+    ok = a0.compare_exchange_weak(expected, 3.2f);
+    VERIFY( !ok && a0.load() == .64f && expected == .64f );
+    expected = 0.5f;
+    ok = a1.compare_exchange_weak(expected, 3.2f, mo);
+    VERIFY( !ok && a1.load() == 1.6f && expected == 1.6f );
+
+    expected = .64f;
+    ok = a0.compare_exchange_strong(expected, 12.8f);
+    VERIFY( ok && a0.load() == 12.8f );
+    expected = 1.6f;
+    ok = a1.compare_exchange_strong(expected, 2.56f, mo);
+    VERIFY( ok && a1.load() == 2.56f );
+    expected = 0.5f;
+    ok = a0.compare_exchange_strong(expected, 3.2f);
+    VERIFY( !ok && a0.load() == 12.8f && expected == 12.8f );
+    expected = 0.5f;
+    ok = a1.compare_exchange_strong(expected, 3.2f, mo);
+    VERIFY( !ok && a1.load() == 2.56f && expected == 2.56f );
+
+    f0 = a0.fetch_add(1.2f);
+    VERIFY( f0 == 12.8f );
+    VERIFY( a0 == 14.0f );
+    f1 = a1.fetch_add(2.4f, mo);
+    VERIFY( f1 == 2.56f );
+    VERIFY( a1 == 4.96f );
+
+    f0 = a0.fetch_sub(1.2f);
+    VERIFY( f0 == 14.0f );
+    VERIFY( a0 == 12.8f );
+    f1 = a1.fetch_sub(3.5f, mo);
+    VERIFY( f1 == 4.96f );
+    VERIFY( a1 == 1.46f );
+
+    f0 = a0 += 1.2f;
+    VERIFY( f0 == 14.0f );
+    VERIFY( a0 == 14.0f );
+
+    f0 = a0 -= 0.8f;
+    VERIFY( f0 == 13.2f );
+    VERIFY( a0 == 13.2f );
+  }
+}
+
+void
+test02()
+{
+  const auto mo = std::memory_order_relaxed;
+  bool ok;
+  double expected;
+
+  if constexpr (std::atomic<double>::is_always_lock_free)
+  {
+    std::atomic<double> a0;
+    std::atomic<double> a1(1.0);
+    ok = a0.is_lock_free();
+    a0 = a1.load();
+    VERIFY( a0.load() == a1.load() );
+    VERIFY( a0.load(mo) == a0.load() );
+    a0.store(0.5);
+    a1.store(0.5, mo);
+    VERIFY( a0.load() == a1.load() );
+    auto f0 = a0.exchange(12.5);
+    auto f1 = a1.exchange(12.5, mo);
+    VERIFY( a0 == 12.5 );
+    VERIFY( a0.load() == a1.load() );
+    VERIFY( f0 == 0.5 );
+    VERIFY( f0 == f1 );
+
+    expected = 12.5;
+    while (!a0.compare_exchange_weak(expected, 1.6, mo, mo))
+    { /* weak form can fail spuriously */ }
+    VERIFY( a0.load() == 1.6 );
+    VERIFY( expected == 12.5 );
+    expected = 1.5;
+    ok = a1.compare_exchange_weak(expected, 1.6, mo, mo);
+    VERIFY( !ok && a1.load() == 12.5 && expected == 12.5 );
+    VERIFY( expected == 12.5 );
+    expected = 1.6;
+    ok = a0.compare_exchange_strong(expected, 3.2, mo, mo);
+    VERIFY( ok && a0.load() == 3.2 );
+    VERIFY( expected == 1.6 );
+    expected = 1.5;
+    ok = a1.compare_exchange_strong(expected, 3.2, mo, mo);
+    VERIFY( !ok && a1.load() == 12.5 && expected == 12.5 );
+
+    expected = 3.2;
+    while (!a0.compare_exchange_weak(expected, .64))
+    { /* weak form can fail spuriously */ }
+    VERIFY( a0.load() == .64 );
+    expected = 12.5;
+    while (!a1.compare_exchange_weak(expected, 1.6, mo))
+    { /* weak form can fail spuriously */ }
+    VERIFY( a1.load() == 1.6 );
+    expected = 0.5;
+    ok = a0.compare_exchange_weak(expected, 3.2);
+    VERIFY( !ok && a0.load() == .64 && expected == .64 );
+    expected = 0.5;
+    ok = a1.compare_exchange_weak(expected, 3.2, mo);
+    VERIFY( !ok && a1.load() == 1.6 && expected == 1.6 );
+
+    expected = .64;
+    ok = a0.compare_exchange_strong(expected, 12.8);
+    VERIFY( ok && a0.load() == 12.8 );
+    expected = 1.6;
+    ok = a1.compare_exchange_strong(expected, 2.56, mo);
+    VERIFY( ok && a1.load() == 2.56 );
+    expected = 0.5;
+    ok = a0.compare_exchange_strong(expected, 3.2);
+    VERIFY( !ok && a0.load() == 12.8 && expected == 12.8 );
+    expected = 0.5;
+    ok = a1.compare_exchange_strong(expected, 3.2, mo);
+    VERIFY( !ok && a1.load() == 2.56 && expected == 2.56 );
+
+    f0 = a0.fetch_add(1.2);
+    VERIFY( f0 == 12.8 );
+    VERIFY( a0 == 14.0 );
+    f1 = a1.fetch_add(2.4, mo);
+    VERIFY( f1 == 2.56 );
+    VERIFY( a1 == 4.96 );
+
+    f0 = a0.fetch_sub(1.2);
+    VERIFY( f0 == 14.0 );
+    VERIFY( a0 == 12.8 );
+    f1 = a1.fetch_sub(3.5, mo);
+    VERIFY( f1 == 4.96 );
+    VERIFY( a1 == 1.46 );
+
+    f0 = a0 += 1.2;
+    VERIFY( f0 == 14.0 );
+    VERIFY( a0 == 14.0 );
+
+    f0 = a0 -= 0.8;
+    VERIFY( f0 == 13.2 );
+    VERIFY( a0 == 13.2 );
+  }
+
+  // Repeat for volatile std::atomic<double>
+  if constexpr (std::atomic<double>::is_always_lock_free)
+  {
+    volatile std::atomic<double> a0;
+    volatile std::atomic<double> a1(1.0);
+    ok = a0.is_lock_free();
+    a0 = a1.load();
+    VERIFY( a0.load() == a1.load() );
+    VERIFY( a0.load(mo) == a0.load() );
+    a0.store(0.5);
+    a1.store(0.5, mo);
+    VERIFY( a0.load() == a1.load() );
+    auto f0 = a0.exchange(12.5);
+    auto f1 = a1.exchange(12.5, mo);
+    VERIFY( a0 == 12.5 );
+    VERIFY( a0.load() == a1.load() );
+    VERIFY( f0 == 0.5 );
+    VERIFY( f0 == f1 );
+
+    expected = 12.5;
+    while (!a0.compare_exchange_weak(expected, 1.6, mo, mo))
+    { /* weak form can fail spuriously */ }
+    VERIFY( a0.load() == 1.6 );
+    VERIFY( expected == 12.5 );
+    expected = 1.5;
+    ok = a1.compare_exchange_weak(expected, 1.6, mo, mo);
+    VERIFY( !ok && a1.load() == 12.5 && expected == 12.5 );
+    VERIFY( expected == 12.5 );
+    expected = 1.6;
+    ok = a0.compare_exchange_strong(expected, 3.2, mo, mo);
+    VERIFY( ok && a0.load() == 3.2 );
+    VERIFY( expected == 1.6 );
+    expected = 1.5;
+    ok = a1.compare_exchange_strong(expected, 3.2, mo, mo);
+    VERIFY( !ok && a1.load() == 12.5 && expected == 12.5 );
+
+    expected = 3.2;
+    while (!a0.compare_exchange_weak(expected, .64))
+    { /* weak form can fail spuriously */ }
+    VERIFY( a0.load() == .64 );
+    expected = 12.5;
+    while (!a1.compare_exchange_weak(expected, 1.6, mo))
+    { /* weak form can fail spuriously */ }
+    VERIFY( a1.load() == 1.6 );
+    expected = 0.5;
+    ok = a0.compare_exchange_weak(expected, 3.2);
+    VERIFY( !ok && a0.load() == .64 && expected == .64 );
+    expected = 0.5;
+    ok = a1.compare_exchange_weak(expected, 3.2, mo);
+    VERIFY( !ok && a1.load() == 1.6 && expected == 1.6 );
+
+    expected = .64;
+    ok = a0.compare_exchange_strong(expected, 12.8);
+    VERIFY( ok && a0.load() == 12.8 );
+    expected = 1.6;
+    ok = a1.compare_exchange_strong(expected, 2.56, mo);
+    VERIFY( ok && a1.load() == 2.56 );
+    expected = 0.5;
+    ok = a0.compare_exchange_strong(expected, 3.2);
+    VERIFY( !ok && a0.load() == 12.8 && expected == 12.8 );
+    expected = 0.5;
+    ok = a1.compare_exchange_strong(expected, 3.2, mo);
+    VERIFY( !ok && a1.load() == 2.56 && expected == 2.56 );
+
+    f0 = a0.fetch_add(1.2);
+    VERIFY( f0 == 12.8 );
+    VERIFY( a0 == 14.0 );
+    f1 = a1.fetch_add(2.4, mo);
+    VERIFY( f1 == 2.56 );
+    VERIFY( a1 == 4.96 );
+
+    f0 = a0.fetch_sub(1.2);
+    VERIFY( f0 == 14.0 );
+    VERIFY( a0 == 12.8 );
+    f1 = a1.fetch_sub(3.5, mo);
+    VERIFY( f1 == 4.96 );
+    VERIFY( a1 == 1.46 );
+
+    f0 = a0 += 1.2;
+    VERIFY( f0 == 14.0 );
+    VERIFY( a0 == 14.0 );
+
+    f0 = a0 -= 0.8;
+    VERIFY( f0 == 13.2 );
+    VERIFY( a0 == 13.2 );
+  }
+}
+
+void
+test03()
+{
+  const auto mo = std::memory_order_relaxed;
+  bool ok;
+  long double expected;
+
+  if constexpr (std::atomic<long double>::is_always_lock_free)
+  {
+    std::atomic<long double> a0;
+    std::atomic<long double> a1(1.0l);
+    ok = a0.is_lock_free();
+    a0 = a1.load();
+    VERIFY( a0.load() == a1.load() );
+    VERIFY( a0.load(mo) == a0.load() );
+    a0.store(0.5l);
+    a1.store(0.5l, mo);
+    VERIFY( a0.load() == a1.load() );
+    auto f0 = a0.exchange(12.5l);
+    auto f1 = a1.exchange(12.5l, mo);
+    VERIFY( a0 == 12.5l );
+    VERIFY( a0.load() == a1.load() );
+    VERIFY( f0 == 0.5l );
+    VERIFY( f0 == f1 );
+
+    expected = 12.5l;
+    while (!a0.compare_exchange_weak(expected, 1.6l, mo, mo))
+    { /* weak form can fail spuriously */ }
+    VERIFY( a0.load() == 1.6l );
+    VERIFY( expected == 12.5l );
+    expected = 1.5l;
+    ok = a1.compare_exchange_weak(expected, 1.6l, mo, mo);
+    VERIFY( !ok && a1.load() == 12.5l && expected == 12.5l );
+    VERIFY( expected == 12.5l );
+    expected = 1.6l;
+    ok = a0.compare_exchange_strong(expected, 3.2l, mo, mo);
+    VERIFY( ok && a0.load() == 3.2l );
+    VERIFY( expected == 1.6l );
+    expected = 1.5l;
+    ok = a1.compare_exchange_strong(expected, 3.2l, mo, mo);
+    VERIFY( !ok && a1.load() == 12.5l && expected == 12.5l );
+
+    expected = 3.2l;
+    while (!a0.compare_exchange_weak(expected, .64l))
+    { /* weak form can fail spuriously */ }
+    VERIFY( a0.load() == .64l );
+    expected = 12.5l;
+    while (!a1.compare_exchange_weak(expected, 1.6l, mo))
+    { /* weak form can fail spuriously */ }
+    VERIFY( a1.load() == 1.6l );
+    expected = 0.5l;
+    ok = a0.compare_exchange_weak(expected, 3.2l);
+    VERIFY( !ok && a0.load() == .64l && expected == .64l );
+    expected = 0.5l;
+    ok = a1.compare_exchange_weak(expected, 3.2l, mo);
+    VERIFY( !ok && a1.load() == 1.6l && expected == 1.6l );
+
+    expected = .64l;
+    ok = a0.compare_exchange_strong(expected, 12.8l);
+    VERIFY( ok && a0.load() == 12.8l );
+    expected = 1.6l;
+    ok = a1.compare_exchange_strong(expected, 2.56l, mo);
+    VERIFY( ok && a1.load() == 2.56l );
+    expected = 0.5l;
+    ok = a0.compare_exchange_strong(expected, 3.2l);
+    VERIFY( !ok && a0.load() == 12.8l && expected == 12.8l );
+    expected = 0.5l;
+    ok = a1.compare_exchange_strong(expected, 3.2l, mo);
+    VERIFY( !ok && a1.load() == 2.56l && expected == 2.56l );
+
+    f0 = a0.fetch_add(1.2l);
+    VERIFY( f0 == 12.8l );
+    VERIFY( a0 == 14.0l );
+    f1 = a1.fetch_add(2.4l, mo);
+    VERIFY( f1 == 2.56l );
+    VERIFY( a1 == 4.96l );
+
+    f0 = a0.fetch_sub(1.2l);
+    VERIFY( f0 == 14.0l );
+    VERIFY( a0 == 12.8l );
+    f1 = a1.fetch_sub(3.5l, mo);
+    VERIFY( f1 == 4.96l );
+    VERIFY( a1 == 1.46l );
+
+    f0 = a0 += 1.2l;
+    VERIFY( f0 == 14.0l );
+    VERIFY( a0 == 14.0l );
+
+    f0 = a0 -= 0.8l;
+    VERIFY( f0 == 13.2l );
+    VERIFY( a0 == 13.2l );
+  }
+
+  // Repeat for volatile std::atomic<long double>
+  if constexpr (std::atomic<long double>::is_always_lock_free)
+  {
+    volatile std::atomic<long double> a0;
+    volatile std::atomic<long double> a1(1.0l);
+    ok = a0.is_lock_free();
+    a0 = a1.load();
+    VERIFY( a0.load() == a1.load() );
+    VERIFY( a0.load(mo) == a0.load() );
+    a0.store(0.5l);
+    a1.store(0.5l, mo);
+    VERIFY( a0.load() == a1.load() );
+    auto f0 = a0.exchange(12.5l);
+    auto f1 = a1.exchange(12.5l, mo);
+    VERIFY( a0 == 12.5l );
+    VERIFY( a0.load() == a1.load() );
+    VERIFY( f0 == 0.5l );
+    VERIFY( f0 == f1 );
+
+    expected = 12.5l;
+    while (!a0.compare_exchange_weak(expected, 1.6l, mo, mo))
+    { /* weak form can fail spuriously */ }
+    VERIFY( a0.load() == 1.6l );
+    VERIFY( expected == 12.5l );
+    expected = 1.5l;
+    ok = a1.compare_exchange_weak(expected, 1.6l, mo, mo);
+    VERIFY( !ok && a1.load() == 12.5l && expected == 12.5l );
+    VERIFY( expected == 12.5l );
+    expected = 1.6l;
+    ok = a0.compare_exchange_strong(expected, 3.2l, mo, mo);
+    VERIFY( ok && a0.load() == 3.2l );
+    VERIFY( expected == 1.6l );
+    expected = 1.5l;
+    ok = a1.compare_exchange_strong(expected, 3.2l, mo, mo);
+    VERIFY( !ok && a1.load() == 12.5l && expected == 12.5l );
+
+    expected = 3.2l;
+    while (!a0.compare_exchange_weak(expected, .64l))
+    { /* weak form can fail spuriously */ }
+    VERIFY( a0.load() == .64l );
+    expected = 12.5l;
+    while (!a1.compare_exchange_weak(expected, 1.6l, mo))
+    { /* weak form can fail spuriously */ }
+    VERIFY( a1.load() == 1.6l );
+    expected = 0.5l;
+    ok = a0.compare_exchange_weak(expected, 3.2l);
+    VERIFY( !ok && a0.load() == .64l && expected == .64l );
+    expected = 0.5l;
+    ok = a1.compare_exchange_weak(expected, 3.2l, mo);
+    VERIFY( !ok && a1.load() == 1.6l && expected == 1.6l );
+
+    expected = .64l;
+    ok = a0.compare_exchange_strong(expected, 12.8l);
+    VERIFY( ok && a0.load() == 12.8l );
+    expected = 1.6l;
+    ok = a1.compare_exchange_strong(expected, 2.56l, mo);
+    VERIFY( ok && a1.load() == 2.56l );
+    expected = 0.5l;
+    ok = a0.compare_exchange_strong(expected, 3.2l);
+    VERIFY( !ok && a0.load() == 12.8l && expected == 12.8l );
+    expected = 0.5l;
+    ok = a1.compare_exchange_strong(expected, 3.2l, mo);
+    VERIFY( !ok && a1.load() == 2.56l && expected == 2.56l );
+
+    f0 = a0.fetch_add(1.2l);
+    VERIFY( f0 == 12.8l );
+    VERIFY( a0 == 14.0l );
+    f1 = a1.fetch_add(2.4l, mo);
+    VERIFY( f1 == 2.56l );
+    VERIFY( a1 == 4.96l );
+
+    f0 = a0.fetch_sub(1.2l);
+    VERIFY( f0 == 14.0l );
+    VERIFY( a0 == 12.8l );
+    f1 = a1.fetch_sub(3.5l, mo);
+    VERIFY( f1 == 4.96l );
+    VERIFY( a1 == 1.46l );
+
+    f0 = a0 += 1.2l;
+    VERIFY( f0 == 14.0l );
+    VERIFY( a0 == 14.0l );
+
+    f0 = a0 -= 0.8l;
+    VERIFY( f0 == 13.2l );
+    VERIFY( a0 == 13.2l );
+  }
+}
+
+int
+main()
+{
+  test01();
+  test02();
+  test03();
+}
diff --git a/libstdc++-v3/testsuite/29_atomics/atomic_float/requirements.cc b/libstdc++-v3/testsuite/29_atomics/atomic_float/requirements.cc
new file mode 100644
index 0000000..e52608c
--- /dev/null
@@ -0,0 +1,69 @@
+// Copyright (C) 2019 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library.  This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING3.  If not see
+// <http://www.gnu.org/licenses/>.
+
+// { dg-options "-std=gnu++2a" }
+// { dg-do compile { target c++2a } }
+
+#include <atomic>
+
+void
+test01()
+{
+  using A = std::atomic<float>;
+  static_assert( std::is_standard_layout_v<A> );
+  static_assert( std::is_trivially_default_constructible_v<A> );
+  static_assert( std::is_trivially_destructible_v<A> );
+  static_assert( std::is_same_v<A::value_type, float> );
+  static_assert( std::is_same_v<A::difference_type, A::value_type> );
+  static_assert( !std::is_copy_constructible_v<A> );
+  static_assert( !std::is_move_constructible_v<A> );
+  static_assert( !std::is_copy_assignable_v<A> );
+  static_assert( !std::is_move_assignable_v<A> );
+  static_assert( !std::is_assignable_v<volatile A&, const A&> );
+}
+
+void
+test02()
+{
+  using A = std::atomic<double>;
+  static_assert( std::is_standard_layout_v<A> );
+  static_assert( std::is_trivially_default_constructible_v<A> );
+  static_assert( std::is_trivially_destructible_v<A> );
+  static_assert( std::is_same_v<A::value_type, double> );
+  static_assert( std::is_same_v<A::difference_type, A::value_type> );
+  static_assert( !std::is_copy_constructible_v<A> );
+  static_assert( !std::is_move_constructible_v<A> );
+  static_assert( !std::is_copy_assignable_v<A> );
+  static_assert( !std::is_move_assignable_v<A> );
+  static_assert( !std::is_assignable_v<volatile A&, const A&> );
+}
+
+void
+test03()
+{
+  using A = std::atomic<long double>;
+  static_assert( std::is_standard_layout_v<A> );
+  static_assert( std::is_trivially_default_constructible_v<A> );
+  static_assert( std::is_trivially_destructible_v<A> );
+  static_assert( std::is_same_v<A::value_type, long double> );
+  static_assert( std::is_same_v<A::difference_type, A::value_type> );
+  static_assert( !std::is_copy_constructible_v<A> );
+  static_assert( !std::is_move_constructible_v<A> );
+  static_assert( !std::is_copy_assignable_v<A> );
+  static_assert( !std::is_move_assignable_v<A> );
+  static_assert( !std::is_assignable_v<volatile A&, const A&> );
+}
diff --git a/libstdc++-v3/testsuite/29_atomics/atomic_ref/deduction.cc b/libstdc++-v3/testsuite/29_atomics/atomic_ref/deduction.cc
new file mode 100644 (file)
index 0000000..231901c
--- /dev/null
@@ -0,0 +1,41 @@
+// Copyright (C) 2019 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library.  This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING3.  If not see
+// <http://www.gnu.org/licenses/>.
+
+// { dg-options "-std=gnu++2a" }
+// { dg-do compile { target c++2a } }
+
+#include <atomic>
+
+void
+test01()
+{
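+  // Class template argument deduction should deduce atomic_ref<T>
+  // from an lvalue of type T.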
+  int i = 0;
+  std::atomic_ref a0(i);
+  static_assert(std::is_same_v<decltype(a0), std::atomic_ref<int>>);
+
+  float f = 1.0f;
+  std::atomic_ref a1(f);
+  static_assert(std::is_same_v<decltype(a1), std::atomic_ref<float>>);
+
+  int* p = &i;
+  std::atomic_ref a2(p);
+  static_assert(std::is_same_v<decltype(a2), std::atomic_ref<int*>>);
+
+  struct X { } x;
+  std::atomic_ref a3(x);
+  static_assert(std::is_same_v<decltype(a3), std::atomic_ref<X>>);
+}
diff --git a/libstdc++-v3/testsuite/29_atomics/atomic_ref/float.cc b/libstdc++-v3/testsuite/29_atomics/atomic_ref/float.cc
new file mode 100644 (file)
index 0000000..0633f28
--- /dev/null
@@ -0,0 +1,320 @@
+// Copyright (C) 2019 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library.  This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING3.  If not see
+// <http://www.gnu.org/licenses/>.
+
+// { dg-options "-std=gnu++2a" }
+// { dg-do run { target c++2a } }
+
+#include <atomic>
+#include <testsuite_hooks.h>
+
+void
+test01()
+{
+  float value;
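+  // This test is not linked with libatomic, so only exercise the operations
+  // when atomic_ref<float> is guaranteed to be lock-free.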
+  if constexpr (std::atomic_ref<float>::is_always_lock_free)
+  {
+    const auto mo = std::memory_order_relaxed;
+    std::atomic_ref<float> a(value);
+    bool ok = a.is_lock_free();
+    if constexpr (std::atomic_ref<float>::is_always_lock_free)
+      VERIFY( ok );
+    a = 1.6f;
+    VERIFY( a.load() == 1.6f );
+    a.store(0.8f);
+    VERIFY( a.load(mo) == 0.8f );
+    a.store(3.2f, mo);
+    VERIFY( a.load() == 3.2f );
+    auto v = a.exchange(6.4f);
+    VERIFY( a == 6.4f );
+    VERIFY( v == 3.2f );
+    v = a.exchange(1.28f, mo);
+    VERIFY( a == 1.28f );
+    VERIFY( v == 6.4f );
+
+    auto expected = a.load();
+    while (!a.compare_exchange_weak(expected, 25.6f, mo, mo))
+    { /* weak form can fail spuriously */ }
+    VERIFY( a.load() == 25.6f );
+    VERIFY( expected == 1.28f );
+    expected = 3.2f;
+    ok = a.compare_exchange_weak(expected, 51.2f, mo, mo);
+    VERIFY( !ok && a.load() == 25.6f && expected == 25.6f );
+    ok = a.compare_exchange_strong(expected, 51.2f, mo, mo);
+    VERIFY( ok && a.load() == 51.2f && expected == 25.6f );
+    expected = 0.0f;
+    ok = a.compare_exchange_strong(expected, 1.28f, mo, mo);
+    VERIFY( !ok && a.load() == 51.2f && expected == 51.2f );
+
+    while (!a.compare_exchange_weak(expected, 25.6f))
+    { /* weak form can fail spuriously */ }
+    VERIFY( a.load() == 25.6f  && expected == 51.2f );
+    expected = a.load();
+    while (!a.compare_exchange_weak(expected, 10.24f, mo))
+    { /* weak form can fail spuriously */ }
+    VERIFY( a.load() == 10.24f && expected == 25.6f );
+    expected = a.load() + 1;
+    ok = a.compare_exchange_weak(expected, 40.96f);
+    VERIFY( !ok && a.load() == 10.24f && expected == 10.24f );
+    expected = a.load() + 1;
+    ok = a.compare_exchange_weak(expected, 40.96f, mo);
+    VERIFY( !ok && a.load() == 10.24f && expected == 10.24f );
+
+    ok = a.compare_exchange_strong(expected, 1.024f);
+    VERIFY( ok && a.load() == 1.024f && expected == 10.24f );
+    expected = a.load();
+    ok = a.compare_exchange_strong(expected, 204.8f, mo);
+    VERIFY( ok && a.load() == 204.8f && expected == 1.024f );
+    expected = a.load() + 1;
+    ok = a.compare_exchange_strong(expected, 6.4f);
+    VERIFY( !ok && a.load() == 204.8f && expected == 204.8f );
+    expected = a.load() + 1;
+    ok = a.compare_exchange_strong(expected, 6.4f, mo);
+    VERIFY( !ok && a.load() == 204.8f && expected == 204.8f );
+
+    v = a.fetch_add(3.2f);
+    VERIFY( v == 204.8f );
+    VERIFY( a == 208.0f );
+    v = a.fetch_add(-8.5f, mo);
+    VERIFY( v == 208.0f );
+    VERIFY( a == 199.5f );
+
+    v = a.fetch_sub(109.5f);
+    VERIFY( v == 199.5f );
+    VERIFY( a == 90.0f );
+    v = a.fetch_sub(2, mo);
+    VERIFY( v == 90.0f );
+    VERIFY( a == 88.0f );
+
+    v = a += 5.0f;
+    VERIFY( v == 93.0f );
+    VERIFY( a == 93.0f );
+
+    v = a -= 6.5f;
+    VERIFY( v == 86.5f );
+    VERIFY( a == 86.5f );
+  }
+
+  if constexpr (std::atomic_ref<float>::is_always_lock_free)
+    VERIFY( value == 86.5f );
+}
+
+void
+test02()
+{
+  double value;
+  if constexpr (std::atomic_ref<double>::is_always_lock_free)
+  {
+    const auto mo = std::memory_order_relaxed;
+    std::atomic_ref<double> a(value);
+    bool ok = a.is_lock_free();
+    if constexpr (std::atomic_ref<double>::is_always_lock_free)
+      VERIFY( ok );
+    a = 1.6;
+    VERIFY( a.load() == 1.6 );
+    a.store(0.8);
+    VERIFY( a.load(mo) == 0.8 );
+    a.store(3.2, mo);
+    VERIFY( a.load() == 3.2 );
+    auto v = a.exchange(6.4);
+    VERIFY( a == 6.4 );
+    VERIFY( v == 3.2 );
+    v = a.exchange(1.28, mo);
+    VERIFY( a == 1.28 );
+    VERIFY( v == 6.4 );
+
+    auto expected = a.load();
+    while (!a.compare_exchange_weak(expected, 25.6, mo, mo))
+    { /* weak form can fail spuriously */ }
+    VERIFY( a.load() == 25.6 );
+    VERIFY( expected == 1.28 );
+    expected = 3.2;
+    ok = a.compare_exchange_weak(expected, 51.2, mo, mo);
+    VERIFY( !ok && a.load() == 25.6 && expected == 25.6 );
+    ok = a.compare_exchange_strong(expected, 51.2, mo, mo);
+    VERIFY( ok && a.load() == 51.2 && expected == 25.6 );
+    expected = 0.0;
+    ok = a.compare_exchange_strong(expected, 1.28, mo, mo);
+    VERIFY( !ok && a.load() == 51.2 && expected == 51.2 );
+
+    while (!a.compare_exchange_weak(expected, 25.6))
+    { /* weak form can fail spuriously */ }
+    VERIFY( a.load() == 25.6  && expected == 51.2 );
+    expected = a.load();
+    while (!a.compare_exchange_weak(expected, 10.24, mo))
+    { /* weak form can fail spuriously */ }
+    VERIFY( a.load() == 10.24 && expected == 25.6 );
+    expected = a.load() + 1;
+    ok = a.compare_exchange_weak(expected, 40.96);
+    VERIFY( !ok && a.load() == 10.24 && expected == 10.24 );
+    expected = a.load() + 1;
+    ok = a.compare_exchange_weak(expected, 40.96, mo);
+    VERIFY( !ok && a.load() == 10.24 && expected == 10.24 );
+
+    ok = a.compare_exchange_strong(expected, 1.024);
+    VERIFY( ok && a.load() == 1.024 && expected == 10.24 );
+    expected = a.load();
+    ok = a.compare_exchange_strong(expected, 204.8, mo);
+    VERIFY( ok && a.load() == 204.8 && expected == 1.024 );
+    expected = a.load() + 1;
+    ok = a.compare_exchange_strong(expected, 6.4);
+    VERIFY( !ok && a.load() == 204.8 && expected == 204.8 );
+    expected = a.load() + 1;
+    ok = a.compare_exchange_strong(expected, 6.4, mo);
+    VERIFY( !ok && a.load() == 204.8 && expected == 204.8 );
+
+    v = a.fetch_add(3.2);
+    VERIFY( v == 204.8 );
+    VERIFY( a == 208.0 );
+    v = a.fetch_add(-8.5, mo);
+    VERIFY( v == 208.0 );
+    VERIFY( a == 199.5 );
+
+    v = a.fetch_sub(109.5);
+    VERIFY( v == 199.5 );
+    VERIFY( a == 90.0 );
+    v = a.fetch_sub(2, mo);
+    VERIFY( v == 90.0 );
+    VERIFY( a == 88.0 );
+
+    v = a += 5.0;
+    VERIFY( v == 93.0 );
+    VERIFY( a == 93.0 );
+
+    v = a -= 6.5;
+    VERIFY( v == 86.5 );
+    VERIFY( a == 86.5 );
+  }
+
+  if constexpr (std::atomic_ref<double>::is_always_lock_free)
+    VERIFY( value == 86.5 );
+}
+
+void
+test03()
+{
+  long double value;
+  if constexpr (std::atomic_ref<long double>::is_always_lock_free)
+  {
+    const auto mo = std::memory_order_relaxed;
+    std::atomic_ref<long double> a(value);
+    bool ok = a.is_lock_free();
+    if constexpr (std::atomic_ref<long double>::is_always_lock_free)
+      VERIFY( ok );
+    a = 1.6l;
+    VERIFY( a.load() == 1.6l );
+    a.store(0.8l);
+    VERIFY( a.load(mo) == 0.8l );
+    a.store(3.2l, mo);
+    VERIFY( a.load() == 3.2l );
+    auto v = a.exchange(6.4l);
+    VERIFY( a == 6.4l );
+    VERIFY( v == 3.2l );
+    v = a.exchange(1.28l, mo);
+    VERIFY( a == 1.28l );
+    VERIFY( v == 6.4l );
+
+    auto expected = a.load();
+    while (!a.compare_exchange_weak(expected, 25.6l, mo, mo))
+    { /* weak form can fail spuriously */ }
+    VERIFY( a.load() == 25.6l );
+    VERIFY( expected == 1.28l );
+    expected = 3.2l;
+    ok = a.compare_exchange_weak(expected, 51.2l, mo, mo);
+    VERIFY( !ok && a.load() == 25.6l && expected == 25.6l );
+    ok = a.compare_exchange_strong(expected, 51.2l, mo, mo);
+    VERIFY( ok && a.load() == 51.2l && expected == 25.6l );
+    expected = 0.0l;
+    ok = a.compare_exchange_strong(expected, 1.28l, mo, mo);
+    VERIFY( !ok && a.load() == 51.2l && expected == 51.2l );
+
+    while (!a.compare_exchange_weak(expected, 25.6l))
+    { /* weak form can fail spuriously */ }
+    VERIFY( a.load() == 25.6l  && expected == 51.2l );
+    expected = a.load();
+    while (!a.compare_exchange_weak(expected, 10.24l, mo))
+    { /* weak form can fail spuriously */ }
+    VERIFY( a.load() == 10.24l && expected == 25.6l );
+    expected = a.load() + 1;
+    ok = a.compare_exchange_weak(expected, 40.96l);
+    VERIFY( !ok && a.load() == 10.24l && expected == 10.24l );
+    expected = a.load() + 1;
+    ok = a.compare_exchange_weak(expected, 40.96l, mo);
+    VERIFY( !ok && a.load() == 10.24l && expected == 10.24l );
+
+    ok = a.compare_exchange_strong(expected, 1.024l);
+    VERIFY( ok && a.load() == 1.024l && expected == 10.24l );
+    expected = a.load();
+    ok = a.compare_exchange_strong(expected, 204.8l, mo);
+    VERIFY( ok && a.load() == 204.8l && expected == 1.024l );
+    expected = a.load() + 1;
+    ok = a.compare_exchange_strong(expected, 6.4l);
+    VERIFY( !ok && a.load() == 204.8l && expected == 204.8l );
+    expected = a.load() + 1;
+    ok = a.compare_exchange_strong(expected, 6.4l, mo);
+    VERIFY( !ok && a.load() == 204.8l && expected == 204.8l );
+
+    v = a.fetch_add(3.2l);
+    VERIFY( v == 204.8l );
+    VERIFY( a == 208.0l );
+    v = a.fetch_add(-8.5l, mo);
+    VERIFY( v == 208.0l );
+    VERIFY( a == 199.5l );
+
+    v = a.fetch_sub(109.5l);
+    VERIFY( v == 199.5l );
+    VERIFY( a == 90.0l );
+    v = a.fetch_sub(2, mo);
+    VERIFY( v == 90.0l );
+    VERIFY( a == 88.0l );
+
+    v = a += 5.0l;
+    VERIFY( v == 93.0l );
+    VERIFY( a == 93.0l );
+
+    v = a -= 6.5l;
+    VERIFY( v == 86.5l );
+    VERIFY( a == 86.5l );
+  }
+
+  if constexpr (std::atomic_ref<long double>::is_always_lock_free)
+    VERIFY( value == 86.5l );
+}
+
+void
+test04()
+{
+  if constexpr (std::atomic_ref<float*>::is_always_lock_free)
+  {
+    float i = 0;
+    float* ptr = 0;
+    std::atomic_ref<float*> a0(ptr);
+    std::atomic_ref<float*> a1(ptr);
+    std::atomic_ref<float*> a2(a0);
+    a0 = &i;
+    VERIFY( a1 == &i );
+    VERIFY( a2 == &i );
+  }
+}
+
+int
+main()
+{
+  test01();
+  test02();
+  test03();
+  test04();
+}
diff --git a/libstdc++-v3/testsuite/29_atomics/atomic_ref/generic.cc b/libstdc++-v3/testsuite/29_atomics/atomic_ref/generic.cc
new file mode 100644 (file)
index 0000000..61ae61b
--- /dev/null
@@ -0,0 +1,122 @@
+// Copyright (C) 2019 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library.  This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING3.  If not see
+// <http://www.gnu.org/licenses/>.
+
+// { dg-options "-std=gnu++2a" }
+// { dg-do run { target c++2a } }
+// { dg-add-options libatomic }
+
+#include <atomic>
+#include <limits.h>
+#include <testsuite_hooks.h>
+
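+// A trivially copyable class type, as required for use with std::atomic_ref.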
+struct X
+{
+  X() = default;
+  X(int i) : i(i) { }
+  bool operator==(int rhs) const { return i == rhs; }
+  int i;
+};
+
+void
+test01()
+{
+  X value;
+
+  {
+    const auto mo = std::memory_order_relaxed;
+    std::atomic_ref<X> a(value);
+    bool ok = a.is_lock_free();
+    if constexpr (std::atomic_ref<X>::is_always_lock_free)
+      VERIFY( ok );
+    a = X{};
+    VERIFY( a.load() == 0 );
+    VERIFY( a.load(mo) == 0 );
+    a.store(1);
+    VERIFY( a.load() == 1 );
+    auto v = a.exchange(2);
+    VERIFY( a.load() == 2 );
+    VERIFY( v == 1 );
+    v = a.exchange(3, mo);
+    VERIFY( a.load() == 3 );
+    VERIFY( v == 2 );
+
+    auto expected = a.load();
+    while (!a.compare_exchange_weak(expected, 4, mo, mo))
+    { /* weak form can fail spuriously */ }
+    VERIFY( a.load() == 4 );
+    VERIFY( expected == 3 );
+    expected = 1;
+    ok = a.compare_exchange_weak(expected, 5, mo, mo);
+    VERIFY( !ok && a.load() == 4 && expected == 4 );
+    ok = a.compare_exchange_strong(expected, 5, mo, mo);
+    VERIFY( ok && a.load() == 5 && expected == 4 );
+    expected = 0;
+    ok = a.compare_exchange_strong(expected, 3, mo, mo);
+    VERIFY( !ok && a.load() == 5 && expected == 5 );
+
+    while (!a.compare_exchange_weak(expected, 4))
+    { /* weak form can fail spuriously */ }
+    VERIFY( a.load() == 4  && expected == 5 );
+    expected = a.load();
+    while (!a.compare_exchange_weak(expected, 6, mo))
+    { /* weak form can fail spuriously */ }
+    VERIFY( a.load() == 6 && expected == 4 );
+    expected = a.load();
+    expected.i += 1;
+    ok = a.compare_exchange_weak(expected, -8);
+    VERIFY( !ok && a.load() == 6 && expected == 6 );
+    expected = a.load();
+    expected.i += 1;
+    ok = a.compare_exchange_weak(expected, 8, mo);
+    VERIFY( !ok && a.load() == 6 && expected == 6 );
+
+    ok = a.compare_exchange_strong(expected, -6);
+    VERIFY( ok && a.load() == -6 && expected == 6 );
+    expected = a.load();
+    ok = a.compare_exchange_strong(expected, 7, mo);
+    VERIFY( ok && a.load() == 7 && expected == -6 );
+    expected = a.load();
+    expected.i += 1;
+    ok = a.compare_exchange_strong(expected, 2);
+    VERIFY( !ok && a.load() == 7 && expected == 7 );
+    expected = a.load();
+    expected.i += 1;
+    ok = a.compare_exchange_strong(expected, 2, mo);
+    VERIFY( !ok && a.load() == 7 && expected == 7 );
+  }
+
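+  // All stores went through the atomic_ref to the referenced object, so the
+  // plain variable observes the last value written.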
+  VERIFY( value == 7 );
+}
+
+void
+test02()
+{
+  X i;
+  std::atomic_ref<X> a0(i);
+  std::atomic_ref<X> a1(i);
+  std::atomic_ref<X> a2(a0);
+  a0 = 42;
+  VERIFY( a1.load() == 42 );
+  VERIFY( a2.load() == 42 );
+}
+
+int
+main()
+{
+  test01();
+  test02();
+}
diff --git a/libstdc++-v3/testsuite/29_atomics/atomic_ref/integral.cc b/libstdc++-v3/testsuite/29_atomics/atomic_ref/integral.cc
new file mode 100644 (file)
index 0000000..4b5b4d1
--- /dev/null
@@ -0,0 +1,331 @@
+// Copyright (C) 2019 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library.  This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING3.  If not see
+// <http://www.gnu.org/licenses/>.
+
+// { dg-options "-std=gnu++2a" }
+// { dg-do run { target c++2a } }
+// { dg-add-options libatomic }
+
+#include <atomic>
+#include <limits.h>
+#include <testsuite_hooks.h>
+
+void
+test01()
+{
+  int value;
+
+  {
+    const auto mo = std::memory_order_relaxed;
+    std::atomic_ref<int> a(value);
+    bool ok = a.is_lock_free();
+    if constexpr (std::atomic_ref<int>::is_always_lock_free)
+      VERIFY( ok );
+    a = 0;
+    VERIFY( a.load() == 0 );
+    VERIFY( a.load(mo) == 0 );
+    a.store(1);
+    VERIFY( a.load() == 1 );
+    auto v = a.exchange(2);
+    VERIFY( a == 2 );
+    VERIFY( v == 1 );
+    v = a.exchange(3, mo);
+    VERIFY( a == 3 );
+    VERIFY( v == 2 );
+
+    auto expected = a.load();
+    while (!a.compare_exchange_weak(expected, 4, mo, mo))
+    { /* weak form can fail spuriously */ }
+    VERIFY( a.load() == 4 );
+    VERIFY( expected == 3 );
+    expected = 1;
+    ok = a.compare_exchange_weak(expected, 5, mo, mo);
+    VERIFY( !ok && a.load() == 4 && expected == 4 );
+    ok = a.compare_exchange_strong(expected, 5, mo, mo);
+    VERIFY( ok && a.load() == 5 && expected == 4 );
+    expected = 0;
+    ok = a.compare_exchange_strong(expected, 3, mo, mo);
+    VERIFY( !ok && a.load() == 5 && expected == 5 );
+
+    while (!a.compare_exchange_weak(expected, 4))
+    { /* weak form can fail spuriously */ }
+    VERIFY( a.load() == 4  && expected == 5 );
+    expected = a.load();
+    while (!a.compare_exchange_weak(expected, 6, mo))
+    { /* weak form can fail spuriously */ }
+    VERIFY( a.load() == 6 && expected == 4 );
+    expected = a.load() + 1;
+    ok = a.compare_exchange_weak(expected, -8);
+    VERIFY( !ok && a.load() == 6 && expected == 6 );
+    expected = a.load() + 1;
+    ok = a.compare_exchange_weak(expected, 8, mo);
+    VERIFY( !ok && a.load() == 6 && expected == 6 );
+
+    ok = a.compare_exchange_strong(expected, -6);
+    VERIFY( ok && a.load() == -6 && expected == 6 );
+    expected = a.load();
+    ok = a.compare_exchange_strong(expected, 7, mo);
+    VERIFY( ok && a.load() == 7 && expected == -6 );
+    expected = a.load() + 1;
+    ok = a.compare_exchange_strong(expected, 2);
+    VERIFY( !ok && a.load() == 7 && expected == 7 );
+    expected = a.load() + 1;
+    ok = a.compare_exchange_strong(expected, 2, mo);
+    VERIFY( !ok && a.load() == 7 && expected == 7 );
+
+    v = a.fetch_add(2);
+    VERIFY( v == 7 );
+    VERIFY( a == 9 );
+    v = a.fetch_add(-30, mo);
+    VERIFY( v == 9 );
+    VERIFY( a == -21 );
+
+    v = a.fetch_sub(3);
+    VERIFY( v == -21 );
+    VERIFY( a == -24 );
+    v = a.fetch_sub(-41, mo);
+    VERIFY( v == -24 );
+    VERIFY( a == 17 );
+
+    v = a.fetch_and(0x101);
+    VERIFY( v == 17 );
+    VERIFY( a == 1 );
+    a = 0x17;
+    v = a.fetch_and(0x23, mo);
+    VERIFY( v == 0x17 );
+    VERIFY( a == 3 );
+
+    v = a.fetch_or(0x101);
+    VERIFY( v == 3 );
+    VERIFY( a == 0x103 );
+    v = a.fetch_or(0x23, mo);
+    VERIFY( v == 0x103 );
+    VERIFY( a == 0x123 );
+
+    v = a.fetch_xor(0x101);
+    VERIFY( v == 0x123 );
+    VERIFY( a == 0x022 );
+    v = a.fetch_xor(0x123, mo);
+    VERIFY( v == 0x022 );
+    VERIFY( a == 0x101 );
+
+    v = a++;
+    VERIFY( v == 0x101 );
+    VERIFY( a == 0x102 );
+    v = a--;
+    VERIFY( v == 0x102 );
+    VERIFY( a == 0x101 );
+    v = ++a;
+    VERIFY( v == 0x102 );
+    VERIFY( a == 0x102 );
+    v = --a;
+    VERIFY( v == 0x101 );
+    VERIFY( a == 0x101 );
+
+    v = a += -10;
+    VERIFY( v == 247 );
+    VERIFY( a == 247 );
+
+    v = a -= 250;
+    VERIFY( v == -3 );
+    VERIFY( a == -3 );
+
+    a = 0x17;
+    v = a &= 0x102;
+    VERIFY( v == 2 );
+    VERIFY( a == 2 );
+
+    v = a |= 0x101;
+    VERIFY( v == 0x103 );
+    VERIFY( a == 0x103 );
+
+    v = a ^= 0x121;
+    VERIFY( v == 0x022 );
+    VERIFY( a == 0x022 );
+  }
+
+  VERIFY( value == 0x022 );
+}
+
+void
+test02()
+{
+  unsigned short value;
+
+  {
+    const auto mo = std::memory_order_relaxed;
+    std::atomic_ref<unsigned short> a(value);
+    bool ok = a.is_lock_free();
+    if constexpr (std::atomic_ref<unsigned short>::is_always_lock_free)
+      VERIFY( ok );
+    a = 0;
+    VERIFY( a.load() == 0 );
+    VERIFY( a.load(mo) == 0 );
+    a.store(1);
+    VERIFY( a.load() == 1 );
+    auto v = a.exchange(2);
+    VERIFY( a == 2 );
+    VERIFY( v == 1 );
+    v = a.exchange(3, mo);
+    VERIFY( a == 3 );
+    VERIFY( v == 2 );
+
+    auto expected = a.load();
+    while (!a.compare_exchange_weak(expected, 4, mo, mo))
+    { /* weak form can fail spuriously */ }
+    VERIFY( a.load() == 4 );
+    VERIFY( expected == 3 );
+    expected = 1;
+    ok = a.compare_exchange_weak(expected, 5, mo, mo);
+    VERIFY( !ok && a.load() == 4 && expected == 4 );
+    ok = a.compare_exchange_strong(expected, 5, mo, mo);
+    VERIFY( ok && a.load() == 5 && expected == 4 );
+    expected = 0;
+    ok = a.compare_exchange_strong(expected, 3, mo, mo);
+    VERIFY( !ok && a.load() == 5 && expected == 5 );
+
+    while (!a.compare_exchange_weak(expected, 4))
+    { /* weak form can fail spuriously */ }
+    VERIFY( a.load() == 4  && expected == 5 );
+    expected = a.load();
+    while (!a.compare_exchange_weak(expected, 6, mo))
+    { /* weak form can fail spuriously */ }
+    VERIFY( a.load() == 6 && expected == 4 );
+    expected = a.load() + 1;
+    ok = a.compare_exchange_weak(expected, -8);
+    VERIFY( !ok && a.load() == 6 && expected == 6 );
+    expected = a.load() + 1;
+    ok = a.compare_exchange_weak(expected, 8, mo);
+    VERIFY( !ok && a.load() == 6 && expected == 6 );
+
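+    // Results are converted back to unsigned short (i.e. reduced modulo
+    // 2^16), hence the casts in the comparisons below.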
+    ok = a.compare_exchange_strong(expected, -6);
+    VERIFY( ok && a.load() == (unsigned short)-6 && expected == 6 );
+    expected = a.load();
+    ok = a.compare_exchange_strong(expected, 7, mo);
+    VERIFY( ok && a.load() == 7 && expected == (unsigned short)-6 );
+    expected = a.load() + 1;
+    ok = a.compare_exchange_strong(expected, 2);
+    VERIFY( !ok && a.load() == 7 && expected == 7 );
+    expected = a.load() + 1;
+    ok = a.compare_exchange_strong(expected, 2, mo);
+    VERIFY( !ok && a.load() == 7 && expected == 7 );
+
+    v = a.fetch_add(2);
+    VERIFY( v == 7 );
+    VERIFY( a == 9 );
+    v = a.fetch_add(-30, mo);
+    VERIFY( v == 9 );
+    VERIFY( a == (unsigned short)-21 );
+
+    v = a.fetch_sub(3);
+    VERIFY( v == (unsigned short)-21 );
+    VERIFY( a == (unsigned short)-24 );
+    v = a.fetch_sub((unsigned short)-41, mo);
+    VERIFY( v == (unsigned short)-24 );
+    VERIFY( a == 17 );
+
+    v = a.fetch_and(0x21);
+    VERIFY( v == 17 );
+    VERIFY( a == 1 );
+    a = 0x17;
+    v = a.fetch_and(0x23, mo);
+    VERIFY( v == 0x17 );
+    VERIFY( a == 3 );
+
+    v = a.fetch_or(0x21);
+    VERIFY( v == 3 );
+    VERIFY( a == 0x23 );
+    v = a.fetch_or(0x44, mo);
+    VERIFY( v == 0x23 );
+    VERIFY( a == 0x67 );
+
+    v = a.fetch_xor(0x21);
+    VERIFY( v == 0x67 );
+    VERIFY( a == 0x46 );
+    v = a.fetch_xor(0x12, mo);
+    VERIFY( v == 0x46 );
+    VERIFY( a == 0x54 );
+
+    v = a++;
+    VERIFY( v == 0x54 );
+    VERIFY( a == 0x55 );
+    v = a--;
+    VERIFY( v == 0x55 );
+    VERIFY( a == 0x54 );
+    v = ++a;
+    VERIFY( v == 0x55 );
+    VERIFY( a == 0x55 );
+    v = --a;
+    VERIFY( v == 0x54 );
+    VERIFY( a == 0x54 );
+
+    v = a += -10;
+    VERIFY( v == 0x4a );
+    VERIFY( a == 0x4a );
+
+    v = a -= 15;
+    VERIFY( v == 0x3b );
+    VERIFY( a == 0x3b );
+
+    a = 0x17;
+    v = a &= 0x12;
+    VERIFY( v == 0x12 );
+    VERIFY( a == 0x12 );
+
+    v = a |= 0x34;
+    VERIFY( v == 0x36 );
+    VERIFY( a == 0x36 );
+
+    v = a ^= 0x12;
+    VERIFY( v == 0x24 );
+    VERIFY( a == 0x24 );
+  }
+
+  VERIFY( value == 0x24 );
+}
+
+void
+test03()
+{
+  int i = 0;
+  std::atomic_ref<int> a0(i);
+  std::atomic_ref<int> a1(i);
+  std::atomic_ref<int> a2(a0);
+  a0 = 42;
+  VERIFY( a1 == 42 );
+  VERIFY( a2 == 42 );
+}
+
+void
+test04()
+{
+  int i = INT_MIN;
+  std::atomic_ref<int> a(i);
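+  // Atomic integer arithmetic is defined to wrap rather than overflow,
+  // so decrementing INT_MIN yields INT_MAX.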
+  --a;
+  VERIFY( a == INT_MAX );
+  ++a;
+  VERIFY( a == INT_MIN );
+  a |= INT_MAX;
+  VERIFY( a == -1 );
+}
+
+int
+main()
+{
+  test01();
+  test02();
+  test03();
+  test04();
+}
diff --git a/libstdc++-v3/testsuite/29_atomics/atomic_ref/pointer.cc b/libstdc++-v3/testsuite/29_atomics/atomic_ref/pointer.cc
new file mode 100644 (file)
index 0000000..d5256d6
--- /dev/null
@@ -0,0 +1,225 @@
+// Copyright (C) 2019 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library.  This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING3.  If not see
+// <http://www.gnu.org/licenses/>.
+
+// { dg-options "-std=gnu++2a" }
+// { dg-do run { target c++2a } }
+// { dg-add-options libatomic }
+
+#include <atomic>
+#include <testsuite_hooks.h>
+
+void
+test01()
+{
+  long arr[10] = { };
+  long* value;
+
+  {
+    const auto mo = std::memory_order_relaxed;
+    std::atomic_ref<long*> a(value);
+    bool ok = a.is_lock_free();
+    if constexpr (std::atomic_ref<long*>::is_always_lock_free)
+      VERIFY( ok );
+    a = arr;
+    VERIFY( a.load() == arr );
+    VERIFY( a.load(mo) == arr );
+    a.store(arr+1);
+    VERIFY( a.load() == arr+1 );
+    auto v = a.exchange(arr+2);
+    VERIFY( a == arr+2 );
+    VERIFY( v == arr+1 );
+    v = a.exchange(arr+3, mo);
+    VERIFY( a == arr+3 );
+    VERIFY( v == arr+2 );
+
+    auto expected = a.load();
+    while (!a.compare_exchange_weak(expected, arr+4, mo, mo))
+    { /* weak form can fail spuriously */ }
+    VERIFY( a.load() == arr+4 );
+    VERIFY( expected == arr+3 );
+    expected = arr+1;
+    ok = a.compare_exchange_weak(expected, arr+5, mo, mo);
+    VERIFY( !ok && a.load() == arr+4 && expected == arr+4 );
+    ok = a.compare_exchange_strong(expected, arr+5, mo, mo);
+    VERIFY( ok && a.load() == arr+5 && expected == arr+4 );
+    expected = nullptr;
+    ok = a.compare_exchange_strong(expected, arr+3, mo, mo);
+    VERIFY( !ok && a.load() == arr+5 && expected == arr+5 );
+
+    while (!a.compare_exchange_weak(expected, arr+4))
+    { /* weak form can fail spuriously */ }
+    VERIFY( a.load() == arr+4  && expected == arr+5 );
+    expected = a.load();
+    while (!a.compare_exchange_weak(expected, arr+6, mo))
+    { /* weak form can fail spuriously */ }
+    VERIFY( a.load() == arr+6 && expected == arr+4 );
+    expected = a.load() + 1;
+    ok = a.compare_exchange_weak(expected, arr+8);
+    VERIFY( !ok && a.load() == arr+6 && expected == arr+6 );
+    expected = a.load() + 1;
+    ok = a.compare_exchange_weak(expected, arr+8, mo);
+    VERIFY( !ok && a.load() == arr+6 && expected == arr+6 );
+
+    ok = a.compare_exchange_strong(expected, arr+5);
+    VERIFY( ok && a.load() == arr+5 && expected == arr+6 );
+    expected = a.load();
+    ok = a.compare_exchange_strong(expected, arr+7, mo);
+    VERIFY( ok && a.load() == arr+7 && expected == arr+5 );
+    expected = a.load() + 1;
+    ok = a.compare_exchange_strong(expected, arr+2);
+    VERIFY( !ok && a.load() == arr+7 && expected == arr+7 );
+    expected = a.load() + 1;
+    ok = a.compare_exchange_strong(expected, arr+2, mo);
+    VERIFY( !ok && a.load() == arr+7 && expected == arr+7 );
+
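+    // Pointer arithmetic is in units of the pointed-to type, so fetch_add(2)
+    // advances the stored pointer by two elements of arr.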
+    v = a.fetch_add(2);
+    VERIFY( v == arr+7 );
+    VERIFY( a == arr+9 );
+    v = a.fetch_add(-3, mo);
+    VERIFY( v == arr+9 );
+    VERIFY( a == arr+6 );
+
+    v = a.fetch_sub(3);
+    VERIFY( v == arr+6 );
+    VERIFY( a == arr+3 );
+    v = a.fetch_sub(2, mo);
+    VERIFY( v == arr+3 );
+    VERIFY( a == arr+1 );
+
+    v = a += 5;
+    VERIFY( v == arr+6 );
+    VERIFY( a == arr+6 );
+
+    v = a -= 5;
+    VERIFY( v == arr+1 );
+    VERIFY( a == arr+1 );
+  }
+
+  VERIFY( value == arr+1 );
+}
+
+void
+test02()
+{
+  char arr[10] = { };
+  char* value;
+
+  {
+    const auto mo = std::memory_order_relaxed;
+    std::atomic_ref<char*> a(value);
+    bool ok = a.is_lock_free();
+    if constexpr (std::atomic_ref<char*>::is_always_lock_free)
+      VERIFY( ok );
+    a = arr;
+    VERIFY( a.load() == arr );
+    a.store(arr+3);
+    VERIFY( a.load(mo) == arr+3 );
+    a.store(arr+1, mo);
+    VERIFY( a.load() == arr+1 );
+    auto v = a.exchange(arr+2);
+    VERIFY( a == arr+2 );
+    VERIFY( v == arr+1 );
+    v = a.exchange(arr+3, mo);
+    VERIFY( a == arr+3 );
+    VERIFY( v == arr+2 );
+
+    auto expected = a.load();
+    while (!a.compare_exchange_weak(expected, arr+4, mo, mo))
+    { /* weak form can fail spuriously */ }
+    VERIFY( a.load() == arr+4 );
+    VERIFY( expected == arr+3 );
+    expected = arr+1;
+    ok = a.compare_exchange_weak(expected, arr+5, mo, mo);
+    VERIFY( !ok && a.load() == arr+4 && expected == arr+4 );
+    ok = a.compare_exchange_strong(expected, arr+5, mo, mo);
+    VERIFY( ok && a.load() == arr+5 && expected == arr+4 );
+    expected = nullptr;
+    ok = a.compare_exchange_strong(expected, arr+3, mo, mo);
+    VERIFY( !ok && a.load() == arr+5 && expected == arr+5 );
+
+    while (!a.compare_exchange_weak(expected, arr+4))
+    { /* weak form can fail spuriously */ }
+    VERIFY( a.load() == arr+4  && expected == arr+5 );
+    expected = a.load();
+    while (!a.compare_exchange_weak(expected, arr+6, mo))
+    { /* weak form can fail spuriously */ }
+    VERIFY( a.load() == arr+6 && expected == arr+4 );
+    expected = a.load() + 1;
+    ok = a.compare_exchange_weak(expected, arr+8);
+    VERIFY( !ok && a.load() == arr+6 && expected == arr+6 );
+    expected = a.load() + 1;
+    ok = a.compare_exchange_weak(expected, arr+8, mo);
+    VERIFY( !ok && a.load() == arr+6 && expected == arr+6 );
+
+    ok = a.compare_exchange_strong(expected, arr+5);
+    VERIFY( ok && a.load() == arr+5 && expected == arr+6 );
+    expected = a.load();
+    ok = a.compare_exchange_strong(expected, arr+7, mo);
+    VERIFY( ok && a.load() == arr+7 && expected == arr+5 );
+    expected = a.load() + 1;
+    ok = a.compare_exchange_strong(expected, arr+2);
+    VERIFY( !ok && a.load() == arr+7 && expected == arr+7 );
+    expected = a.load() + 1;
+    ok = a.compare_exchange_strong(expected, arr+2, mo);
+    VERIFY( !ok && a.load() == arr+7 && expected == arr+7 );
+
+    v = a.fetch_add(2);
+    VERIFY( v == arr+7 );
+    VERIFY( a == arr+9 );
+    v = a.fetch_add(-3, mo);
+    VERIFY( v == arr+9 );
+    VERIFY( a == arr+6 );
+
+    v = a.fetch_sub(3);
+    VERIFY( v == arr+6 );
+    VERIFY( a == arr+3 );
+    v = a.fetch_sub(2, mo);
+    VERIFY( v == arr+3 );
+    VERIFY( a == arr+1 );
+
+    v = a += 5;
+    VERIFY( v == arr+6 );
+    VERIFY( a == arr+6 );
+
+    v = a -= 5;
+    VERIFY( v == arr+1 );
+    VERIFY( a == arr+1 );
+  }
+
+  VERIFY( value == arr+1 );
+}
+
+void
+test03()
+{
+  int i = 0;
+  int* ptr = 0;
+  std::atomic_ref<int*> a0(ptr);
+  std::atomic_ref<int*> a1(ptr);
+  std::atomic_ref<int*> a2(a0);
+  a0 = &i;
+  VERIFY( a1 == &i );
+  VERIFY( a2 == &i );
+}
+
+int
+main()
+{
+  test01();
+  test02();
+  test03();
+}
diff --git a/libstdc++-v3/testsuite/29_atomics/atomic_ref/requirements.cc b/libstdc++-v3/testsuite/29_atomics/atomic_ref/requirements.cc
new file mode 100644 (file)
index 0000000..a3fd450
--- /dev/null
@@ -0,0 +1,74 @@
+// Copyright (C) 2019 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library.  This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING3.  If not see
+// <http://www.gnu.org/licenses/>.
+
+// { dg-options "-std=gnu++2a" }
+// { dg-do compile { target c++2a } }
+
+#include <atomic>
+
+void
+test01()
+{
+  struct X { int c; };
+  using A = std::atomic_ref<X>;
+  static_assert( std::is_standard_layout_v<A> );
+  static_assert( std::is_nothrow_copy_constructible_v<A> );
+  static_assert( std::is_trivially_destructible_v<A> );
+  static_assert( std::is_same_v<A::value_type, X> );
+  static_assert( !std::is_copy_assignable_v<A> );
+  static_assert( !std::is_move_assignable_v<A> );
+}
+
+void
+test02()
+{
+  using A = std::atomic_ref<int>;
+  static_assert( std::is_standard_layout_v<A> );
+  static_assert( std::is_nothrow_copy_constructible_v<A> );
+  static_assert( std::is_trivially_destructible_v<A> );
+  static_assert( std::is_same_v<A::value_type, int> );
+  static_assert( std::is_same_v<A::difference_type, A::value_type> );
+  static_assert( !std::is_copy_assignable_v<A> );
+  static_assert( !std::is_move_assignable_v<A> );
+}
+
+void
+test03()
+{
+  using A = std::atomic_ref<double>;
+  static_assert( std::is_standard_layout_v<A> );
+  static_assert( std::is_nothrow_copy_constructible_v<A> );
+  static_assert( std::is_trivially_destructible_v<A> );
+  static_assert( std::is_same_v<A::value_type, double> );
+  static_assert( std::is_same_v<A::difference_type, A::value_type> );
+  static_assert( !std::is_copy_assignable_v<A> );
+  static_assert( !std::is_move_assignable_v<A> );
+}
+
+void
+test04()
+{
+  using A = std::atomic_ref<int*>;
+  static_assert( std::is_standard_layout_v<A> );
+  static_assert( std::is_nothrow_copy_constructible_v<A> );
+  static_assert( std::is_trivially_destructible_v<A> );
+  static_assert( std::is_same_v<A::value_type, int*> );
+  static_assert( std::is_same_v<A::difference_type, std::ptrdiff_t> );
+  static_assert( std::is_nothrow_copy_constructible_v<A> );
+  static_assert( !std::is_copy_assignable_v<A> );
+  static_assert( !std::is_move_assignable_v<A> );
+}