/*
 * Many similar implementations exist. See for example libwsbm
 * or the linux kernel include/atomic.h
 *
 * No copyright claimed on this file.
 */
#include "no_extern_c.h"

#include <assert.h>
#include <stdint.h>
17 /* Favor OS-provided implementations.
19 * Where no OS-provided implementation is available, fall back to
20 * locally coded assembly, compiler intrinsic or ultimately a
21 * mutex-based implementation.
24 #define PIPE_ATOMIC_OS_SOLARIS
25 #elif defined(_MSC_VER)
26 #define PIPE_ATOMIC_MSVC_INTRINSIC
27 #elif defined(__GNUC__)
28 #define PIPE_ATOMIC_GCC_INTRINSIC
30 #error "Unsupported platform"
34 /* Implementation using GCC-provided synchronization intrinsics
36 #if defined(PIPE_ATOMIC_GCC_INTRINSIC)
38 #define PIPE_ATOMIC "GCC Sync Intrinsics"
40 #if defined(USE_GCC_ATOMIC_BUILTINS)
42 /* The builtins with explicit memory model are available since GCC 4.7. */
43 #define p_atomic_set(_v, _i) __atomic_store_n((_v), (_i), __ATOMIC_RELEASE)
44 #define p_atomic_read(_v) __atomic_load_n((_v), __ATOMIC_ACQUIRE)
45 #define p_atomic_dec_zero(v) (__atomic_sub_fetch((v), 1, __ATOMIC_ACQ_REL) == 0)
46 #define p_atomic_inc(v) (void) __atomic_add_fetch((v), 1, __ATOMIC_ACQ_REL)
47 #define p_atomic_dec(v) (void) __atomic_sub_fetch((v), 1, __ATOMIC_ACQ_REL)
48 #define p_atomic_add(v, i) (void) __atomic_add_fetch((v), (i), __ATOMIC_ACQ_REL)
49 #define p_atomic_inc_return(v) __atomic_add_fetch((v), 1, __ATOMIC_ACQ_REL)
50 #define p_atomic_dec_return(v) __atomic_sub_fetch((v), 1, __ATOMIC_ACQ_REL)
51 #define p_atomic_xchg(v, i) __atomic_exchange_n((v), (i), __ATOMIC_ACQ_REL)
52 #define PIPE_NATIVE_ATOMIC_XCHG
56 #define p_atomic_set(_v, _i) (*(_v) = (_i))
57 #define p_atomic_read(_v) (*(_v))
58 #define p_atomic_dec_zero(v) (__sync_sub_and_fetch((v), 1) == 0)
59 #define p_atomic_inc(v) (void) __sync_add_and_fetch((v), 1)
60 #define p_atomic_dec(v) (void) __sync_sub_and_fetch((v), 1)
61 #define p_atomic_add(v, i) (void) __sync_add_and_fetch((v), (i))
62 #define p_atomic_inc_return(v) __sync_add_and_fetch((v), 1)
63 #define p_atomic_dec_return(v) __sync_sub_and_fetch((v), 1)
67 /* There is no __atomic_* compare and exchange that returns the current value.
68 * Also, GCC 5.4 seems unable to optimize a compound statement expression that
69 * uses an additional stack variable with __atomic_compare_exchange[_n].
71 #define p_atomic_cmpxchg(v, old, _new) \
72 __sync_val_compare_and_swap((v), (old), (_new))
78 /* Unlocked version for single threaded environments, such as some
79 * windows kernel modules.
81 #if defined(PIPE_ATOMIC_OS_UNLOCKED)
83 #define PIPE_ATOMIC "Unlocked"
85 #define p_atomic_set(_v, _i) (*(_v) = (_i))
86 #define p_atomic_read(_v) (*(_v))
87 #define p_atomic_dec_zero(_v) (p_atomic_dec_return(_v) == 0)
88 #define p_atomic_inc(_v) ((void) p_atomic_inc_return(_v))
89 #define p_atomic_dec(_v) ((void) p_atomic_dec_return(_v))
90 #define p_atomic_add(_v, _i) (*(_v) = *(_v) + (_i))
91 #define p_atomic_inc_return(_v) (++(*(_v)))
92 #define p_atomic_dec_return(_v) (--(*(_v)))
93 #define p_atomic_cmpxchg(_v, _old, _new) (*(_v) == (_old) ? (*(_v) = (_new), (_old)) : *(_v))
98 #if defined(PIPE_ATOMIC_MSVC_INTRINSIC)
100 #define PIPE_ATOMIC "MSVC Intrinsics"
102 /* We use the Windows header's Interlocked*64 functions instead of the
103 * _Interlocked*64 intrinsics wherever we can, as support for the latter varies
104 * with target CPU, whereas Windows headers take care of all portability
105 * issues: using intrinsics where available, falling back to library
106 * implementations where not.
108 #ifndef WIN32_LEAN_AND_MEAN
109 #define WIN32_LEAN_AND_MEAN 1
115 /* MSVC supports decltype keyword, but it's only supported on C++ and doesn't
116 * quite work here; and if a C++-only solution is worthwhile, then it would be
117 * better to use templates / function overloading, instead of decltype magic.
118 * Therefore, we rely on implicit casting to LONGLONG for the functions that return
121 #define p_atomic_set(_v, _i) (*(_v) = (_i))
122 #define p_atomic_read(_v) (*(_v))
124 #define p_atomic_dec_zero(_v) \
125 (p_atomic_dec_return(_v) == 0)
127 #define p_atomic_inc(_v) \
128 ((void) p_atomic_inc_return(_v))
130 #define p_atomic_inc_return(_v) (\
131 sizeof *(_v) == sizeof(short) ? _InterlockedIncrement16((short *) (_v)) : \
132 sizeof *(_v) == sizeof(long) ? _InterlockedIncrement ((long *) (_v)) : \
133 sizeof *(_v) == sizeof(__int64) ? InterlockedIncrement64 ((__int64 *)(_v)) : \
134 (assert(!"should not get here"), 0))
136 #define p_atomic_dec(_v) \
137 ((void) p_atomic_dec_return(_v))
139 #define p_atomic_dec_return(_v) (\
140 sizeof *(_v) == sizeof(short) ? _InterlockedDecrement16((short *) (_v)) : \
141 sizeof *(_v) == sizeof(long) ? _InterlockedDecrement ((long *) (_v)) : \
142 sizeof *(_v) == sizeof(__int64) ? InterlockedDecrement64 ((__int64 *)(_v)) : \
143 (assert(!"should not get here"), 0))
145 #define p_atomic_add(_v, _i) (\
146 sizeof *(_v) == sizeof(char) ? _InterlockedExchangeAdd8 ((char *) (_v), (_i)) : \
147 sizeof *(_v) == sizeof(short) ? _InterlockedExchangeAdd16((short *) (_v), (_i)) : \
148 sizeof *(_v) == sizeof(long) ? _InterlockedExchangeAdd ((long *) (_v), (_i)) : \
149 sizeof *(_v) == sizeof(__int64) ? InterlockedExchangeAdd64((__int64 *)(_v), (_i)) : \
150 (assert(!"should not get here"), 0))
152 #define p_atomic_cmpxchg(_v, _old, _new) (\
153 sizeof *(_v) == sizeof(char) ? _InterlockedCompareExchange8 ((char *) (_v), (char) (_new), (char) (_old)) : \
154 sizeof *(_v) == sizeof(short) ? _InterlockedCompareExchange16((short *) (_v), (short) (_new), (short) (_old)) : \
155 sizeof *(_v) == sizeof(long) ? _InterlockedCompareExchange ((long *) (_v), (long) (_new), (long) (_old)) : \
156 sizeof *(_v) == sizeof(__int64) ? InterlockedCompareExchange64 ((__int64 *)(_v), (__int64)(_new), (__int64)(_old)) : \
157 (assert(!"should not get here"), 0))
161 #if defined(PIPE_ATOMIC_OS_SOLARIS)
163 #define PIPE_ATOMIC "Solaris OS atomic functions"
168 #define p_atomic_set(_v, _i) (*(_v) = (_i))
169 #define p_atomic_read(_v) (*(_v))
171 #define p_atomic_dec_zero(v) (\
172 sizeof(*v) == sizeof(uint8_t) ? atomic_dec_8_nv ((uint8_t *)(v)) == 0 : \
173 sizeof(*v) == sizeof(uint16_t) ? atomic_dec_16_nv((uint16_t *)(v)) == 0 : \
174 sizeof(*v) == sizeof(uint32_t) ? atomic_dec_32_nv((uint32_t *)(v)) == 0 : \
175 sizeof(*v) == sizeof(uint64_t) ? atomic_dec_64_nv((uint64_t *)(v)) == 0 : \
176 (assert(!"should not get here"), 0))
178 #define p_atomic_inc(v) (void) (\
179 sizeof(*v) == sizeof(uint8_t) ? atomic_inc_8 ((uint8_t *)(v)) : \
180 sizeof(*v) == sizeof(uint16_t) ? atomic_inc_16((uint16_t *)(v)) : \
181 sizeof(*v) == sizeof(uint32_t) ? atomic_inc_32((uint32_t *)(v)) : \
182 sizeof(*v) == sizeof(uint64_t) ? atomic_inc_64((uint64_t *)(v)) : \
183 (assert(!"should not get here"), 0))
185 #define p_atomic_inc_return(v) ((__typeof(*v)) \
186 sizeof(*v) == sizeof(uint8_t) ? atomic_inc_8_nv ((uint8_t *)(v)) : \
187 sizeof(*v) == sizeof(uint16_t) ? atomic_inc_16_nv((uint16_t *)(v)) : \
188 sizeof(*v) == sizeof(uint32_t) ? atomic_inc_32_nv((uint32_t *)(v)) : \
189 sizeof(*v) == sizeof(uint64_t) ? atomic_inc_64_nv((uint64_t *)(v)) : \
190 (assert(!"should not get here"), 0))
192 #define p_atomic_dec(v) ((void) \
193 sizeof(*v) == sizeof(uint8_t) ? atomic_dec_8 ((uint8_t *)(v)) : \
194 sizeof(*v) == sizeof(uint16_t) ? atomic_dec_16((uint16_t *)(v)) : \
195 sizeof(*v) == sizeof(uint32_t) ? atomic_dec_32((uint32_t *)(v)) : \
196 sizeof(*v) == sizeof(uint64_t) ? atomic_dec_64((uint64_t *)(v)) : \
197 (assert(!"should not get here"), 0))
199 #define p_atomic_dec_return(v) ((__typeof(*v)) \
200 sizeof(*v) == sizeof(uint8_t) ? atomic_dec_8_nv ((uint8_t *)(v)) : \
201 sizeof(*v) == sizeof(uint16_t) ? atomic_dec_16_nv((uint16_t *)(v)) : \
202 sizeof(*v) == sizeof(uint32_t) ? atomic_dec_32_nv((uint32_t *)(v)) : \
203 sizeof(*v) == sizeof(uint64_t) ? atomic_dec_64_nv((uint64_t *)(v)) : \
204 (assert(!"should not get here"), 0))
206 #define p_atomic_add(v, i) ((void) \
207 sizeof(*v) == sizeof(uint8_t) ? atomic_add_8 ((uint8_t *)(v), (i)) : \
208 sizeof(*v) == sizeof(uint16_t) ? atomic_add_16((uint16_t *)(v), (i)) : \
209 sizeof(*v) == sizeof(uint32_t) ? atomic_add_32((uint32_t *)(v), (i)) : \
210 sizeof(*v) == sizeof(uint64_t) ? atomic_add_64((uint64_t *)(v), (i)) : \
211 (assert(!"should not get here"), 0))
213 #define p_atomic_cmpxchg(v, old, _new) ((__typeof(*v)) \
214 sizeof(*v) == sizeof(uint8_t) ? atomic_cas_8 ((uint8_t *)(v), (uint8_t )(old), (uint8_t )(_new)) : \
215 sizeof(*v) == sizeof(uint16_t) ? atomic_cas_16((uint16_t *)(v), (uint16_t)(old), (uint16_t)(_new)) : \
216 sizeof(*v) == sizeof(uint32_t) ? atomic_cas_32((uint32_t *)(v), (uint32_t)(old), (uint32_t)(_new)) : \
217 sizeof(*v) == sizeof(uint64_t) ? atomic_cas_64((uint64_t *)(v), (uint64_t)(old), (uint64_t)(_new)) : \
218 (assert(!"should not get here"), 0))
223 #error "No pipe_atomic implementation selected"
226 #ifndef PIPE_NATIVE_ATOMIC_XCHG
227 static inline uint32_t p_atomic_xchg_32(uint32_t *v
, uint32_t i
)
229 uint32_t actual
= p_atomic_read(v
);
233 actual
= p_atomic_cmpxchg(v
, expected
, i
);
234 } while (expected
!= actual
);
238 static inline uint64_t p_atomic_xchg_64(uint64_t *v
, uint64_t i
)
240 uint64_t actual
= p_atomic_read(v
);
244 actual
= p_atomic_cmpxchg(v
, expected
, i
);
245 } while (expected
!= actual
);
249 #define p_atomic_xchg(v, i) ((__typeof(*(v))) \
250 sizeof(*(v)) == sizeof(uint32_t) ? p_atomic_xchg_32((uint32_t *)(v), (uint32_t)(i)) : \
251 sizeof(*(v)) == sizeof(uint64_t) ? p_atomic_xchg_64((uint64_t *)(v), (uint64_t)(i)) : \
252 (assert(!"should not get here"), 0))
255 #endif /* U_ATOMIC_H */