1 // Low-level functions for atomic operations: ARM version -*- C++ -*-
3 // Copyright (C) 2000, 2001 Free Software Foundation, Inc.
5 // This file is part of the GNU ISO C++ Library. This library is free
6 // software; you can redistribute it and/or modify it under the
7 // terms of the GNU General Public License as published by the
// Free Software Foundation; either version 2, or (at your option)
// any later version.
11 // This library is distributed in the hope that it will be useful,
12 // but WITHOUT ANY WARRANTY; without even the implied warranty of
13 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 // GNU General Public License for more details.
16 // You should have received a copy of the GNU General Public License along
17 // with this library; see the file COPYING. If not, write to the Free
// Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307,
// USA.
21 // As a special exception, you may use this file as part of a free software
22 // library without restriction. Specifically, if other files instantiate
23 // templates or use macros or inline functions from this file, or you compile
24 // this file and link it with other files to produce an executable, this
25 // file does not by itself cause the resulting executable to be covered by
26 // the GNU General Public License. This exception does not however
27 // invalidate any other reasons why the executable file might be covered by
28 // the GNU General Public License.
30 #ifndef _BITS_ATOMICITY_H
31 #define _BITS_ATOMICITY_H 1
33 typedef int _Atomic_word
;
35 static inline _Atomic_word
36 __attribute__ ((__unused__
))
37 __exchange_and_add (volatile _Atomic_word
* __mem
, int __val
)
39 _Atomic_word __tmp
, __tmp2
, __result
;
41 /* Since this function is inlined, we can't be sure of the alignment. */
42 __asm__
__volatile__ (
52 "swp %2, %1, [%3] \n\t"
54 "swpne %1, %2, [%3] \n\t"
62 : "=&l"(__result
), "=&r"(__tmp
), "=&r"(__tmp2
)
63 : "r" (__mem
), "r"(__val
)
66 __asm__
__volatile__ (
71 "swp %2, %1, [%3] \n\t"
73 "swpne %1, %2, [%3] \n\t"
76 : "=&r"(__result
), "=&r"(__tmp
), "=&r"(__tmp2
)
77 : "r" (__mem
), "r"(__val
)
84 __attribute__ ((__unused__
))
85 __atomic_add (volatile _Atomic_word
*__mem
, int __val
)
87 _Atomic_word __tmp
, __tmp2
, __tmp3
;
89 /* Since this function is inlined, we can't be sure of the alignment. */
90 __asm__
__volatile__ (
100 "swp %2, %1, [%3] \n\t"
102 "swpne %1, %2,[%3] \n\t"
110 : "=&l"(__tmp
), "=&r"(__tmp2
), "=&r"(__tmp3
)
111 : "r" (__mem
), "r"(__val
)
114 __asm__
__volatile__ (
118 "add %1, %0, %4 \n\t"
119 "swp %2, %1, [%3] \n\t"
121 "swpne %1, %2, [%3] \n\t"
124 : "=&r"(__tmp
), "=&r"(__tmp2
), "=&r"(__tmp3
)
125 : "r" (__mem
), "r"(__val
)
// Atomically store __newval into *__p and return the old value of
// *__p.  A single SWP suffices here -- no retry loop is needed since
// the exchange itself is the whole operation.
static inline long
__attribute__ ((__unused__))
__always_swap (volatile long *__p, long __newval)
{
  long __result;
#ifdef __thumb__
  long __tmp;
  /* Since this function is inlined, we can't be sure of the alignment.
     Switch to ARM mode for the SWP, then back to Thumb.  */
  __asm__ __volatile__ (
	"ldr     %0, 4f \n\t"
	"bx      %0 \n\t"
	".align 0 \n"
	"4:\t"
	".word   0f \n\t"
	".code 32 \n"
	"0:\t"
	"swp     %0, %3, [%2] \n\t"
	"ldr     %1, 1f \n\t"
	"bx      %1 \n"
	"1:\t"
	".word   2f \n\t"
	".code 16 \n"
	"2:\n"
	: "=&l"(__result), "=&r"(__tmp)
	: "r"(__p), "r"(__newval)
	: "memory");
#else
  __asm__ __volatile__ (
	"\n"
	"swp     %0, %2, [%1] \n\t"
	""
	: "=&r"(__result)
	: "r"(__p), "r"(__newval)
	: "memory");
#endif
  return __result;
}
// Atomically set *__p to __newval if and only if *__p is currently
// zero.  Returns the value *__p held on entry: 0 means the set
// succeeded, non-zero means the lock was already taken.  Uses the
// same SWP compare-retry scheme; a non-zero initial value bails out
// early without writing.
static inline int
__attribute__ ((__unused__))
__test_and_set (volatile long *__p, long __newval)
{
  int __result;
  long __tmp;
#ifdef __thumb__
  /* Since this function is inlined, we can't be sure of the alignment.
     Switch to ARM mode for the SWP-based loop, then back to Thumb.  */
  __asm__ __volatile__ (
	"ldr     %0, 4f \n\t"
	"bx      %0 \n\t"
	".align 0 \n"
	"4:\t"
	".word   0f \n\t"
	".code 32 \n"
	"0:\t"
	"ldr     %0, [%2] \n\t"
	"cmp     %0, #0 \n\t"
	"bne     1f \n\t"
	"swp     %1, %3, [%2] \n\t"
	"cmp     %0, %1 \n\t"
	"swpne   %0, %1, [%2]\n\t"
	"bne     0b \n\t"
	"1:\t"
	"ldr     %1, 2f \n\t"
	"bx      %1 \n"
	"2:\t"
	".word   3f \n\t"
	".code 16 \n"
	"3:"
	: "=&l"(__result), "=r" (__tmp)
	: "r"(__p), "r"(__newval)
	: "cc", "memory");
#else
  __asm__ __volatile__ (
	"\n"
	"0:\t"
	"ldr     %0, [%2] \n\t"
	"cmp     %0, #0 \n\t"
	"bne     1f \n\t"
	"swp     %1, %3, [%2] \n\t"
	"cmp     %0, %1 \n\t"
	"swpne   %0, %1, [%2] \n\t"
	"bne     0b \n"
	"1:"
	: "=&r"(__result), "=r" (__tmp)
	: "r"(__p), "r"(__newval)
	: "cc", "memory");
#endif
  return __result;
}
221 #endif /* atomicity.h */