atomicity.h (__compare_and_swap): Remove unused function.
[gcc.git] / libstdc++-v3 / config / cpu / arm / bits / atomicity.h
1 // Low-level functions for atomic operations: ARM version -*- C++ -*-
2
3 // Copyright (C) 2000, 2001 Free Software Foundation, Inc.
4 //
5 // This file is part of the GNU ISO C++ Library. This library is free
6 // software; you can redistribute it and/or modify it under the
7 // terms of the GNU General Public License as published by the
8 // Free Software Foundation; either version 2, or (at your option)
9 // any later version.
10
11 // This library is distributed in the hope that it will be useful,
12 // but WITHOUT ANY WARRANTY; without even the implied warranty of
13 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 // GNU General Public License for more details.
15
16 // You should have received a copy of the GNU General Public License along
17 // with this library; see the file COPYING. If not, write to the Free
18 // Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307,
19 // USA.
20
21 // As a special exception, you may use this file as part of a free software
22 // library without restriction. Specifically, if other files instantiate
23 // templates or use macros or inline functions from this file, or you compile
24 // this file and link it with other files to produce an executable, this
25 // file does not by itself cause the resulting executable to be covered by
26 // the GNU General Public License. This exception does not however
27 // invalidate any other reasons why the executable file might be covered by
28 // the GNU General Public License.
29
30 #ifndef _BITS_ATOMICITY_H
31 #define _BITS_ATOMICITY_H 1
32
/* Word type operated on by the atomic primitives below (reference
   counts etc. inside libstdc++).  */
typedef int _Atomic_word;
/* Atomically perform *__mem += __val and return the value *__mem held
   BEFORE the addition.

   Implemented with the ARM SWP instruction: load the old value,
   compute old + __val, SWP the sum into memory, and if the value
   displaced by the SWP differs from the value originally loaded
   (another writer intervened), SWP the displaced value back and retry.
   NOTE(review): this restore-and-retry emulation is the classic
   pre-LDREX scheme; it assumes all writers to *__mem use these
   primitives — confirm against the other users of _Atomic_word.  */
static inline _Atomic_word
__attribute__ ((__unused__))
__exchange_and_add (volatile _Atomic_word* __mem, int __val)
{
  _Atomic_word __tmp, __tmp2, __result;
#ifdef __thumb__
  /* SWP exists only in ARM state, so first BX to an ARM-state copy of
     the loop, then BX back to Thumb state at the end.  The "l"
     constraint on %0 keeps it in a low register so the Thumb LDR/BX
     prologue can use it.
     Since this function is inlined, we can't be sure of the alignment.  */
  __asm__ __volatile__ (
	"ldr %0, 4f \n\t"	/* Fetch address of ARM-state code...  */
	"bx %0 \n\t"		/* ...and switch to ARM state.  */
	".align 0 \n"
	"4:\t"
	".word 0f \n\t"
	".code 32 \n"
	"0:\t"
	"ldr %0, [%3] \n\t"	/* __result = old value of *__mem.  */
	"add %1, %0, %4 \n\t"	/* __tmp = old + __val.  */
	"swp %2, %1, [%3] \n\t"	/* Store sum; __tmp2 = displaced value.  */
	"cmp %0, %2 \n\t"	/* Someone wrote in between?  */
	"swpne %1, %2, [%3] \n\t"	/* Yes: restore displaced value...  */
	"bne 0b \n\t"		/* ...and retry.  */
	"ldr %1, 1f \n\t"	/* Return to Thumb state.  */
	"bx %1 \n"
	"1:\t"
	".word 2f \n\t"
	".code 16 \n"
	"2:\n"
	: "=&l"(__result), "=&r"(__tmp), "=&r"(__tmp2)
	: "r" (__mem), "r"(__val)
	: "cc", "memory");
#else
  /* ARM state: same load/add/SWP/compare/restore loop as above.  */
  __asm__ __volatile__ (
	"\n"
	"0:\t"
	"ldr %0, [%3] \n\t"	/* __result = old value of *__mem.  */
	"add %1, %0, %4 \n\t"	/* __tmp = old + __val.  */
	"swp %2, %1, [%3] \n\t"	/* Store sum; __tmp2 = displaced value.  */
	"cmp %0, %2 \n\t"	/* Someone wrote in between?  */
	"swpne %1, %2, [%3] \n\t"	/* Yes: restore displaced value...  */
	"bne 0b \n\t"		/* ...and retry.  */
	""
	: "=&r"(__result), "=&r"(__tmp), "=&r"(__tmp2)
	: "r" (__mem), "r"(__val)
	: "cc", "memory");
#endif
  return __result;
}
82
/* Atomically perform *__mem += __val, discarding the old value.
   Same SWP-based load/add/swap/restore loop as __exchange_and_add,
   just without returning a result.  */
static inline void
__attribute__ ((__unused__))
__atomic_add (volatile _Atomic_word *__mem, int __val)
{
  _Atomic_word __tmp, __tmp2, __tmp3;
#ifdef __thumb__
  /* SWP exists only in ARM state, so BX to an ARM-state copy of the
     loop, then BX back to Thumb state at the end.
     Since this function is inlined, we can't be sure of the alignment.  */
  __asm__ __volatile__ (
	"ldr %0, 4f \n\t"	/* Fetch address of ARM-state code...  */
	"bx %0 \n\t"		/* ...and switch to ARM state.  */
	".align 0\n"
	"4:\t"
	".word 0f \n\t"
	".code 32 \n"
	"0:\t"
	"ldr %0, [%3] \n\t"	/* __tmp = old value of *__mem.  */
	"add %1, %0, %4 \n\t"	/* __tmp2 = old + __val.  */
	"swp %2, %1, [%3] \n\t"	/* Store sum; __tmp3 = displaced value.  */
	"cmp %0, %2 \n\t"	/* Someone wrote in between?  */
	"swpne %1, %2,[%3] \n\t"	/* Yes: restore displaced value...  */
	"bne 0b \n\t"		/* ...and retry.  */
	"ldr %1, 1f \n\t"	/* Return to Thumb state.  */
	"bx %1 \n"
	"1:\t"
	".word 2f \n\t"
	".code 16 \n"
	"2:\n"
	: "=&l"(__tmp), "=&r"(__tmp2), "=&r"(__tmp3)
	: "r" (__mem), "r"(__val)
	: "cc", "memory");
#else
  /* ARM state: same load/add/SWP/compare/restore loop as above.  */
  __asm__ __volatile__ (
	"\n"
	"0:\t"
	"ldr %0, [%3] \n\t"	/* __tmp = old value of *__mem.  */
	"add %1, %0, %4 \n\t"	/* __tmp2 = old + __val.  */
	"swp %2, %1, [%3] \n\t"	/* Store sum; __tmp3 = displaced value.  */
	"cmp %0, %2 \n\t"	/* Someone wrote in between?  */
	"swpne %1, %2, [%3] \n\t"	/* Yes: restore displaced value...  */
	"bne 0b \n\t"		/* ...and retry.  */
	""
	: "=&r"(__tmp), "=&r"(__tmp2), "=&r"(__tmp3)
	: "r" (__mem), "r"(__val)
	: "cc", "memory");
#endif
}
129
/* Atomically store __newval into *__p and return the value *__p held
   before the store.  A single SWP does this directly, so no retry
   loop is needed.  */
static inline long
__attribute__ ((__unused__))
__always_swap (volatile long *__p, long __newval)
{
  long __result;
#ifdef __thumb__
  long __tmp;
  /* SWP exists only in ARM state, so BX to an ARM-state SWP, then BX
     back to Thumb state.
     Since this function is inlined, we can't be sure of the alignment.  */
  __asm__ __volatile__ (
	"ldr %0, 4f \n\t"	/* Fetch address of ARM-state code...  */
	"bx %0 \n\t"		/* ...and switch to ARM state.  */
	".align 0 \n"
	"4:\t"
	".word 0f \n\t"
	".code 32\n"
	"0:\t"
	"swp %0, %3, [%2] \n\t"	/* __result = old *__p; *__p = __newval.  */
	"ldr %1, 1f \n\t"	/* Return to Thumb state.  */
	"bx %1 \n"
	"1:\t"
	".word 2f \n\t"
	".code 16 \n"
	"2:\n"
	: "=&l"(__result), "=&r"(__tmp)
	: "r"(__p), "r"(__newval)
	: "memory");
#else
  __asm__ __volatile__ (
	"\n\t"
	"swp %0, %2, [%1] \n\t"	/* __result = old *__p; *__p = __newval.  */
	""
	: "=&r"(__result)
	: "r"(__p), "r"(__newval)
	: "memory");
#endif
  return __result;
}
167
/* Test-and-set: if *__p is nonzero, return that nonzero value without
   modifying memory (the lock is already held).  If *__p is zero, SWP
   __newval into it and return 0; when the value displaced by the SWP
   is not the zero loaded earlier, another writer intervened, so the
   displaced value is SWPed back and the whole sequence retried.
   NOTE(review): the loaded long is returned through an int __result —
   values are presumably small flags, so no truncation in practice.  */
static inline int
__attribute__ ((__unused__))
__test_and_set (volatile long *__p, long __newval)
{
  int __result;
  long __tmp;
#ifdef __thumb__
  /* SWP exists only in ARM state, so BX to an ARM-state copy of the
     sequence, then BX back to Thumb state at the end.
     Since this function is inlined, we can't be sure of the alignment.  */
  __asm__ __volatile__ (
	"ldr %0, 4f \n\t"	/* Fetch address of ARM-state code...  */
	"bx %0 \n\t"		/* ...and switch to ARM state.  */
	".align 0 \n"
	"4:\t"
	".word 0f \n\t"
	".code 32 \n"
	"0:\t"
	"ldr %0, [%2] \n\t"	/* __result = current *__p.  */
	"cmp %0, #0 \n\t"	/* Already set?  */
	"bne 1f \n\t"		/* Yes: return it unchanged.  */
	"swp %1, %3, [%2] \n\t"	/* Store __newval; __tmp = displaced value.  */
	"cmp %0, %1 \n\t"	/* Displaced value still the zero we saw?  */
	"swpne %0, %1, [%2]\n\t"	/* No: restore displaced value...  */
	"bne 0b \n"		/* ...and retry.  */
	"1:\t"
	"ldr %1, 2f \n\t"	/* Return to Thumb state.  */
	"bx %1 \n"
	"2:\t"
	".word 3f \n\t"
	".code 16 \n"
	"3:"
	: "=&l"(__result), "=r" (__tmp)
	: "r"(__p), "r"(__newval)
	: "cc", "memory");
#else
  /* ARM state: same load/test/SWP/verify/restore sequence as above.  */
  __asm__ __volatile__ (
	"\n"
	"0:\t"
	"ldr %0, [%2] \n\t"	/* __result = current *__p.  */
	"cmp %0, #0 \n\t"	/* Already set?  */
	"bne 1f \n\t"		/* Yes: return it unchanged.  */
	"swp %1, %3, [%2] \n\t"	/* Store __newval; __tmp = displaced value.  */
	"cmp %0, %1 \n\t"	/* Displaced value still the zero we saw?  */
	"swpne %0, %1, [%2] \n\t"	/* No: restore displaced value...  */
	"bne 0b \n"		/* ...and retry.  */
	"1:\n\t"
	""
	: "=&r"(__result), "=r" (__tmp)
	: "r"(__p), "r"(__newval)
	: "cc", "memory");
#endif
  return __result;
}
220
221 #endif /* atomicity.h */