;; Machine description for AArch64 processor synchronization primitives.
;; Copyright (C) 2009-2019 Free Software Foundation, Inc.
;; Contributed by ARM Ltd.
;;
;; This file is part of GCC.
;;
;; GCC is free software; you can redistribute it and/or modify it
;; under the terms of the GNU General Public License as published by
;; the Free Software Foundation; either version 3, or (at your option)
;; any later version.
;;
;; GCC is distributed in the hope that it will be useful, but
;; WITHOUT ANY WARRANTY; without even the implied warranty of
;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
;; General Public License for more details.
;;
;; You should have received a copy of the GNU General Public License
;; along with GCC; see the file COPYING3.  If not see
;; <http://www.gnu.org/licenses/>.

;; Instruction patterns.

(define_expand "@atomic_compare_and_swap<mode>"
  [(match_operand:SI 0 "register_operand")                      ;; bool out
   (match_operand:ALLI 1 "register_operand")                    ;; val out
   (match_operand:ALLI 2 "aarch64_sync_memory_operand")         ;; memory
   (match_operand:ALLI 3 "nonmemory_operand")                   ;; expected
   (match_operand:ALLI 4 "aarch64_reg_or_zero")                 ;; desired
   (match_operand:SI 5 "const_int_operand")                     ;; is_weak
   (match_operand:SI 6 "const_int_operand")                     ;; mod_s
   (match_operand:SI 7 "const_int_operand")]                    ;; mod_f
  ""
  {
    aarch64_expand_compare_and_swap (operands);
    DONE;
  }
)
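
;; For illustration only: this expander implements GCC's compare-and-swap
;; builtins.  A minimal C sketch (function name is hypothetical) that
;; exercises it:
;;
;;   #include <stdbool.h>
;;
;;   bool
;;   cas_int (int *p, int expected, int desired)
;;   {
;;     /* Operand 0 receives this bool; operand 1 the loaded value.  */
;;     return __atomic_compare_exchange_n (p, &expected, desired,
;;                                         /*weak=*/false, __ATOMIC_SEQ_CST,
;;                                         __ATOMIC_SEQ_CST);
;;   }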

(define_mode_attr cas_short_expected_pred
  [(QI "aarch64_reg_or_imm") (HI "aarch64_plushi_operand")])

(define_insn_and_split "@aarch64_compare_and_swap<mode>"
  [(set (reg:CC CC_REGNUM)                                      ;; bool out
     (unspec_volatile:CC [(const_int 0)] UNSPECV_ATOMIC_CMPSW))
   (set (match_operand:SI 0 "register_operand" "=&r")           ;; val out
     (zero_extend:SI
       (match_operand:SHORT 1 "aarch64_sync_memory_operand" "+Q"))) ;; memory
   (set (match_dup 1)
     (unspec_volatile:SHORT
       [(match_operand:SHORT 2 "<cas_short_expected_pred>" "rn") ;; expected
        (match_operand:SHORT 3 "aarch64_reg_or_zero" "rZ")      ;; desired
        (match_operand:SI 4 "const_int_operand")                ;; is_weak
        (match_operand:SI 5 "const_int_operand")                ;; mod_s
        (match_operand:SI 6 "const_int_operand")]               ;; mod_f
       UNSPECV_ATOMIC_CMPSW))
   (clobber (match_scratch:SI 7 "=&r"))]
  ""
  "#"
  "&& reload_completed"
  [(const_int 0)]
  {
    aarch64_split_compare_and_swap (operands);
    DONE;
  }
)

(define_insn_and_split "@aarch64_compare_and_swap<mode>"
  [(set (reg:CC CC_REGNUM)                                      ;; bool out
     (unspec_volatile:CC [(const_int 0)] UNSPECV_ATOMIC_CMPSW))
   (set (match_operand:GPI 0 "register_operand" "=&r")          ;; val out
     (match_operand:GPI 1 "aarch64_sync_memory_operand" "+Q"))  ;; memory
   (set (match_dup 1)
     (unspec_volatile:GPI
       [(match_operand:GPI 2 "aarch64_plus_operand" "rIJ")      ;; expected
        (match_operand:GPI 3 "aarch64_reg_or_zero" "rZ")        ;; desired
        (match_operand:SI 4 "const_int_operand")                ;; is_weak
        (match_operand:SI 5 "const_int_operand")                ;; mod_s
        (match_operand:SI 6 "const_int_operand")]               ;; mod_f
       UNSPECV_ATOMIC_CMPSW))
   (clobber (match_scratch:SI 7 "=&r"))]
  ""
  "#"
  "&& reload_completed"
  [(const_int 0)]
  {
    aarch64_split_compare_and_swap (operands);
    DONE;
  }
)
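
;; Without LSE these patterns are split after reload into a
;; load-exclusive/store-exclusive sequence by
;; aarch64_split_compare_and_swap.  Operand 4 (is_weak) decides whether
;; that sequence may fail spuriously.  A hedged C sketch (hypothetical
;; function name):
;;
;;   /* Weak CAS: one LDXR/STXR attempt is enough, since spurious
;;      failure is permitted.  A strong CAS (weak=false) requires the
;;      split to retry until the exclusive store succeeds.  */
;;   _Bool
;;   cas_weak (long *p, long *expected, long desired)
;;   {
;;     return __atomic_compare_exchange_n (p, expected, desired,
;;                                         /*weak=*/1, __ATOMIC_ACQ_REL,
;;                                         __ATOMIC_ACQUIRE);
;;   }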

(define_insn "@aarch64_compare_and_swap<mode>_lse"
  [(set (match_operand:SI 0 "register_operand" "+r")            ;; val out
     (zero_extend:SI
       (match_operand:SHORT 1 "aarch64_sync_memory_operand" "+Q"))) ;; memory
   (set (match_dup 1)
     (unspec_volatile:SHORT
       [(match_dup 0)                                           ;; expected
        (match_operand:SHORT 2 "aarch64_reg_or_zero" "rZ")      ;; desired
        (match_operand:SI 3 "const_int_operand")]               ;; mod_s
       UNSPECV_ATOMIC_CMPSW))]
  "TARGET_LSE"
  {
    enum memmodel model = memmodel_from_int (INTVAL (operands[3]));
    if (is_mm_relaxed (model))
      return "cas<atomic_sfx>\t%<w>0, %<w>2, %1";
    else if (is_mm_acquire (model) || is_mm_consume (model))
      return "casa<atomic_sfx>\t%<w>0, %<w>2, %1";
    else if (is_mm_release (model))
      return "casl<atomic_sfx>\t%<w>0, %<w>2, %1";
    else
      return "casal<atomic_sfx>\t%<w>0, %<w>2, %1";
  })

(define_insn "@aarch64_compare_and_swap<mode>_lse"
  [(set (match_operand:GPI 0 "register_operand" "+r")           ;; val out
     (match_operand:GPI 1 "aarch64_sync_memory_operand" "+Q"))  ;; memory
   (set (match_dup 1)
     (unspec_volatile:GPI
       [(match_dup 0)                                           ;; expected
        (match_operand:GPI 2 "aarch64_reg_or_zero" "rZ")        ;; desired
        (match_operand:SI 3 "const_int_operand")]               ;; mod_s
       UNSPECV_ATOMIC_CMPSW))]
  "TARGET_LSE"
  {
    enum memmodel model = memmodel_from_int (INTVAL (operands[3]));
    if (is_mm_relaxed (model))
      return "cas<atomic_sfx>\t%<w>0, %<w>2, %1";
    else if (is_mm_acquire (model) || is_mm_consume (model))
      return "casa<atomic_sfx>\t%<w>0, %<w>2, %1";
    else if (is_mm_release (model))
      return "casl<atomic_sfx>\t%<w>0, %<w>2, %1";
    else
      return "casal<atomic_sfx>\t%<w>0, %<w>2, %1";
  })
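
;; The suffix on the CAS mnemonic encodes the memory model: plain CAS
;; for relaxed, CASA for acquire/consume, CASL for release, CASAL
;; otherwise.  An illustrative C fragment (assuming an LSE target such
;; as -march=armv8.1-a; function name is hypothetical):
;;
;;   #include <stdatomic.h>
;;
;;   int
;;   cas_acquire (atomic_int *p, int e, int d)
;;   {
;;     /* Expected to emit CASA; relaxed would give CAS, release CASL,
;;        and seq_cst CASAL.  */
;;     atomic_compare_exchange_strong_explicit (p, &e, d,
;;                                              memory_order_acquire,
;;                                              memory_order_acquire);
;;     return e;
;;   }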

(define_expand "atomic_exchange<mode>"
  [(match_operand:ALLI 0 "register_operand")
   (match_operand:ALLI 1 "aarch64_sync_memory_operand")
   (match_operand:ALLI 2 "aarch64_reg_or_zero")
   (match_operand:SI 3 "const_int_operand")]
  ""
  {
    rtx (*gen) (rtx, rtx, rtx, rtx);

    /* Use an atomic SWP when available.  */
    if (TARGET_LSE)
      gen = gen_aarch64_atomic_exchange<mode>_lse;
    else
      gen = gen_aarch64_atomic_exchange<mode>;

    emit_insn (gen (operands[0], operands[1], operands[2], operands[3]));

    DONE;
  }
)
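
;; A hedged C-level example of what this expander serves (hypothetical
;; function name):
;;
;;   #include <stdatomic.h>
;;
;;   int
;;   take_flag (atomic_int *p)
;;   {
;;     /* With LSE this is a single SWPAL; otherwise the insn_and_split
;;        below expands to an LDAXR/STLXR loop.  */
;;     return atomic_exchange_explicit (p, 0, memory_order_seq_cst);
;;   }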

(define_insn_and_split "aarch64_atomic_exchange<mode>"
  [(set (match_operand:ALLI 0 "register_operand" "=&r")         ;; output
     (match_operand:ALLI 1 "aarch64_sync_memory_operand" "+Q")) ;; memory
   (set (match_dup 1)
     (unspec_volatile:ALLI
       [(match_operand:ALLI 2 "aarch64_reg_or_zero" "rZ")       ;; input
        (match_operand:SI 3 "const_int_operand" "")]            ;; model
       UNSPECV_ATOMIC_EXCHG))
   (clobber (reg:CC CC_REGNUM))
   (clobber (match_scratch:SI 4 "=&r"))]
  ""
  "#"
  "&& reload_completed"
  [(const_int 0)]
  {
    aarch64_split_atomic_op (SET, operands[0], NULL, operands[1],
                             operands[2], operands[3], operands[4]);
    DONE;
  }
)

(define_insn "aarch64_atomic_exchange<mode>_lse"
  [(set (match_operand:ALLI 0 "register_operand" "=r")
     (match_operand:ALLI 1 "aarch64_sync_memory_operand" "+Q"))
   (set (match_dup 1)
     (unspec_volatile:ALLI
       [(match_operand:ALLI 2 "aarch64_reg_or_zero" "rZ")
        (match_operand:SI 3 "const_int_operand" "")]
       UNSPECV_ATOMIC_EXCHG))]
  "TARGET_LSE"
  {
    enum memmodel model = memmodel_from_int (INTVAL (operands[3]));
    if (is_mm_relaxed (model))
      return "swp<atomic_sfx>\t%<w>2, %<w>0, %1";
    else if (is_mm_acquire (model) || is_mm_consume (model))
      return "swpa<atomic_sfx>\t%<w>2, %<w>0, %1";
    else if (is_mm_release (model))
      return "swpl<atomic_sfx>\t%<w>2, %<w>0, %1";
    else
      return "swpal<atomic_sfx>\t%<w>2, %<w>0, %1";
  }
)

(define_expand "atomic_<atomic_optab><mode>"
  [(match_operand:ALLI 0 "aarch64_sync_memory_operand")
   (atomic_op:ALLI
     (match_operand:ALLI 1 "<atomic_op_operand>")
     (match_operand:SI 2 "const_int_operand"))]
  ""
  {
    rtx (*gen) (rtx, rtx, rtx);

    /* Use an atomic load-operate instruction when possible.  */
    if (TARGET_LSE)
      {
        switch (<CODE>)
          {
          case MINUS:
            operands[1] = expand_simple_unop (<MODE>mode, NEG, operands[1],
                                              NULL, 1);
            /* fallthru */
          case PLUS:
            gen = gen_aarch64_atomic_add<mode>_lse;
            break;
          case IOR:
            gen = gen_aarch64_atomic_ior<mode>_lse;
            break;
          case XOR:
            gen = gen_aarch64_atomic_xor<mode>_lse;
            break;
          case AND:
            operands[1] = expand_simple_unop (<MODE>mode, NOT, operands[1],
                                              NULL, 1);
            gen = gen_aarch64_atomic_bic<mode>_lse;
            break;
          default:
            gcc_unreachable ();
          }
        operands[1] = force_reg (<MODE>mode, operands[1]);
      }
    else
      gen = gen_aarch64_atomic_<atomic_optab><mode>;

    emit_insn (gen (operands[0], operands[1], operands[2]));
    DONE;
  }
)
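
;; The MINUS and AND cases above rely on algebraic identities, since
;; LSE has no atomic subtract or AND: x - n == x + (-n) (handled by
;; LDADD) and x & m == bic (x, ~m) (handled by LDCLR).  A hedged C
;; illustration (hypothetical function names):
;;
;;   #include <stdatomic.h>
;;
;;   void
;;   drop (atomic_long *p, long n)
;;   {
;;     /* On LSE targets, expected to negate N and use LDADD.  */
;;     atomic_fetch_sub_explicit (p, n, memory_order_relaxed);
;;   }
;;
;;   void
;;   mask (atomic_long *p, long m)
;;   {
;;     /* On LSE targets, expected to invert M and use LDCLR.  */
;;     atomic_fetch_and_explicit (p, m, memory_order_relaxed);
;;   }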

(define_insn_and_split "aarch64_atomic_<atomic_optab><mode>"
  [(set (match_operand:ALLI 0 "aarch64_sync_memory_operand" "+Q")
     (unspec_volatile:ALLI
       [(atomic_op:ALLI (match_dup 0)
          (match_operand:ALLI 1 "<atomic_op_operand>" "r<const_atomic>"))
        (match_operand:SI 2 "const_int_operand")]
       UNSPECV_ATOMIC_OP))
   (clobber (reg:CC CC_REGNUM))
   (clobber (match_scratch:ALLI 3 "=&r"))
   (clobber (match_scratch:SI 4 "=&r"))]
  ""
  "#"
  "&& reload_completed"
  [(const_int 0)]
  {
    aarch64_split_atomic_op (<CODE>, NULL, operands[3], operands[0],
                             operands[1], operands[2], operands[4]);
    DONE;
  }
)

;; It is tempting to want to use ST<OP> for relaxed and release
;; memory models here.  However, that is incompatible with the
;; C++ memory model for the following case:
;;
;;   atomic_fetch_add(ptr, 1, memory_order_relaxed);
;;   atomic_thread_fence(memory_order_acquire);
;;
;; The problem is that the architecture says that ST<OP> (and LD<OP>
;; insns where the destination is XZR) are not regarded as a read.
;; However we also implement the acquire memory barrier with DMB LD,
;; and so the ST<OP> is not blocked by the barrier.
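
;; For reference, a compilable C11 rendering of that problem case
;; (hypothetical function name):
;;
;;   #include <stdatomic.h>
;;
;;   void
;;   bump_then_acquire (atomic_int *ptr)
;;   {
;;     atomic_fetch_add_explicit (ptr, 1, memory_order_relaxed);
;;     atomic_thread_fence (memory_order_acquire);
;;   }
;;
;; Were the fetch_add emitted as STADD, the DMB ISHLD implementing the
;; fence would not order it; hence the pattern below always writes the
;; loaded value to a scratch register (operand 3) rather than XZR.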

(define_insn "aarch64_atomic_<atomic_ldoptab><mode>_lse"
  [(set (match_operand:ALLI 0 "aarch64_sync_memory_operand" "+Q")
     (unspec_volatile:ALLI
       [(match_dup 0)
        (match_operand:ALLI 1 "register_operand" "r")
        (match_operand:SI 2 "const_int_operand")]
       ATOMIC_LDOP))
   (clobber (match_scratch:ALLI 3 "=r"))]
  "TARGET_LSE"
  {
    enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
    if (is_mm_relaxed (model))
      return "ld<atomic_ldop><atomic_sfx>\t%<w>1, %<w>3, %0";
    else if (is_mm_release (model))
      return "ld<atomic_ldop>l<atomic_sfx>\t%<w>1, %<w>3, %0";
    else if (is_mm_acquire (model) || is_mm_consume (model))
      return "ld<atomic_ldop>a<atomic_sfx>\t%<w>1, %<w>3, %0";
    else
      return "ld<atomic_ldop>al<atomic_sfx>\t%<w>1, %<w>3, %0";
  }
)

(define_insn_and_split "atomic_nand<mode>"
  [(set (match_operand:ALLI 0 "aarch64_sync_memory_operand" "+Q")
     (unspec_volatile:ALLI
       [(not:ALLI
          (and:ALLI (match_dup 0)
            (match_operand:ALLI 1 "aarch64_logical_operand" "r<lconst_atomic>")))
        (match_operand:SI 2 "const_int_operand")]               ;; model
       UNSPECV_ATOMIC_OP))
   (clobber (reg:CC CC_REGNUM))
   (clobber (match_scratch:ALLI 3 "=&r"))
   (clobber (match_scratch:SI 4 "=&r"))]
  ""
  "#"
  "&& reload_completed"
  [(const_int 0)]
  {
    aarch64_split_atomic_op (NOT, NULL, operands[3], operands[0],
                             operands[1], operands[2], operands[4]);
    DONE;
  }
)

;; Load-operate-store, returning the original memory data.

(define_expand "atomic_fetch_<atomic_optab><mode>"
  [(match_operand:ALLI 0 "register_operand")
   (match_operand:ALLI 1 "aarch64_sync_memory_operand")
   (atomic_op:ALLI
     (match_operand:ALLI 2 "<atomic_op_operand>")
     (match_operand:SI 3 "const_int_operand"))]
  ""
  {
    rtx (*gen) (rtx, rtx, rtx, rtx);

    /* Use an atomic load-operate instruction when possible.  */
    if (TARGET_LSE)
      {
        switch (<CODE>)
          {
          case MINUS:
            operands[2] = expand_simple_unop (<MODE>mode, NEG, operands[2],
                                              NULL, 1);
            /* fallthru */
          case PLUS:
            gen = gen_aarch64_atomic_fetch_add<mode>_lse;
            break;
          case IOR:
            gen = gen_aarch64_atomic_fetch_ior<mode>_lse;
            break;
          case XOR:
            gen = gen_aarch64_atomic_fetch_xor<mode>_lse;
            break;
          case AND:
            operands[2] = expand_simple_unop (<MODE>mode, NOT, operands[2],
                                              NULL, 1);
            gen = gen_aarch64_atomic_fetch_bic<mode>_lse;
            break;
          default:
            gcc_unreachable ();
          }
        operands[2] = force_reg (<MODE>mode, operands[2]);
      }
    else
      gen = gen_aarch64_atomic_fetch_<atomic_optab><mode>;

    emit_insn (gen (operands[0], operands[1], operands[2], operands[3]));
    DONE;
  })

(define_insn_and_split "aarch64_atomic_fetch_<atomic_optab><mode>"
  [(set (match_operand:ALLI 0 "register_operand" "=&r")
     (match_operand:ALLI 1 "aarch64_sync_memory_operand" "+Q"))
   (set (match_dup 1)
     (unspec_volatile:ALLI
       [(atomic_op:ALLI (match_dup 1)
          (match_operand:ALLI 2 "<atomic_op_operand>" "r<const_atomic>"))
        (match_operand:SI 3 "const_int_operand")]               ;; model
       UNSPECV_ATOMIC_OP))
   (clobber (reg:CC CC_REGNUM))
   (clobber (match_scratch:ALLI 4 "=&r"))
   (clobber (match_scratch:SI 5 "=&r"))]
  ""
  "#"
  "&& reload_completed"
  [(const_int 0)]
  {
    aarch64_split_atomic_op (<CODE>, operands[0], operands[4], operands[1],
                             operands[2], operands[3], operands[5]);
    DONE;
  }
)

(define_insn "aarch64_atomic_fetch_<atomic_ldoptab><mode>_lse"
  [(set (match_operand:ALLI 0 "register_operand" "=r")
     (match_operand:ALLI 1 "aarch64_sync_memory_operand" "+Q"))
   (set (match_dup 1)
     (unspec_volatile:ALLI
       [(match_dup 1)
        (match_operand:ALLI 2 "register_operand" "r")
        (match_operand:SI 3 "const_int_operand")]
       ATOMIC_LDOP))]
  "TARGET_LSE"
  {
    enum memmodel model = memmodel_from_int (INTVAL (operands[3]));
    if (is_mm_relaxed (model))
      return "ld<atomic_ldop><atomic_sfx>\t%<w>2, %<w>0, %1";
    else if (is_mm_acquire (model) || is_mm_consume (model))
      return "ld<atomic_ldop>a<atomic_sfx>\t%<w>2, %<w>0, %1";
    else if (is_mm_release (model))
      return "ld<atomic_ldop>l<atomic_sfx>\t%<w>2, %<w>0, %1";
    else
      return "ld<atomic_ldop>al<atomic_sfx>\t%<w>2, %<w>0, %1";
  }
)

(define_insn_and_split "atomic_fetch_nand<mode>"
  [(set (match_operand:ALLI 0 "register_operand" "=&r")
     (match_operand:ALLI 1 "aarch64_sync_memory_operand" "+Q"))
   (set (match_dup 1)
     (unspec_volatile:ALLI
       [(not:ALLI
          (and:ALLI (match_dup 1)
            (match_operand:ALLI 2 "aarch64_logical_operand" "r<lconst_atomic>")))
        (match_operand:SI 3 "const_int_operand")]               ;; model
       UNSPECV_ATOMIC_OP))
   (clobber (reg:CC CC_REGNUM))
   (clobber (match_scratch:ALLI 4 "=&r"))
   (clobber (match_scratch:SI 5 "=&r"))]
  ""
  "#"
  "&& reload_completed"
  [(const_int 0)]
  {
    aarch64_split_atomic_op (NOT, operands[0], operands[4], operands[1],
                             operands[2], operands[3], operands[5]);
    DONE;
  }
)

;; Load-operate-store, returning the updated memory data.

(define_expand "atomic_<atomic_optab>_fetch<mode>"
  [(match_operand:ALLI 0 "register_operand")
   (atomic_op:ALLI
     (match_operand:ALLI 1 "aarch64_sync_memory_operand")
     (match_operand:ALLI 2 "<atomic_op_operand>"))
   (match_operand:SI 3 "const_int_operand")]
  ""
  {
    /* Use an atomic load-operate instruction when possible.  In this case
       we will re-compute the result from the original mem value.  */
    if (TARGET_LSE)
      {
        rtx tmp = gen_reg_rtx (<MODE>mode);
        operands[2] = force_reg (<MODE>mode, operands[2]);
        emit_insn (gen_atomic_fetch_<atomic_optab><mode>
                   (tmp, operands[1], operands[2], operands[3]));
        tmp = expand_simple_binop (<MODE>mode, <CODE>, tmp, operands[2],
                                   operands[0], 1, OPTAB_WIDEN);
        emit_move_insn (operands[0], tmp);
      }
    else
      {
        emit_insn (gen_aarch64_atomic_<atomic_optab>_fetch<mode>
                   (operands[0], operands[1], operands[2], operands[3]));
      }
    DONE;
  })
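
;; The LSE path above rebuilds the updated value from the fetched
;; original, using the identity op_fetch (p, n) == fetch_op (p, n) <op> n.
;; A hedged C spelling of the same recomputation (hypothetical function
;; name):
;;
;;   #include <stdatomic.h>
;;
;;   long
;;   add_fetch (atomic_long *p, long n)
;;   {
;;     /* LDADDAL returns the old value; add N again to get the
;;        updated one.  */
;;     return atomic_fetch_add_explicit (p, n, memory_order_seq_cst) + n;
;;   }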

(define_insn_and_split "aarch64_atomic_<atomic_optab>_fetch<mode>"
  [(set (match_operand:ALLI 0 "register_operand" "=&r")
     (atomic_op:ALLI
       (match_operand:ALLI 1 "aarch64_sync_memory_operand" "+Q")
       (match_operand:ALLI 2 "<atomic_op_operand>" "r<const_atomic>")))
   (set (match_dup 1)
     (unspec_volatile:ALLI
       [(match_dup 1) (match_dup 2)
        (match_operand:SI 3 "const_int_operand")]               ;; model
       UNSPECV_ATOMIC_OP))
   (clobber (reg:CC CC_REGNUM))
   (clobber (match_scratch:SI 4 "=&r"))]
  ""
  "#"
  "&& reload_completed"
  [(const_int 0)]
  {
    aarch64_split_atomic_op (<CODE>, NULL, operands[0], operands[1],
                             operands[2], operands[3], operands[4]);
    DONE;
  }
)

(define_insn_and_split "atomic_nand_fetch<mode>"
  [(set (match_operand:ALLI 0 "register_operand" "=&r")
     (not:ALLI
       (and:ALLI
         (match_operand:ALLI 1 "aarch64_sync_memory_operand" "+Q")
         (match_operand:ALLI 2 "aarch64_logical_operand" "r<lconst_atomic>"))))
   (set (match_dup 1)
     (unspec_volatile:ALLI
       [(match_dup 1) (match_dup 2)
        (match_operand:SI 3 "const_int_operand")]               ;; model
       UNSPECV_ATOMIC_OP))
   (clobber (reg:CC CC_REGNUM))
   (clobber (match_scratch:SI 4 "=&r"))]
  ""
  "#"
  "&& reload_completed"
  [(const_int 0)]
  {
    aarch64_split_atomic_op (NOT, NULL, operands[0], operands[1],
                             operands[2], operands[3], operands[4]);
    DONE;
  }
)
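
;; NAND has no LSE form, so both NAND patterns always go through the
;; LL/SC split.  A minimal C sketch of the builtin that lands here
;; (hypothetical function name):
;;
;;   long
;;   nand_fetch (long *p, long m)
;;   {
;;     /* Atomically performs *p = ~(*p & m) and returns the updated
;;        value.  */
;;     return __atomic_nand_fetch (p, m, __ATOMIC_SEQ_CST);
;;   }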

(define_insn "atomic_load<mode>"
  [(set (match_operand:ALLI 0 "register_operand" "=r")
     (unspec_volatile:ALLI
       [(match_operand:ALLI 1 "aarch64_sync_memory_operand" "Q")
        (match_operand:SI 2 "const_int_operand")]               ;; model
       UNSPECV_LDA))]
  ""
  {
    enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
    if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_release (model))
      return "ldr<atomic_sfx>\t%<w>0, %1";
    else
      return "ldar<atomic_sfx>\t%<w>0, %1";
  }
)
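
;; An illustrative C fragment for the load pattern (hypothetical
;; function name):
;;
;;   #include <stdatomic.h>
;;
;;   int
;;   load_acquire (const atomic_int *p)
;;   {
;;     /* Expected to emit LDAR; a relaxed load would emit LDR.  */
;;     return atomic_load_explicit (p, memory_order_acquire);
;;   }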

(define_insn "atomic_store<mode>"
  [(set (match_operand:ALLI 0 "aarch64_rcpc_memory_operand" "=Q,Ust")
     (unspec_volatile:ALLI
       [(match_operand:ALLI 1 "general_operand" "rZ,rZ")
        (match_operand:SI 2 "const_int_operand")]               ;; model
       UNSPECV_STL))]
  ""
  {
    enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
    if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_acquire (model))
      return "str<atomic_sfx>\t%<w>1, %0";
    else if (which_alternative == 0)
      return "stlr<atomic_sfx>\t%<w>1, %0";
    else
      return "stlur<atomic_sfx>\t%<w>1, %0";
  }
  [(set_attr "arch" "*,rcpc8_4")]
)
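
;; An illustrative C fragment for the store pattern (hypothetical
;; function name):
;;
;;   #include <stdatomic.h>
;;
;;   void
;;   store_release (atomic_int *p, int v)
;;   {
;;     /* Expected to emit STLR, or the offset form STLUR when the
;;        Armv8.4-A RCpc extension is available (second alternative
;;        above); a relaxed store would emit STR.  */
;;     atomic_store_explicit (p, v, memory_order_release);
;;   }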

(define_insn "@aarch64_load_exclusive<mode>"
  [(set (match_operand:SI 0 "register_operand" "=r")
     (zero_extend:SI
       (unspec_volatile:SHORT
         [(match_operand:SHORT 1 "aarch64_sync_memory_operand" "Q")
          (match_operand:SI 2 "const_int_operand")]
         UNSPECV_LX)))]
  ""
  {
    enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
    if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_release (model))
      return "ldxr<atomic_sfx>\t%w0, %1";
    else
      return "ldaxr<atomic_sfx>\t%w0, %1";
  }
)

(define_insn "@aarch64_load_exclusive<mode>"
  [(set (match_operand:GPI 0 "register_operand" "=r")
     (unspec_volatile:GPI
       [(match_operand:GPI 1 "aarch64_sync_memory_operand" "Q")
        (match_operand:SI 2 "const_int_operand")]
       UNSPECV_LX))]
  ""
  {
    enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
    if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_release (model))
      return "ldxr\t%<w>0, %1";
    else
      return "ldaxr\t%<w>0, %1";
  }
)

(define_insn "@aarch64_store_exclusive<mode>"
  [(set (match_operand:SI 0 "register_operand" "=&r")
     (unspec_volatile:SI [(const_int 0)] UNSPECV_SX))
   (set (match_operand:ALLI 1 "aarch64_sync_memory_operand" "=Q")
     (unspec_volatile:ALLI
       [(match_operand:ALLI 2 "aarch64_reg_or_zero" "rZ")
        (match_operand:SI 3 "const_int_operand")]
       UNSPECV_SX))]
  ""
  {
    enum memmodel model = memmodel_from_int (INTVAL (operands[3]));
    if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_acquire (model))
      return "stxr<atomic_sfx>\t%w0, %<w>2, %1";
    else
      return "stlxr<atomic_sfx>\t%w0, %<w>2, %1";
  }
)
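
;; The two exclusive patterns above are the building blocks used by
;; aarch64_split_compare_and_swap and aarch64_split_atomic_op.  A
;; hedged inline-assembly sketch of a single LL/SC attempt (helper is
;; hypothetical, not emitted by this file):
;;
;;   static inline int
;;   try_exchange (long *p, long desired, long *old)
;;   {
;;     int fail;
;;     __asm__ volatile ("ldaxr\t%0, %2\n\t"
;;                       "stlxr\t%w1, %3, %2"
;;                       : "=&r" (*old), "=&r" (fail), "+Q" (*p)
;;                       : "r" (desired));
;;     return fail;    /* 0 on success, 1 if the exclusive was lost.  */
;;   }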

(define_expand "mem_thread_fence"
  [(match_operand:SI 0 "const_int_operand")]
  ""
  {
    enum memmodel model = memmodel_from_int (INTVAL (operands[0]));
    if (!(is_mm_relaxed (model) || is_mm_consume (model)))
      emit_insn (gen_dmb (operands[0]));
    DONE;
  }
)
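
;; A hedged C11 summary of the resulting fence code (comments show the
;; expected output, per the dmb patterns below):
;;
;;   #include <stdatomic.h>
;;
;;   void
;;   fences (void)
;;   {
;;     atomic_thread_fence (memory_order_relaxed);   /* no code    */
;;     atomic_thread_fence (memory_order_acquire);   /* dmb ishld  */
;;     atomic_thread_fence (memory_order_release);   /* dmb ish    */
;;     atomic_thread_fence (memory_order_seq_cst);   /* dmb ish    */
;;   }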

(define_expand "dmb"
  [(set (match_dup 1)
     (unspec:BLK [(match_dup 1) (match_operand:SI 0 "const_int_operand")]
       UNSPEC_MB))]
  ""
  {
    operands[1] = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
    MEM_VOLATILE_P (operands[1]) = 1;
  }
)

(define_insn "*dmb"
  [(set (match_operand:BLK 0 "" "")
     (unspec:BLK [(match_dup 0) (match_operand:SI 1 "const_int_operand")]
       UNSPEC_MB))]
  ""
  {
    enum memmodel model = memmodel_from_int (INTVAL (operands[1]));
    if (is_mm_acquire (model))
      return "dmb\\tishld";
    else
      return "dmb\\tish";
  }
)