1 ;; Machine description for AArch64 processor synchronization primitives.
2 ;; Copyright (C) 2009-2020 Free Software Foundation, Inc.
3 ;; Contributed by ARM Ltd.
5 ;; This file is part of GCC.
7 ;; GCC is free software; you can redistribute it and/or modify it
8 ;; under the terms of the GNU General Public License as published by
9 ;; the Free Software Foundation; either version 3, or (at your option)
;; any later version.
12 ;; GCC is distributed in the hope that it will be useful, but
13 ;; WITHOUT ANY WARRANTY; without even the implied warranty of
14 ;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 ;; General Public License for more details.
17 ;; You should have received a copy of the GNU General Public License
18 ;; along with GCC; see the file COPYING3. If not see
19 ;; <http://www.gnu.org/licenses/>.
21 ;; Instruction patterns.
;; Expander for the __atomic_compare_exchange family for all integer
;; modes up to TImode (ALLI_TI).  All strategy selection is delegated
;; to the C helper aarch64_expand_compare_and_swap.
;; NOTE(review): this chunk appears to be missing lines (no condition
;; string or expander-body delimiters are visible) -- verify against
;; the upstream file before editing.
23 (define_expand "@atomic_compare_and_swap<mode>"
24 (match_operand:SI 0 "register_operand" "") ;; bool out
25 (match_operand:ALLI_TI 1 "register_operand" "") ;; val out
26 (match_operand:ALLI_TI 2 "aarch64_sync_memory_operand" "") ;; memory
27 (match_operand:ALLI_TI 3 "nonmemory_operand" "") ;; expected
28 (match_operand:ALLI_TI 4 "aarch64_reg_or_zero" "") ;; desired
29 (match_operand:SI 5 "const_int_operand") ;; is_weak
30 (match_operand:SI 6 "const_int_operand") ;; mod_s
31 (match_operand:SI 7 "const_int_operand")] ;; mod_f
34 aarch64_expand_compare_and_swap (operands);
;; Predicate used for the "expected" operand of the sub-word (QI/HI)
;; compare-and-swap pattern below: QImode accepts any immediate,
;; HImode only immediates valid for an ADD/SUB.
39 (define_mode_attr cas_short_expected_pred
40 [(QI "aarch64_reg_or_imm") (HI "aarch64_plushi_operand")])
;; Sub-word (QI/HI) compare-and-swap via a load/store-exclusive loop.
;; The success flag is produced in the condition-code register; the
;; insn is split after reload by aarch64_split_compare_and_swap.
42 (define_insn_and_split "@aarch64_compare_and_swap<mode>"
43 [(set (reg:CC CC_REGNUM) ;; bool out
44 (unspec_volatile:CC [(const_int 0)] UNSPECV_ATOMIC_CMPSW))
45 (set (match_operand:SI 0 "register_operand" "=&r") ;; val out
47 (match_operand:SHORT 1 "aarch64_sync_memory_operand" "+Q"))) ;; memory
49 (unspec_volatile:SHORT
50 [(match_operand:SHORT 2 "<cas_short_expected_pred>" "rn") ;; expected
51 (match_operand:SHORT 3 "aarch64_reg_or_zero" "rZ") ;; desired
52 (match_operand:SI 4 "const_int_operand") ;; is_weak
53 (match_operand:SI 5 "const_int_operand") ;; mod_s
54 (match_operand:SI 6 "const_int_operand")] ;; mod_f
55 UNSPECV_ATOMIC_CMPSW))
;; Scratch register used as the store-exclusive status/loop temp.
56 (clobber (match_scratch:SI 7 "=&r"))]
62 aarch64_split_compare_and_swap (operands);
;; Word/doubleword (SI/DI) compare-and-swap via a load/store-exclusive
;; loop; "expected" may be a register or an add/sub-range immediate.
;; Split after reload by aarch64_split_compare_and_swap.
67 (define_insn_and_split "@aarch64_compare_and_swap<mode>"
68 [(set (reg:CC CC_REGNUM) ;; bool out
69 (unspec_volatile:CC [(const_int 0)] UNSPECV_ATOMIC_CMPSW))
70 (set (match_operand:GPI 0 "register_operand" "=&r") ;; val out
71 (match_operand:GPI 1 "aarch64_sync_memory_operand" "+Q")) ;; memory
74 [(match_operand:GPI 2 "aarch64_plus_operand" "rIJ") ;; expect
75 (match_operand:GPI 3 "aarch64_reg_or_zero" "rZ") ;; desired
76 (match_operand:SI 4 "const_int_operand") ;; is_weak
77 (match_operand:SI 5 "const_int_operand") ;; mod_s
78 (match_operand:SI 6 "const_int_operand")] ;; mod_f
79 UNSPECV_ATOMIC_CMPSW))
;; Scratch register used as the store-exclusive status/loop temp.
80 (clobber (match_scratch:SI 7 "=&r"))]
86 aarch64_split_compare_and_swap (operands);
;; 128-bit (TImode) compare-and-swap via an LDXP/STXP loop; both the
;; expected and desired values must be register pairs (or zero).
;; Split after reload by aarch64_split_compare_and_swap.
91 (define_insn_and_split "@aarch64_compare_and_swap<mode>"
92 [(set (reg:CC CC_REGNUM) ;; bool out
93 (unspec_volatile:CC [(const_int 0)] UNSPECV_ATOMIC_CMPSW))
94 (set (match_operand:JUST_TI 0 "register_operand" "=&r") ;; val out
95 (match_operand:JUST_TI 1 "aarch64_sync_memory_operand" "+Q")) ;; memory
97 (unspec_volatile:JUST_TI
98 [(match_operand:JUST_TI 2 "aarch64_reg_or_zero" "rZ") ;; expect
99 (match_operand:JUST_TI 3 "aarch64_reg_or_zero" "rZ") ;; desired
100 (match_operand:SI 4 "const_int_operand") ;; is_weak
101 (match_operand:SI 5 "const_int_operand") ;; mod_s
102 (match_operand:SI 6 "const_int_operand")] ;; mod_f
103 UNSPECV_ATOMIC_CMPSW))
104 (clobber (match_scratch:SI 7 "=&r"))]
107 "&& reload_completed"
110 aarch64_split_compare_and_swap (operands);
;; Sub-word compare-and-swap using the ARMv8.1-A LSE CAS instruction.
;; Operand 0 is both the expected value (input) and the value loaded
;; from memory (output).  The acquire/release variant of the mnemonic
;; is chosen from the memory-model constant in operand 3.
115 (define_insn "@aarch64_compare_and_swap<mode>_lse"
116 [(set (match_operand:SI 0 "register_operand" "+r") ;; val out
118 (match_operand:SHORT 1 "aarch64_sync_memory_operand" "+Q"))) ;; memory
120 (unspec_volatile:SHORT
121 [(match_dup 0) ;; expected
122 (match_operand:SHORT 2 "aarch64_reg_or_zero" "rZ") ;; desired
123 (match_operand:SI 3 "const_int_operand")] ;; mod_s
124 UNSPECV_ATOMIC_CMPSW))]
127 enum memmodel model = memmodel_from_int (INTVAL (operands[3]));
128 if (is_mm_relaxed (model))
129 return "cas<atomic_sfx>\t%<w>0, %<w>2, %1";
130 else if (is_mm_acquire (model) || is_mm_consume (model))
131 return "casa<atomic_sfx>\t%<w>0, %<w>2, %1";
132 else if (is_mm_release (model))
133 return "casl<atomic_sfx>\t%<w>0, %<w>2, %1";
135 return "casal<atomic_sfx>\t%<w>0, %<w>2, %1";
;; Word/doubleword compare-and-swap using the LSE CAS instruction.
;; Same operand convention as the sub-word variant above: operand 0
;; carries the expected value in and the loaded value out, and the
;; mnemonic suffix (a/l/al) is selected from the memory model.
138 (define_insn "@aarch64_compare_and_swap<mode>_lse"
139 [(set (match_operand:GPI 0 "register_operand" "+r") ;; val out
140 (match_operand:GPI 1 "aarch64_sync_memory_operand" "+Q")) ;; memory
143 [(match_dup 0) ;; expected
144 (match_operand:GPI 2 "aarch64_reg_or_zero" "rZ") ;; desired
145 (match_operand:SI 3 "const_int_operand")] ;; mod_s
146 UNSPECV_ATOMIC_CMPSW))]
149 enum memmodel model = memmodel_from_int (INTVAL (operands[3]));
150 if (is_mm_relaxed (model))
151 return "cas<atomic_sfx>\t%<w>0, %<w>2, %1";
152 else if (is_mm_acquire (model) || is_mm_consume (model))
153 return "casa<atomic_sfx>\t%<w>0, %<w>2, %1";
154 else if (is_mm_release (model))
155 return "casl<atomic_sfx>\t%<w>0, %<w>2, %1";
157 return "casal<atomic_sfx>\t%<w>0, %<w>2, %1";
;; 128-bit compare-and-swap using the LSE CASP instruction, which
;; operates on an even/odd register pair (%0/%R0 and %2/%R2).  The
;; acquire/release variant is chosen from the memory-model constant.
160 (define_insn "@aarch64_compare_and_swap<mode>_lse"
161 [(set (match_operand:JUST_TI 0 "register_operand" "+r") ;; val out
162 (match_operand:JUST_TI 1 "aarch64_sync_memory_operand" "+Q")) ;; memory
164 (unspec_volatile:JUST_TI
165 [(match_dup 0) ;; expect
166 (match_operand:JUST_TI 2 "register_operand" "r") ;; desired
167 (match_operand:SI 3 "const_int_operand")] ;; mod_s
168 UNSPECV_ATOMIC_CMPSW))]
171 enum memmodel model = memmodel_from_int (INTVAL (operands[3]));
172 if (is_mm_relaxed (model))
173 return "casp\t%0, %R0, %2, %R2, %1";
174 else if (is_mm_acquire (model) || is_mm_consume (model))
175 return "caspa\t%0, %R0, %2, %R2, %1";
176 else if (is_mm_release (model))
177 return "caspl\t%0, %R0, %2, %R2, %1";
179 return "caspal\t%0, %R0, %2, %R2, %1";
;; Expander for __atomic_exchange on ALLI modes.  Strategy selection:
;; LSE SWP when available, else an outline-atomics library call
;; (aarch64_ool_swp_names), else the generic exclusive-loop insn.
182 (define_expand "atomic_exchange<mode>"
183 [(match_operand:ALLI 0 "register_operand")
184 (match_operand:ALLI 1 "aarch64_sync_memory_operand")
185 (match_operand:ALLI 2 "aarch64_reg_or_zero")
186 (match_operand:SI 3 "const_int_operand")]
189 /* Use an atomic SWP when available. */
192 emit_insn (gen_aarch64_atomic_exchange<mode>_lse
193 (operands[0], operands[1], operands[2], operands[3]));
195 else if (TARGET_OUTLINE_ATOMICS)
197 machine_mode mode = <MODE>mode;
198 rtx func = aarch64_atomic_ool_func (mode, operands[3],
199 &aarch64_ool_swp_names);
200 rtx rval = emit_library_call_value (func, operands[0], LCT_NORMAL,
201 mode, operands[2], mode,
202 XEXP (operands[1], 0), Pmode);
203 emit_move_insn (operands[0], rval);
207 emit_insn (gen_aarch64_atomic_exchange<mode>
208 (operands[0], operands[1], operands[2], operands[3]));
;; Atomic exchange via a load/store-exclusive loop; split after reload
;; by aarch64_split_atomic_op with SET as the (degenerate) operation.
214 (define_insn_and_split "aarch64_atomic_exchange<mode>"
215 [(set (match_operand:ALLI 0 "register_operand" "=&r") ;; output
216 (match_operand:ALLI 1 "aarch64_sync_memory_operand" "+Q")) ;; memory
218 (unspec_volatile:ALLI
219 [(match_operand:ALLI 2 "aarch64_reg_or_zero" "rZ") ;; input
220 (match_operand:SI 3 "const_int_operand" "")] ;; model
221 UNSPECV_ATOMIC_EXCHG))
222 (clobber (reg:CC CC_REGNUM))
;; Scratch register for the store-exclusive status result.
223 (clobber (match_scratch:SI 4 "=&r"))]
226 "&& reload_completed"
229 aarch64_split_atomic_op (SET, operands[0], NULL, operands[1],
230 operands[2], operands[3], operands[4]);
;; Atomic exchange using the LSE SWP instruction; the acquire/release
;; variant of the mnemonic is chosen from the memory-model constant.
235 (define_insn "aarch64_atomic_exchange<mode>_lse"
236 [(set (match_operand:ALLI 0 "register_operand" "=r")
237 (match_operand:ALLI 1 "aarch64_sync_memory_operand" "+Q"))
239 (unspec_volatile:ALLI
240 [(match_operand:ALLI 2 "aarch64_reg_or_zero" "rZ")
241 (match_operand:SI 3 "const_int_operand" "")]
242 UNSPECV_ATOMIC_EXCHG))]
245 enum memmodel model = memmodel_from_int (INTVAL (operands[3]));
246 if (is_mm_relaxed (model))
247 return "swp<atomic_sfx>\t%<w>2, %<w>0, %1";
248 else if (is_mm_acquire (model) || is_mm_consume (model))
249 return "swpa<atomic_sfx>\t%<w>2, %<w>0, %1";
250 else if (is_mm_release (model))
251 return "swpl<atomic_sfx>\t%<w>2, %<w>0, %1";
253 return "swpal<atomic_sfx>\t%<w>2, %<w>0, %1";
;; Expander for the no-result __atomic_<op> operations.  Strategy:
;; LSE load-operate insns (mapping sub to add-of-negation and and to
;; bic-of-complement), else outline-atomics library calls with the
;; same operand rewriting, else the generic exclusive-loop insn.
;; NOTE(review): several lines (the switch statements dispatching on
;; <CODE>) are missing from this chunk -- verify against upstream.
257 (define_expand "atomic_<atomic_optab><mode>"
258 [(match_operand:ALLI 0 "aarch64_sync_memory_operand")
260 (match_operand:ALLI 1 "<atomic_op_operand>")
261 (match_operand:SI 2 "const_int_operand"))]
264 rtx (*gen) (rtx, rtx, rtx);
266 /* Use an atomic load-operate instruction when possible. */
272 operands[1] = expand_simple_unop (<MODE>mode, NEG, operands[1],
276 gen = gen_aarch64_atomic_add<mode>_lse;
279 gen = gen_aarch64_atomic_ior<mode>_lse;
282 gen = gen_aarch64_atomic_xor<mode>_lse;
285 operands[1] = expand_simple_unop (<MODE>mode, NOT, operands[1],
287 gen = gen_aarch64_atomic_bic<mode>_lse;
292 operands[1] = force_reg (<MODE>mode, operands[1]);
294 else if (TARGET_OUTLINE_ATOMICS)
296 const atomic_ool_names *names;
300 operands[1] = expand_simple_unop (<MODE>mode, NEG, operands[1],
304 names = &aarch64_ool_ldadd_names;
307 names = &aarch64_ool_ldset_names;
310 names = &aarch64_ool_ldeor_names;
313 operands[1] = expand_simple_unop (<MODE>mode, NOT, operands[1],
315 names = &aarch64_ool_ldclr_names;
320 machine_mode mode = <MODE>mode;
321 rtx func = aarch64_atomic_ool_func (mode, operands[2], names);
322 emit_library_call_value (func, NULL_RTX, LCT_NORMAL, mode,
324 XEXP (operands[0], 0), Pmode);
328 gen = gen_aarch64_atomic_<atomic_optab><mode>;
330 emit_insn (gen (operands[0], operands[1], operands[2]));
;; No-result atomic read-modify-write via a load/store-exclusive loop;
;; split after reload by aarch64_split_atomic_op (old value discarded,
;; scratch 3 holds the computed new value, scratch 4 the STXR status).
335 (define_insn_and_split "aarch64_atomic_<atomic_optab><mode>"
336 [(set (match_operand:ALLI 0 "aarch64_sync_memory_operand" "+Q")
337 (unspec_volatile:ALLI
338 [(atomic_op:ALLI (match_dup 0)
339 (match_operand:ALLI 1 "<atomic_op_operand>" "r<const_atomic>"))
340 (match_operand:SI 2 "const_int_operand")]
342 (clobber (reg:CC CC_REGNUM))
343 (clobber (match_scratch:ALLI 3 "=&r"))
344 (clobber (match_scratch:SI 4 "=&r"))]
347 "&& reload_completed"
350 aarch64_split_atomic_op (<CODE>, NULL, operands[3], operands[0],
351 operands[1], operands[2], operands[4]);
356 ;; It is tempting to want to use ST<OP> for relaxed and release
357 ;; memory models here. However, that is incompatible with the
358 ;; C++ memory model for the following case:
360 ;; atomic_fetch_add(ptr, 1, memory_order_relaxed);
361 ;; atomic_thread_fence(memory_order_acquire);
363 ;; The problem is that the architecture says that ST<OP> (and LD<OP>
364 ;; insns where the destination is XZR) are not regarded as a read.
365 ;; However we also implement the acquire memory barrier with DMB LD,
366 ;; and so the ST<OP> is not blocked by the barrier.
;; No-result atomic operation using an LSE LD<op> instruction (the old
;; value lands in a scratch register and is ignored).  See the comment
;; above for why ST<op> is deliberately not used here.
368 (define_insn "aarch64_atomic_<atomic_ldoptab><mode>_lse"
369 [(set (match_operand:ALLI 0 "aarch64_sync_memory_operand" "+Q")
370 (unspec_volatile:ALLI
372 (match_operand:ALLI 1 "register_operand" "r")
373 (match_operand:SI 2 "const_int_operand")]
375 (clobber (match_scratch:ALLI 3 "=r"))]
378 enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
379 if (is_mm_relaxed (model))
380 return "ld<atomic_ldop><atomic_sfx>\t%<w>1, %<w>3, %0";
381 else if (is_mm_release (model))
382 return "ld<atomic_ldop>l<atomic_sfx>\t%<w>1, %<w>3, %0";
383 else if (is_mm_acquire (model) || is_mm_consume (model))
384 return "ld<atomic_ldop>a<atomic_sfx>\t%<w>1, %<w>3, %0";
386 return "ld<atomic_ldop>al<atomic_sfx>\t%<w>1, %<w>3, %0";
;; No-result atomic NAND (NOT of AND) via a load/store-exclusive loop;
;; there is no LSE equivalent, so this is always an exclusive loop.
;; Split after reload by aarch64_split_atomic_op with code NOT.
390 (define_insn_and_split "atomic_nand<mode>"
391 [(set (match_operand:ALLI 0 "aarch64_sync_memory_operand" "+Q")
392 (unspec_volatile:ALLI
394 (and:ALLI (match_dup 0)
395 (match_operand:ALLI 1 "aarch64_logical_operand" "r<lconst_atomic>")))
396 (match_operand:SI 2 "const_int_operand")] ;; model
398 (clobber (reg:CC CC_REGNUM))
399 (clobber (match_scratch:ALLI 3 "=&r"))
400 (clobber (match_scratch:SI 4 "=&r"))]
403 "&& reload_completed"
406 aarch64_split_atomic_op (NOT, NULL, operands[3], operands[0],
407 operands[1], operands[2], operands[4]);
412 ;; Load-operate-store, returning the original memory data.
;; Expander for __atomic_fetch_<op> (returns the OLD memory value).
;; Same three-way strategy as the no-result expander above: LSE
;; load-operate, outline-atomics library call, or exclusive loop,
;; with sub/and rewritten to add-of-negation / bic-of-complement.
;; NOTE(review): the switch dispatch lines are missing from this
;; chunk -- verify against upstream.
414 (define_expand "atomic_fetch_<atomic_optab><mode>"
415 [(match_operand:ALLI 0 "register_operand")
416 (match_operand:ALLI 1 "aarch64_sync_memory_operand")
418 (match_operand:ALLI 2 "<atomic_op_operand>")
419 (match_operand:SI 3 "const_int_operand"))]
422 rtx (*gen) (rtx, rtx, rtx, rtx);
424 /* Use an atomic load-operate instruction when possible. */
430 operands[2] = expand_simple_unop (<MODE>mode, NEG, operands[2],
434 gen = gen_aarch64_atomic_fetch_add<mode>_lse;
437 gen = gen_aarch64_atomic_fetch_ior<mode>_lse;
440 gen = gen_aarch64_atomic_fetch_xor<mode>_lse;
443 operands[2] = expand_simple_unop (<MODE>mode, NOT, operands[2],
445 gen = gen_aarch64_atomic_fetch_bic<mode>_lse;
450 operands[2] = force_reg (<MODE>mode, operands[2]);
452 else if (TARGET_OUTLINE_ATOMICS)
454 const atomic_ool_names *names;
458 operands[2] = expand_simple_unop (<MODE>mode, NEG, operands[2],
462 names = &aarch64_ool_ldadd_names;
465 names = &aarch64_ool_ldset_names;
468 names = &aarch64_ool_ldeor_names;
471 operands[2] = expand_simple_unop (<MODE>mode, NOT, operands[2],
473 names = &aarch64_ool_ldclr_names;
478 machine_mode mode = <MODE>mode;
479 rtx func = aarch64_atomic_ool_func (mode, operands[3], names);
480 rtx rval = emit_library_call_value (func, operands[0], LCT_NORMAL, mode,
482 XEXP (operands[1], 0), Pmode);
483 emit_move_insn (operands[0], rval);
487 gen = gen_aarch64_atomic_fetch_<atomic_optab><mode>;
489 emit_insn (gen (operands[0], operands[1], operands[2], operands[3]));
;; Fetch-and-<op> (returns old value in operand 0) via an exclusive
;; loop; split after reload by aarch64_split_atomic_op (scratch 4
;; holds the new value, scratch 5 the STXR status).
493 (define_insn_and_split "aarch64_atomic_fetch_<atomic_optab><mode>"
494 [(set (match_operand:ALLI 0 "register_operand" "=&r")
495 (match_operand:ALLI 1 "aarch64_sync_memory_operand" "+Q"))
497 (unspec_volatile:ALLI
498 [(atomic_op:ALLI (match_dup 1)
499 (match_operand:ALLI 2 "<atomic_op_operand>" "r<const_atomic>"))
500 (match_operand:SI 3 "const_int_operand")] ;; model
502 (clobber (reg:CC CC_REGNUM))
503 (clobber (match_scratch:ALLI 4 "=&r"))
504 (clobber (match_scratch:SI 5 "=&r"))]
507 "&& reload_completed"
510 aarch64_split_atomic_op (<CODE>, operands[0], operands[4], operands[1],
511 operands[2], operands[3], operands[5]);
;; Fetch-and-<op> using an LSE LD<op> instruction: old value returned
;; in operand 0, acquire/release suffix chosen from the memory model.
516 (define_insn "aarch64_atomic_fetch_<atomic_ldoptab><mode>_lse"
517 [(set (match_operand:ALLI 0 "register_operand" "=r")
518 (match_operand:ALLI 1 "aarch64_sync_memory_operand" "+Q"))
520 (unspec_volatile:ALLI
522 (match_operand:ALLI 2 "register_operand" "r")
523 (match_operand:SI 3 "const_int_operand")]
527 enum memmodel model = memmodel_from_int (INTVAL (operands[3]));
528 if (is_mm_relaxed (model))
529 return "ld<atomic_ldop><atomic_sfx>\t%<w>2, %<w>0, %1";
530 else if (is_mm_acquire (model) || is_mm_consume (model))
531 return "ld<atomic_ldop>a<atomic_sfx>\t%<w>2, %<w>0, %1";
532 else if (is_mm_release (model))
533 return "ld<atomic_ldop>l<atomic_sfx>\t%<w>2, %<w>0, %1";
535 return "ld<atomic_ldop>al<atomic_sfx>\t%<w>2, %<w>0, %1";
;; Fetch-and-NAND (returns old value); no LSE equivalent exists, so
;; always an exclusive loop, split with code NOT after reload.
539 (define_insn_and_split "atomic_fetch_nand<mode>"
540 [(set (match_operand:ALLI 0 "register_operand" "=&r")
541 (match_operand:ALLI 1 "aarch64_sync_memory_operand" "+Q"))
543 (unspec_volatile:ALLI
545 (and:ALLI (match_dup 1)
546 (match_operand:ALLI 2 "aarch64_logical_operand" "r<lconst_atomic>")))
547 (match_operand:SI 3 "const_int_operand")] ;; model
549 (clobber (reg:CC CC_REGNUM))
550 (clobber (match_scratch:ALLI 4 "=&r"))
551 (clobber (match_scratch:SI 5 "=&r"))]
554 "&& reload_completed"
557 aarch64_split_atomic_op (NOT, operands[0], operands[4], operands[1],
558 operands[2], operands[3], operands[5]);
563 ;; Load-operate-store, returning the updated memory data.
;; Expander for __atomic_<op>_fetch (returns the NEW memory value).
;; When LSE or outline atomics are available there is no direct insn,
;; so this emits the fetch_<op> form and recomputes new = old <op> val
;; in a temporary; otherwise it uses the generic exclusive-loop insn.
565 (define_expand "atomic_<atomic_optab>_fetch<mode>"
566 [(match_operand:ALLI 0 "register_operand")
568 (match_operand:ALLI 1 "aarch64_sync_memory_operand")
569 (match_operand:ALLI 2 "<atomic_op_operand>"))
570 (match_operand:SI 3 "const_int_operand")]
573 /* Use an atomic load-operate instruction when possible. In this case
574 we will re-compute the result from the original mem value. */
575 if (TARGET_LSE || TARGET_OUTLINE_ATOMICS)
577 rtx tmp = gen_reg_rtx (<MODE>mode);
578 operands[2] = force_reg (<MODE>mode, operands[2]);
579 emit_insn (gen_atomic_fetch_<atomic_optab><mode>
580 (tmp, operands[1], operands[2], operands[3]));
581 tmp = expand_simple_binop (<MODE>mode, <CODE>, tmp, operands[2],
582 operands[0], 1, OPTAB_WIDEN);
583 emit_move_insn (operands[0], tmp);
587 emit_insn (gen_aarch64_atomic_<atomic_optab>_fetch<mode>
588 (operands[0], operands[1], operands[2], operands[3]));
;; <op>-and-fetch (returns new value in operand 0) via an exclusive
;; loop; split after reload by aarch64_split_atomic_op (the old value
;; is discarded, operand 0 receives the computed result).
593 (define_insn_and_split "aarch64_atomic_<atomic_optab>_fetch<mode>"
594 [(set (match_operand:ALLI 0 "register_operand" "=&r")
596 (match_operand:ALLI 1 "aarch64_sync_memory_operand" "+Q")
597 (match_operand:ALLI 2 "<atomic_op_operand>" "r<const_atomic>")))
599 (unspec_volatile:ALLI
600 [(match_dup 1) (match_dup 2)
601 (match_operand:SI 3 "const_int_operand")] ;; model
603 (clobber (reg:CC CC_REGNUM))
604 (clobber (match_scratch:SI 4 "=&r"))]
607 "&& reload_completed"
610 aarch64_split_atomic_op (<CODE>, NULL, operands[0], operands[1],
611 operands[2], operands[3], operands[4]);
;; NAND-and-fetch (returns new value); exclusive loop only, split
;; with code NOT after reload.
616 (define_insn_and_split "atomic_nand_fetch<mode>"
617 [(set (match_operand:ALLI 0 "register_operand" "=&r")
620 (match_operand:ALLI 1 "aarch64_sync_memory_operand" "+Q")
621 (match_operand:ALLI 2 "aarch64_logical_operand" "r<lconst_atomic>"))))
623 (unspec_volatile:ALLI
624 [(match_dup 1) (match_dup 2)
625 (match_operand:SI 3 "const_int_operand")] ;; model
627 (clobber (reg:CC CC_REGNUM))
628 (clobber (match_scratch:SI 4 "=&r"))]
631 "&& reload_completed"
634 aarch64_split_atomic_op (NOT, NULL, operands[0], operands[1],
635 operands[2], operands[3], operands[4]);
;; Atomic load: plain LDR for relaxed/consume/release models (release
;; is meaningless on a load), load-acquire LDAR otherwise.
640 (define_insn "atomic_load<mode>"
641 [(set (match_operand:ALLI 0 "register_operand" "=r")
642 (unspec_volatile:ALLI
643 [(match_operand:ALLI 1 "aarch64_sync_memory_operand" "Q")
644 (match_operand:SI 2 "const_int_operand")] ;; model
648 enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
649 if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_release (model))
650 return "ldr<atomic_sfx>\t%<w>0, %1";
652 return "ldar<atomic_sfx>\t%<w>0, %1";
;; Atomic store: plain STR for relaxed/consume/acquire (acquire is
;; meaningless on a store), otherwise store-release -- STLR for the
;; base-register alternative, STLUR (alternative 1, requires the
;; rcpc8_4 arch attribute) for offset addressing.
656 (define_insn "atomic_store<mode>"
657 [(set (match_operand:ALLI 0 "aarch64_rcpc_memory_operand" "=Q,Ust")
658 (unspec_volatile:ALLI
659 [(match_operand:ALLI 1 "general_operand" "rZ,rZ")
660 (match_operand:SI 2 "const_int_operand")] ;; model
664 enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
665 if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_acquire (model))
666 return "str<atomic_sfx>\t%<w>1, %0";
667 else if (which_alternative == 0)
668 return "stlr<atomic_sfx>\t%<w>1, %0";
670 return "stlur<atomic_sfx>\t%<w>1, %0";
672 [(set_attr "arch" "*,rcpc8_4")]
;; Sub-word load-exclusive, zero-extended into an SImode register:
;; LDXR for relaxed/consume/release models, LDAXR (acquire) otherwise.
675 (define_insn "@aarch64_load_exclusive<mode>"
676 [(set (match_operand:SI 0 "register_operand" "=r")
678 (unspec_volatile:SHORT
679 [(match_operand:SHORT 1 "aarch64_sync_memory_operand" "Q")
680 (match_operand:SI 2 "const_int_operand")]
684 enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
685 if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_release (model))
686 return "ldxr<atomic_sfx>\t%w0, %1";
688 return "ldaxr<atomic_sfx>\t%w0, %1";
;; Word/doubleword load-exclusive: LDXR, or LDAXR when the memory
;; model requires acquire semantics.
692 (define_insn "@aarch64_load_exclusive<mode>"
693 [(set (match_operand:GPI 0 "register_operand" "=r")
695 [(match_operand:GPI 1 "aarch64_sync_memory_operand" "Q")
696 (match_operand:SI 2 "const_int_operand")]
700 enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
701 if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_release (model))
702 return "ldxr\t%<w>0, %1";
704 return "ldaxr\t%<w>0, %1";
;; 128-bit load-exclusive pair: the two DImode halves of the TImode
;; memory operand land in operands 0 and 1 (LDXP/LDAXP).
708 (define_insn "aarch64_load_exclusive_pair"
709 [(set (match_operand:DI 0 "register_operand" "=r")
711 [(match_operand:TI 2 "aarch64_sync_memory_operand" "Q")
712 (match_operand:SI 3 "const_int_operand")]
714 (set (match_operand:DI 1 "register_operand" "=r")
715 (unspec_volatile:DI [(match_dup 2) (match_dup 3)] UNSPECV_LX))]
718 enum memmodel model = memmodel_from_int (INTVAL (operands[3]));
719 if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_release (model))
720 return "ldxp\t%0, %1, %2";
722 return "ldaxp\t%0, %1, %2";
;; Store-exclusive: operand 0 receives the STXR status (0 = success).
;; STXR for relaxed/consume/acquire models, store-release STLXR
;; otherwise.
726 (define_insn "@aarch64_store_exclusive<mode>"
727 [(set (match_operand:SI 0 "register_operand" "=&r")
728 (unspec_volatile:SI [(const_int 0)] UNSPECV_SX))
729 (set (match_operand:ALLI 1 "aarch64_sync_memory_operand" "=Q")
730 (unspec_volatile:ALLI
731 [(match_operand:ALLI 2 "aarch64_reg_or_zero" "rZ")
732 (match_operand:SI 3 "const_int_operand")]
736 enum memmodel model = memmodel_from_int (INTVAL (operands[3]));
737 if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_acquire (model))
738 return "stxr<atomic_sfx>\t%w0, %<w>2, %1";
740 return "stlxr<atomic_sfx>\t%w0, %<w>2, %1";
;; 128-bit store-exclusive pair (STXP/STLXP): operands 2 and 3 are the
;; two DImode halves, operand 0 receives the status result.
744 (define_insn "aarch64_store_exclusive_pair"
745 [(set (match_operand:SI 0 "register_operand" "=&r")
746 (unspec_volatile:SI [(const_int 0)] UNSPECV_SX))
747 (set (match_operand:TI 1 "aarch64_sync_memory_operand" "=Q")
749 [(match_operand:DI 2 "aarch64_reg_or_zero" "rZ")
750 (match_operand:DI 3 "aarch64_reg_or_zero" "rZ")
751 (match_operand:SI 4 "const_int_operand")]
755 enum memmodel model = memmodel_from_int (INTVAL (operands[4]));
756 if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_acquire (model))
757 return "stxp\t%w0, %x2, %x3, %1";
759 return "stlxp\t%w0, %x2, %x3, %1";
;; Thread fence: relaxed and consume models need no barrier; every
;; other model emits a DMB (the dmb expander chooses ish vs. ishld).
763 (define_expand "mem_thread_fence"
764 [(match_operand:SI 0 "const_int_operand")]
767 enum memmodel model = memmodel_from_int (INTVAL (operands[0]));
768 if (!(is_mm_relaxed (model) || is_mm_consume (model)))
769 emit_insn (gen_dmb (operands[0]));
776 (unspec:BLK [(match_dup 1) (match_operand:SI 0 "const_int_operand")]
780 operands[1] = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
781 MEM_VOLATILE_P (operands[1]) = 1;
786 [(set (match_operand:BLK 0 "" "")
787 (unspec:BLK [(match_dup 0) (match_operand:SI 1 "const_int_operand")]
791 enum memmodel model = memmodel_from_int (INTVAL (operands[1]));
792 if (is_mm_acquire (model))
793 return "dmb\\tishld";