;; Machine description for AArch64 processor synchronization primitives.
;; Copyright (C) 2009-2020 Free Software Foundation, Inc.
;; Contributed by ARM Ltd.
;;
;; This file is part of GCC.
;;
;; GCC is free software; you can redistribute it and/or modify it
;; under the terms of the GNU General Public License as published by
;; the Free Software Foundation; either version 3, or (at your option)
;; any later version.
;;
;; GCC is distributed in the hope that it will be useful, but
;; WITHOUT ANY WARRANTY; without even the implied warranty of
;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
;; General Public License for more details.
;;
;; You should have received a copy of the GNU General Public License
;; along with GCC; see the file COPYING3.  If not see
;; <http://www.gnu.org/licenses/>.

;; Instruction patterns.

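;; Each atomic operation below is provided in up to three forms: an
;; LDXR/STXR loop (split after reload by the aarch64_split_* helpers),
;; a single Armv8.1-A LSE instruction when TARGET_LSE is set, and a call
;; to an out-of-line helper when TARGET_OUTLINE_ATOMICS is set.
;;
;; For illustration only (variable names are made up), a source-level
;; compare-and-swap such as
;;
;;   bool ok = __atomic_compare_exchange_n (ptr, &expected, desired,
;;                                          /* weak */ 0,
;;                                          __ATOMIC_SEQ_CST,
;;                                          __ATOMIC_SEQ_CST);
;;
;; is expanded through the compare-and-swap patterns that follow.
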
(define_expand "@atomic_compare_and_swap<mode>"
  [(match_operand:SI 0 "register_operand" "")                   ;; bool out
   (match_operand:ALLI_TI 1 "register_operand" "")              ;; val out
   (match_operand:ALLI_TI 2 "aarch64_sync_memory_operand" "")   ;; memory
   (match_operand:ALLI_TI 3 "nonmemory_operand" "")             ;; expected
   (match_operand:ALLI_TI 4 "aarch64_reg_or_zero" "")           ;; desired
   (match_operand:SI 5 "const_int_operand")                     ;; is_weak
   (match_operand:SI 6 "const_int_operand")                     ;; mod_s
   (match_operand:SI 7 "const_int_operand")]                    ;; mod_f
  ""
  {
    aarch64_expand_compare_and_swap (operands);
    DONE;
  }
)

(define_mode_attr cas_short_expected_pred
  [(QI "aarch64_reg_or_imm") (HI "aarch64_plushi_operand")])

(define_insn_and_split "@aarch64_compare_and_swap<mode>"
  [(set (reg:CC CC_REGNUM)                                      ;; bool out
    (unspec_volatile:CC [(const_int 0)] UNSPECV_ATOMIC_CMPSW))
   (set (match_operand:SI 0 "register_operand" "=&r")           ;; val out
    (zero_extend:SI
      (match_operand:SHORT 1 "aarch64_sync_memory_operand" "+Q"))) ;; memory
   (set (match_dup 1)
    (unspec_volatile:SHORT
      [(match_operand:SHORT 2 "<cas_short_expected_pred>" "rn") ;; expected
       (match_operand:SHORT 3 "aarch64_reg_or_zero" "rZ")       ;; desired
       (match_operand:SI 4 "const_int_operand")                 ;; is_weak
       (match_operand:SI 5 "const_int_operand")                 ;; mod_s
       (match_operand:SI 6 "const_int_operand")]                ;; mod_f
      UNSPECV_ATOMIC_CMPSW))
   (clobber (match_scratch:SI 7 "=&r"))]
  ""
  "#"
  "&& reload_completed"
  [(const_int 0)]
  {
    aarch64_split_compare_and_swap (operands);
    DONE;
  }
)

(define_insn_and_split "@aarch64_compare_and_swap<mode>"
  [(set (reg:CC CC_REGNUM)                                      ;; bool out
    (unspec_volatile:CC [(const_int 0)] UNSPECV_ATOMIC_CMPSW))
   (set (match_operand:GPI 0 "register_operand" "=&r")          ;; val out
    (match_operand:GPI 1 "aarch64_sync_memory_operand" "+Q"))   ;; memory
   (set (match_dup 1)
    (unspec_volatile:GPI
      [(match_operand:GPI 2 "aarch64_plus_operand" "rIJ")       ;; expect
       (match_operand:GPI 3 "aarch64_reg_or_zero" "rZ")         ;; desired
       (match_operand:SI 4 "const_int_operand")                 ;; is_weak
       (match_operand:SI 5 "const_int_operand")                 ;; mod_s
       (match_operand:SI 6 "const_int_operand")]                ;; mod_f
      UNSPECV_ATOMIC_CMPSW))
   (clobber (match_scratch:SI 7 "=&r"))]
  ""
  "#"
  "&& reload_completed"
  [(const_int 0)]
  {
    aarch64_split_compare_and_swap (operands);
    DONE;
  }
)

(define_insn_and_split "@aarch64_compare_and_swap<mode>"
  [(set (reg:CC CC_REGNUM)                                      ;; bool out
    (unspec_volatile:CC [(const_int 0)] UNSPECV_ATOMIC_CMPSW))
   (set (match_operand:JUST_TI 0 "register_operand" "=&r")      ;; val out
    (match_operand:JUST_TI 1 "aarch64_sync_memory_operand" "+Q")) ;; memory
   (set (match_dup 1)
    (unspec_volatile:JUST_TI
      [(match_operand:JUST_TI 2 "aarch64_reg_or_zero" "rZ")     ;; expect
       (match_operand:JUST_TI 3 "aarch64_reg_or_zero" "rZ")     ;; desired
       (match_operand:SI 4 "const_int_operand")                 ;; is_weak
       (match_operand:SI 5 "const_int_operand")                 ;; mod_s
       (match_operand:SI 6 "const_int_operand")]                ;; mod_f
      UNSPECV_ATOMIC_CMPSW))
   (clobber (match_scratch:SI 7 "=&r"))]
  ""
  "#"
  "&& reload_completed"
  [(const_int 0)]
  {
    aarch64_split_compare_and_swap (operands);
    DONE;
  }
)

(define_insn "@aarch64_compare_and_swap<mode>_lse"
  [(set (match_operand:SI 0 "register_operand" "+r")            ;; val out
    (zero_extend:SI
      (match_operand:SHORT 1 "aarch64_sync_memory_operand" "+Q"))) ;; memory
   (set (match_dup 1)
    (unspec_volatile:SHORT
      [(match_dup 0)                                            ;; expected
       (match_operand:SHORT 2 "aarch64_reg_or_zero" "rZ")       ;; desired
       (match_operand:SI 3 "const_int_operand")]                ;; mod_s
      UNSPECV_ATOMIC_CMPSW))]
  "TARGET_LSE"
{
  enum memmodel model = memmodel_from_int (INTVAL (operands[3]));
  if (is_mm_relaxed (model))
    return "cas<atomic_sfx>\t%<w>0, %<w>2, %1";
  else if (is_mm_acquire (model) || is_mm_consume (model))
    return "casa<atomic_sfx>\t%<w>0, %<w>2, %1";
  else if (is_mm_release (model))
    return "casl<atomic_sfx>\t%<w>0, %<w>2, %1";
  else
    return "casal<atomic_sfx>\t%<w>0, %<w>2, %1";
})

(define_insn "@aarch64_compare_and_swap<mode>_lse"
  [(set (match_operand:GPI 0 "register_operand" "+r")           ;; val out
    (match_operand:GPI 1 "aarch64_sync_memory_operand" "+Q"))   ;; memory
   (set (match_dup 1)
    (unspec_volatile:GPI
      [(match_dup 0)                                            ;; expected
       (match_operand:GPI 2 "aarch64_reg_or_zero" "rZ")         ;; desired
       (match_operand:SI 3 "const_int_operand")]                ;; mod_s
      UNSPECV_ATOMIC_CMPSW))]
  "TARGET_LSE"
{
  enum memmodel model = memmodel_from_int (INTVAL (operands[3]));
  if (is_mm_relaxed (model))
    return "cas<atomic_sfx>\t%<w>0, %<w>2, %1";
  else if (is_mm_acquire (model) || is_mm_consume (model))
    return "casa<atomic_sfx>\t%<w>0, %<w>2, %1";
  else if (is_mm_release (model))
    return "casl<atomic_sfx>\t%<w>0, %<w>2, %1";
  else
    return "casal<atomic_sfx>\t%<w>0, %<w>2, %1";
})

(define_insn "@aarch64_compare_and_swap<mode>_lse"
  [(set (match_operand:JUST_TI 0 "register_operand" "+r")       ;; val out
    (match_operand:JUST_TI 1 "aarch64_sync_memory_operand" "+Q")) ;; memory
   (set (match_dup 1)
    (unspec_volatile:JUST_TI
      [(match_dup 0)                                            ;; expect
       (match_operand:JUST_TI 2 "register_operand" "r")         ;; desired
       (match_operand:SI 3 "const_int_operand")]                ;; mod_s
      UNSPECV_ATOMIC_CMPSW))]
  "TARGET_LSE"
{
  enum memmodel model = memmodel_from_int (INTVAL (operands[3]));
  if (is_mm_relaxed (model))
    return "casp\t%0, %R0, %2, %R2, %1";
  else if (is_mm_acquire (model) || is_mm_consume (model))
    return "caspa\t%0, %R0, %2, %R2, %1";
  else if (is_mm_release (model))
    return "caspl\t%0, %R0, %2, %R2, %1";
  else
    return "caspal\t%0, %R0, %2, %R2, %1";
})

(define_expand "atomic_exchange<mode>"
  [(match_operand:ALLI 0 "register_operand")
   (match_operand:ALLI 1 "aarch64_sync_memory_operand")
   (match_operand:ALLI 2 "aarch64_reg_or_zero")
   (match_operand:SI 3 "const_int_operand")]
  ""
  {
    /* Use an atomic SWP when available.  */
    if (TARGET_LSE)
      {
	emit_insn (gen_aarch64_atomic_exchange<mode>_lse
		   (operands[0], operands[1], operands[2], operands[3]));
      }
    else if (TARGET_OUTLINE_ATOMICS)
      {
	machine_mode mode = <MODE>mode;
	rtx func = aarch64_atomic_ool_func (mode, operands[3],
					    &aarch64_ool_swp_names);
	rtx rval = emit_library_call_value (func, operands[0], LCT_NORMAL,
					    mode, operands[2], mode,
					    XEXP (operands[1], 0), Pmode);
	emit_move_insn (operands[0], rval);
      }
    else
      {
	emit_insn (gen_aarch64_atomic_exchange<mode>
		   (operands[0], operands[1], operands[2], operands[3]));
      }
    DONE;
  }
)

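;; For illustration only: an exchange such as
;;
;;   T old = __atomic_exchange_n (ptr, newval, __ATOMIC_ACQ_REL);
;;
;; goes through the expander above, becoming a single SWPAL when LSE is
;; available, a call to one of the aarch64_ool_swp_names helpers with
;; outline atomics, or the LDXR/STXR loop split from the pattern below
;; otherwise.
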
(define_insn_and_split "aarch64_atomic_exchange<mode>"
  [(set (match_operand:ALLI 0 "register_operand" "=&r")         ;; output
    (match_operand:ALLI 1 "aarch64_sync_memory_operand" "+Q"))  ;; memory
   (set (match_dup 1)
    (unspec_volatile:ALLI
      [(match_operand:ALLI 2 "aarch64_reg_or_zero" "rZ")        ;; input
       (match_operand:SI 3 "const_int_operand" "")]             ;; model
      UNSPECV_ATOMIC_EXCHG))
   (clobber (reg:CC CC_REGNUM))
   (clobber (match_scratch:SI 4 "=&r"))]
  ""
  "#"
  "&& reload_completed"
  [(const_int 0)]
  {
    aarch64_split_atomic_op (SET, operands[0], NULL, operands[1],
			     operands[2], operands[3], operands[4]);
    DONE;
  }
)

(define_insn "aarch64_atomic_exchange<mode>_lse"
  [(set (match_operand:ALLI 0 "register_operand" "=r")
    (match_operand:ALLI 1 "aarch64_sync_memory_operand" "+Q"))
   (set (match_dup 1)
    (unspec_volatile:ALLI
      [(match_operand:ALLI 2 "aarch64_reg_or_zero" "rZ")
       (match_operand:SI 3 "const_int_operand" "")]
      UNSPECV_ATOMIC_EXCHG))]
  "TARGET_LSE"
  {
    enum memmodel model = memmodel_from_int (INTVAL (operands[3]));
    if (is_mm_relaxed (model))
      return "swp<atomic_sfx>\t%<w>2, %<w>0, %1";
    else if (is_mm_acquire (model) || is_mm_consume (model))
      return "swpa<atomic_sfx>\t%<w>2, %<w>0, %1";
    else if (is_mm_release (model))
      return "swpl<atomic_sfx>\t%<w>2, %<w>0, %1";
    else
      return "swpal<atomic_sfx>\t%<w>2, %<w>0, %1";
  }
)

(define_expand "atomic_<atomic_optab><mode>"
  [(match_operand:ALLI 0 "aarch64_sync_memory_operand")
   (atomic_op:ALLI
    (match_operand:ALLI 1 "<atomic_op_operand>")
    (match_operand:SI 2 "const_int_operand"))]
  ""
  {
    rtx (*gen) (rtx, rtx, rtx);

    /* Use an atomic load-operate instruction when possible.  */
    if (TARGET_LSE)
      {
	switch (<CODE>)
	  {
	  case MINUS:
	    operands[1] = expand_simple_unop (<MODE>mode, NEG, operands[1],
					      NULL, 1);
	    /* fallthru */
	  case PLUS:
	    gen = gen_aarch64_atomic_add<mode>_lse;
	    break;
	  case IOR:
	    gen = gen_aarch64_atomic_ior<mode>_lse;
	    break;
	  case XOR:
	    gen = gen_aarch64_atomic_xor<mode>_lse;
	    break;
	  case AND:
	    operands[1] = expand_simple_unop (<MODE>mode, NOT, operands[1],
					      NULL, 1);
	    gen = gen_aarch64_atomic_bic<mode>_lse;
	    break;
	  default:
	    gcc_unreachable ();
	  }
	operands[1] = force_reg (<MODE>mode, operands[1]);
      }
    else if (TARGET_OUTLINE_ATOMICS)
      {
	const atomic_ool_names *names;
	switch (<CODE>)
	  {
	  case MINUS:
	    operands[1] = expand_simple_unop (<MODE>mode, NEG, operands[1],
					      NULL, 1);
	    /* fallthru */
	  case PLUS:
	    names = &aarch64_ool_ldadd_names;
	    break;
	  case IOR:
	    names = &aarch64_ool_ldset_names;
	    break;
	  case XOR:
	    names = &aarch64_ool_ldeor_names;
	    break;
	  case AND:
	    operands[1] = expand_simple_unop (<MODE>mode, NOT, operands[1],
					      NULL, 1);
	    names = &aarch64_ool_ldclr_names;
	    break;
	  default:
	    gcc_unreachable ();
	  }
	machine_mode mode = <MODE>mode;
	rtx func = aarch64_atomic_ool_func (mode, operands[2], names);
	emit_library_call_value (func, NULL_RTX, LCT_NORMAL, mode,
				 operands[1], mode,
				 XEXP (operands[0], 0), Pmode);
	DONE;
      }
    else
      gen = gen_aarch64_atomic_<atomic_optab><mode>;

    emit_insn (gen (operands[0], operands[1], operands[2]));
    DONE;
  }
)

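;; For illustration only: operations with no direct LSE equivalent are
;; remapped above, so e.g. an __atomic_fetch_and (ptr, mask, ...) whose
;; result is unused becomes LDCLR (or an aarch64_ool_ldclr_names call)
;; on the complement of mask, and __atomic_fetch_sub becomes LDADD of
;; the negated operand.
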
(define_insn_and_split "aarch64_atomic_<atomic_optab><mode>"
  [(set (match_operand:ALLI 0 "aarch64_sync_memory_operand" "+Q")
    (unspec_volatile:ALLI
      [(atomic_op:ALLI (match_dup 0)
	(match_operand:ALLI 1 "<atomic_op_operand>" "r<const_atomic>"))
       (match_operand:SI 2 "const_int_operand")]
      UNSPECV_ATOMIC_OP))
   (clobber (reg:CC CC_REGNUM))
   (clobber (match_scratch:ALLI 3 "=&r"))
   (clobber (match_scratch:SI 4 "=&r"))]
  ""
  "#"
  "&& reload_completed"
  [(const_int 0)]
  {
    aarch64_split_atomic_op (<CODE>, NULL, operands[3], operands[0],
			     operands[1], operands[2], operands[4]);
    DONE;
  }
)

;; It is tempting to want to use ST<OP> for relaxed and release
;; memory models here.  However, that is incompatible with the
;; C++ memory model for the following case:
;;
;;	atomic_fetch_add(ptr, 1, memory_order_relaxed);
;;	atomic_thread_fence(memory_order_acquire);
;;
;; The problem is that the architecture says that ST<OP> (and LD<OP>
;; insns where the destination is XZR) are not regarded as a read.
;; However we also implement the acquire memory barrier with DMB LD,
;; and so the ST<OP> is not blocked by the barrier.
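;;
;; Concretely (a sketch, not generated code): if the sequence above were
;; emitted as
;;
;;	stadd	w1, [x0]
;;	dmb	ishld
;;
;; the DMB LD would only order reads, and the STADD does not count as
;; one, so the update would not be ordered before accesses that follow
;; the fence.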

(define_insn "aarch64_atomic_<atomic_ldoptab><mode>_lse"
  [(set (match_operand:ALLI 0 "aarch64_sync_memory_operand" "+Q")
    (unspec_volatile:ALLI
      [(match_dup 0)
       (match_operand:ALLI 1 "register_operand" "r")
       (match_operand:SI 2 "const_int_operand")]
      ATOMIC_LDOP))
   (clobber (match_scratch:ALLI 3 "=r"))]
  "TARGET_LSE"
  {
   enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
   if (is_mm_relaxed (model))
     return "ld<atomic_ldop><atomic_sfx>\t%<w>1, %<w>3, %0";
   else if (is_mm_release (model))
     return "ld<atomic_ldop>l<atomic_sfx>\t%<w>1, %<w>3, %0";
   else if (is_mm_acquire (model) || is_mm_consume (model))
     return "ld<atomic_ldop>a<atomic_sfx>\t%<w>1, %<w>3, %0";
   else
     return "ld<atomic_ldop>al<atomic_sfx>\t%<w>1, %<w>3, %0";
  }
)

(define_insn_and_split "atomic_nand<mode>"
  [(set (match_operand:ALLI 0 "aarch64_sync_memory_operand" "+Q")
    (unspec_volatile:ALLI
      [(not:ALLI
	(and:ALLI (match_dup 0)
	  (match_operand:ALLI 1 "aarch64_logical_operand" "r<lconst_atomic>")))
       (match_operand:SI 2 "const_int_operand")]                ;; model
      UNSPECV_ATOMIC_OP))
   (clobber (reg:CC CC_REGNUM))
   (clobber (match_scratch:ALLI 3 "=&r"))
   (clobber (match_scratch:SI 4 "=&r"))]
  ""
  "#"
  "&& reload_completed"
  [(const_int 0)]
  {
    aarch64_split_atomic_op (NOT, NULL, operands[3], operands[0],
			     operands[1], operands[2], operands[4]);
    DONE;
  }
)

;; Load-operate-store, returning the original memory data.

(define_expand "atomic_fetch_<atomic_optab><mode>"
  [(match_operand:ALLI 0 "register_operand")
   (match_operand:ALLI 1 "aarch64_sync_memory_operand")
   (atomic_op:ALLI
    (match_operand:ALLI 2 "<atomic_op_operand>")
    (match_operand:SI 3 "const_int_operand"))]
  ""
{
  rtx (*gen) (rtx, rtx, rtx, rtx);

  /* Use an atomic load-operate instruction when possible.  */
  if (TARGET_LSE)
    {
      switch (<CODE>)
	{
	case MINUS:
	  operands[2] = expand_simple_unop (<MODE>mode, NEG, operands[2],
					    NULL, 1);
	  /* fallthru */
	case PLUS:
	  gen = gen_aarch64_atomic_fetch_add<mode>_lse;
	  break;
	case IOR:
	  gen = gen_aarch64_atomic_fetch_ior<mode>_lse;
	  break;
	case XOR:
	  gen = gen_aarch64_atomic_fetch_xor<mode>_lse;
	  break;
	case AND:
	  operands[2] = expand_simple_unop (<MODE>mode, NOT, operands[2],
					    NULL, 1);
	  gen = gen_aarch64_atomic_fetch_bic<mode>_lse;
	  break;
	default:
	  gcc_unreachable ();
	}
      operands[2] = force_reg (<MODE>mode, operands[2]);
    }
  else if (TARGET_OUTLINE_ATOMICS)
    {
      const atomic_ool_names *names;
      switch (<CODE>)
	{
	case MINUS:
	  operands[2] = expand_simple_unop (<MODE>mode, NEG, operands[2],
					    NULL, 1);
	  /* fallthru */
	case PLUS:
	  names = &aarch64_ool_ldadd_names;
	  break;
	case IOR:
	  names = &aarch64_ool_ldset_names;
	  break;
	case XOR:
	  names = &aarch64_ool_ldeor_names;
	  break;
	case AND:
	  operands[2] = expand_simple_unop (<MODE>mode, NOT, operands[2],
					    NULL, 1);
	  names = &aarch64_ool_ldclr_names;
	  break;
	default:
	  gcc_unreachable ();
	}
      machine_mode mode = <MODE>mode;
      rtx func = aarch64_atomic_ool_func (mode, operands[3], names);
      rtx rval = emit_library_call_value (func, operands[0], LCT_NORMAL, mode,
					  operands[2], mode,
					  XEXP (operands[1], 0), Pmode);
      emit_move_insn (operands[0], rval);
      DONE;
    }
  else
    gen = gen_aarch64_atomic_fetch_<atomic_optab><mode>;

  emit_insn (gen (operands[0], operands[1], operands[2], operands[3]));
  DONE;
})

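;; For illustration only: a fetch-and-op such as
;;
;;   long old = __atomic_fetch_add (ptr, 1, __ATOMIC_SEQ_CST);
;;
;; expands here, emitting LDADDAL when LSE is available, a call through
;; aarch64_ool_ldadd_names with outline atomics, or the LDXR/STXR split
;; below otherwise.
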
(define_insn_and_split "aarch64_atomic_fetch_<atomic_optab><mode>"
  [(set (match_operand:ALLI 0 "register_operand" "=&r")
    (match_operand:ALLI 1 "aarch64_sync_memory_operand" "+Q"))
   (set (match_dup 1)
    (unspec_volatile:ALLI
      [(atomic_op:ALLI (match_dup 1)
	(match_operand:ALLI 2 "<atomic_op_operand>" "r<const_atomic>"))
       (match_operand:SI 3 "const_int_operand")]                ;; model
      UNSPECV_ATOMIC_OP))
   (clobber (reg:CC CC_REGNUM))
   (clobber (match_scratch:ALLI 4 "=&r"))
   (clobber (match_scratch:SI 5 "=&r"))]
  ""
  "#"
  "&& reload_completed"
  [(const_int 0)]
  {
    aarch64_split_atomic_op (<CODE>, operands[0], operands[4], operands[1],
			     operands[2], operands[3], operands[5]);
    DONE;
  }
)

(define_insn "aarch64_atomic_fetch_<atomic_ldoptab><mode>_lse"
  [(set (match_operand:ALLI 0 "register_operand" "=r")
    (match_operand:ALLI 1 "aarch64_sync_memory_operand" "+Q"))
   (set (match_dup 1)
    (unspec_volatile:ALLI
      [(match_dup 1)
       (match_operand:ALLI 2 "register_operand" "r")
       (match_operand:SI 3 "const_int_operand")]
      ATOMIC_LDOP))]
  "TARGET_LSE"
  {
   enum memmodel model = memmodel_from_int (INTVAL (operands[3]));
   if (is_mm_relaxed (model))
     return "ld<atomic_ldop><atomic_sfx>\t%<w>2, %<w>0, %1";
   else if (is_mm_acquire (model) || is_mm_consume (model))
     return "ld<atomic_ldop>a<atomic_sfx>\t%<w>2, %<w>0, %1";
   else if (is_mm_release (model))
     return "ld<atomic_ldop>l<atomic_sfx>\t%<w>2, %<w>0, %1";
   else
     return "ld<atomic_ldop>al<atomic_sfx>\t%<w>2, %<w>0, %1";
  }
)

(define_insn_and_split "atomic_fetch_nand<mode>"
  [(set (match_operand:ALLI 0 "register_operand" "=&r")
    (match_operand:ALLI 1 "aarch64_sync_memory_operand" "+Q"))
   (set (match_dup 1)
    (unspec_volatile:ALLI
      [(not:ALLI
	 (and:ALLI (match_dup 1)
	   (match_operand:ALLI 2 "aarch64_logical_operand" "r<lconst_atomic>")))
       (match_operand:SI 3 "const_int_operand")]                ;; model
      UNSPECV_ATOMIC_OP))
   (clobber (reg:CC CC_REGNUM))
   (clobber (match_scratch:ALLI 4 "=&r"))
   (clobber (match_scratch:SI 5 "=&r"))]
  ""
  "#"
  "&& reload_completed"
  [(const_int 0)]
  {
    aarch64_split_atomic_op (NOT, operands[0], operands[4], operands[1],
			     operands[2], operands[3], operands[5]);
    DONE;
  }
)

;; Load-operate-store, returning the updated memory data.

(define_expand "atomic_<atomic_optab>_fetch<mode>"
  [(match_operand:ALLI 0 "register_operand")
   (atomic_op:ALLI
    (match_operand:ALLI 1 "aarch64_sync_memory_operand")
    (match_operand:ALLI 2 "<atomic_op_operand>"))
   (match_operand:SI 3 "const_int_operand")]
  ""
{
  /* Use an atomic load-operate instruction when possible.  In this case
     we will re-compute the result from the original mem value.  */
  if (TARGET_LSE || TARGET_OUTLINE_ATOMICS)
    {
      rtx tmp = gen_reg_rtx (<MODE>mode);
      operands[2] = force_reg (<MODE>mode, operands[2]);
      emit_insn (gen_atomic_fetch_<atomic_optab><mode>
		 (tmp, operands[1], operands[2], operands[3]));
      tmp = expand_simple_binop (<MODE>mode, <CODE>, tmp, operands[2],
				 operands[0], 1, OPTAB_WIDEN);
      emit_move_insn (operands[0], tmp);
    }
  else
    {
      emit_insn (gen_aarch64_atomic_<atomic_optab>_fetch<mode>
		 (operands[0], operands[1], operands[2], operands[3]));
    }
  DONE;
})

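;; For illustration only: with LSE or outline atomics, something like
;;
;;   long newval = __atomic_add_fetch (ptr, n, __ATOMIC_RELAXED);
;;
;; is emitted as the fetch_add above followed by an ordinary ADD of n to
;; the fetched value, rather than by a dedicated instruction.
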
(define_insn_and_split "aarch64_atomic_<atomic_optab>_fetch<mode>"
  [(set (match_operand:ALLI 0 "register_operand" "=&r")
    (atomic_op:ALLI
      (match_operand:ALLI 1 "aarch64_sync_memory_operand" "+Q")
      (match_operand:ALLI 2 "<atomic_op_operand>" "r<const_atomic>")))
   (set (match_dup 1)
    (unspec_volatile:ALLI
      [(match_dup 1) (match_dup 2)
       (match_operand:SI 3 "const_int_operand")]                ;; model
      UNSPECV_ATOMIC_OP))
   (clobber (reg:CC CC_REGNUM))
   (clobber (match_scratch:SI 4 "=&r"))]
  ""
  "#"
  "&& reload_completed"
  [(const_int 0)]
  {
    aarch64_split_atomic_op (<CODE>, NULL, operands[0], operands[1],
			     operands[2], operands[3], operands[4]);
    DONE;
  }
)

(define_insn_and_split "atomic_nand_fetch<mode>"
  [(set (match_operand:ALLI 0 "register_operand" "=&r")
    (not:ALLI
      (and:ALLI
	(match_operand:ALLI 1 "aarch64_sync_memory_operand" "+Q")
	(match_operand:ALLI 2 "aarch64_logical_operand" "r<lconst_atomic>"))))
   (set (match_dup 1)
    (unspec_volatile:ALLI
      [(match_dup 1) (match_dup 2)
       (match_operand:SI 3 "const_int_operand")]                ;; model
      UNSPECV_ATOMIC_OP))
   (clobber (reg:CC CC_REGNUM))
   (clobber (match_scratch:SI 4 "=&r"))]
  ""
  "#"
  "&& reload_completed"
  [(const_int 0)]
  {
    aarch64_split_atomic_op (NOT, NULL, operands[0], operands[1],
			     operands[2], operands[3], operands[4]);
    DONE;
  }
)

(define_insn "atomic_load<mode>"
  [(set (match_operand:ALLI 0 "register_operand" "=r")
    (unspec_volatile:ALLI
      [(match_operand:ALLI 1 "aarch64_sync_memory_operand" "Q")
       (match_operand:SI 2 "const_int_operand")]                ;; model
      UNSPECV_LDA))]
  ""
  {
    enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
    if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_release (model))
      return "ldr<atomic_sfx>\t%<w>0, %1";
    else
      return "ldar<atomic_sfx>\t%<w>0, %1";
  }
)

(define_insn "atomic_store<mode>"
  [(set (match_operand:ALLI 0 "aarch64_rcpc_memory_operand" "=Q,Ust")
    (unspec_volatile:ALLI
      [(match_operand:ALLI 1 "general_operand" "rZ,rZ")
       (match_operand:SI 2 "const_int_operand")]                ;; model
      UNSPECV_STL))]
  ""
  {
    enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
    if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_acquire (model))
      return "str<atomic_sfx>\t%<w>1, %0";
    else if (which_alternative == 0)
      return "stlr<atomic_sfx>\t%<w>1, %0";
    else
      return "stlur<atomic_sfx>\t%<w>1, %0";
  }
  [(set_attr "arch" "*,rcpc8_4")]
)

(define_insn "@aarch64_load_exclusive<mode>"
  [(set (match_operand:SI 0 "register_operand" "=r")
    (zero_extend:SI
      (unspec_volatile:SHORT
	[(match_operand:SHORT 1 "aarch64_sync_memory_operand" "Q")
	 (match_operand:SI 2 "const_int_operand")]
	UNSPECV_LX)))]
  ""
  {
    enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
    if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_release (model))
      return "ldxr<atomic_sfx>\t%w0, %1";
    else
      return "ldaxr<atomic_sfx>\t%w0, %1";
  }
)

(define_insn "@aarch64_load_exclusive<mode>"
  [(set (match_operand:GPI 0 "register_operand" "=r")
    (unspec_volatile:GPI
      [(match_operand:GPI 1 "aarch64_sync_memory_operand" "Q")
       (match_operand:SI 2 "const_int_operand")]
      UNSPECV_LX))]
  ""
  {
    enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
    if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_release (model))
      return "ldxr\t%<w>0, %1";
    else
      return "ldaxr\t%<w>0, %1";
  }
)

(define_insn "aarch64_load_exclusive_pair"
  [(set (match_operand:DI 0 "register_operand" "=r")
	(unspec_volatile:DI
	  [(match_operand:TI 2 "aarch64_sync_memory_operand" "Q")
	   (match_operand:SI 3 "const_int_operand")]
	  UNSPECV_LX))
   (set (match_operand:DI 1 "register_operand" "=r")
	(unspec_volatile:DI [(match_dup 2) (match_dup 3)] UNSPECV_LX))]
  ""
  {
    enum memmodel model = memmodel_from_int (INTVAL (operands[3]));
    if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_release (model))
      return "ldxp\t%0, %1, %2";
    else
      return "ldaxp\t%0, %1, %2";
  }
)

(define_insn "@aarch64_store_exclusive<mode>"
  [(set (match_operand:SI 0 "register_operand" "=&r")
    (unspec_volatile:SI [(const_int 0)] UNSPECV_SX))
   (set (match_operand:ALLI 1 "aarch64_sync_memory_operand" "=Q")
    (unspec_volatile:ALLI
      [(match_operand:ALLI 2 "aarch64_reg_or_zero" "rZ")
       (match_operand:SI 3 "const_int_operand")]
      UNSPECV_SX))]
  ""
  {
    enum memmodel model = memmodel_from_int (INTVAL (operands[3]));
    if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_acquire (model))
      return "stxr<atomic_sfx>\t%w0, %<w>2, %1";
    else
      return "stlxr<atomic_sfx>\t%w0, %<w>2, %1";
  }
)

(define_insn "aarch64_store_exclusive_pair"
  [(set (match_operand:SI 0 "register_operand" "=&r")
	(unspec_volatile:SI [(const_int 0)] UNSPECV_SX))
   (set (match_operand:TI 1 "aarch64_sync_memory_operand" "=Q")
	(unspec_volatile:TI
	  [(match_operand:DI 2 "aarch64_reg_or_zero" "rZ")
	   (match_operand:DI 3 "aarch64_reg_or_zero" "rZ")
	   (match_operand:SI 4 "const_int_operand")]
	  UNSPECV_SX))]
  ""
  {
    enum memmodel model = memmodel_from_int (INTVAL (operands[4]));
    if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_acquire (model))
      return "stxp\t%w0, %x2, %x3, %1";
    else
      return "stlxp\t%w0, %x2, %x3, %1";
  }
)

(define_expand "mem_thread_fence"
  [(match_operand:SI 0 "const_int_operand")]
  ""
  {
    enum memmodel model = memmodel_from_int (INTVAL (operands[0]));
    if (!(is_mm_relaxed (model) || is_mm_consume (model)))
      emit_insn (gen_dmb (operands[0]));
    DONE;
  }
)

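;; For illustration only: __atomic_thread_fence (__ATOMIC_ACQUIRE) goes
;; through the expander above and the *dmb pattern below, producing
;; "dmb ishld"; any other non-relaxed, non-consume model produces
;; "dmb ish", and relaxed/consume fences emit no instruction at all.
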
(define_expand "dmb"
  [(set (match_dup 1)
    (unspec:BLK [(match_dup 1) (match_operand:SI 0 "const_int_operand")]
     UNSPEC_MB))]
   ""
   {
    operands[1] = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
    MEM_VOLATILE_P (operands[1]) = 1;
   }
)

(define_insn "*dmb"
  [(set (match_operand:BLK 0 "" "")
    (unspec:BLK [(match_dup 0) (match_operand:SI 1 "const_int_operand")]
     UNSPEC_MB))]
  ""
  {
    enum memmodel model = memmodel_from_int (INTVAL (operands[1]));
    if (is_mm_acquire (model))
      return "dmb\\tishld";
    else
      return "dmb\\tish";
  }
)