;; Machine description for AArch64 processor synchronization primitives.
;; Copyright (C) 2009-2021 Free Software Foundation, Inc.
;; Contributed by ARM Ltd.
;;
;; This file is part of GCC.
;;
;; GCC is free software; you can redistribute it and/or modify it
;; under the terms of the GNU General Public License as published by
;; the Free Software Foundation; either version 3, or (at your option)
;; any later version.
;;
;; GCC is distributed in the hope that it will be useful, but
;; WITHOUT ANY WARRANTY; without even the implied warranty of
;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
;; General Public License for more details.
;;
;; You should have received a copy of the GNU General Public License
;; along with GCC; see the file COPYING3.  If not see
;; <http://www.gnu.org/licenses/>.
6380d2bc MW |
21 | ;; Instruction patterns. |
22 | ||
0016d8d9 | 23 | (define_expand "@atomic_compare_and_swap<mode>" |
4a2095eb RH |
24 | [(match_operand:SI 0 "register_operand" "") ;; bool out |
25 | (match_operand:ALLI_TI 1 "register_operand" "") ;; val out | |
26 | (match_operand:ALLI_TI 2 "aarch64_sync_memory_operand" "") ;; memory | |
27 | (match_operand:ALLI_TI 3 "nonmemory_operand" "") ;; expected | |
28 | (match_operand:ALLI_TI 4 "aarch64_reg_or_zero" "") ;; desired | |
86c818c2 JG |
29 | (match_operand:SI 5 "const_int_operand") ;; is_weak |
30 | (match_operand:SI 6 "const_int_operand") ;; mod_s | |
31 | (match_operand:SI 7 "const_int_operand")] ;; mod_f | |
32 | "" | |
33 | { | |
34 | aarch64_expand_compare_and_swap (operands); | |
35 | DONE; | |
36 | } | |
37 | ) | |
38 | ||
d400fda3 RH |
39 | (define_mode_attr cas_short_expected_pred |
40 | [(QI "aarch64_reg_or_imm") (HI "aarch64_plushi_operand")]) | |
a27c5347 JJ |
41 | (define_mode_attr cas_short_expected_imm |
42 | [(QI "n") (HI "Uph")]) | |
d400fda3 | 43 | |
0016d8d9 | 44 | (define_insn_and_split "@aarch64_compare_and_swap<mode>" |
86c818c2 JG |
45 | [(set (reg:CC CC_REGNUM) ;; bool out |
46 | (unspec_volatile:CC [(const_int 0)] UNSPECV_ATOMIC_CMPSW)) | |
d400fda3 | 47 | (set (match_operand:SI 0 "register_operand" "=&r") ;; val out |
86c818c2 JG |
48 | (zero_extend:SI |
49 | (match_operand:SHORT 1 "aarch64_sync_memory_operand" "+Q"))) ;; memory | |
50 | (set (match_dup 1) | |
51 | (unspec_volatile:SHORT | |
a27c5347 JJ |
52 | [(match_operand:SHORT 2 "<cas_short_expected_pred>" |
53 | "r<cas_short_expected_imm>") ;; expected | |
4ebcc903 | 54 | (match_operand:SHORT 3 "aarch64_reg_or_zero" "rZ") ;; desired |
d400fda3 RH |
55 | (match_operand:SI 4 "const_int_operand") ;; is_weak |
56 | (match_operand:SI 5 "const_int_operand") ;; mod_s | |
57 | (match_operand:SI 6 "const_int_operand")] ;; mod_f | |
86c818c2 JG |
58 | UNSPECV_ATOMIC_CMPSW)) |
59 | (clobber (match_scratch:SI 7 "=&r"))] | |
60 | "" | |
61 | "#" | |
e5e07b68 | 62 | "&& epilogue_completed" |
86c818c2 JG |
63 | [(const_int 0)] |
64 | { | |
65 | aarch64_split_compare_and_swap (operands); | |
66 | DONE; | |
67 | } | |
68 | ) | |
69 | ||
0016d8d9 | 70 | (define_insn_and_split "@aarch64_compare_and_swap<mode>" |
86c818c2 JG |
71 | [(set (reg:CC CC_REGNUM) ;; bool out |
72 | (unspec_volatile:CC [(const_int 0)] UNSPECV_ATOMIC_CMPSW)) | |
73 | (set (match_operand:GPI 0 "register_operand" "=&r") ;; val out | |
b0770c0f | 74 | (match_operand:GPI 1 "aarch64_sync_memory_operand" "+Q")) ;; memory |
86c818c2 JG |
75 | (set (match_dup 1) |
76 | (unspec_volatile:GPI | |
e21679a8 | 77 | [(match_operand:GPI 2 "aarch64_plus_operand" "rIJ") ;; expect |
4ebcc903 | 78 | (match_operand:GPI 3 "aarch64_reg_or_zero" "rZ") ;; desired |
b0770c0f MW |
79 | (match_operand:SI 4 "const_int_operand") ;; is_weak |
80 | (match_operand:SI 5 "const_int_operand") ;; mod_s | |
86c818c2 JG |
81 | (match_operand:SI 6 "const_int_operand")] ;; mod_f |
82 | UNSPECV_ATOMIC_CMPSW)) | |
83 | (clobber (match_scratch:SI 7 "=&r"))] | |
84 | "" | |
85 | "#" | |
e5e07b68 | 86 | "&& epilogue_completed" |
86c818c2 JG |
87 | [(const_int 0)] |
88 | { | |
89 | aarch64_split_compare_and_swap (operands); | |
90 | DONE; | |
91 | } | |
92 | ) | |
93 | ||
4a2095eb RH |
94 | (define_insn_and_split "@aarch64_compare_and_swap<mode>" |
95 | [(set (reg:CC CC_REGNUM) ;; bool out | |
96 | (unspec_volatile:CC [(const_int 0)] UNSPECV_ATOMIC_CMPSW)) | |
97 | (set (match_operand:JUST_TI 0 "register_operand" "=&r") ;; val out | |
98 | (match_operand:JUST_TI 1 "aarch64_sync_memory_operand" "+Q")) ;; memory | |
99 | (set (match_dup 1) | |
100 | (unspec_volatile:JUST_TI | |
101 | [(match_operand:JUST_TI 2 "aarch64_reg_or_zero" "rZ") ;; expect | |
102 | (match_operand:JUST_TI 3 "aarch64_reg_or_zero" "rZ") ;; desired | |
103 | (match_operand:SI 4 "const_int_operand") ;; is_weak | |
104 | (match_operand:SI 5 "const_int_operand") ;; mod_s | |
105 | (match_operand:SI 6 "const_int_operand")] ;; mod_f | |
106 | UNSPECV_ATOMIC_CMPSW)) | |
107 | (clobber (match_scratch:SI 7 "=&r"))] | |
108 | "" | |
109 | "#" | |
e5e07b68 | 110 | "&& epilogue_completed" |
4a2095eb RH |
111 | [(const_int 0)] |
112 | { | |
113 | aarch64_split_compare_and_swap (operands); | |
114 | DONE; | |
115 | } | |
116 | ) | |
117 | ||
77f33f44 RH |
118 | (define_insn "@aarch64_compare_and_swap<mode>_lse" |
119 | [(set (match_operand:SI 0 "register_operand" "+r") ;; val out | |
b0770c0f | 120 | (zero_extend:SI |
77f33f44 | 121 | (match_operand:SHORT 1 "aarch64_sync_memory_operand" "+Q"))) ;; memory |
b0770c0f MW |
122 | (set (match_dup 1) |
123 | (unspec_volatile:SHORT | |
77f33f44 RH |
124 | [(match_dup 0) ;; expected |
125 | (match_operand:SHORT 2 "aarch64_reg_or_zero" "rZ") ;; desired | |
126 | (match_operand:SI 3 "const_int_operand")] ;; mod_s | |
b0770c0f MW |
127 | UNSPECV_ATOMIC_CMPSW))] |
128 | "TARGET_LSE" | |
77f33f44 RH |
129 | { |
130 | enum memmodel model = memmodel_from_int (INTVAL (operands[3])); | |
131 | if (is_mm_relaxed (model)) | |
132 | return "cas<atomic_sfx>\t%<w>0, %<w>2, %1"; | |
133 | else if (is_mm_acquire (model) || is_mm_consume (model)) | |
134 | return "casa<atomic_sfx>\t%<w>0, %<w>2, %1"; | |
135 | else if (is_mm_release (model)) | |
136 | return "casl<atomic_sfx>\t%<w>0, %<w>2, %1"; | |
137 | else | |
138 | return "casal<atomic_sfx>\t%<w>0, %<w>2, %1"; | |
139 | }) | |
b0770c0f | 140 | |
77f33f44 RH |
141 | (define_insn "@aarch64_compare_and_swap<mode>_lse" |
142 | [(set (match_operand:GPI 0 "register_operand" "+r") ;; val out | |
b0770c0f MW |
143 | (match_operand:GPI 1 "aarch64_sync_memory_operand" "+Q")) ;; memory |
144 | (set (match_dup 1) | |
145 | (unspec_volatile:GPI | |
77f33f44 RH |
146 | [(match_dup 0) ;; expected |
147 | (match_operand:GPI 2 "aarch64_reg_or_zero" "rZ") ;; desired | |
148 | (match_operand:SI 3 "const_int_operand")] ;; mod_s | |
b0770c0f | 149 | UNSPECV_ATOMIC_CMPSW))] |
9cd7b720 | 150 | "TARGET_LSE" |
77f33f44 RH |
151 | { |
152 | enum memmodel model = memmodel_from_int (INTVAL (operands[3])); | |
153 | if (is_mm_relaxed (model)) | |
154 | return "cas<atomic_sfx>\t%<w>0, %<w>2, %1"; | |
155 | else if (is_mm_acquire (model) || is_mm_consume (model)) | |
156 | return "casa<atomic_sfx>\t%<w>0, %<w>2, %1"; | |
157 | else if (is_mm_release (model)) | |
158 | return "casl<atomic_sfx>\t%<w>0, %<w>2, %1"; | |
159 | else | |
160 | return "casal<atomic_sfx>\t%<w>0, %<w>2, %1"; | |
161 | }) | |
b0770c0f | 162 | |
4a2095eb RH |
163 | (define_insn "@aarch64_compare_and_swap<mode>_lse" |
164 | [(set (match_operand:JUST_TI 0 "register_operand" "+r") ;; val out | |
165 | (match_operand:JUST_TI 1 "aarch64_sync_memory_operand" "+Q")) ;; memory | |
166 | (set (match_dup 1) | |
167 | (unspec_volatile:JUST_TI | |
168 | [(match_dup 0) ;; expect | |
169 | (match_operand:JUST_TI 2 "register_operand" "r") ;; desired | |
170 | (match_operand:SI 3 "const_int_operand")] ;; mod_s | |
171 | UNSPECV_ATOMIC_CMPSW))] | |
172 | "TARGET_LSE" | |
173 | { | |
174 | enum memmodel model = memmodel_from_int (INTVAL (operands[3])); | |
175 | if (is_mm_relaxed (model)) | |
176 | return "casp\t%0, %R0, %2, %R2, %1"; | |
177 | else if (is_mm_acquire (model) || is_mm_consume (model)) | |
178 | return "caspa\t%0, %R0, %2, %R2, %1"; | |
179 | else if (is_mm_release (model)) | |
180 | return "caspl\t%0, %R0, %2, %R2, %1"; | |
181 | else | |
182 | return "caspal\t%0, %R0, %2, %R2, %1"; | |
183 | }) | |
184 | ||
9cd7b720 | 185 | (define_expand "atomic_exchange<mode>" |
1bbffb87 DZ |
186 | [(match_operand:ALLI 0 "register_operand") |
187 | (match_operand:ALLI 1 "aarch64_sync_memory_operand") | |
188 | (match_operand:ALLI 2 "aarch64_reg_or_zero") | |
189 | (match_operand:SI 3 "const_int_operand")] | |
9cd7b720 MW |
190 | "" |
191 | { | |
9cd7b720 MW |
192 | /* Use an atomic SWP when available. */ |
193 | if (TARGET_LSE) | |
3950b229 RH |
194 | { |
195 | emit_insn (gen_aarch64_atomic_exchange<mode>_lse | |
196 | (operands[0], operands[1], operands[2], operands[3])); | |
197 | } | |
198 | else if (TARGET_OUTLINE_ATOMICS) | |
199 | { | |
200 | machine_mode mode = <MODE>mode; | |
201 | rtx func = aarch64_atomic_ool_func (mode, operands[3], | |
202 | &aarch64_ool_swp_names); | |
203 | rtx rval = emit_library_call_value (func, operands[0], LCT_NORMAL, | |
204 | mode, operands[2], mode, | |
205 | XEXP (operands[1], 0), Pmode); | |
206 | emit_move_insn (operands[0], rval); | |
207 | } | |
9cd7b720 | 208 | else |
3950b229 RH |
209 | { |
210 | emit_insn (gen_aarch64_atomic_exchange<mode> | |
211 | (operands[0], operands[1], operands[2], operands[3])); | |
212 | } | |
9cd7b720 MW |
213 | DONE; |
214 | } | |
215 | ) | |
216 | ||
217 | (define_insn_and_split "aarch64_atomic_exchange<mode>" | |
86c818c2 | 218 | [(set (match_operand:ALLI 0 "register_operand" "=&r") ;; output |
8f5603d3 | 219 | (match_operand:ALLI 1 "aarch64_sync_memory_operand" "+Q")) ;; memory |
86c818c2 JG |
220 | (set (match_dup 1) |
221 | (unspec_volatile:ALLI | |
8f5603d3 | 222 | [(match_operand:ALLI 2 "aarch64_reg_or_zero" "rZ") ;; input |
86c818c2 JG |
223 | (match_operand:SI 3 "const_int_operand" "")] ;; model |
224 | UNSPECV_ATOMIC_EXCHG)) | |
225 | (clobber (reg:CC CC_REGNUM)) | |
226 | (clobber (match_scratch:SI 4 "=&r"))] | |
227 | "" | |
228 | "#" | |
e5e07b68 | 229 | "&& epilogue_completed" |
86c818c2 JG |
230 | [(const_int 0)] |
231 | { | |
232 | aarch64_split_atomic_op (SET, operands[0], NULL, operands[1], | |
9cd7b720 MW |
233 | operands[2], operands[3], operands[4]); |
234 | DONE; | |
235 | } | |
236 | ) | |
237 | ||
8f5603d3 RH |
238 | (define_insn "aarch64_atomic_exchange<mode>_lse" |
239 | [(set (match_operand:ALLI 0 "register_operand" "=r") | |
9cd7b720 MW |
240 | (match_operand:ALLI 1 "aarch64_sync_memory_operand" "+Q")) |
241 | (set (match_dup 1) | |
242 | (unspec_volatile:ALLI | |
8f5603d3 | 243 | [(match_operand:ALLI 2 "aarch64_reg_or_zero" "rZ") |
9cd7b720 MW |
244 | (match_operand:SI 3 "const_int_operand" "")] |
245 | UNSPECV_ATOMIC_EXCHG))] | |
246 | "TARGET_LSE" | |
9cd7b720 | 247 | { |
8f5603d3 RH |
248 | enum memmodel model = memmodel_from_int (INTVAL (operands[3])); |
249 | if (is_mm_relaxed (model)) | |
250 | return "swp<atomic_sfx>\t%<w>2, %<w>0, %1"; | |
251 | else if (is_mm_acquire (model) || is_mm_consume (model)) | |
252 | return "swpa<atomic_sfx>\t%<w>2, %<w>0, %1"; | |
253 | else if (is_mm_release (model)) | |
254 | return "swpl<atomic_sfx>\t%<w>2, %<w>0, %1"; | |
255 | else | |
256 | return "swpal<atomic_sfx>\t%<w>2, %<w>0, %1"; | |
86c818c2 JG |
257 | } |
258 | ) | |
259 | ||
641c2f8b | 260 | (define_expand "atomic_<atomic_optab><mode>" |
1bbffb87 | 261 | [(match_operand:ALLI 0 "aarch64_sync_memory_operand") |
641c2f8b | 262 | (atomic_op:ALLI |
1bbffb87 | 263 | (match_operand:ALLI 1 "<atomic_op_operand>") |
641c2f8b MW |
264 | (match_operand:SI 2 "const_int_operand"))] |
265 | "" | |
266 | { | |
267 | rtx (*gen) (rtx, rtx, rtx); | |
268 | ||
269 | /* Use an atomic load-operate instruction when possible. */ | |
7803ec5e RH |
270 | if (TARGET_LSE) |
271 | { | |
272 | switch (<CODE>) | |
273 | { | |
274 | case MINUS: | |
275 | operands[1] = expand_simple_unop (<MODE>mode, NEG, operands[1], | |
276 | NULL, 1); | |
277 | /* fallthru */ | |
278 | case PLUS: | |
279 | gen = gen_aarch64_atomic_add<mode>_lse; | |
280 | break; | |
281 | case IOR: | |
282 | gen = gen_aarch64_atomic_ior<mode>_lse; | |
283 | break; | |
284 | case XOR: | |
285 | gen = gen_aarch64_atomic_xor<mode>_lse; | |
286 | break; | |
287 | case AND: | |
288 | operands[1] = expand_simple_unop (<MODE>mode, NOT, operands[1], | |
289 | NULL, 1); | |
290 | gen = gen_aarch64_atomic_bic<mode>_lse; | |
291 | break; | |
292 | default: | |
293 | gcc_unreachable (); | |
294 | } | |
295 | operands[1] = force_reg (<MODE>mode, operands[1]); | |
296 | } | |
3950b229 RH |
297 | else if (TARGET_OUTLINE_ATOMICS) |
298 | { | |
299 | const atomic_ool_names *names; | |
300 | switch (<CODE>) | |
301 | { | |
302 | case MINUS: | |
303 | operands[1] = expand_simple_unop (<MODE>mode, NEG, operands[1], | |
304 | NULL, 1); | |
305 | /* fallthru */ | |
306 | case PLUS: | |
307 | names = &aarch64_ool_ldadd_names; | |
308 | break; | |
309 | case IOR: | |
310 | names = &aarch64_ool_ldset_names; | |
311 | break; | |
312 | case XOR: | |
313 | names = &aarch64_ool_ldeor_names; | |
314 | break; | |
315 | case AND: | |
316 | operands[1] = expand_simple_unop (<MODE>mode, NOT, operands[1], | |
317 | NULL, 1); | |
318 | names = &aarch64_ool_ldclr_names; | |
319 | break; | |
320 | default: | |
321 | gcc_unreachable (); | |
322 | } | |
323 | machine_mode mode = <MODE>mode; | |
324 | rtx func = aarch64_atomic_ool_func (mode, operands[2], names); | |
325 | emit_library_call_value (func, NULL_RTX, LCT_NORMAL, mode, | |
326 | operands[1], mode, | |
327 | XEXP (operands[0], 0), Pmode); | |
328 | DONE; | |
329 | } | |
641c2f8b MW |
330 | else |
331 | gen = gen_aarch64_atomic_<atomic_optab><mode>; | |
332 | ||
333 | emit_insn (gen (operands[0], operands[1], operands[2])); | |
641c2f8b MW |
334 | DONE; |
335 | } | |
336 | ) | |
337 | ||
338 | (define_insn_and_split "aarch64_atomic_<atomic_optab><mode>" | |
339 | [(set (match_operand:ALLI 0 "aarch64_sync_memory_operand" "+Q") | |
340 | (unspec_volatile:ALLI | |
341 | [(atomic_op:ALLI (match_dup 0) | |
342 | (match_operand:ALLI 1 "<atomic_op_operand>" "r<const_atomic>")) | |
343 | (match_operand:SI 2 "const_int_operand")] | |
344 | UNSPECV_ATOMIC_OP)) | |
345 | (clobber (reg:CC CC_REGNUM)) | |
346 | (clobber (match_scratch:ALLI 3 "=&r")) | |
347 | (clobber (match_scratch:SI 4 "=&r"))] | |
348 | "" | |
349 | "#" | |
e5e07b68 | 350 | "&& epilogue_completed" |
641c2f8b MW |
351 | [(const_int 0)] |
352 | { | |
353 | aarch64_split_atomic_op (<CODE>, NULL, operands[3], operands[0], | |
354 | operands[1], operands[2], operands[4]); | |
355 | DONE; | |
356 | } | |
357 | ) | |
358 | ||
53de1ea8 RH |
359 | ;; It is tempting to want to use ST<OP> for relaxed and release |
360 | ;; memory models here. However, that is incompatible with the | |
361 | ;; C++ memory model for the following case: | |
362 | ;; | |
363 | ;; atomic_fetch_add(ptr, 1, memory_order_relaxed); | |
364 | ;; atomic_thread_fence(memory_order_acquire); | |
365 | ;; | |
366 | ;; The problem is that the architecture says that ST<OP> (and LD<OP> | |
367 | ;; insns where the destination is XZR) are not regarded as a read. | |
368 | ;; However we also implement the acquire memory barrier with DMB LD, | |
369 | ;; and so the ST<OP> is not blocked by the barrier. | |
370 | ||
7803ec5e | 371 | (define_insn "aarch64_atomic_<atomic_ldoptab><mode>_lse" |
86c818c2 | 372 | [(set (match_operand:ALLI 0 "aarch64_sync_memory_operand" "+Q") |
7803ec5e RH |
373 | (unspec_volatile:ALLI |
374 | [(match_dup 0) | |
375 | (match_operand:ALLI 1 "register_operand" "r") | |
376 | (match_operand:SI 2 "const_int_operand")] | |
377 | ATOMIC_LDOP)) | |
53de1ea8 | 378 | (clobber (match_scratch:ALLI 3 "=r"))] |
641c2f8b | 379 | "TARGET_LSE" |
86c818c2 | 380 | { |
7803ec5e RH |
381 | enum memmodel model = memmodel_from_int (INTVAL (operands[2])); |
382 | if (is_mm_relaxed (model)) | |
383 | return "ld<atomic_ldop><atomic_sfx>\t%<w>1, %<w>3, %0"; | |
384 | else if (is_mm_release (model)) | |
385 | return "ld<atomic_ldop>l<atomic_sfx>\t%<w>1, %<w>3, %0"; | |
386 | else if (is_mm_acquire (model) || is_mm_consume (model)) | |
387 | return "ld<atomic_ldop>a<atomic_sfx>\t%<w>1, %<w>3, %0"; | |
388 | else | |
389 | return "ld<atomic_ldop>al<atomic_sfx>\t%<w>1, %<w>3, %0"; | |
86c818c2 JG |
390 | } |
391 | ) | |
392 | ||
393 | (define_insn_and_split "atomic_nand<mode>" | |
394 | [(set (match_operand:ALLI 0 "aarch64_sync_memory_operand" "+Q") | |
395 | (unspec_volatile:ALLI | |
396 | [(not:ALLI | |
397 | (and:ALLI (match_dup 0) | |
95d47b10 | 398 | (match_operand:ALLI 1 "aarch64_logical_operand" "r<lconst_atomic>"))) |
86c818c2 JG |
399 | (match_operand:SI 2 "const_int_operand")] ;; model |
400 | UNSPECV_ATOMIC_OP)) | |
401 | (clobber (reg:CC CC_REGNUM)) | |
402 | (clobber (match_scratch:ALLI 3 "=&r")) | |
403 | (clobber (match_scratch:SI 4 "=&r"))] | |
404 | "" | |
405 | "#" | |
e5e07b68 | 406 | "&& epilogue_completed" |
86c818c2 JG |
407 | [(const_int 0)] |
408 | { | |
409 | aarch64_split_atomic_op (NOT, NULL, operands[3], operands[0], | |
410 | operands[1], operands[2], operands[4]); | |
411 | DONE; | |
412 | } | |
413 | ) | |
414 | ||
7803ec5e | 415 | ;; Load-operate-store, returning the original memory data. |
641c2f8b MW |
416 | |
417 | (define_expand "atomic_fetch_<atomic_optab><mode>" | |
1bbffb87 DZ |
418 | [(match_operand:ALLI 0 "register_operand") |
419 | (match_operand:ALLI 1 "aarch64_sync_memory_operand") | |
641c2f8b | 420 | (atomic_op:ALLI |
1bbffb87 | 421 | (match_operand:ALLI 2 "<atomic_op_operand>") |
641c2f8b MW |
422 | (match_operand:SI 3 "const_int_operand"))] |
423 | "" | |
424 | { | |
425 | rtx (*gen) (rtx, rtx, rtx, rtx); | |
426 | ||
427 | /* Use an atomic load-operate instruction when possible. */ | |
7803ec5e RH |
428 | if (TARGET_LSE) |
429 | { | |
430 | switch (<CODE>) | |
431 | { | |
432 | case MINUS: | |
433 | operands[2] = expand_simple_unop (<MODE>mode, NEG, operands[2], | |
434 | NULL, 1); | |
435 | /* fallthru */ | |
436 | case PLUS: | |
437 | gen = gen_aarch64_atomic_fetch_add<mode>_lse; | |
438 | break; | |
439 | case IOR: | |
440 | gen = gen_aarch64_atomic_fetch_ior<mode>_lse; | |
441 | break; | |
442 | case XOR: | |
443 | gen = gen_aarch64_atomic_fetch_xor<mode>_lse; | |
444 | break; | |
445 | case AND: | |
446 | operands[2] = expand_simple_unop (<MODE>mode, NOT, operands[2], | |
447 | NULL, 1); | |
448 | gen = gen_aarch64_atomic_fetch_bic<mode>_lse; | |
449 | break; | |
450 | default: | |
451 | gcc_unreachable (); | |
452 | } | |
453 | operands[2] = force_reg (<MODE>mode, operands[2]); | |
454 | } | |
3950b229 RH |
455 | else if (TARGET_OUTLINE_ATOMICS) |
456 | { | |
457 | const atomic_ool_names *names; | |
458 | switch (<CODE>) | |
459 | { | |
460 | case MINUS: | |
461 | operands[2] = expand_simple_unop (<MODE>mode, NEG, operands[2], | |
462 | NULL, 1); | |
463 | /* fallthru */ | |
464 | case PLUS: | |
465 | names = &aarch64_ool_ldadd_names; | |
466 | break; | |
467 | case IOR: | |
468 | names = &aarch64_ool_ldset_names; | |
469 | break; | |
470 | case XOR: | |
471 | names = &aarch64_ool_ldeor_names; | |
472 | break; | |
473 | case AND: | |
474 | operands[2] = expand_simple_unop (<MODE>mode, NOT, operands[2], | |
475 | NULL, 1); | |
476 | names = &aarch64_ool_ldclr_names; | |
477 | break; | |
478 | default: | |
479 | gcc_unreachable (); | |
480 | } | |
481 | machine_mode mode = <MODE>mode; | |
482 | rtx func = aarch64_atomic_ool_func (mode, operands[3], names); | |
483 | rtx rval = emit_library_call_value (func, operands[0], LCT_NORMAL, mode, | |
484 | operands[2], mode, | |
485 | XEXP (operands[1], 0), Pmode); | |
486 | emit_move_insn (operands[0], rval); | |
487 | DONE; | |
488 | } | |
641c2f8b MW |
489 | else |
490 | gen = gen_aarch64_atomic_fetch_<atomic_optab><mode>; | |
491 | ||
492 | emit_insn (gen (operands[0], operands[1], operands[2], operands[3])); | |
641c2f8b MW |
493 | DONE; |
494 | }) | |
495 | ||
496 | (define_insn_and_split "aarch64_atomic_fetch_<atomic_optab><mode>" | |
86c818c2 JG |
497 | [(set (match_operand:ALLI 0 "register_operand" "=&r") |
498 | (match_operand:ALLI 1 "aarch64_sync_memory_operand" "+Q")) | |
499 | (set (match_dup 1) | |
500 | (unspec_volatile:ALLI | |
501 | [(atomic_op:ALLI (match_dup 1) | |
356c32e2 | 502 | (match_operand:ALLI 2 "<atomic_op_operand>" "r<const_atomic>")) |
86c818c2 JG |
503 | (match_operand:SI 3 "const_int_operand")] ;; model |
504 | UNSPECV_ATOMIC_OP)) | |
505 | (clobber (reg:CC CC_REGNUM)) | |
506 | (clobber (match_scratch:ALLI 4 "=&r")) | |
507 | (clobber (match_scratch:SI 5 "=&r"))] | |
508 | "" | |
509 | "#" | |
e5e07b68 | 510 | "&& epilogue_completed" |
86c818c2 JG |
511 | [(const_int 0)] |
512 | { | |
513 | aarch64_split_atomic_op (<CODE>, operands[0], operands[4], operands[1], | |
514 | operands[2], operands[3], operands[5]); | |
641c2f8b MW |
515 | DONE; |
516 | } | |
517 | ) | |
518 | ||
7803ec5e RH |
519 | (define_insn "aarch64_atomic_fetch_<atomic_ldoptab><mode>_lse" |
520 | [(set (match_operand:ALLI 0 "register_operand" "=r") | |
521 | (match_operand:ALLI 1 "aarch64_sync_memory_operand" "+Q")) | |
641c2f8b | 522 | (set (match_dup 1) |
7803ec5e RH |
523 | (unspec_volatile:ALLI |
524 | [(match_dup 1) | |
525 | (match_operand:ALLI 2 "register_operand" "r") | |
526 | (match_operand:SI 3 "const_int_operand")] | |
527 | ATOMIC_LDOP))] | |
641c2f8b | 528 | "TARGET_LSE" |
641c2f8b | 529 | { |
7803ec5e RH |
530 | enum memmodel model = memmodel_from_int (INTVAL (operands[3])); |
531 | if (is_mm_relaxed (model)) | |
532 | return "ld<atomic_ldop><atomic_sfx>\t%<w>2, %<w>0, %1"; | |
533 | else if (is_mm_acquire (model) || is_mm_consume (model)) | |
534 | return "ld<atomic_ldop>a<atomic_sfx>\t%<w>2, %<w>0, %1"; | |
535 | else if (is_mm_release (model)) | |
536 | return "ld<atomic_ldop>l<atomic_sfx>\t%<w>2, %<w>0, %1"; | |
537 | else | |
538 | return "ld<atomic_ldop>al<atomic_sfx>\t%<w>2, %<w>0, %1"; | |
86c818c2 JG |
539 | } |
540 | ) | |
541 | ||
542 | (define_insn_and_split "atomic_fetch_nand<mode>" | |
543 | [(set (match_operand:ALLI 0 "register_operand" "=&r") | |
544 | (match_operand:ALLI 1 "aarch64_sync_memory_operand" "+Q")) | |
545 | (set (match_dup 1) | |
546 | (unspec_volatile:ALLI | |
547 | [(not:ALLI | |
548 | (and:ALLI (match_dup 1) | |
95d47b10 | 549 | (match_operand:ALLI 2 "aarch64_logical_operand" "r<lconst_atomic>"))) |
86c818c2 JG |
550 | (match_operand:SI 3 "const_int_operand")] ;; model |
551 | UNSPECV_ATOMIC_OP)) | |
552 | (clobber (reg:CC CC_REGNUM)) | |
553 | (clobber (match_scratch:ALLI 4 "=&r")) | |
554 | (clobber (match_scratch:SI 5 "=&r"))] | |
555 | "" | |
556 | "#" | |
e5e07b68 | 557 | "&& epilogue_completed" |
86c818c2 JG |
558 | [(const_int 0)] |
559 | { | |
560 | aarch64_split_atomic_op (NOT, operands[0], operands[4], operands[1], | |
561 | operands[2], operands[3], operands[5]); | |
562 | DONE; | |
563 | } | |
564 | ) | |
565 | ||
7803ec5e | 566 | ;; Load-operate-store, returning the updated memory data. |
68729b06 MW |
567 | |
568 | (define_expand "atomic_<atomic_optab>_fetch<mode>" | |
1bbffb87 | 569 | [(match_operand:ALLI 0 "register_operand") |
68729b06 | 570 | (atomic_op:ALLI |
1bbffb87 DZ |
571 | (match_operand:ALLI 1 "aarch64_sync_memory_operand") |
572 | (match_operand:ALLI 2 "<atomic_op_operand>")) | |
68729b06 MW |
573 | (match_operand:SI 3 "const_int_operand")] |
574 | "" | |
575 | { | |
7803ec5e RH |
576 | /* Use an atomic load-operate instruction when possible. In this case |
577 | we will re-compute the result from the original mem value. */ | |
3950b229 | 578 | if (TARGET_LSE || TARGET_OUTLINE_ATOMICS) |
7803ec5e RH |
579 | { |
580 | rtx tmp = gen_reg_rtx (<MODE>mode); | |
581 | operands[2] = force_reg (<MODE>mode, operands[2]); | |
582 | emit_insn (gen_atomic_fetch_<atomic_optab><mode> | |
583 | (tmp, operands[1], operands[2], operands[3])); | |
584 | tmp = expand_simple_binop (<MODE>mode, <CODE>, tmp, operands[2], | |
585 | operands[0], 1, OPTAB_WIDEN); | |
586 | emit_move_insn (operands[0], tmp); | |
587 | } | |
68729b06 | 588 | else |
7803ec5e RH |
589 | { |
590 | emit_insn (gen_aarch64_atomic_<atomic_optab>_fetch<mode> | |
591 | (operands[0], operands[1], operands[2], operands[3])); | |
592 | } | |
68729b06 MW |
593 | DONE; |
594 | }) | |
595 | ||
596 | (define_insn_and_split "aarch64_atomic_<atomic_optab>_fetch<mode>" | |
86c818c2 JG |
597 | [(set (match_operand:ALLI 0 "register_operand" "=&r") |
598 | (atomic_op:ALLI | |
599 | (match_operand:ALLI 1 "aarch64_sync_memory_operand" "+Q") | |
356c32e2 | 600 | (match_operand:ALLI 2 "<atomic_op_operand>" "r<const_atomic>"))) |
86c818c2 JG |
601 | (set (match_dup 1) |
602 | (unspec_volatile:ALLI | |
603 | [(match_dup 1) (match_dup 2) | |
604 | (match_operand:SI 3 "const_int_operand")] ;; model | |
605 | UNSPECV_ATOMIC_OP)) | |
606 | (clobber (reg:CC CC_REGNUM)) | |
607 | (clobber (match_scratch:SI 4 "=&r"))] | |
608 | "" | |
609 | "#" | |
e5e07b68 | 610 | "&& epilogue_completed" |
86c818c2 JG |
611 | [(const_int 0)] |
612 | { | |
613 | aarch64_split_atomic_op (<CODE>, NULL, operands[0], operands[1], | |
614 | operands[2], operands[3], operands[4]); | |
615 | DONE; | |
616 | } | |
617 | ) | |
618 | ||
619 | (define_insn_and_split "atomic_nand_fetch<mode>" | |
620 | [(set (match_operand:ALLI 0 "register_operand" "=&r") | |
621 | (not:ALLI | |
622 | (and:ALLI | |
623 | (match_operand:ALLI 1 "aarch64_sync_memory_operand" "+Q") | |
95d47b10 | 624 | (match_operand:ALLI 2 "aarch64_logical_operand" "r<lconst_atomic>")))) |
86c818c2 JG |
625 | (set (match_dup 1) |
626 | (unspec_volatile:ALLI | |
627 | [(match_dup 1) (match_dup 2) | |
628 | (match_operand:SI 3 "const_int_operand")] ;; model | |
629 | UNSPECV_ATOMIC_OP)) | |
630 | (clobber (reg:CC CC_REGNUM)) | |
631 | (clobber (match_scratch:SI 4 "=&r"))] | |
632 | "" | |
633 | "#" | |
e5e07b68 | 634 | "&& epilogue_completed" |
86c818c2 JG |
635 | [(const_int 0)] |
636 | { | |
637 | aarch64_split_atomic_op (NOT, NULL, operands[0], operands[1], | |
638 | operands[2], operands[3], operands[4]); | |
639 | DONE; | |
640 | } | |
641 | ) | |
642 | ||
643 | (define_insn "atomic_load<mode>" | |
644 | [(set (match_operand:ALLI 0 "register_operand" "=r") | |
645 | (unspec_volatile:ALLI | |
646 | [(match_operand:ALLI 1 "aarch64_sync_memory_operand" "Q") | |
647 | (match_operand:SI 2 "const_int_operand")] ;; model | |
648 | UNSPECV_LDA))] | |
649 | "" | |
650 | { | |
46b35980 AM |
651 | enum memmodel model = memmodel_from_int (INTVAL (operands[2])); |
652 | if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_release (model)) | |
86c818c2 JG |
653 | return "ldr<atomic_sfx>\t%<w>0, %1"; |
654 | else | |
655 | return "ldar<atomic_sfx>\t%<w>0, %1"; | |
656 | } | |
657 | ) | |
658 | ||
659 | (define_insn "atomic_store<mode>" | |
3c5af608 | 660 | [(set (match_operand:ALLI 0 "aarch64_rcpc_memory_operand" "=Q,Ust") |
86c818c2 | 661 | (unspec_volatile:ALLI |
3c5af608 | 662 | [(match_operand:ALLI 1 "general_operand" "rZ,rZ") |
86c818c2 JG |
663 | (match_operand:SI 2 "const_int_operand")] ;; model |
664 | UNSPECV_STL))] | |
665 | "" | |
666 | { | |
46b35980 AM |
667 | enum memmodel model = memmodel_from_int (INTVAL (operands[2])); |
668 | if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_acquire (model)) | |
86c818c2 | 669 | return "str<atomic_sfx>\t%<w>1, %0"; |
3c5af608 | 670 | else if (which_alternative == 0) |
86c818c2 | 671 | return "stlr<atomic_sfx>\t%<w>1, %0"; |
3c5af608 MM |
672 | else |
673 | return "stlur<atomic_sfx>\t%<w>1, %0"; | |
86c818c2 | 674 | } |
3c5af608 | 675 | [(set_attr "arch" "*,rcpc8_4")] |
86c818c2 JG |
676 | ) |
677 | ||
0016d8d9 | 678 | (define_insn "@aarch64_load_exclusive<mode>" |
86c818c2 JG |
679 | [(set (match_operand:SI 0 "register_operand" "=r") |
680 | (zero_extend:SI | |
681 | (unspec_volatile:SHORT | |
682 | [(match_operand:SHORT 1 "aarch64_sync_memory_operand" "Q") | |
683 | (match_operand:SI 2 "const_int_operand")] | |
684 | UNSPECV_LX)))] | |
685 | "" | |
686 | { | |
46b35980 AM |
687 | enum memmodel model = memmodel_from_int (INTVAL (operands[2])); |
688 | if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_release (model)) | |
86c818c2 JG |
689 | return "ldxr<atomic_sfx>\t%w0, %1"; |
690 | else | |
691 | return "ldaxr<atomic_sfx>\t%w0, %1"; | |
692 | } | |
693 | ) | |
694 | ||
0016d8d9 | 695 | (define_insn "@aarch64_load_exclusive<mode>" |
86c818c2 JG |
696 | [(set (match_operand:GPI 0 "register_operand" "=r") |
697 | (unspec_volatile:GPI | |
698 | [(match_operand:GPI 1 "aarch64_sync_memory_operand" "Q") | |
699 | (match_operand:SI 2 "const_int_operand")] | |
700 | UNSPECV_LX))] | |
701 | "" | |
702 | { | |
46b35980 AM |
703 | enum memmodel model = memmodel_from_int (INTVAL (operands[2])); |
704 | if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_release (model)) | |
86c818c2 JG |
705 | return "ldxr\t%<w>0, %1"; |
706 | else | |
707 | return "ldaxr\t%<w>0, %1"; | |
708 | } | |
709 | ) | |
710 | ||
4a2095eb RH |
711 | (define_insn "aarch64_load_exclusive_pair" |
712 | [(set (match_operand:DI 0 "register_operand" "=r") | |
713 | (unspec_volatile:DI | |
714 | [(match_operand:TI 2 "aarch64_sync_memory_operand" "Q") | |
715 | (match_operand:SI 3 "const_int_operand")] | |
716 | UNSPECV_LX)) | |
717 | (set (match_operand:DI 1 "register_operand" "=r") | |
718 | (unspec_volatile:DI [(match_dup 2) (match_dup 3)] UNSPECV_LX))] | |
719 | "" | |
720 | { | |
721 | enum memmodel model = memmodel_from_int (INTVAL (operands[3])); | |
722 | if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_release (model)) | |
723 | return "ldxp\t%0, %1, %2"; | |
724 | else | |
725 | return "ldaxp\t%0, %1, %2"; | |
726 | } | |
727 | ) | |
728 | ||
0016d8d9 | 729 | (define_insn "@aarch64_store_exclusive<mode>" |
1d896f48 | 730 | [(set (match_operand:SI 0 "register_operand" "=&r") |
86c818c2 JG |
731 | (unspec_volatile:SI [(const_int 0)] UNSPECV_SX)) |
732 | (set (match_operand:ALLI 1 "aarch64_sync_memory_operand" "=Q") | |
733 | (unspec_volatile:ALLI | |
4ebcc903 | 734 | [(match_operand:ALLI 2 "aarch64_reg_or_zero" "rZ") |
86c818c2 JG |
735 | (match_operand:SI 3 "const_int_operand")] |
736 | UNSPECV_SX))] | |
737 | "" | |
738 | { | |
46b35980 AM |
739 | enum memmodel model = memmodel_from_int (INTVAL (operands[3])); |
740 | if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_acquire (model)) | |
86c818c2 JG |
741 | return "stxr<atomic_sfx>\t%w0, %<w>2, %1"; |
742 | else | |
743 | return "stlxr<atomic_sfx>\t%w0, %<w>2, %1"; | |
744 | } | |
745 | ) | |
746 | ||
4a2095eb RH |
747 | (define_insn "aarch64_store_exclusive_pair" |
748 | [(set (match_operand:SI 0 "register_operand" "=&r") | |
749 | (unspec_volatile:SI [(const_int 0)] UNSPECV_SX)) | |
750 | (set (match_operand:TI 1 "aarch64_sync_memory_operand" "=Q") | |
751 | (unspec_volatile:TI | |
752 | [(match_operand:DI 2 "aarch64_reg_or_zero" "rZ") | |
753 | (match_operand:DI 3 "aarch64_reg_or_zero" "rZ") | |
754 | (match_operand:SI 4 "const_int_operand")] | |
755 | UNSPECV_SX))] | |
756 | "" | |
757 | { | |
3a30d255 | 758 | enum memmodel model = memmodel_from_int (INTVAL (operands[4])); |
4a2095eb RH |
759 | if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_acquire (model)) |
760 | return "stxp\t%w0, %x2, %x3, %1"; | |
761 | else | |
762 | return "stlxp\t%w0, %x2, %x3, %1"; | |
763 | } | |
764 | ) | |
765 | ||
86c818c2 | 766 | (define_expand "mem_thread_fence" |
1bbffb87 | 767 | [(match_operand:SI 0 "const_int_operand")] |
86c818c2 JG |
768 | "" |
769 | { | |
46b35980 AM |
770 | enum memmodel model = memmodel_from_int (INTVAL (operands[0])); |
771 | if (!(is_mm_relaxed (model) || is_mm_consume (model))) | |
86c818c2 JG |
772 | emit_insn (gen_dmb (operands[0])); |
773 | DONE; | |
774 | } | |
775 | ) | |
776 | ||
777 | (define_expand "dmb" | |
778 | [(set (match_dup 1) | |
779 | (unspec:BLK [(match_dup 1) (match_operand:SI 0 "const_int_operand")] | |
780 | UNSPEC_MB))] | |
781 | "" | |
782 | { | |
783 | operands[1] = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode)); | |
784 | MEM_VOLATILE_P (operands[1]) = 1; | |
785 | } | |
786 | ) | |
787 | ||
788 | (define_insn "*dmb" | |
789 | [(set (match_operand:BLK 0 "" "") | |
790 | (unspec:BLK [(match_dup 0) (match_operand:SI 1 "const_int_operand")] | |
791 | UNSPEC_MB))] | |
792 | "" | |
793 | { | |
46b35980 AM |
794 | enum memmodel model = memmodel_from_int (INTVAL (operands[1])); |
795 | if (is_mm_acquire (model)) | |
86c818c2 JG |
796 | return "dmb\\tishld"; |
797 | else | |
798 | return "dmb\\tish"; | |
799 | } | |
800 | ) |