;; GCC machine description for SH synchronization instructions.
;; Copyright (C) 2011-2015 Free Software Foundation, Inc.
;;
;; This file is part of GCC.
;;
;; GCC is free software; you can redistribute it and/or modify
;; it under the terms of the GNU General Public License as published by
;; the Free Software Foundation; either version 3, or (at your option)
;; any later version.
;;
;; GCC is distributed in the hope that it will be useful,
;; but WITHOUT ANY WARRANTY; without even the implied warranty of
;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
;; GNU General Public License for more details.
;;
;; You should have received a copy of the GNU General Public License
;; along with GCC; see the file COPYING3.  If not see
;; <http://www.gnu.org/licenses/>.
;;
;;
;; Atomic integer operations for the Renesas / SuperH SH CPUs.
;;
;; On SH CPUs atomic integer operations can be done either in 'software' or
;; in 'hardware' in various styles.  True hardware support was introduced
;; with the SH4A.  Some SH2A dual-core models (e.g. SH7205) also come with
;; 'semaphore' hardware registers, but these are currently unsupported.
;; All SH CPUs support the 'tas.b' instruction, which can be optionally used
;; to implement the 'atomic_test_and_set' builtin.
;; The following atomic options and models are supported.
;;
;; tas.b atomic_test_and_set (-mtas)
;;
;; Depending on the particular hardware configuration, usage of the 'tas.b'
;; instruction might be undesired or even unsafe.  Thus, it has to be
;; enabled by the user explicitly.  If it is not enabled, the
;; 'atomic_test_and_set' builtin is implemented either with hardware or with
;; software atomics, depending on which is enabled.  It is also possible to
;; enable the 'tas.b' instruction only, without enabling support for the
;; other atomic operations.
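;;
;; For illustration, a sketch of what an 'atomic_test_and_set' expansion
;; with -mtas could boil down to (register choices and the exact result
;; handling are illustrative only; the actual pattern lives elsewhere in
;; the SH backend):
;;
;;      tas.b   @r4             ! T = (mem == 0), then set bit 7 of mem
;;      movt    r0              ! r0 = T
;;      xor     #1,r0           ! invert to get 'was previously set'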
;;
;;
;; Hardware Atomics (-matomic-model=hard-llcs; SH4A only)
;;
;; Hardware atomics implement all atomic operations using the 'movli.l' and
;; 'movco.l' instructions that are available on SH4A.  On multi-core hardware
;; configurations hardware atomics are the only safe model.
;; However, they can also be safely used on single-core configurations.
;; Since these instructions operate on SImode memory only, QImode and HImode
;; have to be emulated with SImode and subreg masking, which results in
;; larger code.
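;;
;; A minimal sketch of an SImode atomic add in this model (this is in
;; essence what the hard_llcs patterns below emit; label and registers
;; are illustrative):
;;
;;0:    movli.l @r4,r0          ! load-linked old value
;;      add     r5,r0           ! modify
;;      movco.l r0,@r4          ! store conditional, T = success
;;      bf      0b              ! retry if the link was lost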
;;
;;
;; gUSA Software Atomics (-matomic-model=soft-gusa; SH3*, SH4* only)
;;
;; On single-core systems there can only be one execution context running
;; at a given point in time.  This allows the usage of rewindable atomic
;; sequences, which effectively emulate locked-load / conditional-store
;; operations.  This requires complementary support in the interrupt /
;; exception handling code (e.g. kernel) and does not work safely on multi-
;; core configurations.
;;
;; When an execution context is interrupted while it is in an atomic
;; sequence, the interrupted context's PC is rewound to the beginning of
;; the atomic sequence by the interrupt / exception handling code, before
;; transferring control to another execution context.  This is done by
;; something like...
;;
;;      if (interrupted_context_in_atomic_sequence
;;          && interrupted_pc < atomic_exitpoint)
;;        interrupted_pc = atomic_entrypoint;
;;
;; This method is also known as gUSA ("g" User Space Atomicity) and the
;; Linux kernel for SH3/SH4 implements support for such software atomic
;; sequences.  It can also be implemented in freestanding environments.
;;
;; For this the following atomic sequence ABI is used.
;;
;; r15 >= 0:    Execution context is not in an atomic sequence.
;;
;; r15  < 0:    Execution context is in an atomic sequence and r15
;;              holds the negative byte length of the atomic sequence.
;;              In this case the following applies:
;;
;;              r0:     PC of the first instruction after the atomic
;;                      write-back instruction (exit point).
;;                      The entry point PC of the atomic sequence can be
;;                      determined by doing r0 + r15.
;;
;;              r1:     Saved r15 stack pointer before entering the
;;                      atomic sequence.
;;
;; An example atomic add sequence would look like:
;;
;;      mova    .Lend,r0                ! .Lend must be 4-byte aligned.
;;      mov     r15,r1
;;      .align 2                        ! Insert aligning nop if needed.
;;      mov     #(.Lstart - .Lend),r15  ! Enter atomic sequence
;;.Lstart:
;;      mov.l   @r4,r2                  ! read value
;;      add     r2,r5                   ! modify value
;;      mov.l   r5,@r4                  ! write-back
;;.Lend:
;;      mov     r1,r15                  ! Exit atomic sequence
;;                                      ! r2 holds the previous value.
;;                                      ! r5 holds the new value.
;;
;; Notice that due to the restrictions of the mova instruction, the .Lend
;; label must always be 4-byte aligned.  Aligning the .Lend label itself
;; would potentially insert a nop after the write-back instruction, which
;; could cause the sequence to be rewound even though it has already passed
;; the write-back instruction, and thus make it execute twice.
;; For correct operation the atomic sequences must not be rewound after
;; they have passed the write-back instruction.
;;
;; This model works only on SH3* and SH4*, because the stack pointer (r15)
;; is temporarily set to an invalid pointer.  SH1* and SH2* CPUs will try
;; to push the SR and PC registers on the stack when an interrupt /
;; exception occurs, and thus require the stack pointer (r15) to always be
;; valid.
;;
;;
;; TCB Software Atomics (-matomic-model=soft-tcb)
;;
;; This model is a variation of the gUSA model.  The concept of rewindable
;; atomic sequences is the same, but it does not use the stack pointer (r15)
;; for signaling the 'is in atomic sequence' condition.  Instead, a variable
;; in the thread control block (TCB) is set to hold the exit point of the
;; atomic sequence.  This assumes that the GBR is used as a thread pointer
;; register.  The offset of the variable in the TCB to be used must be
;; specified with an additional option 'gbr-offset', such as:
;; -matomic-model=soft-tcb,gbr-offset=4
;;
;; For this model the following atomic sequence ABI is used.
;;
;; @(#x,gbr) == 0:      Execution context is not in an atomic sequence.
;;
;; @(#x,gbr) != 0:      Execution context is in an atomic sequence.  In this
;;                      case the following applies:
;;
;;                      @(#x,gbr):      PC of the first instruction after
;;                                      the atomic write-back instruction
;;                                      (exit point).
;;
;;                      r1:     Negative byte length of the atomic sequence.
;;                              The entry point PC of the sequence can be
;;                              determined by doing @(#x,gbr) + r1.
;;
;; Note: #x is the user specified gbr-offset.
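;;
;; An atomic add in the TCB model would look like the following sketch,
;; analogous to the gUSA example above ('x' stands for the gbr-offset,
;; r4 / r5 are illustrative register choices):
;;
;;      mova    .Lend,r0
;;      mov     #(.Lstart - .Lend),r1   ! Negative sequence length
;;      .align 2
;;      mov.l   r0,@(x,gbr)             ! Enter atomic sequence
;;.Lstart:
;;      mov.l   @r4,r2                  ! read value
;;      mov     #0,r0
;;      add     r2,r5                   ! modify value
;;      mov.l   r5,@r4                  ! write-back
;;.Lend:
;;      mov.l   r0,@(x,gbr)             ! Exit atomic sequence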
;;
;;
;; Interrupt-Flipping Software Atomics (-matomic-model=soft-imask)
;;
;; This model achieves atomicity by temporarily disabling interrupts for
;; the duration of the atomic sequence.  This works only when the program
;; runs in privileged mode but does not require any support from the
;; interrupt / exception handling code.  There is no particular ABI.
;; To disable interrupts the SR.IMASK bits are set to '1111'.
;; This method is not as efficient as the other software atomic models,
;; since loading and storing SR (in order to flip interrupts on / off)
;; requires using multi-cycle instructions.  Moreover, it can potentially
;; increase the interrupt latency which might be important for hard-realtime
;; applications.
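;;
;; Schematically, the sequences in this model look like the following
;; sketch (compare e.g. atomic_exchange<mode>_soft_imask below):
;;
;;      stc     sr,r0           ! save SR
;;      mov     r0,r3
;;      or      #0xF0,r0        ! SR.IMASK = 1111
;;      ldc     r0,sr           ! interrupts disabled
;;      ...                     ! load / modify / store
;;      ldc     r3,sr           ! restore SR, interrupts back on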
;;
;;
;; Compatibility Notes
;;
;; On single-core SH4A CPUs software atomic aware interrupt / exception code
;; is actually compatible with user code that utilizes hardware atomics.
;; Since SImode hardware atomic sequences are more compact on SH4A they are
;; always used, regardless of the selected atomic model.  This atomic model
;; mixing can be disabled by setting the 'strict' flag, like:
;; -matomic-model=soft-gusa,strict
;;
;; The software atomic models are generally compatible with each other,
;; but the interrupt / exception handling code has to support both gUSA and
;; TCB models.
;;
;; The current atomic support is limited to QImode, HImode and SImode
;; atomic operations.  DImode operations could also be implemented, but
;; would require some ABI modifications to support multiple-instruction
;; write-back.  This is because SH1/SH2/SH3/SH4 have no DImode store
;; instruction; DImode stores must be split into two SImode stores.
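;;
;; The expanders below implement the usual __atomic builtins; a minimal
;; user-level example (hypothetical, for illustration only):
;;
;;      int fetch_add (int* p, int v)
;;      {
;;        /* Expands through atomic_fetch_addsi to one of the models
;;           above, selected by -matomic-model=...  */
;;        return __atomic_fetch_add (p, v, __ATOMIC_SEQ_CST);
;;      }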

(define_c_enum "unspec" [
  UNSPEC_ATOMIC
])

(define_c_enum "unspecv" [
  UNSPECV_CMPXCHG_1
  UNSPECV_CMPXCHG_2
  UNSPECV_CMPXCHG_3
])

(define_mode_attr i124extend_insn [(QI "exts.b") (HI "exts.w") (SI "mov")])

(define_code_iterator FETCHOP [plus minus ior xor and])
(define_code_attr fetchop_name
  [(plus "add") (minus "sub") (ior "or") (xor "xor") (and "and")])

;;------------------------------------------------------------------------------
;; compare and swap

;; Only the hard_llcs SImode patterns can use an I08 constant for the
;; comparison value or for the new swapped-in value.
(define_predicate "atomic_arith_operand_0"
  (and (match_code "subreg,reg,const_int")
       (ior (match_operand 0 "arith_reg_operand")
            (and (match_test "satisfies_constraint_I08 (op)")
                 (match_test "mode == SImode")
                 (ior (match_test "TARGET_ATOMIC_HARD_LLCS")
                      (match_test "TARGET_ATOMIC_ANY && TARGET_SH4A
                                   && !TARGET_ATOMIC_STRICT"))))))

;; Displacement addressing can be used for all SImode atomic patterns, except
;; llcs.
(define_predicate "atomic_mem_operand_0"
  (and (match_code "mem")
       (ior (match_operand 0 "simple_mem_operand")
            (and (match_test "mode == SImode")
                 (and (match_test "!TARGET_ATOMIC_HARD_LLCS")
                      (match_test "!TARGET_SH4A || TARGET_ATOMIC_STRICT"))
                 (match_operand 0 "displacement_mem_operand")
                 (match_operand 0 "short_displacement_mem_operand")))))
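
;; For example, under -matomic-model=soft-gusa an SImode CAS sequence may
;; use a @(disp,reg) operand directly (illustrative):
;;
;;      mov.l   @(4,r4),r2      ! allowed inside an SImode gUSA sequence
;;
;; while the llcs patterns and the QImode / HImode variants are limited to
;; simple @reg addressing.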

(define_expand "atomic_compare_and_swap<mode>"
  [(match_operand:SI 0 "arith_reg_dest")               ;; bool success output
   (match_operand:QIHISI 1 "arith_reg_dest")           ;; oldval output
   (match_operand:QIHISI 2 "atomic_mem_operand_0")     ;; memory
   (match_operand:QIHISI 3 "atomic_arith_operand_0")   ;; expected input
   (match_operand:QIHISI 4 "atomic_arith_operand_0")   ;; newval input
   (match_operand:SI 5 "const_int_operand")            ;; is_weak
   (match_operand:SI 6 "const_int_operand")            ;; success model
   (match_operand:SI 7 "const_int_operand")]           ;; failure model
  "TARGET_ATOMIC_ANY"
{
  rtx mem = operands[2];
  rtx old_val = gen_lowpart (SImode, operands[1]);
  rtx exp_val = operands[3];
  rtx new_val = operands[4];
  rtx atomic_insn;

  if (TARGET_ATOMIC_HARD_LLCS
      || (TARGET_SH4A && <MODE>mode == SImode && !TARGET_ATOMIC_STRICT))
    atomic_insn = gen_atomic_compare_and_swap<mode>_hard (old_val, mem,
                                                          exp_val, new_val);
  else if (TARGET_ATOMIC_SOFT_GUSA)
    atomic_insn = gen_atomic_compare_and_swap<mode>_soft_gusa (old_val, mem,
                                                               exp_val, new_val);
  else if (TARGET_ATOMIC_SOFT_TCB)
    atomic_insn = gen_atomic_compare_and_swap<mode>_soft_tcb (old_val, mem,
                      exp_val, new_val, TARGET_ATOMIC_SOFT_TCB_GBR_OFFSET_RTX);
  else if (TARGET_ATOMIC_SOFT_IMASK)
    atomic_insn = gen_atomic_compare_and_swap<mode>_soft_imask (old_val, mem,
                                                                exp_val, new_val);
  else
    FAIL;

  emit_insn (atomic_insn);

  if (<MODE>mode == QImode)
    emit_insn (gen_zero_extendqisi2 (gen_lowpart (SImode, operands[1]),
                                     operands[1]));
  else if (<MODE>mode == HImode)
    emit_insn (gen_zero_extendhisi2 (gen_lowpart (SImode, operands[1]),
                                     operands[1]));
  emit_insn (gen_movsi (operands[0], gen_rtx_REG (SImode, T_REG)));
  DONE;
})

(define_insn_and_split "atomic_compare_and_swapsi_hard"
  [(set (match_operand:SI 0 "arith_reg_dest" "=&r")
        (unspec_volatile:SI
          [(match_operand:SI 1 "atomic_mem_operand_0" "=Sra")
           (match_operand:SI 2 "arith_operand" "rI08")
           (match_operand:SI 3 "arith_operand" "rI08")]
          UNSPECV_CMPXCHG_1))
   (set (match_dup 1)
        (unspec_volatile:SI [(const_int 0)] UNSPECV_CMPXCHG_2))
   (set (reg:SI T_REG)
        (unspec_volatile:SI [(const_int 0)] UNSPECV_CMPXCHG_3))
   (clobber (reg:SI R0_REG))]
  "TARGET_ATOMIC_HARD_LLCS
   || (TARGET_SH4A && TARGET_ATOMIC_ANY && !TARGET_ATOMIC_STRICT)"
{
  return "\r0: movli.l %1,r0" "\n"
         " cmp/eq %2,r0" "\n"
         " bf{.|/}s 0f" "\n"
         " mov r0,%0" "\n"
         " mov %3,r0" "\n"
         " movco.l r0,%1" "\n"
         " bf 0b" "\n"
         "0:";
}
  "&& can_create_pseudo_p () && !satisfies_constraint_I08 (operands[2])"
  [(const_int 0)]
{
  /* FIXME: Sometimes the 'expected value' operand is not propagated as an
     immediate value.  See PR 64974.  */
  set_of_reg op2 = sh_find_set_of_reg (operands[2], curr_insn,
                                       prev_nonnote_insn_bb);
  if (op2.set_src != NULL && satisfies_constraint_I08 (op2.set_src))
    {
      rtx* r = &XVECEXP (XEXP (XVECEXP (PATTERN (curr_insn), 0, 0), 1), 0, 1);
      validate_change (curr_insn, r, op2.set_src, false);
      DONE;
    }
  else
    FAIL;
}
  [(set_attr "length" "14")])

;; The QIHImode llcs patterns modify the address register of the memory
;; operand.  In order to express that, we have to open code the memory
;; operand.  Initially the insn is expanded like every other atomic insn
;; using the memory operand.  In split1 the insn is converted and the
;; memory operand's address register is exposed.
(define_insn_and_split "atomic_compare_and_swap<mode>_hard"
  [(set (match_operand:SI 0 "arith_reg_dest")
        (unspec_volatile:SI
          [(match_operand:QIHI 1 "atomic_mem_operand_0")
           (match_operand:QIHI 2 "arith_reg_operand")
           (match_operand:QIHI 3 "arith_reg_operand")]
          UNSPECV_CMPXCHG_1))
   (set (match_dup 1)
        (unspec_volatile:QIHI [(const_int 0)] UNSPECV_CMPXCHG_2))
   (set (reg:SI T_REG)
        (unspec_volatile:SI [(const_int 0)] UNSPECV_CMPXCHG_3))
   (clobber (reg:SI R0_REG))]
  "TARGET_ATOMIC_HARD_LLCS && can_create_pseudo_p ()"
  "#"
  "&& 1"
  [(const_int 0)]
{
  rtx i = gen_atomic_compare_and_swap<mode>_hard_1 (
      operands[0], XEXP (operands[1], 0), operands[2], operands[3]);

  /* Replace the new mems in the new insn with the old mem to preserve
     aliasing info.  */
  XVECEXP (XEXP (XVECEXP (i, 0, 0), 1), 0, 0) = operands[1];
  XEXP (XVECEXP (i, 0, 1), 0) = operands[1];
  emit_insn (i);
})

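;; How the sub-word emulation in the following pattern works (derived from
;; the asm sequence): the SImode word containing the target value is loaded
;; with movli.l and pushed onto the stack; the sub-word is then read and
;; modified within the stack copy, which is popped back into r0 and written
;; out with movco.l.  The and / xor with -4 split the operand address into
;; the aligned SImode container and the byte offset into the stack slot.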
(define_insn "atomic_compare_and_swap<mode>_hard_1"
  [(set (match_operand:SI 0 "arith_reg_dest" "=&r")
        (unspec_volatile:SI
          [(mem:QIHI (match_operand:SI 1 "arith_reg_operand" "r"))
           (match_operand:QIHI 2 "arith_reg_operand" "r")
           (match_operand:QIHI 3 "arith_reg_operand" "r")]
          UNSPECV_CMPXCHG_1))
   (set (mem:QIHI (match_dup 1))
        (unspec_volatile:QIHI [(const_int 0)] UNSPECV_CMPXCHG_2))
   (set (reg:SI T_REG)
        (unspec_volatile:SI [(const_int 0)] UNSPECV_CMPXCHG_3))
   (clobber (reg:SI R0_REG))
   (clobber (match_scratch:SI 4 "=&r"))
   (clobber (match_scratch:SI 5 "=&r"))
   (clobber (match_scratch:SI 6 "=1"))]
  "TARGET_ATOMIC_HARD_LLCS"
{
  return "\r mov #-4,%5" "\n"
         " <i124extend_insn> %2,%4" "\n"
         " and %1,%5" "\n"
         " xor %5,%1" "\n"
         " add r15,%1" "\n"
         " add #-4,%1" "\n"
         "0: movli.l @%5,r0" "\n"
         " mov.l r0,@-r15" "\n"
         " mov.<bw> @%1,%0" "\n"
         " mov.<bw> %3,@%1" "\n"
         " cmp/eq %4,%0" "\n"
         " bf{.|/}s 0f" "\n"
         " mov.l @r15+,r0" "\n"
         " movco.l r0,@%5" "\n"
         " bf 0b" "\n"
         "0:";
}
  [(set_attr "length" "30")])

(define_insn "atomic_compare_and_swap<mode>_soft_gusa"
  [(set (match_operand:SI 0 "arith_reg_dest" "=&u")
        (unspec_volatile:SI
          [(match_operand:QIHISI 1 "atomic_mem_operand_0" "=AraAdd")
           (match_operand:QIHISI 2 "arith_reg_operand" "u")
           (match_operand:QIHISI 3 "arith_reg_operand" "u")]
          UNSPECV_CMPXCHG_1))
   (set (match_dup 1)
        (unspec_volatile:QIHISI [(const_int 0)] UNSPECV_CMPXCHG_2))
   (set (reg:SI T_REG)
        (unspec_volatile:SI [(const_int 0)] UNSPECV_CMPXCHG_3))
   (clobber (match_scratch:SI 4 "=&u"))
   (clobber (reg:SI R0_REG))
   (clobber (reg:SI R1_REG))]
  "TARGET_ATOMIC_SOFT_GUSA"
{
  return "\r mova 1f,r0" "\n"
         " <i124extend_insn> %2,%4" "\n"
         " .align 2" "\n"
         " mov r15,r1" "\n"
         " mov #(0f-1f),r15" "\n"
         "0: mov.<bwl> %1,%0" "\n"
         " cmp/eq %0,%4" "\n"
         " bf 1f" "\n"
         " mov.<bwl> %3,%1" "\n"
         "1: mov r1,r15";
}
  [(set_attr "length" "20")])

(define_insn "atomic_compare_and_swap<mode>_soft_tcb"
  [(set (match_operand:SI 0 "arith_reg_dest" "=&r")
        (unspec_volatile:SI
          [(match_operand:QIHISI 1 "atomic_mem_operand_0" "=SraSdd")
           (match_operand:QIHISI 2 "arith_reg_operand" "r")
           (match_operand:QIHISI 3 "arith_reg_operand" "r")]
          UNSPECV_CMPXCHG_1))
   (set (match_dup 1)
        (unspec_volatile:QIHISI [(const_int 0)] UNSPECV_CMPXCHG_2))
   (set (reg:SI T_REG)
        (unspec_volatile:SI [(const_int 0)] UNSPECV_CMPXCHG_3))
   (use (match_operand:SI 4 "gbr_displacement"))
   (clobber (match_scratch:SI 5 "=&r"))
   (clobber (reg:SI R0_REG))
   (clobber (reg:SI R1_REG))]
  "TARGET_ATOMIC_SOFT_TCB"
{
  return "\r mova 1f,r0" "\n"
         " .align 2" "\n"
         " <i124extend_insn> %2,%5" "\n"
         " mov #(0f-1f),r1" "\n"
         " mov.l r0,@(%O4,gbr)" "\n"
         "0: mov.<bwl> %1,%0" "\n"
         " mov #0,r0" "\n"
         " cmp/eq %0,%5" "\n"
         " bf 1f" "\n"
         " mov.<bwl> %3,%1" "\n"
         "1: mov.l r0,@(%O4,gbr)";
}
  [(set_attr "length" "22")])

(define_insn "atomic_compare_and_swap<mode>_soft_imask"
  [(set (match_operand:SI 0 "arith_reg_dest" "=&z")
        (unspec_volatile:SI
          [(match_operand:QIHISI 1 "atomic_mem_operand_0" "=SraSdd")
           (match_operand:QIHISI 2 "arith_reg_operand" "r")
           (match_operand:QIHISI 3 "arith_reg_operand" "r")]
          UNSPECV_CMPXCHG_1))
   (set (match_dup 1)
        (unspec_volatile:QIHISI [(const_int 0)] UNSPECV_CMPXCHG_2))
   (set (reg:SI T_REG)
        (unspec_volatile:SI [(const_int 0)] UNSPECV_CMPXCHG_3))
   (clobber (match_scratch:SI 4 "=&r"))
   (clobber (match_scratch:SI 5 "=&r"))]
  "TARGET_ATOMIC_SOFT_IMASK"
{
  /* The comparison result is supposed to be in T_REG.
     Notice that restoring SR will overwrite the T_REG.  We handle this by
     rotating the T_REG into the saved SR before restoring SR.  On SH2A we
     can do one insn shorter by using the bst insn.  */
  if (!TARGET_SH2A)
    return "\r stc sr,%0" "\n"
           " <i124extend_insn> %2,%4" "\n"
           " mov %0,%5" "\n"
           " or #0xF0,%0" "\n"
           " shlr %5" "\n"
           " ldc %0,sr" "\n"
           " mov.<bwl> %1,%0" "\n"
           " cmp/eq %4,%0" "\n"
           " bf 1f" "\n"
           " mov.<bwl> %3,%1" "\n"
           "1: rotcl %5" "\n"
           " ldc %5,sr";
  else
    return "\r stc sr,%0" "\n"
           " <i124extend_insn> %2,%4" "\n"
           " mov %0,%5" "\n"
           " or #0xF0,%0" "\n"
           " ldc %0,sr" "\n"
           " mov.<bwl> %1,%0" "\n"
           " cmp/eq %4,%0" "\n"
           " bst #0,%5" "\n"
           " bf 1f" "\n"
           " mov.<bwl> %3,%1" "\n"
           "1: ldc %5,sr";
}
  [(set (attr "length") (if_then_else (match_test "!TARGET_SH2A")
                                      (const_string "24")
                                      (const_string "22")))])

;;------------------------------------------------------------------------------
;; read - write - return old value

(define_expand "atomic_exchange<mode>"
  [(match_operand:QIHISI 0 "arith_reg_dest")            ;; oldval output
   (match_operand:QIHISI 1 "atomic_mem_operand_0")      ;; memory
   (match_operand:QIHISI 2 "atomic_arith_operand_0")    ;; newval input
   (match_operand:SI 3 "const_int_operand")]            ;; memory model
  "TARGET_ATOMIC_ANY"
{
  rtx mem = operands[1];
  rtx val = operands[2];
  rtx atomic_insn;

  if (TARGET_ATOMIC_HARD_LLCS
      || (TARGET_SH4A && <MODE>mode == SImode && !TARGET_ATOMIC_STRICT))
    atomic_insn = gen_atomic_exchange<mode>_hard (operands[0], mem, val);
  else if (TARGET_ATOMIC_SOFT_GUSA)
    atomic_insn = gen_atomic_exchange<mode>_soft_gusa (operands[0], mem, val);
  else if (TARGET_ATOMIC_SOFT_TCB)
    atomic_insn = gen_atomic_exchange<mode>_soft_tcb (operands[0], mem, val,
                      TARGET_ATOMIC_SOFT_TCB_GBR_OFFSET_RTX);
  else if (TARGET_ATOMIC_SOFT_IMASK)
    atomic_insn = gen_atomic_exchange<mode>_soft_imask (operands[0], mem, val);
  else
    FAIL;

  emit_insn (atomic_insn);

  if (<MODE>mode == QImode)
    emit_insn (gen_zero_extendqisi2 (gen_lowpart (SImode, operands[0]),
                                     operands[0]));
  else if (<MODE>mode == HImode)
    emit_insn (gen_zero_extendhisi2 (gen_lowpart (SImode, operands[0]),
                                     operands[0]));
  DONE;
})

(define_insn "atomic_exchangesi_hard"
  [(set (match_operand:SI 0 "arith_reg_dest" "=&r")
        (match_operand:SI 1 "atomic_mem_operand_0" "=Sra"))
   (set (match_dup 1)
        (unspec:SI
          [(match_operand:SI 2 "arith_operand" "rI08")] UNSPEC_ATOMIC))
   (set (reg:SI T_REG) (const_int 1))
   (clobber (reg:SI R0_REG))]
  "TARGET_ATOMIC_HARD_LLCS
   || (TARGET_SH4A && TARGET_ATOMIC_ANY && !TARGET_ATOMIC_STRICT)"
{
  return "\r0: movli.l %1,r0" "\n"
         " mov r0,%0" "\n"
         " mov %2,r0" "\n"
         " movco.l r0,%1" "\n"
         " bf 0b";
}
  [(set_attr "length" "10")])

;; The QIHImode llcs patterns modify the address register of the memory
;; operand.  In order to express that, we have to open code the memory
;; operand.  Initially the insn is expanded like every other atomic insn
;; using the memory operand.  In split1 the insn is converted and the
;; memory operand's address register is exposed.
(define_insn_and_split "atomic_exchange<mode>_hard"
  [(set (match_operand:QIHI 0 "arith_reg_dest")
        (match_operand:QIHI 1 "atomic_mem_operand_0"))
   (set (match_dup 1)
        (unspec:QIHI
          [(match_operand:QIHI 2 "arith_reg_operand")] UNSPEC_ATOMIC))
   (set (reg:SI T_REG) (const_int 1))
   (clobber (reg:SI R0_REG))]
  "TARGET_ATOMIC_HARD_LLCS && can_create_pseudo_p ()"
  "#"
  "&& 1"
  [(const_int 0)]
{
  rtx i = gen_atomic_exchange<mode>_hard_1 (operands[0], XEXP (operands[1], 0),
                                            operands[2]);

  /* Replace the new mems in the new insn with the old mem to preserve
     aliasing info.  */
  XEXP (XVECEXP (i, 0, 0), 1) = operands[1];
  XEXP (XVECEXP (i, 0, 1), 0) = operands[1];
  emit_insn (i);
})

(define_insn "atomic_exchange<mode>_hard_1"
  [(set (match_operand:QIHI 0 "arith_reg_dest" "=&r")
        (mem:QIHI (match_operand:SI 1 "arith_reg_operand" "r")))
   (set (mem:QIHI (match_dup 1))
        (unspec:QIHI
          [(match_operand:QIHI 2 "arith_reg_operand" "r")] UNSPEC_ATOMIC))
   (set (reg:SI T_REG) (const_int 1))
   (clobber (reg:SI R0_REG))
   (clobber (match_scratch:SI 3 "=&r"))
   (clobber (match_scratch:SI 4 "=1"))]
  "TARGET_ATOMIC_HARD_LLCS"
{
  return "\r mov #-4,%3" "\n"
         " and %1,%3" "\n"
         " xor %3,%1" "\n"
         " add r15,%1" "\n"
         " add #-4,%1" "\n"
         "0: movli.l @%3,r0" "\n"
         " mov.l r0,@-r15" "\n"
         " mov.<bw> @%1,%0" "\n"
         " mov.<bw> %2,@%1" "\n"
         " mov.l @r15+,r0" "\n"
         " movco.l r0,@%3" "\n"
         " bf 0b";
}
  [(set_attr "length" "24")])

(define_insn "atomic_exchange<mode>_soft_gusa"
  [(set (match_operand:QIHISI 0 "arith_reg_dest" "=&u")
        (match_operand:QIHISI 1 "atomic_mem_operand_0" "=AraAdd"))
   (set (match_dup 1)
        (unspec:QIHISI
          [(match_operand:QIHISI 2 "arith_reg_operand" "u")] UNSPEC_ATOMIC))
   (clobber (reg:SI R0_REG))
   (clobber (reg:SI R1_REG))]
  "TARGET_ATOMIC_SOFT_GUSA"
{
  return "\r mova 1f,r0" "\n"
         " .align 2" "\n"
         " mov r15,r1" "\n"
         " mov #(0f-1f),r15" "\n"
         "0: mov.<bwl> %1,%0" "\n"
         " mov.<bwl> %2,%1" "\n"
         "1: mov r1,r15";
}
  [(set_attr "length" "14")])

(define_insn "atomic_exchange<mode>_soft_tcb"
  [(set (match_operand:QIHISI 0 "arith_reg_dest" "=&r")
        (match_operand:QIHISI 1 "atomic_mem_operand_0" "=SraSdd"))
   (set (match_dup 1)
        (unspec:QIHISI
          [(match_operand:QIHISI 2 "arith_reg_operand" "r")] UNSPEC_ATOMIC))
   (clobber (reg:SI R0_REG))
   (clobber (reg:SI R1_REG))
   (use (match_operand:SI 3 "gbr_displacement"))]
  "TARGET_ATOMIC_SOFT_TCB"
{
  return "\r mova 1f,r0" "\n"
         " mov #(0f-1f),r1" "\n"
         " .align 2" "\n"
         " mov.l r0,@(%O3,gbr)" "\n"
         "0: mov.<bwl> %1,%0" "\n"
         " mov #0,r0" "\n"
         " mov.<bwl> %2,%1" "\n"
         "1: mov.l r0,@(%O3,gbr)";
}
  [(set_attr "length" "16")])

(define_insn "atomic_exchange<mode>_soft_imask"
  [(set (match_operand:QIHISI 0 "arith_reg_dest" "=&z")
        (match_operand:QIHISI 1 "atomic_mem_operand_0" "=SraSdd"))
   (set (match_dup 1)
        (unspec:QIHISI
          [(match_operand:QIHISI 2 "arith_reg_operand" "r")] UNSPEC_ATOMIC))
   (clobber (match_scratch:SI 3 "=&r"))]
  "TARGET_ATOMIC_SOFT_IMASK"
{
  return "\r stc sr,%0" "\n"
         " mov %0,%3" "\n"
         " or #0xF0,%0" "\n"
         " ldc %0,sr" "\n"
         " mov.<bwl> %1,%0" "\n"
         " mov.<bwl> %2,%1" "\n"
         " ldc %3,sr";
}
  [(set_attr "length" "14")])

;;------------------------------------------------------------------------------
;; read - add|sub|or|and|xor|nand - write - return old value
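;; I.e. these implement { old = *mem; *mem = old <op> val; return old; }
;; (with nand being *mem = ~(old & val)).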

;; atomic_arith_operand_1 can be used by any atomic model for a plus op,
;; since there's no r0 restriction.
(define_predicate "atomic_arith_operand_1"
  (and (match_code "subreg,reg,const_int")
       (ior (match_operand 0 "arith_reg_operand")
            (match_test "satisfies_constraint_I08 (op)"))))

;; atomic_logical_operand_1 can be used by the hard_llcs, tcb and soft_imask
;; patterns only, due to its r0 restriction.
(define_predicate "atomic_logical_operand_1"
  (and (match_code "subreg,reg,const_int")
       (ior (match_operand 0 "arith_reg_operand")
            (and (match_test "satisfies_constraint_K08 (op)")
                 (ior (match_test "TARGET_ATOMIC_HARD_LLCS")
                      (match_test "TARGET_ATOMIC_SOFT_IMASK")
                      (match_test "TARGET_ATOMIC_SOFT_TCB")
                      (match_test "TARGET_ATOMIC_ANY && TARGET_SH4A
                                   && mode == SImode
                                   && !TARGET_ATOMIC_STRICT"))))))

(define_code_attr fetchop_predicate_1
  [(plus "atomic_arith_operand_1") (minus "arith_reg_operand")
   (ior "atomic_logical_operand_1") (xor "atomic_logical_operand_1")
   (and "atomic_logical_operand_1")])

(define_code_attr fetchop_constraint_1_llcs
  [(plus "rI08") (minus "r") (ior "rK08") (xor "rK08") (and "rK08")])

(define_code_attr fetchop_constraint_1_gusa
  [(plus "uI08") (minus "u") (ior "u") (xor "u") (and "u")])

(define_code_attr fetchop_constraint_1_tcb
  [(plus "rI08") (minus "r") (ior "rK08") (xor "rK08") (and "rK08")])

(define_code_attr fetchop_constraint_1_imask
  [(plus "rI08") (minus "r") (ior "rK08") (xor "rK08") (and "rK08")])

;; Displacement addressing mode (incl. GBR relative) can be used by tcb and
;; imask atomic patterns in any mode, since all the patterns use R0 as the
;; register operand for memory loads/stores.  gusa and llcs patterns can only
;; use displacement addressing for SImode.
(define_predicate "atomic_mem_operand_1"
  (and (match_code "mem")
       (ior (match_operand 0 "simple_mem_operand")
            (and (match_test "mode == SImode")
                 (match_test "TARGET_ATOMIC_SOFT_GUSA
                              && (!TARGET_SH4A || TARGET_ATOMIC_STRICT)")
                 (match_operand 0 "displacement_mem_operand")
                 (match_operand 0 "short_displacement_mem_operand"))
            (and (ior (match_test "(TARGET_ATOMIC_SOFT_TCB
                                    || TARGET_ATOMIC_SOFT_IMASK)
                                   && (!TARGET_SH4A || TARGET_ATOMIC_STRICT)")
                      (match_test "(TARGET_ATOMIC_SOFT_TCB
                                    || TARGET_ATOMIC_SOFT_IMASK)
                                   && TARGET_SH4A && !TARGET_ATOMIC_STRICT
                                   && mode != SImode"))
                 (ior (and (match_operand 0 "displacement_mem_operand")
                           (match_operand 0 "short_displacement_mem_operand"))
                      (match_operand 0 "gbr_address_mem"))))))
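
;; For example, under -matomic-model=soft-tcb a QImode fetch-op may use a
;; GBR-relative operand directly (illustrative):
;;
;;      mov.b   @(5,gbr),r0     ! load through the R0-implicit GBR mode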

(define_expand "atomic_fetch_<fetchop_name><mode>"
  [(set (match_operand:QIHISI 0 "arith_reg_dest")
        (match_operand:QIHISI 1 "atomic_mem_operand_1"))
   (set (match_dup 1)
        (unspec:QIHISI
          [(FETCHOP:QIHISI (match_dup 1)
             (match_operand:QIHISI 2 "<fetchop_predicate_1>"))]
          UNSPEC_ATOMIC))
   (match_operand:SI 3 "const_int_operand")]
  "TARGET_ATOMIC_ANY"
{
  rtx mem = operands[1];
  rtx atomic_insn;

  if (TARGET_ATOMIC_HARD_LLCS
      || (TARGET_SH4A && <MODE>mode == SImode && !TARGET_ATOMIC_STRICT))
    atomic_insn = gen_atomic_fetch_<fetchop_name><mode>_hard (operands[0], mem,
                                                              operands[2]);
  else if (TARGET_ATOMIC_SOFT_GUSA)
    atomic_insn = gen_atomic_fetch_<fetchop_name><mode>_soft_gusa (operands[0],
                      mem, operands[2]);
  else if (TARGET_ATOMIC_SOFT_TCB)
    atomic_insn = gen_atomic_fetch_<fetchop_name><mode>_soft_tcb (operands[0],
                      mem, operands[2], TARGET_ATOMIC_SOFT_TCB_GBR_OFFSET_RTX);
  else if (TARGET_ATOMIC_SOFT_IMASK)
    atomic_insn = gen_atomic_fetch_<fetchop_name><mode>_soft_imask (operands[0],
                      mem, operands[2]);
  else
    FAIL;

  emit_insn (atomic_insn);

  if (<MODE>mode == QImode)
    emit_insn (gen_zero_extendqisi2 (gen_lowpart (SImode, operands[0]),
                                     operands[0]));
  else if (<MODE>mode == HImode)
    emit_insn (gen_zero_extendhisi2 (gen_lowpart (SImode, operands[0]),
                                     operands[0]));
  DONE;
})

(define_insn_and_split "atomic_fetch_<fetchop_name>si_hard"
  [(set (match_operand:SI 0 "arith_reg_dest" "=&r")
        (match_operand:SI 1 "atomic_mem_operand_1" "=Sra"))
   (set (match_dup 1)
        (unspec:SI
          [(FETCHOP:SI (match_dup 1)
             (match_operand:SI 2 "<fetchop_predicate_1>"
                               "<fetchop_constraint_1_llcs>"))]
          UNSPEC_ATOMIC))
   (set (reg:SI T_REG) (const_int 1))
   (clobber (reg:SI R0_REG))]
  "TARGET_ATOMIC_HARD_LLCS
   || (TARGET_SH4A && TARGET_ATOMIC_ANY && !TARGET_ATOMIC_STRICT)"
{
  return "\r0: movli.l %1,r0" "\n"
         " mov r0,%0" "\n"
         " <fetchop_name> %2,r0" "\n"
         " movco.l r0,%1" "\n"
         " bf 0b";
}
  "&& can_create_pseudo_p () && optimize
   && sh_reg_dead_or_unused_after_insn (insn, REGNO (operands[0]))"
  [(const_int 0)]
{
  emit_insn (gen_atomic_<fetchop_name>_fetchsi_hard (gen_reg_rtx (SImode),
                                                     operands[1], operands[2]));
}
  [(set_attr "length" "10")])

;; Combine pattern for xor (val, -1) / nand (val, -1).
(define_insn_and_split "atomic_fetch_notsi_hard"
  [(set (match_operand:SI 0 "arith_reg_dest" "=&r")
        (match_operand:SI 1 "atomic_mem_operand_1" "=Sra"))
   (set (match_dup 1)
        (unspec:SI [(not:SI (match_dup 1))] UNSPEC_ATOMIC))
   (set (reg:SI T_REG) (const_int 1))
   (clobber (reg:SI R0_REG))]
  "TARGET_ATOMIC_HARD_LLCS
   || (TARGET_SH4A && TARGET_ATOMIC_ANY && !TARGET_ATOMIC_STRICT)"
{
  return "\r0: movli.l %1,r0" "\n"
         " mov r0,%0" "\n"
         " not r0,r0" "\n"
         " movco.l r0,%1" "\n"
         " bf 0b";
}
  "&& can_create_pseudo_p () && optimize
   && sh_reg_dead_or_unused_after_insn (insn, REGNO (operands[0]))"
  [(const_int 0)]
{
  emit_insn (gen_atomic_not_fetchsi_hard (gen_reg_rtx (SImode), operands[1]));
}
  [(set_attr "length" "10")])

;; The QIHImode llcs patterns modify the address register of the memory
;; operand.  In order to express that, we have to open code the memory
;; operand.  Initially the insn is expanded like every other atomic insn
;; using the memory operand.  In split1 the insn is converted and the
;; memory operand's address register is exposed.
(define_insn_and_split "atomic_fetch_<fetchop_name><mode>_hard"
  [(set (match_operand:QIHI 0 "arith_reg_dest")
        (match_operand:QIHI 1 "atomic_mem_operand_1"))
   (set (match_dup 1)
        (unspec:QIHI
          [(FETCHOP:QIHI (match_dup 1)
             (match_operand:QIHI 2 "<fetchop_predicate_1>"))]
          UNSPEC_ATOMIC))
   (set (reg:SI T_REG) (const_int 1))
   (clobber (reg:SI R0_REG))]
  "TARGET_ATOMIC_HARD_LLCS && can_create_pseudo_p ()"
  "#"
  "&& 1"
  [(const_int 0)]
{
  if (optimize
      && sh_reg_dead_or_unused_after_insn (curr_insn, REGNO (operands[0])))
    emit_insn (gen_atomic_<fetchop_name><mode>_hard (operands[1], operands[2]));
  else
    {
      rtx i = gen_atomic_fetch_<fetchop_name><mode>_hard_1 (
          operands[0], XEXP (operands[1], 0), operands[2]);

      /* Replace the new mems in the new insn with the old mem to preserve
         aliasing info.  */
      XEXP (XVECEXP (i, 0, 0), 1) = operands[1];
      XEXP (XVECEXP (i, 0, 1), 0) = operands[1];
      XEXP (XVECEXP (XEXP (XVECEXP (i, 0, 1), 1), 0, 0), 0) = operands[1];
      emit_insn (i);
    }
})

(define_insn "atomic_fetch_<fetchop_name><mode>_hard_1"
  [(set (match_operand:QIHI 0 "arith_reg_dest" "=&r")
        (mem:QIHI (match_operand:SI 1 "arith_reg_operand" "r")))
   (set (mem:QIHI (match_dup 1))
        (unspec:QIHI
          [(FETCHOP:QIHI (mem:QIHI (match_dup 1))
             (match_operand:QIHI 2 "<fetchop_predicate_1>"
                                 "<fetchop_constraint_1_llcs>"))]
          UNSPEC_ATOMIC))
   (set (reg:SI T_REG) (const_int 1))
   (clobber (reg:SI R0_REG))
   (clobber (match_scratch:SI 3 "=&r"))
   (clobber (match_scratch:SI 4 "=1"))]
  "TARGET_ATOMIC_HARD_LLCS"
{
  return "\r mov #-4,%3" "\n"
         " and %1,%3" "\n"
         " xor %3,%1" "\n"
         " add r15,%1" "\n"
         " add #-4,%1" "\n"
         "0: movli.l @%3,r0" "\n"
         " mov.l r0,@-r15" "\n"
         " mov.<bw> @%1,r0" "\n"
         " mov r0,%0" "\n"
         " <fetchop_name> %2,r0" "\n"
         " mov.<bw> r0,@%1" "\n"
         " mov.l @r15+,r0" "\n"
         " movco.l r0,@%3" "\n"
         " bf 0b";
}
  [(set_attr "length" "28")])

;; The QIHImode llcs patterns modify the address register of the memory
;; operand.  In order to express that, we have to open code the memory
;; operand.  Initially the insn is expanded like every other atomic insn
;; using the memory operand.  In split1 the insn is converted and the
;; memory operand's address register is exposed.
(define_insn_and_split "atomic_<fetchop_name><mode>_hard"
  [(set (match_operand:QIHI 0 "atomic_mem_operand_1")
        (unspec:QIHI
          [(FETCHOP:QIHI (match_dup 0)
             (match_operand:QIHI 1 "<fetchop_predicate_1>"))]
          UNSPEC_ATOMIC))
   (set (reg:SI T_REG) (const_int 1))
   (clobber (reg:SI R0_REG))]
  "TARGET_ATOMIC_HARD_LLCS && can_create_pseudo_p ()"
  "#"
  "&& 1"
  [(const_int 0)]
{
  rtx i = gen_atomic_<fetchop_name><mode>_hard_1 (XEXP (operands[0], 0),
                                                  operands[1]);
  /* Replace the new mems in the new insn with the old mem to preserve
     aliasing info.  */
  XEXP (XVECEXP (i, 0, 0), 0) = operands[0];
  XEXP (XVECEXP (XEXP (XVECEXP (i, 0, 0), 1), 0, 0), 0) = operands[0];
  emit_insn (i);
})

(define_insn "atomic_<fetchop_name><mode>_hard_1"
  [(set (mem:QIHI (match_operand:SI 0 "arith_reg_operand" "r"))
        (unspec:QIHI
          [(FETCHOP:QIHI (mem:QIHI (match_dup 0))
             (match_operand:QIHI 1 "<fetchop_predicate_1>"
                                 "<fetchop_constraint_1_llcs>"))]
          UNSPEC_ATOMIC))
   (set (reg:SI T_REG) (const_int 1))
   (clobber (reg:SI R0_REG))
   (clobber (match_scratch:SI 2 "=&r"))
   (clobber (match_scratch:SI 3 "=0"))]
  "TARGET_ATOMIC_HARD_LLCS"
{
  return "\r mov #-4,%2" "\n"
         " and %0,%2" "\n"
         " xor %2,%0" "\n"
         " add r15,%0" "\n"
         " add #-4,%0" "\n"
         "0: movli.l @%2,r0" "\n"
         " mov.l r0,@-r15" "\n"
         " mov.<bw> @%0,r0" "\n"
         " <fetchop_name> %1,r0" "\n"
         " mov.<bw> r0,@%0" "\n"
         " mov.l @r15+,r0" "\n"
         " movco.l r0,@%2" "\n"
         " bf 0b";
}
  [(set_attr "length" "26")])

;; Combine pattern for xor (val, -1) / nand (val, -1).
(define_insn_and_split "atomic_fetch_not<mode>_hard"
  [(set (match_operand:QIHI 0 "arith_reg_dest" "=&r")
        (mem:QIHI (match_operand:SI 1 "arith_reg_operand" "r")))
   (set (mem:QIHI (match_dup 1))
        (unspec:QIHI [(not:QIHI (mem:QIHI (match_dup 1)))] UNSPEC_ATOMIC))
   (set (reg:SI T_REG) (const_int 1))
   (clobber (reg:SI R0_REG))
   (clobber (match_scratch:SI 2 "=&r"))
   (clobber (match_scratch:SI 3 "=1"))]
  "TARGET_ATOMIC_HARD_LLCS"
{
  return "\r mov #-4,%2" "\n"
         " and %1,%2" "\n"
         " xor %2,%1" "\n"
         " add r15,%1" "\n"
         " add #-4,%1" "\n"
         "0: movli.l @%2,r0" "\n"
         " mov.l r0,@-r15" "\n"
         " mov.<bw> @%1,%0" "\n"
         " not %0,r0" "\n"
         " mov.<bw> r0,@%1" "\n"
         " mov.l @r15+,r0" "\n"
         " movco.l r0,@%2" "\n"
         " bf 0b";
}
  "&& can_create_pseudo_p () && optimize
   && sh_reg_dead_or_unused_after_insn (insn, REGNO (operands[0]))"
  [(const_int 0)]
{
  rtx i = gen_atomic_not<mode>_hard (operands[1]);

  /* Replace the new mems in the new insn with the old mem to preserve
     aliasing info.  */
  rtx m = XEXP (XVECEXP (PATTERN (curr_insn), 0, 0), 1);
  XEXP (XVECEXP (i, 0, 0), 0) = m;
  XEXP (XVECEXP (XEXP (XVECEXP (i, 0, 0), 1), 0, 0), 0) = m;
  emit_insn (i);
}
  [(set_attr "length" "26")])

(define_insn "atomic_not<mode>_hard"
  [(set (mem:QIHI (match_operand:SI 0 "arith_reg_operand" "r"))
        (unspec:QIHI [(not:QIHI (mem:QIHI (match_dup 0)))] UNSPEC_ATOMIC))
   (set (reg:SI T_REG) (const_int 1))
   (clobber (reg:SI R0_REG))
   (clobber (match_scratch:SI 1 "=&r"))
   (clobber (match_scratch:SI 2 "=0"))]
  "TARGET_ATOMIC_HARD_LLCS"
{
  return "\r mov #-4,%1" "\n"
         " and %0,%1" "\n"
         " xor %1,%0" "\n"
         " add r15,%0" "\n"
         " add #-4,%0" "\n"
         "0: movli.l @%1,r0" "\n"
         " mov.l r0,@-r15" "\n"
         " mov.<bw> @%0,r0" "\n"
         " not r0,r0" "\n"
         " mov.<bw> r0,@%0" "\n"
         " mov.l @r15+,r0" "\n"
         " movco.l r0,@%1" "\n"
         " bf 0b";
}
  [(set_attr "length" "26")])

(define_insn_and_split "atomic_fetch_<fetchop_name><mode>_soft_gusa"
  [(set (match_operand:QIHISI 0 "arith_reg_dest" "=&u")
        (match_operand:QIHISI 1 "atomic_mem_operand_1" "=AraAdd"))
   (set (match_dup 1)
        (unspec:QIHISI
          [(FETCHOP:QIHISI
             (match_dup 1)
             (match_operand:QIHISI 2 "<fetchop_predicate_1>"
                                   "<fetchop_constraint_1_gusa>"))]
          UNSPEC_ATOMIC))
   (clobber (match_scratch:QIHISI 3 "=&u"))
   (clobber (reg:SI R0_REG))
   (clobber (reg:SI R1_REG))]
  "TARGET_ATOMIC_SOFT_GUSA"
{
  return "\r mova 1f,r0" "\n"
         " .align 2" "\n"
         " mov r15,r1" "\n"
         " mov #(0f-1f),r15" "\n"
         "0: mov.<bwl> %1,%0" "\n"
         " mov %0,%3" "\n"
         " <fetchop_name> %2,%3" "\n"
         " mov.<bwl> %3,%1" "\n"
         "1: mov r1,r15";
}
  "&& can_create_pseudo_p () && optimize
   && sh_reg_dead_or_unused_after_insn (insn, REGNO (operands[0]))"
  [(const_int 0)]
{
  emit_insn (gen_atomic_<fetchop_name>_fetch<mode>_soft_gusa (
      gen_reg_rtx (<MODE>mode), operands[1], operands[2]));
}
  [(set_attr "length" "18")])

;; Combine pattern for xor (val, -1) / nand (val, -1).
(define_insn_and_split "atomic_fetch_not<mode>_soft_gusa"
  [(set (match_operand:QIHISI 0 "arith_reg_dest" "=&u")
        (match_operand:QIHISI 1 "atomic_mem_operand_1" "=AraAdd"))
   (set (match_dup 1)
        (unspec:QIHISI [(not:QIHISI (match_dup 1))] UNSPEC_ATOMIC))
   (clobber (match_scratch:QIHISI 2 "=&u"))
   (clobber (reg:SI R0_REG))
   (clobber (reg:SI R1_REG))]
  "TARGET_ATOMIC_SOFT_GUSA"
{
  return "\r mova 1f,r0" "\n"
         " mov r15,r1" "\n"
         " .align 2" "\n"
         " mov #(0f-1f),r15" "\n"
         "0: mov.<bwl> %1,%0" "\n"
         " not %0,%2" "\n"
         " mov.<bwl> %2,%1" "\n"
         "1: mov r1,r15";
}
  "&& can_create_pseudo_p () && optimize
   && sh_reg_dead_or_unused_after_insn (insn, REGNO (operands[0]))"
  [(const_int 0)]
{
  emit_insn (gen_atomic_not_fetch<mode>_soft_gusa (gen_reg_rtx (<MODE>mode),
                                                   operands[1]));
}
  [(set_attr "length" "16")])

(define_insn_and_split "atomic_fetch_<fetchop_name><mode>_soft_tcb"
  [(set (match_operand:QIHISI 0 "arith_reg_dest" "=&r")
        (match_operand:QIHISI 1 "atomic_mem_operand_1" "=SraSdd"))
   (set (match_dup 1)
        (unspec:QIHISI
          [(FETCHOP:QIHISI
             (match_dup 1)
             (match_operand:QIHISI 2 "<fetchop_predicate_1>"
                                   "<fetchop_constraint_1_tcb>"))]
          UNSPEC_ATOMIC))
   (use (match_operand:SI 3 "gbr_displacement"))
   (clobber (reg:SI R0_REG))
   (clobber (reg:SI R1_REG))]
  "TARGET_ATOMIC_SOFT_TCB"
{
  return "\r mova 1f,r0" "\n"
         " .align 2" "\n"
         " mov #(0f-1f),r1" "\n"
         " mov.l r0,@(%O3,gbr)" "\n"
         "0: mov.<bwl> %1,r0" "\n"
         " mov r0,%0" "\n"
         " <fetchop_name> %2,r0" "\n"
         " mov.<bwl> r0,%1" "\n"
         "1: mov #0,r0" "\n"
         " mov.l r0,@(%O3,gbr)";
}
  "&& can_create_pseudo_p () && optimize
   && sh_reg_dead_or_unused_after_insn (insn, REGNO (operands[0]))"
  [(const_int 0)]
{
  emit_insn (gen_atomic_<fetchop_name><mode>_soft_tcb (
      operands[1], operands[2], operands[3]));
}
  [(set_attr "length" "20")])

(define_insn "atomic_<fetchop_name><mode>_soft_tcb"
  [(set (match_operand:QIHISI 0 "atomic_mem_operand_1" "=SraSdd")
        (unspec:QIHISI
          [(FETCHOP:QIHISI
             (match_dup 0)
             (match_operand:QIHISI 1 "<fetchop_predicate_1>"
                                   "<fetchop_constraint_1_tcb>"))]
          UNSPEC_ATOMIC))
   (use (match_operand:SI 2 "gbr_displacement"))
   (clobber (reg:SI R0_REG))
   (clobber (reg:SI R1_REG))]
  "TARGET_ATOMIC_SOFT_TCB"
{
  return "\r mova 1f,r0" "\n"
         " mov #(0f-1f),r1" "\n"
         " .align 2" "\n"
         " mov.l r0,@(%O2,gbr)" "\n"
         "0: mov.<bwl> %0,r0" "\n"
         " <fetchop_name> %1,r0" "\n"
         " mov.<bwl> r0,%0" "\n"
         "1: mov #0,r0" "\n"
         " mov.l r0,@(%O2,gbr)";
}
  [(set_attr "length" "18")])

;; Combine pattern for xor (val, -1) / nand (val, -1).
(define_insn_and_split "atomic_fetch_not<mode>_soft_tcb"
  [(set (match_operand:QIHISI 0 "arith_reg_dest" "=&r")
        (match_operand:QIHISI 1 "atomic_mem_operand_1" "=SraSdd"))
   (set (match_dup 1)
        (unspec:QIHISI [(not:QIHISI (match_dup 1))] UNSPEC_ATOMIC))
   (use (match_operand:SI 2 "gbr_displacement"))
   (clobber (reg:SI R0_REG))
   (clobber (reg:SI R1_REG))]
  "TARGET_ATOMIC_SOFT_TCB"
{
  return "\r mova 1f,r0" "\n"
         " .align 2" "\n"
         " mov #(0f-1f),r1" "\n"
         " mov.l r0,@(%O2,gbr)" "\n"
         "0: mov.<bwl> %1,r0" "\n"
         " mov r0,%0" "\n"
         " not r0,r0" "\n"
         " mov.<bwl> r0,%1" "\n"
         "1: mov #0,r0" "\n"
         " mov.l r0,@(%O2,gbr)";
}
  "&& can_create_pseudo_p () && optimize
   && sh_reg_dead_or_unused_after_insn (insn, REGNO (operands[0]))"
  [(const_int 0)]
{
  emit_insn (gen_atomic_not<mode>_soft_tcb (operands[1], operands[2]));
}
  [(set_attr "length" "20")])

(define_insn "atomic_not<mode>_soft_tcb"
  [(set (match_operand:QIHISI 0 "atomic_mem_operand_1" "=SraSdd")
        (unspec:QIHISI [(not:QIHISI (match_dup 0))] UNSPEC_ATOMIC))
   (use (match_operand:SI 1 "gbr_displacement"))
   (clobber (reg:SI R0_REG))
   (clobber (reg:SI R1_REG))]
  "TARGET_ATOMIC_SOFT_TCB"
{
  return "\r mova 1f,r0" "\n"
         " mov #(0f-1f),r1" "\n"
         " .align 2" "\n"
         " mov.l r0,@(%O1,gbr)" "\n"
         "0: mov.<bwl> %0,r0" "\n"
         " not r0,r0" "\n"
         " mov.<bwl> r0,%0" "\n"
         "1: mov #0,r0" "\n"
         " mov.l r0,@(%O1,gbr)";
}
  [(set_attr "length" "18")])

(define_insn_and_split "atomic_fetch_<fetchop_name><mode>_soft_imask"
  [(set (match_operand:QIHISI 0 "arith_reg_dest" "=&r")
        (match_operand:QIHISI 1 "atomic_mem_operand_1" "=SraSdd"))
   (set (match_dup 1)
        (unspec:QIHISI
          [(FETCHOP:QIHISI
             (match_dup 1)
             (match_operand:QIHISI 2 "<fetchop_predicate_1>"
                                   "<fetchop_constraint_1_imask>"))]
          UNSPEC_ATOMIC))
   (clobber (reg:SI R0_REG))
   (clobber (match_scratch:QIHISI 3 "=&r"))]
  "TARGET_ATOMIC_SOFT_IMASK"
{
  return "\r stc sr,r0" "\n"
         " mov r0,%3" "\n"
         " or #0xF0,r0" "\n"
         " ldc r0,sr" "\n"
         " mov.<bwl> %1,r0" "\n"
         " mov r0,%0" "\n"
         " <fetchop_name> %2,r0" "\n"
         " mov.<bwl> r0,%1" "\n"
         " ldc %3,sr";
}
  "&& can_create_pseudo_p () && optimize
   && sh_reg_dead_or_unused_after_insn (insn, REGNO (operands[0]))"
  [(const_int 0)]
{
  emit_insn (gen_atomic_<fetchop_name>_fetch<mode>_soft_imask (
      gen_reg_rtx (<MODE>mode), operands[1], operands[2]));
}
  [(set_attr "length" "18")])

;; Combine pattern for xor (val, -1) / nand (val, -1).
(define_insn_and_split "atomic_fetch_not<mode>_soft_imask"
  [(set (match_operand:QIHISI 0 "arith_reg_dest" "=&r")
        (match_operand:QIHISI 1 "atomic_mem_operand_1" "=SraSdd"))
   (set (match_dup 1)
        (unspec:QIHISI [(not:QIHISI (match_dup 1))] UNSPEC_ATOMIC))
   (clobber (reg:SI R0_REG))
   (clobber (match_scratch:QIHISI 2 "=&r"))]
  "TARGET_ATOMIC_SOFT_IMASK"
{
  return "\r stc sr,r0" "\n"
         " mov r0,%2" "\n"
         " or #0xF0,r0" "\n"
         " ldc r0,sr" "\n"
         " mov.<bwl> %1,r0" "\n"
         " mov r0,%0" "\n"
         " not r0,r0" "\n"
         " mov.<bwl> r0,%1" "\n"
         " ldc %2,sr";
}
  "&& can_create_pseudo_p () && optimize
   && sh_reg_dead_or_unused_after_insn (insn, REGNO (operands[0]))"
  [(const_int 0)]
{
  emit_insn (gen_atomic_not_fetch<mode>_soft_imask (gen_reg_rtx (<MODE>mode),
                                                    operands[1]));
}
  [(set_attr "length" "18")])

(define_expand "atomic_fetch_nand<mode>"
  [(set (match_operand:QIHISI 0 "arith_reg_dest")
        (match_operand:QIHISI 1 "atomic_mem_operand_1"))
   (set (match_dup 1)
        (unspec:QIHISI
          [(not:QIHISI (and:QIHISI (match_dup 1)
             (match_operand:QIHISI 2 "atomic_logical_operand_1")))]
          UNSPEC_ATOMIC))
   (match_operand:SI 3 "const_int_operand")]
  "TARGET_ATOMIC_ANY"
{
  rtx mem = operands[1];
  rtx atomic_insn;

  if (TARGET_ATOMIC_HARD_LLCS
      || (TARGET_SH4A && <MODE>mode == SImode && !TARGET_ATOMIC_STRICT))
    atomic_insn = gen_atomic_fetch_nand<mode>_hard (operands[0], mem,
                                                    operands[2]);
  else if (TARGET_ATOMIC_SOFT_GUSA)
    atomic_insn = gen_atomic_fetch_nand<mode>_soft_gusa (operands[0], mem,
                                                         operands[2]);
  else if (TARGET_ATOMIC_SOFT_TCB)
    atomic_insn = gen_atomic_fetch_nand<mode>_soft_tcb (operands[0], mem,
                      operands[2], TARGET_ATOMIC_SOFT_TCB_GBR_OFFSET_RTX);
  else if (TARGET_ATOMIC_SOFT_IMASK)
    atomic_insn = gen_atomic_fetch_nand<mode>_soft_imask (operands[0], mem,
                                                          operands[2]);
  else
    FAIL;

  emit_insn (atomic_insn);

  if (<MODE>mode == QImode)
    emit_insn (gen_zero_extendqisi2 (gen_lowpart (SImode, operands[0]),
                                     operands[0]));
  else if (<MODE>mode == HImode)
    emit_insn (gen_zero_extendhisi2 (gen_lowpart (SImode, operands[0]),
                                     operands[0]));
  DONE;
})

(define_insn_and_split "atomic_fetch_nandsi_hard"
  [(set (match_operand:SI 0 "arith_reg_dest" "=&r")
        (match_operand:SI 1 "atomic_mem_operand_1" "=Sra"))
   (set (match_dup 1)
        (unspec:SI
          [(not:SI (and:SI (match_dup 1)
             (match_operand:SI 2 "logical_operand" "rK08")))]
          UNSPEC_ATOMIC))
   (set (reg:SI T_REG) (const_int 1))
   (clobber (reg:SI R0_REG))]
  "TARGET_ATOMIC_HARD_LLCS
   || (TARGET_SH4A && TARGET_ATOMIC_ANY && !TARGET_ATOMIC_STRICT)"
{
  return "\r0: movli.l %1,r0" "\n"
         " mov r0,%0" "\n"
         " and %2,r0" "\n"
         " not r0,r0" "\n"
         " movco.l r0,%1" "\n"
         " bf 0b";
}
  "&& can_create_pseudo_p () && optimize
   && sh_reg_dead_or_unused_after_insn (insn, REGNO (operands[0]))"
  [(const_int 0)]
{
  emit_insn (gen_atomic_nand_fetchsi_hard (gen_reg_rtx (SImode), operands[1],
                                           operands[2]));
}
  [(set_attr "length" "12")])

;; The QIHImode llcs patterns modify the address register of the memory
;; operand.  In order to express that, we have to open code the memory
;; operand.  Initially the insn is expanded like every other atomic insn
;; using the memory operand.  In split1 the insn is converted and the
;; memory operand's address register is exposed.
(define_insn_and_split "atomic_fetch_nand<mode>_hard"
  [(set (match_operand:QIHI 0 "arith_reg_dest")
        (match_operand:QIHI 1 "atomic_mem_operand_1"))
   (set (match_dup 1)
        (unspec:QIHI
          [(not:QIHI (and:QIHI (match_dup 1)
             (match_operand:QIHI 2 "logical_operand" "rK08")))]
          UNSPEC_ATOMIC))
   (set (reg:SI T_REG) (const_int 1))
   (clobber (reg:SI R0_REG))]
  "TARGET_ATOMIC_HARD_LLCS && can_create_pseudo_p ()"
  "#"
  "&& 1"
  [(const_int 0)]
{
  if (optimize
      && sh_reg_dead_or_unused_after_insn (curr_insn, REGNO (operands[0])))
    emit_insn (gen_atomic_nand<mode>_hard (operands[1], operands[2]));
  else
    {
      rtx i = gen_atomic_fetch_nand<mode>_hard_1 (
          operands[0], XEXP (operands[1], 0), operands[2]);

      /* Replace the new mems in the new insn with the old mem to preserve
         aliasing info.  */
      XEXP (XVECEXP (i, 0, 0), 1) = operands[1];
      XEXP (XVECEXP (i, 0, 1), 0) = operands[1];
      XEXP (XEXP (XVECEXP (XEXP (XVECEXP (i, 0, 1), 1), 0, 0), 0),
            0) = operands[1];
      emit_insn (i);
    }
})

(define_insn "atomic_fetch_nand<mode>_hard_1"
  [(set (match_operand:QIHI 0 "arith_reg_dest" "=&r")
        (mem:QIHI (match_operand:SI 1 "arith_reg_operand" "r")))
   (set (mem:QIHI (match_dup 1))
        (unspec:QIHI
          [(not:QIHI (and:QIHI (mem:QIHI (match_dup 1))
             (match_operand:QIHI 2 "logical_operand" "rK08")))]
          UNSPEC_ATOMIC))
   (set (reg:SI T_REG) (const_int 1))
   (clobber (reg:SI R0_REG))
   (clobber (match_scratch:SI 3 "=&r"))
   (clobber (match_scratch:SI 4 "=1"))]
  "TARGET_ATOMIC_HARD_LLCS"
{
  return "\r mov #-4,%3" "\n"
         " and %1,%3" "\n"
         " xor %3,%1" "\n"
         " add r15,%1" "\n"
         " add #-4,%1" "\n"
         "0: movli.l @%3,r0" "\n"
         " mov.l r0,@-r15" "\n"
         " mov.<bw> @%1,r0" "\n"
         " mov r0,%0" "\n"
         " and %2,r0" "\n"
         " not r0,r0" "\n"
         " mov.<bw> r0,@%1" "\n"
         " mov.l @r15+,r0" "\n"
         " movco.l r0,@%3" "\n"
         " bf 0b";
}
  [(set_attr "length" "30")])

;; The QIHImode llcs patterns modify the address register of the memory
;; operand.  In order to express that, we have to open code the memory
;; operand.  Initially the insn is expanded like every other atomic insn
;; using the memory operand.  In split1 the insn is converted and the
;; memory operand's address register is exposed.
(define_insn_and_split "atomic_nand<mode>_hard"
  [(set (match_operand:QIHI 0 "atomic_mem_operand_1")
        (unspec:QIHI
          [(not:QIHI (and:QIHI (match_dup 0)
             (match_operand:QIHI 1 "logical_operand")))]
          UNSPEC_ATOMIC))
   (set (reg:SI T_REG) (const_int 1))
   (clobber (reg:SI R0_REG))]
  "TARGET_ATOMIC_HARD_LLCS && can_create_pseudo_p ()"
  "#"
  "&& 1"
  [(const_int 0)]
{
  rtx i = gen_atomic_nand<mode>_hard_1 (XEXP (operands[0], 0), operands[1]);

  /* Replace the new mems in the new insn with the old mem to preserve
     aliasing info.  */
  XEXP (XVECEXP (i, 0, 0), 0) = operands[0];
  XEXP (XEXP (XVECEXP (XEXP (XVECEXP (i, 0, 0), 1), 0, 0), 0), 0) = operands[0];
  emit_insn (i);
})

(define_insn "atomic_nand<mode>_hard_1"
  [(set (mem:QIHI (match_operand:SI 0 "arith_reg_operand" "r"))
        (unspec:QIHI
          [(not:QIHI (and:QIHI (mem:QIHI (match_dup 0))
             (match_operand:QIHI 1 "logical_operand" "rK08")))]
          UNSPEC_ATOMIC))
   (set (reg:SI T_REG) (const_int 1))
   (clobber (reg:SI R0_REG))
   (clobber (match_scratch:SI 2 "=&r"))
   (clobber (match_scratch:SI 3 "=0"))]
  "TARGET_ATOMIC_HARD_LLCS"
{
  return "\r mov #-4,%2" "\n"
         " and %0,%2" "\n"
         " xor %2,%0" "\n"
         " add r15,%0" "\n"
         " add #-4,%0" "\n"
         "0: movli.l @%2,r0" "\n"
         " mov.l r0,@-r15" "\n"
         " mov.<bw> @%0,r0" "\n"
         " and %1,r0" "\n"
         " not r0,r0" "\n"
         " mov.<bw> r0,@%0" "\n"
         " mov.l @r15+,r0" "\n"
         " movco.l r0,@%2" "\n"
         " bf 0b";
}
  [(set_attr "length" "28")])

(define_insn_and_split "atomic_fetch_nand<mode>_soft_gusa"
  [(set (match_operand:QIHISI 0 "arith_reg_dest" "=&u")
        (match_operand:QIHISI 1 "atomic_mem_operand_1" "=AraAdd"))
   (set (match_dup 1)
        (unspec:QIHISI
          [(not:QIHISI
             (and:QIHISI (match_dup 1)
               (match_operand:QIHISI 2 "arith_reg_operand" "u")))]
          UNSPEC_ATOMIC))
   (clobber (match_scratch:QIHISI 3 "=&u"))
   (clobber (reg:SI R0_REG))
   (clobber (reg:SI R1_REG))]
  "TARGET_ATOMIC_SOFT_GUSA"
{
  return "\r mova 1f,r0" "\n"
         " mov r15,r1" "\n"
         " .align 2" "\n"
         " mov #(0f-1f),r15" "\n"
         "0: mov.<bwl> %1,%0" "\n"
         " mov %2,%3" "\n"
         " and %0,%3" "\n"
         " not %3,%3" "\n"
         " mov.<bwl> %3,%1" "\n"
         "1: mov r1,r15";
}
  "&& can_create_pseudo_p () && optimize
   && sh_reg_dead_or_unused_after_insn (insn, REGNO (operands[0]))"
  [(const_int 0)]
{
  emit_insn (gen_atomic_nand_fetch<mode>_soft_gusa (gen_reg_rtx (<MODE>mode),
                                                    operands[1], operands[2]));
}
  [(set_attr "length" "20")])

(define_insn_and_split "atomic_fetch_nand<mode>_soft_tcb"
  [(set (match_operand:QIHISI 0 "arith_reg_dest" "=&r")
        (match_operand:QIHISI 1 "atomic_mem_operand_1" "=SraSdd"))
   (set (match_dup 1)
        (unspec:QIHISI
          [(not:QIHISI
             (and:QIHISI (match_dup 1)
               (match_operand:QIHISI 2 "logical_operand" "rK08")))]
          UNSPEC_ATOMIC))
   (use (match_operand:SI 3 "gbr_displacement"))
   (clobber (reg:SI R0_REG))
   (clobber (reg:SI R1_REG))]
  "TARGET_ATOMIC_SOFT_TCB"
{
  return "\r mova 1f,r0" "\n"
         " mov #(0f-1f),r1" "\n"
         " .align 2" "\n"
         " mov.l r0,@(%O3,gbr)" "\n"
         "0: mov.<bwl> %1,r0" "\n"
         " mov r0,%0" "\n"
         " and %2,r0" "\n"
         " not r0,r0" "\n"
         " mov.<bwl> r0,%1" "\n"
         "1: mov #0,r0" "\n"
         " mov.l r0,@(%O3,gbr)";
}
  "&& can_create_pseudo_p () && optimize
   && sh_reg_dead_or_unused_after_insn (insn, REGNO (operands[0]))"
  [(const_int 0)]
{
  emit_insn (gen_atomic_nand<mode>_soft_tcb (operands[1], operands[2],
                                             operands[3]));
}
  [(set_attr "length" "22")])

(define_insn "atomic_nand<mode>_soft_tcb"
  [(set (match_operand:QIHISI 0 "atomic_mem_operand_1" "=SraSdd")
        (unspec:QIHISI
          [(not:QIHISI
             (and:QIHISI (match_dup 0)
               (match_operand:QIHISI 1 "logical_operand" "rK08")))]
          UNSPEC_ATOMIC))
   (use (match_operand:SI 2 "gbr_displacement"))
   (clobber (reg:SI R0_REG))
   (clobber (reg:SI R1_REG))]
  "TARGET_ATOMIC_SOFT_TCB"
{
  return "\r mova 1f,r0" "\n"
         " .align 2" "\n"
         " mov #(0f-1f),r1" "\n"
         " mov.l r0,@(%O2,gbr)" "\n"
         "0: mov.<bwl> %0,r0" "\n"
         " and %1,r0" "\n"
         " not r0,r0" "\n"
         " mov.<bwl> r0,%0" "\n"
         "1: mov #0,r0" "\n"
         " mov.l r0,@(%O2,gbr)";
}
  [(set_attr "length" "20")])

(define_insn_and_split "atomic_fetch_nand<mode>_soft_imask"
  [(set (match_operand:QIHISI 0 "arith_reg_dest" "=&r")
        (match_operand:QIHISI 1 "atomic_mem_operand_1" "=SraSdd"))
   (set (match_dup 1)
        (unspec:QIHISI
          [(not:QIHISI
             (and:QIHISI (match_dup 1)
               (match_operand:QIHISI 2 "logical_operand" "rK08")))]
          UNSPEC_ATOMIC))
   (clobber (reg:SI R0_REG))
   (clobber (match_scratch:SI 3 "=&r"))]
  "TARGET_ATOMIC_SOFT_IMASK"
{
  return "\r stc sr,r0" "\n"
         " mov r0,%3" "\n"
         " or #0xF0,r0" "\n"
         " ldc r0,sr" "\n"
         " mov.<bwl> %1,r0" "\n"
         " mov r0,%0" "\n"
         " and %2,r0" "\n"
         " not r0,r0" "\n"
         " mov.<bwl> r0,%1" "\n"
         " ldc %3,sr";
}
  "&& can_create_pseudo_p () && optimize
   && sh_reg_dead_or_unused_after_insn (insn, REGNO (operands[0]))"
  [(const_int 0)]
{
  emit_insn (gen_atomic_nand_fetch<mode>_soft_imask (gen_reg_rtx (<MODE>mode),
                                                     operands[1], operands[2]));
}
  [(set_attr "length" "20")])

;;------------------------------------------------------------------------------
;; read - add|sub|or|and|xor|nand - write - return new value
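;; I.e. these implement { new = *mem <op> val; *mem = new; return new; }.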
1569
1570 (define_expand "atomic_<fetchop_name>_fetch<mode>"
1571 [(set (match_operand:QIHISI 0 "arith_reg_dest")
1572 (FETCHOP:QIHISI
1573 (match_operand:QIHISI 1 "atomic_mem_operand_1")
1574 (match_operand:QIHISI 2 "<fetchop_predicate_1>")))
1575 (set (match_dup 1)
1576 (unspec:QIHISI
1577 [(FETCHOP:QIHISI (match_dup 1) (match_dup 2))]
1578 UNSPEC_ATOMIC))
1579 (match_operand:SI 3 "const_int_operand" "")]
1580 "TARGET_ATOMIC_ANY"
1581 {
1582 rtx mem = operands[1];
1583 rtx atomic_insn;
1584
1585 if (TARGET_ATOMIC_HARD_LLCS
1586 || (TARGET_SH4A && <MODE>mode == SImode && !TARGET_ATOMIC_STRICT))
1587 atomic_insn = gen_atomic_<fetchop_name>_fetch<mode>_hard (operands[0], mem,
1588 operands[2]);
1589 else if (TARGET_ATOMIC_SOFT_GUSA)
1590 atomic_insn = gen_atomic_<fetchop_name>_fetch<mode>_soft_gusa (operands[0],
1591 mem, operands[2]);
1592 else if (TARGET_ATOMIC_SOFT_TCB)
1593 atomic_insn = gen_atomic_<fetchop_name>_fetch<mode>_soft_tcb (operands[0],
1594 mem, operands[2], TARGET_ATOMIC_SOFT_TCB_GBR_OFFSET_RTX);
1595 else if (TARGET_ATOMIC_SOFT_IMASK)
1596 atomic_insn = gen_atomic_<fetchop_name>_fetch<mode>_soft_imask (operands[0],
1597 mem, operands[2]);
1598 else
1599 FAIL;
1600
1601 emit_insn (atomic_insn);
1602
1603 if (<MODE>mode == QImode)
1604 emit_insn (gen_zero_extendqisi2 (gen_lowpart (SImode, operands[0]),
1605 operands[0]));
1606 else if (<MODE>mode == HImode)
1607 emit_insn (gen_zero_extendhisi2 (gen_lowpart (SImode, operands[0]),
1608 operands[0]));
1609 DONE;
1610 })
1611
1612 (define_insn "atomic_<fetchop_name>_fetchsi_hard"
1613 [(set (match_operand:SI 0 "arith_reg_dest" "=&z")
1614 (FETCHOP:SI
1615 (match_operand:SI 1 "atomic_mem_operand_1" "=Sra")
1616 (match_operand:SI 2 "<fetchop_predicate_1>"
1617 "<fetchop_constraint_1_llcs>")))
1618 (set (match_dup 1)
1619 (unspec:SI
1620 [(FETCHOP:SI (match_dup 1) (match_dup 2))]
1621 UNSPEC_ATOMIC))
1622 (set (reg:SI T_REG) (const_int 1))]
1623 "TARGET_ATOMIC_HARD_LLCS
1624 || (TARGET_SH4A && TARGET_ATOMIC_ANY && !TARGET_ATOMIC_STRICT)"
1625 {
1626 return "\r0: movli.l %1,%0" "\n"
1627 " <fetchop_name> %2,%0" "\n"
1628 " movco.l %0,%1" "\n"
1629 " bf 0b";
1630 }
1631 [(set_attr "length" "8")])
1632
1633 ;; Combine pattern for xor (val, -1) / nand (val, -1).
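;; E.g. '__atomic_xor_fetch (&x, -1, ...)' and '__atomic_nand_fetch (&x,
;; -1, ...)' both compute ~x atomically; combine folds the operation with
;; the constant -1 into the single (not ...) form matched here.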
1634 (define_insn "atomic_not_fetchsi_hard"
1635 [(set (match_operand:SI 0 "arith_reg_dest" "=&z")
1636 (not:SI (match_operand:SI 1 "atomic_mem_operand_1" "=Sra")))
1637 (set (match_dup 1)
1638 (unspec:SI [(not:SI (match_dup 1))] UNSPEC_ATOMIC))
1639 (set (reg:SI T_REG) (const_int 1))]
1640 "TARGET_ATOMIC_HARD_LLCS
1641 || (TARGET_SH4A && TARGET_ATOMIC_ANY && !TARGET_ATOMIC_STRICT)"
1642 {
1643 return "\r0: movli.l %1,%0" "\n"
1644 " not %0,%0" "\n"
1645 " movco.l %0,%1" "\n"
1646 " bf 0b";
1647 }
1648 [(set_attr "length" "8")])
1649
1650 ;; The QIHImode llcs patterns modify the address register of the memory
1651 ;; operand.  In order to express that, we have to open-code the memory
1652 ;; operand.  Initially the insn is expanded like every other atomic insn,
1653 ;; using the memory operand.  In the split1 pass the insn is converted
1654 ;; and the memory operand's address register is exposed.
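;;
;; In C-like pseudo code (for exposition only; 'load_linked', 'push' etc.
;; are hypothetical helpers), the _hard_1 sequence below does roughly:
;;
;;   wordp = addr & ~3;             // SImode word containing the value
;;   slot  = (addr & 3) + r15 - 4;  // same byte/word inside the copy that
;;                                  // is pushed onto the stack below
;;   do {
;;     r0 = load_linked (wordp);    // movli.l @wordp,r0
;;     push (r0);                   // mov.l r0,@-r15  (r15 -= 4)
;;     result = *slot OP val;       // RMW on the stacked copy
;;     *slot = result;
;;     r0 = pop ();                 // mov.l @r15+,r0
;;   } while (!store_conditional (r0, wordp));  // movco.l r0,@wordp; bf 0b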
1655 (define_insn_and_split "atomic_<fetchop_name>_fetch<mode>_hard"
1656 [(set (match_operand:QIHI 0 "arith_reg_dest" "=&r")
1657 (FETCHOP:QIHI (match_operand:QIHI 1 "atomic_mem_operand_1")
1658 (match_operand:QIHI 2 "<fetchop_predicate_1>")))
1659 (set (match_dup 1) (unspec:QIHI [(FETCHOP:QIHI (match_dup 1) (match_dup 2))]
1660 UNSPEC_ATOMIC))
1661 (set (reg:SI T_REG) (const_int 1))
1662 (clobber (reg:SI R0_REG))]
1663 "TARGET_ATOMIC_HARD_LLCS && can_create_pseudo_p ()"
1664 "#"
1665 "&& 1"
1666 [(const_int 0)]
1667 {
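  /* If the result is unused, emit the shorter non-fetching variant.  */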
1668 if (optimize
1669 && sh_reg_dead_or_unused_after_insn (curr_insn, REGNO (operands[0])))
1670 emit_insn (gen_atomic_<fetchop_name><mode>_hard (operands[1], operands[2]));
1671 else
1672 {
1673 rtx i = gen_atomic_<fetchop_name>_fetch<mode>_hard_1 (
1674 operands[0], XEXP (operands[1], 0), operands[2]);
1675
1676 /* Replace the new mems in the new insn with the old mem to preserve
1677 aliasing info. */
1678 XEXP (XEXP (XVECEXP (i, 0, 0), 1), 0) = operands[1];
1679 XEXP (XVECEXP (i, 0, 1), 0) = operands[1];
1680 XEXP (XVECEXP (XEXP (XVECEXP (i, 0, 1), 1), 0, 0), 0) = operands[1];
1681 emit_insn (i);
1682 }
1683 })
1684
1685 (define_insn "atomic_<fetchop_name>_fetch<mode>_hard_1"
1686 [(set (match_operand:QIHI 0 "arith_reg_dest" "=&r")
1687 (FETCHOP:QIHI
1688 (mem:QIHI (match_operand:SI 1 "arith_reg_operand" "r"))
1689 (match_operand:QIHI 2 "<fetchop_predicate_1>"
1690 "<fetchop_constraint_1_llcs>")))
1691 (set (mem:QIHI (match_dup 1))
1692 (unspec:QIHI
1693 [(FETCHOP:QIHI (mem:QIHI (match_dup 1)) (match_dup 2))]
1694 UNSPEC_ATOMIC))
1695 (set (reg:SI T_REG) (const_int 1))
1696 (clobber (reg:SI R0_REG))
1697 (clobber (match_scratch:SI 3 "=&r"))
1698 (clobber (match_scratch:SI 4 "=1"))]
1699 "TARGET_ATOMIC_HARD_LLCS"
1700 {
1701 return "\r mov #-4,%3" "\n"
1702 " and %1,%3" "\n"
1703 " xor %3,%1" "\n"
1704 " add r15,%1" "\n"
1705 " add #-4,%1" "\n"
1706 "0: movli.l @%3,r0" "\n"
1707 " mov.l r0,@-r15" "\n"
1708 " mov.<bw> @%1,r0" "\n"
1709 " <fetchop_name> %2,r0" "\n"
1710 " mov.<bw> r0,@%1" "\n"
1711 " mov r0,%0" "\n"
1712 " mov.l @r15+,r0" "\n"
1713 " movco.l r0,@%3" "\n"
1714 " bf 0b";
1715 }
1716 [(set_attr "length" "28")])
1717
1718 ;; Combine pattern for xor (val, -1) / nand (val, -1).
1719 (define_insn_and_split "atomic_not_fetch<mode>_hard"
1720 [(set (match_operand:QIHI 0 "arith_reg_dest" "=&r")
1721 (not:QIHI (mem:QIHI (match_operand:SI 1 "arith_reg_operand" "r"))))
1722 (set (mem:QIHI (match_dup 1))
1723 (unspec:QIHI [(not:QIHI (mem:QIHI (match_dup 1)))] UNSPEC_ATOMIC))
1724 (set (reg:SI T_REG) (const_int 1))
1725 (clobber (reg:SI R0_REG))
1726 (clobber (match_scratch:SI 2 "=&r"))
1727 (clobber (match_scratch:SI 3 "=1"))]
1728 "TARGET_ATOMIC_HARD_LLCS"
1729 {
1730 return "\r mov #-4,%2" "\n"
1731 " and %1,%2" "\n"
1732 " xor %2,%1" "\n"
1733 " add r15,%1" "\n"
1734 " add #-4,%1" "\n"
1735 "0: movli.l @%2,r0" "\n"
1736 " mov.l r0,@-r15" "\n"
1737 " mov.<bw> @%1,r0" "\n"
1738 " not r0,r0" "\n"
1739 " mov.<bw> r0,@%1" "\n"
1740 " mov r0,%0" "\n"
1741 " mov.l @r15+,r0" "\n"
1742 " movco.l r0,@%2" "\n"
1743 " bf 0b";
1744 }
1745 "&& can_create_pseudo_p () && optimize
1746 && sh_reg_dead_or_unused_after_insn (insn, REGNO (operands[0]))"
1747 [(const_int 0)]
1748 {
1749 rtx i = gen_atomic_not<mode>_hard (operands[1]);
1750
1751 /* Replace the new mems in the new insn with the old mem to preserve
1752 aliasing info. */
1753 rtx m = XEXP (XEXP (XVECEXP (PATTERN (curr_insn), 0, 0), 1), 0);
1754 XEXP (XVECEXP (i, 0, 0), 0) = m;
1755 XEXP (XVECEXP (XEXP (XVECEXP (i, 0, 0), 1), 0, 0), 0) = m;
1756 emit_insn (i);
1757 }
1758 [(set_attr "length" "28")])
1759
1760 (define_insn "atomic_<fetchop_name>_fetch<mode>_soft_gusa"
1761 [(set (match_operand:QIHISI 0 "arith_reg_dest" "=&u")
1762 (FETCHOP:QIHISI
1763 (match_operand:QIHISI 1 "atomic_mem_operand_1" "=AraAdd")
1764 (match_operand:QIHISI 2 "<fetchop_predicate_1>"
1765 "<fetchop_constraint_1_gusa>")))
1766 (set (match_dup 1)
1767 (unspec:QIHISI
1768 [(FETCHOP:QIHISI (match_dup 1) (match_dup 2))]
1769 UNSPEC_ATOMIC))
1770 (clobber (reg:SI R0_REG))
1771 (clobber (reg:SI R1_REG))]
1772 "TARGET_ATOMIC_SOFT_GUSA"
1773 {
1774 return "\r mova 1f,r0" "\n"
1775 " mov r15,r1" "\n"
1776 " .align 2" "\n"
1777 " mov #(0f-1f),r15" "\n"
1778 "0: mov.<bwl> %1,%0" "\n"
1779 " <fetchop_name> %2,%0" "\n"
1780 " mov.<bwl> %0,%1" "\n"
1781 "1: mov r1,r15";
1782 }
1783 [(set_attr "length" "16")])
1784
1785 ;; Combine pattern for xor (val, -1) / nand (val, -1).
1786 (define_insn "atomic_not_fetch<mode>_soft_gusa"
1787 [(set (match_operand:QIHISI 0 "arith_reg_dest" "=&u")
1788 (not:QIHISI (match_operand:QIHISI 1 "atomic_mem_operand_1" "=AraAdd")))
1789 (set (match_dup 1)
1790 (unspec:QIHISI [(not:QIHISI (match_dup 1))] UNSPEC_ATOMIC))
1791 (clobber (reg:SI R0_REG))
1792 (clobber (reg:SI R1_REG))]
1793 "TARGET_ATOMIC_SOFT_GUSA"
1794 {
1795 return "\r mova 1f,r0" "\n"
1796 " mov r15,r1" "\n"
1797 " .align 2" "\n"
1798 " mov #(0f-1f),r15" "\n"
1799 "0: mov.<bwl> %1,%0" "\n"
1800 " not %0,%0" "\n"
1801 " mov.<bwl> %0,%1" "\n"
1802 "1: mov r1,r15";
1803 }
1804 [(set_attr "length" "16")])
1805
1806 (define_insn_and_split "atomic_<fetchop_name>_fetch<mode>_soft_tcb"
1807 [(set (match_operand:QIHISI 0 "arith_reg_dest" "=&r")
1808 (FETCHOP:QIHISI
1809 (match_operand:QIHISI 1 "atomic_mem_operand_1" "=SraSdd")
1810 (match_operand:QIHISI 2 "<fetchop_predicate_1>"
1811 "<fetchop_constraint_1_tcb>")))
1812 (set (match_dup 1)
1813 (unspec:QIHISI
1814 [(FETCHOP:QIHISI (match_dup 1) (match_dup 2))]
1815 UNSPEC_ATOMIC))
1816 (clobber (reg:SI R0_REG))
1817 (clobber (reg:SI R1_REG))
1818 (use (match_operand:SI 3 "gbr_displacement"))]
1819 "TARGET_ATOMIC_SOFT_TCB"
1820 {
1821 return "\r mova 1f,r0" "\n"
1822 " mov #(0f-1f),r1" "\n"
1823 " .align 2" "\n"
1824 " mov.l r0,@(%O3,gbr)" "\n"
1825 "0: mov.<bwl> %1,r0" "\n"
1826 " <fetchop_name> %2,r0" "\n"
1827 " mov.<bwl> r0,%1" "\n"
1828 "1: mov r0,%0" "\n"
1829 " mov #0,r0" "\n"
1830 " mov.l r0,@(%O3,gbr)";
1831 }
1832 "&& can_create_pseudo_p () && optimize
1833 && sh_reg_dead_or_unused_after_insn (insn, REGNO (operands[0]))"
1834 [(const_int 0)]
1835 {
1836 emit_insn (gen_atomic_<fetchop_name><mode>_soft_tcb (
1837 operands[1], operands[2], operands[3]));
1838 }
1839 [(set_attr "length" "20")])
1840
1841 ;; Combine pattern for xor (val, -1) / nand (val, -1).
1842 (define_insn_and_split "atomic_not_fetch<mode>_soft_tcb"
1843 [(set (match_operand:QIHISI 0 "arith_reg_dest" "=&r")
1844 (not:QIHISI (match_operand:QIHISI 1 "atomic_mem_operand_1" "=SraSdd")))
1845 (set (match_dup 1)
1846 (unspec:QIHISI [(not:QIHISI (match_dup 1))] UNSPEC_ATOMIC))
1847 (clobber (reg:SI R0_REG))
1848 (clobber (reg:SI R1_REG))
1849 (use (match_operand:SI 2 "gbr_displacement"))]
1850 "TARGET_ATOMIC_SOFT_TCB"
1851 {
1852 return "\r mova 1f,r0" "\n"
1853 " mov #(0f-1f),r1" "\n"
1854 " .align 2" "\n"
1855 " mov.l r0,@(%O2,gbr)" "\n"
1856 "0: mov.<bwl> %1,r0" "\n"
1857 " not r0,r0" "\n"
1858 " mov.<bwl> r0,%1" "\n"
1859 "1: mov r0,%0" "\n"
1860 " mov #0,r0" "\n"
1861 " mov.l r0,@(%O2,gbr)";
1862 }
1863 "&& can_create_pseudo_p () && optimize
1864 && sh_reg_dead_or_unused_after_insn (insn, REGNO (operands[0]))"
1865 [(const_int 0)]
1866 {
1867 emit_insn (gen_atomic_not<mode>_soft_tcb (operands[1], operands[2]));
1868 }
1869 [(set_attr "length" "20")])
1870
1871 (define_insn "atomic_<fetchop_name>_fetch<mode>_soft_imask"
1872 [(set (match_operand:QIHISI 0 "arith_reg_dest" "=&z")
1873 (FETCHOP:QIHISI
1874 (match_operand:QIHISI 1 "atomic_mem_operand_1" "=SraSdd")
1875 (match_operand:QIHISI 2 "<fetchop_predicate_1>"
1876 "<fetchop_constraint_1_imask>")))
1877 (set (match_dup 1)
1878 (unspec:QIHISI
1879 [(FETCHOP:QIHISI (match_dup 1) (match_dup 2))]
1880 UNSPEC_ATOMIC))
1881 (clobber (match_scratch:SI 3 "=&r"))]
1882 "TARGET_ATOMIC_SOFT_IMASK"
1883 {
1884 return "\r stc sr,%0" "\n"
1885 " mov %0,%3" "\n"
1886 " or #0xF0,%0" "\n"
1887 " ldc %0,sr" "\n"
1888 " mov.<bwl> %1,%0" "\n"
1889 " <fetchop_name> %2,%0" "\n"
1890 " mov.<bwl> %0,%1" "\n"
1891 " ldc %3,sr";
1892 }
1893 [(set_attr "length" "16")])
1894
1895 ;; Combine pattern for xor (val, -1) / nand (val, -1).
1896 (define_insn "atomic_not_fetch<mode>_soft_imask"
1897 [(set (match_operand:QIHISI 0 "arith_reg_dest" "=&z")
1898 (not:QIHISI (match_operand:QIHISI 1 "atomic_mem_operand_1" "=SraSdd")))
1899 (set (match_dup 1)
1900 (unspec:QIHISI [(not:QIHISI (match_dup 1))] UNSPEC_ATOMIC))
1901 (clobber (match_scratch:SI 2 "=&r"))]
1902 "TARGET_ATOMIC_SOFT_IMASK"
1903 {
1904 return "\r stc sr,%0" "\n"
1905 " mov %0,%2" "\n"
1906 " or #0xF0,%0" "\n"
1907 " ldc %0,sr" "\n"
1908 " mov.<bwl> %1,%0" "\n"
1909 " not %0,%0" "\n"
1910 " mov.<bwl> %0,%1" "\n"
1911 " ldc %2,sr";
1912 }
1913 [(set_attr "length" "16")])
1914
1915 (define_expand "atomic_nand_fetch<mode>"
1916 [(set (match_operand:QIHISI 0 "arith_reg_dest")
1917 (not:QIHISI (and:QIHISI
1918 (match_operand:QIHISI 1 "atomic_mem_operand_1")
1919 (match_operand:QIHISI 2 "atomic_logical_operand_1"))))
1920 (set (match_dup 1)
1921 (unspec:QIHISI
1922 [(not:QIHISI (and:QIHISI (match_dup 1) (match_dup 2)))]
1923 UNSPEC_ATOMIC))
1924 (match_operand:SI 3 "const_int_operand")]
1925 "TARGET_ATOMIC_ANY"
1926 {
1927 rtx mem = operands[1];
1928 rtx atomic_insn;
1929
1930 if (TARGET_ATOMIC_HARD_LLCS
1931 || (TARGET_SH4A && <MODE>mode == SImode && !TARGET_ATOMIC_STRICT))
1932 atomic_insn = gen_atomic_nand_fetch<mode>_hard (operands[0], mem,
1933 operands[2]);
1934 else if (TARGET_ATOMIC_SOFT_GUSA)
1935 atomic_insn = gen_atomic_nand_fetch<mode>_soft_gusa (operands[0], mem,
1936 operands[2]);
1937 else if (TARGET_ATOMIC_SOFT_TCB)
1938 atomic_insn = gen_atomic_nand_fetch<mode>_soft_tcb (operands[0], mem,
1939 operands[2], TARGET_ATOMIC_SOFT_TCB_GBR_OFFSET_RTX);
1940 else if (TARGET_ATOMIC_SOFT_IMASK)
1941 atomic_insn = gen_atomic_nand_fetch<mode>_soft_imask (operands[0], mem,
1942 operands[2]);
1943 else
1944 FAIL;
1945
1946 emit_insn (atomic_insn);
1947
1948 if (<MODE>mode == QImode)
1949 emit_insn (gen_zero_extendqisi2 (gen_lowpart (SImode, operands[0]),
1950 operands[0]));
1951 else if (<MODE>mode == HImode)
1952 emit_insn (gen_zero_extendhisi2 (gen_lowpart (SImode, operands[0]),
1953 operands[0]));
1954 DONE;
1955 })
1956
1957 (define_insn "atomic_nand_fetchsi_hard"
1958 [(set (match_operand:SI 0 "arith_reg_dest" "=&z")
1959 (not:SI (and:SI (match_operand:SI 1 "atomic_mem_operand_1" "=Sra")
1960 (match_operand:SI 2 "logical_operand" "rK08"))))
1961 (set (match_dup 1)
1962 (unspec:SI
1963 [(not:SI (and:SI (match_dup 1) (match_dup 2)))]
1964 UNSPEC_ATOMIC))
1965 (set (reg:SI T_REG) (const_int 1))]
1966 "TARGET_ATOMIC_HARD_LLCS
1967 || (TARGET_SH4A && TARGET_ATOMIC_ANY && !TARGET_ATOMIC_STRICT)"
1968 {
1969 return "\r0: movli.l %1,%0" "\n"
1970 " and %2,%0" "\n"
1971 " not %0,%0" "\n"
1972 " movco.l %0,%1" "\n"
1973 " bf 0b";
1974 }
1975 [(set_attr "length" "10")])
1976
1977 ;; The QIHImode llcs patterns modify the address register of the memory
1978 ;; operand.  In order to express that, we have to open-code the memory
1979 ;; operand.  Initially the insn is expanded like every other atomic insn,
1980 ;; using the memory operand.  In the split1 pass the insn is converted
1981 ;; and the memory operand's address register is exposed.
1982 (define_insn_and_split "atomic_nand_fetch<mode>_hard"
1983 [(set (match_operand:QIHI 0 "arith_reg_dest" "=&r")
1984 (not:QIHI (and:QIHI (match_operand:QIHI 1 "atomic_mem_operand_1")
1985 (match_operand:QIHI 2 "logical_operand"))))
1986 (set (match_dup 1)
1987 (unspec:QIHI [(not:QIHI (and:QIHI (match_dup 1) (match_dup 2)))]
1988 UNSPEC_ATOMIC))
1989 (set (reg:SI T_REG) (const_int 1))
1990 (clobber (reg:SI R0_REG))]
1991 "TARGET_ATOMIC_HARD_LLCS && can_create_pseudo_p ()"
1992 "#"
1993 "&& 1"
1994 [(const_int 0)]
1995 {
1996 if (optimize
1997 && sh_reg_dead_or_unused_after_insn (curr_insn, REGNO (operands[0])))
1998 emit_insn (gen_atomic_nand<mode>_hard (operands[1], operands[2]));
1999 else
2000 {
2001 rtx i = gen_atomic_nand_fetch<mode>_hard_1 (
2002 operands[0], XEXP (operands[1], 0), operands[2]);
2003
2004 /* Replace the new mems in the new insn with the old mem to preserve
2005 aliasing info. */
2006 XEXP (XEXP (XEXP (XVECEXP (i, 0, 0), 1), 0), 0) = operands[1];
2007 XEXP (XVECEXP (i, 0, 1), 0) = operands[1];
2008 XEXP (XEXP (XVECEXP (XEXP (XVECEXP (i, 0, 1), 1), 0, 0), 0),
2009 0) = operands[1];
2010 emit_insn (i);
2011 }
2012 })
2013
2014 (define_insn "atomic_nand_fetch<mode>_hard_1"
2015 [(set (match_operand:QIHI 0 "arith_reg_dest" "=&r")
2016 (not:QIHI
2017 (and:QIHI (mem:QIHI (match_operand:SI 1 "arith_reg_operand" "r"))
2018 (match_operand:QIHI 2 "logical_operand" "rK08"))))
2019 (set (mem:QIHI (match_dup 1))
2020 (unspec:QIHI
2021 [(not:QIHI (and:QIHI (mem:QIHI (match_dup 1)) (match_dup 2)))]
2022 UNSPEC_ATOMIC))
2023 (set (reg:SI T_REG) (const_int 1))
2024 (clobber (reg:SI R0_REG))
2025 (clobber (match_scratch:SI 3 "=&r"))
2026 (clobber (match_scratch:SI 4 "=1"))]
2027 "TARGET_ATOMIC_HARD_LLCS"
2028 {
2029 return "\r mov #-4,%3" "\n"
2030 " and %1,%3" "\n"
2031 " xor %3,%1" "\n"
2032 " add r15,%1" "\n"
2033 " add #-4,%1" "\n"
2034 "0: movli.l @%3,r0" "\n"
2035 " mov.l r0,@-r15" "\n"
2036 " mov.<bw> @%1,r0" "\n"
2037 " and %2,r0" "\n"
2038 " not r0,%0" "\n"
2039 " mov.<bw> %0,@%1" "\n"
2040 " mov.l @r15+,r0" "\n"
2041 " movco.l r0,@%3" "\n"
2042 " bf 0b";
2043 }
2044 [(set_attr "length" "28")])
2045
2046 (define_insn "atomic_nand_fetch<mode>_soft_gusa"
2047 [(set (match_operand:QIHISI 0 "arith_reg_dest" "=&u")
2048 (not:QIHISI (and:QIHISI
2049 (match_operand:QIHISI 1 "atomic_mem_operand_1" "=AraAdd")
2050 (match_operand:QIHISI 2 "arith_reg_operand" "u"))))
2051 (set (match_dup 1)
2052 (unspec:QIHISI
2053 [(not:QIHISI (and:QIHISI (match_dup 1) (match_dup 2)))]
2054 UNSPEC_ATOMIC))
2055 (clobber (reg:SI R0_REG))
2056 (clobber (reg:SI R1_REG))]
2057 "TARGET_ATOMIC_SOFT_GUSA"
2058 {
2059 return "\r mova 1f,r0" "\n"
2060 " .align 2" "\n"
2061 " mov r15,r1" "\n"
2062 " mov #(0f-1f),r15" "\n"
2063 "0: mov.<bwl> %1,%0" "\n"
2064 " and %2,%0" "\n"
2065 " not %0,%0" "\n"
2066 " mov.<bwl> %0,%1" "\n"
2067 "1: mov r1,r15";
2068 }
2069 [(set_attr "length" "18")])
2070
2071 (define_insn_and_split "atomic_nand_fetch<mode>_soft_tcb"
2072 [(set (match_operand:QIHISI 0 "arith_reg_dest" "=&r")
2073 (not:QIHISI (and:QIHISI
2074 (match_operand:QIHISI 1 "atomic_mem_operand_1" "=SraSdd")
2075 (match_operand:QIHISI 2 "logical_operand" "rK08"))))
2076 (set (match_dup 1)
2077 (unspec:QIHISI
2078 [(not:QIHISI (and:QIHISI (match_dup 1) (match_dup 2)))]
2079 UNSPEC_ATOMIC))
2080 (clobber (reg:SI R0_REG))
2081 (clobber (reg:SI R1_REG))
2082 (use (match_operand:SI 3 "gbr_displacement"))]
2083 "TARGET_ATOMIC_SOFT_TCB"
2084 {
2085 return "\r mova 1f,r0" "\n"
2086 " mov #(0f-1f),r1" "\n"
2087 " .align 2" "\n"
2088 " mov.l r0,@(%O3,gbr)" "\n"
2089 "0: mov.<bwl> %1,r0" "\n"
2090 " and %2,r0" "\n"
2091 " not r0,r0" "\n"
2092 " mov r0,%0" "\n"
2093 " mov.<bwl> r0,%1" "\n"
2094 "1: mov #0,r0" "\n"
2095 " mov.l r0,@(%O3,gbr)";
2096 }
2097 "&& can_create_pseudo_p () && optimize
2098 && sh_reg_dead_or_unused_after_insn (insn, REGNO (operands[0]))"
2099 [(const_int 0)]
2100 {
2101 emit_insn (gen_atomic_nand<mode>_soft_tcb (operands[1], operands[2],
2102 operands[3]));
2103 }
2104 [(set_attr "length" "22")])
2105
2106 (define_insn "atomic_nand_fetch<mode>_soft_imask"
2107 [(set (match_operand:QIHISI 0 "arith_reg_dest" "=&z")
2108 (not:QIHISI (and:QIHISI
2109 (match_operand:QIHISI 1 "atomic_mem_operand_1" "=SraSdd")
2110 (match_operand:QIHISI 2 "logical_operand" "rK08"))))
2111 (set (match_dup 1)
2112 (unspec:QIHISI
2113 [(not:QIHISI (and:QIHISI (match_dup 1) (match_dup 2)))]
2114 UNSPEC_ATOMIC))
2115 (clobber (match_scratch:SI 3 "=&r"))]
2116 "TARGET_ATOMIC_SOFT_IMASK"
2117 {
2118 return "\r stc sr,%0" "\n"
2119 " mov %0,%3" "\n"
2120 " or #0xF0,%0" "\n"
2121 " ldc %0,sr" "\n"
2122 " mov.<bwl> %1,%0" "\n"
2123 " and %2,%0" "\n"
2124 " not %0,%0" "\n"
2125 " mov.<bwl> %0,%1" "\n"
2126 " ldc %3,sr";
2127 }
2128 [(set_attr "length" "18")])
2129
2130 ;;------------------------------------------------------------------------------
2131 ;; read - test against zero - or with 0x80 - write - return test result
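;;
;; Illustration (hypothetical C, for exposition only): a call such as
;;
;;   bool was_set = __atomic_test_and_set (&flag, __ATOMIC_SEQ_CST);
;;
;; expands through "atomic_test_and_set" below and yields true iff the byte
;; was nonzero beforehand.  The value written is
;; targetm.atomic_test_and_set_trueval, which the SH port sets to 0x80 so
;; that all implementations interoperate with the 'tas.b' instruction.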
2132
2133 (define_expand "atomic_test_and_set"
2134 [(match_operand:SI 0 "register_operand" "") ;; bool result output
2135 (match_operand:QI 1 "memory_operand" "") ;; memory
2136 (match_operand:SI 2 "const_int_operand" "")] ;; model
2137 "(TARGET_ATOMIC_ANY || TARGET_ENABLE_TAS) && !TARGET_SHMEDIA"
2138 {
2139 rtx addr = force_reg (Pmode, XEXP (operands[1], 0));
2140
2141 if (TARGET_ENABLE_TAS)
2142 emit_insn (gen_tasb (addr));
2143 else
2144 {
2145 rtx val = gen_int_mode (targetm.atomic_test_and_set_trueval, QImode);
2146 val = force_reg (QImode, val);
2147
2148 if (TARGET_ATOMIC_HARD_LLCS)
2149 emit_insn (gen_atomic_test_and_set_hard (addr, val));
2150 else if (TARGET_ATOMIC_SOFT_GUSA)
2151 emit_insn (gen_atomic_test_and_set_soft_gusa (addr, val));
2152 else if (TARGET_ATOMIC_SOFT_TCB)
2153 emit_insn (gen_atomic_test_and_set_soft_tcb (addr, val,
2154 TARGET_ATOMIC_SOFT_TCB_GBR_OFFSET_RTX));
2155 else if (TARGET_ATOMIC_SOFT_IMASK)
2156 emit_insn (gen_atomic_test_and_set_soft_imask (addr, val));
2157 else
2158 FAIL;
2159 }
2160
2161 /* The result of the test op is the inverse of what we are
2162 supposed to return.  Thus invert the T bit.  The inversion can
2163 potentially be optimized away and folded into the surrounding code.  */
2164 emit_insn (gen_movnegt (operands[0], get_t_reg_rtx ()));
2165 DONE;
2166 })
2167
2168 (define_insn "tasb"
2169 [(set (reg:SI T_REG)
2170 (eq:SI (mem:QI (match_operand:SI 0 "register_operand" "r"))
2171 (const_int 0)))
2172 (set (mem:QI (match_dup 0))
2173 (unspec:QI [(const_int 128)] UNSPEC_ATOMIC))]
2174 "TARGET_ENABLE_TAS && !TARGET_SHMEDIA"
2175 "tas.b @%0"
2176 [(set_attr "insn_class" "co_group")])
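
;; 'tas.b @Rn' performs a bus-locked read-modify-write: it reads the byte
;; at @Rn, sets the T bit if that byte was zero, and writes the byte back
;; with bit 7 set.  The (const_int 128) in the unspec above mirrors the
;; 0x80 that ends up in memory.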
2177
2178 (define_insn "atomic_test_and_set_soft_gusa"
2179 [(set (reg:SI T_REG)
2180 (eq:SI (mem:QI (match_operand:SI 0 "register_operand" "u"))
2181 (const_int 0)))
2182 (set (mem:QI (match_dup 0))
2183 (unspec:QI [(match_operand:QI 1 "register_operand" "u")] UNSPEC_ATOMIC))
2184 (clobber (match_scratch:QI 2 "=&u"))
2185 (clobber (reg:SI R0_REG))
2186 (clobber (reg:SI R1_REG))]
2187 "TARGET_ATOMIC_SOFT_GUSA && !TARGET_ENABLE_TAS"
2188 {
2189 return "\r mova 1f,r0" "\n"
2190 " .align 2" "\n"
2191 " mov r15,r1" "\n"
2192 " mov #(0f-1f),r15" "\n"
2193 "0: mov.b @%0,%2" "\n"
2194 " mov.b %1,@%0" "\n"
2195 "1: mov r1,r15" "\n"
2196 " tst %2,%2";
2197 }
2198 [(set_attr "length" "16")])
2199
2200 (define_insn "atomic_test_and_set_soft_tcb"
2201 [(set (reg:SI T_REG)
2202 (eq:SI (mem:QI (match_operand:SI 0 "register_operand" "r"))
2203 (const_int 0)))
2204 (set (mem:QI (match_dup 0))
2205 (unspec:QI [(match_operand:QI 1 "register_operand" "r")] UNSPEC_ATOMIC))
2206 (use (match_operand:SI 2 "gbr_displacement"))
2207 (clobber (match_scratch:QI 3 "=&r"))
2208 (clobber (reg:SI R0_REG))
2209 (clobber (reg:SI R1_REG))]
2210 "TARGET_ATOMIC_SOFT_TCB && !TARGET_ENABLE_TAS"
2211 {
2212 return "\r mova 1f,r0" "\n"
2213 " mov #(0f-1f),r1" "\n"
2214 " .align 2" "\n"
2215 " mov.l r0,@(%O2,gbr)" "\n"
2216 "0: mov.b @%0,%3" "\n"
2217 " mov #0,r0" "\n"
2218 " mov.b %1,@%0" "\n"
2219 "1: mov.l r0,@(%O2,gbr)" "\n"
2220 " tst %3,%3";
2221 }
2222 [(set_attr "length" "18")])
2223
2224 (define_insn "atomic_test_and_set_soft_imask"
2225 [(set (reg:SI T_REG)
2226 (eq:SI (mem:QI (match_operand:SI 0 "register_operand" "r"))
2227 (const_int 0)))
2228 (set (mem:QI (match_dup 0))
2229 (unspec:QI [(match_operand:QI 1 "register_operand" "r")] UNSPEC_ATOMIC))
2230 (clobber (match_scratch:SI 2 "=&r"))
2231 (clobber (reg:SI R0_REG))]
2232 "TARGET_ATOMIC_SOFT_IMASK && !TARGET_ENABLE_TAS"
2233 {
2234 return "\r stc sr,r0" "\n"
2235 " mov r0,%2" "\n"
2236 " or #0xF0,r0" "\n"
2237 " ldc r0,sr" "\n"
2238 " mov.b @%0,r0" "\n"
2239 " mov.b %1,@%0" "\n"
2240 " ldc %2,sr" "\n"
2241 " tst r0,r0";
2242 }
2243 [(set_attr "length" "16")])
2244
2245 (define_insn "atomic_test_and_set_hard"
2246 [(set (reg:SI T_REG)
2247 (eq:SI (mem:QI (match_operand:SI 0 "register_operand" "r"))
2248 (const_int 0)))
2249 (set (mem:QI (match_dup 0))
2250 (unspec:QI [(match_operand:QI 1 "register_operand" "r")] UNSPEC_ATOMIC))
2251 (clobber (reg:SI R0_REG))
2252 (clobber (match_scratch:SI 2 "=&r"))
2253 (clobber (match_scratch:SI 3 "=&r"))
2254 (clobber (match_scratch:SI 4 "=0"))]
2255 "TARGET_ATOMIC_HARD_LLCS && !TARGET_ENABLE_TAS"
2256 {
2257 return "\r mov #-4,%2" "\n"
2258 " and %0,%2" "\n"
2259 " xor %2,%0" "\n"
2260 " add r15,%0" "\n"
2261 " add #-4,%0" "\n"
2262 "0: movli.l @%2,r0" "\n"
2263 " mov.l r0,@-r15" "\n"
2264 " mov.b @%0,%3" "\n"
2265 " mov.b %1,@%0" "\n"
2266 " mov.l @r15+,r0" "\n"
2267 " movco.l r0,@%2" "\n"
2268 " bf 0b" "\n"
2269 " tst %3,%3";
2270 }
2271 [(set_attr "length" "26")])
2272