1 ;; GCC machine description for SH synchronization instructions.
2 ;; Copyright (C) 2011-2015 Free Software Foundation, Inc.
4 ;; This file is part of GCC.
6 ;; GCC is free software; you can redistribute it and/or modify
7 ;; it under the terms of the GNU General Public License as published by
8 ;; the Free Software Foundation; either version 3, or (at your option)
11 ;; GCC is distributed in the hope that it will be useful,
12 ;; but WITHOUT ANY WARRANTY; without even the implied warranty of
13 ;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 ;; GNU General Public License for more details.
16 ;; You should have received a copy of the GNU General Public License
17 ;; along with GCC; see the file COPYING3. If not see
18 ;; <http://www.gnu.org/licenses/>.
21 ;; Atomic integer operations for the Renesas / SuperH SH CPUs.
23 ;; On SH CPUs atomic integer operations can be done either in 'software' or
24 ;; in 'hardware' in various styles. True hardware support was introduced
25 ;; with the SH4A. Some SH2A dual-core models (e.g. SH7205) also come with
26 ;; 'semaphore' hardware registers, but these are currently unsupported.
27 ;; All SH CPUs support the 'tas.b' instruction, which can be optionally used
28 ;; to implement the 'atomic_test_and_set' builtin.
29 ;; The following atomic options and models are supported.
31 ;; tas.b atomic_test_and_set (-mtas)
33 ;; Depending on the particular hardware configuration, usage of the 'tas.b'
34 ;; instruction might be undesired or even unsafe. Thus, it has to be
35 ;; enabled by the user explicitly. If it is not enabled, the
36 ;; 'atomic_test_and_set' builtin is implemented either with hardware or with
37 ;; software atomics, depending on which is enabled. It is also possible to
38 ;; enable the 'tas.b' instruction only, without enabling support for the
39 ;; other atomic operations.
42 ;; Hardware Atomics (-matomic-model=hard-llcs; SH4A only)
44 ;; Hardware atomics implement all atomic operations using the 'movli.l' and
45 ;; 'movco.l' instructions that are available on SH4A.  On multi-core hardware
46 ;; configurations hardware atomics is the only safe mode.
47 ;; However, it can also be safely used on single-core configurations.
48 ;; Since these instructions operate on SImode memory only, QImode and HImode
49 ;; have to be emulated with SImode and subreg masking, which results in
53 ;; gUSA Software Atomics (-matomic-model=soft-gusa; SH3*, SH4* only)
55 ;; On single-core systems there can only be one execution context running
56 ;; at a given point in time. This allows the usage of rewindable atomic
57 ;; sequences, which effectively emulate locked-load / conditional-store
58 ;; operations. This requires complementary support in the interrupt /
59 ;; exception handling code (e.g. kernel) and does not work safely on multi-
60 ;; core configurations.
62 ;; When an execution context is interrupted while it is in an atomic
63 ;; sequence, the interrupted context's PC is rewound to the beginning of
64 ;; the atomic sequence by the interrupt / exception handling code, before
65 ;; transferring control to another execution context. This is done by
68 ;; if (interrupted_context_in_atomic_sequence
69 ;; && interrupted_pc < atomic_exitpoint)
70 ;; interrupted_pc = atomic_entrypoint;
72 ;; This method is also known as gUSA ("g" User Space Atomicity) and the
73 ;; Linux kernel for SH3/SH4 implements support for such software atomic
74 ;; sequences. It can also be implemented in freestanding environments.
76 ;; For this the following atomic sequence ABI is used.
78 ;; r15 >= 0: Execution context is not in an atomic sequence.
80 ;; r15 < 0: Execution context is in an atomic sequence and r15
81 ;; holds the negative byte length of the atomic sequence.
82 ;; In this case the following applies:
84 ;; r0: PC of the first instruction after the atomic
85 ;; write-back instruction (exit point).
86 ;; The entry point PC of the atomic sequence can be
87 ;; determined by doing r0 + r15.
89 ;; r1: Saved r15 stack pointer before entering the
92 ;; An example atomic add sequence would look like:
94 ;; mova .Lend,r0 ! .Lend must be 4-byte aligned.
96 ;; .align 2 ! Insert aligning nop if needed.
97 ;; mov #(.Lstart - .Lend),r15 ! Enter atomic sequence
99 ;; mov.l @r4,r2 ! read value
100 ;; add r2,r5 ! modify value
101 ;; mov.l r5,@r4 ! write-back
103 ;; mov r1,r15 ! Exit atomic sequence
104 ;; ! r2 holds the previous value.
105 ;; ! r5 holds the new value.
107 ;; Notice that due to the restrictions of the mova instruction, the .Lend
108 ;; label must always be 4-byte aligned. Aligning the .Lend label would
109 ;; potentially insert a nop after the write-back instruction which could
110 ;; cause the sequence to be rewound, although it has already passed the
111 ;; write-back instruction. This would make it execute twice.
112 ;; For correct operation the atomic sequences must not be rewound after
113 ;; they have passed the write-back instruction.
115 ;; This model works only on SH3* and SH4* because the stack pointer (r15)
116 ;; is set to an invalid pointer temporarily. SH1* and SH2* CPUs will try
117 ;; to push SR and PC registers on the stack when an interrupt / exception
118 ;; occurs, and thus require the stack pointer (r15) always to be valid.
121 ;; TCB Software Atomics (-matomic-model=soft-tcb)
123 ;; This model is a variation of the gUSA model. The concept of rewindable
124 ;; atomic sequences is the same, but it does not use the stack pointer (r15)
125 ;; for signaling the 'is in atomic sequence' condition. Instead, a variable
126 ;; in the thread control block (TCB) is set to hold the exit point of the
127 ;; atomic sequence. This assumes that the GBR is used as a thread pointer
128 ;; register. The offset of the variable in the TCB to be used must be
129 ;; specified with an additional option 'gbr-offset', such as:
130 ;; -matomic-model=soft-tcb,gbr-offset=4
132 ;; For this model the following atomic sequence ABI is used.
134 ;; @(#x,gbr) == 0: Execution context is not in an atomic sequence.
136 ;; @(#x,gbr) != 0: Execution context is in an atomic sequence. In this
137 ;; case the following applies:
139 ;; @(#x,gbr): PC of the first instruction after the atomic
140 ;; write-back instruction (exit point).
142 ;; r1: Negative byte length of the atomic sequence.
143 ;; The entry point PC of the sequence can be
144 ;; determined by doing @(#x,gbr) + r1
146 ;; Note: #x is the user specified gbr-offset.
149 ;; Interrupt-Flipping Software Atomics (-matomic-model=soft-imask)
151 ;; This model achieves atomicity by temporarily disabling interrupts for
152 ;; the duration of the atomic sequence. This works only when the program
153 ;; runs in privileged mode but does not require any support from the
154 ;; interrupt / exception handling code. There is no particular ABI.
155 ;; To disable interrupts the SR.IMASK bits are set to '1111'.
156 ;; This method is not as efficient as the other software atomic models,
157 ;; since loading and storing SR (in order to flip interrupts on / off)
158 ;; requires using multi-cycle instructions. Moreover, it can potentially
159 ;; increase the interrupt latency which might be important for hard-realtime
163 ;; Compatibility Notes
165 ;; On single-core SH4A CPUs software atomic aware interrupt / exception code
166 ;; is actually compatible with user code that utilizes hardware atomics.
167 ;; Since SImode hardware atomic sequences are more compact on SH4A they are
168 ;; always used, regardless of the selected atomic model. This atomic model
169 ;; mixing can be disabled by setting the 'strict' flag, like:
170 ;; -matomic-model=soft-gusa,strict
172 ;; The software atomic models are generally compatible with each other,
173 ;; but the interrupt / exception handling code has to support both gUSA and
176 ;; The current atomic support is limited to QImode, HImode and SImode
177 ;; atomic operations. DImode operations could also be implemented but
178 ;; would require some ABI modifications to support multiple-instruction
179 ;; write-back. This is because SH1/SH2/SH3/SH4 does not have a DImode
180 ;; store instruction. DImode stores must be split into two SImode stores.
;; Non-volatile unspec codes used by the atomic patterns in this file.
182 (define_c_enum "unspec" [
;; Volatile unspec codes (UNSPECV_*) used by the atomic patterns below.
186 (define_c_enum "unspecv" [
;; Insn used to widen a QImode/HImode value into a full SImode register
;; before comparing: sign-extend for the narrow modes, plain mov for SImode.
192 (define_mode_attr i124extend_insn [(QI "exts.b") (HI "exts.w") (SI "mov")])
;; The binary operations handled by the atomic fetch-op patterns below.
194 (define_code_iterator FETCHOP [plus minus ior xor and])
;; SH mnemonic / pattern-name fragment for each FETCHOP rtx code.
195 (define_code_attr fetchop_name
196 [(plus "add") (minus "sub") (ior "or") (xor "xor") (and "and")])
198 ;;------------------------------------------------------------------------------
201 ;; Only the hard_llcs SImode patterns can use an I08 for the comparison
202 ;; or for the new swapped in value.
;; Value operand for the compare-and-swap patterns: any arith register, or
;; an I08 immediate when the SImode hard-llcs patterns (whose constraints
;; accept rI08) will be used for the current atomic model.
203 (define_predicate "atomic_arith_operand_0"
204 (and (match_code "subreg,reg,const_int")
205 (ior (match_operand 0 "arith_reg_operand")
206 (and (match_test "satisfies_constraint_I08 (op)")
207 (match_test "mode == SImode")
208 (ior (match_test "TARGET_ATOMIC_HARD_LLCS")
209 (match_test "TARGET_ATOMIC_ANY && TARGET_SH4A
210 && !TARGET_ATOMIC_STRICT"))))))
212 ;; Displacement addressing can be used for all SImode atomic patterns, except
;; Memory operand for the compare-and-swap patterns: a simple mem always
;; matches; displacement addressing additionally matches only for SImode
;; when the hard-llcs (movli/movco) patterns will not be used.
214 (define_predicate "atomic_mem_operand_0"
215 (and (match_code "mem")
216 (ior (match_operand 0 "simple_mem_operand")
217 (and (match_test "mode == SImode")
218 (and (match_test "!TARGET_ATOMIC_HARD_LLCS")
219 (match_test "!TARGET_SH4A || TARGET_ATOMIC_STRICT"))
220 (match_operand 0 "displacement_mem_operand")
221 (match_operand 0 "short_displacement_mem_operand")))))
;; Expander for the compare-and-swap builtin.  Selects the hard (llcs),
;; soft-gusa, soft-tcb or soft-imask implementation according to the
;; active atomic model, zero-extends the QImode/HImode old value into
;; operand 1, and copies the T bit (comparison result) into the SImode
;; bool output (operand 0).
223 (define_expand "atomic_compare_and_swap<mode>"
224 [(match_operand:SI 0 "arith_reg_dest") ;; bool success output
225 (match_operand:QIHISI 1 "arith_reg_dest") ;; oldval output
226 (match_operand:QIHISI 2 "atomic_mem_operand_0") ;; memory
227 (match_operand:QIHISI 3 "atomic_arith_operand_0") ;; expected input
228 (match_operand:QIHISI 4 "atomic_arith_operand_0") ;; newval input
229 (match_operand:SI 5 "const_int_operand") ;; is_weak
230 (match_operand:SI 6 "const_int_operand") ;; success model
231 (match_operand:SI 7 "const_int_operand")] ;; failure model
234 rtx mem = operands[2];
235 rtx old_val = gen_lowpart (SImode, operands[1]);
236 rtx exp_val = operands[3];
237 rtx new_val = operands[4];
240 if (TARGET_ATOMIC_HARD_LLCS
241 || (TARGET_SH4A && <MODE>mode == SImode && !TARGET_ATOMIC_STRICT))
242 atomic_insn = gen_atomic_compare_and_swap<mode>_hard (old_val, mem,
244 else if (TARGET_ATOMIC_SOFT_GUSA)
245 atomic_insn = gen_atomic_compare_and_swap<mode>_soft_gusa (old_val, mem,
247 else if (TARGET_ATOMIC_SOFT_TCB)
248 atomic_insn = gen_atomic_compare_and_swap<mode>_soft_tcb (old_val, mem,
249 exp_val, new_val, TARGET_ATOMIC_SOFT_TCB_GBR_OFFSET_RTX);
250 else if (TARGET_ATOMIC_SOFT_IMASK)
251 atomic_insn = gen_atomic_compare_and_swap<mode>_soft_imask (old_val, mem,
256 emit_insn (atomic_insn);
258 if (<MODE>mode == QImode)
259 emit_insn (gen_zero_extendqisi2 (gen_lowpart (SImode, operands[1]),
261 else if (<MODE>mode == HImode)
262 emit_insn (gen_zero_extendhisi2 (gen_lowpart (SImode, operands[1]),
264 emit_insn (gen_movsi (operands[0], gen_rtx_REG (SImode, T_REG)));
;; SImode hardware CAS using a movli.l / movco.l retry sequence (length
;; attr: 14 bytes); clobbers r0.  The split does not change the insn
;; shape: it only re-tries operand substitution when the 'expected value'
;; register was set from an I08 constant that failed to propagate
;; (see the PR 64974 note below).
268 (define_insn_and_split "atomic_compare_and_swapsi_hard"
269 [(set (match_operand:SI 0 "arith_reg_dest" "=&r")
271 [(match_operand:SI 1 "atomic_mem_operand_0" "=Sra")
272 (match_operand:SI 2 "arith_operand" "rI08")
273 (match_operand:SI 3 "arith_operand" "rI08")]
276 (unspec_volatile:SI [(const_int 0)] UNSPECV_CMPXCHG_2))
278 (unspec_volatile:SI [(const_int 0)] UNSPECV_CMPXCHG_3))
279 (clobber (reg:SI R0_REG))]
280 "TARGET_ATOMIC_HARD_LLCS
281 || (TARGET_SH4A && TARGET_ATOMIC_ANY && !TARGET_ATOMIC_STRICT)"
283 return "\r0: movli.l %1,r0" "\n"
288 " movco.l r0,%1" "\n"
292 "&& can_create_pseudo_p () && !satisfies_constraint_I08 (operands[2])"
295 /* FIXME: Sometimes the 'expected value' operand is not propagated as
296 immediate value. See PR 64974. */
297 set_of_reg op2 = sh_find_set_of_reg (operands[2], curr_insn,
298 prev_nonnote_insn_bb);
299 if (op2.set_src != NULL && satisfies_constraint_I08 (op2.set_src))
301 rtx* r = &XVECEXP (XEXP (XVECEXP (PATTERN (curr_insn), 0, 0), 1), 0, 1);
302 validate_change (curr_insn, r, op2.set_src, false);
308 [(set_attr "length" "14")])
310 ;; The QIHImode llcs patterns modify the address register of the memory
311 ;; operand. In order to express that, we have to open code the memory
312 ;; operand. Initially the insn is expanded like every other atomic insn
313 ;; using the memory operand. In split1 the insn is converted and the
314 ;; memory operand's address register is exposed.
;; QImode/HImode hardware CAS.  Expanded with a normal memory operand;
;; split1 rewrites it into the _hard_1 insn below with the address
;; register of the memory operand exposed, re-installing the original mem
;; rtx into the new pattern.
315 (define_insn_and_split "atomic_compare_and_swap<mode>_hard"
316 [(set (match_operand:SI 0 "arith_reg_dest")
318 [(match_operand:QIHI 1 "atomic_mem_operand_0")
319 (match_operand:QIHI 2 "arith_reg_operand")
320 (match_operand:QIHI 3 "arith_reg_operand")]
323 (unspec_volatile:QIHI [(const_int 0)] UNSPECV_CMPXCHG_2))
325 (unspec_volatile:SI [(const_int 0)] UNSPECV_CMPXCHG_3))
326 (clobber (reg:SI R0_REG))]
327 "TARGET_ATOMIC_HARD_LLCS && can_create_pseudo_p ()"
332 rtx i = gen_atomic_compare_and_swap<mode>_hard_1 (
333 operands[0], XEXP (operands[1], 0), operands[2], operands[3]);
335 /* Replace the new mems in the new insn with the old mem to preserve
337 XVECEXP (XEXP (XVECEXP (i, 0, 0), 1), 0, 0) = operands[1];
338 XEXP (XVECEXP (i, 0, 1), 0) = operands[1];
;; QImode/HImode hardware CAS worker.  The movli.l/movco.l pair operates
;; on the aligned SImode word containing the byte/word (%1 masked with
;; #-4 into scratch %5) while the actual old/new values are accessed with
;; mov.<bw>.  The link value in r0 is spilled to the stack (@-r15) around
;; the byte/word access and reloaded before movco.l.
342 (define_insn "atomic_compare_and_swap<mode>_hard_1"
343 [(set (match_operand:SI 0 "arith_reg_dest" "=&r")
345 [(mem:QIHI (match_operand:SI 1 "arith_reg_operand" "r"))
346 (match_operand:QIHI 2 "arith_reg_operand" "r")
347 (match_operand:QIHI 3 "arith_reg_operand" "r")]
349 (set (mem:QIHI (match_dup 1))
350 (unspec_volatile:QIHI [(const_int 0)] UNSPECV_CMPXCHG_2))
352 (unspec_volatile:SI [(const_int 0)] UNSPECV_CMPXCHG_3))
353 (clobber (reg:SI R0_REG))
354 (clobber (match_scratch:SI 4 "=&r"))
355 (clobber (match_scratch:SI 5 "=&r"))
356 (clobber (match_scratch:SI 6 "=1"))]
357 "TARGET_ATOMIC_HARD_LLCS"
359 return "\r mov #-4,%5" "\n"
360 " <i124extend_insn> %2,%4" "\n"
365 "0: movli.l @%5,r0" "\n"
366 " mov.l r0,@-r15" "\n"
367 " mov.<bw> @%1,%0" "\n"
368 " mov.<bw> %3,@%1" "\n"
371 " mov.l @r15+,r0" "\n"
372 " movco.l r0,@%5" "\n"
376 [(set_attr "length" "30")])
;; gUSA software CAS.  'mova 1f,r0' and 'mov #(0f-1f),r15' enter the
;; rewindable sequence per the gUSA ABI described at the top of this
;; file; clobbers r0 and r1 (length attr: 20 bytes).  The 'u' constraints
;; keep the operands out of the registers the ABI reserves.
378 (define_insn "atomic_compare_and_swap<mode>_soft_gusa"
379 [(set (match_operand:SI 0 "arith_reg_dest" "=&u")
381 [(match_operand:QIHISI 1 "atomic_mem_operand_0" "=AraAdd")
382 (match_operand:QIHISI 2 "arith_reg_operand" "u")
383 (match_operand:QIHISI 3 "arith_reg_operand" "u")]
386 (unspec_volatile:QIHISI [(const_int 0)] UNSPECV_CMPXCHG_2))
388 (unspec_volatile:SI [(const_int 0)] UNSPECV_CMPXCHG_3))
389 (clobber (match_scratch:SI 4 "=&u"))
390 (clobber (reg:SI R0_REG))
391 (clobber (reg:SI R1_REG))]
392 "TARGET_ATOMIC_SOFT_GUSA"
394 return "\r mova 1f,r0" "\n"
395 " <i124extend_insn> %2,%4" "\n"
398 " mov #(0f-1f),r15" "\n"
399 "0: mov.<bwl> %1,%0" "\n"
402 " mov.<bwl> %3,%1" "\n"
405 [(set_attr "length" "20")])
;; TCB software CAS.  Per the soft-tcb ABI above: the exit point is
;; stored at @(gbr-offset) (operand 4) and the negative sequence length
;; is placed in r1.  NOTE(review): lines between the labels are elided in
;; this view — confirm the r0 value stored at label 1 against the full
;; source.
407 (define_insn "atomic_compare_and_swap<mode>_soft_tcb"
408 [(set (match_operand:SI 0 "arith_reg_dest" "=&r")
410 [(match_operand:QIHISI 1 "atomic_mem_operand_0" "=SraSdd")
411 (match_operand:QIHISI 2 "arith_reg_operand" "r")
412 (match_operand:QIHISI 3 "arith_reg_operand" "r")]
415 (unspec_volatile:QIHISI [(const_int 0)] UNSPECV_CMPXCHG_2))
417 (unspec_volatile:SI [(const_int 0)] UNSPECV_CMPXCHG_3))
418 (use (match_operand:SI 4 "gbr_displacement"))
419 (clobber (match_scratch:SI 5 "=&r"))
420 (clobber (reg:SI R0_REG))
421 (clobber (reg:SI R1_REG))]
422 "TARGET_ATOMIC_SOFT_TCB"
424 return "\r mova 1f,r0" "\n"
426 " <i124extend_insn> %2,%5" "\n"
427 " mov #(0f-1f),r1" "\n"
428 " mov.l r0,@(%O4,gbr)" "\n"
429 "0: mov.<bwl> %1,%0" "\n"
433 " mov.<bwl> %3,%1" "\n"
434 "1: mov.l r0,@(%O4,gbr)";
436 [(set_attr "length" "22")])
;; Interrupt-mask software CAS: SR is saved into %0 with 'stc sr' (hence
;; the =&z constraint) and the comparison result is merged back into the
;; saved SR before restoring it, as the C comment below explains.  Two
;; alternative sequences are emitted; SH2A uses bst and is one insn
;; shorter (see the conditional length attribute).
438 (define_insn "atomic_compare_and_swap<mode>_soft_imask"
439 [(set (match_operand:SI 0 "arith_reg_dest" "=&z")
441 [(match_operand:QIHISI 1 "atomic_mem_operand_0" "=SraSdd")
442 (match_operand:QIHISI 2 "arith_reg_operand" "r")
443 (match_operand:QIHISI 3 "arith_reg_operand" "r")]
446 (unspec_volatile:QIHISI [(const_int 0)] UNSPECV_CMPXCHG_2))
448 (unspec_volatile:SI [(const_int 0)] UNSPECV_CMPXCHG_3))
449 (clobber (match_scratch:SI 4 "=&r"))
450 (clobber (match_scratch:SI 5 "=&r"))]
451 "TARGET_ATOMIC_SOFT_IMASK"
453 /* The comparison result is supposed to be in T_REG.
454 Notice that restoring SR will overwrite the T_REG. We handle this by
455 rotating the T_REG into the saved SR before restoring SR. On SH2A we
456 can do one insn shorter by using the bst insn. */
458 return "\r stc sr,%0" "\n"
459 " <i124extend_insn> %2,%4" "\n"
464 " mov.<bwl> %1,%0" "\n"
467 " mov.<bwl> %3,%1" "\n"
471 return "\r stc sr,%0" "\n"
472 " <i124extend_insn> %2,%4" "\n"
476 " mov.<bwl> %1,%0" "\n"
480 " mov.<bwl> %3,%1" "\n"
483 [(set (attr "length") (if_then_else (match_test "!TARGET_SH2A")
485 (const_string "22")))])
487 ;;------------------------------------------------------------------------------
488 ;; read - write - return old value
;; Expander for atomic exchange (read, write new value, return old).
;; Dispatches to the hard / soft-gusa / soft-tcb / soft-imask variant by
;; atomic model, then zero-extends the QImode/HImode result into the
;; SImode lowpart of operand 0.
490 (define_expand "atomic_exchange<mode>"
491 [(match_operand:QIHISI 0 "arith_reg_dest") ;; oldval output
492 (match_operand:QIHISI 1 "atomic_mem_operand_0") ;; memory
493 (match_operand:QIHISI 2 "atomic_arith_operand_0") ;; newval input
494 (match_operand:SI 3 "const_int_operand")] ;; memory model
497 rtx mem = operands[1];
498 rtx val = operands[2];
501 if (TARGET_ATOMIC_HARD_LLCS
502 || (TARGET_SH4A && <MODE>mode == SImode && !TARGET_ATOMIC_STRICT))
503 atomic_insn = gen_atomic_exchange<mode>_hard (operands[0], mem, val);
504 else if (TARGET_ATOMIC_SOFT_GUSA)
505 atomic_insn = gen_atomic_exchange<mode>_soft_gusa (operands[0], mem, val);
506 else if (TARGET_ATOMIC_SOFT_TCB)
507 atomic_insn = gen_atomic_exchange<mode>_soft_tcb (operands[0], mem, val,
508 TARGET_ATOMIC_SOFT_TCB_GBR_OFFSET_RTX);
509 else if (TARGET_ATOMIC_SOFT_IMASK)
510 atomic_insn = gen_atomic_exchange<mode>_soft_imask (operands[0], mem, val);
514 emit_insn (atomic_insn);
516 if (<MODE>mode == QImode)
517 emit_insn (gen_zero_extendqisi2 (gen_lowpart (SImode, operands[0]),
519 else if (<MODE>mode == HImode)
520 emit_insn (gen_zero_extendhisi2 (gen_lowpart (SImode, operands[0]),
;; SImode hardware exchange via a movli.l / movco.l retry loop (length
;; attr: 10 bytes); sets T to 1 on completion and clobbers r0.
525 (define_insn "atomic_exchangesi_hard"
526 [(set (match_operand:SI 0 "arith_reg_dest" "=&r")
527 (match_operand:SI 1 "atomic_mem_operand_0" "=Sra"))
530 [(match_operand:SI 2 "arith_operand" "rI08")] UNSPEC_ATOMIC))
531 (set (reg:SI T_REG) (const_int 1))
532 (clobber (reg:SI R0_REG))]
533 "TARGET_ATOMIC_HARD_LLCS
534 || (TARGET_SH4A && TARGET_ATOMIC_ANY && !TARGET_ATOMIC_STRICT)"
536 return "\r0: movli.l %1,r0" "\n"
539 " movco.l r0,%1" "\n"
542 [(set_attr "length" "10")])
544 ;; The QIHImode llcs patterns modify the address register of the memory
545 ;; operand. In order to express that, we have to open code the memory
546 ;; operand. Initially the insn is expanded like every other atomic insn
547 ;; using the memory operand. In split1 the insn is converted and the
548 ;; memory operand's address register is exposed.
;; QImode/HImode hardware exchange; split1 rewrites it into the _hard_1
;; insn below with the memory address register exposed, re-installing
;; the original mem rtx (see the comment above).
549 (define_insn_and_split "atomic_exchange<mode>_hard"
550 [(set (match_operand:QIHI 0 "arith_reg_dest")
551 (match_operand:QIHI 1 "atomic_mem_operand_0"))
554 [(match_operand:QIHI 2 "arith_reg_operand")] UNSPEC_ATOMIC))
555 (set (reg:SI T_REG) (const_int 1))
556 (clobber (reg:SI R0_REG))]
557 "TARGET_ATOMIC_HARD_LLCS && can_create_pseudo_p ()"
562 rtx i = gen_atomic_exchange<mode>_hard_1 (operands[0], XEXP (operands[1], 0),
565 /* Replace the new mems in the new insn with the old mem to preserve
567 XEXP (XVECEXP (i, 0, 0), 1) = operands[1];
568 XEXP (XVECEXP (i, 0, 1), 0) = operands[1];
;; QImode/HImode hardware exchange worker: movli.l/movco.l retry loop on
;; the aligned SImode word containing the byte/word (%1 masked with #-4
;; into scratch %3); the link value in r0 is spilled to the stack around
;; the mov.<bw> accesses.
572 (define_insn "atomic_exchange<mode>_hard_1"
573 [(set (match_operand:QIHI 0 "arith_reg_dest" "=&r")
574 (mem:QIHI (match_operand:SI 1 "arith_reg_operand" "r")))
575 (set (mem:QIHI (match_dup 1))
577 [(match_operand:QIHI 2 "arith_reg_operand" "r")] UNSPEC_ATOMIC))
578 (set (reg:SI T_REG) (const_int 1))
579 (clobber (reg:SI R0_REG))
580 (clobber (match_scratch:SI 3 "=&r"))
581 (clobber (match_scratch:SI 4 "=1"))]
582 "TARGET_ATOMIC_HARD_LLCS"
584 return "\r mov #-4,%3" "\n"
589 "0: movli.l @%3,r0" "\n"
590 " mov.l r0,@-r15" "\n"
591 " mov.<bw> @%1,%0" "\n"
592 " mov.<bw> %2,@%1" "\n"
593 " mov.l @r15+,r0" "\n"
594 " movco.l r0,@%3" "\n"
597 [(set_attr "length" "24")])
;; gUSA software exchange (length attr: 14 bytes); enters the rewindable
;; sequence via mova/r15 per the gUSA ABI; clobbers r0 and r1.
599 (define_insn "atomic_exchange<mode>_soft_gusa"
600 [(set (match_operand:QIHISI 0 "arith_reg_dest" "=&u")
601 (match_operand:QIHISI 1 "atomic_mem_operand_0" "=AraAdd"))
604 [(match_operand:QIHISI 2 "arith_reg_operand" "u")] UNSPEC_ATOMIC))
605 (clobber (reg:SI R0_REG))
606 (clobber (reg:SI R1_REG))]
607 "TARGET_ATOMIC_SOFT_GUSA"
609 return "\r mova 1f,r0" "\n"
612 " mov #(0f-1f),r15" "\n"
613 "0: mov.<bwl> %1,%0" "\n"
614 " mov.<bwl> %2,%1" "\n"
617 [(set_attr "length" "14")])
;; TCB software exchange: stores the exit point at @(gbr-offset)
;; (operand 3) and the negative sequence length in r1, per the soft-tcb
;; ABI described at the top of this file.
619 (define_insn "atomic_exchange<mode>_soft_tcb"
620 [(set (match_operand:QIHISI 0 "arith_reg_dest" "=&r")
621 (match_operand:QIHISI 1 "atomic_mem_operand_0" "=SraSdd"))
624 [(match_operand:QIHISI 2 "arith_reg_operand" "r")] UNSPEC_ATOMIC))
625 (clobber (reg:SI R0_REG))
626 (clobber (reg:SI R1_REG))
627 (use (match_operand:SI 3 "gbr_displacement"))]
628 "TARGET_ATOMIC_SOFT_TCB"
630 return "\r mova 1f,r0" "\n"
631 " mov #(0f-1f),r1" "\n"
633 " mov.l r0,@(%O3,gbr)" "\n"
634 "0: mov.<bwl> %1,%0" "\n"
636 " mov.<bwl> %2,%1" "\n"
637 "1: mov.l r0,@(%O3,gbr)";
639 [(set_attr "length" "16")])
;; Interrupt-mask software exchange: SR is saved into %0 with 'stc sr'
;; (hence the =&z constraint) while interrupts are masked for the
;; duration of the sequence (see the soft-imask model notes above).
641 (define_insn "atomic_exchange<mode>_soft_imask"
642 [(set (match_operand:QIHISI 0 "arith_reg_dest" "=&z")
643 (match_operand:QIHISI 1 "atomic_mem_operand_0" "=SraSdd"))
646 [(match_operand:QIHISI 2 "arith_reg_operand" "r")] UNSPEC_ATOMIC))
647 (clobber (match_scratch:SI 3 "=&r"))]
648 "TARGET_ATOMIC_SOFT_IMASK"
650 return "\r stc sr,%0" "\n"
654 " mov.<bwl> %1,%0" "\n"
655 " mov.<bwl> %2,%1" "\n"
658 [(set_attr "length" "14")])
660 ;;------------------------------------------------------------------------------
661 ;; read - add|sub|or|and|xor|nand - write - return old value
663 ;; atomic_arith_operand_1 can be used by any atomic type for a plus op,
664 ;; since there's no r0 restriction.
;; Matches an arith register or any I08 immediate, with no per-model
;; restriction (see comment above).
665 (define_predicate "atomic_arith_operand_1"
666 (and (match_code "subreg,reg,const_int")
667 (ior (match_operand 0 "arith_reg_operand")
668 (match_test "satisfies_constraint_I08 (op)"))))
670 ;; atomic_logical_operand_1 can be used by the hard_llcs, tcb and soft_imask
671 ;; patterns only due to its r0 restriction.
;; Matches an arith register, or a K08 immediate for the models whose
;; logical-op sequences can use the r0-based immediate insn forms.
672 (define_predicate "atomic_logical_operand_1"
673 (and (match_code "subreg,reg,const_int")
674 (ior (match_operand 0 "arith_reg_operand")
675 (and (match_test "satisfies_constraint_K08 (op)")
676 (ior (match_test "TARGET_ATOMIC_HARD_LLCS")
677 (match_test "TARGET_ATOMIC_SOFT_IMASK")
678 (match_test "TARGET_ATOMIC_SOFT_TCB")
679 (match_test "TARGET_ATOMIC_ANY && TARGET_SH4A
681 && !TARGET_ATOMIC_STRICT"))))))
;; Predicate used for the value operand of each FETCHOP in the fetch-op
;; patterns: minus is register-only, logical ops use the r0-restricted
;; predicate above.
683 (define_code_attr fetchop_predicate_1
684 [(plus "atomic_arith_operand_1") (minus "arith_reg_operand")
685 (ior "atomic_logical_operand_1") (xor "atomic_logical_operand_1")
686 (and "atomic_logical_operand_1")])
;; Constraints for the FETCHOP value operand in the hard-llcs patterns.
688 (define_code_attr fetchop_constraint_1_llcs
689 [(plus "rI08") (minus "r") (ior "rK08") (xor "rK08") (and "rK08")])
;; Constraints for the FETCHOP value operand in the gUSA patterns;
;; logical ops are register-only here ('u' = gUSA-safe regs).
691 (define_code_attr fetchop_constraint_1_gusa
692 [(plus "uI08") (minus "u") (ior "u") (xor "u") (and "u")])
;; Constraints for the FETCHOP value operand in the soft-tcb patterns.
694 (define_code_attr fetchop_constraint_1_tcb
695 [(plus "rI08") (minus "r") (ior "rK08") (xor "rK08") (and "rK08")])
;; Constraints for the FETCHOP value operand in the soft-imask patterns.
697 (define_code_attr fetchop_constraint_1_imask
698 [(plus "rI08") (minus "r") (ior "rK08") (xor "rK08") (and "rK08")])
700 ;; Displacement addressing mode (incl. GBR relative) can be used by tcb and
701 ;; imask atomic patterns in any mode, since all the patterns use R0 as the
702 ;; register operand for memory loads/stores. gusa and llcs patterns can only
703 ;; use displacement addressing for SImode.
;; Memory operand predicate for the fetch-op patterns; see the comment
;; above for the per-model displacement / GBR-relative rules.
704 (define_predicate "atomic_mem_operand_1"
705 (and (match_code "mem")
706 (ior (match_operand 0 "simple_mem_operand")
707 (and (match_test "mode == SImode")
708 (match_test "TARGET_ATOMIC_SOFT_GUSA
709 && (!TARGET_SH4A || TARGET_ATOMIC_STRICT)")
710 (match_operand 0 "displacement_mem_operand")
711 (match_operand 0 "short_displacement_mem_operand"))
712 (and (ior (match_test "(TARGET_ATOMIC_SOFT_TCB
713 || TARGET_ATOMIC_SOFT_IMASK)
714 && (!TARGET_SH4A || TARGET_ATOMIC_STRICT)")
715 (match_test "(TARGET_ATOMIC_SOFT_TCB
716 || TARGET_ATOMIC_SOFT_IMASK)
717 && TARGET_SH4A && !TARGET_ATOMIC_STRICT
719 (ior (and (match_operand 0 "displacement_mem_operand")
720 (match_operand 0 "short_displacement_mem_operand"))
721 (match_operand 0 "gbr_address_mem"))))))
;; Expander for atomic fetch-<op> (returns the old value).  Dispatches to
;; the hard / soft-gusa / soft-tcb / soft-imask variant by atomic model,
;; then zero-extends the QImode/HImode result into operand 0's SImode
;; lowpart.
723 (define_expand "atomic_fetch_<fetchop_name><mode>"
724 [(set (match_operand:QIHISI 0 "arith_reg_dest")
725 (match_operand:QIHISI 1 "atomic_mem_operand_1"))
728 [(FETCHOP:QIHISI (match_dup 1)
729 (match_operand:QIHISI 2 "<fetchop_predicate_1>"))]
731 (match_operand:SI 3 "const_int_operand")]
734 rtx mem = operands[1];
737 if (TARGET_ATOMIC_HARD_LLCS
738 || (TARGET_SH4A && <MODE>mode == SImode && !TARGET_ATOMIC_STRICT))
739 atomic_insn = gen_atomic_fetch_<fetchop_name><mode>_hard (operands[0], mem,
741 else if (TARGET_ATOMIC_SOFT_GUSA)
742 atomic_insn = gen_atomic_fetch_<fetchop_name><mode>_soft_gusa (operands[0],
744 else if (TARGET_ATOMIC_SOFT_TCB)
745 atomic_insn = gen_atomic_fetch_<fetchop_name><mode>_soft_tcb (operands[0],
746 mem, operands[2], TARGET_ATOMIC_SOFT_TCB_GBR_OFFSET_RTX);
747 else if (TARGET_ATOMIC_SOFT_IMASK)
748 atomic_insn = gen_atomic_fetch_<fetchop_name><mode>_soft_imask (operands[0],
753 emit_insn (atomic_insn);
755 if (<MODE>mode == QImode)
756 emit_insn (gen_zero_extendqisi2 (gen_lowpart (SImode, operands[0]),
758 else if (<MODE>mode == HImode)
759 emit_insn (gen_zero_extendhisi2 (gen_lowpart (SImode, operands[0]),
;; SImode hardware fetch-<op> via movli.l/movco.l (length attr: 10).
;; When optimizing and the fetched old value is unused after this insn,
;; the split replaces it with the <op>-fetch form writing its result into
;; a throwaway pseudo.
764 (define_insn_and_split "atomic_fetch_<fetchop_name>si_hard"
765 [(set (match_operand:SI 0 "arith_reg_dest" "=&r")
766 (match_operand:SI 1 "atomic_mem_operand_1" "=Sra"))
769 [(FETCHOP:SI (match_dup 1)
770 (match_operand:SI 2 "<fetchop_predicate_1>"
771 "<fetchop_constraint_1_llcs>"))]
773 (set (reg:SI T_REG) (const_int 1))
774 (clobber (reg:SI R0_REG))]
775 "TARGET_ATOMIC_HARD_LLCS
776 || (TARGET_SH4A && TARGET_ATOMIC_ANY && !TARGET_ATOMIC_STRICT)"
778 return "\r0: movli.l %1,r0" "\n"
780 " <fetchop_name> %2,r0" "\n"
781 " movco.l r0,%1" "\n"
784 "&& can_create_pseudo_p () && optimize
785 && sh_reg_dead_or_unused_after_insn (insn, REGNO (operands[0]))"
788 emit_insn (gen_atomic_<fetchop_name>_fetchsi_hard (gen_reg_rtx (SImode),
789 operands[1], operands[2]));
791 [(set_attr "length" "10")])
793 ;; Combine pattern for xor (val, -1) / nand (val, -1).
;; SImode hardware fetch-not (combine pattern, see comment above).  Like
;; fetch-<op> above, when the old value is unused after this insn the
;; split switches to the not-fetch form with a throwaway result pseudo.
794 (define_insn_and_split "atomic_fetch_notsi_hard"
795 [(set (match_operand:SI 0 "arith_reg_dest" "=&r")
796 (match_operand:SI 1 "atomic_mem_operand_1" "=Sra"))
798 (unspec:SI [(not:SI (match_dup 1))] UNSPEC_ATOMIC))
799 (set (reg:SI T_REG) (const_int 1))
800 (clobber (reg:SI R0_REG))]
801 "TARGET_ATOMIC_HARD_LLCS
802 || (TARGET_SH4A && TARGET_ATOMIC_ANY && !TARGET_ATOMIC_STRICT)"
804 return "\r0: movli.l %1,r0" "\n"
807 " movco.l r0,%1" "\n"
810 "&& can_create_pseudo_p () && optimize
811 && sh_reg_dead_or_unused_after_insn (insn, REGNO (operands[0]))"
814 emit_insn (gen_atomic_not_fetchsi_hard (gen_reg_rtx (SImode), operands[1]));
816 [(set_attr "length" "10")])
818 ;; The QIHImode llcs patterns modify the address register of the memory
819 ;; operand. In order to express that, we have to open code the memory
820 ;; operand. Initially the insn is expanded like every other atomic insn
821 ;; using the memory operand. In split1 the insn is converted and the
822 ;; memory operand's address register is exposed.
;; QImode/HImode hardware fetch-<op>.  split1 either degrades to the
;; plain (non-fetching) atomic_<op> insn when the old value is unused, or
;; rewrites to the _hard_1 form below with the memory address register
;; exposed, re-installing the original mem rtx.
823 (define_insn_and_split "atomic_fetch_<fetchop_name><mode>_hard"
824 [(set (match_operand:QIHI 0 "arith_reg_dest")
825 (match_operand:QIHI 1 "atomic_mem_operand_1"))
828 [(FETCHOP:QIHI (match_dup 1)
829 (match_operand:QIHI 2 "<fetchop_predicate_1>"))]
831 (set (reg:SI T_REG) (const_int 1))
832 (clobber (reg:SI R0_REG))]
833 "TARGET_ATOMIC_HARD_LLCS && can_create_pseudo_p ()"
839 && sh_reg_dead_or_unused_after_insn (curr_insn, REGNO (operands[0])))
840 emit_insn (gen_atomic_<fetchop_name><mode>_hard (operands[1], operands[2]));
843 rtx i = gen_atomic_fetch_<fetchop_name><mode>_hard_1 (
844 operands[0], XEXP (operands[1], 0), operands[2]);
846 /* Replace the new mems in the new insn with the old mem to preserve
848 XEXP (XVECEXP (i, 0, 0), 1) = operands[1];
849 XEXP (XVECEXP (i, 0, 1), 0) = operands[1];
850 XEXP (XVECEXP (XEXP (XVECEXP (i, 0, 1), 1), 0, 0), 0) = operands[1];
;; QImode/HImode hardware fetch-<op> worker: movli.l/movco.l retry loop
;; on the aligned containing word (%1 masked with #-4 into scratch %3).
;; Per the pattern, operand 0 receives the old memory value; the op is
;; applied in r0 before the mov.<bw> write-back.
855 (define_insn "atomic_fetch_<fetchop_name><mode>_hard_1"
856 [(set (match_operand:QIHI 0 "arith_reg_dest" "=&r")
857 (mem:QIHI (match_operand:SI 1 "arith_reg_operand" "r")))
858 (set (mem:QIHI (match_dup 1))
860 [(FETCHOP:QIHI (mem:QIHI (match_dup 1))
861 (match_operand:QIHI 2 "<fetchop_predicate_1>"
862 "<fetchop_constraint_1_llcs>"))]
864 (set (reg:SI T_REG) (const_int 1))
865 (clobber (reg:SI R0_REG))
866 (clobber (match_scratch:SI 3 "=&r"))
867 (clobber (match_scratch:SI 4 "=1"))]
868 "TARGET_ATOMIC_HARD_LLCS"
870 return "\r mov #-4,%3" "\n"
875 "0: movli.l @%3,r0" "\n"
876 " mov.l r0,@-r15" "\n"
877 " mov.<bw> @%1,r0" "\n"
879 " <fetchop_name> %2,r0" "\n"
880 " mov.<bw> r0,@%1" "\n"
881 " mov.l @r15+,r0" "\n"
882 " movco.l r0,@%3" "\n"
885 [(set_attr "length" "28")])
887 ;; The QIHImode llcs patterns modify the address register of the memory
888 ;; operand. In order to express that, we have to open code the memory
889 ;; operand. Initially the insn is expanded like every other atomic insn
890 ;; using the memory operand. In split1 the insn is converted and the
891 ;; memory operand's address register is exposed.
;; QImode/HImode hardware read-modify-write that does not fetch the old
;; value; split1 exposes the memory address register as in the fetch
;; variants, re-installing the original mem rtx.
892 (define_insn_and_split "atomic_<fetchop_name><mode>_hard"
893 [(set (match_operand:QIHI 0 "atomic_mem_operand_1")
895 [(FETCHOP:QIHI (match_dup 0)
896 (match_operand:QIHI 1 "<fetchop_predicate_1>"))]
898 (set (reg:SI T_REG) (const_int 1))
899 (clobber (reg:SI R0_REG))]
900 "TARGET_ATOMIC_HARD_LLCS && can_create_pseudo_p ()"
905 rtx i = gen_atomic_<fetchop_name><mode>_hard_1 (XEXP (operands[0], 0),
907 /* Replace the new mems in the new insn with the old mem to preserve
909 XEXP (XVECEXP (i, 0, 0), 0) = operands[0];
910 XEXP (XVECEXP (XEXP (XVECEXP (i, 0, 0), 1), 0, 0), 0) = operands[0];
;; Worker insn for the non-fetching QImode/HImode rmw: the op is applied
;; in r0 on the value loaded from @%0, inside the movli.l/movco.l retry
;; loop on the aligned containing word (length attr: 26 bytes).
914 (define_insn "atomic_<fetchop_name><mode>_hard_1"
915 [(set (mem:QIHI (match_operand:SI 0 "arith_reg_operand" "r"))
917 [(FETCHOP:QIHI (mem:QIHI (match_dup 0))
918 (match_operand:QIHI 1 "<fetchop_predicate_1>"
919 "<fetchop_constraint_1_llcs>"))]
921 (set (reg:SI T_REG) (const_int 1))
922 (clobber (reg:SI R0_REG))
923 (clobber (match_scratch:SI 2 "=&r"))
924 (clobber (match_scratch:SI 3 "=0"))]
925 "TARGET_ATOMIC_HARD_LLCS"
927 return "\r mov #-4,%2" "\n"
932 "0: movli.l @%2,r0" "\n"
933 " mov.l r0,@-r15" "\n"
934 " mov.<bw> @%0,r0" "\n"
935 " <fetchop_name> %1,r0" "\n"
936 " mov.<bw> r0,@%0" "\n"
937 " mov.l @r15+,r0" "\n"
938 " movco.l r0,@%2" "\n"
941 [(set_attr "length" "26")])
943 ;; Combine pattern for xor (val, -1) / nand (val, -1).
;; QImode/HImode hardware fetch-not (combine pattern, see comment above).
;; When the fetched value is unused after this insn, the split replaces
;; it with the non-fetching atomic_not<mode>_hard, re-using the mem rtx
;; taken from this insn's own pattern.
944 (define_insn_and_split "atomic_fetch_not<mode>_hard"
945 [(set (match_operand:QIHI 0 "arith_reg_dest" "=&r")
946 (mem:QIHI (match_operand:SI 1 "arith_reg_operand" "r")))
947 (set (mem:QIHI (match_dup 1))
948 (unspec:QIHI [(not:QIHI (mem:QIHI (match_dup 1)))] UNSPEC_ATOMIC))
949 (set (reg:SI T_REG) (const_int 1))
950 (clobber (reg:SI R0_REG))
951 (clobber (match_scratch:SI 2 "=&r"))
952 (clobber (match_scratch:SI 3 "=1"))]
953 "TARGET_ATOMIC_HARD_LLCS"
955 return "\r mov #-4,%2" "\n"
960 "0: movli.l @%2,r0" "\n"
961 " mov.l r0,@-r15" "\n"
962 " mov.<bw> @%1,%0" "\n"
964 " mov.<bw> r0,@%1" "\n"
965 " mov.l @r15+,r0" "\n"
966 " movco.l r0,@%2" "\n"
969 "&& can_create_pseudo_p () && optimize
970 && sh_reg_dead_or_unused_after_insn (insn, REGNO (operands[0]))"
973 rtx i = gen_atomic_not<mode>_hard (operands[1]);
975 /* Replace the new mems in the new insn with the old mem to preserve
977 rtx m = XEXP (XVECEXP (PATTERN (curr_insn), 0, 0), 1);
978 XEXP (XVECEXP (i, 0, 0), 0) = m;
979 XEXP (XVECEXP (XEXP (XVECEXP (i, 0, 0), 1), 0, 0), 0) = m;
982 [(set_attr "length" "26")])
984 (define_insn "atomic_not<mode>_hard"
985 [(set (mem:QIHI (match_operand:SI 0 "arith_reg_operand" "r"))
986 (unspec:QIHI [(not:QIHI (mem:QIHI (match_dup 0)))] UNSPEC_ATOMIC))
987 (set (reg:SI T_REG) (const_int 1))
988 (clobber (reg:SI R0_REG))
989 (clobber (match_scratch:SI 1 "=&r"))
990 (clobber (match_scratch:SI 2 "=0"))]
991 "TARGET_ATOMIC_HARD_LLCS"
993 return "\r mov #-4,%1" "\n"
998 "0: movli.l @%1,r0" "\n"
999 " mov.l r0,@-r15" "\n"
1000 " mov.<bw> @%0,r0" "\n"
1002 " mov.<bw> r0,@%0" "\n"
1003 " mov.l @r15+,r0" "\n"
1004 " movco.l r0,@%1" "\n"
1007 [(set_attr "length" "26")])
;; Software-atomic fetch-op using the gUSA sequence (mova 1f,r0 / negative
;; r15 marks the restartable region; R0/R1 clobbered by the protocol).
;; Splits to the non-fetching op_fetch variant when operand 0 is dead.
1009 (define_insn_and_split "atomic_fetch_<fetchop_name><mode>_soft_gusa"
1010 [(set (match_operand:QIHISI 0 "arith_reg_dest" "=&u")
1011 (match_operand:QIHISI 1 "atomic_mem_operand_1" "=AraAdd"))
1016 (match_operand:QIHISI 2 "<fetchop_predicate_1>"
1017 "<fetchop_constraint_1_gusa>"))]
1019 (clobber (match_scratch:QIHISI 3 "=&u"))
1020 (clobber (reg:SI R0_REG))
1021 (clobber (reg:SI R1_REG))]
1022 "TARGET_ATOMIC_SOFT_GUSA"
1024 return "\r mova 1f,r0" "\n"
1027 " mov #(0f-1f),r15" "\n"
1028 "0: mov.<bwl> %1,%0" "\n"
1030 " <fetchop_name> %2,%3" "\n"
1031 " mov.<bwl> %3,%1" "\n"
1034 "&& can_create_pseudo_p () && optimize
1035 && sh_reg_dead_or_unused_after_insn (insn, REGNO (operands[0]))"
1038 emit_insn (gen_atomic_<fetchop_name>_fetch<mode>_soft_gusa (
1039 gen_reg_rtx (<MODE>mode), operands[1], operands[2]));
1041 [(set_attr "length" "18")])
1043 ;; Combine pattern for xor (val, -1) / nand (val, -1).
;; gUSA fetch-then-NOT; same protocol as above, splitting to the
;; not-fetch variant with a throwaway result reg when operand 0 is dead.
1044 (define_insn_and_split "atomic_fetch_not<mode>_soft_gusa"
1045 [(set (match_operand:QIHISI 0 "arith_reg_dest" "=&u")
1046 (match_operand:QIHISI 1 "atomic_mem_operand_1" "=AraAdd"))
1048 (unspec:QIHISI [(not:QIHISI (match_dup 1))] UNSPEC_ATOMIC))
1049 (clobber (match_scratch:QIHISI 2 "=&u"))
1050 (clobber (reg:SI R0_REG))
1051 (clobber (reg:SI R1_REG))]
1052 "TARGET_ATOMIC_SOFT_GUSA"
1054 return "\r mova 1f,r0" "\n"
1057 " mov #(0f-1f),r15" "\n"
1058 "0: mov.<bwl> %1,%0" "\n"
1060 " mov.<bwl> %2,%1" "\n"
1063 "&& can_create_pseudo_p () && optimize
1064 && sh_reg_dead_or_unused_after_insn (insn, REGNO (operands[0]))"
1067 emit_insn (gen_atomic_not_fetch<mode>_soft_gusa (gen_reg_rtx (<MODE>mode),
1070 [(set_attr "length" "16")])
;; Software-atomic fetch-op, TCB style: the interrupt-rollback address is
;; parked in a thread-control-block slot at GBR offset %O3 (operand 3 is
;; the gbr_displacement), written before and cleared after the critical
;; region.  Splits to the non-fetching insn when operand 0 is dead.
1072 (define_insn_and_split "atomic_fetch_<fetchop_name><mode>_soft_tcb"
1073 [(set (match_operand:QIHISI 0 "arith_reg_dest" "=&r")
1074 (match_operand:QIHISI 1 "atomic_mem_operand_1" "=SraSdd"))
1079 (match_operand:QIHISI 2 "<fetchop_predicate_1>"
1080 "<fetchop_constraint_1_tcb>"))]
1082 (use (match_operand:SI 3 "gbr_displacement"))
1083 (clobber (reg:SI R0_REG))
1084 (clobber (reg:SI R1_REG))]
1085 "TARGET_ATOMIC_SOFT_TCB"
1087 return "\r mova 1f,r0" "\n"
1089 " mov #(0f-1f),r1" "\n"
1090 " mov.l r0,@(%O3,gbr)" "\n"
1091 "0: mov.<bwl> %1,r0" "\n"
1093 " <fetchop_name> %2,r0" "\n"
1094 " mov.<bwl> r0,%1" "\n"
1096 " mov.l r0,@(%O3,gbr)";
1098 "&& can_create_pseudo_p () && optimize
1099 && sh_reg_dead_or_unused_after_insn (insn, REGNO (operands[0]))"
1102 emit_insn (gen_atomic_<fetchop_name><mode>_soft_tcb (
1103 operands[1], operands[2], operands[3]));
1105 [(set_attr "length" "20")])
;; Non-fetching TCB atomic op (split target of the pattern above); the
;; old memory value is not captured, only the in-place update is done.
1107 (define_insn "atomic_<fetchop_name><mode>_soft_tcb"
1108 [(set (match_operand:QIHISI 0 "atomic_mem_operand_1" "=SraSdd")
1112 (match_operand:QIHISI 1 "<fetchop_predicate_1>"
1113 "<fetchop_constraint_1_tcb>"))]
1115 (use (match_operand:SI 2 "gbr_displacement"))
1116 (clobber (reg:SI R0_REG))
1117 (clobber (reg:SI R1_REG))]
1118 "TARGET_ATOMIC_SOFT_TCB"
1120 return "\r mova 1f,r0" "\n"
1121 " mov #(0f-1f),r1" "\n"
1123 " mov.l r0,@(%O2,gbr)" "\n"
1124 "0: mov.<bwl> %0,r0" "\n"
1125 " <fetchop_name> %1,r0" "\n"
1126 " mov.<bwl> r0,%0" "\n"
1128 " mov.l r0,@(%O2,gbr)";
1130 [(set_attr "length" "18")])
1132 ;; Combine pattern for xor (val, -1) / nand (val, -1).
;; TCB fetch-then-NOT; splits to atomic_not<mode>_soft_tcb when the
;; fetched value is dead.
1133 (define_insn_and_split "atomic_fetch_not<mode>_soft_tcb"
1134 [(set (match_operand:QIHISI 0 "arith_reg_dest" "=&r")
1135 (match_operand:QIHISI 1 "atomic_mem_operand_1" "=SraSdd"))
1137 (unspec:QIHISI [(not:QIHISI (match_dup 1))] UNSPEC_ATOMIC))
1138 (use (match_operand:SI 2 "gbr_displacement"))
1139 (clobber (reg:SI R0_REG))
1140 (clobber (reg:SI R1_REG))]
1141 "TARGET_ATOMIC_SOFT_TCB"
1143 return "\r mova 1f,r0" "\n"
1145 " mov #(0f-1f),r1" "\n"
1146 " mov.l r0,@(%O2,gbr)" "\n"
1147 "0: mov.<bwl> %1,r0" "\n"
1150 " mov.<bwl> r0,%1" "\n"
1152 " mov.l r0,@(%O2,gbr)";
1154 "&& can_create_pseudo_p () && optimize
1155 && sh_reg_dead_or_unused_after_insn (insn, REGNO (operands[0]))"
1158 emit_insn (gen_atomic_not<mode>_soft_tcb (operands[1], operands[2]));
1160 [(set_attr "length" "20")])
;; Non-fetching TCB atomic NOT (split target of the pattern above).
1162 (define_insn "atomic_not<mode>_soft_tcb"
1163 [(set (match_operand:QIHISI 0 "atomic_mem_operand_1" "=SraSdd")
1164 (unspec:QIHISI [(not:QIHISI (match_dup 0))] UNSPEC_ATOMIC))
1165 (use (match_operand:SI 1 "gbr_displacement"))
1166 (clobber (reg:SI R0_REG))
1167 (clobber (reg:SI R1_REG))]
1168 "TARGET_ATOMIC_SOFT_TCB"
1170 return "\r mova 1f,r0" "\n"
1171 " mov #(0f-1f),r1" "\n"
1173 " mov.l r0,@(%O1,gbr)" "\n"
1174 "0: mov.<bwl> %0,r0" "\n"
1176 " mov.<bwl> r0,%0" "\n"
1178 " mov.l r0,@(%O1,gbr)";
1180 [(set_attr "length" "18")])
;; Software-atomic fetch-op, interrupt-mask style: SR is saved via
;; "stc sr,r0" and interrupts are presumably masked for the critical
;; region (the mask/restore instructions are in lines elided from this
;; view).  Splits to the non-fetching variant when operand 0 is dead.
1182 (define_insn_and_split "atomic_fetch_<fetchop_name><mode>_soft_imask"
1183 [(set (match_operand:QIHISI 0 "arith_reg_dest" "=&r")
1184 (match_operand:QIHISI 1 "atomic_mem_operand_1" "=SraSdd"))
1189 (match_operand:QIHISI 2 "<fetchop_predicate_1>"
1190 "<fetchop_constraint_1_imask>"))]
1192 (clobber (reg:SI R0_REG))
1193 (clobber (match_scratch:QIHISI 3 "=&r"))]
1194 "TARGET_ATOMIC_SOFT_IMASK"
1196 return "\r stc sr,r0" "\n"
1200 " mov.<bwl> %1,r0" "\n"
1202 " <fetchop_name> %2,r0" "\n"
1203 " mov.<bwl> r0,%1" "\n"
1206 "&& can_create_pseudo_p () && optimize
1207 && sh_reg_dead_or_unused_after_insn (insn, REGNO (operands[0]))"
1210 emit_insn (gen_atomic_<fetchop_name>_fetch<mode>_soft_imask (
1211 gen_reg_rtx (<MODE>mode), operands[1], operands[2]));
1213 [(set_attr "length" "18")])
1215 ;; Combine pattern for xor (val, -1) / nand (val, -1).
;; imask fetch-then-NOT; same SR-save protocol, splitting to the
;; not-fetch variant with a throwaway result reg when operand 0 is dead.
1216 (define_insn_and_split "atomic_fetch_not<mode>_soft_imask"
1217 [(set (match_operand:QIHISI 0 "arith_reg_dest" "=&r")
1218 (match_operand:QIHISI 1 "atomic_mem_operand_1" "=SraSdd"))
1220 (unspec:QIHISI [(not:QIHISI (match_dup 1))] UNSPEC_ATOMIC))
1221 (clobber (reg:SI R0_REG))
1222 (clobber (match_scratch:QIHISI 2 "=&r"))]
1223 "TARGET_ATOMIC_SOFT_IMASK"
1225 return "\r stc sr,r0" "\n"
1229 " mov.<bwl> %1,r0" "\n"
1232 " mov.<bwl> r0,%1" "\n"
1235 "&& can_create_pseudo_p () && optimize
1236 && sh_reg_dead_or_unused_after_insn (insn, REGNO (operands[0]))"
1239 emit_insn (gen_atomic_not_fetch<mode>_soft_imask (gen_reg_rtx (<MODE>mode),
1242 [(set_attr "length" "18")])
;; Expander for __atomic_fetch_nand: dispatches to the hard (LLCS) /
;; gusa / tcb / imask implementation according to the selected atomic
;; model, then zero-extends a QI/HI result into the SImode reg so the
;; upper bits are well defined for the caller.  Operand 3 (memory model)
;; is accepted but not otherwise used in the visible code.
1244 (define_expand "atomic_fetch_nand<mode>"
1245 [(set (match_operand:QIHISI 0 "arith_reg_dest")
1246 (match_operand:QIHISI 1 "atomic_mem_operand_1"))
1249 [(not:QIHISI (and:QIHISI (match_dup 1)
1250 (match_operand:QIHISI 2 "atomic_logical_operand_1")))]
1252 (match_operand:SI 3 "const_int_operand")]
1255 rtx mem = operands[1];
1258 if (TARGET_ATOMIC_HARD_LLCS
1259 || (TARGET_SH4A && <MODE>mode == SImode && !TARGET_ATOMIC_STRICT))
1260 atomic_insn = gen_atomic_fetch_nand<mode>_hard (operands[0], mem,
1262 else if (TARGET_ATOMIC_SOFT_GUSA)
1263 atomic_insn = gen_atomic_fetch_nand<mode>_soft_gusa (operands[0], mem,
1265 else if (TARGET_ATOMIC_SOFT_TCB)
1266 atomic_insn = gen_atomic_fetch_nand<mode>_soft_tcb (operands[0], mem,
1267 operands[2], TARGET_ATOMIC_SOFT_TCB_GBR_OFFSET_RTX);
1268 else if (TARGET_ATOMIC_SOFT_IMASK)
1269 atomic_insn = gen_atomic_fetch_nand<mode>_soft_imask (operands[0], mem,
1274 emit_insn (atomic_insn);
1276 if (<MODE>mode == QImode)
1277 emit_insn (gen_zero_extendqisi2 (gen_lowpart (SImode, operands[0]),
1279 else if (<MODE>mode == HImode)
1280 emit_insn (gen_zero_extendhisi2 (gen_lowpart (SImode, operands[0]),
;; SImode LLCS fetch-nand: movli.l/movco.l operate directly on the mem
;; operand (word-sized, so no address masking needed).  Splits to the
;; nand-fetch variant with a throwaway reg when operand 0 is dead.
1285 (define_insn_and_split "atomic_fetch_nandsi_hard"
1286 [(set (match_operand:SI 0 "arith_reg_dest" "=&r")
1287 (match_operand:SI 1 "atomic_mem_operand_1" "=Sra"))
1290 [(not:SI (and:SI (match_dup 1)
1291 (match_operand:SI 2 "logical_operand" "rK08")))]
1293 (set (reg:SI T_REG) (const_int 1))
1294 (clobber (reg:SI R0_REG))]
1295 "TARGET_ATOMIC_HARD_LLCS
1296 || (TARGET_SH4A && TARGET_ATOMIC_ANY && !TARGET_ATOMIC_STRICT)"
1298 return "\r0: movli.l %1,r0" "\n"
1302 " movco.l r0,%1" "\n"
1305 "&& can_create_pseudo_p () && optimize
1306 && sh_reg_dead_or_unused_after_insn (insn, REGNO (operands[0]))"
1309 emit_insn (gen_atomic_nand_fetchsi_hard (gen_reg_rtx (SImode), operands[1],
1312 [(set_attr "length" "12")])
1314 ;; The QIHImode llcs patterns modify the address register of the memory
1315 ;; operand. In order to express that, we have to open code the memory
1316 ;; operand. Initially the insn is expanded like every other atomic insn
1317 ;; using the memory operand. In split1 the insn is converted and the
1318 ;; memory operand's address register is exposed.
;; QI/HImode LLCS fetch-nand wrapper: always splits in split1 -- either
;; to the non-fetching atomic_nand<mode>_hard (result dead) or to the
;; open-coded _hard_1 form, patching the generated insn's mems back to
;; the original mem operand to preserve aliasing info.
1319 (define_insn_and_split "atomic_fetch_nand<mode>_hard"
1320 [(set (match_operand:QIHI 0 "arith_reg_dest")
1321 (match_operand:QIHI 1 "atomic_mem_operand_1"))
1324 [(not:QIHI (and:QIHI (match_dup 1)
1325 (match_operand:QIHI 2 "logical_operand" "rK08")))]
1327 (set (reg:SI T_REG) (const_int 1))
1328 (clobber (reg:SI R0_REG))]
1329 "TARGET_ATOMIC_HARD_LLCS && can_create_pseudo_p ()"
1335 && sh_reg_dead_or_unused_after_insn (curr_insn, REGNO (operands[0])))
1336 emit_insn (gen_atomic_nand<mode>_hard (operands[1], operands[2]));
1339 rtx i = gen_atomic_fetch_nand<mode>_hard_1 (
1340 operands[0], XEXP (operands[1], 0), operands[2]);
1342 /* Replace the new mems in the new insn with the old mem to preserve
1344 XEXP (XVECEXP (i, 0, 0), 1) = operands[1];
1345 XEXP (XVECEXP (i, 0, 1), 0) = operands[1];
1346 XEXP (XEXP (XVECEXP (XEXP (XVECEXP (i, 0, 1), 1), 0, 0), 0),
;; Open-coded QI/HImode LLCS fetch-nand (split target of the wrapper
;; above); operand 1 is the exposed address register, clobbered via the
;; "=1" matched scratch.  The and/not steps are in lines elided here.
1352 (define_insn "atomic_fetch_nand<mode>_hard_1"
1353 [(set (match_operand:QIHI 0 "arith_reg_dest" "=&r")
1354 (mem:QIHI (match_operand:SI 1 "arith_reg_operand" "r")))
1355 (set (mem:QIHI (match_dup 1))
1357 [(not:QIHI (and:QIHI (mem:QIHI (match_dup 1))
1358 (match_operand:QIHI 2 "logical_operand" "rK08")))]
1360 (set (reg:SI T_REG) (const_int 1))
1361 (clobber (reg:SI R0_REG))
1362 (clobber (match_scratch:SI 3 "=&r"))
1363 (clobber (match_scratch:SI 4 "=1"))]
1364 "TARGET_ATOMIC_HARD_LLCS"
1366 return "\r mov #-4,%3" "\n"
1371 "0: movli.l @%3,r0" "\n"
1372 " mov.l r0,@-r15" "\n"
1373 " mov.<bw> @%1,r0" "\n"
1377 " mov.<bw> r0,@%1" "\n"
1378 " mov.l @r15+,r0" "\n"
1379 " movco.l r0,@%3" "\n"
1382 [(set_attr "length" "30")])
1384 ;; The QIHImode llcs patterns modify the address register of the memory
1385 ;; operand. In order to express that, we have to open code the memory
1386 ;; operand. Initially the insn is expanded like every other atomic insn
1387 ;; using the memory operand. In split1 the insn is converted and the
1388 ;; memory operand's address register is exposed.
;; Non-fetching QI/HImode LLCS nand wrapper: always splits in split1 to
;; the open-coded _hard_1 form, patching the generated mems back to the
;; original mem operand to preserve aliasing info.
1389 (define_insn_and_split "atomic_nand<mode>_hard"
1390 [(set (match_operand:QIHI 0 "atomic_mem_operand_1")
1392 [(not:QIHI (and:QIHI (match_dup 0)
1393 (match_operand:QIHI 1 "logical_operand")))]
1395 (set (reg:SI T_REG) (const_int 1))
1396 (clobber (reg:SI R0_REG))]
1397 "TARGET_ATOMIC_HARD_LLCS && can_create_pseudo_p ()"
1402 rtx i = gen_atomic_nand<mode>_hard_1 (XEXP (operands[0], 0), operands[1]);
1404 /* Replace the new mems in the new insn with the old mem to preserve
1406 XEXP (XVECEXP (i, 0, 0), 0) = operands[0];
1407 XEXP (XEXP (XVECEXP (XEXP (XVECEXP (i, 0, 0), 1), 0, 0), 0), 0) = operands[0];
;; Open-coded non-fetching QI/HImode LLCS nand (split target of the
;; wrapper above); sets T_REG to 1, clobbers R0 and the address reg
;; via the "=0" matched scratch.
1411 (define_insn "atomic_nand<mode>_hard_1"
1412 [(set (mem:QIHI (match_operand:SI 0 "arith_reg_operand" "r"))
1414 [(not:QIHI (and:QIHI (mem:QIHI (match_dup 0))
1415 (match_operand:QIHI 1 "logical_operand" "rK08")))]
1417 (set (reg:SI T_REG) (const_int 1))
1418 (clobber (reg:SI R0_REG))
1419 (clobber (match_scratch:SI 2 "=&r"))
1420 (clobber (match_scratch:SI 3 "=0"))]
1421 "TARGET_ATOMIC_HARD_LLCS"
1423 return "\r mov #-4,%2" "\n"
1428 "0: movli.l @%2,r0" "\n"
1429 " mov.l r0,@-r15" "\n"
1430 " mov.<bw> @%0,r0" "\n"
1433 " mov.<bw> r0,@%0" "\n"
1434 " mov.l @r15+,r0" "\n"
1435 " movco.l r0,@%2" "\n"
1438 [(set_attr "length" "28")])
;; gUSA fetch-nand; splits to the nand-fetch variant with a throwaway
;; result reg when operand 0 is dead after this insn.
1440 (define_insn_and_split "atomic_fetch_nand<mode>_soft_gusa"
1441 [(set (match_operand:QIHISI 0 "arith_reg_dest" "=&u")
1442 (match_operand:QIHISI 1 "atomic_mem_operand_1" "=AraAdd"))
1446 (and:QIHISI (match_dup 1)
1447 (match_operand:QIHISI 2 "arith_reg_operand" "u")))]
1449 (clobber (match_scratch:QIHISI 3 "=&u"))
1450 (clobber (reg:SI R0_REG))
1451 (clobber (reg:SI R1_REG))]
1452 "TARGET_ATOMIC_SOFT_GUSA"
1454 return "\r mova 1f,r0" "\n"
1457 " mov #(0f-1f),r15" "\n"
1458 "0: mov.<bwl> %1,%0" "\n"
1462 " mov.<bwl> %3,%1" "\n"
1465 "&& can_create_pseudo_p () && optimize
1466 && sh_reg_dead_or_unused_after_insn (insn, REGNO (operands[0]))"
1469 emit_insn (gen_atomic_nand_fetch<mode>_soft_gusa (gen_reg_rtx (<MODE>mode),
1470 operands[1], operands[2]));
1472 [(set_attr "length" "20")])
;; TCB fetch-nand (rollback address kept at GBR offset %O3); splits to
;; the non-fetching atomic_nand<mode>_soft_tcb when operand 0 is dead.
1474 (define_insn_and_split "atomic_fetch_nand<mode>_soft_tcb"
1475 [(set (match_operand:QIHISI 0 "arith_reg_dest" "=&r")
1476 (match_operand:QIHISI 1 "atomic_mem_operand_1" "=SraSdd"))
1480 (and:QIHISI (match_dup 1)
1481 (match_operand:QIHISI 2 "logical_operand" "rK08")))]
1483 (use (match_operand:SI 3 "gbr_displacement"))
1484 (clobber (reg:SI R0_REG))
1485 (clobber (reg:SI R1_REG))]
1486 "TARGET_ATOMIC_SOFT_TCB"
1488 return "\r mova 1f,r0" "\n"
1489 " mov #(0f-1f),r1" "\n"
1491 " mov.l r0,@(%O3,gbr)" "\n"
1492 "0: mov.<bwl> %1,r0" "\n"
1496 " mov.<bwl> r0,%1" "\n"
1498 " mov.l r0,@(%O3,gbr)";
1500 "&& can_create_pseudo_p () && optimize
1501 && sh_reg_dead_or_unused_after_insn (insn, REGNO (operands[0]))"
1504 emit_insn (gen_atomic_nand<mode>_soft_tcb (operands[1], operands[2],
1507 [(set_attr "length" "22")])
;; Non-fetching TCB nand (split target of the pattern above).
1509 (define_insn "atomic_nand<mode>_soft_tcb"
1510 [(set (match_operand:QIHISI 0 "atomic_mem_operand_1" "=SraSdd")
1513 (and:QIHISI (match_dup 0)
1514 (match_operand:QIHISI 1 "logical_operand" "rK08")))]
1516 (use (match_operand:SI 2 "gbr_displacement"))
1517 (clobber (reg:SI R0_REG))
1518 (clobber (reg:SI R1_REG))]
1519 "TARGET_ATOMIC_SOFT_TCB"
1521 return "\r mova 1f,r0" "\n"
1523 " mov #(0f-1f),r1" "\n"
1524 " mov.l r0,@(%O2,gbr)" "\n"
1525 "0: mov.<bwl> %0,r0" "\n"
1528 " mov.<bwl> r0,%0" "\n"
1530 " mov.l r0,@(%O2,gbr)";
1532 [(set_attr "length" "20")])
;; imask fetch-nand (SR saved via stc sr,r0); splits to the nand-fetch
;; variant with a throwaway result reg when operand 0 is dead.
1534 (define_insn_and_split "atomic_fetch_nand<mode>_soft_imask"
1535 [(set (match_operand:QIHISI 0 "arith_reg_dest" "=&r")
1536 (match_operand:QIHISI 1 "atomic_mem_operand_1" "=SraSdd"))
1540 (and:QIHISI (match_dup 1)
1541 (match_operand:QIHISI 2 "logical_operand" "rK08")))]
1543 (clobber (reg:SI R0_REG))
1544 (clobber (match_scratch:SI 3 "=&r"))]
1545 "TARGET_ATOMIC_SOFT_IMASK"
1547 return "\r stc sr,r0" "\n"
1551 " mov.<bwl> %1,r0" "\n"
1555 " mov.<bwl> r0,%1" "\n"
1558 "&& can_create_pseudo_p () && optimize
1559 && sh_reg_dead_or_unused_after_insn (insn, REGNO (operands[0]))"
1562 emit_insn (gen_atomic_nand_fetch<mode>_soft_imask (gen_reg_rtx (<MODE>mode),
1563 operands[1], operands[2]));
1565 [(set_attr "length" "20")])
1567 ;;------------------------------------------------------------------------------
1568 ;; read - add|sub|or|and|xor|nand - write - return new value
;; Expander for __atomic_op_fetch: same model dispatch as the fetch-op
;; expanders, followed by zero-extension of a QI/HI result into SImode.
1570 (define_expand "atomic_<fetchop_name>_fetch<mode>"
1571 [(set (match_operand:QIHISI 0 "arith_reg_dest")
1573 (match_operand:QIHISI 1 "atomic_mem_operand_1")
1574 (match_operand:QIHISI 2 "<fetchop_predicate_1>")))
1577 [(FETCHOP:QIHISI (match_dup 1) (match_dup 2))]
1579 (match_operand:SI 3 "const_int_operand" "")]
1582 rtx mem = operands[1];
1585 if (TARGET_ATOMIC_HARD_LLCS
1586 || (TARGET_SH4A && <MODE>mode == SImode && !TARGET_ATOMIC_STRICT))
1587 atomic_insn = gen_atomic_<fetchop_name>_fetch<mode>_hard (operands[0], mem,
1589 else if (TARGET_ATOMIC_SOFT_GUSA)
1590 atomic_insn = gen_atomic_<fetchop_name>_fetch<mode>_soft_gusa (operands[0],
1592 else if (TARGET_ATOMIC_SOFT_TCB)
1593 atomic_insn = gen_atomic_<fetchop_name>_fetch<mode>_soft_tcb (operands[0],
1594 mem, operands[2], TARGET_ATOMIC_SOFT_TCB_GBR_OFFSET_RTX);
1595 else if (TARGET_ATOMIC_SOFT_IMASK)
1596 atomic_insn = gen_atomic_<fetchop_name>_fetch<mode>_soft_imask (operands[0],
1601 emit_insn (atomic_insn);
1603 if (<MODE>mode == QImode)
1604 emit_insn (gen_zero_extendqisi2 (gen_lowpart (SImode, operands[0]),
1606 else if (<MODE>mode == HImode)
1607 emit_insn (gen_zero_extendhisi2 (gen_lowpart (SImode, operands[0]),
;; SImode LLCS op-fetch: result is forced into r0 ("=&z") since movli.l/
;; movco.l work through r0; the new value is both stored and returned.
1612 (define_insn "atomic_<fetchop_name>_fetchsi_hard"
1613 [(set (match_operand:SI 0 "arith_reg_dest" "=&z")
1615 (match_operand:SI 1 "atomic_mem_operand_1" "=Sra")
1616 (match_operand:SI 2 "<fetchop_predicate_1>"
1617 "<fetchop_constraint_1_llcs>")))
1620 [(FETCHOP:SI (match_dup 1) (match_dup 2))]
1622 (set (reg:SI T_REG) (const_int 1))]
1623 "TARGET_ATOMIC_HARD_LLCS
1624 || (TARGET_SH4A && TARGET_ATOMIC_ANY && !TARGET_ATOMIC_STRICT)"
1626 return "\r0: movli.l %1,%0" "\n"
1627 " <fetchop_name> %2,%0" "\n"
1628 " movco.l %0,%1" "\n"
1631 [(set_attr "length" "8")])
1633 ;; Combine pattern for xor (val, -1) / nand (val, -1).
;; SImode LLCS not-fetch (combine target for xor/nand with -1).
1634 (define_insn "atomic_not_fetchsi_hard"
1635 [(set (match_operand:SI 0 "arith_reg_dest" "=&z")
1636 (not:SI (match_operand:SI 1 "atomic_mem_operand_1" "=Sra")))
1638 (unspec:SI [(not:SI (match_dup 1))] UNSPEC_ATOMIC))
1639 (set (reg:SI T_REG) (const_int 1))]
1640 "TARGET_ATOMIC_HARD_LLCS
1641 || (TARGET_SH4A && TARGET_ATOMIC_ANY && !TARGET_ATOMIC_STRICT)"
1643 return "\r0: movli.l %1,%0" "\n"
1645 " movco.l %0,%1" "\n"
1648 [(set_attr "length" "8")])
1650 ;; The QIHImode llcs patterns modify the address register of the memory
1651 ;; operand. In order to express that, we have to open code the memory
1652 ;; operand. Initially the insn is expanded like every other atomic insn
1653 ;; using the memory operand. In split1 the insn is converted and the
1654 ;; memory operand's address register is exposed.
;; QI/HImode LLCS op-fetch wrapper: splits in split1 either to the
;; non-fetching atomic_<fetchop_name><mode>_hard (result dead) or to the
;; open-coded _hard_1 form, patching the generated mems back to the
;; original mem operand to preserve aliasing info.
1655 (define_insn_and_split "atomic_<fetchop_name>_fetch<mode>_hard"
1656 [(set (match_operand:QIHI 0 "arith_reg_dest" "=&r")
1657 (FETCHOP:QIHI (match_operand:QIHI 1 "atomic_mem_operand_1")
1658 (match_operand:QIHI 2 "<fetchop_predicate_1>")))
1659 (set (match_dup 1) (unspec:QIHI [(FETCHOP:QIHI (match_dup 1) (match_dup 2))]
1661 (set (reg:SI T_REG) (const_int 1))
1662 (clobber (reg:SI R0_REG))]
1663 "TARGET_ATOMIC_HARD_LLCS && can_create_pseudo_p ()"
1669 && sh_reg_dead_or_unused_after_insn (curr_insn, REGNO (operands[0])))
1670 emit_insn (gen_atomic_<fetchop_name><mode>_hard (operands[1], operands[2]));
1673 rtx i = gen_atomic_<fetchop_name>_fetch<mode>_hard_1 (
1674 operands[0], XEXP (operands[1], 0), operands[2]);
1676 /* Replace the new mems in the new insn with the old mem to preserve
1678 XEXP (XEXP (XVECEXP (i, 0, 0), 1), 0) = operands[1];
1679 XEXP (XVECEXP (i, 0, 1), 0) = operands[1];
1680 XEXP (XVECEXP (XEXP (XVECEXP (i, 0, 1), 1), 0, 0), 0) = operands[1];
;; Open-coded QI/HImode LLCS op-fetch (split target of the wrapper
;; above); operand 1 is the exposed address reg, clobbered via "=1".
1685 (define_insn "atomic_<fetchop_name>_fetch<mode>_hard_1"
1686 [(set (match_operand:QIHI 0 "arith_reg_dest" "=&r")
1688 (mem:QIHI (match_operand:SI 1 "arith_reg_operand" "r"))
1689 (match_operand:QIHI 2 "<fetchop_predicate_1>"
1690 "<fetchop_constraint_1_llcs>")))
1691 (set (mem:QIHI (match_dup 1))
1693 [(FETCHOP:QIHI (mem:QIHI (match_dup 1)) (match_dup 2))]
1695 (set (reg:SI T_REG) (const_int 1))
1696 (clobber (reg:SI R0_REG))
1697 (clobber (match_scratch:SI 3 "=&r"))
1698 (clobber (match_scratch:SI 4 "=1"))]
1699 "TARGET_ATOMIC_HARD_LLCS"
1701 return "\r mov #-4,%3" "\n"
1706 "0: movli.l @%3,r0" "\n"
1707 " mov.l r0,@-r15" "\n"
1708 " mov.<bw> @%1,r0" "\n"
1709 " <fetchop_name> %2,r0" "\n"
1710 " mov.<bw> r0,@%1" "\n"
1712 " mov.l @r15+,r0" "\n"
1713 " movco.l r0,@%3" "\n"
1716 [(set_attr "length" "28")])
1718 ;; Combine pattern for xor (val, -1) / nand (val, -1).
;; QI/HImode LLCS not-fetch; splits to the non-fetching
;; atomic_not<mode>_hard when operand 0 is dead, patching the generated
;; mems back to the original mem (m) to preserve aliasing info.
1719 (define_insn_and_split "atomic_not_fetch<mode>_hard"
1720 [(set (match_operand:QIHI 0 "arith_reg_dest" "=&r")
1721 (not:QIHI (mem:QIHI (match_operand:SI 1 "arith_reg_operand" "r"))))
1722 (set (mem:QIHI (match_dup 1))
1723 (unspec:QIHI [(not:QIHI (mem:QIHI (match_dup 1)))] UNSPEC_ATOMIC))
1724 (set (reg:SI T_REG) (const_int 1))
1725 (clobber (reg:SI R0_REG))
1726 (clobber (match_scratch:SI 2 "=&r"))
1727 (clobber (match_scratch:SI 3 "=1"))]
1728 "TARGET_ATOMIC_HARD_LLCS"
1730 return "\r mov #-4,%2" "\n"
1735 "0: movli.l @%2,r0" "\n"
1736 " mov.l r0,@-r15" "\n"
1737 " mov.<bw> @%1,r0" "\n"
1739 " mov.<bw> r0,@%1" "\n"
1741 " mov.l @r15+,r0" "\n"
1742 " movco.l r0,@%2" "\n"
1745 "&& can_create_pseudo_p () && optimize
1746 && sh_reg_dead_or_unused_after_insn (insn, REGNO (operands[0]))"
1749 rtx i = gen_atomic_not<mode>_hard (operands[1]);
1751 /* Replace the new mems in the new insn with the old mem to preserve
1753 rtx m = XEXP (XEXP (XVECEXP (PATTERN (curr_insn), 0, 0), 1), 0);
1754 XEXP (XVECEXP (i, 0, 0), 0) = m;
1755 XEXP (XVECEXP (XEXP (XVECEXP (i, 0, 0), 1), 0, 0), 0) = m;
1758 [(set_attr "length" "28")])
;; gUSA op-fetch: the new value is computed in operand 0 itself and
;; written back, so no scratch reg is needed; R0/R1 are clobbered by the
;; gUSA protocol.
1760 (define_insn "atomic_<fetchop_name>_fetch<mode>_soft_gusa"
1761 [(set (match_operand:QIHISI 0 "arith_reg_dest" "=&u")
1763 (match_operand:QIHISI 1 "atomic_mem_operand_1" "=AraAdd")
1764 (match_operand:QIHISI 2 "<fetchop_predicate_1>"
1765 "<fetchop_constraint_1_gusa>")))
1768 [(FETCHOP:QIHISI (match_dup 1) (match_dup 2))]
1770 (clobber (reg:SI R0_REG))
1771 (clobber (reg:SI R1_REG))]
1772 "TARGET_ATOMIC_SOFT_GUSA"
1774 return "\r mova 1f,r0" "\n"
1777 " mov #(0f-1f),r15" "\n"
1778 "0: mov.<bwl> %1,%0" "\n"
1779 " <fetchop_name> %2,%0" "\n"
1780 " mov.<bwl> %0,%1" "\n"
1783 [(set_attr "length" "16")])
1785 ;; Combine pattern for xor (val, -1) / nand (val, -1).
;; gUSA not-fetch (combine target for xor/nand with -1).
1786 (define_insn "atomic_not_fetch<mode>_soft_gusa"
1787 [(set (match_operand:QIHISI 0 "arith_reg_dest" "=&u")
1788 (not:QIHISI (match_operand:QIHISI 1 "atomic_mem_operand_1" "=AraAdd")))
1790 (unspec:QIHISI [(not:QIHISI (match_dup 1))] UNSPEC_ATOMIC))
1791 (clobber (reg:SI R0_REG))
1792 (clobber (reg:SI R1_REG))]
1793 "TARGET_ATOMIC_SOFT_GUSA"
1795 return "\r mova 1f,r0" "\n"
1798 " mov #(0f-1f),r15" "\n"
1799 "0: mov.<bwl> %1,%0" "\n"
1801 " mov.<bwl> %0,%1" "\n"
1804 [(set_attr "length" "16")])
;; TCB op-fetch; splits to the non-fetching atomic_<fetchop_name><mode>_
;; soft_tcb when operand 0 is dead.  NOTE(review): here the (use ...) of
;; the gbr_displacement follows the clobbers, whereas the fetch_* tcb
;; patterns earlier in the file list it before them -- presumably only a
;; cosmetic ordering difference, but worth confirming.
1806 (define_insn_and_split "atomic_<fetchop_name>_fetch<mode>_soft_tcb"
1807 [(set (match_operand:QIHISI 0 "arith_reg_dest" "=&r")
1809 (match_operand:QIHISI 1 "atomic_mem_operand_1" "=SraSdd")
1810 (match_operand:QIHISI 2 "<fetchop_predicate_1>"
1811 "<fetchop_constraint_1_tcb>")))
1814 [(FETCHOP:QIHISI (match_dup 1) (match_dup 2))]
1816 (clobber (reg:SI R0_REG))
1817 (clobber (reg:SI R1_REG))
1818 (use (match_operand:SI 3 "gbr_displacement"))]
1819 "TARGET_ATOMIC_SOFT_TCB"
1821 return "\r mova 1f,r0" "\n"
1822 " mov #(0f-1f),r1" "\n"
1824 " mov.l r0,@(%O3,gbr)" "\n"
1825 "0: mov.<bwl> %1,r0" "\n"
1826 " <fetchop_name> %2,r0" "\n"
1827 " mov.<bwl> r0,%1" "\n"
1830 " mov.l r0,@(%O3,gbr)";
1832 "&& can_create_pseudo_p () && optimize
1833 && sh_reg_dead_or_unused_after_insn (insn, REGNO (operands[0]))"
1836 emit_insn (gen_atomic_<fetchop_name><mode>_soft_tcb (
1837 operands[1], operands[2], operands[3]));
1839 [(set_attr "length" "20")])
1841 ;; Combine pattern for xor (val, -1) / nand (val, -1).
;; TCB not-fetch; splits to atomic_not<mode>_soft_tcb when operand 0 is
;; dead after this insn.
1842 (define_insn_and_split "atomic_not_fetch<mode>_soft_tcb"
1843 [(set (match_operand:QIHISI 0 "arith_reg_dest" "=&r")
1844 (not:QIHISI (match_operand:QIHISI 1 "atomic_mem_operand_1" "=SraSdd")))
1846 (unspec:QIHISI [(not:QIHISI (match_dup 1))] UNSPEC_ATOMIC))
1847 (clobber (reg:SI R0_REG))
1848 (clobber (reg:SI R1_REG))
1849 (use (match_operand:SI 2 "gbr_displacement"))]
1850 "TARGET_ATOMIC_SOFT_TCB"
1852 return "\r mova 1f,r0" "\n"
1853 " mov #(0f-1f),r1" "\n"
1855 " mov.l r0,@(%O2,gbr)" "\n"
1856 "0: mov.<bwl> %1,r0" "\n"
1858 " mov.<bwl> r0,%1" "\n"
1861 " mov.l r0,@(%O2,gbr)";
1863 "&& can_create_pseudo_p () && optimize
1864 && sh_reg_dead_or_unused_after_insn (insn, REGNO (operands[0]))"
1867 emit_insn (gen_atomic_not<mode>_soft_tcb (operands[1], operands[2]));
1869 [(set_attr "length" "20")])
;; imask op-fetch: result constrained to r0 ("=&z"), SR saved via
;; "stc sr,%0" (mask/restore steps are in lines elided from this view).
1871 (define_insn "atomic_<fetchop_name>_fetch<mode>_soft_imask"
1872 [(set (match_operand:QIHISI 0 "arith_reg_dest" "=&z")
1874 (match_operand:QIHISI 1 "atomic_mem_operand_1" "=SraSdd")
1875 (match_operand:QIHISI 2 "<fetchop_predicate_1>"
1876 "<fetchop_constraint_1_imask>")))
1879 [(FETCHOP:QIHISI (match_dup 1) (match_dup 2))]
1881 (clobber (match_scratch:SI 3 "=&r"))]
1882 "TARGET_ATOMIC_SOFT_IMASK"
1884 return "\r stc sr,%0" "\n"
1888 " mov.<bwl> %1,%0" "\n"
1889 " <fetchop_name> %2,%0" "\n"
1890 " mov.<bwl> %0,%1" "\n"
1893 [(set_attr "length" "16")])
1895 ;; Combine pattern for xor (val, -1) / nand (val, -1).
;; imask not-fetch (combine target for xor/nand with -1).
1896 (define_insn "atomic_not_fetch<mode>_soft_imask"
1897 [(set (match_operand:QIHISI 0 "arith_reg_dest" "=&z")
1898 (not:QIHISI (match_operand:QIHISI 1 "atomic_mem_operand_1" "=SraSdd")))
1900 (unspec:QIHISI [(not:QIHISI (match_dup 1))] UNSPEC_ATOMIC))
1901 (clobber (match_scratch:SI 2 "=&r"))]
1902 "TARGET_ATOMIC_SOFT_IMASK"
1904 return "\r stc sr,%0" "\n"
1908 " mov.<bwl> %1,%0" "\n"
1910 " mov.<bwl> %0,%1" "\n"
1913 [(set_attr "length" "16")])
;; Expander for __atomic_nand_fetch: same model dispatch as the other
;; expanders, followed by zero-extension of a QI/HI result into SImode.
1915 (define_expand "atomic_nand_fetch<mode>"
1916 [(set (match_operand:QIHISI 0 "arith_reg_dest")
1917 (not:QIHISI (and:QIHISI
1918 (match_operand:QIHISI 1 "atomic_mem_operand_1")
1919 (match_operand:QIHISI 2 "atomic_logical_operand_1"))))
1922 [(not:QIHISI (and:QIHISI (match_dup 1) (match_dup 2)))]
1924 (match_operand:SI 3 "const_int_operand")]
1927 rtx mem = operands[1];
1930 if (TARGET_ATOMIC_HARD_LLCS
1931 || (TARGET_SH4A && <MODE>mode == SImode && !TARGET_ATOMIC_STRICT))
1932 atomic_insn = gen_atomic_nand_fetch<mode>_hard (operands[0], mem,
1934 else if (TARGET_ATOMIC_SOFT_GUSA)
1935 atomic_insn = gen_atomic_nand_fetch<mode>_soft_gusa (operands[0], mem,
1937 else if (TARGET_ATOMIC_SOFT_TCB)
1938 atomic_insn = gen_atomic_nand_fetch<mode>_soft_tcb (operands[0], mem,
1939 operands[2], TARGET_ATOMIC_SOFT_TCB_GBR_OFFSET_RTX);
1940 else if (TARGET_ATOMIC_SOFT_IMASK)
1941 atomic_insn = gen_atomic_nand_fetch<mode>_soft_imask (operands[0], mem,
1946 emit_insn (atomic_insn);
1948 if (<MODE>mode == QImode)
1949 emit_insn (gen_zero_extendqisi2 (gen_lowpart (SImode, operands[0]),
1951 else if (<MODE>mode == HImode)
1952 emit_insn (gen_zero_extendhisi2 (gen_lowpart (SImode, operands[0]),
;; SImode LLCS nand-fetch: movli.l/movco.l directly on the word mem;
;; the and/not steps are in lines elided from this view.
1957 (define_insn "atomic_nand_fetchsi_hard"
1958 [(set (match_operand:SI 0 "arith_reg_dest" "=&z")
1959 (not:SI (and:SI (match_operand:SI 1 "atomic_mem_operand_1" "=Sra")
1960 (match_operand:SI 2 "logical_operand" "rK08"))))
1963 [(not:SI (and:SI (match_dup 1) (match_dup 2)))]
1965 (set (reg:SI T_REG) (const_int 1))]
1966 "TARGET_ATOMIC_HARD_LLCS
1967 || (TARGET_SH4A && TARGET_ATOMIC_ANY && !TARGET_ATOMIC_STRICT)"
1969 return "\r0: movli.l %1,%0" "\n"
1972 " movco.l %0,%1" "\n"
1975 [(set_attr "length" "10")])
1977 ;; The QIHImode llcs patterns modify the address register of the memory
1978 ;; operand. In order to express that, we have to open code the memory
1979 ;; operand. Initially the insn is expanded like every other atomic insn
1980 ;; using the memory operand. In split1 the insn is converted and the
1981 ;; memory operand's address register is exposed.
;; QI/HImode LLCS nand-fetch wrapper: splits in split1 either to the
;; non-fetching atomic_nand<mode>_hard (result dead) or to the
;; open-coded _hard_1 form, patching the generated mems back to the
;; original mem operand to preserve aliasing info.
1982 (define_insn_and_split "atomic_nand_fetch<mode>_hard"
1983 [(set (match_operand:QIHI 0 "arith_reg_dest" "=&r")
1984 (not:QIHI (and:QIHI (match_operand:QIHI 1 "atomic_mem_operand_1")
1985 (match_operand:QIHI 2 "logical_operand"))))
1987 (unspec:QIHI [(not:QIHI (and:QIHI (match_dup 1) (match_dup 2)))]
1989 (set (reg:SI T_REG) (const_int 1))
1990 (clobber (reg:SI R0_REG))]
1991 "TARGET_ATOMIC_HARD_LLCS && can_create_pseudo_p ()"
1997 && sh_reg_dead_or_unused_after_insn (curr_insn, REGNO (operands[0])))
1998 emit_insn (gen_atomic_nand<mode>_hard (operands[1], operands[2]));
2001 rtx i = gen_atomic_nand_fetch<mode>_hard_1 (
2002 operands[0], XEXP (operands[1], 0), operands[2]);
2004 /* Replace the new mems in the new insn with the old mem to preserve
2006 XEXP (XEXP (XEXP (XVECEXP (i, 0, 0), 1), 0), 0) = operands[1];
2007 XEXP (XVECEXP (i, 0, 1), 0) = operands[1];
2008 XEXP (XEXP (XVECEXP (XEXP (XVECEXP (i, 0, 1), 1), 0, 0), 0),
;; Open-coded QI/HImode LLCS nand-fetch (split target of the wrapper
;; above); operand 1 is the exposed address reg, clobbered via "=1".
2014 (define_insn "atomic_nand_fetch<mode>_hard_1"
2015 [(set (match_operand:QIHI 0 "arith_reg_dest" "=&r")
2017 (and:QIHI (mem:QIHI (match_operand:SI 1 "arith_reg_operand" "r"))
2018 (match_operand:QIHI 2 "logical_operand" "rK08"))))
2019 (set (mem:QIHI (match_dup 1))
2021 [(not:QIHI (and:QIHI (mem:QIHI (match_dup 1)) (match_dup 2)))]
2023 (set (reg:SI T_REG) (const_int 1))
2024 (clobber (reg:SI R0_REG))
2025 (clobber (match_scratch:SI 3 "=&r"))
2026 (clobber (match_scratch:SI 4 "=1"))]
2027 "TARGET_ATOMIC_HARD_LLCS"
2029 return "\r mov #-4,%3" "\n"
2034 "0: movli.l @%3,r0" "\n"
2035 " mov.l r0,@-r15" "\n"
2036 " mov.<bw> @%1,r0" "\n"
2039 " mov.<bw> %0,@%1" "\n"
2040 " mov.l @r15+,r0" "\n"
2041 " movco.l r0,@%3" "\n"
2044 [(set_attr "length" "28")])
;; gUSA nand-fetch: value reloaded, and-ed and inverted inside the
;; restartable region (middle steps are in lines elided from this view).
2046 (define_insn "atomic_nand_fetch<mode>_soft_gusa"
2047 [(set (match_operand:QIHISI 0 "arith_reg_dest" "=&u")
2048 (not:QIHISI (and:QIHISI
2049 (match_operand:QIHISI 1 "atomic_mem_operand_1" "=AraAdd")
2050 (match_operand:QIHISI 2 "arith_reg_operand" "u"))))
2053 [(not:QIHISI (and:QIHISI (match_dup 1) (match_dup 2)))]
2055 (clobber (reg:SI R0_REG))
2056 (clobber (reg:SI R1_REG))]
2057 "TARGET_ATOMIC_SOFT_GUSA"
2059 return "\r mova 1f,r0" "\n"
2062 " mov #(0f-1f),r15" "\n"
2063 "0: mov.<bwl> %1,%0" "\n"
2066 " mov.<bwl> %0,%1" "\n"
2069 [(set_attr "length" "18")])
;; TCB nand-fetch; splits to the non-fetching atomic_nand<mode>_soft_tcb
;; when operand 0 is dead after this insn.
2071 (define_insn_and_split "atomic_nand_fetch<mode>_soft_tcb"
2072 [(set (match_operand:QIHISI 0 "arith_reg_dest" "=&r")
2073 (not:QIHISI (and:QIHISI
2074 (match_operand:QIHISI 1 "atomic_mem_operand_1" "=SraSdd")
2075 (match_operand:QIHISI 2 "logical_operand" "rK08"))))
2078 [(not:QIHISI (and:QIHISI (match_dup 1) (match_dup 2)))]
2080 (clobber (reg:SI R0_REG))
2081 (clobber (reg:SI R1_REG))
2082 (use (match_operand:SI 3 "gbr_displacement"))]
2083 "TARGET_ATOMIC_SOFT_TCB"
2085 return "\r mova 1f,r0" "\n"
2086 " mov #(0f-1f),r1" "\n"
2088 " mov.l r0,@(%O3,gbr)" "\n"
2089 "0: mov.<bwl> %1,r0" "\n"
2093 " mov.<bwl> r0,%1" "\n"
2095 " mov.l r0,@(%O3,gbr)";
2097 "&& can_create_pseudo_p () && optimize
2098 && sh_reg_dead_or_unused_after_insn (insn, REGNO (operands[0]))"
2101 emit_insn (gen_atomic_nand<mode>_soft_tcb (operands[1], operands[2],
2104 [(set_attr "length" "22")])
;; imask nand-fetch: result constrained to r0 ("=&z"), SR saved via
;; "stc sr,%0"; mask/restore steps are in lines elided from this view.
2106 (define_insn "atomic_nand_fetch<mode>_soft_imask"
2107 [(set (match_operand:QIHISI 0 "arith_reg_dest" "=&z")
2108 (not:QIHISI (and:QIHISI
2109 (match_operand:QIHISI 1 "atomic_mem_operand_1" "=SraSdd")
2110 (match_operand:QIHISI 2 "logical_operand" "rK08"))))
2113 [(not:QIHISI (and:QIHISI (match_dup 1) (match_dup 2)))]
2115 (clobber (match_scratch:SI 3 "=&r"))]
2116 "TARGET_ATOMIC_SOFT_IMASK"
2118 return "\r stc sr,%0" "\n"
2122 " mov.<bwl> %1,%0" "\n"
2125 " mov.<bwl> %0,%1" "\n"
2128 [(set_attr "length" "18")])
2130 ;;------------------------------------------------------------------------------
2131 ;; read - test against zero - or with 0x80 - write - return test result
;; atomic_test_and_set expander: dispatches to one of the model-specific
;; test-and-set patterns.  If -mtas is enabled the 'tas.b' instruction is
;; used unconditionally; otherwise the store value is
;; targetm.atomic_test_and_set_trueval and the hard-llcs / gusa / tcb /
;; imask variant is chosen from the active atomic model.  The patterns set
;; the T bit to (mem == 0), which is the inverse of the builtin's required
;; result, so the T bit is inverted via movnegt at the end.
;; NOTE(review): lossy extraction -- interior lines (orig. 2138, 2140,
;; 2143-2144, 2147, 2157-2160) are missing; verify against upstream.
2133 (define_expand "atomic_test_and_set"
2134   [(match_operand:SI 0 "register_operand" "")		;; bool result output
2135    (match_operand:QI 1 "memory_operand" "")		;; memory
2136    (match_operand:SI 2 "const_int_operand" "")]	;; model
2137   "(TARGET_ATOMIC_ANY || TARGET_ENABLE_TAS) && !TARGET_SHMEDIA"
2139   rtx addr = force_reg (Pmode, XEXP (operands[1], 0));
2141   if (TARGET_ENABLE_TAS)
2142     emit_insn (gen_tasb (addr));
2145       rtx val = gen_int_mode (targetm.atomic_test_and_set_trueval, QImode);
2146       val = force_reg (QImode, val);
2148       if (TARGET_ATOMIC_HARD_LLCS)
2149 	  emit_insn (gen_atomic_test_and_set_hard (addr, val));
2150       else if (TARGET_ATOMIC_SOFT_GUSA)
2151 	  emit_insn (gen_atomic_test_and_set_soft_gusa (addr, val));
2152       else if (TARGET_ATOMIC_SOFT_TCB)
2153 	  emit_insn (gen_atomic_test_and_set_soft_tcb (addr, val,
2154 		TARGET_ATOMIC_SOFT_TCB_GBR_OFFSET_RTX));
2155       else if (TARGET_ATOMIC_SOFT_IMASK)
2156 	  emit_insn (gen_atomic_test_and_set_soft_imask (addr, val));
2161   /* The result of the test op is the inverse of what we are
2162      supposed to return.  Thus invert the T bit.  The inversion will be
2163      potentially optimized away and integrated into surrounding code.  */
2164   emit_insn (gen_movnegt (operands[0], get_t_reg_rtx ()));
;; tas.b pattern: atomically tests the byte at the given address against
;; zero (setting T) and stores 0x80 into it, modeled as the two parallel
;; sets below.  Marked "co_group" for SH4 dual-issue scheduling.
;; NOTE(review): the opening "(define_insn "tasb"" line (orig. ~2168) and
;; the (const_int 0) operand of the eq (orig. 2171) plus the output
;; template (orig. 2175) were lost in this extraction -- the name "tasb"
;; is inferred from the gen_tasb call in the expander above; confirm
;; against upstream.
2169   [(set (reg:SI T_REG)
2170 	(eq:SI (mem:QI (match_operand:SI 0 "register_operand" "r"))
2172    (set (mem:QI (match_dup 0))
2173 	(unspec:QI [(const_int 128)] UNSPEC_ATOMIC))]
2174   "TARGET_ENABLE_TAS && !TARGET_SHMEDIA"
2176   [(set_attr "insn_class" "co_group")])
;; Test-and-set under the gUSA software-atomic model.  The "mova 1f,r0" /
;; "mov #(0f-1f),r15" prologue follows the gUSA convention: r0 holds the
;; end address of the critical section and r15 its negated length, so the
;; kernel can restart an interrupted sequence.  Old byte is loaded into a
;; scratch ('u' = gUSA-safe register class), the new value stored, and r15
;; restored from r1.  r0/r1 are clobbered by the protocol.
;; NOTE(review): lossy extraction -- the (const_int 0) operand of the eq
;; (orig. 2181) and parts of the asm template (orig. 2188, 2190-2191,
;; 2196-2197) are missing; verify against upstream.
2178 (define_insn "atomic_test_and_set_soft_gusa"
2179   [(set (reg:SI T_REG)
2180 	(eq:SI (mem:QI (match_operand:SI 0 "register_operand" "u"))
2182    (set (mem:QI (match_dup 0))
2183 	(unspec:QI [(match_operand:QI 1 "register_operand" "u")] UNSPEC_ATOMIC))
2184    (clobber (match_scratch:QI 2 "=&u"))
2185    (clobber (reg:SI R0_REG))
2186    (clobber (reg:SI R1_REG))]
2187   "TARGET_ATOMIC_SOFT_GUSA && !TARGET_ENABLE_TAS"
2189   return "\r	mova	1f,r0"		"\n"
2192 	 "	mov	#(0f-1f),r15"	"\n"
2193 	 "0:	mov.b	@%0,%2"		"\n"
2194 	 "	mov.b	%1,@%0"		"\n"
2195 	 "1:	mov	r1,r15"		"\n"
2198   [(set_attr "length" "16")])
;; Test-and-set under the thread-control-block (tcb) software-atomic model.
;; A restart address is written to a GBR-relative TCB slot (operand 2 is
;; the gbr displacement, emitted as @(%O2,gbr)) before the read-modify-write
;; and the slot is rewritten at label 1: after it -- presumably to let the
;; kernel detect and restart an interrupted sequence (confirm against the
;; model description at the top of this file).  r0/r1 are clobbered as
;; protocol registers; the old byte goes into scratch operand 3.
;; NOTE(review): lossy extraction -- orig. lines 2203, 2211, 2214, 2217,
;; 2220-2221 are missing (including the eq's (const_int 0) operand);
;; verify against upstream.
2200 (define_insn "atomic_test_and_set_soft_tcb"
2201   [(set (reg:SI T_REG)
2202 	(eq:SI (mem:QI (match_operand:SI 0 "register_operand" "r"))
2204    (set (mem:QI (match_dup 0))
2205 	(unspec:QI [(match_operand:QI 1 "register_operand" "r")] UNSPEC_ATOMIC))
2206    (use (match_operand:SI 2 "gbr_displacement"))
2207    (clobber (match_scratch:QI 3 "=&r"))
2208    (clobber (reg:SI R0_REG))
2209    (clobber (reg:SI R1_REG))]
2210   "TARGET_ATOMIC_SOFT_TCB && !TARGET_ENABLE_TAS"
2212   return "\r	mova	1f,r0"		"\n"
2213 	 "	mov	#(0f-1f),r1"	"\n"
2215 	 "	mov.l	r0,@(%O2,gbr)"	"\n"
2216 	 "0:	mov.b	@%0,%3"		"\n"
2218 	 "	mov.b	%1,@%0"		"\n"
2219 	 "1:	mov.l	r0,@(%O2,gbr)"	"\n"
2222   [(set_attr "length" "18")])
;; Test-and-set under the interrupt-mask ('imask') software-atomic model:
;; save SR in r0 ("stc sr,r0"), perform the byte load (into r0, which
;; serves as both SR save and comparison value here) and store, with
;; interrupts presumably masked in between -- the ldc/SR-restore lines are
;; not visible in this extraction.  r0 plus one SI scratch are clobbered.
;; NOTE(review): lossy extraction -- orig. lines 2227, 2233, 2235-2237,
;; 2240-2242 are missing (including the eq's (const_int 0) operand);
;; verify against upstream.
2224 (define_insn "atomic_test_and_set_soft_imask"
2225   [(set (reg:SI T_REG)
2226 	(eq:SI (mem:QI (match_operand:SI 0 "register_operand" "r"))
2228    (set (mem:QI (match_dup 0))
2229 	(unspec:QI [(match_operand:QI 1 "register_operand" "r")] UNSPEC_ATOMIC))
2230    (clobber (match_scratch:SI 2 "=&r"))
2231    (clobber (reg:SI R0_REG))]
2232   "TARGET_ATOMIC_SOFT_IMASK && !TARGET_ENABLE_TAS"
2234   return "\r	stc	sr,r0"		"\n"
2238 	 "	mov.b	@%0,r0"		"\n"
2239 	 "	mov.b	%1,@%0"		"\n"
2243   [(set_attr "length" "16")])
;; Test-and-set using SH4A hardware atomics (movli.l / movco.l).  Since
;; the LL/SC pair works on 32-bit words only, the byte address is aligned
;; down to its containing word ("mov #-4,%2" starts the mask; the and that
;; applies it is among the lines lost from this extraction) and the
;; movli.l/movco.l reservation brackets the byte-granular load/store,
;; with the loaded word parked on the stack (@-r15 / @r15+) around the
;; byte update.  Retries on reservation failure are presumably in the
;; missing tail of the template.  r0 is required by movli.l/movco.l;
;; scratch 4 is tied to operand 0 ("=0").
;; NOTE(review): lossy extraction -- orig. lines 2248, 2256, 2258-2261,
;; 2268-2270 are missing (including the eq's (const_int 0) operand and
;; the retry branch); verify against upstream.
2245 (define_insn "atomic_test_and_set_hard"
2246   [(set (reg:SI T_REG)
2247 	(eq:SI (mem:QI (match_operand:SI 0 "register_operand" "r"))
2249    (set (mem:QI (match_dup 0))
2250 	(unspec:QI [(match_operand:QI 1 "register_operand" "r")] UNSPEC_ATOMIC))
2251    (clobber (reg:SI R0_REG))
2252    (clobber (match_scratch:SI 2 "=&r"))
2253    (clobber (match_scratch:SI 3 "=&r"))
2254    (clobber (match_scratch:SI 4 "=0"))]
2255   "TARGET_ATOMIC_HARD_LLCS && !TARGET_ENABLE_TAS"
2257   return "\r	mov	#-4,%2"		"\n"
2262 	 "0:	movli.l	@%2,r0"		"\n"
2263 	 "	mov.l	r0,@-r15"	"\n"
2264 	 "	mov.b	@%0,%3"		"\n"
2265 	 "	mov.b	%1,@%0"		"\n"
2266 	 "	mov.l	@r15+,r0"	"\n"
2267 	 "	movco.l	r0,@%2"		"\n"
2271   [(set_attr "length" "26")])