;; Machine description for RISC-V atomic operations.
;; Copyright (C) 2011-2020 Free Software Foundation, Inc.
;; Contributed by Andrew Waterman (andrew@sifive.com).
;; Based on MIPS target for GNU compiler.

;; This file is part of GCC.

;; GCC is free software; you can redistribute it and/or modify
;; it under the terms of the GNU General Public License as published by
;; the Free Software Foundation; either version 3, or (at your option)
;; any later version.

;; GCC is distributed in the hope that it will be useful,
;; but WITHOUT ANY WARRANTY; without even the implied warranty of
;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
;; GNU General Public License for more details.

;; You should have received a copy of the GNU General Public License
;; along with GCC; see the file COPYING3.  If not see
;; <http://www.gnu.org/licenses/>.

(define_c_enum "unspec" [
  UNSPEC_COMPARE_AND_SWAP
  UNSPEC_SYNC_OLD_OP
  UNSPEC_SYNC_EXCHANGE
  UNSPEC_ATOMIC_STORE
  UNSPEC_MEMORY_BARRIER
])

(define_code_iterator any_atomic [plus ior xor and])
(define_code_attr atomic_optab
  [(plus "add") (ior "or") (xor "xor") (and "and")])

;; Memory barriers.

(define_expand "mem_thread_fence"
  [(match_operand:SI 0 "const_int_operand" "")] ;; model
  ""
{
  if (INTVAL (operands[0]) != MEMMODEL_RELAXED)
    {
      rtx mem = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
      MEM_VOLATILE_P (mem) = 1;
      emit_insn (gen_mem_thread_fence_1 (mem, operands[0]));
    }
  DONE;
})

;; Until the RISC-V memory model (hence its mapping from C++) is finalized,
;; conservatively emit a full FENCE.
(define_insn "mem_thread_fence_1"
  [(set (match_operand:BLK 0 "" "")
        (unspec:BLK [(match_dup 0)] UNSPEC_MEMORY_BARRIER))
   (match_operand:SI 1 "const_int_operand" "")] ;; model
  ""
  "fence\tiorw,iorw")

;; Atomic memory operations.

;; Implement atomic stores with amoswap.  Fall back to fences for atomic loads.
(define_insn "atomic_store<mode>"
  [(set (match_operand:GPR 0 "memory_operand" "=A")
    (unspec_volatile:GPR
      [(match_operand:GPR 1 "reg_or_0_operand" "rJ")
       (match_operand:SI 2 "const_int_operand")]  ;; model
      UNSPEC_ATOMIC_STORE))]
  "TARGET_ATOMIC"
  "%F2amoswap.<amo>%A2 zero,%z1,%0"
  [(set (attr "length") (const_int 8))])
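
;; For illustration: a relaxed atomic store such as
;;   __atomic_store_n (&x, v, __ATOMIC_RELAXED);   /* int x */
;; matches atomic_store above and assembles to a bare
;;   amoswap.w zero,a1,(a0)
;; Stronger models add a leading fence via %F2 and an ordering suffix via %A2
;; (both handled by riscv_print_operand in riscv.c); the length of 8 budgets
;; for that optional fence.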

(define_insn "atomic_<atomic_optab><mode>"
  [(set (match_operand:GPR 0 "memory_operand" "+A")
        (unspec_volatile:GPR
          [(any_atomic:GPR (match_dup 0)
                           (match_operand:GPR 1 "reg_or_0_operand" "rJ"))
           (match_operand:SI 2 "const_int_operand")] ;; model
         UNSPEC_SYNC_OLD_OP))]
  "TARGET_ATOMIC"
  "%F2amo<insn>.<amo>%A2 zero,%z1,%0"
  [(set (attr "length") (const_int 8))])
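
;; For illustration: this pattern covers the case where the old value is not
;; needed, e.g.
;;   __atomic_fetch_or (&flags, 0x4, __ATOMIC_RELAXED);   /* result unused */
;; which can assemble to
;;   amoor.w zero,a1,(a0)
;; writing the old value to the zero register instead of tying up a scratch.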

(define_insn "atomic_fetch_<atomic_optab><mode>"
  [(set (match_operand:GPR 0 "register_operand" "=&r")
        (match_operand:GPR 1 "memory_operand" "+A"))
   (set (match_dup 1)
        (unspec_volatile:GPR
          [(any_atomic:GPR (match_dup 1)
                           (match_operand:GPR 2 "reg_or_0_operand" "rJ"))
           (match_operand:SI 3 "const_int_operand")] ;; model
         UNSPEC_SYNC_OLD_OP))]
  "TARGET_ATOMIC"
  "%F3amo<insn>.<amo>%A3 %0,%z2,%1"
  [(set (attr "length") (const_int 8))])
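
;; For illustration: when the old value is live, e.g.
;;   int old = __atomic_fetch_add (&counter, 1, __ATOMIC_RELAXED);
;; the fetch variant above is used instead and can assemble to
;;   amoadd.w a0,a1,(a2)
;; with the previous memory contents landing in the early-clobbered
;; output register ("=&r").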

(define_insn "atomic_exchange<mode>"
  [(set (match_operand:GPR 0 "register_operand" "=&r")
        (unspec_volatile:GPR
          [(match_operand:GPR 1 "memory_operand" "+A")
           (match_operand:SI 3 "const_int_operand")] ;; model
          UNSPEC_SYNC_EXCHANGE))
   (set (match_dup 1)
        (match_operand:GPR 2 "register_operand" "0"))]
  "TARGET_ATOMIC"
  "%F3amoswap.<amo>%A3 %0,%z2,%1"
  [(set (attr "length") (const_int 8))])
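
;; For illustration:
;;   int prev = __atomic_exchange_n (&lock, 1, __ATOMIC_ACQUIRE);
;; matches atomic_exchange above and can assemble to
;;   amoswap.w.aq a0,a1,(a2)
;; with the ".aq" suffix supplied by the %A3 modifier for the acquire model.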

(define_insn "atomic_cas_value_strong<mode>"
  [(set (match_operand:GPR 0 "register_operand" "=&r")
        (match_operand:GPR 1 "memory_operand" "+A"))
   (set (match_dup 1)
        (unspec_volatile:GPR [(match_operand:GPR 2 "reg_or_0_operand" "rJ")
                              (match_operand:GPR 3 "reg_or_0_operand" "rJ")
                              (match_operand:SI 4 "const_int_operand")  ;; mod_s
                              (match_operand:SI 5 "const_int_operand")] ;; mod_f
         UNSPEC_COMPARE_AND_SWAP))
   (clobber (match_scratch:GPR 6 "=&r"))]
  "TARGET_ATOMIC"
  "%F5 1: lr.<amo>%A5 %0,%1; bne %0,%z2,1f; sc.<amo>%A4 %6,%z3,%1; bnez %6,1b; 1:"
  [(set (attr "length") (const_int 20))])
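
;; For illustration, the template above is an LR/SC retry loop along these
;; lines (ordering suffixes from %A4/%A5 omitted):
;;   1: lr.w  a0,(a2)      # load-reserve the current value
;;      bne   a0,a1,1f     # differs from expected -> CAS failed
;;      sc.w  t0,a3,(a2)   # try to store the desired value
;;      bnez  t0,1b        # reservation lost -> retry
;;   1:
;; four instructions plus the optional leading fence from %F5, hence the
;; length of 20.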

(define_expand "atomic_compare_and_swap<mode>"
  [(match_operand:SI 0 "register_operand" "")   ;; bool output
   (match_operand:GPR 1 "register_operand" "")  ;; val output
   (match_operand:GPR 2 "memory_operand" "")    ;; memory
   (match_operand:GPR 3 "reg_or_0_operand" "")  ;; expected value
   (match_operand:GPR 4 "reg_or_0_operand" "")  ;; desired value
   (match_operand:SI 5 "const_int_operand" "")  ;; is_weak
   (match_operand:SI 6 "const_int_operand" "")  ;; mod_s
   (match_operand:SI 7 "const_int_operand" "")] ;; mod_f
  "TARGET_ATOMIC"
{
  emit_insn (gen_atomic_cas_value_strong<mode> (operands[1], operands[2],
                                                operands[3], operands[4],
                                                operands[6], operands[7]));

  rtx compare = operands[1];
  if (operands[3] != const0_rtx)
    {
      rtx difference = gen_rtx_MINUS (<MODE>mode, operands[1], operands[3]);
      compare = gen_reg_rtx (<MODE>mode);
      emit_insn (gen_rtx_SET (compare, difference));
    }

  if (word_mode != <MODE>mode)
    {
      rtx reg = gen_reg_rtx (word_mode);
      emit_insn (gen_rtx_SET (reg, gen_rtx_SIGN_EXTEND (word_mode, compare)));
      compare = reg;
    }

  emit_insn (gen_rtx_SET (operands[0], gen_rtx_EQ (SImode, compare, const0_rtx)));
  DONE;
})
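
;; For illustration: a call such as
;;   int expected = e;
;;   _Bool ok = __atomic_compare_exchange_n (&x, &expected, desired, 0,
;;                                           __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
;; goes through this expander.  The strong CAS above leaves the old value in
;; operands[1]; the boolean output is then computed roughly as
;;   ok = ((old - expected) == 0);
;; with the subtraction skipped when the expected value is literal zero, and a
;; sign-extension inserted first when <MODE> is narrower than word_mode
;; (SImode on RV64).  The is_weak flag is ignored; a strong CAS is always
;; emitted.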

(define_expand "atomic_test_and_set"
  [(match_operand:QI 0 "register_operand" "")   ;; bool output
   (match_operand:QI 1 "memory_operand" "+A")   ;; memory
   (match_operand:SI 2 "const_int_operand" "")] ;; model
  "TARGET_ATOMIC"
{
  /* We have no QImode atomics, so use the address LSBs to form a mask,
     then use an aligned SImode atomic.  */
  rtx result = operands[0];
  rtx mem = operands[1];
  rtx model = operands[2];
  rtx addr = force_reg (Pmode, XEXP (mem, 0));

  rtx aligned_addr = gen_reg_rtx (Pmode);
  emit_move_insn (aligned_addr, gen_rtx_AND (Pmode, addr, GEN_INT (-4)));

  rtx aligned_mem = change_address (mem, SImode, aligned_addr);
  set_mem_alias_set (aligned_mem, 0);

  rtx offset = gen_reg_rtx (SImode);
  emit_move_insn (offset, gen_rtx_AND (SImode, gen_lowpart (SImode, addr),
                                       GEN_INT (3)));

  rtx tmp = gen_reg_rtx (SImode);
  emit_move_insn (tmp, GEN_INT (1));

  rtx shmt = gen_reg_rtx (SImode);
  emit_move_insn (shmt, gen_rtx_ASHIFT (SImode, offset, GEN_INT (3)));

  rtx word = gen_reg_rtx (SImode);
  emit_move_insn (word, gen_rtx_ASHIFT (SImode, tmp,
                                        gen_lowpart (QImode, shmt)));

  tmp = gen_reg_rtx (SImode);
  emit_insn (gen_atomic_fetch_orsi (tmp, aligned_mem, word, model));

  emit_move_insn (gen_lowpart (SImode, result),
                  gen_rtx_LSHIFTRT (SImode, tmp,
                                    gen_lowpart (QImode, shmt)));
  DONE;
})
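
;; For illustration, the RTL built above behaves like this C sketch (the
;; helper name is hypothetical and the memory model is shown as seq_cst;
;; the expander forwards whatever model the caller passed):
;;   #include <stdint.h>
;;   unsigned char
;;   test_and_set_byte (unsigned char *p)
;;   {
;;     uintptr_t addr = (uintptr_t) p;
;;     unsigned int *aligned = (unsigned int *) (addr & -4);
;;     unsigned int shift = (addr & 3) * 8;       /* bit offset of the byte */
;;     unsigned int old = __atomic_fetch_or (aligned, 1u << shift,
;;                                           __ATOMIC_SEQ_CST);
;;     return (unsigned char) (old >> shift);     /* previous byte value */
;;   }
;; i.e. a 1 is OR-ed into the addressed byte through an aligned SImode amoor
;; and the old byte is recovered by shifting the fetched word back down.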