;; Matrix-Multiply Assist (MMA) patterns.
;; Copyright (C) 2020 Free Software Foundation, Inc.
;; Contributed by Peter Bergner <bergner@linux.ibm.com> and
;; Michael Meissner <meissner@linux.ibm.com>
;;
;; This file is part of GCC.
;;
;; GCC is free software; you can redistribute it and/or modify it
;; under the terms of the GNU General Public License as published
;; by the Free Software Foundation; either version 3, or (at your
;; option) any later version.
;;
;; GCC is distributed in the hope that it will be useful, but WITHOUT
;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
;; or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
;; License for more details.
;;
;; You should have received a copy of the GNU General Public License
;; along with GCC; see the file COPYING3.  If not see
;; <http://www.gnu.org/licenses/>.

;; The MMA patterns use the multi-register XOmode and OOmode opaque
;; modes to implement the target specific __vector_quad and
;; __vector_pair types that the MMA built-in functions reference.  We
;; use OPAQUE_MODE to prevent anything from trying to open them up.
;; Maximum number of operands any MMA built-in pattern uses.
(define_constants [(MAX_MMA_OPERANDS 7)])

;; Constants for creating unspecs

(define_c_enum "unspec"
  [UNSPEC_MMA_ASSEMBLE
   UNSPEC_MMA_EXTRACT
   UNSPEC_MMA_PMXVBF16GER2
   UNSPEC_MMA_PMXVBF16GER2NN
   UNSPEC_MMA_PMXVBF16GER2NP
   UNSPEC_MMA_PMXVBF16GER2PN
   UNSPEC_MMA_PMXVBF16GER2PP
   UNSPEC_MMA_PMXVF16GER2
   UNSPEC_MMA_PMXVF16GER2NN
   UNSPEC_MMA_PMXVF16GER2NP
   UNSPEC_MMA_PMXVF16GER2PN
   UNSPEC_MMA_PMXVF16GER2PP
   UNSPEC_MMA_PMXVF32GER
   UNSPEC_MMA_PMXVF32GERNN
   UNSPEC_MMA_PMXVF32GERNP
   UNSPEC_MMA_PMXVF32GERPN
   UNSPEC_MMA_PMXVF32GERPP
   UNSPEC_MMA_PMXVF64GER
   UNSPEC_MMA_PMXVF64GERNN
   UNSPEC_MMA_PMXVF64GERNP
   UNSPEC_MMA_PMXVF64GERPN
   UNSPEC_MMA_PMXVF64GERPP
   UNSPEC_MMA_PMXVI16GER2
   UNSPEC_MMA_PMXVI16GER2PP
   UNSPEC_MMA_PMXVI16GER2S
   UNSPEC_MMA_PMXVI16GER2SPP
   UNSPEC_MMA_PMXVI4GER8
   UNSPEC_MMA_PMXVI4GER8PP
   UNSPEC_MMA_PMXVI8GER4
   UNSPEC_MMA_PMXVI8GER4PP
   UNSPEC_MMA_PMXVI8GER4SPP
   UNSPEC_MMA_XVBF16GER2
   UNSPEC_MMA_XVBF16GER2NN
   UNSPEC_MMA_XVBF16GER2NP
   UNSPEC_MMA_XVBF16GER2PN
   UNSPEC_MMA_XVBF16GER2PP
   UNSPEC_MMA_XVF16GER2
   UNSPEC_MMA_XVF16GER2NN
   UNSPEC_MMA_XVF16GER2NP
   UNSPEC_MMA_XVF16GER2PN
   UNSPEC_MMA_XVF16GER2PP
   UNSPEC_MMA_XVF32GER
   UNSPEC_MMA_XVF32GERNN
   UNSPEC_MMA_XVF32GERNP
   UNSPEC_MMA_XVF32GERPN
   UNSPEC_MMA_XVF32GERPP
   UNSPEC_MMA_XVF64GER
   UNSPEC_MMA_XVF64GERNN
   UNSPEC_MMA_XVF64GERNP
   UNSPEC_MMA_XVF64GERPN
   UNSPEC_MMA_XVF64GERPP
   UNSPEC_MMA_XVI16GER2
   UNSPEC_MMA_XVI16GER2PP
   UNSPEC_MMA_XVI16GER2S
   UNSPEC_MMA_XVI16GER2SPP
   UNSPEC_MMA_XVI4GER8
   UNSPEC_MMA_XVI4GER8PP
   UNSPEC_MMA_XVI8GER4
   UNSPEC_MMA_XVI8GER4PP
   UNSPEC_MMA_XVI8GER4SPP
   UNSPEC_MMA_XXMFACC
   UNSPEC_MMA_XXMTACC
   UNSPEC_MMA_XXSETACCZ
  ])
;; Int iterators grouping the non-prefixed MMA unspecs by the operand
;; shapes their define_insn patterns share.

;; MMA instructions with 1 accumulator argument
(define_int_iterator MMA_ACC [UNSPEC_MMA_XXMFACC
			      UNSPEC_MMA_XXMTACC])

;; MMA instructions with 2 vector arguments
(define_int_iterator MMA_VV [UNSPEC_MMA_XVI4GER8
			     UNSPEC_MMA_XVI8GER4
			     UNSPEC_MMA_XVI16GER2
			     UNSPEC_MMA_XVI16GER2S
			     UNSPEC_MMA_XVF16GER2
			     UNSPEC_MMA_XVBF16GER2
			     UNSPEC_MMA_XVF32GER])

;; MMA instructions with 1 accumulator and 2 vector arguments
(define_int_iterator MMA_AVV [UNSPEC_MMA_XVI4GER8PP
			      UNSPEC_MMA_XVI8GER4PP
			      UNSPEC_MMA_XVI8GER4SPP
			      UNSPEC_MMA_XVI16GER2PP
			      UNSPEC_MMA_XVI16GER2SPP
			      UNSPEC_MMA_XVF16GER2PP
			      UNSPEC_MMA_XVF16GER2PN
			      UNSPEC_MMA_XVF16GER2NP
			      UNSPEC_MMA_XVF16GER2NN
			      UNSPEC_MMA_XVBF16GER2PP
			      UNSPEC_MMA_XVBF16GER2PN
			      UNSPEC_MMA_XVBF16GER2NP
			      UNSPEC_MMA_XVBF16GER2NN
			      UNSPEC_MMA_XVF32GERPP
			      UNSPEC_MMA_XVF32GERPN
			      UNSPEC_MMA_XVF32GERNP
			      UNSPEC_MMA_XVF32GERNN])

;; MMA instructions with 1 vector pair and 1 vector arguments
(define_int_iterator MMA_PV [UNSPEC_MMA_XVF64GER])

;; MMA instructions with 1 accumulator, 1 vector pair and 1 vector arguments
(define_int_iterator MMA_APV [UNSPEC_MMA_XVF64GERPP
			      UNSPEC_MMA_XVF64GERPN
			      UNSPEC_MMA_XVF64GERNP
			      UNSPEC_MMA_XVF64GERNN])
;; Int iterators for the prefixed-masked (pmxv*) MMA unspecs, grouped by
;; the number and width of the immediate mask operands they take.

;; MMA instructions with 2 vector, 2 4-bit and 1 8-bit arguments
(define_int_iterator MMA_VVI4I4I8 [UNSPEC_MMA_PMXVI4GER8])

;; MMA instructions with 1 accumulator, 2 vector, 2 4-bit and 1 8-bit arguments
(define_int_iterator MMA_AVVI4I4I8 [UNSPEC_MMA_PMXVI4GER8PP])

;; MMA instructions with 2 vector, 2 4-bit and 1 2-bit arguments
(define_int_iterator MMA_VVI4I4I2 [UNSPEC_MMA_PMXVI16GER2
				   UNSPEC_MMA_PMXVI16GER2S
				   UNSPEC_MMA_PMXVF16GER2
				   UNSPEC_MMA_PMXVBF16GER2])

;; MMA instructions with 1 accumulator, 2 vector, 2 4-bit and 1 2-bit arguments
(define_int_iterator MMA_AVVI4I4I2 [UNSPEC_MMA_PMXVI16GER2PP
				    UNSPEC_MMA_PMXVI16GER2SPP
				    UNSPEC_MMA_PMXVF16GER2PP
				    UNSPEC_MMA_PMXVF16GER2PN
				    UNSPEC_MMA_PMXVF16GER2NP
				    UNSPEC_MMA_PMXVF16GER2NN
				    UNSPEC_MMA_PMXVBF16GER2PP
				    UNSPEC_MMA_PMXVBF16GER2PN
				    UNSPEC_MMA_PMXVBF16GER2NP
				    UNSPEC_MMA_PMXVBF16GER2NN])

;; MMA instructions with 2 vector and 2 4-bit arguments
(define_int_iterator MMA_VVI4I4 [UNSPEC_MMA_PMXVF32GER])

;; MMA instructions with 1 accumulator, 2 vector and 2 4-bit arguments
(define_int_iterator MMA_AVVI4I4 [UNSPEC_MMA_PMXVF32GERPP
				  UNSPEC_MMA_PMXVF32GERPN
				  UNSPEC_MMA_PMXVF32GERNP
				  UNSPEC_MMA_PMXVF32GERNN])

;; MMA instructions with 2 vector, 1 4-bit and 1 2-bit arguments
(define_int_iterator MMA_PVI4I2 [UNSPEC_MMA_PMXVF64GER])

;; MMA instructions with 1 accumulator, 2 vector, 1 4-bit and 1 2-bit arguments
(define_int_iterator MMA_APVI4I2 [UNSPEC_MMA_PMXVF64GERPP
				  UNSPEC_MMA_PMXVF64GERPN
				  UNSPEC_MMA_PMXVF64GERNP
				  UNSPEC_MMA_PMXVF64GERNN])

;; MMA instructions with 2 vector and 3 4-bit arguments
(define_int_iterator MMA_VVI4I4I4 [UNSPEC_MMA_PMXVI8GER4])

;; MMA instructions with 1 accumulator, 2 vector and 3 4-bit arguments
(define_int_iterator MMA_AVVI4I4I4 [UNSPEC_MMA_PMXVI8GER4PP
				    UNSPEC_MMA_PMXVI8GER4SPP])
;; Int attributes mapping each non-prefixed unspec to its assembler
;; mnemonic; used by the "mma_<...>" patterns below to build insn names
;; and output templates.

(define_int_attr acc [(UNSPEC_MMA_XXMFACC "xxmfacc")
		      (UNSPEC_MMA_XXMTACC "xxmtacc")])

(define_int_attr vv [(UNSPEC_MMA_XVI4GER8 "xvi4ger8")
		     (UNSPEC_MMA_XVI8GER4 "xvi8ger4")
		     (UNSPEC_MMA_XVI16GER2 "xvi16ger2")
		     (UNSPEC_MMA_XVI16GER2S "xvi16ger2s")
		     (UNSPEC_MMA_XVF16GER2 "xvf16ger2")
		     (UNSPEC_MMA_XVBF16GER2 "xvbf16ger2")
		     (UNSPEC_MMA_XVF32GER "xvf32ger")])

(define_int_attr avv [(UNSPEC_MMA_XVI4GER8PP "xvi4ger8pp")
		      (UNSPEC_MMA_XVI8GER4PP "xvi8ger4pp")
		      (UNSPEC_MMA_XVI8GER4SPP "xvi8ger4spp")
		      (UNSPEC_MMA_XVI16GER2PP "xvi16ger2pp")
		      (UNSPEC_MMA_XVI16GER2SPP "xvi16ger2spp")
		      (UNSPEC_MMA_XVF16GER2PP "xvf16ger2pp")
		      (UNSPEC_MMA_XVF16GER2PN "xvf16ger2pn")
		      (UNSPEC_MMA_XVF16GER2NP "xvf16ger2np")
		      (UNSPEC_MMA_XVF16GER2NN "xvf16ger2nn")
		      (UNSPEC_MMA_XVBF16GER2PP "xvbf16ger2pp")
		      (UNSPEC_MMA_XVBF16GER2PN "xvbf16ger2pn")
		      (UNSPEC_MMA_XVBF16GER2NP "xvbf16ger2np")
		      (UNSPEC_MMA_XVBF16GER2NN "xvbf16ger2nn")
		      (UNSPEC_MMA_XVF32GERPP "xvf32gerpp")
		      (UNSPEC_MMA_XVF32GERPN "xvf32gerpn")
		      (UNSPEC_MMA_XVF32GERNP "xvf32gernp")
		      (UNSPEC_MMA_XVF32GERNN "xvf32gernn")])

(define_int_attr pv [(UNSPEC_MMA_XVF64GER "xvf64ger")])

(define_int_attr apv [(UNSPEC_MMA_XVF64GERPP "xvf64gerpp")
		      (UNSPEC_MMA_XVF64GERPN "xvf64gerpn")
		      (UNSPEC_MMA_XVF64GERNP "xvf64gernp")
		      (UNSPEC_MMA_XVF64GERNN "xvf64gernn")])
;; Int attributes mapping each prefixed-masked unspec to its assembler
;; mnemonic (pmxv* forms).

(define_int_attr vvi4i4i8 [(UNSPEC_MMA_PMXVI4GER8 "pmxvi4ger8")])

(define_int_attr avvi4i4i8 [(UNSPEC_MMA_PMXVI4GER8PP "pmxvi4ger8pp")])

(define_int_attr vvi4i4i2 [(UNSPEC_MMA_PMXVI16GER2 "pmxvi16ger2")
			   (UNSPEC_MMA_PMXVI16GER2S "pmxvi16ger2s")
			   (UNSPEC_MMA_PMXVF16GER2 "pmxvf16ger2")
			   (UNSPEC_MMA_PMXVBF16GER2 "pmxvbf16ger2")])

(define_int_attr avvi4i4i2 [(UNSPEC_MMA_PMXVI16GER2PP "pmxvi16ger2pp")
			    (UNSPEC_MMA_PMXVI16GER2SPP "pmxvi16ger2spp")
			    (UNSPEC_MMA_PMXVF16GER2PP "pmxvf16ger2pp")
			    (UNSPEC_MMA_PMXVF16GER2PN "pmxvf16ger2pn")
			    (UNSPEC_MMA_PMXVF16GER2NP "pmxvf16ger2np")
			    (UNSPEC_MMA_PMXVF16GER2NN "pmxvf16ger2nn")
			    (UNSPEC_MMA_PMXVBF16GER2PP "pmxvbf16ger2pp")
			    (UNSPEC_MMA_PMXVBF16GER2PN "pmxvbf16ger2pn")
			    (UNSPEC_MMA_PMXVBF16GER2NP "pmxvbf16ger2np")
			    (UNSPEC_MMA_PMXVBF16GER2NN "pmxvbf16ger2nn")])

(define_int_attr vvi4i4 [(UNSPEC_MMA_PMXVF32GER "pmxvf32ger")])

(define_int_attr avvi4i4 [(UNSPEC_MMA_PMXVF32GERPP "pmxvf32gerpp")
			  (UNSPEC_MMA_PMXVF32GERPN "pmxvf32gerpn")
			  (UNSPEC_MMA_PMXVF32GERNP "pmxvf32gernp")
			  (UNSPEC_MMA_PMXVF32GERNN "pmxvf32gernn")])

(define_int_attr pvi4i2 [(UNSPEC_MMA_PMXVF64GER "pmxvf64ger")])

(define_int_attr apvi4i2 [(UNSPEC_MMA_PMXVF64GERPP "pmxvf64gerpp")
			  (UNSPEC_MMA_PMXVF64GERPN "pmxvf64gerpn")
			  (UNSPEC_MMA_PMXVF64GERNP "pmxvf64gernp")
			  (UNSPEC_MMA_PMXVF64GERNN "pmxvf64gernn")])

(define_int_attr vvi4i4i4 [(UNSPEC_MMA_PMXVI8GER4 "pmxvi8ger4")])

(define_int_attr avvi4i4i4 [(UNSPEC_MMA_PMXVI8GER4PP "pmxvi8ger4pp")
			    (UNSPEC_MMA_PMXVI8GER4SPP "pmxvi8ger4spp")])
262 | ||
;; Vector pair support.  OOmode can only live in VSRs.
(define_expand "movoo"
  [(set (match_operand:OO 0 "nonimmediate_operand")
	(match_operand:OO 1 "input_operand"))]
  "TARGET_MMA"
{
  rs6000_emit_move (operands[0], operands[1], OOmode);
  DONE;
})

;; Load/store a vector pair with lxvp/stxvp; a register-to-register copy
;; has no single instruction, so it is split after reload into two
;; VSX moves by rs6000_split_multireg_move.
(define_insn_and_split "*movoo"
  [(set (match_operand:OO 0 "nonimmediate_operand" "=wa,m,wa")
	(match_operand:OO 1 "input_operand" "m,wa,wa"))]
  "TARGET_MMA
   && (gpc_reg_operand (operands[0], OOmode)
       || gpc_reg_operand (operands[1], OOmode))"
  "@
   lxvp%X1 %x0,%1
   stxvp%X0 %x1,%0
   #"
  "&& reload_completed
   && (!MEM_P (operands[0]) && !MEM_P (operands[1]))"
  [(const_int 0)]
{
  rs6000_split_multireg_move (operands[0], operands[1]);
  DONE;
}
  [(set_attr "type" "vecload,vecstore,veclogical")
   (set_attr "length" "*,*,8")])
292 | ||
293 | \f | |
;; Vector quad support.  XOmode can only live in FPRs.
(define_expand "movxo"
  [(set (match_operand:XO 0 "nonimmediate_operand")
	(match_operand:XO 1 "input_operand"))]
  "TARGET_MMA"
{
  rs6000_emit_move (operands[0], operands[1], XOmode);
  DONE;
})

;; There is no single quad-width load/store/copy instruction, so all
;; three alternatives emit "#" and are split after reload into multiple
;; moves by rs6000_split_multireg_move.
(define_insn_and_split "*movxo"
  [(set (match_operand:XO 0 "nonimmediate_operand" "=d,m,d")
	(match_operand:XO 1 "input_operand" "m,d,d"))]
  "TARGET_MMA
   && (gpc_reg_operand (operands[0], XOmode)
       || gpc_reg_operand (operands[1], XOmode))"
  "@
   #
   #
   #"
  "&& reload_completed"
  [(const_int 0)]
{
  rs6000_split_multireg_move (operands[0], operands[1]);
  DONE;
}
  [(set_attr "type" "vecload,vecstore,veclogical")
   (set_attr "length" "8,8,16")
   (set_attr "max_prefixed_insns" "2,2,*")])
8ee2640b PB |
323 | |
;; Build a __vector_pair (OOmode) from two V16QI inputs by wrapping them
;; in UNSPEC_MMA_ASSEMBLE; the matching insn below splits it into moves.
(define_expand "mma_assemble_pair"
  [(match_operand:OO 0 "vsx_register_operand")
   (match_operand:V16QI 1 "mma_assemble_input_operand")
   (match_operand:V16QI 2 "mma_assemble_input_operand")]
  "TARGET_MMA"
{
  rtx src = gen_rtx_UNSPEC (OOmode,
			    gen_rtvec (2, operands[1], operands[2]),
			    UNSPEC_MMA_ASSEMBLE);
  emit_move_insn (operands[0], src);
  DONE;
})
8ee2640b | 336 | |
;; After reload, lower the ASSEMBLE unspec into the individual register
;; moves that populate the two halves of the pair.
(define_insn_and_split "*mma_assemble_pair"
  [(set (match_operand:OO 0 "vsx_register_operand" "=wa")
	(unspec:OO [(match_operand:V16QI 1 "mma_assemble_input_operand" "mwa")
		    (match_operand:V16QI 2 "mma_assemble_input_operand" "mwa")]
		   UNSPEC_MMA_ASSEMBLE))]
  "TARGET_MMA"
  "#"
  "&& reload_completed"
  [(const_int 0)]
{
  rtx src = gen_rtx_UNSPEC (OOmode,
			    gen_rtvec (2, operands[1], operands[2]),
			    UNSPEC_MMA_ASSEMBLE);
  rs6000_split_multireg_move (operands[0], src);
  DONE;
})
353 | ||
;; Extract one V16QI element (index 0 or 1, operand 2) from a
;; __vector_pair via UNSPEC_MMA_EXTRACT.
(define_expand "mma_disassemble_pair"
  [(match_operand:V16QI 0 "mma_disassemble_output_operand")
   (match_operand:OO 1 "vsx_register_operand")
   (match_operand 2 "const_0_to_1_operand")]
  "TARGET_MMA"
{
  rtx src;
  int regoff = INTVAL (operands[2]);
  src = gen_rtx_UNSPEC (V16QImode,
			gen_rtvec (2, operands[1], GEN_INT (regoff)),
			UNSPEC_MMA_EXTRACT);
  emit_move_insn (operands[0], src);
  DONE;
})
8ee2640b | 368 | |
;; After reload the pair lives in consecutive VSX registers, so the
;; extract is just a move from register (base + offset).
(define_insn_and_split "*mma_disassemble_pair"
  [(set (match_operand:V16QI 0 "mma_disassemble_output_operand" "=mwa")
	(unspec:V16QI [(match_operand:OO 1 "vsx_register_operand" "wa")
		       (match_operand 2 "const_0_to_1_operand")]
		      UNSPEC_MMA_EXTRACT))]
  "TARGET_MMA
   && vsx_register_operand (operands[1], OOmode)"
  "#"
  "&& reload_completed"
  [(const_int 0)]
{
  int reg = REGNO (operands[1]);
  int regoff = INTVAL (operands[2]);
  rtx src = gen_rtx_REG (V16QImode, reg + regoff);
  emit_move_insn (operands[0], src);
  DONE;
})
386 | ||
;; Build a __vector_quad (XOmode) accumulator from four V16QI inputs by
;; wrapping them in UNSPEC_MMA_ASSEMBLE.
(define_expand "mma_assemble_acc"
  [(match_operand:XO 0 "fpr_reg_operand")
   (match_operand:V16QI 1 "mma_assemble_input_operand")
   (match_operand:V16QI 2 "mma_assemble_input_operand")
   (match_operand:V16QI 3 "mma_assemble_input_operand")
   (match_operand:V16QI 4 "mma_assemble_input_operand")]
  "TARGET_MMA"
{
  rtx src = gen_rtx_UNSPEC (XOmode,
			    gen_rtvec (4, operands[1], operands[2],
				       operands[3], operands[4]),
			    UNSPEC_MMA_ASSEMBLE);
  emit_move_insn (operands[0], src);
  DONE;
})
402 | ||
;; After reload, lower the four-input ASSEMBLE unspec into the moves
;; that populate the accumulator's four registers.
(define_insn_and_split "*mma_assemble_acc"
  [(set (match_operand:XO 0 "fpr_reg_operand" "=d")
	(unspec:XO [(match_operand:V16QI 1 "mma_assemble_input_operand" "mwa")
		    (match_operand:V16QI 2 "mma_assemble_input_operand" "mwa")
		    (match_operand:V16QI 3 "mma_assemble_input_operand" "mwa")
		    (match_operand:V16QI 4 "mma_assemble_input_operand" "mwa")]
		   UNSPEC_MMA_ASSEMBLE))]
  "TARGET_MMA
   && fpr_reg_operand (operands[0], XOmode)"
  "#"
  "&& reload_completed"
  [(const_int 0)]
{
  rtx src = gen_rtx_UNSPEC (XOmode,
			    gen_rtvec (4, operands[1], operands[2],
				       operands[3], operands[4]),
			    UNSPEC_MMA_ASSEMBLE);
  rs6000_split_multireg_move (operands[0], src);
  DONE;
})
423 | ||
;; Extract one V16QI element (index 0..3, operand 2) from a
;; __vector_quad accumulator via UNSPEC_MMA_EXTRACT.
(define_expand "mma_disassemble_acc"
  [(match_operand:V16QI 0 "mma_disassemble_output_operand")
   (match_operand:XO 1 "fpr_reg_operand")
   (match_operand 2 "const_0_to_3_operand")]
  "TARGET_MMA"
{
  rtx src;
  int regoff = INTVAL (operands[2]);
  src = gen_rtx_UNSPEC (V16QImode,
			gen_rtvec (2, operands[1], GEN_INT (regoff)),
			UNSPEC_MMA_EXTRACT);
  emit_move_insn (operands[0], src);
  DONE;
})
438 | ||
;; After reload the accumulator occupies consecutive FPRs, so the
;; extract is a move from register (base + offset).
(define_insn_and_split "*mma_disassemble_acc"
  [(set (match_operand:V16QI 0 "mma_disassemble_output_operand" "=mwa")
	(unspec:V16QI [(match_operand:XO 1 "fpr_reg_operand" "d")
		       (match_operand 2 "const_0_to_3_operand")]
		      UNSPEC_MMA_EXTRACT))]
  "TARGET_MMA
   && fpr_reg_operand (operands[1], XOmode)"
  "#"
  "&& reload_completed"
  [(const_int 0)]
{
  int reg = REGNO (operands[1]);
  int regoff = INTVAL (operands[2]);
  rtx src = gen_rtx_REG (V16QImode, reg + regoff);
  emit_move_insn (operands[0], src);
  DONE;
})
456 | ||
;; MMA instructions that do not use their accumulators as an input, still
;; must not allow their vector operands to overlap the registers used by
;; the accumulator.  We enforce this by marking the output as early clobber.

(define_insn "mma_<acc>"
  [(set (match_operand:XO 0 "fpr_reg_operand" "=&d")
	(unspec:XO [(match_operand:XO 1 "fpr_reg_operand" "0")]
		   MMA_ACC))]
  "TARGET_MMA"
  "<acc> %A0"
  [(set_attr "type" "mma")])
468 | ||
;; We can't have integer constants in XOmode so we wrap this in an UNSPEC.

(define_expand "mma_xxsetaccz"
  [(set (match_operand:XO 0 "fpr_reg_operand")
	(const_int 0))]
  "TARGET_MMA"
{
  rtx xo0 = gen_rtx_UNSPEC (XOmode, gen_rtvec (1, const0_rtx),
			    UNSPEC_MMA_XXSETACCZ);
  emit_insn (gen_rtx_SET (operands[0], xo0));
  DONE;
})
8ee2640b | 481 | |
;; The post-reload split re-emits the identical pattern (a self-split);
;; the instruction itself is always output as "xxsetaccz %A0".
(define_insn_and_split "*mma_xxsetaccz"
  [(set (match_operand:XO 0 "fpr_reg_operand" "=d")
	(unspec:XO [(match_operand 1 "const_0_to_1_operand" "O")]
		   UNSPEC_MMA_XXSETACCZ))]
  "TARGET_MMA"
  "xxsetaccz %A0"
  "&& reload_completed"
  [(set (match_dup 0) (unspec:XO [(match_dup 1)] UNSPEC_MMA_XXSETACCZ))]
  ""
  [(set_attr "type" "mma")
   (set_attr "length" "4")])
493 | ||
;; Non-prefixed GER (rank-k update) instructions.  Patterns whose
;; accumulator is an output only (<vv>, <pv>) early-clobber it so the
;; vector inputs cannot share its registers; the PP/PN/NP/NN forms
;; (<avv>, <apv>) also read the accumulator via the "0" tie.

(define_insn "mma_<vv>"
  [(set (match_operand:XO 0 "fpr_reg_operand" "=&d")
	(unspec:XO [(match_operand:V16QI 1 "vsx_register_operand" "wa")
		    (match_operand:V16QI 2 "vsx_register_operand" "wa")]
		   MMA_VV))]
  "TARGET_MMA"
  "<vv> %A0,%x1,%x2"
  [(set_attr "type" "mma")])

(define_insn "mma_<avv>"
  [(set (match_operand:XO 0 "fpr_reg_operand" "=&d")
	(unspec:XO [(match_operand:XO 1 "fpr_reg_operand" "0")
		    (match_operand:V16QI 2 "vsx_register_operand" "wa")
		    (match_operand:V16QI 3 "vsx_register_operand" "wa")]
		   MMA_AVV))]
  "TARGET_MMA"
  "<avv> %A0,%x2,%x3"
  [(set_attr "type" "mma")])

(define_insn "mma_<pv>"
  [(set (match_operand:XO 0 "fpr_reg_operand" "=&d")
	(unspec:XO [(match_operand:OO 1 "vsx_register_operand" "wa")
		    (match_operand:V16QI 2 "vsx_register_operand" "wa")]
		   MMA_PV))]
  "TARGET_MMA"
  "<pv> %A0,%x1,%x2"
  [(set_attr "type" "mma")])

(define_insn "mma_<apv>"
  [(set (match_operand:XO 0 "fpr_reg_operand" "=&d")
	(unspec:XO [(match_operand:XO 1 "fpr_reg_operand" "0")
		    (match_operand:OO 2 "vsx_register_operand" "wa")
		    (match_operand:V16QI 3 "vsx_register_operand" "wa")]
		   MMA_APV))]
  "TARGET_MMA"
  "<apv> %A0,%x2,%x3"
  [(set_attr "type" "mma")])
531 | ||
;; Prefixed-masked GER instructions taking two 4-bit masks plus an
;; 8-bit (i4ger8 family) or 2-bit (16-bit-element families) mask.
;; These are 8-byte prefixed instructions, hence length "8".

(define_insn "mma_<vvi4i4i8>"
  [(set (match_operand:XO 0 "fpr_reg_operand" "=&d")
	(unspec:XO [(match_operand:V16QI 1 "vsx_register_operand" "wa")
		    (match_operand:V16QI 2 "vsx_register_operand" "wa")
		    (match_operand:SI 3 "const_0_to_15_operand" "n")
		    (match_operand:SI 4 "const_0_to_15_operand" "n")
		    (match_operand:SI 5 "u8bit_cint_operand" "n")]
		   MMA_VVI4I4I8))]
  "TARGET_MMA"
  "<vvi4i4i8> %A0,%x1,%x2,%3,%4,%5"
  [(set_attr "type" "mma")
   (set_attr "length" "8")])

(define_insn "mma_<avvi4i4i8>"
  [(set (match_operand:XO 0 "fpr_reg_operand" "=&d")
	(unspec:XO [(match_operand:XO 1 "fpr_reg_operand" "0")
		    (match_operand:V16QI 2 "vsx_register_operand" "wa")
		    (match_operand:V16QI 3 "vsx_register_operand" "wa")
		    (match_operand:SI 4 "const_0_to_15_operand" "n")
		    (match_operand:SI 5 "const_0_to_15_operand" "n")
		    (match_operand:SI 6 "u8bit_cint_operand" "n")]
		   MMA_AVVI4I4I8))]
  "TARGET_MMA"
  "<avvi4i4i8> %A0,%x2,%x3,%4,%5,%6"
  [(set_attr "type" "mma")
   (set_attr "length" "8")])

(define_insn "mma_<vvi4i4i2>"
  [(set (match_operand:XO 0 "fpr_reg_operand" "=&d")
	(unspec:XO [(match_operand:V16QI 1 "vsx_register_operand" "wa")
		    (match_operand:V16QI 2 "vsx_register_operand" "wa")
		    (match_operand:SI 3 "const_0_to_15_operand" "n")
		    (match_operand:SI 4 "const_0_to_15_operand" "n")
		    (match_operand:SI 5 "const_0_to_3_operand" "n")]
		   MMA_VVI4I4I2))]
  "TARGET_MMA"
  "<vvi4i4i2> %A0,%x1,%x2,%3,%4,%5"
  [(set_attr "type" "mma")
   (set_attr "length" "8")])

(define_insn "mma_<avvi4i4i2>"
  [(set (match_operand:XO 0 "fpr_reg_operand" "=&d")
	(unspec:XO [(match_operand:XO 1 "fpr_reg_operand" "0")
		    (match_operand:V16QI 2 "vsx_register_operand" "wa")
		    (match_operand:V16QI 3 "vsx_register_operand" "wa")
		    (match_operand:SI 4 "const_0_to_15_operand" "n")
		    (match_operand:SI 5 "const_0_to_15_operand" "n")
		    (match_operand:SI 6 "const_0_to_3_operand" "n")]
		   MMA_AVVI4I4I2))]
  "TARGET_MMA"
  "<avvi4i4i2> %A0,%x2,%x3,%4,%5,%6"
  [(set_attr "type" "mma")
   (set_attr "length" "8")])
585 | ||
;; Prefixed-masked GER instructions for the f32 (two 4-bit masks) and
;; f64 vector-pair (one 4-bit plus one 2-bit mask) families.

(define_insn "mma_<vvi4i4>"
  [(set (match_operand:XO 0 "fpr_reg_operand" "=&d")
	(unspec:XO [(match_operand:V16QI 1 "vsx_register_operand" "wa")
		    (match_operand:V16QI 2 "vsx_register_operand" "wa")
		    (match_operand:SI 3 "const_0_to_15_operand" "n")
		    (match_operand:SI 4 "const_0_to_15_operand" "n")]
		   MMA_VVI4I4))]
  "TARGET_MMA"
  "<vvi4i4> %A0,%x1,%x2,%3,%4"
  [(set_attr "type" "mma")
   (set_attr "length" "8")])

(define_insn "mma_<avvi4i4>"
  [(set (match_operand:XO 0 "fpr_reg_operand" "=&d")
	(unspec:XO [(match_operand:XO 1 "fpr_reg_operand" "0")
		    (match_operand:V16QI 2 "vsx_register_operand" "wa")
		    (match_operand:V16QI 3 "vsx_register_operand" "wa")
		    (match_operand:SI 4 "const_0_to_15_operand" "n")
		    (match_operand:SI 5 "const_0_to_15_operand" "n")]
		   MMA_AVVI4I4))]
  "TARGET_MMA"
  "<avvi4i4> %A0,%x2,%x3,%4,%5"
  [(set_attr "type" "mma")
   (set_attr "length" "8")])

(define_insn "mma_<pvi4i2>"
  [(set (match_operand:XO 0 "fpr_reg_operand" "=&d")
	(unspec:XO [(match_operand:OO 1 "vsx_register_operand" "wa")
		    (match_operand:V16QI 2 "vsx_register_operand" "wa")
		    (match_operand:SI 3 "const_0_to_15_operand" "n")
		    (match_operand:SI 4 "const_0_to_3_operand" "n")]
		   MMA_PVI4I2))]
  "TARGET_MMA"
  "<pvi4i2> %A0,%x1,%x2,%3,%4"
  [(set_attr "type" "mma")
   (set_attr "length" "8")])

(define_insn "mma_<apvi4i2>"
  [(set (match_operand:XO 0 "fpr_reg_operand" "=&d")
	(unspec:XO [(match_operand:XO 1 "fpr_reg_operand" "0")
		    (match_operand:OO 2 "vsx_register_operand" "wa")
		    (match_operand:V16QI 3 "vsx_register_operand" "wa")
		    (match_operand:SI 4 "const_0_to_15_operand" "n")
		    (match_operand:SI 5 "const_0_to_3_operand" "n")]
		   MMA_APVI4I2))]
  "TARGET_MMA"
  "<apvi4i2> %A0,%x2,%x3,%4,%5"
  [(set_attr "type" "mma")
   (set_attr "length" "8")])
635 | ||
;; Prefixed-masked GER instructions taking three 4-bit masks
;; (i8ger4 family).

(define_insn "mma_<vvi4i4i4>"
  [(set (match_operand:XO 0 "fpr_reg_operand" "=&d")
	(unspec:XO [(match_operand:V16QI 1 "vsx_register_operand" "wa")
		    (match_operand:V16QI 2 "vsx_register_operand" "wa")
		    (match_operand:SI 3 "const_0_to_15_operand" "n")
		    (match_operand:SI 4 "const_0_to_15_operand" "n")
		    (match_operand:SI 5 "const_0_to_15_operand" "n")]
		   MMA_VVI4I4I4))]
  "TARGET_MMA"
  "<vvi4i4i4> %A0,%x1,%x2,%3,%4,%5"
  [(set_attr "type" "mma")
   (set_attr "length" "8")])

(define_insn "mma_<avvi4i4i4>"
  [(set (match_operand:XO 0 "fpr_reg_operand" "=&d")
	(unspec:XO [(match_operand:XO 1 "fpr_reg_operand" "0")
		    (match_operand:V16QI 2 "vsx_register_operand" "wa")
		    (match_operand:V16QI 3 "vsx_register_operand" "wa")
		    (match_operand:SI 4 "const_0_to_15_operand" "n")
		    (match_operand:SI 5 "const_0_to_15_operand" "n")
		    (match_operand:SI 6 "const_0_to_15_operand" "n")]
		   MMA_AVVI4I4I4))]
  "TARGET_MMA"
  "<avvi4i4i4> %A0,%x2,%x3,%4,%5,%6"
  [(set_attr "type" "mma")
   (set_attr "length" "8")])