]>
Commit | Line | Data |
---|---|---|
3af82a61 AK |
1 | ;;- Instruction patterns for the System z vector facility builtins. |
2 | ;; Copyright (C) 2015 Free Software Foundation, Inc. | |
3 | ;; Contributed by Andreas Krebbel (Andreas.Krebbel@de.ibm.com) | |
4 | ||
5 | ;; This file is part of GCC. | |
6 | ||
7 | ;; GCC is free software; you can redistribute it and/or modify it under | |
8 | ;; the terms of the GNU General Public License as published by the Free | |
9 | ;; Software Foundation; either version 3, or (at your option) any later | |
10 | ;; version. | |
11 | ||
12 | ;; GCC is distributed in the hope that it will be useful, but WITHOUT ANY | |
13 | ;; WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
14 | ;; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
15 | ;; for more details. | |
16 | ||
17 | ;; You should have received a copy of the GNU General Public License | |
18 | ;; along with GCC; see the file COPYING3. If not see | |
19 | ;; <http://www.gnu.org/licenses/>. | |
20 | ||
; The patterns in this file are enabled with -mzvector

; Mode iterators grouping the vector modes handled by common patterns below.
(define_mode_iterator V_HW_64 [V2DI V2DF])
(define_mode_iterator V_HW_32_64 [V4SI V2DI V2DF])
(define_mode_iterator VI_HW_SD [V4SI V2DI])
(define_mode_iterator V_HW_HSD [V8HI V4SI V2DI V2DF])
(define_mode_iterator VI_HW_HSD [V8HI V4SI V2DI])

; The element type of the vector with floating point modes translated
; to int modes of the same size.
(define_mode_attr non_vec_int [(V2QI "QI") (V4QI "QI") (V8QI "QI") (V16QI "QI")
                               (V2HI "HI") (V4HI "HI") (V8HI "HI")
                               (V2SI "SI") (V4SI "SI")
                               (V2DI "DI")
                               (V2SF "SI") (V4SF "SI")
                               (V2DF "DI")])

; Condition code modes generated by int comparisons
(define_mode_iterator VICMP [CCVEQ CCVH CCVHU])

; Comparisons supported by the vec_cmp* builtins
(define_code_iterator intcmp [eq gt gtu ge geu lt ltu le leu])
(define_code_iterator fpcmp [eq gt ge lt le])

; Comparisons supported by the vec_all/any* builtins
(define_code_iterator intcmpcc [eq ne gt ge lt le gtu geu ltu leu])
(define_code_iterator fpcmpcc [eq ne gt ge unle unlt lt le])

; Flags for vector string instructions (vfae all 4, vfee only ZS and CS, vstrc all 4)
(define_constants
  [(VSTRING_FLAG_IN 8)    ; invert result
   (VSTRING_FLAG_RT 4)    ; result type
   (VSTRING_FLAG_ZS 2)    ; zero search
   (VSTRING_FLAG_CS 1)])  ; condition code set

; Rounding modes as being used for e.g. VFI
(define_constants
  [(VEC_RND_CURRENT 0)
   (VEC_RND_NEAREST_AWAY_FROM_ZERO 1)
   (VEC_RND_SHORT_PREC 3)
   (VEC_RND_NEAREST_TO_EVEN 4)
   (VEC_RND_TO_ZERO 5)
   (VEC_RND_TO_INF 6)
   (VEC_RND_TO_MINF 7)])
65 | ||
66 | ||
; Vector gather element

; Gather single elements from memory addressed via a vector of offsets
; (operand 2) relative to the base address in operand 3.
(define_insn "vec_gather_element<mode>"
  [(set (match_operand:V_HW_32_64 0 "register_operand" "=v")
        (unspec:V_HW_32_64 [(match_operand:V_HW_32_64 1 "register_operand" "0")
                            (match_operand:<tointvec> 2 "register_operand" "v")
                            (match_operand:BLK 3 "memory_operand" "QR")
                            (match_operand:QI 4 "immediate_operand" "C")]
                           UNSPEC_VEC_GATHER))]
  "TARGET_VX"
  "vge<bhfgq>\t%0,%O3(%v2,%R3),%b4"
  [(set_attr "op_type" "VRV")])

; Expand a vgm-style mask: every element gets ones in the bit range
; given by the two immediates; emitted as a plain const vector.
(define_expand "vec_genmask<mode>"
  [(match_operand:VI_HW 0 "register_operand" "=v")
   (match_operand:QI 1 "immediate_operand" "C")
   (match_operand:QI 2 "immediate_operand" "C")]
  "TARGET_VX"
{
  int nunits = GET_MODE_NUNITS (<VI_HW:MODE>mode);
  int bitlen = GET_MODE_UNIT_BITSIZE (<VI_HW:MODE>mode);
  /* Convert the bit positions to little-endian style bit numbering.  */
  int end = bitlen - 1 - INTVAL (operands[1]);
  int start = bitlen - 1 - INTVAL (operands[2]);
  rtx const_vec[16];
  int i;
  unsigned HOST_WIDE_INT mask;
  bool swapped_p = false;

  /* A "swapped" range denotes a wrapping mask; build the contiguous
     complement first and invert it at the end.  */
  if (start > end)
    {
      i = start - 1; start = end + 1; end = i;
      swapped_p = true;
    }
  /* end == 63 would require a shift by 64 which is undefined behavior,
     so handle the all-ones case separately.  */
  if (end == 63)
    mask = HOST_WIDE_INT_M1U;
  else
    mask = (HOST_WIDE_INT_1U << (end + 1)) - 1;

  mask &= ~((HOST_WIDE_INT_1U << start) - 1);

  if (swapped_p)
    mask = ~mask;

  for (i = 0; i < nunits; i++)
    const_vec[i] = GEN_INT (trunc_int_for_mode (mask,
                     GET_MODE_INNER (<VI_HW:MODE>mode)));

  emit_insn (gen_rtx_SET (operands[0],
                          gen_rtx_CONST_VECTOR (<VI_HW:MODE>mode,
                                                gen_rtvec_v (nunits, const_vec))));
  DONE;
})
120 | ||
; Expand a byte mask: bit i (msb first) of the 16-bit immediate decides
; whether byte element i of the result is all-ones or zero.
(define_expand "vec_genbytemaskv16qi"
  [(match_operand:V16QI 0 "register_operand" "")
   (match_operand 1 "immediate_operand" "")]
  "TARGET_VX && CONST_OK_FOR_CONSTRAINT_P (INTVAL (operands[1]), 'K', \"K\")"
{
  int i;
  unsigned mask = 0x8000;
  rtx const_vec[16];
  unsigned HOST_WIDE_INT byte_mask = INTVAL (operands[1]);

  for (i = 0; i < 16; i++)
    {
      if (mask & byte_mask)
        const_vec[i] = constm1_rtx;
      else
        const_vec[i] = const0_rtx;
      mask = mask >> 1;
    }
  emit_insn (gen_rtx_SET (operands[0],
                          gen_rtx_CONST_VECTOR (V16QImode,
                                                gen_rtvec_v (16, const_vec))));
  DONE;
})

; Replicate the scalar operand into all elements of the vector.
(define_expand "vec_splats<mode>"
  [(set (match_operand:V_HW 0 "register_operand" "")
        (vec_duplicate:V_HW (match_operand:<non_vec> 1 "general_operand" "")))]
  "TARGET_VX")

; This is vec_set + modulo arithmetic on the element selector (op 3)
(define_expand "vec_insert<mode>"
  [(set (match_operand:V_HW 0 "register_operand" "")
        (unspec:V_HW [(match_operand:<non_vec> 2 "register_operand" "")
                      (match_operand:SI 3 "shift_count_or_setmem_operand" "")
                      (match_operand:V_HW 1 "register_operand" "")]
                     UNSPEC_VEC_SET))]
  "TARGET_VX"
  "")

; This is vec_set + modulo arithmetic on the element selector (op 2)
(define_expand "vec_promote<mode>"
  [(set (match_operand:V_HW 0 "register_operand" "")
        (unspec:V_HW [(match_operand:<non_vec> 1 "register_operand" "")
                      (match_operand:SI 2 "shift_count_or_setmem_operand" "")
                      (match_dup 0)]
                     UNSPEC_VEC_SET))]
  "TARGET_VX"
  "")
168 | ||
; vec_extract is also an RTL standard name -> vector.md

; vllez: load a scalar from memory into one element of the vector,
; all other elements are zeroed.
(define_insn "vec_insert_and_zero<mode>"
  [(set (match_operand:V_HW 0 "register_operand" "=v")
        (unspec:V_HW [(match_operand:<non_vec> 1 "memory_operand" "QR")]
                     UNSPEC_VEC_INSERT_AND_ZERO))]
  "TARGET_VX"
  "vllez<bhfgq>\t%v0,%1"
  [(set_attr "op_type" "VRX")])

; Vector load to block boundary; operand 2 gives the boundary.
(define_insn "vlbb"
  [(set (match_operand:V16QI 0 "register_operand" "=v")
        (unspec:V16QI [(match_operand:BLK 1 "memory_operand" "QR")
                       (match_operand:HI 2 "immediate_operand" " K")]
                      UNSPEC_VEC_LOAD_BNDRY))]
  "TARGET_VX"
  "vlbb\t%v0,%1,%2"
  [(set_attr "op_type" "VRX")])

; FIXME: The following two patterns might be expressed using vec_merge.
; But what is the canonical form: (vec_select (vec_merge op0 op1)) or
; (vec_merge (vec_select op0) (vec_select op1))?
(define_insn "vec_mergeh<mode>"
  [(set (match_operand:V_HW 0 "register_operand" "=v")
        (unspec:V_HW [(match_operand:V_HW 1 "register_operand" "v")
                      (match_operand:V_HW 2 "register_operand" "v")]
                     UNSPEC_VEC_MERGEH))]
  "TARGET_VX"
  "vmrh<bhfgq>\t%v0,%1,%2"
  [(set_attr "op_type" "VRR")])

(define_insn "vec_mergel<mode>"
  [(set (match_operand:V_HW 0 "register_operand" "=v")
        (unspec:V_HW [(match_operand:V_HW 1 "register_operand" "v")
                      (match_operand:V_HW 2 "register_operand" "v")]
                     UNSPEC_VEC_MERGEL))]
  "TARGET_VX"
  "vmrl<bhfgq>\t%v0,%1,%2"
  [(set_attr "op_type" "VRR")])
208 | ||
209 | ||
; Vector pack

; Pack two wide-element vectors into one vector with half-width elements.
(define_insn "vec_pack<mode>"
  [(set (match_operand:<vec_half> 0 "register_operand" "=v")
        (unspec:<vec_half> [(match_operand:VI_HW_HSD 1 "register_operand" "v")
                            (match_operand:VI_HW_HSD 2 "register_operand" "v")]
                           UNSPEC_VEC_PACK))]
  "TARGET_VX"
  "vpk<bhfgq>\t%v0,%v1,%v2"
  [(set_attr "op_type" "VRR")])


; Vector pack saturate

(define_insn "vec_packs<mode>"
  [(set (match_operand:<vec_half> 0 "register_operand" "=v")
        (unspec:<vec_half> [(match_operand:VI_HW_HSD 1 "register_operand" "v")
                            (match_operand:VI_HW_HSD 2 "register_operand" "v")]
                           UNSPEC_VEC_PACK_SATURATE))]
  "TARGET_VX"
  "vpks<bhfgq>\t%v0,%v1,%v2"
  [(set_attr "op_type" "VRR")])
232 | ||
233 | ||
; This is vec_packs_cc + loading cc into a caller specified memory location.
(define_expand "vec_packs_cc<mode>"
  [(parallel
    [(set (reg:CCRAW CC_REGNUM)
          (unspec:CCRAW [(match_operand:VI_HW_HSD 1 "register_operand" "")
                         (match_operand:VI_HW_HSD 2 "register_operand" "")]
                        UNSPEC_VEC_PACK_SATURATE_GENCC))
     (set (match_operand:<vec_half> 0 "register_operand" "")
          (unspec:<vec_half> [(match_dup 1) (match_dup 2)]
                             UNSPEC_VEC_PACK_SATURATE_CC))])
   ; Materialize CC in a scratch reg (op 4) and store it to op 3.
   (set (match_dup 4)
        (unspec:SI [(reg:CCRAW CC_REGNUM)] UNSPEC_CC_TO_INT))
   (set (match_operand:SI 3 "memory_operand" "")
        (match_dup 4))]
  "TARGET_VX"
{
  operands[4] = gen_reg_rtx (SImode);
})

; CC-setting variant of the saturating pack.
(define_insn "*vec_packs_cc<mode>"
  [(set (reg:CCRAW CC_REGNUM)
        (unspec:CCRAW [(match_operand:VI_HW_HSD 1 "register_operand" "v")
                       (match_operand:VI_HW_HSD 2 "register_operand" "v")]
                      UNSPEC_VEC_PACK_SATURATE_GENCC))
   (set (match_operand:<vec_half> 0 "register_operand" "=v")
        (unspec:<vec_half> [(match_dup 1) (match_dup 2)]
                           UNSPEC_VEC_PACK_SATURATE_CC))]
  "TARGET_VX"
  "vpks<bhfgq>s\t%v0,%v1,%v2"
  [(set_attr "op_type" "VRR")])
264 | ||
265 | ||
; Vector pack logical saturate

(define_insn "vec_packsu<mode>"
  [(set (match_operand:<vec_half> 0 "register_operand" "=v")
        (unspec:<vec_half> [(match_operand:VI_HW_HSD 1 "register_operand" "v")
                            (match_operand:VI_HW_HSD 2 "register_operand" "v")]
                           UNSPEC_VEC_PACK_UNSIGNED_SATURATE))]
  "TARGET_VX"
  "vpkls<bhfgq>\t%v0,%v1,%v2"
  [(set_attr "op_type" "VRR")])

; Emulate saturate unsigned pack on signed operands.
; Zero out negative elements and continue with the unsigned saturating pack.
(define_expand "vec_packsu_u<mode>"
  [(set (match_operand:<vec_half> 0 "register_operand" "=v")
        (unspec:<vec_half> [(match_operand:VI_HW_HSD 1 "register_operand" "v")
                            (match_operand:VI_HW_HSD 2 "register_operand" "v")]
                           UNSPEC_VEC_PACK_UNSIGNED_SATURATE))]
  "TARGET_VX"
{
  rtx null_vec = CONST0_RTX(<MODE>mode);
  machine_mode half_mode;
  switch (<MODE>mode)
  {
  case V8HImode: half_mode = V16QImode; break;
  case V4SImode: half_mode = V8HImode; break;
  case V2DImode: half_mode = V4SImode; break;
  default: gcc_unreachable ();
  }
  /* opN = opN >= 0 ? opN : 0 -- clamp negative elements to zero.  */
  s390_expand_vcond (operands[1], operands[1], null_vec,
                     GE, operands[1], null_vec);
  s390_expand_vcond (operands[2], operands[2], null_vec,
                     GE, operands[2], null_vec);
  emit_insn (gen_rtx_SET (operands[0],
                          gen_rtx_UNSPEC (half_mode,
                                          gen_rtvec (2, operands[1], operands[2]),
                                          UNSPEC_VEC_PACK_UNSIGNED_SATURATE)));
  DONE;
})

; This is vec_packsu_cc + loading cc into a caller specified memory location.
; FIXME: The reg to target mem copy should be issued by reload?!
(define_expand "vec_packsu_cc<mode>"
  [(parallel
    [(set (reg:CCRAW CC_REGNUM)
          (unspec:CCRAW [(match_operand:VI_HW_HSD 1 "register_operand" "")
                         (match_operand:VI_HW_HSD 2 "register_operand" "")]
                        UNSPEC_VEC_PACK_UNSIGNED_SATURATE_GENCC))
     (set (match_operand:<vec_half> 0 "register_operand" "")
          (unspec:<vec_half> [(match_dup 1) (match_dup 2)]
                             UNSPEC_VEC_PACK_UNSIGNED_SATURATE_CC))])
   (set (match_dup 4)
        (unspec:SI [(reg:CCRAW CC_REGNUM)] UNSPEC_CC_TO_INT))
   (set (match_operand:SI 3 "memory_operand" "")
        (match_dup 4))]
  "TARGET_VX"
{
  operands[4] = gen_reg_rtx (SImode);
})

; CC-setting variant of the unsigned saturating pack.
(define_insn "*vec_packsu_cc<mode>"
  [(set (reg:CCRAW CC_REGNUM)
        (unspec:CCRAW [(match_operand:VI_HW_HSD 1 "register_operand" "v")
                       (match_operand:VI_HW_HSD 2 "register_operand" "v")]
                      UNSPEC_VEC_PACK_UNSIGNED_SATURATE_GENCC))
   (set (match_operand:<vec_half> 0 "register_operand" "=v")
        (unspec:<vec_half> [(match_dup 1) (match_dup 2)]
                           UNSPEC_VEC_PACK_UNSIGNED_SATURATE_CC))]
  "TARGET_VX"
  "vpkls<bhfgq>s\t%v0,%v1,%v2"
  [(set_attr "op_type" "VRR")])
337 | ||
338 | ||
; Vector permute

; vec_perm is also RTL standard name, but we can only use it for V16QI

(define_insn "vec_zperm<mode>"
  [(set (match_operand:V_HW_HSD 0 "register_operand" "=v")
        (unspec:V_HW_HSD [(match_operand:V_HW_HSD 1 "register_operand" "v")
                          (match_operand:V_HW_HSD 2 "register_operand" "v")
                          (match_operand:V16QI 3 "register_operand" "v")]
                         UNSPEC_VEC_PERM))]
  "TARGET_VX"
  "vperm\t%v0,%v1,%v2,%v3"
  [(set_attr "op_type" "VRR")])

; Spread the two selector bits of the builtin immediate (0..3) into
; bits 0 and 2 of the vpdi immediate.
(define_expand "vec_permi<mode>"
  [(set (match_operand:V_HW_64 0 "register_operand" "")
        (unspec:V_HW_64 [(match_operand:V_HW_64 1 "register_operand" "")
                         (match_operand:V_HW_64 2 "register_operand" "")
                         (match_operand:QI 3 "immediate_operand" "")]
                        UNSPEC_VEC_PERMI))]
  "TARGET_VX"
{
  HOST_WIDE_INT val = INTVAL (operands[3]);
  operands[3] = GEN_INT ((val & 1) | (val & 2) << 1);
})

(define_insn "*vec_permi<mode>"
  [(set (match_operand:V_HW_64 0 "register_operand" "=v")
        (unspec:V_HW_64 [(match_operand:V_HW_64 1 "register_operand" "v")
                         (match_operand:V_HW_64 2 "register_operand" "v")
                         (match_operand:QI 3 "immediate_operand" "C")]
                        UNSPEC_VEC_PERMI))]
  "TARGET_VX"
  "vpdi\t%v0,%v1,%v2,%b3"
  [(set_attr "op_type" "VRR")])


; Vector replicate


; Replicate from vector element
(define_expand "vec_splat<mode>"
  [(set (match_operand:V_HW 0 "register_operand" "")
        (vec_duplicate:V_HW (vec_select:<non_vec>
                             (match_operand:V_HW 1 "register_operand" "")
                             (parallel
                              [(match_operand:QI 2 "immediate_operand" "")]))))]
  "TARGET_VX")
387 | ||
; Vector scatter element

; vscef, vsceg

; A 64-bit target address generated from 32-bit elements
(define_insn "vec_scatter_elementv4si_DI"
  [(set (mem:SI
         (plus:DI (zero_extend:DI
                   (unspec:SI [(match_operand:V4SI 1 "register_operand" "v")
                               (match_operand:DI 3 "immediate_operand" "I")]
                              UNSPEC_VEC_EXTRACT))
                  (match_operand:SI 2 "address_operand" "ZQ")))
        (unspec:SI [(match_operand:V4SI 0 "register_operand" "v")
                    (match_dup 3)] UNSPEC_VEC_EXTRACT))]
  "TARGET_VX && TARGET_64BIT"
  "vscef\t%v0,%O2(%v1,%R2),%3"
  [(set_attr "op_type" "VRV")])

; A 31-bit target address is generated from 64-bit elements
; (subreg offset 4 picks the low SI part of the extracted DI element).
(define_insn "vec_scatter_element<V_HW_64:mode>_SI"
  [(set (mem:<non_vec>
         (plus:SI (subreg:SI
                   (unspec:<non_vec_int> [(match_operand:V_HW_64 1 "register_operand" "v")
                                          (match_operand:DI 3 "immediate_operand" "I")]
                                         UNSPEC_VEC_EXTRACT) 4)
                  (match_operand:SI 2 "address_operand" "ZQ")))
        (unspec:<non_vec> [(match_operand:V_HW_64 0 "register_operand" "v")
                           (match_dup 3)] UNSPEC_VEC_EXTRACT))]
  "TARGET_VX && !TARGET_64BIT"
  "vsce<V_HW_64:bhfgq>\t%v0,%O2(%v1,%R2),%3"
  [(set_attr "op_type" "VRV")])

; Element size and target address size is the same
(define_insn "vec_scatter_element<mode>_<non_vec_int>"
  [(set (mem:<non_vec>
         (plus:<non_vec_int> (unspec:<non_vec_int>
                              [(match_operand:<tointvec> 1 "register_operand" "v")
                               (match_operand:DI 3 "immediate_operand" "I")]
                              UNSPEC_VEC_EXTRACT)
                             (match_operand:DI 2 "address_operand" "ZQ")))
        (unspec:<non_vec> [(match_operand:V_HW_32_64 0 "register_operand" "v")
                           (match_dup 3)] UNSPEC_VEC_EXTRACT))]
  "TARGET_VX"
  "vsce<bhfgq>\t%v0,%O2(%v1,%R2),%3"
  [(set_attr "op_type" "VRV")])

; Depending on the address size we have to expand a different pattern.
; This however cannot be represented in s390-builtins.def so we do the
; multiplexing here in the expander.
(define_expand "vec_scatter_element<V_HW_32_64:mode>"
  [(match_operand:V_HW_32_64 0 "register_operand" "")
   (match_operand:<tointvec> 1 "register_operand" "")
   (match_operand 2 "address_operand" "")
   (match_operand:DI 3 "immediate_operand" "")]
  "TARGET_VX"
{
  if (TARGET_64BIT)
    {
      PUT_MODE (operands[2], DImode);
      emit_insn (
        gen_vec_scatter_element<V_HW_32_64:mode>_DI (operands[0], operands[1],
                                                     operands[2], operands[3]));
    }
  else
    {
      PUT_MODE (operands[2], SImode);
      emit_insn (
        gen_vec_scatter_element<V_HW_32_64:mode>_SI (operands[0], operands[1],
                                                     operands[2], operands[3]));
    }
  DONE;
})
460 | ||
461 | ||
; Vector select

; Operand 3 selects bits from either OP1 (0) or OP2 (1)

; Comparison operator should not matter as long as we always use the same ?!

; Operands 1 and 2 are swapped in order to match the altivec builtin.
; If operand 3 is a const_int bitmask this would be vec_merge
(define_expand "vec_sel<mode>"
  [(set (match_operand:V_HW 0 "register_operand" "")
        (if_then_else:V_HW
         (eq (match_operand:<tointvec> 3 "register_operand" "")
             (match_dup 4))
         (match_operand:V_HW 2 "register_operand" "")
         (match_operand:V_HW 1 "register_operand" "")))]
  "TARGET_VX"
{
  /* Compare the mask against a zero vector of the same int mode.  */
  operands[4] = CONST0_RTX (<tointvec>mode);
})


; Vector sign extend to doubleword

; Sign extend of rightmost vector element to the respective double-word
(define_insn "vec_extend<mode>"
  [(set (match_operand:VI_HW_QHS 0 "register_operand" "=v")
        (unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand" "v")]
                          UNSPEC_VEC_EXTEND))]
  "TARGET_VX"
  "vseg<bhfgq>\t%v0,%1"
  [(set_attr "op_type" "VRR")])
493 | ||
494 | ||
; Vector store with length

; Store bytes in OP1 from OP0 with the highest indexed byte to be
; stored from OP0 given by OP2
(define_insn "vstl<mode>"
  [(set (match_operand:BLK 2 "memory_operand" "=Q")
        (unspec:BLK [(match_operand:V 0 "register_operand" "v")
                     (match_operand:SI 1 "register_operand" "d")]
                    UNSPEC_VEC_STORE_LEN))]
  "TARGET_VX"
  "vstl\t%v0,%1,%2"
  [(set_attr "op_type" "VRS")])


; Vector unpack high

; vuphb, vuphh, vuphf
(define_insn "vec_unpackh<mode>"
  [(set (match_operand:<vec_double> 0 "register_operand" "=v")
        (unspec:<vec_double> [(match_operand:VI_HW_QHS 1 "register_operand" "v")]
                             UNSPEC_VEC_UNPACKH))]
  "TARGET_VX"
  "vuph<bhfgq>\t%v0,%v1"
  [(set_attr "op_type" "VRR")])

; vuplhb, vuplhh, vuplhf
(define_insn "vec_unpackh_l<mode>"
  [(set (match_operand:<vec_double> 0 "register_operand" "=v")
        (unspec:<vec_double> [(match_operand:VI_HW_QHS 1 "register_operand" "v")]
                             UNSPEC_VEC_UNPACKH_L))]
  "TARGET_VX"
  "vuplh<bhfgq>\t%v0,%v1"
  [(set_attr "op_type" "VRR")])


; Vector unpack low

; vuplb, vuplhw, vuplf
(define_insn "vec_unpackl<mode>"
  [(set (match_operand:<vec_double> 0 "register_operand" "=v")
        (unspec:<vec_double> [(match_operand:VI_HW_QHS 1 "register_operand" "v")]
                             UNSPEC_VEC_UNPACKL))]
  "TARGET_VX"
  "vupl<bhfgq><w>\t%v0,%v1"
  [(set_attr "op_type" "VRR")])

; vupllb, vupllh, vupllf
(define_insn "vec_unpackl_l<mode>"
  [(set (match_operand:<vec_double> 0 "register_operand" "=v")
        (unspec:<vec_double> [(match_operand:VI_HW_QHS 1 "register_operand" "v")]
                             UNSPEC_VEC_UNPACKL_L))]
  "TARGET_VX"
  "vupll<bhfgq>\t%v0,%v1"
  [(set_attr "op_type" "VRR")])
549 | ||
550 | ||
; Vector add

; vaq

; zvector builtins use V16QI operands.  So replace the modes in order
; to map this to a TImode add.  We have to keep the V16QI mode
; operands in the expander in order to allow some operand type
; checking when expanding the builtin.
(define_expand "vec_add_u128"
  [(match_operand:V16QI 0 "register_operand" "")
   (match_operand:V16QI 1 "register_operand" "")
   (match_operand:V16QI 2 "register_operand" "")]
  "TARGET_VX"
{
  rtx op0 = gen_rtx_SUBREG (TImode, operands[0], 0);
  rtx op1 = gen_rtx_SUBREG (TImode, operands[1], 0);
  rtx op2 = gen_rtx_SUBREG (TImode, operands[2], 0);

  emit_insn (gen_rtx_SET (op0,
                          gen_rtx_PLUS (TImode, op1, op2)));
  DONE;
})

; Vector add compute carry

(define_insn "vec_addc<mode>"
  [(set (match_operand:VI_HW 0 "register_operand" "=v")
        (unspec:VI_HW [(match_operand:VI_HW 1 "register_operand" "v")
                       (match_operand:VI_HW 2 "register_operand" "v")]
                      UNSPEC_VEC_ADDC))]
  "TARGET_VX"
  "vacc<bhfgq>\t%v0,%v1,%v2"
  [(set_attr "op_type" "VRR")])

(define_insn "vec_addc_u128"
  [(set (match_operand:V16QI 0 "register_operand" "=v")
        (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
                       (match_operand:V16QI 2 "register_operand" "v")]
                      UNSPEC_VEC_ADDC_U128))]
  "TARGET_VX"
  "vaccq\t%v0,%v1,%v2"
  [(set_attr "op_type" "VRR")])


; Vector add with carry

(define_insn "vec_adde_u128"
  [(set (match_operand:V16QI 0 "register_operand" "=v")
        (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
                       (match_operand:V16QI 2 "register_operand" "v")
                       (match_operand:V16QI 3 "register_operand" "v")]
                      UNSPEC_VEC_ADDE_U128))]
  "TARGET_VX"
  "vacq\t%v0,%v1,%v2,%v3"
  [(set_attr "op_type" "VRR")])


; Vector add with carry compute carry

(define_insn "vec_addec_u128"
  [(set (match_operand:V16QI 0 "register_operand" "=v")
        (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
                       (match_operand:V16QI 2 "register_operand" "v")
                       (match_operand:V16QI 3 "register_operand" "v")]
                      UNSPEC_VEC_ADDEC_U128))]
  "TARGET_VX"
  "vacccq\t%v0,%v1,%v2,%v3"
  [(set_attr "op_type" "VRR")])
619 | ||
620 | ||
; Vector and

; The following two patterns allow mixed mode and's as required for the intrinsics.
(define_insn "and_av2df3"
  [(set (match_operand:V2DF 0 "register_operand" "=v")
        (and:V2DF (subreg:V2DF (match_operand:V2DI 1 "register_operand" "v") 0)
                  (match_operand:V2DF 2 "register_operand" "v")))]
  "TARGET_VX"
  "vn\t%v0,%v1,%v2"
  [(set_attr "op_type" "VRR")])

(define_insn "and_cv2df3"
  [(set (match_operand:V2DF 0 "register_operand" "=v")
        (and:V2DF (match_operand:V2DF 1 "register_operand" "v")
                  (subreg:V2DF (match_operand:V2DI 2 "register_operand" "v") 0)))]
  "TARGET_VX"
  "vn\t%v0,%v1,%v2"
  [(set_attr "op_type" "VRR")])


; Vector and with complement

; vnc: op0 = op1 & ~op2 (note the operand order in the RTL pattern).
(define_insn "vec_andc<mode>3"
  [(set (match_operand:VT_HW 0 "register_operand" "=v")
        (and:VT_HW (not:VT_HW (match_operand:VT_HW 2 "register_operand" "v"))
                   (match_operand:VT_HW 1 "register_operand" "v")))]
  "TARGET_VX"
  "vnc\t%v0,%v1,%v2"
  [(set_attr "op_type" "VRR")])

; The following two patterns allow mixed mode and's as required for the intrinsics.
(define_insn "vec_andc_av2df3"
  [(set (match_operand:V2DF 0 "register_operand" "=v")
        (and:V2DF (not:V2DF (match_operand:V2DF 2 "register_operand" "v"))
                  (subreg:V2DF (match_operand:V2DI 1 "register_operand" "v") 0)))]
  "TARGET_VX"
  "vnc\t%v0,%v1,%v2"
  [(set_attr "op_type" "VRR")])

(define_insn "vec_andc_cv2df3"
  [(set (match_operand:V2DF 0 "register_operand" "=v")
        (and:V2DF (not:V2DF (subreg:V2DF (match_operand:V2DI 2 "register_operand" "v") 0))
                  (match_operand:V2DF 1 "register_operand" "v")))]
  "TARGET_VX"
  "vnc\t%v0,%v1,%v2"
  [(set_attr "op_type" "VRR")])
669 | ||
670 | ||
; Vector average

(define_insn "vec_avg<mode>"
  [(set (match_operand:VI_HW 0 "register_operand" "=v")
        (unspec:VI_HW [(match_operand:VI_HW 1 "register_operand" "v")
                       (match_operand:VI_HW 2 "register_operand" "v")]
                      UNSPEC_VEC_AVG))]
  "TARGET_VX"
  "vavg<bhfgq>\t%v0,%v1,%v2"
  [(set_attr "op_type" "VRR")])

; Vector average logical

(define_insn "vec_avgu<mode>"
  [(set (match_operand:VI_HW 0 "register_operand" "=v")
        (unspec:VI_HW [(match_operand:VI_HW 1 "register_operand" "v")
                       (match_operand:VI_HW 2 "register_operand" "v")]
                      UNSPEC_VEC_AVGU))]
  "TARGET_VX"
  "vavgl<bhfgq>\t%v0,%v1,%v2"
  [(set_attr "op_type" "VRR")])


; Vector checksum

(define_insn "vec_checksum"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
        (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
                      (match_operand:V4SI 2 "register_operand" "v")]
                     UNSPEC_VEC_CHECKSUM))]
  "TARGET_VX"
  "vcksm\t%v0,%v1,%v2"
  [(set_attr "op_type" "VRR")])
704 | ||
;;
;; Vector compare
;;

; vec_all/any int compares

; Result (op 0) is 1 if the comparison holds for ALL elements, else 0;
; the last argument of s390_expand_vec_compare_cc selects all (true)
; vs. any (false) semantics.
(define_expand "vec_all_<intcmpcc:code><VI_HW:mode>"
  [(match_operand:SI 0 "register_operand" "")
   (intcmpcc (match_operand:VI_HW 1 "register_operand" "")
             (match_operand:VI_HW 2 "register_operand" ""))]
  "TARGET_VX"
{
  s390_expand_vec_compare_cc (operands[0],
                              <intcmpcc:CODE>,
                              operands[1],
                              operands[2],
                              true);
  DONE;
})

(define_expand "vec_any_<intcmpcc:code><VI_HW:mode>"
  [(match_operand:SI 0 "register_operand" "")
   (intcmpcc (match_operand:VI_HW 1 "register_operand" "")
             (match_operand:VI_HW 2 "register_operand" ""))]
  "TARGET_VX"
{
  s390_expand_vec_compare_cc (operands[0],
                              <intcmpcc:CODE>,
                              operands[1],
                              operands[2],
                              false);
  DONE;
})

; vec_all/any fp compares

(define_expand "vec_all_<fpcmpcc:code>v2df"
  [(match_operand:SI 0 "register_operand" "")
   (fpcmpcc (match_operand:V2DF 1 "register_operand" "")
            (match_operand:V2DF 2 "register_operand" ""))]
  "TARGET_VX"
{
  s390_expand_vec_compare_cc (operands[0],
                              <fpcmpcc:CODE>,
                              operands[1],
                              operands[2],
                              true);
  DONE;
})

(define_expand "vec_any_<fpcmpcc:code>v2df"
  [(match_operand:SI 0 "register_operand" "")
   (fpcmpcc (match_operand:V2DF 1 "register_operand" "")
            (match_operand:V2DF 2 "register_operand" ""))]
  "TARGET_VX"
{
  s390_expand_vec_compare_cc (operands[0],
                              <fpcmpcc:CODE>,
                              operands[1],
                              operands[2],
                              false);
  DONE;
})
768 | ||
769 | ||
; Compare without generating CC

; Element-wise compare producing a mask vector.
(define_expand "vec_cmp<intcmp:code><VI_HW:mode>"
  [(set (match_operand:VI_HW 0 "register_operand" "=v")
        (intcmp:VI_HW (match_operand:VI_HW 1 "register_operand" "v")
                      (match_operand:VI_HW 2 "register_operand" "v")))]
  "TARGET_VX"
{
  s390_expand_vec_compare (operands[0], <intcmp:CODE>, operands[1], operands[2]);
  DONE;
})

; FP variant: result mask is V2DI for V2DF inputs.
(define_expand "vec_cmp<fpcmp:code>v2df"
  [(set (match_operand:V2DI 0 "register_operand" "=v")
        (fpcmp:V2DI (match_operand:V2DF 1 "register_operand" "v")
                    (match_operand:V2DF 2 "register_operand" "v")))]
  "TARGET_VX"
{
  s390_expand_vec_compare (operands[0], <fpcmp:CODE>, operands[1], operands[2]);
  DONE;
})
791 | ||
792 | ||
; Vector count leading zeros

; vec_cntlz -> clz
; vec_cnttz -> ctz

; Vector xor

; vec_xor -> xor

; The following two patterns allow mixed mode xor's as required for
; the intrinsics.

; xor with the first operand given as a V2DI viewed as V2DF.
(define_insn "xor_av2df3"
  [(set (match_operand:V2DF 0 "register_operand" "=v")
	(xor:V2DF (subreg:V2DF (match_operand:V2DI 1 "register_operand" "v") 0)
		  (match_operand:V2DF 2 "register_operand" "v")))]
  "TARGET_VX"
  "vx\t%v0,%v1,%v2"
  [(set_attr "op_type" "VRR")])

; xor with the second operand given as a V2DI viewed as V2DF.
(define_insn "xor_cv2df3"
  [(set (match_operand:V2DF 0 "register_operand" "=v")
	(xor:V2DF (match_operand:V2DF 1 "register_operand" "v")
		  (subreg:V2DF (match_operand:V2DI 2 "register_operand" "v") 0)))]
  "TARGET_VX"
  "vx\t%v0,%v1,%v2"
  [(set_attr "op_type" "VRR")])


; Vector Galois field multiply sum

; vgfmb, vgfmh, vgfmf
; NOTE(review): the accumulating variant below widens the result to
; <vec_double>, but here the output mode equals the input mode -
; verify against the builtin description whether this is intended.
(define_insn "vec_gfmsum<mode>"
  [(set (match_operand:VI_HW_QHS 0 "register_operand" "=v")
	(unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand" "v")
			   (match_operand:VI_HW_QHS 2 "register_operand" "v")]
			  UNSPEC_VEC_GFMSUM))]
  "TARGET_VX"
  "vgfm<bhfgq>\t%v0,%v1,%v2"
  [(set_attr "op_type" "VRR")])

; vgfmg - doubleword inputs, 128 bit result
(define_insn "vec_gfmsum_128"
  [(set (match_operand:V16QI 0 "register_operand" "=v")
	(unspec:V16QI [(match_operand:V2DI 1 "register_operand" "v")
		       (match_operand:V2DI 2 "register_operand" "v")]
		      UNSPEC_VEC_GFMSUM_128))]
  "TARGET_VX"
  "vgfmg\t%v0,%v1,%v2"
  [(set_attr "op_type" "VRR")])

; vgfmab, vgfmah, vgfmaf - multiply sum with double-width accumulator
; passed in operand 3.
(define_insn "vec_gfmsum_accum<mode>"
  [(set (match_operand:<vec_double> 0 "register_operand" "=v")
	(unspec:<vec_double> [(match_operand:VI_HW_QHS 1 "register_operand" "v")
			      (match_operand:VI_HW_QHS 2 "register_operand" "v")
			      (match_operand:<vec_double> 3 "register_operand" "v")]
			     UNSPEC_VEC_GFMSUM_ACCUM))]
  "TARGET_VX"
  "vgfma<bhfgq>\t%v0,%v1,%v2,%v3"
  [(set_attr "op_type" "VRR")])

; vgfmag - doubleword inputs, 128 bit accumulator/result
(define_insn "vec_gfmsum_accum_128"
  [(set (match_operand:V16QI 0 "register_operand" "=v")
	(unspec:V16QI [(match_operand:V2DI 1 "register_operand" "v")
		       (match_operand:V2DI 2 "register_operand" "v")
		       (match_operand:V16QI 3 "register_operand" "v")]
		      UNSPEC_VEC_GFMSUM_ACCUM_128))]
  "TARGET_VX"
  "vgfmag\t%v0,%v1,%v2,%v3"
  [(set_attr "op_type" "VRR")])
859 | ||
860 | ||
; FIXME: vec_neg ?

; Vector load positive: vec_abs -> abs
; Vector maximum vec_max -> smax, logical vec_max -> umax
; Vector maximum vec_min -> smin, logical vec_min -> umin


; Vector multiply and add high

; vec_mladd -> vec_vmal
; vmalb, vmalh, vmalf, vmalg
; Multiply and add low: operand 1 * operand 2 + operand 3, keeping the
; low half of each product.
(define_insn "vec_vmal<mode>"
  [(set (match_operand:VI_HW 0 "register_operand" "=v")
	(unspec:VI_HW [(match_operand:VI_HW 1 "register_operand" "v")
		       (match_operand:VI_HW 2 "register_operand" "v")
		       (match_operand:VI_HW 3 "register_operand" "v")]
		      UNSPEC_VEC_VMAL))]
  "TARGET_VX"
  "vmal<bhfgq><w>\t%v0,%v1,%v2,%v3"
  [(set_attr "op_type" "VRR")])

; vec_mhadd -> vec_vmah/vec_vmalh

; vmahb; vmahh, vmahf, vmahg - signed multiply and add high
(define_insn "vec_vmah<mode>"
  [(set (match_operand:VI_HW 0 "register_operand" "=v")
	(unspec:VI_HW [(match_operand:VI_HW 1 "register_operand" "v")
		       (match_operand:VI_HW 2 "register_operand" "v")
		       (match_operand:VI_HW 3 "register_operand" "v")]
		      UNSPEC_VEC_VMAH))]
  "TARGET_VX"
  "vmah<bhfgq>\t%v0,%v1,%v2,%v3"
  [(set_attr "op_type" "VRR")])

; vmalhb; vmalhh, vmalhf, vmalhg - unsigned (logical) multiply and add high
(define_insn "vec_vmalh<mode>"
  [(set (match_operand:VI_HW 0 "register_operand" "=v")
	(unspec:VI_HW [(match_operand:VI_HW 1 "register_operand" "v")
		       (match_operand:VI_HW 2 "register_operand" "v")
		       (match_operand:VI_HW 3 "register_operand" "v")]
		      UNSPEC_VEC_VMALH))]
  "TARGET_VX"
  "vmalh<bhfgq>\t%v0,%v1,%v2,%v3"
  [(set_attr "op_type" "VRR")])

; vec_meadd -> vec_vmae/vec_vmale

; vmaeb; vmaeh, vmaef, vmaeg - signed, even elements, double-width result
(define_insn "vec_vmae<mode>"
  [(set (match_operand:<vec_double> 0 "register_operand" "=v")
	(unspec:<vec_double> [(match_operand:VI_HW_QHS 1 "register_operand" "v")
			      (match_operand:VI_HW_QHS 2 "register_operand" "v")
			      (match_operand:<vec_double> 3 "register_operand" "v")]
			     UNSPEC_VEC_VMAE))]
  "TARGET_VX"
  "vmae<bhfgq>\t%v0,%v1,%v2,%v3"
  [(set_attr "op_type" "VRR")])

; vmaleb; vmaleh, vmalef, vmaleg - unsigned, even elements
(define_insn "vec_vmale<mode>"
  [(set (match_operand:<vec_double> 0 "register_operand" "=v")
	(unspec:<vec_double> [(match_operand:VI_HW_QHS 1 "register_operand" "v")
			      (match_operand:VI_HW_QHS 2 "register_operand" "v")
			      (match_operand:<vec_double> 3 "register_operand" "v")]
			     UNSPEC_VEC_VMALE))]
  "TARGET_VX"
  "vmale<bhfgq>\t%v0,%v1,%v2,%v3"
  [(set_attr "op_type" "VRR")])

; vec_moadd -> vec_vmao/vec_vmalo

; vmaob; vmaoh, vmaof, vmaog - signed, odd elements
(define_insn "vec_vmao<mode>"
  [(set (match_operand:<vec_double> 0 "register_operand" "=v")
	(unspec:<vec_double> [(match_operand:VI_HW_QHS 1 "register_operand" "v")
			      (match_operand:VI_HW_QHS 2 "register_operand" "v")
			      (match_operand:<vec_double> 3 "register_operand" "v")]
			     UNSPEC_VEC_VMAO))]
  "TARGET_VX"
  "vmao<bhfgq>\t%v0,%v1,%v2,%v3"
  [(set_attr "op_type" "VRR")])

; vmalob; vmaloh, vmalof, vmalog - unsigned, odd elements
(define_insn "vec_vmalo<mode>"
  [(set (match_operand:<vec_double> 0 "register_operand" "=v")
	(unspec:<vec_double> [(match_operand:VI_HW_QHS 1 "register_operand" "v")
			      (match_operand:VI_HW_QHS 2 "register_operand" "v")
			      (match_operand:<vec_double> 3 "register_operand" "v")]
			     UNSPEC_VEC_VMALO))]
  "TARGET_VX"
  "vmalo<bhfgq>\t%v0,%v1,%v2,%v3"
  [(set_attr "op_type" "VRR")])


; Vector multiply high

; vec_mulh -> vec_smulh/vec_umulh

; vmhb, vmhh, vmhf - signed multiply, keep high half of each product
(define_insn "vec_smulh<mode>"
  [(set (match_operand:VI_HW_QHS 0 "register_operand" "=v")
	(unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand" "v")
			   (match_operand:VI_HW_QHS 2 "register_operand" "v")]
			  UNSPEC_VEC_SMULT_HI))]
  "TARGET_VX"
  "vmh<bhfgq>\t%v0,%v1,%v2"
  [(set_attr "op_type" "VRR")])

; vmlhb, vmlhh, vmlhf - unsigned (logical) multiply high
(define_insn "vec_umulh<mode>"
  [(set (match_operand:VI_HW_QHS 0 "register_operand" "=v")
	(unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand" "v")
			   (match_operand:VI_HW_QHS 2 "register_operand" "v")]
			  UNSPEC_VEC_UMULT_HI))]
  "TARGET_VX"
  "vmlh<bhfgq>\t%v0,%v1,%v2"
  [(set_attr "op_type" "VRR")])
978 | ||
979 | ||
; Vector multiply low

; vec_mule -> vec_widen_umult_even/vec_widen_smult_even
; vec_mulo -> vec_widen_umult_odd/vec_widen_smult_odd


; Vector nor

(define_insn "vec_nor<mode>3"
  [(set (match_operand:VT_HW 0 "register_operand" "=v")
	(not:VT_HW (ior:VT_HW (match_operand:VT_HW 1 "register_operand" "v")
			      (match_operand:VT_HW 2 "register_operand" "v"))))]
  "TARGET_VX"
  "vno\t%v0,%v1,%v2"
  [(set_attr "op_type" "VRR")])

; The following two patterns allow mixed mode nor's as required for
; the intrinsics.
(define_insn "vec_nor_av2df3"
  [(set (match_operand:V2DF 0 "register_operand" "=v")
	(not:V2DF (ior:V2DF (subreg:V2DF (match_operand:V2DI 1 "register_operand" "v") 0)
			    (match_operand:V2DF 2 "register_operand" "v"))))]
  "TARGET_VX"
  "vno\t%v0,%v1,%v2"
  [(set_attr "op_type" "VRR")])

(define_insn "vec_nor_cv2df3"
  [(set (match_operand:V2DF 0 "register_operand" "=v")
	(not:V2DF (ior:V2DF (match_operand:V2DF 1 "register_operand" "v")
			    (subreg:V2DF (match_operand:V2DI 2 "register_operand" "v") 0))))]
  "TARGET_VX"
  "vno\t%v0,%v1,%v2"
  [(set_attr "op_type" "VRR")])


; Vector or

; The following two patterns allow mixed mode or's as required for the
; intrinsics.
(define_insn "ior_av2df3"
  [(set (match_operand:V2DF 0 "register_operand" "=v")
	(ior:V2DF (subreg:V2DF (match_operand:V2DI 1 "register_operand" "v") 0)
		  (match_operand:V2DF 2 "register_operand" "v")))]
  "TARGET_VX"
  "vo\t%v0,%v1,%v2"
  [(set_attr "op_type" "VRR")])

(define_insn "ior_cv2df3"
  [(set (match_operand:V2DF 0 "register_operand" "=v")
	(ior:V2DF (match_operand:V2DF 1 "register_operand" "v")
		  (subreg:V2DF (match_operand:V2DI 2 "register_operand" "v") 0)))]
  "TARGET_VX"
  "vo\t%v0,%v1,%v2"
  [(set_attr "op_type" "VRR")])


; Vector population count vec_popcnt -> popcount
; Vector element rotate left logical vec_rl -> vrotl, vec_rli -> rot

; Vector element rotate and insert under mask

; verimb, verimh, verimf, verimg
; Operand 1 is both input and output ("0" matching constraint); the
; immediate operand 4 is the rotate count printed as a byte value.
(define_insn "verim<mode>"
  [(set (match_operand:VI_HW 0 "register_operand" "=v")
	(unspec:VI_HW [(match_operand:VI_HW 1 "register_operand" "0")
		       (match_operand:VI_HW 2 "register_operand" "v")
		       (match_operand:VI_HW 3 "register_operand" "v")
		       (match_operand:SI 4 "immediate_operand" "I")]
		      UNSPEC_VEC_RL_MASK))]
  "TARGET_VX"
  "verim<bhfgq>\t%v0,%v2,%v3,%b4"
  [(set_attr "op_type" "VRI")])
1050 | ||
1051 | ||
; Vector shift left

; Shift the whole 128 bit value; the shift amount comes from a vector
; register of a possibly different element width.
(define_insn "vec_sll<VI_HW:mode><VI_HW_QHS:mode>"
  [(set (match_operand:VI_HW 0 "register_operand" "=v")
	(unspec:VI_HW [(match_operand:VI_HW 1 "register_operand" "v")
		       (match_operand:VI_HW_QHS 2 "register_operand" "v")]
		      UNSPEC_VEC_SLL))]
  "TARGET_VX"
  "vsl\t%v0,%v1,%v2"
  [(set_attr "op_type" "VRR")])


; Vector shift left by byte

(define_insn "vec_slb<mode>"
  [(set (match_operand:V_HW 0 "register_operand" "=v")
	(unspec:V_HW [(match_operand:V_HW 1 "register_operand" "v")
		      (match_operand:<tointvec> 2 "register_operand" "v")]
		     UNSPEC_VEC_SLB))]
  "TARGET_VX"
  "vslb\t%v0,%v1,%v2"
  [(set_attr "op_type" "VRR")])


; Vector shift left double by byte

(define_insn "vec_sld<mode>"
  [(set (match_operand:V_HW 0 "register_operand" "=v")
	(unspec:V_HW [(match_operand:V_HW 1 "register_operand" "v")
		      (match_operand:V_HW 2 "register_operand" "v")
		      (match_operand:DI 3 "immediate_operand" "C")]
		     UNSPEC_VEC_SLDB))]
  "TARGET_VX"
  "vsldb\t%v0,%v1,%v2,%b3"
  [(set_attr "op_type" "VRI")])

; Shift left double by word: reuses the byte-shift pattern by scaling
; the word count (operand 3) to a byte count (<< 2).
(define_expand "vec_sldw<mode>"
  [(set (match_operand:V_HW 0 "register_operand" "")
	(unspec:V_HW [(match_operand:V_HW 1 "register_operand" "")
		      (match_operand:V_HW 2 "register_operand" "")
		      (match_operand:DI 3 "immediate_operand" "")]
		     UNSPEC_VEC_SLDB))]
  "TARGET_VX"
{
  operands[3] = GEN_INT (INTVAL (operands[3]) << 2);
})

; Vector shift right arithmetic

(define_insn "vec_sral<VI_HW:mode><VI_HW_QHS:mode>"
  [(set (match_operand:VI_HW 0 "register_operand" "=v")
	(unspec:VI_HW [(match_operand:VI_HW 1 "register_operand" "v")
		       (match_operand:VI_HW_QHS 2 "register_operand" "v")]
		      UNSPEC_VEC_SRAL))]
  "TARGET_VX"
  "vsra\t%v0,%v1,%v2"
  [(set_attr "op_type" "VRR")])


; Vector shift right arithmetic by byte

(define_insn "vec_srab<mode>"
  [(set (match_operand:V_HW 0 "register_operand" "=v")
	(unspec:V_HW [(match_operand:V_HW 1 "register_operand" "v")
		      (match_operand:<tointvec> 2 "register_operand" "v")]
		     UNSPEC_VEC_SRAB))]
  "TARGET_VX"
  "vsrab\t%v0,%v1,%v2"
  [(set_attr "op_type" "VRR")])


; Vector shift right logical

(define_insn "vec_srl<VI_HW:mode><VI_HW_QHS:mode>"
  [(set (match_operand:VI_HW 0 "register_operand" "=v")
	(unspec:VI_HW [(match_operand:VI_HW 1 "register_operand" "v")
		       (match_operand:VI_HW_QHS 2 "register_operand" "v")]
		      UNSPEC_VEC_SRL))]
  "TARGET_VX"
  "vsrl\t%v0,%v1,%v2"
  [(set_attr "op_type" "VRR")])


; Vector shift right logical by byte

; Pattern definition in vector.md
(define_expand "vec_srb<mode>"
  [(set (match_operand:V_HW 0 "register_operand" "")
	(unspec:V_HW [(match_operand:V_HW 1 "register_operand" "")
		      (match_operand:<tointvec> 2 "register_operand" "")]
		     UNSPEC_VEC_SRLB))]
  "TARGET_VX")
1144 | ||
1145 | ||
; Vector subtract

; 128 bit subtract: vsq
(define_insn "vec_sub_u128"
  [(set (match_operand:V16QI 0 "register_operand" "=v")
	(unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
		       (match_operand:V16QI 2 "register_operand" "v")]
		      UNSPEC_VEC_SUB_U128))]
  "TARGET_VX"
  "vsq\t%v0,%v1,%v2"
  [(set_attr "op_type" "VRR")])


; Vector subtract compute borrow indication

; vscbib, vscbih, vscbif, vscbig
(define_insn "vec_subc<mode>"
  [(set (match_operand:VI_HW 0 "register_operand" "=v")
	(unspec:VI_HW [(match_operand:VI_HW 1 "register_operand" "v")
		       (match_operand:VI_HW 2 "register_operand" "v")]
		      UNSPEC_VEC_SUBC))]
  "TARGET_VX"
  "vscbi<bhfgq>\t%v0,%v1,%v2"
  [(set_attr "op_type" "VRR")])

; 128 bit borrow indication: vscbiq
(define_insn "vec_subc_u128"
  [(set (match_operand:V16QI 0 "register_operand" "=v")
	(unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
		       (match_operand:V16QI 2 "register_operand" "v")]
		      UNSPEC_VEC_SUBC_U128))]
  "TARGET_VX"
  "vscbiq\t%v0,%v1,%v2"
  [(set_attr "op_type" "VRR")])


; Vector subtract with borrow indication

; Operand 3 carries the incoming borrow.
(define_insn "vec_sube_u128"
  [(set (match_operand:V16QI 0 "register_operand" "=v")
	(unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
		       (match_operand:V16QI 2 "register_operand" "v")
		       (match_operand:V16QI 3 "register_operand" "v")]
		      UNSPEC_VEC_SUBE_U128))]
  "TARGET_VX"
  "vsbiq\t%v0,%v1,%v2,%v3"
  [(set_attr "op_type" "VRR")])


; Vector subtract with borrow compute and borrow indication

(define_insn "vec_subec_u128"
  [(set (match_operand:V16QI 0 "register_operand" "=v")
	(unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
		       (match_operand:V16QI 2 "register_operand" "v")
		       (match_operand:V16QI 3 "register_operand" "v")]
		      UNSPEC_VEC_SUBEC_U128))]
  "TARGET_VX"
  "vsbcbiq\t%v0,%v1,%v2,%v3"
  [(set_attr "op_type" "VRR")])


; Vector sum across

; Sum across DImode parts of the 1st operand and add the rightmost
; element of 2nd operand
; vsumgh, vsumgf
(define_expand "vec_sum2<mode>"
  [(set (match_operand:V2DI 0 "register_operand" "")
	(unspec:V2DI [(match_operand:VI_HW_HS 1 "register_operand" "")
		      (match_operand:VI_HW_HS 2 "register_operand" "")]
		     UNSPEC_VEC_VSUMG))]
  "TARGET_VX")

; vsumqf, vsumqg (<bhfgq> over SI/DI inputs)
(define_insn "vec_sum_u128<mode>"
  [(set (match_operand:V2DI 0 "register_operand" "=v")
	(unspec:V2DI [(match_operand:VI_HW_SD 1 "register_operand" "v")
		      (match_operand:VI_HW_SD 2 "register_operand" "v")]
		     UNSPEC_VEC_VSUMQ))]
  "TARGET_VX"
  "vsumq<bhfgq>\t%v0,%v1,%v2"
  [(set_attr "op_type" "VRR")])

; vsumb, vsumh
(define_expand "vec_sum4<mode>"
  [(set (match_operand:V4SI 0 "register_operand" "")
	(unspec:V4SI [(match_operand:VI_HW_QH 1 "register_operand" "")
		      (match_operand:VI_HW_QH 2 "register_operand" "")]
		     UNSPEC_VEC_VSUM))]
  "TARGET_VX")
1234 | ||
1235 | ||
; Vector test under mask

; Expander: run vtm to set CC, then materialize the CC value as an
; SImode integer via UNSPEC_CC_TO_INT.
(define_expand "vec_test_mask_int<mode>"
  [(set (reg:CCRAW CC_REGNUM)
	(unspec:CCRAW [(match_operand:V_HW 1 "register_operand" "")
		       (match_operand:<tointvec> 2 "register_operand" "")]
		      UNSPEC_VEC_TEST_MASK))
   (set (match_operand:SI 0 "register_operand" "")
	(unspec:SI [(reg:CCRAW CC_REGNUM)] UNSPEC_CC_TO_INT))]
  "TARGET_VX")

; The insn itself only sets CC; operands 0/1 are both inputs here.
(define_insn "*vec_test_mask<mode>"
  [(set (reg:CCRAW CC_REGNUM)
	(unspec:CCRAW [(match_operand:V_HW 0 "register_operand" "v")
		       (match_operand:<tointvec> 1 "register_operand" "v")]
		      UNSPEC_VEC_TEST_MASK))]
  "TARGET_VX"
  "vtm\t%v0,%v1"
  [(set_attr "op_type" "VRR")])
1255 | ||
1256 | ||
; Vector find any element equal

; vfaeb, vfaeh, vfaef
; vfaezb, vfaezh, vfaezf
; If the ZS (zero search) bit is set in the flags immediate, switch to
; the vfaez extended mnemonic and strip the bit from the printed value.
(define_insn "vfae<mode>"
  [(set (match_operand:VI_HW_QHS 0 "register_operand" "=v")
	(unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand" "v")
			   (match_operand:VI_HW_QHS 2 "register_operand" "v")
			   (match_operand:SI 3 "immediate_operand" "C")]
			  UNSPEC_VEC_VFAE))]
  "TARGET_VX"
{
  unsigned HOST_WIDE_INT flags = INTVAL (operands[3]);

  if (flags & VSTRING_FLAG_ZS)
    {
      flags &= ~VSTRING_FLAG_ZS;
      operands[3] = GEN_INT (flags);
      return "vfaez<bhfgq>\t%v0,%v1,%v2,%b3";
    }
  return "vfae<bhfgq>\t%v0,%v1,%v2,%b3";
}
  [(set_attr "op_type" "VRR")])

; vfaebs, vfaehs, vfaefs
; vfaezbs, vfaezhs, vfaezfs
; CC-setting variant; same ZS handling as above.
(define_insn "*vfaes<mode>"
  [(set (match_operand:VI_HW_QHS 0 "register_operand" "=v")
	(unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand" "v")
			   (match_operand:VI_HW_QHS 2 "register_operand" "v")
			   (match_operand:SI 3 "immediate_operand" "C")]
			  UNSPEC_VEC_VFAE))
   (set (reg:CCRAW CC_REGNUM)
	(unspec:CCRAW [(match_dup 1)
		       (match_dup 2)
		       (match_dup 3)]
		      UNSPEC_VEC_VFAECC))]
  "TARGET_VX"
{
  unsigned HOST_WIDE_INT flags = INTVAL (operands[3]);

  if (flags & VSTRING_FLAG_ZS)
    {
      flags &= ~VSTRING_FLAG_ZS;
      operands[3] = GEN_INT (flags);
      return "vfaez<bhfgq>s\t%v0,%v1,%v2,%b3";
    }
  return "vfae<bhfgq>s\t%v0,%v1,%v2,%b3";
}
  [(set_attr "op_type" "VRR")])

; Zero-search expander: force the ZS bit into the flags and fall
; through to the vfae pattern.
(define_expand "vfaez<mode>"
  [(set (match_operand:VI_HW_QHS 0 "register_operand" "=v")
	(unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand" "v")
			   (match_operand:VI_HW_QHS 2 "register_operand" "v")
			   (match_operand:SI 3 "immediate_operand" "C")]
			  UNSPEC_VEC_VFAE))]
  "TARGET_VX"
{
  operands[3] = GEN_INT (INTVAL (operands[3]) | VSTRING_FLAG_ZS);
})

; CC-setting expander: force the CS bit and store the resulting CC
; into the memory operand 4.
(define_expand "vfaes<mode>"
  [(parallel
    [(set (match_operand:VI_HW_QHS 0 "register_operand" "")
	  (unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand" "")
			     (match_operand:VI_HW_QHS 2 "register_operand" "")
			     (match_operand:SI 3 "immediate_operand" "C")]
			    UNSPEC_VEC_VFAE))
     (set (reg:CCRAW CC_REGNUM)
	  (unspec:CCRAW [(match_dup 1)
			 (match_dup 2)
			 (match_dup 3)]
			UNSPEC_VEC_VFAECC))])
   (set (match_operand:SI 4 "memory_operand" "")
	(unspec:SI [(reg:CCRAW CC_REGNUM)] UNSPEC_CC_TO_INT))]
  "TARGET_VX"
{
  operands[3] = GEN_INT (INTVAL (operands[3]) | VSTRING_FLAG_CS);
})

; Combined zero-search + CC-setting expander (CS | ZS).
(define_expand "vfaezs<mode>"
  [(parallel
    [(set (match_operand:VI_HW_QHS 0 "register_operand" "")
	  (unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand" "")
			     (match_operand:VI_HW_QHS 2 "register_operand" "")
			     (match_operand:SI 3 "immediate_operand" "C")]
			    UNSPEC_VEC_VFAE))
     (set (reg:CCRAW CC_REGNUM)
	  (unspec:CCRAW [(match_dup 1)
			 (match_dup 2)
			 (match_dup 3)]
			UNSPEC_VEC_VFAECC))])
   (set (match_operand:SI 4 "memory_operand" "")
	(unspec:SI [(reg:CCRAW CC_REGNUM)] UNSPEC_CC_TO_INT))]
  "TARGET_VX"
{
  operands[3] = GEN_INT (INTVAL (operands[3]) | VSTRING_FLAG_CS | VSTRING_FLAG_ZS);
})
1356 | ||
1357 | ||
; Vector find element equal

; vfeebs, vfeehs, vfeefs
; vfeezbs, vfeezhs, vfeezfs
; CC-setting find element equal.  Only ZS and CS are valid flags; the
; CS bit is implied by the "s" mnemonic suffix and therefore masked out
; before deciding between the vfeez and vfee forms.
(define_insn "*vfees<mode>"
  [(set (match_operand:VI_HW_QHS 0 "register_operand" "=v")
	(unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand" "v")
			   (match_operand:VI_HW_QHS 2 "register_operand" "v")
			   (match_operand:QI 3 "immediate_operand" "C")]
			  UNSPEC_VEC_VFEE))
   (set (reg:CCRAW CC_REGNUM)
	(unspec:CCRAW [(match_dup 1)
		       (match_dup 2)
		       (match_dup 3)]
		      UNSPEC_VEC_VFEECC))]
  "TARGET_VX"
{
  unsigned HOST_WIDE_INT flags = INTVAL (operands[3]);

  gcc_assert (!(flags & ~(VSTRING_FLAG_ZS | VSTRING_FLAG_CS)));
  flags &= ~VSTRING_FLAG_CS;

  if (flags == VSTRING_FLAG_ZS)
    return "vfeez<bhfgq>s\t%v0,%v1,%v2";
  return "vfee<bhfgq>s\t%v0,%v1,%v2,%b3";
}
  [(set_attr "op_type" "VRR")])
1385 | ||
; vfeeb, vfeeh, vfeef
; Find element equal without CC update; the M5 flags field is
; hard-coded to 0 (no zero search, no CC set).
; Fix: the operands previously had empty constraint strings; use the
; "v"/"=v" vector-register constraints like every sibling define_insn
; so register allocation can select the correct class.
(define_insn "vfee<mode>"
  [(set (match_operand:VI_HW_QHS 0 "register_operand" "=v")
	(unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand" "v")
			   (match_operand:VI_HW_QHS 2 "register_operand" "v")
			   (const_int 0)]
			  UNSPEC_VEC_VFEE))]
  "TARGET_VX"
  "vfee<bhfgq>\t%v0,%v1,%v2,0"
  [(set_attr "op_type" "VRR")])
1396 | ||
; vfeezb, vfeezh, vfeezf
; Find element equal with zero search, without CC update.
; Fixes:
;  - The template emitted "vfeez<bhfgq>s" although this pattern does
;    not model a CC set - the "s" (CS) form updates the condition code
;    behind the compiler's back.  Use the non-CC "vfeez<bhfgq>" form.
;  - The extended vfeez mnemonic already encodes the ZS bit, so the
;    explicit trailing ",2" mask operand is dropped.
;  - Empty constraint strings replaced with "v"/"=v" as in the
;    sibling patterns.
(define_insn "vfeez<mode>"
  [(set (match_operand:VI_HW_QHS 0 "register_operand" "=v")
	(unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand" "v")
			   (match_operand:VI_HW_QHS 2 "register_operand" "v")
			   (const_int VSTRING_FLAG_ZS)]
			  UNSPEC_VEC_VFEE))]
  "TARGET_VX"
  "vfeez<bhfgq>\t%v0,%v1,%v2"
  [(set_attr "op_type" "VRR")])
1407 | ||
; CC-setting expander: find element equal, store the CC value into the
; memory operand 3.
(define_expand "vfees<mode>"
  [(parallel
    [(set (match_operand:VI_HW_QHS 0 "register_operand" "")
	  (unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand" "")
			     (match_operand:VI_HW_QHS 2 "register_operand" "")
			     (const_int VSTRING_FLAG_CS)]
			    UNSPEC_VEC_VFEE))
     (set (reg:CCRAW CC_REGNUM)
	  (unspec:CCRAW [(match_dup 1)
			 (match_dup 2)
			 (const_int VSTRING_FLAG_CS)]
			UNSPEC_VEC_VFEECC))])
   (set (match_operand:SI 3 "memory_operand" "")
	(unspec:SI [(reg:CCRAW CC_REGNUM)] UNSPEC_CC_TO_INT))]
  "TARGET_VX")

; Zero-search + CC-setting variant; operand 4 holds the combined
; ZS | CS flags value shared by both unspecs.
(define_expand "vfeezs<mode>"
  [(parallel
    [(set (match_operand:VI_HW_QHS 0 "register_operand" "")
	  (unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand" "")
			     (match_operand:VI_HW_QHS 2 "register_operand" "")
			     (match_dup 4)]
			    UNSPEC_VEC_VFEE))
     (set (reg:CCRAW CC_REGNUM)
	  (unspec:CCRAW [(match_dup 1)
			 (match_dup 2)
			 (match_dup 4)]
			UNSPEC_VEC_VFEECC))])
   (set (match_operand:SI 3 "memory_operand" "")
	(unspec:SI [(reg:CCRAW CC_REGNUM)] UNSPEC_CC_TO_INT))]
  "TARGET_VX"
{
  operands[4] = GEN_INT (VSTRING_FLAG_ZS | VSTRING_FLAG_CS);
})
1442 | ||
; Vector find element not equal

; vfeneb, vfeneh, vfenef
; Find element not equal without CC update; M5 flags hard-coded to 0.
(define_insn "vfene<mode>"
  [(set (match_operand:VI_HW_QHS 0 "register_operand" "=v")
	(unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand" "v")
			   (match_operand:VI_HW_QHS 2 "register_operand" "v")
			   (const_int 0)]
			  UNSPEC_VEC_VFENE))]
  "TARGET_VX"
  "vfene<bhfgq>\t%v0,%v1,%v2,0"
  [(set_attr "op_type" "VRR")])

; vec_vfenes can be found in vector.md since it is used for strlen
1457 | ||
; vfenezb, vfenezh, vfenezf
; Find element not equal with zero search, without CC update.
; Fix: the operands previously had empty constraint strings; use the
; "v"/"=v" vector-register constraints like the sibling vfene pattern
; so register allocation can select the correct class.
(define_insn "vfenez<mode>"
  [(set (match_operand:VI_HW_QHS 0 "register_operand" "=v")
	(unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand" "v")
			   (match_operand:VI_HW_QHS 2 "register_operand" "v")
			   (const_int VSTRING_FLAG_ZS)]
			  UNSPEC_VEC_VFENE))]
  "TARGET_VX"
  "vfenez<bhfgq>\t%v0,%v1,%v2"
  [(set_attr "op_type" "VRR")])
1468 | ||
; CC-setting expander: find element not equal, store the CC value into
; the memory operand 3.
(define_expand "vfenes<mode>"
  [(parallel
    [(set (match_operand:VI_HW_QHS 0 "register_operand" "")
	  (unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand" "")
			     (match_operand:VI_HW_QHS 2 "register_operand" "")
			     (const_int VSTRING_FLAG_CS)]
			    UNSPEC_VEC_VFENE))
     (set (reg:CCRAW CC_REGNUM)
	  (unspec:CCRAW [(match_dup 1)
			 (match_dup 2)
			 (const_int VSTRING_FLAG_CS)]
			UNSPEC_VEC_VFENECC))])
   (set (match_operand:SI 3 "memory_operand" "")
	(unspec:SI [(reg:CCRAW CC_REGNUM)] UNSPEC_CC_TO_INT))]
  "TARGET_VX")

; Zero-search + CC-setting variant; operand 4 holds the combined
; ZS | CS flags value shared by both unspecs.
(define_expand "vfenezs<mode>"
  [(parallel
    [(set (match_operand:VI_HW_QHS 0 "register_operand" "")
	  (unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand" "")
			     (match_operand:VI_HW_QHS 2 "register_operand" "")
			     (match_dup 4)]
			    UNSPEC_VEC_VFENE))
     (set (reg:CCRAW CC_REGNUM)
	  (unspec:CCRAW [(match_dup 1)
			 (match_dup 2)
			 (match_dup 4)]
			UNSPEC_VEC_VFENECC))])
   (set (match_operand:SI 3 "memory_operand" "")
	(unspec:SI [(reg:CCRAW CC_REGNUM)] UNSPEC_CC_TO_INT))]
  "TARGET_VX"
{
  operands[4] = GEN_INT (VSTRING_FLAG_ZS | VSTRING_FLAG_CS);
})
1503 | ||
; Vector isolate string

; vistrb, vistrh, vistrf - without CC update
(define_insn "vistr<mode>"
  [(set (match_operand:VI_HW_QHS 0 "register_operand" "=v")
	(unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand" "v")]
			  UNSPEC_VEC_VISTR))]
  "TARGET_VX"
  "vistr<bhfgq>\t%v0,%v1"
  [(set_attr "op_type" "VRR")])

; vistrbs, vistrhs, vistrfs - CC-setting variant
(define_insn "*vistrs<mode>"
  [(set (match_operand:VI_HW_QHS 0 "register_operand" "=v")
	(unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand" "v")]
			  UNSPEC_VEC_VISTR))
   (set (reg:CCRAW CC_REGNUM)
	(unspec:CCRAW [(match_dup 1)] UNSPEC_VEC_VISTRCC))]
  "TARGET_VX"
  "vistr<bhfgq>s\t%v0,%v1"
  [(set_attr "op_type" "VRR")])

; Expander: isolate string and store the CC value into the memory
; operand 2.
(define_expand "vistrs<mode>"
  [(parallel
    [(set (match_operand:VI_HW_QHS 0 "register_operand" "")
	  (unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand" "")]
			    UNSPEC_VEC_VISTR))
     (set (reg:CCRAW CC_REGNUM)
	  (unspec:CCRAW [(match_dup 1)]
			UNSPEC_VEC_VISTRCC))])
   (set (match_operand:SI 2 "memory_operand" "")
	(unspec:SI [(reg:CCRAW CC_REGNUM)] UNSPEC_CC_TO_INT))]
  "TARGET_VX")
1537 | ||
1538 | ||
1539 | ; Vector compare range | |
1540 | ||
1541 | ; vstrcb, vstrch, vstrcf | |
1542 | ; vstrczb, vstrczh, vstrczf | |
1543 | (define_insn "vstrc<mode>" | |
1544 | [(set (match_operand:VI_HW_QHS 0 "register_operand" "=v") | |
1545 | (unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand" "v") | |
1546 | (match_operand:VI_HW_QHS 2 "register_operand" "v") | |
1547 | (match_operand:VI_HW_QHS 3 "register_operand" "v") | |
1548 | (match_operand:SI 4 "immediate_operand" "C")] | |
1549 | UNSPEC_VEC_VSTRC))] | |
1550 | "TARGET_VX" | |
1551 | { | |
1552 | unsigned HOST_WIDE_INT flags = INTVAL (operands[4]); | |
1553 | ||
1554 | if (flags & VSTRING_FLAG_ZS) | |
1555 | { | |
1556 | flags &= ~VSTRING_FLAG_ZS; | |
1557 | operands[4] = GEN_INT (flags); | |
1558 | return "vstrcz<bhfgq>\t%v0,%v1,%v2,%v3,%b4"; | |
1559 | } | |
1560 | return "vstrc<bhfgq>\t%v0,%v1,%v2,%v3,%b4"; | |
1561 | } | |
1562 | [(set_attr "op_type" "VRR")]) | |
1563 | ||
1564 | ; vstrcbs, vstrchs, vstrcfs | |
1565 | ; vstrczbs, vstrczhs, vstrczfs | |
1566 | (define_insn "*vstrcs<mode>" | |
1567 | [(set (match_operand:VI_HW_QHS 0 "register_operand" "=v") | |
1568 | (unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand" "v") | |
1569 | (match_operand:VI_HW_QHS 2 "register_operand" "v") | |
1570 | (match_operand:VI_HW_QHS 3 "register_operand" "v") | |
1571 | (match_operand:SI 4 "immediate_operand" "C")] | |
1572 | UNSPEC_VEC_VSTRC)) | |
1573 | (set (reg:CCRAW CC_REGNUM) | |
1574 | (unspec:CCRAW [(match_dup 1) | |
1575 | (match_dup 2) | |
1576 | (match_dup 3) | |
1577 | (match_dup 4)] | |
1578 | UNSPEC_VEC_VSTRCCC))] | |
1579 | "TARGET_VX" | |
1580 | { | |
1581 | unsigned HOST_WIDE_INT flags = INTVAL (operands[4]); | |
1582 | ||
1583 | if (flags & VSTRING_FLAG_ZS) | |
1584 | { | |
1585 | flags &= ~VSTRING_FLAG_ZS; | |
1586 | operands[4] = GEN_INT (flags); | |
1587 | return "vstrcz<bhfgq>s\t%v0,%v1,%v2,%v3,%b4"; | |
1588 | } | |
1589 | return "vstrc<bhfgq>s\t%v0,%v1,%v2,%v3,%b4"; | |
1590 | } | |
1591 | [(set_attr "op_type" "VRR")]) | |
1592 | ||
1593 | (define_expand "vstrcz<mode>" | |
1594 | [(set (match_operand:VI_HW_QHS 0 "register_operand" "=v") | |
1595 | (unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand" "v") | |
1596 | (match_operand:VI_HW_QHS 2 "register_operand" "v") | |
1597 | (match_operand:VI_HW_QHS 3 "register_operand" "v") | |
1598 | (match_operand:SI 4 "immediate_operand" "C")] | |
1599 | UNSPEC_VEC_VSTRC))] | |
1600 | "TARGET_VX" | |
1601 | { | |
1602 | operands[4] = GEN_INT (INTVAL (operands[4]) | VSTRING_FLAG_ZS); | |
1603 | }) | |
1604 | ||
; vec_vstrcs builtin: condition-code-setting variant of the vector
; string range compare.  Forces VSTRING_FLAG_CS into the immediate flags
; operand 4, performs the compare (which also sets CC via
; UNSPEC_VEC_VSTRCCC), and finally stores the raw condition code as an
; integer into memory operand 5 (the int* passed by the builtin user).
(define_expand "vstrcs<mode>"
  [(parallel
    [(set (match_operand:VI_HW_QHS 0 "register_operand" "")
	  (unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand" "")
			     (match_operand:VI_HW_QHS 2 "register_operand" "")
			     (match_operand:VI_HW_QHS 3 "register_operand" "")
			     (match_operand:SI 4 "immediate_operand" "C")]
			    UNSPEC_VEC_VSTRC))
     (set (reg:CCRAW CC_REGNUM)
	  (unspec:CCRAW [(match_dup 1)
			 (match_dup 2)
			 (match_dup 3)
			 (match_dup 4)]
			UNSPEC_VEC_VSTRCCC))])
   (set (match_operand:SI 5 "memory_operand" "")
	(unspec:SI [(reg:CCRAW CC_REGNUM)] UNSPEC_CC_TO_INT))]
  "TARGET_VX"
{
  /* Request the condition-code-setting form of the instruction.  */
  operands[4] = GEN_INT (INTVAL (operands[4]) | VSTRING_FLAG_CS);
})
1625 | ||
; vec_vstrczs builtin: combined "zero search" + condition-code-setting
; variant of the vector string range compare.  Forces both
; VSTRING_FLAG_CS and VSTRING_FLAG_ZS into the immediate flags operand 4
; and stores the resulting CC as an integer into memory operand 5.
(define_expand "vstrczs<mode>"
  [(parallel
    [(set (match_operand:VI_HW_QHS 0 "register_operand" "")
	  (unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand" "")
			     (match_operand:VI_HW_QHS 2 "register_operand" "")
			     (match_operand:VI_HW_QHS 3 "register_operand" "")
			     (match_operand:SI 4 "immediate_operand" "C")]
			    UNSPEC_VEC_VSTRC))
     (set (reg:CCRAW CC_REGNUM)
	  (unspec:CCRAW [(match_dup 1)
			 (match_dup 2)
			 (match_dup 3)
			 (match_dup 4)]
			UNSPEC_VEC_VSTRCCC))])
   (set (match_operand:SI 5 "memory_operand" "")
	(unspec:SI [(reg:CCRAW CC_REGNUM)] UNSPEC_CC_TO_INT))]
  "TARGET_VX"
{
  /* Request both the zero-search and the CC-setting form.  */
  operands[4] = GEN_INT (INTVAL (operands[4]) | VSTRING_FLAG_CS | VSTRING_FLAG_ZS);
})
1646 | ||
1647 | ||
; Signed V2DI -> V2DF conversion - inexact exception disabled
; Operand 2 is the rounding-mode immediate (m5 field); the hard-coded
; third assembler operand (m4 = 4) suppresses the IEEE inexact exception.
(define_insn "vec_di_to_df_s64"
  [(set (match_operand:V2DF 0 "register_operand" "=v")
	(unspec:V2DF [(match_operand:V2DI 1 "register_operand" "v")
		      (match_operand:QI 2 "immediate_operand" "C")]
		     UNSPEC_VEC_VCDGB))]
  "TARGET_VX"
  "vcdgb\t%v0,%v1,4,%b2"
  [(set_attr "op_type" "VRR")])
1657 | ||
; The result needs to be multiplied with 2**-op2
; vec_ctd (signed source): convert V2DI to V2DF using the current BFP
; rounding mode (const_int 0), then scale the converted value by 2^-op2
; by multiplying with a constant splat vector built at expand time.
(define_expand "vec_ctd_s64"
  [(set (match_operand:V2DF 0 "register_operand" "")
	(unspec:V2DF [(match_operand:V2DI 1 "register_operand" "")
		      (const_int 0)] ; According to current BFP rounding mode
		     UNSPEC_VEC_VCDGB))
   (use (match_operand:QI 2 "immediate_operand" ""))
   (set (match_dup 0) (mult:V2DF (match_dup 0) (match_dup 3)))]
  "TARGET_VX"
{
  REAL_VALUE_TYPE f;
  rtx c;

  /* Build the DFmode constant 2^-op2 ...  */
  real_2expN (&f, -INTVAL (operands[2]), DFmode);
  c = CONST_DOUBLE_FROM_REAL_VALUE (f, DFmode);

  /* ... and splat it into a V2DF register used as scale factor.  */
  operands[3] = gen_rtx_CONST_VECTOR (V2DFmode, gen_rtvec (2, c, c));
  operands[3] = force_reg (V2DFmode, operands[3]);
})
1677 | ||
; Unsigned V2DI -> V2DF conversion - inexact exception disabled
; Operand 2 is the rounding-mode immediate (m5 field); the hard-coded
; third assembler operand (m4 = 4) suppresses the IEEE inexact exception.
(define_insn "vec_di_to_df_u64"
  [(set (match_operand:V2DF 0 "register_operand" "=v")
	(unspec:V2DF [(match_operand:V2DI 1 "register_operand" "v")
		      (match_operand:QI 2 "immediate_operand" "C")]
		     UNSPEC_VEC_VCDLGB))]
  "TARGET_VX"
  "vcdlgb\t%v0,%v1,4,%b2"
  [(set_attr "op_type" "VRR")])
1687 | ||
; The result needs to be multiplied with 2**-op2
; vec_ctd (unsigned source): same as vec_ctd_s64 but converts with the
; unsigned VCDLGB unspec; the result is scaled by 2^-op2 via a constant
; splat vector built at expand time.
(define_expand "vec_ctd_u64"
  [(set (match_operand:V2DF 0 "register_operand" "")
	(unspec:V2DF [(match_operand:V2DI 1 "register_operand" "")
		      (const_int 0)] ; According to current BFP rounding mode
		     UNSPEC_VEC_VCDLGB))
   (use (match_operand:QI 2 "immediate_operand" ""))
   (set (match_dup 0) (mult:V2DF (match_dup 0) (match_dup 3)))]
  "TARGET_VX"
{
  REAL_VALUE_TYPE f;
  rtx c;

  /* Build the DFmode constant 2^-op2 ...  */
  real_2expN (&f, -INTVAL (operands[2]), DFmode);
  c = CONST_DOUBLE_FROM_REAL_VALUE (f, DFmode);

  /* ... and splat it into a V2DF register used as scale factor.  */
  operands[3] = gen_rtx_CONST_VECTOR (V2DFmode, gen_rtvec (2, c, c));
  operands[3] = force_reg (V2DFmode, operands[3]);
})
1707 | ||
1708 | ||
; Signed V2DF -> V2DI conversion - inexact exception disabled
; Operand 2 is the rounding-mode immediate (m5 field); the hard-coded
; third assembler operand (m4 = 4) suppresses the IEEE inexact exception.
(define_insn "vec_df_to_di_s64"
  [(set (match_operand:V2DI 0 "register_operand" "=v")
	(unspec:V2DI [(match_operand:V2DF 1 "register_operand" "v")
		      (match_operand:QI 2 "immediate_operand" "C")]
		     UNSPEC_VEC_VCGDB))]
  "TARGET_VX"
  "vcgdb\t%v0,%v1,4,%b2"
  [(set_attr "op_type" "VRR")])
1718 | ||
; The input needs to be multiplied with 2**op2
; vec_ctsl builtin: scale the V2DF input by 2^op2 (splat constant built
; at expand time, product placed in the fresh temp operand 4), then
; convert to signed V2DI using the current BFP rounding mode.
(define_expand "vec_ctsl"
  [(use (match_operand:QI 2 "immediate_operand" ""))
   (set (match_dup 4) (mult:V2DF (match_operand:V2DF 1 "register_operand" "")
				 (match_dup 3)))
   (set (match_operand:V2DI 0 "register_operand" "")
	(unspec:V2DI [(match_dup 4) (const_int 0)] ; According to current BFP rounding mode
		     UNSPEC_VEC_VCGDB))]
  "TARGET_VX"
{
  REAL_VALUE_TYPE f;
  rtx c;

  /* Build the DFmode constant 2^op2 ...  */
  real_2expN (&f, INTVAL (operands[2]), DFmode);
  c = CONST_DOUBLE_FROM_REAL_VALUE (f, DFmode);

  /* ... splat it as scale factor, and allocate the V2DF temporary
     holding the scaled input.  */
  operands[3] = gen_rtx_CONST_VECTOR (V2DFmode, gen_rtvec (2, c, c));
  operands[3] = force_reg (V2DFmode, operands[3]);
  operands[4] = gen_reg_rtx (V2DFmode);
})
1739 | ||
; Unsigned V2DF -> V2DI conversion - inexact exception disabled
; Operand 2 is the rounding-mode immediate (m5 field); the hard-coded
; third assembler operand (m4 = 4) suppresses the IEEE inexact exception.
(define_insn "vec_df_to_di_u64"
  [(set (match_operand:V2DI 0 "register_operand" "=v")
	(unspec:V2DI [(match_operand:V2DF 1 "register_operand" "v")
		      (match_operand:QI 2 "immediate_operand" "C")]
		     UNSPEC_VEC_VCLGDB))]
  "TARGET_VX"
  "vclgdb\t%v0,%v1,4,%b2"
  [(set_attr "op_type" "VRR")])
1749 | ||
; The input needs to be multiplied with 2**op2
; vec_ctul builtin: same as vec_ctsl but converts to unsigned V2DI via
; the VCLGDB unspec.  The input is scaled by 2^op2 into the fresh temp
; operand 4 before the conversion.
(define_expand "vec_ctul"
  [(use (match_operand:QI 2 "immediate_operand" ""))
   (set (match_dup 4) (mult:V2DF (match_operand:V2DF 1 "register_operand" "")
				 (match_dup 3)))
   (set (match_operand:V2DI 0 "register_operand" "")
	(unspec:V2DI [(match_dup 4) (const_int 0)] ; According to current BFP rounding mode
		     UNSPEC_VEC_VCLGDB))]
  "TARGET_VX"
{
  REAL_VALUE_TYPE f;
  rtx c;

  /* Build the DFmode constant 2^op2 ...  */
  real_2expN (&f, INTVAL (operands[2]), DFmode);
  c = CONST_DOUBLE_FROM_REAL_VALUE (f, DFmode);

  /* ... splat it as scale factor, and allocate the V2DF temporary
     holding the scaled input.  */
  operands[3] = gen_rtx_CONST_VECTOR (V2DFmode, gen_rtvec (2, c, c));
  operands[3] = force_reg (V2DFmode, operands[3]);
  operands[4] = gen_reg_rtx (V2DFmode);
})
1770 | ||
; Vector load fp integer - IEEE inexact exception is suppressed
; Operand 2 is the m4 modifier field, operand 3 the rounding mode (m5).
; NOTE(review): the result mode is V2DI although VFIDB rounds DF values
; to integral DF values - presumably chosen for the builtin's interface;
; confirm against the builtin type declarations.
(define_insn "vfidb"
  [(set (match_operand:V2DI 0 "register_operand" "=v")
	(unspec:V2DI [(match_operand:V2DF 1 "register_operand" "v")
		      (match_operand:QI 2 "immediate_operand" "C")
		      (match_operand:QI 3 "immediate_operand" "C")]
		     UNSPEC_VEC_VFIDB))]
  "TARGET_VX"
  "vfidb\t%v0,%v1,%b2,%b3"
  [(set_attr "op_type" "VRR")])
1781 | ||
; The following expanders are thin wrappers around the UNSPEC_VEC_VFIDB
; pattern above, each hard-wiring one rounding-mode immediate.

; vec_ceil: round towards +infinity.
(define_expand "vec_ceil"
  [(set (match_operand:V2DI 0 "register_operand" "")
	(unspec:V2DI [(match_operand:V2DF 1 "register_operand" "")
		      (const_int VEC_RND_TO_INF)]
		     UNSPEC_VEC_VFIDB))]
  "TARGET_VX")

; vec_floor: round towards -infinity.
(define_expand "vec_floor"
  [(set (match_operand:V2DI 0 "register_operand" "")
	(unspec:V2DI [(match_operand:V2DF 1 "register_operand" "")
		      (const_int VEC_RND_TO_MINF)]
		     UNSPEC_VEC_VFIDB))]
  "TARGET_VX")

; vec_trunc: round towards zero.
(define_expand "vec_trunc"
  [(set (match_operand:V2DI 0 "register_operand" "")
	(unspec:V2DI [(match_operand:V2DF 1 "register_operand" "")
		      (const_int VEC_RND_TO_ZERO)]
		     UNSPEC_VEC_VFIDB))]
  "TARGET_VX")

; vec_roundc: round according to the current BFP rounding mode.
(define_expand "vec_roundc"
  [(set (match_operand:V2DI 0 "register_operand" "")
	(unspec:V2DI [(match_operand:V2DF 1 "register_operand" "")
		      (const_int VEC_RND_CURRENT)]
		     UNSPEC_VEC_VFIDB))]
  "TARGET_VX")

; vec_round: round to nearest, ties to even.
(define_expand "vec_round"
  [(set (match_operand:V2DI 0 "register_operand" "")
	(unspec:V2DI [(match_operand:V2DF 1 "register_operand" "")
		      (const_int VEC_RND_NEAREST_TO_EVEN)]
		     UNSPEC_VEC_VFIDB))]
  "TARGET_VX")
1816 | ||
1817 | ||
; Vector load lengthened - V4SF -> V2DF
; Widens the even-indexed SF elements (0 and 2) of operand 1 to DF
; (see the vec_ld2f expander below, which populates exactly those
; elements).
(define_insn "*vldeb"
  [(set (match_operand:V2DF 0 "register_operand" "=v")
	(unspec:V2DF [(match_operand:V4SF 1 "register_operand" "v")]
		     UNSPEC_VEC_VLDEB))]
  "TARGET_VX"
  "vldeb\t%v0,%v1"
  [(set_attr "op_type" "VRR")])
1827 | ||
; vec_ld2f builtin: load two consecutive SFmode values from memory
; operand 1 (offsets 0 and 4) into V4SF elements 0 and 2 of a zeroed
; temporary, then widen those elements to a V2DF result with vldeb.
(define_expand "vec_ld2f"
  [; Initialize a vector to all zeroes.  FIXME: This should not be
   ; necessary since all elements of the vector will be set anyway.
   ; This is just to make it explicit to the data flow framework.
   (set (match_dup 2) (match_dup 3))
   ; Insert the first SF value into element 0.
   (set (match_dup 2) (unspec:V4SF [(match_operand:SF 1 "memory_operand" "")
				    (const_int 0)
				    (match_dup 2)]
				   UNSPEC_VEC_SET))
   ; Insert the second SF value (mem + 4 bytes) into element 2.
   (set (match_dup 2) (unspec:V4SF [(match_dup 4)
				    (const_int 2)
				    (match_dup 2)]
				   UNSPEC_VEC_SET))
   ; Widen the even elements to double precision.
   (set (match_operand:V2DF 0 "register_operand" "")
	(unspec:V2DF [(match_dup 2)] UNSPEC_VEC_VLDEB))]
  "TARGET_VX"
{
  operands[2] = gen_reg_rtx (V4SFmode);
  operands[3] = CONST0_RTX (V4SFmode);
  /* Memory reference for the second element: 4 bytes past operand 1.  */
  operands[4] = adjust_address (operands[1], SFmode, 4);
})
1849 | ||
1850 | ||
; Vector load rounded - V2DF -> V4SF
; Narrows the DF elements to SF; the trailing "0,0" assembler operands
; select default m-field/rounding behavior.
(define_insn "*vledb"
  [(set (match_operand:V4SF 0 "register_operand" "=v")
	(unspec:V4SF [(match_operand:V2DF 1 "register_operand" "v")]
		     UNSPEC_VEC_VLEDB))]
  "TARGET_VX"
  "vledb\t%v0,%v1,0,0"
  [(set_attr "op_type" "VRR")])
1860 | ||
; vec_st2f builtin: round the V2DF input (operand 0) to V4SF with vledb,
; then store the even-indexed SF elements 0 and 2 to memory operand 1 at
; offsets 0 and 4.  Note the reversed operand roles: operand 0 is the
; source register, operand 1 the destination memory.
(define_expand "vec_st2f"
  [(set (match_dup 2)
	(unspec:V4SF [(match_operand:V2DF 0 "register_operand" "")]
		     UNSPEC_VEC_VLEDB))
   (set (match_operand:SF 1 "memory_operand" "")
	(unspec:SF [(match_dup 2) (const_int 0)] UNSPEC_VEC_EXTRACT))
   (set (match_dup 3)
	(unspec:SF [(match_dup 2) (const_int 2)] UNSPEC_VEC_EXTRACT))]
  "TARGET_VX"
{
  operands[2] = gen_reg_rtx (V4SFmode);
  /* Memory reference for the second element: 4 bytes past operand 1.  */
  operands[3] = adjust_address (operands[1], SFmode, 4);
})
1874 | ||
1875 | ||
; Vector load negated fp
; vec_nabs builtin: expands to (neg (abs x)) rtl; the actual insn is
; expected to be matched by a pattern elsewhere in the backend.
(define_expand "vec_nabs"
  [(set (match_operand:V2DF 0 "register_operand" "")
	(neg:V2DF (abs:V2DF (match_operand:V2DF 1 "register_operand" ""))))]
  "TARGET_VX")
1882 | ||
1883 | ; Vector square root fp vec_sqrt -> sqrt rtx standard name | |
1884 | ||
; Vector FP test data class immediate
; vftcidb tests each DF element of operand 1 against the class mask in
; immediate operand 2 and sets both a vector result and the condition
; code.  NOTE(review): the vector result mode is V2DF although the
; instruction produces an element-wise mask - presumably chosen for the
; builtin's interface; confirm against the builtin type declarations.

; Full form: vector result plus raw CC.
(define_insn "*vftcidb"
  [(set (match_operand:V2DF 0 "register_operand" "=v")
	(unspec:V2DF [(match_operand:V2DF 1 "register_operand" "v")
		      (match_operand:SI 2 "immediate_operand" "J")]
		     UNSPEC_VEC_VFTCIDB))
   (set (reg:CCRAW CC_REGNUM)
	(unspec:CCRAW [(match_dup 1) (match_dup 2)] UNSPEC_VEC_VFTCIDBCC))]
  "TARGET_VX"
  "vftcidb\t%v0,%v1,%x2"
  [(set_attr "op_type" "VRR")])

; CC-only form: the vector result is not needed; a scratch register
; receives it.
(define_insn "*vftcidb_cconly"
  [(set (reg:CCRAW CC_REGNUM)
	(unspec:CCRAW [(match_operand:V2DF 1 "register_operand" "v")
		       (match_operand:SI 2 "immediate_operand" "J")]
		      UNSPEC_VEC_VFTCIDBCC))
   (clobber (match_scratch:V2DI 0 "=v"))]
  "TARGET_VX"
  "vftcidb\t%v0,%v1,%x2"
  [(set_attr "op_type" "VRR")])

; Builtin expander: perform the test and additionally store the raw CC
; as an integer into memory operand 3.
(define_expand "vftcidb"
  [(parallel
    [(set (match_operand:V2DF 0 "register_operand" "")
	  (unspec:V2DF [(match_operand:V2DF 1 "register_operand" "")
			(match_operand:SI 2 "immediate_operand" "")]
		       UNSPEC_VEC_VFTCIDB))
     (set (reg:CCRAW CC_REGNUM)
	  (unspec:CCRAW [(match_dup 1) (match_dup 2)] UNSPEC_VEC_VFTCIDBCC))])
   (set (match_operand:SI 3 "memory_operand" "")
	(unspec:SI [(reg:CCRAW CC_REGNUM)] UNSPEC_CC_TO_INT))]
  "TARGET_VX")
1919 | ||
;;
;; Integer compares
;;

; All comparisons which produce a CC need fully populated (VI_HW)
; vector arguments.  Otherwise the any/all CCs would be just bogus.

; CC-only element compare: the vector result goes into a scratch
; register; only the condition code (mode selected by the VICMP
; iterator: CCVEQ/CCVH/CCVHU) is consumed.
(define_insn "*vec_cmp<VICMP:insn_cmp><VI_HW:mode>_cconly"
  [(set (reg:VICMP CC_REGNUM)
	(compare:VICMP (match_operand:VI_HW 0 "register_operand" "v")
		       (match_operand:VI_HW 1 "register_operand" "v")))
   (clobber (match_scratch:VI_HW 2 "=v"))]
  "TARGET_VX"
  "vc<VICMP:insn_cmp><VI_HW:bhfgq>s\t%v2,%v0,%v1"
  [(set_attr "op_type" "VRR")])
1935 | ||
; FIXME: The following 2x3 definitions should be merged into 2 with
; VICMP like above but I could not find a way to set the comparison
; operator (eq) depending on the mode CCVEQ (mode_iterator).  Or the
; other way around - setting the mode depending on the code
; (code_iterator).

; Each expander performs the element compare (producing both the mask
; vector in operand 0 and the CC) and then stores the raw CC as an
; integer into memory operand 3.

; Equality compare, CC in CCVEQ mode.
(define_expand "vec_cmpeq<VI_HW:mode>_cc"
  [(parallel
    [(set (reg:CCVEQ CC_REGNUM)
	  (compare:CCVEQ (match_operand:VI_HW 1 "register_operand" "v")
			 (match_operand:VI_HW 2 "register_operand" "v")))
     (set (match_operand:VI_HW 0 "register_operand" "=v")
	  (eq:VI_HW (match_dup 1) (match_dup 2)))])
   (set (match_operand:SI 3 "memory_operand" "")
	(unspec:SI [(reg:CCVEQ CC_REGNUM)] UNSPEC_CC_TO_INT))]
  "TARGET_VX")

; Signed greater-than compare, CC in CCVH mode.
(define_expand "vec_cmph<VI_HW:mode>_cc"
  [(parallel
    [(set (reg:CCVH CC_REGNUM)
	  (compare:CCVH (match_operand:VI_HW 1 "register_operand" "v")
			(match_operand:VI_HW 2 "register_operand" "v")))
     (set (match_operand:VI_HW 0 "register_operand" "=v")
	  (gt:VI_HW (match_dup 1) (match_dup 2)))])
   (set (match_operand:SI 3 "memory_operand" "")
	(unspec:SI [(reg:CCVH CC_REGNUM)] UNSPEC_CC_TO_INT))]
  "TARGET_VX")

; Unsigned greater-than compare, CC in CCVHU mode.
(define_expand "vec_cmphl<VI_HW:mode>_cc"
  [(parallel
    [(set (reg:CCVHU CC_REGNUM)
	  (compare:CCVHU (match_operand:VI_HW 1 "register_operand" "v")
			 (match_operand:VI_HW 2 "register_operand" "v")))
     (set (match_operand:VI_HW 0 "register_operand" "=v")
	  (gtu:VI_HW (match_dup 1) (match_dup 2)))])
   (set (match_operand:SI 3 "memory_operand" "")
	(unspec:SI [(reg:CCVHU CC_REGNUM)] UNSPEC_CC_TO_INT))]
  "TARGET_VX")
1973 | ||
1974 | ||
; Insn patterns matched by the vec_cmp*_cc expanders above: one per
; comparison kind, each producing the mask vector in operand 2 and the
; corresponding CC mode.

; Equality: vceq<bhfgq>s.
(define_insn "*vec_cmpeq<VI_HW:mode>_cc"
  [(set (reg:CCVEQ CC_REGNUM)
	(compare:CCVEQ (match_operand:VI_HW 0 "register_operand" "v")
		       (match_operand:VI_HW 1 "register_operand" "v")))
   (set (match_operand:VI_HW 2 "register_operand" "=v")
	(eq:VI_HW (match_dup 0) (match_dup 1)))]
  "TARGET_VX"
  "vceq<VI_HW:bhfgq>s\t%v2,%v0,%v1"
  [(set_attr "op_type" "VRR")])

; Signed greater-than: vch<bhfgq>s.
(define_insn "*vec_cmph<VI_HW:mode>_cc"
  [(set (reg:CCVH CC_REGNUM)
	(compare:CCVH (match_operand:VI_HW 0 "register_operand" "v")
		      (match_operand:VI_HW 1 "register_operand" "v")))
   (set (match_operand:VI_HW 2 "register_operand" "=v")
	(gt:VI_HW (match_dup 0) (match_dup 1)))]
  "TARGET_VX"
  "vch<VI_HW:bhfgq>s\t%v2,%v0,%v1"
  [(set_attr "op_type" "VRR")])

; Unsigned greater-than: vchl<bhfgq>s.
(define_insn "*vec_cmphl<VI_HW:mode>_cc"
  [(set (reg:CCVHU CC_REGNUM)
	(compare:CCVHU (match_operand:VI_HW 0 "register_operand" "v")
		       (match_operand:VI_HW 1 "register_operand" "v")))
   (set (match_operand:VI_HW 2 "register_operand" "=v")
	(gtu:VI_HW (match_dup 0) (match_dup 1)))]
  "TARGET_VX"
  "vchl<VI_HW:bhfgq>s\t%v2,%v0,%v1"
  [(set_attr "op_type" "VRR")])
2004 | ||
2005 | ;; | |
;; Floating point compares
2007 | ;; | |
2008 | ||
; CC-only V2DF element compare: the mask result goes into a V2DI
; scratch; the CC mode is selected by the VFCMP iterator and the
; mnemonic suffix by <asm_fcmp>.
(define_insn "*vec_cmp<insn_cmp>v2df_cconly"
  [(set (reg:VFCMP CC_REGNUM)
	(compare:VFCMP (match_operand:V2DF 0 "register_operand" "v")
		       (match_operand:V2DF 1 "register_operand" "v")))
   (clobber (match_scratch:V2DI 2 "=v"))]
  "TARGET_VX"
  "vfc<asm_fcmp>dbs\t%v2,%v0,%v1"
  [(set_attr "op_type" "VRR")])
2017 | ||
; FIXME: Merge the following 2x3 patterns with VFCMP
; Each expander performs the V2DF element compare (producing both the
; V2DI mask in operand 0 and the CC) and then stores the raw CC as an
; integer into memory operand 3.

; Equality compare, CC in CCVEQ mode.
(define_expand "vec_cmpeqv2df_cc"
  [(parallel
    [(set (reg:CCVEQ CC_REGNUM)
	  (compare:CCVEQ (match_operand:V2DF 1 "register_operand" "v")
			 (match_operand:V2DF 2 "register_operand" "v")))
     (set (match_operand:V2DI 0 "register_operand" "=v")
	  (eq:V2DI (match_dup 1) (match_dup 2)))])
   (set (match_operand:SI 3 "memory_operand" "")
	(unspec:SI [(reg:CCVEQ CC_REGNUM)] UNSPEC_CC_TO_INT))]
  "TARGET_VX")

; Greater-than compare, CC in CCVH mode.
(define_expand "vec_cmphv2df_cc"
  [(parallel
    [(set (reg:CCVH CC_REGNUM)
	  (compare:CCVH (match_operand:V2DF 1 "register_operand" "v")
		        (match_operand:V2DF 2 "register_operand" "v")))
     (set (match_operand:V2DI 0 "register_operand" "=v")
	  (gt:V2DI (match_dup 1) (match_dup 2)))])
   (set (match_operand:SI 3 "memory_operand" "")
	(unspec:SI [(reg:CCVH CC_REGNUM)] UNSPEC_CC_TO_INT))]
  "TARGET_VX")

; Greater-than-or-equal compare, CC in CCVFHE mode.
(define_expand "vec_cmphev2df_cc"
  [(parallel
    [(set (reg:CCVFHE CC_REGNUM)
	  (compare:CCVFHE (match_operand:V2DF 1 "register_operand" "v")
			  (match_operand:V2DF 2 "register_operand" "v")))
     (set (match_operand:V2DI 0 "register_operand" "=v")
	  (ge:V2DI (match_dup 1) (match_dup 2)))])
   (set (match_operand:SI 3 "memory_operand" "")
	(unspec:SI [(reg:CCVFHE CC_REGNUM)] UNSPEC_CC_TO_INT))]
  "TARGET_VX")
2051 | ||
2052 | ||
; Insn patterns matched by the vec_cmp*v2df_cc expanders above: one per
; comparison kind, each producing the V2DI mask in operand 2 and the
; corresponding CC mode.

; Equality: vfcedbs.
(define_insn "*vec_cmpeqv2df_cc"
  [(set (reg:CCVEQ CC_REGNUM)
	(compare:CCVEQ (match_operand:V2DF 0 "register_operand" "v")
		       (match_operand:V2DF 1 "register_operand" "v")))
   (set (match_operand:V2DI 2 "register_operand" "=v")
	(eq:V2DI (match_dup 0) (match_dup 1)))]
  "TARGET_VX"
  "vfcedbs\t%v2,%v0,%v1"
  [(set_attr "op_type" "VRR")])

; Greater-than: vfchdbs.
(define_insn "*vec_cmphv2df_cc"
  [(set (reg:CCVH CC_REGNUM)
	(compare:CCVH (match_operand:V2DF 0 "register_operand" "v")
		      (match_operand:V2DF 1 "register_operand" "v")))
   (set (match_operand:V2DI 2 "register_operand" "=v")
	(gt:V2DI (match_dup 0) (match_dup 1)))]
  "TARGET_VX"
  "vfchdbs\t%v2,%v0,%v1"
  [(set_attr "op_type" "VRR")])

; Greater-than-or-equal: vfchedbs.
(define_insn "*vec_cmphev2df_cc"
  [(set (reg:CCVFHE CC_REGNUM)
	(compare:CCVFHE (match_operand:V2DF 0 "register_operand" "v")
			(match_operand:V2DF 1 "register_operand" "v")))
   (set (match_operand:V2DI 2 "register_operand" "=v")
	(ge:V2DI (match_dup 0) (match_dup 1)))]
  "TARGET_VX"
  "vfchedbs\t%v2,%v0,%v1"
  [(set_attr "op_type" "VRR")])