]> git.ipfire.org Git - thirdparty/gcc.git/blame - gcc/config/rs6000/altivec.md
Update copyright years.
[thirdparty/gcc.git] / gcc / config / rs6000 / altivec.md
CommitLineData
10ed84db 1;; AltiVec patterns.
a945c346 2;; Copyright (C) 2002-2024 Free Software Foundation, Inc.
10ed84db
AH
3;; Contributed by Aldy Hernandez (aldy@quesejoda.com)
4
5de601cf 5;; This file is part of GCC.
10ed84db 6
5de601cf
NC
7;; GCC is free software; you can redistribute it and/or modify it
8;; under the terms of the GNU General Public License as published
2f83c7d6 9;; by the Free Software Foundation; either version 3, or (at your
5de601cf 10;; option) any later version.
10ed84db 11
5de601cf
NC
12;; GCC is distributed in the hope that it will be useful, but WITHOUT
13;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15;; License for more details.
10ed84db
AH
16
17;; You should have received a copy of the GNU General Public License
2f83c7d6
NC
18;; along with GCC; see the file COPYING3. If not see
19;; <http://www.gnu.org/licenses/>.
10ed84db 20
f3c33d9d
MM
21(define_c_enum "unspec"
22 [UNSPEC_VCMPBFP
23 UNSPEC_VMSUMU
c1a57681 24 UNSPEC_VMSUMUDM
f3c33d9d
MM
25 UNSPEC_VMSUMM
26 UNSPEC_VMSUMSHM
27 UNSPEC_VMSUMUHS
28 UNSPEC_VMSUMSHS
29 UNSPEC_VMHADDSHS
30 UNSPEC_VMHRADDSHS
f3c33d9d 31 UNSPEC_VADDCUW
f3c33d9d
MM
32 UNSPEC_VAVGU
33 UNSPEC_VAVGS
34 UNSPEC_VMULEUB
35 UNSPEC_VMULESB
36 UNSPEC_VMULEUH
37 UNSPEC_VMULESH
5ff4baa5
CL
38 UNSPEC_VMULEUW
39 UNSPEC_VMULESW
f03122f2
CL
40 UNSPEC_VMULEUD
41 UNSPEC_VMULESD
f3c33d9d
MM
42 UNSPEC_VMULOUB
43 UNSPEC_VMULOSB
44 UNSPEC_VMULOUH
45 UNSPEC_VMULOSH
5ff4baa5
CL
46 UNSPEC_VMULOUW
47 UNSPEC_VMULOSW
f03122f2
CL
48 UNSPEC_VMULOUD
49 UNSPEC_VMULOSD
f3c33d9d 50 UNSPEC_VPKPX
a5965b52
MM
51 UNSPEC_VPACK_SIGN_SIGN_SAT
52 UNSPEC_VPACK_SIGN_UNS_SAT
53 UNSPEC_VPACK_UNS_UNS_SAT
54 UNSPEC_VPACK_UNS_UNS_MOD
52a93551 55 UNSPEC_VPACK_UNS_UNS_MOD_DIRECT
952ac945 56 UNSPEC_VREVEV
f3c33d9d
MM
57 UNSPEC_VSLV4SI
58 UNSPEC_VSLO
59 UNSPEC_VSR
60 UNSPEC_VSRO
61 UNSPEC_VSUBCUW
f3c33d9d
MM
62 UNSPEC_VSUM4UBS
63 UNSPEC_VSUM4S
64 UNSPEC_VSUM2SWS
65 UNSPEC_VSUMSWS
66 UNSPEC_VPERM
fe3f3340 67 UNSPEC_VPERMR
f3c33d9d
MM
68 UNSPEC_VPERM_UNS
69 UNSPEC_VRFIN
70 UNSPEC_VCFUX
71 UNSPEC_VCFSX
72 UNSPEC_VCTUXS
73 UNSPEC_VCTSXS
74 UNSPEC_VLOGEFP
75 UNSPEC_VEXPTEFP
fbd86cc6 76 UNSPEC_VSLDOI
a5965b52
MM
77 UNSPEC_VUNPACK_HI_SIGN
78 UNSPEC_VUNPACK_LO_SIGN
52a93551
BS
79 UNSPEC_VUNPACK_HI_SIGN_DIRECT
80 UNSPEC_VUNPACK_LO_SIGN_DIRECT
f3c33d9d 81 UNSPEC_VUPKHPX
f3c33d9d 82 UNSPEC_VUPKLPX
19388c6d 83 UNSPEC_CONVERT_4F32_8I16
58b475a2 84 UNSPEC_CONVERT_4F32_8F16
f3c33d9d
MM
85 UNSPEC_DST
86 UNSPEC_DSTT
87 UNSPEC_DSTST
88 UNSPEC_DSTSTT
89 UNSPEC_LVSL
90 UNSPEC_LVSR
91 UNSPEC_LVE
92 UNSPEC_STVX
93 UNSPEC_STVXL
94 UNSPEC_STVE
95 UNSPEC_SET_VSCR
96 UNSPEC_GET_VRSAVE
97 UNSPEC_LVX
98 UNSPEC_REDUC_PLUS
99 UNSPEC_VECSH
100 UNSPEC_EXTEVEN_V4SI
101 UNSPEC_EXTEVEN_V8HI
102 UNSPEC_EXTEVEN_V16QI
103 UNSPEC_EXTEVEN_V4SF
104 UNSPEC_EXTODD_V4SI
105 UNSPEC_EXTODD_V8HI
106 UNSPEC_EXTODD_V16QI
107 UNSPEC_EXTODD_V4SF
108 UNSPEC_INTERHI_V4SI
109 UNSPEC_INTERHI_V8HI
110 UNSPEC_INTERHI_V16QI
111 UNSPEC_INTERLO_V4SI
112 UNSPEC_INTERLO_V8HI
113 UNSPEC_INTERLO_V16QI
114 UNSPEC_LVLX
115 UNSPEC_LVLXL
116 UNSPEC_LVRX
117 UNSPEC_LVRXL
118 UNSPEC_STVLX
119 UNSPEC_STVLXL
120 UNSPEC_STVRX
121 UNSPEC_STVRXL
5408a64a 122 UNSPEC_VADU
e342b9d8
KN
123 UNSPEC_VSLV
124 UNSPEC_VSRV
f3c33d9d
MM
125 UNSPEC_VMULWHUB
126 UNSPEC_VMULWLUB
127 UNSPEC_VMULWHSB
128 UNSPEC_VMULWLSB
129 UNSPEC_VMULWHUH
130 UNSPEC_VMULWLUH
131 UNSPEC_VMULWHSH
132 UNSPEC_VMULWLSH
a20be0cd
KL
133 UNSPEC_VUPKHU
134 UNSPEC_VUPKLU
f3c33d9d
MM
135 UNSPEC_VPERMSI
136 UNSPEC_VPERMHI
137 UNSPEC_INTERHI
138 UNSPEC_INTERLO
139 UNSPEC_VUPKHS_V4SF
140 UNSPEC_VUPKLS_V4SF
141 UNSPEC_VUPKHU_V4SF
142 UNSPEC_VUPKLU_V4SF
0bd62dca 143 UNSPEC_VGBBD
68d3bacf 144 UNSPEC_VSPLT_DIRECT
00cc7cbf 145 UNSPEC_VMRGEW_DIRECT
6ccd2ece 146 UNSPEC_VMRGOW_DIRECT
b80afde9 147 UNSPEC_VSUMSWS_DIRECT
a16a872d
MM
148 UNSPEC_VADDCUQ
149 UNSPEC_VADDEUQM
150 UNSPEC_VADDECUQ
151 UNSPEC_VSUBCUQ
152 UNSPEC_VSUBEUQM
153 UNSPEC_VSUBECUQ
117f16fb 154 UNSPEC_VBPERMQ
dfc42f08 155 UNSPEC_VBPERMD
06b39289
MM
156 UNSPEC_BCDADD
157 UNSPEC_BCDSUB
158 UNSPEC_BCD_OVERFLOW
05161256 159 UNSPEC_BCDSHIFT
a6607774
BS
160 UNSPEC_VRLMI
161 UNSPEC_VRLNM
ed07d681 162 UNSPEC_VCFUGED
a1821a24
KN
163 UNSPEC_VCLZDM
164 UNSPEC_VCTZDM
7c00c559 165 UNSPEC_VGNB
894ac7bc
KN
166 UNSPEC_VPDEPD
167 UNSPEC_VPEXTD
25bf7d32
KN
168 UNSPEC_VCLRLB
169 UNSPEC_VCLRRB
89ce3290
KN
170 UNSPEC_VSTRIR
171 UNSPEC_VSTRIL
82f10dee
CL
172 UNSPEC_SLDB
173 UNSPEC_SRDB
98b44b0e 174])
d744e06e 175
f3c33d9d
MM
176(define_c_enum "unspecv"
177 [UNSPECV_SET_VRSAVE
178 UNSPECV_MTVSCR
179 UNSPECV_MFVSCR
180 UNSPECV_DSSALL
181 UNSPECV_DSS
3aca4bff
DE
182 ])
183
a6607774 184;; Short vec int modes
3abcb3a7 185(define_mode_iterator VIshort [V8HI V16QI])
81466555 186;; Vec float modes
3abcb3a7
HPN
187(define_mode_iterator VF [V4SF])
188;; Vec modes, pity mode iterators are not composable
189(define_mode_iterator V [V4SI V8HI V16QI V4SF])
a72c65c7
MM
190;; Vec modes for move/logical/permute ops, include vector types for move not
191;; otherwise handled by altivec (v2df, v2di, ti)
c477a667
MM
192(define_mode_iterator VM [V4SI
193 V8HI
194 V16QI
195 V4SF
196 V2DF
197 V2DI
198 V1TI
199 TI
200 (KF "FLOAT128_VECTOR_P (KFmode)")
201 (TF "FLOAT128_VECTOR_P (TFmode)")])
10ed84db 202
a72c65c7 203;; Like VM, except don't do TImode
c477a667
MM
204(define_mode_iterator VM2 [V4SI
205 V8HI
206 V16QI
207 V4SF
208 V2DF
209 V2DI
210 V1TI
211 (KF "FLOAT128_VECTOR_P (KFmode)")
212 (TF "FLOAT128_VECTOR_P (TFmode)")])
10ed84db 213
3fd44c8a
CL
214;; Map the Vector convert single precision to double precision for integer
215;; versus floating point
216(define_mode_attr VS_sxwsp [(V4SI "sxw") (V4SF "sp")])
217
4bfc9db7
MM
218;; Specific iterator for parity which does not have a byte/half-word form, but
219;; does have a quad word form
220(define_mode_iterator VParity [V4SI
221 V2DI
222 V1TI
4a89b7e7 223 TI])
4bfc9db7 224
a5965b52
MM
225(define_mode_attr VI_char [(V2DI "d") (V4SI "w") (V8HI "h") (V16QI "b")])
226(define_mode_attr VI_scalar [(V2DI "DI") (V4SI "SI") (V8HI "HI") (V16QI "QI")])
227(define_mode_attr VI_unit [(V16QI "VECTOR_UNIT_ALTIVEC_P (V16QImode)")
228 (V8HI "VECTOR_UNIT_ALTIVEC_P (V8HImode)")
229 (V4SI "VECTOR_UNIT_ALTIVEC_P (V4SImode)")
a16a872d
MM
230 (V2DI "VECTOR_UNIT_P8_VECTOR_P (V2DImode)")
231 (V1TI "VECTOR_UNIT_ALTIVEC_P (V1TImode)")])
a5965b52
MM
232
233;; Vector pack/unpack
234(define_mode_iterator VP [V2DI V4SI V8HI])
235(define_mode_attr VP_small [(V2DI "V4SI") (V4SI "V8HI") (V8HI "V16QI")])
236(define_mode_attr VP_small_lc [(V2DI "v4si") (V4SI "v8hi") (V8HI "v16qi")])
237(define_mode_attr VU_char [(V2DI "w") (V4SI "h") (V8HI "b")])
10ed84db 238
c3eaf15a
MM
239;; Vector negate
240(define_mode_iterator VNEG [V4SI V2DI])
241
10ed84db 242;; Vector move instructions.
a72c65c7 243(define_insn "*altivec_mov<mode>"
0c15a902
AM
244 [(set (match_operand:VM2 0 "nonimmediate_operand" "=Z,v,v,?Y,?*r,?*r,v,v,?*r")
245 (match_operand:VM2 1 "input_operand" "v,Z,v,*r,Y,*r,j,W,W"))]
a72c65c7 246 "VECTOR_MEM_ALTIVEC_P (<MODE>mode)
81466555
NS
247 && (register_operand (operands[0], <MODE>mode)
248 || register_operand (operands[1], <MODE>mode))"
07a38de7
SB
249 "@
250 stvx %1,%y0
251 lvx %0,%y1
252 vor %0,%1,%1
253 #
254 #
255 #
256 vxor %0,%0,%0
257 * return output_vec_const_move (operands);
258 #"
7c788ce2 259 [(set_attr "type" "vecstore,vecload,veclogical,store,load,*,veclogical,*,*")
911c8df0 260 (set_attr "length" "*,*,*,20,20,20,*,8,32")])
a72c65c7
MM
261
262;; Unlike other altivec moves, allow the GPRs, since a normal use of TImode
263;; is for unions. However for plain data movement, slightly favor the vector
264;; loads
265(define_insn "*altivec_movti"
d0a54439
AM
266 [(set (match_operand:TI 0 "nonimmediate_operand" "=Z,v,v,?Y,?r,?r,v,v")
267 (match_operand:TI 1 "input_operand" "v,Z,v,r,Y,r,j,W"))]
a72c65c7
MM
268 "VECTOR_MEM_ALTIVEC_P (TImode)
269 && (register_operand (operands[0], TImode)
270 || register_operand (operands[1], TImode))"
07a38de7
SB
271 "@
272 stvx %1,%y0
273 lvx %0,%y1
274 vor %0,%1,%1
275 #
276 #
277 #
278 vxor %0,%0,%0
279 * return output_vec_const_move (operands);"
7c788ce2 280 [(set_attr "type" "vecstore,vecload,veclogical,store,load,*,veclogical,*")])
10ed84db 281
29e6733c
MM
282;; Load up a vector with the most significant bit set by loading up -1 and
283;; doing a shift left
284(define_split
ad18eed2
SB
285 [(set (match_operand:VM 0 "altivec_register_operand")
286 (match_operand:VM 1 "easy_vector_constant_msb"))]
3ff79f99 287 "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode) && reload_completed"
29e6733c
MM
288 [(const_int 0)]
289{
290 rtx dest = operands[0];
e0e82856 291 machine_mode mode;
29e6733c
MM
292 rtvec v;
293 int i, num_elements;
294
e0e82856 295 switch (easy_altivec_constant (operands[1], <MODE>mode))
29e6733c 296 {
e0e82856
JJ
297 case 1:
298 mode = V16QImode;
299 break;
300 case 2:
301 mode = V8HImode;
302 break;
303 case 4:
29e6733c 304 mode = V4SImode;
e0e82856
JJ
305 break;
306 default:
307 gcc_unreachable ();
29e6733c 308 }
e0e82856
JJ
309 if (mode != <MODE>mode)
310 dest = gen_lowpart (mode, dest);
29e6733c
MM
311
312 num_elements = GET_MODE_NUNITS (mode);
313 v = rtvec_alloc (num_elements);
314 for (i = 0; i < num_elements; i++)
315 RTVEC_ELT (v, i) = constm1_rtx;
316
e0e82856 317 rs6000_expand_vector_init (dest, gen_rtx_PARALLEL (mode, v));
f7df4a84 318 emit_insn (gen_rtx_SET (dest, gen_rtx_ASHIFT (mode, dest, dest)));
29e6733c
MM
319 DONE;
320})
321
66180ff3 322(define_split
ad18eed2
SB
323 [(set (match_operand:VM 0 "altivec_register_operand")
324 (match_operand:VM 1 "easy_vector_constant_add_self"))]
3ff79f99 325 "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode) && reload_completed"
66180ff3 326 [(set (match_dup 0) (match_dup 3))
847535b6 327 (set (match_dup 0) (match_dup 4))]
66180ff3
PB
328{
329 rtx dup = gen_easy_altivec_constant (operands[1]);
330 rtx const_vec;
ef4bddc2 331 machine_mode op_mode = <MODE>mode;
66180ff3
PB
332
333 /* Divide the operand of the resulting VEC_DUPLICATE, and use
334 simplify_rtx to make a CONST_VECTOR. */
335 XEXP (dup, 0) = simplify_const_binary_operation (ASHIFTRT, QImode,
336 XEXP (dup, 0), const1_rtx);
337 const_vec = simplify_rtx (dup);
338
847535b6
JJ
339 if (op_mode == V4SFmode)
340 {
341 op_mode = V4SImode;
342 operands[0] = gen_lowpart (op_mode, operands[0]);
343 }
344 if (GET_MODE (const_vec) == op_mode)
66180ff3
PB
345 operands[3] = const_vec;
346 else
847535b6
JJ
347 operands[3] = gen_lowpart (op_mode, const_vec);
348 operands[4] = gen_rtx_PLUS (op_mode, operands[0], operands[0]);
66180ff3
PB
349})
350
683be46f 351(define_split
ad18eed2
SB
352 [(set (match_operand:VM 0 "altivec_register_operand")
353 (match_operand:VM 1 "easy_vector_constant_vsldoi"))]
683be46f
MM
354 "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode) && can_create_pseudo_p ()"
355 [(set (match_dup 2) (match_dup 3))
356 (set (match_dup 4) (match_dup 5))
357 (set (match_dup 0)
358 (unspec:VM [(match_dup 2)
359 (match_dup 4)
360 (match_dup 6)]
361 UNSPEC_VSLDOI))]
362{
363 rtx op1 = operands[1];
364 int elt = (BYTES_BIG_ENDIAN) ? 0 : GET_MODE_NUNITS (<MODE>mode) - 1;
365 HOST_WIDE_INT val = const_vector_elt_as_int (op1, elt);
366 rtx rtx_val = GEN_INT (val);
367 int shift = vspltis_shifted (op1);
683be46f
MM
368
369 gcc_assert (shift != 0);
370 operands[2] = gen_reg_rtx (<MODE>mode);
59d06c05 371 operands[3] = gen_const_vec_duplicate (<MODE>mode, rtx_val);
683be46f
MM
372 operands[4] = gen_reg_rtx (<MODE>mode);
373
374 if (shift < 0)
375 {
376 operands[5] = CONSTM1_RTX (<MODE>mode);
377 operands[6] = GEN_INT (-shift);
378 }
379 else
380 {
381 operands[5] = CONST0_RTX (<MODE>mode);
382 operands[6] = GEN_INT (shift);
383 }
683be46f
MM
384})
385
080a06fc
XL
386(define_insn_and_split "sldoi_to_mov<mode>"
387 [(set (match_operand:VM 0 "altivec_register_operand")
fd75f6ae 388 (unspec:VM [(match_operand:VM 1 "const_vector_each_byte_same")
080a06fc
XL
389 (match_dup 1)
390 (match_operand:QI 2 "u5bit_cint_operand")]
391 UNSPEC_VSLDOI))]
fd75f6ae 392 "VECTOR_MEM_ALTIVEC_OR_VSX_P (<MODE>mode) && can_create_pseudo_p ()"
080a06fc
XL
393 "#"
394 "&& 1"
fd75f6ae
KL
395 [(set (match_dup 0) (match_dup 1))]
396 "{
397 if (!easy_vector_constant (operands[1], <MODE>mode))
398 {
399 rtx dest = gen_reg_rtx (<MODE>mode);
400 emit_move_insn (dest, operands[1]);
401 operands[1] = dest;
402 }
403 }")
080a06fc 404
10ed84db
AH
405(define_insn "get_vrsave_internal"
406 [(set (match_operand:SI 0 "register_operand" "=r")
95bde43a 407 (unspec:SI [(reg:SI VRSAVE_REGNO)] UNSPEC_GET_VRSAVE))]
10ed84db 408 "TARGET_ALTIVEC"
10ed84db
AH
409{
410 if (TARGET_MACHO)
81466555 411 return "mfspr %0,256";
10ed84db 412 else
81466555
NS
413 return "mfvrsave %0";
414}
310b9b1d 415 [(set_attr "type" "*")])
10ed84db
AH
416
417(define_insn "*set_vrsave_internal"
418 [(match_parallel 0 "vrsave_operation"
95bde43a 419 [(set (reg:SI VRSAVE_REGNO)
10ed84db 420 (unspec_volatile:SI [(match_operand:SI 1 "register_operand" "r")
95bde43a 421 (reg:SI VRSAVE_REGNO)] UNSPECV_SET_VRSAVE))])]
10ed84db 422 "TARGET_ALTIVEC"
10ed84db
AH
423{
424 if (TARGET_MACHO)
81466555 425 return "mtspr 256,%1";
10ed84db 426 else
81466555
NS
427 return "mtvrsave %1";
428}
310b9b1d 429 [(set_attr "type" "*")])
10ed84db 430
d62294f5
FJ
431(define_insn "*save_world"
432 [(match_parallel 0 "save_world_operation"
893fc0a0 433 [(clobber (reg:SI LR_REGNO))
a5ad2017 434 (use (match_operand:SI 1 "call_operand" "s"))])]
d62294f5 435 "TARGET_MACHO && (DEFAULT_ABI == ABI_DARWIN) && TARGET_32BIT"
a5ad2017 436 "bl %z1"
a79db151 437 [(set_attr "type" "branch")])
d62294f5
FJ
438
439(define_insn "*restore_world"
440 [(match_parallel 0 "restore_world_operation"
441 [(return)
a5ad2017
DE
442 (use (match_operand:SI 1 "call_operand" "s"))
443 (clobber (match_operand:SI 2 "gpc_reg_operand" "=r"))])]
d62294f5 444 "TARGET_MACHO && (DEFAULT_ABI == ABI_DARWIN) && TARGET_32BIT"
a5ad2017 445 "b %z1")
d62294f5 446
b427dd7a
AM
447;; The save_vregs and restore_vregs patterns don't use memory_operand
448;; because (plus (reg) (const_int)) is not a valid vector address.
449;; This way is more compact than describing exactly what happens in
450;; the out-of-line functions, ie. loading the constant into r11/r12
451;; then using indexed addressing, and requires less editing of rtl
452;; to describe the operation to dwarf2out_frame_debug_expr.
453(define_insn "*save_vregs_<mode>_r11"
454 [(match_parallel 0 "any_parallel_operand"
893fc0a0 455 [(clobber (reg:P LR_REGNO))
b427dd7a
AM
456 (use (match_operand:P 1 "symbol_ref_operand" "s"))
457 (clobber (reg:P 11))
458 (use (reg:P 0))
459 (set (mem:V4SI (plus:P (match_operand:P 2 "gpc_reg_operand" "b")
460 (match_operand:P 3 "short_cint_operand" "I")))
8a480dc3
AM
461 (match_operand:V4SI 4 "altivec_register_operand" "v"))])]
462 "TARGET_ALTIVEC"
b427dd7a 463 "bl %1"
a79db151 464 [(set_attr "type" "branch")])
b427dd7a
AM
465
466(define_insn "*save_vregs_<mode>_r12"
467 [(match_parallel 0 "any_parallel_operand"
893fc0a0 468 [(clobber (reg:P LR_REGNO))
b427dd7a
AM
469 (use (match_operand:P 1 "symbol_ref_operand" "s"))
470 (clobber (reg:P 12))
471 (use (reg:P 0))
472 (set (mem:V4SI (plus:P (match_operand:P 2 "gpc_reg_operand" "b")
473 (match_operand:P 3 "short_cint_operand" "I")))
8a480dc3
AM
474 (match_operand:V4SI 4 "altivec_register_operand" "v"))])]
475 "TARGET_ALTIVEC"
b427dd7a 476 "bl %1"
a79db151 477 [(set_attr "type" "branch")])
b427dd7a
AM
478
479(define_insn "*restore_vregs_<mode>_r11"
480 [(match_parallel 0 "any_parallel_operand"
893fc0a0 481 [(clobber (reg:P LR_REGNO))
b427dd7a
AM
482 (use (match_operand:P 1 "symbol_ref_operand" "s"))
483 (clobber (reg:P 11))
484 (use (reg:P 0))
8a480dc3 485 (set (match_operand:V4SI 2 "altivec_register_operand" "=v")
b427dd7a
AM
486 (mem:V4SI (plus:P (match_operand:P 3 "gpc_reg_operand" "b")
487 (match_operand:P 4 "short_cint_operand" "I"))))])]
8a480dc3 488 "TARGET_ALTIVEC"
b427dd7a 489 "bl %1"
a79db151 490 [(set_attr "type" "branch")])
b427dd7a
AM
491
492(define_insn "*restore_vregs_<mode>_r12"
493 [(match_parallel 0 "any_parallel_operand"
893fc0a0 494 [(clobber (reg:P LR_REGNO))
b427dd7a
AM
495 (use (match_operand:P 1 "symbol_ref_operand" "s"))
496 (clobber (reg:P 12))
497 (use (reg:P 0))
8a480dc3 498 (set (match_operand:V4SI 2 "altivec_register_operand" "=v")
b427dd7a
AM
499 (mem:V4SI (plus:P (match_operand:P 3 "gpc_reg_operand" "b")
500 (match_operand:P 4 "short_cint_operand" "I"))))])]
8a480dc3 501 "TARGET_ALTIVEC"
b427dd7a 502 "bl %1"
a79db151 503 [(set_attr "type" "branch")])
b427dd7a 504
10ed84db
AH
505;; Simple binary operations.
506
f96bc213
NS
507;; add
508(define_insn "add<mode>3"
a5965b52
MM
509 [(set (match_operand:VI2 0 "register_operand" "=v")
510 (plus:VI2 (match_operand:VI2 1 "register_operand" "v")
511 (match_operand:VI2 2 "register_operand" "v")))]
512 "<VI_unit>"
f96bc213 513 "vaddu<VI_char>m %0,%1,%2"
10ed84db
AH
514 [(set_attr "type" "vecsimple")])
515
a72c65c7 516(define_insn "*altivec_addv4sf3"
10ed84db
AH
517 [(set (match_operand:V4SF 0 "register_operand" "=v")
518 (plus:V4SF (match_operand:V4SF 1 "register_operand" "v")
5aebfdad 519 (match_operand:V4SF 2 "register_operand" "v")))]
a72c65c7 520 "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
10ed84db
AH
521 "vaddfp %0,%1,%2"
522 [(set_attr "type" "vecfloat")])
523
524(define_insn "altivec_vaddcuw"
525 [(set (match_operand:V4SI 0 "register_operand" "=v")
526 (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
3aca4bff
DE
527 (match_operand:V4SI 2 "register_operand" "v")]
528 UNSPEC_VADDCUW))]
a5965b52 529 "VECTOR_UNIT_ALTIVEC_P (V4SImode)"
10ed84db
AH
530 "vaddcuw %0,%1,%2"
531 [(set_attr "type" "vecsimple")])
532
aba5fb01
NS
533(define_insn "altivec_vaddu<VI_char>s"
534 [(set (match_operand:VI 0 "register_operand" "=v")
460d53f8
XL
535 (us_plus:VI (match_operand:VI 1 "register_operand" "v")
536 (match_operand:VI 2 "register_operand" "v")))
3b2475ab 537 (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
a5965b52 538 "<VI_unit>"
aba5fb01 539 "vaddu<VI_char>s %0,%1,%2"
10ed84db
AH
540 [(set_attr "type" "vecsimple")])
541
aba5fb01
NS
542(define_insn "altivec_vadds<VI_char>s"
543 [(set (match_operand:VI 0 "register_operand" "=v")
460d53f8
XL
544 (ss_plus:VI (match_operand:VI 1 "register_operand" "v")
545 (match_operand:VI 2 "register_operand" "v")))
3b2475ab 546 (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
a5965b52 547 "VECTOR_UNIT_ALTIVEC_P (<MODE>mode)"
aba5fb01 548 "vadds<VI_char>s %0,%1,%2"
10ed84db
AH
549 [(set_attr "type" "vecsimple")])
550
f96bc213
NS
551;; sub
552(define_insn "sub<mode>3"
a5965b52
MM
553 [(set (match_operand:VI2 0 "register_operand" "=v")
554 (minus:VI2 (match_operand:VI2 1 "register_operand" "v")
555 (match_operand:VI2 2 "register_operand" "v")))]
556 "<VI_unit>"
f96bc213
NS
557 "vsubu<VI_char>m %0,%1,%2"
558 [(set_attr "type" "vecsimple")])
559
a72c65c7 560(define_insn "*altivec_subv4sf3"
f96bc213
NS
561 [(set (match_operand:V4SF 0 "register_operand" "=v")
562 (minus:V4SF (match_operand:V4SF 1 "register_operand" "v")
563 (match_operand:V4SF 2 "register_operand" "v")))]
a72c65c7 564 "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
f96bc213
NS
565 "vsubfp %0,%1,%2"
566 [(set_attr "type" "vecfloat")])
567
568(define_insn "altivec_vsubcuw"
569 [(set (match_operand:V4SI 0 "register_operand" "=v")
570 (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
3aca4bff
DE
571 (match_operand:V4SI 2 "register_operand" "v")]
572 UNSPEC_VSUBCUW))]
a5965b52 573 "VECTOR_UNIT_ALTIVEC_P (V4SImode)"
f96bc213
NS
574 "vsubcuw %0,%1,%2"
575 [(set_attr "type" "vecsimple")])
576
aba5fb01
NS
577(define_insn "altivec_vsubu<VI_char>s"
578 [(set (match_operand:VI 0 "register_operand" "=v")
460d53f8
XL
579 (us_minus:VI (match_operand:VI 1 "register_operand" "v")
580 (match_operand:VI 2 "register_operand" "v")))
3b2475ab 581 (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
a5965b52 582 "VECTOR_UNIT_ALTIVEC_P (<MODE>mode)"
aba5fb01 583 "vsubu<VI_char>s %0,%1,%2"
10ed84db
AH
584 [(set_attr "type" "vecsimple")])
585
aba5fb01
NS
586(define_insn "altivec_vsubs<VI_char>s"
587 [(set (match_operand:VI 0 "register_operand" "=v")
460d53f8
XL
588 (ss_minus:VI (match_operand:VI 1 "register_operand" "v")
589 (match_operand:VI 2 "register_operand" "v")))
3b2475ab 590 (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
a5965b52 591 "VECTOR_UNIT_ALTIVEC_P (<MODE>mode)"
aba5fb01 592 "vsubs<VI_char>s %0,%1,%2"
10ed84db
AH
593 [(set_attr "type" "vecsimple")])
594
f96bc213 595;;
90a15807 596(define_insn "uavg<mode>3_ceil"
aba5fb01
NS
597 [(set (match_operand:VI 0 "register_operand" "=v")
598 (unspec:VI [(match_operand:VI 1 "register_operand" "v")
3aca4bff
DE
599 (match_operand:VI 2 "register_operand" "v")]
600 UNSPEC_VAVGU))]
10ed84db 601 "TARGET_ALTIVEC"
aba5fb01 602 "vavgu<VI_char> %0,%1,%2"
10ed84db
AH
603 [(set_attr "type" "vecsimple")])
604
90a15807 605(define_insn "avg<mode>3_ceil"
aba5fb01
NS
606 [(set (match_operand:VI 0 "register_operand" "=v")
607 (unspec:VI [(match_operand:VI 1 "register_operand" "v")
3aca4bff
DE
608 (match_operand:VI 2 "register_operand" "v")]
609 UNSPEC_VAVGS))]
a5965b52 610 "VECTOR_UNIT_ALTIVEC_P (<MODE>mode)"
aba5fb01 611 "vavgs<VI_char> %0,%1,%2"
10ed84db
AH
612 [(set_attr "type" "vecsimple")])
613
614(define_insn "altivec_vcmpbfp"
615 [(set (match_operand:V4SI 0 "register_operand" "=v")
616 (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "v")
21213b4c
DP
617 (match_operand:V4SF 2 "register_operand" "v")]
618 UNSPEC_VCMPBFP))]
a5965b52 619 "VECTOR_UNIT_ALTIVEC_P (V4SImode)"
10ed84db
AH
620 "vcmpbfp %0,%1,%2"
621 [(set_attr "type" "veccmp")])
622
f03122f2
CL
623(define_insn "altivec_eqv1ti"
624 [(set (match_operand:V1TI 0 "altivec_register_operand" "=v")
625 (eq:V1TI (match_operand:V1TI 1 "altivec_register_operand" "v")
626 (match_operand:V1TI 2 "altivec_register_operand" "v")))]
627 "TARGET_POWER10"
628 "vcmpequq %0,%1,%2"
629 [(set_attr "type" "veccmpfx")])
630
9d36bd3b 631(define_insn "altivec_eq<mode>"
a5965b52
MM
632 [(set (match_operand:VI2 0 "altivec_register_operand" "=v")
633 (eq:VI2 (match_operand:VI2 1 "altivec_register_operand" "v")
634 (match_operand:VI2 2 "altivec_register_operand" "v")))]
635 "<VI_unit>"
a72c65c7 636 "vcmpequ<VI_char> %0,%1,%2"
7c788ce2 637 [(set_attr "type" "veccmpfx")])
10ed84db 638
a72c65c7 639(define_insn "*altivec_gt<mode>"
a5965b52
MM
640 [(set (match_operand:VI2 0 "altivec_register_operand" "=v")
641 (gt:VI2 (match_operand:VI2 1 "altivec_register_operand" "v")
642 (match_operand:VI2 2 "altivec_register_operand" "v")))]
643 "<VI_unit>"
a72c65c7 644 "vcmpgts<VI_char> %0,%1,%2"
7c788ce2 645 [(set_attr "type" "veccmpfx")])
10ed84db 646
f03122f2
CL
647(define_insn "*altivec_gtv1ti"
648 [(set (match_operand:V1TI 0 "altivec_register_operand" "=v")
649 (gt:V1TI (match_operand:V1TI 1 "altivec_register_operand" "v")
650 (match_operand:V1TI 2 "altivec_register_operand" "v")))]
651 "TARGET_POWER10"
652 "vcmpgtsq %0,%1,%2"
653 [(set_attr "type" "veccmpfx")])
654
a72c65c7 655(define_insn "*altivec_gtu<mode>"
a5965b52
MM
656 [(set (match_operand:VI2 0 "altivec_register_operand" "=v")
657 (gtu:VI2 (match_operand:VI2 1 "altivec_register_operand" "v")
658 (match_operand:VI2 2 "altivec_register_operand" "v")))]
659 "<VI_unit>"
a72c65c7 660 "vcmpgtu<VI_char> %0,%1,%2"
7c788ce2 661 [(set_attr "type" "veccmpfx")])
10ed84db 662
f03122f2
CL
663(define_insn "*altivec_gtuv1ti"
664 [(set (match_operand:V1TI 0 "altivec_register_operand" "=v")
665 (gtu:V1TI (match_operand:V1TI 1 "altivec_register_operand" "v")
666 (match_operand:V1TI 2 "altivec_register_operand" "v")))]
667 "TARGET_POWER10"
668 "vcmpgtuq %0,%1,%2"
669 [(set_attr "type" "veccmpfx")])
670
a72c65c7
MM
671(define_insn "*altivec_eqv4sf"
672 [(set (match_operand:V4SF 0 "altivec_register_operand" "=v")
673 (eq:V4SF (match_operand:V4SF 1 "altivec_register_operand" "v")
674 (match_operand:V4SF 2 "altivec_register_operand" "v")))]
675 "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
10ed84db
AH
676 "vcmpeqfp %0,%1,%2"
677 [(set_attr "type" "veccmp")])
678
a72c65c7
MM
679(define_insn "*altivec_gtv4sf"
680 [(set (match_operand:V4SF 0 "altivec_register_operand" "=v")
681 (gt:V4SF (match_operand:V4SF 1 "altivec_register_operand" "v")
682 (match_operand:V4SF 2 "altivec_register_operand" "v")))]
683 "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
684 "vcmpgtfp %0,%1,%2"
10ed84db
AH
685 [(set_attr "type" "veccmp")])
686
a72c65c7
MM
687(define_insn "*altivec_gev4sf"
688 [(set (match_operand:V4SF 0 "altivec_register_operand" "=v")
689 (ge:V4SF (match_operand:V4SF 1 "altivec_register_operand" "v")
690 (match_operand:V4SF 2 "altivec_register_operand" "v")))]
691 "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
692 "vcmpgefp %0,%1,%2"
693 [(set_attr "type" "veccmp")])
10ed84db 694
9222481f 695(define_insn "altivec_vsel<mode>"
5f9ef133 696 [(set (match_operand:VM 0 "register_operand" "=wa,v")
9222481f
XL
697 (ior:VM
698 (and:VM
5f9ef133
XL
699 (not:VM (match_operand:VM 3 "register_operand" "wa,v"))
700 (match_operand:VM 1 "register_operand" "wa,v"))
9222481f
XL
701 (and:VM
702 (match_dup 3)
5f9ef133 703 (match_operand:VM 2 "register_operand" "wa,v"))))]
9222481f 704 "VECTOR_MEM_ALTIVEC_OR_VSX_P (<MODE>mode)"
5f9ef133
XL
705 "@
706 xxsel %x0,%x1,%x2,%x3
707 vsel %0,%1,%2,%3"
708 [(set_attr "type" "vecmove")
709 (set_attr "isa" "<VSisa>")])
10ed84db 710
9222481f 711(define_insn "altivec_vsel<mode>2"
5f9ef133 712 [(set (match_operand:VM 0 "register_operand" "=wa,v")
9222481f
XL
713 (ior:VM
714 (and:VM
5f9ef133
XL
715 (not:VM (match_operand:VM 3 "register_operand" "wa,v"))
716 (match_operand:VM 1 "register_operand" "wa,v"))
9222481f 717 (and:VM
5f9ef133 718 (match_operand:VM 2 "register_operand" "wa,v")
9222481f
XL
719 (match_dup 3))))]
720 "VECTOR_MEM_ALTIVEC_OR_VSX_P (<MODE>mode)"
5f9ef133
XL
721 "@
722 xxsel %x0,%x1,%x2,%x3
723 vsel %0,%1,%2,%3"
724 [(set_attr "type" "vecmove")
725 (set_attr "isa" "<VSisa>")])
9222481f
XL
726
727(define_insn "altivec_vsel<mode>3"
5f9ef133 728 [(set (match_operand:VM 0 "register_operand" "=wa,v")
9222481f
XL
729 (ior:VM
730 (and:VM
5f9ef133
XL
731 (match_operand:VM 3 "register_operand" "wa,v")
732 (match_operand:VM 1 "register_operand" "wa,v"))
9222481f
XL
733 (and:VM
734 (not:VM (match_dup 3))
5f9ef133 735 (match_operand:VM 2 "register_operand" "wa,v"))))]
9222481f 736 "VECTOR_MEM_ALTIVEC_OR_VSX_P (<MODE>mode)"
5f9ef133
XL
737 "@
738 xxsel %x0,%x2,%x1,%x3
739 vsel %0,%2,%1,%3"
740 [(set_attr "type" "vecmove")
741 (set_attr "isa" "<VSisa>")])
9222481f
XL
742
743(define_insn "altivec_vsel<mode>4"
5f9ef133 744 [(set (match_operand:VM 0 "register_operand" "=wa,v")
9222481f
XL
745 (ior:VM
746 (and:VM
5f9ef133
XL
747 (match_operand:VM 1 "register_operand" "wa,v")
748 (match_operand:VM 3 "register_operand" "wa,v"))
9222481f
XL
749 (and:VM
750 (not:VM (match_dup 3))
5f9ef133 751 (match_operand:VM 2 "register_operand" "wa,v"))))]
9222481f 752 "VECTOR_MEM_ALTIVEC_OR_VSX_P (<MODE>mode)"
5f9ef133
XL
753 "@
754 xxsel %x0,%x2,%x1,%x3
755 vsel %0,%2,%1,%3"
756 [(set_attr "type" "vecmove")
757 (set_attr "isa" "<VSisa>")])
10ed84db 758
d6613781 759;; Fused multiply add.
1b1562a5 760
d6613781 761(define_insn "*altivec_fmav4sf4"
1b1562a5
MM
762 [(set (match_operand:V4SF 0 "register_operand" "=v")
763 (fma:V4SF (match_operand:V4SF 1 "register_operand" "v")
764 (match_operand:V4SF 2 "register_operand" "v")
765 (match_operand:V4SF 3 "register_operand" "v")))]
a72c65c7 766 "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
10ed84db
AH
767 "vmaddfp %0,%1,%2,%3"
768 [(set_attr "type" "vecfloat")])
769
a67ed43f 770;; We do multiply as a fused multiply-add with an add of a -0.0 vector.
10ed84db 771
a72c65c7 772(define_expand "altivec_mulv4sf3"
ad18eed2
SB
773 [(set (match_operand:V4SF 0 "register_operand")
774 (fma:V4SF (match_operand:V4SF 1 "register_operand")
775 (match_operand:V4SF 2 "register_operand")
d6613781 776 (match_dup 3)))]
1b1562a5 777 "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
a67ed43f
AH
778{
779 rtx neg0;
780
781 /* Generate [-0.0, -0.0, -0.0, -0.0]. */
8cf0db2b
UW
782 neg0 = gen_reg_rtx (V4SImode);
783 emit_insn (gen_altivec_vspltisw (neg0, constm1_rtx));
71d46ca5 784 emit_insn (gen_vashlv4si3 (neg0, neg0, neg0));
a67ed43f 785
d6613781
RH
786 operands[3] = gen_lowpart (V4SFmode, neg0);
787})
10ed84db 788
a7b376ee 789;; 32-bit integer multiplication
a45f6936
DP
790;; A_high = Operand_0 & 0xFFFF0000 >> 16
791;; A_low = Operand_0 & 0xFFFF
792;; B_high = Operand_1 & 0xFFFF0000 >> 16
793;; B_low = Operand_1 & 0xFFFF
794;; result = A_low * B_low + (A_high * B_low + B_high * A_low) << 16
795
796;; (define_insn "mulv4si3"
797;; [(set (match_operand:V4SI 0 "register_operand" "=v")
798;; (mult:V4SI (match_operand:V4SI 1 "register_operand" "v")
799;; (match_operand:V4SI 2 "register_operand" "v")))]
7faaf44a
DE
800(define_insn "mulv4si3_p8"
801 [(set (match_operand:V4SI 0 "register_operand" "=v")
802 (mult:V4SI (match_operand:V4SI 1 "register_operand" "v")
803 (match_operand:V4SI 2 "register_operand" "v")))]
804 "TARGET_P8_VECTOR"
805 "vmuluwm %0,%1,%2"
806 [(set_attr "type" "veccomplex")])
807
a45f6936 808(define_expand "mulv4si3"
ad18eed2
SB
809 [(use (match_operand:V4SI 0 "register_operand"))
810 (use (match_operand:V4SI 1 "register_operand"))
811 (use (match_operand:V4SI 2 "register_operand"))]
a45f6936 812 "TARGET_ALTIVEC"
7faaf44a
DE
813{
814 rtx zero;
815 rtx swap;
816 rtx small_swap;
817 rtx sixteen;
818 rtx one;
819 rtx two;
820 rtx low_product;
821 rtx high_product;
a45f6936 822
7faaf44a
DE
823 if (TARGET_P8_VECTOR)
824 {
825 emit_insn (gen_mulv4si3_p8 (operands[0], operands[1], operands[2]));
826 DONE;
827 }
828
829 zero = gen_reg_rtx (V4SImode);
830 emit_insn (gen_altivec_vspltisw (zero, const0_rtx));
a45f6936 831
7faaf44a
DE
832 sixteen = gen_reg_rtx (V4SImode);
833 emit_insn (gen_altivec_vspltisw (sixteen, gen_rtx_CONST_INT (V4SImode, -16)));
a45f6936 834
7faaf44a
DE
835 swap = gen_reg_rtx (V4SImode);
836 emit_insn (gen_vrotlv4si3 (swap, operands[2], sixteen));
a45f6936 837
7faaf44a
DE
838 one = gen_reg_rtx (V8HImode);
839 convert_move (one, operands[1], 0);
a45f6936 840
7faaf44a
DE
841 two = gen_reg_rtx (V8HImode);
842 convert_move (two, operands[2], 0);
a45f6936 843
7faaf44a
DE
844 small_swap = gen_reg_rtx (V8HImode);
845 convert_move (small_swap, swap, 0);
a45f6936 846
7faaf44a
DE
847 low_product = gen_reg_rtx (V4SImode);
848 emit_insn (gen_altivec_vmulouh (low_product, one, two));
a45f6936 849
7faaf44a
DE
850 high_product = gen_reg_rtx (V4SImode);
851 emit_insn (gen_altivec_vmsumuhm (high_product, one, small_swap, zero));
a45f6936 852
7faaf44a 853 emit_insn (gen_vashlv4si3 (high_product, high_product, sixteen));
a45f6936 854
7faaf44a 855 emit_insn (gen_addv4si3 (operands[0], high_product, low_product));
a45f6936 856
7faaf44a
DE
857 DONE;
858})
a45f6936 859
cc54af20 860(define_expand "mulv8hi3"
ad18eed2
SB
861 [(use (match_operand:V8HI 0 "register_operand"))
862 (use (match_operand:V8HI 1 "register_operand"))
863 (use (match_operand:V8HI 2 "register_operand"))]
cc54af20 864 "TARGET_ALTIVEC"
cc54af20 865{
7faaf44a
DE
866 rtx zero = gen_reg_rtx (V8HImode);
867
868 emit_insn (gen_altivec_vspltish (zero, const0_rtx));
198a8e3c 869 emit_insn (gen_fmav8hi4 (operands[0], operands[1], operands[2], zero));
7faaf44a
DE
870
871 DONE;
872})
a45f6936 873
82f10dee
CL
874;; Map UNSPEC_SLDB to "l" and UNSPEC_SRDB to "r".
875(define_int_attr SLDB_lr [(UNSPEC_SLDB "l")
876 (UNSPEC_SRDB "r")])
877
878(define_int_iterator VSHIFT_DBL_LR [UNSPEC_SLDB UNSPEC_SRDB])
879
880(define_insn "vs<SLDB_lr>db_<mode>"
881 [(set (match_operand:VI2 0 "register_operand" "=v")
882 (unspec:VI2 [(match_operand:VI2 1 "register_operand" "v")
883 (match_operand:VI2 2 "register_operand" "v")
884 (match_operand:QI 3 "const_0_to_12_operand" "n")]
885 VSHIFT_DBL_LR))]
886 "TARGET_POWER10"
887 "vs<SLDB_lr>dbi %0,%1,%2,%3"
888 [(set_attr "type" "vecsimple")])
889
89ce3290
KN
890(define_expand "vstrir_<mode>"
891 [(set (match_operand:VIshort 0 "altivec_register_operand")
892 (unspec:VIshort [(match_operand:VIshort 1 "altivec_register_operand")]
893 UNSPEC_VSTRIR))]
5d9d0c94 894 "TARGET_POWER10"
89ce3290
KN
895{
896 if (BYTES_BIG_ENDIAN)
45e0683d 897 emit_insn (gen_vstrir_direct_<mode> (operands[0], operands[1]));
89ce3290 898 else
45e0683d 899 emit_insn (gen_vstril_direct_<mode> (operands[0], operands[1]));
89ce3290
KN
900 DONE;
901})
902
45e0683d 903(define_insn "vstrir_direct_<mode>"
89ce3290
KN
904 [(set (match_operand:VIshort 0 "altivec_register_operand" "=v")
905 (unspec:VIshort
906 [(match_operand:VIshort 1 "altivec_register_operand" "v")]
907 UNSPEC_VSTRIR))]
5d9d0c94 908 "TARGET_POWER10"
89ce3290
KN
909 "vstri<wd>r %0,%1"
910 [(set_attr "type" "vecsimple")])
911
45e0683d 912;; This expands into same code as vstrir<mode> followed by condition logic
89ce3290
KN
913;; so that a single vstribr. or vstrihr. or vstribl. or vstrihl. instruction
914;; can, for example, satisfy the needs of a vec_strir () function paired
915;; with a vec_strir_p () function if both take the same incoming arguments.
916(define_expand "vstrir_p_<mode>"
917 [(match_operand:SI 0 "gpc_reg_operand")
918 (match_operand:VIshort 1 "altivec_register_operand")]
5d9d0c94 919 "TARGET_POWER10"
89ce3290
KN
920{
921 rtx scratch = gen_reg_rtx (<MODE>mode);
922 if (BYTES_BIG_ENDIAN)
45e0683d 923 emit_insn (gen_vstrir_p_direct_<mode> (scratch, operands[1]));
89ce3290 924 else
45e0683d 925 emit_insn (gen_vstril_p_direct_<mode> (scratch, operands[1]));
89ce3290
KN
926 emit_insn (gen_cr6_test_for_zero (operands[0]));
927 DONE;
928})
929
45e0683d 930(define_insn "vstrir_p_direct_<mode>"
89ce3290
KN
931 [(set (match_operand:VIshort 0 "altivec_register_operand" "=v")
932 (unspec:VIshort
933 [(match_operand:VIshort 1 "altivec_register_operand" "v")]
934 UNSPEC_VSTRIR))
935 (set (reg:CC CR6_REGNO)
936 (unspec:CC [(match_dup 1)]
937 UNSPEC_VSTRIR))]
5d9d0c94 938 "TARGET_POWER10"
89ce3290
KN
939 "vstri<wd>r. %0,%1"
940 [(set_attr "type" "vecsimple")])
941
942(define_expand "vstril_<mode>"
943 [(set (match_operand:VIshort 0 "altivec_register_operand")
944 (unspec:VIshort [(match_operand:VIshort 1 "altivec_register_operand")]
945 UNSPEC_VSTRIR))]
5d9d0c94 946 "TARGET_POWER10"
89ce3290
KN
947{
948 if (BYTES_BIG_ENDIAN)
45e0683d 949 emit_insn (gen_vstril_direct_<mode> (operands[0], operands[1]));
89ce3290 950 else
45e0683d 951 emit_insn (gen_vstrir_direct_<mode> (operands[0], operands[1]));
89ce3290
KN
952 DONE;
953})
954
45e0683d 955(define_insn "vstril_direct_<mode>"
89ce3290
KN
956 [(set (match_operand:VIshort 0 "altivec_register_operand" "=v")
957 (unspec:VIshort
958 [(match_operand:VIshort 1 "altivec_register_operand" "v")]
959 UNSPEC_VSTRIL))]
5d9d0c94 960 "TARGET_POWER10"
89ce3290
KN
961 "vstri<wd>l %0,%1"
962 [(set_attr "type" "vecsimple")])
963
964;; This expands into same code as vstril_<mode> followed by condition logic
965;; so that a single vstribr. or vstrihr. or vstribl. or vstrihl. instruction
966;; can, for example, satisfy the needs of a vec_stril () function paired
967;; with a vec_stril_p () function if both take the same incoming arguments.
968(define_expand "vstril_p_<mode>"
969 [(match_operand:SI 0 "gpc_reg_operand")
970 (match_operand:VIshort 1 "altivec_register_operand")]
5d9d0c94 971 "TARGET_POWER10"
89ce3290
KN
972{
973 rtx scratch = gen_reg_rtx (<MODE>mode);
974 if (BYTES_BIG_ENDIAN)
45e0683d 975 emit_insn (gen_vstril_p_direct_<mode> (scratch, operands[1]));
89ce3290 976 else
45e0683d 977 emit_insn (gen_vstrir_p_direct_<mode> (scratch, operands[1]));
89ce3290
KN
978 emit_insn (gen_cr6_test_for_zero (operands[0]));
979 DONE;
980})
981
45e0683d 982(define_insn "vstril_p_direct_<mode>"
89ce3290
KN
983 [(set (match_operand:VIshort 0 "altivec_register_operand" "=v")
984 (unspec:VIshort
985 [(match_operand:VIshort 1 "altivec_register_operand" "v")]
986 UNSPEC_VSTRIL))
987 (set (reg:CC CR6_REGNO)
988 (unspec:CC [(match_dup 1)]
989 UNSPEC_VSTRIR))]
5d9d0c94 990 "TARGET_POWER10"
89ce3290
KN
991 "vstri<wd>l. %0,%1"
992 [(set_attr "type" "vecsimple")])
198a8e3c 993
10ed84db 994;; Fused multiply subtract
d6613781 995(define_insn "*altivec_vnmsubfp"
1b1562a5
MM
996 [(set (match_operand:V4SF 0 "register_operand" "=v")
997 (neg:V4SF
998 (fma:V4SF (match_operand:V4SF 1 "register_operand" "v")
999 (match_operand:V4SF 2 "register_operand" "v")
1000 (neg:V4SF
1001 (match_operand:V4SF 3 "register_operand" "v")))))]
92902797 1002 "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
1b1562a5
MM
1003 "vnmsubfp %0,%1,%2,%3"
1004 [(set_attr "type" "vecfloat")])
92902797 1005
aba5fb01 1006(define_insn "altivec_vmsumu<VI_char>m"
10ed84db 1007 [(set (match_operand:V4SI 0 "register_operand" "=v")
aba5fb01
NS
1008 (unspec:V4SI [(match_operand:VIshort 1 "register_operand" "v")
1009 (match_operand:VIshort 2 "register_operand" "v")
3aca4bff
DE
1010 (match_operand:V4SI 3 "register_operand" "v")]
1011 UNSPEC_VMSUMU))]
10ed84db 1012 "TARGET_ALTIVEC"
aba5fb01 1013 "vmsumu<VI_char>m %0,%1,%2,%3"
10ed84db
AH
1014 [(set_attr "type" "veccomplex")])
1015
c1a57681
WS
1016(define_insn "altivec_vmsumudm"
1017 [(set (match_operand:V1TI 0 "register_operand" "=v")
1018 (unspec:V1TI [(match_operand:V2DI 1 "register_operand" "v")
1019 (match_operand:V2DI 2 "register_operand" "v")
1020 (match_operand:V1TI 3 "register_operand" "v")]
1021 UNSPEC_VMSUMUDM))]
1022 "TARGET_P8_VECTOR"
1023 "vmsumudm %0,%1,%2,%3"
1024 [(set_attr "type" "veccomplex")])
1025
aba5fb01 1026(define_insn "altivec_vmsumm<VI_char>m"
10ed84db 1027 [(set (match_operand:V4SI 0 "register_operand" "=v")
aba5fb01
NS
1028 (unspec:V4SI [(match_operand:VIshort 1 "register_operand" "v")
1029 (match_operand:VIshort 2 "register_operand" "v")
3aca4bff
DE
1030 (match_operand:V4SI 3 "register_operand" "v")]
1031 UNSPEC_VMSUMM))]
10ed84db 1032 "TARGET_ALTIVEC"
aba5fb01 1033 "vmsumm<VI_char>m %0,%1,%2,%3"
10ed84db
AH
1034 [(set_attr "type" "veccomplex")])
1035
1036(define_insn "altivec_vmsumshm"
1037 [(set (match_operand:V4SI 0 "register_operand" "=v")
1038 (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
1039 (match_operand:V8HI 2 "register_operand" "v")
3aca4bff
DE
1040 (match_operand:V4SI 3 "register_operand" "v")]
1041 UNSPEC_VMSUMSHM))]
10ed84db 1042 "TARGET_ALTIVEC"
98c9a8e8 1043 "vmsumshm %0,%1,%2,%3"
10ed84db
AH
1044 [(set_attr "type" "veccomplex")])
1045
1046(define_insn "altivec_vmsumuhs"
1047 [(set (match_operand:V4SI 0 "register_operand" "=v")
1048 (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
1049 (match_operand:V8HI 2 "register_operand" "v")
3aca4bff
DE
1050 (match_operand:V4SI 3 "register_operand" "v")]
1051 UNSPEC_VMSUMUHS))
3b2475ab 1052 (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
10ed84db 1053 "TARGET_ALTIVEC"
98c9a8e8 1054 "vmsumuhs %0,%1,%2,%3"
10ed84db
AH
1055 [(set_attr "type" "veccomplex")])
1056
1057(define_insn "altivec_vmsumshs"
1058 [(set (match_operand:V4SI 0 "register_operand" "=v")
1059 (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
1060 (match_operand:V8HI 2 "register_operand" "v")
3aca4bff
DE
1061 (match_operand:V4SI 3 "register_operand" "v")]
1062 UNSPEC_VMSUMSHS))
3b2475ab 1063 (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
10ed84db 1064 "TARGET_ALTIVEC"
98c9a8e8 1065 "vmsumshs %0,%1,%2,%3"
10ed84db
AH
1066 [(set_attr "type" "veccomplex")])
1067
f96bc213 1068;; max
10ed84db 1069
f96bc213 1070(define_insn "umax<mode>3"
a5965b52
MM
1071 [(set (match_operand:VI2 0 "register_operand" "=v")
1072 (umax:VI2 (match_operand:VI2 1 "register_operand" "v")
1073 (match_operand:VI2 2 "register_operand" "v")))]
1074 "<VI_unit>"
f96bc213 1075 "vmaxu<VI_char> %0,%1,%2"
10ed84db
AH
1076 [(set_attr "type" "vecsimple")])
1077
f96bc213 1078(define_insn "smax<mode>3"
a5965b52
MM
1079 [(set (match_operand:VI2 0 "register_operand" "=v")
1080 (smax:VI2 (match_operand:VI2 1 "register_operand" "v")
1081 (match_operand:VI2 2 "register_operand" "v")))]
1082 "<VI_unit>"
f96bc213 1083 "vmaxs<VI_char> %0,%1,%2"
10ed84db
AH
1084 [(set_attr "type" "vecsimple")])
1085
a72c65c7 1086(define_insn "*altivec_smaxv4sf3"
f96bc213
NS
1087 [(set (match_operand:V4SF 0 "register_operand" "=v")
1088 (smax:V4SF (match_operand:V4SF 1 "register_operand" "v")
1089 (match_operand:V4SF 2 "register_operand" "v")))]
a72c65c7 1090 "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
f96bc213
NS
1091 "vmaxfp %0,%1,%2"
1092 [(set_attr "type" "veccmp")])
10ed84db 1093
f96bc213 1094(define_insn "umin<mode>3"
a5965b52
MM
1095 [(set (match_operand:VI2 0 "register_operand" "=v")
1096 (umin:VI2 (match_operand:VI2 1 "register_operand" "v")
1097 (match_operand:VI2 2 "register_operand" "v")))]
1098 "<VI_unit>"
f96bc213 1099 "vminu<VI_char> %0,%1,%2"
10ed84db
AH
1100 [(set_attr "type" "vecsimple")])
1101
f96bc213 1102(define_insn "smin<mode>3"
a5965b52
MM
1103 [(set (match_operand:VI2 0 "register_operand" "=v")
1104 (smin:VI2 (match_operand:VI2 1 "register_operand" "v")
1105 (match_operand:VI2 2 "register_operand" "v")))]
1106 "<VI_unit>"
f96bc213 1107 "vmins<VI_char> %0,%1,%2"
10ed84db
AH
1108 [(set_attr "type" "vecsimple")])
1109
a72c65c7 1110(define_insn "*altivec_sminv4sf3"
10ed84db 1111 [(set (match_operand:V4SF 0 "register_operand" "=v")
f96bc213 1112 (smin:V4SF (match_operand:V4SF 1 "register_operand" "v")
10ed84db 1113 (match_operand:V4SF 2 "register_operand" "v")))]
a72c65c7 1114 "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
f96bc213 1115 "vminfp %0,%1,%2"
10ed84db
AH
1116 [(set_attr "type" "veccmp")])
1117
1118(define_insn "altivec_vmhaddshs"
1119 [(set (match_operand:V8HI 0 "register_operand" "=v")
1120 (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
1121 (match_operand:V8HI 2 "register_operand" "v")
3aca4bff
DE
1122 (match_operand:V8HI 3 "register_operand" "v")]
1123 UNSPEC_VMHADDSHS))
3b2475ab 1124 (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
10ed84db 1125 "TARGET_ALTIVEC"
98c9a8e8 1126 "vmhaddshs %0,%1,%2,%3"
10ed84db 1127 [(set_attr "type" "veccomplex")])
3aca4bff 1128
10ed84db
AH
1129(define_insn "altivec_vmhraddshs"
1130 [(set (match_operand:V8HI 0 "register_operand" "=v")
1131 (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
1132 (match_operand:V8HI 2 "register_operand" "v")
3aca4bff
DE
1133 (match_operand:V8HI 3 "register_operand" "v")]
1134 UNSPEC_VMHRADDSHS))
3b2475ab 1135 (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
10ed84db 1136 "TARGET_ALTIVEC"
98c9a8e8 1137 "vmhraddshs %0,%1,%2,%3"
10ed84db 1138 [(set_attr "type" "veccomplex")])
3aca4bff 1139
198a8e3c 1140(define_insn "fmav8hi4"
10ed84db 1141 [(set (match_operand:V8HI 0 "register_operand" "=v")
7faaf44a
DE
1142 (plus:V8HI (mult:V8HI (match_operand:V8HI 1 "register_operand" "v")
1143 (match_operand:V8HI 2 "register_operand" "v"))
1144 (match_operand:V8HI 3 "register_operand" "v")))]
10ed84db 1145 "TARGET_ALTIVEC"
98c9a8e8 1146 "vmladduhm %0,%1,%2,%3"
10ed84db
AH
1147 [(set_attr "type" "veccomplex")])
1148
68d3bacf 1149(define_expand "altivec_vmrghb"
ad18eed2
SB
1150 [(use (match_operand:V16QI 0 "register_operand"))
1151 (use (match_operand:V16QI 1 "register_operand"))
1152 (use (match_operand:V16QI 2 "register_operand"))]
68d3bacf
BS
1153 "TARGET_ALTIVEC"
1154{
0910c516
XL
1155 rtx (*fun) (rtx, rtx, rtx) = BYTES_BIG_ENDIAN ? gen_altivec_vmrghb_direct
1156 : gen_altivec_vmrglb_direct;
1157 if (!BYTES_BIG_ENDIAN)
1158 std::swap (operands[1], operands[2]);
1159 emit_insn (fun (operands[0], operands[1], operands[2]));
c3e96073 1160 DONE;
68d3bacf
BS
1161})
1162
0910c516 1163(define_insn "altivec_vmrghb_direct"
10ed84db 1164 [(set (match_operand:V16QI 0 "register_operand" "=v")
0910c516 1165 (vec_select:V16QI
5aebfdad
RH
1166 (vec_concat:V32QI
1167 (match_operand:V16QI 1 "register_operand" "v")
1168 (match_operand:V16QI 2 "register_operand" "v"))
1169 (parallel [(const_int 0) (const_int 16)
1170 (const_int 1) (const_int 17)
1171 (const_int 2) (const_int 18)
1172 (const_int 3) (const_int 19)
1173 (const_int 4) (const_int 20)
1174 (const_int 5) (const_int 21)
1175 (const_int 6) (const_int 22)
1176 (const_int 7) (const_int 23)])))]
10ed84db
AH
1177 "TARGET_ALTIVEC"
1178 "vmrghb %0,%1,%2"
1179 [(set_attr "type" "vecperm")])
1180
68d3bacf 1181(define_expand "altivec_vmrghh"
ad18eed2
SB
1182 [(use (match_operand:V8HI 0 "register_operand"))
1183 (use (match_operand:V8HI 1 "register_operand"))
1184 (use (match_operand:V8HI 2 "register_operand"))]
68d3bacf
BS
1185 "TARGET_ALTIVEC"
1186{
0910c516
XL
1187 rtx (*fun) (rtx, rtx, rtx) = BYTES_BIG_ENDIAN ? gen_altivec_vmrghh_direct
1188 : gen_altivec_vmrglh_direct;
1189 if (!BYTES_BIG_ENDIAN)
1190 std::swap (operands[1], operands[2]);
1191 emit_insn (fun (operands[0], operands[1], operands[2]));
c3e96073 1192 DONE;
68d3bacf
BS
1193})
1194
0910c516 1195(define_insn "altivec_vmrghh_direct"
10ed84db 1196 [(set (match_operand:V8HI 0 "register_operand" "=v")
68d3bacf 1197 (vec_select:V8HI
5aebfdad
RH
1198 (vec_concat:V16HI
1199 (match_operand:V8HI 1 "register_operand" "v")
1200 (match_operand:V8HI 2 "register_operand" "v"))
1201 (parallel [(const_int 0) (const_int 8)
1202 (const_int 1) (const_int 9)
1203 (const_int 2) (const_int 10)
1204 (const_int 3) (const_int 11)])))]
10ed84db
AH
1205 "TARGET_ALTIVEC"
1206 "vmrghh %0,%1,%2"
1207 [(set_attr "type" "vecperm")])
1208
68d3bacf 1209(define_expand "altivec_vmrghw"
ad18eed2
SB
1210 [(use (match_operand:V4SI 0 "register_operand"))
1211 (use (match_operand:V4SI 1 "register_operand"))
1212 (use (match_operand:V4SI 2 "register_operand"))]
68d3bacf
BS
1213 "VECTOR_MEM_ALTIVEC_P (V4SImode)"
1214{
0910c516
XL
1215 rtx (*fun) (rtx, rtx, rtx);
1216 fun = BYTES_BIG_ENDIAN ? gen_altivec_vmrghw_direct_v4si
1217 : gen_altivec_vmrglw_direct_v4si;
1218 if (!BYTES_BIG_ENDIAN)
1219 std::swap (operands[1], operands[2]);
1220 emit_insn (fun (operands[0], operands[1], operands[2]));
c3e96073 1221 DONE;
68d3bacf
BS
1222})
1223
0910c516
XL
1224(define_insn "altivec_vmrghw_direct_<mode>"
1225 [(set (match_operand:VSX_W 0 "register_operand" "=wa,v")
1226 (vec_select:VSX_W
1227 (vec_concat:<VS_double>
1228 (match_operand:VSX_W 1 "register_operand" "wa,v")
1229 (match_operand:VSX_W 2 "register_operand" "wa,v"))
5aebfdad
RH
1230 (parallel [(const_int 0) (const_int 4)
1231 (const_int 1) (const_int 5)])))]
68d3bacf 1232 "TARGET_ALTIVEC"
355bae92 1233 "@
ee969a36
SB
1234 xxmrghw %x0,%x1,%x2
1235 vmrghw %0,%1,%2"
10ed84db
AH
1236 [(set_attr "type" "vecperm")])
1237
a72c65c7 1238(define_insn "*altivec_vmrghsf"
98b44b0e 1239 [(set (match_operand:V4SF 0 "register_operand" "=v")
5aebfdad
RH
1240 (vec_select:V4SF
1241 (vec_concat:V8SF
1242 (match_operand:V4SF 1 "register_operand" "v")
1243 (match_operand:V4SF 2 "register_operand" "v"))
1244 (parallel [(const_int 0) (const_int 4)
1245 (const_int 1) (const_int 5)])))]
a72c65c7 1246 "VECTOR_MEM_ALTIVEC_P (V4SFmode)"
68d3bacf
BS
1247{
1248 if (BYTES_BIG_ENDIAN)
1249 return "vmrghw %0,%1,%2";
1250 else
1251 return "vmrglw %0,%2,%1";
1252}
98b44b0e
IR
1253 [(set_attr "type" "vecperm")])
1254
68d3bacf 1255(define_expand "altivec_vmrglb"
ad18eed2
SB
1256 [(use (match_operand:V16QI 0 "register_operand"))
1257 (use (match_operand:V16QI 1 "register_operand"))
1258 (use (match_operand:V16QI 2 "register_operand"))]
68d3bacf
BS
1259 "TARGET_ALTIVEC"
1260{
0910c516
XL
1261 rtx (*fun) (rtx, rtx, rtx) = BYTES_BIG_ENDIAN ? gen_altivec_vmrglb_direct
1262 : gen_altivec_vmrghb_direct;
1263 if (!BYTES_BIG_ENDIAN)
1264 std::swap (operands[1], operands[2]);
1265 emit_insn (fun (operands[0], operands[1], operands[2]));
c3e96073 1266 DONE;
68d3bacf
BS
1267})
1268
0910c516 1269(define_insn "altivec_vmrglb_direct"
10ed84db 1270 [(set (match_operand:V16QI 0 "register_operand" "=v")
0910c516 1271 (vec_select:V16QI
5aebfdad
RH
1272 (vec_concat:V32QI
1273 (match_operand:V16QI 1 "register_operand" "v")
1274 (match_operand:V16QI 2 "register_operand" "v"))
1275 (parallel [(const_int 8) (const_int 24)
1276 (const_int 9) (const_int 25)
1277 (const_int 10) (const_int 26)
1278 (const_int 11) (const_int 27)
1279 (const_int 12) (const_int 28)
1280 (const_int 13) (const_int 29)
1281 (const_int 14) (const_int 30)
1282 (const_int 15) (const_int 31)])))]
10ed84db
AH
1283 "TARGET_ALTIVEC"
1284 "vmrglb %0,%1,%2"
1285 [(set_attr "type" "vecperm")])
1286
68d3bacf 1287(define_expand "altivec_vmrglh"
ad18eed2
SB
1288 [(use (match_operand:V8HI 0 "register_operand"))
1289 (use (match_operand:V8HI 1 "register_operand"))
1290 (use (match_operand:V8HI 2 "register_operand"))]
68d3bacf
BS
1291 "TARGET_ALTIVEC"
1292{
0910c516
XL
1293 rtx (*fun) (rtx, rtx, rtx) = BYTES_BIG_ENDIAN ? gen_altivec_vmrglh_direct
1294 : gen_altivec_vmrghh_direct;
1295 if (!BYTES_BIG_ENDIAN)
1296 std::swap (operands[1], operands[2]);
1297 emit_insn (fun (operands[0], operands[1], operands[2]));
c3e96073 1298 DONE;
68d3bacf
BS
1299})
1300
0910c516 1301(define_insn "altivec_vmrglh_direct"
10ed84db 1302 [(set (match_operand:V8HI 0 "register_operand" "=v")
5aebfdad
RH
1303 (vec_select:V8HI
1304 (vec_concat:V16HI
1305 (match_operand:V8HI 1 "register_operand" "v")
1306 (match_operand:V8HI 2 "register_operand" "v"))
1307 (parallel [(const_int 4) (const_int 12)
1308 (const_int 5) (const_int 13)
1309 (const_int 6) (const_int 14)
1310 (const_int 7) (const_int 15)])))]
10ed84db
AH
1311 "TARGET_ALTIVEC"
1312 "vmrglh %0,%1,%2"
1313 [(set_attr "type" "vecperm")])
1314
68d3bacf 1315(define_expand "altivec_vmrglw"
ad18eed2
SB
1316 [(use (match_operand:V4SI 0 "register_operand"))
1317 (use (match_operand:V4SI 1 "register_operand"))
1318 (use (match_operand:V4SI 2 "register_operand"))]
68d3bacf
BS
1319 "VECTOR_MEM_ALTIVEC_P (V4SImode)"
1320{
0910c516
XL
1321 rtx (*fun) (rtx, rtx, rtx);
1322 fun = BYTES_BIG_ENDIAN ? gen_altivec_vmrglw_direct_v4si
1323 : gen_altivec_vmrghw_direct_v4si;
1324 if (!BYTES_BIG_ENDIAN)
1325 std::swap (operands[1], operands[2]);
1326 emit_insn (fun (operands[0], operands[1], operands[2]));
c3e96073 1327 DONE;
68d3bacf
BS
1328})
1329
0910c516
XL
1330(define_insn "altivec_vmrglw_direct_<mode>"
1331 [(set (match_operand:VSX_W 0 "register_operand" "=wa,v")
1332 (vec_select:VSX_W
1333 (vec_concat:<VS_double>
1334 (match_operand:VSX_W 1 "register_operand" "wa,v")
1335 (match_operand:VSX_W 2 "register_operand" "wa,v"))
5aebfdad
RH
1336 (parallel [(const_int 2) (const_int 6)
1337 (const_int 3) (const_int 7)])))]
68d3bacf 1338 "TARGET_ALTIVEC"
355bae92 1339 "@
ee969a36
SB
1340 xxmrglw %x0,%x1,%x2
1341 vmrglw %0,%1,%2"
10ed84db
AH
1342 [(set_attr "type" "vecperm")])
1343
a72c65c7 1344(define_insn "*altivec_vmrglsf"
98b44b0e 1345 [(set (match_operand:V4SF 0 "register_operand" "=v")
5aebfdad
RH
1346 (vec_select:V4SF
1347 (vec_concat:V8SF
1348 (match_operand:V4SF 1 "register_operand" "v")
1349 (match_operand:V4SF 2 "register_operand" "v"))
1350 (parallel [(const_int 2) (const_int 6)
1351 (const_int 3) (const_int 7)])))]
a72c65c7 1352 "VECTOR_MEM_ALTIVEC_P (V4SFmode)"
68d3bacf
BS
1353{
1354 if (BYTES_BIG_ENDIAN)
1355 return "vmrglw %0,%1,%2";
1356 else
1357 return "vmrghw %0,%2,%1";
1358}
98b44b0e
IR
1359 [(set_attr "type" "vecperm")])
1360
19d22f7c
CL
1361;; Power8 vector merge two V2DF/V2DI even words to V2DF
1362(define_expand "p8_vmrgew_<mode>"
1363 [(use (match_operand:VSX_D 0 "vsx_register_operand"))
1364 (use (match_operand:VSX_D 1 "vsx_register_operand"))
1365 (use (match_operand:VSX_D 2 "vsx_register_operand"))]
1366 "VECTOR_MEM_VSX_P (<MODE>mode)"
1367{
1368 rtvec v;
1369 rtx x;
1370
1371 v = gen_rtvec (2, GEN_INT (0), GEN_INT (2));
1372 x = gen_rtx_VEC_CONCAT (<VS_double>mode, operands[1], operands[2]);
1373
1374 x = gen_rtx_VEC_SELECT (<MODE>mode, x, gen_rtx_PARALLEL (VOIDmode, v));
1375 emit_insn (gen_rtx_SET (operands[0], x));
1376 DONE;
1377})
1378
be1418c7
CL
1379;; Power8 vector merge two V4SF/V4SI even words to V4SF
1380(define_insn "p8_vmrgew_<mode>"
1381 [(set (match_operand:VSX_W 0 "register_operand" "=v")
1382 (vec_select:VSX_W
1383 (vec_concat:<VS_double>
1384 (match_operand:VSX_W 1 "register_operand" "v")
1385 (match_operand:VSX_W 2 "register_operand" "v"))
0bd62dca
MM
1386 (parallel [(const_int 0) (const_int 4)
1387 (const_int 2) (const_int 6)])))]
1388 "TARGET_P8_VECTOR"
d737743f
BS
1389{
1390 if (BYTES_BIG_ENDIAN)
1391 return "vmrgew %0,%1,%2";
1392 else
1393 return "vmrgow %0,%2,%1";
1394}
0bd62dca
MM
1395 [(set_attr "type" "vecperm")])
1396
19d22f7c
CL
1397(define_insn "p8_vmrgow_<mode>"
1398 [(set (match_operand:VSX_W 0 "register_operand" "=v")
1399 (vec_select:VSX_W
1400 (vec_concat:<VS_double>
1401 (match_operand:VSX_W 1 "register_operand" "v")
1402 (match_operand:VSX_W 2 "register_operand" "v"))
0bd62dca
MM
1403 (parallel [(const_int 1) (const_int 5)
1404 (const_int 3) (const_int 7)])))]
1405 "TARGET_P8_VECTOR"
d737743f
BS
1406{
1407 if (BYTES_BIG_ENDIAN)
1408 return "vmrgow %0,%1,%2";
1409 else
1410 return "vmrgew %0,%2,%1";
1411}
0bd62dca
MM
1412 [(set_attr "type" "vecperm")])
1413
19d22f7c
CL
1414(define_expand "p8_vmrgow_<mode>"
1415 [(use (match_operand:VSX_D 0 "vsx_register_operand"))
1416 (use (match_operand:VSX_D 1 "vsx_register_operand"))
1417 (use (match_operand:VSX_D 2 "vsx_register_operand"))]
1418 "VECTOR_MEM_VSX_P (<MODE>mode)"
1419{
1420 rtvec v;
1421 rtx x;
1422
1423 v = gen_rtvec (2, GEN_INT (1), GEN_INT (3));
1424 x = gen_rtx_VEC_CONCAT (<VS_double>mode, operands[1], operands[2]);
1425
1426 x = gen_rtx_VEC_SELECT (<MODE>mode, x, gen_rtx_PARALLEL (VOIDmode, v));
1427 emit_insn (gen_rtx_SET (operands[0], x));
1428 DONE;
1429})
1430
6ccd2ece
BS
1431(define_insn "p8_vmrgew_<mode>_direct"
1432 [(set (match_operand:VSX_W 0 "register_operand" "=v")
1433 (unspec:VSX_W [(match_operand:VSX_W 1 "register_operand" "v")
1434 (match_operand:VSX_W 2 "register_operand" "v")]
00cc7cbf
MM
1435 UNSPEC_VMRGEW_DIRECT))]
1436 "TARGET_P8_VECTOR"
1437 "vmrgew %0,%1,%2"
1438 [(set_attr "type" "vecperm")])
1439
6ccd2ece
BS
1440(define_insn "p8_vmrgow_<mode>_direct"
1441 [(set (match_operand:VSX_W 0 "register_operand" "=v")
1442 (unspec:VSX_W [(match_operand:VSX_W 1 "register_operand" "v")
1443 (match_operand:VSX_W 2 "register_operand" "v")]
1444 UNSPEC_VMRGOW_DIRECT))]
1445 "TARGET_P8_VECTOR"
1446 "vmrgow %0,%1,%2"
1447 [(set_attr "type" "vecperm")])
1448
1a5d2261 1449(define_expand "vec_widen_umult_even_v16qi"
ad18eed2
SB
1450 [(use (match_operand:V8HI 0 "register_operand"))
1451 (use (match_operand:V16QI 1 "register_operand"))
1452 (use (match_operand:V16QI 2 "register_operand"))]
1a5d2261
BS
1453 "TARGET_ALTIVEC"
1454{
427a7384 1455 if (BYTES_BIG_ENDIAN)
1a5d2261
BS
1456 emit_insn (gen_altivec_vmuleub (operands[0], operands[1], operands[2]));
1457 else
1458 emit_insn (gen_altivec_vmuloub (operands[0], operands[1], operands[2]));
1459 DONE;
1460})
1461
1462(define_expand "vec_widen_smult_even_v16qi"
ad18eed2
SB
1463 [(use (match_operand:V8HI 0 "register_operand"))
1464 (use (match_operand:V16QI 1 "register_operand"))
1465 (use (match_operand:V16QI 2 "register_operand"))]
1a5d2261
BS
1466 "TARGET_ALTIVEC"
1467{
427a7384 1468 if (BYTES_BIG_ENDIAN)
1a5d2261
BS
1469 emit_insn (gen_altivec_vmulesb (operands[0], operands[1], operands[2]));
1470 else
1471 emit_insn (gen_altivec_vmulosb (operands[0], operands[1], operands[2]));
1472 DONE;
1473})
1474
1475(define_expand "vec_widen_umult_even_v8hi"
ad18eed2
SB
1476 [(use (match_operand:V4SI 0 "register_operand"))
1477 (use (match_operand:V8HI 1 "register_operand"))
1478 (use (match_operand:V8HI 2 "register_operand"))]
1a5d2261
BS
1479 "TARGET_ALTIVEC"
1480{
427a7384 1481 if (BYTES_BIG_ENDIAN)
1a5d2261
BS
1482 emit_insn (gen_altivec_vmuleuh (operands[0], operands[1], operands[2]));
1483 else
1484 emit_insn (gen_altivec_vmulouh (operands[0], operands[1], operands[2]));
1485 DONE;
1486})
1487
1488(define_expand "vec_widen_smult_even_v8hi"
ad18eed2
SB
1489 [(use (match_operand:V4SI 0 "register_operand"))
1490 (use (match_operand:V8HI 1 "register_operand"))
1491 (use (match_operand:V8HI 2 "register_operand"))]
1a5d2261
BS
1492 "TARGET_ALTIVEC"
1493{
427a7384 1494 if (BYTES_BIG_ENDIAN)
1a5d2261
BS
1495 emit_insn (gen_altivec_vmulesh (operands[0], operands[1], operands[2]));
1496 else
1497 emit_insn (gen_altivec_vmulosh (operands[0], operands[1], operands[2]));
1498 DONE;
1499})
1500
2afb2668
CL
1501(define_expand "vec_widen_umult_even_v4si"
1502 [(use (match_operand:V2DI 0 "register_operand"))
1503 (use (match_operand:V4SI 1 "register_operand"))
1504 (use (match_operand:V4SI 2 "register_operand"))]
1505 "TARGET_P8_VECTOR"
1506{
427a7384 1507 if (BYTES_BIG_ENDIAN)
2afb2668
CL
1508 emit_insn (gen_altivec_vmuleuw (operands[0], operands[1], operands[2]));
1509 else
1510 emit_insn (gen_altivec_vmulouw (operands[0], operands[1], operands[2]));
1511 DONE;
1512})
1513
f03122f2
CL
1514(define_expand "vec_widen_umult_even_v2di"
1515 [(use (match_operand:V1TI 0 "register_operand"))
1516 (use (match_operand:V2DI 1 "register_operand"))
1517 (use (match_operand:V2DI 2 "register_operand"))]
1518 "TARGET_POWER10"
1519{
1520 if (BYTES_BIG_ENDIAN)
1521 emit_insn (gen_altivec_vmuleud (operands[0], operands[1], operands[2]));
1522 else
1523 emit_insn (gen_altivec_vmuloud (operands[0], operands[1], operands[2]));
1524 DONE;
1525})
1526
2afb2668
CL
1527(define_expand "vec_widen_smult_even_v4si"
1528 [(use (match_operand:V2DI 0 "register_operand"))
1529 (use (match_operand:V4SI 1 "register_operand"))
1530 (use (match_operand:V4SI 2 "register_operand"))]
1531 "TARGET_P8_VECTOR"
1532{
427a7384 1533 if (BYTES_BIG_ENDIAN)
2afb2668
CL
1534 emit_insn (gen_altivec_vmulesw (operands[0], operands[1], operands[2]));
1535 else
1536 emit_insn (gen_altivec_vmulosw (operands[0], operands[1], operands[2]));
1537 DONE;
1538})
1539
f03122f2
CL
1540(define_expand "vec_widen_smult_even_v2di"
1541 [(use (match_operand:V1TI 0 "register_operand"))
1542 (use (match_operand:V2DI 1 "register_operand"))
1543 (use (match_operand:V2DI 2 "register_operand"))]
1544 "TARGET_POWER10"
1545{
1546 if (BYTES_BIG_ENDIAN)
1547 emit_insn (gen_altivec_vmulesd (operands[0], operands[1], operands[2]));
1548 else
1549 emit_insn (gen_altivec_vmulosd (operands[0], operands[1], operands[2]));
1550 DONE;
1551})
1552
1a5d2261 1553(define_expand "vec_widen_umult_odd_v16qi"
ad18eed2
SB
1554 [(use (match_operand:V8HI 0 "register_operand"))
1555 (use (match_operand:V16QI 1 "register_operand"))
1556 (use (match_operand:V16QI 2 "register_operand"))]
1a5d2261
BS
1557 "TARGET_ALTIVEC"
1558{
427a7384 1559 if (BYTES_BIG_ENDIAN)
1a5d2261
BS
1560 emit_insn (gen_altivec_vmuloub (operands[0], operands[1], operands[2]));
1561 else
1562 emit_insn (gen_altivec_vmuleub (operands[0], operands[1], operands[2]));
1563 DONE;
1564})
1565
1566(define_expand "vec_widen_smult_odd_v16qi"
ad18eed2
SB
1567 [(use (match_operand:V8HI 0 "register_operand"))
1568 (use (match_operand:V16QI 1 "register_operand"))
1569 (use (match_operand:V16QI 2 "register_operand"))]
1a5d2261
BS
1570 "TARGET_ALTIVEC"
1571{
427a7384 1572 if (BYTES_BIG_ENDIAN)
1a5d2261
BS
1573 emit_insn (gen_altivec_vmulosb (operands[0], operands[1], operands[2]));
1574 else
1575 emit_insn (gen_altivec_vmulesb (operands[0], operands[1], operands[2]));
1576 DONE;
1577})
1578
1579(define_expand "vec_widen_umult_odd_v8hi"
ad18eed2
SB
1580 [(use (match_operand:V4SI 0 "register_operand"))
1581 (use (match_operand:V8HI 1 "register_operand"))
1582 (use (match_operand:V8HI 2 "register_operand"))]
1a5d2261
BS
1583 "TARGET_ALTIVEC"
1584{
427a7384 1585 if (BYTES_BIG_ENDIAN)
1a5d2261
BS
1586 emit_insn (gen_altivec_vmulouh (operands[0], operands[1], operands[2]));
1587 else
1588 emit_insn (gen_altivec_vmuleuh (operands[0], operands[1], operands[2]));
1589 DONE;
1590})
1591
1592(define_expand "vec_widen_smult_odd_v8hi"
ad18eed2
SB
1593 [(use (match_operand:V4SI 0 "register_operand"))
1594 (use (match_operand:V8HI 1 "register_operand"))
1595 (use (match_operand:V8HI 2 "register_operand"))]
1a5d2261
BS
1596 "TARGET_ALTIVEC"
1597{
427a7384 1598 if (BYTES_BIG_ENDIAN)
1a5d2261
BS
1599 emit_insn (gen_altivec_vmulosh (operands[0], operands[1], operands[2]));
1600 else
1601 emit_insn (gen_altivec_vmulesh (operands[0], operands[1], operands[2]));
1602 DONE;
1603})
1604
2afb2668
CL
1605(define_expand "vec_widen_umult_odd_v4si"
1606 [(use (match_operand:V2DI 0 "register_operand"))
1607 (use (match_operand:V4SI 1 "register_operand"))
1608 (use (match_operand:V4SI 2 "register_operand"))]
1609 "TARGET_P8_VECTOR"
1610{
427a7384 1611 if (BYTES_BIG_ENDIAN)
2afb2668
CL
1612 emit_insn (gen_altivec_vmulouw (operands[0], operands[1], operands[2]));
1613 else
1614 emit_insn (gen_altivec_vmuleuw (operands[0], operands[1], operands[2]));
1615 DONE;
1616})
1617
f03122f2
CL
1618(define_expand "vec_widen_umult_odd_v2di"
1619 [(use (match_operand:V1TI 0 "register_operand"))
1620 (use (match_operand:V2DI 1 "register_operand"))
1621 (use (match_operand:V2DI 2 "register_operand"))]
1622 "TARGET_POWER10"
1623{
1624 if (BYTES_BIG_ENDIAN)
1625 emit_insn (gen_altivec_vmuloud (operands[0], operands[1], operands[2]));
1626 else
1627 emit_insn (gen_altivec_vmuleud (operands[0], operands[1], operands[2]));
1628 DONE;
1629})
1630
2afb2668
CL
1631(define_expand "vec_widen_smult_odd_v4si"
1632 [(use (match_operand:V2DI 0 "register_operand"))
1633 (use (match_operand:V4SI 1 "register_operand"))
1634 (use (match_operand:V4SI 2 "register_operand"))]
1635 "TARGET_P8_VECTOR"
1636{
427a7384 1637 if (BYTES_BIG_ENDIAN)
2afb2668
CL
1638 emit_insn (gen_altivec_vmulosw (operands[0], operands[1], operands[2]));
1639 else
1640 emit_insn (gen_altivec_vmulesw (operands[0], operands[1], operands[2]));
1641 DONE;
1642})
1643
f03122f2
CL
1644(define_expand "vec_widen_smult_odd_v2di"
1645 [(use (match_operand:V1TI 0 "register_operand"))
1646 (use (match_operand:V2DI 1 "register_operand"))
1647 (use (match_operand:V2DI 2 "register_operand"))]
1648 "TARGET_POWER10"
1649{
1650 if (BYTES_BIG_ENDIAN)
1651 emit_insn (gen_altivec_vmulosd (operands[0], operands[1], operands[2]));
1652 else
1653 emit_insn (gen_altivec_vmulesd (operands[0], operands[1], operands[2]));
1654 DONE;
1655})
1656
1a5d2261 1657(define_insn "altivec_vmuleub"
10ed84db
AH
1658 [(set (match_operand:V8HI 0 "register_operand" "=v")
1659 (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")
3aca4bff
DE
1660 (match_operand:V16QI 2 "register_operand" "v")]
1661 UNSPEC_VMULEUB))]
10ed84db
AH
1662 "TARGET_ALTIVEC"
1663 "vmuleub %0,%1,%2"
1664 [(set_attr "type" "veccomplex")])
1665
1a5d2261 1666(define_insn "altivec_vmuloub"
10ed84db
AH
1667 [(set (match_operand:V8HI 0 "register_operand" "=v")
1668 (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")
3aca4bff 1669 (match_operand:V16QI 2 "register_operand" "v")]
1a5d2261 1670 UNSPEC_VMULOUB))]
10ed84db 1671 "TARGET_ALTIVEC"
1a5d2261 1672 "vmuloub %0,%1,%2"
10ed84db
AH
1673 [(set_attr "type" "veccomplex")])
1674
1a5d2261 1675(define_insn "altivec_vmulesb"
10ed84db
AH
1676 [(set (match_operand:V8HI 0 "register_operand" "=v")
1677 (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")
3aca4bff 1678 (match_operand:V16QI 2 "register_operand" "v")]
1a5d2261 1679 UNSPEC_VMULESB))]
10ed84db 1680 "TARGET_ALTIVEC"
1a5d2261 1681 "vmulesb %0,%1,%2"
10ed84db
AH
1682 [(set_attr "type" "veccomplex")])
1683
1a5d2261 1684(define_insn "altivec_vmulosb"
10ed84db
AH
1685 [(set (match_operand:V8HI 0 "register_operand" "=v")
1686 (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")
3aca4bff
DE
1687 (match_operand:V16QI 2 "register_operand" "v")]
1688 UNSPEC_VMULOSB))]
10ed84db
AH
1689 "TARGET_ALTIVEC"
1690 "vmulosb %0,%1,%2"
1691 [(set_attr "type" "veccomplex")])
1692
1a5d2261
BS
1693(define_insn "altivec_vmuleuh"
1694 [(set (match_operand:V4SI 0 "register_operand" "=v")
1695 (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
1696 (match_operand:V8HI 2 "register_operand" "v")]
1697 UNSPEC_VMULEUH))]
1698 "TARGET_ALTIVEC"
1699 "vmuleuh %0,%1,%2"
1700 [(set_attr "type" "veccomplex")])
1701
1702(define_insn "altivec_vmulouh"
10ed84db
AH
1703 [(set (match_operand:V4SI 0 "register_operand" "=v")
1704 (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
3aca4bff
DE
1705 (match_operand:V8HI 2 "register_operand" "v")]
1706 UNSPEC_VMULOUH))]
10ed84db
AH
1707 "TARGET_ALTIVEC"
1708 "vmulouh %0,%1,%2"
1709 [(set_attr "type" "veccomplex")])
1710
1a5d2261
BS
1711(define_insn "altivec_vmulesh"
1712 [(set (match_operand:V4SI 0 "register_operand" "=v")
1713 (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
1714 (match_operand:V8HI 2 "register_operand" "v")]
1715 UNSPEC_VMULESH))]
1716 "TARGET_ALTIVEC"
1717 "vmulesh %0,%1,%2"
1718 [(set_attr "type" "veccomplex")])
1719
1720(define_insn "altivec_vmulosh"
10ed84db
AH
1721 [(set (match_operand:V4SI 0 "register_operand" "=v")
1722 (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
3aca4bff
DE
1723 (match_operand:V8HI 2 "register_operand" "v")]
1724 UNSPEC_VMULOSH))]
10ed84db
AH
1725 "TARGET_ALTIVEC"
1726 "vmulosh %0,%1,%2"
1727 [(set_attr "type" "veccomplex")])
1728
5ff4baa5
CL
1729(define_insn "altivec_vmuleuw"
1730 [(set (match_operand:V2DI 0 "register_operand" "=v")
1731 (unspec:V2DI [(match_operand:V4SI 1 "register_operand" "v")
1732 (match_operand:V4SI 2 "register_operand" "v")]
1733 UNSPEC_VMULEUW))]
1734 "TARGET_P8_VECTOR"
1735 "vmuleuw %0,%1,%2"
1736 [(set_attr "type" "veccomplex")])
1737
f03122f2
CL
1738(define_insn "altivec_vmuleud"
1739 [(set (match_operand:V1TI 0 "register_operand" "=v")
1740 (unspec:V1TI [(match_operand:V2DI 1 "register_operand" "v")
1741 (match_operand:V2DI 2 "register_operand" "v")]
1742 UNSPEC_VMULEUD))]
1743 "TARGET_POWER10"
1744 "vmuleud %0,%1,%2"
1745 [(set_attr "type" "veccomplex")])
1746
5ff4baa5
CL
1747(define_insn "altivec_vmulouw"
1748 [(set (match_operand:V2DI 0 "register_operand" "=v")
1749 (unspec:V2DI [(match_operand:V4SI 1 "register_operand" "v")
1750 (match_operand:V4SI 2 "register_operand" "v")]
1751 UNSPEC_VMULOUW))]
1752 "TARGET_P8_VECTOR"
1753 "vmulouw %0,%1,%2"
1754 [(set_attr "type" "veccomplex")])
1755
f03122f2
CL
1756(define_insn "altivec_vmuloud"
1757 [(set (match_operand:V1TI 0 "register_operand" "=v")
1758 (unspec:V1TI [(match_operand:V2DI 1 "register_operand" "v")
1759 (match_operand:V2DI 2 "register_operand" "v")]
1760 UNSPEC_VMULOUD))]
1761 "TARGET_POWER10"
1762 "vmuloud %0,%1,%2"
1763 [(set_attr "type" "veccomplex")])
1764
5ff4baa5
CL
1765(define_insn "altivec_vmulesw"
1766 [(set (match_operand:V2DI 0 "register_operand" "=v")
1767 (unspec:V2DI [(match_operand:V4SI 1 "register_operand" "v")
1768 (match_operand:V4SI 2 "register_operand" "v")]
1769 UNSPEC_VMULESW))]
1770 "TARGET_P8_VECTOR"
1771 "vmulesw %0,%1,%2"
1772 [(set_attr "type" "veccomplex")])
1773
f03122f2
CL
1774(define_insn "altivec_vmulesd"
1775 [(set (match_operand:V1TI 0 "register_operand" "=v")
1776 (unspec:V1TI [(match_operand:V2DI 1 "register_operand" "v")
1777 (match_operand:V2DI 2 "register_operand" "v")]
1778 UNSPEC_VMULESD))]
1779 "TARGET_POWER10"
1780 "vmulesd %0,%1,%2"
1781 [(set_attr "type" "veccomplex")])
1782
5ff4baa5
CL
1783(define_insn "altivec_vmulosw"
1784 [(set (match_operand:V2DI 0 "register_operand" "=v")
1785 (unspec:V2DI [(match_operand:V4SI 1 "register_operand" "v")
1786 (match_operand:V4SI 2 "register_operand" "v")]
1787 UNSPEC_VMULOSW))]
1788 "TARGET_P8_VECTOR"
1789 "vmulosw %0,%1,%2"
1790 [(set_attr "type" "veccomplex")])
f96bc213 1791
f03122f2
CL
1792(define_insn "altivec_vmulosd"
1793 [(set (match_operand:V1TI 0 "register_operand" "=v")
1794 (unspec:V1TI [(match_operand:V2DI 1 "register_operand" "v")
1795 (match_operand:V2DI 2 "register_operand" "v")]
1796 UNSPEC_VMULOSD))]
1797 "TARGET_POWER10"
1798 "vmulosd %0,%1,%2"
1799 [(set_attr "type" "veccomplex")])
1800
dd7a40e1 1801;; Vector pack/unpack
10ed84db
AH
1802(define_insn "altivec_vpkpx"
1803 [(set (match_operand:V8HI 0 "register_operand" "=v")
1804 (unspec:V8HI [(match_operand:V4SI 1 "register_operand" "v")
3aca4bff
DE
1805 (match_operand:V4SI 2 "register_operand" "v")]
1806 UNSPEC_VPKPX))]
10ed84db 1807 "TARGET_ALTIVEC"
6c332313 1808{
427a7384 1809 if (BYTES_BIG_ENDIAN)
6c332313
SB
1810 return "vpkpx %0,%1,%2";
1811 else
1812 return "vpkpx %0,%2,%1";
1813}
10ed84db
AH
1814 [(set_attr "type" "vecperm")])
1815
a5965b52
MM
1816(define_insn "altivec_vpks<VI_char>ss"
1817 [(set (match_operand:<VP_small> 0 "register_operand" "=v")
1818 (unspec:<VP_small> [(match_operand:VP 1 "register_operand" "v")
1819 (match_operand:VP 2 "register_operand" "v")]
1820 UNSPEC_VPACK_SIGN_SIGN_SAT))]
1821 "<VI_unit>"
6c332313 1822{
427a7384 1823 if (BYTES_BIG_ENDIAN)
6c332313
SB
1824 return "vpks<VI_char>ss %0,%1,%2";
1825 else
1826 return "vpks<VI_char>ss %0,%2,%1";
1827}
10ed84db
AH
1828 [(set_attr "type" "vecperm")])
1829
a5965b52
MM
1830(define_insn "altivec_vpks<VI_char>us"
1831 [(set (match_operand:<VP_small> 0 "register_operand" "=v")
1832 (unspec:<VP_small> [(match_operand:VP 1 "register_operand" "v")
1833 (match_operand:VP 2 "register_operand" "v")]
1834 UNSPEC_VPACK_SIGN_UNS_SAT))]
1835 "<VI_unit>"
6c332313 1836{
427a7384 1837 if (BYTES_BIG_ENDIAN)
6c332313
SB
1838 return "vpks<VI_char>us %0,%1,%2";
1839 else
1840 return "vpks<VI_char>us %0,%2,%1";
1841}
10ed84db
AH
1842 [(set_attr "type" "vecperm")])
1843
a5965b52
MM
1844(define_insn "altivec_vpku<VI_char>us"
1845 [(set (match_operand:<VP_small> 0 "register_operand" "=v")
1846 (unspec:<VP_small> [(match_operand:VP 1 "register_operand" "v")
1847 (match_operand:VP 2 "register_operand" "v")]
1848 UNSPEC_VPACK_UNS_UNS_SAT))]
1849 "<VI_unit>"
6c332313 1850{
427a7384 1851 if (BYTES_BIG_ENDIAN)
6c332313
SB
1852 return "vpku<VI_char>us %0,%1,%2";
1853 else
1854 return "vpku<VI_char>us %0,%2,%1";
1855}
10ed84db
AH
1856 [(set_attr "type" "vecperm")])
1857
a5965b52
MM
1858(define_insn "altivec_vpku<VI_char>um"
1859 [(set (match_operand:<VP_small> 0 "register_operand" "=v")
1860 (unspec:<VP_small> [(match_operand:VP 1 "register_operand" "v")
1861 (match_operand:VP 2 "register_operand" "v")]
1862 UNSPEC_VPACK_UNS_UNS_MOD))]
1863 "<VI_unit>"
6c332313 1864{
427a7384 1865 if (BYTES_BIG_ENDIAN)
6c332313
SB
1866 return "vpku<VI_char>um %0,%1,%2";
1867 else
1868 return "vpku<VI_char>um %0,%2,%1";
1869}
52a93551
BS
1870 [(set_attr "type" "vecperm")])
1871
1872(define_insn "altivec_vpku<VI_char>um_direct"
1873 [(set (match_operand:<VP_small> 0 "register_operand" "=v")
1874 (unspec:<VP_small> [(match_operand:VP 1 "register_operand" "v")
1875 (match_operand:VP 2 "register_operand" "v")]
1876 UNSPEC_VPACK_UNS_UNS_MOD_DIRECT))]
1877 "<VI_unit>"
6c332313
SB
1878{
1879 if (BYTES_BIG_ENDIAN)
1880 return "vpku<VI_char>um %0,%1,%2";
1881 else
1882 return "vpku<VI_char>um %0,%2,%1";
1883}
10ed84db
AH
1884 [(set_attr "type" "vecperm")])
1885
eaba55ff 1886(define_insn "altivec_vrl<VI_char>"
a5965b52
MM
1887 [(set (match_operand:VI2 0 "register_operand" "=v")
1888 (rotate:VI2 (match_operand:VI2 1 "register_operand" "v")
1889 (match_operand:VI2 2 "register_operand" "v")))]
1890 "<VI_unit>"
aba5fb01 1891 "vrl<VI_char> %0,%1,%2"
10ed84db
AH
1892 [(set_attr "type" "vecsimple")])
1893
f03122f2
CL
1894(define_insn "altivec_vrlq"
1895 [(set (match_operand:V1TI 0 "vsx_register_operand" "=v")
1896 (rotate:V1TI (match_operand:V1TI 1 "vsx_register_operand" "v")
1897 (match_operand:V1TI 2 "vsx_register_operand" "v")))]
1898 "TARGET_POWER10"
1899;; rotate amount in needs to be in bits[57:63] of operand2.
1900 "vrlq %0,%1,%2"
1901 [(set_attr "type" "vecsimple")])
1902
a6607774
BS
1903(define_insn "altivec_vrl<VI_char>mi"
1904 [(set (match_operand:VIlong 0 "register_operand" "=v")
2142e343
CL
1905 (unspec:VIlong [(match_operand:VIlong 1 "register_operand" "v")
1906 (match_operand:VIlong 2 "register_operand" "0")
a6607774
BS
1907 (match_operand:VIlong 3 "register_operand" "v")]
1908 UNSPEC_VRLMI))]
1909 "TARGET_P9_VECTOR"
2142e343 1910 "vrl<VI_char>mi %0,%1,%3"
a6607774
BS
1911 [(set_attr "type" "veclogical")])
1912
f03122f2
CL
1913(define_expand "altivec_vrlqmi"
1914 [(set (match_operand:V1TI 0 "vsx_register_operand")
1915 (unspec:V1TI [(match_operand:V1TI 1 "vsx_register_operand")
1916 (match_operand:V1TI 2 "vsx_register_operand")
1917 (match_operand:V1TI 3 "vsx_register_operand")]
1918 UNSPEC_VRLMI))]
1919 "TARGET_POWER10"
1920{
1921 /* Mask bit begin, end fields need to be in bits [41:55] of 128-bit operand2.
1922 Shift amount in needs to be put in bits[57:63] of 128-bit operand2. */
1923 rtx tmp = gen_reg_rtx (V1TImode);
1924
1925 emit_insn (gen_xxswapd_v1ti (tmp, operands[3]));
1926 emit_insn (gen_altivec_vrlqmi_inst (operands[0], operands[1], operands[2],
1927 tmp));
1928 DONE;
1929})
1930
1931(define_insn "altivec_vrlqmi_inst"
1932 [(set (match_operand:V1TI 0 "vsx_register_operand" "=v")
1933 (unspec:V1TI [(match_operand:V1TI 1 "vsx_register_operand" "v")
1934 (match_operand:V1TI 2 "vsx_register_operand" "0")
1935 (match_operand:V1TI 3 "vsx_register_operand" "v")]
1936 UNSPEC_VRLMI))]
1937 "TARGET_POWER10"
1938 "vrlqmi %0,%1,%3"
1939 [(set_attr "type" "veclogical")])
1940
a6607774
BS
1941(define_insn "altivec_vrl<VI_char>nm"
1942 [(set (match_operand:VIlong 0 "register_operand" "=v")
1943 (unspec:VIlong [(match_operand:VIlong 1 "register_operand" "v")
1944 (match_operand:VIlong 2 "register_operand" "v")]
1945 UNSPEC_VRLNM))]
1946 "TARGET_P9_VECTOR"
1947 "vrl<VI_char>nm %0,%1,%2"
1948 [(set_attr "type" "veclogical")])
1949
f03122f2
CL
1950(define_expand "altivec_vrlqnm"
1951 [(set (match_operand:V1TI 0 "vsx_register_operand")
1952 (unspec:V1TI [(match_operand:V1TI 1 "vsx_register_operand")
1953 (match_operand:V1TI 2 "vsx_register_operand")]
1954 UNSPEC_VRLNM))]
1955 "TARGET_POWER10"
1956{
1957 /* Shift amount in needs to be put in bits[57:63] of 128-bit operand2. */
1958 rtx tmp = gen_reg_rtx (V1TImode);
1959
1960 emit_insn (gen_xxswapd_v1ti (tmp, operands[2]));
1961 emit_insn (gen_altivec_vrlqnm_inst (operands[0], operands[1], tmp));
1962 DONE;
1963})
1964
1965(define_insn "altivec_vrlqnm_inst"
1966 [(set (match_operand:V1TI 0 "vsx_register_operand" "=v")
1967 (unspec:V1TI [(match_operand:V1TI 1 "vsx_register_operand" "v")
1968 (match_operand:V1TI 2 "vsx_register_operand" "v")]
1969 UNSPEC_VRLNM))]
1970 "TARGET_POWER10"
1971 ;; rotate and mask bits need to be in upper 64-bits of operand2.
1972 "vrlqnm %0,%1,%2"
1973 [(set_attr "type" "veclogical")])
1974
10ed84db
AH
1975(define_insn "altivec_vsl"
1976 [(set (match_operand:V4SI 0 "register_operand" "=v")
1977 (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
3aca4bff
DE
1978 (match_operand:V4SI 2 "register_operand" "v")]
1979 UNSPEC_VSLV4SI))]
10ed84db
AH
1980 "TARGET_ALTIVEC"
1981 "vsl %0,%1,%2"
1982 [(set_attr "type" "vecperm")])
1983
1984(define_insn "altivec_vslo"
1985 [(set (match_operand:V4SI 0 "register_operand" "=v")
1986 (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
3aca4bff
DE
1987 (match_operand:V4SI 2 "register_operand" "v")]
1988 UNSPEC_VSLO))]
10ed84db
AH
1989 "TARGET_ALTIVEC"
1990 "vslo %0,%1,%2"
1991 [(set_attr "type" "vecperm")])
1992
e342b9d8
KN
1993(define_insn "vslv"
1994 [(set (match_operand:V16QI 0 "register_operand" "=v")
1995 (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
1996 (match_operand:V16QI 2 "register_operand" "v")]
1997 UNSPEC_VSLV))]
1998 "TARGET_P9_VECTOR"
1999 "vslv %0,%1,%2"
2000 [(set_attr "type" "vecsimple")])
2001
2002(define_insn "vsrv"
2003 [(set (match_operand:V16QI 0 "register_operand" "=v")
2004 (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
2005 (match_operand:V16QI 2 "register_operand" "v")]
2006 UNSPEC_VSRV))]
2007 "TARGET_P9_VECTOR"
2008 "vsrv %0,%1,%2"
2009 [(set_attr "type" "vecsimple")])
2010
a72c65c7 2011(define_insn "*altivec_vsl<VI_char>"
a5965b52
MM
2012 [(set (match_operand:VI2 0 "register_operand" "=v")
2013 (ashift:VI2 (match_operand:VI2 1 "register_operand" "v")
2014 (match_operand:VI2 2 "register_operand" "v")))]
2015 "<VI_unit>"
e83a75a7
IR
2016 "vsl<VI_char> %0,%1,%2"
2017 [(set_attr "type" "vecsimple")])
2018
f1701864
CL
2019(define_insn "altivec_vslq_<mode>"
2020 [(set (match_operand:VEC_TI 0 "vsx_register_operand" "=v")
2021 (ashift:VEC_TI (match_operand:VEC_TI 1 "vsx_register_operand" "v")
2022 (match_operand:VEC_TI 2 "vsx_register_operand" "v")))]
f03122f2
CL
2023 "TARGET_POWER10"
2024 /* Shift amount in needs to be in bits[57:63] of 128-bit operand. */
2025 "vslq %0,%1,%2"
2026 [(set_attr "type" "vecsimple")])
2027
a72c65c7 2028(define_insn "*altivec_vsr<VI_char>"
a5965b52
MM
2029 [(set (match_operand:VI2 0 "register_operand" "=v")
2030 (lshiftrt:VI2 (match_operand:VI2 1 "register_operand" "v")
2031 (match_operand:VI2 2 "register_operand" "v")))]
2032 "<VI_unit>"
aba5fb01 2033 "vsr<VI_char> %0,%1,%2"
10ed84db
AH
2034 [(set_attr "type" "vecsimple")])
2035
f1701864
CL
2036(define_insn "altivec_vsrq_<mode>"
2037 [(set (match_operand:VEC_TI 0 "vsx_register_operand" "=v")
2038 (lshiftrt:VEC_TI (match_operand:VEC_TI 1 "vsx_register_operand" "v")
2039 (match_operand:VEC_TI 2 "vsx_register_operand" "v")))]
f03122f2
CL
2040 "TARGET_POWER10"
2041 /* Shift amount in needs to be in bits[57:63] of 128-bit operand. */
2042 "vsrq %0,%1,%2"
2043 [(set_attr "type" "vecsimple")])
2044
a72c65c7 2045(define_insn "*altivec_vsra<VI_char>"
a5965b52
MM
2046 [(set (match_operand:VI2 0 "register_operand" "=v")
2047 (ashiftrt:VI2 (match_operand:VI2 1 "register_operand" "v")
2048 (match_operand:VI2 2 "register_operand" "v")))]
2049 "<VI_unit>"
aba5fb01 2050 "vsra<VI_char> %0,%1,%2"
10ed84db
AH
2051 [(set_attr "type" "vecsimple")])
2052
f03122f2
CL
2053(define_insn "altivec_vsraq"
2054 [(set (match_operand:V1TI 0 "vsx_register_operand" "=v")
2055 (ashiftrt:V1TI (match_operand:V1TI 1 "vsx_register_operand" "v")
2056 (match_operand:V1TI 2 "vsx_register_operand" "v")))]
2057 "TARGET_POWER10"
2058 /* Shift amount in needs to be in bits[57:63] of 128-bit operand. */
2059 "vsraq %0,%1,%2"
2060 [(set_attr "type" "vecsimple")])
2061
10ed84db
AH
2062(define_insn "altivec_vsr"
2063 [(set (match_operand:V4SI 0 "register_operand" "=v")
2064 (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
3aca4bff
DE
2065 (match_operand:V4SI 2 "register_operand" "v")]
2066 UNSPEC_VSR))]
10ed84db
AH
2067 "TARGET_ALTIVEC"
2068 "vsr %0,%1,%2"
2069 [(set_attr "type" "vecperm")])
2070
2071(define_insn "altivec_vsro"
2072 [(set (match_operand:V4SI 0 "register_operand" "=v")
2073 (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
3aca4bff
DE
2074 (match_operand:V4SI 2 "register_operand" "v")]
2075 UNSPEC_VSRO))]
10ed84db
AH
2076 "TARGET_ALTIVEC"
2077 "vsro %0,%1,%2"
2078 [(set_attr "type" "vecperm")])
2079
10ed84db
AH
2080(define_insn "altivec_vsum4ubs"
2081 [(set (match_operand:V4SI 0 "register_operand" "=v")
2082 (unspec:V4SI [(match_operand:V16QI 1 "register_operand" "v")
3aca4bff
DE
2083 (match_operand:V4SI 2 "register_operand" "v")]
2084 UNSPEC_VSUM4UBS))
3b2475ab 2085 (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
10ed84db
AH
2086 "TARGET_ALTIVEC"
2087 "vsum4ubs %0,%1,%2"
2088 [(set_attr "type" "veccomplex")])
2089
aba5fb01 2090(define_insn "altivec_vsum4s<VI_char>s"
10ed84db 2091 [(set (match_operand:V4SI 0 "register_operand" "=v")
aba5fb01 2092 (unspec:V4SI [(match_operand:VIshort 1 "register_operand" "v")
3aca4bff
DE
2093 (match_operand:V4SI 2 "register_operand" "v")]
2094 UNSPEC_VSUM4S))
3b2475ab 2095 (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
10ed84db 2096 "TARGET_ALTIVEC"
aba5fb01 2097 "vsum4s<VI_char>s %0,%1,%2"
10ed84db
AH
2098 [(set_attr "type" "veccomplex")])
2099
55b2ce1c
BS
2100(define_expand "altivec_vsum2sws"
2101 [(use (match_operand:V4SI 0 "register_operand"))
2102 (use (match_operand:V4SI 1 "register_operand"))
2103 (use (match_operand:V4SI 2 "register_operand"))]
10ed84db 2104 "TARGET_ALTIVEC"
7b1cd427 2105{
427a7384 2106 if (BYTES_BIG_ENDIAN)
55b2ce1c
BS
2107 emit_insn (gen_altivec_vsum2sws_direct (operands[0], operands[1],
2108 operands[2]));
7b1cd427 2109 else
55b2ce1c
BS
2110 {
2111 rtx tmp1 = gen_reg_rtx (V4SImode);
2112 rtx tmp2 = gen_reg_rtx (V4SImode);
2113 emit_insn (gen_altivec_vsldoi_v4si (tmp1, operands[2],
2114 operands[2], GEN_INT (12)));
2115 emit_insn (gen_altivec_vsum2sws_direct (tmp2, operands[1], tmp1));
2116 emit_insn (gen_altivec_vsldoi_v4si (operands[0], tmp2, tmp2,
2117 GEN_INT (4)));
2118 }
2119 DONE;
2120})
2121
2122; FIXME: This can probably be expressed without an UNSPEC.
2123(define_insn "altivec_vsum2sws_direct"
10ed84db
AH
2124 [(set (match_operand:V4SI 0 "register_operand" "=v")
2125 (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
55b2ce1c
BS
2126 (match_operand:V4SI 2 "register_operand" "v")]
2127 UNSPEC_VSUM2SWS))
2128 (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
2129 "TARGET_ALTIVEC"
2130 "vsum2sws %0,%1,%2"
2131 [(set_attr "type" "veccomplex")])
2132
2133(define_expand "altivec_vsumsws"
2134 [(use (match_operand:V4SI 0 "register_operand"))
2135 (use (match_operand:V4SI 1 "register_operand"))
2136 (use (match_operand:V4SI 2 "register_operand"))]
b80afde9
BS
2137 "TARGET_ALTIVEC"
2138{
427a7384 2139 if (BYTES_BIG_ENDIAN)
55b2ce1c
BS
2140 emit_insn (gen_altivec_vsumsws_direct (operands[0], operands[1],
2141 operands[2]));
b80afde9 2142 else
55b2ce1c
BS
2143 {
2144 rtx tmp1 = gen_reg_rtx (V4SImode);
2145 rtx tmp2 = gen_reg_rtx (V4SImode);
2146 emit_insn (gen_altivec_vspltw_direct (tmp1, operands[2], const0_rtx));
2147 emit_insn (gen_altivec_vsumsws_direct (tmp2, operands[1], tmp1));
2148 emit_insn (gen_altivec_vsldoi_v4si (operands[0], tmp2, tmp2,
2149 GEN_INT (12)));
2150 }
2151 DONE;
2152})
b80afde9 2153
55b2ce1c 2154; FIXME: This can probably be expressed without an UNSPEC.
b80afde9
BS
2155(define_insn "altivec_vsumsws_direct"
2156 [(set (match_operand:V4SI 0 "register_operand" "=v")
2157 (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
2158 (match_operand:V4SI 2 "register_operand" "v")]
2159 UNSPEC_VSUMSWS_DIRECT))
3b2475ab 2160 (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
10ed84db
AH
2161 "TARGET_ALTIVEC"
2162 "vsumsws %0,%1,%2"
2163 [(set_attr "type" "veccomplex")])
2164
bf53d4b8 2165(define_expand "altivec_vspltb"
ad18eed2
SB
2166 [(use (match_operand:V16QI 0 "register_operand"))
2167 (use (match_operand:V16QI 1 "register_operand"))
a9f6e019 2168 (use (match_operand:QI 2 "const_0_to_15_operand"))]
bf53d4b8
BS
2169 "TARGET_ALTIVEC"
2170{
427a7384 2171 rtvec v = gen_rtvec (1, operands[2]);
bf53d4b8 2172 rtx x;
bf53d4b8
BS
2173 x = gen_rtx_VEC_SELECT (QImode, operands[1], gen_rtx_PARALLEL (VOIDmode, v));
2174 x = gen_rtx_VEC_DUPLICATE (V16QImode, x);
f7df4a84 2175 emit_insn (gen_rtx_SET (operands[0], x));
bf53d4b8
BS
2176 DONE;
2177})
2178
2179(define_insn "*altivec_vspltb_internal"
10ed84db 2180 [(set (match_operand:V16QI 0 "register_operand" "=v")
e5f9d916
AH
2181 (vec_duplicate:V16QI
2182 (vec_select:QI (match_operand:V16QI 1 "register_operand" "v")
2183 (parallel
a9f6e019 2184 [(match_operand:QI 2 "const_0_to_15_operand" "")]))))]
10ed84db 2185 "TARGET_ALTIVEC"
bf53d4b8 2186{
bf53d4b8
BS
2187 if (!BYTES_BIG_ENDIAN)
2188 operands[2] = GEN_INT (15 - INTVAL (operands[2]));
2189
2190 return "vspltb %0,%1,%2";
2191}
2192 [(set_attr "type" "vecperm")])
2193
2194(define_insn "altivec_vspltb_direct"
2195 [(set (match_operand:V16QI 0 "register_operand" "=v")
2196 (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
a9f6e019 2197 (match_operand:QI 2 "const_0_to_15_operand" "i")]
bf53d4b8
BS
2198 UNSPEC_VSPLT_DIRECT))]
2199 "TARGET_ALTIVEC"
10ed84db
AH
2200 "vspltb %0,%1,%2"
2201 [(set_attr "type" "vecperm")])
2202
bf53d4b8 2203(define_expand "altivec_vsplth"
ad18eed2
SB
2204 [(use (match_operand:V8HI 0 "register_operand"))
2205 (use (match_operand:V8HI 1 "register_operand"))
a9f6e019 2206 (use (match_operand:QI 2 "const_0_to_7_operand"))]
bf53d4b8
BS
2207 "TARGET_ALTIVEC"
2208{
427a7384 2209 rtvec v = gen_rtvec (1, operands[2]);
bf53d4b8 2210 rtx x;
bf53d4b8
BS
2211 x = gen_rtx_VEC_SELECT (HImode, operands[1], gen_rtx_PARALLEL (VOIDmode, v));
2212 x = gen_rtx_VEC_DUPLICATE (V8HImode, x);
f7df4a84 2213 emit_insn (gen_rtx_SET (operands[0], x));
bf53d4b8
BS
2214 DONE;
2215})
2216
2217(define_insn "*altivec_vsplth_internal"
10ed84db 2218 [(set (match_operand:V8HI 0 "register_operand" "=v")
e5f9d916
AH
2219 (vec_duplicate:V8HI
2220 (vec_select:HI (match_operand:V8HI 1 "register_operand" "v")
2221 (parallel
a9f6e019 2222 [(match_operand:QI 2 "const_0_to_7_operand" "")]))))]
10ed84db 2223 "TARGET_ALTIVEC"
bf53d4b8 2224{
bf53d4b8
BS
2225 if (!BYTES_BIG_ENDIAN)
2226 operands[2] = GEN_INT (7 - INTVAL (operands[2]));
2227
2228 return "vsplth %0,%1,%2";
2229}
2230 [(set_attr "type" "vecperm")])
2231
2232(define_insn "altivec_vsplth_direct"
2233 [(set (match_operand:V8HI 0 "register_operand" "=v")
2234 (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
a9f6e019 2235 (match_operand:QI 2 "const_0_to_7_operand" "i")]
bf53d4b8
BS
2236 UNSPEC_VSPLT_DIRECT))]
2237 "TARGET_ALTIVEC"
10ed84db
AH
2238 "vsplth %0,%1,%2"
2239 [(set_attr "type" "vecperm")])
2240
bf53d4b8 2241(define_expand "altivec_vspltw"
ad18eed2
SB
2242 [(use (match_operand:V4SI 0 "register_operand"))
2243 (use (match_operand:V4SI 1 "register_operand"))
a9f6e019 2244 (use (match_operand:QI 2 "const_0_to_3_operand"))]
bf53d4b8
BS
2245 "TARGET_ALTIVEC"
2246{
427a7384 2247 rtvec v = gen_rtvec (1, operands[2]);
bf53d4b8 2248 rtx x;
bf53d4b8
BS
2249 x = gen_rtx_VEC_SELECT (SImode, operands[1], gen_rtx_PARALLEL (VOIDmode, v));
2250 x = gen_rtx_VEC_DUPLICATE (V4SImode, x);
f7df4a84 2251 emit_insn (gen_rtx_SET (operands[0], x));
bf53d4b8
BS
2252 DONE;
2253})
2254
2255(define_insn "*altivec_vspltw_internal"
10ed84db 2256 [(set (match_operand:V4SI 0 "register_operand" "=v")
e5f9d916
AH
2257 (vec_duplicate:V4SI
2258 (vec_select:SI (match_operand:V4SI 1 "register_operand" "v")
2259 (parallel
a9f6e019 2260 [(match_operand:QI 2 "const_0_to_3_operand" "i")]))))]
10ed84db 2261 "TARGET_ALTIVEC"
bf53d4b8 2262{
bf53d4b8
BS
2263 if (!BYTES_BIG_ENDIAN)
2264 operands[2] = GEN_INT (3 - INTVAL (operands[2]));
2265
2266 return "vspltw %0,%1,%2";
2267}
2268 [(set_attr "type" "vecperm")])
2269
2270(define_insn "altivec_vspltw_direct"
2271 [(set (match_operand:V4SI 0 "register_operand" "=v")
2272 (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
a9f6e019 2273 (match_operand:QI 2 "const_0_to_3_operand" "i")]
bf53d4b8
BS
2274 UNSPEC_VSPLT_DIRECT))]
2275 "TARGET_ALTIVEC"
10ed84db
AH
2276 "vspltw %0,%1,%2"
2277 [(set_attr "type" "vecperm")])
2278
bf53d4b8 2279(define_expand "altivec_vspltsf"
ad18eed2
SB
2280 [(use (match_operand:V4SF 0 "register_operand"))
2281 (use (match_operand:V4SF 1 "register_operand"))
a9f6e019 2282 (use (match_operand:QI 2 "const_0_to_3_operand"))]
bf53d4b8
BS
2283 "TARGET_ALTIVEC"
2284{
427a7384 2285 rtvec v = gen_rtvec (1, operands[2]);
bf53d4b8 2286 rtx x;
bf53d4b8
BS
2287 x = gen_rtx_VEC_SELECT (SFmode, operands[1], gen_rtx_PARALLEL (VOIDmode, v));
2288 x = gen_rtx_VEC_DUPLICATE (V4SFmode, x);
f7df4a84 2289 emit_insn (gen_rtx_SET (operands[0], x));
bf53d4b8
BS
2290 DONE;
2291})
2292
2293(define_insn "*altivec_vspltsf_internal"
3aca4bff
DE
2294 [(set (match_operand:V4SF 0 "register_operand" "=v")
2295 (vec_duplicate:V4SF
2296 (vec_select:SF (match_operand:V4SF 1 "register_operand" "v")
2297 (parallel
a9f6e019 2298 [(match_operand:QI 2 "const_0_to_3_operand" "i")]))))]
a72c65c7 2299 "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
bf53d4b8 2300{
bf53d4b8
BS
2301 if (!BYTES_BIG_ENDIAN)
2302 operands[2] = GEN_INT (3 - INTVAL (operands[2]));
2303
2304 return "vspltw %0,%1,%2";
2305}
3aca4bff
DE
2306 [(set_attr "type" "vecperm")])
2307
27ffac37
PB
2308(define_insn "altivec_vspltis<VI_char>"
2309 [(set (match_operand:VI 0 "register_operand" "=v")
2310 (vec_duplicate:VI
afca671b 2311 (match_operand:QI 1 "s5bit_cint_operand" "i")))]
10ed84db 2312 "TARGET_ALTIVEC"
27ffac37 2313 "vspltis<VI_char> %0,%1"
310b9b1d 2314 [(set_attr "type" "vecperm")])
10ed84db 2315
29e6733c 2316(define_insn "*altivec_vrfiz"
10ed84db 2317 [(set (match_operand:V4SF 0 "register_operand" "=v")
5aebfdad 2318 (fix:V4SF (match_operand:V4SF 1 "register_operand" "v")))]
a72c65c7 2319 "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
98c9a8e8 2320 "vrfiz %0,%1"
10ed84db
AH
2321 [(set_attr "type" "vecfloat")])
2322
60331d00 2323(define_expand "altivec_vperm_<mode>"
ad18eed2
SB
2324 [(set (match_operand:VM 0 "register_operand")
2325 (unspec:VM [(match_operand:VM 1 "register_operand")
2326 (match_operand:VM 2 "register_operand")
2327 (match_operand:V16QI 3 "register_operand")]
60331d00
BS
2328 UNSPEC_VPERM))]
2329 "TARGET_ALTIVEC"
2330{
427a7384 2331 if (!BYTES_BIG_ENDIAN)
60331d00
BS
2332 {
2333 altivec_expand_vec_perm_le (operands);
2334 DONE;
2335 }
2336})
2337
4e8a3a35 2338;; Slightly prefer vperm, since the target does not overlap the source
3ef9e1ec 2339(define_insn "altivec_vperm_<mode>_direct"
ee969a36
SB
2340 [(set (match_operand:VM 0 "register_operand" "=?wa,v")
2341 (unspec:VM [(match_operand:VM 1 "register_operand" "wa,v")
2342 (match_operand:VM 2 "register_operand" "0,v")
2343 (match_operand:V16QI 3 "register_operand" "wa,v")]
a72c65c7
MM
2344 UNSPEC_VPERM))]
2345 "TARGET_ALTIVEC"
4e8a3a35 2346 "@
ee969a36
SB
2347 xxperm %x0,%x1,%x3
2348 vperm %0,%1,%2,%3"
afc69d4e 2349 [(set_attr "type" "vecperm")
ee969a36 2350 (set_attr "isa" "p9v,*")])
a72c65c7 2351
010f20ee 2352(define_insn "altivec_vperm_v8hiv16qi"
ee969a36
SB
2353 [(set (match_operand:V16QI 0 "register_operand" "=?wa,v")
2354 (unspec:V16QI [(match_operand:V8HI 1 "register_operand" "wa,v")
2355 (match_operand:V8HI 2 "register_operand" "0,v")
2356 (match_operand:V16QI 3 "register_operand" "wa,v")]
010f20ee
BS
2357 UNSPEC_VPERM))]
2358 "TARGET_ALTIVEC"
4e8a3a35 2359 "@
ee969a36
SB
2360 xxperm %x0,%x1,%x3
2361 vperm %0,%1,%2,%3"
afc69d4e 2362 [(set_attr "type" "vecperm")
ee969a36 2363 (set_attr "isa" "p9v,*")])
010f20ee 2364
60331d00 2365(define_expand "altivec_vperm_<mode>_uns"
ad18eed2
SB
2366 [(set (match_operand:VM 0 "register_operand")
2367 (unspec:VM [(match_operand:VM 1 "register_operand")
2368 (match_operand:VM 2 "register_operand")
2369 (match_operand:V16QI 3 "register_operand")]
60331d00
BS
2370 UNSPEC_VPERM_UNS))]
2371 "TARGET_ALTIVEC"
2372{
427a7384 2373 if (!BYTES_BIG_ENDIAN)
60331d00
BS
2374 {
2375 altivec_expand_vec_perm_le (operands);
2376 DONE;
2377 }
2378})
2379
2380(define_insn "*altivec_vperm_<mode>_uns_internal"
ee969a36
SB
2381 [(set (match_operand:VM 0 "register_operand" "=?wa,v")
2382 (unspec:VM [(match_operand:VM 1 "register_operand" "wa,v")
2383 (match_operand:VM 2 "register_operand" "0,v")
2384 (match_operand:V16QI 3 "register_operand" "wa,v")]
a72c65c7 2385 UNSPEC_VPERM_UNS))]
10ed84db 2386 "TARGET_ALTIVEC"
4e8a3a35 2387 "@
ee969a36
SB
2388 xxperm %x0,%x1,%x3
2389 vperm %0,%1,%2,%3"
afc69d4e 2390 [(set_attr "type" "vecperm")
ee969a36 2391 (set_attr "isa" "p9v,*")])
10ed84db 2392
cba86444 2393(define_expand "vec_permv16qi"
ad18eed2
SB
2394 [(set (match_operand:V16QI 0 "register_operand")
2395 (unspec:V16QI [(match_operand:V16QI 1 "register_operand")
2396 (match_operand:V16QI 2 "register_operand")
2397 (match_operand:V16QI 3 "register_operand")]
cba86444
RH
2398 UNSPEC_VPERM))]
2399 "TARGET_ALTIVEC"
f200869a
BS
2400{
2401 if (!BYTES_BIG_ENDIAN) {
2402 altivec_expand_vec_perm_le (operands);
2403 DONE;
2404 }
2405})
cba86444 2406
fe3f3340 2407(define_insn "*altivec_vpermr_<mode>_internal"
ee969a36
SB
2408 [(set (match_operand:VM 0 "register_operand" "=?wa,v")
2409 (unspec:VM [(match_operand:VM 1 "register_operand" "wa,v")
2410 (match_operand:VM 2 "register_operand" "0,v")
2411 (match_operand:V16QI 3 "register_operand" "wa,v")]
fe3f3340
MM
2412 UNSPEC_VPERMR))]
2413 "TARGET_P9_VECTOR"
2414 "@
ee969a36
SB
2415 xxpermr %x0,%x1,%x3
2416 vpermr %0,%1,%2,%3"
afc69d4e 2417 [(set_attr "type" "vecperm")
ee969a36 2418 (set_attr "isa" "p9v,*")])
fe3f3340 2419
29e6733c 2420(define_insn "altivec_vrfip" ; ceil
10ed84db 2421 [(set (match_operand:V4SF 0 "register_operand" "=v")
3aca4bff 2422 (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")]
29e6733c 2423 UNSPEC_FRIP))]
10ed84db 2424 "TARGET_ALTIVEC"
98c9a8e8 2425 "vrfip %0,%1"
10ed84db
AH
2426 [(set_attr "type" "vecfloat")])
2427
2428(define_insn "altivec_vrfin"
2429 [(set (match_operand:V4SF 0 "register_operand" "=v")
3aca4bff
DE
2430 (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")]
2431 UNSPEC_VRFIN))]
10ed84db 2432 "TARGET_ALTIVEC"
98c9a8e8 2433 "vrfin %0,%1"
10ed84db
AH
2434 [(set_attr "type" "vecfloat")])
2435
29e6733c 2436(define_insn "*altivec_vrfim" ; floor
10ed84db 2437 [(set (match_operand:V4SF 0 "register_operand" "=v")
3aca4bff 2438 (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")]
29e6733c 2439 UNSPEC_FRIM))]
10ed84db 2440 "TARGET_ALTIVEC"
98c9a8e8 2441 "vrfim %0,%1"
10ed84db
AH
2442 [(set_attr "type" "vecfloat")])
2443
2444(define_insn "altivec_vcfux"
2445 [(set (match_operand:V4SF 0 "register_operand" "=v")
2446 (unspec:V4SF [(match_operand:V4SI 1 "register_operand" "v")
3aca4bff
DE
2447 (match_operand:QI 2 "immediate_operand" "i")]
2448 UNSPEC_VCFUX))]
10ed84db 2449 "TARGET_ALTIVEC"
98c9a8e8 2450 "vcfux %0,%1,%2"
10ed84db
AH
2451 [(set_attr "type" "vecfloat")])
2452
2453(define_insn "altivec_vcfsx"
2454 [(set (match_operand:V4SF 0 "register_operand" "=v")
2455 (unspec:V4SF [(match_operand:V4SI 1 "register_operand" "v")
3aca4bff
DE
2456 (match_operand:QI 2 "immediate_operand" "i")]
2457 UNSPEC_VCFSX))]
10ed84db 2458 "TARGET_ALTIVEC"
98c9a8e8 2459 "vcfsx %0,%1,%2"
10ed84db
AH
2460 [(set_attr "type" "vecfloat")])
2461
2462(define_insn "altivec_vctuxs"
2463 [(set (match_operand:V4SI 0 "register_operand" "=v")
2464 (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "v")
3aca4bff
DE
2465 (match_operand:QI 2 "immediate_operand" "i")]
2466 UNSPEC_VCTUXS))
3b2475ab 2467 (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
10ed84db 2468 "TARGET_ALTIVEC"
98c9a8e8 2469 "vctuxs %0,%1,%2"
10ed84db
AH
2470 [(set_attr "type" "vecfloat")])
2471
2472(define_insn "altivec_vctsxs"
2473 [(set (match_operand:V4SI 0 "register_operand" "=v")
2474 (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "v")
3aca4bff
DE
2475 (match_operand:QI 2 "immediate_operand" "i")]
2476 UNSPEC_VCTSXS))
3b2475ab 2477 (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
10ed84db 2478 "TARGET_ALTIVEC"
98c9a8e8 2479 "vctsxs %0,%1,%2"
10ed84db
AH
2480 [(set_attr "type" "vecfloat")])
2481
2482(define_insn "altivec_vlogefp"
2483 [(set (match_operand:V4SF 0 "register_operand" "=v")
3aca4bff
DE
2484 (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")]
2485 UNSPEC_VLOGEFP))]
10ed84db 2486 "TARGET_ALTIVEC"
98c9a8e8 2487 "vlogefp %0,%1"
10ed84db
AH
2488 [(set_attr "type" "vecfloat")])
2489
2490(define_insn "altivec_vexptefp"
2491 [(set (match_operand:V4SF 0 "register_operand" "=v")
3aca4bff
DE
2492 (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")]
2493 UNSPEC_VEXPTEFP))]
10ed84db 2494 "TARGET_ALTIVEC"
98c9a8e8 2495 "vexptefp %0,%1"
10ed84db
AH
2496 [(set_attr "type" "vecfloat")])
2497
92902797 2498(define_insn "*altivec_vrsqrtefp"
10ed84db 2499 [(set (match_operand:V4SF 0 "register_operand" "=v")
3aca4bff 2500 (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")]
92902797
MM
2501 UNSPEC_RSQRT))]
2502 "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
98c9a8e8 2503 "vrsqrtefp %0,%1"
10ed84db
AH
2504 [(set_attr "type" "vecfloat")])
2505
2506(define_insn "altivec_vrefp"
2507 [(set (match_operand:V4SF 0 "register_operand" "=v")
3aca4bff 2508 (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")]
92902797
MM
2509 UNSPEC_FRES))]
2510 "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
98c9a8e8 2511 "vrefp %0,%1"
10ed84db
AH
2512 [(set_attr "type" "vecfloat")])
2513
29e6733c 2514(define_expand "altivec_copysign_v4sf3"
ad18eed2
SB
2515 [(use (match_operand:V4SF 0 "register_operand"))
2516 (use (match_operand:V4SF 1 "register_operand"))
2517 (use (match_operand:V4SF 2 "register_operand"))]
29e6733c 2518 "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
29e6733c
MM
2519{
2520 rtx mask = gen_reg_rtx (V4SImode);
fa87544c
RS
2521 rtx mask_val = gen_int_mode (HOST_WIDE_INT_1U << 31, SImode);
2522 rtvec v = gen_rtvec (4, mask_val, mask_val, mask_val, mask_val);
29e6733c 2523
ff03930a 2524 emit_insn (gen_vec_initv4sisi (mask, gen_rtx_PARALLEL (V4SImode, v)));
29e6733c
MM
2525 emit_insn (gen_vector_select_v4sf (operands[0], operands[1], operands[2],
2526 gen_lowpart (V4SFmode, mask)));
2527 DONE;
6c332313 2528})
29e6733c 2529
3aca4bff 2530(define_insn "altivec_vsldoi_<mode>"
a72c65c7
MM
2531 [(set (match_operand:VM 0 "register_operand" "=v")
2532 (unspec:VM [(match_operand:VM 1 "register_operand" "v")
2533 (match_operand:VM 2 "register_operand" "v")
2534 (match_operand:QI 3 "immediate_operand" "i")]
fbd86cc6 2535 UNSPEC_VSLDOI))]
10ed84db 2536 "TARGET_ALTIVEC"
98c9a8e8 2537 "vsldoi %0,%1,%2,%3"
10ed84db
AH
2538 [(set_attr "type" "vecperm")])
2539
a5965b52
MM
2540(define_insn "altivec_vupkhs<VU_char>"
2541 [(set (match_operand:VP 0 "register_operand" "=v")
2542 (unspec:VP [(match_operand:<VP_small> 1 "register_operand" "v")]
2543 UNSPEC_VUNPACK_HI_SIGN))]
2544 "<VI_unit>"
52a93551 2545{
427a7384 2546 if (BYTES_BIG_ENDIAN)
52a93551
BS
2547 return "vupkhs<VU_char> %0,%1";
2548 else
2549 return "vupkls<VU_char> %0,%1";
2550}
2551 [(set_attr "type" "vecperm")])
2552
f3d87219 2553(define_insn "altivec_vupkhs<VU_char>_direct"
52a93551
BS
2554 [(set (match_operand:VP 0 "register_operand" "=v")
2555 (unspec:VP [(match_operand:<VP_small> 1 "register_operand" "v")]
2556 UNSPEC_VUNPACK_HI_SIGN_DIRECT))]
2557 "<VI_unit>"
a5965b52
MM
2558 "vupkhs<VU_char> %0,%1"
2559 [(set_attr "type" "vecperm")])
2560
2561(define_insn "altivec_vupkls<VU_char>"
2562 [(set (match_operand:VP 0 "register_operand" "=v")
2563 (unspec:VP [(match_operand:<VP_small> 1 "register_operand" "v")]
2564 UNSPEC_VUNPACK_LO_SIGN))]
2565 "<VI_unit>"
52a93551 2566{
427a7384 2567 if (BYTES_BIG_ENDIAN)
52a93551
BS
2568 return "vupkls<VU_char> %0,%1";
2569 else
2570 return "vupkhs<VU_char> %0,%1";
2571}
2572 [(set_attr "type" "vecperm")])
2573
2574(define_insn "*altivec_vupkls<VU_char>_direct"
2575 [(set (match_operand:VP 0 "register_operand" "=v")
2576 (unspec:VP [(match_operand:<VP_small> 1 "register_operand" "v")]
2577 UNSPEC_VUNPACK_LO_SIGN_DIRECT))]
2578 "<VI_unit>"
a5965b52 2579 "vupkls<VU_char> %0,%1"
10ed84db
AH
2580 [(set_attr "type" "vecperm")])
2581
2582(define_insn "altivec_vupkhpx"
2583 [(set (match_operand:V4SI 0 "register_operand" "=v")
5aebfdad 2584 (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")]
3aca4bff 2585 UNSPEC_VUPKHPX))]
10ed84db 2586 "TARGET_ALTIVEC"
52a93551 2587{
427a7384 2588 if (BYTES_BIG_ENDIAN)
52a93551
BS
2589 return "vupkhpx %0,%1";
2590 else
2591 return "vupklpx %0,%1";
2592}
10ed84db
AH
2593 [(set_attr "type" "vecperm")])
2594
10ed84db
AH
2595(define_insn "altivec_vupklpx"
2596 [(set (match_operand:V4SI 0 "register_operand" "=v")
5aebfdad 2597 (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")]
3aca4bff 2598 UNSPEC_VUPKLPX))]
10ed84db 2599 "TARGET_ALTIVEC"
52a93551 2600{
427a7384 2601 if (BYTES_BIG_ENDIAN)
52a93551
BS
2602 return "vupklpx %0,%1";
2603 else
2604 return "vupkhpx %0,%1";
2605}
10ed84db
AH
2606 [(set_attr "type" "vecperm")])
2607
cd295a80
HG
2608/* The cbranch_optab doesn't allow FAIL, so old cpus which are
2609 inefficient on unaligned vsx are disabled as the cost is high
2610 for unaligned load/store. */
2611(define_expand "cbranchv16qi4"
2612 [(use (match_operator 0 "equality_operator"
2613 [(match_operand:V16QI 1 "reg_or_mem_operand")
2614 (match_operand:V16QI 2 "reg_or_mem_operand")]))
2615 (use (match_operand 3))]
2616 "VECTOR_MEM_VSX_P (V16QImode)
2617 && TARGET_EFFICIENT_UNALIGNED_VSX"
2618{
2619 /* Use direct move for P8 LE to skip doubleword swap, as the byte
2620 order doesn't matter for equality compare. If any operands are
2621 altivec indexed or indirect operands, the load can be implemented
2622 directly by altivec aligned load instruction and swap is no
2623 need. */
2624 if (!TARGET_P9_VECTOR
2625 && !BYTES_BIG_ENDIAN
2626 && MEM_P (operands[1])
2627 && !altivec_indexed_or_indirect_operand (operands[1], V16QImode)
2628 && MEM_P (operands[2])
2629 && !altivec_indexed_or_indirect_operand (operands[2], V16QImode))
2630 {
2631 rtx reg_op1 = gen_reg_rtx (V16QImode);
2632 rtx reg_op2 = gen_reg_rtx (V16QImode);
2633 rs6000_emit_le_vsx_permute (reg_op1, operands[1], V16QImode);
2634 rs6000_emit_le_vsx_permute (reg_op2, operands[2], V16QImode);
2635 operands[1] = reg_op1;
2636 operands[2] = reg_op2;
2637 }
2638 else
2639 {
2640 operands[1] = force_reg (V16QImode, operands[1]);
2641 operands[2] = force_reg (V16QImode, operands[2]);
2642 }
2643
2644 rtx_code code = GET_CODE (operands[0]);
2645 operands[0] = gen_rtx_fmt_ee (code, V16QImode, operands[1], operands[2]);
2646 rs6000_emit_cbranch (V16QImode, operands);
2647 DONE;
2648})
2649
a72c65c7
MM
2650;; Compare vectors producing a vector result and a predicate, setting CR6 to
2651;; indicate a combined status
9d36bd3b 2652(define_insn "altivec_vcmpequ<VI_char>_p"
b65261f6 2653 [(set (reg:CC CR6_REGNO)
a5965b52
MM
2654 (unspec:CC [(eq:CC (match_operand:VI2 1 "register_operand" "v")
2655 (match_operand:VI2 2 "register_operand" "v"))]
a72c65c7 2656 UNSPEC_PREDICATE))
a5965b52
MM
2657 (set (match_operand:VI2 0 "register_operand" "=v")
2658 (eq:VI2 (match_dup 1)
2659 (match_dup 2)))]
2660 "<VI_unit>"
a72c65c7 2661 "vcmpequ<VI_char>. %0,%1,%2"
7c788ce2 2662 [(set_attr "type" "veccmpfx")])
10ed84db 2663
f03122f2
CL
2664(define_insn "altivec_vcmpequt_p"
2665 [(set (reg:CC CR6_REGNO)
2666 (unspec:CC [(eq:CC (match_operand:V1TI 1 "altivec_register_operand" "v")
2667 (match_operand:V1TI 2 "altivec_register_operand" "v"))]
2668 UNSPEC_PREDICATE))
2669 (set (match_operand:V1TI 0 "altivec_register_operand" "=v")
2670 (eq:V1TI (match_dup 1)
2671 (match_dup 2)))]
2672 "TARGET_POWER10"
2673 "vcmpequq. %0,%1,%2"
2674 [(set_attr "type" "veccmpfx")])
2675
29e2bc5f
CL
2676;; Expand for builtin vcmpne{b,h,w}
2677(define_expand "altivec_vcmpne_<mode>"
2678 [(set (match_operand:VSX_EXTRACT_I 3 "altivec_register_operand" "=v")
2679 (eq:VSX_EXTRACT_I (match_operand:VSX_EXTRACT_I 1 "altivec_register_operand" "v")
2680 (match_operand:VSX_EXTRACT_I 2 "altivec_register_operand" "v")))
2681 (set (match_operand:VSX_EXTRACT_I 0 "altivec_register_operand" "=v")
2682 (not:VSX_EXTRACT_I (match_dup 3)))]
2683 "TARGET_ALTIVEC"
2684 {
2685 operands[3] = gen_reg_rtx (GET_MODE (operands[0]));
2686 })
2687
a72c65c7 2688(define_insn "*altivec_vcmpgts<VI_char>_p"
b65261f6 2689 [(set (reg:CC CR6_REGNO)
a5965b52
MM
2690 (unspec:CC [(gt:CC (match_operand:VI2 1 "register_operand" "v")
2691 (match_operand:VI2 2 "register_operand" "v"))]
a72c65c7 2692 UNSPEC_PREDICATE))
a5965b52
MM
2693 (set (match_operand:VI2 0 "register_operand" "=v")
2694 (gt:VI2 (match_dup 1)
2695 (match_dup 2)))]
2696 "<VI_unit>"
a72c65c7 2697 "vcmpgts<VI_char>. %0,%1,%2"
7c788ce2 2698 [(set_attr "type" "veccmpfx")])
10ed84db 2699
f03122f2
CL
2700(define_insn "*altivec_vcmpgtst_p"
2701 [(set (reg:CC CR6_REGNO)
2702 (unspec:CC [(gt:CC (match_operand:V1TI 1 "register_operand" "v")
2703 (match_operand:V1TI 2 "register_operand" "v"))]
2704 UNSPEC_PREDICATE))
2705 (set (match_operand:V1TI 0 "register_operand" "=v")
2706 (gt:V1TI (match_dup 1)
2707 (match_dup 2)))]
2708 "TARGET_POWER10"
2709 "vcmpgtsq. %0,%1,%2"
2710 [(set_attr "type" "veccmpfx")])
2711
a72c65c7 2712(define_insn "*altivec_vcmpgtu<VI_char>_p"
b65261f6 2713 [(set (reg:CC CR6_REGNO)
a5965b52
MM
2714 (unspec:CC [(gtu:CC (match_operand:VI2 1 "register_operand" "v")
2715 (match_operand:VI2 2 "register_operand" "v"))]
a72c65c7 2716 UNSPEC_PREDICATE))
a5965b52
MM
2717 (set (match_operand:VI2 0 "register_operand" "=v")
2718 (gtu:VI2 (match_dup 1)
2719 (match_dup 2)))]
2720 "<VI_unit>"
a72c65c7 2721 "vcmpgtu<VI_char>. %0,%1,%2"
7c788ce2 2722 [(set_attr "type" "veccmpfx")])
10ed84db 2723
f03122f2
CL
2724(define_insn "*altivec_vcmpgtut_p"
2725 [(set (reg:CC CR6_REGNO)
2726 (unspec:CC [(gtu:CC (match_operand:V1TI 1 "register_operand" "v")
2727 (match_operand:V1TI 2 "register_operand" "v"))]
2728 UNSPEC_PREDICATE))
2729 (set (match_operand:V1TI 0 "register_operand" "=v")
2730 (gtu:V1TI (match_dup 1)
2731 (match_dup 2)))]
2732 "TARGET_POWER10"
2733 "vcmpgtuq. %0,%1,%2"
2734 [(set_attr "type" "veccmpfx")])
2735
a72c65c7 2736(define_insn "*altivec_vcmpeqfp_p"
b65261f6 2737 [(set (reg:CC CR6_REGNO)
a72c65c7
MM
2738 (unspec:CC [(eq:CC (match_operand:V4SF 1 "register_operand" "v")
2739 (match_operand:V4SF 2 "register_operand" "v"))]
2740 UNSPEC_PREDICATE))
2741 (set (match_operand:V4SF 0 "register_operand" "=v")
2742 (eq:V4SF (match_dup 1)
2743 (match_dup 2)))]
2744 "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
2745 "vcmpeqfp. %0,%1,%2"
2746 [(set_attr "type" "veccmp")])
10ed84db 2747
a72c65c7 2748(define_insn "*altivec_vcmpgtfp_p"
b65261f6 2749 [(set (reg:CC CR6_REGNO)
a72c65c7
MM
2750 (unspec:CC [(gt:CC (match_operand:V4SF 1 "register_operand" "v")
2751 (match_operand:V4SF 2 "register_operand" "v"))]
2752 UNSPEC_PREDICATE))
2753 (set (match_operand:V4SF 0 "register_operand" "=v")
2754 (gt:V4SF (match_dup 1)
2755 (match_dup 2)))]
2756 "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
2757 "vcmpgtfp. %0,%1,%2"
2758 [(set_attr "type" "veccmp")])
10ed84db 2759
a72c65c7 2760(define_insn "*altivec_vcmpgefp_p"
b65261f6 2761 [(set (reg:CC CR6_REGNO)
a72c65c7
MM
2762 (unspec:CC [(ge:CC (match_operand:V4SF 1 "register_operand" "v")
2763 (match_operand:V4SF 2 "register_operand" "v"))]
2764 UNSPEC_PREDICATE))
2765 (set (match_operand:V4SF 0 "register_operand" "=v")
2766 (ge:V4SF (match_dup 1)
2767 (match_dup 2)))]
2768 "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
2769 "vcmpgefp. %0,%1,%2"
2770 [(set_attr "type" "veccmp")])
10ed84db 2771
a72c65c7 2772(define_insn "altivec_vcmpbfp_p"
b65261f6 2773 [(set (reg:CC CR6_REGNO)
a72c65c7
MM
2774 (unspec:CC [(match_operand:V4SF 1 "register_operand" "v")
2775 (match_operand:V4SF 2 "register_operand" "v")]
2776 UNSPEC_VCMPBFP))
2777 (set (match_operand:V4SF 0 "register_operand" "=v")
2778 (unspec:V4SF [(match_dup 1)
2779 (match_dup 2)]
2780 UNSPEC_VCMPBFP))]
2781 "VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)"
2782 "vcmpbfp. %0,%1,%2"
2783 [(set_attr "type" "veccmp")])
10ed84db
AH
2784
2785(define_insn "altivec_mtvscr"
3b2475ab 2786 [(set (reg:SI VSCR_REGNO)
10ed84db 2787 (unspec_volatile:SI
3aca4bff 2788 [(match_operand:V4SI 0 "register_operand" "v")] UNSPECV_MTVSCR))]
10ed84db
AH
2789 "TARGET_ALTIVEC"
2790 "mtvscr %0"
2791 [(set_attr "type" "vecsimple")])
2792
2793(define_insn "altivec_mfvscr"
2794 [(set (match_operand:V8HI 0 "register_operand" "=v")
3b2475ab 2795 (unspec_volatile:V8HI [(reg:SI VSCR_REGNO)] UNSPECV_MFVSCR))]
10ed84db
AH
2796 "TARGET_ALTIVEC"
2797 "mfvscr %0"
2798 [(set_attr "type" "vecsimple")])
2799
2800(define_insn "altivec_dssall"
3aca4bff 2801 [(unspec_volatile [(const_int 0)] UNSPECV_DSSALL)]
10ed84db
AH
2802 "TARGET_ALTIVEC"
2803 "dssall"
2804 [(set_attr "type" "vecsimple")])
2805
2806(define_insn "altivec_dss"
3aca4bff
DE
2807 [(unspec_volatile [(match_operand:QI 0 "immediate_operand" "i")]
2808 UNSPECV_DSS)]
10ed84db
AH
2809 "TARGET_ALTIVEC"
2810 "dss %0"
2811 [(set_attr "type" "vecsimple")])
2812
2813(define_insn "altivec_dst"
666158b9 2814 [(unspec [(match_operand 0 "register_operand" "b")
10ed84db 2815 (match_operand:SI 1 "register_operand" "r")
3aca4bff 2816 (match_operand:QI 2 "immediate_operand" "i")] UNSPEC_DST)]
666158b9
GK
2817 "TARGET_ALTIVEC && GET_MODE (operands[0]) == Pmode"
2818 "dst %0,%1,%2"
10ed84db
AH
2819 [(set_attr "type" "vecsimple")])
2820
2821(define_insn "altivec_dstt"
666158b9 2822 [(unspec [(match_operand 0 "register_operand" "b")
10ed84db 2823 (match_operand:SI 1 "register_operand" "r")
3aca4bff 2824 (match_operand:QI 2 "immediate_operand" "i")] UNSPEC_DSTT)]
666158b9
GK
2825 "TARGET_ALTIVEC && GET_MODE (operands[0]) == Pmode"
2826 "dstt %0,%1,%2"
10ed84db
AH
2827 [(set_attr "type" "vecsimple")])
2828
2829(define_insn "altivec_dstst"
666158b9 2830 [(unspec [(match_operand 0 "register_operand" "b")
10ed84db 2831 (match_operand:SI 1 "register_operand" "r")
3aca4bff 2832 (match_operand:QI 2 "immediate_operand" "i")] UNSPEC_DSTST)]
666158b9
GK
2833 "TARGET_ALTIVEC && GET_MODE (operands[0]) == Pmode"
2834 "dstst %0,%1,%2"
10ed84db
AH
2835 [(set_attr "type" "vecsimple")])
2836
2837(define_insn "altivec_dststt"
666158b9 2838 [(unspec [(match_operand 0 "register_operand" "b")
10ed84db 2839 (match_operand:SI 1 "register_operand" "r")
3aca4bff 2840 (match_operand:QI 2 "immediate_operand" "i")] UNSPEC_DSTSTT)]
666158b9
GK
2841 "TARGET_ALTIVEC && GET_MODE (operands[0]) == Pmode"
2842 "dststt %0,%1,%2"
10ed84db
AH
2843 [(set_attr "type" "vecsimple")])
2844
0b4718c9 2845(define_expand "altivec_lvsl"
ad18eed2
SB
2846 [(use (match_operand:V16QI 0 "register_operand"))
2847 (use (match_operand:V16QI 1 "memory_operand"))]
0b4718c9
BS
2848 "TARGET_ALTIVEC"
2849{
427a7384 2850 if (BYTES_BIG_ENDIAN)
0b4718c9
BS
2851 emit_insn (gen_altivec_lvsl_direct (operands[0], operands[1]));
2852 else
2853 {
ef339d6e 2854 rtx mask, constv, vperm;
0b4718c9
BS
2855 mask = gen_reg_rtx (V16QImode);
2856 emit_insn (gen_altivec_lvsl_direct (mask, operands[1]));
ef339d6e 2857 constv = gen_const_vec_series (V16QImode, const0_rtx, const1_rtx);
0b4718c9
BS
2858 constv = force_reg (V16QImode, constv);
2859 vperm = gen_rtx_UNSPEC (V16QImode, gen_rtvec (3, mask, mask, constv),
2860 UNSPEC_VPERM);
f7df4a84 2861 emit_insn (gen_rtx_SET (operands[0], vperm));
0b4718c9
BS
2862 }
2863 DONE;
2864})
2865
f64b9156 2866(define_insn "altivec_lvsl_reg_<mode>"
1262c6cf
CL
2867 [(set (match_operand:V16QI 0 "altivec_register_operand" "=v")
2868 (unspec:V16QI
f64b9156 2869 [(match_operand:GPR 1 "gpc_reg_operand" "b")]
1262c6cf
CL
2870 UNSPEC_LVSL_REG))]
2871 "TARGET_ALTIVEC"
2872 "lvsl %0,0,%1"
2873 [(set_attr "type" "vecload")])
2874
0b4718c9 2875(define_insn "altivec_lvsl_direct"
10ed84db 2876 [(set (match_operand:V16QI 0 "register_operand" "=v")
1ba24090
SB
2877 (unspec:V16QI [(match_operand:V16QI 1 "memory_operand" "Z")]
2878 UNSPEC_LVSL))]
10ed84db 2879 "TARGET_ALTIVEC"
b4a62fa0 2880 "lvsl %0,%y1"
10ed84db
AH
2881 [(set_attr "type" "vecload")])
2882
0b4718c9 2883(define_expand "altivec_lvsr"
1262c6cf
CL
2884 [(use (match_operand:V16QI 0 "altivec_register_operand"))
2885 (use (match_operand:V16QI 1 "memory_operand"))]
0b4718c9
BS
2886 "TARGET_ALTIVEC"
2887{
427a7384 2888 if (BYTES_BIG_ENDIAN)
0b4718c9
BS
2889 emit_insn (gen_altivec_lvsr_direct (operands[0], operands[1]));
2890 else
2891 {
ef339d6e 2892 rtx mask, constv, vperm;
0b4718c9
BS
2893 mask = gen_reg_rtx (V16QImode);
2894 emit_insn (gen_altivec_lvsr_direct (mask, operands[1]));
ef339d6e 2895 constv = gen_const_vec_series (V16QImode, const0_rtx, const1_rtx);
0b4718c9
BS
2896 constv = force_reg (V16QImode, constv);
2897 vperm = gen_rtx_UNSPEC (V16QImode, gen_rtvec (3, mask, mask, constv),
2898 UNSPEC_VPERM);
f7df4a84 2899 emit_insn (gen_rtx_SET (operands[0], vperm));
0b4718c9
BS
2900 }
2901 DONE;
2902})
2903
f64b9156 2904(define_insn "altivec_lvsr_reg_<mode>"
1262c6cf
CL
2905 [(set (match_operand:V16QI 0 "altivec_register_operand" "=v")
2906 (unspec:V16QI
f64b9156 2907 [(match_operand:GPR 1 "gpc_reg_operand" "b")]
1262c6cf
CL
2908 UNSPEC_LVSR_REG))]
2909 "TARGET_ALTIVEC"
2910 "lvsr %0,0,%1"
2911 [(set_attr "type" "vecload")])
2912
0b4718c9 2913(define_insn "altivec_lvsr_direct"
10ed84db 2914 [(set (match_operand:V16QI 0 "register_operand" "=v")
1ba24090
SB
2915 (unspec:V16QI [(match_operand:V16QI 1 "memory_operand" "Z")]
2916 UNSPEC_LVSR))]
10ed84db 2917 "TARGET_ALTIVEC"
b4a62fa0 2918 "lvsr %0,%y1"
10ed84db
AH
2919 [(set_attr "type" "vecload")])
2920
7ccf35ed 2921(define_expand "build_vector_mask_for_load"
ad18eed2
SB
2922 [(set (match_operand:V16QI 0 "register_operand")
2923 (unspec:V16QI [(match_operand 1 "memory_operand")] UNSPEC_LVSR))]
7ccf35ed 2924 "TARGET_ALTIVEC"
6c332313 2925{
7ccf35ed
DN
2926 rtx addr;
2927 rtx temp;
2928
2e42a52f 2929 gcc_assert (MEM_P (operands[1]));
7ccf35ed
DN
2930
2931 addr = XEXP (operands[1], 0);
2932 temp = gen_reg_rtx (GET_MODE (addr));
f7df4a84 2933 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (GET_MODE (addr), addr)));
7ccf35ed 2934 emit_insn (gen_altivec_lvsr (operands[0],
8308679f 2935 replace_equiv_address (operands[1], temp)));
7ccf35ed 2936 DONE;
6c332313 2937})
7ccf35ed 2938
10ed84db
AH
2939;; Parallel some of the LVE* and STV*'s with unspecs because some have
2940;; identical rtl but different instructions-- and gcc gets confused.
2941
427a7384 2942(define_insn "altivec_lve<VI_char>x"
10ed84db 2943 [(parallel
aba5fb01 2944 [(set (match_operand:VI 0 "register_operand" "=v")
7a4eca66 2945 (match_operand:VI 1 "memory_operand" "Z"))
3aca4bff 2946 (unspec [(const_int 0)] UNSPEC_LVE)])]
10ed84db 2947 "TARGET_ALTIVEC"
aba5fb01 2948 "lve<VI_char>x %0,%y1"
10ed84db
AH
2949 [(set_attr "type" "vecload")])
2950
3aca4bff
DE
2951(define_insn "*altivec_lvesfx"
2952 [(parallel
2953 [(set (match_operand:V4SF 0 "register_operand" "=v")
7a4eca66 2954 (match_operand:V4SF 1 "memory_operand" "Z"))
3aca4bff
DE
2955 (unspec [(const_int 0)] UNSPEC_LVE)])]
2956 "TARGET_ALTIVEC"
2957 "lvewx %0,%y1"
2958 [(set_attr "type" "vecload")])
2959
427a7384 2960(define_insn "altivec_lvxl_<mode>"
4b3a6bcb
WS
2961 [(parallel
2962 [(set (match_operand:VM2 0 "register_operand" "=v")
2963 (match_operand:VM2 1 "memory_operand" "Z"))
27ffac37 2964 (unspec [(const_int 0)] UNSPEC_SET_VSCR)])]
10ed84db 2965 "TARGET_ALTIVEC"
1bed93e4 2966 "lvxl %0,%y1"
10ed84db
AH
2967 [(set_attr "type" "vecload")])
2968
85a7c926
BS
2969; This version of lvx is used only in cases where we need to force an lvx
2970; over any other load, and we don't care about losing CSE opportunities.
2971; Its primary use is for prologue register saves.
b9e2e732 2972(define_insn "altivec_lvx_<mode>_internal"
c9485473
MM
2973 [(parallel
2974 [(set (match_operand:VM2 0 "register_operand" "=v")
2975 (match_operand:VM2 1 "memory_operand" "Z"))
2976 (unspec [(const_int 0)] UNSPEC_LVX)])]
10ed84db 2977 "TARGET_ALTIVEC"
b4a62fa0 2978 "lvx %0,%y1"
10ed84db
AH
2979 [(set_attr "type" "vecload")])
2980
91d014ff
PB
2981; The following patterns embody what lvx should usually look like.
2982(define_expand "altivec_lvx_<VM2:mode>"
2983 [(set (match_operand:VM2 0 "register_operand")
2984 (match_operand:VM2 1 "altivec_indexed_or_indirect_operand"))]
2985 "TARGET_ALTIVEC"
2986{
2987 rtx addr = XEXP (operand1, 0);
2988 if (rs6000_sum_of_two_registers_p (addr))
2989 {
2990 rtx op1 = XEXP (addr, 0);
2991 rtx op2 = XEXP (addr, 1);
2992 if (TARGET_64BIT)
2993 emit_insn (gen_altivec_lvx_<VM2:mode>_2op_di (operand0, op1, op2));
2994 else
2995 emit_insn (gen_altivec_lvx_<VM2:mode>_2op_si (operand0, op1, op2));
2996 }
2997 else
2998 {
2999 if (TARGET_64BIT)
3000 emit_insn (gen_altivec_lvx_<VM2:mode>_1op_di (operand0, addr));
3001 else
3002 emit_insn (gen_altivec_lvx_<VM2:mode>_1op_si (operand0, addr));
3003 }
3004 DONE;
3005})
85a7c926 3006
91d014ff
PB
3007; The next two patterns embody what lvx should usually look like.
3008(define_insn "altivec_lvx_<VM2:mode>_2op_<P:mptrsize>"
85a7c926 3009 [(set (match_operand:VM2 0 "register_operand" "=v")
91d014ff
PB
3010 (mem:VM2 (and:P (plus:P (match_operand:P 1 "register_operand" "b")
3011 (match_operand:P 2 "register_operand" "r"))
3012 (const_int -16))))]
3013 "TARGET_ALTIVEC"
85a7c926
BS
3014 "lvx %0,%1,%2"
3015 [(set_attr "type" "vecload")])
3016
91d014ff 3017(define_insn "altivec_lvx_<VM2:mode>_1op_<P:mptrsize>"
85a7c926 3018 [(set (match_operand:VM2 0 "register_operand" "=v")
91d014ff
PB
3019 (mem:VM2 (and:P (match_operand:P 1 "register_operand" "r")
3020 (const_int -16))))]
3021 "TARGET_ALTIVEC"
85a7c926
BS
3022 "lvx %0,0,%1"
3023 [(set_attr "type" "vecload")])
3024
3025; This version of stvx is used only in cases where we need to force an stvx
3026; over any other store, and we don't care about losing CSE opportunities.
3027; Its primary use is for epilogue register restores.
b9e2e732 3028(define_insn "altivec_stvx_<mode>_internal"
10ed84db 3029 [(parallel
c9485473
MM
3030 [(set (match_operand:VM2 0 "memory_operand" "=Z")
3031 (match_operand:VM2 1 "register_operand" "v"))
3aca4bff 3032 (unspec [(const_int 0)] UNSPEC_STVX)])]
10ed84db 3033 "TARGET_ALTIVEC"
b4a62fa0 3034 "stvx %1,%y0"
10ed84db
AH
3035 [(set_attr "type" "vecstore")])
3036
91d014ff
PB
3037; The following patterns embody what stvx should usually look like.
3038(define_expand "altivec_stvx_<VM2:mode>"
3039 [(set (match_operand:VM2 1 "altivec_indexed_or_indirect_operand")
3040 (match_operand:VM2 0 "register_operand"))]
3041 "TARGET_ALTIVEC"
3042{
3043 rtx addr = XEXP (operand1, 0);
3044 if (rs6000_sum_of_two_registers_p (addr))
3045 {
3046 rtx op1 = XEXP (addr, 0);
3047 rtx op2 = XEXP (addr, 1);
3048 if (TARGET_64BIT)
3049 emit_insn (gen_altivec_stvx_<VM2:mode>_2op_di (operand0, op1, op2));
3050 else
3051 emit_insn (gen_altivec_stvx_<VM2:mode>_2op_si (operand0, op1, op2));
3052 }
3053 else
3054 {
3055 if (TARGET_64BIT)
3056 emit_insn (gen_altivec_stvx_<VM2:mode>_1op_di (operand0, addr));
3057 else
3058 emit_insn (gen_altivec_stvx_<VM2:mode>_1op_si (operand0, addr));
3059 }
3060 DONE;
3061})
85a7c926 3062
91d014ff
PB
3063; The next two patterns embody what stvx should usually look like.
3064(define_insn "altivec_stvx_<VM2:mode>_2op_<P:mptrsize>"
3065 [(set (mem:VM2 (and:P (plus:P (match_operand:P 1 "register_operand" "b")
3066 (match_operand:P 2 "register_operand" "r"))
3067 (const_int -16)))
3068 (match_operand:VM2 0 "register_operand" "v"))]
3069 "TARGET_ALTIVEC"
85a7c926
BS
3070 "stvx %0,%1,%2"
3071 [(set_attr "type" "vecstore")])
3072
91d014ff
PB
3073(define_insn "altivec_stvx_<VM2:mode>_1op_<P:mptrsize>"
3074 [(set (mem:VM2 (and:P (match_operand:P 1 "register_operand" "r")
3075 (const_int -16)))
3076 (match_operand:VM2 0 "register_operand" "v"))]
3077 "TARGET_ALTIVEC"
85a7c926
BS
3078 "stvx %0,0,%1"
3079 [(set_attr "type" "vecstore")])
3080
427a7384 3081(define_insn "altivec_stvxl_<mode>"
10ed84db 3082 [(parallel
4b3a6bcb
WS
3083 [(set (match_operand:VM2 0 "memory_operand" "=Z")
3084 (match_operand:VM2 1 "register_operand" "v"))
3aca4bff 3085 (unspec [(const_int 0)] UNSPEC_STVXL)])]
10ed84db 3086 "TARGET_ALTIVEC"
b4a62fa0 3087 "stvxl %1,%y0"
10ed84db
AH
3088 [(set_attr "type" "vecstore")])
3089
427a7384 3090(define_insn "altivec_stve<VI_char>x"
99eeedc4
JJ
3091 [(set (match_operand:<VI_scalar> 0 "memory_operand" "=Z")
3092 (unspec:<VI_scalar> [(match_operand:VI 1 "register_operand" "v")] UNSPEC_STVE))]
10ed84db 3093 "TARGET_ALTIVEC"
aba5fb01 3094 "stve<VI_char>x %1,%y0"
10ed84db
AH
3095 [(set_attr "type" "vecstore")])
3096
3aca4bff 3097(define_insn "*altivec_stvesfx"
99eeedc4
JJ
3098 [(set (match_operand:SF 0 "memory_operand" "=Z")
3099 (unspec:SF [(match_operand:V4SF 1 "register_operand" "v")] UNSPEC_STVE))]
3aca4bff
DE
3100 "TARGET_ALTIVEC"
3101 "stvewx %1,%y0"
3102 [(set_attr "type" "vecstore")])
3103
3fd44c8a
CL
3104;; Generate doublee
3105;; signed int/float to double convert words 0 and 2
3106(define_expand "doublee<mode>2"
3107 [(set (match_operand:V2DF 0 "register_operand" "=v")
3108 (match_operand:VSX_W 1 "register_operand" "v"))]
3109 "TARGET_VSX"
3110{
3111 machine_mode op_mode = GET_MODE (operands[1]);
3112
427a7384 3113 if (BYTES_BIG_ENDIAN)
3fd44c8a
CL
3114 {
3115 /* Big endian word numbering for words in operand is 0 1 2 3.
3116 Input words 0 and 2 are where they need to be. */
3117 emit_insn (gen_vsx_xvcv<VS_sxwsp>dp (operands[0], operands[1]));
3118 }
3119 else
3120 {
3121 /* Little endian word numbering for operand is 3 2 1 0.
3122 take (operand[1] operand[1]) and shift left one word
3123 3 2 1 0 3 2 1 0 => 2 1 0 3
3124 Input words 2 and 0 are now where they need to be for the
3125 conversion. */
3126 rtx rtx_tmp;
3127 rtx rtx_val = GEN_INT (1);
3128
3129 rtx_tmp = gen_reg_rtx (op_mode);
3130 emit_insn (gen_vsx_xxsldwi_<mode> (rtx_tmp, operands[1],
3131 operands[1], rtx_val));
3132 emit_insn (gen_vsx_xvcv<VS_sxwsp>dp (operands[0], rtx_tmp));
3133 }
3134 DONE;
3135}
3136 [(set_attr "type" "veccomplex")])
3137
3138;; Generate unsdoublee
3139;; unsigned int to double convert words 0 and 2
3140(define_expand "unsdoubleev4si2"
3141 [(set (match_operand:V2DF 0 "register_operand" "=v")
3142 (match_operand:V4SI 1 "register_operand" "v"))]
3143 "TARGET_VSX"
3144{
427a7384 3145 if (BYTES_BIG_ENDIAN)
3fd44c8a
CL
3146 {
3147 /* Big endian word numbering for words in operand is 0 1 2 3.
3148 Input words 0 and 2 are where they need to be. */
3149 emit_insn (gen_vsx_xvcvuxwdp (operands[0], operands[1]));
3150 }
3151 else
3152 {
3153 /* Little endian word numbering for operand is 3 2 1 0.
3154 take (operand[1] operand[1]) and shift left one word
3155 3 2 1 0 3 2 1 0 => 2 1 0 3
3156 Input words 2 and 0 are now where they need to be for the
3157 conversion. */
3158 rtx rtx_tmp;
3159 rtx rtx_val = GEN_INT (1);
3160
3161 rtx_tmp = gen_reg_rtx (V4SImode);
3162 emit_insn (gen_vsx_xxsldwi_v4si (rtx_tmp, operands[1],
3163 operands[1], rtx_val));
3164 emit_insn (gen_vsx_xvcvuxwdp (operands[0], rtx_tmp));
3165 }
3166 DONE;
3167}
3168 [(set_attr "type" "veccomplex")])
3169
3170;; Generate doubleov
3171;; signed int/float to double convert words 1 and 3
3172(define_expand "doubleo<mode>2"
3173 [(set (match_operand:V2DF 0 "register_operand" "=v")
3174 (match_operand:VSX_W 1 "register_operand" "v"))]
3175 "TARGET_VSX"
3176{
3177 machine_mode op_mode = GET_MODE (operands[1]);
3178
427a7384 3179 if (BYTES_BIG_ENDIAN)
3fd44c8a
CL
3180 {
3181 /* Big endian word numbering for words in operand is 0 1 2 3.
3182 take (operand[1] operand[1]) and shift left one word
3183 0 1 2 3 0 1 2 3 => 1 2 3 0
3184 Input words 1 and 3 are now where they need to be for the
3185 conversion. */
3186 rtx rtx_tmp;
3187 rtx rtx_val = GEN_INT (1);
3188
3189 rtx_tmp = gen_reg_rtx (op_mode);
3190 emit_insn (gen_vsx_xxsldwi_<mode> (rtx_tmp, operands[1],
3191 operands[1], rtx_val));
3192 emit_insn (gen_vsx_xvcv<VS_sxwsp>dp (operands[0], rtx_tmp));
3193 }
3194 else
3195 {
3196 /* Little endian word numbering for operand is 3 2 1 0.
3197 Input words 3 and 1 are where they need to be. */
3198 emit_insn (gen_vsx_xvcv<VS_sxwsp>dp (operands[0], operands[1]));
3199 }
3200 DONE;
3201}
3202 [(set_attr "type" "veccomplex")])
3203
3204;; Generate unsdoubleov
3205;; unsigned int to double convert words 1 and 3
3206(define_expand "unsdoubleov4si2"
3207 [(set (match_operand:V2DF 0 "register_operand" "=v")
3208 (match_operand:V4SI 1 "register_operand" "v"))]
3209 "TARGET_VSX"
3210{
427a7384 3211 if (BYTES_BIG_ENDIAN)
3fd44c8a
CL
3212 {
3213 /* Big endian word numbering for words in operand is 0 1 2 3.
3214 take (operand[1] operand[1]) and shift left one word
3215 0 1 2 3 0 1 2 3 => 1 2 3 0
3216 Input words 1 and 3 are now where they need to be for the
3217 conversion. */
3218 rtx rtx_tmp;
3219 rtx rtx_val = GEN_INT (1);
3220
3221 rtx_tmp = gen_reg_rtx (V4SImode);
3222 emit_insn (gen_vsx_xxsldwi_v4si (rtx_tmp, operands[1],
3223 operands[1], rtx_val));
3224 emit_insn (gen_vsx_xvcvuxwdp (operands[0], rtx_tmp));
3225 }
3226 else
3227 {
3228 /* Want to convert the words 1 and 3.
3229 Little endian word numbering for operand is 3 2 1 0.
3230 Input words 3 and 1 are where they need to be. */
3231 emit_insn (gen_vsx_xvcvuxwdp (operands[0], operands[1]));
3232 }
3233 DONE;
3234}
3235 [(set_attr "type" "veccomplex")])
3236
3237;; Generate doublehv
3238;; signed int/float to double convert words 0 and 1
3239(define_expand "doubleh<mode>2"
3240 [(set (match_operand:V2DF 0 "register_operand" "=v")
3241 (match_operand:VSX_W 1 "register_operand" "v"))]
3242 "TARGET_VSX"
3243{
3244 rtx rtx_tmp;
3245 rtx rtx_val;
3246
3247 machine_mode op_mode = GET_MODE (operands[1]);
3248 rtx_tmp = gen_reg_rtx (op_mode);
3249
427a7384 3250 if (BYTES_BIG_ENDIAN)
3fd44c8a
CL
3251 {
3252 /* Big endian word numbering for words in operand is 0 1 2 3.
3253 Shift operand left one word, rtx_tmp word order is now 1 2 3 0.
3254 take (rts_tmp operand[1]) and shift left three words
3255 1 2 3 0 0 1 2 3 => 0 0 1 2
3256 Input words 0 and 1 are now where they need to be for the
3257 conversion. */
3258 rtx_val = GEN_INT (1);
3259 emit_insn (gen_vsx_xxsldwi_<mode> (rtx_tmp, operands[1],
3260 operands[1], rtx_val));
3261
3262 rtx_val = GEN_INT (3);
3263 emit_insn (gen_vsx_xxsldwi_<mode> (rtx_tmp, rtx_tmp,
3264 operands[1], rtx_val));
3265 emit_insn (gen_vsx_xvcv<VS_sxwsp>dp (operands[0], rtx_tmp));
3266 }
3267 else
3268 {
3269 /* Little endian word numbering for operand is 3 2 1 0.
3270 Shift operand left three words, rtx_tmp word order is now 0 3 2 1.
3271 take (operand[1] rts_tmp) and shift left two words
3272 3 2 1 0 0 3 2 1 => 1 0 0 3
3273 Input words 0 and 1 are now where they need to be for the
3274 conversion. */
3275 rtx_val = GEN_INT (3);
3276 emit_insn (gen_vsx_xxsldwi_<mode> (rtx_tmp, operands[1],
3277 operands[1], rtx_val));
3278
3279 rtx_val = GEN_INT (2);
3280 emit_insn (gen_vsx_xxsldwi_<mode> (rtx_tmp, operands[1],
3281 rtx_tmp, rtx_val));
3282 emit_insn (gen_vsx_xvcv<VS_sxwsp>dp (operands[0], rtx_tmp));
3283 }
3284 DONE;
3285}
3286 [(set_attr "type" "veccomplex")])
3287
3288;; Generate unsdoublehv
3289;; unsigned int to double convert words 0 and 1
3290(define_expand "unsdoublehv4si2"
3291 [(set (match_operand:V2DF 0 "register_operand" "=v")
3292 (match_operand:V4SI 1 "register_operand" "v"))]
3293 "TARGET_VSX"
3294{
3295 rtx rtx_tmp = gen_reg_rtx (V4SImode);
3296 rtx rtx_val = GEN_INT (12);
3297
427a7384 3298 if (BYTES_BIG_ENDIAN)
3fd44c8a
CL
3299 {
3300 /* Big endian word numbering for words in operand is 0 1 2 3.
3301 Shift operand left one word, rtx_tmp word order is now 1 2 3 0.
3302 take (rts_tmp operand[1]) and shift left three words
3303 1 2 3 0 0 1 2 3 => 0 0 1 2
3304 Input words 0 and 1 are now where they need to be for the
3305 conversion. */
3306 rtx_val = GEN_INT (1);
3307 emit_insn (gen_vsx_xxsldwi_v4si (rtx_tmp, operands[1],
3308 operands[1], rtx_val));
3309
3310 rtx_val = GEN_INT (3);
3311 emit_insn (gen_vsx_xxsldwi_v4si (rtx_tmp, rtx_tmp,
3312 operands[1], rtx_val));
3313 emit_insn (gen_vsx_xvcvuxwdp (operands[0], rtx_tmp));
3314 }
3315 else
3316 {
3317 /* Little endian word numbering for operand is 3 2 1 0.
3318 Shift operand left three words, rtx_tmp word order is now 0 3 2 1.
3319 take (operand[1] rts_tmp) and shift left two words
3320 3 2 1 0 0 3 2 1 => 1 0 0 3
3321 Input words 1 and 0 are now where they need to be for the
3322 conversion. */
3323 rtx_val = GEN_INT (3);
3324
3325 rtx_tmp = gen_reg_rtx (V4SImode);
3326 emit_insn (gen_vsx_xxsldwi_v4si (rtx_tmp, operands[1],
3327 operands[1], rtx_val));
3328
3329 rtx_val = GEN_INT (2);
3330 emit_insn (gen_vsx_xxsldwi_v4si (rtx_tmp, operands[1],
3331 rtx_tmp, rtx_val));
3332 emit_insn (gen_vsx_xvcvuxwdp (operands[0], rtx_tmp));
3333 }
3334 DONE;
3335}
3336 [(set_attr "type" "veccomplex")])
3337
3338;; Generate doublelv
3339;; signed int/float to double convert words 2 and 3
3340(define_expand "doublel<mode>2"
3341 [(set (match_operand:V2DF 0 "register_operand" "=v")
3342 (match_operand:VSX_W 1 "register_operand" "v"))]
3343 "TARGET_VSX"
3344{
3345 rtx rtx_tmp;
3346 rtx rtx_val = GEN_INT (3);
3347
3348 machine_mode op_mode = GET_MODE (operands[1]);
3349 rtx_tmp = gen_reg_rtx (op_mode);
3350
427a7384 3351 if (BYTES_BIG_ENDIAN)
3fd44c8a
CL
3352 {
3353 /* Big endian word numbering for operand is 0 1 2 3.
3354 Shift operand left three words, rtx_tmp word order is now 3 0 1 2.
3355 take (operand[1] rtx_tmp) and shift left two words
3356 0 1 2 3 3 0 1 2 => 2 3 3 0
3357 now use convert instruction to convert word 2 and 3 in the
3358 input vector. */
3359 rtx_val = GEN_INT (3);
3360 emit_insn (gen_vsx_xxsldwi_<mode> (rtx_tmp, operands[1],
3361 operands[1], rtx_val));
3362
4a7f0dc0
CL
3363 rtx_val = GEN_INT (2);
3364 emit_insn (gen_vsx_xxsldwi_<mode> (rtx_tmp, operands[1],
3365 rtx_tmp, rtx_val));
3fd44c8a
CL
3366 emit_insn (gen_vsx_xvcv<VS_sxwsp>dp (operands[0], rtx_tmp));
3367 }
3368 else
3369 {
3370 /* Little endian word numbering for operand is 3 2 1 0.
3371 Shift operand left one word, rtx_tmp word order is now 2 1 0 3.
3372 take (rtx_tmp operand[1]) and shift left three words
3373 2 1 0 3 3 2 1 0 => 3 3 2 1
3374 now use convert instruction to convert word 3 and 2 in the
3375 input vector. */
3376 rtx_val = GEN_INT (1);
3377 emit_insn (gen_vsx_xxsldwi_<mode> (rtx_tmp, operands[1],
3378 operands[1], rtx_val));
3379
3380 rtx_val = GEN_INT (3);
3381 emit_insn (gen_vsx_xxsldwi_<mode> (rtx_tmp, rtx_tmp,
3382 operands[1], rtx_val));
3383 emit_insn (gen_vsx_xvcv<VS_sxwsp>dp (operands[0], rtx_tmp));
3384 }
3385 DONE;
3386}
3387 [(set_attr "type" "veccomplex")])
3388
3389;; Generate unsdoublelv
3390;; unsigned int to double convert convert 2 and 3
3391(define_expand "unsdoublelv4si2"
3392 [(set (match_operand:V2DF 0 "register_operand" "=v")
3393 (match_operand:V4SI 1 "register_operand" "v"))]
3394 "TARGET_VSX"
3395{
3396 rtx rtx_tmp = gen_reg_rtx (V4SImode);
3397 rtx rtx_val = GEN_INT (12);
3398
427a7384 3399 if (BYTES_BIG_ENDIAN)
3fd44c8a
CL
3400 {
3401 /* Big endian word numbering for operand is 0 1 2 3.
3402 Shift operand left three words, rtx_tmp word order is now 3 0 1 2.
3403 take (operand[1] rtx_tmp) and shift left two words
3404 0 1 2 3 3 0 1 2 => 2 3 3 0
3405 now use convert instruction to convert word 2 and 3 in the
3406 input vector. */
3407 rtx_val = GEN_INT (3);
3408 emit_insn (gen_vsx_xxsldwi_v4si (rtx_tmp, operands[1],
3409 operands[1], rtx_val));
3410
3411 rtx_val = GEN_INT (2);
3412 emit_insn (gen_vsx_xxsldwi_v4si (rtx_tmp, operands[1],
3413 rtx_tmp, rtx_val));
3414 emit_insn (gen_vsx_xvcvuxwdp (operands[0], rtx_tmp));
3415 }
3416 else
3417 {
3418 /* Little endian word numbering for operand is 3 2 1 0.
3419 Shift operand left one word, rtx_tmp word order is now 2 1 0 3.
3420 take (rtx_tmp operand[1]) and shift left three words
3421 2 1 0 3 3 2 1 0 => 3 3 2 1
3422 now use convert instruction to convert word 3 and 2 in the
3423 input vector. */
3424 rtx_val = GEN_INT (1);
3425 emit_insn (gen_vsx_xxsldwi_v4si (rtx_tmp,
3426 operands[1], operands[1], rtx_val));
3427
3428 rtx_val = GEN_INT (3);
3429 emit_insn (gen_vsx_xxsldwi_v4si (rtx_tmp, rtx_tmp,
3430 operands[1], rtx_val));
3431 emit_insn (gen_vsx_xvcvuxwdp (operands[0], rtx_tmp));
3432 }
3433 DONE;
3434}
3435 [(set_attr "type" "veccomplex")])
3436
19388c6d
CL
3437;; Generate two vector F32 converted to packed vector I16 vector
3438(define_expand "convert_4f32_8i16"
3439 [(set (match_operand:V8HI 0 "register_operand" "=v")
3440 (unspec:V8HI [(match_operand:V4SF 1 "register_operand" "v")
3441 (match_operand:V4SF 2 "register_operand" "v")]
3442 UNSPEC_CONVERT_4F32_8I16))]
3443 "TARGET_P9_VECTOR"
3444{
3445 rtx rtx_tmp_hi = gen_reg_rtx (V4SImode);
3446 rtx rtx_tmp_lo = gen_reg_rtx (V4SImode);
3447
3448 emit_insn (gen_altivec_vctuxs (rtx_tmp_hi, operands[1], const0_rtx));
3449 emit_insn (gen_altivec_vctuxs (rtx_tmp_lo, operands[2], const0_rtx));
3450 emit_insn (gen_altivec_vpkswss (operands[0], rtx_tmp_hi, rtx_tmp_lo));
3451 DONE;
3452})
3453
58b475a2
WS
3454
3455;; Convert two vector F32 to packed vector F16.
3456;; This builtin packs 32-bit floating-point values into a packed
3457;; 16-bit floating point values (stored in 16bit integer type).
3458;; (vector unsigned short r = vec_pack_to_short_fp32 (a, b);
3459;; The expected codegen for this builtin is
3460;; xvcvsphp t, a
3461;; xvcvsphp u, b
3462;; if (little endian)
3463;; vpkuwum r, t, u
3464;; else
3465;; vpkuwum r, u, t
3466
3467(define_expand "convert_4f32_8f16"
3468 [(set (match_operand:V8HI 0 "register_operand" "=v")
3469 (unspec:V8HI [(match_operand:V4SF 1 "register_operand" "v")
3470 (match_operand:V4SF 2 "register_operand" "v")]
3471 UNSPEC_CONVERT_4F32_8F16))]
3472 "TARGET_P9_VECTOR"
3473{
3474 rtx rtx_tmp_hi = gen_reg_rtx (V4SImode);
3475 rtx rtx_tmp_lo = gen_reg_rtx (V4SImode);
3476
3477 emit_insn (gen_vsx_xvcvsphp (rtx_tmp_hi, operands[1]));
3478 emit_insn (gen_vsx_xvcvsphp (rtx_tmp_lo, operands[2]));
3479 if (!BYTES_BIG_ENDIAN)
3480 emit_insn (gen_altivec_vpkuwum (operands[0], rtx_tmp_hi, rtx_tmp_lo));
3481 else
3482 emit_insn (gen_altivec_vpkuwum (operands[0], rtx_tmp_lo, rtx_tmp_hi));
3483 DONE;
3484})
3485
3486
27ffac37 3487;; Generate
a5965b52 3488;; xxlxor/vxor SCRATCH0,SCRATCH0,SCRATCH0
27ffac37
PB
3489;; vsubu?m SCRATCH2,SCRATCH1,%1
3490;; vmaxs? %0,%1,SCRATCH2"
3491(define_expand "abs<mode>2"
a5965b52
MM
3492 [(set (match_dup 2) (match_dup 3))
3493 (set (match_dup 4)
3494 (minus:VI2 (match_dup 2)
3495 (match_operand:VI2 1 "register_operand" "v")))
3496 (set (match_operand:VI2 0 "register_operand" "=v")
3497 (smax:VI2 (match_dup 1) (match_dup 4)))]
3498 "<VI_unit>"
27ffac37 3499{
a5965b52 3500 operands[2] = gen_reg_rtx (<MODE>mode);
59d06c05 3501 operands[3] = CONST0_RTX (<MODE>mode);
a5965b52 3502 operands[4] = gen_reg_rtx (<MODE>mode);
27ffac37 3503})
10ed84db 3504
76689ffc
CL
3505;; Generate
3506;; vspltisw SCRATCH1,0
3507;; vsubu?m SCRATCH2,SCRATCH1,%1
3508;; vmins? %0,%1,SCRATCH2"
3509(define_expand "nabs<mode>2"
3510 [(set (match_dup 2) (match_dup 3))
3511 (set (match_dup 4)
3512 (minus:VI2 (match_dup 2)
3513 (match_operand:VI2 1 "register_operand" "v")))
3514 (set (match_operand:VI2 0 "register_operand" "=v")
3515 (smin:VI2 (match_dup 1) (match_dup 4)))]
3516 "<VI_unit>"
3517{
76689ffc 3518 operands[2] = gen_reg_rtx (<MODE>mode);
59d06c05 3519 operands[3] = CONST0_RTX (<MODE>mode);
76689ffc
CL
3520 operands[4] = gen_reg_rtx (<MODE>mode);
3521})
3522
27ffac37
PB
3523;; Generate
3524;; vspltisw SCRATCH1,-1
3525;; vslw SCRATCH2,SCRATCH1,SCRATCH1
3526;; vandc %0,%1,SCRATCH2
a72c65c7 3527(define_expand "altivec_absv4sf2"
27ffac37 3528 [(set (match_dup 2)
8cf0db2b 3529 (vec_duplicate:V4SI (const_int -1)))
27ffac37 3530 (set (match_dup 3)
e83a75a7 3531 (ashift:V4SI (match_dup 2) (match_dup 2)))
27ffac37 3532 (set (match_operand:V4SF 0 "register_operand" "=v")
8cf0db2b 3533 (and:V4SF (not:V4SF (subreg:V4SF (match_dup 3) 0))
27ffac37 3534 (match_operand:V4SF 1 "register_operand" "v")))]
10ed84db 3535 "TARGET_ALTIVEC"
27ffac37 3536{
8cf0db2b
UW
3537 operands[2] = gen_reg_rtx (V4SImode);
3538 operands[3] = gen_reg_rtx (V4SImode);
27ffac37 3539})
10ed84db 3540
27ffac37
PB
3541;; Generate
3542;; vspltis? SCRATCH0,0
3543;; vsubs?s SCRATCH2,SCRATCH1,%1
3544;; vmaxs? %0,%1,SCRATCH2"
3545(define_expand "altivec_abss_<mode>"
3546 [(set (match_dup 2) (vec_duplicate:VI (const_int 0)))
3547 (parallel [(set (match_dup 3)
460d53f8
XL
3548 (ss_minus:VI (match_dup 2)
3549 (match_operand:VI 1 "register_operand" "v")))
3b2475ab
SB
3550 (set (reg:SI VSCR_REGNO)
3551 (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))])
27ffac37
PB
3552 (set (match_operand:VI 0 "register_operand" "=v")
3553 (smax:VI (match_dup 1) (match_dup 3)))]
10ed84db 3554 "TARGET_ALTIVEC"
27ffac37
PB
3555{
3556 operands[2] = gen_reg_rtx (GET_MODE (operands[0]));
3557 operands[3] = gen_reg_rtx (GET_MODE (operands[0]));
3558})
7ccf35ed 3559
5e8edf67
AL
3560(define_expand "reduc_plus_scal_<mode>"
3561 [(set (match_operand:<VI_scalar> 0 "register_operand" "=v")
3aca4bff
DE
3562 (unspec:VIshort [(match_operand:VIshort 1 "register_operand" "v")]
3563 UNSPEC_REDUC_PLUS))]
61d3cdbb 3564 "TARGET_ALTIVEC"
1ba24090 3565{
a6b46ba2 3566 rtx vzero = gen_reg_rtx (V4SImode);
61d3cdbb 3567 rtx vtmp1 = gen_reg_rtx (V4SImode);
5e8edf67
AL
3568 rtx vtmp2 = gen_reg_rtx (<MODE>mode);
3569 rtx dest = gen_lowpart (V4SImode, vtmp2);
427a7384 3570 int elt = BYTES_BIG_ENDIAN ? GET_MODE_NUNITS (<MODE>mode) - 1 : 0;
61d3cdbb 3571
a6b46ba2
DN
3572 emit_insn (gen_altivec_vspltisw (vzero, const0_rtx));
3573 emit_insn (gen_altivec_vsum4s<VI_char>s (vtmp1, operands[1], vzero));
b80afde9 3574 emit_insn (gen_altivec_vsumsws_direct (dest, vtmp1, vzero));
98060bbe 3575 rs6000_expand_vector_extract (operands[0], vtmp2, GEN_INT (elt));
61d3cdbb 3576 DONE;
1ba24090 3577})
61d3cdbb 3578
c3eaf15a
MM
3579(define_insn "*p9_neg<mode>2"
3580 [(set (match_operand:VNEG 0 "altivec_register_operand" "=v")
3581 (neg:VNEG (match_operand:VNEG 1 "altivec_register_operand" "v")))]
3582 "TARGET_P9_VECTOR"
3583 "vneg<VI_char> %0,%1"
3584 [(set_attr "type" "vecsimple")])
3585
70a39602 3586(define_expand "neg<mode>2"
ad18eed2
SB
3587 [(set (match_operand:VI2 0 "register_operand")
3588 (neg:VI2 (match_operand:VI2 1 "register_operand")))]
c3eaf15a 3589 "<VI_unit>"
70a39602 3590{
c3eaf15a
MM
3591 if (!TARGET_P9_VECTOR || (<MODE>mode != V4SImode && <MODE>mode != V2DImode))
3592 {
3593 rtx vzero;
70a39602 3594
c3eaf15a
MM
3595 vzero = gen_reg_rtx (GET_MODE (operands[0]));
3596 emit_move_insn (vzero, CONST0_RTX (<MODE>mode));
3597 emit_insn (gen_sub<mode>3 (operands[0], vzero, operands[1]));
3598 DONE;
3599 }
3600})
70a39602 3601
20f06221
DN
3602(define_expand "udot_prod<mode>"
3603 [(set (match_operand:V4SI 0 "register_operand" "=v")
3604 (plus:V4SI (match_operand:V4SI 3 "register_operand" "v")
3605 (unspec:V4SI [(match_operand:VIshort 1 "register_operand" "v")
3606 (match_operand:VIshort 2 "register_operand" "v")]
3607 UNSPEC_VMSUMU)))]
3608 "TARGET_ALTIVEC"
6c332313 3609{
20f06221
DN
3610 emit_insn (gen_altivec_vmsumu<VI_char>m (operands[0], operands[1], operands[2], operands[3]));
3611 DONE;
6c332313
SB
3612})
3613
20f06221
DN
3614(define_expand "sdot_prodv8hi"
3615 [(set (match_operand:V4SI 0 "register_operand" "=v")
3616 (plus:V4SI (match_operand:V4SI 3 "register_operand" "v")
3617 (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
3618 (match_operand:V8HI 2 "register_operand" "v")]
3619 UNSPEC_VMSUMSHM)))]
3620 "TARGET_ALTIVEC"
20f06221
DN
3621{
3622 emit_insn (gen_altivec_vmsumshm (operands[0], operands[1], operands[2], operands[3]));
3623 DONE;
6c332313 3624})
20f06221
DN
3625
3626(define_expand "widen_usum<mode>3"
3627 [(set (match_operand:V4SI 0 "register_operand" "=v")
3628 (plus:V4SI (match_operand:V4SI 2 "register_operand" "v")
3629 (unspec:V4SI [(match_operand:VIshort 1 "register_operand" "v")]
3630 UNSPEC_VMSUMU)))]
3631 "TARGET_ALTIVEC"
20f06221
DN
3632{
3633 rtx vones = gen_reg_rtx (GET_MODE (operands[1]));
3634
3635 emit_insn (gen_altivec_vspltis<VI_char> (vones, const1_rtx));
3636 emit_insn (gen_altivec_vmsumu<VI_char>m (operands[0], operands[1], vones, operands[2]));
3637 DONE;
6c332313 3638})
20f06221
DN
3639
3640(define_expand "widen_ssumv16qi3"
3641 [(set (match_operand:V4SI 0 "register_operand" "=v")
3642 (plus:V4SI (match_operand:V4SI 2 "register_operand" "v")
3643 (unspec:V4SI [(match_operand:V16QI 1 "register_operand" "v")]
3644 UNSPEC_VMSUMM)))]
3645 "TARGET_ALTIVEC"
20f06221
DN
3646{
3647 rtx vones = gen_reg_rtx (V16QImode);
3648
3649 emit_insn (gen_altivec_vspltisb (vones, const1_rtx));
3650 emit_insn (gen_altivec_vmsummbm (operands[0], operands[1], vones, operands[2]));
3651 DONE;
6c332313 3652})
20f06221
DN
3653
3654(define_expand "widen_ssumv8hi3"
3655 [(set (match_operand:V4SI 0 "register_operand" "=v")
3656 (plus:V4SI (match_operand:V4SI 2 "register_operand" "v")
3657 (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")]
3658 UNSPEC_VMSUMSHM)))]
3659 "TARGET_ALTIVEC"
20f06221
DN
3660{
3661 rtx vones = gen_reg_rtx (V8HImode);
3662
3663 emit_insn (gen_altivec_vspltish (vones, const1_rtx));
3664 emit_insn (gen_altivec_vmsumshm (operands[0], operands[1], vones, operands[2]));
3665 DONE;
6c332313 3666})
20f06221 3667
a5965b52
MM
3668(define_expand "vec_unpacks_hi_<VP_small_lc>"
3669 [(set (match_operand:VP 0 "register_operand" "=v")
3670 (unspec:VP [(match_operand:<VP_small> 1 "register_operand" "v")]
d85f364c 3671 UNSPEC_VUNPACK_HI_SIGN_DIRECT))]
a5965b52
MM
3672 "<VI_unit>"
3673 "")
89d67cca 3674
a5965b52
MM
3675(define_expand "vec_unpacks_lo_<VP_small_lc>"
3676 [(set (match_operand:VP 0 "register_operand" "=v")
3677 (unspec:VP [(match_operand:<VP_small> 1 "register_operand" "v")]
d85f364c 3678 UNSPEC_VUNPACK_LO_SIGN_DIRECT))]
a5965b52
MM
3679 "<VI_unit>"
3680 "")
89d67cca
DN
3681
3682(define_insn "vperm_v8hiv4si"
ee969a36
SB
3683 [(set (match_operand:V4SI 0 "register_operand" "=?wa,v")
3684 (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "wa,v")
3685 (match_operand:V4SI 2 "register_operand" "0,v")
3686 (match_operand:V16QI 3 "register_operand" "wa,v")]
89d67cca
DN
3687 UNSPEC_VPERMSI))]
3688 "TARGET_ALTIVEC"
4e8a3a35 3689 "@
ee969a36
SB
3690 xxperm %x0,%x1,%x3
3691 vperm %0,%1,%2,%3"
afc69d4e 3692 [(set_attr "type" "vecperm")
ee969a36 3693 (set_attr "isa" "p9v,*")])
89d67cca
DN
3694
3695(define_insn "vperm_v16qiv8hi"
ee969a36
SB
3696 [(set (match_operand:V8HI 0 "register_operand" "=?wa,v")
3697 (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "wa,v")
3698 (match_operand:V8HI 2 "register_operand" "0,v")
3699 (match_operand:V16QI 3 "register_operand" "wa,v")]
89d67cca
DN
3700 UNSPEC_VPERMHI))]
3701 "TARGET_ALTIVEC"
4e8a3a35 3702 "@
ee969a36
SB
3703 xxperm %x0,%x1,%x3
3704 vperm %0,%1,%2,%3"
afc69d4e 3705 [(set_attr "type" "vecperm")
ee969a36 3706 (set_attr "isa" "p9v,*")])
89d67cca 3707
a20be0cd
KL
3708(define_expand "vec_unpacku_hi_<VP_small_lc>"
3709 [(set (match_operand:VP 0 "register_operand" "=v")
3710 (unspec:VP [(match_operand:<VP_small> 1 "register_operand" "v")]
3711 UNSPEC_VUPKHU))]
89d67cca 3712 "TARGET_ALTIVEC"
89d67cca 3713{
a20be0cd
KL
3714 rtx vzero = gen_reg_rtx (<VP_small>mode);
3715 emit_insn (gen_altivec_vspltis<VU_char> (vzero, const0_rtx));
89d67cca 3716
a20be0cd
KL
3717 rtx res = gen_reg_rtx (<VP_small>mode);
3718 rtx op1 = operands[1];
89d67cca 3719
a20be0cd
KL
3720 if (BYTES_BIG_ENDIAN)
3721 emit_insn (gen_altivec_vmrgh<VU_char> (res, vzero, op1));
3722 else
3723 emit_insn (gen_altivec_vmrgl<VU_char> (res, op1, vzero));
3724
3725 emit_insn (gen_move_insn (operands[0], gen_lowpart (<MODE>mode, res)));
89d67cca 3726 DONE;
6c332313 3727})
89d67cca 3728
a20be0cd
KL
3729(define_expand "vec_unpacku_lo_<VP_small_lc>"
3730 [(set (match_operand:VP 0 "register_operand" "=v")
3731 (unspec:VP [(match_operand:<VP_small> 1 "register_operand" "v")]
3732 UNSPEC_VUPKLU))]
89d67cca 3733 "TARGET_ALTIVEC"
89d67cca 3734{
a20be0cd
KL
3735 rtx vzero = gen_reg_rtx (<VP_small>mode);
3736 emit_insn (gen_altivec_vspltis<VU_char> (vzero, const0_rtx));
89d67cca 3737
a20be0cd
KL
3738 rtx res = gen_reg_rtx (<VP_small>mode);
3739 rtx op1 = operands[1];
3740
3741 if (BYTES_BIG_ENDIAN)
3742 emit_insn (gen_altivec_vmrgl<VU_char> (res, vzero, op1));
3743 else
3744 emit_insn (gen_altivec_vmrgh<VU_char> (res, op1, vzero));
3745
3746 emit_insn (gen_move_insn (operands[0], gen_lowpart (<MODE>mode, res)));
89d67cca 3747 DONE;
6c332313 3748})
89d67cca
DN
3749
3750(define_expand "vec_widen_umult_hi_v16qi"
3751 [(set (match_operand:V8HI 0 "register_operand" "=v")
3752 (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")
3753 (match_operand:V16QI 2 "register_operand" "v")]
3754 UNSPEC_VMULWHUB))]
3755 "TARGET_ALTIVEC"
89d67cca
DN
3756{
3757 rtx ve = gen_reg_rtx (V8HImode);
3758 rtx vo = gen_reg_rtx (V8HImode);
3759
578acbf0 3760 if (BYTES_BIG_ENDIAN)
d13dfec8
BS
3761 {
3762 emit_insn (gen_altivec_vmuleub (ve, operands[1], operands[2]));
3763 emit_insn (gen_altivec_vmuloub (vo, operands[1], operands[2]));
68d3bacf 3764 emit_insn (gen_altivec_vmrghh_direct (operands[0], ve, vo));
d13dfec8 3765 }
578acbf0 3766 else
d13dfec8
BS
3767 {
3768 emit_insn (gen_altivec_vmuloub (ve, operands[1], operands[2]));
3769 emit_insn (gen_altivec_vmuleub (vo, operands[1], operands[2]));
68d3bacf 3770 emit_insn (gen_altivec_vmrghh_direct (operands[0], vo, ve));
d13dfec8 3771 }
89d67cca 3772 DONE;
6c332313 3773})
89d67cca
DN
3774
3775(define_expand "vec_widen_umult_lo_v16qi"
3776 [(set (match_operand:V8HI 0 "register_operand" "=v")
3777 (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")
3778 (match_operand:V16QI 2 "register_operand" "v")]
3779 UNSPEC_VMULWLUB))]
3780 "TARGET_ALTIVEC"
89d67cca
DN
3781{
3782 rtx ve = gen_reg_rtx (V8HImode);
3783 rtx vo = gen_reg_rtx (V8HImode);
3784
578acbf0 3785 if (BYTES_BIG_ENDIAN)
d13dfec8
BS
3786 {
3787 emit_insn (gen_altivec_vmuleub (ve, operands[1], operands[2]));
3788 emit_insn (gen_altivec_vmuloub (vo, operands[1], operands[2]));
68d3bacf 3789 emit_insn (gen_altivec_vmrglh_direct (operands[0], ve, vo));
d13dfec8 3790 }
578acbf0 3791 else
d13dfec8
BS
3792 {
3793 emit_insn (gen_altivec_vmuloub (ve, operands[1], operands[2]));
3794 emit_insn (gen_altivec_vmuleub (vo, operands[1], operands[2]));
68d3bacf 3795 emit_insn (gen_altivec_vmrglh_direct (operands[0], vo, ve));
d13dfec8 3796 }
89d67cca 3797 DONE;
6c332313 3798})
89d67cca
DN
3799
3800(define_expand "vec_widen_smult_hi_v16qi"
3801 [(set (match_operand:V8HI 0 "register_operand" "=v")
3802 (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")
3803 (match_operand:V16QI 2 "register_operand" "v")]
3804 UNSPEC_VMULWHSB))]
3805 "TARGET_ALTIVEC"
89d67cca
DN
3806{
3807 rtx ve = gen_reg_rtx (V8HImode);
3808 rtx vo = gen_reg_rtx (V8HImode);
3809
578acbf0 3810 if (BYTES_BIG_ENDIAN)
d13dfec8
BS
3811 {
3812 emit_insn (gen_altivec_vmulesb (ve, operands[1], operands[2]));
3813 emit_insn (gen_altivec_vmulosb (vo, operands[1], operands[2]));
68d3bacf 3814 emit_insn (gen_altivec_vmrghh_direct (operands[0], ve, vo));
d13dfec8 3815 }
578acbf0 3816 else
d13dfec8
BS
3817 {
3818 emit_insn (gen_altivec_vmulosb (ve, operands[1], operands[2]));
3819 emit_insn (gen_altivec_vmulesb (vo, operands[1], operands[2]));
68d3bacf 3820 emit_insn (gen_altivec_vmrghh_direct (operands[0], vo, ve));
d13dfec8 3821 }
89d67cca 3822 DONE;
6c332313 3823})
89d67cca
DN
3824
3825(define_expand "vec_widen_smult_lo_v16qi"
3826 [(set (match_operand:V8HI 0 "register_operand" "=v")
3827 (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")
3828 (match_operand:V16QI 2 "register_operand" "v")]
3829 UNSPEC_VMULWLSB))]
3830 "TARGET_ALTIVEC"
89d67cca
DN
3831{
3832 rtx ve = gen_reg_rtx (V8HImode);
3833 rtx vo = gen_reg_rtx (V8HImode);
3834
578acbf0 3835 if (BYTES_BIG_ENDIAN)
d13dfec8
BS
3836 {
3837 emit_insn (gen_altivec_vmulesb (ve, operands[1], operands[2]));
3838 emit_insn (gen_altivec_vmulosb (vo, operands[1], operands[2]));
68d3bacf 3839 emit_insn (gen_altivec_vmrglh_direct (operands[0], ve, vo));
d13dfec8 3840 }
578acbf0 3841 else
d13dfec8
BS
3842 {
3843 emit_insn (gen_altivec_vmulosb (ve, operands[1], operands[2]));
3844 emit_insn (gen_altivec_vmulesb (vo, operands[1], operands[2]));
68d3bacf 3845 emit_insn (gen_altivec_vmrglh_direct (operands[0], vo, ve));
d13dfec8 3846 }
89d67cca 3847 DONE;
6c332313 3848})
89d67cca
DN
3849
3850(define_expand "vec_widen_umult_hi_v8hi"
3851 [(set (match_operand:V4SI 0 "register_operand" "=v")
3852 (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
3853 (match_operand:V8HI 2 "register_operand" "v")]
3854 UNSPEC_VMULWHUH))]
3855 "TARGET_ALTIVEC"
89d67cca
DN
3856{
3857 rtx ve = gen_reg_rtx (V4SImode);
3858 rtx vo = gen_reg_rtx (V4SImode);
3859
578acbf0 3860 if (BYTES_BIG_ENDIAN)
d13dfec8
BS
3861 {
3862 emit_insn (gen_altivec_vmuleuh (ve, operands[1], operands[2]));
3863 emit_insn (gen_altivec_vmulouh (vo, operands[1], operands[2]));
0910c516 3864 emit_insn (gen_altivec_vmrghw_direct_v4si (operands[0], ve, vo));
d13dfec8 3865 }
578acbf0 3866 else
d13dfec8
BS
3867 {
3868 emit_insn (gen_altivec_vmulouh (ve, operands[1], operands[2]));
3869 emit_insn (gen_altivec_vmuleuh (vo, operands[1], operands[2]));
0910c516 3870 emit_insn (gen_altivec_vmrghw_direct_v4si (operands[0], vo, ve));
d13dfec8 3871 }
89d67cca 3872 DONE;
6c332313 3873})
89d67cca
DN
3874
3875(define_expand "vec_widen_umult_lo_v8hi"
3876 [(set (match_operand:V4SI 0 "register_operand" "=v")
3877 (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
3878 (match_operand:V8HI 2 "register_operand" "v")]
3879 UNSPEC_VMULWLUH))]
3880 "TARGET_ALTIVEC"
89d67cca
DN
3881{
3882 rtx ve = gen_reg_rtx (V4SImode);
3883 rtx vo = gen_reg_rtx (V4SImode);
3884
578acbf0 3885 if (BYTES_BIG_ENDIAN)
d13dfec8
BS
3886 {
3887 emit_insn (gen_altivec_vmuleuh (ve, operands[1], operands[2]));
3888 emit_insn (gen_altivec_vmulouh (vo, operands[1], operands[2]));
0910c516 3889 emit_insn (gen_altivec_vmrglw_direct_v4si (operands[0], ve, vo));
d13dfec8 3890 }
578acbf0 3891 else
d13dfec8
BS
3892 {
3893 emit_insn (gen_altivec_vmulouh (ve, operands[1], operands[2]));
3894 emit_insn (gen_altivec_vmuleuh (vo, operands[1], operands[2]));
0910c516 3895 emit_insn (gen_altivec_vmrglw_direct_v4si (operands[0], vo, ve));
d13dfec8 3896 }
89d67cca 3897 DONE;
6c332313 3898})
89d67cca
DN
3899
3900(define_expand "vec_widen_smult_hi_v8hi"
3901 [(set (match_operand:V4SI 0 "register_operand" "=v")
3902 (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
3903 (match_operand:V8HI 2 "register_operand" "v")]
3904 UNSPEC_VMULWHSH))]
3905 "TARGET_ALTIVEC"
89d67cca
DN
3906{
3907 rtx ve = gen_reg_rtx (V4SImode);
3908 rtx vo = gen_reg_rtx (V4SImode);
3909
578acbf0 3910 if (BYTES_BIG_ENDIAN)
d13dfec8
BS
3911 {
3912 emit_insn (gen_altivec_vmulesh (ve, operands[1], operands[2]));
3913 emit_insn (gen_altivec_vmulosh (vo, operands[1], operands[2]));
0910c516 3914 emit_insn (gen_altivec_vmrghw_direct_v4si (operands[0], ve, vo));
d13dfec8 3915 }
578acbf0 3916 else
d13dfec8
BS
3917 {
3918 emit_insn (gen_altivec_vmulosh (ve, operands[1], operands[2]));
3919 emit_insn (gen_altivec_vmulesh (vo, operands[1], operands[2]));
0910c516 3920 emit_insn (gen_altivec_vmrghw_direct_v4si (operands[0], vo, ve));
d13dfec8 3921 }
89d67cca 3922 DONE;
6c332313 3923})
89d67cca
DN
3924
3925(define_expand "vec_widen_smult_lo_v8hi"
3926 [(set (match_operand:V4SI 0 "register_operand" "=v")
3927 (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
3928 (match_operand:V8HI 2 "register_operand" "v")]
3929 UNSPEC_VMULWLSH))]
3930 "TARGET_ALTIVEC"
89d67cca
DN
3931{
3932 rtx ve = gen_reg_rtx (V4SImode);
3933 rtx vo = gen_reg_rtx (V4SImode);
3934
578acbf0 3935 if (BYTES_BIG_ENDIAN)
d13dfec8
BS
3936 {
3937 emit_insn (gen_altivec_vmulesh (ve, operands[1], operands[2]));
3938 emit_insn (gen_altivec_vmulosh (vo, operands[1], operands[2]));
0910c516 3939 emit_insn (gen_altivec_vmrglw_direct_v4si (operands[0], ve, vo));
d13dfec8 3940 }
578acbf0 3941 else
d13dfec8
BS
3942 {
3943 emit_insn (gen_altivec_vmulosh (ve, operands[1], operands[2]));
3944 emit_insn (gen_altivec_vmulesh (vo, operands[1], operands[2]));
0910c516 3945 emit_insn (gen_altivec_vmrglw_direct_v4si (operands[0], vo, ve));
d13dfec8 3946 }
89d67cca 3947 DONE;
6c332313 3948})
89d67cca 3949
a5965b52
MM
3950(define_expand "vec_pack_trunc_<mode>"
3951 [(set (match_operand:<VP_small> 0 "register_operand" "=v")
3952 (unspec:<VP_small> [(match_operand:VP 1 "register_operand" "v")
3953 (match_operand:VP 2 "register_operand" "v")]
3954 UNSPEC_VPACK_UNS_UNS_MOD))]
3955 "<VI_unit>"
3956 "")
89d67cca 3957
010f20ee
BS
3958(define_expand "mulv16qi3"
3959 [(set (match_operand:V16QI 0 "register_operand" "=v")
3960 (mult:V16QI (match_operand:V16QI 1 "register_operand" "v")
3961 (match_operand:V16QI 2 "register_operand" "v")))]
3962 "TARGET_ALTIVEC"
010f20ee
BS
3963{
3964 rtx even = gen_reg_rtx (V8HImode);
3965 rtx odd = gen_reg_rtx (V8HImode);
3966 rtx mask = gen_reg_rtx (V16QImode);
3967 rtvec v = rtvec_alloc (16);
3968 int i;
3969
3970 for (i = 0; i < 8; ++i) {
3971 RTVEC_ELT (v, 2 * i)
3972 = gen_rtx_CONST_INT (QImode, BYTES_BIG_ENDIAN ? 2 * i + 1 : 31 - 2 * i);
3973 RTVEC_ELT (v, 2 * i + 1)
3974 = gen_rtx_CONST_INT (QImode, BYTES_BIG_ENDIAN ? 2 * i + 17 : 15 - 2 * i);
3975 }
3976
ff03930a 3977 emit_insn (gen_vec_initv16qiqi (mask, gen_rtx_PARALLEL (V16QImode, v)));
010f20ee
BS
3978 emit_insn (gen_altivec_vmulesb (even, operands[1], operands[2]));
3979 emit_insn (gen_altivec_vmulosb (odd, operands[1], operands[2]));
3980 emit_insn (gen_altivec_vperm_v8hiv16qi (operands[0], even, odd, mask));
3981 DONE;
6c332313 3982})
010f20ee 3983
cb90e18c
CL
3984(define_expand "altivec_vpermxor"
3985 [(use (match_operand:V16QI 0 "register_operand"))
3986 (use (match_operand:V16QI 1 "register_operand"))
3987 (use (match_operand:V16QI 2 "register_operand"))
3988 (use (match_operand:V16QI 3 "register_operand"))]
3989 "TARGET_P8_VECTOR"
3990{
3991 if (!BYTES_BIG_ENDIAN)
3992 {
3993 /* vpermxor indexes the bytes using Big Endian numbering. If LE,
3994 change indexing in operand[3] to BE index. */
3995 rtx be_index = gen_reg_rtx (V16QImode);
3996
3997 emit_insn (gen_one_cmplv16qi2 (be_index, operands[3]));
3998 emit_insn (gen_crypto_vpermxor_v16qi (operands[0], operands[1],
3999 operands[2], be_index));
4000 }
4001 else
4002 emit_insn (gen_crypto_vpermxor_v16qi (operands[0], operands[1],
4003 operands[2], operands[3]));
4004 DONE;
4005})
4006
a72c65c7 4007(define_expand "altivec_negv4sf2"
ad18eed2
SB
4008 [(use (match_operand:V4SF 0 "register_operand"))
4009 (use (match_operand:V4SF 1 "register_operand"))]
70a39602 4010 "TARGET_ALTIVEC"
70a39602
IR
4011{
4012 rtx neg0;
4013
4014 /* Generate [-0.0, -0.0, -0.0, -0.0]. */
8cf0db2b
UW
4015 neg0 = gen_reg_rtx (V4SImode);
4016 emit_insn (gen_altivec_vspltisw (neg0, constm1_rtx));
71d46ca5 4017 emit_insn (gen_vashlv4si3 (neg0, neg0, neg0));
70a39602
IR
4018
4019 /* XOR */
8cf0db2b
UW
4020 emit_insn (gen_xorv4sf3 (operands[0],
4021 gen_lowpart (V4SFmode, neg0), operands[1]));
70a39602
IR
4022
4023 DONE;
6c332313 4024})
98b44b0e 4025
f4eae645 4026;; Vector reverse elements for V16QI V8HI V4SI V4SF
952ac945 4027(define_expand "altivec_vreve<mode>2"
f4eae645
HG
4028 [(set (match_operand:VEC_K 0 "register_operand" "=v")
4029 (unspec:VEC_K [(match_operand:VEC_K 1 "register_operand" "v")]
952ac945
CL
4030 UNSPEC_VREVEV))]
4031 "TARGET_ALTIVEC"
4032{
f4eae645
HG
4033 if (TARGET_P9_VECTOR)
4034 {
4035 if (<MODE>mode == V16QImode)
4036 emit_insn (gen_p9_xxbrq_v16qi (operands[0], operands[1]));
4037 else if (<MODE>mode == V8HImode)
4038 {
4039 rtx subreg1 = simplify_gen_subreg (V1TImode, operands[1],
4040 <MODE>mode, 0);
4041 rtx temp = gen_reg_rtx (V1TImode);
4042 emit_insn (gen_p9_xxbrq_v1ti (temp, subreg1));
4043 rtx subreg2 = simplify_gen_subreg (<MODE>mode, temp,
4044 V1TImode, 0);
4045 emit_insn (gen_p9_xxbrh_v8hi (operands[0], subreg2));
4046 }
4047 else /* V4SI and V4SF. */
4048 {
4049 rtx subreg1 = simplify_gen_subreg (V1TImode, operands[1],
4050 <MODE>mode, 0);
4051 rtx temp = gen_reg_rtx (V1TImode);
4052 emit_insn (gen_p9_xxbrq_v1ti (temp, subreg1));
4053 rtx subreg2 = simplify_gen_subreg (<MODE>mode, temp,
4054 V1TImode, 0);
4055 if (<MODE>mode == V4SImode)
4056 emit_insn (gen_p9_xxbrw_v4si (operands[0], subreg2));
4057 else
4058 emit_insn (gen_p9_xxbrw_v4sf (operands[0], subreg2));
4059 }
4060 DONE;
4061 }
4062
952ac945
CL
4063 int i, j, size, num_elements;
4064 rtvec v = rtvec_alloc (16);
4065 rtx mask = gen_reg_rtx (V16QImode);
4066
4067 size = GET_MODE_UNIT_SIZE (<MODE>mode);
4068 num_elements = GET_MODE_NUNITS (<MODE>mode);
4069
4070 for (j = 0; j < num_elements; j++)
4071 for (i = 0; i < size; i++)
4072 RTVEC_ELT (v, i + j * size)
4073 = GEN_INT (i + (num_elements - 1 - j) * size);
4074
ff03930a 4075 emit_insn (gen_vec_initv16qiqi (mask, gen_rtx_PARALLEL (V16QImode, v)));
952ac945
CL
4076 emit_insn (gen_altivec_vperm_<mode> (operands[0], operands[1],
4077 operands[1], mask));
4078 DONE;
4079})
4080
f4eae645
HG
4081;; Vector reverse elements for V2DI V2DF
4082(define_expand "altivec_vreve<mode>2"
4083 [(set (match_operand:VEC_64 0 "register_operand" "=v")
4084 (unspec:VEC_64 [(match_operand:VEC_64 1 "register_operand" "v")]
4085 UNSPEC_VREVEV))]
4086 "TARGET_ALTIVEC"
4087{
4088 emit_insn (gen_xxswapd_<mode> (operands[0], operands[1]));
4089 DONE;
4090})
4091
0b61703c
AP
4092;; Vector SIMD PEM v2.06c defines LVLX, LVLXL, LVRX, LVRXL,
4093;; STVLX, STVLXL, STVVRX, STVRXL are available only on Cell.
4094(define_insn "altivec_lvlx"
4095 [(set (match_operand:V16QI 0 "register_operand" "=v")
1ba24090 4096 (unspec:V16QI [(match_operand:BLK 1 "memory_operand" "Z")]
0b61703c
AP
4097 UNSPEC_LVLX))]
4098 "TARGET_ALTIVEC && rs6000_cpu == PROCESSOR_CELL"
4099 "lvlx %0,%y1"
4100 [(set_attr "type" "vecload")])
4101
4102(define_insn "altivec_lvlxl"
4103 [(set (match_operand:V16QI 0 "register_operand" "=v")
1ba24090 4104 (unspec:V16QI [(match_operand:BLK 1 "memory_operand" "Z")]
0b61703c
AP
4105 UNSPEC_LVLXL))]
4106 "TARGET_ALTIVEC && rs6000_cpu == PROCESSOR_CELL"
4107 "lvlxl %0,%y1"
4108 [(set_attr "type" "vecload")])
4109
4110(define_insn "altivec_lvrx"
4111 [(set (match_operand:V16QI 0 "register_operand" "=v")
1ba24090 4112 (unspec:V16QI [(match_operand:BLK 1 "memory_operand" "Z")]
0b61703c
AP
4113 UNSPEC_LVRX))]
4114 "TARGET_ALTIVEC && rs6000_cpu == PROCESSOR_CELL"
4115 "lvrx %0,%y1"
4116 [(set_attr "type" "vecload")])
4117
4118(define_insn "altivec_lvrxl"
4119 [(set (match_operand:V16QI 0 "register_operand" "=v")
1ba24090 4120 (unspec:V16QI [(match_operand:BLK 1 "memory_operand" "Z")]
0b61703c
AP
4121 UNSPEC_LVRXL))]
4122 "TARGET_ALTIVEC && rs6000_cpu == PROCESSOR_CELL"
4123 "lvrxl %0,%y1"
4124 [(set_attr "type" "vecload")])
4125
4126(define_insn "altivec_stvlx"
4127 [(parallel
fbc932e7
EW
4128 [(set (match_operand:V16QI 0 "memory_operand" "=Z")
4129 (match_operand:V16QI 1 "register_operand" "v"))
0b61703c
AP
4130 (unspec [(const_int 0)] UNSPEC_STVLX)])]
4131 "TARGET_ALTIVEC && rs6000_cpu == PROCESSOR_CELL"
4132 "stvlx %1,%y0"
4133 [(set_attr "type" "vecstore")])
4134
4135(define_insn "altivec_stvlxl"
4136 [(parallel
fbc932e7
EW
4137 [(set (match_operand:V16QI 0 "memory_operand" "=Z")
4138 (match_operand:V16QI 1 "register_operand" "v"))
0b61703c
AP
4139 (unspec [(const_int 0)] UNSPEC_STVLXL)])]
4140 "TARGET_ALTIVEC && rs6000_cpu == PROCESSOR_CELL"
4141 "stvlxl %1,%y0"
4142 [(set_attr "type" "vecstore")])
4143
4144(define_insn "altivec_stvrx"
4145 [(parallel
fbc932e7
EW
4146 [(set (match_operand:V16QI 0 "memory_operand" "=Z")
4147 (match_operand:V16QI 1 "register_operand" "v"))
0b61703c
AP
4148 (unspec [(const_int 0)] UNSPEC_STVRX)])]
4149 "TARGET_ALTIVEC && rs6000_cpu == PROCESSOR_CELL"
4150 "stvrx %1,%y0"
4151 [(set_attr "type" "vecstore")])
4152
4153(define_insn "altivec_stvrxl"
4154 [(parallel
fbc932e7
EW
4155 [(set (match_operand:V16QI 0 "memory_operand" "=Z")
4156 (match_operand:V16QI 1 "register_operand" "v"))
0b61703c
AP
4157 (unspec [(const_int 0)] UNSPEC_STVRXL)])]
4158 "TARGET_ALTIVEC && rs6000_cpu == PROCESSOR_CELL"
4159 "stvrxl %1,%y0"
4160 [(set_attr "type" "vecstore")])
4161
4643b716 4162(define_expand "vec_unpacks_float_hi_v8hi"
ad18eed2
SB
4163 [(set (match_operand:V4SF 0 "register_operand")
4164 (unspec:V4SF [(match_operand:V8HI 1 "register_operand")]
4643b716
DN
4165 UNSPEC_VUPKHS_V4SF))]
4166 "TARGET_ALTIVEC"
4643b716
DN
4167{
4168 rtx tmp = gen_reg_rtx (V4SImode);
4169
4170 emit_insn (gen_vec_unpacks_hi_v8hi (tmp, operands[1]));
4171 emit_insn (gen_altivec_vcfsx (operands[0], tmp, const0_rtx));
4172 DONE;
6c332313 4173})
4643b716
DN
4174
4175(define_expand "vec_unpacks_float_lo_v8hi"
ad18eed2
SB
4176 [(set (match_operand:V4SF 0 "register_operand")
4177 (unspec:V4SF [(match_operand:V8HI 1 "register_operand")]
4643b716
DN
4178 UNSPEC_VUPKLS_V4SF))]
4179 "TARGET_ALTIVEC"
4643b716
DN
4180{
4181 rtx tmp = gen_reg_rtx (V4SImode);
4182
4183 emit_insn (gen_vec_unpacks_lo_v8hi (tmp, operands[1]));
4184 emit_insn (gen_altivec_vcfsx (operands[0], tmp, const0_rtx));
4185 DONE;
6c332313 4186})
4643b716
DN
4187
4188(define_expand "vec_unpacku_float_hi_v8hi"
ad18eed2
SB
4189 [(set (match_operand:V4SF 0 "register_operand")
4190 (unspec:V4SF [(match_operand:V8HI 1 "register_operand")]
4643b716
DN
4191 UNSPEC_VUPKHU_V4SF))]
4192 "TARGET_ALTIVEC"
4643b716
DN
4193{
4194 rtx tmp = gen_reg_rtx (V4SImode);
4195
4196 emit_insn (gen_vec_unpacku_hi_v8hi (tmp, operands[1]));
4197 emit_insn (gen_altivec_vcfux (operands[0], tmp, const0_rtx));
4198 DONE;
6c332313 4199})
4643b716
DN
4200
4201(define_expand "vec_unpacku_float_lo_v8hi"
ad18eed2
SB
4202 [(set (match_operand:V4SF 0 "register_operand")
4203 (unspec:V4SF [(match_operand:V8HI 1 "register_operand")]
4643b716
DN
4204 UNSPEC_VUPKLU_V4SF))]
4205 "TARGET_ALTIVEC"
4643b716
DN
4206{
4207 rtx tmp = gen_reg_rtx (V4SImode);
4208
4209 emit_insn (gen_vec_unpacku_lo_v8hi (tmp, operands[1]));
4210 emit_insn (gen_altivec_vcfux (operands[0], tmp, const0_rtx));
4211 DONE;
6c332313 4212})
0bd62dca
MM
4213
4214\f
;; Power8/power9 vector instructions encoded as Altivec instructions

;; Vector count leading zeros
(define_insn "*p8v_clz<mode>2"
  [(set (match_operand:VI2 0 "register_operand" "=v")
	(clz:VI2 (match_operand:VI2 1 "register_operand" "v")))]
  "TARGET_P8_VECTOR"
  "vclz<wd> %0,%1"
  [(set_attr "type" "vecsimple")])

;; Vector absolute difference unsigned
(define_expand "vadu<mode>3"
  [(set (match_operand:VI 0 "register_operand")
	(unspec:VI [(match_operand:VI 1 "register_operand")
		    (match_operand:VI 2 "register_operand")]
		   UNSPEC_VADU))]
  "TARGET_P9_VECTOR")

;; Vector absolute difference unsigned
(define_insn "p9_vadu<mode>3"
  [(set (match_operand:VI 0 "register_operand" "=v")
	(unspec:VI [(match_operand:VI 1 "register_operand" "v")
		    (match_operand:VI 2 "register_operand" "v")]
		   UNSPEC_VADU))]
  "TARGET_P9_VECTOR"
  "vabsdu<wd> %0,%1,%2"
  [(set_attr "type" "vecsimple")])

;; Vector count trailing zeros
(define_insn "*p9v_ctz<mode>2"
  [(set (match_operand:VI2 0 "register_operand" "=v")
	(ctz:VI2 (match_operand:VI2 1 "register_operand" "v")))]
  "TARGET_P9_VECTOR"
  "vctz<wd> %0,%1"
  [(set_attr "type" "vecsimple")])

;; Vector population count
(define_insn "*p8v_popcount<mode>2"
  [(set (match_operand:VI2 0 "register_operand" "=v")
	(popcount:VI2 (match_operand:VI2 1 "register_operand" "v")))]
  "TARGET_P8_VECTOR"
  "vpopcnt<wd> %0,%1"
  [(set_attr "type" "vecsimple")])

;; Vector parity
(define_insn "rs6000_vprtyb<mode>2"
  [(set (match_operand:VEC_IP 0 "register_operand" "=v")
	(unspec:VEC_IP
	  [(match_operand:VEC_IP 1 "register_operand" "v")]
	  UNSPEC_PARITY))]
  "TARGET_P9_VECTOR"
  "vprtyb<wd> %0,%1"
  [(set_attr "type" "vecsimple")])

;; Vector Gather Bits by Bytes by Doubleword
(define_insn "p8v_vgbbd"
  [(set (match_operand:V16QI 0 "register_operand" "=v")
	(unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")]
		      UNSPEC_VGBBD))]
  "TARGET_P8_VECTOR"
  "vgbbd %0,%1"
  [(set_attr "type" "vecsimple")])
a16a872d
MM
4277
4278\f
;; 128-bit binary integer arithmetic
;; We have a special container type (V1TImode) to allow operations using the
;; ISA 2.07 128-bit binary support to target the VMX/altivec registers without
;; having to worry about the register allocator deciding GPRs are better.

;; 128-bit add.
(define_insn "altivec_vadduqm"
  [(set (match_operand:V1TI 0 "register_operand" "=v")
	(plus:V1TI (match_operand:V1TI 1 "register_operand" "v")
		   (match_operand:V1TI 2 "register_operand" "v")))]
  "TARGET_VADDUQM"
  "vadduqm %0,%1,%2"
  [(set_attr "type" "vecsimple")])

;; 128-bit add, write carry out.
(define_insn "altivec_vaddcuq"
  [(set (match_operand:V1TI 0 "register_operand" "=v")
	(unspec:V1TI [(match_operand:V1TI 1 "register_operand" "v")
		      (match_operand:V1TI 2 "register_operand" "v")]
		     UNSPEC_VADDCUQ))]
  "TARGET_VADDUQM"
  "vaddcuq %0,%1,%2"
  [(set_attr "type" "vecsimple")])

;; 128-bit subtract.
(define_insn "altivec_vsubuqm"
  [(set (match_operand:V1TI 0 "register_operand" "=v")
	(minus:V1TI (match_operand:V1TI 1 "register_operand" "v")
		    (match_operand:V1TI 2 "register_operand" "v")))]
  "TARGET_VADDUQM"
  "vsubuqm %0,%1,%2"
  [(set_attr "type" "vecsimple")])

;; 128-bit subtract, write carry out.
(define_insn "altivec_vsubcuq"
  [(set (match_operand:V1TI 0 "register_operand" "=v")
	(unspec:V1TI [(match_operand:V1TI 1 "register_operand" "v")
		      (match_operand:V1TI 2 "register_operand" "v")]
		     UNSPEC_VSUBCUQ))]
  "TARGET_VADDUQM"
  "vsubcuq %0,%1,%2"
  [(set_attr "type" "vecsimple")])

;; 128-bit add extended (carry-in in operand 3).
(define_insn "altivec_vaddeuqm"
  [(set (match_operand:V1TI 0 "register_operand" "=v")
	(unspec:V1TI [(match_operand:V1TI 1 "register_operand" "v")
		      (match_operand:V1TI 2 "register_operand" "v")
		      (match_operand:V1TI 3 "register_operand" "v")]
		     UNSPEC_VADDEUQM))]
  "TARGET_VADDUQM"
  "vaddeuqm %0,%1,%2,%3"
  [(set_attr "type" "vecsimple")])

;; 128-bit add extended, write carry out.
(define_insn "altivec_vaddecuq"
  [(set (match_operand:V1TI 0 "register_operand" "=v")
	(unspec:V1TI [(match_operand:V1TI 1 "register_operand" "v")
		      (match_operand:V1TI 2 "register_operand" "v")
		      (match_operand:V1TI 3 "register_operand" "v")]
		     UNSPEC_VADDECUQ))]
  "TARGET_VADDUQM"
  "vaddecuq %0,%1,%2,%3"
  [(set_attr "type" "vecsimple")])

;; 128-bit subtract extended (carry-in in operand 3).
(define_insn "altivec_vsubeuqm"
  [(set (match_operand:V1TI 0 "register_operand" "=v")
	(unspec:V1TI [(match_operand:V1TI 1 "register_operand" "v")
		      (match_operand:V1TI 2 "register_operand" "v")
		      (match_operand:V1TI 3 "register_operand" "v")]
		     UNSPEC_VSUBEUQM))]
  "TARGET_VADDUQM"
  "vsubeuqm %0,%1,%2,%3"
  [(set_attr "type" "vecsimple")])

;; 128-bit subtract extended, write carry out.
(define_insn "altivec_vsubecuq"
  [(set (match_operand:V1TI 0 "register_operand" "=v")
	(unspec:V1TI [(match_operand:V1TI 1 "register_operand" "v")
		      (match_operand:V1TI 2 "register_operand" "v")
		      (match_operand:V1TI 3 "register_operand" "v")]
		     UNSPEC_VSUBECUQ))]
  "TARGET_VADDUQM"
  "vsubecuq %0,%1,%2,%3"
  [(set_attr "type" "vecsimple")])
a16a872d 4357
117f16fb
MM
;; We use V2DI as the output type to simplify converting the permute
;; bits into an integer
(define_insn "altivec_vbpermq"
  [(set (match_operand:V2DI 0 "register_operand" "=v")
	(unspec:V2DI [(match_operand:V16QI 1 "register_operand" "v")
		      (match_operand:V16QI 2 "register_operand" "v")]
		     UNSPEC_VBPERMQ))]
  "TARGET_P8_VECTOR"
  "vbpermq %0,%1,%2"
  [(set_attr "type" "vecperm")])

; One of the vector API interfaces requires returning vector unsigned char.
(define_insn "altivec_vbpermq2"
  [(set (match_operand:V16QI 0 "register_operand" "=v")
	(unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
		       (match_operand:V16QI 2 "register_operand" "v")]
		      UNSPEC_VBPERMQ))]
  "TARGET_P8_VECTOR"
  "vbpermq %0,%1,%2"
  [(set_attr "type" "vecperm")])

;; Vector bit permute doubleword (ISA 3.0).
(define_insn "altivec_vbpermd"
  [(set (match_operand:V2DI 0 "register_operand" "=v")
	(unspec:V2DI [(match_operand:V2DI 1 "register_operand" "v")
		      (match_operand:V16QI 2 "register_operand" "v")]
		     UNSPEC_VBPERMD))]
  "TARGET_P9_VECTOR"
  "vbpermd %0,%1,%2"
  [(set_attr "type" "vecsimple")])
06b39289 4387
962b9668
BS
;; Support for SAD (sum of absolute differences).

;; Due to saturating semantics, we can't combine the sum-across
;; with the vector accumulate in vsum4ubs.  A vadduwm is needed.
(define_expand "usadv16qi"
  [(use (match_operand:V4SI 0 "register_operand"))
   (use (match_operand:V16QI 1 "register_operand"))
   (use (match_operand:V16QI 2 "register_operand"))
   (use (match_operand:V4SI 3 "register_operand"))]
  "TARGET_P9_VECTOR"
{
  rtx absd = gen_reg_rtx (V16QImode);
  rtx zero = gen_reg_rtx (V4SImode);
  rtx psum = gen_reg_rtx (V4SImode);

  emit_insn (gen_p9_vaduv16qi3 (absd, operands[1], operands[2]));
  emit_insn (gen_altivec_vspltisw (zero, const0_rtx));
  emit_insn (gen_altivec_vsum4ubs (psum, absd, zero));
  emit_insn (gen_addv4si3 (operands[0], psum, operands[3]));
  DONE;
})

;; Since vsum4shs is saturating and further performs signed
;; arithmetic, we can't combine the sum-across with the vector
;; accumulate in vsum4shs.  A vadduwm is needed.
(define_expand "usadv8hi"
  [(use (match_operand:V4SI 0 "register_operand"))
   (use (match_operand:V8HI 1 "register_operand"))
   (use (match_operand:V8HI 2 "register_operand"))
   (use (match_operand:V4SI 3 "register_operand"))]
  "TARGET_P9_VECTOR"
{
  rtx absd = gen_reg_rtx (V8HImode);
  rtx zero = gen_reg_rtx (V4SImode);
  rtx psum = gen_reg_rtx (V4SImode);

  emit_insn (gen_p9_vaduv8hi3 (absd, operands[1], operands[2]));
  emit_insn (gen_altivec_vspltisw (zero, const0_rtx));
  emit_insn (gen_altivec_vsum4shs (psum, absd, zero));
  emit_insn (gen_addv4si3 (operands[0], psum, operands[3]));
  DONE;
})
4430
06b39289
MM
;; Decimal Integer operations
(define_int_iterator UNSPEC_BCD_ADD_SUB [UNSPEC_BCDADD UNSPEC_BCDSUB])

(define_int_attr bcd_add_sub [(UNSPEC_BCDADD "add")
			      (UNSPEC_BCDSUB "sub")])

(define_code_iterator BCD_TEST [eq lt le gt ge unordered])
(define_mode_iterator VBCD [V1TI V16QI])

;; BCD add/subtract; operand 3 selects the preferred sign of a zero result.
;; The instruction always sets CR6, which we model as a clobber here.
(define_insn "bcd<bcd_add_sub>_<mode>"
  [(set (match_operand:VBCD 0 "register_operand" "=v")
	(unspec:VBCD [(match_operand:VBCD 1 "register_operand" "v")
		      (match_operand:VBCD 2 "register_operand" "v")
		      (match_operand:QI 3 "const_0_to_1_operand" "n")]
		     UNSPEC_BCD_ADD_SUB))
   (clobber (reg:CCFP CR6_REGNO))]
  "TARGET_P8_VECTOR"
  "bcd<bcd_add_sub>. %0,%1,%2,%3"
  [(set_attr "type" "vecsimple")])

;; Use a floating point type (V2DFmode) for the compare to set CR6 so that we
;; can use the unordered test for BCD nans and add/subtracts that overflow.  An
;; UNORDERED test on an integer type (like V1TImode) is not defined.  The type
;; probably should be one that can go in the VMX (Altivec) registers, so we
;; can't use DDmode or DFmode.
(define_insn "*bcd<bcd_add_sub>_test_<mode>"
  [(set (reg:CCFP CR6_REGNO)
	(compare:CCFP
	 (unspec:V2DF [(match_operand:VBCD 1 "register_operand" "v")
		       (match_operand:VBCD 2 "register_operand" "v")
		       (match_operand:QI 3 "const_0_to_1_operand" "i")]
		      UNSPEC_BCD_ADD_SUB)
	 (match_operand:V2DF 4 "zero_constant" "j")))
   (clobber (match_scratch:VBCD 0 "=v"))]
  "TARGET_P8_VECTOR"
  "bcd<bcd_add_sub>. %0,%1,%2,%3"
  [(set_attr "type" "vecsimple")])

;; Variant that keeps both the BCD result and the CR6 comparison.
(define_insn "*bcd<bcd_add_sub>_test2_<mode>"
  [(set (match_operand:VBCD 0 "register_operand" "=v")
	(unspec:VBCD [(match_operand:VBCD 1 "register_operand" "v")
		      (match_operand:VBCD 2 "register_operand" "v")
		      (match_operand:QI 3 "const_0_to_1_operand" "i")]
		     UNSPEC_BCD_ADD_SUB))
   (set (reg:CCFP CR6_REGNO)
	(compare:CCFP
	 (unspec:V2DF [(match_dup 1)
		       (match_dup 2)
		       (match_dup 3)]
		      UNSPEC_BCD_ADD_SUB)
	 (match_operand:V2DF 4 "zero_constant" "j")))]
  "TARGET_P8_VECTOR"
  "bcd<bcd_add_sub>. %0,%1,%2,%3"
  [(set_attr "type" "vecsimple")])
06b39289 4485
ed07d681
KN
;; Vector centrifuge doubleword.
(define_insn "vcfuged"
  [(set (match_operand:V2DI 0 "altivec_register_operand" "=v")
	(unspec:V2DI [(match_operand:V2DI 1 "altivec_register_operand" "v")
		      (match_operand:V2DI 2 "altivec_register_operand" "v")]
		     UNSPEC_VCFUGED))]
  "TARGET_POWER10"
  "vcfuged %0,%1,%2"
  [(set_attr "type" "vecsimple")])

;; Vector count leading zeros under bit mask, doubleword.
(define_insn "vclzdm"
  [(set (match_operand:V2DI 0 "altivec_register_operand" "=v")
	(unspec:V2DI [(match_operand:V2DI 1 "altivec_register_operand" "v")
		      (match_operand:V2DI 2 "altivec_register_operand" "v")]
		     UNSPEC_VCLZDM))]
  "TARGET_POWER10"
  "vclzdm %0,%1,%2"
  [(set_attr "type" "vecsimple")])

;; Vector count trailing zeros under bit mask, doubleword.
(define_insn "vctzdm"
  [(set (match_operand:V2DI 0 "altivec_register_operand" "=v")
	(unspec:V2DI [(match_operand:V2DI 1 "altivec_register_operand" "v")
		      (match_operand:V2DI 2 "altivec_register_operand" "v")]
		     UNSPEC_VCTZDM))]
  "TARGET_POWER10"
  "vctzdm %0,%1,%2"
  [(set_attr "type" "vecsimple")])

;; Vector parallel bits deposit, doubleword.
(define_insn "vpdepd"
  [(set (match_operand:V2DI 0 "altivec_register_operand" "=v")
	(unspec:V2DI [(match_operand:V2DI 1 "altivec_register_operand" "v")
		      (match_operand:V2DI 2 "altivec_register_operand" "v")]
		     UNSPEC_VPDEPD))]
  "TARGET_POWER10"
  "vpdepd %0,%1,%2"
  [(set_attr "type" "vecsimple")])

;; Vector parallel bits extract, doubleword.
(define_insn "vpextd"
  [(set (match_operand:V2DI 0 "altivec_register_operand" "=v")
	(unspec:V2DI [(match_operand:V2DI 1 "altivec_register_operand" "v")
		      (match_operand:V2DI 2 "altivec_register_operand" "v")]
		     UNSPEC_VPEXTD))]
  "TARGET_POWER10"
  "vpextd %0,%1,%2"
  [(set_attr "type" "vecsimple")])
4530
7c00c559
KN
;; Vector gather every Nth bit; operand 2 is the stride (2..7).
(define_insn "vgnb"
  [(set (match_operand:DI 0 "register_operand" "=r")
	(unspec:DI [(match_operand:V2DI 1 "altivec_register_operand" "v")
		    (match_operand:QI 2 "u3bit_cint_operand" "n")]
		   UNSPEC_VGNB))]
  "TARGET_POWER10"
  "vgnb %0,%1,%2"
  [(set_attr "type" "vecsimple")])

;; Vector clear leftmost bytes.  The hardware semantics are endian-relative,
;; so on little-endian we emit the mirror instruction to preserve the
;; element-order view the builtin presents.
(define_insn "vclrlb"
  [(set (match_operand:V16QI 0 "altivec_register_operand" "=v")
	(unspec:V16QI [(match_operand:V16QI 1 "altivec_register_operand" "v")
		       (match_operand:SI 2 "gpc_reg_operand" "r")]
		      UNSPEC_VCLRLB))]
  "TARGET_POWER10"
{
  if (BYTES_BIG_ENDIAN)
    return "vclrlb %0,%1,%2";
  else
    return "vclrrb %0,%1,%2";
}
  [(set_attr "type" "vecsimple")])

;; Vector clear rightmost bytes; mirror of vclrlb on little-endian.
(define_insn "vclrrb"
  [(set (match_operand:V16QI 0 "altivec_register_operand" "=v")
	(unspec:V16QI [(match_operand:V16QI 1 "altivec_register_operand" "v")
		       (match_operand:SI 2 "gpc_reg_operand" "r")]
		      UNSPEC_VCLRRB))]
  "TARGET_POWER10"
{
  if (BYTES_BIG_ENDIAN)
    return "vclrrb %0,%1,%2";
  else
    return "vclrlb %0,%1,%2";
}
  [(set_attr "type" "vecsimple")])
a1821a24 4567
;; Expand a BCD add/sub whose only use is the CR6 condition; operand 0
;; receives the SImode result of the requested comparison against zero.
(define_expand "bcd<bcd_add_sub>_<code>_<mode>"
  [(parallel [(set (reg:CCFP CR6_REGNO)
		   (compare:CCFP
		    (unspec:V2DF [(match_operand:VBCD 1 "register_operand")
				  (match_operand:VBCD 2 "register_operand")
				  (match_operand:QI 3 "const_0_to_1_operand")]
				 UNSPEC_BCD_ADD_SUB)
		    (match_dup 4)))
	      (clobber (match_scratch:VBCD 5))])
   (set (match_operand:SI 0 "register_operand")
	(BCD_TEST:SI (reg:CCFP CR6_REGNO)
		     (const_int 0)))]
  "TARGET_P8_VECTOR"
{
  operands[4] = CONST0_RTX (V2DFmode);
})

;; Validity test: bcdadd of a value with itself sets the CR6 "invalid"
;; (unordered) bit when operand 1 is not a valid BCD number.
(define_insn "*bcdinvalid_<mode>"
  [(set (reg:CCFP CR6_REGNO)
	(compare:CCFP
	 (unspec:V2DF [(match_operand:VBCD 1 "register_operand" "v")]
		      UNSPEC_BCDADD)
	 (match_operand:V2DF 2 "zero_constant" "j")))
   (clobber (match_scratch:VBCD 0 "=v"))]
  "TARGET_P8_VECTOR"
  "bcdadd. %0,%1,%1,0"
  [(set_attr "type" "vecsimple")])

(define_expand "bcdinvalid_<mode>"
  [(parallel [(set (reg:CCFP CR6_REGNO)
		   (compare:CCFP
		    (unspec:V2DF [(match_operand:VBCD 1 "register_operand")]
				 UNSPEC_BCDADD)
		    (match_dup 2)))
	      (clobber (match_scratch:VBCD 3))])
   (set (match_operand:SI 0 "register_operand")
	(unordered:SI (reg:CCFP CR6_REGNO)
		      (const_int 0)))]
  "TARGET_P8_VECTOR"
{
  operands[2] = CONST0_RTX (V2DFmode);
})

;; BCD shift; operand 3 selects round vs. truncate.
(define_insn "bcdshift_v16qi"
  [(set (match_operand:V16QI 0 "register_operand" "=v")
	(unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
		       (match_operand:V16QI 2 "register_operand" "v")
		       (match_operand:QI 3 "const_0_to_1_operand" "n")]
		      UNSPEC_BCDSHIFT))
   (clobber (reg:CCFP CR6_REGNO))]
  "TARGET_P8_VECTOR"
  "bcds. %0,%1,%2,%3"
  [(set_attr "type" "vecsimple")])

;; Multiply a BCD value by 10 = shift left by one digit (shift count +1).
(define_expand "bcdmul10_v16qi"
  [(set (match_operand:V16QI 0 "register_operand")
	(unspec:V16QI [(match_operand:V16QI 1 "register_operand")]
		      UNSPEC_BCDSHIFT))
   (clobber (reg:CCFP CR6_REGNO))]
  "TARGET_P9_VECTOR"
{
  rtx one = gen_reg_rtx (V16QImode);

  emit_insn (gen_altivec_vspltisb (one, const1_rtx));
  emit_insn (gen_bcdshift_v16qi (operands[0], one, operands[1], const0_rtx));

  DONE;
})

;; Divide a BCD value by 10 = shift right by one digit (shift count -1).
(define_expand "bcddiv10_v16qi"
  [(set (match_operand:V16QI 0 "register_operand")
	(unspec:V16QI [(match_operand:V16QI 1 "register_operand")]
		      UNSPEC_BCDSHIFT))
   (clobber (reg:CCFP CR6_REGNO))]
  "TARGET_P9_VECTOR"
{
  rtx one = gen_reg_rtx (V16QImode);

  emit_insn (gen_altivec_vspltisb (one, constm1_rtx));
  emit_insn (gen_bcdshift_v16qi (operands[0], one, operands[1], const0_rtx));

  DONE;
})
4651
4652
06b39289
MM
;; Peephole2 pattern to combine a bcdadd/bcdsub that calculates the value and
;; the bcdadd/bcdsub that tests the value.  The combiner won't work since
;; CR6 is a hard coded register.  Unfortunately, all of the Altivec predicate
;; support is hard coded to use the fixed register CR6 instead of creating
;; a register class for CR6.

(define_peephole2
  [(parallel [(set (match_operand:V1TI 0 "register_operand")
		   (unspec:V1TI [(match_operand:V1TI 1 "register_operand")
				 (match_operand:V1TI 2 "register_operand")
				 (match_operand:QI 3 "const_0_to_1_operand")]
				UNSPEC_BCD_ADD_SUB))
	      (clobber (reg:CCFP CR6_REGNO))])
   (parallel [(set (reg:CCFP CR6_REGNO)
		   (compare:CCFP
		    (unspec:V2DF [(match_dup 1)
				  (match_dup 2)
				  (match_dup 3)]
				 UNSPEC_BCD_ADD_SUB)
		    (match_operand:V2DF 4 "zero_constant")))
	      (clobber (match_operand:V1TI 5 "register_operand"))])]
  "TARGET_P8_VECTOR"
  [(parallel [(set (match_dup 0)
		   (unspec:V1TI [(match_dup 1)
				 (match_dup 2)
				 (match_dup 3)]
				UNSPEC_BCD_ADD_SUB))
	      (set (reg:CCFP CR6_REGNO)
		   (compare:CCFP
		    (unspec:V2DF [(match_dup 1)
				  (match_dup 2)
				  (match_dup 3)]
				 UNSPEC_BCD_ADD_SUB)
		    (match_dup 4)))])])