;; AltiVec patterns.
;; Copyright (C) 2002-2022 Free Software Foundation, Inc.
;; Contributed by Aldy Hernandez (aldy@quesejoda.com)

;; This file is part of GCC.

;; GCC is free software; you can redistribute it and/or modify it
;; under the terms of the GNU General Public License as published
;; by the Free Software Foundation; either version 3, or (at your
;; option) any later version.

;; GCC is distributed in the hope that it will be useful, but WITHOUT
;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
;; or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
;; License for more details.

;; You should have received a copy of the GNU General Public License
;; along with GCC; see the file COPYING3.  If not see
;; <http://www.gnu.org/licenses/>.

(define_c_enum "unspec"
  [UNSPEC_VCMPBFP
   UNSPEC_VMSUMU
   UNSPEC_VMSUMUDM
   UNSPEC_VMSUMM
   UNSPEC_VMSUMSHM
   UNSPEC_VMSUMUHS
   UNSPEC_VMSUMSHS
   UNSPEC_VMHADDSHS
   UNSPEC_VMHRADDSHS
   UNSPEC_VADDCUW
   UNSPEC_VAVGU
   UNSPEC_VAVGS
   UNSPEC_VMULEUB
   UNSPEC_VMULESB
   UNSPEC_VMULEUH
   UNSPEC_VMULESH
   UNSPEC_VMULEUW
   UNSPEC_VMULESW
   UNSPEC_VMULEUD
   UNSPEC_VMULESD
   UNSPEC_VMULOUB
   UNSPEC_VMULOSB
   UNSPEC_VMULOUH
   UNSPEC_VMULOSH
   UNSPEC_VMULOUW
   UNSPEC_VMULOSW
   UNSPEC_VMULOUD
   UNSPEC_VMULOSD
   UNSPEC_VPKPX
   UNSPEC_VPACK_SIGN_SIGN_SAT
   UNSPEC_VPACK_SIGN_UNS_SAT
   UNSPEC_VPACK_UNS_UNS_SAT
   UNSPEC_VPACK_UNS_UNS_MOD
   UNSPEC_VPACK_UNS_UNS_MOD_DIRECT
   UNSPEC_VREVEV
   UNSPEC_VSLV4SI
   UNSPEC_VSLO
   UNSPEC_VSR
   UNSPEC_VSRO
   UNSPEC_VSUBCUW
   UNSPEC_VSUM4UBS
   UNSPEC_VSUM4S
   UNSPEC_VSUM2SWS
   UNSPEC_VSUMSWS
   UNSPEC_VPERM
   UNSPEC_VPERMR
   UNSPEC_VPERM_UNS
   UNSPEC_VRFIN
   UNSPEC_VCFUX
   UNSPEC_VCFSX
   UNSPEC_VCTUXS
   UNSPEC_VCTSXS
   UNSPEC_VLOGEFP
   UNSPEC_VEXPTEFP
   UNSPEC_VSLDOI
   UNSPEC_VUNPACK_HI_SIGN
   UNSPEC_VUNPACK_LO_SIGN
   UNSPEC_VUNPACK_HI_SIGN_DIRECT
   UNSPEC_VUNPACK_LO_SIGN_DIRECT
   UNSPEC_VUPKHPX
   UNSPEC_VUPKLPX
   UNSPEC_CONVERT_4F32_8I16
   UNSPEC_CONVERT_4F32_8F16
   UNSPEC_DST
   UNSPEC_DSTT
   UNSPEC_DSTST
   UNSPEC_DSTSTT
   UNSPEC_LVSL
   UNSPEC_LVSR
   UNSPEC_LVE
   UNSPEC_STVX
   UNSPEC_STVXL
   UNSPEC_STVE
   UNSPEC_SET_VSCR
   UNSPEC_GET_VRSAVE
   UNSPEC_LVX
   UNSPEC_REDUC_PLUS
   UNSPEC_VECSH
   UNSPEC_EXTEVEN_V4SI
   UNSPEC_EXTEVEN_V8HI
   UNSPEC_EXTEVEN_V16QI
   UNSPEC_EXTEVEN_V4SF
   UNSPEC_EXTODD_V4SI
   UNSPEC_EXTODD_V8HI
   UNSPEC_EXTODD_V16QI
   UNSPEC_EXTODD_V4SF
   UNSPEC_INTERHI_V4SI
   UNSPEC_INTERHI_V8HI
   UNSPEC_INTERHI_V16QI
   UNSPEC_INTERLO_V4SI
   UNSPEC_INTERLO_V8HI
   UNSPEC_INTERLO_V16QI
   UNSPEC_LVLX
   UNSPEC_LVLXL
   UNSPEC_LVRX
   UNSPEC_LVRXL
   UNSPEC_STVLX
   UNSPEC_STVLXL
   UNSPEC_STVRX
   UNSPEC_STVRXL
   UNSPEC_VADU
   UNSPEC_VSLV
   UNSPEC_VSRV
   UNSPEC_VMULWHUB
   UNSPEC_VMULWLUB
   UNSPEC_VMULWHSB
   UNSPEC_VMULWLSB
   UNSPEC_VMULWHUH
   UNSPEC_VMULWLUH
   UNSPEC_VMULWHSH
   UNSPEC_VMULWLSH
   UNSPEC_VUPKHU
   UNSPEC_VUPKLU
   UNSPEC_VPERMSI
   UNSPEC_VPERMHI
   UNSPEC_INTERHI
   UNSPEC_INTERLO
   UNSPEC_VUPKHS_V4SF
   UNSPEC_VUPKLS_V4SF
   UNSPEC_VUPKHU_V4SF
   UNSPEC_VUPKLU_V4SF
   UNSPEC_VGBBD
   UNSPEC_VSPLT_DIRECT
   UNSPEC_VMRGEW_DIRECT
   UNSPEC_VMRGOW_DIRECT
   UNSPEC_VSUMSWS_DIRECT
   UNSPEC_VADDCUQ
   UNSPEC_VADDEUQM
   UNSPEC_VADDECUQ
   UNSPEC_VSUBCUQ
   UNSPEC_VSUBEUQM
   UNSPEC_VSUBECUQ
   UNSPEC_VBPERMQ
   UNSPEC_VBPERMD
   UNSPEC_BCDADD
   UNSPEC_BCDSUB
   UNSPEC_BCD_OVERFLOW
   UNSPEC_BCDSHIFT
   UNSPEC_VRLMI
   UNSPEC_VRLNM
   UNSPEC_VCFUGED
   UNSPEC_VCLZDM
   UNSPEC_VCTZDM
   UNSPEC_VGNB
   UNSPEC_VPDEPD
   UNSPEC_VPEXTD
   UNSPEC_VCLRLB
   UNSPEC_VCLRRB
   UNSPEC_VSTRIR
   UNSPEC_VSTRIL
   UNSPEC_SLDB
   UNSPEC_SRDB
   ])

(define_c_enum "unspecv"
  [UNSPECV_SET_VRSAVE
   UNSPECV_MTVSCR
   UNSPECV_MFVSCR
   UNSPECV_DSSALL
   UNSPECV_DSS
   ])

;; Vec int modes
(define_mode_iterator VI [V4SI V8HI V16QI])
;; Like VI, but add ISA 2.07 integer vector ops
(define_mode_iterator VI2 [V4SI V8HI V16QI V2DI])
;; Short vec int modes
(define_mode_iterator VIshort [V8HI V16QI])
;; Vec float modes
(define_mode_iterator VF [V4SF])
;; Vec modes, pity mode iterators are not composable
(define_mode_iterator V [V4SI V8HI V16QI V4SF])
;; Vec modes for move/logical/permute ops, include vector types for move not
;; otherwise handled by altivec (v2df, v2di, ti)
(define_mode_iterator VM [V4SI
                          V8HI
                          V16QI
                          V4SF
                          V2DF
                          V2DI
                          V1TI
                          TI
                          (KF "FLOAT128_VECTOR_P (KFmode)")
                          (TF "FLOAT128_VECTOR_P (TFmode)")])

;; Like VM, except don't do TImode
(define_mode_iterator VM2 [V4SI
                           V8HI
                           V16QI
                           V4SF
                           V2DF
                           V2DI
                           V1TI
                           (KF "FLOAT128_VECTOR_P (KFmode)")
                           (TF "FLOAT128_VECTOR_P (TFmode)")])

;; Map the Vector convert single precision to double precision for integer
;; versus floating point
(define_mode_attr VS_sxwsp [(V4SI "sxw") (V4SF "sp")])

;; Specific iterator for parity which does not have a byte/half-word form, but
;; does have a quad word form
(define_mode_iterator VParity [V4SI
                               V2DI
                               V1TI
                               TI])

(define_mode_attr VI_char [(V2DI "d") (V4SI "w") (V8HI "h") (V16QI "b")])
(define_mode_attr VI_scalar [(V2DI "DI") (V4SI "SI") (V8HI "HI") (V16QI "QI")])
(define_mode_attr VI_unit [(V16QI "VECTOR_UNIT_ALTIVEC_P (V16QImode)")
                           (V8HI "VECTOR_UNIT_ALTIVEC_P (V8HImode)")
                           (V4SI "VECTOR_UNIT_ALTIVEC_P (V4SImode)")
                           (V2DI "VECTOR_UNIT_P8_VECTOR_P (V2DImode)")
                           (V1TI "VECTOR_UNIT_ALTIVEC_P (V1TImode)")])
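
;; As an example of how the iterators and attributes expand: "add<mode>3"
;; below is instantiated once per mode in VI2, so for V4SI it becomes
;; addv4si3, its insn condition comes from <VI_unit>, and its template
;; substitutes <VI_char>, yielding "vadduwm" for V4SI and "vaddubm" for
;; V16QI.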

;; Vector pack/unpack
(define_mode_iterator VP [V2DI V4SI V8HI])
(define_mode_attr VP_small [(V2DI "V4SI") (V4SI "V8HI") (V8HI "V16QI")])
(define_mode_attr VP_small_lc [(V2DI "v4si") (V4SI "v8hi") (V8HI "v16qi")])
(define_mode_attr VU_char [(V2DI "w") (V4SI "h") (V8HI "b")])

;; Vector negate
(define_mode_iterator VNEG [V4SI V2DI])

;; Vector move instructions.
(define_insn "*altivec_mov<mode>"
  [(set (match_operand:VM2 0 "nonimmediate_operand" "=Z,v,v,?Y,?*r,?*r,v,v,?*r")
        (match_operand:VM2 1 "input_operand" "v,Z,v,*r,Y,*r,j,W,W"))]
  "VECTOR_MEM_ALTIVEC_P (<MODE>mode)
   && (register_operand (operands[0], <MODE>mode)
       || register_operand (operands[1], <MODE>mode))"
  "@
   stvx %1,%y0
   lvx %0,%y1
   vor %0,%1,%1
   #
   #
   #
   vxor %0,%0,%0
   * return output_vec_const_move (operands);
   #"
  [(set_attr "type" "vecstore,vecload,veclogical,store,load,*,veclogical,*,*")
   (set_attr "length" "*,*,*,20,20,20,*,8,32")])

;; Unlike other altivec moves, allow the GPRs, since a normal use of TImode
;; is for unions.  However for plain data movement, slightly favor the vector
;; loads
(define_insn "*altivec_movti"
  [(set (match_operand:TI 0 "nonimmediate_operand" "=Z,v,v,?Y,?r,?r,v,v")
        (match_operand:TI 1 "input_operand" "v,Z,v,r,Y,r,j,W"))]
  "VECTOR_MEM_ALTIVEC_P (TImode)
   && (register_operand (operands[0], TImode)
       || register_operand (operands[1], TImode))"
  "@
   stvx %1,%y0
   lvx %0,%y1
   vor %0,%1,%1
   #
   #
   #
   vxor %0,%0,%0
   * return output_vec_const_move (operands);"
  [(set_attr "type" "vecstore,vecload,veclogical,store,load,*,veclogical,*")])

;; Load up a vector with the most significant bit set by loading up -1 and
;; doing a shift left
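;; For V4SI, for example, this emits vspltisw vD,-1 (making every element
;; 0xffffffff) followed by vslw vD,vD,vD; each element's shift count is
;; taken from the low 5 bits of the corresponding element (31 here), so
;; every element becomes 0x80000000.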
(define_split
  [(set (match_operand:VM 0 "altivec_register_operand")
        (match_operand:VM 1 "easy_vector_constant_msb"))]
  "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode) && reload_completed"
  [(const_int 0)]
{
  rtx dest = operands[0];
  machine_mode mode;
  rtvec v;
  int i, num_elements;

  switch (easy_altivec_constant (operands[1], <MODE>mode))
    {
    case 1:
      mode = V16QImode;
      break;
    case 2:
      mode = V8HImode;
      break;
    case 4:
      mode = V4SImode;
      break;
    default:
      gcc_unreachable ();
    }
  if (mode != <MODE>mode)
    dest = gen_lowpart (mode, dest);

  num_elements = GET_MODE_NUNITS (mode);
  v = rtvec_alloc (num_elements);
  for (i = 0; i < num_elements; i++)
    RTVEC_ELT (v, i) = constm1_rtx;

  rs6000_expand_vector_init (dest, gen_rtx_PARALLEL (mode, v));
  emit_insn (gen_rtx_SET (dest, gen_rtx_ASHIFT (mode, dest, dest)));
  DONE;
})

(define_split
  [(set (match_operand:VM 0 "altivec_register_operand")
        (match_operand:VM 1 "easy_vector_constant_add_self"))]
  "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode) && reload_completed"
  [(set (match_dup 0) (match_dup 3))
   (set (match_dup 0) (match_dup 4))]
{
  rtx dup = gen_easy_altivec_constant (operands[1]);
  rtx const_vec;
  machine_mode op_mode = <MODE>mode;

  /* Divide the operand of the resulting VEC_DUPLICATE, and use
     simplify_rtx to make a CONST_VECTOR.  */
  XEXP (dup, 0) = simplify_const_binary_operation (ASHIFTRT, QImode,
                                                   XEXP (dup, 0), const1_rtx);
  const_vec = simplify_rtx (dup);

  if (op_mode == V4SFmode)
    {
      op_mode = V4SImode;
      operands[0] = gen_lowpart (op_mode, operands[0]);
    }
  if (GET_MODE (const_vec) == op_mode)
    operands[3] = const_vec;
  else
    operands[3] = gen_lowpart (op_mode, const_vec);
  operands[4] = gen_rtx_PLUS (op_mode, operands[0], operands[0]);
})
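
;; For example, a vector of bytes all equal to 24 is not a valid vspltisb
;; immediate (the field is signed 5 bits, -16..15), but half of it is:
;; the split above emits vspltisb vD,12 followed by vaddubm vD,vD,vD.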

(define_split
  [(set (match_operand:VM 0 "altivec_register_operand")
        (match_operand:VM 1 "easy_vector_constant_vsldoi"))]
  "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode) && can_create_pseudo_p ()"
  [(set (match_dup 2) (match_dup 3))
   (set (match_dup 4) (match_dup 5))
   (set (match_dup 0)
        (unspec:VM [(match_dup 2)
                    (match_dup 4)
                    (match_dup 6)]
                   UNSPEC_VSLDOI))]
{
  rtx op1 = operands[1];
  int elt = (BYTES_BIG_ENDIAN) ? 0 : GET_MODE_NUNITS (<MODE>mode) - 1;
  HOST_WIDE_INT val = const_vector_elt_as_int (op1, elt);
  rtx rtx_val = GEN_INT (val);
  int shift = vspltis_shifted (op1);

  gcc_assert (shift != 0);
  operands[2] = gen_reg_rtx (<MODE>mode);
  operands[3] = gen_const_vec_duplicate (<MODE>mode, rtx_val);
  operands[4] = gen_reg_rtx (<MODE>mode);

  if (shift < 0)
    {
      operands[5] = CONSTM1_RTX (<MODE>mode);
      operands[6] = GEN_INT (-shift);
    }
  else
    {
      operands[5] = CONST0_RTX (<MODE>mode);
      operands[6] = GEN_INT (shift);
    }
})

(define_insn "get_vrsave_internal"
  [(set (match_operand:SI 0 "register_operand" "=r")
        (unspec:SI [(reg:SI VRSAVE_REGNO)] UNSPEC_GET_VRSAVE))]
  "TARGET_ALTIVEC"
{
  if (TARGET_MACHO)
    return "mfspr %0,256";
  else
    return "mfvrsave %0";
}
  [(set_attr "type" "*")])

(define_insn "*set_vrsave_internal"
  [(match_parallel 0 "vrsave_operation"
     [(set (reg:SI VRSAVE_REGNO)
           (unspec_volatile:SI [(match_operand:SI 1 "register_operand" "r")
                                (reg:SI VRSAVE_REGNO)] UNSPECV_SET_VRSAVE))])]
  "TARGET_ALTIVEC"
{
  if (TARGET_MACHO)
    return "mtspr 256,%1";
  else
    return "mtvrsave %1";
}
  [(set_attr "type" "*")])

(define_insn "*save_world"
  [(match_parallel 0 "save_world_operation"
     [(clobber (reg:SI LR_REGNO))
      (use (match_operand:SI 1 "call_operand" "s"))])]
  "TARGET_MACHO && (DEFAULT_ABI == ABI_DARWIN) && TARGET_32BIT"
  "bl %z1"
  [(set_attr "type" "branch")])

(define_insn "*restore_world"
  [(match_parallel 0 "restore_world_operation"
     [(return)
      (use (match_operand:SI 1 "call_operand" "s"))
      (clobber (match_operand:SI 2 "gpc_reg_operand" "=r"))])]
  "TARGET_MACHO && (DEFAULT_ABI == ABI_DARWIN) && TARGET_32BIT"
  "b %z1")

;; The save_vregs and restore_vregs patterns don't use memory_operand
;; because (plus (reg) (const_int)) is not a valid vector address.
;; This way is more compact than describing exactly what happens in
;; the out-of-line functions, ie. loading the constant into r11/r12
;; then using indexed addressing, and requires less editing of rtl
;; to describe the operation to dwarf2out_frame_debug_expr.
(define_insn "*save_vregs_<mode>_r11"
  [(match_parallel 0 "any_parallel_operand"
     [(clobber (reg:P LR_REGNO))
      (use (match_operand:P 1 "symbol_ref_operand" "s"))
      (clobber (reg:P 11))
      (use (reg:P 0))
      (set (mem:V4SI (plus:P (match_operand:P 2 "gpc_reg_operand" "b")
                             (match_operand:P 3 "short_cint_operand" "I")))
           (match_operand:V4SI 4 "altivec_register_operand" "v"))])]
  "TARGET_ALTIVEC"
  "bl %1"
  [(set_attr "type" "branch")])

(define_insn "*save_vregs_<mode>_r12"
  [(match_parallel 0 "any_parallel_operand"
     [(clobber (reg:P LR_REGNO))
      (use (match_operand:P 1 "symbol_ref_operand" "s"))
      (clobber (reg:P 12))
      (use (reg:P 0))
      (set (mem:V4SI (plus:P (match_operand:P 2 "gpc_reg_operand" "b")
                             (match_operand:P 3 "short_cint_operand" "I")))
           (match_operand:V4SI 4 "altivec_register_operand" "v"))])]
  "TARGET_ALTIVEC"
  "bl %1"
  [(set_attr "type" "branch")])

(define_insn "*restore_vregs_<mode>_r11"
  [(match_parallel 0 "any_parallel_operand"
     [(clobber (reg:P LR_REGNO))
      (use (match_operand:P 1 "symbol_ref_operand" "s"))
      (clobber (reg:P 11))
      (use (reg:P 0))
      (set (match_operand:V4SI 2 "altivec_register_operand" "=v")
           (mem:V4SI (plus:P (match_operand:P 3 "gpc_reg_operand" "b")
                             (match_operand:P 4 "short_cint_operand" "I"))))])]
  "TARGET_ALTIVEC"
  "bl %1"
  [(set_attr "type" "branch")])

(define_insn "*restore_vregs_<mode>_r12"
  [(match_parallel 0 "any_parallel_operand"
     [(clobber (reg:P LR_REGNO))
      (use (match_operand:P 1 "symbol_ref_operand" "s"))
      (clobber (reg:P 12))
      (use (reg:P 0))
      (set (match_operand:V4SI 2 "altivec_register_operand" "=v")
           (mem:V4SI (plus:P (match_operand:P 3 "gpc_reg_operand" "b")
                             (match_operand:P 4 "short_cint_operand" "I"))))])]
  "TARGET_ALTIVEC"
  "bl %1"
  [(set_attr "type" "branch")])

;; Simple binary operations.

;; add
(define_insn "add<mode>3"
  [(set (match_operand:VI2 0 "register_operand" "=v")
        (plus:VI2 (match_operand:VI2 1 "register_operand" "v")
                  (match_operand:VI2 2 "register_operand" "v")))]
  "<VI_unit>"
  "vaddu<VI_char>m %0,%1,%2"
  [(set_attr "type" "vecsimple")])

(define_insn "*altivec_addv4sf3"
  [(set (match_operand:V4SF 0 "register_operand" "=v")
        (plus:V4SF (match_operand:V4SF 1 "register_operand" "v")
                   (match_operand:V4SF 2 "register_operand" "v")))]
  "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
  "vaddfp %0,%1,%2"
  [(set_attr "type" "vecfloat")])

(define_insn "altivec_vaddcuw"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
        (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
                      (match_operand:V4SI 2 "register_operand" "v")]
                     UNSPEC_VADDCUW))]
  "VECTOR_UNIT_ALTIVEC_P (V4SImode)"
  "vaddcuw %0,%1,%2"
  [(set_attr "type" "vecsimple")])

(define_insn "altivec_vaddu<VI_char>s"
  [(set (match_operand:VI 0 "register_operand" "=v")
        (us_plus:VI (match_operand:VI 1 "register_operand" "v")
                    (match_operand:VI 2 "register_operand" "v")))
   (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
  "<VI_unit>"
  "vaddu<VI_char>s %0,%1,%2"
  [(set_attr "type" "vecsimple")])

(define_insn "altivec_vadds<VI_char>s"
  [(set (match_operand:VI 0 "register_operand" "=v")
        (ss_plus:VI (match_operand:VI 1 "register_operand" "v")
                    (match_operand:VI 2 "register_operand" "v")))
   (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
  "VECTOR_UNIT_ALTIVEC_P (<MODE>mode)"
  "vadds<VI_char>s %0,%1,%2"
  [(set_attr "type" "vecsimple")])

;; sub
(define_insn "sub<mode>3"
  [(set (match_operand:VI2 0 "register_operand" "=v")
        (minus:VI2 (match_operand:VI2 1 "register_operand" "v")
                   (match_operand:VI2 2 "register_operand" "v")))]
  "<VI_unit>"
  "vsubu<VI_char>m %0,%1,%2"
  [(set_attr "type" "vecsimple")])

(define_insn "*altivec_subv4sf3"
  [(set (match_operand:V4SF 0 "register_operand" "=v")
        (minus:V4SF (match_operand:V4SF 1 "register_operand" "v")
                    (match_operand:V4SF 2 "register_operand" "v")))]
  "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
  "vsubfp %0,%1,%2"
  [(set_attr "type" "vecfloat")])

(define_insn "altivec_vsubcuw"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
        (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
                      (match_operand:V4SI 2 "register_operand" "v")]
                     UNSPEC_VSUBCUW))]
  "VECTOR_UNIT_ALTIVEC_P (V4SImode)"
  "vsubcuw %0,%1,%2"
  [(set_attr "type" "vecsimple")])

(define_insn "altivec_vsubu<VI_char>s"
  [(set (match_operand:VI 0 "register_operand" "=v")
        (us_minus:VI (match_operand:VI 1 "register_operand" "v")
                     (match_operand:VI 2 "register_operand" "v")))
   (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
  "VECTOR_UNIT_ALTIVEC_P (<MODE>mode)"
  "vsubu<VI_char>s %0,%1,%2"
  [(set_attr "type" "vecsimple")])

(define_insn "altivec_vsubs<VI_char>s"
  [(set (match_operand:VI 0 "register_operand" "=v")
        (ss_minus:VI (match_operand:VI 1 "register_operand" "v")
                     (match_operand:VI 2 "register_operand" "v")))
   (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
  "VECTOR_UNIT_ALTIVEC_P (<MODE>mode)"
  "vsubs<VI_char>s %0,%1,%2"
  [(set_attr "type" "vecsimple")])

;; Vector average, result rounded upward (ceil)
(define_insn "uavg<mode>3_ceil"
  [(set (match_operand:VI 0 "register_operand" "=v")
        (unspec:VI [(match_operand:VI 1 "register_operand" "v")
                    (match_operand:VI 2 "register_operand" "v")]
                   UNSPEC_VAVGU))]
  "TARGET_ALTIVEC"
  "vavgu<VI_char> %0,%1,%2"
  [(set_attr "type" "vecsimple")])

(define_insn "avg<mode>3_ceil"
  [(set (match_operand:VI 0 "register_operand" "=v")
        (unspec:VI [(match_operand:VI 1 "register_operand" "v")
                    (match_operand:VI 2 "register_operand" "v")]
                   UNSPEC_VAVGS))]
  "VECTOR_UNIT_ALTIVEC_P (<MODE>mode)"
  "vavgs<VI_char> %0,%1,%2"
  [(set_attr "type" "vecsimple")])

(define_insn "altivec_vcmpbfp"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
        (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "v")
                      (match_operand:V4SF 2 "register_operand" "v")]
                     UNSPEC_VCMPBFP))]
  "VECTOR_UNIT_ALTIVEC_P (V4SImode)"
  "vcmpbfp %0,%1,%2"
  [(set_attr "type" "veccmp")])

(define_insn "altivec_eqv1ti"
  [(set (match_operand:V1TI 0 "altivec_register_operand" "=v")
        (eq:V1TI (match_operand:V1TI 1 "altivec_register_operand" "v")
                 (match_operand:V1TI 2 "altivec_register_operand" "v")))]
  "TARGET_POWER10"
  "vcmpequq %0,%1,%2"
  [(set_attr "type" "veccmpfx")])

(define_insn "altivec_eq<mode>"
  [(set (match_operand:VI2 0 "altivec_register_operand" "=v")
        (eq:VI2 (match_operand:VI2 1 "altivec_register_operand" "v")
                (match_operand:VI2 2 "altivec_register_operand" "v")))]
  "<VI_unit>"
  "vcmpequ<VI_char> %0,%1,%2"
  [(set_attr "type" "veccmpfx")])

(define_insn "*altivec_gt<mode>"
  [(set (match_operand:VI2 0 "altivec_register_operand" "=v")
        (gt:VI2 (match_operand:VI2 1 "altivec_register_operand" "v")
                (match_operand:VI2 2 "altivec_register_operand" "v")))]
  "<VI_unit>"
  "vcmpgts<VI_char> %0,%1,%2"
  [(set_attr "type" "veccmpfx")])

(define_insn "*altivec_gtv1ti"
  [(set (match_operand:V1TI 0 "altivec_register_operand" "=v")
        (gt:V1TI (match_operand:V1TI 1 "altivec_register_operand" "v")
                 (match_operand:V1TI 2 "altivec_register_operand" "v")))]
  "TARGET_POWER10"
  "vcmpgtsq %0,%1,%2"
  [(set_attr "type" "veccmpfx")])

(define_insn "*altivec_gtu<mode>"
  [(set (match_operand:VI2 0 "altivec_register_operand" "=v")
        (gtu:VI2 (match_operand:VI2 1 "altivec_register_operand" "v")
                 (match_operand:VI2 2 "altivec_register_operand" "v")))]
  "<VI_unit>"
  "vcmpgtu<VI_char> %0,%1,%2"
  [(set_attr "type" "veccmpfx")])

(define_insn "*altivec_gtuv1ti"
  [(set (match_operand:V1TI 0 "altivec_register_operand" "=v")
        (gtu:V1TI (match_operand:V1TI 1 "altivec_register_operand" "v")
                  (match_operand:V1TI 2 "altivec_register_operand" "v")))]
  "TARGET_POWER10"
  "vcmpgtuq %0,%1,%2"
  [(set_attr "type" "veccmpfx")])

(define_insn "*altivec_eqv4sf"
  [(set (match_operand:V4SF 0 "altivec_register_operand" "=v")
        (eq:V4SF (match_operand:V4SF 1 "altivec_register_operand" "v")
                 (match_operand:V4SF 2 "altivec_register_operand" "v")))]
  "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
  "vcmpeqfp %0,%1,%2"
  [(set_attr "type" "veccmp")])

(define_insn "*altivec_gtv4sf"
  [(set (match_operand:V4SF 0 "altivec_register_operand" "=v")
        (gt:V4SF (match_operand:V4SF 1 "altivec_register_operand" "v")
                 (match_operand:V4SF 2 "altivec_register_operand" "v")))]
  "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
  "vcmpgtfp %0,%1,%2"
  [(set_attr "type" "veccmp")])

(define_insn "*altivec_gev4sf"
  [(set (match_operand:V4SF 0 "altivec_register_operand" "=v")
        (ge:V4SF (match_operand:V4SF 1 "altivec_register_operand" "v")
                 (match_operand:V4SF 2 "altivec_register_operand" "v")))]
  "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
  "vcmpgefp %0,%1,%2"
  [(set_attr "type" "veccmp")])

(define_insn "altivec_vsel<mode>"
  [(set (match_operand:VM 0 "register_operand" "=wa,v")
        (ior:VM
         (and:VM
          (not:VM (match_operand:VM 3 "register_operand" "wa,v"))
          (match_operand:VM 1 "register_operand" "wa,v"))
         (and:VM
          (match_dup 3)
          (match_operand:VM 2 "register_operand" "wa,v"))))]
  "VECTOR_MEM_ALTIVEC_OR_VSX_P (<MODE>mode)"
  "@
   xxsel %x0,%x1,%x2,%x3
   vsel %0,%1,%2,%3"
  [(set_attr "type" "vecmove")
   (set_attr "isa" "<VSisa>")])

(define_insn "altivec_vsel<mode>2"
  [(set (match_operand:VM 0 "register_operand" "=wa,v")
        (ior:VM
         (and:VM
          (not:VM (match_operand:VM 3 "register_operand" "wa,v"))
          (match_operand:VM 1 "register_operand" "wa,v"))
         (and:VM
          (match_operand:VM 2 "register_operand" "wa,v")
          (match_dup 3))))]
  "VECTOR_MEM_ALTIVEC_OR_VSX_P (<MODE>mode)"
  "@
   xxsel %x0,%x1,%x2,%x3
   vsel %0,%1,%2,%3"
  [(set_attr "type" "vecmove")
   (set_attr "isa" "<VSisa>")])

(define_insn "altivec_vsel<mode>3"
  [(set (match_operand:VM 0 "register_operand" "=wa,v")
        (ior:VM
         (and:VM
          (match_operand:VM 3 "register_operand" "wa,v")
          (match_operand:VM 1 "register_operand" "wa,v"))
         (and:VM
          (not:VM (match_dup 3))
          (match_operand:VM 2 "register_operand" "wa,v"))))]
  "VECTOR_MEM_ALTIVEC_OR_VSX_P (<MODE>mode)"
  "@
   xxsel %x0,%x2,%x1,%x3
   vsel %0,%2,%1,%3"
  [(set_attr "type" "vecmove")
   (set_attr "isa" "<VSisa>")])

(define_insn "altivec_vsel<mode>4"
  [(set (match_operand:VM 0 "register_operand" "=wa,v")
        (ior:VM
         (and:VM
          (match_operand:VM 1 "register_operand" "wa,v")
          (match_operand:VM 3 "register_operand" "wa,v"))
         (and:VM
          (not:VM (match_dup 3))
          (match_operand:VM 2 "register_operand" "wa,v"))))]
  "VECTOR_MEM_ALTIVEC_OR_VSX_P (<MODE>mode)"
  "@
   xxsel %x0,%x2,%x1,%x3
   vsel %0,%2,%1,%3"
  [(set_attr "type" "vecmove")
   (set_attr "isa" "<VSisa>")])

;; Fused multiply add.

(define_insn "*altivec_fmav4sf4"
  [(set (match_operand:V4SF 0 "register_operand" "=v")
        (fma:V4SF (match_operand:V4SF 1 "register_operand" "v")
                  (match_operand:V4SF 2 "register_operand" "v")
                  (match_operand:V4SF 3 "register_operand" "v")))]
  "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
  "vmaddfp %0,%1,%2,%3"
  [(set_attr "type" "vecfloat")])

;; We do multiply as a fused multiply-add with an add of a -0.0 vector.
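;; Adding -0.0 rather than +0.0 keeps the product bit-exact: in the
;; default rounding mode x + (-0.0) == x for every x, including
;; x == -0.0, whereas x + (+0.0) would turn a -0.0 product into +0.0.
;; The -0.0 vector itself is built with the splat -1 / shift-left trick
;; documented earlier in this file.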

(define_expand "altivec_mulv4sf3"
  [(set (match_operand:V4SF 0 "register_operand")
        (fma:V4SF (match_operand:V4SF 1 "register_operand")
                  (match_operand:V4SF 2 "register_operand")
                  (match_dup 3)))]
  "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
{
  rtx neg0;

  /* Generate [-0.0, -0.0, -0.0, -0.0].  */
  neg0 = gen_reg_rtx (V4SImode);
  emit_insn (gen_altivec_vspltisw (neg0, constm1_rtx));
  emit_insn (gen_vashlv4si3 (neg0, neg0, neg0));

  operands[3] = gen_lowpart (V4SFmode, neg0);
})

;; 32-bit integer multiplication, done per element on 16-bit halves:
;; A_high = (operand_1 >> 16) & 0xFFFF
;; A_low  = operand_1 & 0xFFFF
;; B_high = (operand_2 >> 16) & 0xFFFF
;; B_low  = operand_2 & 0xFFFF
;; result = A_low * B_low + ((A_high * B_low + B_high * A_low) << 16)
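;; For example, per 32-bit element (all arithmetic mod 2^32):
;;   A = 0x00010002, B = 0x00030004
;;   A_low * B_low                            = 0x00000008
;;   (A_high * B_low + B_high * A_low) << 16  = 0x000A0000
;;   result                                   = 0x000A0008 = A * B mod 2^32
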
(define_insn "mulv4si3_p8"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
        (mult:V4SI (match_operand:V4SI 1 "register_operand" "v")
                   (match_operand:V4SI 2 "register_operand" "v")))]
  "TARGET_P8_VECTOR"
  "vmuluwm %0,%1,%2"
  [(set_attr "type" "veccomplex")])

(define_expand "mulv4si3"
  [(use (match_operand:V4SI 0 "register_operand"))
   (use (match_operand:V4SI 1 "register_operand"))
   (use (match_operand:V4SI 2 "register_operand"))]
  "TARGET_ALTIVEC"
{
  rtx zero;
  rtx swap;
  rtx small_swap;
  rtx sixteen;
  rtx one;
  rtx two;
  rtx low_product;
  rtx high_product;

  if (TARGET_P8_VECTOR)
    {
      emit_insn (gen_mulv4si3_p8 (operands[0], operands[1], operands[2]));
      DONE;
    }

  zero = gen_reg_rtx (V4SImode);
  emit_insn (gen_altivec_vspltisw (zero, const0_rtx));

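  /* Vector shift/rotate counts use only the low 5 bits of each element,
     so splatting -16, which fits the signed 5-bit vspltisw immediate
     (unlike +16), still gives an effective shift count of 16.  */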
  sixteen = gen_reg_rtx (V4SImode);
  emit_insn (gen_altivec_vspltisw (sixteen, GEN_INT (-16)));

  swap = gen_reg_rtx (V4SImode);
  emit_insn (gen_vrotlv4si3 (swap, operands[2], sixteen));

  one = gen_reg_rtx (V8HImode);
  convert_move (one, operands[1], 0);

  two = gen_reg_rtx (V8HImode);
  convert_move (two, operands[2], 0);

  small_swap = gen_reg_rtx (V8HImode);
  convert_move (small_swap, swap, 0);

  low_product = gen_reg_rtx (V4SImode);
  emit_insn (gen_altivec_vmulouh (low_product, one, two));

  high_product = gen_reg_rtx (V4SImode);
  emit_insn (gen_altivec_vmsumuhm (high_product, one, small_swap, zero));

  emit_insn (gen_vashlv4si3 (high_product, high_product, sixteen));

  emit_insn (gen_addv4si3 (operands[0], high_product, low_product));

  DONE;
})

(define_expand "mulv8hi3"
  [(use (match_operand:V8HI 0 "register_operand"))
   (use (match_operand:V8HI 1 "register_operand"))
   (use (match_operand:V8HI 2 "register_operand"))]
  "TARGET_ALTIVEC"
{
  rtx zero = gen_reg_rtx (V8HImode);

  emit_insn (gen_altivec_vspltish (zero, const0_rtx));
  emit_insn (gen_fmav8hi4 (operands[0], operands[1], operands[2], zero));

  DONE;
})

;; Map UNSPEC_SLDB to "l" and UNSPEC_SRDB to "r".
(define_int_attr SLDB_lr [(UNSPEC_SLDB "l")
                          (UNSPEC_SRDB "r")])

(define_int_iterator VSHIFT_DBL_LR [UNSPEC_SLDB UNSPEC_SRDB])

(define_insn "vs<SLDB_lr>db_<mode>"
  [(set (match_operand:VI2 0 "register_operand" "=v")
        (unspec:VI2 [(match_operand:VI2 1 "register_operand" "v")
                     (match_operand:VI2 2 "register_operand" "v")
                     (match_operand:QI 3 "const_0_to_12_operand" "n")]
                    VSHIFT_DBL_LR))]
  "TARGET_POWER10"
  "vs<SLDB_lr>dbi %0,%1,%2,%3"
  [(set_attr "type" "vecsimple")])

(define_expand "vstrir_<mode>"
  [(set (match_operand:VIshort 0 "altivec_register_operand")
        (unspec:VIshort [(match_operand:VIshort 1 "altivec_register_operand")]
                        UNSPEC_VSTRIR))]
  "TARGET_POWER10"
{
  if (BYTES_BIG_ENDIAN)
    emit_insn (gen_vstrir_code_<mode> (operands[0], operands[1]));
  else
    emit_insn (gen_vstril_code_<mode> (operands[0], operands[1]));
  DONE;
})

(define_insn "vstrir_code_<mode>"
  [(set (match_operand:VIshort 0 "altivec_register_operand" "=v")
        (unspec:VIshort
         [(match_operand:VIshort 1 "altivec_register_operand" "v")]
         UNSPEC_VSTRIR))]
  "TARGET_POWER10"
  "vstri<wd>r %0,%1"
  [(set_attr "type" "vecsimple")])

;; This expands into the same code as vstrir_<mode> followed by condition
;; logic so that a single vstribr. or vstrihr. or vstribl. or vstrihl.
;; instruction can, for example, satisfy the needs of a vec_strir ()
;; function paired with a vec_strir_p () function if both take the same
;; incoming arguments.
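;; A hypothetical pairing that benefits (names follow the comment above,
;; not a specific header):
;;   vector unsigned char r = vec_strir (src);
;;   if (vec_strir_p (src))   /* both fold into one vstribr. insn */
;;     ...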
(define_expand "vstrir_p_<mode>"
  [(match_operand:SI 0 "gpc_reg_operand")
   (match_operand:VIshort 1 "altivec_register_operand")]
  "TARGET_POWER10"
{
  rtx scratch = gen_reg_rtx (<MODE>mode);
  if (BYTES_BIG_ENDIAN)
    emit_insn (gen_vstrir_p_code_<mode> (scratch, operands[1]));
  else
    emit_insn (gen_vstril_p_code_<mode> (scratch, operands[1]));
  emit_insn (gen_cr6_test_for_zero (operands[0]));
  DONE;
})

(define_insn "vstrir_p_code_<mode>"
  [(set (match_operand:VIshort 0 "altivec_register_operand" "=v")
        (unspec:VIshort
         [(match_operand:VIshort 1 "altivec_register_operand" "v")]
         UNSPEC_VSTRIR))
   (set (reg:CC CR6_REGNO)
        (unspec:CC [(match_dup 1)]
                   UNSPEC_VSTRIR))]
  "TARGET_POWER10"
  "vstri<wd>r. %0,%1"
  [(set_attr "type" "vecsimple")])

(define_expand "vstril_<mode>"
  [(set (match_operand:VIshort 0 "altivec_register_operand")
        (unspec:VIshort [(match_operand:VIshort 1 "altivec_register_operand")]
                        UNSPEC_VSTRIL))]
  "TARGET_POWER10"
{
  if (BYTES_BIG_ENDIAN)
    emit_insn (gen_vstril_code_<mode> (operands[0], operands[1]));
  else
    emit_insn (gen_vstrir_code_<mode> (operands[0], operands[1]));
  DONE;
})

(define_insn "vstril_code_<mode>"
  [(set (match_operand:VIshort 0 "altivec_register_operand" "=v")
        (unspec:VIshort
         [(match_operand:VIshort 1 "altivec_register_operand" "v")]
         UNSPEC_VSTRIL))]
  "TARGET_POWER10"
  "vstri<wd>l %0,%1"
  [(set_attr "type" "vecsimple")])

;; This expands into the same code as vstril_<mode> followed by condition
;; logic so that a single vstribr. or vstrihr. or vstribl. or vstrihl.
;; instruction can, for example, satisfy the needs of a vec_stril ()
;; function paired with a vec_stril_p () function if both take the same
;; incoming arguments.
(define_expand "vstril_p_<mode>"
  [(match_operand:SI 0 "gpc_reg_operand")
   (match_operand:VIshort 1 "altivec_register_operand")]
  "TARGET_POWER10"
{
  rtx scratch = gen_reg_rtx (<MODE>mode);
  if (BYTES_BIG_ENDIAN)
    emit_insn (gen_vstril_p_code_<mode> (scratch, operands[1]));
  else
    emit_insn (gen_vstrir_p_code_<mode> (scratch, operands[1]));
  emit_insn (gen_cr6_test_for_zero (operands[0]));
  DONE;
})

(define_insn "vstril_p_code_<mode>"
  [(set (match_operand:VIshort 0 "altivec_register_operand" "=v")
        (unspec:VIshort
         [(match_operand:VIshort 1 "altivec_register_operand" "v")]
         UNSPEC_VSTRIL))
   (set (reg:CC CR6_REGNO)
        (unspec:CC [(match_dup 1)]
                   UNSPEC_VSTRIL))]
  "TARGET_POWER10"
  "vstri<wd>l. %0,%1"
  [(set_attr "type" "vecsimple")])

;; Fused multiply subtract
(define_insn "*altivec_vnmsubfp"
  [(set (match_operand:V4SF 0 "register_operand" "=v")
        (neg:V4SF
         (fma:V4SF (match_operand:V4SF 1 "register_operand" "v")
                   (match_operand:V4SF 2 "register_operand" "v")
                   (neg:V4SF
                    (match_operand:V4SF 3 "register_operand" "v")))))]
  "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
  "vnmsubfp %0,%1,%2,%3"
  [(set_attr "type" "vecfloat")])

(define_insn "altivec_vmsumu<VI_char>m"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
        (unspec:V4SI [(match_operand:VIshort 1 "register_operand" "v")
                      (match_operand:VIshort 2 "register_operand" "v")
                      (match_operand:V4SI 3 "register_operand" "v")]
                     UNSPEC_VMSUMU))]
  "TARGET_ALTIVEC"
  "vmsumu<VI_char>m %0,%1,%2,%3"
  [(set_attr "type" "veccomplex")])

(define_insn "altivec_vmsumudm"
  [(set (match_operand:V1TI 0 "register_operand" "=v")
        (unspec:V1TI [(match_operand:V2DI 1 "register_operand" "v")
                      (match_operand:V2DI 2 "register_operand" "v")
                      (match_operand:V1TI 3 "register_operand" "v")]
                     UNSPEC_VMSUMUDM))]
  "TARGET_P8_VECTOR"
  "vmsumudm %0,%1,%2,%3"
  [(set_attr "type" "veccomplex")])

(define_insn "altivec_vmsumm<VI_char>m"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
        (unspec:V4SI [(match_operand:VIshort 1 "register_operand" "v")
                      (match_operand:VIshort 2 "register_operand" "v")
                      (match_operand:V4SI 3 "register_operand" "v")]
                     UNSPEC_VMSUMM))]
  "TARGET_ALTIVEC"
  "vmsumm<VI_char>m %0,%1,%2,%3"
  [(set_attr "type" "veccomplex")])

(define_insn "altivec_vmsumshm"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
        (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
                      (match_operand:V8HI 2 "register_operand" "v")
                      (match_operand:V4SI 3 "register_operand" "v")]
                     UNSPEC_VMSUMSHM))]
  "TARGET_ALTIVEC"
  "vmsumshm %0,%1,%2,%3"
  [(set_attr "type" "veccomplex")])

(define_insn "altivec_vmsumuhs"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
        (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
                      (match_operand:V8HI 2 "register_operand" "v")
                      (match_operand:V4SI 3 "register_operand" "v")]
                     UNSPEC_VMSUMUHS))
   (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
  "TARGET_ALTIVEC"
  "vmsumuhs %0,%1,%2,%3"
  [(set_attr "type" "veccomplex")])

(define_insn "altivec_vmsumshs"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
        (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
                      (match_operand:V8HI 2 "register_operand" "v")
                      (match_operand:V4SI 3 "register_operand" "v")]
                     UNSPEC_VMSUMSHS))
   (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
  "TARGET_ALTIVEC"
  "vmsumshs %0,%1,%2,%3"
  [(set_attr "type" "veccomplex")])

;; max

(define_insn "umax<mode>3"
  [(set (match_operand:VI2 0 "register_operand" "=v")
        (umax:VI2 (match_operand:VI2 1 "register_operand" "v")
                  (match_operand:VI2 2 "register_operand" "v")))]
  "<VI_unit>"
  "vmaxu<VI_char> %0,%1,%2"
  [(set_attr "type" "vecsimple")])

(define_insn "smax<mode>3"
  [(set (match_operand:VI2 0 "register_operand" "=v")
        (smax:VI2 (match_operand:VI2 1 "register_operand" "v")
                  (match_operand:VI2 2 "register_operand" "v")))]
  "<VI_unit>"
  "vmaxs<VI_char> %0,%1,%2"
  [(set_attr "type" "vecsimple")])

(define_insn "*altivec_smaxv4sf3"
  [(set (match_operand:V4SF 0 "register_operand" "=v")
        (smax:V4SF (match_operand:V4SF 1 "register_operand" "v")
                   (match_operand:V4SF 2 "register_operand" "v")))]
  "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
  "vmaxfp %0,%1,%2"
  [(set_attr "type" "veccmp")])

(define_insn "umin<mode>3"
  [(set (match_operand:VI2 0 "register_operand" "=v")
        (umin:VI2 (match_operand:VI2 1 "register_operand" "v")
                  (match_operand:VI2 2 "register_operand" "v")))]
  "<VI_unit>"
  "vminu<VI_char> %0,%1,%2"
  [(set_attr "type" "vecsimple")])

(define_insn "smin<mode>3"
  [(set (match_operand:VI2 0 "register_operand" "=v")
        (smin:VI2 (match_operand:VI2 1 "register_operand" "v")
                  (match_operand:VI2 2 "register_operand" "v")))]
  "<VI_unit>"
  "vmins<VI_char> %0,%1,%2"
  [(set_attr "type" "vecsimple")])

(define_insn "*altivec_sminv4sf3"
  [(set (match_operand:V4SF 0 "register_operand" "=v")
        (smin:V4SF (match_operand:V4SF 1 "register_operand" "v")
                   (match_operand:V4SF 2 "register_operand" "v")))]
  "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
  "vminfp %0,%1,%2"
  [(set_attr "type" "veccmp")])

(define_insn "altivec_vmhaddshs"
  [(set (match_operand:V8HI 0 "register_operand" "=v")
        (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
                      (match_operand:V8HI 2 "register_operand" "v")
                      (match_operand:V8HI 3 "register_operand" "v")]
                     UNSPEC_VMHADDSHS))
   (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
  "TARGET_ALTIVEC"
  "vmhaddshs %0,%1,%2,%3"
  [(set_attr "type" "veccomplex")])

(define_insn "altivec_vmhraddshs"
  [(set (match_operand:V8HI 0 "register_operand" "=v")
        (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
                      (match_operand:V8HI 2 "register_operand" "v")
                      (match_operand:V8HI 3 "register_operand" "v")]
                     UNSPEC_VMHRADDSHS))
   (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
  "TARGET_ALTIVEC"
  "vmhraddshs %0,%1,%2,%3"
  [(set_attr "type" "veccomplex")])

(define_insn "fmav8hi4"
  [(set (match_operand:V8HI 0 "register_operand" "=v")
        (plus:V8HI (mult:V8HI (match_operand:V8HI 1 "register_operand" "v")
                              (match_operand:V8HI 2 "register_operand" "v"))
                   (match_operand:V8HI 3 "register_operand" "v")))]
  "TARGET_ALTIVEC"
  "vmladduhm %0,%1,%2,%3"
  [(set_attr "type" "veccomplex")])

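;; The merge expanders below canonicalize for endianness: the vec_select
;; element numbering used in this file is the big-endian (memory order)
;; numbering, so on little-endian targets a "merge high" is implemented
;; by the merge-low instruction with swapped inputs (and vice versa);
;; e.g. altivec_vmrghb on LE emits vmrglb %0,%2,%1.
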
(define_expand "altivec_vmrghb"
  [(use (match_operand:V16QI 0 "register_operand"))
   (use (match_operand:V16QI 1 "register_operand"))
   (use (match_operand:V16QI 2 "register_operand"))]
  "TARGET_ALTIVEC"
{
  rtx (*fun) (rtx, rtx, rtx) = BYTES_BIG_ENDIAN ? gen_altivec_vmrghb_direct
                                                : gen_altivec_vmrglb_direct;
  if (!BYTES_BIG_ENDIAN)
    std::swap (operands[1], operands[2]);
  emit_insn (fun (operands[0], operands[1], operands[2]));
  DONE;
})

(define_insn "altivec_vmrghb_direct"
  [(set (match_operand:V16QI 0 "register_operand" "=v")
        (vec_select:V16QI
          (vec_concat:V32QI
            (match_operand:V16QI 1 "register_operand" "v")
            (match_operand:V16QI 2 "register_operand" "v"))
          (parallel [(const_int 0) (const_int 16)
                     (const_int 1) (const_int 17)
                     (const_int 2) (const_int 18)
                     (const_int 3) (const_int 19)
                     (const_int 4) (const_int 20)
                     (const_int 5) (const_int 21)
                     (const_int 6) (const_int 22)
                     (const_int 7) (const_int 23)])))]
  "TARGET_ALTIVEC"
  "vmrghb %0,%1,%2"
  [(set_attr "type" "vecperm")])

(define_expand "altivec_vmrghh"
  [(use (match_operand:V8HI 0 "register_operand"))
   (use (match_operand:V8HI 1 "register_operand"))
   (use (match_operand:V8HI 2 "register_operand"))]
  "TARGET_ALTIVEC"
{
  rtx (*fun) (rtx, rtx, rtx) = BYTES_BIG_ENDIAN ? gen_altivec_vmrghh_direct
                                                : gen_altivec_vmrglh_direct;
  if (!BYTES_BIG_ENDIAN)
    std::swap (operands[1], operands[2]);
  emit_insn (fun (operands[0], operands[1], operands[2]));
  DONE;
})

(define_insn "altivec_vmrghh_direct"
  [(set (match_operand:V8HI 0 "register_operand" "=v")
        (vec_select:V8HI
          (vec_concat:V16HI
            (match_operand:V8HI 1 "register_operand" "v")
            (match_operand:V8HI 2 "register_operand" "v"))
          (parallel [(const_int 0) (const_int 8)
                     (const_int 1) (const_int 9)
                     (const_int 2) (const_int 10)
                     (const_int 3) (const_int 11)])))]
  "TARGET_ALTIVEC"
  "vmrghh %0,%1,%2"
  [(set_attr "type" "vecperm")])

(define_expand "altivec_vmrghw"
  [(use (match_operand:V4SI 0 "register_operand"))
   (use (match_operand:V4SI 1 "register_operand"))
   (use (match_operand:V4SI 2 "register_operand"))]
  "VECTOR_MEM_ALTIVEC_P (V4SImode)"
{
  rtx (*fun) (rtx, rtx, rtx);
  fun = BYTES_BIG_ENDIAN ? gen_altivec_vmrghw_direct_v4si
                         : gen_altivec_vmrglw_direct_v4si;
  if (!BYTES_BIG_ENDIAN)
    std::swap (operands[1], operands[2]);
  emit_insn (fun (operands[0], operands[1], operands[2]));
  DONE;
})

(define_insn "altivec_vmrghw_direct_<mode>"
  [(set (match_operand:VSX_W 0 "register_operand" "=wa,v")
        (vec_select:VSX_W
          (vec_concat:<VS_double>
            (match_operand:VSX_W 1 "register_operand" "wa,v")
            (match_operand:VSX_W 2 "register_operand" "wa,v"))
          (parallel [(const_int 0) (const_int 4)
                     (const_int 1) (const_int 5)])))]
  "TARGET_ALTIVEC"
  "@
   xxmrghw %x0,%x1,%x2
   vmrghw %0,%1,%2"
  [(set_attr "type" "vecperm")])

(define_insn "*altivec_vmrghsf"
  [(set (match_operand:V4SF 0 "register_operand" "=v")
        (vec_select:V4SF
          (vec_concat:V8SF
            (match_operand:V4SF 1 "register_operand" "v")
            (match_operand:V4SF 2 "register_operand" "v"))
          (parallel [(const_int 0) (const_int 4)
                     (const_int 1) (const_int 5)])))]
  "VECTOR_MEM_ALTIVEC_P (V4SFmode)"
{
  if (BYTES_BIG_ENDIAN)
    return "vmrghw %0,%1,%2";
  else
    return "vmrglw %0,%2,%1";
}
  [(set_attr "type" "vecperm")])

(define_expand "altivec_vmrglb"
  [(use (match_operand:V16QI 0 "register_operand"))
   (use (match_operand:V16QI 1 "register_operand"))
   (use (match_operand:V16QI 2 "register_operand"))]
  "TARGET_ALTIVEC"
{
  rtx (*fun) (rtx, rtx, rtx) = BYTES_BIG_ENDIAN ? gen_altivec_vmrglb_direct
                                                : gen_altivec_vmrghb_direct;
  if (!BYTES_BIG_ENDIAN)
    std::swap (operands[1], operands[2]);
  emit_insn (fun (operands[0], operands[1], operands[2]));
  DONE;
})

(define_insn "altivec_vmrglb_direct"
  [(set (match_operand:V16QI 0 "register_operand" "=v")
        (vec_select:V16QI
          (vec_concat:V32QI
            (match_operand:V16QI 1 "register_operand" "v")
            (match_operand:V16QI 2 "register_operand" "v"))
          (parallel [(const_int 8) (const_int 24)
                     (const_int 9) (const_int 25)
                     (const_int 10) (const_int 26)
                     (const_int 11) (const_int 27)
                     (const_int 12) (const_int 28)
                     (const_int 13) (const_int 29)
                     (const_int 14) (const_int 30)
                     (const_int 15) (const_int 31)])))]
  "TARGET_ALTIVEC"
  "vmrglb %0,%1,%2"
  [(set_attr "type" "vecperm")])

(define_expand "altivec_vmrglh"
  [(use (match_operand:V8HI 0 "register_operand"))
   (use (match_operand:V8HI 1 "register_operand"))
   (use (match_operand:V8HI 2 "register_operand"))]
  "TARGET_ALTIVEC"
{
  rtx (*fun) (rtx, rtx, rtx) = BYTES_BIG_ENDIAN ? gen_altivec_vmrglh_direct
                                                : gen_altivec_vmrghh_direct;
  if (!BYTES_BIG_ENDIAN)
    std::swap (operands[1], operands[2]);
  emit_insn (fun (operands[0], operands[1], operands[2]));
  DONE;
})

(define_insn "altivec_vmrglh_direct"
  [(set (match_operand:V8HI 0 "register_operand" "=v")
        (vec_select:V8HI
          (vec_concat:V16HI
            (match_operand:V8HI 1 "register_operand" "v")
            (match_operand:V8HI 2 "register_operand" "v"))
          (parallel [(const_int 4) (const_int 12)
                     (const_int 5) (const_int 13)
                     (const_int 6) (const_int 14)
                     (const_int 7) (const_int 15)])))]
  "TARGET_ALTIVEC"
  "vmrglh %0,%1,%2"
  [(set_attr "type" "vecperm")])

(define_expand "altivec_vmrglw"
  [(use (match_operand:V4SI 0 "register_operand"))
   (use (match_operand:V4SI 1 "register_operand"))
   (use (match_operand:V4SI 2 "register_operand"))]
  "VECTOR_MEM_ALTIVEC_P (V4SImode)"
{
  rtx (*fun) (rtx, rtx, rtx);
  fun = BYTES_BIG_ENDIAN ? gen_altivec_vmrglw_direct_v4si
                         : gen_altivec_vmrghw_direct_v4si;
  if (!BYTES_BIG_ENDIAN)
    std::swap (operands[1], operands[2]);
  emit_insn (fun (operands[0], operands[1], operands[2]));
  DONE;
})

(define_insn "altivec_vmrglw_direct_<mode>"
  [(set (match_operand:VSX_W 0 "register_operand" "=wa,v")
        (vec_select:VSX_W
          (vec_concat:<VS_double>
            (match_operand:VSX_W 1 "register_operand" "wa,v")
            (match_operand:VSX_W 2 "register_operand" "wa,v"))
          (parallel [(const_int 2) (const_int 6)
                     (const_int 3) (const_int 7)])))]
  "TARGET_ALTIVEC"
  "@
   xxmrglw %x0,%x1,%x2
   vmrglw %0,%1,%2"
  [(set_attr "type" "vecperm")])

(define_insn "*altivec_vmrglsf"
  [(set (match_operand:V4SF 0 "register_operand" "=v")
        (vec_select:V4SF
          (vec_concat:V8SF
            (match_operand:V4SF 1 "register_operand" "v")
            (match_operand:V4SF 2 "register_operand" "v"))
          (parallel [(const_int 2) (const_int 6)
                     (const_int 3) (const_int 7)])))]
  "VECTOR_MEM_ALTIVEC_P (V4SFmode)"
{
  if (BYTES_BIG_ENDIAN)
    return "vmrglw %0,%1,%2";
  else
    return "vmrghw %0,%2,%1";
}
  [(set_attr "type" "vecperm")])

;; Power8 vector merge the even doublewords of two V2DF/V2DI vectors
(define_expand "p8_vmrgew_<mode>"
  [(use (match_operand:VSX_D 0 "vsx_register_operand"))
   (use (match_operand:VSX_D 1 "vsx_register_operand"))
   (use (match_operand:VSX_D 2 "vsx_register_operand"))]
  "VECTOR_MEM_VSX_P (<MODE>mode)"
{
  rtvec v;
  rtx x;

  v = gen_rtvec (2, GEN_INT (0), GEN_INT (2));
  x = gen_rtx_VEC_CONCAT (<VS_double>mode, operands[1], operands[2]);

  x = gen_rtx_VEC_SELECT (<MODE>mode, x, gen_rtx_PARALLEL (VOIDmode, v));
  emit_insn (gen_rtx_SET (operands[0], x));
  DONE;
})

;; Power8 vector merge two V4SF/V4SI even words to V4SF
(define_insn "p8_vmrgew_<mode>"
  [(set (match_operand:VSX_W 0 "register_operand" "=v")
        (vec_select:VSX_W
          (vec_concat:<VS_double>
            (match_operand:VSX_W 1 "register_operand" "v")
            (match_operand:VSX_W 2 "register_operand" "v"))
          (parallel [(const_int 0) (const_int 4)
                     (const_int 2) (const_int 6)])))]
  "TARGET_P8_VECTOR"
{
  if (BYTES_BIG_ENDIAN)
    return "vmrgew %0,%1,%2";
  else
    return "vmrgow %0,%2,%1";
}
  [(set_attr "type" "vecperm")])

(define_insn "p8_vmrgow_<mode>"
  [(set (match_operand:VSX_W 0 "register_operand" "=v")
        (vec_select:VSX_W
          (vec_concat:<VS_double>
            (match_operand:VSX_W 1 "register_operand" "v")
            (match_operand:VSX_W 2 "register_operand" "v"))
          (parallel [(const_int 1) (const_int 5)
                     (const_int 3) (const_int 7)])))]
  "TARGET_P8_VECTOR"
{
  if (BYTES_BIG_ENDIAN)
    return "vmrgow %0,%1,%2";
  else
    return "vmrgew %0,%2,%1";
}
  [(set_attr "type" "vecperm")])

(define_expand "p8_vmrgow_<mode>"
  [(use (match_operand:VSX_D 0 "vsx_register_operand"))
   (use (match_operand:VSX_D 1 "vsx_register_operand"))
   (use (match_operand:VSX_D 2 "vsx_register_operand"))]
  "VECTOR_MEM_VSX_P (<MODE>mode)"
{
  rtvec v;
  rtx x;

  v = gen_rtvec (2, GEN_INT (1), GEN_INT (3));
  x = gen_rtx_VEC_CONCAT (<VS_double>mode, operands[1], operands[2]);

  x = gen_rtx_VEC_SELECT (<MODE>mode, x, gen_rtx_PARALLEL (VOIDmode, v));
  emit_insn (gen_rtx_SET (operands[0], x));
  DONE;
})

(define_insn "p8_vmrgew_<mode>_direct"
  [(set (match_operand:VSX_W 0 "register_operand" "=v")
        (unspec:VSX_W [(match_operand:VSX_W 1 "register_operand" "v")
                       (match_operand:VSX_W 2 "register_operand" "v")]
                      UNSPEC_VMRGEW_DIRECT))]
  "TARGET_P8_VECTOR"
  "vmrgew %0,%1,%2"
  [(set_attr "type" "vecperm")])

(define_insn "p8_vmrgow_<mode>_direct"
  [(set (match_operand:VSX_W 0 "register_operand" "=v")
        (unspec:VSX_W [(match_operand:VSX_W 1 "register_operand" "v")
                       (match_operand:VSX_W 2 "register_operand" "v")]
                      UNSPEC_VMRGOW_DIRECT))]
  "TARGET_P8_VECTOR"
  "vmrgow %0,%1,%2"
  [(set_attr "type" "vecperm")])

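;; The vec_widen_*mult_even/odd expanders below compensate for endianness
;; the same way: GCC numbers vector elements in memory order while the
;; vmule*/vmulo* instructions number them in register order, so "even"
;; maps to vmule* on big-endian but to vmulo* on little-endian, and
;; "odd" the reverse.
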
1430 (define_expand "vec_widen_umult_even_v16qi"
1431 [(use (match_operand:V8HI 0 "register_operand"))
1432 (use (match_operand:V16QI 1 "register_operand"))
1433 (use (match_operand:V16QI 2 "register_operand"))]
1434 "TARGET_ALTIVEC"
1435 {
1436 if (BYTES_BIG_ENDIAN)
1437 emit_insn (gen_altivec_vmuleub (operands[0], operands[1], operands[2]));
1438 else
1439 emit_insn (gen_altivec_vmuloub (operands[0], operands[1], operands[2]));
1440 DONE;
1441 })
1442
1443 (define_expand "vec_widen_smult_even_v16qi"
1444 [(use (match_operand:V8HI 0 "register_operand"))
1445 (use (match_operand:V16QI 1 "register_operand"))
1446 (use (match_operand:V16QI 2 "register_operand"))]
1447 "TARGET_ALTIVEC"
1448 {
1449 if (BYTES_BIG_ENDIAN)
1450 emit_insn (gen_altivec_vmulesb (operands[0], operands[1], operands[2]));
1451 else
1452 emit_insn (gen_altivec_vmulosb (operands[0], operands[1], operands[2]));
1453 DONE;
1454 })
1455
1456 (define_expand "vec_widen_umult_even_v8hi"
1457 [(use (match_operand:V4SI 0 "register_operand"))
1458 (use (match_operand:V8HI 1 "register_operand"))
1459 (use (match_operand:V8HI 2 "register_operand"))]
1460 "TARGET_ALTIVEC"
1461 {
1462 if (BYTES_BIG_ENDIAN)
1463 emit_insn (gen_altivec_vmuleuh (operands[0], operands[1], operands[2]));
1464 else
1465 emit_insn (gen_altivec_vmulouh (operands[0], operands[1], operands[2]));
1466 DONE;
1467 })
1468
1469 (define_expand "vec_widen_smult_even_v8hi"
1470 [(use (match_operand:V4SI 0 "register_operand"))
1471 (use (match_operand:V8HI 1 "register_operand"))
1472 (use (match_operand:V8HI 2 "register_operand"))]
1473 "TARGET_ALTIVEC"
1474 {
1475 if (BYTES_BIG_ENDIAN)
1476 emit_insn (gen_altivec_vmulesh (operands[0], operands[1], operands[2]));
1477 else
1478 emit_insn (gen_altivec_vmulosh (operands[0], operands[1], operands[2]));
1479 DONE;
1480 })
1481
1482 (define_expand "vec_widen_umult_even_v4si"
1483 [(use (match_operand:V2DI 0 "register_operand"))
1484 (use (match_operand:V4SI 1 "register_operand"))
1485 (use (match_operand:V4SI 2 "register_operand"))]
1486 "TARGET_P8_VECTOR"
1487 {
1488 if (BYTES_BIG_ENDIAN)
1489 emit_insn (gen_altivec_vmuleuw (operands[0], operands[1], operands[2]));
1490 else
1491 emit_insn (gen_altivec_vmulouw (operands[0], operands[1], operands[2]));
1492 DONE;
1493 })
1494
1495 (define_expand "vec_widen_umult_even_v2di"
1496 [(use (match_operand:V1TI 0 "register_operand"))
1497 (use (match_operand:V2DI 1 "register_operand"))
1498 (use (match_operand:V2DI 2 "register_operand"))]
1499 "TARGET_POWER10"
1500 {
1501 if (BYTES_BIG_ENDIAN)
1502 emit_insn (gen_altivec_vmuleud (operands[0], operands[1], operands[2]));
1503 else
1504 emit_insn (gen_altivec_vmuloud (operands[0], operands[1], operands[2]));
1505 DONE;
1506 })
1507
1508 (define_expand "vec_widen_smult_even_v4si"
1509 [(use (match_operand:V2DI 0 "register_operand"))
1510 (use (match_operand:V4SI 1 "register_operand"))
1511 (use (match_operand:V4SI 2 "register_operand"))]
1512 "TARGET_P8_VECTOR"
1513 {
1514 if (BYTES_BIG_ENDIAN)
1515 emit_insn (gen_altivec_vmulesw (operands[0], operands[1], operands[2]));
1516 else
1517 emit_insn (gen_altivec_vmulosw (operands[0], operands[1], operands[2]));
1518 DONE;
1519 })
1520
1521 (define_expand "vec_widen_smult_even_v2di"
1522 [(use (match_operand:V1TI 0 "register_operand"))
1523 (use (match_operand:V2DI 1 "register_operand"))
1524 (use (match_operand:V2DI 2 "register_operand"))]
1525 "TARGET_POWER10"
1526 {
1527 if (BYTES_BIG_ENDIAN)
1528 emit_insn (gen_altivec_vmulesd (operands[0], operands[1], operands[2]));
1529 else
1530 emit_insn (gen_altivec_vmulosd (operands[0], operands[1], operands[2]));
1531 DONE;
1532 })
1533
1534 (define_expand "vec_widen_umult_odd_v16qi"
1535 [(use (match_operand:V8HI 0 "register_operand"))
1536 (use (match_operand:V16QI 1 "register_operand"))
1537 (use (match_operand:V16QI 2 "register_operand"))]
1538 "TARGET_ALTIVEC"
1539 {
1540 if (BYTES_BIG_ENDIAN)
1541 emit_insn (gen_altivec_vmuloub (operands[0], operands[1], operands[2]));
1542 else
1543 emit_insn (gen_altivec_vmuleub (operands[0], operands[1], operands[2]));
1544 DONE;
1545 })
1546
1547 (define_expand "vec_widen_smult_odd_v16qi"
1548 [(use (match_operand:V8HI 0 "register_operand"))
1549 (use (match_operand:V16QI 1 "register_operand"))
1550 (use (match_operand:V16QI 2 "register_operand"))]
1551 "TARGET_ALTIVEC"
1552 {
1553 if (BYTES_BIG_ENDIAN)
1554 emit_insn (gen_altivec_vmulosb (operands[0], operands[1], operands[2]));
1555 else
1556 emit_insn (gen_altivec_vmulesb (operands[0], operands[1], operands[2]));
1557 DONE;
1558 })
1559
1560 (define_expand "vec_widen_umult_odd_v8hi"
1561 [(use (match_operand:V4SI 0 "register_operand"))
1562 (use (match_operand:V8HI 1 "register_operand"))
1563 (use (match_operand:V8HI 2 "register_operand"))]
1564 "TARGET_ALTIVEC"
1565 {
1566 if (BYTES_BIG_ENDIAN)
1567 emit_insn (gen_altivec_vmulouh (operands[0], operands[1], operands[2]));
1568 else
1569 emit_insn (gen_altivec_vmuleuh (operands[0], operands[1], operands[2]));
1570 DONE;
1571 })
1572
1573 (define_expand "vec_widen_smult_odd_v8hi"
1574 [(use (match_operand:V4SI 0 "register_operand"))
1575 (use (match_operand:V8HI 1 "register_operand"))
1576 (use (match_operand:V8HI 2 "register_operand"))]
1577 "TARGET_ALTIVEC"
1578 {
1579 if (BYTES_BIG_ENDIAN)
1580 emit_insn (gen_altivec_vmulosh (operands[0], operands[1], operands[2]));
1581 else
1582 emit_insn (gen_altivec_vmulesh (operands[0], operands[1], operands[2]));
1583 DONE;
1584 })
1585
1586 (define_expand "vec_widen_umult_odd_v4si"
1587 [(use (match_operand:V2DI 0 "register_operand"))
1588 (use (match_operand:V4SI 1 "register_operand"))
1589 (use (match_operand:V4SI 2 "register_operand"))]
1590 "TARGET_P8_VECTOR"
1591 {
1592 if (BYTES_BIG_ENDIAN)
1593 emit_insn (gen_altivec_vmulouw (operands[0], operands[1], operands[2]));
1594 else
1595 emit_insn (gen_altivec_vmuleuw (operands[0], operands[1], operands[2]));
1596 DONE;
1597 })
1598
1599 (define_expand "vec_widen_umult_odd_v2di"
1600 [(use (match_operand:V1TI 0 "register_operand"))
1601 (use (match_operand:V2DI 1 "register_operand"))
1602 (use (match_operand:V2DI 2 "register_operand"))]
1603 "TARGET_POWER10"
1604 {
1605 if (BYTES_BIG_ENDIAN)
1606 emit_insn (gen_altivec_vmuloud (operands[0], operands[1], operands[2]));
1607 else
1608 emit_insn (gen_altivec_vmuleud (operands[0], operands[1], operands[2]));
1609 DONE;
1610 })
1611
1612 (define_expand "vec_widen_smult_odd_v4si"
1613 [(use (match_operand:V2DI 0 "register_operand"))
1614 (use (match_operand:V4SI 1 "register_operand"))
1615 (use (match_operand:V4SI 2 "register_operand"))]
1616 "TARGET_P8_VECTOR"
1617 {
1618 if (BYTES_BIG_ENDIAN)
1619 emit_insn (gen_altivec_vmulosw (operands[0], operands[1], operands[2]));
1620 else
1621 emit_insn (gen_altivec_vmulesw (operands[0], operands[1], operands[2]));
1622 DONE;
1623 })
1624
1625 (define_expand "vec_widen_smult_odd_v2di"
1626 [(use (match_operand:V1TI 0 "register_operand"))
1627 (use (match_operand:V2DI 1 "register_operand"))
1628 (use (match_operand:V2DI 2 "register_operand"))]
1629 "TARGET_POWER10"
1630 {
1631 if (BYTES_BIG_ENDIAN)
1632 emit_insn (gen_altivec_vmulosd (operands[0], operands[1], operands[2]));
1633 else
1634 emit_insn (gen_altivec_vmulesd (operands[0], operands[1], operands[2]));
1635 DONE;
1636 })
1637
1638 (define_insn "altivec_vmuleub"
1639 [(set (match_operand:V8HI 0 "register_operand" "=v")
1640 (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")
1641 (match_operand:V16QI 2 "register_operand" "v")]
1642 UNSPEC_VMULEUB))]
1643 "TARGET_ALTIVEC"
1644 "vmuleub %0,%1,%2"
1645 [(set_attr "type" "veccomplex")])
1646
1647 (define_insn "altivec_vmuloub"
1648 [(set (match_operand:V8HI 0 "register_operand" "=v")
1649 (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")
1650 (match_operand:V16QI 2 "register_operand" "v")]
1651 UNSPEC_VMULOUB))]
1652 "TARGET_ALTIVEC"
1653 "vmuloub %0,%1,%2"
1654 [(set_attr "type" "veccomplex")])
1655
1656 (define_insn "altivec_vmulesb"
1657 [(set (match_operand:V8HI 0 "register_operand" "=v")
1658 (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")
1659 (match_operand:V16QI 2 "register_operand" "v")]
1660 UNSPEC_VMULESB))]
1661 "TARGET_ALTIVEC"
1662 "vmulesb %0,%1,%2"
1663 [(set_attr "type" "veccomplex")])
1664
1665 (define_insn "altivec_vmulosb"
1666 [(set (match_operand:V8HI 0 "register_operand" "=v")
1667 (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")
1668 (match_operand:V16QI 2 "register_operand" "v")]
1669 UNSPEC_VMULOSB))]
1670 "TARGET_ALTIVEC"
1671 "vmulosb %0,%1,%2"
1672 [(set_attr "type" "veccomplex")])
1673
1674 (define_insn "altivec_vmuleuh"
1675 [(set (match_operand:V4SI 0 "register_operand" "=v")
1676 (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
1677 (match_operand:V8HI 2 "register_operand" "v")]
1678 UNSPEC_VMULEUH))]
1679 "TARGET_ALTIVEC"
1680 "vmuleuh %0,%1,%2"
1681 [(set_attr "type" "veccomplex")])
1682
1683 (define_insn "altivec_vmulouh"
1684 [(set (match_operand:V4SI 0 "register_operand" "=v")
1685 (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
1686 (match_operand:V8HI 2 "register_operand" "v")]
1687 UNSPEC_VMULOUH))]
1688 "TARGET_ALTIVEC"
1689 "vmulouh %0,%1,%2"
1690 [(set_attr "type" "veccomplex")])
1691
1692 (define_insn "altivec_vmulesh"
1693 [(set (match_operand:V4SI 0 "register_operand" "=v")
1694 (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
1695 (match_operand:V8HI 2 "register_operand" "v")]
1696 UNSPEC_VMULESH))]
1697 "TARGET_ALTIVEC"
1698 "vmulesh %0,%1,%2"
1699 [(set_attr "type" "veccomplex")])
1700
1701 (define_insn "altivec_vmulosh"
1702 [(set (match_operand:V4SI 0 "register_operand" "=v")
1703 (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
1704 (match_operand:V8HI 2 "register_operand" "v")]
1705 UNSPEC_VMULOSH))]
1706 "TARGET_ALTIVEC"
1707 "vmulosh %0,%1,%2"
1708 [(set_attr "type" "veccomplex")])
1709
1710 (define_insn "altivec_vmuleuw"
1711 [(set (match_operand:V2DI 0 "register_operand" "=v")
1712 (unspec:V2DI [(match_operand:V4SI 1 "register_operand" "v")
1713 (match_operand:V4SI 2 "register_operand" "v")]
1714 UNSPEC_VMULEUW))]
1715 "TARGET_P8_VECTOR"
1716 "vmuleuw %0,%1,%2"
1717 [(set_attr "type" "veccomplex")])
1718
1719 (define_insn "altivec_vmuleud"
1720 [(set (match_operand:V1TI 0 "register_operand" "=v")
1721 (unspec:V1TI [(match_operand:V2DI 1 "register_operand" "v")
1722 (match_operand:V2DI 2 "register_operand" "v")]
1723 UNSPEC_VMULEUD))]
1724 "TARGET_POWER10"
1725 "vmuleud %0,%1,%2"
1726 [(set_attr "type" "veccomplex")])
1727
1728 (define_insn "altivec_vmulouw"
1729 [(set (match_operand:V2DI 0 "register_operand" "=v")
1730 (unspec:V2DI [(match_operand:V4SI 1 "register_operand" "v")
1731 (match_operand:V4SI 2 "register_operand" "v")]
1732 UNSPEC_VMULOUW))]
1733 "TARGET_P8_VECTOR"
1734 "vmulouw %0,%1,%2"
1735 [(set_attr "type" "veccomplex")])
1736
1737 (define_insn "altivec_vmuloud"
1738 [(set (match_operand:V1TI 0 "register_operand" "=v")
1739 (unspec:V1TI [(match_operand:V2DI 1 "register_operand" "v")
1740 (match_operand:V2DI 2 "register_operand" "v")]
1741 UNSPEC_VMULOUD))]
1742 "TARGET_POWER10"
1743 "vmuloud %0,%1,%2"
1744 [(set_attr "type" "veccomplex")])
1745
1746 (define_insn "altivec_vmulesw"
1747 [(set (match_operand:V2DI 0 "register_operand" "=v")
1748 (unspec:V2DI [(match_operand:V4SI 1 "register_operand" "v")
1749 (match_operand:V4SI 2 "register_operand" "v")]
1750 UNSPEC_VMULESW))]
1751 "TARGET_P8_VECTOR"
1752 "vmulesw %0,%1,%2"
1753 [(set_attr "type" "veccomplex")])
1754
1755 (define_insn "altivec_vmulesd"
1756 [(set (match_operand:V1TI 0 "register_operand" "=v")
1757 (unspec:V1TI [(match_operand:V2DI 1 "register_operand" "v")
1758 (match_operand:V2DI 2 "register_operand" "v")]
1759 UNSPEC_VMULESD))]
1760 "TARGET_POWER10"
1761 "vmulesd %0,%1,%2"
1762 [(set_attr "type" "veccomplex")])
1763
1764 (define_insn "altivec_vmulosw"
1765 [(set (match_operand:V2DI 0 "register_operand" "=v")
1766 (unspec:V2DI [(match_operand:V4SI 1 "register_operand" "v")
1767 (match_operand:V4SI 2 "register_operand" "v")]
1768 UNSPEC_VMULOSW))]
1769 "TARGET_P8_VECTOR"
1770 "vmulosw %0,%1,%2"
1771 [(set_attr "type" "veccomplex")])
1772
1773 (define_insn "altivec_vmulosd"
1774 [(set (match_operand:V1TI 0 "register_operand" "=v")
1775 (unspec:V1TI [(match_operand:V2DI 1 "register_operand" "v")
1776 (match_operand:V2DI 2 "register_operand" "v")]
1777 UNSPEC_VMULOSD))]
1778 "TARGET_POWER10"
1779 "vmulosd %0,%1,%2"
1780 [(set_attr "type" "veccomplex")])
1781
1782 ;; Vector pack/unpack
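;; The pack instructions narrow the concatenation of the two source
;; vectors into a single vector of half-width elements, with operand 1
;; supplying the high half of the result in big-endian element order.
;; For little endian the templates below swap the source operands so
;; that element order in memory is preserved.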
1783 (define_insn "altivec_vpkpx"
1784 [(set (match_operand:V8HI 0 "register_operand" "=v")
1785 (unspec:V8HI [(match_operand:V4SI 1 "register_operand" "v")
1786 (match_operand:V4SI 2 "register_operand" "v")]
1787 UNSPEC_VPKPX))]
1788 "TARGET_ALTIVEC"
1789 {
1790 if (BYTES_BIG_ENDIAN)
1791 return "vpkpx %0,%1,%2";
1792 else
1793 return "vpkpx %0,%2,%1";
1794 }
1795 [(set_attr "type" "vecperm")])
1796
1797 (define_insn "altivec_vpks<VI_char>ss"
1798 [(set (match_operand:<VP_small> 0 "register_operand" "=v")
1799 (unspec:<VP_small> [(match_operand:VP 1 "register_operand" "v")
1800 (match_operand:VP 2 "register_operand" "v")]
1801 UNSPEC_VPACK_SIGN_SIGN_SAT))]
1802 "<VI_unit>"
1803 {
1804 if (BYTES_BIG_ENDIAN)
1805 return "vpks<VI_char>ss %0,%1,%2";
1806 else
1807 return "vpks<VI_char>ss %0,%2,%1";
1808 }
1809 [(set_attr "type" "vecperm")])
1810
1811 (define_insn "altivec_vpks<VI_char>us"
1812 [(set (match_operand:<VP_small> 0 "register_operand" "=v")
1813 (unspec:<VP_small> [(match_operand:VP 1 "register_operand" "v")
1814 (match_operand:VP 2 "register_operand" "v")]
1815 UNSPEC_VPACK_SIGN_UNS_SAT))]
1816 "<VI_unit>"
1817 {
1818 if (BYTES_BIG_ENDIAN)
1819 return "vpks<VI_char>us %0,%1,%2";
1820 else
1821 return "vpks<VI_char>us %0,%2,%1";
1822 }
1823 [(set_attr "type" "vecperm")])
1824
1825 (define_insn "altivec_vpku<VI_char>us"
1826 [(set (match_operand:<VP_small> 0 "register_operand" "=v")
1827 (unspec:<VP_small> [(match_operand:VP 1 "register_operand" "v")
1828 (match_operand:VP 2 "register_operand" "v")]
1829 UNSPEC_VPACK_UNS_UNS_SAT))]
1830 "<VI_unit>"
1831 {
1832 if (BYTES_BIG_ENDIAN)
1833 return "vpku<VI_char>us %0,%1,%2";
1834 else
1835 return "vpku<VI_char>us %0,%2,%1";
1836 }
1837 [(set_attr "type" "vecperm")])
1838
1839 (define_insn "altivec_vpku<VI_char>um"
1840 [(set (match_operand:<VP_small> 0 "register_operand" "=v")
1841 (unspec:<VP_small> [(match_operand:VP 1 "register_operand" "v")
1842 (match_operand:VP 2 "register_operand" "v")]
1843 UNSPEC_VPACK_UNS_UNS_MOD))]
1844 "<VI_unit>"
1845 {
1846 if (BYTES_BIG_ENDIAN)
1847 return "vpku<VI_char>um %0,%1,%2";
1848 else
1849 return "vpku<VI_char>um %0,%2,%1";
1850 }
1851 [(set_attr "type" "vecperm")])
1852
1853 (define_insn "altivec_vpku<VI_char>um_direct"
1854 [(set (match_operand:<VP_small> 0 "register_operand" "=v")
1855 (unspec:<VP_small> [(match_operand:VP 1 "register_operand" "v")
1856 (match_operand:VP 2 "register_operand" "v")]
1857 UNSPEC_VPACK_UNS_UNS_MOD_DIRECT))]
1858 "<VI_unit>"
1859 {
1860 if (BYTES_BIG_ENDIAN)
1861 return "vpku<VI_char>um %0,%1,%2";
1862 else
1863 return "vpku<VI_char>um %0,%2,%1";
1864 }
1865 [(set_attr "type" "vecperm")])
1866
1867 (define_insn "*altivec_vrl<VI_char>"
1868 [(set (match_operand:VI2 0 "register_operand" "=v")
1869 (rotate:VI2 (match_operand:VI2 1 "register_operand" "v")
1870 (match_operand:VI2 2 "register_operand" "v")))]
1871 "<VI_unit>"
1872 "vrl<VI_char> %0,%1,%2"
1873 [(set_attr "type" "vecsimple")])
1874
1875 (define_insn "altivec_vrlq"
1876 [(set (match_operand:V1TI 0 "vsx_register_operand" "=v")
1877 (rotate:V1TI (match_operand:V1TI 1 "vsx_register_operand" "v")
1878 (match_operand:V1TI 2 "vsx_register_operand" "v")))]
1879 "TARGET_POWER10"
1880 ;; The rotate amount needs to be in bits [57:63] of operand2.
1881 "vrlq %0,%1,%2"
1882 [(set_attr "type" "vecsimple")])
1883
1884 (define_insn "altivec_vrl<VI_char>mi"
1885 [(set (match_operand:VIlong 0 "register_operand" "=v")
1886 (unspec:VIlong [(match_operand:VIlong 1 "register_operand" "v")
1887 (match_operand:VIlong 2 "register_operand" "0")
1888 (match_operand:VIlong 3 "register_operand" "v")]
1889 UNSPEC_VRLMI))]
1890 "TARGET_P9_VECTOR"
1891 "vrl<VI_char>mi %0,%1,%3"
1892 [(set_attr "type" "veclogical")])
1893
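;; For vrlqmi the rotate/mask control fields must sit in the high-order
;; doubleword of the control operand, so the expander below first swaps
;; the doublewords of operand 3 with xxswapd.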
1894 (define_expand "altivec_vrlqmi"
1895 [(set (match_operand:V1TI 0 "vsx_register_operand")
1896 (unspec:V1TI [(match_operand:V1TI 1 "vsx_register_operand")
1897 (match_operand:V1TI 2 "vsx_register_operand")
1898 (match_operand:V1TI 3 "vsx_register_operand")]
1899 UNSPEC_VRLMI))]
1900 "TARGET_POWER10"
1901 {
1902 /* The mask begin and end fields need to be in bits [41:55] of the 128-bit
1903 operand2.  The shift amount needs to be in bits [57:63] of operand2.  */
1904 rtx tmp = gen_reg_rtx (V1TImode);
1905
1906 emit_insn (gen_xxswapd_v1ti (tmp, operands[3]));
1907 emit_insn (gen_altivec_vrlqmi_inst (operands[0], operands[1], operands[2],
1908 tmp));
1909 DONE;
1910 })
1911
1912 (define_insn "altivec_vrlqmi_inst"
1913 [(set (match_operand:V1TI 0 "vsx_register_operand" "=v")
1914 (unspec:V1TI [(match_operand:V1TI 1 "vsx_register_operand" "v")
1915 (match_operand:V1TI 2 "vsx_register_operand" "0")
1916 (match_operand:V1TI 3 "vsx_register_operand" "v")]
1917 UNSPEC_VRLMI))]
1918 "TARGET_POWER10"
1919 "vrlqmi %0,%1,%3"
1920 [(set_attr "type" "veclogical")])
1921
1922 (define_insn "altivec_vrl<VI_char>nm"
1923 [(set (match_operand:VIlong 0 "register_operand" "=v")
1924 (unspec:VIlong [(match_operand:VIlong 1 "register_operand" "v")
1925 (match_operand:VIlong 2 "register_operand" "v")]
1926 UNSPEC_VRLNM))]
1927 "TARGET_P9_VECTOR"
1928 "vrl<VI_char>nm %0,%1,%2"
1929 [(set_attr "type" "veclogical")])
1930
1931 (define_expand "altivec_vrlqnm"
1932 [(set (match_operand:V1TI 0 "vsx_register_operand")
1933 (unspec:V1TI [(match_operand:V1TI 1 "vsx_register_operand")
1934 (match_operand:V1TI 2 "vsx_register_operand")]
1935 UNSPEC_VRLNM))]
1936 "TARGET_POWER10"
1937 {
1938 /* The shift amount needs to be in bits [57:63] of the 128-bit operand2.  */
1939 rtx tmp = gen_reg_rtx (V1TImode);
1940
1941 emit_insn (gen_xxswapd_v1ti (tmp, operands[2]));
1942 emit_insn (gen_altivec_vrlqnm_inst (operands[0], operands[1], tmp));
1943 DONE;
1944 })
1945
1946 (define_insn "altivec_vrlqnm_inst"
1947 [(set (match_operand:V1TI 0 "vsx_register_operand" "=v")
1948 (unspec:V1TI [(match_operand:V1TI 1 "vsx_register_operand" "v")
1949 (match_operand:V1TI 2 "vsx_register_operand" "v")]
1950 UNSPEC_VRLNM))]
1951 "TARGET_POWER10"
1952 ;; The rotate amount and mask need to be in the upper 64 bits of operand2.
1953 "vrlqnm %0,%1,%2"
1954 [(set_attr "type" "veclogical")])
1955
1956 (define_insn "altivec_vsl"
1957 [(set (match_operand:V4SI 0 "register_operand" "=v")
1958 (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
1959 (match_operand:V4SI 2 "register_operand" "v")]
1960 UNSPEC_VSLV4SI))]
1961 "TARGET_ALTIVEC"
1962 "vsl %0,%1,%2"
1963 [(set_attr "type" "vecperm")])
1964
1965 (define_insn "altivec_vslo"
1966 [(set (match_operand:V4SI 0 "register_operand" "=v")
1967 (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
1968 (match_operand:V4SI 2 "register_operand" "v")]
1969 UNSPEC_VSLO))]
1970 "TARGET_ALTIVEC"
1971 "vslo %0,%1,%2"
1972 [(set_attr "type" "vecperm")])
1973
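;; vslv and vsrv (ISA 3.0) shift each byte of operand 1 by the count in
;; the low-order three bits of the corresponding byte of operand 2, with
;; bits shifting in from the neighboring byte.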
1974 (define_insn "vslv"
1975 [(set (match_operand:V16QI 0 "register_operand" "=v")
1976 (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
1977 (match_operand:V16QI 2 "register_operand" "v")]
1978 UNSPEC_VSLV))]
1979 "TARGET_P9_VECTOR"
1980 "vslv %0,%1,%2"
1981 [(set_attr "type" "vecsimple")])
1982
1983 (define_insn "vsrv"
1984 [(set (match_operand:V16QI 0 "register_operand" "=v")
1985 (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
1986 (match_operand:V16QI 2 "register_operand" "v")]
1987 UNSPEC_VSRV))]
1988 "TARGET_P9_VECTOR"
1989 "vsrv %0,%1,%2"
1990 [(set_attr "type" "vecsimple")])
1991
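;; Element-wise shifts: each element of operand 1 is shifted by the
;; count held in the corresponding element of operand 2; only the low
;; log2(element-width) bits of each count are used.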
1992 (define_insn "*altivec_vsl<VI_char>"
1993 [(set (match_operand:VI2 0 "register_operand" "=v")
1994 (ashift:VI2 (match_operand:VI2 1 "register_operand" "v")
1995 (match_operand:VI2 2 "register_operand" "v")))]
1996 "<VI_unit>"
1997 "vsl<VI_char> %0,%1,%2"
1998 [(set_attr "type" "vecsimple")])
1999
2000 (define_insn "altivec_vslq_<mode>"
2001 [(set (match_operand:VEC_TI 0 "vsx_register_operand" "=v")
2002 (ashift:VEC_TI (match_operand:VEC_TI 1 "vsx_register_operand" "v")
2003 (match_operand:VEC_TI 2 "vsx_register_operand" "v")))]
2004 "TARGET_POWER10"
2005 /* The shift amount needs to be in bits [57:63] of the 128-bit operand.  */
2006 "vslq %0,%1,%2"
2007 [(set_attr "type" "vecsimple")])
2008
2009 (define_insn "*altivec_vsr<VI_char>"
2010 [(set (match_operand:VI2 0 "register_operand" "=v")
2011 (lshiftrt:VI2 (match_operand:VI2 1 "register_operand" "v")
2012 (match_operand:VI2 2 "register_operand" "v")))]
2013 "<VI_unit>"
2014 "vsr<VI_char> %0,%1,%2"
2015 [(set_attr "type" "vecsimple")])
2016
2017 (define_insn "altivec_vsrq_<mode>"
2018 [(set (match_operand:VEC_TI 0 "vsx_register_operand" "=v")
2019 (lshiftrt:VEC_TI (match_operand:VEC_TI 1 "vsx_register_operand" "v")
2020 (match_operand:VEC_TI 2 "vsx_register_operand" "v")))]
2021 "TARGET_POWER10"
2022 /* The shift amount needs to be in bits [57:63] of the 128-bit operand.  */
2023 "vsrq %0,%1,%2"
2024 [(set_attr "type" "vecsimple")])
2025
2026 (define_insn "*altivec_vsra<VI_char>"
2027 [(set (match_operand:VI2 0 "register_operand" "=v")
2028 (ashiftrt:VI2 (match_operand:VI2 1 "register_operand" "v")
2029 (match_operand:VI2 2 "register_operand" "v")))]
2030 "<VI_unit>"
2031 "vsra<VI_char> %0,%1,%2"
2032 [(set_attr "type" "vecsimple")])
2033
2034 (define_insn "altivec_vsraq"
2035 [(set (match_operand:V1TI 0 "vsx_register_operand" "=v")
2036 (ashiftrt:V1TI (match_operand:V1TI 1 "vsx_register_operand" "v")
2037 (match_operand:V1TI 2 "vsx_register_operand" "v")))]
2038 "TARGET_POWER10"
2039 /* The shift amount needs to be in bits [57:63] of the 128-bit operand.  */
2040 "vsraq %0,%1,%2"
2041 [(set_attr "type" "vecsimple")])
2042
2043 (define_insn "altivec_vsr"
2044 [(set (match_operand:V4SI 0 "register_operand" "=v")
2045 (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
2046 (match_operand:V4SI 2 "register_operand" "v")]
2047 UNSPEC_VSR))]
2048 "TARGET_ALTIVEC"
2049 "vsr %0,%1,%2"
2050 [(set_attr "type" "vecperm")])
2051
2052 (define_insn "altivec_vsro"
2053 [(set (match_operand:V4SI 0 "register_operand" "=v")
2054 (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
2055 (match_operand:V4SI 2 "register_operand" "v")]
2056 UNSPEC_VSRO))]
2057 "TARGET_ALTIVEC"
2058 "vsro %0,%1,%2"
2059 [(set_attr "type" "vecperm")])
2060
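;; The vsum4* instructions sum the byte or halfword elements within each
;; word of operand 1, add the corresponding word of operand 2, and
;; saturate the result, setting VSCR[SAT] on overflow (hence the VSCR
;; set in the patterns).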
2061 (define_insn "altivec_vsum4ubs"
2062 [(set (match_operand:V4SI 0 "register_operand" "=v")
2063 (unspec:V4SI [(match_operand:V16QI 1 "register_operand" "v")
2064 (match_operand:V4SI 2 "register_operand" "v")]
2065 UNSPEC_VSUM4UBS))
2066 (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
2067 "TARGET_ALTIVEC"
2068 "vsum4ubs %0,%1,%2"
2069 [(set_attr "type" "veccomplex")])
2070
2071 (define_insn "altivec_vsum4s<VI_char>s"
2072 [(set (match_operand:V4SI 0 "register_operand" "=v")
2073 (unspec:V4SI [(match_operand:VIshort 1 "register_operand" "v")
2074 (match_operand:V4SI 2 "register_operand" "v")]
2075 UNSPEC_VSUM4S))
2076 (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
2077 "TARGET_ALTIVEC"
2078 "vsum4s<VI_char>s %0,%1,%2"
2079 [(set_attr "type" "veccomplex")])
2080
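;; vsum2sws places its two saturated sums in words 1 and 3 (big-endian
;; numbering) and reads words 1 and 3 of operand 2.  The little-endian
;; expansion below therefore rotates operand 2 into position with vsldoi
;; and rotates the result back so the sums land in the expected elements.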
2081 (define_expand "altivec_vsum2sws"
2082 [(use (match_operand:V4SI 0 "register_operand"))
2083 (use (match_operand:V4SI 1 "register_operand"))
2084 (use (match_operand:V4SI 2 "register_operand"))]
2085 "TARGET_ALTIVEC"
2086 {
2087 if (BYTES_BIG_ENDIAN)
2088 emit_insn (gen_altivec_vsum2sws_direct (operands[0], operands[1],
2089 operands[2]));
2090 else
2091 {
2092 rtx tmp1 = gen_reg_rtx (V4SImode);
2093 rtx tmp2 = gen_reg_rtx (V4SImode);
2094 emit_insn (gen_altivec_vsldoi_v4si (tmp1, operands[2],
2095 operands[2], GEN_INT (12)));
2096 emit_insn (gen_altivec_vsum2sws_direct (tmp2, operands[1], tmp1));
2097 emit_insn (gen_altivec_vsldoi_v4si (operands[0], tmp2, tmp2,
2098 GEN_INT (4)));
2099 }
2100 DONE;
2101 })
2102
2103 ; FIXME: This can probably be expressed without an UNSPEC.
2104 (define_insn "altivec_vsum2sws_direct"
2105 [(set (match_operand:V4SI 0 "register_operand" "=v")
2106 (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
2107 (match_operand:V4SI 2 "register_operand" "v")]
2108 UNSPEC_VSUM2SWS))
2109 (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
2110 "TARGET_ALTIVEC"
2111 "vsum2sws %0,%1,%2"
2112 [(set_attr "type" "veccomplex")])
2113
2114 (define_expand "altivec_vsumsws"
2115 [(use (match_operand:V4SI 0 "register_operand"))
2116 (use (match_operand:V4SI 1 "register_operand"))
2117 (use (match_operand:V4SI 2 "register_operand"))]
2118 "TARGET_ALTIVEC"
2119 {
2120 if (BYTES_BIG_ENDIAN)
2121 emit_insn (gen_altivec_vsumsws_direct (operands[0], operands[1],
2122 operands[2]));
2123 else
2124 {
2125 rtx tmp1 = gen_reg_rtx (V4SImode);
2126 rtx tmp2 = gen_reg_rtx (V4SImode);
2127 emit_insn (gen_altivec_vspltw_direct (tmp1, operands[2], const0_rtx));
2128 emit_insn (gen_altivec_vsumsws_direct (tmp2, operands[1], tmp1));
2129 emit_insn (gen_altivec_vsldoi_v4si (operands[0], tmp2, tmp2,
2130 GEN_INT (12)));
2131 }
2132 DONE;
2133 })
2134
2135 ; FIXME: This can probably be expressed without an UNSPEC.
2136 (define_insn "altivec_vsumsws_direct"
2137 [(set (match_operand:V4SI 0 "register_operand" "=v")
2138 (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
2139 (match_operand:V4SI 2 "register_operand" "v")]
2140 UNSPEC_VSUMSWS_DIRECT))
2141 (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
2142 "TARGET_ALTIVEC"
2143 "vsumsws %0,%1,%2"
2144 [(set_attr "type" "veccomplex")])
2145
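;; The splat patterns replicate one element of operand 1 across the
;; result.  The immediate selects the element in memory order, so the
;; internal patterns below adjust it for little endian (e.g. 15 - n for
;; bytes) to address the element the hardware numbers from the other end.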
2146 (define_expand "altivec_vspltb"
2147 [(use (match_operand:V16QI 0 "register_operand"))
2148 (use (match_operand:V16QI 1 "register_operand"))
2149 (use (match_operand:QI 2 "const_0_to_15_operand"))]
2150 "TARGET_ALTIVEC"
2151 {
2152 rtvec v = gen_rtvec (1, operands[2]);
2153 rtx x;
2154 x = gen_rtx_VEC_SELECT (QImode, operands[1], gen_rtx_PARALLEL (VOIDmode, v));
2155 x = gen_rtx_VEC_DUPLICATE (V16QImode, x);
2156 emit_insn (gen_rtx_SET (operands[0], x));
2157 DONE;
2158 })
2159
2160 (define_insn "*altivec_vspltb_internal"
2161 [(set (match_operand:V16QI 0 "register_operand" "=v")
2162 (vec_duplicate:V16QI
2163 (vec_select:QI (match_operand:V16QI 1 "register_operand" "v")
2164 (parallel
2165 [(match_operand:QI 2 "const_0_to_15_operand" "")]))))]
2166 "TARGET_ALTIVEC"
2167 {
2168 if (!BYTES_BIG_ENDIAN)
2169 operands[2] = GEN_INT (15 - INTVAL (operands[2]));
2170
2171 return "vspltb %0,%1,%2";
2172 }
2173 [(set_attr "type" "vecperm")])
2174
2175 (define_insn "altivec_vspltb_direct"
2176 [(set (match_operand:V16QI 0 "register_operand" "=v")
2177 (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
2178 (match_operand:QI 2 "const_0_to_15_operand" "i")]
2179 UNSPEC_VSPLT_DIRECT))]
2180 "TARGET_ALTIVEC"
2181 "vspltb %0,%1,%2"
2182 [(set_attr "type" "vecperm")])
2183
2184 (define_expand "altivec_vsplth"
2185 [(use (match_operand:V8HI 0 "register_operand"))
2186 (use (match_operand:V8HI 1 "register_operand"))
2187 (use (match_operand:QI 2 "const_0_to_7_operand"))]
2188 "TARGET_ALTIVEC"
2189 {
2190 rtvec v = gen_rtvec (1, operands[2]);
2191 rtx x;
2192 x = gen_rtx_VEC_SELECT (HImode, operands[1], gen_rtx_PARALLEL (VOIDmode, v));
2193 x = gen_rtx_VEC_DUPLICATE (V8HImode, x);
2194 emit_insn (gen_rtx_SET (operands[0], x));
2195 DONE;
2196 })
2197
2198 (define_insn "*altivec_vsplth_internal"
2199 [(set (match_operand:V8HI 0 "register_operand" "=v")
2200 (vec_duplicate:V8HI
2201 (vec_select:HI (match_operand:V8HI 1 "register_operand" "v")
2202 (parallel
2203 [(match_operand:QI 2 "const_0_to_7_operand" "")]))))]
2204 "TARGET_ALTIVEC"
2205 {
2206 if (!BYTES_BIG_ENDIAN)
2207 operands[2] = GEN_INT (7 - INTVAL (operands[2]));
2208
2209 return "vsplth %0,%1,%2";
2210 }
2211 [(set_attr "type" "vecperm")])
2212
2213 (define_insn "altivec_vsplth_direct"
2214 [(set (match_operand:V8HI 0 "register_operand" "=v")
2215 (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
2216 (match_operand:QI 2 "const_0_to_7_operand" "i")]
2217 UNSPEC_VSPLT_DIRECT))]
2218 "TARGET_ALTIVEC"
2219 "vsplth %0,%1,%2"
2220 [(set_attr "type" "vecperm")])
2221
2222 (define_expand "altivec_vspltw"
2223 [(use (match_operand:V4SI 0 "register_operand"))
2224 (use (match_operand:V4SI 1 "register_operand"))
2225 (use (match_operand:QI 2 "const_0_to_3_operand"))]
2226 "TARGET_ALTIVEC"
2227 {
2228 rtvec v = gen_rtvec (1, operands[2]);
2229 rtx x;
2230 x = gen_rtx_VEC_SELECT (SImode, operands[1], gen_rtx_PARALLEL (VOIDmode, v));
2231 x = gen_rtx_VEC_DUPLICATE (V4SImode, x);
2232 emit_insn (gen_rtx_SET (operands[0], x));
2233 DONE;
2234 })
2235
2236 (define_insn "*altivec_vspltw_internal"
2237 [(set (match_operand:V4SI 0 "register_operand" "=v")
2238 (vec_duplicate:V4SI
2239 (vec_select:SI (match_operand:V4SI 1 "register_operand" "v")
2240 (parallel
2241 [(match_operand:QI 2 "const_0_to_3_operand" "i")]))))]
2242 "TARGET_ALTIVEC"
2243 {
2244 if (!BYTES_BIG_ENDIAN)
2245 operands[2] = GEN_INT (3 - INTVAL (operands[2]));
2246
2247 return "vspltw %0,%1,%2";
2248 }
2249 [(set_attr "type" "vecperm")])
2250
2251 (define_insn "altivec_vspltw_direct"
2252 [(set (match_operand:V4SI 0 "register_operand" "=v")
2253 (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
2254 (match_operand:QI 2 "const_0_to_3_operand" "i")]
2255 UNSPEC_VSPLT_DIRECT))]
2256 "TARGET_ALTIVEC"
2257 "vspltw %0,%1,%2"
2258 [(set_attr "type" "vecperm")])
2259
2260 (define_expand "altivec_vspltsf"
2261 [(use (match_operand:V4SF 0 "register_operand"))
2262 (use (match_operand:V4SF 1 "register_operand"))
2263 (use (match_operand:QI 2 "const_0_to_3_operand"))]
2264 "TARGET_ALTIVEC"
2265 {
2266 rtvec v = gen_rtvec (1, operands[2]);
2267 rtx x;
2268 x = gen_rtx_VEC_SELECT (SFmode, operands[1], gen_rtx_PARALLEL (VOIDmode, v));
2269 x = gen_rtx_VEC_DUPLICATE (V4SFmode, x);
2270 emit_insn (gen_rtx_SET (operands[0], x));
2271 DONE;
2272 })
2273
2274 (define_insn "*altivec_vspltsf_internal"
2275 [(set (match_operand:V4SF 0 "register_operand" "=v")
2276 (vec_duplicate:V4SF
2277 (vec_select:SF (match_operand:V4SF 1 "register_operand" "v")
2278 (parallel
2279 [(match_operand:QI 2 "const_0_to_3_operand" "i")]))))]
2280 "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
2281 {
2282 if (!BYTES_BIG_ENDIAN)
2283 operands[2] = GEN_INT (3 - INTVAL (operands[2]));
2284
2285 return "vspltw %0,%1,%2";
2286 }
2287 [(set_attr "type" "vecperm")])
2288
2289 (define_insn "altivec_vspltis<VI_char>"
2290 [(set (match_operand:VI 0 "register_operand" "=v")
2291 (vec_duplicate:VI
2292 (match_operand:QI 1 "s5bit_cint_operand" "i")))]
2293 "TARGET_ALTIVEC"
2294 "vspltis<VI_char> %0,%1"
2295 [(set_attr "type" "vecperm")])
2296
2297 (define_insn "*altivec_vrfiz"
2298 [(set (match_operand:V4SF 0 "register_operand" "=v")
2299 (fix:V4SF (match_operand:V4SF 1 "register_operand" "v")))]
2300 "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
2301 "vrfiz %0,%1"
2302 [(set_attr "type" "vecfloat")])
2303
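;; vperm selects bytes from the concatenation of operands 1 and 2 under
;; control of the byte indices in operand 3, using big-endian numbering.
;; Little-endian expansion therefore goes through
;; altivec_expand_vec_perm_le, which adjusts the permute control vector.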
2304 (define_expand "altivec_vperm_<mode>"
2305 [(set (match_operand:VM 0 "register_operand")
2306 (unspec:VM [(match_operand:VM 1 "register_operand")
2307 (match_operand:VM 2 "register_operand")
2308 (match_operand:V16QI 3 "register_operand")]
2309 UNSPEC_VPERM))]
2310 "TARGET_ALTIVEC"
2311 {
2312 if (!BYTES_BIG_ENDIAN)
2313 {
2314 altivec_expand_vec_perm_le (operands);
2315 DONE;
2316 }
2317 })
2318
2319 ;; Slightly prefer vperm, since the target does not overlap the source
2320 (define_insn "altivec_vperm_<mode>_direct"
2321 [(set (match_operand:VM 0 "register_operand" "=?wa,v")
2322 (unspec:VM [(match_operand:VM 1 "register_operand" "wa,v")
2323 (match_operand:VM 2 "register_operand" "0,v")
2324 (match_operand:V16QI 3 "register_operand" "wa,v")]
2325 UNSPEC_VPERM))]
2326 "TARGET_ALTIVEC"
2327 "@
2328 xxperm %x0,%x1,%x3
2329 vperm %0,%1,%2,%3"
2330 [(set_attr "type" "vecperm")
2331 (set_attr "isa" "p9v,*")])
2332
2333 (define_insn "altivec_vperm_v8hiv16qi"
2334 [(set (match_operand:V16QI 0 "register_operand" "=?wa,v")
2335 (unspec:V16QI [(match_operand:V8HI 1 "register_operand" "wa,v")
2336 (match_operand:V8HI 2 "register_operand" "0,v")
2337 (match_operand:V16QI 3 "register_operand" "wa,v")]
2338 UNSPEC_VPERM))]
2339 "TARGET_ALTIVEC"
2340 "@
2341 xxperm %x0,%x1,%x3
2342 vperm %0,%1,%2,%3"
2343 [(set_attr "type" "vecperm")
2344 (set_attr "isa" "p9v,*")])
2345
2346 (define_expand "altivec_vperm_<mode>_uns"
2347 [(set (match_operand:VM 0 "register_operand")
2348 (unspec:VM [(match_operand:VM 1 "register_operand")
2349 (match_operand:VM 2 "register_operand")
2350 (match_operand:V16QI 3 "register_operand")]
2351 UNSPEC_VPERM_UNS))]
2352 "TARGET_ALTIVEC"
2353 {
2354 if (!BYTES_BIG_ENDIAN)
2355 {
2356 altivec_expand_vec_perm_le (operands);
2357 DONE;
2358 }
2359 })
2360
2361 (define_insn "*altivec_vperm_<mode>_uns_internal"
2362 [(set (match_operand:VM 0 "register_operand" "=?wa,v")
2363 (unspec:VM [(match_operand:VM 1 "register_operand" "wa,v")
2364 (match_operand:VM 2 "register_operand" "0,v")
2365 (match_operand:V16QI 3 "register_operand" "wa,v")]
2366 UNSPEC_VPERM_UNS))]
2367 "TARGET_ALTIVEC"
2368 "@
2369 xxperm %x0,%x1,%x3
2370 vperm %0,%1,%2,%3"
2371 [(set_attr "type" "vecperm")
2372 (set_attr "isa" "p9v,*")])
2373
2374 (define_expand "vec_permv16qi"
2375 [(set (match_operand:V16QI 0 "register_operand")
2376 (unspec:V16QI [(match_operand:V16QI 1 "register_operand")
2377 (match_operand:V16QI 2 "register_operand")
2378 (match_operand:V16QI 3 "register_operand")]
2379 UNSPEC_VPERM))]
2380 "TARGET_ALTIVEC"
2381 {
2382 if (!BYTES_BIG_ENDIAN) {
2383 altivec_expand_vec_perm_le (operands);
2384 DONE;
2385 }
2386 })
2387
2388 (define_insn "*altivec_vpermr_<mode>_internal"
2389 [(set (match_operand:VM 0 "register_operand" "=?wa,v")
2390 (unspec:VM [(match_operand:VM 1 "register_operand" "wa,v")
2391 (match_operand:VM 2 "register_operand" "0,v")
2392 (match_operand:V16QI 3 "register_operand" "wa,v")]
2393 UNSPEC_VPERMR))]
2394 "TARGET_P9_VECTOR"
2395 "@
2396 xxpermr %x0,%x1,%x3
2397 vpermr %0,%1,%2,%3"
2398 [(set_attr "type" "vecperm")
2399 (set_attr "isa" "p9v,*")])
2400
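;; vrfip/vrfin/vrfim round each element to an integral value toward
;; +inf (ceil), to nearest, and toward -inf (floor), respectively.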
2401 (define_insn "altivec_vrfip" ; ceil
2402 [(set (match_operand:V4SF 0 "register_operand" "=v")
2403 (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")]
2404 UNSPEC_FRIP))]
2405 "TARGET_ALTIVEC"
2406 "vrfip %0,%1"
2407 [(set_attr "type" "vecfloat")])
2408
2409 (define_insn "altivec_vrfin"
2410 [(set (match_operand:V4SF 0 "register_operand" "=v")
2411 (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")]
2412 UNSPEC_VRFIN))]
2413 "TARGET_ALTIVEC"
2414 "vrfin %0,%1"
2415 [(set_attr "type" "vecfloat")])
2416
2417 (define_insn "*altivec_vrfim" ; floor
2418 [(set (match_operand:V4SF 0 "register_operand" "=v")
2419 (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")]
2420 UNSPEC_FRIM))]
2421 "TARGET_ALTIVEC"
2422 "vrfim %0,%1"
2423 [(set_attr "type" "vecfloat")])
2424
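;; vcfux/vcfsx convert each (un)signed word of operand 1 to float and
;; scale the result by 2**-%2.  vctuxs/vctsxs perform the inverse
;; conversion, scaling by 2**%2 and saturating, which can set VSCR[SAT].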
2425 (define_insn "altivec_vcfux"
2426 [(set (match_operand:V4SF 0 "register_operand" "=v")
2427 (unspec:V4SF [(match_operand:V4SI 1 "register_operand" "v")
2428 (match_operand:QI 2 "immediate_operand" "i")]
2429 UNSPEC_VCFUX))]
2430 "TARGET_ALTIVEC"
2431 "vcfux %0,%1,%2"
2432 [(set_attr "type" "vecfloat")])
2433
2434 (define_insn "altivec_vcfsx"
2435 [(set (match_operand:V4SF 0 "register_operand" "=v")
2436 (unspec:V4SF [(match_operand:V4SI 1 "register_operand" "v")
2437 (match_operand:QI 2 "immediate_operand" "i")]
2438 UNSPEC_VCFSX))]
2439 "TARGET_ALTIVEC"
2440 "vcfsx %0,%1,%2"
2441 [(set_attr "type" "vecfloat")])
2442
2443 (define_insn "altivec_vctuxs"
2444 [(set (match_operand:V4SI 0 "register_operand" "=v")
2445 (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "v")
2446 (match_operand:QI 2 "immediate_operand" "i")]
2447 UNSPEC_VCTUXS))
2448 (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
2449 "TARGET_ALTIVEC"
2450 "vctuxs %0,%1,%2"
2451 [(set_attr "type" "vecfloat")])
2452
2453 (define_insn "altivec_vctsxs"
2454 [(set (match_operand:V4SI 0 "register_operand" "=v")
2455 (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "v")
2456 (match_operand:QI 2 "immediate_operand" "i")]
2457 UNSPEC_VCTSXS))
2458 (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
2459 "TARGET_ALTIVEC"
2460 "vctsxs %0,%1,%2"
2461 [(set_attr "type" "vecfloat")])
2462
2463 (define_insn "altivec_vlogefp"
2464 [(set (match_operand:V4SF 0 "register_operand" "=v")
2465 (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")]
2466 UNSPEC_VLOGEFP))]
2467 "TARGET_ALTIVEC"
2468 "vlogefp %0,%1"
2469 [(set_attr "type" "vecfloat")])
2470
2471 (define_insn "altivec_vexptefp"
2472 [(set (match_operand:V4SF 0 "register_operand" "=v")
2473 (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")]
2474 UNSPEC_VEXPTEFP))]
2475 "TARGET_ALTIVEC"
2476 "vexptefp %0,%1"
2477 [(set_attr "type" "vecfloat")])
2478
2479 (define_insn "*altivec_vrsqrtefp"
2480 [(set (match_operand:V4SF 0 "register_operand" "=v")
2481 (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")]
2482 UNSPEC_RSQRT))]
2483 "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
2484 "vrsqrtefp %0,%1"
2485 [(set_attr "type" "vecfloat")])
2486
2487 (define_insn "altivec_vrefp"
2488 [(set (match_operand:V4SF 0 "register_operand" "=v")
2489 (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")]
2490 UNSPEC_FRES))]
2491 "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
2492 "vrefp %0,%1"
2493 [(set_attr "type" "vecfloat")])
2494
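;; copysign is open coded: splat the sign-bit mask 0x80000000 and use a
;; vector select so each result element takes its sign bit from operand 2
;; and its remaining bits from operand 1.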
2495 (define_expand "altivec_copysign_v4sf3"
2496 [(use (match_operand:V4SF 0 "register_operand"))
2497 (use (match_operand:V4SF 1 "register_operand"))
2498 (use (match_operand:V4SF 2 "register_operand"))]
2499 "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
2500 {
2501 rtx mask = gen_reg_rtx (V4SImode);
2502 rtx mask_val = gen_int_mode (HOST_WIDE_INT_1U << 31, SImode);
2503 rtvec v = gen_rtvec (4, mask_val, mask_val, mask_val, mask_val);
2504
2505 emit_insn (gen_vec_initv4sisi (mask, gen_rtx_PARALLEL (V4SImode, v)));
2506 emit_insn (gen_vector_select_v4sf (operands[0], operands[1], operands[2],
2507 gen_lowpart (V4SFmode, mask)));
2508 DONE;
2509 })
2510
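;; vsldoi concatenates operands 1 and 2 and extracts the 16 bytes
;; beginning at byte offset %3 of the pair.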
2511 (define_insn "altivec_vsldoi_<mode>"
2512 [(set (match_operand:VM 0 "register_operand" "=v")
2513 (unspec:VM [(match_operand:VM 1 "register_operand" "v")
2514 (match_operand:VM 2 "register_operand" "v")
2515 (match_operand:QI 3 "immediate_operand" "i")]
2516 UNSPEC_VSLDOI))]
2517 "TARGET_ALTIVEC"
2518 "vsldoi %0,%1,%2,%3"
2519 [(set_attr "type" "vecperm")])
2520
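;; The unpack patterns widen the high or low half of operand 1 with sign
;; extension.  "High" and "low" follow big-endian element order, so for
;; little endian the opposite mnemonic is emitted.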
2521 (define_insn "altivec_vupkhs<VU_char>"
2522 [(set (match_operand:VP 0 "register_operand" "=v")
2523 (unspec:VP [(match_operand:<VP_small> 1 "register_operand" "v")]
2524 UNSPEC_VUNPACK_HI_SIGN))]
2525 "<VI_unit>"
2526 {
2527 if (BYTES_BIG_ENDIAN)
2528 return "vupkhs<VU_char> %0,%1";
2529 else
2530 return "vupkls<VU_char> %0,%1";
2531 }
2532 [(set_attr "type" "vecperm")])
2533
2534 (define_insn "*altivec_vupkhs<VU_char>_direct"
2535 [(set (match_operand:VP 0 "register_operand" "=v")
2536 (unspec:VP [(match_operand:<VP_small> 1 "register_operand" "v")]
2537 UNSPEC_VUNPACK_HI_SIGN_DIRECT))]
2538 "<VI_unit>"
2539 "vupkhs<VU_char> %0,%1"
2540 [(set_attr "type" "vecperm")])
2541
2542 (define_insn "altivec_vupkls<VU_char>"
2543 [(set (match_operand:VP 0 "register_operand" "=v")
2544 (unspec:VP [(match_operand:<VP_small> 1 "register_operand" "v")]
2545 UNSPEC_VUNPACK_LO_SIGN))]
2546 "<VI_unit>"
2547 {
2548 if (BYTES_BIG_ENDIAN)
2549 return "vupkls<VU_char> %0,%1";
2550 else
2551 return "vupkhs<VU_char> %0,%1";
2552 }
2553 [(set_attr "type" "vecperm")])
2554
2555 (define_insn "*altivec_vupkls<VU_char>_direct"
2556 [(set (match_operand:VP 0 "register_operand" "=v")
2557 (unspec:VP [(match_operand:<VP_small> 1 "register_operand" "v")]
2558 UNSPEC_VUNPACK_LO_SIGN_DIRECT))]
2559 "<VI_unit>"
2560 "vupkls<VU_char> %0,%1"
2561 [(set_attr "type" "vecperm")])
2562
2563 (define_insn "altivec_vupkhpx"
2564 [(set (match_operand:V4SI 0 "register_operand" "=v")
2565 (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")]
2566 UNSPEC_VUPKHPX))]
2567 "TARGET_ALTIVEC"
2568 {
2569 if (BYTES_BIG_ENDIAN)
2570 return "vupkhpx %0,%1";
2571 else
2572 return "vupklpx %0,%1";
2573 }
2574 [(set_attr "type" "vecperm")])
2575
2576 (define_insn "altivec_vupklpx"
2577 [(set (match_operand:V4SI 0 "register_operand" "=v")
2578 (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")]
2579 UNSPEC_VUPKLPX))]
2580 "TARGET_ALTIVEC"
2581 {
2582 if (BYTES_BIG_ENDIAN)
2583 return "vupklpx %0,%1";
2584 else
2585 return "vupkhpx %0,%1";
2586 }
2587 [(set_attr "type" "vecperm")])
2588
2589 ;; Compare vectors producing a vector result and a predicate, setting CR6 to
2590 ;; indicate a combined status
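;; (bit 0 of CR6 is set if the relation holds in all elements, bit 2 if
;; it holds in none).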
2591 (define_insn "altivec_vcmpequ<VI_char>_p"
2592 [(set (reg:CC CR6_REGNO)
2593 (unspec:CC [(eq:CC (match_operand:VI2 1 "register_operand" "v")
2594 (match_operand:VI2 2 "register_operand" "v"))]
2595 UNSPEC_PREDICATE))
2596 (set (match_operand:VI2 0 "register_operand" "=v")
2597 (eq:VI2 (match_dup 1)
2598 (match_dup 2)))]
2599 "<VI_unit>"
2600 "vcmpequ<VI_char>. %0,%1,%2"
2601 [(set_attr "type" "veccmpfx")])
2602
2603 (define_insn "altivec_vcmpequt_p"
2604 [(set (reg:CC CR6_REGNO)
2605 (unspec:CC [(eq:CC (match_operand:V1TI 1 "altivec_register_operand" "v")
2606 (match_operand:V1TI 2 "altivec_register_operand" "v"))]
2607 UNSPEC_PREDICATE))
2608 (set (match_operand:V1TI 0 "altivec_register_operand" "=v")
2609 (eq:V1TI (match_dup 1)
2610 (match_dup 2)))]
2611 "TARGET_POWER10"
2612 "vcmpequq. %0,%1,%2"
2613 [(set_attr "type" "veccmpfx")])
2614
2615 (define_insn "*altivec_vcmpgts<VI_char>_p"
2616 [(set (reg:CC CR6_REGNO)
2617 (unspec:CC [(gt:CC (match_operand:VI2 1 "register_operand" "v")
2618 (match_operand:VI2 2 "register_operand" "v"))]
2619 UNSPEC_PREDICATE))
2620 (set (match_operand:VI2 0 "register_operand" "=v")
2621 (gt:VI2 (match_dup 1)
2622 (match_dup 2)))]
2623 "<VI_unit>"
2624 "vcmpgts<VI_char>. %0,%1,%2"
2625 [(set_attr "type" "veccmpfx")])
2626
2627 (define_insn "*altivec_vcmpgtst_p"
2628 [(set (reg:CC CR6_REGNO)
2629 (unspec:CC [(gt:CC (match_operand:V1TI 1 "register_operand" "v")
2630 (match_operand:V1TI 2 "register_operand" "v"))]
2631 UNSPEC_PREDICATE))
2632 (set (match_operand:V1TI 0 "register_operand" "=v")
2633 (gt:V1TI (match_dup 1)
2634 (match_dup 2)))]
2635 "TARGET_POWER10"
2636 "vcmpgtsq. %0,%1,%2"
2637 [(set_attr "type" "veccmpfx")])
2638
2639 (define_insn "*altivec_vcmpgtu<VI_char>_p"
2640 [(set (reg:CC CR6_REGNO)
2641 (unspec:CC [(gtu:CC (match_operand:VI2 1 "register_operand" "v")
2642 (match_operand:VI2 2 "register_operand" "v"))]
2643 UNSPEC_PREDICATE))
2644 (set (match_operand:VI2 0 "register_operand" "=v")
2645 (gtu:VI2 (match_dup 1)
2646 (match_dup 2)))]
2647 "<VI_unit>"
2648 "vcmpgtu<VI_char>. %0,%1,%2"
2649 [(set_attr "type" "veccmpfx")])
2650
2651 (define_insn "*altivec_vcmpgtut_p"
2652 [(set (reg:CC CR6_REGNO)
2653 (unspec:CC [(gtu:CC (match_operand:V1TI 1 "register_operand" "v")
2654 (match_operand:V1TI 2 "register_operand" "v"))]
2655 UNSPEC_PREDICATE))
2656 (set (match_operand:V1TI 0 "register_operand" "=v")
2657 (gtu:V1TI (match_dup 1)
2658 (match_dup 2)))]
2659 "TARGET_POWER10"
2660 "vcmpgtuq. %0,%1,%2"
2661 [(set_attr "type" "veccmpfx")])
2662
2663 (define_insn "*altivec_vcmpeqfp_p"
2664 [(set (reg:CC CR6_REGNO)
2665 (unspec:CC [(eq:CC (match_operand:V4SF 1 "register_operand" "v")
2666 (match_operand:V4SF 2 "register_operand" "v"))]
2667 UNSPEC_PREDICATE))
2668 (set (match_operand:V4SF 0 "register_operand" "=v")
2669 (eq:V4SF (match_dup 1)
2670 (match_dup 2)))]
2671 "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
2672 "vcmpeqfp. %0,%1,%2"
2673 [(set_attr "type" "veccmp")])
2674
2675 (define_insn "*altivec_vcmpgtfp_p"
2676 [(set (reg:CC CR6_REGNO)
2677 (unspec:CC [(gt:CC (match_operand:V4SF 1 "register_operand" "v")
2678 (match_operand:V4SF 2 "register_operand" "v"))]
2679 UNSPEC_PREDICATE))
2680 (set (match_operand:V4SF 0 "register_operand" "=v")
2681 (gt:V4SF (match_dup 1)
2682 (match_dup 2)))]
2683 "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
2684 "vcmpgtfp. %0,%1,%2"
2685 [(set_attr "type" "veccmp")])
2686
2687 (define_insn "*altivec_vcmpgefp_p"
2688 [(set (reg:CC CR6_REGNO)
2689 (unspec:CC [(ge:CC (match_operand:V4SF 1 "register_operand" "v")
2690 (match_operand:V4SF 2 "register_operand" "v"))]
2691 UNSPEC_PREDICATE))
2692 (set (match_operand:V4SF 0 "register_operand" "=v")
2693 (ge:V4SF (match_dup 1)
2694 (match_dup 2)))]
2695 "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
2696 "vcmpgefp. %0,%1,%2"
2697 [(set_attr "type" "veccmp")])
2698
2699 (define_insn "altivec_vcmpbfp_p"
2700 [(set (reg:CC CR6_REGNO)
2701 (unspec:CC [(match_operand:V4SF 1 "register_operand" "v")
2702 (match_operand:V4SF 2 "register_operand" "v")]
2703 UNSPEC_VCMPBFP))
2704 (set (match_operand:V4SF 0 "register_operand" "=v")
2705 (unspec:V4SF [(match_dup 1)
2706 (match_dup 2)]
2707 UNSPEC_VCMPBFP))]
2708 "VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)"
2709 "vcmpbfp. %0,%1,%2"
2710 [(set_attr "type" "veccmp")])
2711
2712 (define_insn "altivec_mtvscr"
2713 [(set (reg:SI VSCR_REGNO)
2714 (unspec_volatile:SI
2715 [(match_operand:V4SI 0 "register_operand" "v")] UNSPECV_MTVSCR))]
2716 "TARGET_ALTIVEC"
2717 "mtvscr %0"
2718 [(set_attr "type" "vecsimple")])
2719
2720 (define_insn "altivec_mfvscr"
2721 [(set (match_operand:V8HI 0 "register_operand" "=v")
2722 (unspec_volatile:V8HI [(reg:SI VSCR_REGNO)] UNSPECV_MFVSCR))]
2723 "TARGET_ALTIVEC"
2724 "mfvscr %0"
2725 [(set_attr "type" "vecsimple")])
2726
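;; Data stream prefetch control: dst/dstt/dstst/dststt start a data
;; stream touch (the -t forms transient, the -st forms for stores) on
;; the given address and control operands, while dss stops one stream
;; and dssall stops them all.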
2727 (define_insn "altivec_dssall"
2728 [(unspec_volatile [(const_int 0)] UNSPECV_DSSALL)]
2729 "TARGET_ALTIVEC"
2730 "dssall"
2731 [(set_attr "type" "vecsimple")])
2732
2733 (define_insn "altivec_dss"
2734 [(unspec_volatile [(match_operand:QI 0 "immediate_operand" "i")]
2735 UNSPECV_DSS)]
2736 "TARGET_ALTIVEC"
2737 "dss %0"
2738 [(set_attr "type" "vecsimple")])
2739
2740 (define_insn "altivec_dst"
2741 [(unspec [(match_operand 0 "register_operand" "b")
2742 (match_operand:SI 1 "register_operand" "r")
2743 (match_operand:QI 2 "immediate_operand" "i")] UNSPEC_DST)]
2744 "TARGET_ALTIVEC && GET_MODE (operands[0]) == Pmode"
2745 "dst %0,%1,%2"
2746 [(set_attr "type" "vecsimple")])
2747
2748 (define_insn "altivec_dstt"
2749 [(unspec [(match_operand 0 "register_operand" "b")
2750 (match_operand:SI 1 "register_operand" "r")
2751 (match_operand:QI 2 "immediate_operand" "i")] UNSPEC_DSTT)]
2752 "TARGET_ALTIVEC && GET_MODE (operands[0]) == Pmode"
2753 "dstt %0,%1,%2"
2754 [(set_attr "type" "vecsimple")])
2755
2756 (define_insn "altivec_dstst"
2757 [(unspec [(match_operand 0 "register_operand" "b")
2758 (match_operand:SI 1 "register_operand" "r")
2759 (match_operand:QI 2 "immediate_operand" "i")] UNSPEC_DSTST)]
2760 "TARGET_ALTIVEC && GET_MODE (operands[0]) == Pmode"
2761 "dstst %0,%1,%2"
2762 [(set_attr "type" "vecsimple")])
2763
2764 (define_insn "altivec_dststt"
2765 [(unspec [(match_operand 0 "register_operand" "b")
2766 (match_operand:SI 1 "register_operand" "r")
2767 (match_operand:QI 2 "immediate_operand" "i")] UNSPEC_DSTSTT)]
2768 "TARGET_ALTIVEC && GET_MODE (operands[0]) == Pmode"
2769 "dststt %0,%1,%2"
2770 [(set_attr "type" "vecsimple")])
2771
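;; lvsl/lvsr produce a permute control vector from the low four bits of
;; the effective address, for use in realigning unaligned loads.  The
;; raw mask follows big-endian byte numbering, so for little endian the
;; expanders below run it through vperm with the index series 0..15,
;; which effectively reverses its bytes.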
2772 (define_expand "altivec_lvsl"
2773 [(use (match_operand:V16QI 0 "register_operand"))
2774 (use (match_operand:V16QI 1 "memory_operand"))]
2775 "TARGET_ALTIVEC"
2776 {
2777 if (BYTES_BIG_ENDIAN)
2778 emit_insn (gen_altivec_lvsl_direct (operands[0], operands[1]));
2779 else
2780 {
2781 rtx mask, constv, vperm;
2782 mask = gen_reg_rtx (V16QImode);
2783 emit_insn (gen_altivec_lvsl_direct (mask, operands[1]));
2784 constv = gen_const_vec_series (V16QImode, const0_rtx, const1_rtx);
2785 constv = force_reg (V16QImode, constv);
2786 vperm = gen_rtx_UNSPEC (V16QImode, gen_rtvec (3, mask, mask, constv),
2787 UNSPEC_VPERM);
2788 emit_insn (gen_rtx_SET (operands[0], vperm));
2789 }
2790 DONE;
2791 })
2792
2793 (define_insn "altivec_lvsl_reg_<mode>"
2794 [(set (match_operand:V16QI 0 "altivec_register_operand" "=v")
2795 (unspec:V16QI
2796 [(match_operand:GPR 1 "gpc_reg_operand" "b")]
2797 UNSPEC_LVSL_REG))]
2798 "TARGET_ALTIVEC"
2799 "lvsl %0,0,%1"
2800 [(set_attr "type" "vecload")])
2801
2802 (define_insn "altivec_lvsl_direct"
2803 [(set (match_operand:V16QI 0 "register_operand" "=v")
2804 (unspec:V16QI [(match_operand:V16QI 1 "memory_operand" "Z")]
2805 UNSPEC_LVSL))]
2806 "TARGET_ALTIVEC"
2807 "lvsl %0,%y1"
2808 [(set_attr "type" "vecload")])
2809
2810 (define_expand "altivec_lvsr"
2811 [(use (match_operand:V16QI 0 "altivec_register_operand"))
2812 (use (match_operand:V16QI 1 "memory_operand"))]
2813 "TARGET_ALTIVEC"
2814 {
2815 if (BYTES_BIG_ENDIAN)
2816 emit_insn (gen_altivec_lvsr_direct (operands[0], operands[1]));
2817 else
2818 {
2819 rtx mask, constv, vperm;
2820 mask = gen_reg_rtx (V16QImode);
2821 emit_insn (gen_altivec_lvsr_direct (mask, operands[1]));
2822 constv = gen_const_vec_series (V16QImode, const0_rtx, const1_rtx);
2823 constv = force_reg (V16QImode, constv);
2824 vperm = gen_rtx_UNSPEC (V16QImode, gen_rtvec (3, mask, mask, constv),
2825 UNSPEC_VPERM);
2826 emit_insn (gen_rtx_SET (operands[0], vperm));
2827 }
2828 DONE;
2829 })
2830
2831 (define_insn "altivec_lvsr_reg_<mode>"
2832 [(set (match_operand:V16QI 0 "altivec_register_operand" "=v")
2833 (unspec:V16QI
2834 [(match_operand:GPR 1 "gpc_reg_operand" "b")]
2835 UNSPEC_LVSR_REG))]
2836 "TARGET_ALTIVEC"
2837 "lvsr %0,0,%1"
2838 [(set_attr "type" "vecload")])
2839
2840 (define_insn "altivec_lvsr_direct"
2841 [(set (match_operand:V16QI 0 "register_operand" "=v")
2842 (unspec:V16QI [(match_operand:V16QI 1 "memory_operand" "Z")]
2843 UNSPEC_LVSR))]
2844 "TARGET_ALTIVEC"
2845 "lvsr %0,%y1"
2846 [(set_attr "type" "vecload")])
2847
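;; The standard build_vector_mask_for_load pattern forms the realignment
;; permute mask by applying lvsr to the negation of the load address.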
2848 (define_expand "build_vector_mask_for_load"
2849 [(set (match_operand:V16QI 0 "register_operand")
2850 (unspec:V16QI [(match_operand 1 "memory_operand")] UNSPEC_LVSR))]
2851 "TARGET_ALTIVEC"
2852 {
2853 rtx addr;
2854 rtx temp;
2855
2856 gcc_assert (MEM_P (operands[1]));
2857
2858 addr = XEXP (operands[1], 0);
2859 temp = gen_reg_rtx (GET_MODE (addr));
2860 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (GET_MODE (addr), addr)));
2861 emit_insn (gen_altivec_lvsr (operands[0],
2862 replace_equiv_address (operands[1], temp)));
2863 DONE;
2864 })
2865
2866 ;; Parallel some of the LVE* and STV* patterns with unspecs because some
2867 ;; have identical RTL but different instructions, and gcc gets confused.
2868
2869 (define_insn "altivec_lve<VI_char>x"
2870 [(parallel
2871 [(set (match_operand:VI 0 "register_operand" "=v")
2872 (match_operand:VI 1 "memory_operand" "Z"))
2873 (unspec [(const_int 0)] UNSPEC_LVE)])]
2874 "TARGET_ALTIVEC"
2875 "lve<VI_char>x %0,%y1"
2876 [(set_attr "type" "vecload")])
2877
2878 (define_insn "*altivec_lvesfx"
2879 [(parallel
2880 [(set (match_operand:V4SF 0 "register_operand" "=v")
2881 (match_operand:V4SF 1 "memory_operand" "Z"))
2882 (unspec [(const_int 0)] UNSPEC_LVE)])]
2883 "TARGET_ALTIVEC"
2884 "lvewx %0,%y1"
2885 [(set_attr "type" "vecload")])
2886
2887 (define_insn "altivec_lvxl_<mode>"
2888 [(parallel
2889 [(set (match_operand:VM2 0 "register_operand" "=v")
2890 (match_operand:VM2 1 "memory_operand" "Z"))
2891 (unspec [(const_int 0)] UNSPEC_SET_VSCR)])]
2892 "TARGET_ALTIVEC"
2893 "lvxl %0,%y1"
2894 [(set_attr "type" "vecload")])
2895
2896 ; This version of lvx is used only in cases where we need to force an lvx
2897 ; over any other load, and we don't care about losing CSE opportunities.
2898 ; Its primary use is for prologue register saves.
2899 (define_insn "altivec_lvx_<mode>_internal"
2900 [(parallel
2901 [(set (match_operand:VM2 0 "register_operand" "=v")
2902 (match_operand:VM2 1 "memory_operand" "Z"))
2903 (unspec [(const_int 0)] UNSPEC_LVX)])]
2904 "TARGET_ALTIVEC"
2905 "lvx %0,%y1"
2906 [(set_attr "type" "vecload")])
2907
2908 ; The following patterns embody what lvx should usually look like.
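; lvx ignores the low four bits of the effective address, which the
; patterns model with the explicit (and ... (const_int -16)); the stvx
; patterns further below do the same.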
2909 (define_expand "altivec_lvx_<VM2:mode>"
2910 [(set (match_operand:VM2 0 "register_operand")
2911 (match_operand:VM2 1 "altivec_indexed_or_indirect_operand"))]
2912 "TARGET_ALTIVEC"
2913 {
2914 rtx addr = XEXP (operand1, 0);
2915 if (rs6000_sum_of_two_registers_p (addr))
2916 {
2917 rtx op1 = XEXP (addr, 0);
2918 rtx op2 = XEXP (addr, 1);
2919 if (TARGET_64BIT)
2920 emit_insn (gen_altivec_lvx_<VM2:mode>_2op_di (operand0, op1, op2));
2921 else
2922 emit_insn (gen_altivec_lvx_<VM2:mode>_2op_si (operand0, op1, op2));
2923 }
2924 else
2925 {
2926 if (TARGET_64BIT)
2927 emit_insn (gen_altivec_lvx_<VM2:mode>_1op_di (operand0, addr));
2928 else
2929 emit_insn (gen_altivec_lvx_<VM2:mode>_1op_si (operand0, addr));
2930 }
2931 DONE;
2932 })
2933
2934 ; The next two patterns embody what lvx should usually look like.
2935 (define_insn "altivec_lvx_<VM2:mode>_2op_<P:mptrsize>"
2936 [(set (match_operand:VM2 0 "register_operand" "=v")
2937 (mem:VM2 (and:P (plus:P (match_operand:P 1 "register_operand" "b")
2938 (match_operand:P 2 "register_operand" "r"))
2939 (const_int -16))))]
2940 "TARGET_ALTIVEC"
2941 "lvx %0,%1,%2"
2942 [(set_attr "type" "vecload")])
2943
2944 (define_insn "altivec_lvx_<VM2:mode>_1op_<P:mptrsize>"
2945 [(set (match_operand:VM2 0 "register_operand" "=v")
2946 (mem:VM2 (and:P (match_operand:P 1 "register_operand" "r")
2947 (const_int -16))))]
2948 "TARGET_ALTIVEC"
2949 "lvx %0,0,%1"
2950 [(set_attr "type" "vecload")])
2951
2952 ; This version of stvx is used only in cases where we need to force an stvx
2953 ; over any other store, and we don't care about losing CSE opportunities.
2954 ; Its primary use is for epilogue register restores.
2955 (define_insn "altivec_stvx_<mode>_internal"
2956 [(parallel
2957 [(set (match_operand:VM2 0 "memory_operand" "=Z")
2958 (match_operand:VM2 1 "register_operand" "v"))
2959 (unspec [(const_int 0)] UNSPEC_STVX)])]
2960 "TARGET_ALTIVEC"
2961 "stvx %1,%y0"
2962 [(set_attr "type" "vecstore")])
2963
2964 ; The following patterns embody what stvx should usually look like.
2965 (define_expand "altivec_stvx_<VM2:mode>"
2966 [(set (match_operand:VM2 1 "altivec_indexed_or_indirect_operand")
2967 (match_operand:VM2 0 "register_operand"))]
2968 "TARGET_ALTIVEC"
2969 {
2970 rtx addr = XEXP (operand1, 0);
2971 if (rs6000_sum_of_two_registers_p (addr))
2972 {
2973 rtx op1 = XEXP (addr, 0);
2974 rtx op2 = XEXP (addr, 1);
2975 if (TARGET_64BIT)
2976 emit_insn (gen_altivec_stvx_<VM2:mode>_2op_di (operand0, op1, op2));
2977 else
2978 emit_insn (gen_altivec_stvx_<VM2:mode>_2op_si (operand0, op1, op2));
2979 }
2980 else
2981 {
2982 if (TARGET_64BIT)
2983 emit_insn (gen_altivec_stvx_<VM2:mode>_1op_di (operand0, addr));
2984 else
2985 emit_insn (gen_altivec_stvx_<VM2:mode>_1op_si (operand0, addr));
2986 }
2987 DONE;
2988 })
2989
2990 ; The next two patterns embody what stvx should usually look like.
2991 (define_insn "altivec_stvx_<VM2:mode>_2op_<P:mptrsize>"
2992 [(set (mem:VM2 (and:P (plus:P (match_operand:P 1 "register_operand" "b")
2993 (match_operand:P 2 "register_operand" "r"))
2994 (const_int -16)))
2995 (match_operand:VM2 0 "register_operand" "v"))]
2996 "TARGET_ALTIVEC"
2997 "stvx %0,%1,%2"
2998 [(set_attr "type" "vecstore")])
2999
3000 (define_insn "altivec_stvx_<VM2:mode>_1op_<P:mptrsize>"
3001 [(set (mem:VM2 (and:P (match_operand:P 1 "register_operand" "r")
3002 (const_int -16)))
3003 (match_operand:VM2 0 "register_operand" "v"))]
3004 "TARGET_ALTIVEC"
3005 "stvx %0,0,%1"
3006 [(set_attr "type" "vecstore")])
3007
3008 (define_insn "altivec_stvxl_<mode>"
3009 [(parallel
3010 [(set (match_operand:VM2 0 "memory_operand" "=Z")
3011 (match_operand:VM2 1 "register_operand" "v"))
3012 (unspec [(const_int 0)] UNSPEC_STVXL)])]
3013 "TARGET_ALTIVEC"
3014 "stvxl %1,%y0"
3015 [(set_attr "type" "vecstore")])
3016
3017 (define_insn "altivec_stve<VI_char>x"
3018 [(set (match_operand:<VI_scalar> 0 "memory_operand" "=Z")
3019 (unspec:<VI_scalar> [(match_operand:VI 1 "register_operand" "v")] UNSPEC_STVE))]
3020 "TARGET_ALTIVEC"
3021 "stve<VI_char>x %1,%y0"
3022 [(set_attr "type" "vecstore")])
3023
3024 (define_insn "*altivec_stvesfx"
3025 [(set (match_operand:SF 0 "memory_operand" "=Z")
3026 (unspec:SF [(match_operand:V4SF 1 "register_operand" "v")] UNSPEC_STVE))]
3027 "TARGET_ALTIVEC"
3028 "stvewx %1,%y0"
3029 [(set_attr "type" "vecstore")])
3030
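;; The double{e,o,h,l} expanders convert two selected words of a V4SI or
;; V4SF vector to V2DF.  The xvcv*dp instructions convert words 0 and 2
;; (big-endian numbering), so each expander first shuffles the requested
;; words into those slots with xxsldwi where necessary.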
3031 ;; Generate doublee
3032 ;; signed int/float to double convert words 0 and 2
3033 (define_expand "doublee<mode>2"
3034 [(set (match_operand:V2DF 0 "register_operand" "=v")
3035 (match_operand:VSX_W 1 "register_operand" "v"))]
3036 "TARGET_VSX"
3037 {
3038 machine_mode op_mode = GET_MODE (operands[1]);
3039
3040 if (BYTES_BIG_ENDIAN)
3041 {
3042 /* Big endian word numbering for words in operand is 0 1 2 3.
3043 Input words 0 and 2 are where they need to be. */
3044 emit_insn (gen_vsx_xvcv<VS_sxwsp>dp (operands[0], operands[1]));
3045 }
3046 else
3047 {
3048 /* Little endian word numbering for operand is 3 2 1 0.
3049 take (operand[1] operand[1]) and shift left one word
3050 3 2 1 0 3 2 1 0 => 2 1 0 3
3051 Input words 2 and 0 are now where they need to be for the
3052 conversion. */
3053 rtx rtx_tmp;
3054 rtx rtx_val = GEN_INT (1);
3055
3056 rtx_tmp = gen_reg_rtx (op_mode);
3057 emit_insn (gen_vsx_xxsldwi_<mode> (rtx_tmp, operands[1],
3058 operands[1], rtx_val));
3059 emit_insn (gen_vsx_xvcv<VS_sxwsp>dp (operands[0], rtx_tmp));
3060 }
3061 DONE;
3062 }
3063 [(set_attr "type" "veccomplex")])
3064
3065 ;; Generate unsdoublee
3066 ;; unsigned int to double convert words 0 and 2
3067 (define_expand "unsdoubleev4si2"
3068 [(set (match_operand:V2DF 0 "register_operand" "=v")
3069 (match_operand:V4SI 1 "register_operand" "v"))]
3070 "TARGET_VSX"
3071 {
3072 if (BYTES_BIG_ENDIAN)
3073 {
3074 /* Big endian word numbering for words in operand is 0 1 2 3.
3075 Input words 0 and 2 are where they need to be. */
3076 emit_insn (gen_vsx_xvcvuxwdp (operands[0], operands[1]));
3077 }
3078 else
3079 {
3080 /* Little endian word numbering for operand is 3 2 1 0.
3081 take (operand[1] operand[1]) and shift left one word
3082 3 2 1 0 3 2 1 0 => 2 1 0 3
3083 Input words 2 and 0 are now where they need to be for the
3084 conversion. */
3085 rtx rtx_tmp;
3086 rtx rtx_val = GEN_INT (1);
3087
3088 rtx_tmp = gen_reg_rtx (V4SImode);
3089 emit_insn (gen_vsx_xxsldwi_v4si (rtx_tmp, operands[1],
3090 operands[1], rtx_val));
3091 emit_insn (gen_vsx_xvcvuxwdp (operands[0], rtx_tmp));
3092 }
3093 DONE;
3094 }
3095 [(set_attr "type" "veccomplex")])
3096
3097 ;; Generate doubleov
3098 ;; signed int/float to double convert words 1 and 3
3099 (define_expand "doubleo<mode>2"
3100 [(set (match_operand:V2DF 0 "register_operand" "=v")
3101 (match_operand:VSX_W 1 "register_operand" "v"))]
3102 "TARGET_VSX"
3103 {
3104 machine_mode op_mode = GET_MODE (operands[1]);
3105
3106 if (BYTES_BIG_ENDIAN)
3107 {
3108 /* Big endian word numbering for words in operand is 0 1 2 3.
3109 take (operand[1] operand[1]) and shift left one word
3110 0 1 2 3 0 1 2 3 => 1 2 3 0
3111 Input words 1 and 3 are now where they need to be for the
3112 conversion. */
3113 rtx rtx_tmp;
3114 rtx rtx_val = GEN_INT (1);
3115
3116 rtx_tmp = gen_reg_rtx (op_mode);
3117 emit_insn (gen_vsx_xxsldwi_<mode> (rtx_tmp, operands[1],
3118 operands[1], rtx_val));
3119 emit_insn (gen_vsx_xvcv<VS_sxwsp>dp (operands[0], rtx_tmp));
3120 }
3121 else
3122 {
3123 /* Little endian word numbering for operand is 3 2 1 0.
3124 Input words 3 and 1 are where they need to be. */
3125 emit_insn (gen_vsx_xvcv<VS_sxwsp>dp (operands[0], operands[1]));
3126 }
3127 DONE;
3128 }
3129 [(set_attr "type" "veccomplex")])
3130
3131 ;; Generate unsdoubleov
3132 ;; unsigned int to double convert words 1 and 3
3133 (define_expand "unsdoubleov4si2"
3134 [(set (match_operand:V2DF 0 "register_operand" "=v")
3135 (match_operand:V4SI 1 "register_operand" "v"))]
3136 "TARGET_VSX"
3137 {
3138 if (BYTES_BIG_ENDIAN)
3139 {
3140 /* Big endian word numbering for words in operand is 0 1 2 3.
3141 take (operand[1] operand[1]) and shift left one word
3142 0 1 2 3 0 1 2 3 => 1 2 3 0
3143 Input words 1 and 3 are now where they need to be for the
3144 conversion. */
3145 rtx rtx_tmp;
3146 rtx rtx_val = GEN_INT (1);
3147
3148 rtx_tmp = gen_reg_rtx (V4SImode);
3149 emit_insn (gen_vsx_xxsldwi_v4si (rtx_tmp, operands[1],
3150 operands[1], rtx_val));
3151 emit_insn (gen_vsx_xvcvuxwdp (operands[0], rtx_tmp));
3152 }
3153 else
3154 {
3155 /* We want to convert words 1 and 3.
3156 Little endian word numbering for operand is 3 2 1 0.
3157 Input words 3 and 1 are where they need to be. */
3158 emit_insn (gen_vsx_xvcvuxwdp (operands[0], operands[1]));
3159 }
3160 DONE;
3161 }
3162 [(set_attr "type" "veccomplex")])
3163
3164 ;; Generate doublehv
3165 ;; signed int/float to double convert words 0 and 1
3166 (define_expand "doubleh<mode>2"
3167 [(set (match_operand:V2DF 0 "register_operand" "=v")
3168 (match_operand:VSX_W 1 "register_operand" "v"))]
3169 "TARGET_VSX"
3170 {
3171 rtx rtx_tmp;
3172 rtx rtx_val;
3173
3174 machine_mode op_mode = GET_MODE (operands[1]);
3175 rtx_tmp = gen_reg_rtx (op_mode);
3176
3177 if (BYTES_BIG_ENDIAN)
3178 {
3179 /* Big endian word numbering for words in operand is 0 1 2 3.
3180 Shift operand left one word, rtx_tmp word order is now 1 2 3 0.
3181 take (rtx_tmp operand[1]) and shift left three words
3182 1 2 3 0 0 1 2 3 => 0 0 1 2
3183 Input words 0 and 1 are now where they need to be for the
3184 conversion. */
3185 rtx_val = GEN_INT (1);
3186 emit_insn (gen_vsx_xxsldwi_<mode> (rtx_tmp, operands[1],
3187 operands[1], rtx_val));
3188
3189 rtx_val = GEN_INT (3);
3190 emit_insn (gen_vsx_xxsldwi_<mode> (rtx_tmp, rtx_tmp,
3191 operands[1], rtx_val));
3192 emit_insn (gen_vsx_xvcv<VS_sxwsp>dp (operands[0], rtx_tmp));
3193 }
3194 else
3195 {
3196 /* Little endian word numbering for operand is 3 2 1 0.
3197 Shift operand left three words, rtx_tmp word order is now 0 3 2 1.
3198 take (operand[1] rtx_tmp) and shift left two words
3199 3 2 1 0 0 3 2 1 => 1 0 0 3
3200 Input words 0 and 1 are now where they need to be for the
3201 conversion. */
3202 rtx_val = GEN_INT (3);
3203 emit_insn (gen_vsx_xxsldwi_<mode> (rtx_tmp, operands[1],
3204 operands[1], rtx_val));
3205
3206 rtx_val = GEN_INT (2);
3207 emit_insn (gen_vsx_xxsldwi_<mode> (rtx_tmp, operands[1],
3208 rtx_tmp, rtx_val));
3209 emit_insn (gen_vsx_xvcv<VS_sxwsp>dp (operands[0], rtx_tmp));
3210 }
3211 DONE;
3212 }
3213 [(set_attr "type" "veccomplex")])
3214
3215 ;; Generate unsdoublehv
3216 ;; unsigned int to double convert words 0 and 1
3217 (define_expand "unsdoublehv4si2"
3218 [(set (match_operand:V2DF 0 "register_operand" "=v")
3219 (match_operand:V4SI 1 "register_operand" "v"))]
3220 "TARGET_VSX"
3221 {
3222 rtx rtx_tmp = gen_reg_rtx (V4SImode);
3223 rtx rtx_val = GEN_INT (12);
3224
3225 if (BYTES_BIG_ENDIAN)
3226 {
3227 /* Big endian word numbering for words in operand is 0 1 2 3.
3228 Shift operand left one word, rtx_tmp word order is now 1 2 3 0.
3229 take (rtx_tmp operand[1]) and shift left three words
3230 1 2 3 0 0 1 2 3 => 0 0 1 2
3231 Input words 0 and 1 are now where they need to be for the
3232 conversion. */
3233 rtx_val = GEN_INT (1);
3234 emit_insn (gen_vsx_xxsldwi_v4si (rtx_tmp, operands[1],
3235 operands[1], rtx_val));
3236
3237 rtx_val = GEN_INT (3);
3238 emit_insn (gen_vsx_xxsldwi_v4si (rtx_tmp, rtx_tmp,
3239 operands[1], rtx_val));
3240 emit_insn (gen_vsx_xvcvuxwdp (operands[0], rtx_tmp));
3241 }
3242 else
3243 {
3244 /* Little endian word numbering for operand is 3 2 1 0.
3245 Shift operand left three words, rtx_tmp word order is now 0 3 2 1.
3246 take (operand[1] rtx_tmp) and shift left two words
3247 3 2 1 0 0 3 2 1 => 1 0 0 3
3248 Input words 1 and 0 are now where they need to be for the
3249 conversion. */
3250 rtx_val = GEN_INT (3);
3251
3252 rtx_tmp = gen_reg_rtx (V4SImode);
3253 emit_insn (gen_vsx_xxsldwi_v4si (rtx_tmp, operands[1],
3254 operands[1], rtx_val));
3255
3256 rtx_val = GEN_INT (2);
3257 emit_insn (gen_vsx_xxsldwi_v4si (rtx_tmp, operands[1],
3258 rtx_tmp, rtx_val));
3259 emit_insn (gen_vsx_xvcvuxwdp (operands[0], rtx_tmp));
3260 }
3261 DONE;
3262 }
3263 [(set_attr "type" "veccomplex")])
3264
3265 ;; Generate doublelv
3266 ;; signed int/float to double convert words 2 and 3
3267 (define_expand "doublel<mode>2"
3268 [(set (match_operand:V2DF 0 "register_operand" "=v")
3269 (match_operand:VSX_W 1 "register_operand" "v"))]
3270 "TARGET_VSX"
3271 {
3272 rtx rtx_tmp;
3273 rtx rtx_val = GEN_INT (3);
3274
3275 machine_mode op_mode = GET_MODE (operands[1]);
3276 rtx_tmp = gen_reg_rtx (op_mode);
3277
3278 if (BYTES_BIG_ENDIAN)
3279 {
3280 /* Big endian word numbering for operand is 0 1 2 3.
3281 Shift operand left three words, rtx_tmp word order is now 3 0 1 2.
3282 take (operand[1] rtx_tmp) and shift left two words
3283 0 1 2 3 3 0 1 2 => 2 3 3 0
3284 now use convert instruction to convert word 2 and 3 in the
3285 input vector. */
3286 rtx_val = GEN_INT (3);
3287 emit_insn (gen_vsx_xxsldwi_<mode> (rtx_tmp, operands[1],
3288 operands[1], rtx_val));
3289
3290 rtx_val = GEN_INT (2);
3291 emit_insn (gen_vsx_xxsldwi_<mode> (rtx_tmp, operands[1],
3292 rtx_tmp, rtx_val));
3293 emit_insn (gen_vsx_xvcv<VS_sxwsp>dp (operands[0], rtx_tmp));
3294 }
3295 else
3296 {
3297 /* Little endian word numbering for operand is 3 2 1 0.
3298 Shift operand left one word, rtx_tmp word order is now 2 1 0 3.
3299 take (rtx_tmp operand[1]) and shift left three words
3300 2 1 0 3 3 2 1 0 => 3 3 2 1
3301 now use convert instruction to convert word 3 and 2 in the
3302 input vector. */
3303 rtx_val = GEN_INT (1);
3304 emit_insn (gen_vsx_xxsldwi_<mode> (rtx_tmp, operands[1],
3305 operands[1], rtx_val));
3306
3307 rtx_val = GEN_INT (3);
3308 emit_insn (gen_vsx_xxsldwi_<mode> (rtx_tmp, rtx_tmp,
3309 operands[1], rtx_val));
3310 emit_insn (gen_vsx_xvcv<VS_sxwsp>dp (operands[0], rtx_tmp));
3311 }
3312 DONE;
3313 }
3314 [(set_attr "type" "veccomplex")])
3315
3316 ;; Generate unsdoublelv
3317 ;; unsigned int to double convert words 2 and 3
3318 (define_expand "unsdoublelv4si2"
3319 [(set (match_operand:V2DF 0 "register_operand" "=v")
3320 (match_operand:V4SI 1 "register_operand" "v"))]
3321 "TARGET_VSX"
3322 {
3323 rtx rtx_tmp = gen_reg_rtx (V4SImode);
3324 rtx rtx_val = GEN_INT (12);
3325
3326 if (BYTES_BIG_ENDIAN)
3327 {
3328 /* Big endian word numbering for operand is 0 1 2 3.
3329 Shift operand left three words, rtx_tmp word order is now 3 0 1 2.
3330 Take (operand[1] rtx_tmp) and shift left two words
3331 0 1 2 3 3 0 1 2 => 2 3 3 0
3332 Now use the convert instruction to convert words 2 and 3 in the
3333 input vector. */
3334 rtx_val = GEN_INT (3);
3335 emit_insn (gen_vsx_xxsldwi_v4si (rtx_tmp, operands[1],
3336 operands[1], rtx_val));
3337
3338 rtx_val = GEN_INT (2);
3339 emit_insn (gen_vsx_xxsldwi_v4si (rtx_tmp, operands[1],
3340 rtx_tmp, rtx_val));
3341 emit_insn (gen_vsx_xvcvuxwdp (operands[0], rtx_tmp));
3342 }
3343 else
3344 {
3345 /* Little endian word numbering for operand is 3 2 1 0.
3346 Shift operand left one word, rtx_tmp word order is now 2 1 0 3.
3347 Take (rtx_tmp operand[1]) and shift left three words
3348 2 1 0 3 3 2 1 0 => 3 3 2 1
3349 Now use the convert instruction to convert words 3 and 2 in the
3350 input vector. */
3351 rtx_val = GEN_INT (1);
3352 emit_insn (gen_vsx_xxsldwi_v4si (rtx_tmp,
3353 operands[1], operands[1], rtx_val));
3354
3355 rtx_val = GEN_INT (3);
3356 emit_insn (gen_vsx_xxsldwi_v4si (rtx_tmp, rtx_tmp,
3357 operands[1], rtx_val));
3358 emit_insn (gen_vsx_xvcvuxwdp (operands[0], rtx_tmp));
3359 }
3360 DONE;
3361 }
3362 [(set_attr "type" "veccomplex")])
3363
3364 ;; Convert two vector F32 to a packed vector I16.
3365 (define_expand "convert_4f32_8i16"
3366 [(set (match_operand:V8HI 0 "register_operand" "=v")
3367 (unspec:V8HI [(match_operand:V4SF 1 "register_operand" "v")
3368 (match_operand:V4SF 2 "register_operand" "v")]
3369 UNSPEC_CONVERT_4F32_8I16))]
3370 "TARGET_P9_VECTOR"
3371 {
3372 rtx rtx_tmp_hi = gen_reg_rtx (V4SImode);
3373 rtx rtx_tmp_lo = gen_reg_rtx (V4SImode);
3374
3375 emit_insn (gen_altivec_vctuxs (rtx_tmp_hi, operands[1], const0_rtx));
3376 emit_insn (gen_altivec_vctuxs (rtx_tmp_lo, operands[2], const0_rtx));
3377 emit_insn (gen_altivec_vpkswss (operands[0], rtx_tmp_hi, rtx_tmp_lo));
3378 DONE;
3379 })
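
;; Per-lane model of the vctuxs + vpkswss sequence above (an
;; illustrative sketch; the saturation handling is summarized and the
;; names are ours):
;;
;;   short lane_model (float f)
;;   {
;;     /* vctuxs: float -> unsigned word, saturating; NaN/negative -> 0.  */
;;     unsigned int u = !(f > 0.0f) ? 0
;;                      : f >= 4294967296.0f ? 0xffffffffu : (unsigned int) f;
;;     /* vpkswss then treats the word as signed and saturates to i16.  */
;;     int s = (int) u;
;;     return s > 32767 ? 32767 : s < -32768 ? -32768 : (short) s;
;;   }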
3380
3381
3382 ;; Convert two vector F32 to packed vector F16.
3383 ;; This builtin packs 32-bit floating-point values into packed
3384 ;; 16-bit floating-point values (stored in a 16-bit integer type):
3385 ;; vector unsigned short r = vec_pack_to_short_fp32 (a, b);
3386 ;; The expected codegen for this builtin is
3387 ;; xvcvsphp t, a
3388 ;; xvcvsphp u, b
3389 ;; if (little endian)
3390 ;; vpkuwum r, t, u
3391 ;; else
3392 ;; vpkuwum r, u, t
3393
3394 (define_expand "convert_4f32_8f16"
3395 [(set (match_operand:V8HI 0 "register_operand" "=v")
3396 (unspec:V8HI [(match_operand:V4SF 1 "register_operand" "v")
3397 (match_operand:V4SF 2 "register_operand" "v")]
3398 UNSPEC_CONVERT_4F32_8F16))]
3399 "TARGET_P9_VECTOR"
3400 {
3401 rtx rtx_tmp_hi = gen_reg_rtx (V4SImode);
3402 rtx rtx_tmp_lo = gen_reg_rtx (V4SImode);
3403
3404 emit_insn (gen_vsx_xvcvsphp (rtx_tmp_hi, operands[1]));
3405 emit_insn (gen_vsx_xvcvsphp (rtx_tmp_lo, operands[2]));
3406 if (!BYTES_BIG_ENDIAN)
3407 emit_insn (gen_altivec_vpkuwum (operands[0], rtx_tmp_hi, rtx_tmp_lo));
3408 else
3409 emit_insn (gen_altivec_vpkuwum (operands[0], rtx_tmp_lo, rtx_tmp_hi));
3410 DONE;
3411 })
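
;; Usage sketch for the builtin above (assumes <altivec.h> and a
;; -mcpu=power9 compile):
;;
;;   vector unsigned short
;;   pack_fp16 (vector float a, vector float b)
;;   {
;;     return vec_pack_to_short_fp32 (a, b);
;;   }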
3412
3413
3414 ;; Generate
3415 ;; xxlxor/vxor SCRATCH1,SCRATCH1,SCRATCH1
3416 ;; vsubu?m SCRATCH2,SCRATCH1,%1
3417 ;; vmaxs? %0,%1,SCRATCH2
3418 (define_expand "abs<mode>2"
3419 [(set (match_dup 2) (match_dup 3))
3420 (set (match_dup 4)
3421 (minus:VI2 (match_dup 2)
3422 (match_operand:VI2 1 "register_operand" "v")))
3423 (set (match_operand:VI2 0 "register_operand" "=v")
3424 (smax:VI2 (match_dup 1) (match_dup 4)))]
3425 "<VI_unit>"
3426 {
3427 operands[2] = gen_reg_rtx (<MODE>mode);
3428 operands[3] = CONST0_RTX (<MODE>mode);
3429 operands[4] = gen_reg_rtx (<MODE>mode);
3430 })
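
;; Scalar model of the expansion above: negate by subtracting from
;; zero, then take the signed maximum (illustrative sketch, ours):
;;
;;   int iabs_model (int x)
;;   {
;;     int neg = 0 - x;            /* vsubu?m  */
;;     return x > neg ? x : neg;   /* vmaxs?   */
;;   }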
3431
3432 ;; Generate
3433 ;; vspltisw SCRATCH1,0
3434 ;; vsubu?m SCRATCH2,SCRATCH1,%1
3435 ;; vmins? %0,%1,SCRATCH2
3436 (define_expand "nabs<mode>2"
3437 [(set (match_dup 2) (match_dup 3))
3438 (set (match_dup 4)
3439 (minus:VI2 (match_dup 2)
3440 (match_operand:VI2 1 "register_operand" "v")))
3441 (set (match_operand:VI2 0 "register_operand" "=v")
3442 (smin:VI2 (match_dup 1) (match_dup 4)))]
3443 "<VI_unit>"
3444 {
3445 operands[2] = gen_reg_rtx (<MODE>mode);
3446 operands[3] = CONST0_RTX (<MODE>mode);
3447 operands[4] = gen_reg_rtx (<MODE>mode);
3448 })
3449
3450 ;; Generate
3451 ;; vspltisw SCRATCH1,-1
3452 ;; vslw SCRATCH2,SCRATCH1,SCRATCH1
3453 ;; vandc %0,%1,SCRATCH2
3454 (define_expand "altivec_absv4sf2"
3455 [(set (match_dup 2)
3456 (vec_duplicate:V4SI (const_int -1)))
3457 (set (match_dup 3)
3458 (ashift:V4SI (match_dup 2) (match_dup 2)))
3459 (set (match_operand:V4SF 0 "register_operand" "=v")
3460 (and:V4SF (not:V4SF (subreg:V4SF (match_dup 3) 0))
3461 (match_operand:V4SF 1 "register_operand" "v")))]
3462 "TARGET_ALTIVEC"
3463 {
3464 operands[2] = gen_reg_rtx (V4SImode);
3465 operands[3] = gen_reg_rtx (V4SImode);
3466 })
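
;; The splat/shift pair above builds the sign-bit mask: vspltisw -1
;; makes each word all ones, and vslw then shifts each word left by 31
;; (the low five bits of the shift count).  Scalar sketch (ours):
;;
;;   float fabs_model (float x)
;;   {
;;     union { float f; unsigned int u; } v = { x };
;;     v.u &= ~0x80000000u;	/* vandc with the 0x80000000 mask */
;;     return v.f;
;;   }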
3467
3468 ;; Generate
3469 ;; vspltis? SCRATCH1,0
3470 ;; vsubs?s SCRATCH2,SCRATCH1,%1
3471 ;; vmaxs? %0,%1,SCRATCH2
3472 (define_expand "altivec_abss_<mode>"
3473 [(set (match_dup 2) (vec_duplicate:VI (const_int 0)))
3474 (parallel [(set (match_dup 3)
3475 (ss_minus:VI (match_dup 2)
3476 (match_operand:VI 1 "register_operand" "v")))
3477 (set (reg:SI VSCR_REGNO)
3478 (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))])
3479 (set (match_operand:VI 0 "register_operand" "=v")
3480 (smax:VI (match_dup 1) (match_dup 3)))]
3481 "TARGET_ALTIVEC"
3482 {
3483 operands[2] = gen_reg_rtx (GET_MODE (operands[0]));
3484 operands[3] = gen_reg_rtx (GET_MODE (operands[0]));
3485 })
3486
3487 (define_expand "reduc_plus_scal_<mode>"
3488 [(set (match_operand:<VI_scalar> 0 "register_operand" "=v")
3489 (unspec:VIshort [(match_operand:VIshort 1 "register_operand" "v")]
3490 UNSPEC_REDUC_PLUS))]
3491 "TARGET_ALTIVEC"
3492 {
3493 rtx vzero = gen_reg_rtx (V4SImode);
3494 rtx vtmp1 = gen_reg_rtx (V4SImode);
3495 rtx vtmp2 = gen_reg_rtx (<MODE>mode);
3496 rtx dest = gen_lowpart (V4SImode, vtmp2);
3497 int elt = BYTES_BIG_ENDIAN ? GET_MODE_NUNITS (<MODE>mode) - 1 : 0;
3498
3499 emit_insn (gen_altivec_vspltisw (vzero, const0_rtx));
3500 emit_insn (gen_altivec_vsum4s<VI_char>s (vtmp1, operands[1], vzero));
3501 emit_insn (gen_altivec_vsumsws_direct (dest, vtmp1, vzero));
3502 rs6000_expand_vector_extract (operands[0], vtmp2, GEN_INT (elt));
3503 DONE;
3504 })
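
;; What the sequence above computes, ignoring the saturation done by
;; vsum4s?s and vsumsws (illustrative sketch for the V16QI case, ours):
;;
;;   int reduc_plus_model (const signed char v[16])
;;   {
;;     int sum = 0;
;;     for (int i = 0; i < 16; i++)
;;       sum += v[i];
;;     return sum;
;;   }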
3505
3506 (define_insn "*p9_neg<mode>2"
3507 [(set (match_operand:VNEG 0 "altivec_register_operand" "=v")
3508 (neg:VNEG (match_operand:VNEG 1 "altivec_register_operand" "v")))]
3509 "TARGET_P9_VECTOR"
3510 "vneg<VI_char> %0,%1"
3511 [(set_attr "type" "vecsimple")])
3512
3513 (define_expand "neg<mode>2"
3514 [(set (match_operand:VI2 0 "register_operand")
3515 (neg:VI2 (match_operand:VI2 1 "register_operand")))]
3516 "<VI_unit>"
3517 {
3518 if (!TARGET_P9_VECTOR || (<MODE>mode != V4SImode && <MODE>mode != V2DImode))
3519 {
3520 rtx vzero;
3521
3522 vzero = gen_reg_rtx (GET_MODE (operands[0]));
3523 emit_move_insn (vzero, CONST0_RTX (<MODE>mode));
3524 emit_insn (gen_sub<mode>3 (operands[0], vzero, operands[1]));
3525 DONE;
3526 }
3527 })
3528
3529 (define_expand "udot_prod<mode>"
3530 [(set (match_operand:V4SI 0 "register_operand" "=v")
3531 (plus:V4SI (match_operand:V4SI 3 "register_operand" "v")
3532 (unspec:V4SI [(match_operand:VIshort 1 "register_operand" "v")
3533 (match_operand:VIshort 2 "register_operand" "v")]
3534 UNSPEC_VMSUMU)))]
3535 "TARGET_ALTIVEC"
3536 {
3537 emit_insn (gen_altivec_vmsumu<VI_char>m (operands[0], operands[1], operands[2], operands[3]));
3538 DONE;
3539 })
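
;; Per-word model of the vmsumu?m used above: each 32-bit lane receives
;; the sum of its element products plus the accumulator word.  Sketch
;; for the V16QI case (ours):
;;
;;   unsigned int udot_lane_model (const unsigned char a[4],
;;                                 const unsigned char b[4],
;;                                 unsigned int acc)
;;   {
;;     for (int i = 0; i < 4; i++)
;;       acc += (unsigned int) a[i] * b[i];
;;     return acc;
;;   }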
3540
3541 (define_expand "sdot_prodv8hi"
3542 [(set (match_operand:V4SI 0 "register_operand" "=v")
3543 (plus:V4SI (match_operand:V4SI 3 "register_operand" "v")
3544 (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
3545 (match_operand:V8HI 2 "register_operand" "v")]
3546 UNSPEC_VMSUMSHM)))]
3547 "TARGET_ALTIVEC"
3548 {
3549 emit_insn (gen_altivec_vmsumshm (operands[0], operands[1], operands[2], operands[3]));
3550 DONE;
3551 })
3552
3553 (define_expand "widen_usum<mode>3"
3554 [(set (match_operand:V4SI 0 "register_operand" "=v")
3555 (plus:V4SI (match_operand:V4SI 2 "register_operand" "v")
3556 (unspec:V4SI [(match_operand:VIshort 1 "register_operand" "v")]
3557 UNSPEC_VMSUMU)))]
3558 "TARGET_ALTIVEC"
3559 {
3560 rtx vones = gen_reg_rtx (GET_MODE (operands[1]));
3561
3562 emit_insn (gen_altivec_vspltis<VI_char> (vones, const1_rtx));
3563 emit_insn (gen_altivec_vmsumu<VI_char>m (operands[0], operands[1], vones, operands[2]));
3564 DONE;
3565 })
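
;; Multiplying by a splat of ones turns vmsumu?m into a plain widening
;; sum.  Per 32-bit lane, for the V16QI case (sketch, ours):
;;
;;   unsigned int wsum_lane_model (const unsigned char a[4],
;;                                 unsigned int acc)
;;   {
;;     return acc + a[0] + a[1] + a[2] + a[3];
;;   }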
3566
3567 (define_expand "widen_ssumv16qi3"
3568 [(set (match_operand:V4SI 0 "register_operand" "=v")
3569 (plus:V4SI (match_operand:V4SI 2 "register_operand" "v")
3570 (unspec:V4SI [(match_operand:V16QI 1 "register_operand" "v")]
3571 UNSPEC_VMSUMM)))]
3572 "TARGET_ALTIVEC"
3573 {
3574 rtx vones = gen_reg_rtx (V16QImode);
3575
3576 emit_insn (gen_altivec_vspltisb (vones, const1_rtx));
3577 emit_insn (gen_altivec_vmsummbm (operands[0], operands[1], vones, operands[2]));
3578 DONE;
3579 })
3580
3581 (define_expand "widen_ssumv8hi3"
3582 [(set (match_operand:V4SI 0 "register_operand" "=v")
3583 (plus:V4SI (match_operand:V4SI 2 "register_operand" "v")
3584 (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")]
3585 UNSPEC_VMSUMSHM)))]
3586 "TARGET_ALTIVEC"
3587 {
3588 rtx vones = gen_reg_rtx (V8HImode);
3589
3590 emit_insn (gen_altivec_vspltish (vones, const1_rtx));
3591 emit_insn (gen_altivec_vmsumshm (operands[0], operands[1], vones, operands[2]));
3592 DONE;
3593 })
3594
3595 (define_expand "vec_unpacks_hi_<VP_small_lc>"
3596 [(set (match_operand:VP 0 "register_operand" "=v")
3597 (unspec:VP [(match_operand:<VP_small> 1 "register_operand" "v")]
3598 UNSPEC_VUNPACK_HI_SIGN_DIRECT))]
3599 "<VI_unit>"
3600 "")
3601
3602 (define_expand "vec_unpacks_lo_<VP_small_lc>"
3603 [(set (match_operand:VP 0 "register_operand" "=v")
3604 (unspec:VP [(match_operand:<VP_small> 1 "register_operand" "v")]
3605 UNSPEC_VUNPACK_LO_SIGN_DIRECT))]
3606 "<VI_unit>"
3607 "")
3608
3609 (define_insn "vperm_v8hiv4si"
3610 [(set (match_operand:V4SI 0 "register_operand" "=?wa,v")
3611 (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "wa,v")
3612 (match_operand:V4SI 2 "register_operand" "0,v")
3613 (match_operand:V16QI 3 "register_operand" "wa,v")]
3614 UNSPEC_VPERMSI))]
3615 "TARGET_ALTIVEC"
3616 "@
3617 xxperm %x0,%x1,%x3
3618 vperm %0,%1,%2,%3"
3619 [(set_attr "type" "vecperm")
3620 (set_attr "isa" "p9v,*")])
3621
3622 (define_insn "vperm_v16qiv8hi"
3623 [(set (match_operand:V8HI 0 "register_operand" "=?wa,v")
3624 (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "wa,v")
3625 (match_operand:V8HI 2 "register_operand" "0,v")
3626 (match_operand:V16QI 3 "register_operand" "wa,v")]
3627 UNSPEC_VPERMHI))]
3628 "TARGET_ALTIVEC"
3629 "@
3630 xxperm %x0,%x1,%x3
3631 vperm %0,%1,%2,%3"
3632 [(set_attr "type" "vecperm")
3633 (set_attr "isa" "p9v,*")])
3634
3635 (define_expand "vec_unpacku_hi_<VP_small_lc>"
3636 [(set (match_operand:VP 0 "register_operand" "=v")
3637 (unspec:VP [(match_operand:<VP_small> 1 "register_operand" "v")]
3638 UNSPEC_VUPKHU))]
3639 "TARGET_ALTIVEC"
3640 {
3641 rtx vzero = gen_reg_rtx (<VP_small>mode);
3642 emit_insn (gen_altivec_vspltis<VU_char> (vzero, const0_rtx));
3643
3644 rtx res = gen_reg_rtx (<VP_small>mode);
3645 rtx op1 = operands[1];
3646
3647 if (BYTES_BIG_ENDIAN)
3648 emit_insn (gen_altivec_vmrgh<VU_char> (res, vzero, op1));
3649 else
3650 emit_insn (gen_altivec_vmrgl<VU_char> (res, op1, vzero));
3651
3652 emit_insn (gen_move_insn (operands[0], gen_lowpart (<MODE>mode, res)));
3653 DONE;
3654 })
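
;; Unsigned unpack-high is a merge with zero: the zero vector supplies
;; the high half of each widened element.  Scalar sketch for bytes ->
;; halfwords, in vector element order (ours):
;;
;;   void unpacku_hi_model (const unsigned char b[16], unsigned short h[8])
;;   {
;;     for (int i = 0; i < 8; i++)
;;       h[i] = b[i];	/* zero-extend the first eight elements */
;;   }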
3655
3656 (define_expand "vec_unpacku_lo_<VP_small_lc>"
3657 [(set (match_operand:VP 0 "register_operand" "=v")
3658 (unspec:VP [(match_operand:<VP_small> 1 "register_operand" "v")]
3659 UNSPEC_VUPKLU))]
3660 "TARGET_ALTIVEC"
3661 {
3662 rtx vzero = gen_reg_rtx (<VP_small>mode);
3663 emit_insn (gen_altivec_vspltis<VU_char> (vzero, const0_rtx));
3664
3665 rtx res = gen_reg_rtx (<VP_small>mode);
3666 rtx op1 = operands[1];
3667
3668 if (BYTES_BIG_ENDIAN)
3669 emit_insn (gen_altivec_vmrgl<VU_char> (res, vzero, op1));
3670 else
3671 emit_insn (gen_altivec_vmrgh<VU_char> (res, op1, vzero));
3672
3673 emit_insn (gen_move_insn (operands[0], gen_lowpart (<MODE>mode, res)));
3674 DONE;
3675 })
3676
3677 (define_expand "vec_widen_umult_hi_v16qi"
3678 [(set (match_operand:V8HI 0 "register_operand" "=v")
3679 (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")
3680 (match_operand:V16QI 2 "register_operand" "v")]
3681 UNSPEC_VMULWHUB))]
3682 "TARGET_ALTIVEC"
3683 {
3684 rtx ve = gen_reg_rtx (V8HImode);
3685 rtx vo = gen_reg_rtx (V8HImode);
3686
3687 if (BYTES_BIG_ENDIAN)
3688 {
3689 emit_insn (gen_altivec_vmuleub (ve, operands[1], operands[2]));
3690 emit_insn (gen_altivec_vmuloub (vo, operands[1], operands[2]));
3691 emit_insn (gen_altivec_vmrghh_direct (operands[0], ve, vo));
3692 }
3693 else
3694 {
3695 emit_insn (gen_altivec_vmuloub (ve, operands[1], operands[2]));
3696 emit_insn (gen_altivec_vmuleub (vo, operands[1], operands[2]));
3697 emit_insn (gen_altivec_vmrghh_direct (operands[0], vo, ve));
3698 }
3699 DONE;
3700 })
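
;; The even/odd multiplies yield interleaved 16-bit products, and the
;; merge-high restores element order for the first eight byte pairs.
;; Scalar sketch (ours):
;;
;;   void wmul_hi_model (const unsigned char a[16],
;;                       const unsigned char b[16], unsigned short p[8])
;;   {
;;     for (int i = 0; i < 8; i++)
;;       p[i] = (unsigned short) (a[i] * b[i]);
;;   }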
3701
3702 (define_expand "vec_widen_umult_lo_v16qi"
3703 [(set (match_operand:V8HI 0 "register_operand" "=v")
3704 (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")
3705 (match_operand:V16QI 2 "register_operand" "v")]
3706 UNSPEC_VMULWLUB))]
3707 "TARGET_ALTIVEC"
3708 {
3709 rtx ve = gen_reg_rtx (V8HImode);
3710 rtx vo = gen_reg_rtx (V8HImode);
3711
3712 if (BYTES_BIG_ENDIAN)
3713 {
3714 emit_insn (gen_altivec_vmuleub (ve, operands[1], operands[2]));
3715 emit_insn (gen_altivec_vmuloub (vo, operands[1], operands[2]));
3716 emit_insn (gen_altivec_vmrglh_direct (operands[0], ve, vo));
3717 }
3718 else
3719 {
3720 emit_insn (gen_altivec_vmuloub (ve, operands[1], operands[2]));
3721 emit_insn (gen_altivec_vmuleub (vo, operands[1], operands[2]));
3722 emit_insn (gen_altivec_vmrglh_direct (operands[0], vo, ve));
3723 }
3724 DONE;
3725 })
3726
3727 (define_expand "vec_widen_smult_hi_v16qi"
3728 [(set (match_operand:V8HI 0 "register_operand" "=v")
3729 (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")
3730 (match_operand:V16QI 2 "register_operand" "v")]
3731 UNSPEC_VMULWHSB))]
3732 "TARGET_ALTIVEC"
3733 {
3734 rtx ve = gen_reg_rtx (V8HImode);
3735 rtx vo = gen_reg_rtx (V8HImode);
3736
3737 if (BYTES_BIG_ENDIAN)
3738 {
3739 emit_insn (gen_altivec_vmulesb (ve, operands[1], operands[2]));
3740 emit_insn (gen_altivec_vmulosb (vo, operands[1], operands[2]));
3741 emit_insn (gen_altivec_vmrghh_direct (operands[0], ve, vo));
3742 }
3743 else
3744 {
3745 emit_insn (gen_altivec_vmulosb (ve, operands[1], operands[2]));
3746 emit_insn (gen_altivec_vmulesb (vo, operands[1], operands[2]));
3747 emit_insn (gen_altivec_vmrghh_direct (operands[0], vo, ve));
3748 }
3749 DONE;
3750 })
3751
3752 (define_expand "vec_widen_smult_lo_v16qi"
3753 [(set (match_operand:V8HI 0 "register_operand" "=v")
3754 (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")
3755 (match_operand:V16QI 2 "register_operand" "v")]
3756 UNSPEC_VMULWLSB))]
3757 "TARGET_ALTIVEC"
3758 {
3759 rtx ve = gen_reg_rtx (V8HImode);
3760 rtx vo = gen_reg_rtx (V8HImode);
3761
3762 if (BYTES_BIG_ENDIAN)
3763 {
3764 emit_insn (gen_altivec_vmulesb (ve, operands[1], operands[2]));
3765 emit_insn (gen_altivec_vmulosb (vo, operands[1], operands[2]));
3766 emit_insn (gen_altivec_vmrglh_direct (operands[0], ve, vo));
3767 }
3768 else
3769 {
3770 emit_insn (gen_altivec_vmulosb (ve, operands[1], operands[2]));
3771 emit_insn (gen_altivec_vmulesb (vo, operands[1], operands[2]));
3772 emit_insn (gen_altivec_vmrglh_direct (operands[0], vo, ve));
3773 }
3774 DONE;
3775 })
3776
3777 (define_expand "vec_widen_umult_hi_v8hi"
3778 [(set (match_operand:V4SI 0 "register_operand" "=v")
3779 (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
3780 (match_operand:V8HI 2 "register_operand" "v")]
3781 UNSPEC_VMULWHUH))]
3782 "TARGET_ALTIVEC"
3783 {
3784 rtx ve = gen_reg_rtx (V4SImode);
3785 rtx vo = gen_reg_rtx (V4SImode);
3786
3787 if (BYTES_BIG_ENDIAN)
3788 {
3789 emit_insn (gen_altivec_vmuleuh (ve, operands[1], operands[2]));
3790 emit_insn (gen_altivec_vmulouh (vo, operands[1], operands[2]));
3791 emit_insn (gen_altivec_vmrghw_direct_v4si (operands[0], ve, vo));
3792 }
3793 else
3794 {
3795 emit_insn (gen_altivec_vmulouh (ve, operands[1], operands[2]));
3796 emit_insn (gen_altivec_vmuleuh (vo, operands[1], operands[2]));
3797 emit_insn (gen_altivec_vmrghw_direct_v4si (operands[0], vo, ve));
3798 }
3799 DONE;
3800 })
3801
3802 (define_expand "vec_widen_umult_lo_v8hi"
3803 [(set (match_operand:V4SI 0 "register_operand" "=v")
3804 (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
3805 (match_operand:V8HI 2 "register_operand" "v")]
3806 UNSPEC_VMULWLUH))]
3807 "TARGET_ALTIVEC"
3808 {
3809 rtx ve = gen_reg_rtx (V4SImode);
3810 rtx vo = gen_reg_rtx (V4SImode);
3811
3812 if (BYTES_BIG_ENDIAN)
3813 {
3814 emit_insn (gen_altivec_vmuleuh (ve, operands[1], operands[2]));
3815 emit_insn (gen_altivec_vmulouh (vo, operands[1], operands[2]));
3816 emit_insn (gen_altivec_vmrglw_direct_v4si (operands[0], ve, vo));
3817 }
3818 else
3819 {
3820 emit_insn (gen_altivec_vmulouh (ve, operands[1], operands[2]));
3821 emit_insn (gen_altivec_vmuleuh (vo, operands[1], operands[2]));
3822 emit_insn (gen_altivec_vmrglw_direct_v4si (operands[0], vo, ve));
3823 }
3824 DONE;
3825 })
3826
3827 (define_expand "vec_widen_smult_hi_v8hi"
3828 [(set (match_operand:V4SI 0 "register_operand" "=v")
3829 (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
3830 (match_operand:V8HI 2 "register_operand" "v")]
3831 UNSPEC_VMULWHSH))]
3832 "TARGET_ALTIVEC"
3833 {
3834 rtx ve = gen_reg_rtx (V4SImode);
3835 rtx vo = gen_reg_rtx (V4SImode);
3836
3837 if (BYTES_BIG_ENDIAN)
3838 {
3839 emit_insn (gen_altivec_vmulesh (ve, operands[1], operands[2]));
3840 emit_insn (gen_altivec_vmulosh (vo, operands[1], operands[2]));
3841 emit_insn (gen_altivec_vmrghw_direct_v4si (operands[0], ve, vo));
3842 }
3843 else
3844 {
3845 emit_insn (gen_altivec_vmulosh (ve, operands[1], operands[2]));
3846 emit_insn (gen_altivec_vmulesh (vo, operands[1], operands[2]));
3847 emit_insn (gen_altivec_vmrghw_direct_v4si (operands[0], vo, ve));
3848 }
3849 DONE;
3850 })
3851
3852 (define_expand "vec_widen_smult_lo_v8hi"
3853 [(set (match_operand:V4SI 0 "register_operand" "=v")
3854 (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
3855 (match_operand:V8HI 2 "register_operand" "v")]
3856 UNSPEC_VMULWLSH))]
3857 "TARGET_ALTIVEC"
3858 {
3859 rtx ve = gen_reg_rtx (V4SImode);
3860 rtx vo = gen_reg_rtx (V4SImode);
3861
3862 if (BYTES_BIG_ENDIAN)
3863 {
3864 emit_insn (gen_altivec_vmulesh (ve, operands[1], operands[2]));
3865 emit_insn (gen_altivec_vmulosh (vo, operands[1], operands[2]));
3866 emit_insn (gen_altivec_vmrglw_direct_v4si (operands[0], ve, vo));
3867 }
3868 else
3869 {
3870 emit_insn (gen_altivec_vmulosh (ve, operands[1], operands[2]));
3871 emit_insn (gen_altivec_vmulesh (vo, operands[1], operands[2]));
3872 emit_insn (gen_altivec_vmrglw_direct_v4si (operands[0], vo, ve));
3873 }
3874 DONE;
3875 })
3876
3877 (define_expand "vec_pack_trunc_<mode>"
3878 [(set (match_operand:<VP_small> 0 "register_operand" "=v")
3879 (unspec:<VP_small> [(match_operand:VP 1 "register_operand" "v")
3880 (match_operand:VP 2 "register_operand" "v")]
3881 UNSPEC_VPACK_UNS_UNS_MOD))]
3882 "<VI_unit>"
3883 "")
3884
3885 (define_expand "mulv16qi3"
3886 [(set (match_operand:V16QI 0 "register_operand" "=v")
3887 (mult:V16QI (match_operand:V16QI 1 "register_operand" "v")
3888 (match_operand:V16QI 2 "register_operand" "v")))]
3889 "TARGET_ALTIVEC"
3890 {
3891 rtx even = gen_reg_rtx (V8HImode);
3892 rtx odd = gen_reg_rtx (V8HImode);
3893 rtx mask = gen_reg_rtx (V16QImode);
3894 rtvec v = rtvec_alloc (16);
3895 int i;
3896
3897 for (i = 0; i < 8; ++i) {
3898 RTVEC_ELT (v, 2 * i)
3899 = gen_rtx_CONST_INT (QImode, BYTES_BIG_ENDIAN ? 2 * i + 1 : 31 - 2 * i);
3900 RTVEC_ELT (v, 2 * i + 1)
3901 = gen_rtx_CONST_INT (QImode, BYTES_BIG_ENDIAN ? 2 * i + 17 : 15 - 2 * i);
3902 }
3903
3904 emit_insn (gen_vec_initv16qiqi (mask, gen_rtx_PARALLEL (V16QImode, v)));
3905 emit_insn (gen_altivec_vmulesb (even, operands[1], operands[2]));
3906 emit_insn (gen_altivec_vmulosb (odd, operands[1], operands[2]));
3907 emit_insn (gen_altivec_vperm_v8hiv16qi (operands[0], even, odd, mask));
3908 DONE;
3909 })
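
;; Modular byte multiply via even/odd 16-bit products: the permute mask
;; built above picks the low byte of each product, restoring element
;; order.  Scalar sketch (ours):
;;
;;   void mulv16qi_model (const unsigned char a[16],
;;                        const unsigned char b[16], unsigned char r[16])
;;   {
;;     for (int i = 0; i < 16; i++)
;;       r[i] = (unsigned char) (a[i] * b[i]);
;;   }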
3910
3911 (define_expand "altivec_vpermxor"
3912 [(use (match_operand:V16QI 0 "register_operand"))
3913 (use (match_operand:V16QI 1 "register_operand"))
3914 (use (match_operand:V16QI 2 "register_operand"))
3915 (use (match_operand:V16QI 3 "register_operand"))]
3916 "TARGET_P8_VECTOR"
3917 {
3918 if (!BYTES_BIG_ENDIAN)
3919 {
3920 /* vpermxor indexes the bytes using Big Endian numbering. If LE,
3921 convert the indexing in operands[3] to a BE index. */
3922 rtx be_index = gen_reg_rtx (V16QImode);
3923
3924 emit_insn (gen_one_cmplv16qi2 (be_index, operands[3]));
3925 emit_insn (gen_crypto_vpermxor_v16qi (operands[0], operands[1],
3926 operands[2], be_index));
3927 }
3928 else
3929 emit_insn (gen_crypto_vpermxor_v16qi (operands[0], operands[1],
3930 operands[2], operands[3]));
3931 DONE;
3932 })
3933
3934 (define_expand "altivec_negv4sf2"
3935 [(use (match_operand:V4SF 0 "register_operand"))
3936 (use (match_operand:V4SF 1 "register_operand"))]
3937 "TARGET_ALTIVEC"
3938 {
3939 rtx neg0;
3940
3941 /* Generate [-0.0, -0.0, -0.0, -0.0]. */
3942 neg0 = gen_reg_rtx (V4SImode);
3943 emit_insn (gen_altivec_vspltisw (neg0, constm1_rtx));
3944 emit_insn (gen_vashlv4si3 (neg0, neg0, neg0));
3945
3946 /* Flip the sign bits with an XOR.  */
3947 emit_insn (gen_xorv4sf3 (operands[0],
3948 gen_lowpart (V4SFmode, neg0), operands[1]));
3949
3950 DONE;
3951 })
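
;; Negation here XORs each element with the 0x80000000 sign-bit mask,
;; built the same way as in altivec_absv4sf2.  Scalar sketch (ours):
;;
;;   float fneg_model (float x)
;;   {
;;     union { float f; unsigned int u; } v = { x };
;;     v.u ^= 0x80000000u;
;;     return v.f;
;;   }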
3952
3953 ;; Vector reverse elements
3954 (define_expand "altivec_vreveti2"
3955 [(set (match_operand:TI 0 "register_operand" "=v")
3956 (unspec:TI [(match_operand:TI 1 "register_operand" "v")]
3957 UNSPEC_VREVEV))]
3958 "TARGET_ALTIVEC"
3959 {
3960 int i, j, size, num_elements;
3961 rtvec v = rtvec_alloc (16);
3962 rtx mask = gen_reg_rtx (V16QImode);
3963
3964 size = GET_MODE_UNIT_SIZE (TImode);
3965 num_elements = GET_MODE_NUNITS (TImode);
3966
3967 for (j = 0; j < num_elements; j++)
3968 for (i = 0; i < size; i++)
3969 RTVEC_ELT (v, i + j * size)
3970 = GEN_INT (i + (num_elements - 1 - j) * size);
3971
3972 emit_insn (gen_vec_initv16qiqi (mask, gen_rtx_PARALLEL (V16QImode, v)));
3973 emit_insn (gen_altivec_vperm_ti (operands[0], operands[1],
3974 operands[1], mask));
3975 DONE;
3976 })
3977
3978 ;; Vector reverse elements for V16QI V8HI V4SI V4SF
3979 (define_expand "altivec_vreve<mode>2"
3980 [(set (match_operand:VEC_K 0 "register_operand" "=v")
3981 (unspec:VEC_K [(match_operand:VEC_K 1 "register_operand" "v")]
3982 UNSPEC_VREVEV))]
3983 "TARGET_ALTIVEC"
3984 {
3985 if (TARGET_P9_VECTOR)
3986 {
3987 if (<MODE>mode == V16QImode)
3988 emit_insn (gen_p9_xxbrq_v16qi (operands[0], operands[1]));
3989 else if (<MODE>mode == V8HImode)
3990 {
3991 rtx subreg1 = simplify_gen_subreg (V1TImode, operands[1],
3992 <MODE>mode, 0);
3993 rtx temp = gen_reg_rtx (V1TImode);
3994 emit_insn (gen_p9_xxbrq_v1ti (temp, subreg1));
3995 rtx subreg2 = simplify_gen_subreg (<MODE>mode, temp,
3996 V1TImode, 0);
3997 emit_insn (gen_p9_xxbrh_v8hi (operands[0], subreg2));
3998 }
3999 else /* V4SI and V4SF. */
4000 {
4001 rtx subreg1 = simplify_gen_subreg (V1TImode, operands[1],
4002 <MODE>mode, 0);
4003 rtx temp = gen_reg_rtx (V1TImode);
4004 emit_insn (gen_p9_xxbrq_v1ti (temp, subreg1));
4005 rtx subreg2 = simplify_gen_subreg (<MODE>mode, temp,
4006 V1TImode, 0);
4007 if (<MODE>mode == V4SImode)
4008 emit_insn (gen_p9_xxbrw_v4si (operands[0], subreg2));
4009 else
4010 emit_insn (gen_p9_xxbrw_v4sf (operands[0], subreg2));
4011 }
4012 DONE;
4013 }
4014
4015 int i, j, size, num_elements;
4016 rtvec v = rtvec_alloc (16);
4017 rtx mask = gen_reg_rtx (V16QImode);
4018
4019 size = GET_MODE_UNIT_SIZE (<MODE>mode);
4020 num_elements = GET_MODE_NUNITS (<MODE>mode);
4021
4022 for (j = 0; j < num_elements; j++)
4023 for (i = 0; i < size; i++)
4024 RTVEC_ELT (v, i + j * size)
4025 = GEN_INT (i + (num_elements - 1 - j) * size);
4026
4027 emit_insn (gen_vec_initv16qiqi (mask, gen_rtx_PARALLEL (V16QImode, v)));
4028 emit_insn (gen_altivec_vperm_<mode> (operands[0], operands[1],
4029 operands[1], mask));
4030 DONE;
4031 })
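
;; For example, for V4SI (four 4-byte elements) the loop above builds
;; the byte-permute mask, in BE byte numbering,
;;   12 13 14 15  8 9 10 11  4 5 6 7  0 1 2 3
;; which reverses the element order while keeping the bytes within
;; each element in place.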
4032
4033 ;; Vector reverse elements for V2DI V2DF
4034 (define_expand "altivec_vreve<mode>2"
4035 [(set (match_operand:VEC_64 0 "register_operand" "=v")
4036 (unspec:VEC_64 [(match_operand:VEC_64 1 "register_operand" "v")]
4037 UNSPEC_VREVEV))]
4038 "TARGET_ALTIVEC"
4039 {
4040 emit_insn (gen_xxswapd_<mode> (operands[0], operands[1]));
4041 DONE;
4042 })
4043
4044 ;; The Vector SIMD PEM v2.06c defines LVLX, LVLXL, LVRX, LVRXL,
4045 ;; STVLX, STVLXL, STVRX and STVRXL; they are available only on Cell.
4046 (define_insn "altivec_lvlx"
4047 [(set (match_operand:V16QI 0 "register_operand" "=v")
4048 (unspec:V16QI [(match_operand:BLK 1 "memory_operand" "Z")]
4049 UNSPEC_LVLX))]
4050 "TARGET_ALTIVEC && rs6000_cpu == PROCESSOR_CELL"
4051 "lvlx %0,%y1"
4052 [(set_attr "type" "vecload")])
4053
4054 (define_insn "altivec_lvlxl"
4055 [(set (match_operand:V16QI 0 "register_operand" "=v")
4056 (unspec:V16QI [(match_operand:BLK 1 "memory_operand" "Z")]
4057 UNSPEC_LVLXL))]
4058 "TARGET_ALTIVEC && rs6000_cpu == PROCESSOR_CELL"
4059 "lvlxl %0,%y1"
4060 [(set_attr "type" "vecload")])
4061
4062 (define_insn "altivec_lvrx"
4063 [(set (match_operand:V16QI 0 "register_operand" "=v")
4064 (unspec:V16QI [(match_operand:BLK 1 "memory_operand" "Z")]
4065 UNSPEC_LVRX))]
4066 "TARGET_ALTIVEC && rs6000_cpu == PROCESSOR_CELL"
4067 "lvrx %0,%y1"
4068 [(set_attr "type" "vecload")])
4069
4070 (define_insn "altivec_lvrxl"
4071 [(set (match_operand:V16QI 0 "register_operand" "=v")
4072 (unspec:V16QI [(match_operand:BLK 1 "memory_operand" "Z")]
4073 UNSPEC_LVRXL))]
4074 "TARGET_ALTIVEC && rs6000_cpu == PROCESSOR_CELL"
4075 "lvrxl %0,%y1"
4076 [(set_attr "type" "vecload")])
4077
4078 (define_insn "altivec_stvlx"
4079 [(parallel
4080 [(set (match_operand:V16QI 0 "memory_operand" "=Z")
4081 (match_operand:V16QI 1 "register_operand" "v"))
4082 (unspec [(const_int 0)] UNSPEC_STVLX)])]
4083 "TARGET_ALTIVEC && rs6000_cpu == PROCESSOR_CELL"
4084 "stvlx %1,%y0"
4085 [(set_attr "type" "vecstore")])
4086
4087 (define_insn "altivec_stvlxl"
4088 [(parallel
4089 [(set (match_operand:V16QI 0 "memory_operand" "=Z")
4090 (match_operand:V16QI 1 "register_operand" "v"))
4091 (unspec [(const_int 0)] UNSPEC_STVLXL)])]
4092 "TARGET_ALTIVEC && rs6000_cpu == PROCESSOR_CELL"
4093 "stvlxl %1,%y0"
4094 [(set_attr "type" "vecstore")])
4095
4096 (define_insn "altivec_stvrx"
4097 [(parallel
4098 [(set (match_operand:V16QI 0 "memory_operand" "=Z")
4099 (match_operand:V16QI 1 "register_operand" "v"))
4100 (unspec [(const_int 0)] UNSPEC_STVRX)])]
4101 "TARGET_ALTIVEC && rs6000_cpu == PROCESSOR_CELL"
4102 "stvrx %1,%y0"
4103 [(set_attr "type" "vecstore")])
4104
4105 (define_insn "altivec_stvrxl"
4106 [(parallel
4107 [(set (match_operand:V16QI 0 "memory_operand" "=Z")
4108 (match_operand:V16QI 1 "register_operand" "v"))
4109 (unspec [(const_int 0)] UNSPEC_STVRXL)])]
4110 "TARGET_ALTIVEC && rs6000_cpu == PROCESSOR_CELL"
4111 "stvrxl %1,%y0"
4112 [(set_attr "type" "vecstore")])
4113
4114 (define_expand "vec_unpacks_float_hi_v8hi"
4115 [(set (match_operand:V4SF 0 "register_operand")
4116 (unspec:V4SF [(match_operand:V8HI 1 "register_operand")]
4117 UNSPEC_VUPKHS_V4SF))]
4118 "TARGET_ALTIVEC"
4119 {
4120 rtx tmp = gen_reg_rtx (V4SImode);
4121
4122 emit_insn (gen_vec_unpacks_hi_v8hi (tmp, operands[1]));
4123 emit_insn (gen_altivec_vcfsx (operands[0], tmp, const0_rtx));
4124 DONE;
4125 })
4126
4127 (define_expand "vec_unpacks_float_lo_v8hi"
4128 [(set (match_operand:V4SF 0 "register_operand")
4129 (unspec:V4SF [(match_operand:V8HI 1 "register_operand")]
4130 UNSPEC_VUPKLS_V4SF))]
4131 "TARGET_ALTIVEC"
4132 {
4133 rtx tmp = gen_reg_rtx (V4SImode);
4134
4135 emit_insn (gen_vec_unpacks_lo_v8hi (tmp, operands[1]));
4136 emit_insn (gen_altivec_vcfsx (operands[0], tmp, const0_rtx));
4137 DONE;
4138 })
4139
4140 (define_expand "vec_unpacku_float_hi_v8hi"
4141 [(set (match_operand:V4SF 0 "register_operand")
4142 (unspec:V4SF [(match_operand:V8HI 1 "register_operand")]
4143 UNSPEC_VUPKHU_V4SF))]
4144 "TARGET_ALTIVEC"
4145 {
4146 rtx tmp = gen_reg_rtx (V4SImode);
4147
4148 emit_insn (gen_vec_unpacku_hi_v8hi (tmp, operands[1]));
4149 emit_insn (gen_altivec_vcfux (operands[0], tmp, const0_rtx));
4150 DONE;
4151 })
4152
4153 (define_expand "vec_unpacku_float_lo_v8hi"
4154 [(set (match_operand:V4SF 0 "register_operand")
4155 (unspec:V4SF [(match_operand:V8HI 1 "register_operand")]
4156 UNSPEC_VUPKLU_V4SF))]
4157 "TARGET_ALTIVEC"
4158 {
4159 rtx tmp = gen_reg_rtx (V4SImode);
4160
4161 emit_insn (gen_vec_unpacku_lo_v8hi (tmp, operands[1]));
4162 emit_insn (gen_altivec_vcfux (operands[0], tmp, const0_rtx));
4163 DONE;
4164 })
4165
4166 \f
4167 ;; Power8/Power9 vector instructions encoded as AltiVec instructions
4168
4169 ;; Vector count leading zeros
4170 (define_insn "*p8v_clz<mode>2"
4171 [(set (match_operand:VI2 0 "register_operand" "=v")
4172 (clz:VI2 (match_operand:VI2 1 "register_operand" "v")))]
4173 "TARGET_P8_VECTOR"
4174 "vclz<wd> %0,%1"
4175 [(set_attr "type" "vecsimple")])
4176
4177 ;; Vector absolute difference unsigned
4178 (define_expand "vadu<mode>3"
4179 [(set (match_operand:VI 0 "register_operand")
4180 (unspec:VI [(match_operand:VI 1 "register_operand")
4181 (match_operand:VI 2 "register_operand")]
4182 UNSPEC_VADU))]
4183 "TARGET_P9_VECTOR")
4184
4185 ;; Vector absolute difference unsigned
4186 (define_insn "p9_vadu<mode>3"
4187 [(set (match_operand:VI 0 "register_operand" "=v")
4188 (unspec:VI [(match_operand:VI 1 "register_operand" "v")
4189 (match_operand:VI 2 "register_operand" "v")]
4190 UNSPEC_VADU))]
4191 "TARGET_P9_VECTOR"
4192 "vabsdu<wd> %0,%1,%2"
4193 [(set_attr "type" "vecsimple")])
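
;; Per-element semantics of vabsdu? (scalar sketch for bytes, ours):
;;
;;   unsigned char absd_model (unsigned char a, unsigned char b)
;;   {
;;     return a > b ? a - b : b - a;
;;   }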
4194
4195 ;; Vector count trailing zeros
4196 (define_insn "*p9v_ctz<mode>2"
4197 [(set (match_operand:VI2 0 "register_operand" "=v")
4198 (ctz:VI2 (match_operand:VI2 1 "register_operand" "v")))]
4199 "TARGET_P9_VECTOR"
4200 "vctz<wd> %0,%1"
4201 [(set_attr "type" "vecsimple")])
4202
4203 ;; Vector population count
4204 (define_insn "*p8v_popcount<mode>2"
4205 [(set (match_operand:VI2 0 "register_operand" "=v")
4206 (popcount:VI2 (match_operand:VI2 1 "register_operand" "v")))]
4207 "TARGET_P8_VECTOR"
4208 "vpopcnt<wd> %0,%1"
4209 [(set_attr "type" "vecsimple")])
4210
4211 ;; Vector parity
4212 (define_insn "*p9v_parity<mode>2"
4213 [(set (match_operand:VParity 0 "register_operand" "=v")
4214 (parity:VParity (match_operand:VParity 1 "register_operand" "v")))]
4215 "TARGET_P9_VECTOR"
4216 "vprtyb<wd> %0,%1"
4217 [(set_attr "type" "vecsimple")])
4218
4219 ;; Vector Gather Bits by Bytes by Doubleword
4220 (define_insn "p8v_vgbbd"
4221 [(set (match_operand:V16QI 0 "register_operand" "=v")
4222 (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")]
4223 UNSPEC_VGBBD))]
4224 "TARGET_P8_VECTOR"
4225 "vgbbd %0,%1"
4226 [(set_attr "type" "vecsimple")])
4227
4228 \f
4229 ;; 128-bit binary integer arithmetic
4230 ;; We have a special container type (V1TImode) to allow operations using the
4231 ;; ISA 2.07 128-bit binary support to target the VMX/altivec registers without
4232 ;; having to worry about the register allocator deciding GPRs are better.
4233
4234 (define_insn "altivec_vadduqm"
4235 [(set (match_operand:V1TI 0 "register_operand" "=v")
4236 (plus:V1TI (match_operand:V1TI 1 "register_operand" "v")
4237 (match_operand:V1TI 2 "register_operand" "v")))]
4238 "TARGET_VADDUQM"
4239 "vadduqm %0,%1,%2"
4240 [(set_attr "type" "vecsimple")])
4241
4242 (define_insn "altivec_vaddcuq"
4243 [(set (match_operand:V1TI 0 "register_operand" "=v")
4244 (unspec:V1TI [(match_operand:V1TI 1 "register_operand" "v")
4245 (match_operand:V1TI 2 "register_operand" "v")]
4246 UNSPEC_VADDCUQ))]
4247 "TARGET_VADDUQM"
4248 "vaddcuq %0,%1,%2"
4249 [(set_attr "type" "vecsimple")])
4250
4251 (define_insn "altivec_vsubuqm"
4252 [(set (match_operand:V1TI 0 "register_operand" "=v")
4253 (minus:V1TI (match_operand:V1TI 1 "register_operand" "v")
4254 (match_operand:V1TI 2 "register_operand" "v")))]
4255 "TARGET_VADDUQM"
4256 "vsubuqm %0,%1,%2"
4257 [(set_attr "type" "vecsimple")])
4258
4259 (define_insn "altivec_vsubcuq"
4260 [(set (match_operand:V1TI 0 "register_operand" "=v")
4261 (unspec:V1TI [(match_operand:V1TI 1 "register_operand" "v")
4262 (match_operand:V1TI 2 "register_operand" "v")]
4263 UNSPEC_VSUBCUQ))]
4264 "TARGET_VADDUQM"
4265 "vsubcuq %0,%1,%2"
4266 [(set_attr "type" "vecsimple")])
4267
4268 (define_insn "altivec_vaddeuqm"
4269 [(set (match_operand:V1TI 0 "register_operand" "=v")
4270 (unspec:V1TI [(match_operand:V1TI 1 "register_operand" "v")
4271 (match_operand:V1TI 2 "register_operand" "v")
4272 (match_operand:V1TI 3 "register_operand" "v")]
4273 UNSPEC_VADDEUQM))]
4274 "TARGET_VADDUQM"
4275 "vaddeuqm %0,%1,%2,%3"
4276 [(set_attr "type" "vecsimple")])
4277
4278 (define_insn "altivec_vaddecuq"
4279 [(set (match_operand:V1TI 0 "register_operand" "=v")
4280 (unspec:V1TI [(match_operand:V1TI 1 "register_operand" "v")
4281 (match_operand:V1TI 2 "register_operand" "v")
4282 (match_operand:V1TI 3 "register_operand" "v")]
4283 UNSPEC_VADDECUQ))]
4284 "TARGET_VADDUQM"
4285 "vaddecuq %0,%1,%2,%3"
4286 [(set_attr "type" "vecsimple")])
4287
4288 (define_insn "altivec_vsubeuqm"
4289 [(set (match_operand:V1TI 0 "register_operand" "=v")
4290 (unspec:V1TI [(match_operand:V1TI 1 "register_operand" "v")
4291 (match_operand:V1TI 2 "register_operand" "v")
4292 (match_operand:V1TI 3 "register_operand" "v")]
4293 UNSPEC_VSUBEUQM))]
4294 "TARGET_VADDUQM"
4295 "vsubeuqm %0,%1,%2,%3"
4296 [(set_attr "type" "vecsimple")])
4297
4298 (define_insn "altivec_vsubecuq"
4299 [(set (match_operand:V1TI 0 "register_operand" "=v")
4300 (unspec:V1TI [(match_operand:V1TI 1 "register_operand" "v")
4301 (match_operand:V1TI 2 "register_operand" "v")
4302 (match_operand:V1TI 3 "register_operand" "v")]
4303 UNSPEC_VSUBECUQ))]
4304 "TARGET_VADDUQM"
4305 "vsubecuq %0,%1,%2,%3"
4306 [(set_attr "type" "vecsimple")])
4307
4308 ;; We use V2DI as the output type to simplify converting the permute
4309 ;; bits into an integer
4310 (define_insn "altivec_vbpermq"
4311 [(set (match_operand:V2DI 0 "register_operand" "=v")
4312 (unspec:V2DI [(match_operand:V16QI 1 "register_operand" "v")
4313 (match_operand:V16QI 2 "register_operand" "v")]
4314 UNSPEC_VBPERMQ))]
4315 "TARGET_P8_VECTOR"
4316 "vbpermq %0,%1,%2"
4317 [(set_attr "type" "vecperm")])
4318
4319 ; One of the vector API interfaces requires returning vector unsigned char.
4320 (define_insn "altivec_vbpermq2"
4321 [(set (match_operand:V16QI 0 "register_operand" "=v")
4322 (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
4323 (match_operand:V16QI 2 "register_operand" "v")]
4324 UNSPEC_VBPERMQ))]
4325 "TARGET_P8_VECTOR"
4326 "vbpermq %0,%1,%2"
4327 [(set_attr "type" "vecperm")])
4328
4329 (define_insn "altivec_vbpermd"
4330 [(set (match_operand:V2DI 0 "register_operand" "=v")
4331 (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "v")
4332 (match_operand:V16QI 2 "register_operand" "v")]
4333 UNSPEC_VBPERMD))]
4334 "TARGET_P9_VECTOR"
4335 "vbpermd %0,%1,%2"
4336 [(set_attr "type" "vecsimple")])
4337
4338 ;; Support for SAD (sum of absolute differences).
4339
4340 ;; Due to saturating semantics, we can't combine the sum-across
4341 ;; with the vector accumulate in vsum4ubs. A vadduwm is needed.
4342 (define_expand "usadv16qi"
4343 [(use (match_operand:V4SI 0 "register_operand"))
4344 (use (match_operand:V16QI 1 "register_operand"))
4345 (use (match_operand:V16QI 2 "register_operand"))
4346 (use (match_operand:V4SI 3 "register_operand"))]
4347 "TARGET_P9_VECTOR"
4348 {
4349 rtx absd = gen_reg_rtx (V16QImode);
4350 rtx zero = gen_reg_rtx (V4SImode);
4351 rtx psum = gen_reg_rtx (V4SImode);
4352
4353 emit_insn (gen_p9_vaduv16qi3 (absd, operands[1], operands[2]));
4354 emit_insn (gen_altivec_vspltisw (zero, const0_rtx));
4355 emit_insn (gen_altivec_vsum4ubs (psum, absd, zero));
4356 emit_insn (gen_addv4si3 (operands[0], psum, operands[3]));
4357 DONE;
4358 })
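
;; Per 32-bit lane, usadv16qi computes the sum of four absolute byte
;; differences plus the corresponding accumulator word.  Scalar sketch
;; (ours):
;;
;;   unsigned int sad_lane_model (const unsigned char a[4],
;;                                const unsigned char b[4],
;;                                unsigned int acc)
;;   {
;;     for (int i = 0; i < 4; i++)
;;       acc += a[i] > b[i] ? a[i] - b[i] : b[i] - a[i];
;;     return acc;
;;   }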
4359
4360 ;; Since vsum4shs is saturating and further performs signed
4361 ;; arithmetic, we can't combine the sum-across with the vector
4362 ;; accumulate in vsum4shs. A vadduwm is needed.
4363 (define_expand "usadv8hi"
4364 [(use (match_operand:V4SI 0 "register_operand"))
4365 (use (match_operand:V8HI 1 "register_operand"))
4366 (use (match_operand:V8HI 2 "register_operand"))
4367 (use (match_operand:V4SI 3 "register_operand"))]
4368 "TARGET_P9_VECTOR"
4369 {
4370 rtx absd = gen_reg_rtx (V8HImode);
4371 rtx zero = gen_reg_rtx (V4SImode);
4372 rtx psum = gen_reg_rtx (V4SImode);
4373
4374 emit_insn (gen_p9_vaduv8hi3 (absd, operands[1], operands[2]));
4375 emit_insn (gen_altivec_vspltisw (zero, const0_rtx));
4376 emit_insn (gen_altivec_vsum4shs (psum, absd, zero));
4377 emit_insn (gen_addv4si3 (operands[0], psum, operands[3]));
4378 DONE;
4379 })
4380
4381 ;; Decimal Integer operations
4382 (define_int_iterator UNSPEC_BCD_ADD_SUB [UNSPEC_BCDADD UNSPEC_BCDSUB])
4383
4384 (define_int_attr bcd_add_sub [(UNSPEC_BCDADD "add")
4385 (UNSPEC_BCDSUB "sub")])
4386
4387 (define_code_iterator BCD_TEST [eq lt le gt ge unordered])
4388 (define_mode_iterator VBCD [V1TI V16QI])
4389
4390 (define_insn "bcd<bcd_add_sub>_<mode>"
4391 [(set (match_operand:VBCD 0 "register_operand" "=v")
4392 (unspec:VBCD [(match_operand:VBCD 1 "register_operand" "v")
4393 (match_operand:VBCD 2 "register_operand" "v")
4394 (match_operand:QI 3 "const_0_to_1_operand" "n")]
4395 UNSPEC_BCD_ADD_SUB))
4396 (clobber (reg:CCFP CR6_REGNO))]
4397 "TARGET_P8_VECTOR"
4398 "bcd<bcd_add_sub>. %0,%1,%2,%3"
4399 [(set_attr "type" "vecsimple")])
4400
4401 ;; Use a floating point type (V2DFmode) for the compare to set CR6 so that we
4402 ;; can use the unordered test for BCD NaNs and add/subtracts that overflow. An
4403 ;; UNORDERED test on an integer type (like V1TImode) is not defined. The type
4404 ;; probably should be one that can go in the VMX (Altivec) registers, so we
4405 ;; can't use DDmode or DFmode.
4406 (define_insn "*bcd<bcd_add_sub>_test_<mode>"
4407 [(set (reg:CCFP CR6_REGNO)
4408 (compare:CCFP
4409 (unspec:V2DF [(match_operand:VBCD 1 "register_operand" "v")
4410 (match_operand:VBCD 2 "register_operand" "v")
4411 (match_operand:QI 3 "const_0_to_1_operand" "i")]
4412 UNSPEC_BCD_ADD_SUB)
4413 (match_operand:V2DF 4 "zero_constant" "j")))
4414 (clobber (match_scratch:VBCD 0 "=v"))]
4415 "TARGET_P8_VECTOR"
4416 "bcd<bcd_add_sub>. %0,%1,%2,%3"
4417 [(set_attr "type" "vecsimple")])
4418
4419 (define_insn "*bcd<bcd_add_sub>_test2_<mode>"
4420 [(set (match_operand:VBCD 0 "register_operand" "=v")
4421 (unspec:VBCD [(match_operand:VBCD 1 "register_operand" "v")
4422 (match_operand:VBCD 2 "register_operand" "v")
4423 (match_operand:QI 3 "const_0_to_1_operand" "i")]
4424 UNSPEC_BCD_ADD_SUB))
4425 (set (reg:CCFP CR6_REGNO)
4426 (compare:CCFP
4427 (unspec:V2DF [(match_dup 1)
4428 (match_dup 2)
4429 (match_dup 3)]
4430 UNSPEC_BCD_ADD_SUB)
4431 (match_operand:V2DF 4 "zero_constant" "j")))]
4432 "TARGET_P8_VECTOR"
4433 "bcd<bcd_add_sub>. %0,%1,%2,%3"
4434 [(set_attr "type" "vecsimple")])
4435
4436 (define_insn "vcfuged"
4437 [(set (match_operand:V2DI 0 "altivec_register_operand" "=v")
4438 (unspec:V2DI [(match_operand:V2DI 1 "altivec_register_operand" "v")
4439 (match_operand:V2DI 2 "altivec_register_operand" "v")]
4440 UNSPEC_VCFUGED))]
4441 "TARGET_POWER10"
4442 "vcfuged %0,%1,%2"
4443 [(set_attr "type" "vecsimple")])
4444
4445 (define_insn "vclzdm"
4446 [(set (match_operand:V2DI 0 "altivec_register_operand" "=v")
4447 (unspec:V2DI [(match_operand:V2DI 1 "altivec_register_operand" "v")
4448 (match_operand:V2DI 2 "altivec_register_operand" "v")]
4449 UNSPEC_VCLZDM))]
4450 "TARGET_POWER10"
4451 "vclzdm %0,%1,%2"
4452 [(set_attr "type" "vecsimple")])
4453
4454 (define_insn "vctzdm"
4455 [(set (match_operand:V2DI 0 "altivec_register_operand" "=v")
4456 (unspec:V2DI [(match_operand:V2DI 1 "altivec_register_operand" "v")
4457 (match_operand:V2DI 2 "altivec_register_operand" "v")]
4458 UNSPEC_VCTZDM))]
4459 "TARGET_POWER10"
4460 "vctzdm %0,%1,%2"
4461 [(set_attr "type" "vecsimple")])
4462
4463 (define_insn "vpdepd"
4464 [(set (match_operand:V2DI 0 "altivec_register_operand" "=v")
4465 (unspec:V2DI [(match_operand:V2DI 1 "altivec_register_operand" "v")
4466 (match_operand:V2DI 2 "altivec_register_operand" "v")]
4467 UNSPEC_VPDEPD))]
4468 "TARGET_POWER10"
4469 "vpdepd %0,%1,%2"
4470 [(set_attr "type" "vecsimple")])
4471
4472 (define_insn "vpextd"
4473 [(set (match_operand:V2DI 0 "altivec_register_operand" "=v")
4474 (unspec:V2DI [(match_operand:V2DI 1 "altivec_register_operand" "v")
4475 (match_operand:V2DI 2 "altivec_register_operand" "v")]
4476 UNSPEC_VPEXTD))]
4477 "TARGET_POWER10"
4478 "vpextd %0,%1,%2"
4479 [(set_attr "type" "vecsimple")])
4480
4481 (define_insn "vgnb"
4482 [(set (match_operand:DI 0 "register_operand" "=r")
4483 (unspec:DI [(match_operand:V2DI 1 "altivec_register_operand" "v")
4484 (match_operand:QI 2 "u3bit_cint_operand" "n")]
4485 UNSPEC_VGNB))]
4486 "TARGET_POWER10"
4487 "vgnb %0,%1,%2"
4488 [(set_attr "type" "vecsimple")])
4489
4490 (define_insn "vclrlb"
4491 [(set (match_operand:V16QI 0 "altivec_register_operand" "=v")
4492 (unspec:V16QI [(match_operand:V16QI 1 "altivec_register_operand" "v")
4493 (match_operand:SI 2 "gpc_reg_operand" "r")]
4494 UNSPEC_VCLRLB))]
4495 "TARGET_POWER10"
4496 {
4497 if (BYTES_BIG_ENDIAN)
4498 return "vclrlb %0,%1,%2";
4499 else
4500 return "vclrrb %0,%1,%2";
4501 }
4502 [(set_attr "type" "vecsimple")])
4503
4504 (define_insn "vclrrb"
4505 [(set (match_operand:V16QI 0 "altivec_register_operand" "=v")
4506 (unspec:V16QI [(match_operand:V16QI 1 "altivec_register_operand" "v")
4507 (match_operand:SI 2 "gpc_reg_operand" "r")]
4508 UNSPEC_VCLRRB))]
4509 "TARGET_POWER10"
4510 {
4511 if (BYTES_BIG_ENDIAN)
4512 return "vclrrb %0,%1,%2";
4513 else
4514 return "vclrlb %0,%1,%2";
4515 }
4516 [(set_attr "type" "vecsimple")])
4517
4518 (define_expand "bcd<bcd_add_sub>_<code>_<mode>"
4519 [(parallel [(set (reg:CCFP CR6_REGNO)
4520 (compare:CCFP
4521 (unspec:V2DF [(match_operand:VBCD 1 "register_operand")
4522 (match_operand:VBCD 2 "register_operand")
4523 (match_operand:QI 3 "const_0_to_1_operand")]
4524 UNSPEC_BCD_ADD_SUB)
4525 (match_dup 4)))
4526 (clobber (match_scratch:VBCD 5))])
4527 (set (match_operand:SI 0 "register_operand")
4528 (BCD_TEST:SI (reg:CCFP CR6_REGNO)
4529 (const_int 0)))]
4530 "TARGET_P8_VECTOR"
4531 {
4532 operands[4] = CONST0_RTX (V2DFmode);
4533 })
4534
4535 (define_insn "*bcdinvalid_<mode>"
4536 [(set (reg:CCFP CR6_REGNO)
4537 (compare:CCFP
4538 (unspec:V2DF [(match_operand:VBCD 1 "register_operand" "v")]
4539 UNSPEC_BCDADD)
4540 (match_operand:V2DF 2 "zero_constant" "j")))
4541 (clobber (match_scratch:VBCD 0 "=v"))]
4542 "TARGET_P8_VECTOR"
4543 "bcdadd. %0,%1,%1,0"
4544 [(set_attr "type" "vecsimple")])
4545
4546 (define_expand "bcdinvalid_<mode>"
4547 [(parallel [(set (reg:CCFP CR6_REGNO)
4548 (compare:CCFP
4549 (unspec:V2DF [(match_operand:VBCD 1 "register_operand")]
4550 UNSPEC_BCDADD)
4551 (match_dup 2)))
4552 (clobber (match_scratch:VBCD 3))])
4553 (set (match_operand:SI 0 "register_operand")
4554 (unordered:SI (reg:CCFP CR6_REGNO)
4555 (const_int 0)))]
4556 "TARGET_P8_VECTOR"
4557 {
4558 operands[2] = CONST0_RTX (V2DFmode);
4559 })
4560
4561 (define_insn "bcdshift_v16qi"
4562 [(set (match_operand:V16QI 0 "register_operand" "=v")
4563 (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
4564 (match_operand:V16QI 2 "register_operand" "v")
4565 (match_operand:QI 3 "const_0_to_1_operand" "n")]
4566 UNSPEC_BCDSHIFT))
4567 (clobber (reg:CCFP CR6_REGNO))]
4568 "TARGET_P8_VECTOR"
4569 "bcds. %0,%1,%2,%3"
4570 [(set_attr "type" "vecsimple")])
4571
4572 (define_expand "bcdmul10_v16qi"
4573 [(set (match_operand:V16QI 0 "register_operand")
4574 (unspec:V16QI [(match_operand:V16QI 1 "register_operand")]
4575 UNSPEC_BCDSHIFT))
4576 (clobber (reg:CCFP CR6_REGNO))]
4577 "TARGET_P9_VECTOR"
4578 {
4579 rtx one = gen_reg_rtx (V16QImode);
4580
4581 emit_insn (gen_altivec_vspltisb (one, const1_rtx));
4582 emit_insn (gen_bcdshift_v16qi (operands[0], one, operands[1], const0_rtx));
4583
4584 DONE;
4585 })
4586
4587 (define_expand "bcddiv10_v16qi"
4588 [(set (match_operand:V16QI 0 "register_operand")
4589 (unspec:V16QI [(match_operand:V16QI 1 "register_operand")]
4590 UNSPEC_BCDSHIFT))
4591 (clobber (reg:CCFP CR6_REGNO))]
4592 "TARGET_P9_VECTOR"
4593 {
4594 rtx one = gen_reg_rtx (V16QImode);
4595
4596 emit_insn (gen_altivec_vspltisb (one, constm1_rtx));
4597 emit_insn (gen_bcdshift_v16qi (operands[0], one, operands[1], const0_rtx));
4598
4599 DONE;
4600 })
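
;; In decimal terms, shifting the packed BCD value one digit left
;; multiplies it by ten (e.g. 123 becomes 1230), and shifting it one
;; digit right divides it by ten, dropping the low digit (123 becomes
;; 12).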
4601
4602
4603 ;; Peephole2 pattern to combine a bcdadd/bcdsub that calculates the value and
4604 ;; the bcdadd/bcdsub that tests the value. The combiner won't work since
4605 ;; CR6 is a hard-coded register. Unfortunately, all of the Altivec predicate
4606 ;; support is hard-coded to use the fixed register CR6 instead of creating
4607 ;; a register class for CR6.
4608
4609 (define_peephole2
4610 [(parallel [(set (match_operand:V1TI 0 "register_operand")
4611 (unspec:V1TI [(match_operand:V1TI 1 "register_operand")
4612 (match_operand:V1TI 2 "register_operand")
4613 (match_operand:QI 3 "const_0_to_1_operand")]
4614 UNSPEC_BCD_ADD_SUB))
4615 (clobber (reg:CCFP CR6_REGNO))])
4616 (parallel [(set (reg:CCFP CR6_REGNO)
4617 (compare:CCFP
4618 (unspec:V2DF [(match_dup 1)
4619 (match_dup 2)
4620 (match_dup 3)]
4621 UNSPEC_BCD_ADD_SUB)
4622 (match_operand:V2DF 4 "zero_constant")))
4623 (clobber (match_operand:V1TI 5 "register_operand"))])]
4624 "TARGET_P8_VECTOR"
4625 [(parallel [(set (match_dup 0)
4626 (unspec:V1TI [(match_dup 1)
4627 (match_dup 2)
4628 (match_dup 3)]
4629 UNSPEC_BCD_ADD_SUB))
4630 (set (reg:CCFP CR6_REGNO)
4631 (compare:CCFP
4632 (unspec:V2DF [(match_dup 1)
4633 (match_dup 2)
4634 (match_dup 3)]
4635 UNSPEC_BCD_ADD_SUB)
4636 (match_dup 4)))])])