;; Source file: gcc/config/rs6000/altivec.md (GCC machine description for
;; the PowerPC AltiVec/VSX vector patterns).
;; AltiVec patterns.
;; Copyright (C) 2002-2024 Free Software Foundation, Inc.
;; Contributed by Aldy Hernandez (aldy@quesejoda.com)

;; This file is part of GCC.

;; GCC is free software; you can redistribute it and/or modify it
;; under the terms of the GNU General Public License as published
;; by the Free Software Foundation; either version 3, or (at your
;; option) any later version.

;; GCC is distributed in the hope that it will be useful, but WITHOUT
;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
;; or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
;; License for more details.

;; You should have received a copy of the GNU General Public License
;; along with GCC; see the file COPYING3.  If not see
;; <http://www.gnu.org/licenses/>.
21 (define_c_enum "unspec"
22 [UNSPEC_VCMPBFP
23 UNSPEC_VMSUMU
24 UNSPEC_VMSUMUDM
25 UNSPEC_VMSUMM
26 UNSPEC_VMSUMSHM
27 UNSPEC_VMSUMUHS
28 UNSPEC_VMSUMSHS
29 UNSPEC_VMHADDSHS
30 UNSPEC_VMHRADDSHS
31 UNSPEC_VADDCUW
32 UNSPEC_VAVGU
33 UNSPEC_VAVGS
34 UNSPEC_VMULEUB
35 UNSPEC_VMULESB
36 UNSPEC_VMULEUH
37 UNSPEC_VMULESH
38 UNSPEC_VMULEUW
39 UNSPEC_VMULESW
40 UNSPEC_VMULEUD
41 UNSPEC_VMULESD
42 UNSPEC_VMULOUB
43 UNSPEC_VMULOSB
44 UNSPEC_VMULOUH
45 UNSPEC_VMULOSH
46 UNSPEC_VMULOUW
47 UNSPEC_VMULOSW
48 UNSPEC_VMULOUD
49 UNSPEC_VMULOSD
50 UNSPEC_VPKPX
51 UNSPEC_VPACK_SIGN_SIGN_SAT
52 UNSPEC_VPACK_SIGN_UNS_SAT
53 UNSPEC_VPACK_UNS_UNS_SAT
54 UNSPEC_VPACK_UNS_UNS_MOD
55 UNSPEC_VPACK_UNS_UNS_MOD_DIRECT
56 UNSPEC_VREVEV
57 UNSPEC_VSLV4SI
58 UNSPEC_VSLO
59 UNSPEC_VSR
60 UNSPEC_VSRO
61 UNSPEC_VSUBCUW
62 UNSPEC_VSUM4UBS
63 UNSPEC_VSUM4S
64 UNSPEC_VSUM2SWS
65 UNSPEC_VSUMSWS
66 UNSPEC_VPERM
67 UNSPEC_VPERMR
68 UNSPEC_VPERM_UNS
69 UNSPEC_VRFIN
70 UNSPEC_VCFUX
71 UNSPEC_VCFSX
72 UNSPEC_VCTUXS
73 UNSPEC_VCTSXS
74 UNSPEC_VLOGEFP
75 UNSPEC_VEXPTEFP
76 UNSPEC_VSLDOI
77 UNSPEC_VUNPACK_HI_SIGN
78 UNSPEC_VUNPACK_LO_SIGN
79 UNSPEC_VUNPACK_HI_SIGN_DIRECT
80 UNSPEC_VUNPACK_LO_SIGN_DIRECT
81 UNSPEC_VUPKHPX
82 UNSPEC_VUPKLPX
83 UNSPEC_CONVERT_4F32_8I16
84 UNSPEC_CONVERT_4F32_8F16
85 UNSPEC_DST
86 UNSPEC_DSTT
87 UNSPEC_DSTST
88 UNSPEC_DSTSTT
89 UNSPEC_LVSL
90 UNSPEC_LVSR
91 UNSPEC_LVE
92 UNSPEC_STVX
93 UNSPEC_STVXL
94 UNSPEC_STVE
95 UNSPEC_SET_VSCR
96 UNSPEC_GET_VRSAVE
97 UNSPEC_LVX
98 UNSPEC_REDUC_PLUS
99 UNSPEC_VECSH
100 UNSPEC_EXTEVEN_V4SI
101 UNSPEC_EXTEVEN_V8HI
102 UNSPEC_EXTEVEN_V16QI
103 UNSPEC_EXTEVEN_V4SF
104 UNSPEC_EXTODD_V4SI
105 UNSPEC_EXTODD_V8HI
106 UNSPEC_EXTODD_V16QI
107 UNSPEC_EXTODD_V4SF
108 UNSPEC_INTERHI_V4SI
109 UNSPEC_INTERHI_V8HI
110 UNSPEC_INTERHI_V16QI
111 UNSPEC_INTERLO_V4SI
112 UNSPEC_INTERLO_V8HI
113 UNSPEC_INTERLO_V16QI
114 UNSPEC_LVLX
115 UNSPEC_LVLXL
116 UNSPEC_LVRX
117 UNSPEC_LVRXL
118 UNSPEC_STVLX
119 UNSPEC_STVLXL
120 UNSPEC_STVRX
121 UNSPEC_STVRXL
122 UNSPEC_VADU
123 UNSPEC_VSLV
124 UNSPEC_VSRV
125 UNSPEC_VMULWHUB
126 UNSPEC_VMULWLUB
127 UNSPEC_VMULWHSB
128 UNSPEC_VMULWLSB
129 UNSPEC_VMULWHUH
130 UNSPEC_VMULWLUH
131 UNSPEC_VMULWHSH
132 UNSPEC_VMULWLSH
133 UNSPEC_VUPKHU
134 UNSPEC_VUPKLU
135 UNSPEC_VPERMSI
136 UNSPEC_VPERMHI
137 UNSPEC_INTERHI
138 UNSPEC_INTERLO
139 UNSPEC_VUPKHS_V4SF
140 UNSPEC_VUPKLS_V4SF
141 UNSPEC_VUPKHU_V4SF
142 UNSPEC_VUPKLU_V4SF
143 UNSPEC_VGBBD
144 UNSPEC_VSPLT_DIRECT
145 UNSPEC_VMRGEW_DIRECT
146 UNSPEC_VMRGOW_DIRECT
147 UNSPEC_VSUMSWS_DIRECT
148 UNSPEC_VADDCUQ
149 UNSPEC_VADDEUQM
150 UNSPEC_VADDECUQ
151 UNSPEC_VSUBCUQ
152 UNSPEC_VSUBEUQM
153 UNSPEC_VSUBECUQ
154 UNSPEC_VBPERMQ
155 UNSPEC_VBPERMD
156 UNSPEC_BCDADD
157 UNSPEC_BCDSUB
158 UNSPEC_BCD_OVERFLOW
159 UNSPEC_BCDSHIFT
160 UNSPEC_VRLMI
161 UNSPEC_VRLNM
162 UNSPEC_VCFUGED
163 UNSPEC_VCLZDM
164 UNSPEC_VCTZDM
165 UNSPEC_VGNB
166 UNSPEC_VPDEPD
167 UNSPEC_VPEXTD
168 UNSPEC_VCLRLB
169 UNSPEC_VCLRRB
170 UNSPEC_VSTRIR
171 UNSPEC_VSTRIL
172 UNSPEC_SLDB
173 UNSPEC_SRDB
174 ])
175
176 (define_c_enum "unspecv"
177 [UNSPECV_SET_VRSAVE
178 UNSPECV_MTVSCR
179 UNSPECV_MFVSCR
180 UNSPECV_DSSALL
181 UNSPECV_DSS
182 ])
183
184 ;; Short vec int modes
185 (define_mode_iterator VIshort [V8HI V16QI])
186 ;; Vec float modes
187 (define_mode_iterator VF [V4SF])
188 ;; Vec modes, pity mode iterators are not composable
189 (define_mode_iterator V [V4SI V8HI V16QI V4SF])
190 ;; Vec modes for move/logical/permute ops, include vector types for move not
191 ;; otherwise handled by altivec (v2df, v2di, ti)
192 (define_mode_iterator VM [V4SI
193 V8HI
194 V16QI
195 V4SF
196 V2DF
197 V2DI
198 V1TI
199 TI
200 (KF "FLOAT128_VECTOR_P (KFmode)")
201 (TF "FLOAT128_VECTOR_P (TFmode)")])
202
203 ;; Like VM, except don't do TImode
204 (define_mode_iterator VM2 [V4SI
205 V8HI
206 V16QI
207 V4SF
208 V2DF
209 V2DI
210 V1TI
211 (KF "FLOAT128_VECTOR_P (KFmode)")
212 (TF "FLOAT128_VECTOR_P (TFmode)")])
213
214 ;; Map the Vector convert single precision to double precision for integer
215 ;; versus floating point
216 (define_mode_attr VS_sxwsp [(V4SI "sxw") (V4SF "sp")])
217
218 ;; Specific iterator for parity which does not have a byte/half-word form, but
219 ;; does have a quad word form
220 (define_mode_iterator VParity [V4SI
221 V2DI
222 V1TI
223 TI])
224
225 (define_mode_attr VI_char [(V2DI "d") (V4SI "w") (V8HI "h") (V16QI "b")])
226 (define_mode_attr VI_scalar [(V2DI "DI") (V4SI "SI") (V8HI "HI") (V16QI "QI")])
227 (define_mode_attr VI_unit [(V16QI "VECTOR_UNIT_ALTIVEC_P (V16QImode)")
228 (V8HI "VECTOR_UNIT_ALTIVEC_P (V8HImode)")
229 (V4SI "VECTOR_UNIT_ALTIVEC_P (V4SImode)")
230 (V2DI "VECTOR_UNIT_P8_VECTOR_P (V2DImode)")
231 (V1TI "VECTOR_UNIT_ALTIVEC_P (V1TImode)")])
232
233 ;; Vector pack/unpack
234 (define_mode_iterator VP [V2DI V4SI V8HI])
235 (define_mode_attr VP_small [(V2DI "V4SI") (V4SI "V8HI") (V8HI "V16QI")])
236 (define_mode_attr VP_small_lc [(V2DI "v4si") (V4SI "v8hi") (V8HI "v16qi")])
237 (define_mode_attr VU_char [(V2DI "w") (V4SI "h") (V8HI "b")])
238
239 ;; Vector negate
240 (define_mode_iterator VNEG [V4SI V2DI])
241
242 ;; Vector move instructions.
243 (define_insn "*altivec_mov<mode>"
244 [(set (match_operand:VM2 0 "nonimmediate_operand" "=Z,v,v,?Y,?*r,?*r,v,v,?*r")
245 (match_operand:VM2 1 "input_operand" "v,Z,v,*r,Y,*r,j,W,W"))]
246 "VECTOR_MEM_ALTIVEC_P (<MODE>mode)
247 && (register_operand (operands[0], <MODE>mode)
248 || register_operand (operands[1], <MODE>mode))"
249 "@
250 stvx %1,%y0
251 lvx %0,%y1
252 vor %0,%1,%1
253 #
254 #
255 #
256 vxor %0,%0,%0
257 * return output_vec_const_move (operands);
258 #"
259 [(set_attr "type" "vecstore,vecload,veclogical,store,load,*,veclogical,*,*")
260 (set_attr "length" "*,*,*,20,20,20,*,8,32")])
261
262 ;; Unlike other altivec moves, allow the GPRs, since a normal use of TImode
263 ;; is for unions. However for plain data movement, slightly favor the vector
264 ;; loads
265 (define_insn "*altivec_movti"
266 [(set (match_operand:TI 0 "nonimmediate_operand" "=Z,v,v,?Y,?r,?r,v,v")
267 (match_operand:TI 1 "input_operand" "v,Z,v,r,Y,r,j,W"))]
268 "VECTOR_MEM_ALTIVEC_P (TImode)
269 && (register_operand (operands[0], TImode)
270 || register_operand (operands[1], TImode))"
271 "@
272 stvx %1,%y0
273 lvx %0,%y1
274 vor %0,%1,%1
275 #
276 #
277 #
278 vxor %0,%0,%0
279 * return output_vec_const_move (operands);"
280 [(set_attr "type" "vecstore,vecload,veclogical,store,load,*,veclogical,*")])
281
282 ;; Load up a vector with the most significant bit set by loading up -1 and
283 ;; doing a shift left
284 (define_split
285 [(set (match_operand:VM 0 "altivec_register_operand")
286 (match_operand:VM 1 "easy_vector_constant_msb"))]
287 "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode) && reload_completed"
288 [(const_int 0)]
289 {
290 rtx dest = operands[0];
291 machine_mode mode;
292 rtvec v;
293 int i, num_elements;
294
295 switch (easy_altivec_constant (operands[1], <MODE>mode))
296 {
297 case 1:
298 mode = V16QImode;
299 break;
300 case 2:
301 mode = V8HImode;
302 break;
303 case 4:
304 mode = V4SImode;
305 break;
306 default:
307 gcc_unreachable ();
308 }
309 if (mode != <MODE>mode)
310 dest = gen_lowpart (mode, dest);
311
312 num_elements = GET_MODE_NUNITS (mode);
313 v = rtvec_alloc (num_elements);
314 for (i = 0; i < num_elements; i++)
315 RTVEC_ELT (v, i) = constm1_rtx;
316
317 rs6000_expand_vector_init (dest, gen_rtx_PARALLEL (mode, v));
318 emit_insn (gen_rtx_SET (dest, gen_rtx_ASHIFT (mode, dest, dest)));
319 DONE;
320 })
321
322 (define_split
323 [(set (match_operand:VM 0 "altivec_register_operand")
324 (match_operand:VM 1 "easy_vector_constant_add_self"))]
325 "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode) && reload_completed"
326 [(set (match_dup 0) (match_dup 3))
327 (set (match_dup 0) (match_dup 4))]
328 {
329 rtx dup = gen_easy_altivec_constant (operands[1]);
330 rtx const_vec;
331 machine_mode op_mode = <MODE>mode;
332
333 /* Divide the operand of the resulting VEC_DUPLICATE, and use
334 simplify_rtx to make a CONST_VECTOR. */
335 XEXP (dup, 0) = simplify_const_binary_operation (ASHIFTRT, QImode,
336 XEXP (dup, 0), const1_rtx);
337 const_vec = simplify_rtx (dup);
338
339 if (op_mode == V4SFmode)
340 {
341 op_mode = V4SImode;
342 operands[0] = gen_lowpart (op_mode, operands[0]);
343 }
344 if (GET_MODE (const_vec) == op_mode)
345 operands[3] = const_vec;
346 else
347 operands[3] = gen_lowpart (op_mode, const_vec);
348 operands[4] = gen_rtx_PLUS (op_mode, operands[0], operands[0]);
349 })
350
351 (define_split
352 [(set (match_operand:VM 0 "altivec_register_operand")
353 (match_operand:VM 1 "easy_vector_constant_vsldoi"))]
354 "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode) && can_create_pseudo_p ()"
355 [(set (match_dup 2) (match_dup 3))
356 (set (match_dup 4) (match_dup 5))
357 (set (match_dup 0)
358 (unspec:VM [(match_dup 2)
359 (match_dup 4)
360 (match_dup 6)]
361 UNSPEC_VSLDOI))]
362 {
363 rtx op1 = operands[1];
364 int elt = (BYTES_BIG_ENDIAN) ? 0 : GET_MODE_NUNITS (<MODE>mode) - 1;
365 HOST_WIDE_INT val = const_vector_elt_as_int (op1, elt);
366 rtx rtx_val = GEN_INT (val);
367 int shift = vspltis_shifted (op1);
368
369 gcc_assert (shift != 0);
370 operands[2] = gen_reg_rtx (<MODE>mode);
371 operands[3] = gen_const_vec_duplicate (<MODE>mode, rtx_val);
372 operands[4] = gen_reg_rtx (<MODE>mode);
373
374 if (shift < 0)
375 {
376 operands[5] = CONSTM1_RTX (<MODE>mode);
377 operands[6] = GEN_INT (-shift);
378 }
379 else
380 {
381 operands[5] = CONST0_RTX (<MODE>mode);
382 operands[6] = GEN_INT (shift);
383 }
384 })
385
386 (define_insn_and_split "sldoi_to_mov<mode>"
387 [(set (match_operand:VM 0 "altivec_register_operand")
388 (unspec:VM [(match_operand:VM 1 "const_vector_each_byte_same")
389 (match_dup 1)
390 (match_operand:QI 2 "u5bit_cint_operand")]
391 UNSPEC_VSLDOI))]
392 "VECTOR_MEM_ALTIVEC_OR_VSX_P (<MODE>mode) && can_create_pseudo_p ()"
393 "#"
394 "&& 1"
395 [(set (match_dup 0) (match_dup 1))]
396 "{
397 if (!easy_vector_constant (operands[1], <MODE>mode))
398 {
399 rtx dest = gen_reg_rtx (<MODE>mode);
400 emit_move_insn (dest, operands[1]);
401 operands[1] = dest;
402 }
403 }")
404
405 (define_insn "get_vrsave_internal"
406 [(set (match_operand:SI 0 "register_operand" "=r")
407 (unspec:SI [(reg:SI VRSAVE_REGNO)] UNSPEC_GET_VRSAVE))]
408 "TARGET_ALTIVEC"
409 {
410 if (TARGET_MACHO)
411 return "mfspr %0,256";
412 else
413 return "mfvrsave %0";
414 }
415 [(set_attr "type" "*")])
416
417 (define_insn "*set_vrsave_internal"
418 [(match_parallel 0 "vrsave_operation"
419 [(set (reg:SI VRSAVE_REGNO)
420 (unspec_volatile:SI [(match_operand:SI 1 "register_operand" "r")
421 (reg:SI VRSAVE_REGNO)] UNSPECV_SET_VRSAVE))])]
422 "TARGET_ALTIVEC"
423 {
424 if (TARGET_MACHO)
425 return "mtspr 256,%1";
426 else
427 return "mtvrsave %1";
428 }
429 [(set_attr "type" "*")])
430
431 (define_insn "*save_world"
432 [(match_parallel 0 "save_world_operation"
433 [(clobber (reg:SI LR_REGNO))
434 (use (match_operand:SI 1 "call_operand" "s"))])]
435 "TARGET_MACHO && (DEFAULT_ABI == ABI_DARWIN) && TARGET_32BIT"
436 "bl %z1"
437 [(set_attr "type" "branch")])
438
439 (define_insn "*restore_world"
440 [(match_parallel 0 "restore_world_operation"
441 [(return)
442 (use (match_operand:SI 1 "call_operand" "s"))
443 (clobber (match_operand:SI 2 "gpc_reg_operand" "=r"))])]
444 "TARGET_MACHO && (DEFAULT_ABI == ABI_DARWIN) && TARGET_32BIT"
445 "b %z1")
446
447 ;; The save_vregs and restore_vregs patterns don't use memory_operand
448 ;; because (plus (reg) (const_int)) is not a valid vector address.
449 ;; This way is more compact than describing exactly what happens in
450 ;; the out-of-line functions, ie. loading the constant into r11/r12
451 ;; then using indexed addressing, and requires less editing of rtl
452 ;; to describe the operation to dwarf2out_frame_debug_expr.
453 (define_insn "*save_vregs_<mode>_r11"
454 [(match_parallel 0 "any_parallel_operand"
455 [(clobber (reg:P LR_REGNO))
456 (use (match_operand:P 1 "symbol_ref_operand" "s"))
457 (clobber (reg:P 11))
458 (use (reg:P 0))
459 (set (mem:V4SI (plus:P (match_operand:P 2 "gpc_reg_operand" "b")
460 (match_operand:P 3 "short_cint_operand" "I")))
461 (match_operand:V4SI 4 "altivec_register_operand" "v"))])]
462 "TARGET_ALTIVEC"
463 "bl %1"
464 [(set_attr "type" "branch")])
465
466 (define_insn "*save_vregs_<mode>_r12"
467 [(match_parallel 0 "any_parallel_operand"
468 [(clobber (reg:P LR_REGNO))
469 (use (match_operand:P 1 "symbol_ref_operand" "s"))
470 (clobber (reg:P 12))
471 (use (reg:P 0))
472 (set (mem:V4SI (plus:P (match_operand:P 2 "gpc_reg_operand" "b")
473 (match_operand:P 3 "short_cint_operand" "I")))
474 (match_operand:V4SI 4 "altivec_register_operand" "v"))])]
475 "TARGET_ALTIVEC"
476 "bl %1"
477 [(set_attr "type" "branch")])
478
479 (define_insn "*restore_vregs_<mode>_r11"
480 [(match_parallel 0 "any_parallel_operand"
481 [(clobber (reg:P LR_REGNO))
482 (use (match_operand:P 1 "symbol_ref_operand" "s"))
483 (clobber (reg:P 11))
484 (use (reg:P 0))
485 (set (match_operand:V4SI 2 "altivec_register_operand" "=v")
486 (mem:V4SI (plus:P (match_operand:P 3 "gpc_reg_operand" "b")
487 (match_operand:P 4 "short_cint_operand" "I"))))])]
488 "TARGET_ALTIVEC"
489 "bl %1"
490 [(set_attr "type" "branch")])
491
492 (define_insn "*restore_vregs_<mode>_r12"
493 [(match_parallel 0 "any_parallel_operand"
494 [(clobber (reg:P LR_REGNO))
495 (use (match_operand:P 1 "symbol_ref_operand" "s"))
496 (clobber (reg:P 12))
497 (use (reg:P 0))
498 (set (match_operand:V4SI 2 "altivec_register_operand" "=v")
499 (mem:V4SI (plus:P (match_operand:P 3 "gpc_reg_operand" "b")
500 (match_operand:P 4 "short_cint_operand" "I"))))])]
501 "TARGET_ALTIVEC"
502 "bl %1"
503 [(set_attr "type" "branch")])
504
505 ;; Simple binary operations.
506
507 ;; add
508 (define_insn "add<mode>3"
509 [(set (match_operand:VI2 0 "register_operand" "=v")
510 (plus:VI2 (match_operand:VI2 1 "register_operand" "v")
511 (match_operand:VI2 2 "register_operand" "v")))]
512 "<VI_unit>"
513 "vaddu<VI_char>m %0,%1,%2"
514 [(set_attr "type" "vecsimple")])
515
516 (define_insn "*altivec_addv4sf3"
517 [(set (match_operand:V4SF 0 "register_operand" "=v")
518 (plus:V4SF (match_operand:V4SF 1 "register_operand" "v")
519 (match_operand:V4SF 2 "register_operand" "v")))]
520 "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
521 "vaddfp %0,%1,%2"
522 [(set_attr "type" "vecfloat")])
523
524 (define_insn "altivec_vaddcuw"
525 [(set (match_operand:V4SI 0 "register_operand" "=v")
526 (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
527 (match_operand:V4SI 2 "register_operand" "v")]
528 UNSPEC_VADDCUW))]
529 "VECTOR_UNIT_ALTIVEC_P (V4SImode)"
530 "vaddcuw %0,%1,%2"
531 [(set_attr "type" "vecsimple")])
532
533 (define_insn "altivec_vaddu<VI_char>s"
534 [(set (match_operand:VI 0 "register_operand" "=v")
535 (us_plus:VI (match_operand:VI 1 "register_operand" "v")
536 (match_operand:VI 2 "register_operand" "v")))
537 (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
538 "<VI_unit>"
539 "vaddu<VI_char>s %0,%1,%2"
540 [(set_attr "type" "vecsimple")])
541
542 (define_insn "altivec_vadds<VI_char>s"
543 [(set (match_operand:VI 0 "register_operand" "=v")
544 (ss_plus:VI (match_operand:VI 1 "register_operand" "v")
545 (match_operand:VI 2 "register_operand" "v")))
546 (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
547 "VECTOR_UNIT_ALTIVEC_P (<MODE>mode)"
548 "vadds<VI_char>s %0,%1,%2"
549 [(set_attr "type" "vecsimple")])
550
551 ;; sub
552 (define_insn "sub<mode>3"
553 [(set (match_operand:VI2 0 "register_operand" "=v")
554 (minus:VI2 (match_operand:VI2 1 "register_operand" "v")
555 (match_operand:VI2 2 "register_operand" "v")))]
556 "<VI_unit>"
557 "vsubu<VI_char>m %0,%1,%2"
558 [(set_attr "type" "vecsimple")])
559
560 (define_insn "*altivec_subv4sf3"
561 [(set (match_operand:V4SF 0 "register_operand" "=v")
562 (minus:V4SF (match_operand:V4SF 1 "register_operand" "v")
563 (match_operand:V4SF 2 "register_operand" "v")))]
564 "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
565 "vsubfp %0,%1,%2"
566 [(set_attr "type" "vecfloat")])
567
568 (define_insn "altivec_vsubcuw"
569 [(set (match_operand:V4SI 0 "register_operand" "=v")
570 (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
571 (match_operand:V4SI 2 "register_operand" "v")]
572 UNSPEC_VSUBCUW))]
573 "VECTOR_UNIT_ALTIVEC_P (V4SImode)"
574 "vsubcuw %0,%1,%2"
575 [(set_attr "type" "vecsimple")])
576
577 (define_insn "altivec_vsubu<VI_char>s"
578 [(set (match_operand:VI 0 "register_operand" "=v")
579 (us_minus:VI (match_operand:VI 1 "register_operand" "v")
580 (match_operand:VI 2 "register_operand" "v")))
581 (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
582 "VECTOR_UNIT_ALTIVEC_P (<MODE>mode)"
583 "vsubu<VI_char>s %0,%1,%2"
584 [(set_attr "type" "vecsimple")])
585
586 (define_insn "altivec_vsubs<VI_char>s"
587 [(set (match_operand:VI 0 "register_operand" "=v")
588 (ss_minus:VI (match_operand:VI 1 "register_operand" "v")
589 (match_operand:VI 2 "register_operand" "v")))
590 (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
591 "VECTOR_UNIT_ALTIVEC_P (<MODE>mode)"
592 "vsubs<VI_char>s %0,%1,%2"
593 [(set_attr "type" "vecsimple")])
594
595 ;;
596 (define_insn "uavg<mode>3_ceil"
597 [(set (match_operand:VI 0 "register_operand" "=v")
598 (unspec:VI [(match_operand:VI 1 "register_operand" "v")
599 (match_operand:VI 2 "register_operand" "v")]
600 UNSPEC_VAVGU))]
601 "TARGET_ALTIVEC"
602 "vavgu<VI_char> %0,%1,%2"
603 [(set_attr "type" "vecsimple")])
604
605 (define_insn "avg<mode>3_ceil"
606 [(set (match_operand:VI 0 "register_operand" "=v")
607 (unspec:VI [(match_operand:VI 1 "register_operand" "v")
608 (match_operand:VI 2 "register_operand" "v")]
609 UNSPEC_VAVGS))]
610 "VECTOR_UNIT_ALTIVEC_P (<MODE>mode)"
611 "vavgs<VI_char> %0,%1,%2"
612 [(set_attr "type" "vecsimple")])
613
614 (define_insn "altivec_vcmpbfp"
615 [(set (match_operand:V4SI 0 "register_operand" "=v")
616 (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "v")
617 (match_operand:V4SF 2 "register_operand" "v")]
618 UNSPEC_VCMPBFP))]
619 "VECTOR_UNIT_ALTIVEC_P (V4SImode)"
620 "vcmpbfp %0,%1,%2"
621 [(set_attr "type" "veccmp")])
622
623 (define_insn "altivec_eqv1ti"
624 [(set (match_operand:V1TI 0 "altivec_register_operand" "=v")
625 (eq:V1TI (match_operand:V1TI 1 "altivec_register_operand" "v")
626 (match_operand:V1TI 2 "altivec_register_operand" "v")))]
627 "TARGET_POWER10"
628 "vcmpequq %0,%1,%2"
629 [(set_attr "type" "veccmpfx")])
630
631 (define_insn "altivec_eq<mode>"
632 [(set (match_operand:VI2 0 "altivec_register_operand" "=v")
633 (eq:VI2 (match_operand:VI2 1 "altivec_register_operand" "v")
634 (match_operand:VI2 2 "altivec_register_operand" "v")))]
635 "<VI_unit>"
636 "vcmpequ<VI_char> %0,%1,%2"
637 [(set_attr "type" "veccmpfx")])
638
639 (define_insn "*altivec_gt<mode>"
640 [(set (match_operand:VI2 0 "altivec_register_operand" "=v")
641 (gt:VI2 (match_operand:VI2 1 "altivec_register_operand" "v")
642 (match_operand:VI2 2 "altivec_register_operand" "v")))]
643 "<VI_unit>"
644 "vcmpgts<VI_char> %0,%1,%2"
645 [(set_attr "type" "veccmpfx")])
646
647 (define_insn "*altivec_gtv1ti"
648 [(set (match_operand:V1TI 0 "altivec_register_operand" "=v")
649 (gt:V1TI (match_operand:V1TI 1 "altivec_register_operand" "v")
650 (match_operand:V1TI 2 "altivec_register_operand" "v")))]
651 "TARGET_POWER10"
652 "vcmpgtsq %0,%1,%2"
653 [(set_attr "type" "veccmpfx")])
654
655 (define_insn "*altivec_gtu<mode>"
656 [(set (match_operand:VI2 0 "altivec_register_operand" "=v")
657 (gtu:VI2 (match_operand:VI2 1 "altivec_register_operand" "v")
658 (match_operand:VI2 2 "altivec_register_operand" "v")))]
659 "<VI_unit>"
660 "vcmpgtu<VI_char> %0,%1,%2"
661 [(set_attr "type" "veccmpfx")])
662
663 (define_insn "*altivec_gtuv1ti"
664 [(set (match_operand:V1TI 0 "altivec_register_operand" "=v")
665 (gtu:V1TI (match_operand:V1TI 1 "altivec_register_operand" "v")
666 (match_operand:V1TI 2 "altivec_register_operand" "v")))]
667 "TARGET_POWER10"
668 "vcmpgtuq %0,%1,%2"
669 [(set_attr "type" "veccmpfx")])
670
671 (define_insn "*altivec_eqv4sf"
672 [(set (match_operand:V4SF 0 "altivec_register_operand" "=v")
673 (eq:V4SF (match_operand:V4SF 1 "altivec_register_operand" "v")
674 (match_operand:V4SF 2 "altivec_register_operand" "v")))]
675 "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
676 "vcmpeqfp %0,%1,%2"
677 [(set_attr "type" "veccmp")])
678
679 (define_insn "*altivec_gtv4sf"
680 [(set (match_operand:V4SF 0 "altivec_register_operand" "=v")
681 (gt:V4SF (match_operand:V4SF 1 "altivec_register_operand" "v")
682 (match_operand:V4SF 2 "altivec_register_operand" "v")))]
683 "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
684 "vcmpgtfp %0,%1,%2"
685 [(set_attr "type" "veccmp")])
686
687 (define_insn "*altivec_gev4sf"
688 [(set (match_operand:V4SF 0 "altivec_register_operand" "=v")
689 (ge:V4SF (match_operand:V4SF 1 "altivec_register_operand" "v")
690 (match_operand:V4SF 2 "altivec_register_operand" "v")))]
691 "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
692 "vcmpgefp %0,%1,%2"
693 [(set_attr "type" "veccmp")])
694
695 (define_insn "altivec_vsel<mode>"
696 [(set (match_operand:VM 0 "register_operand" "=wa,v")
697 (ior:VM
698 (and:VM
699 (not:VM (match_operand:VM 3 "register_operand" "wa,v"))
700 (match_operand:VM 1 "register_operand" "wa,v"))
701 (and:VM
702 (match_dup 3)
703 (match_operand:VM 2 "register_operand" "wa,v"))))]
704 "VECTOR_MEM_ALTIVEC_OR_VSX_P (<MODE>mode)"
705 "@
706 xxsel %x0,%x1,%x2,%x3
707 vsel %0,%1,%2,%3"
708 [(set_attr "type" "vecmove")
709 (set_attr "isa" "<VSisa>")])
710
711 (define_insn "altivec_vsel<mode>2"
712 [(set (match_operand:VM 0 "register_operand" "=wa,v")
713 (ior:VM
714 (and:VM
715 (not:VM (match_operand:VM 3 "register_operand" "wa,v"))
716 (match_operand:VM 1 "register_operand" "wa,v"))
717 (and:VM
718 (match_operand:VM 2 "register_operand" "wa,v")
719 (match_dup 3))))]
720 "VECTOR_MEM_ALTIVEC_OR_VSX_P (<MODE>mode)"
721 "@
722 xxsel %x0,%x1,%x2,%x3
723 vsel %0,%1,%2,%3"
724 [(set_attr "type" "vecmove")
725 (set_attr "isa" "<VSisa>")])
726
727 (define_insn "altivec_vsel<mode>3"
728 [(set (match_operand:VM 0 "register_operand" "=wa,v")
729 (ior:VM
730 (and:VM
731 (match_operand:VM 3 "register_operand" "wa,v")
732 (match_operand:VM 1 "register_operand" "wa,v"))
733 (and:VM
734 (not:VM (match_dup 3))
735 (match_operand:VM 2 "register_operand" "wa,v"))))]
736 "VECTOR_MEM_ALTIVEC_OR_VSX_P (<MODE>mode)"
737 "@
738 xxsel %x0,%x2,%x1,%x3
739 vsel %0,%2,%1,%3"
740 [(set_attr "type" "vecmove")
741 (set_attr "isa" "<VSisa>")])
742
743 (define_insn "altivec_vsel<mode>4"
744 [(set (match_operand:VM 0 "register_operand" "=wa,v")
745 (ior:VM
746 (and:VM
747 (match_operand:VM 1 "register_operand" "wa,v")
748 (match_operand:VM 3 "register_operand" "wa,v"))
749 (and:VM
750 (not:VM (match_dup 3))
751 (match_operand:VM 2 "register_operand" "wa,v"))))]
752 "VECTOR_MEM_ALTIVEC_OR_VSX_P (<MODE>mode)"
753 "@
754 xxsel %x0,%x2,%x1,%x3
755 vsel %0,%2,%1,%3"
756 [(set_attr "type" "vecmove")
757 (set_attr "isa" "<VSisa>")])
758
759 ;; Fused multiply add.
760
761 (define_insn "*altivec_fmav4sf4"
762 [(set (match_operand:V4SF 0 "register_operand" "=v")
763 (fma:V4SF (match_operand:V4SF 1 "register_operand" "v")
764 (match_operand:V4SF 2 "register_operand" "v")
765 (match_operand:V4SF 3 "register_operand" "v")))]
766 "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
767 "vmaddfp %0,%1,%2,%3"
768 [(set_attr "type" "vecfloat")])
769
770 ;; We do multiply as a fused multiply-add with an add of a -0.0 vector.
771
772 (define_expand "altivec_mulv4sf3"
773 [(set (match_operand:V4SF 0 "register_operand")
774 (fma:V4SF (match_operand:V4SF 1 "register_operand")
775 (match_operand:V4SF 2 "register_operand")
776 (match_dup 3)))]
777 "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
778 {
779 rtx neg0;
780
781 /* Generate [-0.0, -0.0, -0.0, -0.0]. */
782 neg0 = gen_reg_rtx (V4SImode);
783 emit_insn (gen_altivec_vspltisw (neg0, constm1_rtx));
784 emit_insn (gen_vashlv4si3 (neg0, neg0, neg0));
785
786 operands[3] = gen_lowpart (V4SFmode, neg0);
787 })
788
789 ;; 32-bit integer multiplication
790 ;; A_high = Operand_0 & 0xFFFF0000 >> 16
791 ;; A_low = Operand_0 & 0xFFFF
792 ;; B_high = Operand_1 & 0xFFFF0000 >> 16
793 ;; B_low = Operand_1 & 0xFFFF
794 ;; result = A_low * B_low + (A_high * B_low + B_high * A_low) << 16
795
796 ;; (define_insn "mulv4si3"
797 ;; [(set (match_operand:V4SI 0 "register_operand" "=v")
798 ;; (mult:V4SI (match_operand:V4SI 1 "register_operand" "v")
799 ;; (match_operand:V4SI 2 "register_operand" "v")))]
800 (define_insn "mulv4si3_p8"
801 [(set (match_operand:V4SI 0 "register_operand" "=v")
802 (mult:V4SI (match_operand:V4SI 1 "register_operand" "v")
803 (match_operand:V4SI 2 "register_operand" "v")))]
804 "TARGET_P8_VECTOR"
805 "vmuluwm %0,%1,%2"
806 [(set_attr "type" "veccomplex")])
807
808 (define_expand "mulv4si3"
809 [(use (match_operand:V4SI 0 "register_operand"))
810 (use (match_operand:V4SI 1 "register_operand"))
811 (use (match_operand:V4SI 2 "register_operand"))]
812 "TARGET_ALTIVEC"
813 {
814 rtx zero;
815 rtx swap;
816 rtx small_swap;
817 rtx sixteen;
818 rtx one;
819 rtx two;
820 rtx low_product;
821 rtx high_product;
822
823 if (TARGET_P8_VECTOR)
824 {
825 emit_insn (gen_mulv4si3_p8 (operands[0], operands[1], operands[2]));
826 DONE;
827 }
828
829 zero = gen_reg_rtx (V4SImode);
830 emit_insn (gen_altivec_vspltisw (zero, const0_rtx));
831
832 sixteen = gen_reg_rtx (V4SImode);
833 emit_insn (gen_altivec_vspltisw (sixteen, gen_rtx_CONST_INT (V4SImode, -16)));
834
835 swap = gen_reg_rtx (V4SImode);
836 emit_insn (gen_vrotlv4si3 (swap, operands[2], sixteen));
837
838 one = gen_reg_rtx (V8HImode);
839 convert_move (one, operands[1], 0);
840
841 two = gen_reg_rtx (V8HImode);
842 convert_move (two, operands[2], 0);
843
844 small_swap = gen_reg_rtx (V8HImode);
845 convert_move (small_swap, swap, 0);
846
847 low_product = gen_reg_rtx (V4SImode);
848 emit_insn (gen_altivec_vmulouh (low_product, one, two));
849
850 high_product = gen_reg_rtx (V4SImode);
851 emit_insn (gen_altivec_vmsumuhm (high_product, one, small_swap, zero));
852
853 emit_insn (gen_vashlv4si3 (high_product, high_product, sixteen));
854
855 emit_insn (gen_addv4si3 (operands[0], high_product, low_product));
856
857 DONE;
858 })
859
860 (define_expand "mulv8hi3"
861 [(use (match_operand:V8HI 0 "register_operand"))
862 (use (match_operand:V8HI 1 "register_operand"))
863 (use (match_operand:V8HI 2 "register_operand"))]
864 "TARGET_ALTIVEC"
865 {
866 rtx zero = gen_reg_rtx (V8HImode);
867
868 emit_insn (gen_altivec_vspltish (zero, const0_rtx));
869 emit_insn (gen_fmav8hi4 (operands[0], operands[1], operands[2], zero));
870
871 DONE;
872 })
873
874 ;; Map UNSPEC_SLDB to "l" and UNSPEC_SRDB to "r".
875 (define_int_attr SLDB_lr [(UNSPEC_SLDB "l")
876 (UNSPEC_SRDB "r")])
877
878 (define_int_iterator VSHIFT_DBL_LR [UNSPEC_SLDB UNSPEC_SRDB])
879
880 (define_insn "vs<SLDB_lr>db_<mode>"
881 [(set (match_operand:VI2 0 "register_operand" "=v")
882 (unspec:VI2 [(match_operand:VI2 1 "register_operand" "v")
883 (match_operand:VI2 2 "register_operand" "v")
884 (match_operand:QI 3 "const_0_to_12_operand" "n")]
885 VSHIFT_DBL_LR))]
886 "TARGET_POWER10"
887 "vs<SLDB_lr>dbi %0,%1,%2,%3"
888 [(set_attr "type" "vecsimple")])
889
890 (define_expand "vstrir_<mode>"
891 [(set (match_operand:VIshort 0 "altivec_register_operand")
892 (unspec:VIshort [(match_operand:VIshort 1 "altivec_register_operand")]
893 UNSPEC_VSTRIR))]
894 "TARGET_POWER10"
895 {
896 if (BYTES_BIG_ENDIAN)
897 emit_insn (gen_vstrir_direct_<mode> (operands[0], operands[1]));
898 else
899 emit_insn (gen_vstril_direct_<mode> (operands[0], operands[1]));
900 DONE;
901 })
902
903 (define_insn "vstrir_direct_<mode>"
904 [(set (match_operand:VIshort 0 "altivec_register_operand" "=v")
905 (unspec:VIshort
906 [(match_operand:VIshort 1 "altivec_register_operand" "v")]
907 UNSPEC_VSTRIR))]
908 "TARGET_POWER10"
909 "vstri<wd>r %0,%1"
910 [(set_attr "type" "vecsimple")])
911
912 ;; This expands into same code as vstrir<mode> followed by condition logic
913 ;; so that a single vstribr. or vstrihr. or vstribl. or vstrihl. instruction
914 ;; can, for example, satisfy the needs of a vec_strir () function paired
915 ;; with a vec_strir_p () function if both take the same incoming arguments.
916 (define_expand "vstrir_p_<mode>"
917 [(match_operand:SI 0 "gpc_reg_operand")
918 (match_operand:VIshort 1 "altivec_register_operand")]
919 "TARGET_POWER10"
920 {
921 rtx scratch = gen_reg_rtx (<MODE>mode);
922 if (BYTES_BIG_ENDIAN)
923 emit_insn (gen_vstrir_p_direct_<mode> (scratch, operands[1]));
924 else
925 emit_insn (gen_vstril_p_direct_<mode> (scratch, operands[1]));
926 emit_insn (gen_cr6_test_for_zero (operands[0]));
927 DONE;
928 })
929
930 (define_insn "vstrir_p_direct_<mode>"
931 [(set (match_operand:VIshort 0 "altivec_register_operand" "=v")
932 (unspec:VIshort
933 [(match_operand:VIshort 1 "altivec_register_operand" "v")]
934 UNSPEC_VSTRIR))
935 (set (reg:CC CR6_REGNO)
936 (unspec:CC [(match_dup 1)]
937 UNSPEC_VSTRIR))]
938 "TARGET_POWER10"
939 "vstri<wd>r. %0,%1"
940 [(set_attr "type" "vecsimple")])
941
942 (define_expand "vstril_<mode>"
943 [(set (match_operand:VIshort 0 "altivec_register_operand")
944 (unspec:VIshort [(match_operand:VIshort 1 "altivec_register_operand")]
945 UNSPEC_VSTRIR))]
946 "TARGET_POWER10"
947 {
948 if (BYTES_BIG_ENDIAN)
949 emit_insn (gen_vstril_direct_<mode> (operands[0], operands[1]));
950 else
951 emit_insn (gen_vstrir_direct_<mode> (operands[0], operands[1]));
952 DONE;
953 })
954
955 (define_insn "vstril_direct_<mode>"
956 [(set (match_operand:VIshort 0 "altivec_register_operand" "=v")
957 (unspec:VIshort
958 [(match_operand:VIshort 1 "altivec_register_operand" "v")]
959 UNSPEC_VSTRIL))]
960 "TARGET_POWER10"
961 "vstri<wd>l %0,%1"
962 [(set_attr "type" "vecsimple")])
963
964 ;; This expands into same code as vstril_<mode> followed by condition logic
965 ;; so that a single vstribr. or vstrihr. or vstribl. or vstrihl. instruction
966 ;; can, for example, satisfy the needs of a vec_stril () function paired
967 ;; with a vec_stril_p () function if both take the same incoming arguments.
968 (define_expand "vstril_p_<mode>"
969 [(match_operand:SI 0 "gpc_reg_operand")
970 (match_operand:VIshort 1 "altivec_register_operand")]
971 "TARGET_POWER10"
972 {
973 rtx scratch = gen_reg_rtx (<MODE>mode);
974 if (BYTES_BIG_ENDIAN)
975 emit_insn (gen_vstril_p_direct_<mode> (scratch, operands[1]));
976 else
977 emit_insn (gen_vstrir_p_direct_<mode> (scratch, operands[1]));
978 emit_insn (gen_cr6_test_for_zero (operands[0]));
979 DONE;
980 })
981
982 (define_insn "vstril_p_direct_<mode>"
983 [(set (match_operand:VIshort 0 "altivec_register_operand" "=v")
984 (unspec:VIshort
985 [(match_operand:VIshort 1 "altivec_register_operand" "v")]
986 UNSPEC_VSTRIL))
987 (set (reg:CC CR6_REGNO)
988 (unspec:CC [(match_dup 1)]
989 UNSPEC_VSTRIR))]
990 "TARGET_POWER10"
991 "vstri<wd>l. %0,%1"
992 [(set_attr "type" "vecsimple")])
993
;; Fused multiply subtract
;; Negated fused multiply-add: op0 = -(op1 * op2 - op3), matched from the
;; canonical (neg (fma a b (neg c))) RTL form produced by combine.
(define_insn "*altivec_vnmsubfp"
  [(set (match_operand:V4SF 0 "register_operand" "=v")
	(neg:V4SF
	 (fma:V4SF (match_operand:V4SF 1 "register_operand" "v")
		   (match_operand:V4SF 2 "register_operand" "v")
		   (neg:V4SF
		    (match_operand:V4SF 3 "register_operand" "v")))))]
  "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
  "vnmsubfp %0,%1,%2,%3"
  [(set_attr "type" "vecfloat")])
1005
;; Multiply-sum family: widening multiply of op1*op2 element pairs, summed
;; into the corresponding word of accumulator op3.  The saturating variants
;; ("s" suffix) also touch VSCR (saturation bit), modeled by the extra set.

;; Unsigned modulo multiply-sum, byte or halfword elements into V4SI.
(define_insn "altivec_vmsumu<VI_char>m"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
	(unspec:V4SI [(match_operand:VIshort 1 "register_operand" "v")
		      (match_operand:VIshort 2 "register_operand" "v")
		      (match_operand:V4SI 3 "register_operand" "v")]
		     UNSPEC_VMSUMU))]
  "TARGET_ALTIVEC"
  "vmsumu<VI_char>m %0,%1,%2,%3"
  [(set_attr "type" "veccomplex")])

;; Unsigned doubleword multiply-sum into a 128-bit accumulator (ISA 3.0).
(define_insn "altivec_vmsumudm"
  [(set (match_operand:V1TI 0 "register_operand" "=v")
	(unspec:V1TI [(match_operand:V2DI 1 "register_operand" "v")
		      (match_operand:V2DI 2 "register_operand" "v")
		      (match_operand:V1TI 3 "register_operand" "v")]
		     UNSPEC_VMSUMUDM))]
  "TARGET_P8_VECTOR"
  "vmsumudm %0,%1,%2,%3"
  [(set_attr "type" "veccomplex")])

;; Mixed-sign modulo multiply-sum, byte or halfword elements into V4SI.
(define_insn "altivec_vmsumm<VI_char>m"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
	(unspec:V4SI [(match_operand:VIshort 1 "register_operand" "v")
		      (match_operand:VIshort 2 "register_operand" "v")
		      (match_operand:V4SI 3 "register_operand" "v")]
		     UNSPEC_VMSUMM))]
  "TARGET_ALTIVEC"
  "vmsumm<VI_char>m %0,%1,%2,%3"
  [(set_attr "type" "veccomplex")])

;; Signed halfword modulo multiply-sum.
(define_insn "altivec_vmsumshm"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
	(unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
		      (match_operand:V8HI 2 "register_operand" "v")
		      (match_operand:V4SI 3 "register_operand" "v")]
		     UNSPEC_VMSUMSHM))]
  "TARGET_ALTIVEC"
  "vmsumshm %0,%1,%2,%3"
  [(set_attr "type" "veccomplex")])

;; Unsigned halfword saturating multiply-sum; sets VSCR[SAT] on overflow.
(define_insn "altivec_vmsumuhs"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
	(unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
		      (match_operand:V8HI 2 "register_operand" "v")
		      (match_operand:V4SI 3 "register_operand" "v")]
		     UNSPEC_VMSUMUHS))
   (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
  "TARGET_ALTIVEC"
  "vmsumuhs %0,%1,%2,%3"
  [(set_attr "type" "veccomplex")])

;; Signed halfword saturating multiply-sum; sets VSCR[SAT] on overflow.
(define_insn "altivec_vmsumshs"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
	(unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
		      (match_operand:V8HI 2 "register_operand" "v")
		      (match_operand:V4SI 3 "register_operand" "v")]
		     UNSPEC_VMSUMSHS))
   (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
  "TARGET_ALTIVEC"
  "vmsumshs %0,%1,%2,%3"
  [(set_attr "type" "veccomplex")])
1067
;; max

;; Element-wise unsigned maximum; named pattern used by the vectorizer.
(define_insn "umax<mode>3"
  [(set (match_operand:VI2 0 "register_operand" "=v")
	(umax:VI2 (match_operand:VI2 1 "register_operand" "v")
		  (match_operand:VI2 2 "register_operand" "v")))]
  "<VI_unit>"
  "vmaxu<VI_char> %0,%1,%2"
  [(set_attr "type" "vecsimple")])

;; Element-wise signed maximum.
(define_insn "smax<mode>3"
  [(set (match_operand:VI2 0 "register_operand" "=v")
	(smax:VI2 (match_operand:VI2 1 "register_operand" "v")
		  (match_operand:VI2 2 "register_operand" "v")))]
  "<VI_unit>"
  "vmaxs<VI_char> %0,%1,%2"
  [(set_attr "type" "vecsimple")])

;; Single-precision FP maximum (AltiVec-only path).
(define_insn "*altivec_smaxv4sf3"
  [(set (match_operand:V4SF 0 "register_operand" "=v")
	(smax:V4SF (match_operand:V4SF 1 "register_operand" "v")
		   (match_operand:V4SF 2 "register_operand" "v")))]
  "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
  "vmaxfp %0,%1,%2"
  [(set_attr "type" "veccmp")])

;; Element-wise unsigned minimum.
(define_insn "umin<mode>3"
  [(set (match_operand:VI2 0 "register_operand" "=v")
	(umin:VI2 (match_operand:VI2 1 "register_operand" "v")
		  (match_operand:VI2 2 "register_operand" "v")))]
  "<VI_unit>"
  "vminu<VI_char> %0,%1,%2"
  [(set_attr "type" "vecsimple")])

;; Element-wise signed minimum.
(define_insn "smin<mode>3"
  [(set (match_operand:VI2 0 "register_operand" "=v")
	(smin:VI2 (match_operand:VI2 1 "register_operand" "v")
		  (match_operand:VI2 2 "register_operand" "v")))]
  "<VI_unit>"
  "vmins<VI_char> %0,%1,%2"
  [(set_attr "type" "vecsimple")])

;; Single-precision FP minimum (AltiVec-only path).
(define_insn "*altivec_sminv4sf3"
  [(set (match_operand:V4SF 0 "register_operand" "=v")
	(smin:V4SF (match_operand:V4SF 1 "register_operand" "v")
		   (match_operand:V4SF 2 "register_operand" "v")))]
  "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
  "vminfp %0,%1,%2"
  [(set_attr "type" "veccmp")])
1117
;; Signed halfword multiply-high-and-add, saturating; sets VSCR[SAT].
(define_insn "altivec_vmhaddshs"
  [(set (match_operand:V8HI 0 "register_operand" "=v")
	(unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
		      (match_operand:V8HI 2 "register_operand" "v")
		      (match_operand:V8HI 3 "register_operand" "v")]
		     UNSPEC_VMHADDSHS))
   (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
  "TARGET_ALTIVEC"
  "vmhaddshs %0,%1,%2,%3"
  [(set_attr "type" "veccomplex")])

;; Rounding variant of the above; also saturating, sets VSCR[SAT].
(define_insn "altivec_vmhraddshs"
  [(set (match_operand:V8HI 0 "register_operand" "=v")
	(unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
		      (match_operand:V8HI 2 "register_operand" "v")
		      (match_operand:V8HI 3 "register_operand" "v")]
		     UNSPEC_VMHRADDSHS))
   (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
  "TARGET_ALTIVEC"
  "vmhraddshs %0,%1,%2,%3"
  [(set_attr "type" "veccomplex")])
1139
;; Integer halfword fused multiply-add (modulo arithmetic), expressed with
;; generic plus/mult RTL so the standard fma named pattern matches it.
(define_insn "fmav8hi4"
  [(set (match_operand:V8HI 0 "register_operand" "=v")
	(plus:V8HI (mult:V8HI (match_operand:V8HI 1 "register_operand" "v")
			      (match_operand:V8HI 2 "register_operand" "v"))
		   (match_operand:V8HI 3 "register_operand" "v")))]
  "TARGET_ALTIVEC"
  "vmladduhm %0,%1,%2,%3"
  [(set_attr "type" "veccomplex")])
1148
;; Merge-high bytes, endian-neutral entry point.  On little endian the
;; "high" half of the concatenation is the other operand order's "low"
;; half, so emit vmrglb with the operands swapped instead.
(define_expand "altivec_vmrghb"
  [(use (match_operand:V16QI 0 "register_operand"))
   (use (match_operand:V16QI 1 "register_operand"))
   (use (match_operand:V16QI 2 "register_operand"))]
  "TARGET_ALTIVEC"
{
  rtx (*fun) (rtx, rtx, rtx) = BYTES_BIG_ENDIAN ? gen_altivec_vmrghb_direct
					       : gen_altivec_vmrglb_direct;
  if (!BYTES_BIG_ENDIAN)
    std::swap (operands[1], operands[2]);
  emit_insn (fun (operands[0], operands[1], operands[2]));
  DONE;
})

;; Direct vmrghb: interleave the first 8 bytes of op1 with the first 8
;; bytes of op2 (indices 0..7 pair with 16..23 of the concatenation).
(define_insn "altivec_vmrghb_direct"
  [(set (match_operand:V16QI 0 "register_operand" "=v")
	(vec_select:V16QI
	  (vec_concat:V32QI
	    (match_operand:V16QI 1 "register_operand" "v")
	    (match_operand:V16QI 2 "register_operand" "v"))
	  (parallel [(const_int 0) (const_int 16)
		     (const_int 1) (const_int 17)
		     (const_int 2) (const_int 18)
		     (const_int 3) (const_int 19)
		     (const_int 4) (const_int 20)
		     (const_int 5) (const_int 21)
		     (const_int 6) (const_int 22)
		     (const_int 7) (const_int 23)])))]
  "TARGET_ALTIVEC"
  "vmrghb %0,%1,%2"
  [(set_attr "type" "vecperm")])
1180
;; Merge-high halfwords, endian-neutral entry point; see vmrghb for the
;; little-endian swap rationale.
(define_expand "altivec_vmrghh"
  [(use (match_operand:V8HI 0 "register_operand"))
   (use (match_operand:V8HI 1 "register_operand"))
   (use (match_operand:V8HI 2 "register_operand"))]
  "TARGET_ALTIVEC"
{
  rtx (*fun) (rtx, rtx, rtx) = BYTES_BIG_ENDIAN ? gen_altivec_vmrghh_direct
					       : gen_altivec_vmrglh_direct;
  if (!BYTES_BIG_ENDIAN)
    std::swap (operands[1], operands[2]);
  emit_insn (fun (operands[0], operands[1], operands[2]));
  DONE;
})

;; Direct vmrghh: interleave halfwords 0..3 of op1 with 0..3 of op2.
(define_insn "altivec_vmrghh_direct"
  [(set (match_operand:V8HI 0 "register_operand" "=v")
	(vec_select:V8HI
	  (vec_concat:V16HI
	    (match_operand:V8HI 1 "register_operand" "v")
	    (match_operand:V8HI 2 "register_operand" "v"))
	  (parallel [(const_int 0) (const_int 8)
		     (const_int 1) (const_int 9)
		     (const_int 2) (const_int 10)
		     (const_int 3) (const_int 11)])))]
  "TARGET_ALTIVEC"
  "vmrghh %0,%1,%2"
  [(set_attr "type" "vecperm")])
1208
;; Merge-high words, endian-neutral entry point; see vmrghb for the
;; little-endian swap rationale.
(define_expand "altivec_vmrghw"
  [(use (match_operand:V4SI 0 "register_operand"))
   (use (match_operand:V4SI 1 "register_operand"))
   (use (match_operand:V4SI 2 "register_operand"))]
  "VECTOR_MEM_ALTIVEC_P (V4SImode)"
{
  rtx (*fun) (rtx, rtx, rtx);
  fun = BYTES_BIG_ENDIAN ? gen_altivec_vmrghw_direct_v4si
			 : gen_altivec_vmrglw_direct_v4si;
  if (!BYTES_BIG_ENDIAN)
    std::swap (operands[1], operands[2]);
  emit_insn (fun (operands[0], operands[1], operands[2]));
  DONE;
})

;; Direct merge-high words; first alternative uses the VSX form (any VSX
;; register), second the classic AltiVec form.
(define_insn "altivec_vmrghw_direct_<mode>"
  [(set (match_operand:VSX_W 0 "register_operand" "=wa,v")
	(vec_select:VSX_W
	  (vec_concat:<VS_double>
	    (match_operand:VSX_W 1 "register_operand" "wa,v")
	    (match_operand:VSX_W 2 "register_operand" "wa,v"))
	  (parallel [(const_int 0) (const_int 4)
		     (const_int 1) (const_int 5)])))]
  "TARGET_ALTIVEC"
  "@
   xxmrghw %x0,%x1,%x2
   vmrghw %0,%1,%2"
  [(set_attr "type" "vecperm")])
1237
;; Merge-high single-precision floats.  The RTL is written in big-endian
;; element order; on little endian the same result is produced by vmrglw
;; with the operands swapped in the emitted assembly.
(define_insn "*altivec_vmrghsf"
  [(set (match_operand:V4SF 0 "register_operand" "=v")
	(vec_select:V4SF
	  (vec_concat:V8SF
	    (match_operand:V4SF 1 "register_operand" "v")
	    (match_operand:V4SF 2 "register_operand" "v"))
	  (parallel [(const_int 0) (const_int 4)
		     (const_int 1) (const_int 5)])))]
  "VECTOR_MEM_ALTIVEC_P (V4SFmode)"
{
  if (BYTES_BIG_ENDIAN)
    return "vmrghw %0,%1,%2";
  else
    return "vmrglw %0,%2,%1";
}
  [(set_attr "type" "vecperm")])
1254
;; Merge-low bytes, endian-neutral entry point; mirror image of vmrghb
;; (emits vmrghb with swapped operands on little endian).
(define_expand "altivec_vmrglb"
  [(use (match_operand:V16QI 0 "register_operand"))
   (use (match_operand:V16QI 1 "register_operand"))
   (use (match_operand:V16QI 2 "register_operand"))]
  "TARGET_ALTIVEC"
{
  rtx (*fun) (rtx, rtx, rtx) = BYTES_BIG_ENDIAN ? gen_altivec_vmrglb_direct
					       : gen_altivec_vmrghb_direct;
  if (!BYTES_BIG_ENDIAN)
    std::swap (operands[1], operands[2]);
  emit_insn (fun (operands[0], operands[1], operands[2]));
  DONE;
})

;; Direct vmrglb: interleave bytes 8..15 of op1 with bytes 8..15 of op2.
(define_insn "altivec_vmrglb_direct"
  [(set (match_operand:V16QI 0 "register_operand" "=v")
	(vec_select:V16QI
	  (vec_concat:V32QI
	    (match_operand:V16QI 1 "register_operand" "v")
	    (match_operand:V16QI 2 "register_operand" "v"))
	  (parallel [(const_int 8) (const_int 24)
		     (const_int 9) (const_int 25)
		     (const_int 10) (const_int 26)
		     (const_int 11) (const_int 27)
		     (const_int 12) (const_int 28)
		     (const_int 13) (const_int 29)
		     (const_int 14) (const_int 30)
		     (const_int 15) (const_int 31)])))]
  "TARGET_ALTIVEC"
  "vmrglb %0,%1,%2"
  [(set_attr "type" "vecperm")])
1286
;; Merge-low halfwords, endian-neutral entry point; see vmrglb for the
;; little-endian swap rationale.
(define_expand "altivec_vmrglh"
  [(use (match_operand:V8HI 0 "register_operand"))
   (use (match_operand:V8HI 1 "register_operand"))
   (use (match_operand:V8HI 2 "register_operand"))]
  "TARGET_ALTIVEC"
{
  rtx (*fun) (rtx, rtx, rtx) = BYTES_BIG_ENDIAN ? gen_altivec_vmrglh_direct
					       : gen_altivec_vmrghh_direct;
  if (!BYTES_BIG_ENDIAN)
    std::swap (operands[1], operands[2]);
  emit_insn (fun (operands[0], operands[1], operands[2]));
  DONE;
})

;; Direct vmrglh: interleave halfwords 4..7 of op1 with 4..7 of op2.
(define_insn "altivec_vmrglh_direct"
  [(set (match_operand:V8HI 0 "register_operand" "=v")
	(vec_select:V8HI
	  (vec_concat:V16HI
	    (match_operand:V8HI 1 "register_operand" "v")
	    (match_operand:V8HI 2 "register_operand" "v"))
	  (parallel [(const_int 4) (const_int 12)
		     (const_int 5) (const_int 13)
		     (const_int 6) (const_int 14)
		     (const_int 7) (const_int 15)])))]
  "TARGET_ALTIVEC"
  "vmrglh %0,%1,%2"
  [(set_attr "type" "vecperm")])
1314
;; Merge-low words, endian-neutral entry point; see vmrglb for the
;; little-endian swap rationale.
(define_expand "altivec_vmrglw"
  [(use (match_operand:V4SI 0 "register_operand"))
   (use (match_operand:V4SI 1 "register_operand"))
   (use (match_operand:V4SI 2 "register_operand"))]
  "VECTOR_MEM_ALTIVEC_P (V4SImode)"
{
  rtx (*fun) (rtx, rtx, rtx);
  fun = BYTES_BIG_ENDIAN ? gen_altivec_vmrglw_direct_v4si
			 : gen_altivec_vmrghw_direct_v4si;
  if (!BYTES_BIG_ENDIAN)
    std::swap (operands[1], operands[2]);
  emit_insn (fun (operands[0], operands[1], operands[2]));
  DONE;
})

;; Direct merge-low words; VSX (xxmrglw) or AltiVec (vmrglw) alternative.
(define_insn "altivec_vmrglw_direct_<mode>"
  [(set (match_operand:VSX_W 0 "register_operand" "=wa,v")
	(vec_select:VSX_W
	  (vec_concat:<VS_double>
	    (match_operand:VSX_W 1 "register_operand" "wa,v")
	    (match_operand:VSX_W 2 "register_operand" "wa,v"))
	  (parallel [(const_int 2) (const_int 6)
		     (const_int 3) (const_int 7)])))]
  "TARGET_ALTIVEC"
  "@
   xxmrglw %x0,%x1,%x2
   vmrglw %0,%1,%2"
  [(set_attr "type" "vecperm")])
1343
;; Merge-low single-precision floats; RTL in big-endian element order,
;; with the little-endian case handled by emitting vmrghw with swapped
;; assembly operands.
(define_insn "*altivec_vmrglsf"
  [(set (match_operand:V4SF 0 "register_operand" "=v")
	(vec_select:V4SF
	  (vec_concat:V8SF
	    (match_operand:V4SF 1 "register_operand" "v")
	    (match_operand:V4SF 2 "register_operand" "v"))
	  (parallel [(const_int 2) (const_int 6)
		     (const_int 3) (const_int 7)])))]
  "VECTOR_MEM_ALTIVEC_P (V4SFmode)"
{
  if (BYTES_BIG_ENDIAN)
    return "vmrglw %0,%1,%2";
  else
    return "vmrghw %0,%2,%1";
}
  [(set_attr "type" "vecperm")])
1360
;; Power8 vector merge two V2DF/V2DI even words to V2DF
;; Expander form for doubleword modes: build the equivalent vec_select of
;; the concatenation (elements 0 and 2) and let later passes pick the insn.
(define_expand "p8_vmrgew_<mode>"
  [(use (match_operand:VSX_D 0 "vsx_register_operand"))
   (use (match_operand:VSX_D 1 "vsx_register_operand"))
   (use (match_operand:VSX_D 2 "vsx_register_operand"))]
  "VECTOR_MEM_VSX_P (<MODE>mode)"
{
  rtvec v;
  rtx x;

  v = gen_rtvec (2, GEN_INT (0), GEN_INT (2));
  x = gen_rtx_VEC_CONCAT (<VS_double>mode, operands[1], operands[2]);

  x = gen_rtx_VEC_SELECT (<MODE>mode, x, gen_rtx_PARALLEL (VOIDmode, v));
  emit_insn (gen_rtx_SET (operands[0], x));
  DONE;
})

;; Power8 vector merge two V4SF/V4SI even words to V4SF
;; On little endian the even-word merge is realized by vmrgow with the
;; assembly operands swapped.
(define_insn "p8_vmrgew_<mode>"
  [(set (match_operand:VSX_W 0 "register_operand" "=v")
	(vec_select:VSX_W
	  (vec_concat:<VS_double>
	    (match_operand:VSX_W 1 "register_operand" "v")
	    (match_operand:VSX_W 2 "register_operand" "v"))
	  (parallel [(const_int 0) (const_int 4)
		     (const_int 2) (const_int 6)])))]
  "TARGET_P8_VECTOR"
{
  if (BYTES_BIG_ENDIAN)
    return "vmrgew %0,%1,%2";
  else
    return "vmrgow %0,%2,%1";
}
  [(set_attr "type" "vecperm")])
1396
;; Merge odd words of two V4SF/V4SI vectors; on little endian this is the
;; vmrgew instruction with the assembly operands swapped.
(define_insn "p8_vmrgow_<mode>"
  [(set (match_operand:VSX_W 0 "register_operand" "=v")
	(vec_select:VSX_W
	  (vec_concat:<VS_double>
	    (match_operand:VSX_W 1 "register_operand" "v")
	    (match_operand:VSX_W 2 "register_operand" "v"))
	  (parallel [(const_int 1) (const_int 5)
		     (const_int 3) (const_int 7)])))]
  "TARGET_P8_VECTOR"
{
  if (BYTES_BIG_ENDIAN)
    return "vmrgow %0,%1,%2";
  else
    return "vmrgew %0,%2,%1";
}
  [(set_attr "type" "vecperm")])

;; Expander form of merge-odd for doubleword modes: vec_select elements
;; 1 and 3 of the concatenation.
(define_expand "p8_vmrgow_<mode>"
  [(use (match_operand:VSX_D 0 "vsx_register_operand"))
   (use (match_operand:VSX_D 1 "vsx_register_operand"))
   (use (match_operand:VSX_D 2 "vsx_register_operand"))]
  "VECTOR_MEM_VSX_P (<MODE>mode)"
{
  rtvec v;
  rtx x;

  v = gen_rtvec (2, GEN_INT (1), GEN_INT (3));
  x = gen_rtx_VEC_CONCAT (<VS_double>mode, operands[1], operands[2]);

  x = gen_rtx_VEC_SELECT (<MODE>mode, x, gen_rtx_PARALLEL (VOIDmode, v));
  emit_insn (gen_rtx_SET (operands[0], x));
  DONE;
})
1430
;; Opaque (unspec) forms of vmrgew/vmrgow: always emit the named
;; instruction regardless of endianness, for callers that want the raw
;; hardware semantics rather than the endian-adjusted merge.
(define_insn "p8_vmrgew_<mode>_direct"
  [(set (match_operand:VSX_W 0 "register_operand" "=v")
	(unspec:VSX_W [(match_operand:VSX_W 1 "register_operand" "v")
		       (match_operand:VSX_W 2 "register_operand" "v")]
		      UNSPEC_VMRGEW_DIRECT))]
  "TARGET_P8_VECTOR"
  "vmrgew %0,%1,%2"
  [(set_attr "type" "vecperm")])

(define_insn "p8_vmrgow_<mode>_direct"
  [(set (match_operand:VSX_W 0 "register_operand" "=v")
	(unspec:VSX_W [(match_operand:VSX_W 1 "register_operand" "v")
		       (match_operand:VSX_W 2 "register_operand" "v")]
		      UNSPEC_VMRGOW_DIRECT))]
  "TARGET_P8_VECTOR"
  "vmrgow %0,%1,%2"
  [(set_attr "type" "vecperm")])
1448
;; Widening multiply of EVEN-numbered elements, endian-neutral entry
;; points.  Hardware even/odd numbering is big-endian; on little endian
;; the GCC notion of "even" corresponds to the hardware "odd" insn, so
;; each expander picks vmule*/vmulo* accordingly.

(define_expand "vec_widen_umult_even_v16qi"
  [(use (match_operand:V8HI 0 "register_operand"))
   (use (match_operand:V16QI 1 "register_operand"))
   (use (match_operand:V16QI 2 "register_operand"))]
  "TARGET_ALTIVEC"
{
  if (BYTES_BIG_ENDIAN)
    emit_insn (gen_altivec_vmuleub (operands[0], operands[1], operands[2]));
  else
    emit_insn (gen_altivec_vmuloub (operands[0], operands[1], operands[2]));
  DONE;
})

(define_expand "vec_widen_smult_even_v16qi"
  [(use (match_operand:V8HI 0 "register_operand"))
   (use (match_operand:V16QI 1 "register_operand"))
   (use (match_operand:V16QI 2 "register_operand"))]
  "TARGET_ALTIVEC"
{
  if (BYTES_BIG_ENDIAN)
    emit_insn (gen_altivec_vmulesb (operands[0], operands[1], operands[2]));
  else
    emit_insn (gen_altivec_vmulosb (operands[0], operands[1], operands[2]));
  DONE;
})

(define_expand "vec_widen_umult_even_v8hi"
  [(use (match_operand:V4SI 0 "register_operand"))
   (use (match_operand:V8HI 1 "register_operand"))
   (use (match_operand:V8HI 2 "register_operand"))]
  "TARGET_ALTIVEC"
{
  if (BYTES_BIG_ENDIAN)
    emit_insn (gen_altivec_vmuleuh (operands[0], operands[1], operands[2]));
  else
    emit_insn (gen_altivec_vmulouh (operands[0], operands[1], operands[2]));
  DONE;
})

(define_expand "vec_widen_smult_even_v8hi"
  [(use (match_operand:V4SI 0 "register_operand"))
   (use (match_operand:V8HI 1 "register_operand"))
   (use (match_operand:V8HI 2 "register_operand"))]
  "TARGET_ALTIVEC"
{
  if (BYTES_BIG_ENDIAN)
    emit_insn (gen_altivec_vmulesh (operands[0], operands[1], operands[2]));
  else
    emit_insn (gen_altivec_vmulosh (operands[0], operands[1], operands[2]));
  DONE;
})

;; Word forms require Power8; doubleword forms require Power10.
(define_expand "vec_widen_umult_even_v4si"
  [(use (match_operand:V2DI 0 "register_operand"))
   (use (match_operand:V4SI 1 "register_operand"))
   (use (match_operand:V4SI 2 "register_operand"))]
  "TARGET_P8_VECTOR"
{
  if (BYTES_BIG_ENDIAN)
    emit_insn (gen_altivec_vmuleuw (operands[0], operands[1], operands[2]));
  else
    emit_insn (gen_altivec_vmulouw (operands[0], operands[1], operands[2]));
  DONE;
})

(define_expand "vec_widen_umult_even_v2di"
  [(use (match_operand:V1TI 0 "register_operand"))
   (use (match_operand:V2DI 1 "register_operand"))
   (use (match_operand:V2DI 2 "register_operand"))]
  "TARGET_POWER10"
{
  if (BYTES_BIG_ENDIAN)
    emit_insn (gen_altivec_vmuleud (operands[0], operands[1], operands[2]));
  else
    emit_insn (gen_altivec_vmuloud (operands[0], operands[1], operands[2]));
  DONE;
})

(define_expand "vec_widen_smult_even_v4si"
  [(use (match_operand:V2DI 0 "register_operand"))
   (use (match_operand:V4SI 1 "register_operand"))
   (use (match_operand:V4SI 2 "register_operand"))]
  "TARGET_P8_VECTOR"
{
  if (BYTES_BIG_ENDIAN)
    emit_insn (gen_altivec_vmulesw (operands[0], operands[1], operands[2]));
  else
    emit_insn (gen_altivec_vmulosw (operands[0], operands[1], operands[2]));
  DONE;
})

(define_expand "vec_widen_smult_even_v2di"
  [(use (match_operand:V1TI 0 "register_operand"))
   (use (match_operand:V2DI 1 "register_operand"))
   (use (match_operand:V2DI 2 "register_operand"))]
  "TARGET_POWER10"
{
  if (BYTES_BIG_ENDIAN)
    emit_insn (gen_altivec_vmulesd (operands[0], operands[1], operands[2]));
  else
    emit_insn (gen_altivec_vmulosd (operands[0], operands[1], operands[2]));
  DONE;
})
1552
;; Widening multiply of ODD-numbered elements, endian-neutral entry
;; points.  Mirror of the "even" expanders above: on little endian the
;; GCC "odd" elements correspond to the hardware "even" instruction.

(define_expand "vec_widen_umult_odd_v16qi"
  [(use (match_operand:V8HI 0 "register_operand"))
   (use (match_operand:V16QI 1 "register_operand"))
   (use (match_operand:V16QI 2 "register_operand"))]
  "TARGET_ALTIVEC"
{
  if (BYTES_BIG_ENDIAN)
    emit_insn (gen_altivec_vmuloub (operands[0], operands[1], operands[2]));
  else
    emit_insn (gen_altivec_vmuleub (operands[0], operands[1], operands[2]));
  DONE;
})

(define_expand "vec_widen_smult_odd_v16qi"
  [(use (match_operand:V8HI 0 "register_operand"))
   (use (match_operand:V16QI 1 "register_operand"))
   (use (match_operand:V16QI 2 "register_operand"))]
  "TARGET_ALTIVEC"
{
  if (BYTES_BIG_ENDIAN)
    emit_insn (gen_altivec_vmulosb (operands[0], operands[1], operands[2]));
  else
    emit_insn (gen_altivec_vmulesb (operands[0], operands[1], operands[2]));
  DONE;
})

(define_expand "vec_widen_umult_odd_v8hi"
  [(use (match_operand:V4SI 0 "register_operand"))
   (use (match_operand:V8HI 1 "register_operand"))
   (use (match_operand:V8HI 2 "register_operand"))]
  "TARGET_ALTIVEC"
{
  if (BYTES_BIG_ENDIAN)
    emit_insn (gen_altivec_vmulouh (operands[0], operands[1], operands[2]));
  else
    emit_insn (gen_altivec_vmuleuh (operands[0], operands[1], operands[2]));
  DONE;
})

(define_expand "vec_widen_smult_odd_v8hi"
  [(use (match_operand:V4SI 0 "register_operand"))
   (use (match_operand:V8HI 1 "register_operand"))
   (use (match_operand:V8HI 2 "register_operand"))]
  "TARGET_ALTIVEC"
{
  if (BYTES_BIG_ENDIAN)
    emit_insn (gen_altivec_vmulosh (operands[0], operands[1], operands[2]));
  else
    emit_insn (gen_altivec_vmulesh (operands[0], operands[1], operands[2]));
  DONE;
})

;; Word forms require Power8; doubleword forms require Power10.
(define_expand "vec_widen_umult_odd_v4si"
  [(use (match_operand:V2DI 0 "register_operand"))
   (use (match_operand:V4SI 1 "register_operand"))
   (use (match_operand:V4SI 2 "register_operand"))]
  "TARGET_P8_VECTOR"
{
  if (BYTES_BIG_ENDIAN)
    emit_insn (gen_altivec_vmulouw (operands[0], operands[1], operands[2]));
  else
    emit_insn (gen_altivec_vmuleuw (operands[0], operands[1], operands[2]));
  DONE;
})

(define_expand "vec_widen_umult_odd_v2di"
  [(use (match_operand:V1TI 0 "register_operand"))
   (use (match_operand:V2DI 1 "register_operand"))
   (use (match_operand:V2DI 2 "register_operand"))]
  "TARGET_POWER10"
{
  if (BYTES_BIG_ENDIAN)
    emit_insn (gen_altivec_vmuloud (operands[0], operands[1], operands[2]));
  else
    emit_insn (gen_altivec_vmuleud (operands[0], operands[1], operands[2]));
  DONE;
})

(define_expand "vec_widen_smult_odd_v4si"
  [(use (match_operand:V2DI 0 "register_operand"))
   (use (match_operand:V4SI 1 "register_operand"))
   (use (match_operand:V4SI 2 "register_operand"))]
  "TARGET_P8_VECTOR"
{
  if (BYTES_BIG_ENDIAN)
    emit_insn (gen_altivec_vmulosw (operands[0], operands[1], operands[2]));
  else
    emit_insn (gen_altivec_vmulesw (operands[0], operands[1], operands[2]));
  DONE;
})

(define_expand "vec_widen_smult_odd_v2di"
  [(use (match_operand:V1TI 0 "register_operand"))
   (use (match_operand:V2DI 1 "register_operand"))
   (use (match_operand:V2DI 2 "register_operand"))]
  "TARGET_POWER10"
{
  if (BYTES_BIG_ENDIAN)
    emit_insn (gen_altivec_vmulosd (operands[0], operands[1], operands[2]));
  else
    emit_insn (gen_altivec_vmulesd (operands[0], operands[1], operands[2]));
  DONE;
})
1656
;; Raw even/odd widening multiply instructions.  Naming: vmul{e,o}{u,s}{b,h,w,d}
;; = multiply Even/Odd, Unsigned/Signed, Byte/Halfword/Word/Doubleword.
;; Each produces results twice the width of its inputs.  These model the
;; hardware (big-endian) element numbering; the endian-aware entry points
;; above select between them.

(define_insn "altivec_vmuleub"
  [(set (match_operand:V8HI 0 "register_operand" "=v")
	(unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")
		      (match_operand:V16QI 2 "register_operand" "v")]
		     UNSPEC_VMULEUB))]
  "TARGET_ALTIVEC"
  "vmuleub %0,%1,%2"
  [(set_attr "type" "veccomplex")])

(define_insn "altivec_vmuloub"
  [(set (match_operand:V8HI 0 "register_operand" "=v")
	(unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")
		      (match_operand:V16QI 2 "register_operand" "v")]
		     UNSPEC_VMULOUB))]
  "TARGET_ALTIVEC"
  "vmuloub %0,%1,%2"
  [(set_attr "type" "veccomplex")])

(define_insn "altivec_vmulesb"
  [(set (match_operand:V8HI 0 "register_operand" "=v")
	(unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")
		      (match_operand:V16QI 2 "register_operand" "v")]
		     UNSPEC_VMULESB))]
  "TARGET_ALTIVEC"
  "vmulesb %0,%1,%2"
  [(set_attr "type" "veccomplex")])

(define_insn "altivec_vmulosb"
  [(set (match_operand:V8HI 0 "register_operand" "=v")
	(unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")
		      (match_operand:V16QI 2 "register_operand" "v")]
		     UNSPEC_VMULOSB))]
  "TARGET_ALTIVEC"
  "vmulosb %0,%1,%2"
  [(set_attr "type" "veccomplex")])

(define_insn "altivec_vmuleuh"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
	(unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
		      (match_operand:V8HI 2 "register_operand" "v")]
		     UNSPEC_VMULEUH))]
  "TARGET_ALTIVEC"
  "vmuleuh %0,%1,%2"
  [(set_attr "type" "veccomplex")])

(define_insn "altivec_vmulouh"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
	(unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
		      (match_operand:V8HI 2 "register_operand" "v")]
		     UNSPEC_VMULOUH))]
  "TARGET_ALTIVEC"
  "vmulouh %0,%1,%2"
  [(set_attr "type" "veccomplex")])

(define_insn "altivec_vmulesh"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
	(unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
		      (match_operand:V8HI 2 "register_operand" "v")]
		     UNSPEC_VMULESH))]
  "TARGET_ALTIVEC"
  "vmulesh %0,%1,%2"
  [(set_attr "type" "veccomplex")])

(define_insn "altivec_vmulosh"
  [(set (match_operand:V4SI 0 "register_operand" "=v")
	(unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
		      (match_operand:V8HI 2 "register_operand" "v")]
		     UNSPEC_VMULOSH))]
  "TARGET_ALTIVEC"
  "vmulosh %0,%1,%2"
  [(set_attr "type" "veccomplex")])

;; Word forms (Power8) and doubleword forms (Power10).
(define_insn "altivec_vmuleuw"
  [(set (match_operand:V2DI 0 "register_operand" "=v")
	(unspec:V2DI [(match_operand:V4SI 1 "register_operand" "v")
		      (match_operand:V4SI 2 "register_operand" "v")]
		     UNSPEC_VMULEUW))]
  "TARGET_P8_VECTOR"
  "vmuleuw %0,%1,%2"
  [(set_attr "type" "veccomplex")])

(define_insn "altivec_vmuleud"
  [(set (match_operand:V1TI 0 "register_operand" "=v")
	(unspec:V1TI [(match_operand:V2DI 1 "register_operand" "v")
		      (match_operand:V2DI 2 "register_operand" "v")]
		     UNSPEC_VMULEUD))]
  "TARGET_POWER10"
  "vmuleud %0,%1,%2"
  [(set_attr "type" "veccomplex")])

(define_insn "altivec_vmulouw"
  [(set (match_operand:V2DI 0 "register_operand" "=v")
	(unspec:V2DI [(match_operand:V4SI 1 "register_operand" "v")
		      (match_operand:V4SI 2 "register_operand" "v")]
		     UNSPEC_VMULOUW))]
  "TARGET_P8_VECTOR"
  "vmulouw %0,%1,%2"
  [(set_attr "type" "veccomplex")])

(define_insn "altivec_vmuloud"
  [(set (match_operand:V1TI 0 "register_operand" "=v")
	(unspec:V1TI [(match_operand:V2DI 1 "register_operand" "v")
		      (match_operand:V2DI 2 "register_operand" "v")]
		     UNSPEC_VMULOUD))]
  "TARGET_POWER10"
  "vmuloud %0,%1,%2"
  [(set_attr "type" "veccomplex")])

(define_insn "altivec_vmulesw"
  [(set (match_operand:V2DI 0 "register_operand" "=v")
	(unspec:V2DI [(match_operand:V4SI 1 "register_operand" "v")
		      (match_operand:V4SI 2 "register_operand" "v")]
		     UNSPEC_VMULESW))]
  "TARGET_P8_VECTOR"
  "vmulesw %0,%1,%2"
  [(set_attr "type" "veccomplex")])

(define_insn "altivec_vmulesd"
  [(set (match_operand:V1TI 0 "register_operand" "=v")
	(unspec:V1TI [(match_operand:V2DI 1 "register_operand" "v")
		      (match_operand:V2DI 2 "register_operand" "v")]
		     UNSPEC_VMULESD))]
  "TARGET_POWER10"
  "vmulesd %0,%1,%2"
  [(set_attr "type" "veccomplex")])

(define_insn "altivec_vmulosw"
  [(set (match_operand:V2DI 0 "register_operand" "=v")
	(unspec:V2DI [(match_operand:V4SI 1 "register_operand" "v")
		      (match_operand:V4SI 2 "register_operand" "v")]
		     UNSPEC_VMULOSW))]
  "TARGET_P8_VECTOR"
  "vmulosw %0,%1,%2"
  [(set_attr "type" "veccomplex")])

(define_insn "altivec_vmulosd"
  [(set (match_operand:V1TI 0 "register_operand" "=v")
	(unspec:V1TI [(match_operand:V2DI 1 "register_operand" "v")
		      (match_operand:V2DI 2 "register_operand" "v")]
		     UNSPEC_VMULOSD))]
  "TARGET_POWER10"
  "vmulosd %0,%1,%2"
  [(set_attr "type" "veccomplex")])
1800
;; Vector pack/unpack
;; The pack instructions take two wide-element vectors and narrow them
;; into one.  The emitted operand order is swapped on little endian so
;; the packed result keeps the expected element order.

;; Pack pixel: narrows two V4SI pixel vectors into one V8HI.
(define_insn "altivec_vpkpx"
  [(set (match_operand:V8HI 0 "register_operand" "=v")
	(unspec:V8HI [(match_operand:V4SI 1 "register_operand" "v")
		      (match_operand:V4SI 2 "register_operand" "v")]
		     UNSPEC_VPKPX))]
  "TARGET_ALTIVEC"
{
  if (BYTES_BIG_ENDIAN)
    return "vpkpx %0,%1,%2";
  else
    return "vpkpx %0,%2,%1";
}
  [(set_attr "type" "vecperm")])

;; Pack signed sources to signed results with saturation.
(define_insn "altivec_vpks<VI_char>ss"
  [(set (match_operand:<VP_small> 0 "register_operand" "=v")
	(unspec:<VP_small> [(match_operand:VP 1 "register_operand" "v")
			    (match_operand:VP 2 "register_operand" "v")]
			   UNSPEC_VPACK_SIGN_SIGN_SAT))]
  "<VI_unit>"
{
  if (BYTES_BIG_ENDIAN)
    return "vpks<VI_char>ss %0,%1,%2";
  else
    return "vpks<VI_char>ss %0,%2,%1";
}
  [(set_attr "type" "vecperm")])

;; Pack signed sources to unsigned results with saturation.
(define_insn "altivec_vpks<VI_char>us"
  [(set (match_operand:<VP_small> 0 "register_operand" "=v")
	(unspec:<VP_small> [(match_operand:VP 1 "register_operand" "v")
			    (match_operand:VP 2 "register_operand" "v")]
			   UNSPEC_VPACK_SIGN_UNS_SAT))]
  "<VI_unit>"
{
  if (BYTES_BIG_ENDIAN)
    return "vpks<VI_char>us %0,%1,%2";
  else
    return "vpks<VI_char>us %0,%2,%1";
}
  [(set_attr "type" "vecperm")])

;; Pack unsigned sources to unsigned results with saturation.
(define_insn "altivec_vpku<VI_char>us"
  [(set (match_operand:<VP_small> 0 "register_operand" "=v")
	(unspec:<VP_small> [(match_operand:VP 1 "register_operand" "v")
			    (match_operand:VP 2 "register_operand" "v")]
			   UNSPEC_VPACK_UNS_UNS_SAT))]
  "<VI_unit>"
{
  if (BYTES_BIG_ENDIAN)
    return "vpku<VI_char>us %0,%1,%2";
  else
    return "vpku<VI_char>us %0,%2,%1";
}
  [(set_attr "type" "vecperm")])

;; Modulo pack (truncate): keep low halves, no saturation.
(define_insn "altivec_vpku<VI_char>um"
  [(set (match_operand:<VP_small> 0 "register_operand" "=v")
	(unspec:<VP_small> [(match_operand:VP 1 "register_operand" "v")
			    (match_operand:VP 2 "register_operand" "v")]
			   UNSPEC_VPACK_UNS_UNS_MOD))]
  "<VI_unit>"
{
  if (BYTES_BIG_ENDIAN)
    return "vpku<VI_char>um %0,%1,%2";
  else
    return "vpku<VI_char>um %0,%2,%1";
}
  [(set_attr "type" "vecperm")])

;; Same as above under a distinct unspec so internal users can request it
;; without being combined with the user-visible pattern.
(define_insn "altivec_vpku<VI_char>um_direct"
  [(set (match_operand:<VP_small> 0 "register_operand" "=v")
	(unspec:<VP_small> [(match_operand:VP 1 "register_operand" "v")
			    (match_operand:VP 2 "register_operand" "v")]
			   UNSPEC_VPACK_UNS_UNS_MOD_DIRECT))]
  "<VI_unit>"
{
  if (BYTES_BIG_ENDIAN)
    return "vpku<VI_char>um %0,%1,%2";
  else
    return "vpku<VI_char>um %0,%2,%1";
}
  [(set_attr "type" "vecperm")])
1885
;; Element-wise rotate left; per-element rotate counts come from op2.
(define_insn "altivec_vrl<VI_char>"
  [(set (match_operand:VI2 0 "register_operand" "=v")
	(rotate:VI2 (match_operand:VI2 1 "register_operand" "v")
		    (match_operand:VI2 2 "register_operand" "v")))]
  "<VI_unit>"
  "vrl<VI_char> %0,%1,%2"
  [(set_attr "type" "vecsimple")])

;; 128-bit quadword rotate left (ISA 3.1).
(define_insn "altivec_vrlq"
  [(set (match_operand:V1TI 0 "vsx_register_operand" "=v")
	(rotate:V1TI (match_operand:V1TI 1 "vsx_register_operand" "v")
		     (match_operand:V1TI 2 "vsx_register_operand" "v")))]
  "TARGET_POWER10"
;; rotate amount in needs to be in bits[57:63] of operand2.
  "vrlq %0,%1,%2"
  [(set_attr "type" "vecsimple")])

;; Rotate left then mask-insert into op2 (the "0" constraint ties the
;; accumulator to the output register).
(define_insn "altivec_vrl<VI_char>mi"
  [(set (match_operand:VIlong 0 "register_operand" "=v")
	(unspec:VIlong [(match_operand:VIlong 1 "register_operand" "v")
			(match_operand:VIlong 2 "register_operand" "0")
			(match_operand:VIlong 3 "register_operand" "v")]
		       UNSPEC_VRLMI))]
  "TARGET_P9_VECTOR"
  "vrl<VI_char>mi %0,%1,%3"
  [(set_attr "type" "veclogical")])
1912
1913 (define_expand "altivec_vrlqmi"
1914 [(set (match_operand:V1TI 0 "vsx_register_operand")
1915 (unspec:V1TI [(match_operand:V1TI 1 "vsx_register_operand")
1916 (match_operand:V1TI 2 "vsx_register_operand")
1917 (match_operand:V1TI 3 "vsx_register_operand")]
1918 UNSPEC_VRLMI))]
1919 "TARGET_POWER10"
1920 {
1921 /* Mask bit begin, end fields need to be in bits [41:55] of 128-bit operand2.
1922 Shift amount in needs to be put in bits[57:63] of 128-bit operand2. */
1923 rtx tmp = gen_reg_rtx (V1TImode);
1924
1925 emit_insn (gen_xxswapd_v1ti (tmp, operands[3]));
1926 emit_insn (gen_altivec_vrlqmi_inst (operands[0], operands[1], operands[2],
1927 tmp));
1928 DONE;
1929 })
1930
1931 (define_insn "altivec_vrlqmi_inst"
1932 [(set (match_operand:V1TI 0 "vsx_register_operand" "=v")
1933 (unspec:V1TI [(match_operand:V1TI 1 "vsx_register_operand" "v")
1934 (match_operand:V1TI 2 "vsx_register_operand" "0")
1935 (match_operand:V1TI 3 "vsx_register_operand" "v")]
1936 UNSPEC_VRLMI))]
1937 "TARGET_POWER10"
1938 "vrlqmi %0,%1,%3"
1939 [(set_attr "type" "veclogical")])
1940
1941 (define_insn "altivec_vrl<VI_char>nm"
1942 [(set (match_operand:VIlong 0 "register_operand" "=v")
1943 (unspec:VIlong [(match_operand:VIlong 1 "register_operand" "v")
1944 (match_operand:VIlong 2 "register_operand" "v")]
1945 UNSPEC_VRLNM))]
1946 "TARGET_P9_VECTOR"
1947 "vrl<VI_char>nm %0,%1,%2"
1948 [(set_attr "type" "veclogical")])
1949
1950 (define_expand "altivec_vrlqnm"
1951 [(set (match_operand:V1TI 0 "vsx_register_operand")
1952 (unspec:V1TI [(match_operand:V1TI 1 "vsx_register_operand")
1953 (match_operand:V1TI 2 "vsx_register_operand")]
1954 UNSPEC_VRLNM))]
1955 "TARGET_POWER10"
1956 {
1957 /* Shift amount in needs to be put in bits[57:63] of 128-bit operand2. */
1958 rtx tmp = gen_reg_rtx (V1TImode);
1959
1960 emit_insn (gen_xxswapd_v1ti (tmp, operands[2]));
1961 emit_insn (gen_altivec_vrlqnm_inst (operands[0], operands[1], tmp));
1962 DONE;
1963 })
1964
1965 (define_insn "altivec_vrlqnm_inst"
1966 [(set (match_operand:V1TI 0 "vsx_register_operand" "=v")
1967 (unspec:V1TI [(match_operand:V1TI 1 "vsx_register_operand" "v")
1968 (match_operand:V1TI 2 "vsx_register_operand" "v")]
1969 UNSPEC_VRLNM))]
1970 "TARGET_POWER10"
1971 ;; rotate and mask bits need to be in upper 64-bits of operand2.
1972 "vrlqnm %0,%1,%2"
1973 [(set_attr "type" "veclogical")])
1974
;; Whole-vector shift left (vsl): shifts the full 128-bit register by the
;; bit count implied by operand 2; modelled as an unspec on V4SI.
1975 (define_insn "altivec_vsl"
1976 [(set (match_operand:V4SI 0 "register_operand" "=v")
1977 (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
1978 (match_operand:V4SI 2 "register_operand" "v")]
1979 UNSPEC_VSLV4SI))]
1980 "TARGET_ALTIVEC"
1981 "vsl %0,%1,%2"
1982 [(set_attr "type" "vecperm")])
1983
;; Whole-vector shift left by octets (vslo).
1984 (define_insn "altivec_vslo"
1985 [(set (match_operand:V4SI 0 "register_operand" "=v")
1986 (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
1987 (match_operand:V4SI 2 "register_operand" "v")]
1988 UNSPEC_VSLO))]
1989 "TARGET_ALTIVEC"
1990 "vslo %0,%1,%2"
1991 [(set_attr "type" "vecperm")])
1992
;; Per-byte variable shift left (ISA 3.0 vslv).
1993 (define_insn "vslv"
1994 [(set (match_operand:V16QI 0 "register_operand" "=v")
1995 (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
1996 (match_operand:V16QI 2 "register_operand" "v")]
1997 UNSPEC_VSLV))]
1998 "TARGET_P9_VECTOR"
1999 "vslv %0,%1,%2"
2000 [(set_attr "type" "vecsimple")])
2001
;; Per-byte variable shift right (ISA 3.0 vsrv).
2002 (define_insn "vsrv"
2003 [(set (match_operand:V16QI 0 "register_operand" "=v")
2004 (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
2005 (match_operand:V16QI 2 "register_operand" "v")]
2006 UNSPEC_VSRV))]
2007 "TARGET_P9_VECTOR"
2008 "vsrv %0,%1,%2"
2009 [(set_attr "type" "vecsimple")])
2010
;; Element-wise shift left; anonymous pattern matched by the generic
;; ashift expander.
2011 (define_insn "*altivec_vsl<VI_char>"
2012 [(set (match_operand:VI2 0 "register_operand" "=v")
2013 (ashift:VI2 (match_operand:VI2 1 "register_operand" "v")
2014 (match_operand:VI2 2 "register_operand" "v")))]
2015 "<VI_unit>"
2016 "vsl<VI_char> %0,%1,%2"
2017 [(set_attr "type" "vecsimple")])
2018
;; 128-bit shift left (ISA 3.1 vslq).
2019 (define_insn "altivec_vslq_<mode>"
2020 [(set (match_operand:VEC_TI 0 "vsx_register_operand" "=v")
2021 (ashift:VEC_TI (match_operand:VEC_TI 1 "vsx_register_operand" "v")
2022 (match_operand:VEC_TI 2 "vsx_register_operand" "v")))]
2023 "TARGET_POWER10"
2024 /* The shift amount needs to be in bits [57:63] of the 128-bit operand. */
2025 "vslq %0,%1,%2"
2026 [(set_attr "type" "vecsimple")])
2027
;; Element-wise logical shift right; anonymous pattern for lshiftrt.
2028 (define_insn "*altivec_vsr<VI_char>"
2029 [(set (match_operand:VI2 0 "register_operand" "=v")
2030 (lshiftrt:VI2 (match_operand:VI2 1 "register_operand" "v")
2031 (match_operand:VI2 2 "register_operand" "v")))]
2032 "<VI_unit>"
2033 "vsr<VI_char> %0,%1,%2"
2034 [(set_attr "type" "vecsimple")])
2035
;; 128-bit logical shift right (ISA 3.1 vsrq).
2036 (define_insn "altivec_vsrq_<mode>"
2037 [(set (match_operand:VEC_TI 0 "vsx_register_operand" "=v")
2038 (lshiftrt:VEC_TI (match_operand:VEC_TI 1 "vsx_register_operand" "v")
2039 (match_operand:VEC_TI 2 "vsx_register_operand" "v")))]
2040 "TARGET_POWER10"
2041 /* The shift amount needs to be in bits [57:63] of the 128-bit operand. */
2042 "vsrq %0,%1,%2"
2043 [(set_attr "type" "vecsimple")])
2044
;; Element-wise arithmetic shift right; anonymous pattern for ashiftrt.
2045 (define_insn "*altivec_vsra<VI_char>"
2046 [(set (match_operand:VI2 0 "register_operand" "=v")
2047 (ashiftrt:VI2 (match_operand:VI2 1 "register_operand" "v")
2048 (match_operand:VI2 2 "register_operand" "v")))]
2049 "<VI_unit>"
2050 "vsra<VI_char> %0,%1,%2"
2051 [(set_attr "type" "vecsimple")])
2052
;; 128-bit arithmetic shift right (ISA 3.1 vsraq).
2053 (define_insn "altivec_vsraq"
2054 [(set (match_operand:V1TI 0 "vsx_register_operand" "=v")
2055 (ashiftrt:V1TI (match_operand:V1TI 1 "vsx_register_operand" "v")
2056 (match_operand:V1TI 2 "vsx_register_operand" "v")))]
2057 "TARGET_POWER10"
2058 /* The shift amount needs to be in bits [57:63] of the 128-bit operand. */
2059 "vsraq %0,%1,%2"
2060 [(set_attr "type" "vecsimple")])
2061
;; Whole-vector shift right (vsr).
2062 (define_insn "altivec_vsr"
2063 [(set (match_operand:V4SI 0 "register_operand" "=v")
2064 (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
2065 (match_operand:V4SI 2 "register_operand" "v")]
2066 UNSPEC_VSR))]
2067 "TARGET_ALTIVEC"
2068 "vsr %0,%1,%2"
2069 [(set_attr "type" "vecperm")])
2070
;; Whole-vector shift right by octets (vsro).
2071 (define_insn "altivec_vsro"
2072 [(set (match_operand:V4SI 0 "register_operand" "=v")
2073 (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
2074 (match_operand:V4SI 2 "register_operand" "v")]
2075 UNSPEC_VSRO))]
2076 "TARGET_ALTIVEC"
2077 "vsro %0,%1,%2"
2078 [(set_attr "type" "vecperm")])
2079
;; Sum-across patterns.  The saturating sums also set VSCR (the saturation
;; bit), which is modelled by the parallel set of VSCR_REGNO.
;; vsum4ubs: sum four unsigned bytes per word plus the word of operand 2,
;; with unsigned saturation.
2080 (define_insn "altivec_vsum4ubs"
2081 [(set (match_operand:V4SI 0 "register_operand" "=v")
2082 (unspec:V4SI [(match_operand:V16QI 1 "register_operand" "v")
2083 (match_operand:V4SI 2 "register_operand" "v")]
2084 UNSPEC_VSUM4UBS))
2085 (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
2086 "TARGET_ALTIVEC"
2087 "vsum4ubs %0,%1,%2"
2088 [(set_attr "type" "veccomplex")])
2089
;; Signed variant over bytes or halfwords (VIshort iterator).
2090 (define_insn "altivec_vsum4s<VI_char>s"
2091 [(set (match_operand:V4SI 0 "register_operand" "=v")
2092 (unspec:V4SI [(match_operand:VIshort 1 "register_operand" "v")
2093 (match_operand:V4SI 2 "register_operand" "v")]
2094 UNSPEC_VSUM4S))
2095 (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
2096 "TARGET_ALTIVEC"
2097 "vsum2sws %0,%1,%2"
2098 "vsum4s<VI_char>s %0,%1,%2"
2099 [(set_attr "type" "veccomplex")])
2100
;; vsum2sws expander.  For little-endian the even/odd word positions the
;; hardware instruction expects are reversed, so operand 2 is rotated into
;; place with vsldoi before the sum and the result rotated back after.
2100 (define_expand "altivec_vsum2sws"
2101 [(use (match_operand:V4SI 0 "register_operand"))
2102 (use (match_operand:V4SI 1 "register_operand"))
2103 (use (match_operand:V4SI 2 "register_operand"))]
2104 "TARGET_ALTIVEC"
2105 {
2106 if (BYTES_BIG_ENDIAN)
2107 emit_insn (gen_altivec_vsum2sws_direct (operands[0], operands[1],
2108 operands[2]));
2109 else
2110 {
2111 rtx tmp1 = gen_reg_rtx (V4SImode);
2112 rtx tmp2 = gen_reg_rtx (V4SImode);
2113 emit_insn (gen_altivec_vsldoi_v4si (tmp1, operands[2],
2114 operands[2], GEN_INT (12)));
2115 emit_insn (gen_altivec_vsum2sws_direct (tmp2, operands[1], tmp1));
2116 emit_insn (gen_altivec_vsldoi_v4si (operands[0], tmp2, tmp2,
2117 GEN_INT (4)));
2118 }
2119 DONE;
2120 })
2121
2122 ; FIXME: This can probably be expressed without an UNSPEC.
2123 (define_insn "altivec_vsum2sws_direct"
2124 [(set (match_operand:V4SI 0 "register_operand" "=v")
2125 (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
2126 (match_operand:V4SI 2 "register_operand" "v")]
2127 UNSPEC_VSUM2SWS))
2128 (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
2129 "TARGET_ALTIVEC"
2130 "vsum2sws %0,%1,%2"
2131 [(set_attr "type" "veccomplex")])
2132
;; vsumsws expander.  For little-endian, splat word 0 of operand 2 so the
;; addend is in the word the hardware reads, then rotate the result into
;; the expected position.
2133 (define_expand "altivec_vsumsws"
2134 [(use (match_operand:V4SI 0 "register_operand"))
2135 (use (match_operand:V4SI 1 "register_operand"))
2136 (use (match_operand:V4SI 2 "register_operand"))]
2137 "TARGET_ALTIVEC"
2138 {
2139 if (BYTES_BIG_ENDIAN)
2140 emit_insn (gen_altivec_vsumsws_direct (operands[0], operands[1],
2141 operands[2]));
2142 else
2143 {
2144 rtx tmp1 = gen_reg_rtx (V4SImode);
2145 rtx tmp2 = gen_reg_rtx (V4SImode);
2146 emit_insn (gen_altivec_vspltw_direct (tmp1, operands[2], const0_rtx));
2147 emit_insn (gen_altivec_vsumsws_direct (tmp2, operands[1], tmp1));
2148 emit_insn (gen_altivec_vsldoi_v4si (operands[0], tmp2, tmp2,
2149 GEN_INT (12)));
2150 }
2151 DONE;
2152 })
2153
2154 ; FIXME: This can probably be expressed without an UNSPEC.
2155 (define_insn "altivec_vsumsws_direct"
2156 [(set (match_operand:V4SI 0 "register_operand" "=v")
2157 (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
2158 (match_operand:V4SI 2 "register_operand" "v")]
2159 UNSPEC_VSUMSWS_DIRECT))
2160 (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
2161 "TARGET_ALTIVEC"
2162 "vsumsws %0,%1,%2"
2163 [(set_attr "type" "veccomp
2164
;; Splat patterns: broadcast element N of the source vector to every element
;; of the destination.  Each expander builds the canonical
;; vec_duplicate/vec_select RTL; the *_internal insns adjust the element
;; index for little-endian (hardware numbers elements in BE order); the
;; *_direct insns emit the instruction with the index used verbatim.
2165 (define_expand "altivec_vspltb"
2166 [(use (match_operand:V16QI 0 "register_operand"))
2167 (use (match_operand:V16QI 1 "register_operand"))
2168 (use (match_operand:QI 2 "const_0_to_15_operand"))]
2169 "TARGET_ALTIVEC"
2170 {
2171 rtvec v = gen_rtvec (1, operands[2]);
2172 rtx x;
2173 x = gen_rtx_VEC_SELECT (QImode, operands[1], gen_rtx_PARALLEL (VOIDmode, v));
2174 x = gen_rtx_VEC_DUPLICATE (V16QImode, x);
2175 emit_insn (gen_rtx_SET (operands[0], x));
2176 DONE;
2177 })
2178
;; Byte splat; the selected index is mirrored (15 - n) for little-endian.
2179 (define_insn "*altivec_vspltb_internal"
2180 [(set (match_operand:V16QI 0 "register_operand" "=v")
2181 (vec_duplicate:V16QI
2182 (vec_select:QI (match_operand:V16QI 1 "register_operand" "v")
2183 (parallel
2184 [(match_operand:QI 2 "const_0_to_15_operand" "")]))))]
2185 "TARGET_ALTIVEC"
2186 {
2187 if (!BYTES_BIG_ENDIAN)
2188 operands[2] = GEN_INT (15 - INTVAL (operands[2]));
2189
2190 return "vspltb %0,%1,%2";
2191 }
2192 [(set_attr "type" "vecperm")])
2193
;; Byte splat with the element index taken literally (no endian adjustment).
2194 (define_insn "altivec_vspltb_direct"
2195 [(set (match_operand:V16QI 0 "register_operand" "=v")
2196 (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
2197 (match_operand:QI 2 "const_0_to_15_operand" "i")]
2198 UNSPEC_VSPLT_DIRECT))]
2199 "TARGET_ALTIVEC"
2200 "vspltb %0,%1,%2"
2201 [(set_attr "type" "vecperm")])
2202
;; Halfword splat expander (index 0..7).
2203 (define_expand "altivec_vsplth"
2204 [(use (match_operand:V8HI 0 "register_operand"))
2205 (use (match_operand:V8HI 1 "register_operand"))
2206 (use (match_operand:QI 2 "const_0_to_7_operand"))]
2207 "TARGET_ALTIVEC"
2208 {
2209 rtvec v = gen_rtvec (1, operands[2]);
2210 rtx x;
2211 x = gen_rtx_VEC_SELECT (HImode, operands[1], gen_rtx_PARALLEL (VOIDmode, v));
2212 x = gen_rtx_VEC_DUPLICATE (V8HImode, x);
2213 emit_insn (gen_rtx_SET (operands[0], x));
2214 DONE;
2215 })
2216
;; Halfword splat; index mirrored (7 - n) for little-endian.
2217 (define_insn "*altivec_vsplth_internal"
2218 [(set (match_operand:V8HI 0 "register_operand" "=v")
2219 (vec_duplicate:V8HI
2220 (vec_select:HI (match_operand:V8HI 1 "register_operand" "v")
2221 (parallel
2222 [(match_operand:QI 2 "const_0_to_7_operand" "")]))))]
2223 "TARGET_ALTIVEC"
2224 {
2225 if (!BYTES_BIG_ENDIAN)
2226 operands[2] = GEN_INT (7 - INTVAL (operands[2]));
2227
2228 return "vsplth %0,%1,%2";
2229 }
2230 [(set_attr "type" "vecperm")])
2231
;; Halfword splat with literal element index.
2232 (define_insn "altivec_vsplth_direct"
2233 [(set (match_operand:V8HI 0 "register_operand" "=v")
2234 (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
2235 (match_operand:QI 2 "const_0_to_7_operand" "i")]
2236 UNSPEC_VSPLT_DIRECT))]
2237 "TARGET_ALTIVEC"
2238 "vsplth %0,%1,%2"
2239 [(set_attr "type" "vecperm")])
2240
;; Word splat expander (index 0..3).
2241 (define_expand "altivec_vspltw"
2242 [(use (match_operand:V4SI 0 "register_operand"))
2243 (use (match_operand:V4SI 1 "register_operand"))
2244 (use (match_operand:QI 2 "const_0_to_3_operand"))]
2245 "TARGET_ALTIVEC"
2246 {
2247 rtvec v = gen_rtvec (1, operands[2]);
2248 rtx x;
2249 x = gen_rtx_VEC_SELECT (SImode, operands[1], gen_rtx_PARALLEL (VOIDmode, v));
2250 x = gen_rtx_VEC_DUPLICATE (V4SImode, x);
2251 emit_insn (gen_rtx_SET (operands[0], x));
2252 DONE;
2253 })
2254
;; Word splat; index mirrored (3 - n) for little-endian.
2255 (define_insn "*altivec_vspltw_internal"
2256 [(set (match_operand:V4SI 0 "register_operand" "=v")
2257 (vec_duplicate:V4SI
2258 (vec_select:SI (match_operand:V4SI 1 "register_operand" "v")
2259 (parallel
2260 [(match_operand:QI 2 "const_0_to_3_operand" "i")]))))]
2261 "TARGET_ALTIVEC"
2262 {
2263 if (!BYTES_BIG_ENDIAN)
2264 operands[2] = GEN_INT (3 - INTVAL (operands[2]));
2265
2266 return "vspltw %0,%1,%2";
2267 }
2268 [(set_attr "type" "vecperm")])
2269
;; Word splat with literal element index.
2270 (define_insn "altivec_vspltw_direct"
2271 [(set (match_operand:V4SI 0 "register_operand" "=v")
2272 (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
2273 (match_operand:QI 2 "const_0_to_3_operand" "i")]
2274 UNSPEC_VSPLT_DIRECT))]
2275 "TARGET_ALTIVEC"
2276 "vspltw %0,%1,%2"
2277 [(set_attr "type" "vecperm")])
2278
;; Float (V4SF) splat expander; same lane layout as the word splat.
2279 (define_expand "altivec_vspltsf"
2280 [(use (match_operand:V4SF 0 "register_operand"))
2281 (use (match_operand:V4SF 1 "register_operand"))
2282 (use (match_operand:QI 2 "const_0_to_3_operand"))]
2283 "TARGET_ALTIVEC"
2284 {
2285 rtvec v = gen_rtvec (1, operands[2]);
2286 rtx x;
2287 x = gen_rtx_VEC_SELECT (SFmode, operands[1], gen_rtx_PARALLEL (VOIDmode, v));
2288 x = gen_rtx_VEC_DUPLICATE (V4SFmode, x);
2289 emit_insn (gen_rtx_SET (operands[0], x));
2290 DONE;
2291 })
2292
;; Float splat; uses the integer vspltw since lanes are 32 bits either way.
2293 (define_insn "*altivec_vspltsf_internal"
2294 [(set (match_operand:V4SF 0 "register_operand" "=v")
2295 (vec_duplicate:V4SF
2296 (vec_select:SF (match_operand:V4SF 1 "register_operand" "v")
2297 (parallel
2298 [(match_operand:QI 2 "const_0_to_3_operand" "i")]))))]
2299 "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
2300 {
2301 if (!BYTES_BIG_ENDIAN)
2302 operands[2] = GEN_INT (3 - INTVAL (operands[2]));
2303
2304 return "vspltw %0,%1,%2";
2305 }
2306 [(set_attr "type" "vecperm")])
2307
;; Splat a 5-bit signed immediate to every element (vspltisb/h/w).
2308 (define_insn "altivec_vspltis<VI_char>"
2309 [(set (match_operand:VI 0 "register_operand" "=v")
2310 (vec_duplicate:VI
2311 (match_operand:QI 1 "s5bit_cint_operand" "i")))]
2312 "TARGET_ALTIVEC"
2313 "vspltis<VI_char> %0,%1"
2314 [(set_attr "type" "vecperm")])
2315
;; vrfiz: round each V4SF element toward zero (RTL `fix` on a float mode).
2316 (define_insn "*altivec_vrfiz"
2317 [(set (match_operand:V4SF 0 "register_operand" "=v")
2318 (fix:V4SF (match_operand:V4SF 1 "register_operand" "v")))]
2319 "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
2320 "vrfiz %0,%1"
2321 [(set_attr "type" "vecfloat")])
2322
;; vperm expander: on little-endian, hand off to
;; altivec_expand_vec_perm_le, which rewrites the selector for LE element
;; numbering; on big-endian the pattern is used as-is.
2323 (define_expand "altivec_vperm_<mode>"
2324 [(set (match_operand:VM 0 "register_operand")
2325 (unspec:VM [(match_operand:VM 1 "register_operand")
2326 (match_operand:VM 2 "register_operand")
2327 (match_operand:V16QI 3 "register_operand")]
2328 UNSPEC_VPERM))]
2329 "TARGET_ALTIVEC"
2330 {
2331 if (!BYTES_BIG_ENDIAN)
2332 {
2333 altivec_expand_vec_perm_le (operands);
2334 DONE;
2335 }
2336 })
2337
2338 ;; Slightly prefer vperm, since the target does not overlap the source
2339 (define_insn "altivec_vperm_<mode>_direct"
2340 [(set (match_operand:VM 0 "register_operand" "=?wa,v")
2341 (unspec:VM [(match_operand:VM 1 "register_operand" "wa,v")
2342 (match_operand:VM 2 "register_operand" "0,v")
2343 (match_operand:V16QI 3 "register_operand" "wa,v")]
2344 UNSPEC_VPERM))]
2345 "TARGET_ALTIVEC"
2346 "@
2347 xxperm %x0,%x1,%x3
2348 vperm %0,%1,%2,%3"
2349 [(set_attr "type" "vecperm")
2350 (set_attr "isa" "p9v,*")])
2351
;; vperm producing a V16QI result from two V8HI inputs; xxperm alternative
;; requires ISA 3.0 (isa attribute "p9v") and ties operand 2 to the output.
2352 (define_insn "altivec_vperm_v8hiv16qi"
2353 [(set (match_operand:V16QI 0 "register_operand" "=?wa,v")
2354 (unspec:V16QI [(match_operand:V8HI 1 "register_operand" "wa,v")
2355 (match_operand:V8HI 2 "register_operand" "0,v")
2356 (match_operand:V16QI 3 "register_operand" "wa,v")]
2357 UNSPEC_VPERM))]
2358 "TARGET_ALTIVEC"
2359 "@
2360 xxperm %x0,%x1,%x3
2361 vperm %0,%1,%2,%3"
2362 [(set_attr "type" "vecperm")
2363 (set_attr "isa" "p9v,*")])
2364
;; Unsigned-elements variant of the vperm expander; same LE handling.
2365 (define_expand "altivec_vperm_<mode>_uns"
2366 [(set (match_operand:VM 0 "register_operand")
2367 (unspec:VM [(match_operand:VM 1 "register_operand")
2368 (match_operand:VM 2 "register_operand")
2369 (match_operand:V16QI 3 "register_operand")]
2370 UNSPEC_VPERM_UNS))]
2371 "TARGET_ALTIVEC"
2372 {
2373 if (!BYTES_BIG_ENDIAN)
2374 {
2375 altivec_expand_vec_perm_le (operands);
2376 DONE;
2377 }
2378 })
2379
;; Insn behind the unsigned vperm expander.
2380 (define_insn "*altivec_vperm_<mode>_uns_internal"
2381 [(set (match_operand:VM 0 "register_operand" "=?wa,v")
2382 (unspec:VM [(match_operand:VM 1 "register_operand" "wa,v")
2383 (match_operand:VM 2 "register_operand" "0,v")
2384 (match_operand:V16QI 3 "register_operand" "wa,v")]
2385 UNSPEC_VPERM_UNS))]
2386 "TARGET_ALTIVEC"
2387 "@
2388 xxperm %x0,%x1,%x3
2389 vperm %0,%1,%2,%3"
2390 [(set_attr "type" "vecperm")
2391 (set_attr "isa" "p9v,*")])
2392
;; Standard-named vec_perm expander for V16QI; LE handled as above.
2393 (define_expand "vec_permv16qi"
2394 [(set (match_operand:V16QI 0 "register_operand")
2395 (unspec:V16QI [(match_operand:V16QI 1 "register_operand")
2396 (match_operand:V16QI 2 "register_operand")
2397 (match_operand:V16QI 3 "register_operand")]
2398 UNSPEC_VPERM))]
2399 "TARGET_ALTIVEC"
2400 {
2401 if (!BYTES_BIG_ENDIAN) {
2402 altivec_expand_vec_perm_le (operands);
2403 DONE;
2404 }
2405 })
2406
;; vpermr/xxpermr (ISA 3.0): permute with a right-index selector.
2407 (define_insn "*altivec_vpermr_<mode>_internal"
2408 [(set (match_operand:VM 0 "register_operand" "=?wa,v")
2409 (unspec:VM [(match_operand:VM 1 "register_operand" "wa,v")
2410 (match_operand:VM 2 "register_operand" "0,v")
2411 (match_operand:V16QI 3 "register_operand" "wa,v")]
2412 UNSPEC_VPERMR))]
2413 "TARGET_P9_VECTOR"
2414 "@
2415 xxpermr %x0,%x1,%x3
2416 vpermr %0,%1,%2,%3"
2417 [(set_attr "type" "vecperm")
2418 (set_attr "isa" "p9v,*")])
2419
;; V4SF round-to-integral and estimate instructions.
2420 (define_insn "altivec_vrfip" ; ceil
2421 [(set (match_operand:V4SF 0 "register_operand" "=v")
2422 (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")]
2423 UNSPEC_FRIP))]
2424 "TARGET_ALTIVEC"
2425 "vrfip %0,%1"
2426 [(set_attr "type" "vecfloat")])
2427
;; vrfin: round to nearest.
2428 (define_insn "altivec_vrfin"
2429 [(set (match_operand:V4SF 0 "register_operand" "=v")
2430 (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")]
2431 UNSPEC_VRFIN))]
2432 "TARGET_ALTIVEC"
2433 "vrfin %0,%1"
2434 [(set_attr "type" "vecfloat")])
2435
2436 (define_insn "*altivec_vrfim" ; floor
2437 [(set (match_operand:V4SF 0 "register_operand" "=v")
2438 (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")]
2439 UNSPEC_FRIM))]
2440 "TARGET_ALTIVEC"
2441 "vrfim %0,%1"
2442 [(set_attr "type" "vecfloat")])
2443
;; vcfux: convert unsigned fixed-point words to float, scaled by 2**-imm.
2444 (define_insn "altivec_vcfux"
2445 [(set (match_operand:V4SF 0 "register_operand" "=v")
2446 (unspec:V4SF [(match_operand:V4SI 1 "register_operand" "v")
2447 (match_operand:QI 2 "immediate_operand" "i")]
2448 UNSPEC_VCFUX))]
2449 "TARGET_ALTIVEC"
2450 "vcfux %0,%1,%2"
2451 [(set_attr "type" "vecfloat")])
2452
;; vcfsx: signed fixed-point-to-float conversion with scale immediate.
2453 (define_insn "altivec_vcfsx"
2454 [(set (match_operand:V4SF 0 "register_operand" "=v")
2455 (unspec:V4SF [(match_operand:V4SI 1 "register_operand" "v")
2456 (match_operand:QI 2 "immediate_operand" "i")]
2457 UNSPEC_VCFSX))]
2458 "TARGET_ALTIVEC"
2459 "vcfsx %0,%1,%2"
2460 [(set_attr "type" "vecfloat")])
2461
;; vctuxs: float to unsigned fixed-point with saturation; sets VSCR SAT.
2462 (define_insn "altivec_vctuxs"
2463 [(set (match_operand:V4SI 0 "register_operand" "=v")
2464 (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "v")
2465 (match_operand:QI 2 "immediate_operand" "i")]
2466 UNSPEC_VCTUXS))
2467 (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
2468 "TARGET_ALTIVEC"
2469 "vctuxs %0,%1,%2"
2470 [(set_attr "type" "vecfloat")])
2471
;; vctsxs: float to signed fixed-point with saturation; sets VSCR SAT.
2472 (define_insn "altivec_vctsxs"
2473 [(set (match_operand:V4SI 0 "register_operand" "=v")
2474 (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "v")
2475 (match_operand:QI 2 "immediate_operand" "i")]
2476 UNSPEC_VCTSXS))
2477 (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
2478 "TARGET_ALTIVEC"
2479 "vctsxs %0,%1,%2"
2480 [(set_attr "type" "vecfloat")])
2481
;; vlogefp: base-2 log estimate.
2482 (define_insn "altivec_vlogefp"
2483 [(set (match_operand:V4SF 0 "register_operand" "=v")
2484 (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")]
2485 UNSPEC_VLOGEFP))]
2486 "TARGET_ALTIVEC"
2487 "vlogefp %0,%1"
2488 [(set_attr "type" "vecfloat")])
2489
;; vexptefp: 2**x estimate.
2490 (define_insn "altivec_vexptefp"
2491 [(set (match_operand:V4SF 0 "register_operand" "=v")
2492 (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")]
2493 UNSPEC_VEXPTEFP))]
2494 "TARGET_ALTIVEC"
2495 "vexptefp %0,%1"
2496 [(set_attr "type" "vecfloat")])
2497
;; vrsqrtefp: reciprocal square root estimate.
2498 (define_insn "*altivec_vrsqrtefp"
2499 [(set (match_operand:V4SF 0 "register_operand" "=v")
2500 (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")]
2501 UNSPEC_RSQRT))]
2502 "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
2503 "vrsqrtefp %0,%1"
2504 [(set_attr "type" "vecfloat")])
2505
;; vrefp: reciprocal estimate.
2506 (define_insn "altivec_vrefp"
2507 [(set (match_operand:V4SF 0 "register_operand" "=v")
2508 (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")]
2509 UNSPEC_FRES))]
2510 "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
2511 "vrefp %0,%1"
2512 [(set_attr "type" "vecfloat")])
2513
;; copysign for V4SF: build a vector of sign-bit masks (0x80000000 in each
;; word) and use vector select to take the magnitude from operand 1 and the
;; sign from operand 2.
2514 (define_expand "altivec_copysign_v4sf3"
2515 [(use (match_operand:V4SF 0 "register_operand"))
2516 (use (match_operand:V4SF 1 "register_operand"))
2517 (use (match_operand:V4SF 2 "register_operand"))]
2518 "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
2519 {
2520 rtx mask = gen_reg_rtx (V4SImode);
2521 rtx mask_val = gen_int_mode (HOST_WIDE_INT_1U << 31, SImode);
2522 rtvec v = gen_rtvec (4, mask_val, mask_val, mask_val, mask_val);
2523
2524 emit_insn (gen_vec_initv4sisi (mask, gen_rtx_PARALLEL (V4SImode, v)));
2525 emit_insn (gen_vector_select_v4sf (operands[0], operands[1], operands[2],
2526 gen_lowpart (V4SFmode, mask)));
2527 DONE;
2528 })
2529
;; vsldoi: concatenate operands 1 and 2 and shift left by the immediate
;; byte count in operand 3, taking the high 16 bytes.
2530 (define_insn "altivec_vsldoi_<mode>"
2531 [(set (match_operand:VM 0 "register_operand" "=v")
2532 (unspec:VM [(match_operand:VM 1 "register_operand" "v")
2533 (match_operand:VM 2 "register_operand" "v")
2534 (match_operand:QI 3 "immediate_operand" "i")]
2535 UNSPEC_VSLDOI))]
2536 "TARGET_ALTIVEC"
2537 "vsldoi %0,%1,%2,%3"
2538 [(set_attr "type" "vecperm")])
2539
;; Unpack patterns: widen the high or low half of the source vector with
;; sign extension.  On little-endian, "high" and "low" are swapped in the
;; emitted instruction so the RTL's BE-numbered semantics are preserved;
;; the *_direct variants emit the named instruction unconditionally.
2540 (define_insn "altivec_vupkhs<VU_char>"
2541 [(set (match_operand:VP 0 "register_operand" "=v")
2542 (unspec:VP [(match_operand:<VP_small> 1 "register_operand" "v")]
2543 UNSPEC_VUNPACK_HI_SIGN))]
2544 "<VI_unit>"
2545 {
2546 if (BYTES_BIG_ENDIAN)
2547 return "vupkhs<VU_char> %0,%1";
2548 else
2549 return "vupkls<VU_char> %0,%1";
2550 }
2551 [(set_attr "type" "vecperm")])
2552
;; Unpack high, no endian adjustment.
2553 (define_insn "altivec_vupkhs<VU_char>_direct"
2554 [(set (match_operand:VP 0 "register_operand" "=v")
2555 (unspec:VP [(match_operand:<VP_small> 1 "register_operand" "v")]
2556 UNSPEC_VUNPACK_HI_SIGN_DIRECT))]
2557 "<VI_unit>"
2558 "vupkhs<VU_char> %0,%1"
2559 [(set_attr "type" "vecperm")])
2560
;; Unpack low with sign extension; high/low swapped on little-endian.
2561 (define_insn "altivec_vupkls<VU_char>"
2562 [(set (match_operand:VP 0 "register_operand" "=v")
2563 (unspec:VP [(match_operand:<VP_small> 1 "register_operand" "v")]
2564 UNSPEC_VUNPACK_LO_SIGN))]
2565 "<VI_unit>"
2566 {
2567 if (BYTES_BIG_ENDIAN)
2568 return "vupkls<VU_char> %0,%1";
2569 else
2570 return "vupkhs<VU_char> %0,%1";
2571 }
2572 [(set_attr "type" "vecperm")])
2573
;; Unpack low, no endian adjustment.
2574 (define_insn "*altivec_vupkls<VU_char>_direct"
2575 [(set (match_operand:VP 0 "register_operand" "=v")
2576 (unspec:VP [(match_operand:<VP_small> 1 "register_operand" "v")]
2577 UNSPEC_VUNPACK_LO_SIGN_DIRECT))]
2578 "<VI_unit>"
2579 "vupkls<VU_char> %0,%1"
2580 [(set_attr "type" "vecperm")])
2581
;; Unpack high pixel (1/5/5/5 format); high/low swapped on little-endian.
2582 (define_insn "altivec_vupkhpx"
2583 [(set (match_operand:V4SI 0 "register_operand" "=v")
2584 (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")]
2585 UNSPEC_VUPKHPX))]
2586 "TARGET_ALTIVEC"
2587 {
2588 if (BYTES_BIG_ENDIAN)
2589 return "vupkhpx %0,%1";
2590 else
2591 return "vupklpx %0,%1";
2592 }
2593 [(set_attr "type" "vecperm")])
2594
;; Unpack low pixel; high/low swapped on little-endian.
2595 (define_insn "altivec_vupklpx"
2596 [(set (match_operand:V4SI 0 "register_operand" "=v")
2597 (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")]
2598 UNSPEC_VUPKLPX))]
2599 "TARGET_ALTIVEC"
2600 {
2601 if (BYTES_BIG_ENDIAN)
2602 return "vupklpx %0,%1";
2603 else
2604 return "vupkhpx %0,%1";
2605 }
2606 [(set_attr "type" "vecperm")])
2607
2608 /* The cbranch_optab doesn't allow FAIL, so old cpus which are
2609 inefficient on unaligned vsx are disabled as the cost is high
2610 for unaligned load/store. */
;; Conditional branch on V16QI equality/inequality.  Operand 0 is the
;; comparison operator, operands 1/2 the values, operand 3 the branch
;; label (consumed by rs6000_emit_cbranch).
2611 (define_expand "cbranchv16qi4"
2612 [(use (match_operator 0 "equality_operator"
2613 [(match_operand:V16QI 1 "reg_or_mem_operand")
2614 (match_operand:V16QI 2 "reg_or_mem_operand")]))
2615 (use (match_operand 3))]
2616 "VECTOR_MEM_VSX_P (V16QImode)
2617 && TARGET_EFFICIENT_UNALIGNED_VSX"
2618 {
2619 /* Use direct move for P8 LE to skip doubleword swap, as the byte
2620 order doesn't matter for equality compare. If any operands are
2621 altivec indexed or indirect operands, the load can be implemented
2622 directly by altivec aligned load instruction and swap is no
2623 need. */
2624 if (!TARGET_P9_VECTOR
2625 && !BYTES_BIG_ENDIAN
2626 && MEM_P (operands[1])
2627 && !altivec_indexed_or_indirect_operand (operands[1], V16QImode)
2628 && MEM_P (operands[2])
2629 && !altivec_indexed_or_indirect_operand (operands[2], V16QImode))
2630 {
;; Load both memory operands without the usual LE element swap -- byte
;; order is irrelevant when only testing for equality.
2631 rtx reg_op1 = gen_reg_rtx (V16QImode);
2632 rtx reg_op2 = gen_reg_rtx (V16QImode);
2633 rs6000_emit_le_vsx_permute (reg_op1, operands[1], V16QImode);
2634 rs6000_emit_le_vsx_permute (reg_op2, operands[2], V16QImode);
2635 operands[1] = reg_op1;
2636 operands[2] = reg_op2;
2637 }
2638 else
2639 {
2640 operands[1] = force_reg (V16QImode, operands[1]);
2641 operands[2] = force_reg (V16QImode, operands[2]);
2642 }
2643
;; Rebuild the comparison over the (possibly reloaded) register operands
;; and let the generic helper emit the compare and branch.
2644 rtx_code code = GET_CODE (operands[0]);
2645 operands[0] = gen_rtx_fmt_ee (code, V16QImode, operands[1], operands[2]);
2646 rs6000_emit_cbranch (V16QImode, operands);
2647 DONE;
2648 })
2649
2650 ;; Compare vectors producing a vector result and a predicate, setting CR6 to
2651 ;; indicate a combined status
;; Each pattern sets both the vector result and CR6 (via UNSPEC_PREDICATE),
;; corresponding to the dot form of the compare instruction.
2652 (define_insn "altivec_vcmpequ<VI_char>_p"
2653 [(set (reg:CC CR6_REGNO)
2654 (unspec:CC [(eq:CC (match_operand:VI2 1 "register_operand" "v")
2655 (match_operand:VI2 2 "register_operand" "v"))]
2656 UNSPEC_PREDICATE))
2657 (set (match_operand:VI2 0 "register_operand" "=v")
2658 (eq:VI2 (match_dup 1)
2659 (match_dup 2)))]
2660 "<VI_unit>"
2661 "vcmpequ<VI_char>. %0,%1,%2"
2662 [(set_attr "type" "veccmpfx")])
2663
;; Quadword (V1TI) equality compare with predicate, ISA 3.1.
2664 (define_insn "altivec_vcmpequt_p"
2665 [(set (reg:CC CR6_REGNO)
2666 (unspec:CC [(eq:CC (match_operand:V1TI 1 "altivec_register_operand" "v")
2667 (match_operand:V1TI 2 "altivec_register_operand" "v"))]
2668 UNSPEC_PREDICATE))
2669 (set (match_operand:V1TI 0 "altivec_register_operand" "=v")
2670 (eq:V1TI (match_dup 1)
2671 (match_dup 2)))]
2672 "TARGET_POWER10"
2673 "vcmpequq. %0,%1,%2"
2674 [(set_attr "type" "veccmpfx")])
2675
2676 ;; Expand for builtin vcmpne{b,h,w}
;; Implemented as equality compare followed by bitwise NOT; operand 3 is a
;; scratch allocated in the preparation statement.
2677 (define_expand "altivec_vcmpne_<mode>"
2678 [(set (match_operand:VSX_EXTRACT_I 3 "altivec_register_operand" "=v")
2679 (eq:VSX_EXTRACT_I (match_operand:VSX_EXTRACT_I 1 "altivec_register_operand" "v")
2680 (match_operand:VSX_EXTRACT_I 2 "altivec_register_operand" "v")))
2681 (set (match_operand:VSX_EXTRACT_I 0 "altivec_register_operand" "=v")
2682 (not:VSX_EXTRACT_I (match_dup 3)))]
2683 "TARGET_ALTIVEC"
2684 {
2685 operands[3] = gen_reg_rtx (GET_MODE (operands[0]));
2686 })
2687
;; Signed greater-than compare with predicate.
2688 (define_insn "*altivec_vcmpgts<VI_char>_p"
2689 [(set (reg:CC CR6_REGNO)
2690 (unspec:CC [(gt:CC (match_operand:VI2 1 "register_operand" "v")
2691 (match_operand:VI2 2 "register_operand" "v"))]
2692 UNSPEC_PREDICATE))
2693 (set (match_operand:VI2 0 "register_operand" "=v")
2694 (gt:VI2 (match_dup 1)
2695 (match_dup 2)))]
2696 "<VI_unit>"
2697 "vcmpgts<VI_char>. %0,%1,%2"
2698 [(set_attr "type" "veccmpfx")])
2699
;; Quadword signed greater-than compare with predicate, ISA 3.1.
2700 (define_insn "*altivec_vcmpgtst_p"
2701 [(set (reg:CC CR6_REGNO)
2702 (unspec:CC [(gt:CC (match_operand:V1TI 1 "register_operand" "v")
2703 (match_operand:V1TI 2 "register_operand" "v"))]
2704 UNSPEC_PREDICATE))
2705 (set (match_operand:V1TI 0 "register_operand" "=v")
2706 (gt:V1TI (match_dup 1)
2707 (match_dup 2)))]
2708 "TARGET_POWER10"
2709 "vcmpgtsq. %0,%1,%2"
2710 [(set_attr "type" "veccmpfx")])
2711
;; Unsigned greater-than compare with predicate.
2712 (define_insn "*altivec_vcmpgtu<VI_char>_p"
2713 [(set (reg:CC CR6_REGNO)
2714 (unspec:CC [(gtu:CC (match_operand:VI2 1 "register_operand" "v")
2715 (match_operand:VI2 2 "register_operand" "v"))]
2716 UNSPEC_PREDICATE))
2717 (set (match_operand:VI2 0 "register_operand" "=v")
2718 (gtu:VI2 (match_dup 1)
2719 (match_dup 2)))]
2720 "<VI_unit>"
2721 "vcmpgtu<VI_char>. %0,%1,%2"
2722 [(set_attr "type" "veccmpfx")])
2723
;; Quadword unsigned greater-than compare with predicate, ISA 3.1.
2724 (define_insn "*altivec_vcmpgtut_p"
2725 [(set (reg:CC CR6_REGNO)
2726 (unspec:CC [(gtu:CC (match_operand:V1TI 1 "register_operand" "v")
2727 (match_operand:V1TI 2 "register_operand" "v"))]
2728 UNSPEC_PREDICATE))
2729 (set (match_operand:V1TI 0 "register_operand" "=v")
2730 (gtu:V1TI (match_dup 1)
2731 (match_dup 2)))]
2732 "TARGET_POWER10"
2733 "vcmpgtuq. %0,%1,%2"
2734 [(set_attr "type" "veccmpfx")])
2735
;; V4SF equality compare with predicate.
2736 (define_insn "*altivec_vcmpeqfp_p"
2737 [(set (reg:CC CR6_REGNO)
2738 (unspec:CC [(eq:CC (match_operand:V4SF 1 "register_operand" "v")
2739 (match_operand:V4SF 2 "register_operand" "v"))]
2740 UNSPEC_PREDICATE))
2741 (set (match_operand:V4SF 0 "register_operand" "=v")
2742 (eq:V4SF (match_dup 1)
2743 (match_dup 2)))]
2744 "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
2745 "vcmpeqfp. %0,%1,%2"
2746 [(set_attr "type" "veccmp")])
2747
;; V4SF greater-than compare with predicate.
2748 (define_insn "*altivec_vcmpgtfp_p"
2749 [(set (reg:CC CR6_REGNO)
2750 (unspec:CC [(gt:CC (match_operand:V4SF 1 "register_operand" "v")
2751 (match_operand:V4SF 2 "register_operand" "v"))]
2752 UNSPEC_PREDICATE))
2753 (set (match_operand:V4SF 0 "register_operand" "=v")
2754 (gt:V4SF (match_dup 1)
2755 (match_dup 2)))]
2756 "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
2757 "vcmpgtfp. %0,%1,%2"
2758 [(set_attr "type" "veccmp")])
2759
;; V4SF greater-than-or-equal compare with predicate.
2760 (define_insn "*altivec_vcmpgefp_p"
2761 [(set (reg:CC CR6_REGNO)
2762 (unspec:CC [(ge:CC (match_operand:V4SF 1 "register_operand" "v")
2763 (match_operand:V4SF 2 "register_operand" "v"))]
2764 UNSPEC_PREDICATE))
2765 (set (match_operand:V4SF 0 "register_operand" "=v")
2766 (ge:V4SF (match_dup 1)
2767 (match_dup 2)))]
2768 "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
2769 "vcmpgefp. %0,%1,%2"
2770 [(set_attr "type" "veccmp")])
2771
;; vcmpbfp.: bounds compare with predicate; both the vector result and CR6
;; update are modelled with UNSPEC_VCMPBFP since there is no RTL code for it.
2772 (define_insn "altivec_vcmpbfp_p"
2773 [(set (reg:CC CR6_REGNO)
2774 (unspec:CC [(match_operand:V4SF 1 "register_operand" "v")
2775 (match_operand:V4SF 2 "register_operand" "v")]
2776 UNSPEC_VCMPBFP))
2777 (set (match_operand:V4SF 0 "register_operand" "=v")
2778 (unspec:V4SF [(match_dup 1)
2779 (match_dup 2)]
2780 UNSPEC_VCMPBFP))]
2781 "VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)"
2782 "vcmpbfp. %0,%1,%2"
2783 [(set_attr "type" "veccmp")])
2784
;; Move to VSCR (vector status and control register).  unspec_volatile
;; because VSCR state is not otherwise modeled in RTL.
2785 (define_insn "altivec_mtvscr"
2786 [(set (reg:SI VSCR_REGNO)
2787 (unspec_volatile:SI
2788 [(match_operand:V4SI 0 "register_operand" "v")] UNSPECV_MTVSCR))]
2789 "TARGET_ALTIVEC"
2790 "mtvscr %0"
2791 [(set_attr "type" "vecsimple")])
2792
;; Move from VSCR into a vector register.
2793 (define_insn "altivec_mfvscr"
2794 [(set (match_operand:V8HI 0 "register_operand" "=v")
2795 (unspec_volatile:V8HI [(reg:SI VSCR_REGNO)] UNSPECV_MFVSCR))]
2796 "TARGET_ALTIVEC"
2797 "mfvscr %0"
2798 [(set_attr "type" "vecsimple")])
2799
;; Stop all data streams started by the dst family of prefetch hints.
2800 (define_insn "altivec_dssall"
2801 [(unspec_volatile [(const_int 0)] UNSPECV_DSSALL)]
2802 "TARGET_ALTIVEC"
2803 "dssall"
2804 [(set_attr "type" "vecsimple")])
2805
;; Stop the single data stream whose tag is immediate operand 0.
2806 (define_insn "altivec_dss"
2807 [(unspec_volatile [(match_operand:QI 0 "immediate_operand" "i")]
2808 UNSPECV_DSS)]
2809 "TARGET_ALTIVEC"
2810 "dss %0"
2811 [(set_attr "type" "vecsimple")])
2812
;; Data-stream-touch prefetch hints.  Operand 0 is the base address (the
;; condition insists it is Pmode), operand 1 the stream control word,
;; operand 2 the immediate stream tag.
2813 (define_insn "altivec_dst"
2814 [(unspec [(match_operand 0 "register_operand" "b")
2815 (match_operand:SI 1 "register_operand" "r")
2816 (match_operand:QI 2 "immediate_operand" "i")] UNSPEC_DST)]
2817 "TARGET_ALTIVEC && GET_MODE (operands[0]) == Pmode"
2818 "dst %0,%1,%2"
2819 [(set_attr "type" "vecsimple")])
2820
;; Transient variant of dst.
2821 (define_insn "altivec_dstt"
2822 [(unspec [(match_operand 0 "register_operand" "b")
2823 (match_operand:SI 1 "register_operand" "r")
2824 (match_operand:QI 2 "immediate_operand" "i")] UNSPEC_DSTT)]
2825 "TARGET_ALTIVEC && GET_MODE (operands[0]) == Pmode"
2826 "dstt %0,%1,%2"
2827 [(set_attr "type" "vecsimple")])
2828
;; Data-stream-touch-for-store hint.
2829 (define_insn "altivec_dstst"
2830 [(unspec [(match_operand 0 "register_operand" "b")
2831 (match_operand:SI 1 "register_operand" "r")
2832 (match_operand:QI 2 "immediate_operand" "i")] UNSPEC_DSTST)]
2833 "TARGET_ALTIVEC && GET_MODE (operands[0]) == Pmode"
2834 "dstst %0,%1,%2"
2835 [(set_attr "type" "vecsimple")])
2836
;; Transient data-stream-touch-for-store hint.
2837 (define_insn "altivec_dststt"
2838 [(unspec [(match_operand 0 "register_operand" "b")
2839 (match_operand:SI 1 "register_operand" "r")
2840 (match_operand:QI 2 "immediate_operand" "i")] UNSPEC_DSTSTT)]
2841 "TARGET_ALTIVEC && GET_MODE (operands[0]) == Pmode"
2842 "dststt %0,%1,%2"
2843 [(set_attr "type" "vecsimple")])
2844
;; Load-vector-for-shift-left permute mask.  Big endian uses the raw
;; lvsl result; little endian post-processes the mask with a vperm on a
;; {0,1,...,15} series so callers see the element order they expect.
2845 (define_expand "altivec_lvsl"
2846 [(use (match_operand:V16QI 0 "register_operand"))
2847 (use (match_operand:V16QI 1 "memory_operand"))]
2848 "TARGET_ALTIVEC"
2849 {
2850 if (BYTES_BIG_ENDIAN)
2851 emit_insn (gen_altivec_lvsl_direct (operands[0], operands[1]));
2852 else
2853 {
2854 rtx mask, constv, vperm;
2855 mask = gen_reg_rtx (V16QImode);
2856 emit_insn (gen_altivec_lvsl_direct (mask, operands[1]));
2857 constv = gen_const_vec_series (V16QImode, const0_rtx, const1_rtx);
2858 constv = force_reg (V16QImode, constv);
2859 vperm = gen_rtx_UNSPEC (V16QImode, gen_rtvec (3, mask, mask, constv),
2860 UNSPEC_VPERM);
2861 emit_insn (gen_rtx_SET (operands[0], vperm));
2862 }
2863 DONE;
2864 })
2865
;; lvsl with the address supplied as a single GPR (second RA field is 0).
2866 (define_insn "altivec_lvsl_reg_<mode>"
2867 [(set (match_operand:V16QI 0 "altivec_register_operand" "=v")
2868 (unspec:V16QI
2869 [(match_operand:GPR 1 "gpc_reg_operand" "b")]
2870 UNSPEC_LVSL_REG))]
2871 "TARGET_ALTIVEC"
2872 "lvsl %0,0,%1"
2873 [(set_attr "type" "vecload")])
2874
;; Raw lvsl; emits the instruction with no endian adjustment.
2875 (define_insn "altivec_lvsl_direct"
2876 [(set (match_operand:V16QI 0 "register_operand" "=v")
2877 (unspec:V16QI [(match_operand:V16QI 1 "memory_operand" "Z")]
2878 UNSPEC_LVSL))]
2879 "TARGET_ALTIVEC"
2880 "lvsl %0,%y1"
2881 [(set_attr "type" "vecload")])
2882
;; Load-vector-for-shift-right permute mask.  Mirrors altivec_lvsl:
;; direct lvsr on big endian, vperm-adjusted mask on little endian.
2883 (define_expand "altivec_lvsr"
2884 [(use (match_operand:V16QI 0 "altivec_register_operand"))
2885 (use (match_operand:V16QI 1 "memory_operand"))]
2886 "TARGET_ALTIVEC"
2887 {
2888 if (BYTES_BIG_ENDIAN)
2889 emit_insn (gen_altivec_lvsr_direct (operands[0], operands[1]));
2890 else
2891 {
2892 rtx mask, constv, vperm;
2893 mask = gen_reg_rtx (V16QImode);
2894 emit_insn (gen_altivec_lvsr_direct (mask, operands[1]));
2895 constv = gen_const_vec_series (V16QImode, const0_rtx, const1_rtx);
2896 constv = force_reg (V16QImode, constv);
2897 vperm = gen_rtx_UNSPEC (V16QImode, gen_rtvec (3, mask, mask, constv),
2898 UNSPEC_VPERM);
2899 emit_insn (gen_rtx_SET (operands[0], vperm));
2900 }
2901 DONE;
2902 })
2903
;; lvsr with the address supplied as a single GPR (second RA field is 0).
2904 (define_insn "altivec_lvsr_reg_<mode>"
2905 [(set (match_operand:V16QI 0 "altivec_register_operand" "=v")
2906 (unspec:V16QI
2907 [(match_operand:GPR 1 "gpc_reg_operand" "b")]
2908 UNSPEC_LVSR_REG))]
2909 "TARGET_ALTIVEC"
2910 "lvsr %0,0,%1"
2911 [(set_attr "type" "vecload")])
2912
;; Raw lvsr; emits the instruction with no endian adjustment.
2913 (define_insn "altivec_lvsr_direct"
2914 [(set (match_operand:V16QI 0 "register_operand" "=v")
2915 (unspec:V16QI [(match_operand:V16QI 1 "memory_operand" "Z")]
2916 UNSPEC_LVSR))]
2917 "TARGET_ALTIVEC"
2918 "lvsr %0,%y1"
2919 [(set_attr "type" "vecload")])
2920
;; Build the permute control vector used to realign an unaligned vector
;; load: negate the address and feed it to lvsr, yielding the mask that
;; vperm needs to merge two aligned loads into the misaligned value.
2921 (define_expand "build_vector_mask_for_load"
2922 [(set (match_operand:V16QI 0 "register_operand")
2923 (unspec:V16QI [(match_operand 1 "memory_operand")] UNSPEC_LVSR))]
2924 "TARGET_ALTIVEC"
2925 {
2926 rtx addr;
2927 rtx temp;
2928
2929 gcc_assert (MEM_P (operands[1]));
2930
2931 addr = XEXP (operands[1], 0);
2932 temp = gen_reg_rtx (GET_MODE (addr));
2933 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (GET_MODE (addr), addr)));
2934 emit_insn (gen_altivec_lvsr (operands[0],
2935 replace_equiv_address (operands[1], temp)));
2936 DONE;
2937 })
2938
2939 ;; Parallel some of the LVE* and STV*'s with unspecs because some have
2940 ;; identical rtl but different instructions-- and gcc gets confused.
2941
;; Load vector element indexed; the dummy unspec distinguishes it from a
;; plain vector load with the same RTL shape.
2942 (define_insn "altivec_lve<VI_char>x"
2943 [(parallel
2944 [(set (match_operand:VI 0 "register_operand" "=v")
2945 (match_operand:VI 1 "memory_operand" "Z"))
2946 (unspec [(const_int 0)] UNSPEC_LVE)])]
2947 "TARGET_ALTIVEC"
2948 "lve<VI_char>x %0,%y1"
2949 [(set_attr "type" "vecload")])
2950
;; V4SF variant of the element load; uses the word-sized lvewx.
2951 (define_insn "*altivec_lvesfx"
2952 [(parallel
2953 [(set (match_operand:V4SF 0 "register_operand" "=v")
2954 (match_operand:V4SF 1 "memory_operand" "Z"))
2955 (unspec [(const_int 0)] UNSPEC_LVE)])]
2956 "TARGET_ALTIVEC"
2957 "lvewx %0,%y1"
2958 [(set_attr "type" "vecload")])
2959
;; Load-vector-indexed LRU (lvxl), marked with an unspec so it is not
;; merged with the ordinary lvx patterns.
2960 (define_insn "altivec_lvxl_<mode>"
2961 [(parallel
2962 [(set (match_operand:VM2 0 "register_operand" "=v")
2963 (match_operand:VM2 1 "memory_operand" "Z"))
2964 (unspec [(const_int 0)] UNSPEC_SET_VSCR)])]
2965 "TARGET_ALTIVEC"
2966 "lvxl %0,%y1"
2967 [(set_attr "type" "vecload")])
2968
2969 ; This version of lvx is used only in cases where we need to force an lvx
2970 ; over any other load, and we don't care about losing CSE opportunities.
2971 ; Its primary use is for prologue register saves.
2972 (define_insn "altivec_lvx_<mode>_internal"
2973 [(parallel
2974 [(set (match_operand:VM2 0 "register_operand" "=v")
2975 (match_operand:VM2 1 "memory_operand" "Z"))
2976 (unspec [(const_int 0)] UNSPEC_LVX)])]
2977 "TARGET_ALTIVEC"
2978 "lvx %0,%y1"
2979 [(set_attr "type" "vecload")])
2980
2981 ; The following patterns embody what lvx should usually look like.
;; Dispatch on the address shape (reg+reg vs single reg) and on the
;; pointer width, so the AND -16 alignment masking below is explicit.
2982 (define_expand "altivec_lvx_<VM2:mode>"
2983 [(set (match_operand:VM2 0 "register_operand")
2984 (match_operand:VM2 1 "altivec_indexed_or_indirect_operand"))]
2985 "TARGET_ALTIVEC"
2986 {
2987 rtx addr = XEXP (operand1, 0);
2988 if (rs6000_sum_of_two_registers_p (addr))
2989 {
2990 rtx op1 = XEXP (addr, 0);
2991 rtx op2 = XEXP (addr, 1);
2992 if (TARGET_64BIT)
2993 emit_insn (gen_altivec_lvx_<VM2:mode>_2op_di (operand0, op1, op2));
2994 else
2995 emit_insn (gen_altivec_lvx_<VM2:mode>_2op_si (operand0, op1, op2));
2996 }
2997 else
2998 {
2999 if (TARGET_64BIT)
3000 emit_insn (gen_altivec_lvx_<VM2:mode>_1op_di (operand0, addr));
3001 else
3002 emit_insn (gen_altivec_lvx_<VM2:mode>_1op_si (operand0, addr));
3003 }
3004 DONE;
3005 })
3006
3007 ; The next two patterns embody what lvx should usually look like.
;; The (and ... -16) models lvx's implicit truncation of the effective
;; address to a 16-byte boundary.
3008 (define_insn "altivec_lvx_<VM2:mode>_2op_<P:mptrsize>"
3009 [(set (match_operand:VM2 0 "register_operand" "=v")
3010 (mem:VM2 (and:P (plus:P (match_operand:P 1 "register_operand" "b")
3011 (match_operand:P 2 "register_operand" "r"))
3012 (const_int -16))))]
3013 "TARGET_ALTIVEC"
3014 "lvx %0,%1,%2"
3015 [(set_attr "type" "vecload")])
3016
;; Single-register-address form of the above.
3017 (define_insn "altivec_lvx_<VM2:mode>_1op_<P:mptrsize>"
3018 [(set (match_operand:VM2 0 "register_operand" "=v")
3019 (mem:VM2 (and:P (match_operand:P 1 "register_operand" "r")
3020 (const_int -16))))]
3021 "TARGET_ALTIVEC"
3022 "lvx %0,0,%1"
3023 [(set_attr "type" "vecload")])
3024
3025 ; This version of stvx is used only in cases where we need to force an stvx
3026 ; over any other store, and we don't care about losing CSE opportunities.
3027 ; Its primary use is for epilogue register restores.
3028 (define_insn "altivec_stvx_<mode>_internal"
3029 [(parallel
3030 [(set (match_operand:VM2 0 "memory_operand" "=Z")
3031 (match_operand:VM2 1 "register_operand" "v"))
3032 (unspec [(const_int 0)] UNSPEC_STVX)])]
3033 "TARGET_ALTIVEC"
3034 "stvx %1,%y0"
3035 [(set_attr "type" "vecstore")])
3036
3037 ; The following patterns embody what stvx should usually look like.
;; Mirrors altivec_lvx_<VM2:mode>: pick the reg+reg or single-reg form
;; for the current pointer width.  Note operand 1 is the memory
;; destination and operand 0 the source register.
3038 (define_expand "altivec_stvx_<VM2:mode>"
3039 [(set (match_operand:VM2 1 "altivec_indexed_or_indirect_operand")
3040 (match_operand:VM2 0 "register_operand"))]
3041 "TARGET_ALTIVEC"
3042 {
3043 rtx addr = XEXP (operand1, 0);
3044 if (rs6000_sum_of_two_registers_p (addr))
3045 {
3046 rtx op1 = XEXP (addr, 0);
3047 rtx op2 = XEXP (addr, 1);
3048 if (TARGET_64BIT)
3049 emit_insn (gen_altivec_stvx_<VM2:mode>_2op_di (operand0, op1, op2));
3050 else
3051 emit_insn (gen_altivec_stvx_<VM2:mode>_2op_si (operand0, op1, op2));
3052 }
3053 else
3054 {
3055 if (TARGET_64BIT)
3056 emit_insn (gen_altivec_stvx_<VM2:mode>_1op_di (operand0, addr));
3057 else
3058 emit_insn (gen_altivec_stvx_<VM2:mode>_1op_si (operand0, addr));
3059 }
3060 DONE;
3061 })
3062
3063 ; The next two patterns embody what stvx should usually look like.
;; The (and ... -16) models stvx's implicit truncation of the effective
;; address to a 16-byte boundary.
3064 (define_insn "altivec_stvx_<VM2:mode>_2op_<P:mptrsize>"
3065 [(set (mem:VM2 (and:P (plus:P (match_operand:P 1 "register_operand" "b")
3066 (match_operand:P 2 "register_operand" "r"))
3067 (const_int -16)))
3068 (match_operand:VM2 0 "register_operand" "v"))]
3069 "TARGET_ALTIVEC"
3070 "stvx %0,%1,%2"
3071 [(set_attr "type" "vecstore")])
3072
;; Single-register-address form of the above.
3073 (define_insn "altivec_stvx_<VM2:mode>_1op_<P:mptrsize>"
3074 [(set (mem:VM2 (and:P (match_operand:P 1 "register_operand" "r")
3075 (const_int -16)))
3076 (match_operand:VM2 0 "register_operand" "v"))]
3077 "TARGET_ALTIVEC"
3078 "stvx %0,0,%1"
3079 [(set_attr "type" "vecstore")])
3080
;; Store-vector-indexed LRU (stvxl); unspec keeps it distinct from the
;; ordinary stvx patterns with identical RTL.
3081 (define_insn "altivec_stvxl_<mode>"
3082 [(parallel
3083 [(set (match_operand:VM2 0 "memory_operand" "=Z")
3084 (match_operand:VM2 1 "register_operand" "v"))
3085 (unspec [(const_int 0)] UNSPEC_STVXL)])]
3086 "TARGET_ALTIVEC"
3087 "stvxl %1,%y0"
3088 [(set_attr "type" "vecstore")])
3089
;; Store a single vector element to memory.
3090 (define_insn "altivec_stve<VI_char>x"
3091 [(set (match_operand:<VI_scalar> 0 "memory_operand" "=Z")
3092 (unspec:<VI_scalar> [(match_operand:VI 1 "register_operand" "v")] UNSPEC_STVE))]
3093 "TARGET_ALTIVEC"
3094 "stve<VI_char>x %1,%y0"
3095 [(set_attr "type" "vecstore")])
3096
;; V4SF variant of the element store; uses the word-sized stvewx.
3097 (define_insn "*altivec_stvesfx"
3098 [(set (match_operand:SF 0 "memory_operand" "=Z")
3099 (unspec:SF [(match_operand:V4SF 1 "register_operand" "v")] UNSPEC_STVE))]
3100 "TARGET_ALTIVEC"
3101 "stvewx %1,%y0"
3102 [(set_attr "type" "vecstore")])
3103
3104 ;; Generate doublee
3105 ;; signed int/float to double convert words 0 and 2
;; The xvcv*dp instructions convert BE-numbered words 0 and 2, so on
;; little endian we first rotate the input one word left to move the
;; wanted elements into those slots.
3106 (define_expand "doublee<mode>2"
3107 [(set (match_operand:V2DF 0 "register_operand" "=v")
3108 (match_operand:VSX_W 1 "register_operand" "v"))]
3109 "TARGET_VSX"
3110 {
3111 machine_mode op_mode = GET_MODE (operands[1]);
3112
3113 if (BYTES_BIG_ENDIAN)
3114 {
3115 /* Big endian word numbering for words in operand is 0 1 2 3.
3116 Input words 0 and 2 are where they need to be. */
3117 emit_insn (gen_vsx_xvcv<VS_sxwsp>dp (operands[0], operands[1]));
3118 }
3119 else
3120 {
3121 /* Little endian word numbering for operand is 3 2 1 0.
3122 take (operand[1] operand[1]) and shift left one word
3123 3 2 1 0 3 2 1 0 => 2 1 0 3
3124 Input words 2 and 0 are now where they need to be for the
3125 conversion. */
3126 rtx rtx_tmp;
3127 rtx rtx_val = GEN_INT (1);
3128
3129 rtx_tmp = gen_reg_rtx (op_mode);
3130 emit_insn (gen_vsx_xxsldwi_<mode> (rtx_tmp, operands[1],
3131 operands[1], rtx_val));
3132 emit_insn (gen_vsx_xvcv<VS_sxwsp>dp (operands[0], rtx_tmp));
3133 }
3134 DONE;
3135 }
3136 [(set_attr "type" "veccomplex")])
3137
3138 ;; Generate unsdoublee
3139 ;; unsigned int to double convert words 0 and 2
;; Unsigned twin of doublee<mode>2, fixed at V4SI input and xvcvuxwdp.
3140 (define_expand "unsdoubleev4si2"
3141 [(set (match_operand:V2DF 0 "register_operand" "=v")
3142 (match_operand:V4SI 1 "register_operand" "v"))]
3143 "TARGET_VSX"
3144 {
3145 if (BYTES_BIG_ENDIAN)
3146 {
3147 /* Big endian word numbering for words in operand is 0 1 2 3.
3148 Input words 0 and 2 are where they need to be. */
3149 emit_insn (gen_vsx_xvcvuxwdp (operands[0], operands[1]));
3150 }
3151 else
3152 {
3153 /* Little endian word numbering for operand is 3 2 1 0.
3154 take (operand[1] operand[1]) and shift left one word
3155 3 2 1 0 3 2 1 0 => 2 1 0 3
3156 Input words 2 and 0 are now where they need to be for the
3157 conversion. */
3158 rtx rtx_tmp;
3159 rtx rtx_val = GEN_INT (1);
3160
3161 rtx_tmp = gen_reg_rtx (V4SImode);
3162 emit_insn (gen_vsx_xxsldwi_v4si (rtx_tmp, operands[1],
3163 operands[1], rtx_val));
3164 emit_insn (gen_vsx_xvcvuxwdp (operands[0], rtx_tmp));
3165 }
3166 DONE;
3167 }
3168 [(set_attr "type" "veccomplex")])
3169
3170 ;; Generate doubleov
3171 ;; signed int/float to double convert words 1 and 3
;; Converse of doublee<mode>2: the odd BE-numbered words are wanted, so
;; big endian needs the one-word rotate and little endian does not.
3172 (define_expand "doubleo<mode>2"
3173 [(set (match_operand:V2DF 0 "register_operand" "=v")
3174 (match_operand:VSX_W 1 "register_operand" "v"))]
3175 "TARGET_VSX"
3176 {
3177 machine_mode op_mode = GET_MODE (operands[1]);
3178
3179 if (BYTES_BIG_ENDIAN)
3180 {
3181 /* Big endian word numbering for words in operand is 0 1 2 3.
3182 take (operand[1] operand[1]) and shift left one word
3183 0 1 2 3 0 1 2 3 => 1 2 3 0
3184 Input words 1 and 3 are now where they need to be for the
3185 conversion. */
3186 rtx rtx_tmp;
3187 rtx rtx_val = GEN_INT (1);
3188
3189 rtx_tmp = gen_reg_rtx (op_mode);
3190 emit_insn (gen_vsx_xxsldwi_<mode> (rtx_tmp, operands[1],
3191 operands[1], rtx_val));
3192 emit_insn (gen_vsx_xvcv<VS_sxwsp>dp (operands[0], rtx_tmp));
3193 }
3194 else
3195 {
3196 /* Little endian word numbering for operand is 3 2 1 0.
3197 Input words 3 and 1 are where they need to be. */
3198 emit_insn (gen_vsx_xvcv<VS_sxwsp>dp (operands[0], operands[1]));
3199 }
3200 DONE;
3201 }
3202 [(set_attr "type" "veccomplex")])
3203
3204 ;; Generate unsdoubleov
3205 ;; unsigned int to double convert words 1 and 3
;; Unsigned twin of doubleo<mode>2, fixed at V4SI input and xvcvuxwdp.
3206 (define_expand "unsdoubleov4si2"
3207 [(set (match_operand:V2DF 0 "register_operand" "=v")
3208 (match_operand:V4SI 1 "register_operand" "v"))]
3209 "TARGET_VSX"
3210 {
3211 if (BYTES_BIG_ENDIAN)
3212 {
3213 /* Big endian word numbering for words in operand is 0 1 2 3.
3214 take (operand[1] operand[1]) and shift left one word
3215 0 1 2 3 0 1 2 3 => 1 2 3 0
3216 Input words 1 and 3 are now where they need to be for the
3217 conversion. */
3218 rtx rtx_tmp;
3219 rtx rtx_val = GEN_INT (1);
3220
3221 rtx_tmp = gen_reg_rtx (V4SImode);
3222 emit_insn (gen_vsx_xxsldwi_v4si (rtx_tmp, operands[1],
3223 operands[1], rtx_val));
3224 emit_insn (gen_vsx_xvcvuxwdp (operands[0], rtx_tmp));
3225 }
3226 else
3227 {
3228 /* Want to convert the words 1 and 3.
3229 Little endian word numbering for operand is 3 2 1 0.
3230 Input words 3 and 1 are where they need to be. */
3231 emit_insn (gen_vsx_xvcvuxwdp (operands[0], operands[1]));
3232 }
3233 DONE;
3234 }
3235 [(set_attr "type" "veccomplex")])
3236
3237 ;; Generate doublehv
3238 ;; signed int/float to double convert words 0 and 1
;; Two xxsldwi rotates place BE-numbered words 0 and 1 into the slots
;; the xvcv*dp conversion reads.
3239 (define_expand "doubleh<mode>2"
3240 [(set (match_operand:V2DF 0 "register_operand" "=v")
3241 (match_operand:VSX_W 1 "register_operand" "v"))]
3242 "TARGET_VSX"
3243 {
3244 rtx rtx_tmp;
3245 rtx rtx_val;
3246
3247 machine_mode op_mode = GET_MODE (operands[1]);
3248 rtx_tmp = gen_reg_rtx (op_mode);
3249
3250 if (BYTES_BIG_ENDIAN)
3251 {
3252 /* Big endian word numbering for words in operand is 0 1 2 3.
3253 Shift operand left one word, rtx_tmp word order is now 1 2 3 0.
3254 take (rtx_tmp operand[1]) and shift left three words
3255 1 2 3 0 0 1 2 3 => 0 0 1 2
3256 Input words 0 and 1 are now where they need to be for the
3257 conversion. */
3258 rtx_val = GEN_INT (1);
3259 emit_insn (gen_vsx_xxsldwi_<mode> (rtx_tmp, operands[1],
3260 operands[1], rtx_val));
3261
3262 rtx_val = GEN_INT (3);
3263 emit_insn (gen_vsx_xxsldwi_<mode> (rtx_tmp, rtx_tmp,
3264 operands[1], rtx_val));
3265 emit_insn (gen_vsx_xvcv<VS_sxwsp>dp (operands[0], rtx_tmp));
3266 }
3267 else
3268 {
3269 /* Little endian word numbering for operand is 3 2 1 0.
3270 Shift operand left three words, rtx_tmp word order is now 0 3 2 1.
3271 take (operand[1] rtx_tmp) and shift left two words
3272 3 2 1 0 0 3 2 1 => 1 0 0 3
3273 Input words 0 and 1 are now where they need to be for the
3274 conversion. */
3275 rtx_val = GEN_INT (3);
3276 emit_insn (gen_vsx_xxsldwi_<mode> (rtx_tmp, operands[1],
3277 operands[1], rtx_val));
3278
3279 rtx_val = GEN_INT (2);
3280 emit_insn (gen_vsx_xxsldwi_<mode> (rtx_tmp, operands[1],
3281 rtx_tmp, rtx_val));
3282 emit_insn (gen_vsx_xvcv<VS_sxwsp>dp (operands[0], rtx_tmp));
3283 }
3284 DONE;
3285 }
3286 [(set_attr "type" "veccomplex")])
3287
3288 ;; Generate unsdoublehv
3289 ;; unsigned int to double convert words 0 and 1
;; Unsigned twin of doubleh<mode>2, fixed at V4SI input and xvcvuxwdp.
;; Two xxsldwi rotates place BE-numbered words 0 and 1 into the slots
;; the conversion reads.
3290 (define_expand "unsdoublehv4si2"
3291 [(set (match_operand:V2DF 0 "register_operand" "=v")
3292 (match_operand:V4SI 1 "register_operand" "v"))]
3293 "TARGET_VSX"
3294 {
3295 rtx rtx_tmp = gen_reg_rtx (V4SImode);
3296 rtx rtx_val;
3297
3298 if (BYTES_BIG_ENDIAN)
3299 {
3300 /* Big endian word numbering for words in operand is 0 1 2 3.
3301 Shift operand left one word, rtx_tmp word order is now 1 2 3 0.
3302 take (rtx_tmp operand[1]) and shift left three words
3303 1 2 3 0 0 1 2 3 => 0 0 1 2
3304 Input words 0 and 1 are now where they need to be for the
3305 conversion. */
3306 rtx_val = GEN_INT (1);
3307 emit_insn (gen_vsx_xxsldwi_v4si (rtx_tmp, operands[1],
3308 operands[1], rtx_val));
3309
3310 rtx_val = GEN_INT (3);
3311 emit_insn (gen_vsx_xxsldwi_v4si (rtx_tmp, rtx_tmp,
3312 operands[1], rtx_val));
3313 emit_insn (gen_vsx_xvcvuxwdp (operands[0], rtx_tmp));
3314 }
3315 else
3316 {
3317 /* Little endian word numbering for operand is 3 2 1 0.
3318 Shift operand left three words, rtx_tmp word order is now 0 3 2 1.
3319 take (operand[1] rtx_tmp) and shift left two words
3320 3 2 1 0 0 3 2 1 => 1 0 0 3
3321 Input words 1 and 0 are now where they need to be for the
3322 conversion. */
3323 rtx_val = GEN_INT (3);
3326 emit_insn (gen_vsx_xxsldwi_v4si (rtx_tmp, operands[1],
3327 operands[1], rtx_val));
3328
3329 rtx_val = GEN_INT (2);
3330 emit_insn (gen_vsx_xxsldwi_v4si (rtx_tmp, operands[1],
3331 rtx_tmp, rtx_val));
3332 emit_insn (gen_vsx_xvcvuxwdp (operands[0], rtx_tmp));
3333 }
3334 DONE;
3335 }
3336 [(set_attr "type" "veccomplex")])
3337
3338 ;; Generate doublelv
3339 ;; signed int/float to double convert words 2 and 3
;; Two xxsldwi rotates place BE-numbered words 2 and 3 into the slots
;; the xvcv*dp conversion reads.
3340 (define_expand "doublel<mode>2"
3341 [(set (match_operand:V2DF 0 "register_operand" "=v")
3342 (match_operand:VSX_W 1 "register_operand" "v"))]
3343 "TARGET_VSX"
3344 {
3345 rtx rtx_tmp;
3346 rtx rtx_val;
3347
3348 machine_mode op_mode = GET_MODE (operands[1]);
3349 rtx_tmp = gen_reg_rtx (op_mode);
3350
3351 if (BYTES_BIG_ENDIAN)
3352 {
3353 /* Big endian word numbering for operand is 0 1 2 3.
3354 Shift operand left three words, rtx_tmp word order is now 3 0 1 2.
3355 take (operand[1] rtx_tmp) and shift left two words
3356 0 1 2 3 3 0 1 2 => 2 3 3 0
3357 now use convert instruction to convert word 2 and 3 in the
3358 input vector. */
3359 rtx_val = GEN_INT (3);
3360 emit_insn (gen_vsx_xxsldwi_<mode> (rtx_tmp, operands[1],
3361 operands[1], rtx_val));
3362
3363 rtx_val = GEN_INT (2);
3364 emit_insn (gen_vsx_xxsldwi_<mode> (rtx_tmp, operands[1],
3365 rtx_tmp, rtx_val));
3366 emit_insn (gen_vsx_xvcv<VS_sxwsp>dp (operands[0], rtx_tmp));
3367 }
3368 else
3369 {
3370 /* Little endian word numbering for operand is 3 2 1 0.
3371 Shift operand left one word, rtx_tmp word order is now 2 1 0 3.
3372 take (rtx_tmp operand[1]) and shift left three words
3373 2 1 0 3 3 2 1 0 => 3 3 2 1
3374 now use convert instruction to convert word 3 and 2 in the
3375 input vector. */
3376 rtx_val = GEN_INT (1);
3377 emit_insn (gen_vsx_xxsldwi_<mode> (rtx_tmp, operands[1],
3378 operands[1], rtx_val));
3379
3380 rtx_val = GEN_INT (3);
3381 emit_insn (gen_vsx_xxsldwi_<mode> (rtx_tmp, rtx_tmp,
3382 operands[1], rtx_val));
3383 emit_insn (gen_vsx_xvcv<VS_sxwsp>dp (operands[0], rtx_tmp));
3384 }
3385 DONE;
3386 }
3387 [(set_attr "type" "veccomplex")])
3388
3389 ;; Generate unsdoublelv
3390 ;; unsigned int to double convert words 2 and 3
;; Unsigned twin of doublel<mode>2, fixed at V4SI input and xvcvuxwdp.
3391 (define_expand "unsdoublelv4si2"
3392 [(set (match_operand:V2DF 0 "register_operand" "=v")
3393 (match_operand:V4SI 1 "register_operand" "v"))]
3394 "TARGET_VSX"
3395 {
3396 rtx rtx_tmp = gen_reg_rtx (V4SImode);
3397 rtx rtx_val;
3398
3399 if (BYTES_BIG_ENDIAN)
3400 {
3401 /* Big endian word numbering for operand is 0 1 2 3.
3402 Shift operand left three words, rtx_tmp word order is now 3 0 1 2.
3403 take (operand[1] rtx_tmp) and shift left two words
3404 0 1 2 3 3 0 1 2 => 2 3 3 0
3405 now use convert instruction to convert word 2 and 3 in the
3406 input vector. */
3407 rtx_val = GEN_INT (3);
3408 emit_insn (gen_vsx_xxsldwi_v4si (rtx_tmp, operands[1],
3409 operands[1], rtx_val));
3410
3411 rtx_val = GEN_INT (2);
3412 emit_insn (gen_vsx_xxsldwi_v4si (rtx_tmp, operands[1],
3413 rtx_tmp, rtx_val));
3414 emit_insn (gen_vsx_xvcvuxwdp (operands[0], rtx_tmp));
3415 }
3416 else
3417 {
3418 /* Little endian word numbering for operand is 3 2 1 0.
3419 Shift operand left one word, rtx_tmp word order is now 2 1 0 3.
3420 take (rtx_tmp operand[1]) and shift left three words
3421 2 1 0 3 3 2 1 0 => 3 3 2 1
3422 now use convert instruction to convert word 3 and 2 in the
3423 input vector. */
3424 rtx_val = GEN_INT (1);
3425 emit_insn (gen_vsx_xxsldwi_v4si (rtx_tmp,
3426 operands[1], operands[1], rtx_val));
3427
3428 rtx_val = GEN_INT (3);
3429 emit_insn (gen_vsx_xxsldwi_v4si (rtx_tmp, rtx_tmp,
3430 operands[1], rtx_val));
3431 emit_insn (gen_vsx_xvcvuxwdp (operands[0], rtx_tmp));
3432 }
3433 DONE;
3434 }
3435 [(set_attr "type" "veccomplex")])
3436
3437 ;; Generate two vector F32 converted to packed vector I16 vector
;; Each input is converted to unsigned words with vctuxs (saturating),
;; then the two word vectors are packed to halfwords with vpkswss.
3438 (define_expand "convert_4f32_8i16"
3439 [(set (match_operand:V8HI 0 "register_operand" "=v")
3440 (unspec:V8HI [(match_operand:V4SF 1 "register_operand" "v")
3441 (match_operand:V4SF 2 "register_operand" "v")]
3442 UNSPEC_CONVERT_4F32_8I16))]
3443 "TARGET_P9_VECTOR"
3444 {
3445 rtx rtx_tmp_hi = gen_reg_rtx (V4SImode);
3446 rtx rtx_tmp_lo = gen_reg_rtx (V4SImode);
3447
3448 emit_insn (gen_altivec_vctuxs (rtx_tmp_hi, operands[1], const0_rtx));
3449 emit_insn (gen_altivec_vctuxs (rtx_tmp_lo, operands[2], const0_rtx));
3450 emit_insn (gen_altivec_vpkswss (operands[0], rtx_tmp_hi, rtx_tmp_lo));
3451 DONE;
3452 })
3453
3454
3455 ;; Convert two vector F32 to packed vector F16.
3456 ;; This builtin packs 32-bit floating-point values into a packed
3457 ;; 16-bit floating point values (stored in 16bit integer type).
3458 ;; (vector unsigned short r = vec_pack_to_short_fp32 (a, b);
3459 ;; The expected codegen for this builtin is
3460 ;; xvcvsphp t, a
3461 ;; xvcvsphp u, b
3462 ;; if (little endian)
3463 ;; vpkuwum r, t, u
3464 ;; else
3465 ;; vpkuwum r, u, t
3466
3467 (define_expand "convert_4f32_8f16"
3468 [(set (match_operand:V8HI 0 "register_operand" "=v")
3469 (unspec:V8HI [(match_operand:V4SF 1 "register_operand" "v")
3470 (match_operand:V4SF 2 "register_operand" "v")]
3471 UNSPEC_CONVERT_4F32_8F16))]
3472 "TARGET_P9_VECTOR"
3473 {
3474 rtx rtx_tmp_hi = gen_reg_rtx (V4SImode);
3475 rtx rtx_tmp_lo = gen_reg_rtx (V4SImode);
3476
3477 emit_insn (gen_vsx_xvcvsphp (rtx_tmp_hi, operands[1]));
3478 emit_insn (gen_vsx_xvcvsphp (rtx_tmp_lo, operands[2]));
3479 if (!BYTES_BIG_ENDIAN)
3480 emit_insn (gen_altivec_vpkuwum (operands[0], rtx_tmp_hi, rtx_tmp_lo))/* LE: operand order per comment above */;
3481 else
3482 emit_insn (gen_altivec_vpkuwum (operands[0], rtx_tmp_lo, rtx_tmp_hi));
3483 DONE;
3484 })
3485
3486
3487 ;; Generate
3488 ;; xxlxor/vxor SCRATCH0,SCRATCH0,SCRATCH0
3489 ;; vsubu?m SCRATCH2,SCRATCH1,%1
3490 ;; vmaxs? %0,%1,SCRATCH2"
3491 (define_expand "abs<mode>2"
3492 [(set (match_dup 2) (match_dup 3))
3493 (set (match_dup 4)
3494 (minus:VI2 (match_dup 2)
3495 (match_operand:VI2 1 "register_operand" "v")))
3496 (set (match_operand:VI2 0 "register_operand" "=v")
3497 (smax:VI2 (match_dup 1) (match_dup 4)))]
3498 "<VI_unit>"
3499 {
3500 operands[2] = gen_reg_rtx (<MODE>mode);
3501 operands[3] = CONST0_RTX (<MODE>mode);
3502 operands[4] = gen_reg_rtx (<MODE>mode);
3503 })
3504
3505 ;; Generate
3506 ;; vspltisw SCRATCH1,0
3507 ;; vsubu?m SCRATCH2,SCRATCH1,%1
3508 ;; vmins? %0,%1,SCRATCH2"
3509 (define_expand "nabs<mode>2"
3510 [(set (match_dup 2) (match_dup 3))
3511 (set (match_dup 4)
3512 (minus:VI2 (match_dup 2)
3513 (match_operand:VI2 1 "register_operand" "v")))
3514 (set (match_operand:VI2 0 "register_operand" "=v")
3515 (smin:VI2 (match_dup 1) (match_dup 4)))]
3516 "<VI_unit>"
3517 {
3518 operands[2] = gen_reg_rtx (<MODE>mode);
3519 operands[3] = CONST0_RTX (<MODE>mode);
3520 operands[4] = gen_reg_rtx (<MODE>mode);
3521 })
3522
3523 ;; Generate
3524 ;; vspltisw SCRATCH1,-1
3525 ;; vslw SCRATCH2,SCRATCH1,SCRATCH1
3526 ;; vandc %0,%1,SCRATCH2
;; Float abs: build the 0x80000000 sign-bit mask (-1 shifted left by 31,
;; since the shift count is taken modulo 32) and clear it with vandc.
3527 (define_expand "altivec_absv4sf2"
3528 [(set (match_dup 2)
3529 (vec_duplicate:V4SI (const_int -1)))
3530 (set (match_dup 3)
3531 (ashift:V4SI (match_dup 2) (match_dup 2)))
3532 (set (match_operand:V4SF 0 "register_operand" "=v")
3533 (and:V4SF (not:V4SF (subreg:V4SF (match_dup 3) 0))
3534 (match_operand:V4SF 1 "register_operand" "v")))]
3535 "TARGET_ALTIVEC"
3536 {
3537 operands[2] = gen_reg_rtx (V4SImode);
3538 operands[3] = gen_reg_rtx (V4SImode);
3539 })
3540
3541 ;; Generate
3542 ;; vspltis? SCRATCH0,0
3543 ;; vsubs?s SCRATCH2,SCRATCH1,%1
3544 ;; vmaxs? %0,%1,SCRATCH2"
;; Saturating abs: the ss_minus clamps 0 - INT_MIN to INT_MAX and sets
;; the VSCR saturation bit, hence the parallel with UNSPEC_SET_VSCR.
3545 (define_expand "altivec_abss_<mode>"
3546 [(set (match_dup 2) (vec_duplicate:VI (const_int 0)))
3547 (parallel [(set (match_dup 3)
3548 (ss_minus:VI (match_dup 2)
3549 (match_operand:VI 1 "register_operand" "v")))
3550 (set (reg:SI VSCR_REGNO)
3551 (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))])
3552 (set (match_operand:VI 0 "register_operand" "=v")
3553 (smax:VI (match_dup 1) (match_dup 3)))]
3554 "TARGET_ALTIVEC"
3555 {
3556 operands[2] = gen_reg_rtx (GET_MODE (operands[0]));
3557 operands[3] = gen_reg_rtx (GET_MODE (operands[0]));
3558 })
3559
;; Horizontal add of all elements to a scalar: partial sums with
;; vsum4s?s, full reduction with vsumsws, then extract the element
;; holding the result (last element on BE, element 0 on LE).
3560 (define_expand "reduc_plus_scal_<mode>"
3561 [(set (match_operand:<VI_scalar> 0 "register_operand" "=v")
3562 (unspec:VIshort [(match_operand:VIshort 1 "register_operand" "v")]
3563 UNSPEC_REDUC_PLUS))]
3564 "TARGET_ALTIVEC"
3565 {
3566 rtx vzero = gen_reg_rtx (V4SImode);
3567 rtx vtmp1 = gen_reg_rtx (V4SImode);
3568 rtx vtmp2 = gen_reg_rtx (<MODE>mode);
3569 rtx dest = gen_lowpart (V4SImode, vtmp2);
3570 int elt = BYTES_BIG_ENDIAN ? GET_MODE_NUNITS (<MODE>mode) - 1 : 0;
3571
3572 emit_insn (gen_altivec_vspltisw (vzero, const0_rtx));
3573 emit_insn (gen_altivec_vsum4s<VI_char>s (vtmp1, operands[1], vzero));
3574 emit_insn (gen_altivec_vsumsws_direct (dest, vtmp1, vzero));
3575 rs6000_expand_vector_extract (operands[0], vtmp2, GEN_INT (elt));
3576 DONE;
3577 })
3578
;; ISA 3.0 single-instruction vector negate (vnegw/vnegd).
3579 (define_insn "*p9_neg<mode>2"
3580 [(set (match_operand:VNEG 0 "altivec_register_operand" "=v")
3581 (neg:VNEG (match_operand:VNEG 1 "altivec_register_operand" "v")))]
3582 "TARGET_P9_VECTOR"
3583 "vneg<VI_char> %0,%1"
3584 [(set_attr "type" "vecsimple")])
3585
;; Vector negate.  Falls back to 0 - x unless the P9 vneg insn above
;; can handle this mode (V4SI/V2DI only).
3586 (define_expand "neg<mode>2"
3587 [(set (match_operand:VI2 0 "register_operand")
3588 (neg:VI2 (match_operand:VI2 1 "register_operand")))]
3589 "<VI_unit>"
3590 {
3591 if (!TARGET_P9_VECTOR || (<MODE>mode != V4SImode && <MODE>mode != V2DImode))
3592 {
3593 rtx vzero;
3594
3595 vzero = gen_reg_rtx (GET_MODE (operands[0]));
3596 emit_move_insn (vzero, CONST0_RTX (<MODE>mode));
3597 emit_insn (gen_sub<mode>3 (operands[0], vzero, operands[1]));
3598 DONE;
3599 }
3600 })
3601
;; Unsigned dot product: multiply-sum of operands 1 and 2 accumulated
;; into operand 3, via vmsumubm/vmsumuhm.
3602 (define_expand "udot_prod<mode>"
3603 [(set (match_operand:V4SI 0 "register_operand" "=v")
3604 (plus:V4SI (match_operand:V4SI 3 "register_operand" "v")
3605 (unspec:V4SI [(match_operand:VIshort 1 "register_operand" "v")
3606 (match_operand:VIshort 2 "register_operand" "v")]
3607 UNSPEC_VMSUMU)))]
3608 "TARGET_ALTIVEC"
3609 {
3610 emit_insn (gen_altivec_vmsumu<VI_char>m (operands[0], operands[1], operands[2], operands[3]));
3611 DONE;
3612 })
3613
;; Signed halfword dot product via vmsumshm.
3614 (define_expand "sdot_prodv8hi"
3615 [(set (match_operand:V4SI 0 "register_operand" "=v")
3616 (plus:V4SI (match_operand:V4SI 3 "register_operand" "v")
3617 (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
3618 (match_operand:V8HI 2 "register_operand" "v")]
3619 UNSPEC_VMSUMSHM)))]
3620 "TARGET_ALTIVEC"
3621 {
3622 emit_insn (gen_altivec_vmsumshm (operands[0], operands[1], operands[2], operands[3]));
3623 DONE;
3624 })
3625
;; Widening unsigned sum: implemented as a multiply-sum against an
;; all-ones vector, accumulating into operand 2.
3626 (define_expand "widen_usum<mode>3"
3627 [(set (match_operand:V4SI 0 "register_operand" "=v")
3628 (plus:V4SI (match_operand:V4SI 2 "register_operand" "v")
3629 (unspec:V4SI [(match_operand:VIshort 1 "register_operand" "v")]
3630 UNSPEC_VMSUMU)))]
3631 "TARGET_ALTIVEC"
3632 {
3633 rtx vones = gen_reg_rtx (GET_MODE (operands[1]));
3634
3635 emit_insn (gen_altivec_vspltis<VI_char> (vones, const1_rtx));
3636 emit_insn (gen_altivec_vmsumu<VI_char>m (operands[0], operands[1], vones, operands[2]));
3637 DONE;
3638 })
3639
;; Widening signed byte sum: vmsummbm against an all-ones vector.
3640 (define_expand "widen_ssumv16qi3"
3641 [(set (match_operand:V4SI 0 "register_operand" "=v")
3642 (plus:V4SI (match_operand:V4SI 2 "register_operand" "v")
3643 (unspec:V4SI [(match_operand:V16QI 1 "register_operand" "v")]
3644 UNSPEC_VMSUMM)))]
3645 "TARGET_ALTIVEC"
3646 {
3647 rtx vones = gen_reg_rtx (V16QImode);
3648
3649 emit_insn (gen_altivec_vspltisb (vones, const1_rtx));
3650 emit_insn (gen_altivec_vmsummbm (operands[0], operands[1], vones, operands[2]));
3651 DONE;
3652 })
3653
;; Widening signed halfword sum: vmsumshm against an all-ones vector.
3654 (define_expand "widen_ssumv8hi3"
3655 [(set (match_operand:V4SI 0 "register_operand" "=v")
3656 (plus:V4SI (match_operand:V4SI 2 "register_operand" "v")
3657 (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")]
3658 UNSPEC_VMSUMSHM)))]
3659 "TARGET_ALTIVEC"
3660 {
3661 rtx vones = gen_reg_rtx (V8HImode);
3662
3663 emit_insn (gen_altivec_vspltish (vones, const1_rtx));
3664 emit_insn (gen_altivec_vmsumshm (operands[0], operands[1], vones, operands[2]))/* accumulate into op2 */;
3665 DONE;
3666 })
3667
;; Signed unpack of the high half; expands directly to vupkhs*.
3668 (define_expand "vec_unpacks_hi_<VP_small_lc>"
3669 [(set (match_operand:VP 0 "register_operand" "=v")
3670 (unspec:VP [(match_operand:<VP_small> 1 "register_operand" "v")]
3671 UNSPEC_VUNPACK_HI_SIGN_DIRECT))]
3672 "<VI_unit>"
3673 "")
3674
;; Signed unpack of the low half; expands directly to vupkls*.
3675 (define_expand "vec_unpacks_lo_<VP_small_lc>"
3676 [(set (match_operand:VP 0 "register_operand" "=v")
3677 (unspec:VP [(match_operand:<VP_small> 1 "register_operand" "v")]
3678 UNSPEC_VUNPACK_LO_SIGN_DIRECT))]
3679 "<VI_unit>"
3680 "")
3681
;; Permute producing a V4SI result from V8HI input; on P9 the xxperm
;; alternative avoids tying the result to operand 2.
3682 (define_insn "vperm_v8hiv4si"
3683 [(set (match_operand:V4SI 0 "register_operand" "=?wa,v")
3684 (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "wa,v")
3685 (match_operand:V4SI 2 "register_operand" "0,v")
3686 (match_operand:V16QI 3 "register_operand" "wa,v")]
3687 UNSPEC_VPERMSI))]
3688 "TARGET_ALTIVEC"
3689 "@
3690 xxperm %x0,%x1,%x3
3691 vperm %0,%1,%2,%3"
3692 [(set_attr "type" "vecperm")
3693 (set_attr "isa" "p9v,*")])
3694
;; Permute producing a V8HI result from V16QI input; same alternatives.
3695 (define_insn "vperm_v16qiv8hi"
3696 [(set (match_operand:V8HI 0 "register_operand" "=?wa,v")
3697 (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "wa,v")
3698 (match_operand:V8HI 2 "register_operand" "0,v")
3699 (match_operand:V16QI 3 "register_operand" "wa,v")]
3700 UNSPEC_VPERMHI))]
3701 "TARGET_ALTIVEC"
3702 "@
3703 xxperm %x0,%x1,%x3
3704 vperm %0,%1,%2,%3"
3705 [(set_attr "type" "vecperm")
3706 (set_attr "isa" "p9v,*")])
3707
;; Unsigned unpack of the high half: zero-extend by merging with a zero
;; vector (vmrgh on BE, vmrgl with swapped operands on LE).
3708 (define_expand "vec_unpacku_hi_<VP_small_lc>"
3709 [(set (match_operand:VP 0 "register_operand" "=v")
3710 (unspec:VP [(match_operand:<VP_small> 1 "register_operand" "v")]
3711 UNSPEC_VUPKHU))]
3712 "TARGET_ALTIVEC"
3713 {
3714 rtx vzero = gen_reg_rtx (<VP_small>mode);
3715 emit_insn (gen_altivec_vspltis<VU_char> (vzero, const0_rtx));
3716
3717 rtx res = gen_reg_rtx (<VP_small>mode);
3718 rtx op1 = operands[1];
3719
3720 if (BYTES_BIG_ENDIAN)
3721 emit_insn (gen_altivec_vmrgh<VU_char> (res, vzero, op1));
3722 else
3723 emit_insn (gen_altivec_vmrgl<VU_char> (res, op1, vzero));
3724
3725 emit_insn (gen_move_insn (operands[0], gen_lowpart (<MODE>mode, res)));
3726 DONE;
3727 })
3728
;; Unsigned unpack of the low half; mirror image of the pattern above.
3729 (define_expand "vec_unpacku_lo_<VP_small_lc>"
3730 [(set (match_operand:VP 0 "register_operand" "=v")
3731 (unspec:VP [(match_operand:<VP_small> 1 "register_operand" "v")]
3732 UNSPEC_VUPKLU))]
3733 "TARGET_ALTIVEC"
3734 {
3735 rtx vzero = gen_reg_rtx (<VP_small>mode);
3736 emit_insn (gen_altivec_vspltis<VU_char> (vzero, const0_rtx));
3737
3738 rtx res = gen_reg_rtx (<VP_small>mode);
3739 rtx op1 = operands[1];
3740
3741 if (BYTES_BIG_ENDIAN)
3742 emit_insn (gen_altivec_vmrgl<VU_char> (res, vzero, op1));
3743 else
3744 emit_insn (gen_altivec_vmrgh<VU_char> (res, op1, vzero));
3745
3746 emit_insn (gen_move_insn (operands[0], gen_lowpart (<MODE>mode, res)));
3747 DONE;
3748 })
3749
3750 (define_expand "vec_widen_umult_hi_v16qi"
3751 [(set (match_operand:V8HI 0 "register_operand" "=v")
3752 (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")
3753 (match_operand:V16QI 2 "register_operand" "v")]
3754 UNSPEC_VMULWHUB))]
3755 "TARGET_ALTIVEC"
3756 {
3757 rtx ve = gen_reg_rtx (V8HImode);
3758 rtx vo = gen_reg_rtx (V8HImode);
3759
3760 if (BYTES_BIG_ENDIAN)
3761 {
3762 emit_insn (gen_altivec_vmuleub (ve, operands[1], operands[2]));
3763 emit_insn (gen_altivec_vmuloub (vo, operands[1], operands[2]));
3764 emit_insn (gen_altivec_vmrghh_direct (operands[0], ve, vo));
3765 }
3766 else
3767 {
3768 emit_insn (gen_altivec_vmuloub (ve, operands[1], operands[2]));
3769 emit_insn (gen_altivec_vmuleub (vo, operands[1], operands[2]));
3770 emit_insn (gen_altivec_vmrghh_direct (operands[0], vo, ve));
3771 }
3772 DONE;
3773 })
3774
3775 (define_expand "vec_widen_umult_lo_v16qi"
3776 [(set (match_operand:V8HI 0 "register_operand" "=v")
3777 (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")
3778 (match_operand:V16QI 2 "register_operand" "v")]
3779 UNSPEC_VMULWLUB))]
3780 "TARGET_ALTIVEC"
3781 {
3782 rtx ve = gen_reg_rtx (V8HImode);
3783 rtx vo = gen_reg_rtx (V8HImode);
3784
3785 if (BYTES_BIG_ENDIAN)
3786 {
3787 emit_insn (gen_altivec_vmuleub (ve, operands[1], operands[2]));
3788 emit_insn (gen_altivec_vmuloub (vo, operands[1], operands[2]));
3789 emit_insn (gen_altivec_vmrglh_direct (operands[0], ve, vo));
3790 }
3791 else
3792 {
3793 emit_insn (gen_altivec_vmuloub (ve, operands[1], operands[2]));
3794 emit_insn (gen_altivec_vmuleub (vo, operands[1], operands[2]));
3795 emit_insn (gen_altivec_vmrglh_direct (operands[0], vo, ve));
3796 }
3797 DONE;
3798 })
3799
3800 (define_expand "vec_widen_smult_hi_v16qi"
3801 [(set (match_operand:V8HI 0 "register_operand" "=v")
3802 (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")
3803 (match_operand:V16QI 2 "register_operand" "v")]
3804 UNSPEC_VMULWHSB))]
3805 "TARGET_ALTIVEC"
3806 {
3807 rtx ve = gen_reg_rtx (V8HImode);
3808 rtx vo = gen_reg_rtx (V8HImode);
3809
3810 if (BYTES_BIG_ENDIAN)
3811 {
3812 emit_insn (gen_altivec_vmulesb (ve, operands[1], operands[2]));
3813 emit_insn (gen_altivec_vmulosb (vo, operands[1], operands[2]));
3814 emit_insn (gen_altivec_vmrghh_direct (operands[0], ve, vo));
3815 }
3816 else
3817 {
3818 emit_insn (gen_altivec_vmulosb (ve, operands[1], operands[2]));
3819 emit_insn (gen_altivec_vmulesb (vo, operands[1], operands[2]));
3820 emit_insn (gen_altivec_vmrghh_direct (operands[0], vo, ve));
3821 }
3822 DONE;
3823 })
3824
3825 (define_expand "vec_widen_smult_lo_v16qi"
3826 [(set (match_operand:V8HI 0 "register_operand" "=v")
3827 (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")
3828 (match_operand:V16QI 2 "register_operand" "v")]
3829 UNSPEC_VMULWLSB))]
3830 "TARGET_ALTIVEC"
3831 {
3832 rtx ve = gen_reg_rtx (V8HImode);
3833 rtx vo = gen_reg_rtx (V8HImode);
3834
3835 if (BYTES_BIG_ENDIAN)
3836 {
3837 emit_insn (gen_altivec_vmulesb (ve, operands[1], operands[2]));
3838 emit_insn (gen_altivec_vmulosb (vo, operands[1], operands[2]));
3839 emit_insn (gen_altivec_vmrglh_direct (operands[0], ve, vo));
3840 }
3841 else
3842 {
3843 emit_insn (gen_altivec_vmulosb (ve, operands[1], operands[2]));
3844 emit_insn (gen_altivec_vmulesb (vo, operands[1], operands[2]));
3845 emit_insn (gen_altivec_vmrglh_direct (operands[0], vo, ve));
3846 }
3847 DONE;
3848 })
3849
3850 (define_expand "vec_widen_umult_hi_v8hi"
3851 [(set (match_operand:V4SI 0 "register_operand" "=v")
3852 (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
3853 (match_operand:V8HI 2 "register_operand" "v")]
3854 UNSPEC_VMULWHUH))]
3855 "TARGET_ALTIVEC"
3856 {
3857 rtx ve = gen_reg_rtx (V4SImode);
3858 rtx vo = gen_reg_rtx (V4SImode);
3859
3860 if (BYTES_BIG_ENDIAN)
3861 {
3862 emit_insn (gen_altivec_vmuleuh (ve, operands[1], operands[2]));
3863 emit_insn (gen_altivec_vmulouh (vo, operands[1], operands[2]));
3864 emit_insn (gen_altivec_vmrghw_direct_v4si (operands[0], ve, vo));
3865 }
3866 else
3867 {
3868 emit_insn (gen_altivec_vmulouh (ve, operands[1], operands[2]));
3869 emit_insn (gen_altivec_vmuleuh (vo, operands[1], operands[2]));
3870 emit_insn (gen_altivec_vmrghw_direct_v4si (operands[0], vo, ve));
3871 }
3872 DONE;
3873 })
3874
3875 (define_expand "vec_widen_umult_lo_v8hi"
3876 [(set (match_operand:V4SI 0 "register_operand" "=v")
3877 (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
3878 (match_operand:V8HI 2 "register_operand" "v")]
3879 UNSPEC_VMULWLUH))]
3880 "TARGET_ALTIVEC"
3881 {
3882 rtx ve = gen_reg_rtx (V4SImode);
3883 rtx vo = gen_reg_rtx (V4SImode);
3884
3885 if (BYTES_BIG_ENDIAN)
3886 {
3887 emit_insn (gen_altivec_vmuleuh (ve, operands[1], operands[2]));
3888 emit_insn (gen_altivec_vmulouh (vo, operands[1], operands[2]));
3889 emit_insn (gen_altivec_vmrglw_direct_v4si (operands[0], ve, vo));
3890 }
3891 else
3892 {
3893 emit_insn (gen_altivec_vmulouh (ve, operands[1], operands[2]));
3894 emit_insn (gen_altivec_vmuleuh (vo, operands[1], operands[2]));
3895 emit_insn (gen_altivec_vmrglw_direct_v4si (operands[0], vo, ve));
3896 }
3897 DONE;
3898 })
3899
3900 (define_expand "vec_widen_smult_hi_v8hi"
3901 [(set (match_operand:V4SI 0 "register_operand" "=v")
3902 (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
3903 (match_operand:V8HI 2 "register_operand" "v")]
3904 UNSPEC_VMULWHSH))]
3905 "TARGET_ALTIVEC"
3906 {
3907 rtx ve = gen_reg_rtx (V4SImode);
3908 rtx vo = gen_reg_rtx (V4SImode);
3909
3910 if (BYTES_BIG_ENDIAN)
3911 {
3912 emit_insn (gen_altivec_vmulesh (ve, operands[1], operands[2]));
3913 emit_insn (gen_altivec_vmulosh (vo, operands[1], operands[2]));
3914 emit_insn (gen_altivec_vmrghw_direct_v4si (operands[0], ve, vo));
3915 }
3916 else
3917 {
3918 emit_insn (gen_altivec_vmulosh (ve, operands[1], operands[2]));
3919 emit_insn (gen_altivec_vmulesh (vo, operands[1], operands[2]));
3920 emit_insn (gen_altivec_vmrghw_direct_v4si (operands[0], vo, ve));
3921 }
3922 DONE;
3923 })
3924
3925 (define_expand "vec_widen_smult_lo_v8hi"
3926 [(set (match_operand:V4SI 0 "register_operand" "=v")
3927 (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
3928 (match_operand:V8HI 2 "register_operand" "v")]
3929 UNSPEC_VMULWLSH))]
3930 "TARGET_ALTIVEC"
3931 {
3932 rtx ve = gen_reg_rtx (V4SImode);
3933 rtx vo = gen_reg_rtx (V4SImode);
3934
3935 if (BYTES_BIG_ENDIAN)
3936 {
3937 emit_insn (gen_altivec_vmulesh (ve, operands[1], operands[2]));
3938 emit_insn (gen_altivec_vmulosh (vo, operands[1], operands[2]));
3939 emit_insn (gen_altivec_vmrglw_direct_v4si (operands[0], ve, vo));
3940 }
3941 else
3942 {
3943 emit_insn (gen_altivec_vmulosh (ve, operands[1], operands[2]));
3944 emit_insn (gen_altivec_vmulesh (vo, operands[1], operands[2]));
3945 emit_insn (gen_altivec_vmrglw_direct_v4si (operands[0], vo, ve));
3946 }
3947 DONE;
3948 })
3949
3950 (define_expand "vec_pack_trunc_<mode>"
3951 [(set (match_operand:<VP_small> 0 "register_operand" "=v")
3952 (unspec:<VP_small> [(match_operand:VP 1 "register_operand" "v")
3953 (match_operand:VP 2 "register_operand" "v")]
3954 UNSPEC_VPACK_UNS_UNS_MOD))]
3955 "<VI_unit>"
3956 "")
3957
3958 (define_expand "mulv16qi3"
3959 [(set (match_operand:V16QI 0 "register_operand" "=v")
3960 (mult:V16QI (match_operand:V16QI 1 "register_operand" "v")
3961 (match_operand:V16QI 2 "register_operand" "v")))]
3962 "TARGET_ALTIVEC"
3963 {
3964 rtx even = gen_reg_rtx (V8HImode);
3965 rtx odd = gen_reg_rtx (V8HImode);
3966 rtx mask = gen_reg_rtx (V16QImode);
3967 rtvec v = rtvec_alloc (16);
3968 int i;
3969
3970 for (i = 0; i < 8; ++i) {
3971 RTVEC_ELT (v, 2 * i)
3972 = gen_rtx_CONST_INT (QImode, BYTES_BIG_ENDIAN ? 2 * i + 1 : 31 - 2 * i);
3973 RTVEC_ELT (v, 2 * i + 1)
3974 = gen_rtx_CONST_INT (QImode, BYTES_BIG_ENDIAN ? 2 * i + 17 : 15 - 2 * i);
3975 }
3976
3977 emit_insn (gen_vec_initv16qiqi (mask, gen_rtx_PARALLEL (V16QImode, v)));
3978 emit_insn (gen_altivec_vmulesb (even, operands[1], operands[2]));
3979 emit_insn (gen_altivec_vmulosb (odd, operands[1], operands[2]));
3980 emit_insn (gen_altivec_vperm_v8hiv16qi (operands[0], even, odd, mask));
3981 DONE;
3982 })
3983
3984 (define_expand "altivec_vpermxor"
3985 [(use (match_operand:V16QI 0 "register_operand"))
3986 (use (match_operand:V16QI 1 "register_operand"))
3987 (use (match_operand:V16QI 2 "register_operand"))
3988 (use (match_operand:V16QI 3 "register_operand"))]
3989 "TARGET_P8_VECTOR"
3990 {
3991 if (!BYTES_BIG_ENDIAN)
3992 {
3993 /* vpermxor indexes the bytes using Big Endian numbering. If LE,
3994 change indexing in operand[3] to BE index. */
3995 rtx be_index = gen_reg_rtx (V16QImode);
3996
3997 emit_insn (gen_one_cmplv16qi2 (be_index, operands[3]));
3998 emit_insn (gen_crypto_vpermxor_v16qi (operands[0], operands[1],
3999 operands[2], be_index));
4000 }
4001 else
4002 emit_insn (gen_crypto_vpermxor_v16qi (operands[0], operands[1],
4003 operands[2], operands[3]));
4004 DONE;
4005 })
4006
4007 (define_expand "altivec_negv4sf2"
4008 [(use (match_operand:V4SF 0 "register_operand"))
4009 (use (match_operand:V4SF 1 "register_operand"))]
4010 "TARGET_ALTIVEC"
4011 {
4012 rtx neg0;
4013
4014 /* Generate [-0.0, -0.0, -0.0, -0.0]. */
4015 neg0 = gen_reg_rtx (V4SImode);
4016 emit_insn (gen_altivec_vspltisw (neg0, constm1_rtx));
4017 emit_insn (gen_vashlv4si3 (neg0, neg0, neg0));
4018
4019 /* XOR */
4020 emit_insn (gen_xorv4sf3 (operands[0],
4021 gen_lowpart (V4SFmode, neg0), operands[1]));
4022
4023 DONE;
4024 })
4025
4026 ;; Vector reverse elements for V16QI V8HI V4SI V4SF
4027 (define_expand "altivec_vreve<mode>2"
4028 [(set (match_operand:VEC_K 0 "register_operand" "=v")
4029 (unspec:VEC_K [(match_operand:VEC_K 1 "register_operand" "v")]
4030 UNSPEC_VREVEV))]
4031 "TARGET_ALTIVEC"
4032 {
4033 if (TARGET_P9_VECTOR)
4034 {
4035 if (<MODE>mode == V16QImode)
4036 emit_insn (gen_p9_xxbrq_v16qi (operands[0], operands[1]));
4037 else if (<MODE>mode == V8HImode)
4038 {
4039 rtx subreg1 = simplify_gen_subreg (V1TImode, operands[1],
4040 <MODE>mode, 0);
4041 rtx temp = gen_reg_rtx (V1TImode);
4042 emit_insn (gen_p9_xxbrq_v1ti (temp, subreg1));
4043 rtx subreg2 = simplify_gen_subreg (<MODE>mode, temp,
4044 V1TImode, 0);
4045 emit_insn (gen_p9_xxbrh_v8hi (operands[0], subreg2));
4046 }
4047 else /* V4SI and V4SF. */
4048 {
4049 rtx subreg1 = simplify_gen_subreg (V1TImode, operands[1],
4050 <MODE>mode, 0);
4051 rtx temp = gen_reg_rtx (V1TImode);
4052 emit_insn (gen_p9_xxbrq_v1ti (temp, subreg1));
4053 rtx subreg2 = simplify_gen_subreg (<MODE>mode, temp,
4054 V1TImode, 0);
4055 if (<MODE>mode == V4SImode)
4056 emit_insn (gen_p9_xxbrw_v4si (operands[0], subreg2));
4057 else
4058 emit_insn (gen_p9_xxbrw_v4sf (operands[0], subreg2));
4059 }
4060 DONE;
4061 }
4062
4063 int i, j, size, num_elements;
4064 rtvec v = rtvec_alloc (16);
4065 rtx mask = gen_reg_rtx (V16QImode);
4066
4067 size = GET_MODE_UNIT_SIZE (<MODE>mode);
4068 num_elements = GET_MODE_NUNITS (<MODE>mode);
4069
4070 for (j = 0; j < num_elements; j++)
4071 for (i = 0; i < size; i++)
4072 RTVEC_ELT (v, i + j * size)
4073 = GEN_INT (i + (num_elements - 1 - j) * size);
4074
4075 emit_insn (gen_vec_initv16qiqi (mask, gen_rtx_PARALLEL (V16QImode, v)));
4076 emit_insn (gen_altivec_vperm_<mode> (operands[0], operands[1],
4077 operands[1], mask));
4078 DONE;
4079 })
4080
4081 ;; Vector reverse elements for V2DI V2DF
4082 (define_expand "altivec_vreve<mode>2"
4083 [(set (match_operand:VEC_64 0 "register_operand" "=v")
4084 (unspec:VEC_64 [(match_operand:VEC_64 1 "register_operand" "v")]
4085 UNSPEC_VREVEV))]
4086 "TARGET_ALTIVEC"
4087 {
4088 emit_insn (gen_xxswapd_<mode> (operands[0], operands[1]));
4089 DONE;
4090 })
4091
4092 ;; Vector SIMD PEM v2.06c defines LVLX, LVLXL, LVRX, LVRXL,
4093 ;; STVLX, STVLXL, STVVRX, STVRXL are available only on Cell.
4094 (define_insn "altivec_lvlx"
4095 [(set (match_operand:V16QI 0 "register_operand" "=v")
4096 (unspec:V16QI [(match_operand:BLK 1 "memory_operand" "Z")]
4097 UNSPEC_LVLX))]
4098 "TARGET_ALTIVEC && rs6000_cpu == PROCESSOR_CELL"
4099 "lvlx %0,%y1"
4100 [(set_attr "type" "vecload")])
4101
4102 (define_insn "altivec_lvlxl"
4103 [(set (match_operand:V16QI 0 "register_operand" "=v")
4104 (unspec:V16QI [(match_operand:BLK 1 "memory_operand" "Z")]
4105 UNSPEC_LVLXL))]
4106 "TARGET_ALTIVEC && rs6000_cpu == PROCESSOR_CELL"
4107 "lvlxl %0,%y1"
4108 [(set_attr "type" "vecload")])
4109
4110 (define_insn "altivec_lvrx"
4111 [(set (match_operand:V16QI 0 "register_operand" "=v")
4112 (unspec:V16QI [(match_operand:BLK 1 "memory_operand" "Z")]
4113 UNSPEC_LVRX))]
4114 "TARGET_ALTIVEC && rs6000_cpu == PROCESSOR_CELL"
4115 "lvrx %0,%y1"
4116 [(set_attr "type" "vecload")])
4117
4118 (define_insn "altivec_lvrxl"
4119 [(set (match_operand:V16QI 0 "register_operand" "=v")
4120 (unspec:V16QI [(match_operand:BLK 1 "memory_operand" "Z")]
4121 UNSPEC_LVRXL))]
4122 "TARGET_ALTIVEC && rs6000_cpu == PROCESSOR_CELL"
4123 "lvrxl %0,%y1"
4124 [(set_attr "type" "vecload")])
4125
4126 (define_insn "altivec_stvlx"
4127 [(parallel
4128 [(set (match_operand:V16QI 0 "memory_operand" "=Z")
4129 (match_operand:V16QI 1 "register_operand" "v"))
4130 (unspec [(const_int 0)] UNSPEC_STVLX)])]
4131 "TARGET_ALTIVEC && rs6000_cpu == PROCESSOR_CELL"
4132 "stvlx %1,%y0"
4133 [(set_attr "type" "vecstore")])
4134
4135 (define_insn "altivec_stvlxl"
4136 [(parallel
4137 [(set (match_operand:V16QI 0 "memory_operand" "=Z")
4138 (match_operand:V16QI 1 "register_operand" "v"))
4139 (unspec [(const_int 0)] UNSPEC_STVLXL)])]
4140 "TARGET_ALTIVEC && rs6000_cpu == PROCESSOR_CELL"
4141 "stvlxl %1,%y0"
4142 [(set_attr "type" "vecstore")])
4143
4144 (define_insn "altivec_stvrx"
4145 [(parallel
4146 [(set (match_operand:V16QI 0 "memory_operand" "=Z")
4147 (match_operand:V16QI 1 "register_operand" "v"))
4148 (unspec [(const_int 0)] UNSPEC_STVRX)])]
4149 "TARGET_ALTIVEC && rs6000_cpu == PROCESSOR_CELL"
4150 "stvrx %1,%y0"
4151 [(set_attr "type" "vecstore")])
4152
4153 (define_insn "altivec_stvrxl"
4154 [(parallel
4155 [(set (match_operand:V16QI 0 "memory_operand" "=Z")
4156 (match_operand:V16QI 1 "register_operand" "v"))
4157 (unspec [(const_int 0)] UNSPEC_STVRXL)])]
4158 "TARGET_ALTIVEC && rs6000_cpu == PROCESSOR_CELL"
4159 "stvrxl %1,%y0"
4160 [(set_attr "type" "vecstore")])
4161
4162 (define_expand "vec_unpacks_float_hi_v8hi"
4163 [(set (match_operand:V4SF 0 "register_operand")
4164 (unspec:V4SF [(match_operand:V8HI 1 "register_operand")]
4165 UNSPEC_VUPKHS_V4SF))]
4166 "TARGET_ALTIVEC"
4167 {
4168 rtx tmp = gen_reg_rtx (V4SImode);
4169
4170 emit_insn (gen_vec_unpacks_hi_v8hi (tmp, operands[1]));
4171 emit_insn (gen_altivec_vcfsx (operands[0], tmp, const0_rtx));
4172 DONE;
4173 })
4174
4175 (define_expand "vec_unpacks_float_lo_v8hi"
4176 [(set (match_operand:V4SF 0 "register_operand")
4177 (unspec:V4SF [(match_operand:V8HI 1 "register_operand")]
4178 UNSPEC_VUPKLS_V4SF))]
4179 "TARGET_ALTIVEC"
4180 {
4181 rtx tmp = gen_reg_rtx (V4SImode);
4182
4183 emit_insn (gen_vec_unpacks_lo_v8hi (tmp, operands[1]));
4184 emit_insn (gen_altivec_vcfsx (operands[0], tmp, const0_rtx));
4185 DONE;
4186 })
4187
4188 (define_expand "vec_unpacku_float_hi_v8hi"
4189 [(set (match_operand:V4SF 0 "register_operand")
4190 (unspec:V4SF [(match_operand:V8HI 1 "register_operand")]
4191 UNSPEC_VUPKHU_V4SF))]
4192 "TARGET_ALTIVEC"
4193 {
4194 rtx tmp = gen_reg_rtx (V4SImode);
4195
4196 emit_insn (gen_vec_unpacku_hi_v8hi (tmp, operands[1]));
4197 emit_insn (gen_altivec_vcfux (operands[0], tmp, const0_rtx));
4198 DONE;
4199 })
4200
4201 (define_expand "vec_unpacku_float_lo_v8hi"
4202 [(set (match_operand:V4SF 0 "register_operand")
4203 (unspec:V4SF [(match_operand:V8HI 1 "register_operand")]
4204 UNSPEC_VUPKLU_V4SF))]
4205 "TARGET_ALTIVEC"
4206 {
4207 rtx tmp = gen_reg_rtx (V4SImode);
4208
4209 emit_insn (gen_vec_unpacku_lo_v8hi (tmp, operands[1]));
4210 emit_insn (gen_altivec_vcfux (operands[0], tmp, const0_rtx));
4211 DONE;
4212 })
4213
4214 \f
4215 ;; Power8/power9 vector instructions encoded as Altivec instructions
4216
4217 ;; Vector count leading zeros
4218 (define_insn "*p8v_clz<mode>2"
4219 [(set (match_operand:VI2 0 "register_operand" "=v")
4220 (clz:VI2 (match_operand:VI2 1 "register_operand" "v")))]
4221 "TARGET_P8_VECTOR"
4222 "vclz<wd> %0,%1"
4223 [(set_attr "type" "vecsimple")])
4224
4225 ;; Vector absolute difference unsigned
4226 (define_expand "vadu<mode>3"
4227 [(set (match_operand:VI 0 "register_operand")
4228 (unspec:VI [(match_operand:VI 1 "register_operand")
4229 (match_operand:VI 2 "register_operand")]
4230 UNSPEC_VADU))]
4231 "TARGET_P9_VECTOR")
4232
4233 ;; Vector absolute difference unsigned
4234 (define_insn "p9_vadu<mode>3"
4235 [(set (match_operand:VI 0 "register_operand" "=v")
4236 (unspec:VI [(match_operand:VI 1 "register_operand" "v")
4237 (match_operand:VI 2 "register_operand" "v")]
4238 UNSPEC_VADU))]
4239 "TARGET_P9_VECTOR"
4240 "vabsdu<wd> %0,%1,%2"
4241 [(set_attr "type" "vecsimple")])
4242
4243 ;; Vector count trailing zeros
4244 (define_insn "*p9v_ctz<mode>2"
4245 [(set (match_operand:VI2 0 "register_operand" "=v")
4246 (ctz:VI2 (match_operand:VI2 1 "register_operand" "v")))]
4247 "TARGET_P9_VECTOR"
4248 "vctz<wd> %0,%1"
4249 [(set_attr "type" "vecsimple")])
4250
4251 ;; Vector population count
4252 (define_insn "*p8v_popcount<mode>2"
4253 [(set (match_operand:VI2 0 "register_operand" "=v")
4254 (popcount:VI2 (match_operand:VI2 1 "register_operand" "v")))]
4255 "TARGET_P8_VECTOR"
4256 "vpopcnt<wd> %0,%1"
4257 [(set_attr "type" "vecsimple")])
4258
4259 ;; Vector parity
4260 (define_insn "rs6000_vprtyb<mode>2"
4261 [(set (match_operand:VEC_IP 0 "register_operand" "=v")
4262 (unspec:VEC_IP
4263 [(match_operand:VEC_IP 1 "register_operand" "v")]
4264 UNSPEC_PARITY))]
4265 "TARGET_P9_VECTOR"
4266 "vprtyb<wd> %0,%1"
4267 [(set_attr "type" "vecsimple")])
4268
4269 ;; Vector Gather Bits by Bytes by Doubleword
4270 (define_insn "p8v_vgbbd"
4271 [(set (match_operand:V16QI 0 "register_operand" "=v")
4272 (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")]
4273 UNSPEC_VGBBD))]
4274 "TARGET_P8_VECTOR"
4275 "vgbbd %0,%1"
4276 [(set_attr "type" "vecsimple")])
4277
4278 \f
4279 ;; 128-bit binary integer arithmetic
4280 ;; We have a special container type (V1TImode) to allow operations using the
4281 ;; ISA 2.07 128-bit binary support to target the VMX/altivec registers without
4282 ;; having to worry about the register allocator deciding GPRs are better.
4283
4284 (define_insn "altivec_vadduqm"
4285 [(set (match_operand:V1TI 0 "register_operand" "=v")
4286 (plus:V1TI (match_operand:V1TI 1 "register_operand" "v")
4287 (match_operand:V1TI 2 "register_operand" "v")))]
4288 "TARGET_VADDUQM"
4289 "vadduqm %0,%1,%2"
4290 [(set_attr "type" "vecsimple")])
4291
4292 (define_insn "altivec_vaddcuq"
4293 [(set (match_operand:V1TI 0 "register_operand" "=v")
4294 (unspec:V1TI [(match_operand:V1TI 1 "register_operand" "v")
4295 (match_operand:V1TI 2 "register_operand" "v")]
4296 UNSPEC_VADDCUQ))]
4297 "TARGET_VADDUQM"
4298 "vaddcuq %0,%1,%2"
4299 [(set_attr "type" "vecsimple")])
4300
4301 (define_insn "altivec_vsubuqm"
4302 [(set (match_operand:V1TI 0 "register_operand" "=v")
4303 (minus:V1TI (match_operand:V1TI 1 "register_operand" "v")
4304 (match_operand:V1TI 2 "register_operand" "v")))]
4305 "TARGET_VADDUQM"
4306 "vsubuqm %0,%1,%2"
4307 [(set_attr "type" "vecsimple")])
4308
4309 (define_insn "altivec_vsubcuq"
4310 [(set (match_operand:V1TI 0 "register_operand" "=v")
4311 (unspec:V1TI [(match_operand:V1TI 1 "register_operand" "v")
4312 (match_operand:V1TI 2 "register_operand" "v")]
4313 UNSPEC_VSUBCUQ))]
4314 "TARGET_VADDUQM"
4315 "vsubcuq %0,%1,%2"
4316 [(set_attr "type" "vecsimple")])
4317
4318 (define_insn "altivec_vaddeuqm"
4319 [(set (match_operand:V1TI 0 "register_operand" "=v")
4320 (unspec:V1TI [(match_operand:V1TI 1 "register_operand" "v")
4321 (match_operand:V1TI 2 "register_operand" "v")
4322 (match_operand:V1TI 3 "register_operand" "v")]
4323 UNSPEC_VADDEUQM))]
4324 "TARGET_VADDUQM"
4325 "vaddeuqm %0,%1,%2,%3"
4326 [(set_attr "type" "vecsimple")])
4327
4328 (define_insn "altivec_vaddecuq"
4329 [(set (match_operand:V1TI 0 "register_operand" "=v")
4330 (unspec:V1TI [(match_operand:V1TI 1 "register_operand" "v")
4331 (match_operand:V1TI 2 "register_operand" "v")
4332 (match_operand:V1TI 3 "register_operand" "v")]
4333 UNSPEC_VADDECUQ))]
4334 "TARGET_VADDUQM"
4335 "vaddecuq %0,%1,%2,%3"
4336 [(set_attr "type" "vecsimple")])
4337
4338 (define_insn "altivec_vsubeuqm"
4339 [(set (match_operand:V1TI 0 "register_operand" "=v")
4340 (unspec:V1TI [(match_operand:V1TI 1 "register_operand" "v")
4341 (match_operand:V1TI 2 "register_operand" "v")
4342 (match_operand:V1TI 3 "register_operand" "v")]
4343 UNSPEC_VSUBEUQM))]
4344 "TARGET_VADDUQM"
4345 "vsubeuqm %0,%1,%2,%3"
4346 [(set_attr "type" "vecsimple")])
4347
4348 (define_insn "altivec_vsubecuq"
4349 [(set (match_operand:V1TI 0 "register_operand" "=v")
4350 (unspec:V1TI [(match_operand:V1TI 1 "register_operand" "v")
4351 (match_operand:V1TI 2 "register_operand" "v")
4352 (match_operand:V1TI 3 "register_operand" "v")]
4353 UNSPEC_VSUBECUQ))]
4354 "TARGET_VADDUQM"
4355 "vsubecuq %0,%1,%2,%3"
4356 [(set_attr "type" "vecsimple")])
4357
4358 ;; We use V2DI as the output type to simplify converting the permute
4359 ;; bits into an integer
4360 (define_insn "altivec_vbpermq"
4361 [(set (match_operand:V2DI 0 "register_operand" "=v")
4362 (unspec:V2DI [(match_operand:V16QI 1 "register_operand" "v")
4363 (match_operand:V16QI 2 "register_operand" "v")]
4364 UNSPEC_VBPERMQ))]
4365 "TARGET_P8_VECTOR"
4366 "vbpermq %0,%1,%2"
4367 [(set_attr "type" "vecperm")])
4368
4369 ; One of the vector API interfaces requires returning vector unsigned char.
4370 (define_insn "altivec_vbpermq2"
4371 [(set (match_operand:V16QI 0 "register_operand" "=v")
4372 (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
4373 (match_operand:V16QI 2 "register_operand" "v")]
4374 UNSPEC_VBPERMQ))]
4375 "TARGET_P8_VECTOR"
4376 "vbpermq %0,%1,%2"
4377 [(set_attr "type" "vecperm")])
4378
4379 (define_insn "altivec_vbpermd"
4380 [(set (match_operand:V2DI 0 "register_operand" "=v")
4381 (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "v")
4382 (match_operand:V16QI 2 "register_operand" "v")]
4383 UNSPEC_VBPERMD))]
4384 "TARGET_P9_VECTOR"
4385 "vbpermd %0,%1,%2"
4386 [(set_attr "type" "vecsimple")])
4387
4388 ;; Support for SAD (sum of absolute differences).
4389
4390 ;; Due to saturating semantics, we can't combine the sum-across
4391 ;; with the vector accumulate in vsum4ubs. A vadduwm is needed.
4392 (define_expand "usadv16qi"
4393 [(use (match_operand:V4SI 0 "register_operand"))
4394 (use (match_operand:V16QI 1 "register_operand"))
4395 (use (match_operand:V16QI 2 "register_operand"))
4396 (use (match_operand:V4SI 3 "register_operand"))]
4397 "TARGET_P9_VECTOR"
4398 {
4399 rtx absd = gen_reg_rtx (V16QImode);
4400 rtx zero = gen_reg_rtx (V4SImode);
4401 rtx psum = gen_reg_rtx (V4SImode);
4402
4403 emit_insn (gen_p9_vaduv16qi3 (absd, operands[1], operands[2]));
4404 emit_insn (gen_altivec_vspltisw (zero, const0_rtx));
4405 emit_insn (gen_altivec_vsum4ubs (psum, absd, zero));
4406 emit_insn (gen_addv4si3 (operands[0], psum, operands[3]));
4407 DONE;
4408 })
4409
4410 ;; Since vsum4shs is saturating and further performs signed
4411 ;; arithmetic, we can't combine the sum-across with the vector
4412 ;; accumulate in vsum4shs. A vadduwm is needed.
4413 (define_expand "usadv8hi"
4414 [(use (match_operand:V4SI 0 "register_operand"))
4415 (use (match_operand:V8HI 1 "register_operand"))
4416 (use (match_operand:V8HI 2 "register_operand"))
4417 (use (match_operand:V4SI 3 "register_operand"))]
4418 "TARGET_P9_VECTOR"
4419 {
4420 rtx absd = gen_reg_rtx (V8HImode);
4421 rtx zero = gen_reg_rtx (V4SImode);
4422 rtx psum = gen_reg_rtx (V4SImode);
4423
4424 emit_insn (gen_p9_vaduv8hi3 (absd, operands[1], operands[2]));
4425 emit_insn (gen_altivec_vspltisw (zero, const0_rtx));
4426 emit_insn (gen_altivec_vsum4shs (psum, absd, zero));
4427 emit_insn (gen_addv4si3 (operands[0], psum, operands[3]));
4428 DONE;
4429 })
4430
4431 ;; Decimal Integer operations
4432 (define_int_iterator UNSPEC_BCD_ADD_SUB [UNSPEC_BCDADD UNSPEC_BCDSUB])
4433
4434 (define_int_attr bcd_add_sub [(UNSPEC_BCDADD "add")
4435 (UNSPEC_BCDSUB "sub")])
4436
4437 (define_code_iterator BCD_TEST [eq lt le gt ge unordered])
4438 (define_mode_iterator VBCD [V1TI V16QI])
4439
4440 (define_insn "bcd<bcd_add_sub>_<mode>"
4441 [(set (match_operand:VBCD 0 "register_operand" "=v")
4442 (unspec:VBCD [(match_operand:VBCD 1 "register_operand" "v")
4443 (match_operand:VBCD 2 "register_operand" "v")
4444 (match_operand:QI 3 "const_0_to_1_operand" "n")]
4445 UNSPEC_BCD_ADD_SUB))
4446 (clobber (reg:CCFP CR6_REGNO))]
4447 "TARGET_P8_VECTOR"
4448 "bcd<bcd_add_sub>. %0,%1,%2,%3"
4449 [(set_attr "type" "vecsimple")])
4450
4451 ;; Use a floating point type (V2DFmode) for the compare to set CR6 so that we
4452 ;; can use the unordered test for BCD nans and add/subtracts that overflow. An
4453 ;; UNORDERED test on an integer type (like V1TImode) is not defined. The type
4454 ;; probably should be one that can go in the VMX (Altivec) registers, so we
4455 ;; can't use DDmode or DFmode.
4456 (define_insn "*bcd<bcd_add_sub>_test_<mode>"
4457 [(set (reg:CCFP CR6_REGNO)
4458 (compare:CCFP
4459 (unspec:V2DF [(match_operand:VBCD 1 "register_operand" "v")
4460 (match_operand:VBCD 2 "register_operand" "v")
4461 (match_operand:QI 3 "const_0_to_1_operand" "i")]
4462 UNSPEC_BCD_ADD_SUB)
4463 (match_operand:V2DF 4 "zero_constant" "j")))
4464 (clobber (match_scratch:VBCD 0 "=v"))]
4465 "TARGET_P8_VECTOR"
4466 "bcd<bcd_add_sub>. %0,%1,%2,%3"
4467 [(set_attr "type" "vecsimple")])
4468
4469 (define_insn "*bcd<bcd_add_sub>_test2_<mode>"
4470 [(set (match_operand:VBCD 0 "register_operand" "=v")
4471 (unspec:VBCD [(match_operand:VBCD 1 "register_operand" "v")
4472 (match_operand:VBCD 2 "register_operand" "v")
4473 (match_operand:QI 3 "const_0_to_1_operand" "i")]
4474 UNSPEC_BCD_ADD_SUB))
4475 (set (reg:CCFP CR6_REGNO)
4476 (compare:CCFP
4477 (unspec:V2DF [(match_dup 1)
4478 (match_dup 2)
4479 (match_dup 3)]
4480 UNSPEC_BCD_ADD_SUB)
4481 (match_operand:V2DF 4 "zero_constant" "j")))]
4482 "TARGET_P8_VECTOR"
4483 "bcd<bcd_add_sub>. %0,%1,%2,%3"
4484 [(set_attr "type" "vecsimple")])
4485
4486 (define_insn "vcfuged"
4487 [(set (match_operand:V2DI 0 "altivec_register_operand" "=v")
4488 (unspec:V2DI [(match_operand:V2DI 1 "altivec_register_operand" "v")
4489 (match_operand:V2DI 2 "altivec_register_operand" "v")]
4490 UNSPEC_VCFUGED))]
4491 "TARGET_POWER10"
4492 "vcfuged %0,%1,%2"
4493 [(set_attr "type" "vecsimple")])
4494
4495 (define_insn "vclzdm"
4496 [(set (match_operand:V2DI 0 "altivec_register_operand" "=v")
4497 (unspec:V2DI [(match_operand:V2DI 1 "altivec_register_operand" "v")
4498 (match_operand:V2DI 2 "altivec_register_operand" "v")]
4499 UNSPEC_VCLZDM))]
4500 "TARGET_POWER10"
4501 "vclzdm %0,%1,%2"
4502 [(set_attr "type" "vecsimple")])
4503
4504 (define_insn "vctzdm"
4505 [(set (match_operand:V2DI 0 "altivec_register_operand" "=v")
4506 (unspec:V2DI [(match_operand:V2DI 1 "altivec_register_operand" "v")
4507 (match_operand:V2DI 2 "altivec_register_operand" "v")]
4508 UNSPEC_VCTZDM))]
4509 "TARGET_POWER10"
4510 "vctzdm %0,%1,%2"
4511 [(set_attr "type" "vecsimple")])
4512
4513 (define_insn "vpdepd"
4514 [(set (match_operand:V2DI 0 "altivec_register_operand" "=v")
4515 (unspec:V2DI [(match_operand:V2DI 1 "altivec_register_operand" "v")
4516 (match_operand:V2DI 2 "altivec_register_operand" "v")]
4517 UNSPEC_VPDEPD))]
4518 "TARGET_POWER10"
4519 "vpdepd %0,%1,%2"
4520 [(set_attr "type" "vecsimple")])
4521
4522 (define_insn "vpextd"
4523 [(set (match_operand:V2DI 0 "altivec_register_operand" "=v")
4524 (unspec:V2DI [(match_operand:V2DI 1 "altivec_register_operand" "v")
4525 (match_operand:V2DI 2 "altivec_register_operand" "v")]
4526 UNSPEC_VPEXTD))]
4527 "TARGET_POWER10"
4528 "vpextd %0,%1,%2"
4529 [(set_attr "type" "vecsimple")])
4530
4531 (define_insn "vgnb"
4532 [(set (match_operand:DI 0 "register_operand" "=r")
4533 (unspec:DI [(match_operand:V2DI 1 "altivec_register_operand" "v")
4534 (match_operand:QI 2 "u3bit_cint_operand" "n")]
4535 UNSPEC_VGNB))]
4536 "TARGET_POWER10"
4537 "vgnb %0,%1,%2"
4538 [(set_attr "type" "vecsimple")])
4539
4540 (define_insn "vclrlb"
4541 [(set (match_operand:V16QI 0 "altivec_register_operand" "=v")
4542 (unspec:V16QI [(match_operand:V16QI 1 "altivec_register_operand" "v")
4543 (match_operand:SI 2 "gpc_reg_operand" "r")]
4544 UNSPEC_VCLRLB))]
4545 "TARGET_POWER10"
4546 {
4547 if (BYTES_BIG_ENDIAN)
4548 return "vclrlb %0,%1,%2";
4549 else
4550 return "vclrrb %0,%1,%2";
4551 }
4552 [(set_attr "type" "vecsimple")])
4553
4554 (define_insn "vclrrb"
4555 [(set (match_operand:V16QI 0 "altivec_register_operand" "=v")
4556 (unspec:V16QI [(match_operand:V16QI 1 "altivec_register_operand" "v")
4557 (match_operand:SI 2 "gpc_reg_operand" "r")]
4558 UNSPEC_VCLRRB))]
4559 "TARGET_POWER10"
4560 {
4561 if (BYTES_BIG_ENDIAN)
4562 return "vclrrb %0,%1,%2";
4563 else
4564 return "vclrlb %0,%1,%2";
4565 }
4566 [(set_attr "type" "vecsimple")])
4567
4568 (define_expand "bcd<bcd_add_sub>_<code>_<mode>"
4569 [(parallel [(set (reg:CCFP CR6_REGNO)
4570 (compare:CCFP
4571 (unspec:V2DF [(match_operand:VBCD 1 "register_operand")
4572 (match_operand:VBCD 2 "register_operand")
4573 (match_operand:QI 3 "const_0_to_1_operand")]
4574 UNSPEC_BCD_ADD_SUB)
4575 (match_dup 4)))
4576 (clobber (match_scratch:VBCD 5))])
4577 (set (match_operand:SI 0 "register_operand")
4578 (BCD_TEST:SI (reg:CCFP CR6_REGNO)
4579 (const_int 0)))]
4580 "TARGET_P8_VECTOR"
4581 {
4582 operands[4] = CONST0_RTX (V2DFmode);
4583 })
4584
4585 (define_insn "*bcdinvalid_<mode>"
4586 [(set (reg:CCFP CR6_REGNO)
4587 (compare:CCFP
4588 (unspec:V2DF [(match_operand:VBCD 1 "register_operand" "v")]
4589 UNSPEC_BCDADD)
4590 (match_operand:V2DF 2 "zero_constant" "j")))
4591 (clobber (match_scratch:VBCD 0 "=v"))]
4592 "TARGET_P8_VECTOR"
4593 "bcdadd. %0,%1,%1,0"
4594 [(set_attr "type" "vecsimple")])
4595
4596 (define_expand "bcdinvalid_<mode>"
4597 [(parallel [(set (reg:CCFP CR6_REGNO)
4598 (compare:CCFP
4599 (unspec:V2DF [(match_operand:VBCD 1 "register_operand")]
4600 UNSPEC_BCDADD)
4601 (match_dup 2)))
4602 (clobber (match_scratch:VBCD 3))])
4603 (set (match_operand:SI 0 "register_operand")
4604 (unordered:SI (reg:CCFP CR6_REGNO)
4605 (const_int 0)))]
4606 "TARGET_P8_VECTOR"
4607 {
4608 operands[2] = CONST0_RTX (V2DFmode);
4609 })
4610
4611 (define_insn "bcdshift_v16qi"
4612 [(set (match_operand:V16QI 0 "register_operand" "=v")
4613 (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
4614 (match_operand:V16QI 2 "register_operand" "v")
4615 (match_operand:QI 3 "const_0_to_1_operand" "n")]
4616 UNSPEC_BCDSHIFT))
4617 (clobber (reg:CCFP CR6_REGNO))]
4618 "TARGET_P8_VECTOR"
4619 "bcds. %0,%1,%2,%3"
4620 [(set_attr "type" "vecsimple")])
4621
4622 (define_expand "bcdmul10_v16qi"
4623 [(set (match_operand:V16QI 0 "register_operand")
4624 (unspec:V16QI [(match_operand:V16QI 1 "register_operand")]
4625 UNSPEC_BCDSHIFT))
4626 (clobber (reg:CCFP CR6_REGNO))]
4627 "TARGET_P9_VECTOR"
4628 {
4629 rtx one = gen_reg_rtx (V16QImode);
4630
4631 emit_insn (gen_altivec_vspltisb (one, const1_rtx));
4632 emit_insn (gen_bcdshift_v16qi (operands[0], one, operands[1], const0_rtx));
4633
4634 DONE;
4635 })
4636
4637 (define_expand "bcddiv10_v16qi"
4638 [(set (match_operand:V16QI 0 "register_operand")
4639 (unspec:V16QI [(match_operand:V16QI 1 "register_operand")]
4640 UNSPEC_BCDSHIFT))
4641 (clobber (reg:CCFP CR6_REGNO))]
4642 "TARGET_P9_VECTOR"
4643 {
4644 rtx one = gen_reg_rtx (V16QImode);
4645
4646 emit_insn (gen_altivec_vspltisb (one, constm1_rtx));
4647 emit_insn (gen_bcdshift_v16qi (operands[0], one, operands[1], const0_rtx));
4648
4649 DONE;
4650 })
4651
4652
4653 ;; Peephole2 pattern to combine a bcdadd/bcdsub that calculates the value and
4654 ;; the bcdadd/bcdsub that tests the value. The combiner won't work since
4655 ;; CR6 is a hard coded register. Unfortunately, all of the Altivec predicate
4656 ;; support is hard coded to use the fixed register CR6 instead of creating
4657 ;; a register class for CR6.
4658
4659 (define_peephole2
4660 [(parallel [(set (match_operand:V1TI 0 "register_operand")
4661 (unspec:V1TI [(match_operand:V1TI 1 "register_operand")
4662 (match_operand:V1TI 2 "register_operand")
4663 (match_operand:QI 3 "const_0_to_1_operand")]
4664 UNSPEC_BCD_ADD_SUB))
4665 (clobber (reg:CCFP CR6_REGNO))])
4666 (parallel [(set (reg:CCFP CR6_REGNO)
4667 (compare:CCFP
4668 (unspec:V2DF [(match_dup 1)
4669 (match_dup 2)
4670 (match_dup 3)]
4671 UNSPEC_BCD_ADD_SUB)
4672 (match_operand:V2DF 4 "zero_constant")))
4673 (clobber (match_operand:V1TI 5 "register_operand"))])]
4674 "TARGET_P8_VECTOR"
4675 [(parallel [(set (match_dup 0)
4676 (unspec:V1TI [(match_dup 1)
4677 (match_dup 2)
4678 (match_dup 3)]
4679 UNSPEC_BCD_ADD_SUB))
4680 (set (reg:CCFP CR6_REGNO)
4681 (compare:CCFP
4682 (unspec:V2DF [(match_dup 1)
4683 (match_dup 2)
4684 (match_dup 3)]
4685 UNSPEC_BCD_ADD_SUB)
4686 (match_dup 4)))])])