git.ipfire.org Git - thirdparty/gcc.git/commitdiff
i386: Fix missed APX_NDD check for shift/rotate expanders [PR 112943]
author: Hongyu Wang <hongyu.wang@intel.com>
Mon, 11 Dec 2023 11:30:42 +0000 (19:30 +0800)
committer: Hongyu Wang <hongyu.wang@intel.com>
Tue, 12 Dec 2023 02:50:27 +0000 (10:50 +0800)
The ashl/lshr/ashr expanders call ix86_expand_binary_operator, and
these expanders can also be invoked during some post-reload splits, so
TARGET_APX_NDD must be passed to these calls to avoid forcing operands
to memory at the post-reload stage.

gcc/ChangeLog:

PR target/112943
* config/i386/i386.md (ashl<mode>3): Add TARGET_APX_NDD to
ix86_expand_binary_operator call.
(<insn><mode>3): Likewise for rshift.
(<insn>di3): Likewise for DImode rotate.
(<insn><mode>3): Likewise for SWI124 rotate.

gcc/testsuite/ChangeLog:

PR target/112943
* gcc.target/i386/pr112943.c: New test.

gcc/config/i386/i386.md
gcc/testsuite/gcc.target/i386/pr112943.c [new file with mode: 0644]

index b4db50f61cdd883c1c807f64d0cb92cfd9385ab6..f83064ec3357000a5b5a74ce5c5902a04ef7e95b 100644 (file)
        (ashift:SDWIM (match_operand:SDWIM 1 "<ashl_input_operand>")
                      (match_operand:QI 2 "nonmemory_operand")))]
   ""
-  "ix86_expand_binary_operator (ASHIFT, <MODE>mode, operands); DONE;")
+  "ix86_expand_binary_operator (ASHIFT, <MODE>mode, operands,
+                               TARGET_APX_NDD); DONE;")
 
 (define_insn_and_split "*ashl<dwi>3_doubleword_mask"
   [(set (match_operand:<DWI> 0 "register_operand")
        (any_shiftrt:SDWIM (match_operand:SDWIM 1 "<shift_operand>")
                           (match_operand:QI 2 "nonmemory_operand")))]
   ""
-  "ix86_expand_binary_operator (<CODE>, <MODE>mode, operands); DONE;")
+  "ix86_expand_binary_operator (<CODE>, <MODE>mode, operands,
+                               TARGET_APX_NDD); DONE;")
 
 ;; Avoid useless masking of count operand.
 (define_insn_and_split "*<insn><mode>3_mask"
  ""
 {
   if (TARGET_64BIT)
-    ix86_expand_binary_operator (<CODE>, DImode, operands);
+    ix86_expand_binary_operator (<CODE>, DImode, operands,
+                                TARGET_APX_NDD);
   else if (const_1_to_31_operand (operands[2], VOIDmode))
     emit_insn (gen_ix86_<insn>di3_doubleword
                (operands[0], operands[1], operands[2]));
        (any_rotate:SWIM124 (match_operand:SWIM124 1 "nonimmediate_operand")
                            (match_operand:QI 2 "nonmemory_operand")))]
   ""
-  "ix86_expand_binary_operator (<CODE>, <MODE>mode, operands); DONE;")
+  "ix86_expand_binary_operator (<CODE>, <MODE>mode, operands,
+                               TARGET_APX_NDD); DONE;")
 
 ;; Avoid useless masking of count operand.
 (define_insn_and_split "*<insn><mode>3_mask"
diff --git a/gcc/testsuite/gcc.target/i386/pr112943.c b/gcc/testsuite/gcc.target/i386/pr112943.c
new file mode 100644 (file)
index 0000000..7e299bd
--- /dev/null
@@ -0,0 +1,63 @@
+/* PR target/112943 */
+/* { dg-do compile { target { ! ia32 } } } */
+/* { dg-options "-O2 -march=westmere -mapxf" } */
+
+typedef unsigned char __attribute__((__vector_size__(1))) v8u8;
+typedef char __attribute__((__vector_size__(2))) v16u8;
+typedef char __attribute__((__vector_size__(4))) v32u8;
+typedef char __attribute__((__vector_size__(8))) v64u8;
+typedef char __attribute__((__vector_size__(16))) v128u8;
+typedef _Float16 __attribute__((__vector_size__(2))) v16f16;
+typedef _Float16 __attribute__((__vector_size__(16))) v128f16;
+typedef _Float64x __attribute__((__vector_size__(16))) v128f128;
+typedef _Decimal64 d64;
+char foo0_u8_0;
+v8u8 foo0_v8u8_0;
+__attribute__((__vector_size__(sizeof(char)))) char foo0_v8s8_0;
+__attribute__((__vector_size__(sizeof(long)))) unsigned long v64u64_0;
+_Float16 foo0_f16_0;
+v128f16 foo0_v128f16_0;
+double foo0_f64_0;
+int foo0_f128_0, foo0_v32d32_0, foo0__0;
+d64 foo0_d64_0;
+v8u8 *foo0_ret;
+unsigned __int128 foo0_u128_3;
+v8u8 d;
+void foo0() {
+    v64u64_0 -= foo0_u8_0;
+    v8u8 v8u8_1 = foo0_v8u8_0 % d;
+    v128f128 v128f128_1 = __builtin_convertvector(v64u64_0, v128f128);
+    __int128 u128_2 = ((9223372036854775807 + (__int128) 1) << 4) * foo0_u8_0,
+            u128_r = u128_2 + foo0_u128_3 + foo0_f128_0 + (__int128)foo0_d64_0;
+    v16f16 v16f16_1 = __builtin_convertvector(foo0_v8s8_0, v16f16);
+    v128f16 v128f16_1 = 0 > foo0_v128f16_0;
+    v128u8 v128u8_r = (v128u8)v128f16_1 + (v128u8)v128f128_1;
+    v64u8 v64u8_r = ((union {
+                     v128u8 a;
+                     v64u8 b;
+                     })v128u8_r)
+    .b +
+      (v64u8)v64u64_0;
+    v32u8 v32u8_r = ((union {
+                     v64u8 a;
+                     v32u8 b;
+                     })v64u8_r)
+    .b +
+      (v32u8)foo0_v32d32_0;
+    v16u8 v16u8_r = ((union {
+                     v32u8 a;
+                     v16u8 b;
+                     })v32u8_r)
+    .b +
+      (v16u8)v16f16_1;
+    v8u8 v8u8_r = ((union {
+                   v16u8 a;
+                   v8u8 b;
+                   })v16u8_r)
+    .b +
+      foo0_v8u8_0 + v8u8_1 + foo0_v8s8_0;
+    long u64_r = u128_r + foo0_f64_0 + (unsigned long)foo0__0;
+    short u16_r = u64_r + foo0_f16_0;
+    char u8_r = u16_r + foo0_u8_0;
+    *foo0_ret = v8u8_r + u8_r;
+}