git.ipfire.org Git - thirdparty/gcc.git/commitdiff
RISC-V: Fix ICE for vector single-width integer multiply-add intrinsics
authorJin Ma <jinma@linux.alibaba.com>
Sat, 17 Aug 2024 16:18:03 +0000 (10:18 -0600)
committerJeff Law <jlaw@ventanamicro.com>
Sat, 17 Aug 2024 16:18:03 +0000 (10:18 -0600)
When rs1 is the immediate 0, the following ICE occurs:

error: unrecognizable insn:
(insn 8 5 12 2 (set (reg:RVVM1DI 134 [ <retval> ])
        (if_then_else:RVVM1DI (unspec:RVVMF64BI [
                    (const_vector:RVVMF64BI repeat [
                            (const_int 1 [0x1])
                        ])
                    (reg/v:DI 137 [ vl ])
                    (const_int 2 [0x2]) repeated x2
                    (const_int 0 [0])
                    (reg:SI 66 vl)
                    (reg:SI 67 vtype)
                ] UNSPEC_VPREDICATE)
            (plus:RVVM1DI (mult:RVVM1DI (vec_duplicate:RVVM1DI (const_int 0 [0]))
                    (reg/v:RVVM1DI 136 [ vs2 ]))
                (reg/v:RVVM1DI 135 [ vd ]))
            (reg/v:RVVM1DI 135 [ vd ])))

gcc/ChangeLog:

* config/riscv/vector.md: Allow scalar operand to be 0.

gcc/testsuite/ChangeLog:

* gcc.target/riscv/rvv/base/bug-7.c: New test.
* gcc.target/riscv/rvv/base/bug-8.c: New test.

gcc/config/riscv/vector.md
gcc/testsuite/gcc.target/riscv/rvv/base/bug-7.c [new file with mode: 0644]
gcc/testsuite/gcc.target/riscv/rvv/base/bug-8.c [new file with mode: 0644]

index aad34b3aa24ca4cf6185e9b3b74a8251f7f3b63d..211bbc0bff0aa57b75566205c5d36e0ab394297e 100644 (file)
          (plus:V_VLSI
            (mult:V_VLSI
              (vec_duplicate:V_VLSI
-               (match_operand:<VEL> 2 "register_operand" "  r,   r,  r,   r"))
+               (match_operand:<VEL> 2 "reg_or_0_operand" " rJ,  rJ, rJ,  rJ"))
              (match_operand:V_VLSI 3 "register_operand"      "  0,  vr,  0,  vr"))
            (match_operand:V_VLSI 4 "register_operand"        " vr,  vr, vr,  vr"))
          (match_dup 3)))]
   "TARGET_VECTOR"
   "@
-   vmadd.vx\t%0,%2,%4%p1
-   vmv%m3r.v\t%0,%3\;vmadd.vx\t%0,%2,%4%p1
-   vmadd.vx\t%0,%2,%4%p1
-   vmv%m3r.v\t%0,%3\;vmadd.vx\t%0,%2,%4%p1"
+   vmadd.vx\t%0,%z2,%4%p1
+   vmv%m3r.v\t%0,%3\;vmadd.vx\t%0,%z2,%4%p1
+   vmadd.vx\t%0,%z2,%4%p1
+   vmv%m3r.v\t%0,%3\;vmadd.vx\t%0,%z2,%4%p1"
   [(set_attr "type" "vimuladd")
    (set_attr "mode" "<MODE>")
    (set_attr "merge_op_idx" "3")
          (plus:V_VLSI
            (mult:V_VLSI
              (vec_duplicate:V_VLSI
-               (match_operand:<VEL> 2 "register_operand" "  r,   r,  r,   r"))
+               (match_operand:<VEL> 2 "reg_or_0_operand" " rJ,  rJ, rJ,  rJ"))
              (match_operand:V_VLSI 3 "register_operand"      " vr,  vr, vr,  vr"))
            (match_operand:V_VLSI 4 "register_operand"        "  0,  vr,  0,  vr"))
          (match_dup 4)))]
   "TARGET_VECTOR"
   "@
-   vmacc.vx\t%0,%2,%3%p1
-   vmv%m4r.v\t%0,%4\;vmacc.vx\t%0,%2,%3%p1
-   vmacc.vx\t%0,%2,%3%p1
-   vmv%m4r.v\t%0,%4\;vmacc.vx\t%0,%2,%3%p1"
+   vmacc.vx\t%0,%z2,%3%p1
+   vmv%m4r.v\t%0,%4\;vmacc.vx\t%0,%z2,%3%p1
+   vmacc.vx\t%0,%z2,%3%p1
+   vmv%m4r.v\t%0,%4\;vmacc.vx\t%0,%z2,%3%p1"
   [(set_attr "type" "vimuladd")
    (set_attr "mode" "<MODE>")
    (set_attr "merge_op_idx" "4")
            (mult:V_VLSI_D
              (vec_duplicate:V_VLSI_D
                (sign_extend:<VEL>
-                 (match_operand:<VSUBEL> 2 "register_operand" "  r,   r,  r,   r")))
+                 (match_operand:<VSUBEL> 2 "reg_or_0_operand" " rJ,  rJ, rJ,  rJ")))
              (match_operand:V_VLSI_D 3 "register_operand"         "  0,  vr,  0,  vr"))
            (match_operand:V_VLSI_D 4 "register_operand"           " vr,  vr, vr,  vr"))
          (match_dup 3)))]
   "TARGET_VECTOR && !TARGET_64BIT"
   "@
-   vmadd.vx\t%0,%2,%4%p1
-   vmv%m2r.v\t%0,%2\;vmadd.vx\t%0,%2,%4%p1
-   vmadd.vx\t%0,%2,%4%p1
-   vmv%m2r.v\t%0,%2\;vmadd.vx\t%0,%2,%4%p1"
+   vmadd.vx\t%0,%z2,%4%p1
+   vmv%m2r.v\t%0,%z2\;vmadd.vx\t%0,%z2,%4%p1
+   vmadd.vx\t%0,%z2,%4%p1
+   vmv%m2r.v\t%0,%z2\;vmadd.vx\t%0,%z2,%4%p1"
   [(set_attr "type" "vimuladd")
    (set_attr "mode" "<MODE>")
    (set_attr "merge_op_idx" "3")
            (mult:V_VLSI_D
              (vec_duplicate:V_VLSI_D
                (sign_extend:<VEL>
-                 (match_operand:<VSUBEL> 2 "register_operand" "  r,   r,  r,   r")))
+                 (match_operand:<VSUBEL> 2 "reg_or_0_operand" " rJ,  rJ, rJ,  rJ")))
              (match_operand:V_VLSI_D 3 "register_operand"         " vr,  vr, vr,  vr"))
            (match_operand:V_VLSI_D 4 "register_operand"           "  0,  vr,  0,  vr"))
          (match_dup 4)))]
   "TARGET_VECTOR && !TARGET_64BIT"
   "@
-   vmacc.vx\t%0,%2,%3%p1
-   vmv%m4r.v\t%0,%4\;vmacc.vx\t%0,%2,%3%p1
-   vmacc.vx\t%0,%2,%3%p1
-   vmv%m4r.v\t%0,%4\;vmacc.vx\t%0,%2,%3%p1"
+   vmacc.vx\t%0,%z2,%3%p1
+   vmv%m4r.v\t%0,%4\;vmacc.vx\t%0,%z2,%3%p1
+   vmacc.vx\t%0,%z2,%3%p1
+   vmv%m4r.v\t%0,%4\;vmacc.vx\t%0,%z2,%3%p1"
   [(set_attr "type" "vimuladd")
    (set_attr "mode" "<MODE>")
    (set_attr "merge_op_idx" "4")
            (match_operand:V_VLSI 4 "register_operand"        " vr,  vr, vr,  vr")
            (mult:V_VLSI
              (vec_duplicate:V_VLSI
-               (match_operand:<VEL> 2 "register_operand" "  r,   r,  r,   r"))
+               (match_operand:<VEL> 2 "reg_or_0_operand" " rJ,  rJ, rJ,  rJ"))
              (match_operand:V_VLSI 3 "register_operand"      "  0,  vr,  0,  vr")))
          (match_dup 3)))]
   "TARGET_VECTOR"
   "@
-   vnmsub.vx\t%0,%2,%4%p1
-   vmv%m3r.v\t%0,%3\;vnmsub.vx\t%0,%2,%4%p1
-   vnmsub.vx\t%0,%2,%4%p1
-   vmv%m3r.v\t%0,%3\;vnmsub.vx\t%0,%2,%4%p1"
+   vnmsub.vx\t%0,%z2,%4%p1
+   vmv%m3r.v\t%0,%3\;vnmsub.vx\t%0,%z2,%4%p1
+   vnmsub.vx\t%0,%z2,%4%p1
+   vmv%m3r.v\t%0,%3\;vnmsub.vx\t%0,%z2,%4%p1"
   [(set_attr "type" "vimuladd")
    (set_attr "mode" "<MODE>")
    (set_attr "merge_op_idx" "3")
            (match_operand:V_VLSI 4 "register_operand"        "  0,  vr,  0,  vr")
            (mult:V_VLSI
              (vec_duplicate:V_VLSI
-               (match_operand:<VEL> 2 "register_operand" "  r,   r,  r,   r"))
+               (match_operand:<VEL> 2 "reg_or_0_operand" " rJ,  rJ, rJ,  rJ"))
              (match_operand:V_VLSI 3 "register_operand"      " vr,  vr, vr,  vr")))
          (match_dup 4)))]
   "TARGET_VECTOR"
   "@
-   vnmsac.vx\t%0,%2,%3%p1
-   vmv%m4r.v\t%0,%4\;vnmsac.vx\t%0,%2,%3%p1
-   vnmsac.vx\t%0,%2,%3%p1
-   vmv%m4r.v\t%0,%4\;vnmsac.vx\t%0,%2,%3%p1"
+   vnmsac.vx\t%0,%z2,%3%p1
+   vmv%m4r.v\t%0,%4\;vnmsac.vx\t%0,%z2,%3%p1
+   vnmsac.vx\t%0,%z2,%3%p1
+   vmv%m4r.v\t%0,%4\;vnmsac.vx\t%0,%z2,%3%p1"
   [(set_attr "type" "vimuladd")
    (set_attr "mode" "<MODE>")
    (set_attr "merge_op_idx" "4")
            (mult:V_VLSI_D
              (vec_duplicate:V_VLSI_D
                (sign_extend:<VEL>
-                 (match_operand:<VSUBEL> 2 "register_operand" "  r,   r,  r,   r")))
+                 (match_operand:<VSUBEL> 2 "reg_or_0_operand" " rJ,  rJ, rJ,  rJ")))
              (match_operand:V_VLSI_D 3 "register_operand"         "  0,  vr,  0,  vr")))
          (match_dup 3)))]
   "TARGET_VECTOR && !TARGET_64BIT"
   "@
-   vnmsub.vx\t%0,%2,%4%p1
-   vmv%m3r.v\t%0,%3\;vnmsub.vx\t%0,%2,%4%p1
-   vnmsub.vx\t%0,%2,%4%p1
-   vmv%m3r.v\t%0,%3\;vnmsub.vx\t%0,%2,%4%p1"
+   vnmsub.vx\t%0,%z2,%4%p1
+   vmv%m3r.v\t%0,%3\;vnmsub.vx\t%0,%z2,%4%p1
+   vnmsub.vx\t%0,%z2,%4%p1
+   vmv%m3r.v\t%0,%3\;vnmsub.vx\t%0,%z2,%4%p1"
   [(set_attr "type" "vimuladd")
    (set_attr "mode" "<MODE>")
    (set_attr "merge_op_idx" "3")
            (mult:V_VLSI_D
              (vec_duplicate:V_VLSI_D
                (sign_extend:<VEL>
-                 (match_operand:<VSUBEL> 2 "register_operand" "  r,   r,  r,   r")))
+                 (match_operand:<VSUBEL> 2 "reg_or_0_operand" " rJ,  rJ, rJ,  rJ")))
              (match_operand:V_VLSI_D 3 "register_operand"         " vr,  vr, vr,  vr")))
          (match_dup 4)))]
   "TARGET_VECTOR && !TARGET_64BIT"
   "@
-   vnmsac.vx\t%0,%2,%3%p1
-   vmv%m4r.v\t%0,%4\;vnmsac.vx\t%0,%2,%3%p1
-   vnmsac.vx\t%0,%2,%3%p1
-   vmv%m4r.v\t%0,%4\;vnmsac.vx\t%0,%2,%3%p1"
+   vnmsac.vx\t%0,%z2,%3%p1
+   vmv%m4r.v\t%0,%4\;vnmsac.vx\t%0,%z2,%3%p1
+   vnmsac.vx\t%0,%z2,%3%p1
+   vmv%m4r.v\t%0,%4\;vnmsac.vx\t%0,%z2,%3%p1"
   [(set_attr "type" "vimuladd")
    (set_attr "mode" "<MODE>")
    (set_attr "merge_op_idx" "4")
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/bug-7.c b/gcc/testsuite/gcc.target/riscv/rvv/base/bug-7.c
new file mode 100644 (file)
index 0000000..28766ce
--- /dev/null
@@ -0,0 +1,26 @@
+/* Test that we do not have ice when compile */
+/* { dg-do assemble } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O2"  { target { rv64 } } } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O2"  { target { rv32 } } } */
+
+#include <riscv_vector.h>
+
+vint64m1_t f1 (vint64m1_t vd, vint64m1_t vs2, size_t vl)
+{
+  return __riscv_vmacc_vx_i64m1 (vd, 0, vs2, vl);
+}
+
+vint64m1_t f2 (vint64m1_t vd, vint64m1_t vs2, size_t vl)
+{
+  return __riscv_vnmsac_vx_i64m1 (vd, 0, vs2, vl);
+}
+
+vint64m8_t f3 (vint64m8_t vd, vint64m8_t vs2, size_t vl)
+{
+  return __riscv_vmadd_vx_i64m8 (vd, 0, vs2, vl);
+}
+
+vint64m1_t f4 (vint64m1_t vd, vint64m1_t vs2, size_t vl)
+{
+  return __riscv_vnmsub_vx_i64m1 (vd, 0, vs2, vl);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/bug-8.c b/gcc/testsuite/gcc.target/riscv/rvv/base/bug-8.c
new file mode 100644 (file)
index 0000000..975f755
--- /dev/null
@@ -0,0 +1,26 @@
+/* Test that we do not have ice when compile */
+/* { dg-do assemble } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O0"  { target { rv64 } } } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O0"  { target { rv32 } } } */
+
+#include <riscv_vector.h>
+
+vint64m1_t f1 (vint64m1_t vd, vint64m1_t vs2, size_t vl)
+{
+  return __riscv_vmacc_vx_i64m1 (vd, 0, vs2, vl);
+}
+
+vint64m1_t f2 (vint64m1_t vd, vint64m1_t vs2, size_t vl)
+{
+  return __riscv_vnmsac_vx_i64m1 (vd, 0, vs2, vl);
+}
+
+vint64m8_t f3 (vint64m8_t vd, vint64m8_t vs2, size_t vl)
+{
+  return __riscv_vmadd_vx_i64m8 (vd, 0, vs2, vl);
+}
+
+vint64m1_t f4 (vint64m1_t vd, vint64m1_t vs2, size_t vl)
+{
+  return __riscv_vnmsub_vx_i64m1 (vd, 0, vs2, vl);
+}