(any_extend:VWEXTI
(match_operand:<V_DOUBLE_TRUNC> 3 "register_operand" " vr, vr"))
(match_operand:VWEXTI 2 "vector_merge_operand" " vu, 0")))]
- "TARGET_VECTOR"
+ "TARGET_VECTOR && !TARGET_XTHEADVECTOR"
"v<sz>ext.vf2\t%0,%3%p1"
[(set_attr "type" "vext")
(set_attr "mode" "<MODE>")])
(any_extend:VQEXTI
(match_operand:<V_QUAD_TRUNC> 3 "register_operand" " vr, vr"))
(match_operand:VQEXTI 2 "vector_merge_operand" " vu, 0")))]
- "TARGET_VECTOR"
+ "TARGET_VECTOR && !TARGET_XTHEADVECTOR"
"v<sz>ext.vf4\t%0,%3%p1"
[(set_attr "type" "vext")
(set_attr "mode" "<MODE>")])
(any_extend:VOEXTI
(match_operand:<V_OCT_TRUNC> 3 "register_operand" " vr, vr"))
(match_operand:VOEXTI 2 "vector_merge_operand" " vu, 0")))]
- "TARGET_VECTOR"
+ "TARGET_VECTOR && !TARGET_XTHEADVECTOR"
"v<sz>ext.vf8\t%0,%3%p1"
[(set_attr "type" "vext")
(set_attr "mode" "<MODE>")])
--- /dev/null
+/* { dg-do compile { target { rv64 } } } */
+/* { dg-options "-march=rv64gc_xtheadvector -mabi=lp64d -O3" } */
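+
+/* XTheadVector has no vsext.vf2 instruction, so the widening sequence
+   below must not be compiled down to it.  */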
+
+#include <riscv_vector.h>
+
+struct a
+{
+ int b[];
+} c (vint32m4_t), d;
+
+char e;
+char *f;
+
+void g ()
+{
+ int h;
+ vint32m4_t i;
+ vint8m1_t j = __riscv_vlse8_v_i8m1 (&e, d.b[3], h);
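+ /* A widening add of zero is just a sign extension, which the compiler
+    may otherwise combine into vsext.vf2.  */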
+ vint16m2_t k = __riscv_vwadd_vx_i16m2 (j, 0, h);
+ i = __riscv_vwmacc_vx_i32m4 (i, f[0], k, h);
+ c (i);
+}
+
+/* { dg-final { scan-assembler-not {th\.vsext\.vf2} } } */
--- /dev/null
+/* { dg-do compile { target { rv64 } } } */
+/* { dg-options "-march=rv64gc_xtheadvector -mabi=lp64d -O3" } */
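+
+/* XTheadVector has no vzext.vf2 instruction, so the widening sequence
+   below must not be compiled down to it.  */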
+
+#include <riscv_vector.h>
+
+struct a
+{
+ int b[];
+} c (vuint32m4_t), d;
+
+char e;
+char *f;
+
+void g ()
+{
+ int h;
+ vuint32m4_t i;
+ vuint8m1_t j = __riscv_vlse8_v_u8m1 (&e, d.b[3], h);
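+ /* A widening add of zero is just a zero extension, which the compiler
+    may otherwise combine into vzext.vf2.  */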
+ vuint16m2_t k = __riscv_vwaddu_vx_u16m2 (j, 0, h);
+ i = __riscv_vwmaccu_vx_u32m4 (i, f[0], k, h);
+ c (i);
+}
+
+/* { dg-final { scan-assembler-not {th\.vzext\.vf2} } } */