From: Ju-Zhe Zhong
Date: Fri, 3 Feb 2023 07:34:31 +0000 (+0800)
Subject: RISC-V: Add vand.vx C API tests
X-Git-Tag: basepoints/gcc-14~1419
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=9f35eb5d51bb4a3857633690c951ecb344a18e84;p=thirdparty%2Fgcc.git

RISC-V: Add vand.vx C API tests

gcc/testsuite/ChangeLog:

        * gcc.target/riscv/rvv/base/vand_vx_m_rv32-1.c: New test.
        * gcc.target/riscv/rvv/base/vand_vx_m_rv32-2.c: New test.
        * gcc.target/riscv/rvv/base/vand_vx_m_rv32-3.c: New test.
        * gcc.target/riscv/rvv/base/vand_vx_m_rv64-1.c: New test.
        * gcc.target/riscv/rvv/base/vand_vx_m_rv64-2.c: New test.
        * gcc.target/riscv/rvv/base/vand_vx_m_rv64-3.c: New test.
        * gcc.target/riscv/rvv/base/vand_vx_mu_rv32-1.c: New test.
        * gcc.target/riscv/rvv/base/vand_vx_mu_rv32-2.c: New test.
        * gcc.target/riscv/rvv/base/vand_vx_mu_rv32-3.c: New test.
        * gcc.target/riscv/rvv/base/vand_vx_mu_rv64-1.c: New test.
        * gcc.target/riscv/rvv/base/vand_vx_mu_rv64-2.c: New test.
        * gcc.target/riscv/rvv/base/vand_vx_mu_rv64-3.c: New test.
        * gcc.target/riscv/rvv/base/vand_vx_rv32-1.c: New test.
        * gcc.target/riscv/rvv/base/vand_vx_rv32-2.c: New test.
        * gcc.target/riscv/rvv/base/vand_vx_rv32-3.c: New test.
        * gcc.target/riscv/rvv/base/vand_vx_rv64-1.c: New test.
        * gcc.target/riscv/rvv/base/vand_vx_rv64-2.c: New test.
        * gcc.target/riscv/rvv/base/vand_vx_rv64-3.c: New test.
        * gcc.target/riscv/rvv/base/vand_vx_tu_rv32-1.c: New test.
        * gcc.target/riscv/rvv/base/vand_vx_tu_rv32-2.c: New test.
        * gcc.target/riscv/rvv/base/vand_vx_tu_rv32-3.c: New test.
        * gcc.target/riscv/rvv/base/vand_vx_tu_rv64-1.c: New test.
        * gcc.target/riscv/rvv/base/vand_vx_tu_rv64-2.c: New test.
        * gcc.target/riscv/rvv/base/vand_vx_tu_rv64-3.c: New test.
        * gcc.target/riscv/rvv/base/vand_vx_tum_rv32-1.c: New test.
        * gcc.target/riscv/rvv/base/vand_vx_tum_rv32-2.c: New test.
        * gcc.target/riscv/rvv/base/vand_vx_tum_rv32-3.c: New test.
        * gcc.target/riscv/rvv/base/vand_vx_tum_rv64-1.c: New test.
        * gcc.target/riscv/rvv/base/vand_vx_tum_rv64-2.c: New test.
        * gcc.target/riscv/rvv/base/vand_vx_tum_rv64-3.c: New test.
        * gcc.target/riscv/rvv/base/vand_vx_tumu_rv32-1.c: New test.
        * gcc.target/riscv/rvv/base/vand_vx_tumu_rv32-2.c: New test.
        * gcc.target/riscv/rvv/base/vand_vx_tumu_rv32-3.c: New test.
        * gcc.target/riscv/rvv/base/vand_vx_tumu_rv64-1.c: New test.
        * gcc.target/riscv/rvv/base/vand_vx_tumu_rv64-2.c: New test.
        * gcc.target/riscv/rvv/base/vand_vx_tumu_rv64-3.c: New test.
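All of the new files follow the same shape: each test function wraps a single __riscv_vand_vx_* variant (masked and tail/mask-policy forms, with the vector length passed either through the vl parameter or as the constant 31 or 32), and dg-final directives count the expected vsetvli/vsetivli + vand.vx pairs in the generated assembly. A representative sketch of that pattern, assembled from the files in the diff below (the concrete element type, policy suffix, vl argument and directive counts differ per file):

    #include "riscv_vector.h"

    /* Wrap one masked vand.vx intrinsic so the expected
       vsetvli + vand.vx sequence can be matched in the output.  */
    vint32m1_t
    test___riscv_vand_vx_i32m1_m (vbool32_t mask, vint32m1_t op1,
                                  int32_t op2, size_t vl)
    {
      return __riscv_vand_vx_i32m1_m (mask, op1, op2, vl);
    }

    /* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */

The rv32 variants additionally expect some 64-bit element cases to fall back to vand.vv (the scalar operand does not fit in a 32-bit register), which is why those files also count vand.vv matches.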
--- diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_m_rv32-1.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_m_rv32-1.c new file mode 100644 index 000000000000..8f4df862c976 --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_m_rv32-1.c @@ -0,0 +1,289 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */ + +#include "riscv_vector.h" + +vint8mf8_t test___riscv_vand_vx_i8mf8_m(vbool64_t mask,vint8mf8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf8_m(mask,op1,op2,vl); +} + + +vint8mf4_t test___riscv_vand_vx_i8mf4_m(vbool32_t mask,vint8mf4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf4_m(mask,op1,op2,vl); +} + + +vint8mf2_t test___riscv_vand_vx_i8mf2_m(vbool16_t mask,vint8mf2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf2_m(mask,op1,op2,vl); +} + + +vint8m1_t test___riscv_vand_vx_i8m1_m(vbool8_t mask,vint8m1_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m1_m(mask,op1,op2,vl); +} + + +vint8m2_t test___riscv_vand_vx_i8m2_m(vbool4_t mask,vint8m2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m2_m(mask,op1,op2,vl); +} + + +vint8m4_t test___riscv_vand_vx_i8m4_m(vbool2_t mask,vint8m4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m4_m(mask,op1,op2,vl); +} + + +vint8m8_t test___riscv_vand_vx_i8m8_m(vbool1_t mask,vint8m8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m8_m(mask,op1,op2,vl); +} + + +vint16mf4_t test___riscv_vand_vx_i16mf4_m(vbool64_t mask,vint16mf4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16mf4_m(mask,op1,op2,vl); +} + + +vint16mf2_t test___riscv_vand_vx_i16mf2_m(vbool32_t mask,vint16mf2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16mf2_m(mask,op1,op2,vl); +} + + +vint16m1_t test___riscv_vand_vx_i16m1_m(vbool16_t mask,vint16m1_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m1_m(mask,op1,op2,vl); +} + + +vint16m2_t test___riscv_vand_vx_i16m2_m(vbool8_t mask,vint16m2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m2_m(mask,op1,op2,vl); +} + + +vint16m4_t test___riscv_vand_vx_i16m4_m(vbool4_t mask,vint16m4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m4_m(mask,op1,op2,vl); +} + + +vint16m8_t test___riscv_vand_vx_i16m8_m(vbool2_t mask,vint16m8_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m8_m(mask,op1,op2,vl); +} + + +vint32mf2_t test___riscv_vand_vx_i32mf2_m(vbool64_t mask,vint32mf2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32mf2_m(mask,op1,op2,vl); +} + + +vint32m1_t test___riscv_vand_vx_i32m1_m(vbool32_t mask,vint32m1_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m1_m(mask,op1,op2,vl); +} + + +vint32m2_t test___riscv_vand_vx_i32m2_m(vbool16_t mask,vint32m2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m2_m(mask,op1,op2,vl); +} + + +vint32m4_t test___riscv_vand_vx_i32m4_m(vbool8_t mask,vint32m4_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m4_m(mask,op1,op2,vl); +} + + +vint32m8_t test___riscv_vand_vx_i32m8_m(vbool4_t mask,vint32m8_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m8_m(mask,op1,op2,vl); +} + + +vint64m1_t test___riscv_vand_vx_i64m1_m(vbool64_t mask,vint64m1_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m1_m(mask,op1,op2,vl); +} + + +vint64m2_t test___riscv_vand_vx_i64m2_m(vbool32_t mask,vint64m2_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m2_m(mask,op1,op2,vl); +} + + +vint64m4_t 
test___riscv_vand_vx_i64m4_m(vbool16_t mask,vint64m4_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m4_m(mask,op1,op2,vl); +} + + +vint64m8_t test___riscv_vand_vx_i64m8_m(vbool8_t mask,vint64m8_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m8_m(mask,op1,op2,vl); +} + + +vuint8mf8_t test___riscv_vand_vx_u8mf8_m(vbool64_t mask,vuint8mf8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf8_m(mask,op1,op2,vl); +} + + +vuint8mf4_t test___riscv_vand_vx_u8mf4_m(vbool32_t mask,vuint8mf4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf4_m(mask,op1,op2,vl); +} + + +vuint8mf2_t test___riscv_vand_vx_u8mf2_m(vbool16_t mask,vuint8mf2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf2_m(mask,op1,op2,vl); +} + + +vuint8m1_t test___riscv_vand_vx_u8m1_m(vbool8_t mask,vuint8m1_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m1_m(mask,op1,op2,vl); +} + + +vuint8m2_t test___riscv_vand_vx_u8m2_m(vbool4_t mask,vuint8m2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m2_m(mask,op1,op2,vl); +} + + +vuint8m4_t test___riscv_vand_vx_u8m4_m(vbool2_t mask,vuint8m4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m4_m(mask,op1,op2,vl); +} + + +vuint8m8_t test___riscv_vand_vx_u8m8_m(vbool1_t mask,vuint8m8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m8_m(mask,op1,op2,vl); +} + + +vuint16mf4_t test___riscv_vand_vx_u16mf4_m(vbool64_t mask,vuint16mf4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16mf4_m(mask,op1,op2,vl); +} + + +vuint16mf2_t test___riscv_vand_vx_u16mf2_m(vbool32_t mask,vuint16mf2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16mf2_m(mask,op1,op2,vl); +} + + +vuint16m1_t test___riscv_vand_vx_u16m1_m(vbool16_t mask,vuint16m1_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m1_m(mask,op1,op2,vl); +} + + +vuint16m2_t test___riscv_vand_vx_u16m2_m(vbool8_t mask,vuint16m2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m2_m(mask,op1,op2,vl); +} + + +vuint16m4_t test___riscv_vand_vx_u16m4_m(vbool4_t mask,vuint16m4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m4_m(mask,op1,op2,vl); +} + + +vuint16m8_t test___riscv_vand_vx_u16m8_m(vbool2_t mask,vuint16m8_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m8_m(mask,op1,op2,vl); +} + + +vuint32mf2_t test___riscv_vand_vx_u32mf2_m(vbool64_t mask,vuint32mf2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32mf2_m(mask,op1,op2,vl); +} + + +vuint32m1_t test___riscv_vand_vx_u32m1_m(vbool32_t mask,vuint32m1_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m1_m(mask,op1,op2,vl); +} + + +vuint32m2_t test___riscv_vand_vx_u32m2_m(vbool16_t mask,vuint32m2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m2_m(mask,op1,op2,vl); +} + + +vuint32m4_t test___riscv_vand_vx_u32m4_m(vbool8_t mask,vuint32m4_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m4_m(mask,op1,op2,vl); +} + + +vuint32m8_t test___riscv_vand_vx_u32m8_m(vbool4_t mask,vuint32m8_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m8_m(mask,op1,op2,vl); +} + + +vuint64m1_t test___riscv_vand_vx_u64m1_m(vbool64_t mask,vuint64m1_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m1_m(mask,op1,op2,vl); +} + + +vuint64m2_t test___riscv_vand_vx_u64m2_m(vbool32_t mask,vuint64m2_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m2_m(mask,op1,op2,vl); +} + + +vuint64m4_t test___riscv_vand_vx_u64m4_m(vbool16_t mask,vuint64m4_t op1,uint64_t op2,size_t vl) +{ + return 
__riscv_vand_vx_u64m4_m(mask,op1,op2,vl); +} + + +vuint64m8_t test___riscv_vand_vx_u64m8_m(vbool8_t mask,vuint64m8_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m8_m(mask,op1,op2,vl); +} + + + +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vand\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t} 8 } } */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_m_rv32-2.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_m_rv32-2.c new file mode 100644 index 000000000000..f43197197da5 --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_m_rv32-2.c @@ -0,0 +1,289 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv32gcv 
-mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */ + +#include "riscv_vector.h" + +vint8mf8_t test___riscv_vand_vx_i8mf8_m(vbool64_t mask,vint8mf8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf8_m(mask,op1,op2,31); +} + + +vint8mf4_t test___riscv_vand_vx_i8mf4_m(vbool32_t mask,vint8mf4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf4_m(mask,op1,op2,31); +} + + +vint8mf2_t test___riscv_vand_vx_i8mf2_m(vbool16_t mask,vint8mf2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf2_m(mask,op1,op2,31); +} + + +vint8m1_t test___riscv_vand_vx_i8m1_m(vbool8_t mask,vint8m1_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m1_m(mask,op1,op2,31); +} + + +vint8m2_t test___riscv_vand_vx_i8m2_m(vbool4_t mask,vint8m2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m2_m(mask,op1,op2,31); +} + + +vint8m4_t test___riscv_vand_vx_i8m4_m(vbool2_t mask,vint8m4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m4_m(mask,op1,op2,31); +} + + +vint8m8_t test___riscv_vand_vx_i8m8_m(vbool1_t mask,vint8m8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m8_m(mask,op1,op2,31); +} + + +vint16mf4_t test___riscv_vand_vx_i16mf4_m(vbool64_t mask,vint16mf4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16mf4_m(mask,op1,op2,31); +} + + +vint16mf2_t test___riscv_vand_vx_i16mf2_m(vbool32_t mask,vint16mf2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16mf2_m(mask,op1,op2,31); +} + + +vint16m1_t test___riscv_vand_vx_i16m1_m(vbool16_t mask,vint16m1_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m1_m(mask,op1,op2,31); +} + + +vint16m2_t test___riscv_vand_vx_i16m2_m(vbool8_t mask,vint16m2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m2_m(mask,op1,op2,31); +} + + +vint16m4_t test___riscv_vand_vx_i16m4_m(vbool4_t mask,vint16m4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m4_m(mask,op1,op2,31); +} + + +vint16m8_t test___riscv_vand_vx_i16m8_m(vbool2_t mask,vint16m8_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m8_m(mask,op1,op2,31); +} + + +vint32mf2_t test___riscv_vand_vx_i32mf2_m(vbool64_t mask,vint32mf2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32mf2_m(mask,op1,op2,31); +} + + +vint32m1_t test___riscv_vand_vx_i32m1_m(vbool32_t mask,vint32m1_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m1_m(mask,op1,op2,31); +} + + +vint32m2_t test___riscv_vand_vx_i32m2_m(vbool16_t mask,vint32m2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m2_m(mask,op1,op2,31); +} + + +vint32m4_t test___riscv_vand_vx_i32m4_m(vbool8_t mask,vint32m4_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m4_m(mask,op1,op2,31); +} + + +vint32m8_t test___riscv_vand_vx_i32m8_m(vbool4_t mask,vint32m8_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m8_m(mask,op1,op2,31); +} + + +vint64m1_t test___riscv_vand_vx_i64m1_m(vbool64_t mask,vint64m1_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m1_m(mask,op1,op2,31); +} + + +vint64m2_t test___riscv_vand_vx_i64m2_m(vbool32_t mask,vint64m2_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m2_m(mask,op1,op2,31); +} + + +vint64m4_t test___riscv_vand_vx_i64m4_m(vbool16_t mask,vint64m4_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m4_m(mask,op1,op2,31); +} + + +vint64m8_t test___riscv_vand_vx_i64m8_m(vbool8_t mask,vint64m8_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m8_m(mask,op1,op2,31); +} + + +vuint8mf8_t test___riscv_vand_vx_u8mf8_m(vbool64_t mask,vuint8mf8_t 
op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf8_m(mask,op1,op2,31); +} + + +vuint8mf4_t test___riscv_vand_vx_u8mf4_m(vbool32_t mask,vuint8mf4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf4_m(mask,op1,op2,31); +} + + +vuint8mf2_t test___riscv_vand_vx_u8mf2_m(vbool16_t mask,vuint8mf2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf2_m(mask,op1,op2,31); +} + + +vuint8m1_t test___riscv_vand_vx_u8m1_m(vbool8_t mask,vuint8m1_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m1_m(mask,op1,op2,31); +} + + +vuint8m2_t test___riscv_vand_vx_u8m2_m(vbool4_t mask,vuint8m2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m2_m(mask,op1,op2,31); +} + + +vuint8m4_t test___riscv_vand_vx_u8m4_m(vbool2_t mask,vuint8m4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m4_m(mask,op1,op2,31); +} + + +vuint8m8_t test___riscv_vand_vx_u8m8_m(vbool1_t mask,vuint8m8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m8_m(mask,op1,op2,31); +} + + +vuint16mf4_t test___riscv_vand_vx_u16mf4_m(vbool64_t mask,vuint16mf4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16mf4_m(mask,op1,op2,31); +} + + +vuint16mf2_t test___riscv_vand_vx_u16mf2_m(vbool32_t mask,vuint16mf2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16mf2_m(mask,op1,op2,31); +} + + +vuint16m1_t test___riscv_vand_vx_u16m1_m(vbool16_t mask,vuint16m1_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m1_m(mask,op1,op2,31); +} + + +vuint16m2_t test___riscv_vand_vx_u16m2_m(vbool8_t mask,vuint16m2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m2_m(mask,op1,op2,31); +} + + +vuint16m4_t test___riscv_vand_vx_u16m4_m(vbool4_t mask,vuint16m4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m4_m(mask,op1,op2,31); +} + + +vuint16m8_t test___riscv_vand_vx_u16m8_m(vbool2_t mask,vuint16m8_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m8_m(mask,op1,op2,31); +} + + +vuint32mf2_t test___riscv_vand_vx_u32mf2_m(vbool64_t mask,vuint32mf2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32mf2_m(mask,op1,op2,31); +} + + +vuint32m1_t test___riscv_vand_vx_u32m1_m(vbool32_t mask,vuint32m1_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m1_m(mask,op1,op2,31); +} + + +vuint32m2_t test___riscv_vand_vx_u32m2_m(vbool16_t mask,vuint32m2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m2_m(mask,op1,op2,31); +} + + +vuint32m4_t test___riscv_vand_vx_u32m4_m(vbool8_t mask,vuint32m4_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m4_m(mask,op1,op2,31); +} + + +vuint32m8_t test___riscv_vand_vx_u32m8_m(vbool4_t mask,vuint32m8_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m8_m(mask,op1,op2,31); +} + + +vuint64m1_t test___riscv_vand_vx_u64m1_m(vbool64_t mask,vuint64m1_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m1_m(mask,op1,op2,31); +} + + +vuint64m2_t test___riscv_vand_vx_u64m2_m(vbool32_t mask,vuint64m2_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m2_m(mask,op1,op2,31); +} + + +vuint64m4_t test___riscv_vand_vx_u64m4_m(vbool16_t mask,vuint64m4_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m4_m(mask,op1,op2,31); +} + + +vuint64m8_t test___riscv_vand_vx_u64m8_m(vbool8_t mask,vuint64m8_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m8_m(mask,op1,op2,31); +} + + + +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf8,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { 
dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf4,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf2,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m1,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m2,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m4,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m8,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf4,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf2,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m1,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m2,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m4,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m8,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*mf2,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m1,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m2,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m4,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m8,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vand\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t} 8 } } */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_m_rv32-3.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_m_rv32-3.c new file mode 100644 index 000000000000..3143a13b1bab --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_m_rv32-3.c @@ -0,0 +1,289 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */ + +#include "riscv_vector.h" + +vint8mf8_t test___riscv_vand_vx_i8mf8_m(vbool64_t mask,vint8mf8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf8_m(mask,op1,op2,32); +} + + +vint8mf4_t test___riscv_vand_vx_i8mf4_m(vbool32_t mask,vint8mf4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf4_m(mask,op1,op2,32); +} + + +vint8mf2_t test___riscv_vand_vx_i8mf2_m(vbool16_t mask,vint8mf2_t op1,int8_t 
op2,size_t vl) +{ + return __riscv_vand_vx_i8mf2_m(mask,op1,op2,32); +} + + +vint8m1_t test___riscv_vand_vx_i8m1_m(vbool8_t mask,vint8m1_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m1_m(mask,op1,op2,32); +} + + +vint8m2_t test___riscv_vand_vx_i8m2_m(vbool4_t mask,vint8m2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m2_m(mask,op1,op2,32); +} + + +vint8m4_t test___riscv_vand_vx_i8m4_m(vbool2_t mask,vint8m4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m4_m(mask,op1,op2,32); +} + + +vint8m8_t test___riscv_vand_vx_i8m8_m(vbool1_t mask,vint8m8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m8_m(mask,op1,op2,32); +} + + +vint16mf4_t test___riscv_vand_vx_i16mf4_m(vbool64_t mask,vint16mf4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16mf4_m(mask,op1,op2,32); +} + + +vint16mf2_t test___riscv_vand_vx_i16mf2_m(vbool32_t mask,vint16mf2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16mf2_m(mask,op1,op2,32); +} + + +vint16m1_t test___riscv_vand_vx_i16m1_m(vbool16_t mask,vint16m1_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m1_m(mask,op1,op2,32); +} + + +vint16m2_t test___riscv_vand_vx_i16m2_m(vbool8_t mask,vint16m2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m2_m(mask,op1,op2,32); +} + + +vint16m4_t test___riscv_vand_vx_i16m4_m(vbool4_t mask,vint16m4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m4_m(mask,op1,op2,32); +} + + +vint16m8_t test___riscv_vand_vx_i16m8_m(vbool2_t mask,vint16m8_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m8_m(mask,op1,op2,32); +} + + +vint32mf2_t test___riscv_vand_vx_i32mf2_m(vbool64_t mask,vint32mf2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32mf2_m(mask,op1,op2,32); +} + + +vint32m1_t test___riscv_vand_vx_i32m1_m(vbool32_t mask,vint32m1_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m1_m(mask,op1,op2,32); +} + + +vint32m2_t test___riscv_vand_vx_i32m2_m(vbool16_t mask,vint32m2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m2_m(mask,op1,op2,32); +} + + +vint32m4_t test___riscv_vand_vx_i32m4_m(vbool8_t mask,vint32m4_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m4_m(mask,op1,op2,32); +} + + +vint32m8_t test___riscv_vand_vx_i32m8_m(vbool4_t mask,vint32m8_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m8_m(mask,op1,op2,32); +} + + +vint64m1_t test___riscv_vand_vx_i64m1_m(vbool64_t mask,vint64m1_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m1_m(mask,op1,op2,32); +} + + +vint64m2_t test___riscv_vand_vx_i64m2_m(vbool32_t mask,vint64m2_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m2_m(mask,op1,op2,32); +} + + +vint64m4_t test___riscv_vand_vx_i64m4_m(vbool16_t mask,vint64m4_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m4_m(mask,op1,op2,32); +} + + +vint64m8_t test___riscv_vand_vx_i64m8_m(vbool8_t mask,vint64m8_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m8_m(mask,op1,op2,32); +} + + +vuint8mf8_t test___riscv_vand_vx_u8mf8_m(vbool64_t mask,vuint8mf8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf8_m(mask,op1,op2,32); +} + + +vuint8mf4_t test___riscv_vand_vx_u8mf4_m(vbool32_t mask,vuint8mf4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf4_m(mask,op1,op2,32); +} + + +vuint8mf2_t test___riscv_vand_vx_u8mf2_m(vbool16_t mask,vuint8mf2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf2_m(mask,op1,op2,32); +} + + +vuint8m1_t test___riscv_vand_vx_u8m1_m(vbool8_t mask,vuint8m1_t op1,uint8_t 
op2,size_t vl) +{ + return __riscv_vand_vx_u8m1_m(mask,op1,op2,32); +} + + +vuint8m2_t test___riscv_vand_vx_u8m2_m(vbool4_t mask,vuint8m2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m2_m(mask,op1,op2,32); +} + + +vuint8m4_t test___riscv_vand_vx_u8m4_m(vbool2_t mask,vuint8m4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m4_m(mask,op1,op2,32); +} + + +vuint8m8_t test___riscv_vand_vx_u8m8_m(vbool1_t mask,vuint8m8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m8_m(mask,op1,op2,32); +} + + +vuint16mf4_t test___riscv_vand_vx_u16mf4_m(vbool64_t mask,vuint16mf4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16mf4_m(mask,op1,op2,32); +} + + +vuint16mf2_t test___riscv_vand_vx_u16mf2_m(vbool32_t mask,vuint16mf2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16mf2_m(mask,op1,op2,32); +} + + +vuint16m1_t test___riscv_vand_vx_u16m1_m(vbool16_t mask,vuint16m1_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m1_m(mask,op1,op2,32); +} + + +vuint16m2_t test___riscv_vand_vx_u16m2_m(vbool8_t mask,vuint16m2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m2_m(mask,op1,op2,32); +} + + +vuint16m4_t test___riscv_vand_vx_u16m4_m(vbool4_t mask,vuint16m4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m4_m(mask,op1,op2,32); +} + + +vuint16m8_t test___riscv_vand_vx_u16m8_m(vbool2_t mask,vuint16m8_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m8_m(mask,op1,op2,32); +} + + +vuint32mf2_t test___riscv_vand_vx_u32mf2_m(vbool64_t mask,vuint32mf2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32mf2_m(mask,op1,op2,32); +} + + +vuint32m1_t test___riscv_vand_vx_u32m1_m(vbool32_t mask,vuint32m1_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m1_m(mask,op1,op2,32); +} + + +vuint32m2_t test___riscv_vand_vx_u32m2_m(vbool16_t mask,vuint32m2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m2_m(mask,op1,op2,32); +} + + +vuint32m4_t test___riscv_vand_vx_u32m4_m(vbool8_t mask,vuint32m4_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m4_m(mask,op1,op2,32); +} + + +vuint32m8_t test___riscv_vand_vx_u32m8_m(vbool4_t mask,vuint32m8_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m8_m(mask,op1,op2,32); +} + + +vuint64m1_t test___riscv_vand_vx_u64m1_m(vbool64_t mask,vuint64m1_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m1_m(mask,op1,op2,32); +} + + +vuint64m2_t test___riscv_vand_vx_u64m2_m(vbool32_t mask,vuint64m2_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m2_m(mask,op1,op2,32); +} + + +vuint64m4_t test___riscv_vand_vx_u64m4_m(vbool16_t mask,vuint64m4_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m4_m(mask,op1,op2,32); +} + + +vuint64m8_t test___riscv_vand_vx_u64m8_m(vbool8_t mask,vuint64m8_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m8_m(mask,op1,op2,32); +} + + + +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ 
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vand\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t} 8 } } */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_m_rv64-1.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_m_rv64-1.c new file mode 100644 index 000000000000..e446326f5b29 --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_m_rv64-1.c @@ -0,0 +1,292 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */ + +#include "riscv_vector.h" + +vint8mf8_t test___riscv_vand_vx_i8mf8_m(vbool64_t mask,vint8mf8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf8_m(mask,op1,op2,vl); +} + + +vint8mf4_t test___riscv_vand_vx_i8mf4_m(vbool32_t mask,vint8mf4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf4_m(mask,op1,op2,vl); +} + + +vint8mf2_t test___riscv_vand_vx_i8mf2_m(vbool16_t mask,vint8mf2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf2_m(mask,op1,op2,vl); +} + + +vint8m1_t test___riscv_vand_vx_i8m1_m(vbool8_t mask,vint8m1_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m1_m(mask,op1,op2,vl); +} + + +vint8m2_t test___riscv_vand_vx_i8m2_m(vbool4_t mask,vint8m2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m2_m(mask,op1,op2,vl); +} + + 
+vint8m4_t test___riscv_vand_vx_i8m4_m(vbool2_t mask,vint8m4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m4_m(mask,op1,op2,vl); +} + + +vint8m8_t test___riscv_vand_vx_i8m8_m(vbool1_t mask,vint8m8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m8_m(mask,op1,op2,vl); +} + + +vint16mf4_t test___riscv_vand_vx_i16mf4_m(vbool64_t mask,vint16mf4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16mf4_m(mask,op1,op2,vl); +} + + +vint16mf2_t test___riscv_vand_vx_i16mf2_m(vbool32_t mask,vint16mf2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16mf2_m(mask,op1,op2,vl); +} + + +vint16m1_t test___riscv_vand_vx_i16m1_m(vbool16_t mask,vint16m1_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m1_m(mask,op1,op2,vl); +} + + +vint16m2_t test___riscv_vand_vx_i16m2_m(vbool8_t mask,vint16m2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m2_m(mask,op1,op2,vl); +} + + +vint16m4_t test___riscv_vand_vx_i16m4_m(vbool4_t mask,vint16m4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m4_m(mask,op1,op2,vl); +} + + +vint16m8_t test___riscv_vand_vx_i16m8_m(vbool2_t mask,vint16m8_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m8_m(mask,op1,op2,vl); +} + + +vint32mf2_t test___riscv_vand_vx_i32mf2_m(vbool64_t mask,vint32mf2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32mf2_m(mask,op1,op2,vl); +} + + +vint32m1_t test___riscv_vand_vx_i32m1_m(vbool32_t mask,vint32m1_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m1_m(mask,op1,op2,vl); +} + + +vint32m2_t test___riscv_vand_vx_i32m2_m(vbool16_t mask,vint32m2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m2_m(mask,op1,op2,vl); +} + + +vint32m4_t test___riscv_vand_vx_i32m4_m(vbool8_t mask,vint32m4_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m4_m(mask,op1,op2,vl); +} + + +vint32m8_t test___riscv_vand_vx_i32m8_m(vbool4_t mask,vint32m8_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m8_m(mask,op1,op2,vl); +} + + +vint64m1_t test___riscv_vand_vx_i64m1_m(vbool64_t mask,vint64m1_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m1_m(mask,op1,op2,vl); +} + + +vint64m2_t test___riscv_vand_vx_i64m2_m(vbool32_t mask,vint64m2_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m2_m(mask,op1,op2,vl); +} + + +vint64m4_t test___riscv_vand_vx_i64m4_m(vbool16_t mask,vint64m4_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m4_m(mask,op1,op2,vl); +} + + +vint64m8_t test___riscv_vand_vx_i64m8_m(vbool8_t mask,vint64m8_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m8_m(mask,op1,op2,vl); +} + + +vuint8mf8_t test___riscv_vand_vx_u8mf8_m(vbool64_t mask,vuint8mf8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf8_m(mask,op1,op2,vl); +} + + +vuint8mf4_t test___riscv_vand_vx_u8mf4_m(vbool32_t mask,vuint8mf4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf4_m(mask,op1,op2,vl); +} + + +vuint8mf2_t test___riscv_vand_vx_u8mf2_m(vbool16_t mask,vuint8mf2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf2_m(mask,op1,op2,vl); +} + + +vuint8m1_t test___riscv_vand_vx_u8m1_m(vbool8_t mask,vuint8m1_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m1_m(mask,op1,op2,vl); +} + + +vuint8m2_t test___riscv_vand_vx_u8m2_m(vbool4_t mask,vuint8m2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m2_m(mask,op1,op2,vl); +} + + +vuint8m4_t test___riscv_vand_vx_u8m4_m(vbool2_t mask,vuint8m4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m4_m(mask,op1,op2,vl); +} + 
+ +vuint8m8_t test___riscv_vand_vx_u8m8_m(vbool1_t mask,vuint8m8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m8_m(mask,op1,op2,vl); +} + + +vuint16mf4_t test___riscv_vand_vx_u16mf4_m(vbool64_t mask,vuint16mf4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16mf4_m(mask,op1,op2,vl); +} + + +vuint16mf2_t test___riscv_vand_vx_u16mf2_m(vbool32_t mask,vuint16mf2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16mf2_m(mask,op1,op2,vl); +} + + +vuint16m1_t test___riscv_vand_vx_u16m1_m(vbool16_t mask,vuint16m1_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m1_m(mask,op1,op2,vl); +} + + +vuint16m2_t test___riscv_vand_vx_u16m2_m(vbool8_t mask,vuint16m2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m2_m(mask,op1,op2,vl); +} + + +vuint16m4_t test___riscv_vand_vx_u16m4_m(vbool4_t mask,vuint16m4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m4_m(mask,op1,op2,vl); +} + + +vuint16m8_t test___riscv_vand_vx_u16m8_m(vbool2_t mask,vuint16m8_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m8_m(mask,op1,op2,vl); +} + + +vuint32mf2_t test___riscv_vand_vx_u32mf2_m(vbool64_t mask,vuint32mf2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32mf2_m(mask,op1,op2,vl); +} + + +vuint32m1_t test___riscv_vand_vx_u32m1_m(vbool32_t mask,vuint32m1_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m1_m(mask,op1,op2,vl); +} + + +vuint32m2_t test___riscv_vand_vx_u32m2_m(vbool16_t mask,vuint32m2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m2_m(mask,op1,op2,vl); +} + + +vuint32m4_t test___riscv_vand_vx_u32m4_m(vbool8_t mask,vuint32m4_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m4_m(mask,op1,op2,vl); +} + + +vuint32m8_t test___riscv_vand_vx_u32m8_m(vbool4_t mask,vuint32m8_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m8_m(mask,op1,op2,vl); +} + + +vuint64m1_t test___riscv_vand_vx_u64m1_m(vbool64_t mask,vuint64m1_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m1_m(mask,op1,op2,vl); +} + + +vuint64m2_t test___riscv_vand_vx_u64m2_m(vbool32_t mask,vuint64m2_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m2_m(mask,op1,op2,vl); +} + + +vuint64m4_t test___riscv_vand_vx_u64m4_m(vbool16_t mask,vuint64m4_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m4_m(mask,op1,op2,vl); +} + + +vuint64m8_t test___riscv_vand_vx_u64m8_m(vbool8_t mask,vuint64m8_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m8_m(mask,op1,op2,vl); +} + + + +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times 
{vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_m_rv64-2.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_m_rv64-2.c new file mode 100644 index 000000000000..7bec65878941 --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_m_rv64-2.c @@ -0,0 +1,292 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */ + +#include "riscv_vector.h" + +vint8mf8_t test___riscv_vand_vx_i8mf8_m(vbool64_t mask,vint8mf8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf8_m(mask,op1,op2,31); +} + + +vint8mf4_t test___riscv_vand_vx_i8mf4_m(vbool32_t mask,vint8mf4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf4_m(mask,op1,op2,31); +} + + +vint8mf2_t test___riscv_vand_vx_i8mf2_m(vbool16_t mask,vint8mf2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf2_m(mask,op1,op2,31); +} + + +vint8m1_t test___riscv_vand_vx_i8m1_m(vbool8_t mask,vint8m1_t op1,int8_t op2,size_t vl) +{ + return 
__riscv_vand_vx_i8m1_m(mask,op1,op2,31); +} + + +vint8m2_t test___riscv_vand_vx_i8m2_m(vbool4_t mask,vint8m2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m2_m(mask,op1,op2,31); +} + + +vint8m4_t test___riscv_vand_vx_i8m4_m(vbool2_t mask,vint8m4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m4_m(mask,op1,op2,31); +} + + +vint8m8_t test___riscv_vand_vx_i8m8_m(vbool1_t mask,vint8m8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m8_m(mask,op1,op2,31); +} + + +vint16mf4_t test___riscv_vand_vx_i16mf4_m(vbool64_t mask,vint16mf4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16mf4_m(mask,op1,op2,31); +} + + +vint16mf2_t test___riscv_vand_vx_i16mf2_m(vbool32_t mask,vint16mf2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16mf2_m(mask,op1,op2,31); +} + + +vint16m1_t test___riscv_vand_vx_i16m1_m(vbool16_t mask,vint16m1_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m1_m(mask,op1,op2,31); +} + + +vint16m2_t test___riscv_vand_vx_i16m2_m(vbool8_t mask,vint16m2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m2_m(mask,op1,op2,31); +} + + +vint16m4_t test___riscv_vand_vx_i16m4_m(vbool4_t mask,vint16m4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m4_m(mask,op1,op2,31); +} + + +vint16m8_t test___riscv_vand_vx_i16m8_m(vbool2_t mask,vint16m8_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m8_m(mask,op1,op2,31); +} + + +vint32mf2_t test___riscv_vand_vx_i32mf2_m(vbool64_t mask,vint32mf2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32mf2_m(mask,op1,op2,31); +} + + +vint32m1_t test___riscv_vand_vx_i32m1_m(vbool32_t mask,vint32m1_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m1_m(mask,op1,op2,31); +} + + +vint32m2_t test___riscv_vand_vx_i32m2_m(vbool16_t mask,vint32m2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m2_m(mask,op1,op2,31); +} + + +vint32m4_t test___riscv_vand_vx_i32m4_m(vbool8_t mask,vint32m4_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m4_m(mask,op1,op2,31); +} + + +vint32m8_t test___riscv_vand_vx_i32m8_m(vbool4_t mask,vint32m8_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m8_m(mask,op1,op2,31); +} + + +vint64m1_t test___riscv_vand_vx_i64m1_m(vbool64_t mask,vint64m1_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m1_m(mask,op1,op2,31); +} + + +vint64m2_t test___riscv_vand_vx_i64m2_m(vbool32_t mask,vint64m2_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m2_m(mask,op1,op2,31); +} + + +vint64m4_t test___riscv_vand_vx_i64m4_m(vbool16_t mask,vint64m4_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m4_m(mask,op1,op2,31); +} + + +vint64m8_t test___riscv_vand_vx_i64m8_m(vbool8_t mask,vint64m8_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m8_m(mask,op1,op2,31); +} + + +vuint8mf8_t test___riscv_vand_vx_u8mf8_m(vbool64_t mask,vuint8mf8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf8_m(mask,op1,op2,31); +} + + +vuint8mf4_t test___riscv_vand_vx_u8mf4_m(vbool32_t mask,vuint8mf4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf4_m(mask,op1,op2,31); +} + + +vuint8mf2_t test___riscv_vand_vx_u8mf2_m(vbool16_t mask,vuint8mf2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf2_m(mask,op1,op2,31); +} + + +vuint8m1_t test___riscv_vand_vx_u8m1_m(vbool8_t mask,vuint8m1_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m1_m(mask,op1,op2,31); +} + + +vuint8m2_t test___riscv_vand_vx_u8m2_m(vbool4_t mask,vuint8m2_t op1,uint8_t op2,size_t vl) +{ + return 
__riscv_vand_vx_u8m2_m(mask,op1,op2,31); +} + + +vuint8m4_t test___riscv_vand_vx_u8m4_m(vbool2_t mask,vuint8m4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m4_m(mask,op1,op2,31); +} + + +vuint8m8_t test___riscv_vand_vx_u8m8_m(vbool1_t mask,vuint8m8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m8_m(mask,op1,op2,31); +} + + +vuint16mf4_t test___riscv_vand_vx_u16mf4_m(vbool64_t mask,vuint16mf4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16mf4_m(mask,op1,op2,31); +} + + +vuint16mf2_t test___riscv_vand_vx_u16mf2_m(vbool32_t mask,vuint16mf2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16mf2_m(mask,op1,op2,31); +} + + +vuint16m1_t test___riscv_vand_vx_u16m1_m(vbool16_t mask,vuint16m1_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m1_m(mask,op1,op2,31); +} + + +vuint16m2_t test___riscv_vand_vx_u16m2_m(vbool8_t mask,vuint16m2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m2_m(mask,op1,op2,31); +} + + +vuint16m4_t test___riscv_vand_vx_u16m4_m(vbool4_t mask,vuint16m4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m4_m(mask,op1,op2,31); +} + + +vuint16m8_t test___riscv_vand_vx_u16m8_m(vbool2_t mask,vuint16m8_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m8_m(mask,op1,op2,31); +} + + +vuint32mf2_t test___riscv_vand_vx_u32mf2_m(vbool64_t mask,vuint32mf2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32mf2_m(mask,op1,op2,31); +} + + +vuint32m1_t test___riscv_vand_vx_u32m1_m(vbool32_t mask,vuint32m1_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m1_m(mask,op1,op2,31); +} + + +vuint32m2_t test___riscv_vand_vx_u32m2_m(vbool16_t mask,vuint32m2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m2_m(mask,op1,op2,31); +} + + +vuint32m4_t test___riscv_vand_vx_u32m4_m(vbool8_t mask,vuint32m4_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m4_m(mask,op1,op2,31); +} + + +vuint32m8_t test___riscv_vand_vx_u32m8_m(vbool4_t mask,vuint32m8_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m8_m(mask,op1,op2,31); +} + + +vuint64m1_t test___riscv_vand_vx_u64m1_m(vbool64_t mask,vuint64m1_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m1_m(mask,op1,op2,31); +} + + +vuint64m2_t test___riscv_vand_vx_u64m2_m(vbool32_t mask,vuint64m2_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m2_m(mask,op1,op2,31); +} + + +vuint64m4_t test___riscv_vand_vx_u64m4_m(vbool16_t mask,vuint64m4_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m4_m(mask,op1,op2,31); +} + + +vuint64m8_t test___riscv_vand_vx_u64m8_m(vbool8_t mask,vuint64m8_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m8_m(mask,op1,op2,31); +} + + + +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf8,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf4,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf2,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m1,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m2,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times 
{vsetivli\s+zero,\s*31,\s*e8,\s*m4,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m8,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf4,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf2,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m1,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m2,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m4,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m8,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*mf2,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m1,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m2,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m4,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m8,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m1,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m2,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m4,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m8,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_m_rv64-3.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_m_rv64-3.c new file mode 100644 index 000000000000..5782012ac5c2 --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_m_rv64-3.c @@ -0,0 +1,292 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */ + +#include "riscv_vector.h" + +vint8mf8_t test___riscv_vand_vx_i8mf8_m(vbool64_t mask,vint8mf8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf8_m(mask,op1,op2,32); +} + + +vint8mf4_t test___riscv_vand_vx_i8mf4_m(vbool32_t mask,vint8mf4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf4_m(mask,op1,op2,32); +} + + +vint8mf2_t test___riscv_vand_vx_i8mf2_m(vbool16_t mask,vint8mf2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf2_m(mask,op1,op2,32); +} + + +vint8m1_t test___riscv_vand_vx_i8m1_m(vbool8_t 
mask,vint8m1_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m1_m(mask,op1,op2,32); +} + + +vint8m2_t test___riscv_vand_vx_i8m2_m(vbool4_t mask,vint8m2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m2_m(mask,op1,op2,32); +} + + +vint8m4_t test___riscv_vand_vx_i8m4_m(vbool2_t mask,vint8m4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m4_m(mask,op1,op2,32); +} + + +vint8m8_t test___riscv_vand_vx_i8m8_m(vbool1_t mask,vint8m8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m8_m(mask,op1,op2,32); +} + + +vint16mf4_t test___riscv_vand_vx_i16mf4_m(vbool64_t mask,vint16mf4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16mf4_m(mask,op1,op2,32); +} + + +vint16mf2_t test___riscv_vand_vx_i16mf2_m(vbool32_t mask,vint16mf2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16mf2_m(mask,op1,op2,32); +} + + +vint16m1_t test___riscv_vand_vx_i16m1_m(vbool16_t mask,vint16m1_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m1_m(mask,op1,op2,32); +} + + +vint16m2_t test___riscv_vand_vx_i16m2_m(vbool8_t mask,vint16m2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m2_m(mask,op1,op2,32); +} + + +vint16m4_t test___riscv_vand_vx_i16m4_m(vbool4_t mask,vint16m4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m4_m(mask,op1,op2,32); +} + + +vint16m8_t test___riscv_vand_vx_i16m8_m(vbool2_t mask,vint16m8_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m8_m(mask,op1,op2,32); +} + + +vint32mf2_t test___riscv_vand_vx_i32mf2_m(vbool64_t mask,vint32mf2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32mf2_m(mask,op1,op2,32); +} + + +vint32m1_t test___riscv_vand_vx_i32m1_m(vbool32_t mask,vint32m1_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m1_m(mask,op1,op2,32); +} + + +vint32m2_t test___riscv_vand_vx_i32m2_m(vbool16_t mask,vint32m2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m2_m(mask,op1,op2,32); +} + + +vint32m4_t test___riscv_vand_vx_i32m4_m(vbool8_t mask,vint32m4_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m4_m(mask,op1,op2,32); +} + + +vint32m8_t test___riscv_vand_vx_i32m8_m(vbool4_t mask,vint32m8_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m8_m(mask,op1,op2,32); +} + + +vint64m1_t test___riscv_vand_vx_i64m1_m(vbool64_t mask,vint64m1_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m1_m(mask,op1,op2,32); +} + + +vint64m2_t test___riscv_vand_vx_i64m2_m(vbool32_t mask,vint64m2_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m2_m(mask,op1,op2,32); +} + + +vint64m4_t test___riscv_vand_vx_i64m4_m(vbool16_t mask,vint64m4_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m4_m(mask,op1,op2,32); +} + + +vint64m8_t test___riscv_vand_vx_i64m8_m(vbool8_t mask,vint64m8_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m8_m(mask,op1,op2,32); +} + + +vuint8mf8_t test___riscv_vand_vx_u8mf8_m(vbool64_t mask,vuint8mf8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf8_m(mask,op1,op2,32); +} + + +vuint8mf4_t test___riscv_vand_vx_u8mf4_m(vbool32_t mask,vuint8mf4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf4_m(mask,op1,op2,32); +} + + +vuint8mf2_t test___riscv_vand_vx_u8mf2_m(vbool16_t mask,vuint8mf2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf2_m(mask,op1,op2,32); +} + + +vuint8m1_t test___riscv_vand_vx_u8m1_m(vbool8_t mask,vuint8m1_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m1_m(mask,op1,op2,32); +} + + +vuint8m2_t test___riscv_vand_vx_u8m2_m(vbool4_t 
mask,vuint8m2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m2_m(mask,op1,op2,32); +} + + +vuint8m4_t test___riscv_vand_vx_u8m4_m(vbool2_t mask,vuint8m4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m4_m(mask,op1,op2,32); +} + + +vuint8m8_t test___riscv_vand_vx_u8m8_m(vbool1_t mask,vuint8m8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m8_m(mask,op1,op2,32); +} + + +vuint16mf4_t test___riscv_vand_vx_u16mf4_m(vbool64_t mask,vuint16mf4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16mf4_m(mask,op1,op2,32); +} + + +vuint16mf2_t test___riscv_vand_vx_u16mf2_m(vbool32_t mask,vuint16mf2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16mf2_m(mask,op1,op2,32); +} + + +vuint16m1_t test___riscv_vand_vx_u16m1_m(vbool16_t mask,vuint16m1_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m1_m(mask,op1,op2,32); +} + + +vuint16m2_t test___riscv_vand_vx_u16m2_m(vbool8_t mask,vuint16m2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m2_m(mask,op1,op2,32); +} + + +vuint16m4_t test___riscv_vand_vx_u16m4_m(vbool4_t mask,vuint16m4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m4_m(mask,op1,op2,32); +} + + +vuint16m8_t test___riscv_vand_vx_u16m8_m(vbool2_t mask,vuint16m8_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m8_m(mask,op1,op2,32); +} + + +vuint32mf2_t test___riscv_vand_vx_u32mf2_m(vbool64_t mask,vuint32mf2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32mf2_m(mask,op1,op2,32); +} + + +vuint32m1_t test___riscv_vand_vx_u32m1_m(vbool32_t mask,vuint32m1_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m1_m(mask,op1,op2,32); +} + + +vuint32m2_t test___riscv_vand_vx_u32m2_m(vbool16_t mask,vuint32m2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m2_m(mask,op1,op2,32); +} + + +vuint32m4_t test___riscv_vand_vx_u32m4_m(vbool8_t mask,vuint32m4_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m4_m(mask,op1,op2,32); +} + + +vuint32m8_t test___riscv_vand_vx_u32m8_m(vbool4_t mask,vuint32m8_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m8_m(mask,op1,op2,32); +} + + +vuint64m1_t test___riscv_vand_vx_u64m1_m(vbool64_t mask,vuint64m1_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m1_m(mask,op1,op2,32); +} + + +vuint64m2_t test___riscv_vand_vx_u64m2_m(vbool32_t mask,vuint64m2_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m2_m(mask,op1,op2,32); +} + + +vuint64m4_t test___riscv_vand_vx_u64m4_m(vbool16_t mask,vuint64m4_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m4_m(mask,op1,op2,32); +} + + +vuint64m8_t test___riscv_vand_vx_u64m8_m(vbool8_t mask,vuint64m8_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m8_m(mask,op1,op2,32); +} + + + +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times 
{vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_mu_rv32-1.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_mu_rv32-1.c new file mode 100644 index 000000000000..b7c707ff2feb --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_mu_rv32-1.c @@ -0,0 +1,289 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */ + +#include "riscv_vector.h" + +vint8mf8_t test___riscv_vand_vx_i8mf8_mu(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf8_mu(mask,merge,op1,op2,vl); +} + + +vint8mf4_t test___riscv_vand_vx_i8mf4_mu(vbool32_t mask,vint8mf4_t 
merge,vint8mf4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf4_mu(mask,merge,op1,op2,vl); +} + + +vint8mf2_t test___riscv_vand_vx_i8mf2_mu(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf2_mu(mask,merge,op1,op2,vl); +} + + +vint8m1_t test___riscv_vand_vx_i8m1_mu(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m1_mu(mask,merge,op1,op2,vl); +} + + +vint8m2_t test___riscv_vand_vx_i8m2_mu(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m2_mu(mask,merge,op1,op2,vl); +} + + +vint8m4_t test___riscv_vand_vx_i8m4_mu(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m4_mu(mask,merge,op1,op2,vl); +} + + +vint8m8_t test___riscv_vand_vx_i8m8_mu(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m8_mu(mask,merge,op1,op2,vl); +} + + +vint16mf4_t test___riscv_vand_vx_i16mf4_mu(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16mf4_mu(mask,merge,op1,op2,vl); +} + + +vint16mf2_t test___riscv_vand_vx_i16mf2_mu(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16mf2_mu(mask,merge,op1,op2,vl); +} + + +vint16m1_t test___riscv_vand_vx_i16m1_mu(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m1_mu(mask,merge,op1,op2,vl); +} + + +vint16m2_t test___riscv_vand_vx_i16m2_mu(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m2_mu(mask,merge,op1,op2,vl); +} + + +vint16m4_t test___riscv_vand_vx_i16m4_mu(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m4_mu(mask,merge,op1,op2,vl); +} + + +vint16m8_t test___riscv_vand_vx_i16m8_mu(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m8_mu(mask,merge,op1,op2,vl); +} + + +vint32mf2_t test___riscv_vand_vx_i32mf2_mu(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32mf2_mu(mask,merge,op1,op2,vl); +} + + +vint32m1_t test___riscv_vand_vx_i32m1_mu(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m1_mu(mask,merge,op1,op2,vl); +} + + +vint32m2_t test___riscv_vand_vx_i32m2_mu(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m2_mu(mask,merge,op1,op2,vl); +} + + +vint32m4_t test___riscv_vand_vx_i32m4_mu(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m4_mu(mask,merge,op1,op2,vl); +} + + +vint32m8_t test___riscv_vand_vx_i32m8_mu(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m8_mu(mask,merge,op1,op2,vl); +} + + +vint64m1_t test___riscv_vand_vx_i64m1_mu(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m1_mu(mask,merge,op1,op2,vl); +} + + +vint64m2_t test___riscv_vand_vx_i64m2_mu(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m2_mu(mask,merge,op1,op2,vl); +} + + +vint64m4_t test___riscv_vand_vx_i64m4_mu(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m4_mu(mask,merge,op1,op2,vl); +} + + +vint64m8_t 
test___riscv_vand_vx_i64m8_mu(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m8_mu(mask,merge,op1,op2,vl); +} + + +vuint8mf8_t test___riscv_vand_vx_u8mf8_mu(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf8_mu(mask,merge,op1,op2,vl); +} + + +vuint8mf4_t test___riscv_vand_vx_u8mf4_mu(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf4_mu(mask,merge,op1,op2,vl); +} + + +vuint8mf2_t test___riscv_vand_vx_u8mf2_mu(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf2_mu(mask,merge,op1,op2,vl); +} + + +vuint8m1_t test___riscv_vand_vx_u8m1_mu(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m1_mu(mask,merge,op1,op2,vl); +} + + +vuint8m2_t test___riscv_vand_vx_u8m2_mu(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m2_mu(mask,merge,op1,op2,vl); +} + + +vuint8m4_t test___riscv_vand_vx_u8m4_mu(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m4_mu(mask,merge,op1,op2,vl); +} + + +vuint8m8_t test___riscv_vand_vx_u8m8_mu(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m8_mu(mask,merge,op1,op2,vl); +} + + +vuint16mf4_t test___riscv_vand_vx_u16mf4_mu(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16mf4_mu(mask,merge,op1,op2,vl); +} + + +vuint16mf2_t test___riscv_vand_vx_u16mf2_mu(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16mf2_mu(mask,merge,op1,op2,vl); +} + + +vuint16m1_t test___riscv_vand_vx_u16m1_mu(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m1_mu(mask,merge,op1,op2,vl); +} + + +vuint16m2_t test___riscv_vand_vx_u16m2_mu(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m2_mu(mask,merge,op1,op2,vl); +} + + +vuint16m4_t test___riscv_vand_vx_u16m4_mu(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m4_mu(mask,merge,op1,op2,vl); +} + + +vuint16m8_t test___riscv_vand_vx_u16m8_mu(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m8_mu(mask,merge,op1,op2,vl); +} + + +vuint32mf2_t test___riscv_vand_vx_u32mf2_mu(vbool64_t mask,vuint32mf2_t merge,vuint32mf2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32mf2_mu(mask,merge,op1,op2,vl); +} + + +vuint32m1_t test___riscv_vand_vx_u32m1_mu(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m1_mu(mask,merge,op1,op2,vl); +} + + +vuint32m2_t test___riscv_vand_vx_u32m2_mu(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m2_mu(mask,merge,op1,op2,vl); +} + + +vuint32m4_t test___riscv_vand_vx_u32m4_mu(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m4_mu(mask,merge,op1,op2,vl); +} + + +vuint32m8_t test___riscv_vand_vx_u32m8_mu(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m8_mu(mask,merge,op1,op2,vl); +} + + +vuint64m1_t test___riscv_vand_vx_u64m1_mu(vbool64_t mask,vuint64m1_t merge,vuint64m1_t 
op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m1_mu(mask,merge,op1,op2,vl); +} + + +vuint64m2_t test___riscv_vand_vx_u64m2_mu(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m2_mu(mask,merge,op1,op2,vl); +} + + +vuint64m4_t test___riscv_vand_vx_u64m4_mu(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m4_mu(mask,merge,op1,op2,vl); +} + + +vuint64m8_t test___riscv_vand_vx_u64m8_mu(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m8_mu(mask,merge,op1,op2,vl); +} + + + +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times 
{vand\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t} 8 } } */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_mu_rv32-2.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_mu_rv32-2.c new file mode 100644 index 000000000000..4838f943bc89 --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_mu_rv32-2.c @@ -0,0 +1,289 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */ + +#include "riscv_vector.h" + +vint8mf8_t test___riscv_vand_vx_i8mf8_mu(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf8_mu(mask,merge,op1,op2,31); +} + + +vint8mf4_t test___riscv_vand_vx_i8mf4_mu(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf4_mu(mask,merge,op1,op2,31); +} + + +vint8mf2_t test___riscv_vand_vx_i8mf2_mu(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf2_mu(mask,merge,op1,op2,31); +} + + +vint8m1_t test___riscv_vand_vx_i8m1_mu(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m1_mu(mask,merge,op1,op2,31); +} + + +vint8m2_t test___riscv_vand_vx_i8m2_mu(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m2_mu(mask,merge,op1,op2,31); +} + + +vint8m4_t test___riscv_vand_vx_i8m4_mu(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m4_mu(mask,merge,op1,op2,31); +} + + +vint8m8_t test___riscv_vand_vx_i8m8_mu(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m8_mu(mask,merge,op1,op2,31); +} + + +vint16mf4_t test___riscv_vand_vx_i16mf4_mu(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16mf4_mu(mask,merge,op1,op2,31); +} + + +vint16mf2_t test___riscv_vand_vx_i16mf2_mu(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16mf2_mu(mask,merge,op1,op2,31); +} + + +vint16m1_t test___riscv_vand_vx_i16m1_mu(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m1_mu(mask,merge,op1,op2,31); +} + + +vint16m2_t test___riscv_vand_vx_i16m2_mu(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m2_mu(mask,merge,op1,op2,31); +} + + +vint16m4_t test___riscv_vand_vx_i16m4_mu(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m4_mu(mask,merge,op1,op2,31); +} + + +vint16m8_t test___riscv_vand_vx_i16m8_mu(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m8_mu(mask,merge,op1,op2,31); +} + + +vint32mf2_t test___riscv_vand_vx_i32mf2_mu(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32mf2_mu(mask,merge,op1,op2,31); +} + + +vint32m1_t test___riscv_vand_vx_i32m1_mu(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m1_mu(mask,merge,op1,op2,31); +} + + +vint32m2_t test___riscv_vand_vx_i32m2_mu(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m2_mu(mask,merge,op1,op2,31); +} + + +vint32m4_t test___riscv_vand_vx_i32m4_mu(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m4_mu(mask,merge,op1,op2,31); +} + + +vint32m8_t 
test___riscv_vand_vx_i32m8_mu(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m8_mu(mask,merge,op1,op2,31); +} + + +vint64m1_t test___riscv_vand_vx_i64m1_mu(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m1_mu(mask,merge,op1,op2,31); +} + + +vint64m2_t test___riscv_vand_vx_i64m2_mu(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m2_mu(mask,merge,op1,op2,31); +} + + +vint64m4_t test___riscv_vand_vx_i64m4_mu(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m4_mu(mask,merge,op1,op2,31); +} + + +vint64m8_t test___riscv_vand_vx_i64m8_mu(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m8_mu(mask,merge,op1,op2,31); +} + + +vuint8mf8_t test___riscv_vand_vx_u8mf8_mu(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf8_mu(mask,merge,op1,op2,31); +} + + +vuint8mf4_t test___riscv_vand_vx_u8mf4_mu(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf4_mu(mask,merge,op1,op2,31); +} + + +vuint8mf2_t test___riscv_vand_vx_u8mf2_mu(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf2_mu(mask,merge,op1,op2,31); +} + + +vuint8m1_t test___riscv_vand_vx_u8m1_mu(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m1_mu(mask,merge,op1,op2,31); +} + + +vuint8m2_t test___riscv_vand_vx_u8m2_mu(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m2_mu(mask,merge,op1,op2,31); +} + + +vuint8m4_t test___riscv_vand_vx_u8m4_mu(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m4_mu(mask,merge,op1,op2,31); +} + + +vuint8m8_t test___riscv_vand_vx_u8m8_mu(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m8_mu(mask,merge,op1,op2,31); +} + + +vuint16mf4_t test___riscv_vand_vx_u16mf4_mu(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16mf4_mu(mask,merge,op1,op2,31); +} + + +vuint16mf2_t test___riscv_vand_vx_u16mf2_mu(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16mf2_mu(mask,merge,op1,op2,31); +} + + +vuint16m1_t test___riscv_vand_vx_u16m1_mu(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m1_mu(mask,merge,op1,op2,31); +} + + +vuint16m2_t test___riscv_vand_vx_u16m2_mu(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m2_mu(mask,merge,op1,op2,31); +} + + +vuint16m4_t test___riscv_vand_vx_u16m4_mu(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m4_mu(mask,merge,op1,op2,31); +} + + +vuint16m8_t test___riscv_vand_vx_u16m8_mu(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m8_mu(mask,merge,op1,op2,31); +} + + +vuint32mf2_t test___riscv_vand_vx_u32mf2_mu(vbool64_t mask,vuint32mf2_t merge,vuint32mf2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32mf2_mu(mask,merge,op1,op2,31); +} + + +vuint32m1_t test___riscv_vand_vx_u32m1_mu(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,uint32_t op2,size_t 
vl) +{ + return __riscv_vand_vx_u32m1_mu(mask,merge,op1,op2,31); +} + + +vuint32m2_t test___riscv_vand_vx_u32m2_mu(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m2_mu(mask,merge,op1,op2,31); +} + + +vuint32m4_t test___riscv_vand_vx_u32m4_mu(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m4_mu(mask,merge,op1,op2,31); +} + + +vuint32m8_t test___riscv_vand_vx_u32m8_mu(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m8_mu(mask,merge,op1,op2,31); +} + + +vuint64m1_t test___riscv_vand_vx_u64m1_mu(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m1_mu(mask,merge,op1,op2,31); +} + + +vuint64m2_t test___riscv_vand_vx_u64m2_mu(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m2_mu(mask,merge,op1,op2,31); +} + + +vuint64m4_t test___riscv_vand_vx_u64m4_mu(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m4_mu(mask,merge,op1,op2,31); +} + + +vuint64m8_t test___riscv_vand_vx_u64m8_mu(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m8_mu(mask,merge,op1,op2,31); +} + + + +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf8,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf4,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf2,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m1,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m2,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m4,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m8,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf4,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf2,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m1,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m2,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m4,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m8,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*mf2,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times 
{vsetivli\s+zero,\s*31,\s*e32,\s*m1,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m2,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m4,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m8,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vand\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t} 8 } } */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_mu_rv32-3.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_mu_rv32-3.c new file mode 100644 index 000000000000..ba42a0a28034 --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_mu_rv32-3.c @@ -0,0 +1,289 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */ + +#include "riscv_vector.h" + +vint8mf8_t test___riscv_vand_vx_i8mf8_mu(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf8_mu(mask,merge,op1,op2,32); +} + + +vint8mf4_t test___riscv_vand_vx_i8mf4_mu(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf4_mu(mask,merge,op1,op2,32); +} + + +vint8mf2_t test___riscv_vand_vx_i8mf2_mu(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf2_mu(mask,merge,op1,op2,32); +} + + +vint8m1_t test___riscv_vand_vx_i8m1_mu(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m1_mu(mask,merge,op1,op2,32); +} + + +vint8m2_t test___riscv_vand_vx_i8m2_mu(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m2_mu(mask,merge,op1,op2,32); +} + + +vint8m4_t test___riscv_vand_vx_i8m4_mu(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m4_mu(mask,merge,op1,op2,32); +} + + +vint8m8_t test___riscv_vand_vx_i8m8_mu(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m8_mu(mask,merge,op1,op2,32); +} + + +vint16mf4_t test___riscv_vand_vx_i16mf4_mu(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16mf4_mu(mask,merge,op1,op2,32); +} + + +vint16mf2_t test___riscv_vand_vx_i16mf2_mu(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16mf2_mu(mask,merge,op1,op2,32); +} + + +vint16m1_t test___riscv_vand_vx_i16m1_mu(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m1_mu(mask,merge,op1,op2,32); +} + + +vint16m2_t test___riscv_vand_vx_i16m2_mu(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m2_mu(mask,merge,op1,op2,32); +} + + +vint16m4_t test___riscv_vand_vx_i16m4_mu(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m4_mu(mask,merge,op1,op2,32); +} + + +vint16m8_t test___riscv_vand_vx_i16m8_mu(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m8_mu(mask,merge,op1,op2,32); +} + + +vint32mf2_t test___riscv_vand_vx_i32mf2_mu(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,int32_t op2,size_t vl) +{ + return 
__riscv_vand_vx_i32mf2_mu(mask,merge,op1,op2,32); +} + + +vint32m1_t test___riscv_vand_vx_i32m1_mu(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m1_mu(mask,merge,op1,op2,32); +} + + +vint32m2_t test___riscv_vand_vx_i32m2_mu(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m2_mu(mask,merge,op1,op2,32); +} + + +vint32m4_t test___riscv_vand_vx_i32m4_mu(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m4_mu(mask,merge,op1,op2,32); +} + + +vint32m8_t test___riscv_vand_vx_i32m8_mu(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m8_mu(mask,merge,op1,op2,32); +} + + +vint64m1_t test___riscv_vand_vx_i64m1_mu(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m1_mu(mask,merge,op1,op2,32); +} + + +vint64m2_t test___riscv_vand_vx_i64m2_mu(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m2_mu(mask,merge,op1,op2,32); +} + + +vint64m4_t test___riscv_vand_vx_i64m4_mu(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m4_mu(mask,merge,op1,op2,32); +} + + +vint64m8_t test___riscv_vand_vx_i64m8_mu(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m8_mu(mask,merge,op1,op2,32); +} + + +vuint8mf8_t test___riscv_vand_vx_u8mf8_mu(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf8_mu(mask,merge,op1,op2,32); +} + + +vuint8mf4_t test___riscv_vand_vx_u8mf4_mu(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf4_mu(mask,merge,op1,op2,32); +} + + +vuint8mf2_t test___riscv_vand_vx_u8mf2_mu(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf2_mu(mask,merge,op1,op2,32); +} + + +vuint8m1_t test___riscv_vand_vx_u8m1_mu(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m1_mu(mask,merge,op1,op2,32); +} + + +vuint8m2_t test___riscv_vand_vx_u8m2_mu(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m2_mu(mask,merge,op1,op2,32); +} + + +vuint8m4_t test___riscv_vand_vx_u8m4_mu(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m4_mu(mask,merge,op1,op2,32); +} + + +vuint8m8_t test___riscv_vand_vx_u8m8_mu(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m8_mu(mask,merge,op1,op2,32); +} + + +vuint16mf4_t test___riscv_vand_vx_u16mf4_mu(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16mf4_mu(mask,merge,op1,op2,32); +} + + +vuint16mf2_t test___riscv_vand_vx_u16mf2_mu(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16mf2_mu(mask,merge,op1,op2,32); +} + + +vuint16m1_t test___riscv_vand_vx_u16m1_mu(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m1_mu(mask,merge,op1,op2,32); +} + + +vuint16m2_t test___riscv_vand_vx_u16m2_mu(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m2_mu(mask,merge,op1,op2,32); +} + + +vuint16m4_t test___riscv_vand_vx_u16m4_mu(vbool4_t 
mask,vuint16m4_t merge,vuint16m4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m4_mu(mask,merge,op1,op2,32); +} + + +vuint16m8_t test___riscv_vand_vx_u16m8_mu(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m8_mu(mask,merge,op1,op2,32); +} + + +vuint32mf2_t test___riscv_vand_vx_u32mf2_mu(vbool64_t mask,vuint32mf2_t merge,vuint32mf2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32mf2_mu(mask,merge,op1,op2,32); +} + + +vuint32m1_t test___riscv_vand_vx_u32m1_mu(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m1_mu(mask,merge,op1,op2,32); +} + + +vuint32m2_t test___riscv_vand_vx_u32m2_mu(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m2_mu(mask,merge,op1,op2,32); +} + + +vuint32m4_t test___riscv_vand_vx_u32m4_mu(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m4_mu(mask,merge,op1,op2,32); +} + + +vuint32m8_t test___riscv_vand_vx_u32m8_mu(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m8_mu(mask,merge,op1,op2,32); +} + + +vuint64m1_t test___riscv_vand_vx_u64m1_mu(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m1_mu(mask,merge,op1,op2,32); +} + + +vuint64m2_t test___riscv_vand_vx_u64m2_mu(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m2_mu(mask,merge,op1,op2,32); +} + + +vuint64m4_t test___riscv_vand_vx_u64m4_mu(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m4_mu(mask,merge,op1,op2,32); +} + + +vuint64m8_t test___riscv_vand_vx_u64m8_mu(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m8_mu(mask,merge,op1,op2,32); +} + + + +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ 
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vand\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t} 8 } } */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_mu_rv64-1.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_mu_rv64-1.c new file mode 100644 index 000000000000..19f2c420b2ce --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_mu_rv64-1.c @@ -0,0 +1,292 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */ + +#include "riscv_vector.h" + +vint8mf8_t test___riscv_vand_vx_i8mf8_mu(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf8_mu(mask,merge,op1,op2,vl); +} + + +vint8mf4_t test___riscv_vand_vx_i8mf4_mu(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf4_mu(mask,merge,op1,op2,vl); +} + + +vint8mf2_t test___riscv_vand_vx_i8mf2_mu(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf2_mu(mask,merge,op1,op2,vl); +} + + +vint8m1_t test___riscv_vand_vx_i8m1_mu(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m1_mu(mask,merge,op1,op2,vl); +} + + +vint8m2_t test___riscv_vand_vx_i8m2_mu(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m2_mu(mask,merge,op1,op2,vl); +} + + +vint8m4_t test___riscv_vand_vx_i8m4_mu(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m4_mu(mask,merge,op1,op2,vl); +} + + +vint8m8_t test___riscv_vand_vx_i8m8_mu(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m8_mu(mask,merge,op1,op2,vl); +} + + +vint16mf4_t test___riscv_vand_vx_i16mf4_mu(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16mf4_mu(mask,merge,op1,op2,vl); +} + + +vint16mf2_t test___riscv_vand_vx_i16mf2_mu(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16mf2_mu(mask,merge,op1,op2,vl); +} + + +vint16m1_t test___riscv_vand_vx_i16m1_mu(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,int16_t op2,size_t vl) +{ + return 
__riscv_vand_vx_i16m1_mu(mask,merge,op1,op2,vl); +} + + +vint16m2_t test___riscv_vand_vx_i16m2_mu(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m2_mu(mask,merge,op1,op2,vl); +} + + +vint16m4_t test___riscv_vand_vx_i16m4_mu(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m4_mu(mask,merge,op1,op2,vl); +} + + +vint16m8_t test___riscv_vand_vx_i16m8_mu(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m8_mu(mask,merge,op1,op2,vl); +} + + +vint32mf2_t test___riscv_vand_vx_i32mf2_mu(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32mf2_mu(mask,merge,op1,op2,vl); +} + + +vint32m1_t test___riscv_vand_vx_i32m1_mu(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m1_mu(mask,merge,op1,op2,vl); +} + + +vint32m2_t test___riscv_vand_vx_i32m2_mu(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m2_mu(mask,merge,op1,op2,vl); +} + + +vint32m4_t test___riscv_vand_vx_i32m4_mu(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m4_mu(mask,merge,op1,op2,vl); +} + + +vint32m8_t test___riscv_vand_vx_i32m8_mu(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m8_mu(mask,merge,op1,op2,vl); +} + + +vint64m1_t test___riscv_vand_vx_i64m1_mu(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m1_mu(mask,merge,op1,op2,vl); +} + + +vint64m2_t test___riscv_vand_vx_i64m2_mu(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m2_mu(mask,merge,op1,op2,vl); +} + + +vint64m4_t test___riscv_vand_vx_i64m4_mu(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m4_mu(mask,merge,op1,op2,vl); +} + + +vint64m8_t test___riscv_vand_vx_i64m8_mu(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m8_mu(mask,merge,op1,op2,vl); +} + + +vuint8mf8_t test___riscv_vand_vx_u8mf8_mu(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf8_mu(mask,merge,op1,op2,vl); +} + + +vuint8mf4_t test___riscv_vand_vx_u8mf4_mu(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf4_mu(mask,merge,op1,op2,vl); +} + + +vuint8mf2_t test___riscv_vand_vx_u8mf2_mu(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf2_mu(mask,merge,op1,op2,vl); +} + + +vuint8m1_t test___riscv_vand_vx_u8m1_mu(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m1_mu(mask,merge,op1,op2,vl); +} + + +vuint8m2_t test___riscv_vand_vx_u8m2_mu(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m2_mu(mask,merge,op1,op2,vl); +} + + +vuint8m4_t test___riscv_vand_vx_u8m4_mu(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m4_mu(mask,merge,op1,op2,vl); +} + + +vuint8m8_t test___riscv_vand_vx_u8m8_mu(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m8_mu(mask,merge,op1,op2,vl); +} + + +vuint16mf4_t test___riscv_vand_vx_u16mf4_mu(vbool64_t mask,vuint16mf4_t 
merge,vuint16mf4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16mf4_mu(mask,merge,op1,op2,vl); +} + + +vuint16mf2_t test___riscv_vand_vx_u16mf2_mu(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16mf2_mu(mask,merge,op1,op2,vl); +} + + +vuint16m1_t test___riscv_vand_vx_u16m1_mu(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m1_mu(mask,merge,op1,op2,vl); +} + + +vuint16m2_t test___riscv_vand_vx_u16m2_mu(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m2_mu(mask,merge,op1,op2,vl); +} + + +vuint16m4_t test___riscv_vand_vx_u16m4_mu(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m4_mu(mask,merge,op1,op2,vl); +} + + +vuint16m8_t test___riscv_vand_vx_u16m8_mu(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m8_mu(mask,merge,op1,op2,vl); +} + + +vuint32mf2_t test___riscv_vand_vx_u32mf2_mu(vbool64_t mask,vuint32mf2_t merge,vuint32mf2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32mf2_mu(mask,merge,op1,op2,vl); +} + + +vuint32m1_t test___riscv_vand_vx_u32m1_mu(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m1_mu(mask,merge,op1,op2,vl); +} + + +vuint32m2_t test___riscv_vand_vx_u32m2_mu(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m2_mu(mask,merge,op1,op2,vl); +} + + +vuint32m4_t test___riscv_vand_vx_u32m4_mu(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m4_mu(mask,merge,op1,op2,vl); +} + + +vuint32m8_t test___riscv_vand_vx_u32m8_mu(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m8_mu(mask,merge,op1,op2,vl); +} + + +vuint64m1_t test___riscv_vand_vx_u64m1_mu(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m1_mu(mask,merge,op1,op2,vl); +} + + +vuint64m2_t test___riscv_vand_vx_u64m2_mu(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m2_mu(mask,merge,op1,op2,vl); +} + + +vuint64m4_t test___riscv_vand_vx_u64m4_mu(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m4_mu(mask,merge,op1,op2,vl); +} + + +vuint64m8_t test___riscv_vand_vx_u64m8_mu(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m8_mu(mask,merge,op1,op2,vl); +} + + + +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times 
{vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_mu_rv64-2.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_mu_rv64-2.c new file mode 100644 index 000000000000..f81636824a98 --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_mu_rv64-2.c @@ -0,0 +1,292 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */ + +#include "riscv_vector.h" + +vint8mf8_t test___riscv_vand_vx_i8mf8_mu(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf8_mu(mask,merge,op1,op2,31); +} + + +vint8mf4_t test___riscv_vand_vx_i8mf4_mu(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf4_mu(mask,merge,op1,op2,31); +} + + +vint8mf2_t test___riscv_vand_vx_i8mf2_mu(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,int8_t op2,size_t 
vl) +{ + return __riscv_vand_vx_i8mf2_mu(mask,merge,op1,op2,31); +} + + +vint8m1_t test___riscv_vand_vx_i8m1_mu(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m1_mu(mask,merge,op1,op2,31); +} + + +vint8m2_t test___riscv_vand_vx_i8m2_mu(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m2_mu(mask,merge,op1,op2,31); +} + + +vint8m4_t test___riscv_vand_vx_i8m4_mu(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m4_mu(mask,merge,op1,op2,31); +} + + +vint8m8_t test___riscv_vand_vx_i8m8_mu(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m8_mu(mask,merge,op1,op2,31); +} + + +vint16mf4_t test___riscv_vand_vx_i16mf4_mu(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16mf4_mu(mask,merge,op1,op2,31); +} + + +vint16mf2_t test___riscv_vand_vx_i16mf2_mu(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16mf2_mu(mask,merge,op1,op2,31); +} + + +vint16m1_t test___riscv_vand_vx_i16m1_mu(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m1_mu(mask,merge,op1,op2,31); +} + + +vint16m2_t test___riscv_vand_vx_i16m2_mu(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m2_mu(mask,merge,op1,op2,31); +} + + +vint16m4_t test___riscv_vand_vx_i16m4_mu(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m4_mu(mask,merge,op1,op2,31); +} + + +vint16m8_t test___riscv_vand_vx_i16m8_mu(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m8_mu(mask,merge,op1,op2,31); +} + + +vint32mf2_t test___riscv_vand_vx_i32mf2_mu(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32mf2_mu(mask,merge,op1,op2,31); +} + + +vint32m1_t test___riscv_vand_vx_i32m1_mu(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m1_mu(mask,merge,op1,op2,31); +} + + +vint32m2_t test___riscv_vand_vx_i32m2_mu(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m2_mu(mask,merge,op1,op2,31); +} + + +vint32m4_t test___riscv_vand_vx_i32m4_mu(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m4_mu(mask,merge,op1,op2,31); +} + + +vint32m8_t test___riscv_vand_vx_i32m8_mu(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m8_mu(mask,merge,op1,op2,31); +} + + +vint64m1_t test___riscv_vand_vx_i64m1_mu(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m1_mu(mask,merge,op1,op2,31); +} + + +vint64m2_t test___riscv_vand_vx_i64m2_mu(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m2_mu(mask,merge,op1,op2,31); +} + + +vint64m4_t test___riscv_vand_vx_i64m4_mu(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m4_mu(mask,merge,op1,op2,31); +} + + +vint64m8_t test___riscv_vand_vx_i64m8_mu(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m8_mu(mask,merge,op1,op2,31); +} + + +vuint8mf8_t test___riscv_vand_vx_u8mf8_mu(vbool64_t mask,vuint8mf8_t 
merge,vuint8mf8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf8_mu(mask,merge,op1,op2,31); +} + + +vuint8mf4_t test___riscv_vand_vx_u8mf4_mu(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf4_mu(mask,merge,op1,op2,31); +} + + +vuint8mf2_t test___riscv_vand_vx_u8mf2_mu(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf2_mu(mask,merge,op1,op2,31); +} + + +vuint8m1_t test___riscv_vand_vx_u8m1_mu(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m1_mu(mask,merge,op1,op2,31); +} + + +vuint8m2_t test___riscv_vand_vx_u8m2_mu(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m2_mu(mask,merge,op1,op2,31); +} + + +vuint8m4_t test___riscv_vand_vx_u8m4_mu(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m4_mu(mask,merge,op1,op2,31); +} + + +vuint8m8_t test___riscv_vand_vx_u8m8_mu(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m8_mu(mask,merge,op1,op2,31); +} + + +vuint16mf4_t test___riscv_vand_vx_u16mf4_mu(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16mf4_mu(mask,merge,op1,op2,31); +} + + +vuint16mf2_t test___riscv_vand_vx_u16mf2_mu(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16mf2_mu(mask,merge,op1,op2,31); +} + + +vuint16m1_t test___riscv_vand_vx_u16m1_mu(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m1_mu(mask,merge,op1,op2,31); +} + + +vuint16m2_t test___riscv_vand_vx_u16m2_mu(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m2_mu(mask,merge,op1,op2,31); +} + + +vuint16m4_t test___riscv_vand_vx_u16m4_mu(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m4_mu(mask,merge,op1,op2,31); +} + + +vuint16m8_t test___riscv_vand_vx_u16m8_mu(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m8_mu(mask,merge,op1,op2,31); +} + + +vuint32mf2_t test___riscv_vand_vx_u32mf2_mu(vbool64_t mask,vuint32mf2_t merge,vuint32mf2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32mf2_mu(mask,merge,op1,op2,31); +} + + +vuint32m1_t test___riscv_vand_vx_u32m1_mu(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m1_mu(mask,merge,op1,op2,31); +} + + +vuint32m2_t test___riscv_vand_vx_u32m2_mu(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m2_mu(mask,merge,op1,op2,31); +} + + +vuint32m4_t test___riscv_vand_vx_u32m4_mu(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m4_mu(mask,merge,op1,op2,31); +} + + +vuint32m8_t test___riscv_vand_vx_u32m8_mu(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m8_mu(mask,merge,op1,op2,31); +} + + +vuint64m1_t test___riscv_vand_vx_u64m1_mu(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m1_mu(mask,merge,op1,op2,31); +} + + +vuint64m2_t test___riscv_vand_vx_u64m2_mu(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,uint64_t op2,size_t vl) +{ + return 
__riscv_vand_vx_u64m2_mu(mask,merge,op1,op2,31); +} + + +vuint64m4_t test___riscv_vand_vx_u64m4_mu(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m4_mu(mask,merge,op1,op2,31); +} + + +vuint64m8_t test___riscv_vand_vx_u64m8_mu(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m8_mu(mask,merge,op1,op2,31); +} + + + +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf8,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf4,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf2,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m1,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m2,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m4,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m8,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf4,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf2,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m1,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m2,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m4,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m8,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*mf2,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m1,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m2,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m4,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m8,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m1,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m2,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times 
{vsetivli\s+zero,\s*31,\s*e64,\s*m4,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m8,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_mu_rv64-3.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_mu_rv64-3.c new file mode 100644 index 000000000000..e6feee2a86b1 --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_mu_rv64-3.c @@ -0,0 +1,292 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */ + +#include "riscv_vector.h" + +vint8mf8_t test___riscv_vand_vx_i8mf8_mu(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf8_mu(mask,merge,op1,op2,32); +} + + +vint8mf4_t test___riscv_vand_vx_i8mf4_mu(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf4_mu(mask,merge,op1,op2,32); +} + + +vint8mf2_t test___riscv_vand_vx_i8mf2_mu(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf2_mu(mask,merge,op1,op2,32); +} + + +vint8m1_t test___riscv_vand_vx_i8m1_mu(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m1_mu(mask,merge,op1,op2,32); +} + + +vint8m2_t test___riscv_vand_vx_i8m2_mu(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m2_mu(mask,merge,op1,op2,32); +} + + +vint8m4_t test___riscv_vand_vx_i8m4_mu(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m4_mu(mask,merge,op1,op2,32); +} + + +vint8m8_t test___riscv_vand_vx_i8m8_mu(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m8_mu(mask,merge,op1,op2,32); +} + + +vint16mf4_t test___riscv_vand_vx_i16mf4_mu(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16mf4_mu(mask,merge,op1,op2,32); +} + + +vint16mf2_t test___riscv_vand_vx_i16mf2_mu(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16mf2_mu(mask,merge,op1,op2,32); +} + + +vint16m1_t test___riscv_vand_vx_i16m1_mu(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m1_mu(mask,merge,op1,op2,32); +} + + +vint16m2_t test___riscv_vand_vx_i16m2_mu(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m2_mu(mask,merge,op1,op2,32); +} + + +vint16m4_t test___riscv_vand_vx_i16m4_mu(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m4_mu(mask,merge,op1,op2,32); +} + + +vint16m8_t test___riscv_vand_vx_i16m8_mu(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m8_mu(mask,merge,op1,op2,32); +} + + +vint32mf2_t test___riscv_vand_vx_i32mf2_mu(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32mf2_mu(mask,merge,op1,op2,32); +} + + +vint32m1_t test___riscv_vand_vx_i32m1_mu(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m1_mu(mask,merge,op1,op2,32); +} + + +vint32m2_t test___riscv_vand_vx_i32m2_mu(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,int32_t op2,size_t vl) +{ + return 
__riscv_vand_vx_i32m2_mu(mask,merge,op1,op2,32); +} + + +vint32m4_t test___riscv_vand_vx_i32m4_mu(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m4_mu(mask,merge,op1,op2,32); +} + + +vint32m8_t test___riscv_vand_vx_i32m8_mu(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m8_mu(mask,merge,op1,op2,32); +} + + +vint64m1_t test___riscv_vand_vx_i64m1_mu(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m1_mu(mask,merge,op1,op2,32); +} + + +vint64m2_t test___riscv_vand_vx_i64m2_mu(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m2_mu(mask,merge,op1,op2,32); +} + + +vint64m4_t test___riscv_vand_vx_i64m4_mu(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m4_mu(mask,merge,op1,op2,32); +} + + +vint64m8_t test___riscv_vand_vx_i64m8_mu(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m8_mu(mask,merge,op1,op2,32); +} + + +vuint8mf8_t test___riscv_vand_vx_u8mf8_mu(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf8_mu(mask,merge,op1,op2,32); +} + + +vuint8mf4_t test___riscv_vand_vx_u8mf4_mu(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf4_mu(mask,merge,op1,op2,32); +} + + +vuint8mf2_t test___riscv_vand_vx_u8mf2_mu(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf2_mu(mask,merge,op1,op2,32); +} + + +vuint8m1_t test___riscv_vand_vx_u8m1_mu(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m1_mu(mask,merge,op1,op2,32); +} + + +vuint8m2_t test___riscv_vand_vx_u8m2_mu(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m2_mu(mask,merge,op1,op2,32); +} + + +vuint8m4_t test___riscv_vand_vx_u8m4_mu(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m4_mu(mask,merge,op1,op2,32); +} + + +vuint8m8_t test___riscv_vand_vx_u8m8_mu(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m8_mu(mask,merge,op1,op2,32); +} + + +vuint16mf4_t test___riscv_vand_vx_u16mf4_mu(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16mf4_mu(mask,merge,op1,op2,32); +} + + +vuint16mf2_t test___riscv_vand_vx_u16mf2_mu(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16mf2_mu(mask,merge,op1,op2,32); +} + + +vuint16m1_t test___riscv_vand_vx_u16m1_mu(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m1_mu(mask,merge,op1,op2,32); +} + + +vuint16m2_t test___riscv_vand_vx_u16m2_mu(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m2_mu(mask,merge,op1,op2,32); +} + + +vuint16m4_t test___riscv_vand_vx_u16m4_mu(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m4_mu(mask,merge,op1,op2,32); +} + + +vuint16m8_t test___riscv_vand_vx_u16m8_mu(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m8_mu(mask,merge,op1,op2,32); +} + + +vuint32mf2_t 
test___riscv_vand_vx_u32mf2_mu(vbool64_t mask,vuint32mf2_t merge,vuint32mf2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32mf2_mu(mask,merge,op1,op2,32); +} + + +vuint32m1_t test___riscv_vand_vx_u32m1_mu(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m1_mu(mask,merge,op1,op2,32); +} + + +vuint32m2_t test___riscv_vand_vx_u32m2_mu(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m2_mu(mask,merge,op1,op2,32); +} + + +vuint32m4_t test___riscv_vand_vx_u32m4_mu(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m4_mu(mask,merge,op1,op2,32); +} + + +vuint32m8_t test___riscv_vand_vx_u32m8_mu(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m8_mu(mask,merge,op1,op2,32); +} + + +vuint64m1_t test___riscv_vand_vx_u64m1_mu(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m1_mu(mask,merge,op1,op2,32); +} + + +vuint64m2_t test___riscv_vand_vx_u64m2_mu(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m2_mu(mask,merge,op1,op2,32); +} + + +vuint64m4_t test___riscv_vand_vx_u64m4_mu(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m4_mu(mask,merge,op1,op2,32); +} + + +vuint64m8_t test___riscv_vand_vx_u64m8_mu(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m8_mu(mask,merge,op1,op2,32); +} + + + +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final 
{ scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*t[au],\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_rv32-1.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_rv32-1.c new file mode 100644 index 000000000000..7411e96f901d --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_rv32-1.c @@ -0,0 +1,289 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */ + +#include "riscv_vector.h" + +vint8mf8_t test___riscv_vand_vx_i8mf8(vint8mf8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf8(op1,op2,vl); +} + + +vint8mf4_t test___riscv_vand_vx_i8mf4(vint8mf4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf4(op1,op2,vl); +} + + +vint8mf2_t test___riscv_vand_vx_i8mf2(vint8mf2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf2(op1,op2,vl); +} + + +vint8m1_t test___riscv_vand_vx_i8m1(vint8m1_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m1(op1,op2,vl); +} + + +vint8m2_t test___riscv_vand_vx_i8m2(vint8m2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m2(op1,op2,vl); +} + + +vint8m4_t test___riscv_vand_vx_i8m4(vint8m4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m4(op1,op2,vl); +} + + +vint8m8_t test___riscv_vand_vx_i8m8(vint8m8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m8(op1,op2,vl); +} + + +vint16mf4_t test___riscv_vand_vx_i16mf4(vint16mf4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16mf4(op1,op2,vl); +} + + +vint16mf2_t test___riscv_vand_vx_i16mf2(vint16mf2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16mf2(op1,op2,vl); +} + + +vint16m1_t test___riscv_vand_vx_i16m1(vint16m1_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m1(op1,op2,vl); +} + + +vint16m2_t test___riscv_vand_vx_i16m2(vint16m2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m2(op1,op2,vl); +} + + +vint16m4_t test___riscv_vand_vx_i16m4(vint16m4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m4(op1,op2,vl); +} + + 
+vint16m8_t test___riscv_vand_vx_i16m8(vint16m8_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m8(op1,op2,vl); +} + + +vint32mf2_t test___riscv_vand_vx_i32mf2(vint32mf2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32mf2(op1,op2,vl); +} + + +vint32m1_t test___riscv_vand_vx_i32m1(vint32m1_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m1(op1,op2,vl); +} + + +vint32m2_t test___riscv_vand_vx_i32m2(vint32m2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m2(op1,op2,vl); +} + + +vint32m4_t test___riscv_vand_vx_i32m4(vint32m4_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m4(op1,op2,vl); +} + + +vint32m8_t test___riscv_vand_vx_i32m8(vint32m8_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m8(op1,op2,vl); +} + + +vint64m1_t test___riscv_vand_vx_i64m1(vint64m1_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m1(op1,op2,vl); +} + + +vint64m2_t test___riscv_vand_vx_i64m2(vint64m2_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m2(op1,op2,vl); +} + + +vint64m4_t test___riscv_vand_vx_i64m4(vint64m4_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m4(op1,op2,vl); +} + + +vint64m8_t test___riscv_vand_vx_i64m8(vint64m8_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m8(op1,op2,vl); +} + + +vuint8mf8_t test___riscv_vand_vx_u8mf8(vuint8mf8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf8(op1,op2,vl); +} + + +vuint8mf4_t test___riscv_vand_vx_u8mf4(vuint8mf4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf4(op1,op2,vl); +} + + +vuint8mf2_t test___riscv_vand_vx_u8mf2(vuint8mf2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf2(op1,op2,vl); +} + + +vuint8m1_t test___riscv_vand_vx_u8m1(vuint8m1_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m1(op1,op2,vl); +} + + +vuint8m2_t test___riscv_vand_vx_u8m2(vuint8m2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m2(op1,op2,vl); +} + + +vuint8m4_t test___riscv_vand_vx_u8m4(vuint8m4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m4(op1,op2,vl); +} + + +vuint8m8_t test___riscv_vand_vx_u8m8(vuint8m8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m8(op1,op2,vl); +} + + +vuint16mf4_t test___riscv_vand_vx_u16mf4(vuint16mf4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16mf4(op1,op2,vl); +} + + +vuint16mf2_t test___riscv_vand_vx_u16mf2(vuint16mf2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16mf2(op1,op2,vl); +} + + +vuint16m1_t test___riscv_vand_vx_u16m1(vuint16m1_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m1(op1,op2,vl); +} + + +vuint16m2_t test___riscv_vand_vx_u16m2(vuint16m2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m2(op1,op2,vl); +} + + +vuint16m4_t test___riscv_vand_vx_u16m4(vuint16m4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m4(op1,op2,vl); +} + + +vuint16m8_t test___riscv_vand_vx_u16m8(vuint16m8_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m8(op1,op2,vl); +} + + +vuint32mf2_t test___riscv_vand_vx_u32mf2(vuint32mf2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32mf2(op1,op2,vl); +} + + +vuint32m1_t test___riscv_vand_vx_u32m1(vuint32m1_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m1(op1,op2,vl); +} + + +vuint32m2_t test___riscv_vand_vx_u32m2(vuint32m2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m2(op1,op2,vl); +} + + +vuint32m4_t test___riscv_vand_vx_u32m4(vuint32m4_t op1,uint32_t op2,size_t vl) +{ + return 
__riscv_vand_vx_u32m4(op1,op2,vl); +} + + +vuint32m8_t test___riscv_vand_vx_u32m8(vuint32m8_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m8(op1,op2,vl); +} + + +vuint64m1_t test___riscv_vand_vx_u64m1(vuint64m1_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m1(op1,op2,vl); +} + + +vuint64m2_t test___riscv_vand_vx_u64m2(vuint64m2_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m2(op1,op2,vl); +} + + +vuint64m4_t test___riscv_vand_vx_u64m4(vuint64m4_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m4(op1,op2,vl); +} + + +vuint64m8_t test___riscv_vand_vx_u64m8(vuint64m8_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m8(op1,op2,vl); +} + + + +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vand\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 8 } } 
*/ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_rv32-2.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_rv32-2.c new file mode 100644 index 000000000000..568635cc899c --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_rv32-2.c @@ -0,0 +1,289 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */ + +#include "riscv_vector.h" + +vint8mf8_t test___riscv_vand_vx_i8mf8(vint8mf8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf8(op1,op2,31); +} + + +vint8mf4_t test___riscv_vand_vx_i8mf4(vint8mf4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf4(op1,op2,31); +} + + +vint8mf2_t test___riscv_vand_vx_i8mf2(vint8mf2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf2(op1,op2,31); +} + + +vint8m1_t test___riscv_vand_vx_i8m1(vint8m1_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m1(op1,op2,31); +} + + +vint8m2_t test___riscv_vand_vx_i8m2(vint8m2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m2(op1,op2,31); +} + + +vint8m4_t test___riscv_vand_vx_i8m4(vint8m4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m4(op1,op2,31); +} + + +vint8m8_t test___riscv_vand_vx_i8m8(vint8m8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m8(op1,op2,31); +} + + +vint16mf4_t test___riscv_vand_vx_i16mf4(vint16mf4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16mf4(op1,op2,31); +} + + +vint16mf2_t test___riscv_vand_vx_i16mf2(vint16mf2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16mf2(op1,op2,31); +} + + +vint16m1_t test___riscv_vand_vx_i16m1(vint16m1_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m1(op1,op2,31); +} + + +vint16m2_t test___riscv_vand_vx_i16m2(vint16m2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m2(op1,op2,31); +} + + +vint16m4_t test___riscv_vand_vx_i16m4(vint16m4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m4(op1,op2,31); +} + + +vint16m8_t test___riscv_vand_vx_i16m8(vint16m8_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m8(op1,op2,31); +} + + +vint32mf2_t test___riscv_vand_vx_i32mf2(vint32mf2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32mf2(op1,op2,31); +} + + +vint32m1_t test___riscv_vand_vx_i32m1(vint32m1_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m1(op1,op2,31); +} + + +vint32m2_t test___riscv_vand_vx_i32m2(vint32m2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m2(op1,op2,31); +} + + +vint32m4_t test___riscv_vand_vx_i32m4(vint32m4_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m4(op1,op2,31); +} + + +vint32m8_t test___riscv_vand_vx_i32m8(vint32m8_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m8(op1,op2,31); +} + + +vint64m1_t test___riscv_vand_vx_i64m1(vint64m1_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m1(op1,op2,31); +} + + +vint64m2_t test___riscv_vand_vx_i64m2(vint64m2_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m2(op1,op2,31); +} + + +vint64m4_t test___riscv_vand_vx_i64m4(vint64m4_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m4(op1,op2,31); +} + + +vint64m8_t test___riscv_vand_vx_i64m8(vint64m8_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m8(op1,op2,31); +} + + +vuint8mf8_t test___riscv_vand_vx_u8mf8(vuint8mf8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf8(op1,op2,31); +} + + +vuint8mf4_t test___riscv_vand_vx_u8mf4(vuint8mf4_t op1,uint8_t op2,size_t vl) +{ + return 
__riscv_vand_vx_u8mf4(op1,op2,31); +} + + +vuint8mf2_t test___riscv_vand_vx_u8mf2(vuint8mf2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf2(op1,op2,31); +} + + +vuint8m1_t test___riscv_vand_vx_u8m1(vuint8m1_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m1(op1,op2,31); +} + + +vuint8m2_t test___riscv_vand_vx_u8m2(vuint8m2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m2(op1,op2,31); +} + + +vuint8m4_t test___riscv_vand_vx_u8m4(vuint8m4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m4(op1,op2,31); +} + + +vuint8m8_t test___riscv_vand_vx_u8m8(vuint8m8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m8(op1,op2,31); +} + + +vuint16mf4_t test___riscv_vand_vx_u16mf4(vuint16mf4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16mf4(op1,op2,31); +} + + +vuint16mf2_t test___riscv_vand_vx_u16mf2(vuint16mf2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16mf2(op1,op2,31); +} + + +vuint16m1_t test___riscv_vand_vx_u16m1(vuint16m1_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m1(op1,op2,31); +} + + +vuint16m2_t test___riscv_vand_vx_u16m2(vuint16m2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m2(op1,op2,31); +} + + +vuint16m4_t test___riscv_vand_vx_u16m4(vuint16m4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m4(op1,op2,31); +} + + +vuint16m8_t test___riscv_vand_vx_u16m8(vuint16m8_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m8(op1,op2,31); +} + + +vuint32mf2_t test___riscv_vand_vx_u32mf2(vuint32mf2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32mf2(op1,op2,31); +} + + +vuint32m1_t test___riscv_vand_vx_u32m1(vuint32m1_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m1(op1,op2,31); +} + + +vuint32m2_t test___riscv_vand_vx_u32m2(vuint32m2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m2(op1,op2,31); +} + + +vuint32m4_t test___riscv_vand_vx_u32m4(vuint32m4_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m4(op1,op2,31); +} + + +vuint32m8_t test___riscv_vand_vx_u32m8(vuint32m8_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m8(op1,op2,31); +} + + +vuint64m1_t test___riscv_vand_vx_u64m1(vuint64m1_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m1(op1,op2,31); +} + + +vuint64m2_t test___riscv_vand_vx_u64m2(vuint64m2_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m2(op1,op2,31); +} + + +vuint64m4_t test___riscv_vand_vx_u64m4(vuint64m4_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m4(op1,op2,31); +} + + +vuint64m8_t test___riscv_vand_vx_u64m8(vuint64m8_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m8(op1,op2,31); +} + + + +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf8,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf4,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf2,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m1,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m2,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times 
{vsetivli\s+zero,\s*31,\s*e8,\s*m4,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m8,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf4,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf2,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m1,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m2,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m4,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m8,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*mf2,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m1,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m2,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m4,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m8,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vand\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 8 } } */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_rv32-3.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_rv32-3.c new file mode 100644 index 000000000000..f523d4718282 --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_rv32-3.c @@ -0,0 +1,289 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */ + +#include "riscv_vector.h" + +vint8mf8_t test___riscv_vand_vx_i8mf8(vint8mf8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf8(op1,op2,32); +} + + +vint8mf4_t test___riscv_vand_vx_i8mf4(vint8mf4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf4(op1,op2,32); +} + + +vint8mf2_t test___riscv_vand_vx_i8mf2(vint8mf2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf2(op1,op2,32); +} + + +vint8m1_t test___riscv_vand_vx_i8m1(vint8m1_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m1(op1,op2,32); +} + + +vint8m2_t test___riscv_vand_vx_i8m2(vint8m2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m2(op1,op2,32); +} + + +vint8m4_t test___riscv_vand_vx_i8m4(vint8m4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m4(op1,op2,32); +} + + +vint8m8_t test___riscv_vand_vx_i8m8(vint8m8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m8(op1,op2,32); +} + + +vint16mf4_t test___riscv_vand_vx_i16mf4(vint16mf4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16mf4(op1,op2,32); +} + + +vint16mf2_t test___riscv_vand_vx_i16mf2(vint16mf2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16mf2(op1,op2,32); +} 
+ + +vint16m1_t test___riscv_vand_vx_i16m1(vint16m1_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m1(op1,op2,32); +} + + +vint16m2_t test___riscv_vand_vx_i16m2(vint16m2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m2(op1,op2,32); +} + + +vint16m4_t test___riscv_vand_vx_i16m4(vint16m4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m4(op1,op2,32); +} + + +vint16m8_t test___riscv_vand_vx_i16m8(vint16m8_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m8(op1,op2,32); +} + + +vint32mf2_t test___riscv_vand_vx_i32mf2(vint32mf2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32mf2(op1,op2,32); +} + + +vint32m1_t test___riscv_vand_vx_i32m1(vint32m1_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m1(op1,op2,32); +} + + +vint32m2_t test___riscv_vand_vx_i32m2(vint32m2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m2(op1,op2,32); +} + + +vint32m4_t test___riscv_vand_vx_i32m4(vint32m4_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m4(op1,op2,32); +} + + +vint32m8_t test___riscv_vand_vx_i32m8(vint32m8_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m8(op1,op2,32); +} + + +vint64m1_t test___riscv_vand_vx_i64m1(vint64m1_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m1(op1,op2,32); +} + + +vint64m2_t test___riscv_vand_vx_i64m2(vint64m2_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m2(op1,op2,32); +} + + +vint64m4_t test___riscv_vand_vx_i64m4(vint64m4_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m4(op1,op2,32); +} + + +vint64m8_t test___riscv_vand_vx_i64m8(vint64m8_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m8(op1,op2,32); +} + + +vuint8mf8_t test___riscv_vand_vx_u8mf8(vuint8mf8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf8(op1,op2,32); +} + + +vuint8mf4_t test___riscv_vand_vx_u8mf4(vuint8mf4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf4(op1,op2,32); +} + + +vuint8mf2_t test___riscv_vand_vx_u8mf2(vuint8mf2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf2(op1,op2,32); +} + + +vuint8m1_t test___riscv_vand_vx_u8m1(vuint8m1_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m1(op1,op2,32); +} + + +vuint8m2_t test___riscv_vand_vx_u8m2(vuint8m2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m2(op1,op2,32); +} + + +vuint8m4_t test___riscv_vand_vx_u8m4(vuint8m4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m4(op1,op2,32); +} + + +vuint8m8_t test___riscv_vand_vx_u8m8(vuint8m8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m8(op1,op2,32); +} + + +vuint16mf4_t test___riscv_vand_vx_u16mf4(vuint16mf4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16mf4(op1,op2,32); +} + + +vuint16mf2_t test___riscv_vand_vx_u16mf2(vuint16mf2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16mf2(op1,op2,32); +} + + +vuint16m1_t test___riscv_vand_vx_u16m1(vuint16m1_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m1(op1,op2,32); +} + + +vuint16m2_t test___riscv_vand_vx_u16m2(vuint16m2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m2(op1,op2,32); +} + + +vuint16m4_t test___riscv_vand_vx_u16m4(vuint16m4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m4(op1,op2,32); +} + + +vuint16m8_t test___riscv_vand_vx_u16m8(vuint16m8_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m8(op1,op2,32); +} + + +vuint32mf2_t test___riscv_vand_vx_u32mf2(vuint32mf2_t op1,uint32_t op2,size_t vl) +{ + return 
__riscv_vand_vx_u32mf2(op1,op2,32); +} + + +vuint32m1_t test___riscv_vand_vx_u32m1(vuint32m1_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m1(op1,op2,32); +} + + +vuint32m2_t test___riscv_vand_vx_u32m2(vuint32m2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m2(op1,op2,32); +} + + +vuint32m4_t test___riscv_vand_vx_u32m4(vuint32m4_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m4(op1,op2,32); +} + + +vuint32m8_t test___riscv_vand_vx_u32m8(vuint32m8_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m8(op1,op2,32); +} + + +vuint64m1_t test___riscv_vand_vx_u64m1(vuint64m1_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m1(op1,op2,32); +} + + +vuint64m2_t test___riscv_vand_vx_u64m2(vuint64m2_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m2(op1,op2,32); +} + + +vuint64m4_t test___riscv_vand_vx_u64m4(vuint64m4_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m4(op1,op2,32); +} + + +vuint64m8_t test___riscv_vand_vx_u64m8(vuint64m8_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m8(op1,op2,32); +} + + + +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 
2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vand\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 8 } } */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_rv64-1.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_rv64-1.c new file mode 100644 index 000000000000..9fde661ef01a --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_rv64-1.c @@ -0,0 +1,292 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */ + +#include "riscv_vector.h" + +vint8mf8_t test___riscv_vand_vx_i8mf8(vint8mf8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf8(op1,op2,vl); +} + + +vint8mf4_t test___riscv_vand_vx_i8mf4(vint8mf4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf4(op1,op2,vl); +} + + +vint8mf2_t test___riscv_vand_vx_i8mf2(vint8mf2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf2(op1,op2,vl); +} + + +vint8m1_t test___riscv_vand_vx_i8m1(vint8m1_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m1(op1,op2,vl); +} + + +vint8m2_t test___riscv_vand_vx_i8m2(vint8m2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m2(op1,op2,vl); +} + + +vint8m4_t test___riscv_vand_vx_i8m4(vint8m4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m4(op1,op2,vl); +} + + +vint8m8_t test___riscv_vand_vx_i8m8(vint8m8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m8(op1,op2,vl); +} + + +vint16mf4_t test___riscv_vand_vx_i16mf4(vint16mf4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16mf4(op1,op2,vl); +} + + +vint16mf2_t test___riscv_vand_vx_i16mf2(vint16mf2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16mf2(op1,op2,vl); +} + + +vint16m1_t test___riscv_vand_vx_i16m1(vint16m1_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m1(op1,op2,vl); +} + + +vint16m2_t test___riscv_vand_vx_i16m2(vint16m2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m2(op1,op2,vl); +} + + +vint16m4_t test___riscv_vand_vx_i16m4(vint16m4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m4(op1,op2,vl); +} + + +vint16m8_t test___riscv_vand_vx_i16m8(vint16m8_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m8(op1,op2,vl); +} + + +vint32mf2_t test___riscv_vand_vx_i32mf2(vint32mf2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32mf2(op1,op2,vl); +} + + +vint32m1_t test___riscv_vand_vx_i32m1(vint32m1_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m1(op1,op2,vl); +} + + +vint32m2_t test___riscv_vand_vx_i32m2(vint32m2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m2(op1,op2,vl); +} + + +vint32m4_t test___riscv_vand_vx_i32m4(vint32m4_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m4(op1,op2,vl); +} + + +vint32m8_t test___riscv_vand_vx_i32m8(vint32m8_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m8(op1,op2,vl); +} + + +vint64m1_t test___riscv_vand_vx_i64m1(vint64m1_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m1(op1,op2,vl); +} + + +vint64m2_t test___riscv_vand_vx_i64m2(vint64m2_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m2(op1,op2,vl); +} + + +vint64m4_t test___riscv_vand_vx_i64m4(vint64m4_t op1,int64_t op2,size_t vl) +{ + return 
__riscv_vand_vx_i64m4(op1,op2,vl); +} + + +vint64m8_t test___riscv_vand_vx_i64m8(vint64m8_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m8(op1,op2,vl); +} + + +vuint8mf8_t test___riscv_vand_vx_u8mf8(vuint8mf8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf8(op1,op2,vl); +} + + +vuint8mf4_t test___riscv_vand_vx_u8mf4(vuint8mf4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf4(op1,op2,vl); +} + + +vuint8mf2_t test___riscv_vand_vx_u8mf2(vuint8mf2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf2(op1,op2,vl); +} + + +vuint8m1_t test___riscv_vand_vx_u8m1(vuint8m1_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m1(op1,op2,vl); +} + + +vuint8m2_t test___riscv_vand_vx_u8m2(vuint8m2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m2(op1,op2,vl); +} + + +vuint8m4_t test___riscv_vand_vx_u8m4(vuint8m4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m4(op1,op2,vl); +} + + +vuint8m8_t test___riscv_vand_vx_u8m8(vuint8m8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m8(op1,op2,vl); +} + + +vuint16mf4_t test___riscv_vand_vx_u16mf4(vuint16mf4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16mf4(op1,op2,vl); +} + + +vuint16mf2_t test___riscv_vand_vx_u16mf2(vuint16mf2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16mf2(op1,op2,vl); +} + + +vuint16m1_t test___riscv_vand_vx_u16m1(vuint16m1_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m1(op1,op2,vl); +} + + +vuint16m2_t test___riscv_vand_vx_u16m2(vuint16m2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m2(op1,op2,vl); +} + + +vuint16m4_t test___riscv_vand_vx_u16m4(vuint16m4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m4(op1,op2,vl); +} + + +vuint16m8_t test___riscv_vand_vx_u16m8(vuint16m8_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m8(op1,op2,vl); +} + + +vuint32mf2_t test___riscv_vand_vx_u32mf2(vuint32mf2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32mf2(op1,op2,vl); +} + + +vuint32m1_t test___riscv_vand_vx_u32m1(vuint32m1_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m1(op1,op2,vl); +} + + +vuint32m2_t test___riscv_vand_vx_u32m2(vuint32m2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m2(op1,op2,vl); +} + + +vuint32m4_t test___riscv_vand_vx_u32m4(vuint32m4_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m4(op1,op2,vl); +} + + +vuint32m8_t test___riscv_vand_vx_u32m8(vuint32m8_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m8(op1,op2,vl); +} + + +vuint64m1_t test___riscv_vand_vx_u64m1(vuint64m1_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m1(op1,op2,vl); +} + + +vuint64m2_t test___riscv_vand_vx_u64m2(vuint64m2_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m2(op1,op2,vl); +} + + +vuint64m4_t test___riscv_vand_vx_u64m4(vuint64m4_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m4(op1,op2,vl); +} + + +vuint64m8_t test___riscv_vand_vx_u64m8(vuint64m8_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m8(op1,op2,vl); +} + + + +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times 
{vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_rv64-2.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_rv64-2.c new file mode 100644 index 000000000000..3da692dea08f --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_rv64-2.c @@ -0,0 +1,292 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */ + +#include "riscv_vector.h" + +vint8mf8_t test___riscv_vand_vx_i8mf8(vint8mf8_t op1,int8_t op2,size_t vl) +{ + return 
__riscv_vand_vx_i8mf8(op1,op2,31); +} + + +vint8mf4_t test___riscv_vand_vx_i8mf4(vint8mf4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf4(op1,op2,31); +} + + +vint8mf2_t test___riscv_vand_vx_i8mf2(vint8mf2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf2(op1,op2,31); +} + + +vint8m1_t test___riscv_vand_vx_i8m1(vint8m1_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m1(op1,op2,31); +} + + +vint8m2_t test___riscv_vand_vx_i8m2(vint8m2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m2(op1,op2,31); +} + + +vint8m4_t test___riscv_vand_vx_i8m4(vint8m4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m4(op1,op2,31); +} + + +vint8m8_t test___riscv_vand_vx_i8m8(vint8m8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m8(op1,op2,31); +} + + +vint16mf4_t test___riscv_vand_vx_i16mf4(vint16mf4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16mf4(op1,op2,31); +} + + +vint16mf2_t test___riscv_vand_vx_i16mf2(vint16mf2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16mf2(op1,op2,31); +} + + +vint16m1_t test___riscv_vand_vx_i16m1(vint16m1_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m1(op1,op2,31); +} + + +vint16m2_t test___riscv_vand_vx_i16m2(vint16m2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m2(op1,op2,31); +} + + +vint16m4_t test___riscv_vand_vx_i16m4(vint16m4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m4(op1,op2,31); +} + + +vint16m8_t test___riscv_vand_vx_i16m8(vint16m8_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m8(op1,op2,31); +} + + +vint32mf2_t test___riscv_vand_vx_i32mf2(vint32mf2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32mf2(op1,op2,31); +} + + +vint32m1_t test___riscv_vand_vx_i32m1(vint32m1_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m1(op1,op2,31); +} + + +vint32m2_t test___riscv_vand_vx_i32m2(vint32m2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m2(op1,op2,31); +} + + +vint32m4_t test___riscv_vand_vx_i32m4(vint32m4_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m4(op1,op2,31); +} + + +vint32m8_t test___riscv_vand_vx_i32m8(vint32m8_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m8(op1,op2,31); +} + + +vint64m1_t test___riscv_vand_vx_i64m1(vint64m1_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m1(op1,op2,31); +} + + +vint64m2_t test___riscv_vand_vx_i64m2(vint64m2_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m2(op1,op2,31); +} + + +vint64m4_t test___riscv_vand_vx_i64m4(vint64m4_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m4(op1,op2,31); +} + + +vint64m8_t test___riscv_vand_vx_i64m8(vint64m8_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m8(op1,op2,31); +} + + +vuint8mf8_t test___riscv_vand_vx_u8mf8(vuint8mf8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf8(op1,op2,31); +} + + +vuint8mf4_t test___riscv_vand_vx_u8mf4(vuint8mf4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf4(op1,op2,31); +} + + +vuint8mf2_t test___riscv_vand_vx_u8mf2(vuint8mf2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf2(op1,op2,31); +} + + +vuint8m1_t test___riscv_vand_vx_u8m1(vuint8m1_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m1(op1,op2,31); +} + + +vuint8m2_t test___riscv_vand_vx_u8m2(vuint8m2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m2(op1,op2,31); +} + + +vuint8m4_t test___riscv_vand_vx_u8m4(vuint8m4_t op1,uint8_t op2,size_t vl) +{ + return 
__riscv_vand_vx_u8m4(op1,op2,31); +} + + +vuint8m8_t test___riscv_vand_vx_u8m8(vuint8m8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m8(op1,op2,31); +} + + +vuint16mf4_t test___riscv_vand_vx_u16mf4(vuint16mf4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16mf4(op1,op2,31); +} + + +vuint16mf2_t test___riscv_vand_vx_u16mf2(vuint16mf2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16mf2(op1,op2,31); +} + + +vuint16m1_t test___riscv_vand_vx_u16m1(vuint16m1_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m1(op1,op2,31); +} + + +vuint16m2_t test___riscv_vand_vx_u16m2(vuint16m2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m2(op1,op2,31); +} + + +vuint16m4_t test___riscv_vand_vx_u16m4(vuint16m4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m4(op1,op2,31); +} + + +vuint16m8_t test___riscv_vand_vx_u16m8(vuint16m8_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m8(op1,op2,31); +} + + +vuint32mf2_t test___riscv_vand_vx_u32mf2(vuint32mf2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32mf2(op1,op2,31); +} + + +vuint32m1_t test___riscv_vand_vx_u32m1(vuint32m1_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m1(op1,op2,31); +} + + +vuint32m2_t test___riscv_vand_vx_u32m2(vuint32m2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m2(op1,op2,31); +} + + +vuint32m4_t test___riscv_vand_vx_u32m4(vuint32m4_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m4(op1,op2,31); +} + + +vuint32m8_t test___riscv_vand_vx_u32m8(vuint32m8_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m8(op1,op2,31); +} + + +vuint64m1_t test___riscv_vand_vx_u64m1(vuint64m1_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m1(op1,op2,31); +} + + +vuint64m2_t test___riscv_vand_vx_u64m2(vuint64m2_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m2(op1,op2,31); +} + + +vuint64m4_t test___riscv_vand_vx_u64m4(vuint64m4_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m4(op1,op2,31); +} + + +vuint64m8_t test___riscv_vand_vx_u64m8(vuint64m8_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m8(op1,op2,31); +} + + + +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf8,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf4,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf2,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m1,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m2,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m4,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m8,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf4,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf2,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times 
{vsetivli\s+zero,\s*31,\s*e16,\s*m1,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m2,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m4,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m8,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*mf2,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m1,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m2,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m4,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m8,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m1,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m2,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m4,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m8,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_rv64-3.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_rv64-3.c new file mode 100644 index 000000000000..f3b498605f84 --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_rv64-3.c @@ -0,0 +1,292 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */ + +#include "riscv_vector.h" + +vint8mf8_t test___riscv_vand_vx_i8mf8(vint8mf8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf8(op1,op2,32); +} + + +vint8mf4_t test___riscv_vand_vx_i8mf4(vint8mf4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf4(op1,op2,32); +} + + +vint8mf2_t test___riscv_vand_vx_i8mf2(vint8mf2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf2(op1,op2,32); +} + + +vint8m1_t test___riscv_vand_vx_i8m1(vint8m1_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m1(op1,op2,32); +} + + +vint8m2_t test___riscv_vand_vx_i8m2(vint8m2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m2(op1,op2,32); +} + + +vint8m4_t test___riscv_vand_vx_i8m4(vint8m4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m4(op1,op2,32); +} + + +vint8m8_t test___riscv_vand_vx_i8m8(vint8m8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m8(op1,op2,32); +} + + +vint16mf4_t test___riscv_vand_vx_i16mf4(vint16mf4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16mf4(op1,op2,32); +} + + +vint16mf2_t test___riscv_vand_vx_i16mf2(vint16mf2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16mf2(op1,op2,32); +} + + +vint16m1_t test___riscv_vand_vx_i16m1(vint16m1_t op1,int16_t op2,size_t vl) +{ + 
return __riscv_vand_vx_i16m1(op1,op2,32); +} + + +vint16m2_t test___riscv_vand_vx_i16m2(vint16m2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m2(op1,op2,32); +} + + +vint16m4_t test___riscv_vand_vx_i16m4(vint16m4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m4(op1,op2,32); +} + + +vint16m8_t test___riscv_vand_vx_i16m8(vint16m8_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m8(op1,op2,32); +} + + +vint32mf2_t test___riscv_vand_vx_i32mf2(vint32mf2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32mf2(op1,op2,32); +} + + +vint32m1_t test___riscv_vand_vx_i32m1(vint32m1_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m1(op1,op2,32); +} + + +vint32m2_t test___riscv_vand_vx_i32m2(vint32m2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m2(op1,op2,32); +} + + +vint32m4_t test___riscv_vand_vx_i32m4(vint32m4_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m4(op1,op2,32); +} + + +vint32m8_t test___riscv_vand_vx_i32m8(vint32m8_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m8(op1,op2,32); +} + + +vint64m1_t test___riscv_vand_vx_i64m1(vint64m1_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m1(op1,op2,32); +} + + +vint64m2_t test___riscv_vand_vx_i64m2(vint64m2_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m2(op1,op2,32); +} + + +vint64m4_t test___riscv_vand_vx_i64m4(vint64m4_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m4(op1,op2,32); +} + + +vint64m8_t test___riscv_vand_vx_i64m8(vint64m8_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m8(op1,op2,32); +} + + +vuint8mf8_t test___riscv_vand_vx_u8mf8(vuint8mf8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf8(op1,op2,32); +} + + +vuint8mf4_t test___riscv_vand_vx_u8mf4(vuint8mf4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf4(op1,op2,32); +} + + +vuint8mf2_t test___riscv_vand_vx_u8mf2(vuint8mf2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf2(op1,op2,32); +} + + +vuint8m1_t test___riscv_vand_vx_u8m1(vuint8m1_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m1(op1,op2,32); +} + + +vuint8m2_t test___riscv_vand_vx_u8m2(vuint8m2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m2(op1,op2,32); +} + + +vuint8m4_t test___riscv_vand_vx_u8m4(vuint8m4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m4(op1,op2,32); +} + + +vuint8m8_t test___riscv_vand_vx_u8m8(vuint8m8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m8(op1,op2,32); +} + + +vuint16mf4_t test___riscv_vand_vx_u16mf4(vuint16mf4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16mf4(op1,op2,32); +} + + +vuint16mf2_t test___riscv_vand_vx_u16mf2(vuint16mf2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16mf2(op1,op2,32); +} + + +vuint16m1_t test___riscv_vand_vx_u16m1(vuint16m1_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m1(op1,op2,32); +} + + +vuint16m2_t test___riscv_vand_vx_u16m2(vuint16m2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m2(op1,op2,32); +} + + +vuint16m4_t test___riscv_vand_vx_u16m4(vuint16m4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m4(op1,op2,32); +} + + +vuint16m8_t test___riscv_vand_vx_u16m8(vuint16m8_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m8(op1,op2,32); +} + + +vuint32mf2_t test___riscv_vand_vx_u32mf2(vuint32mf2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32mf2(op1,op2,32); +} + + +vuint32m1_t test___riscv_vand_vx_u32m1(vuint32m1_t op1,uint32_t 
op2,size_t vl) +{ + return __riscv_vand_vx_u32m1(op1,op2,32); +} + + +vuint32m2_t test___riscv_vand_vx_u32m2(vuint32m2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m2(op1,op2,32); +} + + +vuint32m4_t test___riscv_vand_vx_u32m4(vuint32m4_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m4(op1,op2,32); +} + + +vuint32m8_t test___riscv_vand_vx_u32m8(vuint32m8_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m8(op1,op2,32); +} + + +vuint64m1_t test___riscv_vand_vx_u64m1(vuint64m1_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m1(op1,op2,32); +} + + +vuint64m2_t test___riscv_vand_vx_u64m2(vuint64m2_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m2(op1,op2,32); +} + + +vuint64m4_t test___riscv_vand_vx_u64m4(vuint64m4_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m4(op1,op2,32); +} + + +vuint64m8_t test___riscv_vand_vx_u64m8(vuint64m8_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m8(op1,op2,32); +} + + + +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times 
{vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*t[au],\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_tu_rv32-1.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_tu_rv32-1.c new file mode 100644 index 000000000000..77f083bed0b3 --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_tu_rv32-1.c @@ -0,0 +1,289 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */ + +#include "riscv_vector.h" + +vint8mf8_t test___riscv_vand_vx_i8mf8_tu(vint8mf8_t merge,vint8mf8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf8_tu(merge,op1,op2,vl); +} + + +vint8mf4_t test___riscv_vand_vx_i8mf4_tu(vint8mf4_t merge,vint8mf4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf4_tu(merge,op1,op2,vl); +} + + +vint8mf2_t test___riscv_vand_vx_i8mf2_tu(vint8mf2_t merge,vint8mf2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf2_tu(merge,op1,op2,vl); +} + + +vint8m1_t test___riscv_vand_vx_i8m1_tu(vint8m1_t merge,vint8m1_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m1_tu(merge,op1,op2,vl); +} + + +vint8m2_t test___riscv_vand_vx_i8m2_tu(vint8m2_t merge,vint8m2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m2_tu(merge,op1,op2,vl); +} + + +vint8m4_t test___riscv_vand_vx_i8m4_tu(vint8m4_t merge,vint8m4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m4_tu(merge,op1,op2,vl); +} + + +vint8m8_t test___riscv_vand_vx_i8m8_tu(vint8m8_t merge,vint8m8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m8_tu(merge,op1,op2,vl); +} + + +vint16mf4_t test___riscv_vand_vx_i16mf4_tu(vint16mf4_t merge,vint16mf4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16mf4_tu(merge,op1,op2,vl); +} + + +vint16mf2_t test___riscv_vand_vx_i16mf2_tu(vint16mf2_t merge,vint16mf2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16mf2_tu(merge,op1,op2,vl); +} + + +vint16m1_t test___riscv_vand_vx_i16m1_tu(vint16m1_t merge,vint16m1_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m1_tu(merge,op1,op2,vl); +} + + +vint16m2_t test___riscv_vand_vx_i16m2_tu(vint16m2_t merge,vint16m2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m2_tu(merge,op1,op2,vl); +} + + +vint16m4_t test___riscv_vand_vx_i16m4_tu(vint16m4_t merge,vint16m4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m4_tu(merge,op1,op2,vl); +} + + +vint16m8_t test___riscv_vand_vx_i16m8_tu(vint16m8_t merge,vint16m8_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m8_tu(merge,op1,op2,vl); +} + + +vint32mf2_t test___riscv_vand_vx_i32mf2_tu(vint32mf2_t merge,vint32mf2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32mf2_tu(merge,op1,op2,vl); +} + + 
+vint32m1_t test___riscv_vand_vx_i32m1_tu(vint32m1_t merge,vint32m1_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m1_tu(merge,op1,op2,vl); +} + + +vint32m2_t test___riscv_vand_vx_i32m2_tu(vint32m2_t merge,vint32m2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m2_tu(merge,op1,op2,vl); +} + + +vint32m4_t test___riscv_vand_vx_i32m4_tu(vint32m4_t merge,vint32m4_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m4_tu(merge,op1,op2,vl); +} + + +vint32m8_t test___riscv_vand_vx_i32m8_tu(vint32m8_t merge,vint32m8_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m8_tu(merge,op1,op2,vl); +} + + +vint64m1_t test___riscv_vand_vx_i64m1_tu(vint64m1_t merge,vint64m1_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m1_tu(merge,op1,op2,vl); +} + + +vint64m2_t test___riscv_vand_vx_i64m2_tu(vint64m2_t merge,vint64m2_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m2_tu(merge,op1,op2,vl); +} + + +vint64m4_t test___riscv_vand_vx_i64m4_tu(vint64m4_t merge,vint64m4_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m4_tu(merge,op1,op2,vl); +} + + +vint64m8_t test___riscv_vand_vx_i64m8_tu(vint64m8_t merge,vint64m8_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m8_tu(merge,op1,op2,vl); +} + + +vuint8mf8_t test___riscv_vand_vx_u8mf8_tu(vuint8mf8_t merge,vuint8mf8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf8_tu(merge,op1,op2,vl); +} + + +vuint8mf4_t test___riscv_vand_vx_u8mf4_tu(vuint8mf4_t merge,vuint8mf4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf4_tu(merge,op1,op2,vl); +} + + +vuint8mf2_t test___riscv_vand_vx_u8mf2_tu(vuint8mf2_t merge,vuint8mf2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf2_tu(merge,op1,op2,vl); +} + + +vuint8m1_t test___riscv_vand_vx_u8m1_tu(vuint8m1_t merge,vuint8m1_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m1_tu(merge,op1,op2,vl); +} + + +vuint8m2_t test___riscv_vand_vx_u8m2_tu(vuint8m2_t merge,vuint8m2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m2_tu(merge,op1,op2,vl); +} + + +vuint8m4_t test___riscv_vand_vx_u8m4_tu(vuint8m4_t merge,vuint8m4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m4_tu(merge,op1,op2,vl); +} + + +vuint8m8_t test___riscv_vand_vx_u8m8_tu(vuint8m8_t merge,vuint8m8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m8_tu(merge,op1,op2,vl); +} + + +vuint16mf4_t test___riscv_vand_vx_u16mf4_tu(vuint16mf4_t merge,vuint16mf4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16mf4_tu(merge,op1,op2,vl); +} + + +vuint16mf2_t test___riscv_vand_vx_u16mf2_tu(vuint16mf2_t merge,vuint16mf2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16mf2_tu(merge,op1,op2,vl); +} + + +vuint16m1_t test___riscv_vand_vx_u16m1_tu(vuint16m1_t merge,vuint16m1_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m1_tu(merge,op1,op2,vl); +} + + +vuint16m2_t test___riscv_vand_vx_u16m2_tu(vuint16m2_t merge,vuint16m2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m2_tu(merge,op1,op2,vl); +} + + +vuint16m4_t test___riscv_vand_vx_u16m4_tu(vuint16m4_t merge,vuint16m4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m4_tu(merge,op1,op2,vl); +} + + +vuint16m8_t test___riscv_vand_vx_u16m8_tu(vuint16m8_t merge,vuint16m8_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m8_tu(merge,op1,op2,vl); +} + + +vuint32mf2_t test___riscv_vand_vx_u32mf2_tu(vuint32mf2_t merge,vuint32mf2_t op1,uint32_t op2,size_t vl) +{ + return 
__riscv_vand_vx_u32mf2_tu(merge,op1,op2,vl); +} + + +vuint32m1_t test___riscv_vand_vx_u32m1_tu(vuint32m1_t merge,vuint32m1_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m1_tu(merge,op1,op2,vl); +} + + +vuint32m2_t test___riscv_vand_vx_u32m2_tu(vuint32m2_t merge,vuint32m2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m2_tu(merge,op1,op2,vl); +} + + +vuint32m4_t test___riscv_vand_vx_u32m4_tu(vuint32m4_t merge,vuint32m4_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m4_tu(merge,op1,op2,vl); +} + + +vuint32m8_t test___riscv_vand_vx_u32m8_tu(vuint32m8_t merge,vuint32m8_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m8_tu(merge,op1,op2,vl); +} + + +vuint64m1_t test___riscv_vand_vx_u64m1_tu(vuint64m1_t merge,vuint64m1_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m1_tu(merge,op1,op2,vl); +} + + +vuint64m2_t test___riscv_vand_vx_u64m2_tu(vuint64m2_t merge,vuint64m2_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m2_tu(merge,op1,op2,vl); +} + + +vuint64m4_t test___riscv_vand_vx_u64m4_tu(vuint64m4_t merge,vuint64m4_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m4_tu(merge,op1,op2,vl); +} + + +vuint64m8_t test___riscv_vand_vx_u64m8_tu(vuint64m8_t merge,vuint64m8_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m8_tu(merge,op1,op2,vl); +} + + + +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times 
{vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vand\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 8 } } */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_tu_rv32-2.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_tu_rv32-2.c new file mode 100644 index 000000000000..a90c7b6ee7fb --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_tu_rv32-2.c @@ -0,0 +1,289 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */ + +#include "riscv_vector.h" + +vint8mf8_t test___riscv_vand_vx_i8mf8_tu(vint8mf8_t merge,vint8mf8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf8_tu(merge,op1,op2,31); +} + + +vint8mf4_t test___riscv_vand_vx_i8mf4_tu(vint8mf4_t merge,vint8mf4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf4_tu(merge,op1,op2,31); +} + + +vint8mf2_t test___riscv_vand_vx_i8mf2_tu(vint8mf2_t merge,vint8mf2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf2_tu(merge,op1,op2,31); +} + + +vint8m1_t test___riscv_vand_vx_i8m1_tu(vint8m1_t merge,vint8m1_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m1_tu(merge,op1,op2,31); +} + + +vint8m2_t test___riscv_vand_vx_i8m2_tu(vint8m2_t merge,vint8m2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m2_tu(merge,op1,op2,31); +} + + +vint8m4_t test___riscv_vand_vx_i8m4_tu(vint8m4_t merge,vint8m4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m4_tu(merge,op1,op2,31); +} + + +vint8m8_t test___riscv_vand_vx_i8m8_tu(vint8m8_t merge,vint8m8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m8_tu(merge,op1,op2,31); +} + + +vint16mf4_t test___riscv_vand_vx_i16mf4_tu(vint16mf4_t merge,vint16mf4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16mf4_tu(merge,op1,op2,31); +} + + +vint16mf2_t test___riscv_vand_vx_i16mf2_tu(vint16mf2_t merge,vint16mf2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16mf2_tu(merge,op1,op2,31); +} + + +vint16m1_t test___riscv_vand_vx_i16m1_tu(vint16m1_t merge,vint16m1_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m1_tu(merge,op1,op2,31); +} + + +vint16m2_t test___riscv_vand_vx_i16m2_tu(vint16m2_t merge,vint16m2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m2_tu(merge,op1,op2,31); +} + + +vint16m4_t test___riscv_vand_vx_i16m4_tu(vint16m4_t merge,vint16m4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m4_tu(merge,op1,op2,31); +} + + +vint16m8_t test___riscv_vand_vx_i16m8_tu(vint16m8_t merge,vint16m8_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m8_tu(merge,op1,op2,31); +} + + +vint32mf2_t test___riscv_vand_vx_i32mf2_tu(vint32mf2_t merge,vint32mf2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32mf2_tu(merge,op1,op2,31); +} + + +vint32m1_t test___riscv_vand_vx_i32m1_tu(vint32m1_t merge,vint32m1_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m1_tu(merge,op1,op2,31); +} + + +vint32m2_t test___riscv_vand_vx_i32m2_tu(vint32m2_t merge,vint32m2_t 
op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m2_tu(merge,op1,op2,31); +} + + +vint32m4_t test___riscv_vand_vx_i32m4_tu(vint32m4_t merge,vint32m4_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m4_tu(merge,op1,op2,31); +} + + +vint32m8_t test___riscv_vand_vx_i32m8_tu(vint32m8_t merge,vint32m8_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m8_tu(merge,op1,op2,31); +} + + +vint64m1_t test___riscv_vand_vx_i64m1_tu(vint64m1_t merge,vint64m1_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m1_tu(merge,op1,op2,31); +} + + +vint64m2_t test___riscv_vand_vx_i64m2_tu(vint64m2_t merge,vint64m2_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m2_tu(merge,op1,op2,31); +} + + +vint64m4_t test___riscv_vand_vx_i64m4_tu(vint64m4_t merge,vint64m4_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m4_tu(merge,op1,op2,31); +} + + +vint64m8_t test___riscv_vand_vx_i64m8_tu(vint64m8_t merge,vint64m8_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m8_tu(merge,op1,op2,31); +} + + +vuint8mf8_t test___riscv_vand_vx_u8mf8_tu(vuint8mf8_t merge,vuint8mf8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf8_tu(merge,op1,op2,31); +} + + +vuint8mf4_t test___riscv_vand_vx_u8mf4_tu(vuint8mf4_t merge,vuint8mf4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf4_tu(merge,op1,op2,31); +} + + +vuint8mf2_t test___riscv_vand_vx_u8mf2_tu(vuint8mf2_t merge,vuint8mf2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf2_tu(merge,op1,op2,31); +} + + +vuint8m1_t test___riscv_vand_vx_u8m1_tu(vuint8m1_t merge,vuint8m1_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m1_tu(merge,op1,op2,31); +} + + +vuint8m2_t test___riscv_vand_vx_u8m2_tu(vuint8m2_t merge,vuint8m2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m2_tu(merge,op1,op2,31); +} + + +vuint8m4_t test___riscv_vand_vx_u8m4_tu(vuint8m4_t merge,vuint8m4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m4_tu(merge,op1,op2,31); +} + + +vuint8m8_t test___riscv_vand_vx_u8m8_tu(vuint8m8_t merge,vuint8m8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m8_tu(merge,op1,op2,31); +} + + +vuint16mf4_t test___riscv_vand_vx_u16mf4_tu(vuint16mf4_t merge,vuint16mf4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16mf4_tu(merge,op1,op2,31); +} + + +vuint16mf2_t test___riscv_vand_vx_u16mf2_tu(vuint16mf2_t merge,vuint16mf2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16mf2_tu(merge,op1,op2,31); +} + + +vuint16m1_t test___riscv_vand_vx_u16m1_tu(vuint16m1_t merge,vuint16m1_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m1_tu(merge,op1,op2,31); +} + + +vuint16m2_t test___riscv_vand_vx_u16m2_tu(vuint16m2_t merge,vuint16m2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m2_tu(merge,op1,op2,31); +} + + +vuint16m4_t test___riscv_vand_vx_u16m4_tu(vuint16m4_t merge,vuint16m4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m4_tu(merge,op1,op2,31); +} + + +vuint16m8_t test___riscv_vand_vx_u16m8_tu(vuint16m8_t merge,vuint16m8_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m8_tu(merge,op1,op2,31); +} + + +vuint32mf2_t test___riscv_vand_vx_u32mf2_tu(vuint32mf2_t merge,vuint32mf2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32mf2_tu(merge,op1,op2,31); +} + + +vuint32m1_t test___riscv_vand_vx_u32m1_tu(vuint32m1_t merge,vuint32m1_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m1_tu(merge,op1,op2,31); +} + + +vuint32m2_t test___riscv_vand_vx_u32m2_tu(vuint32m2_t 
merge,vuint32m2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m2_tu(merge,op1,op2,31); +} + + +vuint32m4_t test___riscv_vand_vx_u32m4_tu(vuint32m4_t merge,vuint32m4_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m4_tu(merge,op1,op2,31); +} + + +vuint32m8_t test___riscv_vand_vx_u32m8_tu(vuint32m8_t merge,vuint32m8_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m8_tu(merge,op1,op2,31); +} + + +vuint64m1_t test___riscv_vand_vx_u64m1_tu(vuint64m1_t merge,vuint64m1_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m1_tu(merge,op1,op2,31); +} + + +vuint64m2_t test___riscv_vand_vx_u64m2_tu(vuint64m2_t merge,vuint64m2_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m2_tu(merge,op1,op2,31); +} + + +vuint64m4_t test___riscv_vand_vx_u64m4_tu(vuint64m4_t merge,vuint64m4_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m4_tu(merge,op1,op2,31); +} + + +vuint64m8_t test___riscv_vand_vx_u64m8_tu(vuint64m8_t merge,vuint64m8_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m8_tu(merge,op1,op2,31); +} + + + +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf8,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf4,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf2,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m1,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m2,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m4,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m8,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf4,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf2,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m1,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m2,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m4,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m8,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*mf2,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m1,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m2,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m4,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { 
dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m8,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vand\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 8 } } */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_tu_rv32-3.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_tu_rv32-3.c new file mode 100644 index 000000000000..f8953243e36a --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_tu_rv32-3.c @@ -0,0 +1,289 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */ + +#include "riscv_vector.h" + +vint8mf8_t test___riscv_vand_vx_i8mf8_tu(vint8mf8_t merge,vint8mf8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf8_tu(merge,op1,op2,32); +} + + +vint8mf4_t test___riscv_vand_vx_i8mf4_tu(vint8mf4_t merge,vint8mf4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf4_tu(merge,op1,op2,32); +} + + +vint8mf2_t test___riscv_vand_vx_i8mf2_tu(vint8mf2_t merge,vint8mf2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf2_tu(merge,op1,op2,32); +} + + +vint8m1_t test___riscv_vand_vx_i8m1_tu(vint8m1_t merge,vint8m1_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m1_tu(merge,op1,op2,32); +} + + +vint8m2_t test___riscv_vand_vx_i8m2_tu(vint8m2_t merge,vint8m2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m2_tu(merge,op1,op2,32); +} + + +vint8m4_t test___riscv_vand_vx_i8m4_tu(vint8m4_t merge,vint8m4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m4_tu(merge,op1,op2,32); +} + + +vint8m8_t test___riscv_vand_vx_i8m8_tu(vint8m8_t merge,vint8m8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m8_tu(merge,op1,op2,32); +} + + +vint16mf4_t test___riscv_vand_vx_i16mf4_tu(vint16mf4_t merge,vint16mf4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16mf4_tu(merge,op1,op2,32); +} + + +vint16mf2_t test___riscv_vand_vx_i16mf2_tu(vint16mf2_t merge,vint16mf2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16mf2_tu(merge,op1,op2,32); +} + + +vint16m1_t test___riscv_vand_vx_i16m1_tu(vint16m1_t merge,vint16m1_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m1_tu(merge,op1,op2,32); +} + + +vint16m2_t test___riscv_vand_vx_i16m2_tu(vint16m2_t merge,vint16m2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m2_tu(merge,op1,op2,32); +} + + +vint16m4_t test___riscv_vand_vx_i16m4_tu(vint16m4_t merge,vint16m4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m4_tu(merge,op1,op2,32); +} + + +vint16m8_t test___riscv_vand_vx_i16m8_tu(vint16m8_t merge,vint16m8_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m8_tu(merge,op1,op2,32); +} + + +vint32mf2_t test___riscv_vand_vx_i32mf2_tu(vint32mf2_t merge,vint32mf2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32mf2_tu(merge,op1,op2,32); +} + + +vint32m1_t test___riscv_vand_vx_i32m1_tu(vint32m1_t merge,vint32m1_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m1_tu(merge,op1,op2,32); +} + + +vint32m2_t test___riscv_vand_vx_i32m2_tu(vint32m2_t merge,vint32m2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m2_tu(merge,op1,op2,32); +} + + +vint32m4_t test___riscv_vand_vx_i32m4_tu(vint32m4_t merge,vint32m4_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m4_tu(merge,op1,op2,32); +} + + +vint32m8_t test___riscv_vand_vx_i32m8_tu(vint32m8_t merge,vint32m8_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m8_tu(merge,op1,op2,32); +} + + +vint64m1_t 
test___riscv_vand_vx_i64m1_tu(vint64m1_t merge,vint64m1_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m1_tu(merge,op1,op2,32); +} + + +vint64m2_t test___riscv_vand_vx_i64m2_tu(vint64m2_t merge,vint64m2_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m2_tu(merge,op1,op2,32); +} + + +vint64m4_t test___riscv_vand_vx_i64m4_tu(vint64m4_t merge,vint64m4_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m4_tu(merge,op1,op2,32); +} + + +vint64m8_t test___riscv_vand_vx_i64m8_tu(vint64m8_t merge,vint64m8_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m8_tu(merge,op1,op2,32); +} + + +vuint8mf8_t test___riscv_vand_vx_u8mf8_tu(vuint8mf8_t merge,vuint8mf8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf8_tu(merge,op1,op2,32); +} + + +vuint8mf4_t test___riscv_vand_vx_u8mf4_tu(vuint8mf4_t merge,vuint8mf4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf4_tu(merge,op1,op2,32); +} + + +vuint8mf2_t test___riscv_vand_vx_u8mf2_tu(vuint8mf2_t merge,vuint8mf2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf2_tu(merge,op1,op2,32); +} + + +vuint8m1_t test___riscv_vand_vx_u8m1_tu(vuint8m1_t merge,vuint8m1_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m1_tu(merge,op1,op2,32); +} + + +vuint8m2_t test___riscv_vand_vx_u8m2_tu(vuint8m2_t merge,vuint8m2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m2_tu(merge,op1,op2,32); +} + + +vuint8m4_t test___riscv_vand_vx_u8m4_tu(vuint8m4_t merge,vuint8m4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m4_tu(merge,op1,op2,32); +} + + +vuint8m8_t test___riscv_vand_vx_u8m8_tu(vuint8m8_t merge,vuint8m8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m8_tu(merge,op1,op2,32); +} + + +vuint16mf4_t test___riscv_vand_vx_u16mf4_tu(vuint16mf4_t merge,vuint16mf4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16mf4_tu(merge,op1,op2,32); +} + + +vuint16mf2_t test___riscv_vand_vx_u16mf2_tu(vuint16mf2_t merge,vuint16mf2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16mf2_tu(merge,op1,op2,32); +} + + +vuint16m1_t test___riscv_vand_vx_u16m1_tu(vuint16m1_t merge,vuint16m1_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m1_tu(merge,op1,op2,32); +} + + +vuint16m2_t test___riscv_vand_vx_u16m2_tu(vuint16m2_t merge,vuint16m2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m2_tu(merge,op1,op2,32); +} + + +vuint16m4_t test___riscv_vand_vx_u16m4_tu(vuint16m4_t merge,vuint16m4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m4_tu(merge,op1,op2,32); +} + + +vuint16m8_t test___riscv_vand_vx_u16m8_tu(vuint16m8_t merge,vuint16m8_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m8_tu(merge,op1,op2,32); +} + + +vuint32mf2_t test___riscv_vand_vx_u32mf2_tu(vuint32mf2_t merge,vuint32mf2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32mf2_tu(merge,op1,op2,32); +} + + +vuint32m1_t test___riscv_vand_vx_u32m1_tu(vuint32m1_t merge,vuint32m1_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m1_tu(merge,op1,op2,32); +} + + +vuint32m2_t test___riscv_vand_vx_u32m2_tu(vuint32m2_t merge,vuint32m2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m2_tu(merge,op1,op2,32); +} + + +vuint32m4_t test___riscv_vand_vx_u32m4_tu(vuint32m4_t merge,vuint32m4_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m4_tu(merge,op1,op2,32); +} + + +vuint32m8_t test___riscv_vand_vx_u32m8_tu(vuint32m8_t merge,vuint32m8_t op1,uint32_t op2,size_t vl) +{ + return 
__riscv_vand_vx_u32m8_tu(merge,op1,op2,32); +} + + +vuint64m1_t test___riscv_vand_vx_u64m1_tu(vuint64m1_t merge,vuint64m1_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m1_tu(merge,op1,op2,32); +} + + +vuint64m2_t test___riscv_vand_vx_u64m2_tu(vuint64m2_t merge,vuint64m2_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m2_tu(merge,op1,op2,32); +} + + +vuint64m4_t test___riscv_vand_vx_u64m4_tu(vuint64m4_t merge,vuint64m4_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m4_tu(merge,op1,op2,32); +} + + +vuint64m8_t test___riscv_vand_vx_u64m8_tu(vuint64m8_t merge,vuint64m8_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m8_tu(merge,op1,op2,32); +} + + + +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vand\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 8 } } */ diff --git 
a/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_tu_rv64-1.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_tu_rv64-1.c new file mode 100644 index 000000000000..374a05767b05 --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_tu_rv64-1.c @@ -0,0 +1,292 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */ + +#include "riscv_vector.h" + +vint8mf8_t test___riscv_vand_vx_i8mf8_tu(vint8mf8_t merge,vint8mf8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf8_tu(merge,op1,op2,vl); +} + + +vint8mf4_t test___riscv_vand_vx_i8mf4_tu(vint8mf4_t merge,vint8mf4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf4_tu(merge,op1,op2,vl); +} + + +vint8mf2_t test___riscv_vand_vx_i8mf2_tu(vint8mf2_t merge,vint8mf2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf2_tu(merge,op1,op2,vl); +} + + +vint8m1_t test___riscv_vand_vx_i8m1_tu(vint8m1_t merge,vint8m1_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m1_tu(merge,op1,op2,vl); +} + + +vint8m2_t test___riscv_vand_vx_i8m2_tu(vint8m2_t merge,vint8m2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m2_tu(merge,op1,op2,vl); +} + + +vint8m4_t test___riscv_vand_vx_i8m4_tu(vint8m4_t merge,vint8m4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m4_tu(merge,op1,op2,vl); +} + + +vint8m8_t test___riscv_vand_vx_i8m8_tu(vint8m8_t merge,vint8m8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m8_tu(merge,op1,op2,vl); +} + + +vint16mf4_t test___riscv_vand_vx_i16mf4_tu(vint16mf4_t merge,vint16mf4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16mf4_tu(merge,op1,op2,vl); +} + + +vint16mf2_t test___riscv_vand_vx_i16mf2_tu(vint16mf2_t merge,vint16mf2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16mf2_tu(merge,op1,op2,vl); +} + + +vint16m1_t test___riscv_vand_vx_i16m1_tu(vint16m1_t merge,vint16m1_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m1_tu(merge,op1,op2,vl); +} + + +vint16m2_t test___riscv_vand_vx_i16m2_tu(vint16m2_t merge,vint16m2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m2_tu(merge,op1,op2,vl); +} + + +vint16m4_t test___riscv_vand_vx_i16m4_tu(vint16m4_t merge,vint16m4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m4_tu(merge,op1,op2,vl); +} + + +vint16m8_t test___riscv_vand_vx_i16m8_tu(vint16m8_t merge,vint16m8_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m8_tu(merge,op1,op2,vl); +} + + +vint32mf2_t test___riscv_vand_vx_i32mf2_tu(vint32mf2_t merge,vint32mf2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32mf2_tu(merge,op1,op2,vl); +} + + +vint32m1_t test___riscv_vand_vx_i32m1_tu(vint32m1_t merge,vint32m1_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m1_tu(merge,op1,op2,vl); +} + + +vint32m2_t test___riscv_vand_vx_i32m2_tu(vint32m2_t merge,vint32m2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m2_tu(merge,op1,op2,vl); +} + + +vint32m4_t test___riscv_vand_vx_i32m4_tu(vint32m4_t merge,vint32m4_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m4_tu(merge,op1,op2,vl); +} + + +vint32m8_t test___riscv_vand_vx_i32m8_tu(vint32m8_t merge,vint32m8_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m8_tu(merge,op1,op2,vl); +} + + +vint64m1_t test___riscv_vand_vx_i64m1_tu(vint64m1_t merge,vint64m1_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m1_tu(merge,op1,op2,vl); +} + + +vint64m2_t test___riscv_vand_vx_i64m2_tu(vint64m2_t merge,vint64m2_t op1,int64_t op2,size_t vl) 
+{ + return __riscv_vand_vx_i64m2_tu(merge,op1,op2,vl); +} + + +vint64m4_t test___riscv_vand_vx_i64m4_tu(vint64m4_t merge,vint64m4_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m4_tu(merge,op1,op2,vl); +} + + +vint64m8_t test___riscv_vand_vx_i64m8_tu(vint64m8_t merge,vint64m8_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m8_tu(merge,op1,op2,vl); +} + + +vuint8mf8_t test___riscv_vand_vx_u8mf8_tu(vuint8mf8_t merge,vuint8mf8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf8_tu(merge,op1,op2,vl); +} + + +vuint8mf4_t test___riscv_vand_vx_u8mf4_tu(vuint8mf4_t merge,vuint8mf4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf4_tu(merge,op1,op2,vl); +} + + +vuint8mf2_t test___riscv_vand_vx_u8mf2_tu(vuint8mf2_t merge,vuint8mf2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf2_tu(merge,op1,op2,vl); +} + + +vuint8m1_t test___riscv_vand_vx_u8m1_tu(vuint8m1_t merge,vuint8m1_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m1_tu(merge,op1,op2,vl); +} + + +vuint8m2_t test___riscv_vand_vx_u8m2_tu(vuint8m2_t merge,vuint8m2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m2_tu(merge,op1,op2,vl); +} + + +vuint8m4_t test___riscv_vand_vx_u8m4_tu(vuint8m4_t merge,vuint8m4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m4_tu(merge,op1,op2,vl); +} + + +vuint8m8_t test___riscv_vand_vx_u8m8_tu(vuint8m8_t merge,vuint8m8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m8_tu(merge,op1,op2,vl); +} + + +vuint16mf4_t test___riscv_vand_vx_u16mf4_tu(vuint16mf4_t merge,vuint16mf4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16mf4_tu(merge,op1,op2,vl); +} + + +vuint16mf2_t test___riscv_vand_vx_u16mf2_tu(vuint16mf2_t merge,vuint16mf2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16mf2_tu(merge,op1,op2,vl); +} + + +vuint16m1_t test___riscv_vand_vx_u16m1_tu(vuint16m1_t merge,vuint16m1_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m1_tu(merge,op1,op2,vl); +} + + +vuint16m2_t test___riscv_vand_vx_u16m2_tu(vuint16m2_t merge,vuint16m2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m2_tu(merge,op1,op2,vl); +} + + +vuint16m4_t test___riscv_vand_vx_u16m4_tu(vuint16m4_t merge,vuint16m4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m4_tu(merge,op1,op2,vl); +} + + +vuint16m8_t test___riscv_vand_vx_u16m8_tu(vuint16m8_t merge,vuint16m8_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m8_tu(merge,op1,op2,vl); +} + + +vuint32mf2_t test___riscv_vand_vx_u32mf2_tu(vuint32mf2_t merge,vuint32mf2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32mf2_tu(merge,op1,op2,vl); +} + + +vuint32m1_t test___riscv_vand_vx_u32m1_tu(vuint32m1_t merge,vuint32m1_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m1_tu(merge,op1,op2,vl); +} + + +vuint32m2_t test___riscv_vand_vx_u32m2_tu(vuint32m2_t merge,vuint32m2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m2_tu(merge,op1,op2,vl); +} + + +vuint32m4_t test___riscv_vand_vx_u32m4_tu(vuint32m4_t merge,vuint32m4_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m4_tu(merge,op1,op2,vl); +} + + +vuint32m8_t test___riscv_vand_vx_u32m8_tu(vuint32m8_t merge,vuint32m8_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m8_tu(merge,op1,op2,vl); +} + + +vuint64m1_t test___riscv_vand_vx_u64m1_tu(vuint64m1_t merge,vuint64m1_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m1_tu(merge,op1,op2,vl); +} + + +vuint64m2_t test___riscv_vand_vx_u64m2_tu(vuint64m2_t 
merge,vuint64m2_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m2_tu(merge,op1,op2,vl); +} + + +vuint64m4_t test___riscv_vand_vx_u64m4_tu(vuint64m4_t merge,vuint64m4_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m4_tu(merge,op1,op2,vl); +} + + +vuint64m8_t test___riscv_vand_vx_u64m8_tu(vuint64m8_t merge,vuint64m8_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m8_tu(merge,op1,op2,vl); +} + + + +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times 
{vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_tu_rv64-2.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_tu_rv64-2.c new file mode 100644 index 000000000000..6f51c255ddba --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_tu_rv64-2.c @@ -0,0 +1,292 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */ + +#include "riscv_vector.h" + +vint8mf8_t test___riscv_vand_vx_i8mf8_tu(vint8mf8_t merge,vint8mf8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf8_tu(merge,op1,op2,31); +} + + +vint8mf4_t test___riscv_vand_vx_i8mf4_tu(vint8mf4_t merge,vint8mf4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf4_tu(merge,op1,op2,31); +} + + +vint8mf2_t test___riscv_vand_vx_i8mf2_tu(vint8mf2_t merge,vint8mf2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf2_tu(merge,op1,op2,31); +} + + +vint8m1_t test___riscv_vand_vx_i8m1_tu(vint8m1_t merge,vint8m1_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m1_tu(merge,op1,op2,31); +} + + +vint8m2_t test___riscv_vand_vx_i8m2_tu(vint8m2_t merge,vint8m2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m2_tu(merge,op1,op2,31); +} + + +vint8m4_t test___riscv_vand_vx_i8m4_tu(vint8m4_t merge,vint8m4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m4_tu(merge,op1,op2,31); +} + + +vint8m8_t test___riscv_vand_vx_i8m8_tu(vint8m8_t merge,vint8m8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m8_tu(merge,op1,op2,31); +} + + +vint16mf4_t test___riscv_vand_vx_i16mf4_tu(vint16mf4_t merge,vint16mf4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16mf4_tu(merge,op1,op2,31); +} + + +vint16mf2_t test___riscv_vand_vx_i16mf2_tu(vint16mf2_t merge,vint16mf2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16mf2_tu(merge,op1,op2,31); +} + + +vint16m1_t test___riscv_vand_vx_i16m1_tu(vint16m1_t merge,vint16m1_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m1_tu(merge,op1,op2,31); +} + + +vint16m2_t test___riscv_vand_vx_i16m2_tu(vint16m2_t merge,vint16m2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m2_tu(merge,op1,op2,31); +} + + +vint16m4_t test___riscv_vand_vx_i16m4_tu(vint16m4_t merge,vint16m4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m4_tu(merge,op1,op2,31); +} + + +vint16m8_t test___riscv_vand_vx_i16m8_tu(vint16m8_t merge,vint16m8_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m8_tu(merge,op1,op2,31); +} + + +vint32mf2_t test___riscv_vand_vx_i32mf2_tu(vint32mf2_t merge,vint32mf2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32mf2_tu(merge,op1,op2,31); +} + + +vint32m1_t test___riscv_vand_vx_i32m1_tu(vint32m1_t merge,vint32m1_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m1_tu(merge,op1,op2,31); +} + + +vint32m2_t test___riscv_vand_vx_i32m2_tu(vint32m2_t merge,vint32m2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m2_tu(merge,op1,op2,31); +} + + +vint32m4_t test___riscv_vand_vx_i32m4_tu(vint32m4_t merge,vint32m4_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m4_tu(merge,op1,op2,31); +} + + +vint32m8_t test___riscv_vand_vx_i32m8_tu(vint32m8_t merge,vint32m8_t op1,int32_t op2,size_t vl) +{ + return 
__riscv_vand_vx_i32m8_tu(merge,op1,op2,31); +} + + +vint64m1_t test___riscv_vand_vx_i64m1_tu(vint64m1_t merge,vint64m1_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m1_tu(merge,op1,op2,31); +} + + +vint64m2_t test___riscv_vand_vx_i64m2_tu(vint64m2_t merge,vint64m2_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m2_tu(merge,op1,op2,31); +} + + +vint64m4_t test___riscv_vand_vx_i64m4_tu(vint64m4_t merge,vint64m4_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m4_tu(merge,op1,op2,31); +} + + +vint64m8_t test___riscv_vand_vx_i64m8_tu(vint64m8_t merge,vint64m8_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m8_tu(merge,op1,op2,31); +} + + +vuint8mf8_t test___riscv_vand_vx_u8mf8_tu(vuint8mf8_t merge,vuint8mf8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf8_tu(merge,op1,op2,31); +} + + +vuint8mf4_t test___riscv_vand_vx_u8mf4_tu(vuint8mf4_t merge,vuint8mf4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf4_tu(merge,op1,op2,31); +} + + +vuint8mf2_t test___riscv_vand_vx_u8mf2_tu(vuint8mf2_t merge,vuint8mf2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf2_tu(merge,op1,op2,31); +} + + +vuint8m1_t test___riscv_vand_vx_u8m1_tu(vuint8m1_t merge,vuint8m1_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m1_tu(merge,op1,op2,31); +} + + +vuint8m2_t test___riscv_vand_vx_u8m2_tu(vuint8m2_t merge,vuint8m2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m2_tu(merge,op1,op2,31); +} + + +vuint8m4_t test___riscv_vand_vx_u8m4_tu(vuint8m4_t merge,vuint8m4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m4_tu(merge,op1,op2,31); +} + + +vuint8m8_t test___riscv_vand_vx_u8m8_tu(vuint8m8_t merge,vuint8m8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m8_tu(merge,op1,op2,31); +} + + +vuint16mf4_t test___riscv_vand_vx_u16mf4_tu(vuint16mf4_t merge,vuint16mf4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16mf4_tu(merge,op1,op2,31); +} + + +vuint16mf2_t test___riscv_vand_vx_u16mf2_tu(vuint16mf2_t merge,vuint16mf2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16mf2_tu(merge,op1,op2,31); +} + + +vuint16m1_t test___riscv_vand_vx_u16m1_tu(vuint16m1_t merge,vuint16m1_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m1_tu(merge,op1,op2,31); +} + + +vuint16m2_t test___riscv_vand_vx_u16m2_tu(vuint16m2_t merge,vuint16m2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m2_tu(merge,op1,op2,31); +} + + +vuint16m4_t test___riscv_vand_vx_u16m4_tu(vuint16m4_t merge,vuint16m4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m4_tu(merge,op1,op2,31); +} + + +vuint16m8_t test___riscv_vand_vx_u16m8_tu(vuint16m8_t merge,vuint16m8_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m8_tu(merge,op1,op2,31); +} + + +vuint32mf2_t test___riscv_vand_vx_u32mf2_tu(vuint32mf2_t merge,vuint32mf2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32mf2_tu(merge,op1,op2,31); +} + + +vuint32m1_t test___riscv_vand_vx_u32m1_tu(vuint32m1_t merge,vuint32m1_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m1_tu(merge,op1,op2,31); +} + + +vuint32m2_t test___riscv_vand_vx_u32m2_tu(vuint32m2_t merge,vuint32m2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m2_tu(merge,op1,op2,31); +} + + +vuint32m4_t test___riscv_vand_vx_u32m4_tu(vuint32m4_t merge,vuint32m4_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m4_tu(merge,op1,op2,31); +} + + +vuint32m8_t test___riscv_vand_vx_u32m8_tu(vuint32m8_t merge,vuint32m8_t op1,uint32_t 
op2,size_t vl) +{ + return __riscv_vand_vx_u32m8_tu(merge,op1,op2,31); +} + + +vuint64m1_t test___riscv_vand_vx_u64m1_tu(vuint64m1_t merge,vuint64m1_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m1_tu(merge,op1,op2,31); +} + + +vuint64m2_t test___riscv_vand_vx_u64m2_tu(vuint64m2_t merge,vuint64m2_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m2_tu(merge,op1,op2,31); +} + + +vuint64m4_t test___riscv_vand_vx_u64m4_tu(vuint64m4_t merge,vuint64m4_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m4_tu(merge,op1,op2,31); +} + + +vuint64m8_t test___riscv_vand_vx_u64m8_tu(vuint64m8_t merge,vuint64m8_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m8_tu(merge,op1,op2,31); +} + + + +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf8,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf4,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf2,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m1,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m2,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m4,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m8,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf4,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf2,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m1,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m2,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m4,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m8,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*mf2,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m1,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m2,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m4,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m8,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m1,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times 
{vsetivli\s+zero,\s*31,\s*e64,\s*m2,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m4,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m8,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_tu_rv64-3.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_tu_rv64-3.c new file mode 100644 index 000000000000..17d66c9295b5 --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_tu_rv64-3.c @@ -0,0 +1,292 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */ + +#include "riscv_vector.h" + +vint8mf8_t test___riscv_vand_vx_i8mf8_tu(vint8mf8_t merge,vint8mf8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf8_tu(merge,op1,op2,32); +} + + +vint8mf4_t test___riscv_vand_vx_i8mf4_tu(vint8mf4_t merge,vint8mf4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf4_tu(merge,op1,op2,32); +} + + +vint8mf2_t test___riscv_vand_vx_i8mf2_tu(vint8mf2_t merge,vint8mf2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf2_tu(merge,op1,op2,32); +} + + +vint8m1_t test___riscv_vand_vx_i8m1_tu(vint8m1_t merge,vint8m1_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m1_tu(merge,op1,op2,32); +} + + +vint8m2_t test___riscv_vand_vx_i8m2_tu(vint8m2_t merge,vint8m2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m2_tu(merge,op1,op2,32); +} + + +vint8m4_t test___riscv_vand_vx_i8m4_tu(vint8m4_t merge,vint8m4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m4_tu(merge,op1,op2,32); +} + + +vint8m8_t test___riscv_vand_vx_i8m8_tu(vint8m8_t merge,vint8m8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m8_tu(merge,op1,op2,32); +} + + +vint16mf4_t test___riscv_vand_vx_i16mf4_tu(vint16mf4_t merge,vint16mf4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16mf4_tu(merge,op1,op2,32); +} + + +vint16mf2_t test___riscv_vand_vx_i16mf2_tu(vint16mf2_t merge,vint16mf2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16mf2_tu(merge,op1,op2,32); +} + + +vint16m1_t test___riscv_vand_vx_i16m1_tu(vint16m1_t merge,vint16m1_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m1_tu(merge,op1,op2,32); +} + + +vint16m2_t test___riscv_vand_vx_i16m2_tu(vint16m2_t merge,vint16m2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m2_tu(merge,op1,op2,32); +} + + +vint16m4_t test___riscv_vand_vx_i16m4_tu(vint16m4_t merge,vint16m4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m4_tu(merge,op1,op2,32); +} + + +vint16m8_t test___riscv_vand_vx_i16m8_tu(vint16m8_t merge,vint16m8_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m8_tu(merge,op1,op2,32); +} + + +vint32mf2_t test___riscv_vand_vx_i32mf2_tu(vint32mf2_t merge,vint32mf2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32mf2_tu(merge,op1,op2,32); +} + + +vint32m1_t test___riscv_vand_vx_i32m1_tu(vint32m1_t merge,vint32m1_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m1_tu(merge,op1,op2,32); +} + + +vint32m2_t test___riscv_vand_vx_i32m2_tu(vint32m2_t merge,vint32m2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m2_tu(merge,op1,op2,32); +} + + +vint32m4_t test___riscv_vand_vx_i32m4_tu(vint32m4_t merge,vint32m4_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m4_tu(merge,op1,op2,32); +} + + +vint32m8_t 
test___riscv_vand_vx_i32m8_tu(vint32m8_t merge,vint32m8_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m8_tu(merge,op1,op2,32); +} + + +vint64m1_t test___riscv_vand_vx_i64m1_tu(vint64m1_t merge,vint64m1_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m1_tu(merge,op1,op2,32); +} + + +vint64m2_t test___riscv_vand_vx_i64m2_tu(vint64m2_t merge,vint64m2_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m2_tu(merge,op1,op2,32); +} + + +vint64m4_t test___riscv_vand_vx_i64m4_tu(vint64m4_t merge,vint64m4_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m4_tu(merge,op1,op2,32); +} + + +vint64m8_t test___riscv_vand_vx_i64m8_tu(vint64m8_t merge,vint64m8_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m8_tu(merge,op1,op2,32); +} + + +vuint8mf8_t test___riscv_vand_vx_u8mf8_tu(vuint8mf8_t merge,vuint8mf8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf8_tu(merge,op1,op2,32); +} + + +vuint8mf4_t test___riscv_vand_vx_u8mf4_tu(vuint8mf4_t merge,vuint8mf4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf4_tu(merge,op1,op2,32); +} + + +vuint8mf2_t test___riscv_vand_vx_u8mf2_tu(vuint8mf2_t merge,vuint8mf2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf2_tu(merge,op1,op2,32); +} + + +vuint8m1_t test___riscv_vand_vx_u8m1_tu(vuint8m1_t merge,vuint8m1_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m1_tu(merge,op1,op2,32); +} + + +vuint8m2_t test___riscv_vand_vx_u8m2_tu(vuint8m2_t merge,vuint8m2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m2_tu(merge,op1,op2,32); +} + + +vuint8m4_t test___riscv_vand_vx_u8m4_tu(vuint8m4_t merge,vuint8m4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m4_tu(merge,op1,op2,32); +} + + +vuint8m8_t test___riscv_vand_vx_u8m8_tu(vuint8m8_t merge,vuint8m8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m8_tu(merge,op1,op2,32); +} + + +vuint16mf4_t test___riscv_vand_vx_u16mf4_tu(vuint16mf4_t merge,vuint16mf4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16mf4_tu(merge,op1,op2,32); +} + + +vuint16mf2_t test___riscv_vand_vx_u16mf2_tu(vuint16mf2_t merge,vuint16mf2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16mf2_tu(merge,op1,op2,32); +} + + +vuint16m1_t test___riscv_vand_vx_u16m1_tu(vuint16m1_t merge,vuint16m1_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m1_tu(merge,op1,op2,32); +} + + +vuint16m2_t test___riscv_vand_vx_u16m2_tu(vuint16m2_t merge,vuint16m2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m2_tu(merge,op1,op2,32); +} + + +vuint16m4_t test___riscv_vand_vx_u16m4_tu(vuint16m4_t merge,vuint16m4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m4_tu(merge,op1,op2,32); +} + + +vuint16m8_t test___riscv_vand_vx_u16m8_tu(vuint16m8_t merge,vuint16m8_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m8_tu(merge,op1,op2,32); +} + + +vuint32mf2_t test___riscv_vand_vx_u32mf2_tu(vuint32mf2_t merge,vuint32mf2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32mf2_tu(merge,op1,op2,32); +} + + +vuint32m1_t test___riscv_vand_vx_u32m1_tu(vuint32m1_t merge,vuint32m1_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m1_tu(merge,op1,op2,32); +} + + +vuint32m2_t test___riscv_vand_vx_u32m2_tu(vuint32m2_t merge,vuint32m2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m2_tu(merge,op1,op2,32); +} + + +vuint32m4_t test___riscv_vand_vx_u32m4_tu(vuint32m4_t merge,vuint32m4_t op1,uint32_t op2,size_t vl) +{ + return 
__riscv_vand_vx_u32m4_tu(merge,op1,op2,32); +} + + +vuint32m8_t test___riscv_vand_vx_u32m8_tu(vuint32m8_t merge,vuint32m8_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m8_tu(merge,op1,op2,32); +} + + +vuint64m1_t test___riscv_vand_vx_u64m1_tu(vuint64m1_t merge,vuint64m1_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m1_tu(merge,op1,op2,32); +} + + +vuint64m2_t test___riscv_vand_vx_u64m2_tu(vuint64m2_t merge,vuint64m2_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m2_tu(merge,op1,op2,32); +} + + +vuint64m4_t test___riscv_vand_vx_u64m4_tu(vuint64m4_t merge,vuint64m4_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m4_tu(merge,op1,op2,32); +} + + +vuint64m8_t test___riscv_vand_vx_u64m8_tu(vuint64m8_t merge,vuint64m8_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m8_tu(merge,op1,op2,32); +} + + + +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times 
{vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_tum_rv32-1.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_tum_rv32-1.c new file mode 100644 index 000000000000..dff21684dd86 --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_tum_rv32-1.c @@ -0,0 +1,289 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */ + +#include "riscv_vector.h" + +vint8mf8_t test___riscv_vand_vx_i8mf8_tum(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf8_tum(mask,merge,op1,op2,vl); +} + + +vint8mf4_t test___riscv_vand_vx_i8mf4_tum(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf4_tum(mask,merge,op1,op2,vl); +} + + +vint8mf2_t test___riscv_vand_vx_i8mf2_tum(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf2_tum(mask,merge,op1,op2,vl); +} + + +vint8m1_t test___riscv_vand_vx_i8m1_tum(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m1_tum(mask,merge,op1,op2,vl); +} + + +vint8m2_t test___riscv_vand_vx_i8m2_tum(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m2_tum(mask,merge,op1,op2,vl); +} + + +vint8m4_t test___riscv_vand_vx_i8m4_tum(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m4_tum(mask,merge,op1,op2,vl); +} + + +vint8m8_t test___riscv_vand_vx_i8m8_tum(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m8_tum(mask,merge,op1,op2,vl); +} + + +vint16mf4_t test___riscv_vand_vx_i16mf4_tum(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16mf4_tum(mask,merge,op1,op2,vl); +} + + +vint16mf2_t test___riscv_vand_vx_i16mf2_tum(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16mf2_tum(mask,merge,op1,op2,vl); +} + + +vint16m1_t test___riscv_vand_vx_i16m1_tum(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m1_tum(mask,merge,op1,op2,vl); +} + + +vint16m2_t test___riscv_vand_vx_i16m2_tum(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m2_tum(mask,merge,op1,op2,vl); +} + + +vint16m4_t test___riscv_vand_vx_i16m4_tum(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m4_tum(mask,merge,op1,op2,vl); +} + + +vint16m8_t test___riscv_vand_vx_i16m8_tum(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m8_tum(mask,merge,op1,op2,vl); +} + + +vint32mf2_t test___riscv_vand_vx_i32mf2_tum(vbool64_t 
mask,vint32mf2_t merge,vint32mf2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32mf2_tum(mask,merge,op1,op2,vl); +} + + +vint32m1_t test___riscv_vand_vx_i32m1_tum(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m1_tum(mask,merge,op1,op2,vl); +} + + +vint32m2_t test___riscv_vand_vx_i32m2_tum(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m2_tum(mask,merge,op1,op2,vl); +} + + +vint32m4_t test___riscv_vand_vx_i32m4_tum(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m4_tum(mask,merge,op1,op2,vl); +} + + +vint32m8_t test___riscv_vand_vx_i32m8_tum(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m8_tum(mask,merge,op1,op2,vl); +} + + +vint64m1_t test___riscv_vand_vx_i64m1_tum(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m1_tum(mask,merge,op1,op2,vl); +} + + +vint64m2_t test___riscv_vand_vx_i64m2_tum(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m2_tum(mask,merge,op1,op2,vl); +} + + +vint64m4_t test___riscv_vand_vx_i64m4_tum(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m4_tum(mask,merge,op1,op2,vl); +} + + +vint64m8_t test___riscv_vand_vx_i64m8_tum(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m8_tum(mask,merge,op1,op2,vl); +} + + +vuint8mf8_t test___riscv_vand_vx_u8mf8_tum(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf8_tum(mask,merge,op1,op2,vl); +} + + +vuint8mf4_t test___riscv_vand_vx_u8mf4_tum(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf4_tum(mask,merge,op1,op2,vl); +} + + +vuint8mf2_t test___riscv_vand_vx_u8mf2_tum(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf2_tum(mask,merge,op1,op2,vl); +} + + +vuint8m1_t test___riscv_vand_vx_u8m1_tum(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m1_tum(mask,merge,op1,op2,vl); +} + + +vuint8m2_t test___riscv_vand_vx_u8m2_tum(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m2_tum(mask,merge,op1,op2,vl); +} + + +vuint8m4_t test___riscv_vand_vx_u8m4_tum(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m4_tum(mask,merge,op1,op2,vl); +} + + +vuint8m8_t test___riscv_vand_vx_u8m8_tum(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m8_tum(mask,merge,op1,op2,vl); +} + + +vuint16mf4_t test___riscv_vand_vx_u16mf4_tum(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16mf4_tum(mask,merge,op1,op2,vl); +} + + +vuint16mf2_t test___riscv_vand_vx_u16mf2_tum(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16mf2_tum(mask,merge,op1,op2,vl); +} + + +vuint16m1_t test___riscv_vand_vx_u16m1_tum(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m1_tum(mask,merge,op1,op2,vl); +} + + +vuint16m2_t test___riscv_vand_vx_u16m2_tum(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,uint16_t op2,size_t vl) +{ + return 
__riscv_vand_vx_u16m2_tum(mask,merge,op1,op2,vl); +} + + +vuint16m4_t test___riscv_vand_vx_u16m4_tum(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m4_tum(mask,merge,op1,op2,vl); +} + + +vuint16m8_t test___riscv_vand_vx_u16m8_tum(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m8_tum(mask,merge,op1,op2,vl); +} + + +vuint32mf2_t test___riscv_vand_vx_u32mf2_tum(vbool64_t mask,vuint32mf2_t merge,vuint32mf2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32mf2_tum(mask,merge,op1,op2,vl); +} + + +vuint32m1_t test___riscv_vand_vx_u32m1_tum(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m1_tum(mask,merge,op1,op2,vl); +} + + +vuint32m2_t test___riscv_vand_vx_u32m2_tum(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m2_tum(mask,merge,op1,op2,vl); +} + + +vuint32m4_t test___riscv_vand_vx_u32m4_tum(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m4_tum(mask,merge,op1,op2,vl); +} + + +vuint32m8_t test___riscv_vand_vx_u32m8_tum(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m8_tum(mask,merge,op1,op2,vl); +} + + +vuint64m1_t test___riscv_vand_vx_u64m1_tum(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m1_tum(mask,merge,op1,op2,vl); +} + + +vuint64m2_t test___riscv_vand_vx_u64m2_tum(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m2_tum(mask,merge,op1,op2,vl); +} + + +vuint64m4_t test___riscv_vand_vx_u64m4_tum(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m4_tum(mask,merge,op1,op2,vl); +} + + +vuint64m8_t test___riscv_vand_vx_u64m8_tum(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m8_tum(mask,merge,op1,op2,vl); +} + + + +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { 
scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vand\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t} 8 } } */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_tum_rv32-2.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_tum_rv32-2.c new file mode 100644 index 000000000000..58c2e507d2c4 --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_tum_rv32-2.c @@ -0,0 +1,289 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */ + +#include "riscv_vector.h" + +vint8mf8_t test___riscv_vand_vx_i8mf8_tum(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf8_tum(mask,merge,op1,op2,31); +} + + +vint8mf4_t test___riscv_vand_vx_i8mf4_tum(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf4_tum(mask,merge,op1,op2,31); +} + + +vint8mf2_t test___riscv_vand_vx_i8mf2_tum(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf2_tum(mask,merge,op1,op2,31); +} + + +vint8m1_t test___riscv_vand_vx_i8m1_tum(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m1_tum(mask,merge,op1,op2,31); +} + + +vint8m2_t test___riscv_vand_vx_i8m2_tum(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m2_tum(mask,merge,op1,op2,31); +} + + +vint8m4_t test___riscv_vand_vx_i8m4_tum(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m4_tum(mask,merge,op1,op2,31); +} + + +vint8m8_t test___riscv_vand_vx_i8m8_tum(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m8_tum(mask,merge,op1,op2,31); +} + + +vint16mf4_t test___riscv_vand_vx_i16mf4_tum(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16mf4_tum(mask,merge,op1,op2,31); +} + + +vint16mf2_t test___riscv_vand_vx_i16mf2_tum(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,int16_t op2,size_t vl) +{ + return 
__riscv_vand_vx_i16mf2_tum(mask,merge,op1,op2,31); +} + + +vint16m1_t test___riscv_vand_vx_i16m1_tum(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m1_tum(mask,merge,op1,op2,31); +} + + +vint16m2_t test___riscv_vand_vx_i16m2_tum(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m2_tum(mask,merge,op1,op2,31); +} + + +vint16m4_t test___riscv_vand_vx_i16m4_tum(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m4_tum(mask,merge,op1,op2,31); +} + + +vint16m8_t test___riscv_vand_vx_i16m8_tum(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m8_tum(mask,merge,op1,op2,31); +} + + +vint32mf2_t test___riscv_vand_vx_i32mf2_tum(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32mf2_tum(mask,merge,op1,op2,31); +} + + +vint32m1_t test___riscv_vand_vx_i32m1_tum(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m1_tum(mask,merge,op1,op2,31); +} + + +vint32m2_t test___riscv_vand_vx_i32m2_tum(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m2_tum(mask,merge,op1,op2,31); +} + + +vint32m4_t test___riscv_vand_vx_i32m4_tum(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m4_tum(mask,merge,op1,op2,31); +} + + +vint32m8_t test___riscv_vand_vx_i32m8_tum(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m8_tum(mask,merge,op1,op2,31); +} + + +vint64m1_t test___riscv_vand_vx_i64m1_tum(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m1_tum(mask,merge,op1,op2,31); +} + + +vint64m2_t test___riscv_vand_vx_i64m2_tum(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m2_tum(mask,merge,op1,op2,31); +} + + +vint64m4_t test___riscv_vand_vx_i64m4_tum(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m4_tum(mask,merge,op1,op2,31); +} + + +vint64m8_t test___riscv_vand_vx_i64m8_tum(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m8_tum(mask,merge,op1,op2,31); +} + + +vuint8mf8_t test___riscv_vand_vx_u8mf8_tum(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf8_tum(mask,merge,op1,op2,31); +} + + +vuint8mf4_t test___riscv_vand_vx_u8mf4_tum(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf4_tum(mask,merge,op1,op2,31); +} + + +vuint8mf2_t test___riscv_vand_vx_u8mf2_tum(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf2_tum(mask,merge,op1,op2,31); +} + + +vuint8m1_t test___riscv_vand_vx_u8m1_tum(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m1_tum(mask,merge,op1,op2,31); +} + + +vuint8m2_t test___riscv_vand_vx_u8m2_tum(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m2_tum(mask,merge,op1,op2,31); +} + + +vuint8m4_t test___riscv_vand_vx_u8m4_tum(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m4_tum(mask,merge,op1,op2,31); +} + + +vuint8m8_t 
test___riscv_vand_vx_u8m8_tum(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m8_tum(mask,merge,op1,op2,31); +} + + +vuint16mf4_t test___riscv_vand_vx_u16mf4_tum(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16mf4_tum(mask,merge,op1,op2,31); +} + + +vuint16mf2_t test___riscv_vand_vx_u16mf2_tum(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16mf2_tum(mask,merge,op1,op2,31); +} + + +vuint16m1_t test___riscv_vand_vx_u16m1_tum(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m1_tum(mask,merge,op1,op2,31); +} + + +vuint16m2_t test___riscv_vand_vx_u16m2_tum(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m2_tum(mask,merge,op1,op2,31); +} + + +vuint16m4_t test___riscv_vand_vx_u16m4_tum(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m4_tum(mask,merge,op1,op2,31); +} + + +vuint16m8_t test___riscv_vand_vx_u16m8_tum(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m8_tum(mask,merge,op1,op2,31); +} + + +vuint32mf2_t test___riscv_vand_vx_u32mf2_tum(vbool64_t mask,vuint32mf2_t merge,vuint32mf2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32mf2_tum(mask,merge,op1,op2,31); +} + + +vuint32m1_t test___riscv_vand_vx_u32m1_tum(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m1_tum(mask,merge,op1,op2,31); +} + + +vuint32m2_t test___riscv_vand_vx_u32m2_tum(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m2_tum(mask,merge,op1,op2,31); +} + + +vuint32m4_t test___riscv_vand_vx_u32m4_tum(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m4_tum(mask,merge,op1,op2,31); +} + + +vuint32m8_t test___riscv_vand_vx_u32m8_tum(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m8_tum(mask,merge,op1,op2,31); +} + + +vuint64m1_t test___riscv_vand_vx_u64m1_tum(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m1_tum(mask,merge,op1,op2,31); +} + + +vuint64m2_t test___riscv_vand_vx_u64m2_tum(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m2_tum(mask,merge,op1,op2,31); +} + + +vuint64m4_t test___riscv_vand_vx_u64m4_tum(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m4_tum(mask,merge,op1,op2,31); +} + + +vuint64m8_t test___riscv_vand_vx_u64m8_tum(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m8_tum(mask,merge,op1,op2,31); +} + + + +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf8,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf4,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf2,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times 
{vsetivli\s+zero,\s*31,\s*e8,\s*m1,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m2,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m4,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m8,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf4,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf2,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m1,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m2,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m4,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m8,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*mf2,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m1,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m2,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m4,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m8,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vand\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t} 8 } } */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_tum_rv32-3.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_tum_rv32-3.c new file mode 100644 index 000000000000..9e2f1d22b4ea --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_tum_rv32-3.c @@ -0,0 +1,289 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */ + +#include "riscv_vector.h" + +vint8mf8_t test___riscv_vand_vx_i8mf8_tum(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf8_tum(mask,merge,op1,op2,32); +} + + +vint8mf4_t test___riscv_vand_vx_i8mf4_tum(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf4_tum(mask,merge,op1,op2,32); +} + + +vint8mf2_t test___riscv_vand_vx_i8mf2_tum(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf2_tum(mask,merge,op1,op2,32); +} + + +vint8m1_t test___riscv_vand_vx_i8m1_tum(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m1_tum(mask,merge,op1,op2,32); +} + + +vint8m2_t test___riscv_vand_vx_i8m2_tum(vbool4_t 
mask,vint8m2_t merge,vint8m2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m2_tum(mask,merge,op1,op2,32); +} + + +vint8m4_t test___riscv_vand_vx_i8m4_tum(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m4_tum(mask,merge,op1,op2,32); +} + + +vint8m8_t test___riscv_vand_vx_i8m8_tum(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m8_tum(mask,merge,op1,op2,32); +} + + +vint16mf4_t test___riscv_vand_vx_i16mf4_tum(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16mf4_tum(mask,merge,op1,op2,32); +} + + +vint16mf2_t test___riscv_vand_vx_i16mf2_tum(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16mf2_tum(mask,merge,op1,op2,32); +} + + +vint16m1_t test___riscv_vand_vx_i16m1_tum(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m1_tum(mask,merge,op1,op2,32); +} + + +vint16m2_t test___riscv_vand_vx_i16m2_tum(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m2_tum(mask,merge,op1,op2,32); +} + + +vint16m4_t test___riscv_vand_vx_i16m4_tum(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m4_tum(mask,merge,op1,op2,32); +} + + +vint16m8_t test___riscv_vand_vx_i16m8_tum(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m8_tum(mask,merge,op1,op2,32); +} + + +vint32mf2_t test___riscv_vand_vx_i32mf2_tum(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32mf2_tum(mask,merge,op1,op2,32); +} + + +vint32m1_t test___riscv_vand_vx_i32m1_tum(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m1_tum(mask,merge,op1,op2,32); +} + + +vint32m2_t test___riscv_vand_vx_i32m2_tum(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m2_tum(mask,merge,op1,op2,32); +} + + +vint32m4_t test___riscv_vand_vx_i32m4_tum(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m4_tum(mask,merge,op1,op2,32); +} + + +vint32m8_t test___riscv_vand_vx_i32m8_tum(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m8_tum(mask,merge,op1,op2,32); +} + + +vint64m1_t test___riscv_vand_vx_i64m1_tum(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m1_tum(mask,merge,op1,op2,32); +} + + +vint64m2_t test___riscv_vand_vx_i64m2_tum(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m2_tum(mask,merge,op1,op2,32); +} + + +vint64m4_t test___riscv_vand_vx_i64m4_tum(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m4_tum(mask,merge,op1,op2,32); +} + + +vint64m8_t test___riscv_vand_vx_i64m8_tum(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m8_tum(mask,merge,op1,op2,32); +} + + +vuint8mf8_t test___riscv_vand_vx_u8mf8_tum(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf8_tum(mask,merge,op1,op2,32); +} + + +vuint8mf4_t test___riscv_vand_vx_u8mf4_tum(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,uint8_t op2,size_t vl) +{ + return 
__riscv_vand_vx_u8mf4_tum(mask,merge,op1,op2,32); +} + + +vuint8mf2_t test___riscv_vand_vx_u8mf2_tum(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf2_tum(mask,merge,op1,op2,32); +} + + +vuint8m1_t test___riscv_vand_vx_u8m1_tum(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m1_tum(mask,merge,op1,op2,32); +} + + +vuint8m2_t test___riscv_vand_vx_u8m2_tum(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m2_tum(mask,merge,op1,op2,32); +} + + +vuint8m4_t test___riscv_vand_vx_u8m4_tum(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m4_tum(mask,merge,op1,op2,32); +} + + +vuint8m8_t test___riscv_vand_vx_u8m8_tum(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m8_tum(mask,merge,op1,op2,32); +} + + +vuint16mf4_t test___riscv_vand_vx_u16mf4_tum(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16mf4_tum(mask,merge,op1,op2,32); +} + + +vuint16mf2_t test___riscv_vand_vx_u16mf2_tum(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16mf2_tum(mask,merge,op1,op2,32); +} + + +vuint16m1_t test___riscv_vand_vx_u16m1_tum(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m1_tum(mask,merge,op1,op2,32); +} + + +vuint16m2_t test___riscv_vand_vx_u16m2_tum(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m2_tum(mask,merge,op1,op2,32); +} + + +vuint16m4_t test___riscv_vand_vx_u16m4_tum(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m4_tum(mask,merge,op1,op2,32); +} + + +vuint16m8_t test___riscv_vand_vx_u16m8_tum(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m8_tum(mask,merge,op1,op2,32); +} + + +vuint32mf2_t test___riscv_vand_vx_u32mf2_tum(vbool64_t mask,vuint32mf2_t merge,vuint32mf2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32mf2_tum(mask,merge,op1,op2,32); +} + + +vuint32m1_t test___riscv_vand_vx_u32m1_tum(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m1_tum(mask,merge,op1,op2,32); +} + + +vuint32m2_t test___riscv_vand_vx_u32m2_tum(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m2_tum(mask,merge,op1,op2,32); +} + + +vuint32m4_t test___riscv_vand_vx_u32m4_tum(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m4_tum(mask,merge,op1,op2,32); +} + + +vuint32m8_t test___riscv_vand_vx_u32m8_tum(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m8_tum(mask,merge,op1,op2,32); +} + + +vuint64m1_t test___riscv_vand_vx_u64m1_tum(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m1_tum(mask,merge,op1,op2,32); +} + + +vuint64m2_t test___riscv_vand_vx_u64m2_tum(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m2_tum(mask,merge,op1,op2,32); +} + + +vuint64m4_t test___riscv_vand_vx_u64m4_tum(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,uint64_t op2,size_t vl) +{ + return 
__riscv_vand_vx_u64m4_tum(mask,merge,op1,op2,32); +} + + +vuint64m8_t test___riscv_vand_vx_u64m8_tum(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m8_tum(mask,merge,op1,op2,32); +} + + + +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vand\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t} 8 } } */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_tum_rv64-1.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_tum_rv64-1.c new file mode 100644 index 000000000000..a49807a937dd --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_tum_rv64-1.c @@ -0,0 +1,292 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 
-fno-schedule-insns -fno-schedule-insns2" } */ + +#include "riscv_vector.h" + +vint8mf8_t test___riscv_vand_vx_i8mf8_tum(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf8_tum(mask,merge,op1,op2,vl); +} + + +vint8mf4_t test___riscv_vand_vx_i8mf4_tum(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf4_tum(mask,merge,op1,op2,vl); +} + + +vint8mf2_t test___riscv_vand_vx_i8mf2_tum(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf2_tum(mask,merge,op1,op2,vl); +} + + +vint8m1_t test___riscv_vand_vx_i8m1_tum(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m1_tum(mask,merge,op1,op2,vl); +} + + +vint8m2_t test___riscv_vand_vx_i8m2_tum(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m2_tum(mask,merge,op1,op2,vl); +} + + +vint8m4_t test___riscv_vand_vx_i8m4_tum(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m4_tum(mask,merge,op1,op2,vl); +} + + +vint8m8_t test___riscv_vand_vx_i8m8_tum(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m8_tum(mask,merge,op1,op2,vl); +} + + +vint16mf4_t test___riscv_vand_vx_i16mf4_tum(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16mf4_tum(mask,merge,op1,op2,vl); +} + + +vint16mf2_t test___riscv_vand_vx_i16mf2_tum(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16mf2_tum(mask,merge,op1,op2,vl); +} + + +vint16m1_t test___riscv_vand_vx_i16m1_tum(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m1_tum(mask,merge,op1,op2,vl); +} + + +vint16m2_t test___riscv_vand_vx_i16m2_tum(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m2_tum(mask,merge,op1,op2,vl); +} + + +vint16m4_t test___riscv_vand_vx_i16m4_tum(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m4_tum(mask,merge,op1,op2,vl); +} + + +vint16m8_t test___riscv_vand_vx_i16m8_tum(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m8_tum(mask,merge,op1,op2,vl); +} + + +vint32mf2_t test___riscv_vand_vx_i32mf2_tum(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32mf2_tum(mask,merge,op1,op2,vl); +} + + +vint32m1_t test___riscv_vand_vx_i32m1_tum(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m1_tum(mask,merge,op1,op2,vl); +} + + +vint32m2_t test___riscv_vand_vx_i32m2_tum(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m2_tum(mask,merge,op1,op2,vl); +} + + +vint32m4_t test___riscv_vand_vx_i32m4_tum(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m4_tum(mask,merge,op1,op2,vl); +} + + +vint32m8_t test___riscv_vand_vx_i32m8_tum(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m8_tum(mask,merge,op1,op2,vl); +} + + +vint64m1_t test___riscv_vand_vx_i64m1_tum(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m1_tum(mask,merge,op1,op2,vl); +} + + +vint64m2_t 
test___riscv_vand_vx_i64m2_tum(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m2_tum(mask,merge,op1,op2,vl); +} + + +vint64m4_t test___riscv_vand_vx_i64m4_tum(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m4_tum(mask,merge,op1,op2,vl); +} + + +vint64m8_t test___riscv_vand_vx_i64m8_tum(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m8_tum(mask,merge,op1,op2,vl); +} + + +vuint8mf8_t test___riscv_vand_vx_u8mf8_tum(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf8_tum(mask,merge,op1,op2,vl); +} + + +vuint8mf4_t test___riscv_vand_vx_u8mf4_tum(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf4_tum(mask,merge,op1,op2,vl); +} + + +vuint8mf2_t test___riscv_vand_vx_u8mf2_tum(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf2_tum(mask,merge,op1,op2,vl); +} + + +vuint8m1_t test___riscv_vand_vx_u8m1_tum(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m1_tum(mask,merge,op1,op2,vl); +} + + +vuint8m2_t test___riscv_vand_vx_u8m2_tum(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m2_tum(mask,merge,op1,op2,vl); +} + + +vuint8m4_t test___riscv_vand_vx_u8m4_tum(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m4_tum(mask,merge,op1,op2,vl); +} + + +vuint8m8_t test___riscv_vand_vx_u8m8_tum(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m8_tum(mask,merge,op1,op2,vl); +} + + +vuint16mf4_t test___riscv_vand_vx_u16mf4_tum(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16mf4_tum(mask,merge,op1,op2,vl); +} + + +vuint16mf2_t test___riscv_vand_vx_u16mf2_tum(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16mf2_tum(mask,merge,op1,op2,vl); +} + + +vuint16m1_t test___riscv_vand_vx_u16m1_tum(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m1_tum(mask,merge,op1,op2,vl); +} + + +vuint16m2_t test___riscv_vand_vx_u16m2_tum(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m2_tum(mask,merge,op1,op2,vl); +} + + +vuint16m4_t test___riscv_vand_vx_u16m4_tum(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m4_tum(mask,merge,op1,op2,vl); +} + + +vuint16m8_t test___riscv_vand_vx_u16m8_tum(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m8_tum(mask,merge,op1,op2,vl); +} + + +vuint32mf2_t test___riscv_vand_vx_u32mf2_tum(vbool64_t mask,vuint32mf2_t merge,vuint32mf2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32mf2_tum(mask,merge,op1,op2,vl); +} + + +vuint32m1_t test___riscv_vand_vx_u32m1_tum(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m1_tum(mask,merge,op1,op2,vl); +} + + +vuint32m2_t test___riscv_vand_vx_u32m2_tum(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m2_tum(mask,merge,op1,op2,vl); +} + + +vuint32m4_t test___riscv_vand_vx_u32m4_tum(vbool8_t 
mask,vuint32m4_t merge,vuint32m4_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m4_tum(mask,merge,op1,op2,vl); +} + + +vuint32m8_t test___riscv_vand_vx_u32m8_tum(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m8_tum(mask,merge,op1,op2,vl); +} + + +vuint64m1_t test___riscv_vand_vx_u64m1_tum(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m1_tum(mask,merge,op1,op2,vl); +} + + +vuint64m2_t test___riscv_vand_vx_u64m2_tum(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m2_tum(mask,merge,op1,op2,vl); +} + + +vuint64m4_t test___riscv_vand_vx_u64m4_tum(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m4_tum(mask,merge,op1,op2,vl); +} + + +vuint64m8_t test___riscv_vand_vx_u64m8_tum(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m8_tum(mask,merge,op1,op2,vl); +} + + + +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times 
{vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_tum_rv64-2.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_tum_rv64-2.c new file mode 100644 index 000000000000..adbd279fc2a5 --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_tum_rv64-2.c @@ -0,0 +1,292 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */ + +#include "riscv_vector.h" + +vint8mf8_t test___riscv_vand_vx_i8mf8_tum(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf8_tum(mask,merge,op1,op2,31); +} + + +vint8mf4_t test___riscv_vand_vx_i8mf4_tum(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf4_tum(mask,merge,op1,op2,31); +} + + +vint8mf2_t test___riscv_vand_vx_i8mf2_tum(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf2_tum(mask,merge,op1,op2,31); +} + + +vint8m1_t test___riscv_vand_vx_i8m1_tum(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m1_tum(mask,merge,op1,op2,31); +} + + +vint8m2_t test___riscv_vand_vx_i8m2_tum(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m2_tum(mask,merge,op1,op2,31); +} + + +vint8m4_t test___riscv_vand_vx_i8m4_tum(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m4_tum(mask,merge,op1,op2,31); +} + + +vint8m8_t test___riscv_vand_vx_i8m8_tum(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m8_tum(mask,merge,op1,op2,31); +} + + +vint16mf4_t test___riscv_vand_vx_i16mf4_tum(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16mf4_tum(mask,merge,op1,op2,31); +} + + +vint16mf2_t test___riscv_vand_vx_i16mf2_tum(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16mf2_tum(mask,merge,op1,op2,31); +} + + +vint16m1_t test___riscv_vand_vx_i16m1_tum(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m1_tum(mask,merge,op1,op2,31); +} + + +vint16m2_t test___riscv_vand_vx_i16m2_tum(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m2_tum(mask,merge,op1,op2,31); +} + + +vint16m4_t test___riscv_vand_vx_i16m4_tum(vbool4_t 
mask,vint16m4_t merge,vint16m4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m4_tum(mask,merge,op1,op2,31); +} + + +vint16m8_t test___riscv_vand_vx_i16m8_tum(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m8_tum(mask,merge,op1,op2,31); +} + + +vint32mf2_t test___riscv_vand_vx_i32mf2_tum(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32mf2_tum(mask,merge,op1,op2,31); +} + + +vint32m1_t test___riscv_vand_vx_i32m1_tum(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m1_tum(mask,merge,op1,op2,31); +} + + +vint32m2_t test___riscv_vand_vx_i32m2_tum(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m2_tum(mask,merge,op1,op2,31); +} + + +vint32m4_t test___riscv_vand_vx_i32m4_tum(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m4_tum(mask,merge,op1,op2,31); +} + + +vint32m8_t test___riscv_vand_vx_i32m8_tum(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m8_tum(mask,merge,op1,op2,31); +} + + +vint64m1_t test___riscv_vand_vx_i64m1_tum(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m1_tum(mask,merge,op1,op2,31); +} + + +vint64m2_t test___riscv_vand_vx_i64m2_tum(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m2_tum(mask,merge,op1,op2,31); +} + + +vint64m4_t test___riscv_vand_vx_i64m4_tum(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m4_tum(mask,merge,op1,op2,31); +} + + +vint64m8_t test___riscv_vand_vx_i64m8_tum(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m8_tum(mask,merge,op1,op2,31); +} + + +vuint8mf8_t test___riscv_vand_vx_u8mf8_tum(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf8_tum(mask,merge,op1,op2,31); +} + + +vuint8mf4_t test___riscv_vand_vx_u8mf4_tum(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf4_tum(mask,merge,op1,op2,31); +} + + +vuint8mf2_t test___riscv_vand_vx_u8mf2_tum(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf2_tum(mask,merge,op1,op2,31); +} + + +vuint8m1_t test___riscv_vand_vx_u8m1_tum(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m1_tum(mask,merge,op1,op2,31); +} + + +vuint8m2_t test___riscv_vand_vx_u8m2_tum(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m2_tum(mask,merge,op1,op2,31); +} + + +vuint8m4_t test___riscv_vand_vx_u8m4_tum(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m4_tum(mask,merge,op1,op2,31); +} + + +vuint8m8_t test___riscv_vand_vx_u8m8_tum(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m8_tum(mask,merge,op1,op2,31); +} + + +vuint16mf4_t test___riscv_vand_vx_u16mf4_tum(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16mf4_tum(mask,merge,op1,op2,31); +} + + +vuint16mf2_t test___riscv_vand_vx_u16mf2_tum(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,uint16_t op2,size_t vl) +{ + return 
__riscv_vand_vx_u16mf2_tum(mask,merge,op1,op2,31); +} + + +vuint16m1_t test___riscv_vand_vx_u16m1_tum(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m1_tum(mask,merge,op1,op2,31); +} + + +vuint16m2_t test___riscv_vand_vx_u16m2_tum(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m2_tum(mask,merge,op1,op2,31); +} + + +vuint16m4_t test___riscv_vand_vx_u16m4_tum(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m4_tum(mask,merge,op1,op2,31); +} + + +vuint16m8_t test___riscv_vand_vx_u16m8_tum(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m8_tum(mask,merge,op1,op2,31); +} + + +vuint32mf2_t test___riscv_vand_vx_u32mf2_tum(vbool64_t mask,vuint32mf2_t merge,vuint32mf2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32mf2_tum(mask,merge,op1,op2,31); +} + + +vuint32m1_t test___riscv_vand_vx_u32m1_tum(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m1_tum(mask,merge,op1,op2,31); +} + + +vuint32m2_t test___riscv_vand_vx_u32m2_tum(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m2_tum(mask,merge,op1,op2,31); +} + + +vuint32m4_t test___riscv_vand_vx_u32m4_tum(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m4_tum(mask,merge,op1,op2,31); +} + + +vuint32m8_t test___riscv_vand_vx_u32m8_tum(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m8_tum(mask,merge,op1,op2,31); +} + + +vuint64m1_t test___riscv_vand_vx_u64m1_tum(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m1_tum(mask,merge,op1,op2,31); +} + + +vuint64m2_t test___riscv_vand_vx_u64m2_tum(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m2_tum(mask,merge,op1,op2,31); +} + + +vuint64m4_t test___riscv_vand_vx_u64m4_tum(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m4_tum(mask,merge,op1,op2,31); +} + + +vuint64m8_t test___riscv_vand_vx_u64m8_tum(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m8_tum(mask,merge,op1,op2,31); +} + + + +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf8,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf4,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf2,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m1,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m2,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m4,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m8,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { 
scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf4,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf2,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m1,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m2,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m4,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m8,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*mf2,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m1,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m2,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m4,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m8,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m1,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m2,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m4,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m8,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_tum_rv64-3.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_tum_rv64-3.c new file mode 100644 index 000000000000..cad7cdfc7c8a --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_tum_rv64-3.c @@ -0,0 +1,292 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */ + +#include "riscv_vector.h" + +vint8mf8_t test___riscv_vand_vx_i8mf8_tum(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf8_tum(mask,merge,op1,op2,32); +} + + +vint8mf4_t test___riscv_vand_vx_i8mf4_tum(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf4_tum(mask,merge,op1,op2,32); +} + + +vint8mf2_t test___riscv_vand_vx_i8mf2_tum(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf2_tum(mask,merge,op1,op2,32); +} + + +vint8m1_t test___riscv_vand_vx_i8m1_tum(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m1_tum(mask,merge,op1,op2,32); +} + + +vint8m2_t test___riscv_vand_vx_i8m2_tum(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,int8_t op2,size_t vl) +{ + return 
__riscv_vand_vx_i8m2_tum(mask,merge,op1,op2,32); +} + + +vint8m4_t test___riscv_vand_vx_i8m4_tum(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m4_tum(mask,merge,op1,op2,32); +} + + +vint8m8_t test___riscv_vand_vx_i8m8_tum(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m8_tum(mask,merge,op1,op2,32); +} + + +vint16mf4_t test___riscv_vand_vx_i16mf4_tum(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16mf4_tum(mask,merge,op1,op2,32); +} + + +vint16mf2_t test___riscv_vand_vx_i16mf2_tum(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16mf2_tum(mask,merge,op1,op2,32); +} + + +vint16m1_t test___riscv_vand_vx_i16m1_tum(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m1_tum(mask,merge,op1,op2,32); +} + + +vint16m2_t test___riscv_vand_vx_i16m2_tum(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m2_tum(mask,merge,op1,op2,32); +} + + +vint16m4_t test___riscv_vand_vx_i16m4_tum(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m4_tum(mask,merge,op1,op2,32); +} + + +vint16m8_t test___riscv_vand_vx_i16m8_tum(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m8_tum(mask,merge,op1,op2,32); +} + + +vint32mf2_t test___riscv_vand_vx_i32mf2_tum(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32mf2_tum(mask,merge,op1,op2,32); +} + + +vint32m1_t test___riscv_vand_vx_i32m1_tum(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m1_tum(mask,merge,op1,op2,32); +} + + +vint32m2_t test___riscv_vand_vx_i32m2_tum(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m2_tum(mask,merge,op1,op2,32); +} + + +vint32m4_t test___riscv_vand_vx_i32m4_tum(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m4_tum(mask,merge,op1,op2,32); +} + + +vint32m8_t test___riscv_vand_vx_i32m8_tum(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m8_tum(mask,merge,op1,op2,32); +} + + +vint64m1_t test___riscv_vand_vx_i64m1_tum(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m1_tum(mask,merge,op1,op2,32); +} + + +vint64m2_t test___riscv_vand_vx_i64m2_tum(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m2_tum(mask,merge,op1,op2,32); +} + + +vint64m4_t test___riscv_vand_vx_i64m4_tum(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m4_tum(mask,merge,op1,op2,32); +} + + +vint64m8_t test___riscv_vand_vx_i64m8_tum(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m8_tum(mask,merge,op1,op2,32); +} + + +vuint8mf8_t test___riscv_vand_vx_u8mf8_tum(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf8_tum(mask,merge,op1,op2,32); +} + + +vuint8mf4_t test___riscv_vand_vx_u8mf4_tum(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf4_tum(mask,merge,op1,op2,32); +} + + +vuint8mf2_t 
test___riscv_vand_vx_u8mf2_tum(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf2_tum(mask,merge,op1,op2,32); +} + + +vuint8m1_t test___riscv_vand_vx_u8m1_tum(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m1_tum(mask,merge,op1,op2,32); +} + + +vuint8m2_t test___riscv_vand_vx_u8m2_tum(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m2_tum(mask,merge,op1,op2,32); +} + + +vuint8m4_t test___riscv_vand_vx_u8m4_tum(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m4_tum(mask,merge,op1,op2,32); +} + + +vuint8m8_t test___riscv_vand_vx_u8m8_tum(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m8_tum(mask,merge,op1,op2,32); +} + + +vuint16mf4_t test___riscv_vand_vx_u16mf4_tum(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16mf4_tum(mask,merge,op1,op2,32); +} + + +vuint16mf2_t test___riscv_vand_vx_u16mf2_tum(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16mf2_tum(mask,merge,op1,op2,32); +} + + +vuint16m1_t test___riscv_vand_vx_u16m1_tum(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m1_tum(mask,merge,op1,op2,32); +} + + +vuint16m2_t test___riscv_vand_vx_u16m2_tum(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m2_tum(mask,merge,op1,op2,32); +} + + +vuint16m4_t test___riscv_vand_vx_u16m4_tum(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m4_tum(mask,merge,op1,op2,32); +} + + +vuint16m8_t test___riscv_vand_vx_u16m8_tum(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m8_tum(mask,merge,op1,op2,32); +} + + +vuint32mf2_t test___riscv_vand_vx_u32mf2_tum(vbool64_t mask,vuint32mf2_t merge,vuint32mf2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32mf2_tum(mask,merge,op1,op2,32); +} + + +vuint32m1_t test___riscv_vand_vx_u32m1_tum(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m1_tum(mask,merge,op1,op2,32); +} + + +vuint32m2_t test___riscv_vand_vx_u32m2_tum(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m2_tum(mask,merge,op1,op2,32); +} + + +vuint32m4_t test___riscv_vand_vx_u32m4_tum(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m4_tum(mask,merge,op1,op2,32); +} + + +vuint32m8_t test___riscv_vand_vx_u32m8_tum(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m8_tum(mask,merge,op1,op2,32); +} + + +vuint64m1_t test___riscv_vand_vx_u64m1_tum(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m1_tum(mask,merge,op1,op2,32); +} + + +vuint64m2_t test___riscv_vand_vx_u64m2_tum(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m2_tum(mask,merge,op1,op2,32); +} + + +vuint64m4_t test___riscv_vand_vx_u64m4_tum(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m4_tum(mask,merge,op1,op2,32); +} + + +vuint64m8_t test___riscv_vand_vx_u64m8_tum(vbool8_t 
mask,vuint64m8_t merge,vuint64m8_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m8_tum(mask,merge,op1,op2,32); +} + + + +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times 
{vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*tu,\s*m[au]\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_tumu_rv32-1.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_tumu_rv32-1.c new file mode 100644 index 000000000000..8a28885f3cc9 --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_tumu_rv32-1.c @@ -0,0 +1,289 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */ + +#include "riscv_vector.h" + +vint8mf8_t test___riscv_vand_vx_i8mf8_tumu(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf8_tumu(mask,merge,op1,op2,vl); +} + + +vint8mf4_t test___riscv_vand_vx_i8mf4_tumu(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf4_tumu(mask,merge,op1,op2,vl); +} + + +vint8mf2_t test___riscv_vand_vx_i8mf2_tumu(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf2_tumu(mask,merge,op1,op2,vl); +} + + +vint8m1_t test___riscv_vand_vx_i8m1_tumu(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m1_tumu(mask,merge,op1,op2,vl); +} + + +vint8m2_t test___riscv_vand_vx_i8m2_tumu(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m2_tumu(mask,merge,op1,op2,vl); +} + + +vint8m4_t test___riscv_vand_vx_i8m4_tumu(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m4_tumu(mask,merge,op1,op2,vl); +} + + +vint8m8_t test___riscv_vand_vx_i8m8_tumu(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m8_tumu(mask,merge,op1,op2,vl); +} + + +vint16mf4_t test___riscv_vand_vx_i16mf4_tumu(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16mf4_tumu(mask,merge,op1,op2,vl); +} + + +vint16mf2_t test___riscv_vand_vx_i16mf2_tumu(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16mf2_tumu(mask,merge,op1,op2,vl); +} + + +vint16m1_t test___riscv_vand_vx_i16m1_tumu(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m1_tumu(mask,merge,op1,op2,vl); +} + + +vint16m2_t test___riscv_vand_vx_i16m2_tumu(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m2_tumu(mask,merge,op1,op2,vl); +} + + +vint16m4_t test___riscv_vand_vx_i16m4_tumu(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m4_tumu(mask,merge,op1,op2,vl); +} + + +vint16m8_t test___riscv_vand_vx_i16m8_tumu(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m8_tumu(mask,merge,op1,op2,vl); +} + + +vint32mf2_t test___riscv_vand_vx_i32mf2_tumu(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32mf2_tumu(mask,merge,op1,op2,vl); +} + + +vint32m1_t test___riscv_vand_vx_i32m1_tumu(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m1_tumu(mask,merge,op1,op2,vl); +} + + +vint32m2_t test___riscv_vand_vx_i32m2_tumu(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m2_tumu(mask,merge,op1,op2,vl); +} + + +vint32m4_t test___riscv_vand_vx_i32m4_tumu(vbool8_t 
mask,vint32m4_t merge,vint32m4_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m4_tumu(mask,merge,op1,op2,vl); +} + + +vint32m8_t test___riscv_vand_vx_i32m8_tumu(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m8_tumu(mask,merge,op1,op2,vl); +} + + +vint64m1_t test___riscv_vand_vx_i64m1_tumu(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m1_tumu(mask,merge,op1,op2,vl); +} + + +vint64m2_t test___riscv_vand_vx_i64m2_tumu(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m2_tumu(mask,merge,op1,op2,vl); +} + + +vint64m4_t test___riscv_vand_vx_i64m4_tumu(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m4_tumu(mask,merge,op1,op2,vl); +} + + +vint64m8_t test___riscv_vand_vx_i64m8_tumu(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m8_tumu(mask,merge,op1,op2,vl); +} + + +vuint8mf8_t test___riscv_vand_vx_u8mf8_tumu(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf8_tumu(mask,merge,op1,op2,vl); +} + + +vuint8mf4_t test___riscv_vand_vx_u8mf4_tumu(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf4_tumu(mask,merge,op1,op2,vl); +} + + +vuint8mf2_t test___riscv_vand_vx_u8mf2_tumu(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf2_tumu(mask,merge,op1,op2,vl); +} + + +vuint8m1_t test___riscv_vand_vx_u8m1_tumu(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m1_tumu(mask,merge,op1,op2,vl); +} + + +vuint8m2_t test___riscv_vand_vx_u8m2_tumu(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m2_tumu(mask,merge,op1,op2,vl); +} + + +vuint8m4_t test___riscv_vand_vx_u8m4_tumu(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m4_tumu(mask,merge,op1,op2,vl); +} + + +vuint8m8_t test___riscv_vand_vx_u8m8_tumu(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m8_tumu(mask,merge,op1,op2,vl); +} + + +vuint16mf4_t test___riscv_vand_vx_u16mf4_tumu(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16mf4_tumu(mask,merge,op1,op2,vl); +} + + +vuint16mf2_t test___riscv_vand_vx_u16mf2_tumu(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16mf2_tumu(mask,merge,op1,op2,vl); +} + + +vuint16m1_t test___riscv_vand_vx_u16m1_tumu(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m1_tumu(mask,merge,op1,op2,vl); +} + + +vuint16m2_t test___riscv_vand_vx_u16m2_tumu(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m2_tumu(mask,merge,op1,op2,vl); +} + + +vuint16m4_t test___riscv_vand_vx_u16m4_tumu(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m4_tumu(mask,merge,op1,op2,vl); +} + + +vuint16m8_t test___riscv_vand_vx_u16m8_tumu(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m8_tumu(mask,merge,op1,op2,vl); +} + + +vuint32mf2_t test___riscv_vand_vx_u32mf2_tumu(vbool64_t mask,vuint32mf2_t 
merge,vuint32mf2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32mf2_tumu(mask,merge,op1,op2,vl); +} + + +vuint32m1_t test___riscv_vand_vx_u32m1_tumu(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m1_tumu(mask,merge,op1,op2,vl); +} + + +vuint32m2_t test___riscv_vand_vx_u32m2_tumu(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m2_tumu(mask,merge,op1,op2,vl); +} + + +vuint32m4_t test___riscv_vand_vx_u32m4_tumu(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m4_tumu(mask,merge,op1,op2,vl); +} + + +vuint32m8_t test___riscv_vand_vx_u32m8_tumu(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m8_tumu(mask,merge,op1,op2,vl); +} + + +vuint64m1_t test___riscv_vand_vx_u64m1_tumu(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m1_tumu(mask,merge,op1,op2,vl); +} + + +vuint64m2_t test___riscv_vand_vx_u64m2_tumu(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m2_tumu(mask,merge,op1,op2,vl); +} + + +vuint64m4_t test___riscv_vand_vx_u64m4_tumu(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m4_tumu(mask,merge,op1,op2,vl); +} + + +vuint64m8_t test___riscv_vand_vx_u64m8_tumu(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m8_tumu(mask,merge,op1,op2,vl); +} + + + +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times 
{vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vand\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t} 8 } } */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_tumu_rv32-2.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_tumu_rv32-2.c new file mode 100644 index 000000000000..667940d01782 --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_tumu_rv32-2.c @@ -0,0 +1,289 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */ + +#include "riscv_vector.h" + +vint8mf8_t test___riscv_vand_vx_i8mf8_tumu(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf8_tumu(mask,merge,op1,op2,31); +} + + +vint8mf4_t test___riscv_vand_vx_i8mf4_tumu(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf4_tumu(mask,merge,op1,op2,31); +} + + +vint8mf2_t test___riscv_vand_vx_i8mf2_tumu(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf2_tumu(mask,merge,op1,op2,31); +} + + +vint8m1_t test___riscv_vand_vx_i8m1_tumu(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m1_tumu(mask,merge,op1,op2,31); +} + + +vint8m2_t test___riscv_vand_vx_i8m2_tumu(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m2_tumu(mask,merge,op1,op2,31); +} + + +vint8m4_t test___riscv_vand_vx_i8m4_tumu(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m4_tumu(mask,merge,op1,op2,31); +} + + +vint8m8_t test___riscv_vand_vx_i8m8_tumu(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m8_tumu(mask,merge,op1,op2,31); +} + + +vint16mf4_t test___riscv_vand_vx_i16mf4_tumu(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16mf4_tumu(mask,merge,op1,op2,31); +} + + +vint16mf2_t test___riscv_vand_vx_i16mf2_tumu(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16mf2_tumu(mask,merge,op1,op2,31); +} + + +vint16m1_t test___riscv_vand_vx_i16m1_tumu(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m1_tumu(mask,merge,op1,op2,31); +} + + +vint16m2_t test___riscv_vand_vx_i16m2_tumu(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m2_tumu(mask,merge,op1,op2,31); +} + + +vint16m4_t test___riscv_vand_vx_i16m4_tumu(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,int16_t op2,size_t vl) +{ 
+ return __riscv_vand_vx_i16m4_tumu(mask,merge,op1,op2,31); +} + + +vint16m8_t test___riscv_vand_vx_i16m8_tumu(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m8_tumu(mask,merge,op1,op2,31); +} + + +vint32mf2_t test___riscv_vand_vx_i32mf2_tumu(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32mf2_tumu(mask,merge,op1,op2,31); +} + + +vint32m1_t test___riscv_vand_vx_i32m1_tumu(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m1_tumu(mask,merge,op1,op2,31); +} + + +vint32m2_t test___riscv_vand_vx_i32m2_tumu(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m2_tumu(mask,merge,op1,op2,31); +} + + +vint32m4_t test___riscv_vand_vx_i32m4_tumu(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m4_tumu(mask,merge,op1,op2,31); +} + + +vint32m8_t test___riscv_vand_vx_i32m8_tumu(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m8_tumu(mask,merge,op1,op2,31); +} + + +vint64m1_t test___riscv_vand_vx_i64m1_tumu(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m1_tumu(mask,merge,op1,op2,31); +} + + +vint64m2_t test___riscv_vand_vx_i64m2_tumu(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m2_tumu(mask,merge,op1,op2,31); +} + + +vint64m4_t test___riscv_vand_vx_i64m4_tumu(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m4_tumu(mask,merge,op1,op2,31); +} + + +vint64m8_t test___riscv_vand_vx_i64m8_tumu(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m8_tumu(mask,merge,op1,op2,31); +} + + +vuint8mf8_t test___riscv_vand_vx_u8mf8_tumu(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf8_tumu(mask,merge,op1,op2,31); +} + + +vuint8mf4_t test___riscv_vand_vx_u8mf4_tumu(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf4_tumu(mask,merge,op1,op2,31); +} + + +vuint8mf2_t test___riscv_vand_vx_u8mf2_tumu(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf2_tumu(mask,merge,op1,op2,31); +} + + +vuint8m1_t test___riscv_vand_vx_u8m1_tumu(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m1_tumu(mask,merge,op1,op2,31); +} + + +vuint8m2_t test___riscv_vand_vx_u8m2_tumu(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m2_tumu(mask,merge,op1,op2,31); +} + + +vuint8m4_t test___riscv_vand_vx_u8m4_tumu(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m4_tumu(mask,merge,op1,op2,31); +} + + +vuint8m8_t test___riscv_vand_vx_u8m8_tumu(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m8_tumu(mask,merge,op1,op2,31); +} + + +vuint16mf4_t test___riscv_vand_vx_u16mf4_tumu(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16mf4_tumu(mask,merge,op1,op2,31); +} + + +vuint16mf2_t test___riscv_vand_vx_u16mf2_tumu(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,uint16_t op2,size_t vl) +{ + return 
__riscv_vand_vx_u16mf2_tumu(mask,merge,op1,op2,31); +} + + +vuint16m1_t test___riscv_vand_vx_u16m1_tumu(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m1_tumu(mask,merge,op1,op2,31); +} + + +vuint16m2_t test___riscv_vand_vx_u16m2_tumu(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m2_tumu(mask,merge,op1,op2,31); +} + + +vuint16m4_t test___riscv_vand_vx_u16m4_tumu(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m4_tumu(mask,merge,op1,op2,31); +} + + +vuint16m8_t test___riscv_vand_vx_u16m8_tumu(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m8_tumu(mask,merge,op1,op2,31); +} + + +vuint32mf2_t test___riscv_vand_vx_u32mf2_tumu(vbool64_t mask,vuint32mf2_t merge,vuint32mf2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32mf2_tumu(mask,merge,op1,op2,31); +} + + +vuint32m1_t test___riscv_vand_vx_u32m1_tumu(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m1_tumu(mask,merge,op1,op2,31); +} + + +vuint32m2_t test___riscv_vand_vx_u32m2_tumu(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m2_tumu(mask,merge,op1,op2,31); +} + + +vuint32m4_t test___riscv_vand_vx_u32m4_tumu(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m4_tumu(mask,merge,op1,op2,31); +} + + +vuint32m8_t test___riscv_vand_vx_u32m8_tumu(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m8_tumu(mask,merge,op1,op2,31); +} + + +vuint64m1_t test___riscv_vand_vx_u64m1_tumu(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m1_tumu(mask,merge,op1,op2,31); +} + + +vuint64m2_t test___riscv_vand_vx_u64m2_tumu(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m2_tumu(mask,merge,op1,op2,31); +} + + +vuint64m4_t test___riscv_vand_vx_u64m4_tumu(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m4_tumu(mask,merge,op1,op2,31); +} + + +vuint64m8_t test___riscv_vand_vx_u64m8_tumu(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m8_tumu(mask,merge,op1,op2,31); +} + + + +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf8,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf4,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf2,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m1,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m2,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m4,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m8,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { 
dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf4,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf2,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m1,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m2,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m4,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m8,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*mf2,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m1,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m2,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m4,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m8,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vand\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t} 8 } } */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_tumu_rv32-3.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_tumu_rv32-3.c new file mode 100644 index 000000000000..46093090ce00 --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_tumu_rv32-3.c @@ -0,0 +1,289 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */ + +#include "riscv_vector.h" + +vint8mf8_t test___riscv_vand_vx_i8mf8_tumu(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf8_tumu(mask,merge,op1,op2,32); +} + + +vint8mf4_t test___riscv_vand_vx_i8mf4_tumu(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf4_tumu(mask,merge,op1,op2,32); +} + + +vint8mf2_t test___riscv_vand_vx_i8mf2_tumu(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf2_tumu(mask,merge,op1,op2,32); +} + + +vint8m1_t test___riscv_vand_vx_i8m1_tumu(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m1_tumu(mask,merge,op1,op2,32); +} + + +vint8m2_t test___riscv_vand_vx_i8m2_tumu(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m2_tumu(mask,merge,op1,op2,32); +} + + +vint8m4_t test___riscv_vand_vx_i8m4_tumu(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m4_tumu(mask,merge,op1,op2,32); +} + + +vint8m8_t test___riscv_vand_vx_i8m8_tumu(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m8_tumu(mask,merge,op1,op2,32); +} + + +vint16mf4_t test___riscv_vand_vx_i16mf4_tumu(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,int16_t op2,size_t vl) 
+{ + return __riscv_vand_vx_i16mf4_tumu(mask,merge,op1,op2,32); +} + + +vint16mf2_t test___riscv_vand_vx_i16mf2_tumu(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16mf2_tumu(mask,merge,op1,op2,32); +} + + +vint16m1_t test___riscv_vand_vx_i16m1_tumu(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m1_tumu(mask,merge,op1,op2,32); +} + + +vint16m2_t test___riscv_vand_vx_i16m2_tumu(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m2_tumu(mask,merge,op1,op2,32); +} + + +vint16m4_t test___riscv_vand_vx_i16m4_tumu(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m4_tumu(mask,merge,op1,op2,32); +} + + +vint16m8_t test___riscv_vand_vx_i16m8_tumu(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m8_tumu(mask,merge,op1,op2,32); +} + + +vint32mf2_t test___riscv_vand_vx_i32mf2_tumu(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32mf2_tumu(mask,merge,op1,op2,32); +} + + +vint32m1_t test___riscv_vand_vx_i32m1_tumu(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m1_tumu(mask,merge,op1,op2,32); +} + + +vint32m2_t test___riscv_vand_vx_i32m2_tumu(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m2_tumu(mask,merge,op1,op2,32); +} + + +vint32m4_t test___riscv_vand_vx_i32m4_tumu(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m4_tumu(mask,merge,op1,op2,32); +} + + +vint32m8_t test___riscv_vand_vx_i32m8_tumu(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m8_tumu(mask,merge,op1,op2,32); +} + + +vint64m1_t test___riscv_vand_vx_i64m1_tumu(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m1_tumu(mask,merge,op1,op2,32); +} + + +vint64m2_t test___riscv_vand_vx_i64m2_tumu(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m2_tumu(mask,merge,op1,op2,32); +} + + +vint64m4_t test___riscv_vand_vx_i64m4_tumu(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m4_tumu(mask,merge,op1,op2,32); +} + + +vint64m8_t test___riscv_vand_vx_i64m8_tumu(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m8_tumu(mask,merge,op1,op2,32); +} + + +vuint8mf8_t test___riscv_vand_vx_u8mf8_tumu(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf8_tumu(mask,merge,op1,op2,32); +} + + +vuint8mf4_t test___riscv_vand_vx_u8mf4_tumu(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf4_tumu(mask,merge,op1,op2,32); +} + + +vuint8mf2_t test___riscv_vand_vx_u8mf2_tumu(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf2_tumu(mask,merge,op1,op2,32); +} + + +vuint8m1_t test___riscv_vand_vx_u8m1_tumu(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m1_tumu(mask,merge,op1,op2,32); +} + + +vuint8m2_t test___riscv_vand_vx_u8m2_tumu(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,uint8_t op2,size_t vl) +{ + return 
__riscv_vand_vx_u8m2_tumu(mask,merge,op1,op2,32); +} + + +vuint8m4_t test___riscv_vand_vx_u8m4_tumu(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m4_tumu(mask,merge,op1,op2,32); +} + + +vuint8m8_t test___riscv_vand_vx_u8m8_tumu(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m8_tumu(mask,merge,op1,op2,32); +} + + +vuint16mf4_t test___riscv_vand_vx_u16mf4_tumu(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16mf4_tumu(mask,merge,op1,op2,32); +} + + +vuint16mf2_t test___riscv_vand_vx_u16mf2_tumu(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16mf2_tumu(mask,merge,op1,op2,32); +} + + +vuint16m1_t test___riscv_vand_vx_u16m1_tumu(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m1_tumu(mask,merge,op1,op2,32); +} + + +vuint16m2_t test___riscv_vand_vx_u16m2_tumu(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m2_tumu(mask,merge,op1,op2,32); +} + + +vuint16m4_t test___riscv_vand_vx_u16m4_tumu(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m4_tumu(mask,merge,op1,op2,32); +} + + +vuint16m8_t test___riscv_vand_vx_u16m8_tumu(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m8_tumu(mask,merge,op1,op2,32); +} + + +vuint32mf2_t test___riscv_vand_vx_u32mf2_tumu(vbool64_t mask,vuint32mf2_t merge,vuint32mf2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32mf2_tumu(mask,merge,op1,op2,32); +} + + +vuint32m1_t test___riscv_vand_vx_u32m1_tumu(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m1_tumu(mask,merge,op1,op2,32); +} + + +vuint32m2_t test___riscv_vand_vx_u32m2_tumu(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m2_tumu(mask,merge,op1,op2,32); +} + + +vuint32m4_t test___riscv_vand_vx_u32m4_tumu(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m4_tumu(mask,merge,op1,op2,32); +} + + +vuint32m8_t test___riscv_vand_vx_u32m8_tumu(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m8_tumu(mask,merge,op1,op2,32); +} + + +vuint64m1_t test___riscv_vand_vx_u64m1_tumu(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m1_tumu(mask,merge,op1,op2,32); +} + + +vuint64m2_t test___riscv_vand_vx_u64m2_tumu(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m2_tumu(mask,merge,op1,op2,32); +} + + +vuint64m4_t test___riscv_vand_vx_u64m4_tumu(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m4_tumu(mask,merge,op1,op2,32); +} + + +vuint64m8_t test___riscv_vand_vx_u64m8_tumu(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m8_tumu(mask,merge,op1,op2,32); +} + + + +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } 
} */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vand\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t} 8 } } */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_tumu_rv64-1.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_tumu_rv64-1.c new file mode 100644 index 000000000000..09e45964fbc9 --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_tumu_rv64-1.c @@ -0,0 +1,292 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */ + +#include "riscv_vector.h" + +vint8mf8_t test___riscv_vand_vx_i8mf8_tumu(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf8_tumu(mask,merge,op1,op2,vl); +} + + +vint8mf4_t test___riscv_vand_vx_i8mf4_tumu(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf4_tumu(mask,merge,op1,op2,vl); +} + + +vint8mf2_t test___riscv_vand_vx_i8mf2_tumu(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,int8_t op2,size_t vl) +{ + return 
__riscv_vand_vx_i8mf2_tumu(mask,merge,op1,op2,vl); +} + + +vint8m1_t test___riscv_vand_vx_i8m1_tumu(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m1_tumu(mask,merge,op1,op2,vl); +} + + +vint8m2_t test___riscv_vand_vx_i8m2_tumu(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m2_tumu(mask,merge,op1,op2,vl); +} + + +vint8m4_t test___riscv_vand_vx_i8m4_tumu(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m4_tumu(mask,merge,op1,op2,vl); +} + + +vint8m8_t test___riscv_vand_vx_i8m8_tumu(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m8_tumu(mask,merge,op1,op2,vl); +} + + +vint16mf4_t test___riscv_vand_vx_i16mf4_tumu(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16mf4_tumu(mask,merge,op1,op2,vl); +} + + +vint16mf2_t test___riscv_vand_vx_i16mf2_tumu(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16mf2_tumu(mask,merge,op1,op2,vl); +} + + +vint16m1_t test___riscv_vand_vx_i16m1_tumu(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m1_tumu(mask,merge,op1,op2,vl); +} + + +vint16m2_t test___riscv_vand_vx_i16m2_tumu(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m2_tumu(mask,merge,op1,op2,vl); +} + + +vint16m4_t test___riscv_vand_vx_i16m4_tumu(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m4_tumu(mask,merge,op1,op2,vl); +} + + +vint16m8_t test___riscv_vand_vx_i16m8_tumu(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m8_tumu(mask,merge,op1,op2,vl); +} + + +vint32mf2_t test___riscv_vand_vx_i32mf2_tumu(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32mf2_tumu(mask,merge,op1,op2,vl); +} + + +vint32m1_t test___riscv_vand_vx_i32m1_tumu(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m1_tumu(mask,merge,op1,op2,vl); +} + + +vint32m2_t test___riscv_vand_vx_i32m2_tumu(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m2_tumu(mask,merge,op1,op2,vl); +} + + +vint32m4_t test___riscv_vand_vx_i32m4_tumu(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m4_tumu(mask,merge,op1,op2,vl); +} + + +vint32m8_t test___riscv_vand_vx_i32m8_tumu(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m8_tumu(mask,merge,op1,op2,vl); +} + + +vint64m1_t test___riscv_vand_vx_i64m1_tumu(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m1_tumu(mask,merge,op1,op2,vl); +} + + +vint64m2_t test___riscv_vand_vx_i64m2_tumu(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m2_tumu(mask,merge,op1,op2,vl); +} + + +vint64m4_t test___riscv_vand_vx_i64m4_tumu(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m4_tumu(mask,merge,op1,op2,vl); +} + + +vint64m8_t test___riscv_vand_vx_i64m8_tumu(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m8_tumu(mask,merge,op1,op2,vl); +} + + +vuint8mf8_t 
test___riscv_vand_vx_u8mf8_tumu(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf8_tumu(mask,merge,op1,op2,vl); +} + + +vuint8mf4_t test___riscv_vand_vx_u8mf4_tumu(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf4_tumu(mask,merge,op1,op2,vl); +} + + +vuint8mf2_t test___riscv_vand_vx_u8mf2_tumu(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf2_tumu(mask,merge,op1,op2,vl); +} + + +vuint8m1_t test___riscv_vand_vx_u8m1_tumu(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m1_tumu(mask,merge,op1,op2,vl); +} + + +vuint8m2_t test___riscv_vand_vx_u8m2_tumu(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m2_tumu(mask,merge,op1,op2,vl); +} + + +vuint8m4_t test___riscv_vand_vx_u8m4_tumu(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m4_tumu(mask,merge,op1,op2,vl); +} + + +vuint8m8_t test___riscv_vand_vx_u8m8_tumu(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m8_tumu(mask,merge,op1,op2,vl); +} + + +vuint16mf4_t test___riscv_vand_vx_u16mf4_tumu(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16mf4_tumu(mask,merge,op1,op2,vl); +} + + +vuint16mf2_t test___riscv_vand_vx_u16mf2_tumu(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16mf2_tumu(mask,merge,op1,op2,vl); +} + + +vuint16m1_t test___riscv_vand_vx_u16m1_tumu(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m1_tumu(mask,merge,op1,op2,vl); +} + + +vuint16m2_t test___riscv_vand_vx_u16m2_tumu(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m2_tumu(mask,merge,op1,op2,vl); +} + + +vuint16m4_t test___riscv_vand_vx_u16m4_tumu(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m4_tumu(mask,merge,op1,op2,vl); +} + + +vuint16m8_t test___riscv_vand_vx_u16m8_tumu(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m8_tumu(mask,merge,op1,op2,vl); +} + + +vuint32mf2_t test___riscv_vand_vx_u32mf2_tumu(vbool64_t mask,vuint32mf2_t merge,vuint32mf2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32mf2_tumu(mask,merge,op1,op2,vl); +} + + +vuint32m1_t test___riscv_vand_vx_u32m1_tumu(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m1_tumu(mask,merge,op1,op2,vl); +} + + +vuint32m2_t test___riscv_vand_vx_u32m2_tumu(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m2_tumu(mask,merge,op1,op2,vl); +} + + +vuint32m4_t test___riscv_vand_vx_u32m4_tumu(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m4_tumu(mask,merge,op1,op2,vl); +} + + +vuint32m8_t test___riscv_vand_vx_u32m8_tumu(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m8_tumu(mask,merge,op1,op2,vl); +} + + +vuint64m1_t test___riscv_vand_vx_u64m1_tumu(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m1_tumu(mask,merge,op1,op2,vl); +} + + +vuint64m2_t 
test___riscv_vand_vx_u64m2_tumu(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m2_tumu(mask,merge,op1,op2,vl); +} + + +vuint64m4_t test___riscv_vand_vx_u64m4_tumu(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m4_tumu(mask,merge,op1,op2,vl); +} + + +vuint64m8_t test___riscv_vand_vx_u64m8_tumu(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m8_tumu(mask,merge,op1,op2,vl); +} + + + +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times 
{vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_tumu_rv64-2.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_tumu_rv64-2.c new file mode 100644 index 000000000000..a4d1d51eb923 --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_tumu_rv64-2.c @@ -0,0 +1,292 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */ + +#include "riscv_vector.h" + +vint8mf8_t test___riscv_vand_vx_i8mf8_tumu(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf8_tumu(mask,merge,op1,op2,31); +} + + +vint8mf4_t test___riscv_vand_vx_i8mf4_tumu(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf4_tumu(mask,merge,op1,op2,31); +} + + +vint8mf2_t test___riscv_vand_vx_i8mf2_tumu(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf2_tumu(mask,merge,op1,op2,31); +} + + +vint8m1_t test___riscv_vand_vx_i8m1_tumu(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m1_tumu(mask,merge,op1,op2,31); +} + + +vint8m2_t test___riscv_vand_vx_i8m2_tumu(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m2_tumu(mask,merge,op1,op2,31); +} + + +vint8m4_t test___riscv_vand_vx_i8m4_tumu(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m4_tumu(mask,merge,op1,op2,31); +} + + +vint8m8_t test___riscv_vand_vx_i8m8_tumu(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m8_tumu(mask,merge,op1,op2,31); +} + + +vint16mf4_t test___riscv_vand_vx_i16mf4_tumu(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16mf4_tumu(mask,merge,op1,op2,31); +} + + +vint16mf2_t test___riscv_vand_vx_i16mf2_tumu(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16mf2_tumu(mask,merge,op1,op2,31); +} + + +vint16m1_t test___riscv_vand_vx_i16m1_tumu(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m1_tumu(mask,merge,op1,op2,31); +} + + +vint16m2_t test___riscv_vand_vx_i16m2_tumu(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m2_tumu(mask,merge,op1,op2,31); +} + + +vint16m4_t test___riscv_vand_vx_i16m4_tumu(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m4_tumu(mask,merge,op1,op2,31); +} + + +vint16m8_t test___riscv_vand_vx_i16m8_tumu(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m8_tumu(mask,merge,op1,op2,31); +} + + +vint32mf2_t test___riscv_vand_vx_i32mf2_tumu(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32mf2_tumu(mask,merge,op1,op2,31); +} + + +vint32m1_t test___riscv_vand_vx_i32m1_tumu(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,int32_t op2,size_t vl) +{ + return 
__riscv_vand_vx_i32m1_tumu(mask,merge,op1,op2,31); +} + + +vint32m2_t test___riscv_vand_vx_i32m2_tumu(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m2_tumu(mask,merge,op1,op2,31); +} + + +vint32m4_t test___riscv_vand_vx_i32m4_tumu(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m4_tumu(mask,merge,op1,op2,31); +} + + +vint32m8_t test___riscv_vand_vx_i32m8_tumu(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m8_tumu(mask,merge,op1,op2,31); +} + + +vint64m1_t test___riscv_vand_vx_i64m1_tumu(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m1_tumu(mask,merge,op1,op2,31); +} + + +vint64m2_t test___riscv_vand_vx_i64m2_tumu(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m2_tumu(mask,merge,op1,op2,31); +} + + +vint64m4_t test___riscv_vand_vx_i64m4_tumu(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m4_tumu(mask,merge,op1,op2,31); +} + + +vint64m8_t test___riscv_vand_vx_i64m8_tumu(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m8_tumu(mask,merge,op1,op2,31); +} + + +vuint8mf8_t test___riscv_vand_vx_u8mf8_tumu(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf8_tumu(mask,merge,op1,op2,31); +} + + +vuint8mf4_t test___riscv_vand_vx_u8mf4_tumu(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf4_tumu(mask,merge,op1,op2,31); +} + + +vuint8mf2_t test___riscv_vand_vx_u8mf2_tumu(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf2_tumu(mask,merge,op1,op2,31); +} + + +vuint8m1_t test___riscv_vand_vx_u8m1_tumu(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m1_tumu(mask,merge,op1,op2,31); +} + + +vuint8m2_t test___riscv_vand_vx_u8m2_tumu(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m2_tumu(mask,merge,op1,op2,31); +} + + +vuint8m4_t test___riscv_vand_vx_u8m4_tumu(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m4_tumu(mask,merge,op1,op2,31); +} + + +vuint8m8_t test___riscv_vand_vx_u8m8_tumu(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m8_tumu(mask,merge,op1,op2,31); +} + + +vuint16mf4_t test___riscv_vand_vx_u16mf4_tumu(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16mf4_tumu(mask,merge,op1,op2,31); +} + + +vuint16mf2_t test___riscv_vand_vx_u16mf2_tumu(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16mf2_tumu(mask,merge,op1,op2,31); +} + + +vuint16m1_t test___riscv_vand_vx_u16m1_tumu(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m1_tumu(mask,merge,op1,op2,31); +} + + +vuint16m2_t test___riscv_vand_vx_u16m2_tumu(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m2_tumu(mask,merge,op1,op2,31); +} + + +vuint16m4_t test___riscv_vand_vx_u16m4_tumu(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,uint16_t op2,size_t vl) +{ + return 
__riscv_vand_vx_u16m4_tumu(mask,merge,op1,op2,31); +} + + +vuint16m8_t test___riscv_vand_vx_u16m8_tumu(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m8_tumu(mask,merge,op1,op2,31); +} + + +vuint32mf2_t test___riscv_vand_vx_u32mf2_tumu(vbool64_t mask,vuint32mf2_t merge,vuint32mf2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32mf2_tumu(mask,merge,op1,op2,31); +} + + +vuint32m1_t test___riscv_vand_vx_u32m1_tumu(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m1_tumu(mask,merge,op1,op2,31); +} + + +vuint32m2_t test___riscv_vand_vx_u32m2_tumu(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m2_tumu(mask,merge,op1,op2,31); +} + + +vuint32m4_t test___riscv_vand_vx_u32m4_tumu(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m4_tumu(mask,merge,op1,op2,31); +} + + +vuint32m8_t test___riscv_vand_vx_u32m8_tumu(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m8_tumu(mask,merge,op1,op2,31); +} + + +vuint64m1_t test___riscv_vand_vx_u64m1_tumu(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m1_tumu(mask,merge,op1,op2,31); +} + + +vuint64m2_t test___riscv_vand_vx_u64m2_tumu(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m2_tumu(mask,merge,op1,op2,31); +} + + +vuint64m4_t test___riscv_vand_vx_u64m4_tumu(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m4_tumu(mask,merge,op1,op2,31); +} + + +vuint64m8_t test___riscv_vand_vx_u64m8_tumu(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m8_tumu(mask,merge,op1,op2,31); +} + + + +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf8,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf4,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf2,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m1,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m2,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m4,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m8,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf4,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf2,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m1,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times 
{vsetivli\s+zero,\s*31,\s*e16,\s*m2,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m4,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m8,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*mf2,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m1,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m2,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m4,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m8,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m1,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m2,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m4,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m8,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_tumu_rv64-3.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_tumu_rv64-3.c new file mode 100644 index 000000000000..4a05fd04f935 --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vand_vx_tumu_rv64-3.c @@ -0,0 +1,292 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */ + +#include "riscv_vector.h" + +vint8mf8_t test___riscv_vand_vx_i8mf8_tumu(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf8_tumu(mask,merge,op1,op2,32); +} + + +vint8mf4_t test___riscv_vand_vx_i8mf4_tumu(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf4_tumu(mask,merge,op1,op2,32); +} + + +vint8mf2_t test___riscv_vand_vx_i8mf2_tumu(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8mf2_tumu(mask,merge,op1,op2,32); +} + + +vint8m1_t test___riscv_vand_vx_i8m1_tumu(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m1_tumu(mask,merge,op1,op2,32); +} + + +vint8m2_t test___riscv_vand_vx_i8m2_tumu(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m2_tumu(mask,merge,op1,op2,32); +} + + +vint8m4_t test___riscv_vand_vx_i8m4_tumu(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m4_tumu(mask,merge,op1,op2,32); +} + + +vint8m8_t test___riscv_vand_vx_i8m8_tumu(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vand_vx_i8m8_tumu(mask,merge,op1,op2,32); +} + + +vint16mf4_t test___riscv_vand_vx_i16mf4_tumu(vbool64_t mask,vint16mf4_t merge,vint16mf4_t 
op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16mf4_tumu(mask,merge,op1,op2,32); +} + + +vint16mf2_t test___riscv_vand_vx_i16mf2_tumu(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16mf2_tumu(mask,merge,op1,op2,32); +} + + +vint16m1_t test___riscv_vand_vx_i16m1_tumu(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m1_tumu(mask,merge,op1,op2,32); +} + + +vint16m2_t test___riscv_vand_vx_i16m2_tumu(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m2_tumu(mask,merge,op1,op2,32); +} + + +vint16m4_t test___riscv_vand_vx_i16m4_tumu(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m4_tumu(mask,merge,op1,op2,32); +} + + +vint16m8_t test___riscv_vand_vx_i16m8_tumu(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,int16_t op2,size_t vl) +{ + return __riscv_vand_vx_i16m8_tumu(mask,merge,op1,op2,32); +} + + +vint32mf2_t test___riscv_vand_vx_i32mf2_tumu(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32mf2_tumu(mask,merge,op1,op2,32); +} + + +vint32m1_t test___riscv_vand_vx_i32m1_tumu(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m1_tumu(mask,merge,op1,op2,32); +} + + +vint32m2_t test___riscv_vand_vx_i32m2_tumu(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m2_tumu(mask,merge,op1,op2,32); +} + + +vint32m4_t test___riscv_vand_vx_i32m4_tumu(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m4_tumu(mask,merge,op1,op2,32); +} + + +vint32m8_t test___riscv_vand_vx_i32m8_tumu(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,int32_t op2,size_t vl) +{ + return __riscv_vand_vx_i32m8_tumu(mask,merge,op1,op2,32); +} + + +vint64m1_t test___riscv_vand_vx_i64m1_tumu(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m1_tumu(mask,merge,op1,op2,32); +} + + +vint64m2_t test___riscv_vand_vx_i64m2_tumu(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m2_tumu(mask,merge,op1,op2,32); +} + + +vint64m4_t test___riscv_vand_vx_i64m4_tumu(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m4_tumu(mask,merge,op1,op2,32); +} + + +vint64m8_t test___riscv_vand_vx_i64m8_tumu(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,int64_t op2,size_t vl) +{ + return __riscv_vand_vx_i64m8_tumu(mask,merge,op1,op2,32); +} + + +vuint8mf8_t test___riscv_vand_vx_u8mf8_tumu(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf8_tumu(mask,merge,op1,op2,32); +} + + +vuint8mf4_t test___riscv_vand_vx_u8mf4_tumu(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf4_tumu(mask,merge,op1,op2,32); +} + + +vuint8mf2_t test___riscv_vand_vx_u8mf2_tumu(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8mf2_tumu(mask,merge,op1,op2,32); +} + + +vuint8m1_t test___riscv_vand_vx_u8m1_tumu(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m1_tumu(mask,merge,op1,op2,32); +} + + +vuint8m2_t test___riscv_vand_vx_u8m2_tumu(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,uint8_t op2,size_t vl) +{ + return 
__riscv_vand_vx_u8m2_tumu(mask,merge,op1,op2,32); +} + + +vuint8m4_t test___riscv_vand_vx_u8m4_tumu(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m4_tumu(mask,merge,op1,op2,32); +} + + +vuint8m8_t test___riscv_vand_vx_u8m8_tumu(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vand_vx_u8m8_tumu(mask,merge,op1,op2,32); +} + + +vuint16mf4_t test___riscv_vand_vx_u16mf4_tumu(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16mf4_tumu(mask,merge,op1,op2,32); +} + + +vuint16mf2_t test___riscv_vand_vx_u16mf2_tumu(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16mf2_tumu(mask,merge,op1,op2,32); +} + + +vuint16m1_t test___riscv_vand_vx_u16m1_tumu(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m1_tumu(mask,merge,op1,op2,32); +} + + +vuint16m2_t test___riscv_vand_vx_u16m2_tumu(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m2_tumu(mask,merge,op1,op2,32); +} + + +vuint16m4_t test___riscv_vand_vx_u16m4_tumu(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m4_tumu(mask,merge,op1,op2,32); +} + + +vuint16m8_t test___riscv_vand_vx_u16m8_tumu(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vand_vx_u16m8_tumu(mask,merge,op1,op2,32); +} + + +vuint32mf2_t test___riscv_vand_vx_u32mf2_tumu(vbool64_t mask,vuint32mf2_t merge,vuint32mf2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32mf2_tumu(mask,merge,op1,op2,32); +} + + +vuint32m1_t test___riscv_vand_vx_u32m1_tumu(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m1_tumu(mask,merge,op1,op2,32); +} + + +vuint32m2_t test___riscv_vand_vx_u32m2_tumu(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m2_tumu(mask,merge,op1,op2,32); +} + + +vuint32m4_t test___riscv_vand_vx_u32m4_tumu(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m4_tumu(mask,merge,op1,op2,32); +} + + +vuint32m8_t test___riscv_vand_vx_u32m8_tumu(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vand_vx_u32m8_tumu(mask,merge,op1,op2,32); +} + + +vuint64m1_t test___riscv_vand_vx_u64m1_tumu(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m1_tumu(mask,merge,op1,op2,32); +} + + +vuint64m2_t test___riscv_vand_vx_u64m2_tumu(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m2_tumu(mask,merge,op1,op2,32); +} + + +vuint64m4_t test___riscv_vand_vx_u64m4_tumu(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m4_tumu(mask,merge,op1,op2,32); +} + + +vuint64m8_t test___riscv_vand_vx_u64m8_tumu(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vand_vx_u64m8_tumu(mask,merge,op1,op2,32); +} + + + +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } 
} */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*tu,\s*mu\s+vand\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
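Usage note (illustrative sketch, not part of the patch): each _tumu test above passes its arguments in the order mask, merge, op1, op2, vl, where the merge operand supplies the values preserved in tail and masked-off element positions (tail-undisturbed, mask-undisturbed policy). A minimal compile-only example with hypothetical caller names, built on one of the intrinsics exercised by these tests:

#include "riscv_vector.h"

/* AND the scalar c into the active elements of v; elements masked off
   by mask and any tail elements keep their values from dest (tumu).  */
vint32m1_t
and_scalar_tumu (vbool32_t mask, vint32m1_t dest, vint32m1_t v,
                 int32_t c, size_t vl)
{
  return __riscv_vand_vx_i32m1_tumu (mask, dest, v, c, vl);
}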