}
};
+template<enum frm_op_type FRM_OP = NO_FRM>
class vfncvt_f : public function_base
{
public:
+  bool has_rounding_mode_operand_p () const override
+  {
+    return FRM_OP == HAS_FRM;
+  }
+
rtx expand (function_expander &e) const override
{
if (e.op_info->op == OP_TYPE_f_w)
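
The FRM_OP template parameter lets one class body serve both builtins: the
NO_FRM instantiation keeps the existing vfncvt.f behaviour, while the HAS_FRM
instantiation reports the extra rounding-mode operand to the expander through
has_rounding_mode_operand_p.  On the user side the two instantiations surface
as intrinsic pairs along these lines (a sketch; the exact prototypes come from
the generated riscv_vector.h, with the _rm variant taking the rounding mode as
an extra unsigned int argument, as the tests below exercise):

  vfloat32m1_t __riscv_vfncvt_f_x_w_f32m1 (vint64m2_t vs2, size_t vl);
  vfloat32m1_t __riscv_vfncvt_f_x_w_f32m1_rm (vint64m2_t vs2, unsigned int frm, size_t vl);
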
static CONSTEXPR const vfncvt_x<UNSPEC_UNSIGNED_VFCVT, HAS_FRM> vfncvt_xu_frm_obj;
static CONSTEXPR const vfncvt_rtz_x<FIX> vfncvt_rtz_x_obj;
static CONSTEXPR const vfncvt_rtz_x<UNSIGNED_FIX> vfncvt_rtz_xu_obj;
-static CONSTEXPR const vfncvt_f vfncvt_f_obj;
+static CONSTEXPR const vfncvt_f<NO_FRM> vfncvt_f_obj;
+static CONSTEXPR const vfncvt_f<HAS_FRM> vfncvt_f_frm_obj;
static CONSTEXPR const vfncvt_rod_f vfncvt_rod_f_obj;
static CONSTEXPR const reducop<PLUS> vredsum_obj;
static CONSTEXPR const reducop<UMAX> vredmaxu_obj;
BASE (vfncvt_rtz_x)
BASE (vfncvt_rtz_xu)
BASE (vfncvt_f)
+BASE (vfncvt_f_frm)
BASE (vfncvt_rod_f)
BASE (vredsum)
BASE (vredmaxu)
DEF_RVV_FUNCTION (vfncvt_x_frm, narrow_alu_frm, full_preds, f_to_ni_f_w_ops)
DEF_RVV_FUNCTION (vfncvt_xu_frm, narrow_alu_frm, full_preds, f_to_nu_f_w_ops)
+DEF_RVV_FUNCTION (vfncvt_f_frm, narrow_alu_frm, full_preds, i_to_nf_x_w_ops)
+DEF_RVV_FUNCTION (vfncvt_f_frm, narrow_alu_frm, full_preds, u_to_nf_xu_w_ops)
+DEF_RVV_FUNCTION (vfncvt_f_frm, narrow_alu_frm, full_preds, f_to_nf_f_w_ops)
/* 14. Vector Reduction Operations. */
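
The rounding mode passed to a _rm intrinsic is an explicit FRM encoding rather
than whatever the FRM CSR happens to hold at the call.  The values follow the
standard RISC-V encoding; riscv_vector.h exposes them roughly as sketched
below (names per the rvv-intrinsic-doc convention), so the literals 0 and 1 in
the tests request round-to-nearest-even and round-towards-zero respectively:

  enum __RISCV_FRM {
    __RISCV_FRM_RNE = 0,  /* Round to nearest, ties to even.  */
    __RISCV_FRM_RTZ = 1,  /* Round towards zero.  */
    __RISCV_FRM_RDN = 2,  /* Round down (towards -inf).  */
    __RISCV_FRM_RUP = 3,  /* Round up (towards +inf).  */
    __RISCV_FRM_RMM = 4,  /* Round to nearest, ties to max magnitude.  */
  };
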
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64 -O3 -Wno-psabi" } */
+
+#include "riscv_vector.h"
+
+vfloat32m1_t
+test_vfncvt_f_x_w_f32m1_rm (vint64m2_t op1, size_t vl) {
+ return __riscv_vfncvt_f_x_w_f32m1_rm (op1, 0, vl);
+}
+
+vfloat32m1_t
+test_vfncvt_f_x_w_f32m1_rm_m (vbool32_t mask, vint64m2_t op1, size_t vl) {
+ return __riscv_vfncvt_f_x_w_f32m1_rm_m (mask, op1, 1, vl);
+}
+
+vfloat32m1_t
+test_vfncvt_f_xu_w_f32m1_rm (vuint64m2_t op1, size_t vl) {
+ return __riscv_vfncvt_f_xu_w_f32m1_rm (op1, 0, vl);
+}
+
+vfloat32m1_t
+test_vfncvt_f_xu_w_f32m1_rm_m (vbool32_t mask, vuint64m2_t op1, size_t vl) {
+ return __riscv_vfncvt_f_xu_w_f32m1_rm_m (mask, op1, 1, vl);
+}
+
+vfloat32m1_t
+test_vfncvt_f_f_w_f32m1_rm (vfloat64m2_t op1, size_t vl) {
+ return __riscv_vfncvt_f_f_w_f32m1_rm (op1, 0, vl);
+}
+
+vfloat32m1_t
+test_vfncvt_f_f_w_f32m1_rm_m (vbool32_t mask, vfloat64m2_t op1, size_t vl) {
+ return __riscv_vfncvt_f_f_w_f32m1_rm_m (mask, op1, 1, vl);
+}
+
+vfloat32m1_t
+test_vfncvt_f_x_w_f32m1 (vint64m2_t op1, size_t vl) {
+ return __riscv_vfncvt_f_x_w_f32m1 (op1, vl);
+}
+
+vfloat32m1_t
+test_vfncvt_f_x_w_f32m1_m (vbool32_t mask, vint64m2_t op1, size_t vl) {
+ return __riscv_vfncvt_f_x_w_f32m1_m (mask, op1, vl);
+}
+
+vfloat32m1_t
+test_vfncvt_f_xu_w_f32m1 (vuint64m2_t op1, size_t vl) {
+ return __riscv_vfncvt_f_xu_w_f32m1 (op1, vl);
+}
+
+vfloat32m1_t
+test_vfncvt_f_xu_w_f32m1_m (vbool32_t mask, vuint64m2_t op1, size_t vl) {
+ return __riscv_vfncvt_f_xu_w_f32m1_m (mask, op1, vl);
+}
+
+vfloat32m1_t
+test_vfncvt_f_f_w_f32m1 (vfloat64m2_t op1, size_t vl) {
+ return __riscv_vfncvt_f_f_w_f32m1 (op1, vl);
+}
+
+vfloat32m1_t
+test_vfncvt_f_f_w_f32m1_m (vbool32_t mask, vfloat64m2_t op1, size_t vl) {
+ return __riscv_vfncvt_f_f_w_f32m1_m (mask, op1, vl);
+}
+
+/* { dg-final { scan-assembler-times {vfncvt\.f\.[xuf]+\.w\s+v[0-9]+,\s*v[0-9]+} 12 } } */
+/* { dg-final { scan-assembler-times {frrm\s+[axs][0-9]+} 6 } } */
+/* { dg-final { scan-assembler-times {fsrm\s+[axs][0-9]+} 6 } } */
+/* { dg-final { scan-assembler-times {fsrmi\s+[01234]} 6 } } */
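
The dg-final counts encode the expected code shape: all 12 test functions emit
one vfncvt.f.{x,xu,f}.w, and only the six _rm variants touch the FRM CSR, each
saving it (frrm), loading the requested immediate (fsrmi), and restoring it
(fsrm) around the conversion.  For one _rm function the sequence looks roughly
like this (illustrative only, not exact compiler output or register choices):

  /* frrm  a5              save the caller's FRM
     fsrmi 0               select round-to-nearest-even for this call
     vfncvt.f.x.w v8, v8   narrowing convert under the requested mode
     fsrm  a5              restore the caller's FRM  */
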