}
[(set_attr "type" "vimuladd")])
+;; Match a broadcast of a widened scalar:
+;;   (vec_duplicate (any_extend:<VEL> scalar:<VSUBEL>))
+;; and split it into (1) a scalar sign/zero extension into a fresh
+;; pseudo, then (2) a VLMAX predicated broadcast of that extended
+;; value into the vector result.
+;; TARGET_64BIT is required — the extended element is <VEL> (DImode
+;; for every VWEXTI_D mode), which presumably must fit in one X
+;; register for the broadcast; NOTE(review): confirm that rationale.
+;; can_create_pseudo_p () guards gen_reg_rtx in the split body, so
+;; this pattern is only recognized before register allocation.
+(define_insn_and_split "*widen_first_<any_extend:su>_vx_<mode>"
+ [(set (match_operand:VWEXTI_D 0 "register_operand")
+ (vec_duplicate:VWEXTI_D
+ (any_extend:<VEL>
+ (match_operand:<VSUBEL> 1 "register_operand"))))]
+ "TARGET_VECTOR && TARGET_64BIT && can_create_pseudo_p ()"
+ "#"
+ "&& 1"
+ [(const_int 0)]
+ {
+ /* Widen the scalar first: emit extend<vsubel><vel>2 or
+    zero_extend<vsubel><vel>2 depending on <any_extend>
+    (see the extend_name code attribute).  */
+ rtx extend_scalar = gen_reg_rtx (<VEL>mode);
+ emit_insn (gen_<any_extend:extend_name><vsubel><vel>2 (extend_scalar,
+ operands[1]));
+
+ /* Then broadcast the widened scalar with a VLMAX unary op.  */
+ insn_code icode = code_for_pred_broadcast (<MODE>mode);
+ rtx vec_dup_ops[] = {operands[0], extend_scalar};
+ riscv_vector::emit_vlmax_insn (icode, riscv_vector::UNARY_OP, vec_dup_ops);
+
+ DONE;
+ })
+
+;; Match a widening binary op whose second input is a broadcast of a
+;; widened scalar:
+;;   (any_widen_binop (any_extend vec:<V_DOUBLE_TRUNC>)
+;;                    (vec_duplicate (any_extend scalar:<VSUBEL>)))
+;; and split it directly to the predicated dual-widening scalar
+;; instruction selected by code_for_pred_dual_widen_scalar — i.e. the
+;; .vx form of the widening op (presumably vwadd.vx/vwaddu.vx etc.;
+;; NOTE(review): confirm against the pred_dual_widen_scalar patterns).
+;; operands[] is passed through unchanged: {dest, narrow vec, scalar}
+;; matches the BINARY_OP operand layout expected by emit_vlmax_insn.
+(define_insn_and_split "*widen_<any_widen_binop:optab>_<any_extend:su>_vx_<mode>"
+ [(set (match_operand:VWEXTI_D 0 "register_operand")
+ (any_widen_binop:VWEXTI_D
+ (any_extend:VWEXTI_D
+ (match_operand:<V_DOUBLE_TRUNC> 1 "register_operand"))
+ (vec_duplicate:VWEXTI_D
+ (any_extend:<VEL>
+ (match_operand:<VSUBEL> 2 "register_operand")))))]
+ "TARGET_VECTOR && TARGET_64BIT && can_create_pseudo_p ()"
+ "#"
+ "&& 1"
+ [(const_int 0)]
+ {
+ /* Pick the predicated dual-widening .vx insn for this rtx code,
+    extension kind, and destination mode.  */
+ insn_code icode = code_for_pred_dual_widen_scalar (<any_widen_binop:CODE>,
+ <any_extend:CODE>,
+ <MODE>mode);
+ riscv_vector::emit_vlmax_insn (icode, riscv_vector::BINARY_OP, operands);
+
+ DONE;
+ }
+ [(set_attr "type" "viwalu")])
;; =============================================================================
;; Combine vec_duplicate + op.vv to op.vf
;; This code iterator allows signed and unsigned widening multiplications
;; to use the same template.
(define_code_iterator any_extend [sign_extend zero_extend])
+;; Optab-name fragment for each extension code, used to build calls
+;; like gen_<any_extend:extend_name><vsubel><vel>2, i.e.
+;; gen_extend...2 for sign_extend and gen_zero_extend...2 for
+;; zero_extend.
+(define_code_attr extend_name [
+ (sign_extend "extend") (zero_extend "zero_extend")
+])
;; These code iterators allow unsigned and signed extraction to be generated
;; from the same template.
(RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_VECTOR_ELEN_64")
])
+;; DImode-element vector modes usable as the *destination* of the
+;; widening .vx patterns above (source elements are <V_DOUBLE_TRUNC>,
+;; i.e. half-width).  Scalable (RVVM*) modes are gated on
+;; TARGET_VECTOR_ELEN_64; fixed-length VLS modes additionally require
+;; vls_mode_valid_p and, for the larger vectors, a minimum VLEN big
+;; enough to hold them.
+(define_mode_iterator VWEXTI_D [
+ (RVVM8DI "TARGET_VECTOR_ELEN_64") (RVVM4DI "TARGET_VECTOR_ELEN_64")
+ (RVVM2DI "TARGET_VECTOR_ELEN_64") (RVVM1DI "TARGET_VECTOR_ELEN_64")
+
+ (V1DI "riscv_vector::vls_mode_valid_p (V1DImode) && TARGET_VECTOR_ELEN_64")
+ (V2DI "riscv_vector::vls_mode_valid_p (V2DImode) && TARGET_VECTOR_ELEN_64")
+ (V4DI "riscv_vector::vls_mode_valid_p (V4DImode) && TARGET_VECTOR_ELEN_64")
+ (V8DI "riscv_vector::vls_mode_valid_p (V8DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 64")
+ (V16DI "riscv_vector::vls_mode_valid_p (V16DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 128")
+ (V32DI "riscv_vector::vls_mode_valid_p (V32DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 256")
+ (V64DI "riscv_vector::vls_mode_valid_p (V64DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 512")
+ (V128DI "riscv_vector::vls_mode_valid_p (V128DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 1024")
+ (V256DI "riscv_vector::vls_mode_valid_p (V256DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 2048")
+ (V512DI "riscv_vector::vls_mode_valid_p (V512DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 4096")
+])
+
(define_mode_iterator VWEXTI [
RVVM8HI RVVM4HI RVVM2HI RVVM1HI RVVMF2HI (RVVMF4HI "TARGET_VECTOR_ELEN_64")