aarch64: Convert aarch64 multi choice patterns to new syntax
author     Andrea Corallo <andrea.corallo@arm.com>
           Tue, 19 Sep 2023 13:12:08 +0000 (15:12 +0200)
committer  Andrea Corallo <andrea.corallo@arm.com>
           Tue, 3 Oct 2023 08:39:05 +0000 (10:39 +0200)
Hi all,
this patch converts a number of multi choice patterns within the
aarch64 backend to the new syntax.
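
To illustrate, here is the first pattern of the diff below in both
forms (taken verbatim from the first hunk).  The old multi choice
syntax repeats the constraints in one string per operand and the
attributes in parallel comma-separated lists:

(define_insn "aarch64_simd_dup<mode>"
  [(set (match_operand:VDQ_I 0 "register_operand" "=w, w")
        (vec_duplicate:VDQ_I
          (match_operand:<VEL> 1 "register_operand" "w,?r")))]
  "TARGET_SIMD"
  "@
   dup\\t%0.<Vtype>, %1.<Vetype>[0]
   dup\\t%0.<Vtype>, %<vwcore>1"
  [(set_attr "type" "neon_dup<q>, neon_from_gp<q>")]
)

The new syntax gathers everything per alternative instead, one row
pairing the constraints, attributes and output template:

(define_insn "aarch64_simd_dup<mode>"
  [(set (match_operand:VDQ_I 0 "register_operand")
        (vec_duplicate:VDQ_I
          (match_operand:<VEL> 1 "register_operand")))]
  "TARGET_SIMD"
  {@ [ cons: =0 , 1  ; attrs: type      ]
     [ w        , w  ; neon_dup<q>      ] dup\t%0.<Vtype>, %1.<Vetype>[0]
     [ w        , ?r ; neon_from_gp<q>  ] dup\t%0.<Vtype>, %<vwcore>1
  }
)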

The list of converted patterns is in the ChangeLog.

For completeness, here follows the list of multi choice patterns that
my parser rejected for conversion; they typically have some C as asm
output and require some manual intervention (an example follows the
list):
aarch64_simd_vec_set<mode>, aarch64_get_lane<mode>,
aarch64_cm<optab>di, aarch64_cm<optab>di, aarch64_cmtstdi,
*aarch64_movv8di, *aarch64_be_mov<mode>, *aarch64_be_movci,
*aarch64_be_mov<mode>, *aarch64_be_movxi, *aarch64_sve_mov<mode>_le,
*aarch64_sve_mov<mode>_be, @aarch64_pred_mov<mode>,
@aarch64_sve_gather_prefetch<SVE_FULL_I:mode><VNx4SI_ONLY:mode>,
@aarch64_sve_gather_prefetch<SVE_FULL_I:mode><VNx2DI_ONLY:mode>,
*aarch64_sve_gather_prefetch<SVE_FULL_I:mode><VNx2DI_ONLY:mode>_sxtw,
*aarch64_sve_gather_prefetch<SVE_FULL_I:mode><VNx2DI_ONLY:mode>_uxtw,
@aarch64_vec_duplicate_vq<mode>_le, *vec_extract<mode><Vel>_0,
*vec_extract<mode><Vel>_v128, *cmp<cmp_op><mode>_and,
*fcm<cmp_op><mode>_and_combine, @aarch64_sve_ext<mode>,
@aarch64_sve2_<su>aba<mode>, *sibcall_insn, *sibcall_value_insn,
*xor_one_cmpl<mode>3, *insv_reg<mode>_<SUBDI_BITS>,
*aarch64_bfi<GPI:mode><ALLX:mode>_<SUBDI_BITS>,
*aarch64_bfidi<ALLX:mode>_subreg_<SUBDI_BITS>, *aarch64_bfxil<mode>,
*aarch64_bfxilsi_uxtw,
*aarch64_<su_optab>cvtf<fcvt_target><GPF:mode>2_mult,
atomic_store<mode>.
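
For comparison, C output fragments that could be converted are
expressed with the '<<' escape in the new syntax; the and<mode>3 hunk
below, for instance, turns

  "@
   and\t%0.<Vbtype>, %1.<Vbtype>, %2.<Vbtype>
   * return aarch64_output_simd_mov_immediate (operands[2], <bitsize>,\
                                              AARCH64_CHECK_BIC);"

into

  {@ [ cons: =0 , 1 , 2   ]
     [ w        , w , w   ] and\t%0.<Vbtype>, %1.<Vbtype>, %2.<Vbtype>
     [ w        , 0 , Db  ] << aarch64_output_simd_mov_immediate (operands[2], <bitsize>, AARCH64_CHECK_BIC);
  }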

Bootstrapped and regression tested on aarch64-unknown-linux-gnu; I also
analysed tmp-mddump.md (from 'make mddump') and could not find any
effective differences.  Okay for trunk?

Best regards,

  Andrea

gcc/ChangeLog:

* config/aarch64/aarch64.md (@ccmp<CC_ONLY:mode><GPI:mode>)
(@ccmp<CC_ONLY:mode><GPI:mode>_rev, *call_insn, *call_value_insn)
(*mov<mode>_aarch64, load_pair_sw_<SX:mode><SX2:mode>)
(load_pair_dw_<DX:mode><DX2:mode>)
(store_pair_sw_<SX:mode><SX2:mode>)
(store_pair_dw_<DX:mode><DX2:mode>, *extendsidi2_aarch64)
(*zero_extendsidi2_aarch64, *load_pair_zero_extendsidi2_aarch64)
(*extend<SHORT:mode><GPI:mode>2_aarch64)
(*zero_extend<SHORT:mode><GPI:mode>2_aarch64)
(*extendqihi2_aarch64, *zero_extendqihi2_aarch64)
(*add<mode>3_aarch64, *addsi3_aarch64_uxtw, *add<mode>3_poly_1)
(add<mode>3_compare0, *addsi3_compare0_uxtw)
(*add<mode>3_compareC_cconly, add<mode>3_compareC)
(*add<mode>3_compareV_cconly_imm, add<mode>3_compareV_imm)
(*add<mode>3nr_compare0, subdi3, subv<GPI:mode>_imm)
(*cmpv<GPI:mode>_insn, sub<mode>3_compare1_imm, neg<mode>2)
(cmp<mode>, fcmp<mode>, fcmpe<mode>, *cmov<mode>_insn)
(*cmovsi_insn_uxtw, <optab><mode>3, *<optab>si3_uxtw)
(*and<mode>3_compare0, *andsi3_compare0_uxtw, one_cmpl<mode>2)
(*<NLOGICAL:optab>_one_cmpl<mode>3, *and<mode>3nr_compare0)
(*aarch64_ashl_sisd_or_int_<mode>3)
(*aarch64_lshr_sisd_or_int_<mode>3)
(*aarch64_ashr_sisd_or_int_<mode>3, *ror<mode>3_insn)
(*<optab>si3_insn_uxtw, <optab>_trunc<fcvt_target><GPI:mode>2)
(<optab><fcvt_target><GPF:mode>2)
(<FCVT_F2FIXED:fcvt_fixed_insn><GPF:mode>3)
(<FCVT_FIXED2F:fcvt_fixed_insn><GPI:mode>3)
(*aarch64_<optab><mode>3_cssc, copysign<GPF:mode>3_insn): Update
to new syntax.

* config/aarch64/aarch64-sve2.md (@aarch64_scatter_stnt<mode>)
(@aarch64_scatter_stnt_<SVE_FULL_SDI:mode><SVE_PARTIAL_I:mode>)
(*aarch64_mul_unpredicated_<mode>)
(@aarch64_pred_<sve_int_op><mode>, *cond_<sve_int_op><mode>_2)
(*cond_<sve_int_op><mode>_3, *cond_<sve_int_op><mode>_any)
(*cond_<sve_int_op><mode>_z, @aarch64_pred_<sve_int_op><mode>)
(*cond_<sve_int_op><mode>_2, *cond_<sve_int_op><mode>_3)
(*cond_<sve_int_op><mode>_any, @aarch64_sve_<sve_int_op><mode>)
(@aarch64_sve_<sve_int_op>_lane_<mode>)
(@aarch64_sve_add_mul_lane_<mode>)
(@aarch64_sve_sub_mul_lane_<mode>, @aarch64_sve2_xar<mode>)
(*aarch64_sve2_bcax<mode>, @aarch64_sve2_eor3<mode>)
(*aarch64_sve2_nor<mode>, *aarch64_sve2_nand<mode>)
(*aarch64_sve2_bsl<mode>, *aarch64_sve2_nbsl<mode>)
(*aarch64_sve2_bsl1n<mode>, *aarch64_sve2_bsl2n<mode>)
(*aarch64_sve2_sra<mode>, @aarch64_sve_add_<sve_int_op><mode>)
(*aarch64_sve2_<su>aba<mode>, @aarch64_sve_add_<sve_int_op><mode>)
(@aarch64_sve_add_<sve_int_op>_lane_<mode>)
(@aarch64_sve_qadd_<sve_int_op><mode>)
(@aarch64_sve_qadd_<sve_int_op>_lane_<mode>)
(@aarch64_sve_sub_<sve_int_op><mode>)
(@aarch64_sve_sub_<sve_int_op>_lane_<mode>)
(@aarch64_sve_qsub_<sve_int_op><mode>)
(@aarch64_sve_qsub_<sve_int_op>_lane_<mode>)
(@aarch64_sve_<sve_fp_op><mode>, @aarch64_<sve_fp_op>_lane_<mode>)
(@aarch64_pred_<sve_int_op><mode>)
(@aarch64_pred_<sve_fp_op><mode>, *cond_<sve_int_op><mode>_2)
(*cond_<sve_int_op><mode>_z, @aarch64_sve_<optab><mode>)
(@aarch64_<optab>_lane_<mode>, @aarch64_sve_<optab><mode>)
(@aarch64_<optab>_lane_<mode>, @aarch64_pred_<sve_fp_op><mode>)
(*cond_<sve_fp_op><mode>_any_relaxed)
(*cond_<sve_fp_op><mode>_any_strict)
(@aarch64_pred_<sve_int_op><mode>, *cond_<sve_int_op><mode>)
(@aarch64_pred_<sve_fp_op><mode>, *cond_<sve_fp_op><mode>)
(*cond_<sve_fp_op><mode>_strict): Update to new syntax.

* config/aarch64/aarch64-sve.md (*aarch64_sve_mov<mode>_ldr_str)
(*aarch64_sve_mov<mode>_no_ldr_str, @aarch64_pred_mov<mode>)
(*aarch64_sve_mov<mode>, aarch64_wrffr)
(mask_scatter_store<mode><v_int_container>)
(*mask_scatter_store<mode><v_int_container>_<su>xtw_unpacked)
(*mask_scatter_store<mode><v_int_container>_sxtw)
(*mask_scatter_store<mode><v_int_container>_uxtw)
(@aarch64_scatter_store_trunc<VNx4_NARROW:mode><VNx4_WIDE:mode>)
(@aarch64_scatter_store_trunc<VNx2_NARROW:mode><VNx2_WIDE:mode>)
(*aarch64_scatter_store_trunc<VNx2_NARROW:mode><VNx2_WIDE:mode>_sxtw)
(*aarch64_scatter_store_trunc<VNx2_NARROW:mode><VNx2_WIDE:mode>_uxtw)
(*vec_duplicate<mode>_reg, vec_shl_insert_<mode>)
(vec_series<mode>, @extract_<last_op>_<mode>)
(@aarch64_pred_<optab><mode>, *cond_<optab><mode>_2)
(*cond_<optab><mode>_any, @aarch64_pred_<optab><mode>)
(@aarch64_sve_revbhw_<SVE_ALL:mode><PRED_HSD:mode>)
(@cond_<optab><mode>)
(*<optab><SVE_PARTIAL_I:mode><SVE_HSDI:mode>2)
(@aarch64_pred_sxt<SVE_FULL_HSDI:mode><SVE_PARTIAL_I:mode>)
(@aarch64_cond_sxt<SVE_FULL_HSDI:mode><SVE_PARTIAL_I:mode>)
(*cond_uxt<mode>_2, *cond_uxt<mode>_any, *cnot<mode>)
(*cond_cnot<mode>_2, *cond_cnot<mode>_any)
(@aarch64_pred_<optab><mode>, *cond_<optab><mode>_2_relaxed)
(*cond_<optab><mode>_2_strict, *cond_<optab><mode>_any_relaxed)
(*cond_<optab><mode>_any_strict, @aarch64_pred_<optab><mode>)
(*cond_<optab><mode>_2, *cond_<optab><mode>_3)
(*cond_<optab><mode>_any, add<mode>3, sub<mode>3)
(@aarch64_pred_<su>abd<mode>, *aarch64_cond_<su>abd<mode>_2)
(*aarch64_cond_<su>abd<mode>_3, *aarch64_cond_<su>abd<mode>_any)
(@aarch64_sve_<optab><mode>, @aarch64_pred_<optab><mode>)
(*cond_<optab><mode>_2, *cond_<optab><mode>_z)
(@aarch64_pred_<optab><mode>, *cond_<optab><mode>_2)
(*cond_<optab><mode>_3, *cond_<optab><mode>_any, <optab><mode>3)
(*cond_bic<mode>_2, *cond_bic<mode>_any)
(@aarch64_pred_<optab><mode>, *cond_<optab><mode>_2_const)
(*cond_<optab><mode>_any_const, *cond_<sve_int_op><mode>_m)
(*cond_<sve_int_op><mode>_z, *sdiv_pow2<mode>3)
(*cond_<sve_int_op><mode>_2, *cond_<sve_int_op><mode>_any)
(@aarch64_pred_<optab><mode>, *cond_<optab><mode>_2_relaxed)
(*cond_<optab><mode>_2_strict, *cond_<optab><mode>_any_relaxed)
(*cond_<optab><mode>_any_strict, @aarch64_pred_<optab><mode>)
(*cond_<optab><mode>_2_relaxed, *cond_<optab><mode>_2_strict)
(*cond_<optab><mode>_2_const_relaxed)
(*cond_<optab><mode>_2_const_strict)
(*cond_<optab><mode>_3_relaxed, *cond_<optab><mode>_3_strict)
(*cond_<optab><mode>_any_relaxed, *cond_<optab><mode>_any_strict)
(*cond_<optab><mode>_any_const_relaxed)
(*cond_<optab><mode>_any_const_strict)
(@aarch64_pred_<optab><mode>, *cond_add<mode>_2_const_relaxed)
(*cond_add<mode>_2_const_strict)
(*cond_add<mode>_any_const_relaxed)
(*cond_add<mode>_any_const_strict, @aarch64_pred_<optab><mode>)
(*cond_<optab><mode>_2_relaxed, *cond_<optab><mode>_2_strict)
(*cond_<optab><mode>_any_relaxed, *cond_<optab><mode>_any_strict)
(@aarch64_pred_<optab><mode>, *cond_sub<mode>_3_const_relaxed)
(*cond_sub<mode>_3_const_strict, *cond_sub<mode>_const_relaxed)
(*cond_sub<mode>_const_strict, *aarch64_pred_abd<mode>_relaxed)
(*aarch64_pred_abd<mode>_strict)
(*aarch64_cond_abd<mode>_2_relaxed)
(*aarch64_cond_abd<mode>_2_strict)
(*aarch64_cond_abd<mode>_3_relaxed)
(*aarch64_cond_abd<mode>_3_strict)
(*aarch64_cond_abd<mode>_any_relaxed)
(*aarch64_cond_abd<mode>_any_strict, @aarch64_pred_<optab><mode>)
(@aarch64_pred_fma<mode>, *cond_fma<mode>_2, *cond_fma<mode>_4)
(*cond_fma<mode>_any, @aarch64_pred_fnma<mode>)
(*cond_fnma<mode>_2, *cond_fnma<mode>_4, *cond_fnma<mode>_any)
(<sur>dot_prod<vsi2qi>, @aarch64_<sur>dot_prod_lane<vsi2qi>)
(@<sur>dot_prod<vsi2qi>, @aarch64_<sur>dot_prod_lane<vsi2qi>)
(@aarch64_sve_add_<optab><vsi2qi>, @aarch64_pred_<optab><mode>)
(*cond_<optab><mode>_2_relaxed, *cond_<optab><mode>_2_strict)
(*cond_<optab><mode>_4_relaxed, *cond_<optab><mode>_4_strict)
(*cond_<optab><mode>_any_relaxed, *cond_<optab><mode>_any_strict)
(@aarch64_<optab>_lane_<mode>, @aarch64_pred_<optab><mode>)
(*cond_<optab><mode>_4_relaxed, *cond_<optab><mode>_4_strict)
(*cond_<optab><mode>_any_relaxed, *cond_<optab><mode>_any_strict)
(@aarch64_<optab>_lane_<mode>, @aarch64_sve_tmad<mode>)
(@aarch64_sve_<sve_fp_op>vnx4sf)
(@aarch64_sve_<sve_fp_op>_lanevnx4sf)
(@aarch64_sve_<sve_fp_op><mode>, *vcond_mask_<mode><vpred>)
(@aarch64_sel_dup<mode>, @aarch64_pred_cmp<cmp_op><mode>)
(*cmp<cmp_op><mode>_cc, *cmp<cmp_op><mode>_ptest)
(@aarch64_pred_fcm<cmp_op><mode>, @fold_extract_<last_op>_<mode>)
(@aarch64_fold_extract_vector_<last_op>_<mode>)
(@aarch64_sve_splice<mode>)
(@aarch64_sve_<optab>_nontrunc<SVE_FULL_F:mode><SVE_FULL_HSDI:mode>)
(@aarch64_sve_<optab>_trunc<VNx2DF_ONLY:mode><VNx4SI_ONLY:mode>)
(*cond_<optab>_nontrunc<SVE_FULL_F:mode><SVE_FULL_HSDI:mode>_relaxed)
(*cond_<optab>_nontrunc<SVE_FULL_F:mode><SVE_FULL_HSDI:mode>_strict)
(*cond_<optab>_trunc<VNx2DF_ONLY:mode><VNx4SI_ONLY:mode>)
(@aarch64_sve_<optab>_nonextend<SVE_FULL_HSDI:mode><SVE_FULL_F:mode>)
(@aarch64_sve_<optab>_extend<VNx4SI_ONLY:mode><VNx2DF_ONLY:mode>)
(*cond_<optab>_nonextend<SVE_FULL_HSDI:mode><SVE_FULL_F:mode>_relaxed)
(*cond_<optab>_nonextend<SVE_FULL_HSDI:mode><SVE_FULL_F:mode>_strict)
(*cond_<optab>_extend<VNx4SI_ONLY:mode><VNx2DF_ONLY:mode>)
(@aarch64_sve_<optab>_trunc<SVE_FULL_SDF:mode><SVE_FULL_HSF:mode>)
(*cond_<optab>_trunc<SVE_FULL_SDF:mode><SVE_FULL_HSF:mode>)
(@aarch64_sve_<optab>_trunc<VNx4SF_ONLY:mode><VNx8BF_ONLY:mode>)
(*cond_<optab>_trunc<VNx4SF_ONLY:mode><VNx8BF_ONLY:mode>)
(@aarch64_sve_<optab>_nontrunc<SVE_FULL_HSF:mode><SVE_FULL_SDF:mode>)
(*cond_<optab>_nontrunc<SVE_FULL_HSF:mode><SVE_FULL_SDF:mode>)
(@aarch64_brk<brk_op>, *aarch64_sve_<inc_dec><mode>_cntp): Update
to new syntax.

* config/aarch64/aarch64-simd.md (aarch64_simd_dup<mode>)
(load_pair<DREG:mode><DREG2:mode>)
(vec_store_pair<DREG:mode><DREG2:mode>, aarch64_simd_stp<mode>)
(aarch64_simd_mov_from_<mode>low)
(aarch64_simd_mov_from_<mode>high, and<mode>3<vczle><vczbe>)
(ior<mode>3<vczle><vczbe>, aarch64_simd_ashr<mode><vczle><vczbe>)
(aarch64_simd_bsl<mode>_internal<vczle><vczbe>)
(*aarch64_simd_bsl<mode>_alt<vczle><vczbe>)
(aarch64_simd_bsldi_internal, aarch64_simd_bsldi_alt)
(store_pair_lanes<mode>, *aarch64_combine_internal<mode>)
(*aarch64_combine_internal_be<mode>, *aarch64_combinez<mode>)
(*aarch64_combinez_be<mode>)
(aarch64_cm<optab><mode><vczle><vczbe>, *aarch64_cm<optab>di)
(aarch64_cm<optab><mode><vczle><vczbe>, *aarch64_mov<mode>)
(*aarch64_be_mov<mode>, *aarch64_be_movoi): Update to new syntax.

gcc/config/aarch64/aarch64-simd.md
gcc/config/aarch64/aarch64-sve.md
gcc/config/aarch64/aarch64-sve2.md
gcc/config/aarch64/aarch64.md

diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index e955691f1be8830efacc237465119764ce2a4942..f487b570b0c962f3fee9c0e5ffeec7eb9329ac2d 100644
 })
 
 (define_insn "aarch64_simd_dup<mode>"
-  [(set (match_operand:VDQ_I 0 "register_operand" "=w, w")
+  [(set (match_operand:VDQ_I 0 "register_operand")
        (vec_duplicate:VDQ_I
-         (match_operand:<VEL> 1 "register_operand" "w,?r")))]
+         (match_operand:<VEL> 1 "register_operand")))]
   "TARGET_SIMD"
-  "@
-   dup\\t%0.<Vtype>, %1.<Vetype>[0]
-   dup\\t%0.<Vtype>, %<vwcore>1"
-  [(set_attr "type" "neon_dup<q>, neon_from_gp<q>")]
+  {@ [ cons: =0 , 1  ; attrs: type      ]
+     [ w        , w  ; neon_dup<q>      ] dup\t%0.<Vtype>, %1.<Vetype>[0]
+     [ w        , ?r ; neon_from_gp<q>  ] dup\t%0.<Vtype>, %<vwcore>1
+  }
 )
 
 (define_insn "aarch64_simd_dup<mode>"
-  [(set (match_operand:VDQF_F16 0 "register_operand" "=w,w")
+  [(set (match_operand:VDQF_F16 0 "register_operand")
        (vec_duplicate:VDQF_F16
-         (match_operand:<VEL> 1 "register_operand" "w,r")))]
+         (match_operand:<VEL> 1 "register_operand")))]
   "TARGET_SIMD"
-  "@
-   dup\\t%0.<Vtype>, %1.<Vetype>[0]
-   dup\\t%0.<Vtype>, %<vwcore>1"
-  [(set_attr "type" "neon_dup<q>, neon_from_gp<q>")]
+  {@ [ cons: =0 , 1 ; attrs: type      ]
+     [ w        , w ; neon_dup<q>      ] dup\t%0.<Vtype>, %1.<Vetype>[0]
+     [ w        , r ; neon_from_gp<q>  ] dup\t%0.<Vtype>, %<vwcore>1
+  }
 )
 
 (define_insn "aarch64_dup_lane<mode>"
 )
 
 (define_insn "load_pair<DREG:mode><DREG2:mode>"
-  [(set (match_operand:DREG 0 "register_operand" "=w,r")
-       (match_operand:DREG 1 "aarch64_mem_pair_operand" "Ump,Ump"))
-   (set (match_operand:DREG2 2 "register_operand" "=w,r")
-       (match_operand:DREG2 3 "memory_operand" "m,m"))]
+  [(set (match_operand:DREG 0 "register_operand")
+       (match_operand:DREG 1 "aarch64_mem_pair_operand"))
+   (set (match_operand:DREG2 2 "register_operand")
+       (match_operand:DREG2 3 "memory_operand"))]
   "TARGET_FLOAT
    && rtx_equal_p (XEXP (operands[3], 0),
                   plus_constant (Pmode,
                                  XEXP (operands[1], 0),
                                  GET_MODE_SIZE (<DREG:MODE>mode)))"
-  "@
-   ldp\t%d0, %d2, %z1
-   ldp\t%x0, %x2, %z1"
-  [(set_attr "type" "neon_ldp,load_16")]
+  {@ [ cons: =0 , 1   , =2 , 3 ; attrs: type ]
+     [ w        , Ump , w  , m ; neon_ldp    ] ldp\t%d0, %d2, %z1
+     [ r        , Ump , r  , m ; load_16     ] ldp\t%x0, %x2, %z1
+  }
 )
 
 (define_insn "vec_store_pair<DREG:mode><DREG2:mode>"
-  [(set (match_operand:DREG 0 "aarch64_mem_pair_operand" "=Ump,Ump")
-       (match_operand:DREG 1 "register_operand" "w,r"))
-   (set (match_operand:DREG2 2 "memory_operand" "=m,m")
-       (match_operand:DREG2 3 "register_operand" "w,r"))]
+  [(set (match_operand:DREG 0 "aarch64_mem_pair_operand")
+       (match_operand:DREG 1 "register_operand"))
+   (set (match_operand:DREG2 2 "memory_operand")
+       (match_operand:DREG2 3 "register_operand"))]
   "TARGET_FLOAT
    && rtx_equal_p (XEXP (operands[2], 0),
                   plus_constant (Pmode,
                                  XEXP (operands[0], 0),
                                  GET_MODE_SIZE (<DREG:MODE>mode)))"
-  "@
-   stp\t%d1, %d3, %z0
-   stp\t%x1, %x3, %z0"
-  [(set_attr "type" "neon_stp,store_16")]
+  {@ [ cons: =0 , 1 , =2 , 3 ; attrs: type ]
+     [ Ump      , w , m  , w ; neon_stp    ] stp\t%d1, %d3, %z0
+     [ Ump      , r , m  , r ; store_16    ] stp\t%x1, %x3, %z0
+  }
 )
 
 (define_insn "aarch64_simd_stp<mode>"
-  [(set (match_operand:VP_2E 0 "aarch64_mem_pair_lanes_operand" "=Umn,Umn")
-       (vec_duplicate:VP_2E (match_operand:<VEL> 1 "register_operand" "w,r")))]
+  [(set (match_operand:VP_2E 0 "aarch64_mem_pair_lanes_operand")
+       (vec_duplicate:VP_2E (match_operand:<VEL> 1 "register_operand")))]
   "TARGET_SIMD"
-  "@
-   stp\\t%<Vetype>1, %<Vetype>1, %y0
-   stp\\t%<vw>1, %<vw>1, %y0"
-  [(set_attr "type" "neon_stp, store_<ldpstp_vel_sz>")]
+  {@ [ cons: =0 , 1 ; attrs: type            ]
+     [ Umn      , w ; neon_stp               ] stp\t%<Vetype>1, %<Vetype>1, %y0
+     [ Umn      , r ; store_<ldpstp_vel_sz>  ] stp\t%<vw>1, %<vw>1, %y0
+  }
 )
 
 (define_insn "load_pair<VQ:mode><VQ2:mode>"
 )
 
 (define_insn_and_split "aarch64_simd_mov_from_<mode>low"
-  [(set (match_operand:<VHALF> 0 "register_operand" "=w,?r")
+  [(set (match_operand:<VHALF> 0 "register_operand")
         (vec_select:<VHALF>
-          (match_operand:VQMOV_NO2E 1 "register_operand" "w,w")
-          (match_operand:VQMOV_NO2E 2 "vect_par_cnst_lo_half" "")))]
+          (match_operand:VQMOV_NO2E 1 "register_operand")
+          (match_operand:VQMOV_NO2E 2 "vect_par_cnst_lo_half")))]
   "TARGET_SIMD"
-  "@
-   #
-   umov\t%0, %1.d[0]"
+  {@ [ cons: =0 , 1 ; attrs: type    ]
+     [ w        , w ; mov_reg        ] #
+     [ ?r       , w ; neon_to_gp<q>  ] umov\t%0, %1.d[0]
+  }
   "&& reload_completed && aarch64_simd_register (operands[0], <VHALF>mode)"
   [(set (match_dup 0) (match_dup 1))]
   {
     operands[1] = aarch64_replace_reg_mode (operands[1], <VHALF>mode);
   }
-  [(set_attr "type" "mov_reg,neon_to_gp<q>")
+  [
    (set_attr "length" "4")]
 )
 
 (define_insn "aarch64_simd_mov_from_<mode>high"
-  [(set (match_operand:<VHALF> 0 "register_operand" "=w,?r,?r")
+  [(set (match_operand:<VHALF> 0 "register_operand")
         (vec_select:<VHALF>
-          (match_operand:VQMOV_NO2E 1 "register_operand" "w,w,w")
-          (match_operand:VQMOV_NO2E 2 "vect_par_cnst_hi_half" "")))]
+          (match_operand:VQMOV_NO2E 1 "register_operand")
+          (match_operand:VQMOV_NO2E 2 "vect_par_cnst_hi_half")))]
   "TARGET_FLOAT"
-  "@
-   dup\t%d0, %1.d[1]
-   umov\t%0, %1.d[1]
-   fmov\t%0, %1.d[1]"
-  [(set_attr "type" "neon_dup<q>,neon_to_gp<q>,f_mrc")
-   (set_attr "arch" "simd,simd,*")
+  {@ [ cons: =0 , 1 ; attrs: type   , arch  ]
+     [ w        , w ; neon_dup<q>   , simd  ] dup\t%d0, %1.d[1]
+     [ ?r       , w ; neon_to_gp<q> , simd  ] umov\t%0, %1.d[1]
+     [ ?r       , w ; f_mrc         , *     ] fmov\t%0, %1.d[1]
+  }
+  [
    (set_attr "length" "4")]
 )
 
 
 ;; For AND (vector, register) and BIC (vector, immediate)
 (define_insn "and<mode>3<vczle><vczbe>"
-  [(set (match_operand:VDQ_I 0 "register_operand" "=w,w")
-       (and:VDQ_I (match_operand:VDQ_I 1 "register_operand" "w,0")
-                  (match_operand:VDQ_I 2 "aarch64_reg_or_bic_imm" "w,Db")))]
+  [(set (match_operand:VDQ_I 0 "register_operand")
+       (and:VDQ_I (match_operand:VDQ_I 1 "register_operand")
+                  (match_operand:VDQ_I 2 "aarch64_reg_or_bic_imm")))]
   "TARGET_SIMD"
-  "@
-   and\t%0.<Vbtype>, %1.<Vbtype>, %2.<Vbtype>
-   * return aarch64_output_simd_mov_immediate (operands[2], <bitsize>,\
-                                              AARCH64_CHECK_BIC);"
+  {@ [ cons: =0 , 1 , 2   ]
+     [ w        , w , w   ] and\t%0.<Vbtype>, %1.<Vbtype>, %2.<Vbtype>
+     [ w        , 0 , Db  ] << aarch64_output_simd_mov_immediate (operands[2], <bitsize>, AARCH64_CHECK_BIC);
+  }
   [(set_attr "type" "neon_logic<q>")]
 )
 
 ;; For ORR (vector, register) and ORR (vector, immediate)
 (define_insn "ior<mode>3<vczle><vczbe>"
-  [(set (match_operand:VDQ_I 0 "register_operand" "=w,w")
-       (ior:VDQ_I (match_operand:VDQ_I 1 "register_operand" "w,0")
-                  (match_operand:VDQ_I 2 "aarch64_reg_or_orr_imm" "w,Do")))]
+  [(set (match_operand:VDQ_I 0 "register_operand")
+       (ior:VDQ_I (match_operand:VDQ_I 1 "register_operand")
+                  (match_operand:VDQ_I 2 "aarch64_reg_or_orr_imm")))]
   "TARGET_SIMD"
-  "@
-   orr\t%0.<Vbtype>, %1.<Vbtype>, %2.<Vbtype>
-   * return aarch64_output_simd_mov_immediate (operands[2], <bitsize>,\
-                                              AARCH64_CHECK_ORR);"
+  {@ [ cons: =0 , 1 , 2   ]
+     [ w        , w , w   ] orr\t%0.<Vbtype>, %1.<Vbtype>, %2.<Vbtype>
+     [ w        , 0 , Do  ] << aarch64_output_simd_mov_immediate (operands[2], <bitsize>, AARCH64_CHECK_ORR);
+  }
   [(set_attr "type" "neon_logic<q>")]
 )
 
 )
 
 (define_insn "aarch64_simd_ashr<mode><vczle><vczbe>"
- [(set (match_operand:VDQ_I 0 "register_operand" "=w,w")
-       (ashiftrt:VDQ_I (match_operand:VDQ_I 1 "register_operand" "w,w")
-                    (match_operand:VDQ_I  2 "aarch64_simd_rshift_imm" "D1,Dr")))]
+ [(set (match_operand:VDQ_I 0 "register_operand")
+       (ashiftrt:VDQ_I (match_operand:VDQ_I 1 "register_operand")
+                    (match_operand:VDQ_I  2 "aarch64_simd_rshift_imm")))]
  "TARGET_SIMD"
- "@
-  cmlt\t%0.<Vtype>, %1.<Vtype>, #0
-  sshr\t%0.<Vtype>, %1.<Vtype>, %2"
-  [(set_attr "type" "neon_compare<q>,neon_shift_imm<q>")]
+ {@ [ cons: =0 , 1 , 2  ; attrs: type        ]
+    [ w        , w , D1 ; neon_compare<q>    ] cmlt\t%0.<Vtype>, %1.<Vtype>, #0
+    [ w        , w , Dr ; neon_shift_imm<q>  ] sshr\t%0.<Vtype>, %1.<Vtype>, %2
+  }
 )
 
 (define_insn "aarch64_<sra_op>sra_n<mode>_insn"
 ;; in *aarch64_simd_bsl<mode>_alt.
 
 (define_insn "aarch64_simd_bsl<mode>_internal<vczle><vczbe>"
-  [(set (match_operand:VDQ_I 0 "register_operand" "=w,w,w")
+  [(set (match_operand:VDQ_I 0 "register_operand")
        (xor:VDQ_I
           (and:VDQ_I
             (xor:VDQ_I
-              (match_operand:<V_INT_EQUIV> 3 "register_operand" "w,0,w")
-              (match_operand:VDQ_I 2 "register_operand" "w,w,0"))
-            (match_operand:VDQ_I 1 "register_operand" "0,w,w"))
+              (match_operand:<V_INT_EQUIV> 3 "register_operand")
+              (match_operand:VDQ_I 2 "register_operand"))
+            (match_operand:VDQ_I 1 "register_operand"))
          (match_dup:<V_INT_EQUIV> 3)
        ))]
   "TARGET_SIMD"
-  "@
-  bsl\\t%0.<Vbtype>, %2.<Vbtype>, %3.<Vbtype>
-  bit\\t%0.<Vbtype>, %2.<Vbtype>, %1.<Vbtype>
-  bif\\t%0.<Vbtype>, %3.<Vbtype>, %1.<Vbtype>"
+  {@ [ cons: =0 , 1 , 2 , 3  ]
+     [ w        , 0 , w , w  ] bsl\t%0.<Vbtype>, %2.<Vbtype>, %3.<Vbtype>
+     [ w        , w , w , 0  ] bit\t%0.<Vbtype>, %2.<Vbtype>, %1.<Vbtype>
+     [ w        , w , 0 , w  ] bif\t%0.<Vbtype>, %3.<Vbtype>, %1.<Vbtype>
+  }
   [(set_attr "type" "neon_bsl<q>")]
 )
 
 ;; permutations of commutative operations, we have to have a separate pattern.
 
 (define_insn "*aarch64_simd_bsl<mode>_alt<vczle><vczbe>"
-  [(set (match_operand:VDQ_I 0 "register_operand" "=w,w,w")
+  [(set (match_operand:VDQ_I 0 "register_operand")
        (xor:VDQ_I
           (and:VDQ_I
             (xor:VDQ_I
-              (match_operand:VDQ_I 3 "register_operand" "w,w,0")
-              (match_operand:<V_INT_EQUIV> 2 "register_operand" "w,0,w"))
-             (match_operand:VDQ_I 1 "register_operand" "0,w,w"))
+              (match_operand:VDQ_I 3 "register_operand")
+              (match_operand:<V_INT_EQUIV> 2 "register_operand"))
+             (match_operand:VDQ_I 1 "register_operand"))
          (match_dup:<V_INT_EQUIV> 2)))]
   "TARGET_SIMD"
-  "@
-  bsl\\t%0.<Vbtype>, %3.<Vbtype>, %2.<Vbtype>
-  bit\\t%0.<Vbtype>, %3.<Vbtype>, %1.<Vbtype>
-  bif\\t%0.<Vbtype>, %2.<Vbtype>, %1.<Vbtype>"
+  {@ [ cons: =0 , 1 , 2 , 3  ]
+     [ w        , 0 , w , w  ] bsl\t%0.<Vbtype>, %3.<Vbtype>, %2.<Vbtype>
+     [ w        , w , 0 , w  ] bit\t%0.<Vbtype>, %3.<Vbtype>, %1.<Vbtype>
+     [ w        , w , w , 0  ] bif\t%0.<Vbtype>, %2.<Vbtype>, %1.<Vbtype>
+  }
   [(set_attr "type" "neon_bsl<q>")]
 )
 
 ;; would be better calculated on the integer side.
 
 (define_insn_and_split "aarch64_simd_bsldi_internal"
-  [(set (match_operand:DI 0 "register_operand" "=w,w,w,&r")
+  [(set (match_operand:DI 0 "register_operand")
        (xor:DI
           (and:DI
             (xor:DI
-              (match_operand:DI 3 "register_operand" "w,0,w,r")
-              (match_operand:DI 2 "register_operand" "w,w,0,r"))
-            (match_operand:DI 1 "register_operand" "0,w,w,r"))
+              (match_operand:DI 3 "register_operand")
+              (match_operand:DI 2 "register_operand"))
+            (match_operand:DI 1 "register_operand"))
          (match_dup:DI 3)
        ))]
   "TARGET_SIMD"
-  "@
-  bsl\\t%0.8b, %2.8b, %3.8b
-  bit\\t%0.8b, %2.8b, %1.8b
-  bif\\t%0.8b, %3.8b, %1.8b
-  #"
+  {@ [ cons: =0 , 1 , 2 , 3 ; attrs: type , length ]
+     [ w        , 0 , w , w ; neon_bsl    , 4      ] bsl\t%0.8b, %2.8b, %3.8b
+     [ w        , w , w , 0 ; neon_bsl    , 4      ] bit\t%0.8b, %2.8b, %1.8b
+     [ w        , w , 0 , w ; neon_bsl    , 4      ] bif\t%0.8b, %3.8b, %1.8b
+     [ &r       , r , r , r ; multiple    , 12     ] #
+  }
   "&& REG_P (operands[0]) && GP_REGNUM_P (REGNO (operands[0]))"
   [(match_dup 1) (match_dup 1) (match_dup 2) (match_dup 3)]
 {
   emit_insn (gen_xordi3 (operands[0], scratch, operands[3]));
   DONE;
 }
-  [(set_attr "type" "neon_bsl,neon_bsl,neon_bsl,multiple")
-   (set_attr "length" "4,4,4,12")]
 )
 
 (define_insn_and_split "aarch64_simd_bsldi_alt"
-  [(set (match_operand:DI 0 "register_operand" "=w,w,w,&r")
+  [(set (match_operand:DI 0 "register_operand")
        (xor:DI
           (and:DI
             (xor:DI
-              (match_operand:DI 3 "register_operand" "w,w,0,r")
-              (match_operand:DI 2 "register_operand" "w,0,w,r"))
-            (match_operand:DI 1 "register_operand" "0,w,w,r"))
+              (match_operand:DI 3 "register_operand")
+              (match_operand:DI 2 "register_operand"))
+            (match_operand:DI 1 "register_operand"))
          (match_dup:DI 2)
        ))]
   "TARGET_SIMD"
-  "@
-  bsl\\t%0.8b, %3.8b, %2.8b
-  bit\\t%0.8b, %3.8b, %1.8b
-  bif\\t%0.8b, %2.8b, %1.8b
-  #"
+  {@ [ cons: =0 , 1 , 2 , 3 ; attrs: type , length ]
+     [ w        , 0 , w , w ; neon_bsl    , 4      ] bsl\t%0.8b, %3.8b, %2.8b
+     [ w        , w , 0 , w ; neon_bsl    , 4      ] bit\t%0.8b, %3.8b, %1.8b
+     [ w        , w , w , 0 ; neon_bsl    , 4      ] bif\t%0.8b, %2.8b, %1.8b
+     [ &r       , r , r , r ; multiple    , 12     ] #
+  }
   "&& REG_P (operands[0]) && GP_REGNUM_P (REGNO (operands[0]))"
   [(match_dup 0) (match_dup 1) (match_dup 2) (match_dup 3)]
 {
   emit_insn (gen_xordi3 (operands[0], scratch, operands[2]));
   DONE;
 }
-  [(set_attr "type" "neon_bsl,neon_bsl,neon_bsl,multiple")
-   (set_attr "length" "4,4,4,12")]
 )
 
 (define_expand "aarch64_simd_bsl<mode>"
 ;; This dedicated pattern must come first.
 
 (define_insn "store_pair_lanes<mode>"
-  [(set (match_operand:<VDBL> 0 "aarch64_mem_pair_lanes_operand" "=Umn, Umn")
+  [(set (match_operand:<VDBL> 0 "aarch64_mem_pair_lanes_operand")
        (vec_concat:<VDBL>
-          (match_operand:VDCSIF 1 "register_operand" "w, r")
-          (match_operand:VDCSIF 2 "register_operand" "w, r")))]
+          (match_operand:VDCSIF 1 "register_operand")
+          (match_operand:VDCSIF 2 "register_operand")))]
   "TARGET_FLOAT"
-  "@
-   stp\t%<single_type>1, %<single_type>2, %y0
-   stp\t%<single_wx>1, %<single_wx>2, %y0"
-  [(set_attr "type" "neon_stp, store_16")]
+  {@ [ cons: =0 , 1 , 2 ; attrs: type ]
+     [ Umn      , w , w ; neon_stp    ] stp\t%<single_type>1, %<single_type>2, %y0
+     [ Umn      , r , r ; store_16    ] stp\t%<single_wx>1, %<single_wx>2, %y0
+  }
 )
 
 ;; Form a vector whose least significant half comes from operand 1 and whose
 ;; the register alternatives either don't accept or themselves disparage.
 
 (define_insn "*aarch64_combine_internal<mode>"
-  [(set (match_operand:<VDBL> 0 "aarch64_reg_or_mem_pair_operand" "=w, w, w, w, Umn, Umn")
+  [(set (match_operand:<VDBL> 0 "aarch64_reg_or_mem_pair_operand")
        (vec_concat:<VDBL>
-         (match_operand:VDCSIF 1 "register_operand" "0, 0, 0, 0, ?w, ?r")
-         (match_operand:VDCSIF 2 "aarch64_simd_nonimmediate_operand" "w, ?r, ?r, Utv, w, ?r")))]
+         (match_operand:VDCSIF 1 "register_operand")
+         (match_operand:VDCSIF 2 "aarch64_simd_nonimmediate_operand")))]
   "TARGET_FLOAT
    && !BYTES_BIG_ENDIAN
    && (register_operand (operands[0], <VDBL>mode)
        || register_operand (operands[2], <MODE>mode))"
-  "@
-   ins\t%0.<single_type>[1], %2.<single_type>[0]
-   ins\t%0.<single_type>[1], %<single_wx>2
-   fmov\t%0.d[1], %2
-   ld1\t{%0.<single_type>}[1], %2
-   stp\t%<single_type>1, %<single_type>2, %y0
-   stp\t%<single_wx>1, %<single_wx>2, %y0"
-  [(set_attr "type" "neon_ins<dblq>, neon_from_gp<dblq>, f_mcr,
-                    neon_load1_one_lane<dblq>, neon_stp, store_16")
-   (set_attr "arch" "simd,simd,*,simd,*,*")]
+  {@ [ cons: =0 , 1  , 2   ; attrs: type               , arch  ]
+     [ w        , 0  , w   ; neon_ins<dblq>            , simd  ] ins\t%0.<single_type>[1], %2.<single_type>[0]
+     [ w        , 0  , ?r  ; neon_from_gp<dblq>        , simd  ] ins\t%0.<single_type>[1], %<single_wx>2
+     [ w        , 0  , ?r  ; f_mcr                     , *     ] fmov\t%0.d[1], %2
+     [ w        , 0  , Utv ; neon_load1_one_lane<dblq> , simd  ] ld1\t{%0.<single_type>}[1], %2
+     [ Umn      , ?w , w   ; neon_stp                  , *     ] stp\t%<single_type>1, %<single_type>2, %y0
+     [ Umn      , ?r , ?r  ; store_16                  , *     ] stp\t%<single_wx>1, %<single_wx>2, %y0
+  }
 )
 
 (define_insn "*aarch64_combine_internal_be<mode>"
-  [(set (match_operand:<VDBL> 0 "aarch64_reg_or_mem_pair_operand" "=w, w, w, w, Umn, Umn")
+  [(set (match_operand:<VDBL> 0 "aarch64_reg_or_mem_pair_operand")
        (vec_concat:<VDBL>
-         (match_operand:VDCSIF 2 "aarch64_simd_nonimmediate_operand" "w, ?r, ?r, Utv, ?w, ?r")
-         (match_operand:VDCSIF 1 "register_operand" "0, 0, 0, 0, ?w, ?r")))]
+         (match_operand:VDCSIF 2 "aarch64_simd_nonimmediate_operand")
+         (match_operand:VDCSIF 1 "register_operand")))]
   "TARGET_FLOAT
    && BYTES_BIG_ENDIAN
    && (register_operand (operands[0], <VDBL>mode)
        || register_operand (operands[2], <MODE>mode))"
-  "@
-   ins\t%0.<single_type>[1], %2.<single_type>[0]
-   ins\t%0.<single_type>[1], %<single_wx>2
-   fmov\t%0.d[1], %2
-   ld1\t{%0.<single_type>}[1], %2
-   stp\t%<single_type>2, %<single_type>1, %y0
-   stp\t%<single_wx>2, %<single_wx>1, %y0"
-  [(set_attr "type" "neon_ins<dblq>, neon_from_gp<dblq>, f_mcr, neon_load1_one_lane<dblq>, neon_stp, store_16")
-   (set_attr "arch" "simd,simd,*,simd,*,*")]
+  {@ [ cons: =0 , 1  , 2   ; attrs: type               , arch  ]
+     [ w        , 0  , w   ; neon_ins<dblq>            , simd  ] ins\t%0.<single_type>[1], %2.<single_type>[0]
+     [ w        , 0  , ?r  ; neon_from_gp<dblq>        , simd  ] ins\t%0.<single_type>[1], %<single_wx>2
+     [ w        , 0  , ?r  ; f_mcr                     , *     ] fmov\t%0.d[1], %2
+     [ w        , 0  , Utv ; neon_load1_one_lane<dblq> , simd  ] ld1\t{%0.<single_type>}[1], %2
+     [ Umn      , ?w , ?w  ; neon_stp                  , *     ] stp\t%<single_type>2, %<single_type>1, %y0
+     [ Umn      , ?r , ?r  ; store_16                  , *     ] stp\t%<single_wx>2, %<single_wx>1, %y0
+  }
 )
 
 ;; In this insn, operand 1 should be low, and operand 2 the high part of the
 ;; dest vector.
 
 (define_insn "*aarch64_combinez<mode>"
-  [(set (match_operand:<VDBL> 0 "register_operand" "=w,w,w")
+  [(set (match_operand:<VDBL> 0 "register_operand")
        (vec_concat:<VDBL>
-         (match_operand:VDCSIF 1 "nonimmediate_operand" "w,?r,m")
+         (match_operand:VDCSIF 1 "nonimmediate_operand")
          (match_operand:VDCSIF 2 "aarch64_simd_or_scalar_imm_zero")))]
   "TARGET_FLOAT && !BYTES_BIG_ENDIAN"
-  "@
-   fmov\\t%<single_type>0, %<single_type>1
-   fmov\t%<single_type>0, %<single_wx>1
-   ldr\\t%<single_type>0, %1"
-  [(set_attr "type" "neon_move<q>, neon_from_gp, neon_load1_1reg")]
+  {@ [ cons: =0 , 1  ; attrs: type      ]
+     [ w        , w  ; neon_move<q>     ] fmov\t%<single_type>0, %<single_type>1
+     [ w        , ?r ; neon_from_gp     ] fmov\t%<single_type>0, %<single_wx>1
+     [ w        , m  ; neon_load1_1reg  ] ldr\t%<single_type>0, %1
+  }
 )
 
 (define_insn "*aarch64_combinez_be<mode>"
-  [(set (match_operand:<VDBL> 0 "register_operand" "=w,w,w")
+  [(set (match_operand:<VDBL> 0 "register_operand")
         (vec_concat:<VDBL>
          (match_operand:VDCSIF 2 "aarch64_simd_or_scalar_imm_zero")
-         (match_operand:VDCSIF 1 "nonimmediate_operand" "w,?r,m")))]
+         (match_operand:VDCSIF 1 "nonimmediate_operand")))]
   "TARGET_FLOAT && BYTES_BIG_ENDIAN"
-  "@
-   fmov\\t%<single_type>0, %<single_type>1
-   fmov\t%<single_type>0, %<single_wx>1
-   ldr\\t%<single_type>0, %1"
-  [(set_attr "type" "neon_move<q>, neon_from_gp, neon_load1_1reg")]
+  {@ [ cons: =0 , 1  ; attrs: type      ]
+     [ w        , w  ; neon_move<q>     ] fmov\t%<single_type>0, %<single_type>1
+     [ w        , ?r ; neon_from_gp     ] fmov\t%<single_type>0, %<single_wx>1
+     [ w        , m  ; neon_load1_1reg  ] ldr\t%<single_type>0, %1
+  }
 )
 
 ;; Form a vector whose first half (in array order) comes from operand 1
 ;; have different ideas of what should be passed to this pattern.
 
 (define_insn "aarch64_cm<optab><mode><vczle><vczbe>"
-  [(set (match_operand:<V_INT_EQUIV> 0 "register_operand" "=w,w")
+  [(set (match_operand:<V_INT_EQUIV> 0 "register_operand")
        (neg:<V_INT_EQUIV>
          (COMPARISONS:<V_INT_EQUIV>
-           (match_operand:VDQ_I 1 "register_operand" "w,w")
-           (match_operand:VDQ_I 2 "aarch64_simd_reg_or_zero" "w,ZDz")
+           (match_operand:VDQ_I 1 "register_operand")
+           (match_operand:VDQ_I 2 "aarch64_simd_reg_or_zero")
          )))]
   "TARGET_SIMD"
-  "@
-  cm<n_optab>\t%<v>0<Vmtype>, %<v><cmp_1><Vmtype>, %<v><cmp_2><Vmtype>
-  cm<optab>\t%<v>0<Vmtype>, %<v>1<Vmtype>, #0"
-  [(set_attr "type" "neon_compare<q>, neon_compare_zero<q>")]
+  {@ [ cons: =0 , 1 , 2   ; attrs: type           ]
+     [ w        , w , w   ; neon_compare<q>       ] cm<n_optab>\t%<v>0<Vmtype>, %<v><cmp_1><Vmtype>, %<v><cmp_2><Vmtype>
+     [ w        , w , ZDz ; neon_compare_zero<q>  ] cm<optab>\t%<v>0<Vmtype>, %<v>1<Vmtype>, #0
+  }
 )
 
 (define_insn_and_split "aarch64_cm<optab>di"
 )
 
 (define_insn "*aarch64_cm<optab>di"
-  [(set (match_operand:DI 0 "register_operand" "=w,w")
+  [(set (match_operand:DI 0 "register_operand")
        (neg:DI
          (COMPARISONS:DI
-           (match_operand:DI 1 "register_operand" "w,w")
-           (match_operand:DI 2 "aarch64_simd_reg_or_zero" "w,ZDz")
+           (match_operand:DI 1 "register_operand")
+           (match_operand:DI 2 "aarch64_simd_reg_or_zero")
          )))]
   "TARGET_SIMD && reload_completed"
-  "@
-  cm<n_optab>\t%d0, %d<cmp_1>, %d<cmp_2>
-  cm<optab>\t%d0, %d1, #0"
-  [(set_attr "type" "neon_compare, neon_compare_zero")]
+  {@ [ cons: =0 , 1 , 2   ; attrs: type        ]
+     [ w        , w , w   ; neon_compare       ] cm<n_optab>\t%d0, %d<cmp_1>, %d<cmp_2>
+     [ w        , w , ZDz ; neon_compare_zero  ] cm<optab>\t%d0, %d1, #0
+  }
 )
 
 ;; cm(hs|hi)
 ;; fcm(eq|ge|gt|le|lt)
 
 (define_insn "aarch64_cm<optab><mode><vczle><vczbe>"
-  [(set (match_operand:<V_INT_EQUIV> 0 "register_operand" "=w,w")
+  [(set (match_operand:<V_INT_EQUIV> 0 "register_operand")
        (neg:<V_INT_EQUIV>
          (COMPARISONS:<V_INT_EQUIV>
-           (match_operand:VHSDF_HSDF 1 "register_operand" "w,w")
-           (match_operand:VHSDF_HSDF 2 "aarch64_simd_reg_or_zero" "w,YDz")
+           (match_operand:VHSDF_HSDF 1 "register_operand")
+           (match_operand:VHSDF_HSDF 2 "aarch64_simd_reg_or_zero")
          )))]
   "TARGET_SIMD"
-  "@
-  fcm<n_optab>\t%<v>0<Vmtype>, %<v><cmp_1><Vmtype>, %<v><cmp_2><Vmtype>
-  fcm<optab>\t%<v>0<Vmtype>, %<v>1<Vmtype>, 0"
+  {@ [ cons: =0 , 1 , 2    ]
+     [ w        , w , w    ] fcm<n_optab>\t%<v>0<Vmtype>, %<v><cmp_1><Vmtype>, %<v><cmp_2><Vmtype>
+     [ w        , w , YDz  ] fcm<optab>\t%<v>0<Vmtype>, %<v>1<Vmtype>, 0
+  }
   [(set_attr "type" "neon_fp_compare_<stype><q>")]
 )
 
 )
 
 (define_insn "*aarch64_mov<mode>"
-  [(set (match_operand:VSTRUCT_QD 0 "aarch64_simd_nonimmediate_operand" "=w,Utv,w")
-       (match_operand:VSTRUCT_QD 1 "aarch64_simd_general_operand" " w,w,Utv"))]
+  [(set (match_operand:VSTRUCT_QD 0 "aarch64_simd_nonimmediate_operand")
+       (match_operand:VSTRUCT_QD 1 "aarch64_simd_general_operand"))]
   "TARGET_SIMD && !BYTES_BIG_ENDIAN
    && (register_operand (operands[0], <MODE>mode)
        || register_operand (operands[1], <MODE>mode))"
-  "@
-   #
-   st1\\t{%S1.<Vtype> - %<Vendreg>1.<Vtype>}, %0
-   ld1\\t{%S0.<Vtype> - %<Vendreg>0.<Vtype>}, %1"
-  [(set_attr "type" "multiple,neon_store<nregs>_<nregs>reg_q,\
-                    neon_load<nregs>_<nregs>reg_q")
-   (set_attr "length" "<insn_count>,4,4")]
+  {@ [ cons: =0 , 1   ; attrs: type                    , length        ]
+     [ w        , w   ; multiple                       , <insn_count>  ] #
+     [ Utv      , w   ; neon_store<nregs>_<nregs>reg_q , 4             ] st1\t{%S1.<Vtype> - %<Vendreg>1.<Vtype>}, %0
+     [ w        , Utv ; neon_load<nregs>_<nregs>reg_q  , 4             ] ld1\t{%S0.<Vtype> - %<Vendreg>0.<Vtype>}, %1
+  }
 )
 
 (define_insn "*aarch64_mov<mode>"
-  [(set (match_operand:VSTRUCT 0 "aarch64_simd_nonimmediate_operand" "=w,Utv,w")
-       (match_operand:VSTRUCT 1 "aarch64_simd_general_operand" " w,w,Utv"))]
+  [(set (match_operand:VSTRUCT 0 "aarch64_simd_nonimmediate_operand")
+       (match_operand:VSTRUCT 1 "aarch64_simd_general_operand"))]
   "TARGET_SIMD && !BYTES_BIG_ENDIAN
    && (register_operand (operands[0], <MODE>mode)
        || register_operand (operands[1], <MODE>mode))"
-  "@
-   #
-   st1\\t{%S1.16b - %<Vendreg>1.16b}, %0
-   ld1\\t{%S0.16b - %<Vendreg>0.16b}, %1"
-  [(set_attr "type" "multiple,neon_store<nregs>_<nregs>reg_q,\
-                    neon_load<nregs>_<nregs>reg_q")
-   (set_attr "length" "<insn_count>,4,4")]
+  {@ [ cons: =0 , 1   ; attrs: type                    , length        ]
+     [ w        , w   ; multiple                       , <insn_count>  ] #
+     [ Utv      , w   ; neon_store<nregs>_<nregs>reg_q , 4             ] st1\t{%S1.16b - %<Vendreg>1.16b}, %0
+     [ w        , Utv ; neon_load<nregs>_<nregs>reg_q  , 4             ] ld1\t{%S0.16b - %<Vendreg>0.16b}, %1
+  }
 )
 
 (define_insn "*aarch64_movv8di"
 )
 
 (define_insn "*aarch64_be_mov<mode>"
-  [(set (match_operand:VSTRUCT_2D 0 "nonimmediate_operand" "=w,m,w")
-       (match_operand:VSTRUCT_2D 1 "general_operand"      " w,w,m"))]
+  [(set (match_operand:VSTRUCT_2D 0 "nonimmediate_operand")
+       (match_operand:VSTRUCT_2D 1 "general_operand"))]
   "TARGET_FLOAT
    && (!TARGET_SIMD || BYTES_BIG_ENDIAN)
    && (register_operand (operands[0], <MODE>mode)
        || register_operand (operands[1], <MODE>mode))"
-  "@
-   #
-   stp\\t%d1, %R1, %0
-   ldp\\t%d0, %R0, %1"
-  [(set_attr "type" "multiple,neon_stp,neon_ldp")
-   (set_attr "length" "8,4,4")]
+  {@ [ cons: =0 , 1 ; attrs: type , length ]
+     [ w        , w ; multiple    , 8      ] #
+     [ m        , w ; neon_stp    , 4      ] stp\t%d1, %R1, %0
+     [ w        , m ; neon_ldp    , 4      ] ldp\t%d0, %R0, %1
+  }
 )
 
 (define_insn "*aarch64_be_mov<mode>"
-  [(set (match_operand:VSTRUCT_2Q 0 "nonimmediate_operand" "=w,m,w")
-       (match_operand:VSTRUCT_2Q 1 "general_operand"      " w,w,m"))]
+  [(set (match_operand:VSTRUCT_2Q 0 "nonimmediate_operand")
+       (match_operand:VSTRUCT_2Q 1 "general_operand"))]
   "TARGET_FLOAT
    && (!TARGET_SIMD || BYTES_BIG_ENDIAN)
    && (register_operand (operands[0], <MODE>mode)
        || register_operand (operands[1], <MODE>mode))"
-  "@
-   #
-   stp\\t%q1, %R1, %0
-   ldp\\t%q0, %R0, %1"
-  [(set_attr "type" "multiple,neon_stp_q,neon_ldp_q")
-   (set_attr "arch" "simd,*,*")
-   (set_attr "length" "8,4,4")]
+  {@ [ cons: =0 , 1 ; attrs: type , arch , length ]
+     [ w        , w ; multiple    , simd , 8      ] #
+     [ m        , w ; neon_stp_q  , *    , 4      ] stp\t%q1, %R1, %0
+     [ w        , m ; neon_ldp_q  , *    , 4      ] ldp\t%q0, %R0, %1
+  }
 )
 
 (define_insn "*aarch64_be_movoi"
-  [(set (match_operand:OI 0 "nonimmediate_operand" "=w,m,w")
-       (match_operand:OI 1 "general_operand"      " w,w,m"))]
+  [(set (match_operand:OI 0 "nonimmediate_operand")
+       (match_operand:OI 1 "general_operand"))]
   "TARGET_FLOAT
    && (!TARGET_SIMD || BYTES_BIG_ENDIAN)
    && (register_operand (operands[0], OImode)
        || register_operand (operands[1], OImode))"
-  "@
-   #
-   stp\\t%q1, %R1, %0
-   ldp\\t%q0, %R0, %1"
-  [(set_attr "type" "multiple,neon_stp_q,neon_ldp_q")
-   (set_attr "arch" "simd,*,*")
-   (set_attr "length" "8,4,4")]
+  {@ [ cons: =0 , 1 ; attrs: type , arch , length ]
+     [ w        , w ; multiple    , simd , 8      ] #
+     [ m        , w ; neon_stp_q  , *    , 4      ] stp\t%q1, %R1, %0
+     [ w        , m ; neon_ldp_q  , *    , 4      ] ldp\t%q0, %R0, %1
+  }
 )
 
 (define_insn "*aarch64_be_mov<mode>"
diff --git a/gcc/config/aarch64/aarch64-sve.md b/gcc/config/aarch64/aarch64-sve.md
index b223e7d3c9d27d409a4e45ce0276b9bafdc81668..5a652d8536a0ef9461f40da7b22834e683e73ceb 100644
 ;; and after RA; before RA we want the predicated load and store patterns to
 ;; be used instead.
 (define_insn "*aarch64_sve_mov<mode>_ldr_str"
-  [(set (match_operand:SVE_FULL 0 "aarch64_sve_nonimmediate_operand" "=w, Utr, w, w")
-       (match_operand:SVE_FULL 1 "aarch64_sve_general_operand" "Utr, w, w, Dn"))]
+  [(set (match_operand:SVE_FULL 0 "aarch64_sve_nonimmediate_operand")
+       (match_operand:SVE_FULL 1 "aarch64_sve_general_operand"))]
   "TARGET_SVE
    && (<MODE>mode == VNx16QImode || !BYTES_BIG_ENDIAN)
    && ((lra_in_progress || reload_completed)
        || (register_operand (operands[0], <MODE>mode)
           && nonmemory_operand (operands[1], <MODE>mode)))"
-  "@
-   ldr\t%0, %1
-   str\t%1, %0
-   mov\t%0.d, %1.d
-   * return aarch64_output_sve_mov_immediate (operands[1]);"
+  {@ [ cons: =0 , 1    ]
+     [ w        , Utr  ] ldr\t%0, %1
+     [ Utr      , w    ] str\t%1, %0
+     [ w        , w    ] mov\t%0.d, %1.d
+     [ w        , Dn   ] << aarch64_output_sve_mov_immediate (operands[1]);
+  }
 )
 
 ;; Unpredicated moves that cannot use LDR and STR, i.e. partial vectors
 ;; or vectors for which little-endian ordering isn't acceptable.  Memory
 ;; accesses require secondary reloads.
 (define_insn "*aarch64_sve_mov<mode>_no_ldr_str"
-  [(set (match_operand:SVE_ALL 0 "register_operand" "=w, w")
-       (match_operand:SVE_ALL 1 "aarch64_nonmemory_operand" "w, Dn"))]
+  [(set (match_operand:SVE_ALL 0 "register_operand")
+       (match_operand:SVE_ALL 1 "aarch64_nonmemory_operand"))]
   "TARGET_SVE
    && <MODE>mode != VNx16QImode
    && (BYTES_BIG_ENDIAN
        || maybe_ne (BYTES_PER_SVE_VECTOR, GET_MODE_SIZE (<MODE>mode)))"
-  "@
-   mov\t%0.d, %1.d
-   * return aarch64_output_sve_mov_immediate (operands[1]);"
+  {@ [ cons: =0 , 1   ]
+     [ w        , w   ] mov\t%0.d, %1.d
+     [ w        , Dn  ] << aarch64_output_sve_mov_immediate (operands[1]);
+  }
 )
 
 ;; Handle memory reloads for modes that can't use LDR and STR.  We use
 ;; Note that this pattern is generated directly by aarch64_emit_sve_pred_move,
 ;; so changes to this pattern will need changes there as well.
 (define_insn_and_split "@aarch64_pred_mov<mode>"
-  [(set (match_operand:SVE_ALL 0 "nonimmediate_operand" "=w, w, m")
+  [(set (match_operand:SVE_ALL 0 "nonimmediate_operand")
        (unspec:SVE_ALL
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
-          (match_operand:SVE_ALL 2 "nonimmediate_operand" "w, m, w")]
+         [(match_operand:<VPRED> 1 "register_operand")
+          (match_operand:SVE_ALL 2 "nonimmediate_operand")]
          UNSPEC_PRED_X))]
   "TARGET_SVE
    && (register_operand (operands[0], <MODE>mode)
        || register_operand (operands[2], <MODE>mode))"
-  "@
-   #
-   ld1<Vesize>\t%0.<Vctype>, %1/z, %2
-   st1<Vesize>\t%2.<Vctype>, %1, %0"
+  {@ [ cons: =0 , 1   , 2  ]
+     [ w        , Upl , w  ] #
+     [ w        , Upl , m  ] ld1<Vesize>\t%0.<Vctype>, %1/z, %2
+     [ m        , Upl , w  ] st1<Vesize>\t%2.<Vctype>, %1, %0
+  }
   "&& register_operand (operands[0], <MODE>mode)
    && register_operand (operands[2], <MODE>mode)"
   [(set (match_dup 0) (match_dup 2))]
 )
 
 (define_insn "*aarch64_sve_mov<mode>"
-  [(set (match_operand:PRED_ALL 0 "nonimmediate_operand" "=Upa, m, Upa, Upa")
-       (match_operand:PRED_ALL 1 "aarch64_mov_operand" "Upa, Upa, m, Dn"))]
+  [(set (match_operand:PRED_ALL 0 "nonimmediate_operand")
+       (match_operand:PRED_ALL 1 "aarch64_mov_operand"))]
   "TARGET_SVE
    && (register_operand (operands[0], <MODE>mode)
        || register_operand (operands[1], <MODE>mode))"
-  "@
-   mov\t%0.b, %1.b
-   str\t%1, %0
-   ldr\t%0, %1
-   * return aarch64_output_sve_mov_immediate (operands[1]);"
+  {@ [ cons: =0 , 1    ]
+     [ Upa      , Upa  ] mov\t%0.b, %1.b
+     [ m        , Upa  ] str\t%1, %0
+     [ Upa      , m    ] ldr\t%0, %1
+     [ Upa      , Dn   ] << aarch64_output_sve_mov_immediate (operands[1]);
+  }
 )
 
 ;; Match PTRUES Pn.B when both the predicate and flags are useful.
 ;; Write to the FFR and start a new FFRT scheduling region.
 (define_insn "aarch64_wrffr"
   [(set (reg:VNx16BI FFR_REGNUM)
-       (match_operand:VNx16BI 0 "aarch64_simd_reg_or_minus_one" "Dm, Upa"))
+       (match_operand:VNx16BI 0 "aarch64_simd_reg_or_minus_one"))
    (set (reg:VNx16BI FFRT_REGNUM)
        (unspec:VNx16BI [(match_dup 0)] UNSPEC_WRFFR))]
   "TARGET_SVE"
-  "@
-   setffr
-   wrffr\t%0.b"
+  {@ [ cons: 0 ]
+     [ Dm      ] setffr
+     [ Upa     ] wrffr\t%0.b
+  }
 )
 
 ;; [L2 in the block comment above about FFR handling]
 (define_insn "mask_scatter_store<mode><v_int_container>"
   [(set (mem:BLK (scratch))
        (unspec:BLK
-         [(match_operand:VNx4BI 5 "register_operand" "Upl, Upl, Upl, Upl, Upl, Upl")
-          (match_operand:DI 0 "aarch64_sve_gather_offset_<Vesize>" "Z, vgw, rk, rk, rk, rk")
-          (match_operand:VNx4SI 1 "register_operand" "w, w, w, w, w, w")
-          (match_operand:DI 2 "const_int_operand" "Ui1, Ui1, Z, Ui1, Z, Ui1")
-          (match_operand:DI 3 "aarch64_gather_scale_operand_<Vesize>" "Ui1, Ui1, Ui1, Ui1, i, i")
-          (match_operand:SVE_4 4 "register_operand" "w, w, w, w, w, w")]
+         [(match_operand:VNx4BI 5 "register_operand")
+          (match_operand:DI 0 "aarch64_sve_gather_offset_<Vesize>")
+          (match_operand:VNx4SI 1 "register_operand")
+          (match_operand:DI 2 "const_int_operand")
+          (match_operand:DI 3 "aarch64_gather_scale_operand_<Vesize>")
+          (match_operand:SVE_4 4 "register_operand")]
          UNSPEC_ST1_SCATTER))]
   "TARGET_SVE"
-  "@
-   st1<Vesize>\t%4.s, %5, [%1.s]
-   st1<Vesize>\t%4.s, %5, [%1.s, #%0]
-   st1<Vesize>\t%4.s, %5, [%0, %1.s, sxtw]
-   st1<Vesize>\t%4.s, %5, [%0, %1.s, uxtw]
-   st1<Vesize>\t%4.s, %5, [%0, %1.s, sxtw %p3]
-   st1<Vesize>\t%4.s, %5, [%0, %1.s, uxtw %p3]"
+  {@ [ cons: 0 , 1 , 2   , 3   , 4 , 5    ]
+     [ Z       , w , Ui1 , Ui1 , w , Upl  ] st1<Vesize>\t%4.s, %5, [%1.s]
+     [ vgw     , w , Ui1 , Ui1 , w , Upl  ] st1<Vesize>\t%4.s, %5, [%1.s, #%0]
+     [ rk      , w , Z   , Ui1 , w , Upl  ] st1<Vesize>\t%4.s, %5, [%0, %1.s, sxtw]
+     [ rk      , w , Ui1 , Ui1 , w , Upl  ] st1<Vesize>\t%4.s, %5, [%0, %1.s, uxtw]
+     [ rk      , w , Z   , i   , w , Upl  ] st1<Vesize>\t%4.s, %5, [%0, %1.s, sxtw %p3]
+     [ rk      , w , Ui1 , i   , w , Upl  ] st1<Vesize>\t%4.s, %5, [%0, %1.s, uxtw %p3]
+  }
 )
 
 ;; Predicated scatter stores for 64-bit elements.  The value of operand 2
 (define_insn "mask_scatter_store<mode><v_int_container>"
   [(set (mem:BLK (scratch))
        (unspec:BLK
-         [(match_operand:VNx2BI 5 "register_operand" "Upl, Upl, Upl, Upl")
-          (match_operand:DI 0 "aarch64_sve_gather_offset_<Vesize>" "Z, vgd, rk, rk")
-          (match_operand:VNx2DI 1 "register_operand" "w, w, w, w")
+         [(match_operand:VNx2BI 5 "register_operand")
+          (match_operand:DI 0 "aarch64_sve_gather_offset_<Vesize>")
+          (match_operand:VNx2DI 1 "register_operand")
           (match_operand:DI 2 "const_int_operand")
-          (match_operand:DI 3 "aarch64_gather_scale_operand_<Vesize>" "Ui1, Ui1, Ui1, i")
-          (match_operand:SVE_2 4 "register_operand" "w, w, w, w")]
+          (match_operand:DI 3 "aarch64_gather_scale_operand_<Vesize>")
+          (match_operand:SVE_2 4 "register_operand")]
          UNSPEC_ST1_SCATTER))]
   "TARGET_SVE"
-  "@
-   st1<Vesize>\t%4.d, %5, [%1.d]
-   st1<Vesize>\t%4.d, %5, [%1.d, #%0]
-   st1<Vesize>\t%4.d, %5, [%0, %1.d]
-   st1<Vesize>\t%4.d, %5, [%0, %1.d, lsl %p3]"
+  {@ [ cons: 0 , 1 , 3   , 4 , 5    ]
+     [ Z       , w , Ui1 , w , Upl  ] st1<Vesize>\t%4.d, %5, [%1.d]
+     [ vgd     , w , Ui1 , w , Upl  ] st1<Vesize>\t%4.d, %5, [%1.d, #%0]
+     [ rk      , w , Ui1 , w , Upl  ] st1<Vesize>\t%4.d, %5, [%0, %1.d]
+     [ rk      , w , i   , w , Upl  ] st1<Vesize>\t%4.d, %5, [%0, %1.d, lsl %p3]
+  }
 )
 
 ;; Likewise, but with the offset being extended from 32 bits.
 (define_insn_and_rewrite "*mask_scatter_store<mode><v_int_container>_<su>xtw_unpacked"
   [(set (mem:BLK (scratch))
        (unspec:BLK
-         [(match_operand:VNx2BI 5 "register_operand" "Upl, Upl")
-          (match_operand:DI 0 "register_operand" "rk, rk")
+         [(match_operand:VNx2BI 5 "register_operand")
+          (match_operand:DI 0 "register_operand")
           (unspec:VNx2DI
             [(match_operand 6)
              (ANY_EXTEND:VNx2DI
-               (match_operand:VNx2SI 1 "register_operand" "w, w"))]
+               (match_operand:VNx2SI 1 "register_operand"))]
             UNSPEC_PRED_X)
           (match_operand:DI 2 "const_int_operand")
-          (match_operand:DI 3 "aarch64_gather_scale_operand_<Vesize>" "Ui1, i")
-          (match_operand:SVE_2 4 "register_operand" "w, w")]
+          (match_operand:DI 3 "aarch64_gather_scale_operand_<Vesize>")
+          (match_operand:SVE_2 4 "register_operand")]
          UNSPEC_ST1_SCATTER))]
   "TARGET_SVE"
-  "@
-   st1<Vesize>\t%4.d, %5, [%0, %1.d, <su>xtw]
-   st1<Vesize>\t%4.d, %5, [%0, %1.d, <su>xtw %p3]"
+  {@ [ cons: 0 , 1 , 3   , 4 , 5    ]
+     [ rk      , w , Ui1 , w , Upl  ] st1<Vesize>\t%4.d, %5, [%0, %1.d, <su>xtw]
+     [ rk      , w , i   , w , Upl  ] st1<Vesize>\t%4.d, %5, [%0, %1.d, <su>xtw %p3]
+  }
   "&& !CONSTANT_P (operands[6])"
   {
     operands[6] = CONSTM1_RTX (<VPRED>mode);
 (define_insn_and_rewrite "*mask_scatter_store<mode><v_int_container>_sxtw"
   [(set (mem:BLK (scratch))
        (unspec:BLK
-         [(match_operand:VNx2BI 5 "register_operand" "Upl, Upl")
-          (match_operand:DI 0 "register_operand" "rk, rk")
+         [(match_operand:VNx2BI 5 "register_operand")
+          (match_operand:DI 0 "register_operand")
           (unspec:VNx2DI
             [(match_operand 6)
              (sign_extend:VNx2DI
                (truncate:VNx2SI
-                 (match_operand:VNx2DI 1 "register_operand" "w, w")))]
+                 (match_operand:VNx2DI 1 "register_operand")))]
             UNSPEC_PRED_X)
           (match_operand:DI 2 "const_int_operand")
-          (match_operand:DI 3 "aarch64_gather_scale_operand_<Vesize>" "Ui1, i")
-          (match_operand:SVE_2 4 "register_operand" "w, w")]
+          (match_operand:DI 3 "aarch64_gather_scale_operand_<Vesize>")
+          (match_operand:SVE_2 4 "register_operand")]
          UNSPEC_ST1_SCATTER))]
   "TARGET_SVE"
-  "@
-   st1<Vesize>\t%4.d, %5, [%0, %1.d, sxtw]
-   st1<Vesize>\t%4.d, %5, [%0, %1.d, sxtw %p3]"
+  {@ [ cons: 0 , 1 , 3   , 4 , 5    ]
+     [ rk      , w , Ui1 , w , Upl  ] st1<Vesize>\t%4.d, %5, [%0, %1.d, sxtw]
+     [ rk      , w , i   , w , Upl  ] st1<Vesize>\t%4.d, %5, [%0, %1.d, sxtw %p3]
+  }
   "&& !CONSTANT_P (operands[6])"
   {
     operands[6] = CONSTM1_RTX (<VPRED>mode);
 (define_insn "*mask_scatter_store<mode><v_int_container>_uxtw"
   [(set (mem:BLK (scratch))
        (unspec:BLK
-         [(match_operand:VNx2BI 5 "register_operand" "Upl, Upl")
-          (match_operand:DI 0 "aarch64_reg_or_zero" "rk, rk")
+         [(match_operand:VNx2BI 5 "register_operand")
+          (match_operand:DI 0 "aarch64_reg_or_zero")
           (and:VNx2DI
-            (match_operand:VNx2DI 1 "register_operand" "w, w")
+            (match_operand:VNx2DI 1 "register_operand")
             (match_operand:VNx2DI 6 "aarch64_sve_uxtw_immediate"))
           (match_operand:DI 2 "const_int_operand")
-          (match_operand:DI 3 "aarch64_gather_scale_operand_<Vesize>" "Ui1, i")
-          (match_operand:SVE_2 4 "register_operand" "w, w")]
+          (match_operand:DI 3 "aarch64_gather_scale_operand_<Vesize>")
+          (match_operand:SVE_2 4 "register_operand")]
          UNSPEC_ST1_SCATTER))]
   "TARGET_SVE"
-  "@
-   st1<Vesize>\t%4.d, %5, [%0, %1.d, uxtw]
-   st1<Vesize>\t%4.d, %5, [%0, %1.d, uxtw %p3]"
+  {@ [ cons: 0 , 1 , 3   , 4 , 5    ]
+     [ rk      , w , Ui1 , w , Upl  ] st1<Vesize>\t%4.d, %5, [%0, %1.d, uxtw]
+     [ rk      , w , i   , w , Upl  ] st1<Vesize>\t%4.d, %5, [%0, %1.d, uxtw %p3]
+  }
 )
 
 ;; -------------------------------------------------------------------------
 (define_insn "@aarch64_scatter_store_trunc<VNx4_NARROW:mode><VNx4_WIDE:mode>"
   [(set (mem:BLK (scratch))
        (unspec:BLK
-         [(match_operand:VNx4BI 5 "register_operand" "Upl, Upl, Upl, Upl, Upl, Upl")
+         [(match_operand:VNx4BI 5 "register_operand")
           (match_operand:DI 0 "aarch64_sve_gather_offset_<VNx4_NARROW:Vesize>" "Z, vg<VNx4_NARROW:Vesize>, rk, rk, rk, rk")
-          (match_operand:VNx4SI 1 "register_operand" "w, w, w, w, w, w")
-          (match_operand:DI 2 "const_int_operand" "Ui1, Ui1, Z, Ui1, Z, Ui1")
+          (match_operand:VNx4SI 1 "register_operand")
+          (match_operand:DI 2 "const_int_operand")
           (match_operand:DI 3 "aarch64_gather_scale_operand_<VNx4_NARROW:Vesize>" "Ui1, Ui1, Ui1, Ui1, i, i")
           (truncate:VNx4_NARROW
-            (match_operand:VNx4_WIDE 4 "register_operand" "w, w, w, w, w, w"))]
+            (match_operand:VNx4_WIDE 4 "register_operand"))]
          UNSPEC_ST1_SCATTER))]
   "TARGET_SVE"
-  "@
-   st1<VNx4_NARROW:Vesize>\t%4.s, %5, [%1.s]
-   st1<VNx4_NARROW:Vesize>\t%4.s, %5, [%1.s, #%0]
-   st1<VNx4_NARROW:Vesize>\t%4.s, %5, [%0, %1.s, sxtw]
-   st1<VNx4_NARROW:Vesize>\t%4.s, %5, [%0, %1.s, uxtw]
-   st1<VNx4_NARROW:Vesize>\t%4.s, %5, [%0, %1.s, sxtw %p3]
-   st1<VNx4_NARROW:Vesize>\t%4.s, %5, [%0, %1.s, uxtw %p3]"
+  {@ [ cons: 1 , 2   , 4 , 5    ]
+     [ w       , Ui1 , w , Upl  ] st1<VNx4_NARROW:Vesize>\t%4.s, %5, [%1.s]
+     [ w       , Ui1 , w , Upl  ] st1<VNx4_NARROW:Vesize>\t%4.s, %5, [%1.s, #%0]
+     [ w       , Z   , w , Upl  ] st1<VNx4_NARROW:Vesize>\t%4.s, %5, [%0, %1.s, sxtw]
+     [ w       , Ui1 , w , Upl  ] st1<VNx4_NARROW:Vesize>\t%4.s, %5, [%0, %1.s, uxtw]
+     [ w       , Z   , w , Upl  ] st1<VNx4_NARROW:Vesize>\t%4.s, %5, [%0, %1.s, sxtw %p3]
+     [ w       , Ui1 , w , Upl  ] st1<VNx4_NARROW:Vesize>\t%4.s, %5, [%0, %1.s, uxtw %p3]
+  }
 )
 
 ;; Predicated truncating scatter stores for 64-bit elements.  The value of
 (define_insn "@aarch64_scatter_store_trunc<VNx2_NARROW:mode><VNx2_WIDE:mode>"
   [(set (mem:BLK (scratch))
        (unspec:BLK
-         [(match_operand:VNx2BI 5 "register_operand" "Upl, Upl, Upl, Upl")
+         [(match_operand:VNx2BI 5 "register_operand")
           (match_operand:DI 0 "aarch64_sve_gather_offset_<VNx2_NARROW:Vesize>" "Z, vg<VNx2_NARROW:Vesize>, rk, rk")
-          (match_operand:VNx2DI 1 "register_operand" "w, w, w, w")
+          (match_operand:VNx2DI 1 "register_operand")
           (match_operand:DI 2 "const_int_operand")
           (match_operand:DI 3 "aarch64_gather_scale_operand_<VNx2_NARROW:Vesize>" "Ui1, Ui1, Ui1, i")
           (truncate:VNx2_NARROW
-            (match_operand:VNx2_WIDE 4 "register_operand" "w, w, w, w"))]
+            (match_operand:VNx2_WIDE 4 "register_operand"))]
          UNSPEC_ST1_SCATTER))]
   "TARGET_SVE"
-  "@
-   st1<VNx2_NARROW:Vesize>\t%4.d, %5, [%1.d]
-   st1<VNx2_NARROW:Vesize>\t%4.d, %5, [%1.d, #%0]
-   st1<VNx2_NARROW:Vesize>\t%4.d, %5, [%0, %1.d]
-   st1<VNx2_NARROW:Vesize>\t%4.d, %5, [%0, %1.d, lsl %p3]"
+  {@ [ cons: 1 , 4 , 5    ]
+     [ w       , w , Upl  ] st1<VNx2_NARROW:Vesize>\t%4.d, %5, [%1.d]
+     [ w       , w , Upl  ] st1<VNx2_NARROW:Vesize>\t%4.d, %5, [%1.d, #%0]
+     [ w       , w , Upl  ] st1<VNx2_NARROW:Vesize>\t%4.d, %5, [%0, %1.d]
+     [ w       , w , Upl  ] st1<VNx2_NARROW:Vesize>\t%4.d, %5, [%0, %1.d, lsl %p3]
+  }
 )
 
 ;; Likewise, but with the offset being sign-extended from 32 bits.
 (define_insn_and_rewrite "*aarch64_scatter_store_trunc<VNx2_NARROW:mode><VNx2_WIDE:mode>_sxtw"
   [(set (mem:BLK (scratch))
        (unspec:BLK
-         [(match_operand:VNx2BI 5 "register_operand" "Upl, Upl")
-          (match_operand:DI 0 "register_operand" "rk, rk")
+         [(match_operand:VNx2BI 5 "register_operand")
+          (match_operand:DI 0 "register_operand")
           (unspec:VNx2DI
             [(match_operand 6)
              (sign_extend:VNx2DI
                (truncate:VNx2SI
-                 (match_operand:VNx2DI 1 "register_operand" "w, w")))]
+                 (match_operand:VNx2DI 1 "register_operand")))]
             UNSPEC_PRED_X)
           (match_operand:DI 2 "const_int_operand")
           (match_operand:DI 3 "aarch64_gather_scale_operand_<VNx2_NARROW:Vesize>" "Ui1, i")
           (truncate:VNx2_NARROW
-            (match_operand:VNx2_WIDE 4 "register_operand" "w, w"))]
+            (match_operand:VNx2_WIDE 4 "register_operand"))]
          UNSPEC_ST1_SCATTER))]
   "TARGET_SVE"
-  "@
-   st1<VNx2_NARROW:Vesize>\t%4.d, %5, [%0, %1.d, sxtw]
-   st1<VNx2_NARROW:Vesize>\t%4.d, %5, [%0, %1.d, sxtw %p3]"
+  {@ [ cons: 0 , 1 , 4 , 5    ]
+     [ rk      , w , w , Upl  ] st1<VNx2_NARROW:Vesize>\t%4.d, %5, [%0, %1.d, sxtw]
+     [ rk      , w , w , Upl  ] st1<VNx2_NARROW:Vesize>\t%4.d, %5, [%0, %1.d, sxtw %p3]
+  }
   "&& !rtx_equal_p (operands[5], operands[6])"
   {
     operands[6] = copy_rtx (operands[5]);
 (define_insn "*aarch64_scatter_store_trunc<VNx2_NARROW:mode><VNx2_WIDE:mode>_uxtw"
   [(set (mem:BLK (scratch))
        (unspec:BLK
-         [(match_operand:VNx2BI 5 "register_operand" "Upl, Upl")
-          (match_operand:DI 0 "aarch64_reg_or_zero" "rk, rk")
+         [(match_operand:VNx2BI 5 "register_operand")
+          (match_operand:DI 0 "aarch64_reg_or_zero")
           (and:VNx2DI
-            (match_operand:VNx2DI 1 "register_operand" "w, w")
+            (match_operand:VNx2DI 1 "register_operand")
             (match_operand:VNx2DI 6 "aarch64_sve_uxtw_immediate"))
           (match_operand:DI 2 "const_int_operand")
           (match_operand:DI 3 "aarch64_gather_scale_operand_<VNx2_NARROW:Vesize>" "Ui1, i")
           (truncate:VNx2_NARROW
-            (match_operand:VNx2_WIDE 4 "register_operand" "w, w"))]
+            (match_operand:VNx2_WIDE 4 "register_operand"))]
          UNSPEC_ST1_SCATTER))]
   "TARGET_SVE"
-  "@
-   st1<VNx2_NARROW:Vesize>\t%4.d, %5, [%0, %1.d, uxtw]
-   st1<VNx2_NARROW:Vesize>\t%4.d, %5, [%0, %1.d, uxtw %p3]"
+  {@ [ cons: 0 , 1 , 4 , 5    ]
+     [ rk      , w , w , Upl  ] st1<VNx2_NARROW:Vesize>\t%4.d, %5, [%0, %1.d, uxtw]
+     [ rk      , w , w , Upl  ] st1<VNx2_NARROW:Vesize>\t%4.d, %5, [%0, %1.d, uxtw %p3]
+  }
 )
 
 ;; =========================================================================
 ;; the load at the first opportunity in order to allow the PTRUE to be
 ;; optimized with surrounding code.
 (define_insn_and_split "*vec_duplicate<mode>_reg"
-  [(set (match_operand:SVE_ALL 0 "register_operand" "=w, w, w")
+  [(set (match_operand:SVE_ALL 0 "register_operand")
        (vec_duplicate:SVE_ALL
-         (match_operand:<VEL> 1 "aarch64_sve_dup_operand" "r, w, Uty")))
+         (match_operand:<VEL> 1 "aarch64_sve_dup_operand")))
    (clobber (match_scratch:VNx16BI 2 "=X, X, Upl"))]
   "TARGET_SVE"
-  "@
-   mov\t%0.<Vetype>, %<vwcore>1
-   mov\t%0.<Vetype>, %<Vetype>1
-   #"
+  {@ [ cons: =0 , 1   ; attrs: length ]
+     [ w        , r   ; 4             ] mov\t%0.<Vetype>, %<vwcore>1
+     [ w        , w   ; 4             ] mov\t%0.<Vetype>, %<Vetype>1
+     [ w        , Uty ; 8             ] #
+  }
   "&& MEM_P (operands[1])"
   [(const_int 0)]
   {
                                   CONST0_RTX (<MODE>mode)));
     DONE;
   }
-  [(set_attr "length" "4,4,8")]
 )
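
The *vec_duplicate<mode>_reg hunk above also shows how per-alternative
attributes move into the table: "attrs:" in the header names the
attribute, each row supplies its value after the ";", and the trailing
(set_attr "length" "4,4,8") is dropped.  A "#" template marks an
alternative that is always split; it still carries its length.  The
relevant rows, excerpted:

  {@ [ cons: =0 , 1   ; attrs: length ]
     [ w        , r   ; 4             ] mov\t%0.<Vetype>, %<vwcore>1
     [ w        , Uty ; 8             ] #
  }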
 
 ;; Duplicate an Advanced SIMD vector to fill an SVE vector (LE version).
 
 ;; Shift an SVE vector left and insert a scalar into element 0.
 (define_insn "vec_shl_insert_<mode>"
-  [(set (match_operand:SVE_FULL 0 "register_operand" "=?w, w, ??&w, ?&w")
+  [(set (match_operand:SVE_FULL 0 "register_operand")
        (unspec:SVE_FULL
-         [(match_operand:SVE_FULL 1 "register_operand" "0, 0, w, w")
-          (match_operand:<VEL> 2 "aarch64_reg_or_zero" "rZ, w, rZ, w")]
+         [(match_operand:SVE_FULL 1 "register_operand")
+          (match_operand:<VEL> 2 "aarch64_reg_or_zero")]
          UNSPEC_INSR))]
   "TARGET_SVE"
-  "@
-   insr\t%0.<Vetype>, %<vwcore>2
-   insr\t%0.<Vetype>, %<Vetype>2
-   movprfx\t%0, %1\;insr\t%0.<Vetype>, %<vwcore>2
-   movprfx\t%0, %1\;insr\t%0.<Vetype>, %<Vetype>2"
-  [(set_attr "movprfx" "*,*,yes,yes")]
+  {@ [ cons: =0 , 1 , 2  ; attrs: movprfx ]
+     [ ?w       , 0 , rZ ; *              ] insr\t%0.<Vetype>, %<vwcore>2
+     [ w        , 0 , w  ; *              ] insr\t%0.<Vetype>, %<Vetype>2
+     [ ??&w     , w , rZ ; yes            ] movprfx\t%0, %1\;insr\t%0.<Vetype>, %<vwcore>2
+     [ ?&w      , w , w  ; yes            ] movprfx\t%0, %1\;insr\t%0.<Vetype>, %<Vetype>2
+  }
 )
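
Note also that the alternative-specific modifiers from the old
constraint strings travel into the rows unchanged: "=" is written once
in the header, while "?", "??", "&" and the tying digit "0" stay per
alternative.  In the movprfx column, "*" stands for the attribute's
default value, mirroring the "*" entries of the removed set_attr.  Two
rows of vec_shl_insert_<mode>, excerpted, show the old
"=?w, w, ??&w, ?&w" for operand 0 redistributed down the first column:

  {@ [ cons: =0 , 1 , 2  ; attrs: movprfx ]
     [ ?w       , 0 , rZ ; *              ] insr\t%0.<Vetype>, %<vwcore>2
     [ ??&w     , w , rZ ; yes            ] movprfx\t%0, %1\;insr\t%0.<Vetype>, %<vwcore>2
  }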
 
 ;; -------------------------------------------------------------------------
 ;; -------------------------------------------------------------------------
 
 (define_insn "vec_series<mode>"
-  [(set (match_operand:SVE_I 0 "register_operand" "=w, w, w")
+  [(set (match_operand:SVE_I 0 "register_operand")
        (vec_series:SVE_I
-         (match_operand:<VEL> 1 "aarch64_sve_index_operand" "Usi, r, r")
-         (match_operand:<VEL> 2 "aarch64_sve_index_operand" "r, Usi, r")))]
+         (match_operand:<VEL> 1 "aarch64_sve_index_operand")
+         (match_operand:<VEL> 2 "aarch64_sve_index_operand")))]
   "TARGET_SVE"
-  "@
-   index\t%0.<Vctype>, #%1, %<vccore>2
-   index\t%0.<Vctype>, %<vccore>1, #%2
-   index\t%0.<Vctype>, %<vccore>1, %<vccore>2"
+  {@ [ cons: =0 , 1   , 2    ]
+     [ w        , Usi , r    ] index\t%0.<Vctype>, #%1, %<vccore>2
+     [ w        , r   , Usi  ] index\t%0.<Vctype>, %<vccore>1, #%2
+     [ w        , r   , r    ] index\t%0.<Vctype>, %<vccore>1, %<vccore>2
+  }
 )
 
 ;; Optimize {x, x, x, x, ...} + {0, n, 2*n, 3*n, ...} if n is in range
 ;; Extract the last active element of operand 1 into operand 0.
 ;; If no elements are active, extract the last inactive element instead.
 (define_insn "@extract_<last_op>_<mode>"
-  [(set (match_operand:<VEL> 0 "register_operand" "=?r, w")
+  [(set (match_operand:<VEL> 0 "register_operand")
        (unspec:<VEL>
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
-          (match_operand:SVE_FULL 2 "register_operand" "w, w")]
+         [(match_operand:<VPRED> 1 "register_operand")
+          (match_operand:SVE_FULL 2 "register_operand")]
          LAST))]
   "TARGET_SVE"
-  "@
-   last<ab>\t%<vwcore>0, %1, %2.<Vetype>
-   last<ab>\t%<Vetype>0, %1, %2.<Vetype>"
+  {@ [ cons: =0 , 1   , 2  ]
+     [ ?r       , Upl , w  ] last<ab>\t%<vwcore>0, %1, %2.<Vetype>
+     [ w        , Upl , w  ] last<ab>\t%<Vetype>0, %1, %2.<Vetype>
+  }
 )
 
 ;; -------------------------------------------------------------------------
 
 ;; Integer unary arithmetic predicated with a PTRUE.
 (define_insn "@aarch64_pred_<optab><mode>"
-  [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_I 0 "register_operand")
        (unspec:SVE_I
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (SVE_INT_UNARY:SVE_I
-            (match_operand:SVE_I 2 "register_operand" "0, w"))]
+            (match_operand:SVE_I 2 "register_operand"))]
          UNSPEC_PRED_X))]
   "TARGET_SVE"
-  "@
-   <sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
-   movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1   , 2 ; attrs: movprfx ]
+     [ w        , Upl , 0 ; *              ] <sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+     [ ?&w      , Upl , w ; yes            ] movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+  }
 )
 
 ;; Predicated integer unary arithmetic with merging.
 
 ;; Predicated integer unary arithmetic, merging with the first input.
 (define_insn "*cond_<optab><mode>_2"
-  [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_I 0 "register_operand")
        (unspec:SVE_I
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (SVE_INT_UNARY:SVE_I
-            (match_operand:SVE_I 2 "register_operand" "0, w"))
+            (match_operand:SVE_I 2 "register_operand"))
           (match_dup 2)]
          UNSPEC_SEL))]
   "TARGET_SVE"
-  "@
-   <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>
-   movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1   , 2 ; attrs: movprfx ]
+     [ w        , Upl , 0 ; *              ] <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>
+     [ ?&w      , Upl , w ; yes            ] movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+  }
 )
 
 ;; Predicated integer unary arithmetic, merging with an independent value.
 ;; as earlyclobber helps to make the instruction more regular to the
 ;; register allocator.
 (define_insn "*cond_<optab><mode>_any"
-  [(set (match_operand:SVE_I 0 "register_operand" "=&w, ?&w, ?&w")
+  [(set (match_operand:SVE_I 0 "register_operand")
        (unspec:SVE_I
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (SVE_INT_UNARY:SVE_I
-            (match_operand:SVE_I 2 "register_operand" "w, w, w"))
-          (match_operand:SVE_I 3 "aarch64_simd_reg_or_zero" "0, Dz, w")]
+            (match_operand:SVE_I 2 "register_operand"))
+          (match_operand:SVE_I 3 "aarch64_simd_reg_or_zero")]
          UNSPEC_SEL))]
   "TARGET_SVE && !rtx_equal_p (operands[2], operands[3])"
-  "@
-   <sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
-   movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
-   movprfx\t%0, %3\;<sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>"
-  [(set_attr "movprfx" "*,yes,yes")]
+  {@ [ cons: =0 , 1   , 2 , 3  ; attrs: movprfx ]
+     [ &w       , Upl , w , 0  ; *              ] <sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+     [ ?&w      , Upl , w , Dz ; yes            ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+     [ ?&w      , Upl , w , w  ; yes            ] movprfx\t%0, %3\;<sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+  }
 )
 
 ;; -------------------------------------------------------------------------
 
 ;; Predicated integer unary operations.
 (define_insn "@aarch64_pred_<optab><mode>"
-  [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_FULL_I 0 "register_operand")
        (unspec:SVE_FULL_I
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (unspec:SVE_FULL_I
-            [(match_operand:SVE_FULL_I 2 "register_operand" "0, w")]
+            [(match_operand:SVE_FULL_I 2 "register_operand")]
             SVE_INT_UNARY)]
          UNSPEC_PRED_X))]
   "TARGET_SVE && <elem_bits> >= <min_elem_bits>"
-  "@
-   <sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
-   movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1   , 2 ; attrs: movprfx ]
+     [ w        , Upl , 0 ; *              ] <sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+     [ ?&w      , Upl , w ; yes            ] movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+  }
 )
 
 ;; Another way of expressing the REVB, REVH and REVW patterns, with this
 ;; of lanes and the data mode decides the granularity of the reversal within
 ;; each lane.
 (define_insn "@aarch64_sve_revbhw_<SVE_ALL:mode><PRED_HSD:mode>"
-  [(set (match_operand:SVE_ALL 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_ALL 0 "register_operand")
        (unspec:SVE_ALL
-         [(match_operand:PRED_HSD 1 "register_operand" "Upl, Upl")
+         [(match_operand:PRED_HSD 1 "register_operand")
           (unspec:SVE_ALL
-            [(match_operand:SVE_ALL 2 "register_operand" "0, w")]
+            [(match_operand:SVE_ALL 2 "register_operand")]
             UNSPEC_REVBHW)]
          UNSPEC_PRED_X))]
   "TARGET_SVE && <PRED_HSD:elem_bits> > <SVE_ALL:container_bits>"
-  "@
-   rev<SVE_ALL:Vcwtype>\t%0.<PRED_HSD:Vetype>, %1/m, %2.<PRED_HSD:Vetype>
-   movprfx\t%0, %2\;rev<SVE_ALL:Vcwtype>\t%0.<PRED_HSD:Vetype>, %1/m, %2.<PRED_HSD:Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1   , 2 ; attrs: movprfx ]
+     [ w        , Upl , 0 ; *              ] rev<SVE_ALL:Vcwtype>\t%0.<PRED_HSD:Vetype>, %1/m, %2.<PRED_HSD:Vetype>
+     [ ?&w      , Upl , w ; yes            ] movprfx\t%0, %2\;rev<SVE_ALL:Vcwtype>\t%0.<PRED_HSD:Vetype>, %1/m, %2.<PRED_HSD:Vetype>
+  }
 )
 
 ;; Predicated integer unary operations with merging.
 (define_insn "@cond_<optab><mode>"
-  [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, ?&w, ?&w")
+  [(set (match_operand:SVE_FULL_I 0 "register_operand")
        (unspec:SVE_FULL_I
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (unspec:SVE_FULL_I
-            [(match_operand:SVE_FULL_I 2 "register_operand" "w, w, w")]
+            [(match_operand:SVE_FULL_I 2 "register_operand")]
             SVE_INT_UNARY)
-          (match_operand:SVE_FULL_I 3 "aarch64_simd_reg_or_zero" "0, Dz, w")]
+          (match_operand:SVE_FULL_I 3 "aarch64_simd_reg_or_zero")]
          UNSPEC_SEL))]
   "TARGET_SVE && <elem_bits> >= <min_elem_bits>"
-  "@
-   <sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
-   movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
-   movprfx\t%0, %3\;<sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>"
-  [(set_attr "movprfx" "*,yes,yes")]
+  {@ [ cons: =0 , 1   , 2 , 3  ; attrs: movprfx ]
+     [ w        , Upl , w , 0  ; *              ] <sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+     [ ?&w      , Upl , w , Dz ; yes            ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+     [ ?&w      , Upl , w , w  ; yes            ] movprfx\t%0, %3\;<sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+  }
 )
 
 ;; -------------------------------------------------------------------------
 
 ;; Predicated sign and zero extension from a narrower mode.
 (define_insn "*<optab><SVE_PARTIAL_I:mode><SVE_HSDI:mode>2"
-  [(set (match_operand:SVE_HSDI 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_HSDI 0 "register_operand")
        (unspec:SVE_HSDI
-         [(match_operand:<SVE_HSDI:VPRED> 1 "register_operand" "Upl, Upl")
+         [(match_operand:<SVE_HSDI:VPRED> 1 "register_operand")
           (ANY_EXTEND:SVE_HSDI
-            (match_operand:SVE_PARTIAL_I 2 "register_operand" "0, w"))]
+            (match_operand:SVE_PARTIAL_I 2 "register_operand"))]
          UNSPEC_PRED_X))]
   "TARGET_SVE && (~<SVE_HSDI:narrower_mask> & <SVE_PARTIAL_I:self_mask>) == 0"
-  "@
-   <su>xt<SVE_PARTIAL_I:Vesize>\t%0.<SVE_HSDI:Vetype>, %1/m, %2.<SVE_HSDI:Vetype>
-   movprfx\t%0, %2\;<su>xt<SVE_PARTIAL_I:Vesize>\t%0.<SVE_HSDI:Vetype>, %1/m, %2.<SVE_HSDI:Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1   , 2 ; attrs: movprfx ]
+     [ w        , Upl , 0 ; *              ] <su>xt<SVE_PARTIAL_I:Vesize>\t%0.<SVE_HSDI:Vetype>, %1/m, %2.<SVE_HSDI:Vetype>
+     [ ?&w      , Upl , w ; yes            ] movprfx\t%0, %2\;<su>xt<SVE_PARTIAL_I:Vesize>\t%0.<SVE_HSDI:Vetype>, %1/m, %2.<SVE_HSDI:Vetype>
+  }
 )
 
 ;; Predicated truncate-and-sign-extend operations.
 (define_insn "@aarch64_pred_sxt<SVE_FULL_HSDI:mode><SVE_PARTIAL_I:mode>"
-  [(set (match_operand:SVE_FULL_HSDI 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_FULL_HSDI 0 "register_operand")
        (unspec:SVE_FULL_HSDI
-         [(match_operand:<SVE_FULL_HSDI:VPRED> 1 "register_operand" "Upl, Upl")
+         [(match_operand:<SVE_FULL_HSDI:VPRED> 1 "register_operand")
           (sign_extend:SVE_FULL_HSDI
             (truncate:SVE_PARTIAL_I
-              (match_operand:SVE_FULL_HSDI 2 "register_operand" "0, w")))]
+              (match_operand:SVE_FULL_HSDI 2 "register_operand")))]
          UNSPEC_PRED_X))]
   "TARGET_SVE
    && (~<SVE_FULL_HSDI:narrower_mask> & <SVE_PARTIAL_I:self_mask>) == 0"
-  "@
-   sxt<SVE_PARTIAL_I:Vesize>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>
-   movprfx\t%0, %2\;sxt<SVE_PARTIAL_I:Vesize>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1   , 2 ; attrs: movprfx ]
+     [ w        , Upl , 0 ; *              ] sxt<SVE_PARTIAL_I:Vesize>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>
+     [ ?&w      , Upl , w ; yes            ] movprfx\t%0, %2\;sxt<SVE_PARTIAL_I:Vesize>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>
+  }
 )
 
 ;; Predicated truncate-and-sign-extend operations with merging.
 (define_insn "@aarch64_cond_sxt<SVE_FULL_HSDI:mode><SVE_PARTIAL_I:mode>"
-  [(set (match_operand:SVE_FULL_HSDI 0 "register_operand" "=w, ?&w, ?&w")
+  [(set (match_operand:SVE_FULL_HSDI 0 "register_operand")
        (unspec:SVE_FULL_HSDI
-         [(match_operand:<SVE_FULL_HSDI:VPRED> 1 "register_operand" "Upl, Upl, Upl")
+         [(match_operand:<SVE_FULL_HSDI:VPRED> 1 "register_operand")
           (sign_extend:SVE_FULL_HSDI
             (truncate:SVE_PARTIAL_I
-              (match_operand:SVE_FULL_HSDI 2 "register_operand" "w, w, w")))
-          (match_operand:SVE_FULL_HSDI 3 "aarch64_simd_reg_or_zero" "0, Dz, w")]
+              (match_operand:SVE_FULL_HSDI 2 "register_operand")))
+          (match_operand:SVE_FULL_HSDI 3 "aarch64_simd_reg_or_zero")]
          UNSPEC_SEL))]
   "TARGET_SVE
    && (~<SVE_FULL_HSDI:narrower_mask> & <SVE_PARTIAL_I:self_mask>) == 0"
-  "@
-   sxt<SVE_PARTIAL_I:Vesize>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>
-   movprfx\t%0.<SVE_FULL_HSDI:Vetype>, %1/z, %2.<SVE_FULL_HSDI:Vetype>\;sxt<SVE_PARTIAL_I:Vesize>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>
-   movprfx\t%0, %3\;sxt<SVE_PARTIAL_I:Vesize>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>"
-  [(set_attr "movprfx" "*,yes,yes")]
+  {@ [ cons: =0 , 1   , 2 , 3  ; attrs: movprfx ]
+     [ w        , Upl , w , 0  ; *              ] sxt<SVE_PARTIAL_I:Vesize>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>
+     [ ?&w      , Upl , w , Dz ; yes            ] movprfx\t%0.<SVE_FULL_HSDI:Vetype>, %1/z, %2.<SVE_FULL_HSDI:Vetype>\;sxt<SVE_PARTIAL_I:Vesize>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>
+     [ ?&w      , Upl , w , w  ; yes            ] movprfx\t%0, %3\;sxt<SVE_PARTIAL_I:Vesize>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>
+  }
 )
 
 ;; Predicated truncate-and-zero-extend operations, merging with the
 ;; The canonical form of this operation is an AND of a constant rather
 ;; than (zero_extend (truncate ...)).
 (define_insn "*cond_uxt<mode>_2"
-  [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_I 0 "register_operand")
        (unspec:SVE_I
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (and:SVE_I
-            (match_operand:SVE_I 2 "register_operand" "0, w")
+            (match_operand:SVE_I 2 "register_operand")
             (match_operand:SVE_I 3 "aarch64_sve_uxt_immediate"))
           (match_dup 2)]
          UNSPEC_SEL))]
   "TARGET_SVE"
-  "@
-   uxt%e3\t%0.<Vetype>, %1/m, %0.<Vetype>
-   movprfx\t%0, %2\;uxt%e3\t%0.<Vetype>, %1/m, %2.<Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1   , 2 ; attrs: movprfx ]
+     [ w        , Upl , 0 ; *              ] uxt%e3\t%0.<Vetype>, %1/m, %0.<Vetype>
+     [ ?&w      , Upl , w ; yes            ] movprfx\t%0, %2\;uxt%e3\t%0.<Vetype>, %1/m, %2.<Vetype>
+  }
 )
 
 ;; Predicated truncate-and-zero-extend operations, merging with an
 ;; as early-clobber helps to make the instruction more regular to the
 ;; register allocator.
 (define_insn "*cond_uxt<mode>_any"
-  [(set (match_operand:SVE_I 0 "register_operand" "=&w, ?&w, ?&w")
+  [(set (match_operand:SVE_I 0 "register_operand")
        (unspec:SVE_I
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (and:SVE_I
-            (match_operand:SVE_I 2 "register_operand" "w, w, w")
+            (match_operand:SVE_I 2 "register_operand")
             (match_operand:SVE_I 3 "aarch64_sve_uxt_immediate"))
-          (match_operand:SVE_I 4 "aarch64_simd_reg_or_zero" "0, Dz, w")]
+          (match_operand:SVE_I 4 "aarch64_simd_reg_or_zero")]
          UNSPEC_SEL))]
   "TARGET_SVE && !rtx_equal_p (operands[2], operands[4])"
-  "@
-   uxt%e3\t%0.<Vetype>, %1/m, %2.<Vetype>
-   movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;uxt%e3\t%0.<Vetype>, %1/m, %2.<Vetype>
-   movprfx\t%0, %4\;uxt%e3\t%0.<Vetype>, %1/m, %2.<Vetype>"
-  [(set_attr "movprfx" "*,yes,yes")]
+  {@ [ cons: =0 , 1   , 2 , 4  ; attrs: movprfx ]
+     [ &w       , Upl , w , 0  ; *              ] uxt%e3\t%0.<Vetype>, %1/m, %2.<Vetype>
+     [ ?&w      , Upl , w , Dz ; yes            ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;uxt%e3\t%0.<Vetype>, %1/m, %2.<Vetype>
+     [ ?&w      , Upl , w , w  ; yes            ] movprfx\t%0, %4\;uxt%e3\t%0.<Vetype>, %1/m, %2.<Vetype>
+  }
 )
 
 ;; -------------------------------------------------------------------------
 )
 
 (define_insn "*cnot<mode>"
-  [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_I 0 "register_operand")
        (unspec:SVE_I
          [(unspec:<VPRED>
-            [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+            [(match_operand:<VPRED> 1 "register_operand")
              (match_operand:SI 5 "aarch64_sve_ptrue_flag")
              (eq:<VPRED>
-               (match_operand:SVE_I 2 "register_operand" "0, w")
+               (match_operand:SVE_I 2 "register_operand")
                (match_operand:SVE_I 3 "aarch64_simd_imm_zero"))]
             UNSPEC_PRED_Z)
           (match_operand:SVE_I 4 "aarch64_simd_imm_one")
           (match_dup 3)]
          UNSPEC_SEL))]
   "TARGET_SVE"
-  "@
-   cnot\t%0.<Vetype>, %1/m, %2.<Vetype>
-   movprfx\t%0, %2\;cnot\t%0.<Vetype>, %1/m, %2.<Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1   , 2 ; attrs: movprfx ]
+     [ w        , Upl , 0 ; *              ] cnot\t%0.<Vetype>, %1/m, %2.<Vetype>
+     [ ?&w      , Upl , w ; yes            ] movprfx\t%0, %2\;cnot\t%0.<Vetype>, %1/m, %2.<Vetype>
+  }
 )
 
 ;; Predicated logical inverse with merging.
 
 ;; Predicated logical inverse, merging with the first input.
 (define_insn_and_rewrite "*cond_cnot<mode>_2"
-  [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_I 0 "register_operand")
        (unspec:SVE_I
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           ;; Logical inverse of operand 2 (as above).
           (unspec:SVE_I
             [(unspec:<VPRED>
                [(match_operand 5)
                 (const_int SVE_KNOWN_PTRUE)
                 (eq:<VPRED>
-                  (match_operand:SVE_I 2 "register_operand" "0, w")
+                  (match_operand:SVE_I 2 "register_operand")
                   (match_operand:SVE_I 3 "aarch64_simd_imm_zero"))]
                UNSPEC_PRED_Z)
              (match_operand:SVE_I 4 "aarch64_simd_imm_one")
           (match_dup 2)]
          UNSPEC_SEL))]
   "TARGET_SVE"
-  "@
-   cnot\t%0.<Vetype>, %1/m, %0.<Vetype>
-   movprfx\t%0, %2\;cnot\t%0.<Vetype>, %1/m, %2.<Vetype>"
+  {@ [ cons: =0 , 1   , 2 ; attrs: movprfx ]
+     [ w        , Upl , 0 ; *              ] cnot\t%0.<Vetype>, %1/m, %0.<Vetype>
+     [ ?&w      , Upl , w ; yes            ] movprfx\t%0, %2\;cnot\t%0.<Vetype>, %1/m, %2.<Vetype>
+  }
   "&& !CONSTANT_P (operands[5])"
   {
     operands[5] = CONSTM1_RTX (<VPRED>mode);
   }
-  [(set_attr "movprfx" "*,yes")]
 )
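
In the define_insn_and_rewrite and define_insn_and_split conversions,
only the output template changes form: the rewrite condition and
preparation statements follow the closing "}" exactly as they used to
follow the closing quote, and the now-redundant trailing set_attr is
deleted.  Schematically (pattern name and body elided):

  (define_insn_and_rewrite "..."
    [...]
    "TARGET_SVE"
    {@ [ cons: =0 , 1   , 2 ; attrs: movprfx ]
       [ w        , Upl , 0 ; *              ] cnot\t%0.<Vetype>, %1/m, %0.<Vetype>
       [ ?&w      , Upl , w ; yes            ] movprfx\t%0, %2\;cnot\t%0.<Vetype>, %1/m, %2.<Vetype>
    }
    "&& !CONSTANT_P (operands[5])"
    {
      operands[5] = CONSTM1_RTX (<VPRED>mode);
    }
  )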
 
 ;; Predicated logical inverse, merging with an independent value.
 ;; as earlyclobber helps to make the instruction more regular to the
 ;; register allocator.
 (define_insn_and_rewrite "*cond_cnot<mode>_any"
-  [(set (match_operand:SVE_I 0 "register_operand" "=&w, ?&w, ?&w")
+  [(set (match_operand:SVE_I 0 "register_operand")
        (unspec:SVE_I
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           ;; Logical inverse of operand 2 (as above).
           (unspec:SVE_I
             [(unspec:<VPRED>
                [(match_operand 5)
                 (const_int SVE_KNOWN_PTRUE)
                 (eq:<VPRED>
-                  (match_operand:SVE_I 2 "register_operand" "w, w, w")
+                  (match_operand:SVE_I 2 "register_operand")
                   (match_operand:SVE_I 3 "aarch64_simd_imm_zero"))]
                UNSPEC_PRED_Z)
              (match_operand:SVE_I 4 "aarch64_simd_imm_one")
              (match_dup 3)]
             UNSPEC_SEL)
-          (match_operand:SVE_I 6 "aarch64_simd_reg_or_zero" "0, Dz, w")]
+          (match_operand:SVE_I 6 "aarch64_simd_reg_or_zero")]
          UNSPEC_SEL))]
   "TARGET_SVE && !rtx_equal_p (operands[2], operands[6])"
-  "@
-   cnot\t%0.<Vetype>, %1/m, %2.<Vetype>
-   movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;cnot\t%0.<Vetype>, %1/m, %2.<Vetype>
-   movprfx\t%0, %6\;cnot\t%0.<Vetype>, %1/m, %2.<Vetype>"
+  {@ [ cons: =0 , 1   , 2 , 6  ; attrs: movprfx ]
+     [ &w       , Upl , w , 0  ; *              ] cnot\t%0.<Vetype>, %1/m, %2.<Vetype>
+     [ ?&w      , Upl , w , Dz ; yes            ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;cnot\t%0.<Vetype>, %1/m, %2.<Vetype>
+     [ ?&w      , Upl , w , w  ; yes            ] movprfx\t%0, %6\;cnot\t%0.<Vetype>, %1/m, %2.<Vetype>
+  }
   "&& !CONSTANT_P (operands[5])"
   {
     operands[5] = CONSTM1_RTX (<VPRED>mode);
   }
-  [(set_attr "movprfx" "*,yes,yes")]
 )
 
 ;; -------------------------------------------------------------------------
 
 ;; Predicated floating-point unary operations.
 (define_insn "@aarch64_pred_<optab><mode>"
-  [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_FULL_F 0 "register_operand")
        (unspec:SVE_FULL_F
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (match_operand:SI 3 "aarch64_sve_gp_strictness")
-          (match_operand:SVE_FULL_F 2 "register_operand" "0, w")]
+          (match_operand:SVE_FULL_F 2 "register_operand")]
          SVE_COND_FP_UNARY))]
   "TARGET_SVE"
-  "@
-   <sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
-   movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1   , 2 ; attrs: movprfx ]
+     [ w        , Upl , 0 ; *              ] <sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+     [ ?&w      , Upl , w ; yes            ] movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+  }
 )
 
 ;; Predicated floating-point unary arithmetic with merging.
 
 ;; Predicated floating-point unary arithmetic, merging with the first input.
 (define_insn_and_rewrite "*cond_<optab><mode>_2_relaxed"
-  [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_FULL_F 0 "register_operand")
        (unspec:SVE_FULL_F
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (unspec:SVE_FULL_F
             [(match_operand 3)
              (const_int SVE_RELAXED_GP)
-             (match_operand:SVE_FULL_F 2 "register_operand" "0, w")]
+             (match_operand:SVE_FULL_F 2 "register_operand")]
             SVE_COND_FP_UNARY)
           (match_dup 2)]
          UNSPEC_SEL))]
   "TARGET_SVE"
-  "@
-   <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>
-   movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>"
+  {@ [ cons: =0 , 1   , 2 ; attrs: movprfx ]
+     [ w        , Upl , 0 ; *              ] <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>
+     [ ?&w      , Upl , w ; yes            ] movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+  }
   "&& !rtx_equal_p (operands[1], operands[3])"
   {
     operands[3] = copy_rtx (operands[1]);
   }
-  [(set_attr "movprfx" "*,yes")]
 )
 
 (define_insn "*cond_<optab><mode>_2_strict"
-  [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_FULL_F 0 "register_operand")
        (unspec:SVE_FULL_F
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (unspec:SVE_FULL_F
             [(match_dup 1)
              (const_int SVE_STRICT_GP)
-             (match_operand:SVE_FULL_F 2 "register_operand" "0, w")]
+             (match_operand:SVE_FULL_F 2 "register_operand")]
             SVE_COND_FP_UNARY)
           (match_dup 2)]
          UNSPEC_SEL))]
   "TARGET_SVE"
-  "@
-   <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>
-   movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1   , 2 ; attrs: movprfx ]
+     [ w        , Upl , 0 ; *              ] <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>
+     [ ?&w      , Upl , w ; yes            ] movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+  }
 )
 
 ;; Predicated floating-point unary arithmetic, merging with an independent
 ;; as earlyclobber helps to make the instruction more regular to the
 ;; register allocator.
 (define_insn_and_rewrite "*cond_<optab><mode>_any_relaxed"
-  [(set (match_operand:SVE_FULL_F 0 "register_operand" "=&w, ?&w, ?&w")
+  [(set (match_operand:SVE_FULL_F 0 "register_operand")
        (unspec:SVE_FULL_F
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (unspec:SVE_FULL_F
             [(match_operand 4)
              (const_int SVE_RELAXED_GP)
-             (match_operand:SVE_FULL_F 2 "register_operand" "w, w, w")]
+             (match_operand:SVE_FULL_F 2 "register_operand")]
             SVE_COND_FP_UNARY)
-          (match_operand:SVE_FULL_F 3 "aarch64_simd_reg_or_zero" "0, Dz, w")]
+          (match_operand:SVE_FULL_F 3 "aarch64_simd_reg_or_zero")]
          UNSPEC_SEL))]
   "TARGET_SVE && !rtx_equal_p (operands[2], operands[3])"
-  "@
-   <sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
-   movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
-   movprfx\t%0, %3\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>"
+  {@ [ cons: =0 , 1   , 2 , 3  ; attrs: movprfx ]
+     [ &w       , Upl , w , 0  ; *              ] <sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+     [ ?&w      , Upl , w , Dz ; yes            ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+     [ ?&w      , Upl , w , w  ; yes            ] movprfx\t%0, %3\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+  }
   "&& !rtx_equal_p (operands[1], operands[4])"
   {
     operands[4] = copy_rtx (operands[1]);
   }
-  [(set_attr "movprfx" "*,yes,yes")]
 )
 
 (define_insn "*cond_<optab><mode>_any_strict"
-  [(set (match_operand:SVE_FULL_F 0 "register_operand" "=&w, ?&w, ?&w")
+  [(set (match_operand:SVE_FULL_F 0 "register_operand")
        (unspec:SVE_FULL_F
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (unspec:SVE_FULL_F
             [(match_dup 1)
              (const_int SVE_STRICT_GP)
-             (match_operand:SVE_FULL_F 2 "register_operand" "w, w, w")]
+             (match_operand:SVE_FULL_F 2 "register_operand")]
             SVE_COND_FP_UNARY)
-          (match_operand:SVE_FULL_F 3 "aarch64_simd_reg_or_zero" "0, Dz, w")]
+          (match_operand:SVE_FULL_F 3 "aarch64_simd_reg_or_zero")]
          UNSPEC_SEL))]
   "TARGET_SVE && !rtx_equal_p (operands[2], operands[3])"
-  "@
-   <sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
-   movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
-   movprfx\t%0, %3\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>"
-  [(set_attr "movprfx" "*,yes,yes")]
+  {@ [ cons: =0 , 1   , 2 , 3  ; attrs: movprfx ]
+     [ &w       , Upl , w , 0  ; *              ] <sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+     [ ?&w      , Upl , w , Dz ; yes            ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+     [ ?&w      , Upl , w , w  ; yes            ] movprfx\t%0, %3\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+  }
 )
 
 ;; -------------------------------------------------------------------------
 ;; and would make the instruction seem less uniform to the register
 ;; allocator.
 (define_insn_and_split "@aarch64_pred_<optab><mode>"
-  [(set (match_operand:SVE_I 0 "register_operand" "=w, w, ?&w, ?&w")
+  [(set (match_operand:SVE_I 0 "register_operand")
        (unspec:SVE_I
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (SVE_INT_BINARY_IMM:SVE_I
-            (match_operand:SVE_I 2 "register_operand" "%0, 0, w, w")
-            (match_operand:SVE_I 3 "aarch64_sve_<sve_imm_con>_operand" "<sve_imm_con>, w, <sve_imm_con>, w"))]
+            (match_operand:SVE_I 2 "register_operand")
+            (match_operand:SVE_I 3 "aarch64_sve_<sve_imm_con>_operand"))]
          UNSPEC_PRED_X))]
   "TARGET_SVE"
-  "@
-   #
-   <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
-   #
-   movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
+  {@ [ cons: =0 , 1   , 2  , 3             ; attrs: movprfx ]
+     [ w        , Upl , %0 , <sve_imm_con> ; *              ] #
+     [ w        , Upl , 0  , w             ; *              ] <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+     [ ?&w      , Upl , w  , <sve_imm_con> ; yes            ] #
+     [ ?&w      , Upl , w  , w             ; yes            ] movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+  }
   ; Split the unpredicated form after reload, so that we don't have
   ; the unnecessary PTRUE.
   "&& reload_completed
   [(set (match_dup 0)
        (SVE_INT_BINARY_IMM:SVE_I (match_dup 2) (match_dup 3)))]
   ""
-  [(set_attr "movprfx" "*,*,yes,yes")]
 )
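
Two details in the @aarch64_pred_<optab><mode> split pattern above: the
commutativity marker "%" stays attached to the first alternative's
constraint for operand 2 ("%0"), just as it led the old string
"%0, 0, w, w"; and a constraint may itself be a mode attribute, so
"<sve_imm_con>" expands per mode inside the table exactly as it did in
the old string.  The "#" rows are the unpredicated immediate forms that
the "&& reload_completed ..." section splits after register allocation.
The first two rows, excerpted:

  {@ [ cons: =0 , 1   , 2  , 3             ; attrs: movprfx ]
     [ w        , Upl , %0 , <sve_imm_con> ; *              ] #
     [ w        , Upl , 0  , w             ; *              ] <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
  }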
 
 ;; Unpredicated binary operations with a constant (post-RA only).
 
 ;; Predicated integer operations, merging with the first input.
 (define_insn "*cond_<optab><mode>_2"
-  [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_I 0 "register_operand")
        (unspec:SVE_I
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (SVE_INT_BINARY:SVE_I
-            (match_operand:SVE_I 2 "register_operand" "0, w")
-            (match_operand:SVE_I 3 "register_operand" "w, w"))
+            (match_operand:SVE_I 2 "register_operand")
+            (match_operand:SVE_I 3 "register_operand"))
           (match_dup 2)]
          UNSPEC_SEL))]
   "TARGET_SVE"
-  "@
-   <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
-   movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1   , 2 , 3 ; attrs: movprfx ]
+     [ w        , Upl , 0 , w ; *              ] <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+     [ ?&w      , Upl , w , w ; yes            ] movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+  }
 )
 
 ;; Predicated integer operations, merging with the second input.
 (define_insn "*cond_<optab><mode>_3"
-  [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_I 0 "register_operand")
        (unspec:SVE_I
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (SVE_INT_BINARY:SVE_I
-            (match_operand:SVE_I 2 "register_operand" "w, w")
-            (match_operand:SVE_I 3 "register_operand" "0, w"))
+            (match_operand:SVE_I 2 "register_operand")
+            (match_operand:SVE_I 3 "register_operand"))
           (match_dup 3)]
          UNSPEC_SEL))]
   "TARGET_SVE"
-  "@
-   <sve_int_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
-   movprfx\t%0, %3\;<sve_int_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1   , 2 , 3 ; attrs: movprfx ]
+     [ w        , Upl , w , 0 ; *              ] <sve_int_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+     [ ?&w      , Upl , w , w ; yes            ] movprfx\t%0, %3\;<sve_int_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+  }
 )
 
 ;; Predicated integer operations, merging with an independent value.
 (define_insn_and_rewrite "*cond_<optab><mode>_any"
-  [(set (match_operand:SVE_I 0 "register_operand" "=&w, &w, &w, &w, ?&w")
+  [(set (match_operand:SVE_I 0 "register_operand")
        (unspec:SVE_I
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (SVE_INT_BINARY:SVE_I
-            (match_operand:SVE_I 2 "register_operand" "0, w, w, w, w")
-            (match_operand:SVE_I 3 "register_operand" "w, 0, w, w, w"))
-          (match_operand:SVE_I 4 "aarch64_simd_reg_or_zero" "Dz, Dz, Dz, 0, w")]
+            (match_operand:SVE_I 2 "register_operand")
+            (match_operand:SVE_I 3 "register_operand"))
+          (match_operand:SVE_I 4 "aarch64_simd_reg_or_zero")]
          UNSPEC_SEL))]
   "TARGET_SVE
    && !rtx_equal_p (operands[2], operands[4])
    && !rtx_equal_p (operands[3], operands[4])"
-  "@
-   movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
-   movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_int_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
-   movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
-   movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
-   #"
+  {@ [ cons: =0 , 1   , 2 , 3 , 4   ]
+     [ &w       , Upl , 0 , w , Dz  ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+     [ &w       , Upl , w , 0 , Dz  ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_int_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+     [ &w       , Upl , w , w , Dz  ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+     [ &w       , Upl , w , w , 0   ] movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+     [ ?&w      , Upl , w , w , w   ] #
+  }
   "&& reload_completed
    && register_operand (operands[4], <MODE>mode)
    && !rtx_equal_p (operands[0], operands[4])"
 ;; -------------------------------------------------------------------------
 
 (define_insn "add<mode>3"
-  [(set (match_operand:SVE_I 0 "register_operand" "=w, w, w, ?w, ?w, w")
+  [(set (match_operand:SVE_I 0 "register_operand")
        (plus:SVE_I
-         (match_operand:SVE_I 1 "register_operand" "%0, 0, 0, w, w, w")
-         (match_operand:SVE_I 2 "aarch64_sve_add_operand" "vsa, vsn, vsi, vsa, vsn, w")))]
+         (match_operand:SVE_I 1 "register_operand")
+         (match_operand:SVE_I 2 "aarch64_sve_add_operand")))]
   "TARGET_SVE"
-  "@
-   add\t%0.<Vetype>, %0.<Vetype>, #%D2
-   sub\t%0.<Vetype>, %0.<Vetype>, #%N2
-   * return aarch64_output_sve_vector_inc_dec (\"%0.<Vetype>\", operands[2]);
-   movprfx\t%0, %1\;add\t%0.<Vetype>, %0.<Vetype>, #%D2
-   movprfx\t%0, %1\;sub\t%0.<Vetype>, %0.<Vetype>, #%N2
-   add\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>"
-  [(set_attr "movprfx" "*,*,*,yes,yes,*")]
+  {@ [ cons: =0 , 1  , 2   ; attrs: movprfx ]
+     [ w        , %0 , vsa ; *              ] add\t%0.<Vetype>, %0.<Vetype>, #%D2
+     [ w        , 0  , vsn ; *              ] sub\t%0.<Vetype>, %0.<Vetype>, #%N2
+     [ w        , 0  , vsi ; *              ] << aarch64_output_sve_vector_inc_dec ("%0.<Vetype>", operands[2]);
+     [ ?w       , w  , vsa ; yes            ] movprfx\t%0, %1\;add\t%0.<Vetype>, %0.<Vetype>, #%D2
+     [ ?w       , w  , vsn ; yes            ] movprfx\t%0, %1\;sub\t%0.<Vetype>, %0.<Vetype>, #%N2
+     [ w        , w  , w   ; *              ] add\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>
+  }
 )
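
The add<mode>3 hunk above also shows the compact spelling for
C-computed output: a template that starts with "<<" is a C statement
whose result supplies the output string, replacing the old
"* return ..." form of the alternative.  The relevant row, excerpted:

  {@ [ cons: =0 , 1 , 2   ; attrs: movprfx ]
     [ w        , 0 , vsi ; *              ] << aarch64_output_sve_vector_inc_dec ("%0.<Vetype>", operands[2]);
  }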
 
 ;; Merging forms are handled through SVE_INT_BINARY.
 ;; -------------------------------------------------------------------------
 
 (define_insn "sub<mode>3"
-  [(set (match_operand:SVE_I 0 "register_operand" "=w, w, ?&w")
+  [(set (match_operand:SVE_I 0 "register_operand")
        (minus:SVE_I
-         (match_operand:SVE_I 1 "aarch64_sve_arith_operand" "w, vsa, vsa")
-         (match_operand:SVE_I 2 "register_operand" "w, 0, w")))]
+         (match_operand:SVE_I 1 "aarch64_sve_arith_operand")
+         (match_operand:SVE_I 2 "register_operand")))]
   "TARGET_SVE"
-  "@
-   sub\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>
-   subr\t%0.<Vetype>, %0.<Vetype>, #%D1
-   movprfx\t%0, %2\;subr\t%0.<Vetype>, %0.<Vetype>, #%D1"
-  [(set_attr "movprfx" "*,*,yes")]
+  {@ [ cons: =0 , 1   , 2 ; attrs: movprfx ]
+     [ w        , w   , w ; *              ] sub\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>
+     [ w        , vsa , 0 ; *              ] subr\t%0.<Vetype>, %0.<Vetype>, #%D1
+     [ ?&w      , vsa , w ; yes            ] movprfx\t%0, %2\;subr\t%0.<Vetype>, %0.<Vetype>, #%D1
+  }
 )
 
 ;; Merging forms are handled through SVE_INT_BINARY.
 
 ;; Predicated integer absolute difference.
 (define_insn "@aarch64_pred_<su>abd<mode>"
-  [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_I 0 "register_operand")
        (minus:SVE_I
          (unspec:SVE_I
-           [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+           [(match_operand:<VPRED> 1 "register_operand")
             (USMAX:SVE_I
-              (match_operand:SVE_I 2 "register_operand" "%0, w")
-              (match_operand:SVE_I 3 "register_operand" "w, w"))]
+              (match_operand:SVE_I 2 "register_operand")
+              (match_operand:SVE_I 3 "register_operand"))]
            UNSPEC_PRED_X)
          (unspec:SVE_I
            [(match_dup 1)
               (match_dup 3))]
            UNSPEC_PRED_X)))]
   "TARGET_SVE"
-  "@
-   <su>abd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
-   movprfx\t%0, %2\;<su>abd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1   , 2  , 3 ; attrs: movprfx ]
+     [ w        , Upl , %0 , w ; *              ] <su>abd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+     [ ?&w      , Upl , w  , w ; yes            ] movprfx\t%0, %2\;<su>abd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+  }
 )
 
 (define_expand "@aarch64_cond_<su>abd<mode>"
 
 ;; Predicated integer absolute difference, merging with the first input.
 (define_insn_and_rewrite "*aarch64_cond_<su>abd<mode>_2"
-  [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_I 0 "register_operand")
        (unspec:SVE_I
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (minus:SVE_I
             (unspec:SVE_I
               [(match_operand 4)
                (USMAX:SVE_I
-                 (match_operand:SVE_I 2 "register_operand" "0, w")
-                 (match_operand:SVE_I 3 "register_operand" "w, w"))]
+                 (match_operand:SVE_I 2 "register_operand")
+                 (match_operand:SVE_I 3 "register_operand"))]
               UNSPEC_PRED_X)
             (unspec:SVE_I
               [(match_operand 5)
           (match_dup 2)]
          UNSPEC_SEL))]
   "TARGET_SVE"
-  "@
-   <su>abd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
-   movprfx\t%0, %2\;<su>abd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
+  {@ [ cons: =0 , 1   , 2 , 3 ; attrs: movprfx ]
+     [ w        , Upl , 0 , w ; *              ] <su>abd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+     [ ?&w      , Upl , w , w ; yes            ] movprfx\t%0, %2\;<su>abd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+  }
   "&& (!CONSTANT_P (operands[4]) || !CONSTANT_P (operands[5]))"
   {
     operands[4] = operands[5] = CONSTM1_RTX (<VPRED>mode);
   }
-  [(set_attr "movprfx" "*,yes")]
 )
 
 ;; Predicated integer absolute difference, merging with the second input.
 (define_insn_and_rewrite "*aarch64_cond_<su>abd<mode>_3"
-  [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_I 0 "register_operand")
        (unspec:SVE_I
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (minus:SVE_I
             (unspec:SVE_I
               [(match_operand 4)
                (USMAX:SVE_I
-                 (match_operand:SVE_I 2 "register_operand" "w, w")
-                 (match_operand:SVE_I 3 "register_operand" "0, w"))]
+                 (match_operand:SVE_I 2 "register_operand")
+                 (match_operand:SVE_I 3 "register_operand"))]
               UNSPEC_PRED_X)
             (unspec:SVE_I
               [(match_operand 5)
           (match_dup 3)]
          UNSPEC_SEL))]
   "TARGET_SVE"
-  "@
-   <su>abd\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
-   movprfx\t%0, %3\;<su>abd\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>"
+  {@ [ cons: =0 , 1   , 2 , 3 ; attrs: movprfx ]
+     [ w        , Upl , w , 0 ; *              ] <su>abd\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+     [ ?&w      , Upl , w , w ; yes            ] movprfx\t%0, %3\;<su>abd\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+  }
   "&& (!CONSTANT_P (operands[4]) || !CONSTANT_P (operands[5]))"
   {
     operands[4] = operands[5] = CONSTM1_RTX (<VPRED>mode);
   }
-  [(set_attr "movprfx" "*,yes")]
 )
 
 ;; Predicated integer absolute difference, merging with an independent value.
 (define_insn_and_rewrite "*aarch64_cond_<su>abd<mode>_any"
-  [(set (match_operand:SVE_I 0 "register_operand" "=&w, &w, &w, &w, ?&w")
+  [(set (match_operand:SVE_I 0 "register_operand")
        (unspec:SVE_I
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (minus:SVE_I
             (unspec:SVE_I
               [(match_operand 5)
                (USMAX:SVE_I
-                 (match_operand:SVE_I 2 "register_operand" "0, w, w, w, w")
-                 (match_operand:SVE_I 3 "register_operand" "w, 0, w, w, w"))]
+                 (match_operand:SVE_I 2 "register_operand")
+                 (match_operand:SVE_I 3 "register_operand"))]
               UNSPEC_PRED_X)
             (unspec:SVE_I
               [(match_operand 6)
                  (match_dup 2)
                  (match_dup 3))]
               UNSPEC_PRED_X))
-          (match_operand:SVE_I 4 "aarch64_simd_reg_or_zero" "Dz, Dz, Dz, 0, w")]
+          (match_operand:SVE_I 4 "aarch64_simd_reg_or_zero")]
          UNSPEC_SEL))]
   "TARGET_SVE
    && !rtx_equal_p (operands[2], operands[4])
    && !rtx_equal_p (operands[3], operands[4])"
-  "@
-   movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<su>abd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
-   movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<su>abd\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
-   movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<su>abd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
-   movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<su>abd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
-   #"
+  {@ [ cons: =0 , 1   , 2 , 3 , 4   ]
+     [ &w       , Upl , 0 , w , Dz  ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<su>abd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+     [ &w       , Upl , w , 0 , Dz  ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<su>abd\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+     [ &w       , Upl , w , w , Dz  ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<su>abd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+     [ &w       , Upl , w , w , 0   ] movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<su>abd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+     [ ?&w      , Upl , w , w , w   ] #
+  }
   "&& 1"
   {
     if (!CONSTANT_P (operands[5]) || !CONSTANT_P (operands[6]))
 
 ;; Unpredicated saturating signed addition and subtraction.
 (define_insn "@aarch64_sve_<optab><mode>"
-  [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, w, ?&w, ?&w, w")
+  [(set (match_operand:SVE_FULL_I 0 "register_operand")
        (SBINQOPS:SVE_FULL_I
-         (match_operand:SVE_FULL_I 1 "register_operand" "0, 0, w, w, w")
-         (match_operand:SVE_FULL_I 2 "aarch64_sve_sqadd_operand" "vsQ, vsS, vsQ, vsS, w")))]
+         (match_operand:SVE_FULL_I 1 "register_operand")
+         (match_operand:SVE_FULL_I 2 "aarch64_sve_sqadd_operand")))]
   "TARGET_SVE"
-  "@
-   <binqops_op>\t%0.<Vetype>, %0.<Vetype>, #%D2
-   <binqops_op_rev>\t%0.<Vetype>, %0.<Vetype>, #%N2
-   movprfx\t%0, %1\;<binqops_op>\t%0.<Vetype>, %0.<Vetype>, #%D2
-   movprfx\t%0, %1\;<binqops_op_rev>\t%0.<Vetype>, %0.<Vetype>, #%N2
-   <binqops_op>\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>"
-  [(set_attr "movprfx" "*,*,yes,yes,*")]
+  {@ [ cons: =0 , 1 , 2   ; attrs: movprfx ]
+     [ w        , 0 , vsQ ; *              ] <binqops_op>\t%0.<Vetype>, %0.<Vetype>, #%D2
+     [ w        , 0 , vsS ; *              ] <binqops_op_rev>\t%0.<Vetype>, %0.<Vetype>, #%N2
+     [ ?&w      , w , vsQ ; yes            ] movprfx\t%0, %1\;<binqops_op>\t%0.<Vetype>, %0.<Vetype>, #%D2
+     [ ?&w      , w , vsS ; yes            ] movprfx\t%0, %1\;<binqops_op_rev>\t%0.<Vetype>, %0.<Vetype>, #%N2
+     [ w        , w , w   ; *              ] <binqops_op>\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>
+  }
 )
 
 ;; Unpredicated saturating unsigned addition and subtraction.
 (define_insn "@aarch64_sve_<optab><mode>"
-  [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, ?&w, w")
+  [(set (match_operand:SVE_FULL_I 0 "register_operand")
        (UBINQOPS:SVE_FULL_I
-         (match_operand:SVE_FULL_I 1 "register_operand" "0, w, w")
-         (match_operand:SVE_FULL_I 2 "aarch64_sve_arith_operand" "vsa, vsa, w")))]
+         (match_operand:SVE_FULL_I 1 "register_operand")
+         (match_operand:SVE_FULL_I 2 "aarch64_sve_arith_operand")))]
   "TARGET_SVE"
-  "@
-   <binqops_op>\t%0.<Vetype>, %0.<Vetype>, #%D2
-   movprfx\t%0, %1\;<binqops_op>\t%0.<Vetype>, %0.<Vetype>, #%D2
-   <binqops_op>\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>"
-  [(set_attr "movprfx" "*,yes,*")]
+  {@ [ cons: =0 , 1 , 2   ; attrs: movprfx ]
+     [ w        , 0 , vsa ; *              ] <binqops_op>\t%0.<Vetype>, %0.<Vetype>, #%D2
+     [ ?&w      , w , vsa ; yes            ] movprfx\t%0, %1\;<binqops_op>\t%0.<Vetype>, %0.<Vetype>, #%D2
+     [ w        , w , w   ; *              ] <binqops_op>\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>
+  }
 )
 
 ;; -------------------------------------------------------------------------
 
 ;; Predicated highpart multiplication.
 (define_insn "@aarch64_pred_<optab><mode>"
-  [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_I 0 "register_operand")
        (unspec:SVE_I
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (unspec:SVE_I
-            [(match_operand:SVE_I 2 "register_operand" "%0, w")
-             (match_operand:SVE_I 3 "register_operand" "w, w")]
+            [(match_operand:SVE_I 2 "register_operand")
+             (match_operand:SVE_I 3 "register_operand")]
             MUL_HIGHPART)]
          UNSPEC_PRED_X))]
   "TARGET_SVE"
-  "@
-   <su>mulh\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
-   movprfx\t%0, %2\;<su>mulh\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1   , 2  , 3 ; attrs: movprfx ]
+     [ w        , Upl , %0 , w ; *              ] <su>mulh\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+     [ ?&w      , Upl , w  , w ; yes            ] movprfx\t%0, %2\;<su>mulh\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+  }
 )
 
 ;; Predicated highpart multiplications with merging.
 
 ;; Predicated highpart multiplications, merging with the first input.
 (define_insn "*cond_<optab><mode>_2"
-  [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_FULL_I 0 "register_operand")
        (unspec:SVE_FULL_I
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (unspec:SVE_FULL_I
-            [(match_operand:SVE_FULL_I 2 "register_operand" "0, w")
-             (match_operand:SVE_FULL_I 3 "register_operand" "w, w")]
+            [(match_operand:SVE_FULL_I 2 "register_operand")
+             (match_operand:SVE_FULL_I 3 "register_operand")]
             MUL_HIGHPART)
           (match_dup 2)]
          UNSPEC_SEL))]
   "TARGET_SVE"
-  "@
-   <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
-   movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
-  [(set_attr "movprfx" "*,yes")])
+  {@ [ cons: =0 , 1   , 2 , 3 ; attrs: movprfx ]
+     [ w        , Upl , 0 , w ; *              ] <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+     [ ?&w      , Upl , w , w ; yes            ] movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+  }
+)
 
 ;; Predicated highpart multiplications, merging with zero.
 (define_insn "*cond_<optab><mode>_z"
-  [(set (match_operand:SVE_FULL_I 0 "register_operand" "=&w, &w")
+  [(set (match_operand:SVE_FULL_I 0 "register_operand")
        (unspec:SVE_FULL_I
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (unspec:SVE_FULL_I
-            [(match_operand:SVE_FULL_I 2 "register_operand" "%0, w")
-             (match_operand:SVE_FULL_I 3 "register_operand" "w, w")]
+            [(match_operand:SVE_FULL_I 2 "register_operand")
+             (match_operand:SVE_FULL_I 3 "register_operand")]
             MUL_HIGHPART)
           (match_operand:SVE_FULL_I 4 "aarch64_simd_imm_zero")]
          UNSPEC_SEL))]
   "TARGET_SVE"
-  "@
-   movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
-   movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
+  {@ [ cons: =0 , 1   , 2  , 3  ]
+     [ &w       , Upl , %0 , w  ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+     [ &w       , Upl , w  , w  ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+  }
   [(set_attr "movprfx" "yes")])
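
Conversely, when an attribute takes the same value in every
alternative there is nothing to tabulate, so *cond_<optab><mode>_z
keeps its plain set_attr after the closing "}".  Had it been folded
into the table instead, the equivalent (sketch) form would read:

  {@ [ cons: =0 , 1   , 2  , 3 ; attrs: movprfx ]
     [ &w       , Upl , %0 , w ; yes            ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
     [ &w       , Upl , w  , w ; yes            ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
  }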
 
 ;; -------------------------------------------------------------------------
 
 ;; Integer division predicated with a PTRUE.
 (define_insn "@aarch64_pred_<optab><mode>"
-  [(set (match_operand:SVE_FULL_SDI 0 "register_operand" "=w, w, ?&w")
+  [(set (match_operand:SVE_FULL_SDI 0 "register_operand")
        (unspec:SVE_FULL_SDI
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (SVE_INT_BINARY_SD:SVE_FULL_SDI
-            (match_operand:SVE_FULL_SDI 2 "register_operand" "0, w, w")
-            (match_operand:SVE_FULL_SDI 3 "register_operand" "w, 0, w"))]
+            (match_operand:SVE_FULL_SDI 2 "register_operand")
+            (match_operand:SVE_FULL_SDI 3 "register_operand"))]
          UNSPEC_PRED_X))]
   "TARGET_SVE"
-  "@
-   <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
-   <sve_int_op>r\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
-   movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
-  [(set_attr "movprfx" "*,*,yes")]
+  {@ [ cons: =0 , 1   , 2 , 3 ; attrs: movprfx ]
+     [ w        , Upl , 0 , w ; *              ] <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+     [ w        , Upl , w , 0 ; *              ] <sve_int_op>r\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+     [ ?&w      , Upl , w , w ; yes            ] movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+  }
 )
 
 ;; Predicated integer division with merging.
 
 ;; Predicated integer division, merging with the first input.
 (define_insn "*cond_<optab><mode>_2"
-  [(set (match_operand:SVE_FULL_SDI 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_FULL_SDI 0 "register_operand")
        (unspec:SVE_FULL_SDI
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (SVE_INT_BINARY_SD:SVE_FULL_SDI
-            (match_operand:SVE_FULL_SDI 2 "register_operand" "0, w")
-            (match_operand:SVE_FULL_SDI 3 "register_operand" "w, w"))
+            (match_operand:SVE_FULL_SDI 2 "register_operand")
+            (match_operand:SVE_FULL_SDI 3 "register_operand"))
           (match_dup 2)]
          UNSPEC_SEL))]
   "TARGET_SVE"
-  "@
-   <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
-   movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1   , 2 , 3 ; attrs: movprfx ]
+     [ w        , Upl , 0 , w ; *              ] <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+     [ ?&w      , Upl , w , w ; yes            ] movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+  }
 )
 
 ;; Predicated integer division, merging with the second input.
 (define_insn "*cond_<optab><mode>_3"
-  [(set (match_operand:SVE_FULL_SDI 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_FULL_SDI 0 "register_operand")
        (unspec:SVE_FULL_SDI
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (SVE_INT_BINARY_SD:SVE_FULL_SDI
-            (match_operand:SVE_FULL_SDI 2 "register_operand" "w, w")
-            (match_operand:SVE_FULL_SDI 3 "register_operand" "0, w"))
+            (match_operand:SVE_FULL_SDI 2 "register_operand")
+            (match_operand:SVE_FULL_SDI 3 "register_operand"))
           (match_dup 3)]
          UNSPEC_SEL))]
   "TARGET_SVE"
-  "@
-   <sve_int_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
-   movprfx\t%0, %3\;<sve_int_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1   , 2 , 3 ; attrs: movprfx ]
+     [ w        , Upl , w , 0 ; *              ] <sve_int_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+     [ ?&w      , Upl , w , w ; yes            ] movprfx\t%0, %3\;<sve_int_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+  }
 )
 
 ;; Predicated integer division, merging with an independent value.
 (define_insn_and_rewrite "*cond_<optab><mode>_any"
-  [(set (match_operand:SVE_FULL_SDI 0 "register_operand" "=&w, &w, &w, &w, ?&w")
+  [(set (match_operand:SVE_FULL_SDI 0 "register_operand")
        (unspec:SVE_FULL_SDI
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (SVE_INT_BINARY_SD:SVE_FULL_SDI
-            (match_operand:SVE_FULL_SDI 2 "register_operand" "0, w, w, w, w")
-            (match_operand:SVE_FULL_SDI 3 "register_operand" "w, 0, w, w, w"))
-          (match_operand:SVE_FULL_SDI 4 "aarch64_simd_reg_or_zero" "Dz, Dz, Dz, 0, w")]
+            (match_operand:SVE_FULL_SDI 2 "register_operand")
+            (match_operand:SVE_FULL_SDI 3 "register_operand"))
+          (match_operand:SVE_FULL_SDI 4 "aarch64_simd_reg_or_zero")]
          UNSPEC_SEL))]
   "TARGET_SVE
    && !rtx_equal_p (operands[2], operands[4])
    && !rtx_equal_p (operands[3], operands[4])"
-  "@
-   movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
-   movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_int_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
-   movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
-   movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
-   #"
+  {@ [ cons: =0 , 1   , 2 , 3 , 4   ]
+     [ &w       , Upl , 0 , w , Dz  ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+     [ &w       , Upl , w , 0 , Dz  ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_int_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+     [ &w       , Upl , w , w , Dz  ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+     [ &w       , Upl , w , w , 0   ] movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+     [ ?&w      , Upl , w , w , w   ] #
+  }
   "&& reload_completed
    && register_operand (operands[4], <MODE>mode)
    && !rtx_equal_p (operands[0], operands[4])"
 
 ;; Unpredicated integer binary logical operations.
 (define_insn "<optab><mode>3"
-  [(set (match_operand:SVE_I 0 "register_operand" "=w, ?w, w")
+  [(set (match_operand:SVE_I 0 "register_operand")
        (LOGICAL:SVE_I
-         (match_operand:SVE_I 1 "register_operand" "%0, w, w")
-         (match_operand:SVE_I 2 "aarch64_sve_logical_operand" "vsl, vsl, w")))]
+         (match_operand:SVE_I 1 "register_operand")
+         (match_operand:SVE_I 2 "aarch64_sve_logical_operand")))]
   "TARGET_SVE"
-  "@
-   <logical>\t%0.<Vetype>, %0.<Vetype>, #%C2
-   movprfx\t%0, %1\;<logical>\t%0.<Vetype>, %0.<Vetype>, #%C2
-   <logical>\t%0.d, %1.d, %2.d"
-  [(set_attr "movprfx" "*,yes,*")]
+  {@ [ cons: =0 , 1  , 2   ; attrs: movprfx ]
+     [ w        , %0 , vsl ; *              ] <logical>\t%0.<Vetype>, %0.<Vetype>, #%C2
+     [ ?w       , w  , vsl ; yes            ] movprfx\t%0, %1\;<logical>\t%0.<Vetype>, %0.<Vetype>, #%C2
+     [ w        , w  , w   ; *              ] <logical>\t%0.d, %1.d, %2.d
+  }
 )
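The "%" in the "%0" constraint is the standard commutativity marker, telling
reload that operands 1 and 2 may be swapped; it simply migrates from the old
"%0, w, w" string into the first row of the table.  A hedged sketch on a
hypothetical commutative pattern:

  {@ [ cons: =0 , 1  , 2 ]
     [ w        , %0 , w ] some_commutative_op\t%0, %0, %2
  }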
 
 ;; Merging forms are handled through SVE_INT_BINARY.
 
 ;; Predicated integer BIC, merging with the first input.
 (define_insn "*cond_bic<mode>_2"
-  [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_I 0 "register_operand")
        (unspec:SVE_I
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (and:SVE_I
             (not:SVE_I
-              (match_operand:SVE_I 3 "register_operand" "w, w"))
-            (match_operand:SVE_I 2 "register_operand" "0, w"))
+              (match_operand:SVE_I 3 "register_operand"))
+            (match_operand:SVE_I 2 "register_operand"))
           (match_dup 2)]
          UNSPEC_SEL))]
   "TARGET_SVE"
-  "@
-   bic\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
-   movprfx\t%0, %2\;bic\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1   , 2 , 3 ; attrs: movprfx ]
+     [ w        , Upl , 0 , w ; *              ] bic\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+     [ ?&w      , Upl , w , w ; yes            ] movprfx\t%0, %2\;bic\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+  }
 )
 
 ;; Predicated integer BIC, merging with an independent value.
 (define_insn_and_rewrite "*cond_bic<mode>_any"
-  [(set (match_operand:SVE_I 0 "register_operand" "=&w, &w, &w, ?&w")
+  [(set (match_operand:SVE_I 0 "register_operand")
        (unspec:SVE_I
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (and:SVE_I
             (not:SVE_I
-              (match_operand:SVE_I 3 "register_operand" "w, w, w, w"))
-            (match_operand:SVE_I 2 "register_operand" "0, w, w, w"))
-          (match_operand:SVE_I 4 "aarch64_simd_reg_or_zero" "Dz, Dz, 0, w")]
+              (match_operand:SVE_I 3 "register_operand"))
+            (match_operand:SVE_I 2 "register_operand"))
+          (match_operand:SVE_I 4 "aarch64_simd_reg_or_zero")]
          UNSPEC_SEL))]
   "TARGET_SVE && !rtx_equal_p (operands[2], operands[4])"
-  "@
-   movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;bic\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
-   movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;bic\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
-   movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;bic\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
-   #"
+  {@ [ cons: =0 , 1   , 2 , 3 , 4   ]
+     [ &w       , Upl , 0 , w , Dz  ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;bic\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+     [ &w       , Upl , w , w , Dz  ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;bic\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+     [ &w       , Upl , w , w , 0   ] movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;bic\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+     [ ?&w      , Upl , w , w , w   ] #
+  }
   "&& reload_completed
    && register_operand (operands[4], <MODE>mode)
    && !rtx_equal_p (operands[0], operands[4])"
 ;; likely to gain much and would make the instruction seem less uniform
 ;; to the register allocator.
 (define_insn_and_split "@aarch64_pred_<optab><mode>"
-  [(set (match_operand:SVE_I 0 "register_operand" "=w, w, w, ?&w")
+  [(set (match_operand:SVE_I 0 "register_operand")
        (unspec:SVE_I
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (ASHIFT:SVE_I
-            (match_operand:SVE_I 2 "register_operand" "w, 0, w, w")
-            (match_operand:SVE_I 3 "aarch64_sve_<lr>shift_operand" "D<lr>, w, 0, w"))]
+            (match_operand:SVE_I 2 "register_operand")
+            (match_operand:SVE_I 3 "aarch64_sve_<lr>shift_operand"))]
          UNSPEC_PRED_X))]
   "TARGET_SVE"
-  "@
-   #
-   <shift>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
-   <shift>r\t%0.<Vetype>, %1/m, %3.<Vetype>, %2.<Vetype>
-   movprfx\t%0, %2\;<shift>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
+  {@ [ cons: =0 , 1   , 2 , 3     ; attrs: movprfx ]
+     [ w        , Upl , w , D<lr> ; *              ] #
+     [ w        , Upl , 0 , w     ; *              ] <shift>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+     [ w        , Upl , w , 0     ; *              ] <shift>r\t%0.<Vetype>, %1/m, %3.<Vetype>, %2.<Vetype>
+     [ ?&w      , Upl , w , w     ; yes            ] movprfx\t%0, %2\;<shift>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+  }
   "&& reload_completed
    && !register_operand (operands[3], <MODE>mode)"
   [(set (match_dup 0) (ASHIFT:SVE_I (match_dup 2) (match_dup 3)))]
   ""
-  [(set_attr "movprfx" "*,*,*,yes")]
 )
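A "#" template, as in the D<lr> row, means the alternative has no
single-instruction form; for this define_insn_and_split it is rewritten by
the split pattern that follows (the plain ASHIFT set) once reload_completed
holds and operand 3 is no longer a register.  The hunk also shows the attrs
change from the other side: with "; attrs: movprfx" in the header, the old
trailing [(set_attr "movprfx" "*,*,*,yes")] line is dropped entirely.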
 
 ;; Unpredicated shift operations by a constant (post-RA only).
 
 ;; Predicated integer shift, merging with the first input.
 (define_insn "*cond_<optab><mode>_2_const"
-  [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_I 0 "register_operand")
        (unspec:SVE_I
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (ASHIFT:SVE_I
-            (match_operand:SVE_I 2 "register_operand" "0, w")
+            (match_operand:SVE_I 2 "register_operand")
             (match_operand:SVE_I 3 "aarch64_simd_<lr>shift_imm"))
           (match_dup 2)]
         UNSPEC_SEL))]
   "TARGET_SVE"
-  "@
-   <shift>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
-   movprfx\t%0, %2\;<shift>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1   , 2 ; attrs: movprfx ]
+     [ w        , Upl , 0 ; *              ] <shift>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+     [ ?&w      , Upl , w ; yes            ] movprfx\t%0, %2\;<shift>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+  }
 )
 
 ;; Predicated integer shift, merging with an independent value.
 (define_insn_and_rewrite "*cond_<optab><mode>_any_const"
-  [(set (match_operand:SVE_I 0 "register_operand" "=w, &w, ?&w")
+  [(set (match_operand:SVE_I 0 "register_operand")
        (unspec:SVE_I
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (ASHIFT:SVE_I
-            (match_operand:SVE_I 2 "register_operand" "w, w, w")
+            (match_operand:SVE_I 2 "register_operand")
             (match_operand:SVE_I 3 "aarch64_simd_<lr>shift_imm"))
-          (match_operand:SVE_I 4 "aarch64_simd_reg_or_zero" "Dz, 0, w")]
+          (match_operand:SVE_I 4 "aarch64_simd_reg_or_zero")]
         UNSPEC_SEL))]
   "TARGET_SVE && !rtx_equal_p (operands[2], operands[4])"
-  "@
-   movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<shift>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
-   movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<shift>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
-   #"
+  {@ [ cons: =0 , 1   , 2 , 4   ]
+     [ w        , Upl , w , Dz  ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<shift>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+     [ &w       , Upl , w , 0   ] movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<shift>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+     [ ?&w      , Upl , w , w   ] #
+  }
   "&& reload_completed
    && register_operand (operands[4], <MODE>mode)
    && !rtx_equal_p (operands[0], operands[4])"
 ;; Predicated shifts of narrow elements by 64-bit amounts, merging with
 ;; the first input.
 (define_insn "*cond_<sve_int_op><mode>_m"
-  [(set (match_operand:SVE_FULL_BHSI 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_FULL_BHSI 0 "register_operand")
        (unspec:SVE_FULL_BHSI
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (unspec:SVE_FULL_BHSI
-            [(match_operand:SVE_FULL_BHSI 2 "register_operand" "0, w")
-             (match_operand:VNx2DI 3 "register_operand" "w, w")]
+            [(match_operand:SVE_FULL_BHSI 2 "register_operand")
+             (match_operand:VNx2DI 3 "register_operand")]
             SVE_SHIFT_WIDE)
           (match_dup 2)]
         UNSPEC_SEL))]
   "TARGET_SVE"
-  "@
-   <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.d
-   movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.d"
-  [(set_attr "movprfx" "*, yes")])
+  {@ [ cons: =0 , 1   , 2 , 3 ; attrs: movprfx ]
+     [ w        , Upl , 0 , w ; *              ] <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.d
+     [ ?&w      , Upl , w , w ; yes            ] movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.d
+  }
+)
 
 ;; Predicated shifts of narrow elements by 64-bit amounts, merging with zero.
 (define_insn "*cond_<sve_int_op><mode>_z"
-  [(set (match_operand:SVE_FULL_BHSI 0 "register_operand" "=&w, &w")
+  [(set (match_operand:SVE_FULL_BHSI 0 "register_operand")
        (unspec:SVE_FULL_BHSI
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (unspec:SVE_FULL_BHSI
-            [(match_operand:SVE_FULL_BHSI 2 "register_operand" "0, w")
-             (match_operand:VNx2DI 3 "register_operand" "w, w")]
+            [(match_operand:SVE_FULL_BHSI 2 "register_operand")
+             (match_operand:VNx2DI 3 "register_operand")]
             SVE_SHIFT_WIDE)
           (match_operand:SVE_FULL_BHSI 4 "aarch64_simd_imm_zero")]
         UNSPEC_SEL))]
   "TARGET_SVE"
-  "@
-   movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.d
-   movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.d"
+  {@ [ cons: =0 , 1   , 2 , 3  ]
+     [ &w       , Upl , 0 , w  ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.d
+     [ &w       , Upl , w , w  ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.d
+  }
   [(set_attr "movprfx" "yes")])
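When every alternative needs a MOVPRFX, as in the two rows here, there is
nothing to vary per row, so the pattern keeps the plain trailing
[(set_attr "movprfx" "yes")] instead of adding an "attrs:" column; the
MUL_HIGHPART pattern at the start of this series makes the same choice.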
 
 ;; -------------------------------------------------------------------------
 
 ;; Predicated ASRD.
 (define_insn "*sdiv_pow2<mode>3"
-  [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_I 0 "register_operand")
        (unspec:SVE_I
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (unspec:SVE_I
-            [(match_operand:SVE_I 2 "register_operand" "0, w")
+            [(match_operand:SVE_I 2 "register_operand")
              (match_operand:SVE_I 3 "aarch64_simd_rshift_imm")]
             UNSPEC_ASRD)]
          UNSPEC_PRED_X))]
   "TARGET_SVE"
-  "@
-   asrd\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
-   movprfx\t%0, %2\;asrd\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3"
-  [(set_attr "movprfx" "*,yes")])
+  {@ [ cons: =0 , 1   , 2 ; attrs: movprfx ]
+     [ w        , Upl , 0 ; *              ] asrd\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+     [ ?&w      , Upl , w ; yes            ] movprfx\t%0, %2\;asrd\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+  }
+)
 
 ;; Predicated shift with merging.
 (define_expand "@cond_<sve_int_op><mode>"
 
 ;; Predicated shift, merging with the first input.
 (define_insn_and_rewrite "*cond_<sve_int_op><mode>_2"
-  [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_I 0 "register_operand")
        (unspec:SVE_I
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (unspec:SVE_I
             [(match_operand 4)
              (unspec:SVE_I
-               [(match_operand:SVE_I 2 "register_operand" "0, w")
+               [(match_operand:SVE_I 2 "register_operand")
                 (match_operand:SVE_I 3 "aarch64_simd_<lr>shift_imm")]
                SVE_INT_SHIFT_IMM)]
             UNSPEC_PRED_X)
           (match_dup 2)]
          UNSPEC_SEL))]
   "TARGET_SVE"
-  "@
-   <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
-   movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3"
+  {@ [ cons: =0 , 1   , 2 ; attrs: movprfx ]
+     [ w        , Upl , 0 ; *              ] <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+     [ ?&w      , Upl , w ; yes            ] movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+  }
   "&& !CONSTANT_P (operands[4])"
   {
     operands[4] = CONSTM1_RTX (<VPRED>mode);
   }
-  [(set_attr "movprfx" "*,yes")])
+)
 
 ;; Predicated shift, merging with an independent value.
 (define_insn_and_rewrite "*cond_<sve_int_op><mode>_any"
-  [(set (match_operand:SVE_I 0 "register_operand" "=w, &w, ?&w")
+  [(set (match_operand:SVE_I 0 "register_operand")
        (unspec:SVE_I
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (unspec:SVE_I
             [(match_operand 5)
              (unspec:SVE_I
-               [(match_operand:SVE_I 2 "register_operand" "w, w, w")
+               [(match_operand:SVE_I 2 "register_operand")
                 (match_operand:SVE_I 3 "aarch64_simd_<lr>shift_imm")]
                SVE_INT_SHIFT_IMM)]
             UNSPEC_PRED_X)
-          (match_operand:SVE_I 4 "aarch64_simd_reg_or_zero" "Dz, 0, w")]
+          (match_operand:SVE_I 4 "aarch64_simd_reg_or_zero")]
         UNSPEC_SEL))]
   "TARGET_SVE && !rtx_equal_p (operands[2], operands[4])"
-  "@
-   movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
-   movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
-   #"
+  {@ [ cons: =0 , 1   , 2 , 4   ]
+     [ w        , Upl , w , Dz  ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+     [ &w       , Upl , w , 0   ] movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+     [ ?&w      , Upl , w , w   ] #
+  }
   "&& reload_completed
    && register_operand (operands[4], <MODE>mode)
    && !rtx_equal_p (operands[0], operands[4])"
 ;; Predicated floating-point binary operations that take an integer
 ;; as their second operand.
 (define_insn "@aarch64_pred_<optab><mode>"
-  [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_FULL_F 0 "register_operand")
        (unspec:SVE_FULL_F
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (match_operand:SI 4 "aarch64_sve_gp_strictness")
-          (match_operand:SVE_FULL_F 2 "register_operand" "0, w")
-          (match_operand:<V_INT_EQUIV> 3 "register_operand" "w, w")]
+          (match_operand:SVE_FULL_F 2 "register_operand")
+          (match_operand:<V_INT_EQUIV> 3 "register_operand")]
          SVE_COND_FP_BINARY_INT))]
   "TARGET_SVE"
-  "@
-   <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
-   movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1   , 2 , 3 ; attrs: movprfx ]
+     [ w        , Upl , 0 , w ; *              ] <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+     [ ?&w      , Upl , w , w ; yes            ] movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+  }
 )
 
 ;; Predicated floating-point binary operations with merging, taking an
 ;; Predicated floating-point binary operations that take an integer as their
 ;; second operand, with inactive lanes coming from the first operand.
 (define_insn_and_rewrite "*cond_<optab><mode>_2_relaxed"
-  [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_FULL_F 0 "register_operand")
        (unspec:SVE_FULL_F
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (unspec:SVE_FULL_F
             [(match_operand 4)
              (const_int SVE_RELAXED_GP)
-             (match_operand:SVE_FULL_F 2 "register_operand" "0, w")
-             (match_operand:<V_INT_EQUIV> 3 "register_operand" "w, w")]
+             (match_operand:SVE_FULL_F 2 "register_operand")
+             (match_operand:<V_INT_EQUIV> 3 "register_operand")]
             SVE_COND_FP_BINARY_INT)
           (match_dup 2)]
          UNSPEC_SEL))]
   "TARGET_SVE"
-  "@
-   <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
-   movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
+  {@ [ cons: =0 , 1   , 2 , 3 ; attrs: movprfx ]
+     [ w        , Upl , 0 , w ; *              ] <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+     [ ?&w      , Upl , w , w ; yes            ] movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+  }
   "&& !rtx_equal_p (operands[1], operands[4])"
   {
     operands[4] = copy_rtx (operands[1]);
   }
-  [(set_attr "movprfx" "*,yes")]
 )
 
 (define_insn "*cond_<optab><mode>_2_strict"
-  [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_FULL_F 0 "register_operand")
        (unspec:SVE_FULL_F
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (unspec:SVE_FULL_F
             [(match_dup 1)
              (const_int SVE_STRICT_GP)
-             (match_operand:SVE_FULL_F 2 "register_operand" "0, w")
-             (match_operand:<V_INT_EQUIV> 3 "register_operand" "w, w")]
+             (match_operand:SVE_FULL_F 2 "register_operand")
+             (match_operand:<V_INT_EQUIV> 3 "register_operand")]
             SVE_COND_FP_BINARY_INT)
           (match_dup 2)]
          UNSPEC_SEL))]
   "TARGET_SVE"
-  "@
-   <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
-   movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1   , 2 , 3 ; attrs: movprfx ]
+     [ w        , Upl , 0 , w ; *              ] <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+     [ ?&w      , Upl , w , w ; yes            ] movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+  }
 )
 
 ;; Predicated floating-point binary operations that take an integer as
 ;; their second operand, with the values of inactive lanes being distinct
 ;; from the other inputs.
 (define_insn_and_rewrite "*cond_<optab><mode>_any_relaxed"
-  [(set (match_operand:SVE_FULL_F 0 "register_operand" "=&w, &w, &w, ?&w")
+  [(set (match_operand:SVE_FULL_F 0 "register_operand")
        (unspec:SVE_FULL_F
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (unspec:SVE_FULL_F
             [(match_operand 5)
              (const_int SVE_RELAXED_GP)
-             (match_operand:SVE_FULL_F 2 "register_operand" "0, w, w, w")
-             (match_operand:<V_INT_EQUIV> 3 "register_operand" "w, w, w, w")]
+             (match_operand:SVE_FULL_F 2 "register_operand")
+             (match_operand:<V_INT_EQUIV> 3 "register_operand")]
             SVE_COND_FP_BINARY_INT)
-          (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero" "Dz, Dz, 0, w")]
+          (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero")]
          UNSPEC_SEL))]
   "TARGET_SVE && !rtx_equal_p (operands[2], operands[4])"
-  "@
-   movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
-   movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
-   movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
-   #"
+  {@ [ cons: =0 , 1   , 2 , 3 , 4   ]
+     [ &w       , Upl , 0 , w , Dz  ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+     [ &w       , Upl , w , w , Dz  ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+     [ &w       , Upl , w , w , 0   ] movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+     [ ?&w      , Upl , w , w , w   ] #
+  }
   "&& 1"
   {
     if (reload_completed
 )
 
 (define_insn_and_rewrite "*cond_<optab><mode>_any_strict"
-  [(set (match_operand:SVE_FULL_F 0 "register_operand" "=&w, &w, &w, ?&w")
+  [(set (match_operand:SVE_FULL_F 0 "register_operand")
        (unspec:SVE_FULL_F
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (unspec:SVE_FULL_F
             [(match_dup 1)
              (const_int SVE_STRICT_GP)
-             (match_operand:SVE_FULL_F 2 "register_operand" "0, w, w, w")
-             (match_operand:<V_INT_EQUIV> 3 "register_operand" "w, w, w, w")]
+             (match_operand:SVE_FULL_F 2 "register_operand")
+             (match_operand:<V_INT_EQUIV> 3 "register_operand")]
             SVE_COND_FP_BINARY_INT)
-          (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero" "Dz, Dz, 0, w")]
+          (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero")]
          UNSPEC_SEL))]
   "TARGET_SVE && !rtx_equal_p (operands[2], operands[4])"
-  "@
-   movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
-   movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
-   movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
-   #"
+  {@ [ cons: =0 , 1   , 2 , 3 , 4   ]
+     [ &w       , Upl , 0 , w , Dz  ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+     [ &w       , Upl , w , w , Dz  ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+     [ &w       , Upl , w , w , 0   ] movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+     [ ?&w      , Upl , w , w , w   ] #
+  }
   "&& reload_completed
    && register_operand (operands[4], <MODE>mode)
    && !rtx_equal_p (operands[0], operands[4])"
 
 ;; Predicated floating-point binary operations that have no immediate forms.
 (define_insn "@aarch64_pred_<optab><mode>"
-  [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, w, ?&w")
+  [(set (match_operand:SVE_FULL_F 0 "register_operand")
        (unspec:SVE_FULL_F
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (match_operand:SI 4 "aarch64_sve_gp_strictness")
-          (match_operand:SVE_FULL_F 2 "register_operand" "0, w, w")
-          (match_operand:SVE_FULL_F 3 "register_operand" "w, 0, w")]
+          (match_operand:SVE_FULL_F 2 "register_operand")
+          (match_operand:SVE_FULL_F 3 "register_operand")]
          SVE_COND_FP_BINARY_REG))]
   "TARGET_SVE"
-  "@
-   <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
-   <sve_fp_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
-   movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
-  [(set_attr "movprfx" "*,*,yes")]
+  {@ [ cons: =0 , 1   , 2 , 3 ; attrs: movprfx ]
+     [ w        , Upl , 0 , w ; *              ] <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+     [ w        , Upl , w , 0 ; *              ] <sve_fp_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+     [ ?&w      , Upl , w , w ; yes            ] movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+  }
 )
 
 ;; Predicated floating-point operations with merging.
 
 ;; Predicated floating-point operations, merging with the first input.
 (define_insn_and_rewrite "*cond_<optab><mode>_2_relaxed"
-  [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_FULL_F 0 "register_operand")
        (unspec:SVE_FULL_F
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (unspec:SVE_FULL_F
             [(match_operand 4)
              (const_int SVE_RELAXED_GP)
-             (match_operand:SVE_FULL_F 2 "register_operand" "0, w")
-             (match_operand:SVE_FULL_F 3 "register_operand" "w, w")]
+             (match_operand:SVE_FULL_F 2 "register_operand")
+             (match_operand:SVE_FULL_F 3 "register_operand")]
             SVE_COND_FP_BINARY)
           (match_dup 2)]
          UNSPEC_SEL))]
   "TARGET_SVE"
-  "@
-   <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
-   movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
+  {@ [ cons: =0 , 1   , 2 , 3 ; attrs: movprfx ]
+     [ w        , Upl , 0 , w ; *              ] <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+     [ ?&w      , Upl , w , w ; yes            ] movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+  }
   "&& !rtx_equal_p (operands[1], operands[4])"
   {
     operands[4] = copy_rtx (operands[1]);
   }
-  [(set_attr "movprfx" "*,yes")]
 )
 
 (define_insn "*cond_<optab><mode>_2_strict"
-  [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_FULL_F 0 "register_operand")
        (unspec:SVE_FULL_F
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (unspec:SVE_FULL_F
             [(match_dup 1)
              (const_int SVE_STRICT_GP)
-             (match_operand:SVE_FULL_F 2 "register_operand" "0, w")
-             (match_operand:SVE_FULL_F 3 "register_operand" "w, w")]
+             (match_operand:SVE_FULL_F 2 "register_operand")
+             (match_operand:SVE_FULL_F 3 "register_operand")]
             SVE_COND_FP_BINARY)
           (match_dup 2)]
          UNSPEC_SEL))]
   "TARGET_SVE"
-  "@
-   <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
-   movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1   , 2 , 3 ; attrs: movprfx ]
+     [ w        , Upl , 0 , w ; *              ] <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+     [ ?&w      , Upl , w , w ; yes            ] movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+  }
 )
 
 ;; Same for operations that take a 1-bit constant.
 (define_insn_and_rewrite "*cond_<optab><mode>_2_const_relaxed"
-  [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?w")
+  [(set (match_operand:SVE_FULL_F 0 "register_operand")
        (unspec:SVE_FULL_F
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (unspec:SVE_FULL_F
             [(match_operand 4)
              (const_int SVE_RELAXED_GP)
-             (match_operand:SVE_FULL_F 2 "register_operand" "0, w")
+             (match_operand:SVE_FULL_F 2 "register_operand")
              (match_operand:SVE_FULL_F 3 "<sve_pred_fp_rhs2_immediate>")]
             SVE_COND_FP_BINARY_I1)
           (match_dup 2)]
          UNSPEC_SEL))]
   "TARGET_SVE"
-  "@
-   <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
-   movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3"
+  {@ [ cons: =0 , 1   , 2 ; attrs: movprfx ]
+     [ w        , Upl , 0 ; *              ] <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+     [ ?w       , Upl , w ; yes            ] movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+  }
   "&& !rtx_equal_p (operands[1], operands[4])"
   {
     operands[4] = copy_rtx (operands[1]);
   }
-  [(set_attr "movprfx" "*,yes")]
 )
 
 (define_insn "*cond_<optab><mode>_2_const_strict"
-  [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?w")
+  [(set (match_operand:SVE_FULL_F 0 "register_operand")
        (unspec:SVE_FULL_F
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (unspec:SVE_FULL_F
             [(match_dup 1)
              (const_int SVE_STRICT_GP)
-             (match_operand:SVE_FULL_F 2 "register_operand" "0, w")
+             (match_operand:SVE_FULL_F 2 "register_operand")
              (match_operand:SVE_FULL_F 3 "<sve_pred_fp_rhs2_immediate>")]
             SVE_COND_FP_BINARY_I1)
           (match_dup 2)]
          UNSPEC_SEL))]
   "TARGET_SVE"
-  "@
-   <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
-   movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1   , 2 ; attrs: movprfx ]
+     [ w        , Upl , 0 ; *              ] <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+     [ ?w       , Upl , w ; yes            ] movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+  }
 )
 
 ;; Predicated floating-point operations, merging with the second input.
 (define_insn_and_rewrite "*cond_<optab><mode>_3_relaxed"
-  [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_FULL_F 0 "register_operand")
        (unspec:SVE_FULL_F
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (unspec:SVE_FULL_F
             [(match_operand 4)
              (const_int SVE_RELAXED_GP)
-             (match_operand:SVE_FULL_F 2 "register_operand" "w, w")
-             (match_operand:SVE_FULL_F 3 "register_operand" "0, w")]
+             (match_operand:SVE_FULL_F 2 "register_operand")
+             (match_operand:SVE_FULL_F 3 "register_operand")]
             SVE_COND_FP_BINARY)
           (match_dup 3)]
          UNSPEC_SEL))]
   "TARGET_SVE"
-  "@
-   <sve_fp_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
-   movprfx\t%0, %3\;<sve_fp_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>"
+  {@ [ cons: =0 , 1   , 2 , 3 ; attrs: movprfx ]
+     [ w        , Upl , w , 0 ; *              ] <sve_fp_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+     [ ?&w      , Upl , w , w ; yes            ] movprfx\t%0, %3\;<sve_fp_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+  }
   "&& !rtx_equal_p (operands[1], operands[4])"
   {
     operands[4] = copy_rtx (operands[1]);
   }
-  [(set_attr "movprfx" "*,yes")]
 )
 
 (define_insn "*cond_<optab><mode>_3_strict"
-  [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_FULL_F 0 "register_operand")
        (unspec:SVE_FULL_F
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (unspec:SVE_FULL_F
             [(match_dup 1)
              (const_int SVE_STRICT_GP)
-             (match_operand:SVE_FULL_F 2 "register_operand" "w, w")
-             (match_operand:SVE_FULL_F 3 "register_operand" "0, w")]
+             (match_operand:SVE_FULL_F 2 "register_operand")
+             (match_operand:SVE_FULL_F 3 "register_operand")]
             SVE_COND_FP_BINARY)
           (match_dup 3)]
          UNSPEC_SEL))]
   "TARGET_SVE"
-  "@
-   <sve_fp_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
-   movprfx\t%0, %3\;<sve_fp_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1   , 2 , 3 ; attrs: movprfx ]
+     [ w        , Upl , w , 0 ; *              ] <sve_fp_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+     [ ?&w      , Upl , w , w ; yes            ] movprfx\t%0, %3\;<sve_fp_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+  }
 )
 
 ;; Predicated floating-point operations, merging with an independent value.
 (define_insn_and_rewrite "*cond_<optab><mode>_any_relaxed"
-  [(set (match_operand:SVE_FULL_F 0 "register_operand" "=&w, &w, &w, &w, ?&w")
+  [(set (match_operand:SVE_FULL_F 0 "register_operand")
        (unspec:SVE_FULL_F
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (unspec:SVE_FULL_F
             [(match_operand 5)
              (const_int SVE_RELAXED_GP)
-             (match_operand:SVE_FULL_F 2 "register_operand" "0, w, w, w, w")
-             (match_operand:SVE_FULL_F 3 "register_operand" "w, 0, w, w, w")]
+             (match_operand:SVE_FULL_F 2 "register_operand")
+             (match_operand:SVE_FULL_F 3 "register_operand")]
             SVE_COND_FP_BINARY)
-          (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero" "Dz, Dz, Dz, 0, w")]
+          (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero")]
          UNSPEC_SEL))]
   "TARGET_SVE
    && !rtx_equal_p (operands[2], operands[4])
    && !rtx_equal_p (operands[3], operands[4])"
-  "@
-   movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
-   movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_fp_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
-   movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
-   movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
-   #"
+  {@ [ cons: =0 , 1   , 2 , 3 , 4   ]
+     [ &w       , Upl , 0 , w , Dz  ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+     [ &w       , Upl , w , 0 , Dz  ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_fp_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+     [ &w       , Upl , w , w , Dz  ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+     [ &w       , Upl , w , w , 0   ] movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+     [ ?&w      , Upl , w , w , w   ] #
+  }
   "&& 1"
   {
     if (reload_completed
 )
 
 (define_insn_and_rewrite "*cond_<optab><mode>_any_strict"
-  [(set (match_operand:SVE_FULL_F 0 "register_operand" "=&w, &w, &w, &w, ?&w")
+  [(set (match_operand:SVE_FULL_F 0 "register_operand")
        (unspec:SVE_FULL_F
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (unspec:SVE_FULL_F
             [(match_dup 1)
              (const_int SVE_STRICT_GP)
-             (match_operand:SVE_FULL_F 2 "register_operand" "0, w, w, w, w")
-             (match_operand:SVE_FULL_F 3 "register_operand" "w, 0, w, w, w")]
+             (match_operand:SVE_FULL_F 2 "register_operand")
+             (match_operand:SVE_FULL_F 3 "register_operand")]
             SVE_COND_FP_BINARY)
-          (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero" "Dz, Dz, Dz, 0, w")]
+          (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero")]
          UNSPEC_SEL))]
   "TARGET_SVE
    && !rtx_equal_p (operands[2], operands[4])
    && !rtx_equal_p (operands[3], operands[4])"
-  "@
-   movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
-   movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_fp_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
-   movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
-   movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
-   #"
+  {@ [ cons: =0 , 1   , 2 , 3 , 4   ]
+     [ &w       , Upl , 0 , w , Dz  ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+     [ &w       , Upl , w , 0 , Dz  ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_fp_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+     [ &w       , Upl , w , w , Dz  ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+     [ &w       , Upl , w , w , 0   ] movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+     [ ?&w      , Upl , w , w , w   ] #
+  }
   "&& reload_completed
    && register_operand (operands[4], <MODE>mode)
    && !rtx_equal_p (operands[0], operands[4])"
 
 ;; Same for operations that take a 1-bit constant.
 (define_insn_and_rewrite "*cond_<optab><mode>_any_const_relaxed"
-  [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, w, ?w")
+  [(set (match_operand:SVE_FULL_F 0 "register_operand")
        (unspec:SVE_FULL_F
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (unspec:SVE_FULL_F
             [(match_operand 5)
              (const_int SVE_RELAXED_GP)
-             (match_operand:SVE_FULL_F 2 "register_operand" "w, w, w")
+             (match_operand:SVE_FULL_F 2 "register_operand")
              (match_operand:SVE_FULL_F 3 "<sve_pred_fp_rhs2_immediate>")]
             SVE_COND_FP_BINARY_I1)
-          (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero" "Dz, 0, w")]
+          (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero")]
          UNSPEC_SEL))]
   "TARGET_SVE && !rtx_equal_p (operands[2], operands[4])"
-  "@
-   movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
-   movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
-   #"
+  {@ [ cons: =0 , 1   , 2 , 4   ]
+     [ w        , Upl , w , Dz  ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+     [ w        , Upl , w , 0   ] movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+     [ ?w       , Upl , w , w   ] #
+  }
   "&& 1"
   {
     if (reload_completed
 )
 
 (define_insn_and_rewrite "*cond_<optab><mode>_any_const_strict"
-  [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, w, ?w")
+  [(set (match_operand:SVE_FULL_F 0 "register_operand")
        (unspec:SVE_FULL_F
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (unspec:SVE_FULL_F
             [(match_dup 1)
              (const_int SVE_STRICT_GP)
-             (match_operand:SVE_FULL_F 2 "register_operand" "w, w, w")
+             (match_operand:SVE_FULL_F 2 "register_operand")
              (match_operand:SVE_FULL_F 3 "<sve_pred_fp_rhs2_immediate>")]
             SVE_COND_FP_BINARY_I1)
-          (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero" "Dz, 0, w")]
+          (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero")]
          UNSPEC_SEL))]
   "TARGET_SVE && !rtx_equal_p (operands[2], operands[4])"
-  "@
-   movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
-   movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
-   #"
+  {@ [ cons: =0 , 1   , 2 , 4   ]
+     [ w        , Upl , w , Dz  ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+     [ w        , Upl , w , 0   ] movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+     [ ?w       , Upl , w , w   ] #
+  }
   "&& reload_completed
    && register_operand (operands[4], <MODE>mode)
    && !rtx_equal_p (operands[0], operands[4])"
 
 ;; Predicated floating-point addition.
 (define_insn_and_split "@aarch64_pred_<optab><mode>"
-  [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, w, w, w, ?&w, ?&w, ?&w")
+  [(set (match_operand:SVE_FULL_F 0 "register_operand")
        (unspec:SVE_FULL_F
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl, Upl, Upl, Upl")
-          (match_operand:SI 4 "aarch64_sve_gp_strictness" "i, i, Z, Ui1, i, i, Ui1")
-          (match_operand:SVE_FULL_F 2 "register_operand" "%0, 0, w, 0, w, w, w")
-          (match_operand:SVE_FULL_F 3 "aarch64_sve_float_arith_with_sub_operand" "vsA, vsN, w, w, vsA, vsN, w")]
+         [(match_operand:<VPRED> 1 "register_operand")
+          (match_operand:SI 4 "aarch64_sve_gp_strictness")
+          (match_operand:SVE_FULL_F 2 "register_operand")
+          (match_operand:SVE_FULL_F 3 "aarch64_sve_float_arith_with_sub_operand")]
          SVE_COND_FP_ADD))]
   "TARGET_SVE"
-  "@
-   fadd\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
-   fsub\t%0.<Vetype>, %1/m, %0.<Vetype>, #%N3
-   #
-   fadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
-   movprfx\t%0, %2\;fadd\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
-   movprfx\t%0, %2\;fsub\t%0.<Vetype>, %1/m, %0.<Vetype>, #%N3
-   movprfx\t%0, %2\;fadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
+  {@ [ cons: =0 , 1   , 2  , 3   , 4   ; attrs: movprfx ]
+     [ w        , Upl , %0 , vsA , i   ; *              ] fadd\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+     [ w        , Upl , 0  , vsN , i   ; *              ] fsub\t%0.<Vetype>, %1/m, %0.<Vetype>, #%N3
+     [ w        , Upl , w  , w   , Z   ; *              ] #
+     [ w        , Upl , 0  , w   , Ui1 ; *              ] fadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+     [ ?&w      , Upl , w  , vsA , i   ; yes            ] movprfx\t%0, %2\;fadd\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+     [ ?&w      , Upl , w  , vsN , i   ; yes            ] movprfx\t%0, %2\;fsub\t%0.<Vetype>, %1/m, %0.<Vetype>, #%N3
+     [ ?&w      , Upl , w  , w   , Ui1 ; yes            ] movprfx\t%0, %2\;fadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+  }
  ;; Split the unpredicated form after reload, so that we don't have
  ;; the unnecessary PTRUE.
   "&& reload_completed
    && INTVAL (operands[4]) == SVE_RELAXED_GP"
   [(set (match_dup 0) (plus:SVE_FULL_F (match_dup 2) (match_dup 3)))]
   ""
-  [(set_attr "movprfx" "*,*,*,*,yes,yes,yes")]
 )
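Operand 4, the aarch64_sve_gp_strictness code, also gets a constraint
column: the "i" rows accept either strictness, while "Z" and "Ui1" pin it
to the constants 0 and 1, presumably SVE_RELAXED_GP and SVE_STRICT_GP.
That matches the old "i, i, Z, Ui1, i, i, Ui1" string and the split
condition, which only fires for the relaxed "Z" alternative whose template
is "#".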
 
 ;; Predicated floating-point addition of a constant, merging with the
 ;; first input.
 (define_insn_and_rewrite "*cond_add<mode>_2_const_relaxed"
-  [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, w, ?w, ?w")
+  [(set (match_operand:SVE_FULL_F 0 "register_operand")
        (unspec:SVE_FULL_F
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (unspec:SVE_FULL_F
             [(match_operand 4)
              (const_int SVE_RELAXED_GP)
-             (match_operand:SVE_FULL_F 2 "register_operand" "0, 0, w, w")
-             (match_operand:SVE_FULL_F 3 "aarch64_sve_float_arith_with_sub_immediate" "vsA, vsN, vsA, vsN")]
+             (match_operand:SVE_FULL_F 2 "register_operand")
+             (match_operand:SVE_FULL_F 3 "aarch64_sve_float_arith_with_sub_immediate")]
             UNSPEC_COND_FADD)
           (match_dup 2)]
          UNSPEC_SEL))]
   "TARGET_SVE"
-  "@
-   fadd\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
-   fsub\t%0.<Vetype>, %1/m, %0.<Vetype>, #%N3
-   movprfx\t%0, %2\;fadd\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
-   movprfx\t%0, %2\;fsub\t%0.<Vetype>, %1/m, %0.<Vetype>, #%N3"
+  {@ [ cons: =0 , 1   , 2 , 3   ; attrs: movprfx ]
+     [ w        , Upl , 0 , vsA ; *              ] fadd\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+     [ w        , Upl , 0 , vsN ; *              ] fsub\t%0.<Vetype>, %1/m, %0.<Vetype>, #%N3
+     [ ?w       , Upl , w , vsA ; yes            ] movprfx\t%0, %2\;fadd\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+     [ ?w       , Upl , w , vsN ; yes            ] movprfx\t%0, %2\;fsub\t%0.<Vetype>, %1/m, %0.<Vetype>, #%N3
+  }
   "&& !rtx_equal_p (operands[1], operands[4])"
   {
     operands[4] = copy_rtx (operands[1]);
   }
-  [(set_attr "movprfx" "*,*,yes,yes")]
 )
 
 (define_insn "*cond_add<mode>_2_const_strict"
-  [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, w, ?w, ?w")
+  [(set (match_operand:SVE_FULL_F 0 "register_operand")
        (unspec:SVE_FULL_F
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (unspec:SVE_FULL_F
             [(match_dup 1)
              (const_int SVE_STRICT_GP)
-             (match_operand:SVE_FULL_F 2 "register_operand" "0, 0, w, w")
-             (match_operand:SVE_FULL_F 3 "aarch64_sve_float_arith_with_sub_immediate" "vsA, vsN, vsA, vsN")]
+             (match_operand:SVE_FULL_F 2 "register_operand")
+             (match_operand:SVE_FULL_F 3 "aarch64_sve_float_arith_with_sub_immediate")]
             UNSPEC_COND_FADD)
           (match_dup 2)]
          UNSPEC_SEL))]
   "TARGET_SVE"
-  "@
-   fadd\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
-   fsub\t%0.<Vetype>, %1/m, %0.<Vetype>, #%N3
-   movprfx\t%0, %2\;fadd\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
-   movprfx\t%0, %2\;fsub\t%0.<Vetype>, %1/m, %0.<Vetype>, #%N3"
-  [(set_attr "movprfx" "*,*,yes,yes")]
+  {@ [ cons: =0 , 1   , 2 , 3   ; attrs: movprfx ]
+     [ w        , Upl , 0 , vsA ; *              ] fadd\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+     [ w        , Upl , 0 , vsN ; *              ] fsub\t%0.<Vetype>, %1/m, %0.<Vetype>, #%N3
+     [ ?w       , Upl , w , vsA ; yes            ] movprfx\t%0, %2\;fadd\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+     [ ?w       , Upl , w , vsN ; yes            ] movprfx\t%0, %2\;fsub\t%0.<Vetype>, %1/m, %0.<Vetype>, #%N3
+  }
 )
 
 ;; Predicated floating-point addition of a constant, merging with an
 ;; independent value.
 (define_insn_and_rewrite "*cond_add<mode>_any_const_relaxed"
-  [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, w, w, w, ?w, ?w")
+  [(set (match_operand:SVE_FULL_F 0 "register_operand")
        (unspec:SVE_FULL_F
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl, Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (unspec:SVE_FULL_F
             [(match_operand 5)
              (const_int SVE_RELAXED_GP)
-             (match_operand:SVE_FULL_F 2 "register_operand" "w, w, w, w, w, w")
-             (match_operand:SVE_FULL_F 3 "aarch64_sve_float_arith_with_sub_immediate" "vsA, vsN, vsA, vsN, vsA, vsN")]
+             (match_operand:SVE_FULL_F 2 "register_operand")
+             (match_operand:SVE_FULL_F 3 "aarch64_sve_float_arith_with_sub_immediate")]
             UNSPEC_COND_FADD)
-          (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero" "Dz, Dz, 0, 0, w, w")]
+          (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero")]
          UNSPEC_SEL))]
   "TARGET_SVE && !rtx_equal_p (operands[2], operands[4])"
-  "@
-   movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;fadd\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
-   movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;fsub\t%0.<Vetype>, %1/m, %0.<Vetype>, #%N3
-   movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;fadd\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
-   movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;fsub\t%0.<Vetype>, %1/m, %0.<Vetype>, #%N3
-   #
-   #"
+  {@ [ cons: =0 , 1   , 2 , 3   , 4   ]
+     [ w        , Upl , w , vsA , Dz  ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;fadd\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+     [ w        , Upl , w , vsN , Dz  ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;fsub\t%0.<Vetype>, %1/m, %0.<Vetype>, #%N3
+     [ w        , Upl , w , vsA , 0   ] movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;fadd\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+     [ w        , Upl , w , vsN , 0   ] movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;fsub\t%0.<Vetype>, %1/m, %0.<Vetype>, #%N3
+     [ ?w       , Upl , w , vsA , w   ] #
+     [ ?w       , Upl , w , vsN , w   ] #
+  }
   "&& 1"
   {
     if (reload_completed
 )
 
 (define_insn_and_rewrite "*cond_add<mode>_any_const_strict"
-  [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, w, w, w, ?w, ?w")
+  [(set (match_operand:SVE_FULL_F 0 "register_operand")
        (unspec:SVE_FULL_F
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl, Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (unspec:SVE_FULL_F
             [(match_dup 1)
              (const_int SVE_STRICT_GP)
-             (match_operand:SVE_FULL_F 2 "register_operand" "w, w, w, w, w, w")
-             (match_operand:SVE_FULL_F 3 "aarch64_sve_float_arith_with_sub_immediate" "vsA, vsN, vsA, vsN, vsA, vsN")]
+             (match_operand:SVE_FULL_F 2 "register_operand")
+             (match_operand:SVE_FULL_F 3 "aarch64_sve_float_arith_with_sub_immediate")]
             UNSPEC_COND_FADD)
-          (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero" "Dz, Dz, 0, 0, w, w")]
+          (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero")]
          UNSPEC_SEL))]
   "TARGET_SVE && !rtx_equal_p (operands[2], operands[4])"
-  "@
-   movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;fadd\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
-   movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;fsub\t%0.<Vetype>, %1/m, %0.<Vetype>, #%N3
-   movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;fadd\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
-   movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;fsub\t%0.<Vetype>, %1/m, %0.<Vetype>, #%N3
-   #
-   #"
+  {@ [ cons: =0 , 1   , 2 , 3   , 4   ]
+     [ w        , Upl , w , vsA , Dz  ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;fadd\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+     [ w        , Upl , w , vsN , Dz  ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;fsub\t%0.<Vetype>, %1/m, %0.<Vetype>, #%N3
+     [ w        , Upl , w , vsA , 0   ] movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;fadd\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+     [ w        , Upl , w , vsN , 0   ] movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;fsub\t%0.<Vetype>, %1/m, %0.<Vetype>, #%N3
+     [ ?w       , Upl , w , vsA , w   ] #
+     [ ?w       , Upl , w , vsN , w   ] #
+  }
   "&& reload_completed
    && register_operand (operands[4], <MODE>mode)
    && !rtx_equal_p (operands[0], operands[4])"
 
 ;; Predicated FCADD.
 (define_insn "@aarch64_pred_<optab><mode>"
-  [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_FULL_F 0 "register_operand")
        (unspec:SVE_FULL_F
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (match_operand:SI 4 "aarch64_sve_gp_strictness")
-          (match_operand:SVE_FULL_F 2 "register_operand" "0, w")
-          (match_operand:SVE_FULL_F 3 "register_operand" "w, w")]
+          (match_operand:SVE_FULL_F 2 "register_operand")
+          (match_operand:SVE_FULL_F 3 "register_operand")]
          SVE_COND_FCADD))]
   "TARGET_SVE"
-  "@
-   fcadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>, #<rot>
-   movprfx\t%0, %2\;fcadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>, #<rot>"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1   , 2 , 3 ; attrs: movprfx ]
+     [ w        , Upl , 0 , w ; *              ] fcadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>, #<rot>
+     [ ?&w      , Upl , w , w ; yes            ] movprfx\t%0, %2\;fcadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>, #<rot>
+  }
 )
 
 ;; Predicated FCADD with merging.
 
 ;; Predicated FCADD, merging with the first input.
 (define_insn_and_rewrite "*cond_<optab><mode>_2_relaxed"
-  [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_FULL_F 0 "register_operand")
        (unspec:SVE_FULL_F
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (unspec:SVE_FULL_F
             [(match_operand 4)
              (const_int SVE_RELAXED_GP)
-             (match_operand:SVE_FULL_F 2 "register_operand" "0, w")
-             (match_operand:SVE_FULL_F 3 "register_operand" "w, w")]
+             (match_operand:SVE_FULL_F 2 "register_operand")
+             (match_operand:SVE_FULL_F 3 "register_operand")]
             SVE_COND_FCADD)
           (match_dup 2)]
          UNSPEC_SEL))]
   "TARGET_SVE"
-  "@
-   fcadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>, #<rot>
-   movprfx\t%0, %2\;fcadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>, #<rot>"
+  {@ [ cons: =0 , 1   , 2 , 3 ; attrs: movprfx ]
+     [ w        , Upl , 0 , w ; *              ] fcadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>, #<rot>
+     [ ?&w      , Upl , w , w ; yes            ] movprfx\t%0, %2\;fcadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>, #<rot>
+  }
   "&& !rtx_equal_p (operands[1], operands[4])"
   {
     operands[4] = copy_rtx (operands[1]);
   }
-  [(set_attr "movprfx" "*,yes")]
 )
 
 (define_insn "*cond_<optab><mode>_2_strict"
-  [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_FULL_F 0 "register_operand")
        (unspec:SVE_FULL_F
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (unspec:SVE_FULL_F
             [(match_dup 1)
              (const_int SVE_STRICT_GP)
-             (match_operand:SVE_FULL_F 2 "register_operand" "0, w")
-             (match_operand:SVE_FULL_F 3 "register_operand" "w, w")]
+             (match_operand:SVE_FULL_F 2 "register_operand")
+             (match_operand:SVE_FULL_F 3 "register_operand")]
             SVE_COND_FCADD)
           (match_dup 2)]
          UNSPEC_SEL))]
   "TARGET_SVE"
-  "@
-   fcadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>, #<rot>
-   movprfx\t%0, %2\;fcadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>, #<rot>"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1   , 2 , 3 ; attrs: movprfx ]
+     [ w        , Upl , 0 , w ; *              ] fcadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>, #<rot>
+     [ ?&w      , Upl , w , w ; yes            ] movprfx\t%0, %2\;fcadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>, #<rot>
+  }
 )
 
 ;; Predicated FCADD, merging with an independent value.
 (define_insn_and_rewrite "*cond_<optab><mode>_any_relaxed"
-  [(set (match_operand:SVE_FULL_F 0 "register_operand" "=&w, &w, &w, ?&w")
+  [(set (match_operand:SVE_FULL_F 0 "register_operand")
        (unspec:SVE_FULL_F
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (unspec:SVE_FULL_F
             [(match_operand 5)
              (const_int SVE_RELAXED_GP)
-             (match_operand:SVE_FULL_F 2 "register_operand" "w, 0, w, w")
-             (match_operand:SVE_FULL_F 3 "register_operand" "w, w, w, w")]
+             (match_operand:SVE_FULL_F 2 "register_operand")
+             (match_operand:SVE_FULL_F 3 "register_operand")]
             SVE_COND_FCADD)
-          (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero" "Dz, Dz, 0, w")]
+          (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero")]
          UNSPEC_SEL))]
   "TARGET_SVE && !rtx_equal_p (operands[2], operands[4])"
-  "@
-   movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;fcadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>, #<rot>
-   movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;fcadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>, #<rot>
-   movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;fcadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>, #<rot>
-   #"
+  {@ [ cons: =0 , 1   , 2 , 3 , 4   ]
+     [ &w       , Upl , w , w , Dz  ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;fcadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>, #<rot>
+     [ &w       , Upl , 0 , w , Dz  ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;fcadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>, #<rot>
+     [ &w       , Upl , w , w , 0   ] movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;fcadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>, #<rot>
+     [ ?&w      , Upl , w , w , w   ] #
+  }
   "&& 1"
   {
     if (reload_completed
 )
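
In these "_any" merging patterns the last column is the merge value
(operand 4): Dz selects merging with zero (movprfx with /z), 0 ties it to
the destination (movprfx with /m), and w is an independent register, for
which no single movprfx form exists, so that row emits "#" and the insn is
rewritten after reload.  Roughly (a hedged sketch of the resulting code,
not the literal rewrite), the independent-register case becomes a
predicated select followed by the destructive operation:

  sel\t%0.<Vetype>, %1, %2.<Vetype>, %4.<Vetype>
  fcadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>, #<rot>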
 
 (define_insn_and_rewrite "*cond_<optab><mode>_any_strict"
-  [(set (match_operand:SVE_FULL_F 0 "register_operand" "=&w, &w, &w, ?&w")
+  [(set (match_operand:SVE_FULL_F 0 "register_operand")
        (unspec:SVE_FULL_F
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (unspec:SVE_FULL_F
             [(match_dup 1)
              (const_int SVE_STRICT_GP)
-             (match_operand:SVE_FULL_F 2 "register_operand" "w, 0, w, w")
-             (match_operand:SVE_FULL_F 3 "register_operand" "w, w, w, w")]
+             (match_operand:SVE_FULL_F 2 "register_operand")
+             (match_operand:SVE_FULL_F 3 "register_operand")]
             SVE_COND_FCADD)
-          (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero" "Dz, Dz, 0, w")]
+          (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero")]
          UNSPEC_SEL))]
   "TARGET_SVE && !rtx_equal_p (operands[2], operands[4])"
-  "@
-   movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;fcadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>, #<rot>
-   movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;fcadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>, #<rot>
-   movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;fcadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>, #<rot>
-   #"
+  {@ [ cons: =0 , 1   , 2 , 3 , 4   ]
+     [ &w       , Upl , w , w , Dz  ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;fcadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>, #<rot>
+     [ &w       , Upl , 0 , w , Dz  ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;fcadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>, #<rot>
+     [ &w       , Upl , w , w , 0   ] movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;fcadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>, #<rot>
+     [ ?&w      , Upl , w , w , w   ] #
+  }
   "&& reload_completed
    && register_operand (operands[4], <MODE>mode)
    && !rtx_equal_p (operands[0], operands[4])"
 
 ;; Predicated floating-point subtraction.
 (define_insn_and_split "@aarch64_pred_<optab><mode>"
-  [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, w, w, w, ?&w, ?&w")
+  [(set (match_operand:SVE_FULL_F 0 "register_operand")
        (unspec:SVE_FULL_F
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl, Upl, Upl")
-          (match_operand:SI 4 "aarch64_sve_gp_strictness" "i, Z, Ui1, Ui1, i, Ui1")
-          (match_operand:SVE_FULL_F 2 "aarch64_sve_float_arith_operand" "vsA, w, 0, w, vsA, w")
-          (match_operand:SVE_FULL_F 3 "register_operand" "0, w, w, 0, w, w")]
+         [(match_operand:<VPRED> 1 "register_operand")
+          (match_operand:SI 4 "aarch64_sve_gp_strictness")
+          (match_operand:SVE_FULL_F 2 "aarch64_sve_float_arith_operand")
+          (match_operand:SVE_FULL_F 3 "register_operand")]
          SVE_COND_FP_SUB))]
   "TARGET_SVE"
-  "@
-   fsubr\t%0.<Vetype>, %1/m, %0.<Vetype>, #%2
-   #
-   fsub\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
-   fsubr\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
-   movprfx\t%0, %3\;fsubr\t%0.<Vetype>, %1/m, %0.<Vetype>, #%2
-   movprfx\t%0, %2\;fsub\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
+  {@ [ cons: =0 , 1   , 2   , 3 , 4   ; attrs: movprfx ]
+     [ w        , Upl , vsA , 0 , i   ; *              ] fsubr\t%0.<Vetype>, %1/m, %0.<Vetype>, #%2
+     [ w        , Upl , w   , w , Z   ; *              ] #
+     [ w        , Upl , 0   , w , Ui1 ; *              ] fsub\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+     [ w        , Upl , w   , 0 , Ui1 ; *              ] fsubr\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+     [ ?&w      , Upl , vsA , w , i   ; yes            ] movprfx\t%0, %3\;fsubr\t%0.<Vetype>, %1/m, %0.<Vetype>, #%2
+     [ ?&w      , Upl , w   , w , Ui1 ; yes            ] movprfx\t%0, %2\;fsub\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+  }
   ; Split the unpredicated form after reload, so that we don't have
   ; the unnecessary PTRUE.
   "&& reload_completed
    && INTVAL (operands[4]) == SVE_RELAXED_GP"
   [(set (match_dup 0) (minus:SVE_FULL_F (match_dup 2) (match_dup 3)))]
   ""
-  [(set_attr "movprfx" "*,*,*,*,yes,yes")]
 )
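
Note the role of the strictness operand (4) in the table: i accepts any
strictness for the immediate alternatives, Ui1 (const 1, SVE_STRICT_GP)
keeps the predicated register forms, and Z (const 0, SVE_RELAXED_GP) marks
the all-register relaxed alternative that emits "#" so the split above can
drop the governing predicate.  After reload that alternative becomes the
plain unpredicated minus (sketch, reusing the split target shown above):

  (set (match_dup 0) (minus:SVE_FULL_F (match_dup 2) (match_dup 3)))
  ;; which matches the unpredicated pattern and assembles to
  ;; something like:  fsub  z0.s, z1.s, z2.s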
 
 ;; Predicated floating-point subtraction from a constant, merging with the
 ;; second input.
 (define_insn_and_rewrite "*cond_sub<mode>_3_const_relaxed"
-  [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?w")
+  [(set (match_operand:SVE_FULL_F 0 "register_operand")
        (unspec:SVE_FULL_F
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (unspec:SVE_FULL_F
             [(match_operand 4)
              (const_int SVE_RELAXED_GP)
              (match_operand:SVE_FULL_F 2 "aarch64_sve_float_arith_immediate")
-             (match_operand:SVE_FULL_F 3 "register_operand" "0, w")]
+             (match_operand:SVE_FULL_F 3 "register_operand")]
             UNSPEC_COND_FSUB)
           (match_dup 3)]
          UNSPEC_SEL))]
   "TARGET_SVE"
-  "@
-   fsubr\t%0.<Vetype>, %1/m, %0.<Vetype>, #%2
-   movprfx\t%0, %3\;fsubr\t%0.<Vetype>, %1/m, %0.<Vetype>, #%2"
+  {@ [ cons: =0 , 1   , 3 ; attrs: movprfx ]
+     [ w        , Upl , 0 ; *              ] fsubr\t%0.<Vetype>, %1/m, %0.<Vetype>, #%2
+     [ ?w       , Upl , w ; yes            ] movprfx\t%0, %3\;fsubr\t%0.<Vetype>, %1/m, %0.<Vetype>, #%2
+  }
   "&& !rtx_equal_p (operands[1], operands[4])"
   {
     operands[4] = copy_rtx (operands[1]);
   }
-  [(set_attr "movprfx" "*,yes")]
 )
 
 (define_insn "*cond_sub<mode>_3_const_strict"
-  [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?w")
+  [(set (match_operand:SVE_FULL_F 0 "register_operand")
        (unspec:SVE_FULL_F
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (unspec:SVE_FULL_F
             [(match_dup 1)
              (const_int SVE_STRICT_GP)
              (match_operand:SVE_FULL_F 2 "aarch64_sve_float_arith_immediate")
-             (match_operand:SVE_FULL_F 3 "register_operand" "0, w")]
+             (match_operand:SVE_FULL_F 3 "register_operand")]
             UNSPEC_COND_FSUB)
           (match_dup 3)]
          UNSPEC_SEL))]
   "TARGET_SVE"
-  "@
-   fsubr\t%0.<Vetype>, %1/m, %0.<Vetype>, #%2
-   movprfx\t%0, %3\;fsubr\t%0.<Vetype>, %1/m, %0.<Vetype>, #%2"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1   , 3 ; attrs: movprfx ]
+     [ w        , Upl , 0 ; *              ] fsubr\t%0.<Vetype>, %1/m, %0.<Vetype>, #%2
+     [ ?w       , Upl , w ; yes            ] movprfx\t%0, %3\;fsubr\t%0.<Vetype>, %1/m, %0.<Vetype>, #%2
+  }
 )
 
 ;; Predicated floating-point subtraction from a constant, merging with an
 ;; independent value.
 (define_insn_and_rewrite "*cond_sub<mode>_const_relaxed"
-  [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, w, ?w")
+  [(set (match_operand:SVE_FULL_F 0 "register_operand")
        (unspec:SVE_FULL_F
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (unspec:SVE_FULL_F
             [(match_operand 5)
              (const_int SVE_RELAXED_GP)
              (match_operand:SVE_FULL_F 2 "aarch64_sve_float_arith_immediate")
-             (match_operand:SVE_FULL_F 3 "register_operand" "w, w, w")]
+             (match_operand:SVE_FULL_F 3 "register_operand")]
             UNSPEC_COND_FSUB)
-          (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero" "Dz, 0, w")]
+          (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero")]
          UNSPEC_SEL))]
   "TARGET_SVE && !rtx_equal_p (operands[3], operands[4])"
-  "@
-   movprfx\t%0.<Vetype>, %1/z, %3.<Vetype>\;fsubr\t%0.<Vetype>, %1/m, %0.<Vetype>, #%2
-   movprfx\t%0.<Vetype>, %1/m, %3.<Vetype>\;fsubr\t%0.<Vetype>, %1/m, %0.<Vetype>, #%2
-   #"
+  {@ [ cons: =0 , 1   , 3 , 4   ]
+     [ w        , Upl , w , Dz  ] movprfx\t%0.<Vetype>, %1/z, %3.<Vetype>\;fsubr\t%0.<Vetype>, %1/m, %0.<Vetype>, #%2
+     [ w        , Upl , w , 0   ] movprfx\t%0.<Vetype>, %1/m, %3.<Vetype>\;fsubr\t%0.<Vetype>, %1/m, %0.<Vetype>, #%2
+     [ ?w       , Upl , w , w   ] #
+  }
   "&& 1"
   {
     if (reload_completed
 )
 
 (define_insn_and_rewrite "*cond_sub<mode>_const_strict"
-  [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, w, ?w")
+  [(set (match_operand:SVE_FULL_F 0 "register_operand")
        (unspec:SVE_FULL_F
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (unspec:SVE_FULL_F
             [(match_dup 1)
              (const_int SVE_STRICT_GP)
              (match_operand:SVE_FULL_F 2 "aarch64_sve_float_arith_immediate")
-             (match_operand:SVE_FULL_F 3 "register_operand" "w, w, w")]
+             (match_operand:SVE_FULL_F 3 "register_operand")]
             UNSPEC_COND_FSUB)
-          (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero" "Dz, 0, w")]
+          (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero")]
          UNSPEC_SEL))]
   "TARGET_SVE && !rtx_equal_p (operands[3], operands[4])"
-  "@
-   movprfx\t%0.<Vetype>, %1/z, %3.<Vetype>\;fsubr\t%0.<Vetype>, %1/m, %0.<Vetype>, #%2
-   movprfx\t%0.<Vetype>, %1/m, %3.<Vetype>\;fsubr\t%0.<Vetype>, %1/m, %0.<Vetype>, #%2
-   #"
+  {@ [ cons: =0 , 1   , 3 , 4   ]
+     [ w        , Upl , w , Dz  ] movprfx\t%0.<Vetype>, %1/z, %3.<Vetype>\;fsubr\t%0.<Vetype>, %1/m, %0.<Vetype>, #%2
+     [ w        , Upl , w , 0   ] movprfx\t%0.<Vetype>, %1/m, %3.<Vetype>\;fsubr\t%0.<Vetype>, %1/m, %0.<Vetype>, #%2
+     [ ?w       , Upl , w , w   ] #
+  }
   "&& reload_completed
    && register_operand (operands[4], <MODE>mode)
    && !rtx_equal_p (operands[0], operands[4])"
 
 ;; Predicated floating-point absolute difference.
 (define_insn_and_rewrite "*aarch64_pred_abd<mode>_relaxed"
-  [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_FULL_F 0 "register_operand")
        (unspec:SVE_FULL_F
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (match_operand:SI 4 "aarch64_sve_gp_strictness")
           (unspec:SVE_FULL_F
             [(match_operand 5)
              (const_int SVE_RELAXED_GP)
-             (match_operand:SVE_FULL_F 2 "register_operand" "%0, w")
-             (match_operand:SVE_FULL_F 3 "register_operand" "w, w")]
+             (match_operand:SVE_FULL_F 2 "register_operand")
+             (match_operand:SVE_FULL_F 3 "register_operand")]
             UNSPEC_COND_FSUB)]
          UNSPEC_COND_FABS))]
   "TARGET_SVE"
-  "@
-   fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
-   movprfx\t%0, %2\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
+  {@ [ cons: =0 , 1   , 2  , 3 ; attrs: movprfx ]
+     [ w        , Upl , %0 , w ; *              ] fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+     [ ?&w      , Upl , w  , w ; yes            ] movprfx\t%0, %2\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+  }
   "&& !rtx_equal_p (operands[1], operands[5])"
   {
     operands[5] = copy_rtx (operands[1]);
   }
-  [(set_attr "movprfx" "*,yes")]
 )
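
The % prefix in a constraint column is the standard commutativity marker,
carried over unchanged from the quoted syntax: %0 on operand 2 tells the
allocator it may swap operands 2 and 3 when trying to satisfy the tie with
the destination, which is safe here because the absolute difference is
commutative.  Sketch of the row in isolation:

  {@ [ cons: =0 , 1   , 2  , 3 ]
     [ w        , Upl , %0 , w ] fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
  }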
 
 (define_insn "*aarch64_pred_abd<mode>_strict"
-  [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_FULL_F 0 "register_operand")
        (unspec:SVE_FULL_F
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (match_operand:SI 4 "aarch64_sve_gp_strictness")
           (unspec:SVE_FULL_F
             [(match_dup 1)
              (const_int SVE_STRICT_GP)
-             (match_operand:SVE_FULL_F 2 "register_operand" "%0, w")
-             (match_operand:SVE_FULL_F 3 "register_operand" "w, w")]
+             (match_operand:SVE_FULL_F 2 "register_operand")
+             (match_operand:SVE_FULL_F 3 "register_operand")]
             UNSPEC_COND_FSUB)]
          UNSPEC_COND_FABS))]
   "TARGET_SVE"
-  "@
-   fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
-   movprfx\t%0, %2\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1   , 2  , 3 ; attrs: movprfx ]
+     [ w        , Upl , %0 , w ; *              ] fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+     [ ?&w      , Upl , w  , w ; yes            ] movprfx\t%0, %2\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+  }
 )
 
 (define_expand "@aarch64_cond_abd<mode>"
 ;; Predicated floating-point absolute difference, merging with the first
 ;; input.
 (define_insn_and_rewrite "*aarch64_cond_abd<mode>_2_relaxed"
-  [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_FULL_F 0 "register_operand")
        (unspec:SVE_FULL_F
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (unspec:SVE_FULL_F
             [(match_operand 4)
              (const_int SVE_RELAXED_GP)
              (unspec:SVE_FULL_F
                [(match_operand 5)
                 (const_int SVE_RELAXED_GP)
-                (match_operand:SVE_FULL_F 2 "register_operand" "0, w")
-                (match_operand:SVE_FULL_F 3 "register_operand" "w, w")]
+                (match_operand:SVE_FULL_F 2 "register_operand")
+                (match_operand:SVE_FULL_F 3 "register_operand")]
                UNSPEC_COND_FSUB)]
             UNSPEC_COND_FABS)
           (match_dup 2)]
          UNSPEC_SEL))]
   "TARGET_SVE"
-  "@
-   fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
-   movprfx\t%0, %2\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
+  {@ [ cons: =0 , 1   , 2 , 3 ; attrs: movprfx ]
+     [ w        , Upl , 0 , w ; *              ] fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+     [ ?&w      , Upl , w , w ; yes            ] movprfx\t%0, %2\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+  }
   "&& (!rtx_equal_p (operands[1], operands[4])
        || !rtx_equal_p (operands[1], operands[5]))"
   {
     operands[4] = copy_rtx (operands[1]);
     operands[5] = copy_rtx (operands[1]);
   }
-  [(set_attr "movprfx" "*,yes")]
 )
 
 (define_insn "*aarch64_cond_abd<mode>_2_strict"
-  [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_FULL_F 0 "register_operand")
        (unspec:SVE_FULL_F
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (unspec:SVE_FULL_F
             [(match_dup 1)
              (match_operand:SI 4 "aarch64_sve_gp_strictness")
              (unspec:SVE_FULL_F
                [(match_dup 1)
                 (match_operand:SI 5 "aarch64_sve_gp_strictness")
-                (match_operand:SVE_FULL_F 2 "register_operand" "0, w")
-                (match_operand:SVE_FULL_F 3 "register_operand" "w, w")]
+                (match_operand:SVE_FULL_F 2 "register_operand")
+                (match_operand:SVE_FULL_F 3 "register_operand")]
                UNSPEC_COND_FSUB)]
             UNSPEC_COND_FABS)
           (match_dup 2)]
          UNSPEC_SEL))]
   "TARGET_SVE"
-  "@
-   fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
-   movprfx\t%0, %2\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1   , 2 , 3 ; attrs: movprfx ]
+     [ w        , Upl , 0 , w ; *              ] fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+     [ ?&w      , Upl , w , w ; yes            ] movprfx\t%0, %2\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+  }
 )
 
 ;; Predicated floating-point absolute difference, merging with the second
 ;; input.
 (define_insn_and_rewrite "*aarch64_cond_abd<mode>_3_relaxed"
-  [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_FULL_F 0 "register_operand")
        (unspec:SVE_FULL_F
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (unspec:SVE_FULL_F
             [(match_operand 4)
              (const_int SVE_RELAXED_GP)
              (unspec:SVE_FULL_F
                [(match_operand 5)
                 (const_int SVE_RELAXED_GP)
-                (match_operand:SVE_FULL_F 2 "register_operand" "w, w")
-                (match_operand:SVE_FULL_F 3 "register_operand" "0, w")]
+                (match_operand:SVE_FULL_F 2 "register_operand")
+                (match_operand:SVE_FULL_F 3 "register_operand")]
                UNSPEC_COND_FSUB)]
             UNSPEC_COND_FABS)
           (match_dup 3)]
          UNSPEC_SEL))]
   "TARGET_SVE"
-  "@
-   fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
-   movprfx\t%0, %3\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>"
+  {@ [ cons: =0 , 1   , 2 , 3 ; attrs: movprfx ]
+     [ w        , Upl , w , 0 ; *              ] fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+     [ ?&w      , Upl , w , w ; yes            ] movprfx\t%0, %3\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+  }
   "&& (!rtx_equal_p (operands[1], operands[4])
        || !rtx_equal_p (operands[1], operands[5]))"
   {
     operands[4] = copy_rtx (operands[1]);
     operands[5] = copy_rtx (operands[1]);
   }
-  [(set_attr "movprfx" "*,yes")]
 )
 
 (define_insn "*aarch64_cond_abd<mode>_3_strict"
-  [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_FULL_F 0 "register_operand")
        (unspec:SVE_FULL_F
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (unspec:SVE_FULL_F
             [(match_dup 1)
              (match_operand:SI 4 "aarch64_sve_gp_strictness")
              (unspec:SVE_FULL_F
                [(match_dup 1)
                 (match_operand:SI 5 "aarch64_sve_gp_strictness")
-                (match_operand:SVE_FULL_F 2 "register_operand" "w, w")
-                (match_operand:SVE_FULL_F 3 "register_operand" "0, w")]
+                (match_operand:SVE_FULL_F 2 "register_operand")
+                (match_operand:SVE_FULL_F 3 "register_operand")]
                UNSPEC_COND_FSUB)]
             UNSPEC_COND_FABS)
           (match_dup 3)]
          UNSPEC_SEL))]
   "TARGET_SVE"
-  "@
-   fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
-   movprfx\t%0, %3\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1   , 2 , 3 ; attrs: movprfx ]
+     [ w        , Upl , w , 0 ; *              ] fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+     [ ?&w      , Upl , w , w ; yes            ] movprfx\t%0, %3\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+  }
 )
 
 ;; Predicated floating-point absolute difference, merging with an
 ;; independent value.
 (define_insn_and_rewrite "*aarch64_cond_abd<mode>_any_relaxed"
-  [(set (match_operand:SVE_FULL_F 0 "register_operand" "=&w, &w, &w, &w, ?&w")
+  [(set (match_operand:SVE_FULL_F 0 "register_operand")
        (unspec:SVE_FULL_F
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (unspec:SVE_FULL_F
             [(match_operand 5)
              (const_int SVE_RELAXED_GP)
              (unspec:SVE_FULL_F
                [(match_operand 6)
                 (const_int SVE_RELAXED_GP)
-                (match_operand:SVE_FULL_F 2 "register_operand" "0, w, w, w, w")
-                (match_operand:SVE_FULL_F 3 "register_operand" "w, 0, w, w, w")]
+                (match_operand:SVE_FULL_F 2 "register_operand")
+                (match_operand:SVE_FULL_F 3 "register_operand")]
                UNSPEC_COND_FSUB)]
             UNSPEC_COND_FABS)
-          (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero" "Dz, Dz, Dz, 0, w")]
+          (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero")]
          UNSPEC_SEL))]
   "TARGET_SVE
    && !rtx_equal_p (operands[2], operands[4])
    && !rtx_equal_p (operands[3], operands[4])"
-  "@
-   movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
-   movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
-   movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
-   movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
-   #"
+  {@ [ cons: =0 , 1   , 2 , 3 , 4   ]
+     [ &w       , Upl , 0 , w , Dz  ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+     [ &w       , Upl , w , 0 , Dz  ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+     [ &w       , Upl , w , w , Dz  ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+     [ &w       , Upl , w , w , 0   ] movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+     [ ?&w      , Upl , w , w , w   ] #
+  }
   "&& 1"
   {
     if (reload_completed
 )
 
 (define_insn_and_rewrite "*aarch64_cond_abd<mode>_any_strict"
-  [(set (match_operand:SVE_FULL_F 0 "register_operand" "=&w, &w, &w, &w, ?&w")
+  [(set (match_operand:SVE_FULL_F 0 "register_operand")
        (unspec:SVE_FULL_F
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (unspec:SVE_FULL_F
             [(match_dup 1)
              (match_operand:SI 5 "aarch64_sve_gp_strictness")
              (unspec:SVE_FULL_F
                [(match_dup 1)
                 (match_operand:SI 6 "aarch64_sve_gp_strictness")
-                (match_operand:SVE_FULL_F 2 "register_operand" "0, w, w, w, w")
-                (match_operand:SVE_FULL_F 3 "register_operand" "w, 0, w, w, w")]
+                (match_operand:SVE_FULL_F 2 "register_operand")
+                (match_operand:SVE_FULL_F 3 "register_operand")]
                UNSPEC_COND_FSUB)]
             UNSPEC_COND_FABS)
-          (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero" "Dz, Dz, Dz, 0, w")]
+          (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero")]
          UNSPEC_SEL))]
   "TARGET_SVE
    && !rtx_equal_p (operands[2], operands[4])
    && !rtx_equal_p (operands[3], operands[4])"
-  "@
-   movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
-   movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
-   movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
-   movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
-   #"
+  {@ [ cons: =0 , 1   , 2 , 3 , 4   ]
+     [ &w       , Upl , 0 , w , Dz  ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+     [ &w       , Upl , w , 0 , Dz  ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+     [ &w       , Upl , w , w , Dz  ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+     [ &w       , Upl , w , w , 0   ] movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+     [ ?&w      , Upl , w , w , w   ] #
+  }
   "&& reload_completed
    && register_operand (operands[4], <MODE>mode)
    && !rtx_equal_p (operands[0], operands[4])"
 
 ;; Predicated floating-point multiplication.
 (define_insn_and_split "@aarch64_pred_<optab><mode>"
-  [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, w, w, ?&w, ?&w")
+  [(set (match_operand:SVE_FULL_F 0 "register_operand")
        (unspec:SVE_FULL_F
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl, Upl")
-          (match_operand:SI 4 "aarch64_sve_gp_strictness" "i, Z, Ui1, i, Ui1")
-          (match_operand:SVE_FULL_F 2 "register_operand" "%0, w, 0, w, w")
-          (match_operand:SVE_FULL_F 3 "aarch64_sve_float_mul_operand" "vsM, w, w, vsM, w")]
+         [(match_operand:<VPRED> 1 "register_operand")
+          (match_operand:SI 4 "aarch64_sve_gp_strictness")
+          (match_operand:SVE_FULL_F 2 "register_operand")
+          (match_operand:SVE_FULL_F 3 "aarch64_sve_float_mul_operand")]
          SVE_COND_FP_MUL))]
   "TARGET_SVE"
-  "@
-   fmul\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
-   #
-   fmul\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
-   movprfx\t%0, %2\;fmul\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
-   movprfx\t%0, %2\;fmul\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
+  {@ [ cons: =0 , 1   , 2  , 3   , 4   ; attrs: movprfx ]
+     [ w        , Upl , %0 , vsM , i   ; *              ] fmul\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+     [ w        , Upl , w  , w   , Z   ; *              ] #
+     [ w        , Upl , 0  , w   , Ui1 ; *              ] fmul\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+     [ ?&w      , Upl , w  , vsM , i   ; yes            ] movprfx\t%0, %2\;fmul\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+     [ ?&w      , Upl , w  , w   , Ui1 ; yes            ] movprfx\t%0, %2\;fmul\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+  }
   ; Split the unpredicated form after reload, so that we don't have
   ; the unnecessary PTRUE.
   "&& reload_completed
    && INTVAL (operands[4]) == SVE_RELAXED_GP"
   [(set (match_dup 0) (mult:SVE_FULL_F (match_dup 2) (match_dup 3)))]
   ""
-  [(set_attr "movprfx" "*,*,*,yes,yes")]
 )
 
 ;; Merging forms are handled through SVE_COND_FP_BINARY and
 
 ;; Predicated floating-point maximum/minimum.
 (define_insn "@aarch64_pred_<optab><mode>"
-  [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, w, ?&w, ?&w")
+  [(set (match_operand:SVE_FULL_F 0 "register_operand")
        (unspec:SVE_FULL_F
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (match_operand:SI 4 "aarch64_sve_gp_strictness")
-          (match_operand:SVE_FULL_F 2 "register_operand" "%0, 0, w, w")
-          (match_operand:SVE_FULL_F 3 "aarch64_sve_float_maxmin_operand" "vsB, w, vsB, w")]
+          (match_operand:SVE_FULL_F 2 "register_operand")
+          (match_operand:SVE_FULL_F 3 "aarch64_sve_float_maxmin_operand")]
          SVE_COND_FP_MAXMIN))]
   "TARGET_SVE"
-  "@
-   <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
-   <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
-   movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
-   movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
-  [(set_attr "movprfx" "*,*,yes,yes")]
+  {@ [ cons: =0 , 1   , 2  , 3   ; attrs: movprfx ]
+     [ w        , Upl , %0 , vsB ; *              ] <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+     [ w        , Upl , 0  , w   ; *              ] <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+     [ ?&w      , Upl , w  , vsB ; yes            ] movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+     [ ?&w      , Upl , w  , w   ; yes            ] movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+  }
 )
 
 ;; Merging forms are handled through SVE_COND_FP_BINARY and
 
 ;; Predicated integer addition of product.
 (define_insn "@aarch64_pred_fma<mode>"
-  [(set (match_operand:SVE_I 0 "register_operand" "=w, w, ?&w")
+  [(set (match_operand:SVE_I 0 "register_operand")
        (plus:SVE_I
          (unspec:SVE_I
-           [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
+           [(match_operand:<VPRED> 1 "register_operand")
             (mult:SVE_I
-              (match_operand:SVE_I 2 "register_operand" "%0, w, w")
-              (match_operand:SVE_I 3 "register_operand" "w, w, w"))]
+              (match_operand:SVE_I 2 "register_operand")
+              (match_operand:SVE_I 3 "register_operand"))]
            UNSPEC_PRED_X)
-         (match_operand:SVE_I 4 "register_operand" "w, 0, w")))]
+         (match_operand:SVE_I 4 "register_operand")))]
   "TARGET_SVE"
-  "@
-   mad\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
-   mla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
-   movprfx\t%0, %4\;mla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>"
-  [(set_attr "movprfx" "*,*,yes")]
+  {@ [ cons: =0 , 1   , 2  , 3 , 4 ; attrs: movprfx ]
+     [ w        , Upl , %0 , w , w ; *              ] mad\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
+     [ w        , Upl , w  , w , 0 ; *              ] mla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+     [ ?&w      , Upl , w  , w , w ; yes            ] movprfx\t%0, %4\;mla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+  }
 )
 
 ;; Predicated integer addition of product with merging.
 
 ;; Predicated integer addition of product, merging with the first input.
 (define_insn "*cond_fma<mode>_2"
-  [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_I 0 "register_operand")
        (unspec:SVE_I
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (plus:SVE_I
             (mult:SVE_I
-              (match_operand:SVE_I 2 "register_operand" "0, w")
-              (match_operand:SVE_I 3 "register_operand" "w, w"))
-            (match_operand:SVE_I 4 "register_operand" "w, w"))
+              (match_operand:SVE_I 2 "register_operand")
+              (match_operand:SVE_I 3 "register_operand"))
+            (match_operand:SVE_I 4 "register_operand"))
           (match_dup 2)]
          UNSPEC_SEL))]
   "TARGET_SVE"
-  "@
-   mad\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
-   movprfx\t%0, %2\;mad\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1   , 2 , 3 , 4 ; attrs: movprfx ]
+     [ w        , Upl , 0 , w , w ; *              ] mad\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
+     [ ?&w      , Upl , w , w , w ; yes            ] movprfx\t%0, %2\;mad\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
+  }
 )
 
 ;; Predicated integer addition of product, merging with the third input.
 (define_insn "*cond_fma<mode>_4"
-  [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_I 0 "register_operand")
        (unspec:SVE_I
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (plus:SVE_I
             (mult:SVE_I
-              (match_operand:SVE_I 2 "register_operand" "w, w")
-              (match_operand:SVE_I 3 "register_operand" "w, w"))
-            (match_operand:SVE_I 4 "register_operand" "0, w"))
+              (match_operand:SVE_I 2 "register_operand")
+              (match_operand:SVE_I 3 "register_operand"))
+            (match_operand:SVE_I 4 "register_operand"))
           (match_dup 4)]
          UNSPEC_SEL))]
   "TARGET_SVE"
-  "@
-   mla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
-   movprfx\t%0, %4\;mla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1   , 2 , 3 , 4 ; attrs: movprfx ]
+     [ w        , Upl , w , w , 0 ; *              ] mla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+     [ ?&w      , Upl , w , w , w ; yes            ] movprfx\t%0, %4\;mla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+  }
 )
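
Which destructive form gets emitted follows directly from the ties in the
tables above: when operand 2 (a multiplicand) shares the destination, the
multiplicand-destructive MAD is used; when operand 4 (the accumulator)
does, the accumulator-destructive MLA is used; and when nothing is tied, a
movprfx prefixes the MLA.  Concretely, assuming byte elements (sketch):

  ;; operand 2 tied:  mad  z0.b, p0/m, z1.b, z2.b    ; z0 = z0 * z1 + z2
  ;; operand 4 tied:  mla  z0.b, p0/m, z1.b, z2.b    ; z0 = z0 + z1 * z2
  ;; nothing tied:    movprfx z0, z3
  ;;                  mla  z0.b, p0/m, z1.b, z2.b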
 
 ;; Predicated integer addition of product, merging with an independent value.
 (define_insn_and_rewrite "*cond_fma<mode>_any"
-  [(set (match_operand:SVE_I 0 "register_operand" "=&w, &w, &w, &w, &w, ?&w")
+  [(set (match_operand:SVE_I 0 "register_operand")
        (unspec:SVE_I
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl, Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (plus:SVE_I
             (mult:SVE_I
-              (match_operand:SVE_I 2 "register_operand" "w, w, 0, w, w, w")
-              (match_operand:SVE_I 3 "register_operand" "w, w, w, 0, w, w"))
-            (match_operand:SVE_I 4 "register_operand" "w, 0, w, w, w, w"))
-          (match_operand:SVE_I 5 "aarch64_simd_reg_or_zero" "Dz, Dz, Dz, Dz, 0, w")]
+              (match_operand:SVE_I 2 "register_operand")
+              (match_operand:SVE_I 3 "register_operand"))
+            (match_operand:SVE_I 4 "register_operand"))
+          (match_operand:SVE_I 5 "aarch64_simd_reg_or_zero")]
          UNSPEC_SEL))]
   "TARGET_SVE
    && !rtx_equal_p (operands[2], operands[5])
    && !rtx_equal_p (operands[3], operands[5])
    && !rtx_equal_p (operands[4], operands[5])"
-  "@
-   movprfx\t%0.<Vetype>, %1/z, %4.<Vetype>\;mla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
-   movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;mla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
-   movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;mad\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
-   movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;mad\t%0.<Vetype>, %1/m, %2.<Vetype>, %4.<Vetype>
-   movprfx\t%0.<Vetype>, %1/m, %4.<Vetype>\;mla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
-   #"
+  {@ [ cons: =0 , 1   , 2 , 3 , 4 , 5   ]
+     [ &w       , Upl , w , w , w , Dz  ] movprfx\t%0.<Vetype>, %1/z, %4.<Vetype>\;mla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+     [ &w       , Upl , w , w , 0 , Dz  ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;mla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+     [ &w       , Upl , 0 , w , w , Dz  ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;mad\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
+     [ &w       , Upl , w , 0 , w , Dz  ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;mad\t%0.<Vetype>, %1/m, %2.<Vetype>, %4.<Vetype>
+     [ &w       , Upl , w , w , w , 0   ] movprfx\t%0.<Vetype>, %1/m, %4.<Vetype>\;mla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+     [ ?&w      , Upl , w , w , w , w   ] #
+  }
   "&& reload_completed
    && register_operand (operands[5], <MODE>mode)
    && !rtx_equal_p (operands[0], operands[5])"
 
 ;; Predicated integer subtraction of product.
 (define_insn "@aarch64_pred_fnma<mode>"
-  [(set (match_operand:SVE_I 0 "register_operand" "=w, w, ?&w")
+  [(set (match_operand:SVE_I 0 "register_operand")
        (minus:SVE_I
-         (match_operand:SVE_I 4 "register_operand" "w, 0, w")
+         (match_operand:SVE_I 4 "register_operand")
          (unspec:SVE_I
-           [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
+           [(match_operand:<VPRED> 1 "register_operand")
             (mult:SVE_I
-              (match_operand:SVE_I 2 "register_operand" "%0, w, w")
-              (match_operand:SVE_I 3 "register_operand" "w, w, w"))]
+              (match_operand:SVE_I 2 "register_operand")
+              (match_operand:SVE_I 3 "register_operand"))]
            UNSPEC_PRED_X)))]
   "TARGET_SVE"
-  "@
-   msb\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
-   mls\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
-   movprfx\t%0, %4\;mls\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>"
-  [(set_attr "movprfx" "*,*,yes")]
+  {@ [ cons: =0 , 1   , 2  , 3 , 4 ; attrs: movprfx ]
+     [ w        , Upl , %0 , w , w ; *              ] msb\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
+     [ w        , Upl , w  , w , 0 ; *              ] mls\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+     [ ?&w      , Upl , w  , w , w ; yes            ] movprfx\t%0, %4\;mls\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+  }
 )
 
 ;; Predicated integer subtraction of product with merging.
 
 ;; Predicated integer subtraction of product, merging with the first input.
 (define_insn "*cond_fnma<mode>_2"
-  [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_I 0 "register_operand")
        (unspec:SVE_I
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (minus:SVE_I
-            (match_operand:SVE_I 4 "register_operand" "w, w")
+            (match_operand:SVE_I 4 "register_operand")
             (mult:SVE_I
-              (match_operand:SVE_I 2 "register_operand" "0, w")
-              (match_operand:SVE_I 3 "register_operand" "w, w")))
+              (match_operand:SVE_I 2 "register_operand")
+              (match_operand:SVE_I 3 "register_operand")))
           (match_dup 2)]
          UNSPEC_SEL))]
   "TARGET_SVE"
-  "@
-   msb\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
-   movprfx\t%0, %2\;msb\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1   , 2 , 3 , 4 ; attrs: movprfx ]
+     [ w        , Upl , 0 , w , w ; *              ] msb\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
+     [ ?&w      , Upl , w , w , w ; yes            ] movprfx\t%0, %2\;msb\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
+  }
 )
 
 ;; Predicated integer subtraction of product, merging with the third input.
 (define_insn "*cond_fnma<mode>_4"
-  [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_I 0 "register_operand")
        (unspec:SVE_I
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (minus:SVE_I
-            (match_operand:SVE_I 4 "register_operand" "0, w")
+            (match_operand:SVE_I 4 "register_operand")
             (mult:SVE_I
-              (match_operand:SVE_I 2 "register_operand" "w, w")
-              (match_operand:SVE_I 3 "register_operand" "w, w")))
+              (match_operand:SVE_I 2 "register_operand")
+              (match_operand:SVE_I 3 "register_operand")))
           (match_dup 4)]
          UNSPEC_SEL))]
   "TARGET_SVE"
-  "@
-   mls\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
-   movprfx\t%0, %4\;mls\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1   , 2 , 3 , 4 ; attrs: movprfx ]
+     [ w        , Upl , w , w , 0 ; *              ] mls\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+     [ ?&w      , Upl , w , w , w ; yes            ] movprfx\t%0, %4\;mls\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+  }
 )
 
 ;; Predicated integer subtraction of product, merging with an
 ;; independent value.
 (define_insn_and_rewrite "*cond_fnma<mode>_any"
-  [(set (match_operand:SVE_I 0 "register_operand" "=&w, &w, &w, &w, &w, ?&w")
+  [(set (match_operand:SVE_I 0 "register_operand")
        (unspec:SVE_I
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl, Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (minus:SVE_I
-            (match_operand:SVE_I 4 "register_operand" "w, 0, w, w, w, w")
+            (match_operand:SVE_I 4 "register_operand")
             (mult:SVE_I
-              (match_operand:SVE_I 2 "register_operand" "w, w, 0, w, w, w")
-              (match_operand:SVE_I 3 "register_operand" "w, w, w, 0, w, w")))
-          (match_operand:SVE_I 5 "aarch64_simd_reg_or_zero" "Dz, Dz, Dz, Dz, 0, w")]
+              (match_operand:SVE_I 2 "register_operand")
+              (match_operand:SVE_I 3 "register_operand")))
+          (match_operand:SVE_I 5 "aarch64_simd_reg_or_zero")]
          UNSPEC_SEL))]
   "TARGET_SVE
    && !rtx_equal_p (operands[2], operands[5])
    && !rtx_equal_p (operands[3], operands[5])
    && !rtx_equal_p (operands[4], operands[5])"
-  "@
-   movprfx\t%0.<Vetype>, %1/z, %4.<Vetype>\;mls\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
-   movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;mls\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
-   movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;msb\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
-   movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;msb\t%0.<Vetype>, %1/m, %2.<Vetype>, %4.<Vetype>
-   movprfx\t%0.<Vetype>, %1/m, %4.<Vetype>\;mls\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
-   #"
+  {@ [ cons: =0 , 1   , 2 , 3 , 4 , 5   ]
+     [ &w       , Upl , w , w , w , Dz  ] movprfx\t%0.<Vetype>, %1/z, %4.<Vetype>\;mls\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+     [ &w       , Upl , w , w , 0 , Dz  ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;mls\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+     [ &w       , Upl , 0 , w , w , Dz  ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;msb\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
+     [ &w       , Upl , w , 0 , w , Dz  ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;msb\t%0.<Vetype>, %1/m, %2.<Vetype>, %4.<Vetype>
+     [ &w       , Upl , w , w , w , 0   ] movprfx\t%0.<Vetype>, %1/m, %4.<Vetype>\;mls\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+     [ ?&w      , Upl , w , w , w , w   ] #
+  }
   "&& reload_completed
    && register_operand (operands[5], <MODE>mode)
    && !rtx_equal_p (operands[0], operands[5])"
 
 ;; Four-element integer dot-product with accumulation.
 (define_insn "<sur>dot_prod<vsi2qi>"
-  [(set (match_operand:SVE_FULL_SDI 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_FULL_SDI 0 "register_operand")
        (plus:SVE_FULL_SDI
          (unspec:SVE_FULL_SDI
-           [(match_operand:<VSI2QI> 1 "register_operand" "w, w")
-            (match_operand:<VSI2QI> 2 "register_operand" "w, w")]
+           [(match_operand:<VSI2QI> 1 "register_operand")
+            (match_operand:<VSI2QI> 2 "register_operand")]
            DOTPROD)
-         (match_operand:SVE_FULL_SDI 3 "register_operand" "0, w")))]
+         (match_operand:SVE_FULL_SDI 3 "register_operand")))]
   "TARGET_SVE"
-  "@
-   <sur>dot\\t%0.<Vetype>, %1.<Vetype_fourth>, %2.<Vetype_fourth>
-   movprfx\t%0, %3\;<sur>dot\\t%0.<Vetype>, %1.<Vetype_fourth>, %2.<Vetype_fourth>"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+     [ w        , w , w , 0 ; *              ] <sur>dot\t%0.<Vetype>, %1.<Vetype_fourth>, %2.<Vetype_fourth>
+     [ ?&w      , w , w , w ; yes            ] movprfx\t%0, %3\;<sur>dot\t%0.<Vetype>, %1.<Vetype_fourth>, %2.<Vetype_fourth>
+  }
 )
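
A small cosmetic change rides along in the dot-product patterns: the quoted
template spelled the tab as \\t (an escaped backslash inside the C-style
string), while the braced rows are not string literals, so a single \t
suffices; both spellings appear to print a tab in the generated output
templates.  The same row in both notations (trimmed sketch):

  "@
   <sur>dot\\t%0.<Vetype>, %1.<Vetype_fourth>, %2.<Vetype_fourth>
   ..."

  {@ [ cons: =0 , 1 , 2 , 3 ]
     [ w        , w , w , 0 ] <sur>dot\t%0.<Vetype>, %1.<Vetype_fourth>, %2.<Vetype_fourth>
  }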
 
 ;; Four-element integer dot-product by selected lanes with accumulation.
 (define_insn "@aarch64_<sur>dot_prod_lane<vsi2qi>"
-  [(set (match_operand:SVE_FULL_SDI 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_FULL_SDI 0 "register_operand")
        (plus:SVE_FULL_SDI
          (unspec:SVE_FULL_SDI
-           [(match_operand:<VSI2QI> 1 "register_operand" "w, w")
+           [(match_operand:<VSI2QI> 1 "register_operand")
             (unspec:<VSI2QI>
-              [(match_operand:<VSI2QI> 2 "register_operand" "<sve_lane_con>, <sve_lane_con>")
+              [(match_operand:<VSI2QI> 2 "register_operand")
                (match_operand:SI 3 "const_int_operand")]
               UNSPEC_SVE_LANE_SELECT)]
            DOTPROD)
-         (match_operand:SVE_FULL_SDI 4 "register_operand" "0, w")))]
+         (match_operand:SVE_FULL_SDI 4 "register_operand")))]
   "TARGET_SVE"
-  "@
-   <sur>dot\\t%0.<Vetype>, %1.<Vetype_fourth>, %2.<Vetype_fourth>[%3]
-   movprfx\t%0, %4\;<sur>dot\\t%0.<Vetype>, %1.<Vetype_fourth>, %2.<Vetype_fourth>[%3]"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1 , 2              , 4 ; attrs: movprfx ]
+     [ w        , w , <sve_lane_con> , 0 ; *              ] <sur>dot\t%0.<Vetype>, %1.<Vetype_fourth>, %2.<Vetype_fourth>[%3]
+     [ ?&w      , w , <sve_lane_con> , w ; yes            ] movprfx\t%0, %4\;<sur>dot\t%0.<Vetype>, %1.<Vetype_fourth>, %2.<Vetype_fourth>[%3]
+  }
 )
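
Note how the cons: header lists operand numbers rather than relying on
position: operand 3 (the const_int lane index) needs no constraint, so it
simply has no column, yet %3 still appears in the template.  Mode-attribute
constraints such as <sve_lane_con> also go straight into the table.  A
trimmed sketch:

  {@ [ cons: =0 , 1 , 2              , 4 ]
     [ w        , w , <sve_lane_con> , 0 ] <sur>dot\t%0.<Vetype>, %1.<Vetype_fourth>, %2.<Vetype_fourth>[%3]
  }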
 
 (define_insn "@<sur>dot_prod<vsi2qi>"
-  [(set (match_operand:VNx4SI_ONLY 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:VNx4SI_ONLY 0 "register_operand")
         (plus:VNx4SI_ONLY
          (unspec:VNx4SI_ONLY
-           [(match_operand:<VSI2QI> 1 "register_operand" "w, w")
-            (match_operand:<VSI2QI> 2 "register_operand" "w, w")]
+           [(match_operand:<VSI2QI> 1 "register_operand")
+            (match_operand:<VSI2QI> 2 "register_operand")]
            DOTPROD_US_ONLY)
-         (match_operand:VNx4SI_ONLY 3 "register_operand" "0, w")))]
+         (match_operand:VNx4SI_ONLY 3 "register_operand")))]
   "TARGET_SVE_I8MM"
-  "@
-   <sur>dot\\t%0.s, %1.b, %2.b
-   movprfx\t%0, %3\;<sur>dot\\t%0.s, %1.b, %2.b"
-   [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+     [ w        , w , w , 0 ; *              ] <sur>dot\t%0.s, %1.b, %2.b
+     [ ?&w      , w , w , w ; yes            ] movprfx\t%0, %3\;<sur>dot\t%0.s, %1.b, %2.b
+  }
 )
 
 (define_insn "@aarch64_<sur>dot_prod_lane<vsi2qi>"
-  [(set (match_operand:VNx4SI_ONLY 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:VNx4SI_ONLY 0 "register_operand")
        (plus:VNx4SI_ONLY
          (unspec:VNx4SI_ONLY
-           [(match_operand:<VSI2QI> 1 "register_operand" "w, w")
+           [(match_operand:<VSI2QI> 1 "register_operand")
             (unspec:<VSI2QI>
-              [(match_operand:<VSI2QI> 2 "register_operand" "y, y")
+              [(match_operand:<VSI2QI> 2 "register_operand")
                (match_operand:SI 3 "const_int_operand")]
               UNSPEC_SVE_LANE_SELECT)]
            DOTPROD_I8MM)
-         (match_operand:VNx4SI_ONLY 4 "register_operand" "0, w")))]
+         (match_operand:VNx4SI_ONLY 4 "register_operand")))]
   "TARGET_SVE_I8MM"
-  "@
-   <sur>dot\\t%0.s, %1.b, %2.b[%3]
-   movprfx\t%0, %4\;<sur>dot\\t%0.s, %1.b, %2.b[%3]"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1 , 2 , 4 ; attrs: movprfx ]
+     [ w        , w , y , 0 ; *              ] <sur>dot\t%0.s, %1.b, %2.b[%3]
+     [ ?&w      , w , y , w ; yes            ] movprfx\t%0, %4\;<sur>dot\t%0.s, %1.b, %2.b[%3]
+  }
 )
 
 ;; -------------------------------------------------------------------------
 ;; -------------------------------------------------------------------------
 
 (define_insn "@aarch64_sve_add_<optab><vsi2qi>"
-  [(set (match_operand:VNx4SI_ONLY 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:VNx4SI_ONLY 0 "register_operand")
        (plus:VNx4SI_ONLY
          (unspec:VNx4SI_ONLY
-           [(match_operand:<VSI2QI> 2 "register_operand" "w, w")
-            (match_operand:<VSI2QI> 3 "register_operand" "w, w")]
+           [(match_operand:<VSI2QI> 2 "register_operand")
+            (match_operand:<VSI2QI> 3 "register_operand")]
            MATMUL)
-         (match_operand:VNx4SI_ONLY 1 "register_operand" "0, w")))]
+         (match_operand:VNx4SI_ONLY 1 "register_operand")))]
   "TARGET_SVE_I8MM"
-  "@
-   <sur>mmla\\t%0.s, %2.b, %3.b
-   movprfx\t%0, %1\;<sur>mmla\\t%0.s, %2.b, %3.b"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+     [ w        , 0 , w , w ; *              ] <sur>mmla\t%0.s, %2.b, %3.b
+     [ ?&w      , w , w , w ; yes            ] movprfx\t%0, %1\;<sur>mmla\t%0.s, %2.b, %3.b
+  }
 )
 
 ;; -------------------------------------------------------------------------
 
 ;; Predicated floating-point ternary operations.
 (define_insn "@aarch64_pred_<optab><mode>"
-  [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, w, ?&w")
+  [(set (match_operand:SVE_FULL_F 0 "register_operand")
        (unspec:SVE_FULL_F
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (match_operand:SI 5 "aarch64_sve_gp_strictness")
-          (match_operand:SVE_FULL_F 2 "register_operand" "%w, 0, w")
-          (match_operand:SVE_FULL_F 3 "register_operand" "w, w, w")
-          (match_operand:SVE_FULL_F 4 "register_operand" "0, w, w")]
+          (match_operand:SVE_FULL_F 2 "register_operand")
+          (match_operand:SVE_FULL_F 3 "register_operand")
+          (match_operand:SVE_FULL_F 4 "register_operand")]
          SVE_COND_FP_TERNARY))]
   "TARGET_SVE"
-  "@
-   <sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
-   <sve_fmad_op>\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
-   movprfx\t%0, %4\;<sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>"
-  [(set_attr "movprfx" "*,*,yes")]
+  {@ [ cons: =0 , 1   , 2  , 3 , 4 ; attrs: movprfx ]
+     [ w        , Upl , %w , w , 0 ; *              ] <sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+     [ w        , Upl , 0  , w , w ; *              ] <sve_fmad_op>\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
+     [ ?&w      , Upl , w  , w , w ; yes            ] movprfx\t%0, %4\;<sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+  }
 )
 
 ;; Predicated floating-point ternary operations with merging.
 ;; Predicated floating-point ternary operations, merging with the
 ;; first input.
 (define_insn_and_rewrite "*cond_<optab><mode>_2_relaxed"
-  [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_FULL_F 0 "register_operand")
        (unspec:SVE_FULL_F
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (unspec:SVE_FULL_F
             [(match_operand 5)
              (const_int SVE_RELAXED_GP)
-             (match_operand:SVE_FULL_F 2 "register_operand" "0, w")
-             (match_operand:SVE_FULL_F 3 "register_operand" "w, w")
-             (match_operand:SVE_FULL_F 4 "register_operand" "w, w")]
+             (match_operand:SVE_FULL_F 2 "register_operand")
+             (match_operand:SVE_FULL_F 3 "register_operand")
+             (match_operand:SVE_FULL_F 4 "register_operand")]
             SVE_COND_FP_TERNARY)
           (match_dup 2)]
          UNSPEC_SEL))]
   "TARGET_SVE"
-  "@
-   <sve_fmad_op>\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
-   movprfx\t%0, %2\;<sve_fmad_op>\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>"
+  {@ [ cons: =0 , 1   , 2 , 3 , 4 ; attrs: movprfx ]
+     [ w        , Upl , 0 , w , w ; *              ] <sve_fmad_op>\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
+     [ ?&w      , Upl , w , w , w ; yes            ] movprfx\t%0, %2\;<sve_fmad_op>\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
+  }
   "&& !rtx_equal_p (operands[1], operands[5])"
   {
     operands[5] = copy_rtx (operands[1]);
   }
-  [(set_attr "movprfx" "*,yes")]
 )
 
 (define_insn "*cond_<optab><mode>_2_strict"
-  [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_FULL_F 0 "register_operand")
        (unspec:SVE_FULL_F
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (unspec:SVE_FULL_F
             [(match_dup 1)
              (const_int SVE_STRICT_GP)
-             (match_operand:SVE_FULL_F 2 "register_operand" "0, w")
-             (match_operand:SVE_FULL_F 3 "register_operand" "w, w")
-             (match_operand:SVE_FULL_F 4 "register_operand" "w, w")]
+             (match_operand:SVE_FULL_F 2 "register_operand")
+             (match_operand:SVE_FULL_F 3 "register_operand")
+             (match_operand:SVE_FULL_F 4 "register_operand")]
             SVE_COND_FP_TERNARY)
           (match_dup 2)]
          UNSPEC_SEL))]
   "TARGET_SVE"
-  "@
-   <sve_fmad_op>\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
-   movprfx\t%0, %2\;<sve_fmad_op>\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1   , 2 , 3 , 4 ; attrs: movprfx ]
+     [ w        , Upl , 0 , w , w ; *              ] <sve_fmad_op>\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
+     [ ?&w      , Upl , w , w , w ; yes            ] movprfx\t%0, %2\;<sve_fmad_op>\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
+  }
 )
 
 ;; Predicated floating-point ternary operations, merging with the
 ;; third input.
 (define_insn_and_rewrite "*cond_<optab><mode>_4_relaxed"
-  [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_FULL_F 0 "register_operand")
        (unspec:SVE_FULL_F
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (unspec:SVE_FULL_F
             [(match_operand 5)
              (const_int SVE_RELAXED_GP)
-             (match_operand:SVE_FULL_F 2 "register_operand" "w, w")
-             (match_operand:SVE_FULL_F 3 "register_operand" "w, w")
-             (match_operand:SVE_FULL_F 4 "register_operand" "0, w")]
+             (match_operand:SVE_FULL_F 2 "register_operand")
+             (match_operand:SVE_FULL_F 3 "register_operand")
+             (match_operand:SVE_FULL_F 4 "register_operand")]
             SVE_COND_FP_TERNARY)
           (match_dup 4)]
          UNSPEC_SEL))]
   "TARGET_SVE"
-  "@
-   <sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
-   movprfx\t%0, %4\;<sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>"
+  {@ [ cons: =0 , 1   , 2 , 3 , 4 ; attrs: movprfx ]
+     [ w        , Upl , w , w , 0 ; *              ] <sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+     [ ?&w      , Upl , w , w , w ; yes            ] movprfx\t%0, %4\;<sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+  }
   "&& !rtx_equal_p (operands[1], operands[5])"
   {
     operands[5] = copy_rtx (operands[1]);
   }
-  [(set_attr "movprfx" "*,yes")]
 )
 
 (define_insn "*cond_<optab><mode>_4_strict"
-  [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_FULL_F 0 "register_operand")
        (unspec:SVE_FULL_F
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (unspec:SVE_FULL_F
             [(match_dup 1)
              (const_int SVE_STRICT_GP)
-             (match_operand:SVE_FULL_F 2 "register_operand" "w, w")
-             (match_operand:SVE_FULL_F 3 "register_operand" "w, w")
-             (match_operand:SVE_FULL_F 4 "register_operand" "0, w")]
+             (match_operand:SVE_FULL_F 2 "register_operand")
+             (match_operand:SVE_FULL_F 3 "register_operand")
+             (match_operand:SVE_FULL_F 4 "register_operand")]
             SVE_COND_FP_TERNARY)
           (match_dup 4)]
          UNSPEC_SEL))]
   "TARGET_SVE"
-  "@
-   <sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
-   movprfx\t%0, %4\;<sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1   , 2 , 3 , 4 ; attrs: movprfx ]
+     [ w        , Upl , w , w , 0 ; *              ] <sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+     [ ?&w      , Upl , w , w , w ; yes            ] movprfx\t%0, %4\;<sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+  }
 )
 
 ;; Predicated floating-point ternary operations, merging with an
 ;; independent value.
 (define_insn_and_rewrite "*cond_<optab><mode>_any_relaxed"
-  [(set (match_operand:SVE_FULL_F 0 "register_operand" "=&w, &w, &w, &w, &w, ?&w")
+  [(set (match_operand:SVE_FULL_F 0 "register_operand")
        (unspec:SVE_FULL_F
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl, Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (unspec:SVE_FULL_F
             [(match_operand 6)
              (const_int SVE_RELAXED_GP)
-             (match_operand:SVE_FULL_F 2 "register_operand" "w, w, 0, w, w, w")
-             (match_operand:SVE_FULL_F 3 "register_operand" "w, w, w, 0, w, w")
-             (match_operand:SVE_FULL_F 4 "register_operand" "w, 0, w, w, w, w")]
+             (match_operand:SVE_FULL_F 2 "register_operand")
+             (match_operand:SVE_FULL_F 3 "register_operand")
+             (match_operand:SVE_FULL_F 4 "register_operand")]
             SVE_COND_FP_TERNARY)
-          (match_operand:SVE_FULL_F 5 "aarch64_simd_reg_or_zero" "Dz, Dz, Dz, Dz, 0, w")]
+          (match_operand:SVE_FULL_F 5 "aarch64_simd_reg_or_zero")]
          UNSPEC_SEL))]
   "TARGET_SVE
    && !rtx_equal_p (operands[2], operands[5])
    && !rtx_equal_p (operands[3], operands[5])
    && !rtx_equal_p (operands[4], operands[5])"
-  "@
-   movprfx\t%0.<Vetype>, %1/z, %4.<Vetype>\;<sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
-   movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
-   movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_fmad_op>\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
-   movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_fmad_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %4.<Vetype>
-   movprfx\t%0.<Vetype>, %1/m, %4.<Vetype>\;<sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
-   #"
+  {@ [ cons: =0 , 1   , 2 , 3 , 4 , 5   ]
+     [ &w       , Upl , w , w , w , Dz  ] movprfx\t%0.<Vetype>, %1/z, %4.<Vetype>\;<sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+     [ &w       , Upl , w , w , 0 , Dz  ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+     [ &w       , Upl , 0 , w , w , Dz  ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_fmad_op>\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
+     [ &w       , Upl , w , 0 , w , Dz  ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_fmad_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %4.<Vetype>
+     [ &w       , Upl , w , w , w , 0   ] movprfx\t%0.<Vetype>, %1/m, %4.<Vetype>\;<sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+     [ ?&w      , Upl , w , w , w , w   ] #
+  }
   "&& 1"
   {
     if (reload_completed
 )
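
Two details of the _any_relaxed form above (its _strict twin follows) are worth flagging. A # template marks an alternative that is never output directly and must be split, which the rewrite body, largely outside the shown context, performs once the merge value has its own register. And the table carries no attrs: section at all, presumably because every alternative of this pattern needs a MOVPRFX and a uniform [(set_attr "movprfx" "yes")] below the hunk already covers it. As a sanity check, one row reads back into the old notation like this:

;; The table row
;;   [ &w , Upl , w , w , 0 , Dz ] movprfx\t...
;; is the old-syntax column
;;   0:"=&w"  1:"Upl"  2:"w"  3:"w"  4:"0" (tied to operand 0)  5:"Dz"
;; paired with the matching line of the removed "@" template.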
 
 (define_insn_and_rewrite "*cond_<optab><mode>_any_strict"
-  [(set (match_operand:SVE_FULL_F 0 "register_operand" "=&w, &w, &w, &w, &w, ?&w")
+  [(set (match_operand:SVE_FULL_F 0 "register_operand")
        (unspec:SVE_FULL_F
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl, Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (unspec:SVE_FULL_F
             [(match_dup 1)
              (const_int SVE_STRICT_GP)
-             (match_operand:SVE_FULL_F 2 "register_operand" "w, w, 0, w, w, w")
-             (match_operand:SVE_FULL_F 3 "register_operand" "w, w, w, 0, w, w")
-             (match_operand:SVE_FULL_F 4 "register_operand" "w, 0, w, w, w, w")]
+             (match_operand:SVE_FULL_F 2 "register_operand")
+             (match_operand:SVE_FULL_F 3 "register_operand")
+             (match_operand:SVE_FULL_F 4 "register_operand")]
             SVE_COND_FP_TERNARY)
-          (match_operand:SVE_FULL_F 5 "aarch64_simd_reg_or_zero" "Dz, Dz, Dz, Dz, 0, w")]
+          (match_operand:SVE_FULL_F 5 "aarch64_simd_reg_or_zero")]
          UNSPEC_SEL))]
   "TARGET_SVE
    && !rtx_equal_p (operands[2], operands[5])
    && !rtx_equal_p (operands[3], operands[5])
    && !rtx_equal_p (operands[4], operands[5])"
-  "@
-   movprfx\t%0.<Vetype>, %1/z, %4.<Vetype>\;<sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
-   movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
-   movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_fmad_op>\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
-   movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_fmad_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %4.<Vetype>
-   movprfx\t%0.<Vetype>, %1/m, %4.<Vetype>\;<sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
-   #"
+  {@ [ cons: =0 , 1   , 2 , 3 , 4 , 5   ]
+     [ &w       , Upl , w , w , w , Dz  ] movprfx\t%0.<Vetype>, %1/z, %4.<Vetype>\;<sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+     [ &w       , Upl , w , w , 0 , Dz  ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+     [ &w       , Upl , 0 , w , w , Dz  ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_fmad_op>\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
+     [ &w       , Upl , w , 0 , w , Dz  ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_fmad_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %4.<Vetype>
+     [ &w       , Upl , w , w , w , 0   ] movprfx\t%0.<Vetype>, %1/m, %4.<Vetype>\;<sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+     [ ?&w      , Upl , w , w , w , w   ] #
+  }
   "&& reload_completed
    && register_operand (operands[5], <MODE>mode)
    && !rtx_equal_p (operands[0], operands[5])"
 ;; Unpredicated FMLA and FMLS by selected lanes.  It doesn't seem worth using
 ;; (fma ...) since target-independent code won't understand the indexing.
 (define_insn "@aarch64_<optab>_lane_<mode>"
-  [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_FULL_F 0 "register_operand")
        (unspec:SVE_FULL_F
-         [(match_operand:SVE_FULL_F 1 "register_operand" "w, w")
+         [(match_operand:SVE_FULL_F 1 "register_operand")
           (unspec:SVE_FULL_F
-            [(match_operand:SVE_FULL_F 2 "register_operand" "<sve_lane_con>, <sve_lane_con>")
+            [(match_operand:SVE_FULL_F 2 "register_operand")
              (match_operand:SI 3 "const_int_operand")]
             UNSPEC_SVE_LANE_SELECT)
-          (match_operand:SVE_FULL_F 4 "register_operand" "0, w")]
+          (match_operand:SVE_FULL_F 4 "register_operand")]
          SVE_FP_TERNARY_LANE))]
   "TARGET_SVE"
-  "@
-   <sve_fp_op>\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>[%3]
-   movprfx\t%0, %4\;<sve_fp_op>\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>[%3]"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1 , 2              , 4 ; attrs: movprfx ]
+     [ w        , w , <sve_lane_con> , 0 ; *              ] <sve_fp_op>\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>[%3]
+     [ ?&w      , w , <sve_lane_con> , w ; yes            ] movprfx\t%0, %4\;<sve_fp_op>\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>[%3]
+  }
 )
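
On the recurring movprfx attribute: alternatives marked yes expand to a MOVPRFX prefix plus the main instruction, and the attribute's main consumer is the length computation. Quoted from memory and simplified, not from this patch, the relevant definition in aarch64.md looks roughly like:

;; Sketch of the attribute's consumer; see the real define_attr "length"
;; in aarch64.md for the full version.
(define_attr "length" ""
  (cond [(eq_attr "movprfx" "yes") (const_int 8)]
        (const_int 4)))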
 
 ;; -------------------------------------------------------------------------
 
 ;; Predicated FCMLA.
 (define_insn "@aarch64_pred_<optab><mode>"
-  [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_FULL_F 0 "register_operand")
        (unspec:SVE_FULL_F
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (match_operand:SI 5 "aarch64_sve_gp_strictness")
-          (match_operand:SVE_FULL_F 2 "register_operand" "w, w")
-          (match_operand:SVE_FULL_F 3 "register_operand" "w, w")
-          (match_operand:SVE_FULL_F 4 "register_operand" "0, w")]
+          (match_operand:SVE_FULL_F 2 "register_operand")
+          (match_operand:SVE_FULL_F 3 "register_operand")
+          (match_operand:SVE_FULL_F 4 "register_operand")]
          SVE_COND_FCMLA))]
   "TARGET_SVE"
-  "@
-   fcmla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>, #<rot>
-   movprfx\t%0, %4\;fcmla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>, #<rot>"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1   , 2 , 3 , 4 ; attrs: movprfx ]
+     [ w        , Upl , w , w , 0 ; *              ] fcmla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>, #<rot>
+     [ ?&w      , Upl , w , w , w ; yes            ] movprfx\t%0, %4\;fcmla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>, #<rot>
+  }
 )
 
 ;; unpredicated optab pattern for auto-vectorizer
 
 ;; Predicated FCMLA, merging with the third input.
 (define_insn_and_rewrite "*cond_<optab><mode>_4_relaxed"
-  [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_FULL_F 0 "register_operand")
        (unspec:SVE_FULL_F
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (unspec:SVE_FULL_F
             [(match_operand 5)
              (const_int SVE_RELAXED_GP)
-             (match_operand:SVE_FULL_F 2 "register_operand" "w, w")
-             (match_operand:SVE_FULL_F 3 "register_operand" "w, w")
-             (match_operand:SVE_FULL_F 4 "register_operand" "0, w")]
+             (match_operand:SVE_FULL_F 2 "register_operand")
+             (match_operand:SVE_FULL_F 3 "register_operand")
+             (match_operand:SVE_FULL_F 4 "register_operand")]
             SVE_COND_FCMLA)
           (match_dup 4)]
          UNSPEC_SEL))]
   "TARGET_SVE"
-  "@
-   fcmla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>, #<rot>
-   movprfx\t%0, %4\;fcmla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>, #<rot>"
+  {@ [ cons: =0 , 1   , 2 , 3 , 4 ; attrs: movprfx ]
+     [ w        , Upl , w , w , 0 ; *              ] fcmla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>, #<rot>
+     [ ?&w      , Upl , w , w , w ; yes            ] movprfx\t%0, %4\;fcmla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>, #<rot>
+  }
   "&& !rtx_equal_p (operands[1], operands[5])"
   {
     operands[5] = copy_rtx (operands[1]);
   }
-  [(set_attr "movprfx" "*,yes")]
 )
 
 (define_insn "*cond_<optab><mode>_4_strict"
-  [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_FULL_F 0 "register_operand")
        (unspec:SVE_FULL_F
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (unspec:SVE_FULL_F
             [(match_dup 1)
              (const_int SVE_STRICT_GP)
-             (match_operand:SVE_FULL_F 2 "register_operand" "w, w")
-             (match_operand:SVE_FULL_F 3 "register_operand" "w, w")
-             (match_operand:SVE_FULL_F 4 "register_operand" "0, w")]
+             (match_operand:SVE_FULL_F 2 "register_operand")
+             (match_operand:SVE_FULL_F 3 "register_operand")
+             (match_operand:SVE_FULL_F 4 "register_operand")]
             SVE_COND_FCMLA)
           (match_dup 4)]
          UNSPEC_SEL))]
   "TARGET_SVE"
-  "@
-   fcmla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>, #<rot>
-   movprfx\t%0, %4\;fcmla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>, #<rot>"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1   , 2 , 3 , 4 ; attrs: movprfx ]
+     [ w        , Upl , w , w , 0 ; *              ] fcmla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>, #<rot>
+     [ ?&w      , Upl , w , w , w ; yes            ] movprfx\t%0, %4\;fcmla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>, #<rot>
+  }
 )
 
 ;; Predicated FCMLA, merging with an independent value.
 (define_insn_and_rewrite "*cond_<optab><mode>_any_relaxed"
-  [(set (match_operand:SVE_FULL_F 0 "register_operand" "=&w, &w, &w, ?&w")
+  [(set (match_operand:SVE_FULL_F 0 "register_operand")
        (unspec:SVE_FULL_F
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (unspec:SVE_FULL_F
             [(match_operand 6)
              (const_int SVE_RELAXED_GP)
-             (match_operand:SVE_FULL_F 2 "register_operand" "w, w, w, w")
-             (match_operand:SVE_FULL_F 3 "register_operand" "w, w, w, w")
-             (match_operand:SVE_FULL_F 4 "register_operand" "w, 0, w, w")]
+             (match_operand:SVE_FULL_F 2 "register_operand")
+             (match_operand:SVE_FULL_F 3 "register_operand")
+             (match_operand:SVE_FULL_F 4 "register_operand")]
             SVE_COND_FCMLA)
-          (match_operand:SVE_FULL_F 5 "aarch64_simd_reg_or_zero" "Dz, Dz, 0, w")]
+          (match_operand:SVE_FULL_F 5 "aarch64_simd_reg_or_zero")]
          UNSPEC_SEL))]
   "TARGET_SVE && !rtx_equal_p (operands[4], operands[5])"
-  "@
-   movprfx\t%0.<Vetype>, %1/z, %4.<Vetype>\;fcmla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>, #<rot>
-   movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;fcmla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>, #<rot>
-   movprfx\t%0.<Vetype>, %1/m, %4.<Vetype>\;fcmla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>, #<rot>
-   #"
+  {@ [ cons: =0 , 1   , 2 , 3 , 4 , 5   ]
+     [ &w       , Upl , w , w , w , Dz  ] movprfx\t%0.<Vetype>, %1/z, %4.<Vetype>\;fcmla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>, #<rot>
+     [ &w       , Upl , w , w , 0 , Dz  ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;fcmla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>, #<rot>
+     [ &w       , Upl , w , w , w , 0   ] movprfx\t%0.<Vetype>, %1/m, %4.<Vetype>\;fcmla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>, #<rot>
+     [ ?&w      , Upl , w , w , w , w   ] #
+  }
   "&& 1"
   {
     if (reload_completed
 )
 
 (define_insn_and_rewrite "*cond_<optab><mode>_any_strict"
-  [(set (match_operand:SVE_FULL_F 0 "register_operand" "=&w, &w, &w, ?&w")
+  [(set (match_operand:SVE_FULL_F 0 "register_operand")
        (unspec:SVE_FULL_F
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (unspec:SVE_FULL_F
             [(match_dup 1)
              (const_int SVE_STRICT_GP)
-             (match_operand:SVE_FULL_F 2 "register_operand" "w, w, w, w")
-             (match_operand:SVE_FULL_F 3 "register_operand" "w, w, w, w")
-             (match_operand:SVE_FULL_F 4 "register_operand" "w, 0, w, w")]
+             (match_operand:SVE_FULL_F 2 "register_operand")
+             (match_operand:SVE_FULL_F 3 "register_operand")
+             (match_operand:SVE_FULL_F 4 "register_operand")]
             SVE_COND_FCMLA)
-          (match_operand:SVE_FULL_F 5 "aarch64_simd_reg_or_zero" "Dz, Dz, 0, w")]
+          (match_operand:SVE_FULL_F 5 "aarch64_simd_reg_or_zero")]
          UNSPEC_SEL))]
   "TARGET_SVE && !rtx_equal_p (operands[4], operands[5])"
-  "@
-   movprfx\t%0.<Vetype>, %1/z, %4.<Vetype>\;fcmla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>, #<rot>
-   movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;fcmla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>, #<rot>
-   movprfx\t%0.<Vetype>, %1/m, %4.<Vetype>\;fcmla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>, #<rot>
-   #"
+  {@ [ cons: =0 , 1   , 2 , 3 , 4 , 5   ]
+     [ &w       , Upl , w , w , w , Dz  ] movprfx\t%0.<Vetype>, %1/z, %4.<Vetype>\;fcmla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>, #<rot>
+     [ &w       , Upl , w , w , 0 , Dz  ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;fcmla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>, #<rot>
+     [ &w       , Upl , w , w , w , 0   ] movprfx\t%0.<Vetype>, %1/m, %4.<Vetype>\;fcmla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>, #<rot>
+     [ ?&w      , Upl , w , w , w , w   ] #
+  }
   "&& reload_completed
    && register_operand (operands[5], <MODE>mode)
    && !rtx_equal_p (operands[0], operands[5])"
 
 ;; Unpredicated FCMLA with indexing.
 (define_insn "@aarch64_<optab>_lane_<mode>"
-  [(set (match_operand:SVE_FULL_HSF 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_FULL_HSF 0 "register_operand")
        (unspec:SVE_FULL_HSF
-         [(match_operand:SVE_FULL_HSF 1 "register_operand" "w, w")
+         [(match_operand:SVE_FULL_HSF 1 "register_operand")
           (unspec:SVE_FULL_HSF
-            [(match_operand:SVE_FULL_HSF 2 "register_operand" "<sve_lane_pair_con>, <sve_lane_pair_con>")
+            [(match_operand:SVE_FULL_HSF 2 "register_operand")
              (match_operand:SI 3 "const_int_operand")]
             UNSPEC_SVE_LANE_SELECT)
-          (match_operand:SVE_FULL_HSF 4 "register_operand" "0, w")]
+          (match_operand:SVE_FULL_HSF 4 "register_operand")]
          FCMLA))]
   "TARGET_SVE"
-  "@
-   fcmla\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>[%3], #<rot>
-   movprfx\t%0, %4\;fcmla\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>[%3], #<rot>"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1 , 2                   , 4 ; attrs: movprfx ]
+     [ w        , w , <sve_lane_pair_con> , 0 ; *              ] fcmla\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>[%3], #<rot>
+     [ ?&w      , w , <sve_lane_pair_con> , w ; yes            ] movprfx\t%0, %4\;fcmla\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>[%3], #<rot>
+  }
 )
 
 ;; -------------------------------------------------------------------------
 ;; -------------------------------------------------------------------------
 
 (define_insn "@aarch64_sve_tmad<mode>"
-  [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_FULL_F 0 "register_operand")
        (unspec:SVE_FULL_F
-         [(match_operand:SVE_FULL_F 1 "register_operand" "0, w")
-          (match_operand:SVE_FULL_F 2 "register_operand" "w, w")
+         [(match_operand:SVE_FULL_F 1 "register_operand")
+          (match_operand:SVE_FULL_F 2 "register_operand")
           (match_operand:DI 3 "const_int_operand")]
          UNSPEC_FTMAD))]
   "TARGET_SVE"
-  "@
-   ftmad\t%0.<Vetype>, %0.<Vetype>, %2.<Vetype>, #%3
-   movprfx\t%0, %1\;ftmad\t%0.<Vetype>, %0.<Vetype>, %2.<Vetype>, #%3"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+     [ w        , 0 , w ; *              ] ftmad\t%0.<Vetype>, %0.<Vetype>, %2.<Vetype>, #%3
+     [ ?&w      , w , w ; yes            ] movprfx\t%0, %1\;ftmad\t%0.<Vetype>, %0.<Vetype>, %2.<Vetype>, #%3
+  }
 )
 
 ;; -------------------------------------------------------------------------
 ;; -------------------------------------------------------------------------
 
 (define_insn "@aarch64_sve_<sve_fp_op>vnx4sf"
-  [(set (match_operand:VNx4SF 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:VNx4SF 0 "register_operand")
        (unspec:VNx4SF
-         [(match_operand:VNx4SF 1 "register_operand" "0, w")
-          (match_operand:VNx8BF 2 "register_operand" "w, w")
-          (match_operand:VNx8BF 3 "register_operand" "w, w")]
+         [(match_operand:VNx4SF 1 "register_operand")
+          (match_operand:VNx8BF 2 "register_operand")
+          (match_operand:VNx8BF 3 "register_operand")]
          SVE_BFLOAT_TERNARY_LONG))]
   "TARGET_SVE_BF16"
-  "@
-   <sve_fp_op>\t%0.s, %2.h, %3.h
-   movprfx\t%0, %1\;<sve_fp_op>\t%0.s, %2.h, %3.h"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+     [ w        , 0 , w , w ; *              ] <sve_fp_op>\t%0.s, %2.h, %3.h
+     [ ?&w      , w , w , w ; yes            ] movprfx\t%0, %1\;<sve_fp_op>\t%0.s, %2.h, %3.h
+  }
 )
 
 ;; The immediate range is enforced before generating the instruction.
 (define_insn "@aarch64_sve_<sve_fp_op>_lanevnx4sf"
-  [(set (match_operand:VNx4SF 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:VNx4SF 0 "register_operand")
        (unspec:VNx4SF
-         [(match_operand:VNx4SF 1 "register_operand" "0, w")
-          (match_operand:VNx8BF 2 "register_operand" "w, w")
-          (match_operand:VNx8BF 3 "register_operand" "y, y")
+         [(match_operand:VNx4SF 1 "register_operand")
+          (match_operand:VNx8BF 2 "register_operand")
+          (match_operand:VNx8BF 3 "register_operand")
           (match_operand:SI 4 "const_int_operand")]
          SVE_BFLOAT_TERNARY_LONG_LANE))]
   "TARGET_SVE_BF16"
-  "@
-   <sve_fp_op>\t%0.s, %2.h, %3.h[%4]
-   movprfx\t%0, %1\;<sve_fp_op>\t%0.s, %2.h, %3.h[%4]"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+     [ w        , 0 , w , y ; *              ] <sve_fp_op>\t%0.s, %2.h, %3.h[%4]
+     [ ?&w      , w , w , y ; yes            ] movprfx\t%0, %1\;<sve_fp_op>\t%0.s, %2.h, %3.h[%4]
+  }
 )
 
 ;; -------------------------------------------------------------------------
 
 ;; The mode iterator enforces the target requirements.
 (define_insn "@aarch64_sve_<sve_fp_op><mode>"
-  [(set (match_operand:SVE_MATMULF 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_MATMULF 0 "register_operand")
        (unspec:SVE_MATMULF
-         [(match_operand:SVE_MATMULF 2 "register_operand" "w, w")
-          (match_operand:SVE_MATMULF 3 "register_operand" "w, w")
-          (match_operand:SVE_MATMULF 1 "register_operand" "0, w")]
+         [(match_operand:SVE_MATMULF 2 "register_operand")
+          (match_operand:SVE_MATMULF 3 "register_operand")
+          (match_operand:SVE_MATMULF 1 "register_operand")]
          FMMLA))]
   "TARGET_SVE"
-  "@
-   <sve_fp_op>\\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>
-   movprfx\t%0, %1\;<sve_fp_op>\\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+     [ w        , 0 , w , w ; *              ] <sve_fp_op>\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>
+     [ ?&w      , w , w , w ; yes            ] movprfx\t%0, %1\;<sve_fp_op>\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>
+  }
 )
 
 ;; =========================================================================
 ;; For the other instructions, using the element size is more natural,
 ;; so we do that for SEL as well.
 (define_insn "*vcond_mask_<mode><vpred>"
-  [(set (match_operand:SVE_ALL 0 "register_operand" "=w, w, w, w, ?w, ?&w, ?&w")
+  [(set (match_operand:SVE_ALL 0 "register_operand")
        (unspec:SVE_ALL
-         [(match_operand:<VPRED> 3 "register_operand" "Upa, Upa, Upa, Upa, Upl, Upa, Upa")
-          (match_operand:SVE_ALL 1 "aarch64_sve_reg_or_dup_imm" "w, vss, vss, Ufc, Ufc, vss, Ufc")
-          (match_operand:SVE_ALL 2 "aarch64_simd_reg_or_zero" "w, 0, Dz, 0, Dz, w, w")]
+         [(match_operand:<VPRED> 3 "register_operand")
+          (match_operand:SVE_ALL 1 "aarch64_sve_reg_or_dup_imm")
+          (match_operand:SVE_ALL 2 "aarch64_simd_reg_or_zero")]
          UNSPEC_SEL))]
   "TARGET_SVE
    && (!register_operand (operands[1], <MODE>mode)
        || register_operand (operands[2], <MODE>mode))"
-  "@
-   sel\t%0.<Vetype>, %3, %1.<Vetype>, %2.<Vetype>
-   mov\t%0.<Vetype>, %3/m, #%I1
-   mov\t%0.<Vetype>, %3/z, #%I1
-   fmov\t%0.<Vetype>, %3/m, #%1
-   movprfx\t%0.<Vetype>, %3/z, %0.<Vetype>\;fmov\t%0.<Vetype>, %3/m, #%1
-   movprfx\t%0, %2\;mov\t%0.<Vetype>, %3/m, #%I1
-   movprfx\t%0, %2\;fmov\t%0.<Vetype>, %3/m, #%1"
-  [(set_attr "movprfx" "*,*,*,*,yes,yes,yes")]
+  {@ [ cons: =0 , 1   , 2  , 3   ; attrs: movprfx ]
+     [ w        , w   , w  , Upa ; *              ] sel\t%0.<Vetype>, %3, %1.<Vetype>, %2.<Vetype>
+     [ w        , vss , 0  , Upa ; *              ] mov\t%0.<Vetype>, %3/m, #%I1
+     [ w        , vss , Dz , Upa ; *              ] mov\t%0.<Vetype>, %3/z, #%I1
+     [ w        , Ufc , 0  , Upa ; *              ] fmov\t%0.<Vetype>, %3/m, #%1
+     [ ?w       , Ufc , Dz , Upl ; yes            ] movprfx\t%0.<Vetype>, %3/z, %0.<Vetype>\;fmov\t%0.<Vetype>, %3/m, #%1
+     [ ?&w      , vss , w  , Upa ; yes            ] movprfx\t%0, %2\;mov\t%0.<Vetype>, %3/m, #%I1
+     [ ?&w      , Ufc , w  , Upa ; yes            ] movprfx\t%0, %2\;fmov\t%0.<Vetype>, %3/m, #%1
+  }
 )
 
 ;; Optimize selects between a duplicated scalar variable and another vector,
 ;; the latter of which can be a zero constant or a variable.  Treat duplicates
 ;; of GPRs as being more expensive than duplicates of FPRs, since they
 ;; involve a cross-file move.
 (define_insn "@aarch64_sel_dup<mode>"
-  [(set (match_operand:SVE_ALL 0 "register_operand" "=?w, w, ??w, ?&w, ??&w, ?&w")
+  [(set (match_operand:SVE_ALL 0 "register_operand")
        (unspec:SVE_ALL
-         [(match_operand:<VPRED> 3 "register_operand" "Upl, Upl, Upl, Upl, Upl, Upl")
+         [(match_operand:<VPRED> 3 "register_operand")
           (vec_duplicate:SVE_ALL
-            (match_operand:<VEL> 1 "register_operand" "r, w, r, w, r, w"))
-          (match_operand:SVE_ALL 2 "aarch64_simd_reg_or_zero" "0, 0, Dz, Dz, w, w")]
+            (match_operand:<VEL> 1 "register_operand"))
+          (match_operand:SVE_ALL 2 "aarch64_simd_reg_or_zero")]
          UNSPEC_SEL))]
   "TARGET_SVE"
-  "@
-   mov\t%0.<Vetype>, %3/m, %<vwcore>1
-   mov\t%0.<Vetype>, %3/m, %<Vetype>1
-   movprfx\t%0.<Vetype>, %3/z, %0.<Vetype>\;mov\t%0.<Vetype>, %3/m, %<vwcore>1
-   movprfx\t%0.<Vetype>, %3/z, %0.<Vetype>\;mov\t%0.<Vetype>, %3/m, %<Vetype>1
-   movprfx\t%0, %2\;mov\t%0.<Vetype>, %3/m, %<vwcore>1
-   movprfx\t%0, %2\;mov\t%0.<Vetype>, %3/m, %<Vetype>1"
-  [(set_attr "movprfx" "*,*,yes,yes,yes,yes")]
+  {@ [ cons: =0 , 1 , 2  , 3   ; attrs: movprfx ]
+     [ ?w       , r , 0  , Upl ; *              ] mov\t%0.<Vetype>, %3/m, %<vwcore>1
+     [ w        , w , 0  , Upl ; *              ] mov\t%0.<Vetype>, %3/m, %<Vetype>1
+     [ ??w      , r , Dz , Upl ; yes            ] movprfx\t%0.<Vetype>, %3/z, %0.<Vetype>\;mov\t%0.<Vetype>, %3/m, %<vwcore>1
+     [ ?&w      , w , Dz , Upl ; yes            ] movprfx\t%0.<Vetype>, %3/z, %0.<Vetype>\;mov\t%0.<Vetype>, %3/m, %<Vetype>1
+     [ ??&w     , r , w  , Upl ; yes            ] movprfx\t%0, %2\;mov\t%0.<Vetype>, %3/m, %<vwcore>1
+     [ ?&w      , w , w  , Upl ; yes            ] movprfx\t%0, %2\;mov\t%0.<Vetype>, %3/m, %<Vetype>1
+  }
 )
 
 ;; -------------------------------------------------------------------------
 ;;   - the predicate result bit is in the undefined part of a VNx2BI,
 ;;     so its value doesn't matter anyway.
 (define_insn "@aarch64_pred_cmp<cmp_op><mode>"
-  [(set (match_operand:<VPRED> 0 "register_operand" "=Upa, Upa")
+  [(set (match_operand:<VPRED> 0 "register_operand")
        (unspec:<VPRED>
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (match_operand:SI 2 "aarch64_sve_ptrue_flag")
           (SVE_INT_CMP:<VPRED>
-            (match_operand:SVE_I 3 "register_operand" "w, w")
-            (match_operand:SVE_I 4 "aarch64_sve_cmp_<sve_imm_con>_operand" "<sve_imm_con>, w"))]
+            (match_operand:SVE_I 3 "register_operand")
+            (match_operand:SVE_I 4 "aarch64_sve_cmp_<sve_imm_con>_operand"))]
          UNSPEC_PRED_Z))
    (clobber (reg:CC_NZC CC_REGNUM))]
   "TARGET_SVE"
-  "@
-   cmp<cmp_op>\t%0.<Vetype>, %1/z, %3.<Vetype>, #%4
-   cmp<cmp_op>\t%0.<Vetype>, %1/z, %3.<Vetype>, %4.<Vetype>"
+  {@ [ cons: =0 , 1   , 3 , 4              ]
+     [ Upa      , Upl , w , <sve_imm_con>  ] cmp<cmp_op>\t%0.<Vetype>, %1/z, %3.<Vetype>, #%4
+     [ Upa      , Upl , w , w              ] cmp<cmp_op>\t%0.<Vetype>, %1/z, %3.<Vetype>, %4.<Vetype>
+  }
 )
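
Note that a table entry need not be a literal constraint letter: <sve_imm_con> above is a code attribute that the iterator machinery substitutes per comparison code before constraints are parsed, so one table serves every SVE_INT_CMP instantiation. If I read iterators.md correctly it expands to vsc for the signed comparisons and vsd for the unsigned ones, giving instantiations along these lines (illustrative, for ltu on VNx4SI):

;; Hypothetical instantiation, written out by hand for illustration:
{@ [ cons: =0 , 1   , 3 , 4   ]
   [ Upa      , Upl , w , vsd ] cmplo\t%0.s, %1/z, %3.s, #%4
   [ Upa      , Upl , w , w   ] cmplo\t%0.s, %1/z, %3.s, %4.s
}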
 
 ;; Predicated integer comparisons in which both the flag and predicate
 ;; results are interesting.
 (define_insn_and_rewrite "*cmp<cmp_op><mode>_cc"
   [(set (reg:CC_NZC CC_REGNUM)
        (unspec:CC_NZC
-         [(match_operand:VNx16BI 1 "register_operand" "Upl, Upl")
+         [(match_operand:VNx16BI 1 "register_operand")
           (match_operand 4)
           (match_operand:SI 5 "aarch64_sve_ptrue_flag")
           (unspec:<VPRED>
             [(match_operand 6)
              (match_operand:SI 7 "aarch64_sve_ptrue_flag")
              (SVE_INT_CMP:<VPRED>
-               (match_operand:SVE_I 2 "register_operand" "w, w")
-               (match_operand:SVE_I 3 "aarch64_sve_cmp_<sve_imm_con>_operand" "<sve_imm_con>, w"))]
+               (match_operand:SVE_I 2 "register_operand")
+               (match_operand:SVE_I 3 "aarch64_sve_cmp_<sve_imm_con>_operand"))]
             UNSPEC_PRED_Z)]
          UNSPEC_PTEST))
-   (set (match_operand:<VPRED> 0 "register_operand" "=Upa, Upa")
+   (set (match_operand:<VPRED> 0 "register_operand")
        (unspec:<VPRED>
          [(match_dup 6)
           (match_dup 7)
          UNSPEC_PRED_Z))]
   "TARGET_SVE
    && aarch64_sve_same_pred_for_ptest_p (&operands[4], &operands[6])"
-  "@
-   cmp<cmp_op>\t%0.<Vetype>, %1/z, %2.<Vetype>, #%3
-   cmp<cmp_op>\t%0.<Vetype>, %1/z, %2.<Vetype>, %3.<Vetype>"
+  {@ [ cons: =0 , 1   , 2 , 3              ]
+     [ Upa      , Upl , w , <sve_imm_con>  ] cmp<cmp_op>\t%0.<Vetype>, %1/z, %2.<Vetype>, #%3
+     [ Upa      , Upl , w , w              ] cmp<cmp_op>\t%0.<Vetype>, %1/z, %2.<Vetype>, %3.<Vetype>
+  }
   "&& !rtx_equal_p (operands[4], operands[6])"
   {
     operands[6] = copy_rtx (operands[4]);
 (define_insn_and_rewrite "*cmp<cmp_op><mode>_ptest"
   [(set (reg:CC_NZC CC_REGNUM)
        (unspec:CC_NZC
-         [(match_operand:VNx16BI 1 "register_operand" "Upl, Upl")
+         [(match_operand:VNx16BI 1 "register_operand")
           (match_operand 4)
           (match_operand:SI 5 "aarch64_sve_ptrue_flag")
           (unspec:<VPRED>
             [(match_operand 6)
              (match_operand:SI 7 "aarch64_sve_ptrue_flag")
              (SVE_INT_CMP:<VPRED>
-               (match_operand:SVE_I 2 "register_operand" "w, w")
-               (match_operand:SVE_I 3 "aarch64_sve_cmp_<sve_imm_con>_operand" "<sve_imm_con>, w"))]
+               (match_operand:SVE_I 2 "register_operand")
+               (match_operand:SVE_I 3 "aarch64_sve_cmp_<sve_imm_con>_operand"))]
             UNSPEC_PRED_Z)]
          UNSPEC_PTEST))
    (clobber (match_scratch:<VPRED> 0 "=Upa, Upa"))]
   "TARGET_SVE
    && aarch64_sve_same_pred_for_ptest_p (&operands[4], &operands[6])"
-  "@
-   cmp<cmp_op>\t%0.<Vetype>, %1/z, %2.<Vetype>, #%3
-   cmp<cmp_op>\t%0.<Vetype>, %1/z, %2.<Vetype>, %3.<Vetype>"
+  {@ [ cons: 1 , 2 , 3              ]
+     [ Upl     , w , <sve_imm_con>  ] cmp<cmp_op>\t%0.<Vetype>, %1/z, %2.<Vetype>, #%3
+     [ Upl     , w , w              ] cmp<cmp_op>\t%0.<Vetype>, %1/z, %2.<Vetype>, %3.<Vetype>
+  }
   "&& !rtx_equal_p (operands[4], operands[6])"
   {
     operands[6] = copy_rtx (operands[4]);
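
The _ptest variant above also shows the one place the old notation survives inside a converted pattern: operand 0 is a clobbered match_scratch, and scratches keep their inline constraint string ("=Upa, Upa"), so the table header starts at cons: 1 with no =0 column. The scratch's comma-separated constraints must still agree in number with the table rows. The shape, on a made-up pattern:

;; Illustrative only; the output templates are placeholders.
(define_insn "*sketch_with_scratch"
  [(set (match_operand:SI 0 "register_operand")
        (not:SI (match_operand:SI 1 "register_operand")))
   (clobber (match_scratch:SI 2 "=&r,&r"))]
  ""
  {@ [ cons: =0 , 1 ]
     [ r        , r ] placeholder_form_a
     [ w        , w ] placeholder_form_b
  }
)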
 
 ;; Predicated floating-point comparisons.
 (define_insn "@aarch64_pred_fcm<cmp_op><mode>"
-  [(set (match_operand:<VPRED> 0 "register_operand" "=Upa, Upa")
+  [(set (match_operand:<VPRED> 0 "register_operand")
        (unspec:<VPRED>
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (match_operand:SI 2 "aarch64_sve_ptrue_flag")
-          (match_operand:SVE_FULL_F 3 "register_operand" "w, w")
-          (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero" "Dz, w")]
+          (match_operand:SVE_FULL_F 3 "register_operand")
+          (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero")]
          SVE_COND_FP_CMP_I0))]
   "TARGET_SVE"
-  "@
-   fcm<cmp_op>\t%0.<Vetype>, %1/z, %3.<Vetype>, #0.0
-   fcm<cmp_op>\t%0.<Vetype>, %1/z, %3.<Vetype>, %4.<Vetype>"
+  {@ [ cons: =0 , 1   , 3 , 4   ]
+     [ Upa      , Upl , w , Dz  ] fcm<cmp_op>\t%0.<Vetype>, %1/z, %3.<Vetype>, #0.0
+     [ Upa      , Upl , w , w   ] fcm<cmp_op>\t%0.<Vetype>, %1/z, %3.<Vetype>, %4.<Vetype>
+  }
 )
 
 ;; Same for unordered comparisons.
 ;; Set operand 0 to the last active element in operand 3, or to tied
 ;; operand 1 if no elements are active.
 (define_insn "@fold_extract_<last_op>_<mode>"
-  [(set (match_operand:<VEL> 0 "register_operand" "=?r, w")
+  [(set (match_operand:<VEL> 0 "register_operand")
        (unspec:<VEL>
-         [(match_operand:<VEL> 1 "register_operand" "0, 0")
-          (match_operand:<VPRED> 2 "register_operand" "Upl, Upl")
-          (match_operand:SVE_FULL 3 "register_operand" "w, w")]
+         [(match_operand:<VEL> 1 "register_operand")
+          (match_operand:<VPRED> 2 "register_operand")
+          (match_operand:SVE_FULL 3 "register_operand")]
          CLAST))]
   "TARGET_SVE"
-  "@
-   clast<ab>\t%<vwcore>0, %2, %<vwcore>0, %3.<Vetype>
-   clast<ab>\t%<Vetype>0, %2, %<Vetype>0, %3.<Vetype>"
+  {@ [ cons: =0 , 1 , 2   , 3  ]
+     [ ?r       , 0 , Upl , w  ] clast<ab>\t%<vwcore>0, %2, %<vwcore>0, %3.<Vetype>
+     [ w        , 0 , Upl , w  ] clast<ab>\t%<Vetype>0, %2, %<Vetype>0, %3.<Vetype>
+  }
 )
 
 (define_insn "@aarch64_fold_extract_vector_<last_op>_<mode>"
-  [(set (match_operand:SVE_FULL 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_FULL 0 "register_operand")
        (unspec:SVE_FULL
-         [(match_operand:SVE_FULL 1 "register_operand" "0, w")
-          (match_operand:<VPRED> 2 "register_operand" "Upl, Upl")
-          (match_operand:SVE_FULL 3 "register_operand" "w, w")]
+         [(match_operand:SVE_FULL 1 "register_operand")
+          (match_operand:<VPRED> 2 "register_operand")
+          (match_operand:SVE_FULL 3 "register_operand")]
          CLAST))]
   "TARGET_SVE"
-  "@
-   clast<ab>\t%0.<Vetype>, %2, %0.<Vetype>, %3.<Vetype>
-   movprfx\t%0, %1\;clast<ab>\t%0.<Vetype>, %2, %0.<Vetype>, %3.<Vetype>"
+  {@ [ cons: =0 , 1 , 2   , 3  ]
+     [ w        , 0 , Upl , w  ] clast<ab>\t%0.<Vetype>, %2, %0.<Vetype>, %3.<Vetype>
+     [ ?&w      , w , Upl , w  ] movprfx\t%0, %1\;clast<ab>\t%0.<Vetype>, %2, %0.<Vetype>, %3.<Vetype>
+  }
 )
 
 ;; -------------------------------------------------------------------------
 
 ;; Like EXT, but start at the first active element.
 (define_insn "@aarch64_sve_splice<mode>"
-  [(set (match_operand:SVE_FULL 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_FULL 0 "register_operand")
        (unspec:SVE_FULL
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
-          (match_operand:SVE_FULL 2 "register_operand" "0, w")
-          (match_operand:SVE_FULL 3 "register_operand" "w, w")]
+         [(match_operand:<VPRED> 1 "register_operand")
+          (match_operand:SVE_FULL 2 "register_operand")
+          (match_operand:SVE_FULL 3 "register_operand")]
          UNSPEC_SVE_SPLICE))]
   "TARGET_SVE"
-  "@
-   splice\t%0.<Vetype>, %1, %0.<Vetype>, %3.<Vetype>
-   movprfx\t%0, %2\;splice\t%0.<Vetype>, %1, %0.<Vetype>, %3.<Vetype>"
-  [(set_attr "movprfx" "*, yes")]
+  {@ [ cons: =0 , 1   , 2 , 3 ; attrs: movprfx ]
+     [ w        , Upl , 0 , w ; *              ] splice\t%0.<Vetype>, %1, %0.<Vetype>, %3.<Vetype>
+     [ ?&w      , Upl , w , w ; yes            ] movprfx\t%0, %2\;splice\t%0.<Vetype>, %1, %0.<Vetype>, %3.<Vetype>
+  }
 )
 
 ;; Permutes that take half the elements from one vector and half the
 ;; elements from the other.
 
 ;; Predicated float-to-integer conversion, either to the same width or wider.
 (define_insn "@aarch64_sve_<optab>_nontrunc<SVE_FULL_F:mode><SVE_FULL_HSDI:mode>"
-  [(set (match_operand:SVE_FULL_HSDI 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_FULL_HSDI 0 "register_operand")
        (unspec:SVE_FULL_HSDI
-         [(match_operand:<SVE_FULL_HSDI:VPRED> 1 "register_operand" "Upl, Upl")
+         [(match_operand:<SVE_FULL_HSDI:VPRED> 1 "register_operand")
           (match_operand:SI 3 "aarch64_sve_gp_strictness")
-          (match_operand:SVE_FULL_F 2 "register_operand" "0, w")]
+          (match_operand:SVE_FULL_F 2 "register_operand")]
          SVE_COND_FCVTI))]
   "TARGET_SVE && <SVE_FULL_HSDI:elem_bits> >= <SVE_FULL_F:elem_bits>"
-  "@
-   fcvtz<su>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_F:Vetype>
-   movprfx\t%0, %2\;fcvtz<su>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_F:Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1   , 2 ; attrs: movprfx ]
+     [ w        , Upl , 0 ; *              ] fcvtz<su>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_F:Vetype>
+     [ ?&w      , Upl , w ; yes            ] movprfx\t%0, %2\;fcvtz<su>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_F:Vetype>
+  }
 )
 
 ;; Predicated narrowing float-to-integer conversion.
 (define_insn "@aarch64_sve_<optab>_trunc<VNx2DF_ONLY:mode><VNx4SI_ONLY:mode>"
-  [(set (match_operand:VNx4SI_ONLY 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:VNx4SI_ONLY 0 "register_operand")
        (unspec:VNx4SI_ONLY
-         [(match_operand:VNx2BI 1 "register_operand" "Upl, Upl")
+         [(match_operand:VNx2BI 1 "register_operand")
           (match_operand:SI 3 "aarch64_sve_gp_strictness")
-          (match_operand:VNx2DF_ONLY 2 "register_operand" "0, w")]
+          (match_operand:VNx2DF_ONLY 2 "register_operand")]
          SVE_COND_FCVTI))]
   "TARGET_SVE"
-  "@
-   fcvtz<su>\t%0.<VNx4SI_ONLY:Vetype>, %1/m, %2.<VNx2DF_ONLY:Vetype>
-   movprfx\t%0, %2\;fcvtz<su>\t%0.<VNx4SI_ONLY:Vetype>, %1/m, %2.<VNx2DF_ONLY:Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1   , 2 ; attrs: movprfx ]
+     [ w        , Upl , 0 ; *              ] fcvtz<su>\t%0.<VNx4SI_ONLY:Vetype>, %1/m, %2.<VNx2DF_ONLY:Vetype>
+     [ ?&w      , Upl , w ; yes            ] movprfx\t%0, %2\;fcvtz<su>\t%0.<VNx4SI_ONLY:Vetype>, %1/m, %2.<VNx2DF_ONLY:Vetype>
+  }
 )
 
 ;; Predicated float-to-integer conversion with merging, either to the same
 ;; width or wider.  Making all the alternatives earlyclobber makes things
 ;; more consistent for the
 ;; register allocator.
 (define_insn_and_rewrite "*cond_<optab>_nontrunc<SVE_FULL_F:mode><SVE_FULL_HSDI:mode>_relaxed"
-  [(set (match_operand:SVE_FULL_HSDI 0 "register_operand" "=&w, &w, ?&w")
+  [(set (match_operand:SVE_FULL_HSDI 0 "register_operand")
        (unspec:SVE_FULL_HSDI
-         [(match_operand:<SVE_FULL_HSDI:VPRED> 1 "register_operand" "Upl, Upl, Upl")
+         [(match_operand:<SVE_FULL_HSDI:VPRED> 1 "register_operand")
           (unspec:SVE_FULL_HSDI
             [(match_operand 4)
              (const_int SVE_RELAXED_GP)
-             (match_operand:SVE_FULL_F 2 "register_operand" "w, w, w")]
+             (match_operand:SVE_FULL_F 2 "register_operand")]
             SVE_COND_FCVTI)
-          (match_operand:SVE_FULL_HSDI 3 "aarch64_simd_reg_or_zero" "0, Dz, w")]
+          (match_operand:SVE_FULL_HSDI 3 "aarch64_simd_reg_or_zero")]
          UNSPEC_SEL))]
   "TARGET_SVE && <SVE_FULL_HSDI:elem_bits> >= <SVE_FULL_F:elem_bits>"
-  "@
-   fcvtz<su>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_F:Vetype>
-   movprfx\t%0.<SVE_FULL_HSDI:Vetype>, %1/z, %2.<SVE_FULL_HSDI:Vetype>\;fcvtz<su>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_F:Vetype>
-   movprfx\t%0, %3\;fcvtz<su>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_F:Vetype>"
+  {@ [ cons: =0 , 1   , 2 , 3  ; attrs: movprfx ]
+     [ &w       , Upl , w , 0  ; *              ] fcvtz<su>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_F:Vetype>
+     [ &w       , Upl , w , Dz ; yes            ] movprfx\t%0.<SVE_FULL_HSDI:Vetype>, %1/z, %2.<SVE_FULL_HSDI:Vetype>\;fcvtz<su>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_F:Vetype>
+     [ ?&w      , Upl , w , w  ; yes            ] movprfx\t%0, %3\;fcvtz<su>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_F:Vetype>
+  }
   "&& !rtx_equal_p (operands[1], operands[4])"
   {
     operands[4] = copy_rtx (operands[1]);
   }
-  [(set_attr "movprfx" "*,yes,yes")]
 )
 
 (define_insn "*cond_<optab>_nontrunc<SVE_FULL_F:mode><SVE_FULL_HSDI:mode>_strict"
-  [(set (match_operand:SVE_FULL_HSDI 0 "register_operand" "=&w, &w, ?&w")
+  [(set (match_operand:SVE_FULL_HSDI 0 "register_operand")
        (unspec:SVE_FULL_HSDI
-         [(match_operand:<SVE_FULL_HSDI:VPRED> 1 "register_operand" "Upl, Upl, Upl")
+         [(match_operand:<SVE_FULL_HSDI:VPRED> 1 "register_operand")
           (unspec:SVE_FULL_HSDI
             [(match_dup 1)
              (const_int SVE_STRICT_GP)
-             (match_operand:SVE_FULL_F 2 "register_operand" "w, w, w")]
+             (match_operand:SVE_FULL_F 2 "register_operand")]
             SVE_COND_FCVTI)
-          (match_operand:SVE_FULL_HSDI 3 "aarch64_simd_reg_or_zero" "0, Dz, w")]
+          (match_operand:SVE_FULL_HSDI 3 "aarch64_simd_reg_or_zero")]
          UNSPEC_SEL))]
   "TARGET_SVE && <SVE_FULL_HSDI:elem_bits> >= <SVE_FULL_F:elem_bits>"
-  "@
-   fcvtz<su>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_F:Vetype>
-   movprfx\t%0.<SVE_FULL_HSDI:Vetype>, %1/z, %2.<SVE_FULL_HSDI:Vetype>\;fcvtz<su>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_F:Vetype>
-   movprfx\t%0, %3\;fcvtz<su>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_F:Vetype>"
-  [(set_attr "movprfx" "*,yes,yes")]
+  {@ [ cons: =0 , 1   , 2 , 3  ; attrs: movprfx ]
+     [ &w       , Upl , w , 0  ; *              ] fcvtz<su>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_F:Vetype>
+     [ &w       , Upl , w , Dz ; yes            ] movprfx\t%0.<SVE_FULL_HSDI:Vetype>, %1/z, %2.<SVE_FULL_HSDI:Vetype>\;fcvtz<su>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_F:Vetype>
+     [ ?&w      , Upl , w , w  ; yes            ] movprfx\t%0, %3\;fcvtz<su>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_F:Vetype>
+  }
 )
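
A reminder on the constraint modifiers that pervade these conversion patterns, since the compact rows keep them exactly where the old inline strings had them: & marks an earlyclobber output, needed here because the MOVPRFX writes operand 0 before the inputs are fully consumed, and ? (or the stronger ??) disparages an alternative's cost so the register allocator prefers the others.

;; Modifier summary (standard GCC constraint prefixes):
;;   =   output operand (given once in the header, e.g. "cons: =0")
;;   &   earlyclobber: written before all inputs are read
;;   ?   mildly disparage this alternative; ?? disparages it more strongly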
 
 ;; Predicated narrowing float-to-integer conversion with merging.
 )
 
 (define_insn "*cond_<optab>_trunc<VNx2DF_ONLY:mode><VNx4SI_ONLY:mode>"
-  [(set (match_operand:VNx4SI_ONLY 0 "register_operand" "=&w, &w, ?&w")
+  [(set (match_operand:VNx4SI_ONLY 0 "register_operand")
        (unspec:VNx4SI_ONLY
-         [(match_operand:VNx2BI 1 "register_operand" "Upl, Upl, Upl")
+         [(match_operand:VNx2BI 1 "register_operand")
           (unspec:VNx4SI_ONLY
             [(match_dup 1)
              (match_operand:SI 4 "aarch64_sve_gp_strictness")
-             (match_operand:VNx2DF_ONLY 2 "register_operand" "w, w, w")]
+             (match_operand:VNx2DF_ONLY 2 "register_operand")]
             SVE_COND_FCVTI)
-          (match_operand:VNx4SI_ONLY 3 "aarch64_simd_reg_or_zero" "0, Dz, w")]
+          (match_operand:VNx4SI_ONLY 3 "aarch64_simd_reg_or_zero")]
          UNSPEC_SEL))]
   "TARGET_SVE"
-  "@
-   fcvtz<su>\t%0.<VNx4SI_ONLY:Vetype>, %1/m, %2.<VNx2DF_ONLY:Vetype>
-   movprfx\t%0.<VNx2DF_ONLY:Vetype>, %1/z, %2.<VNx2DF_ONLY:Vetype>\;fcvtz<su>\t%0.<VNx4SI_ONLY:Vetype>, %1/m, %2.<VNx2DF_ONLY:Vetype>
-   movprfx\t%0, %3\;fcvtz<su>\t%0.<VNx4SI_ONLY:Vetype>, %1/m, %2.<VNx2DF_ONLY:Vetype>"
-  [(set_attr "movprfx" "*,yes,yes")]
+  {@ [ cons: =0 , 1   , 2 , 3  ; attrs: movprfx ]
+     [ &w       , Upl , w , 0  ; *              ] fcvtz<su>\t%0.<VNx4SI_ONLY:Vetype>, %1/m, %2.<VNx2DF_ONLY:Vetype>
+     [ &w       , Upl , w , Dz ; yes            ] movprfx\t%0.<VNx2DF_ONLY:Vetype>, %1/z, %2.<VNx2DF_ONLY:Vetype>\;fcvtz<su>\t%0.<VNx4SI_ONLY:Vetype>, %1/m, %2.<VNx2DF_ONLY:Vetype>
+     [ ?&w      , Upl , w , w  ; yes            ] movprfx\t%0, %3\;fcvtz<su>\t%0.<VNx4SI_ONLY:Vetype>, %1/m, %2.<VNx2DF_ONLY:Vetype>
+  }
 )
 
 ;; -------------------------------------------------------------------------
 ;; Predicated integer-to-float conversion, either to the same width or
 ;; narrower.
 (define_insn "@aarch64_sve_<optab>_nonextend<SVE_FULL_HSDI:mode><SVE_FULL_F:mode>"
-  [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_FULL_F 0 "register_operand")
        (unspec:SVE_FULL_F
-         [(match_operand:<SVE_FULL_HSDI:VPRED> 1 "register_operand" "Upl, Upl")
+         [(match_operand:<SVE_FULL_HSDI:VPRED> 1 "register_operand")
           (match_operand:SI 3 "aarch64_sve_gp_strictness")
-          (match_operand:SVE_FULL_HSDI 2 "register_operand" "0, w")]
+          (match_operand:SVE_FULL_HSDI 2 "register_operand")]
          SVE_COND_ICVTF))]
   "TARGET_SVE && <SVE_FULL_HSDI:elem_bits> >= <SVE_FULL_F:elem_bits>"
-  "@
-   <su>cvtf\t%0.<SVE_FULL_F:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>
-   movprfx\t%0, %2\;<su>cvtf\t%0.<SVE_FULL_F:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1   , 2 ; attrs: movprfx ]
+     [ w        , Upl , 0 ; *              ] <su>cvtf\t%0.<SVE_FULL_F:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>
+     [ ?&w      , Upl , w ; yes            ] movprfx\t%0, %2\;<su>cvtf\t%0.<SVE_FULL_F:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>
+  }
 )
 
 ;; Predicated widening integer-to-float conversion.
 (define_insn "@aarch64_sve_<optab>_extend<VNx4SI_ONLY:mode><VNx2DF_ONLY:mode>"
-  [(set (match_operand:VNx2DF_ONLY 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:VNx2DF_ONLY 0 "register_operand")
        (unspec:VNx2DF_ONLY
-         [(match_operand:VNx2BI 1 "register_operand" "Upl, Upl")
+         [(match_operand:VNx2BI 1 "register_operand")
           (match_operand:SI 3 "aarch64_sve_gp_strictness")
-          (match_operand:VNx4SI_ONLY 2 "register_operand" "0, w")]
+          (match_operand:VNx4SI_ONLY 2 "register_operand")]
          SVE_COND_ICVTF))]
   "TARGET_SVE"
-  "@
-   <su>cvtf\t%0.<VNx2DF_ONLY:Vetype>, %1/m, %2.<VNx4SI_ONLY:Vetype>
-   movprfx\t%0, %2\;<su>cvtf\t%0.<VNx2DF_ONLY:Vetype>, %1/m, %2.<VNx4SI_ONLY:Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1   , 2 ; attrs: movprfx ]
+     [ w        , Upl , 0 ; *              ] <su>cvtf\t%0.<VNx2DF_ONLY:Vetype>, %1/m, %2.<VNx4SI_ONLY:Vetype>
+     [ ?&w      , Upl , w ; yes            ] movprfx\t%0, %2\;<su>cvtf\t%0.<VNx2DF_ONLY:Vetype>, %1/m, %2.<VNx4SI_ONLY:Vetype>
+  }
 )
 
 ;; Predicated integer-to-float conversion with merging, either to the same
 ;; width or narrower.  Making all the alternatives earlyclobber makes things
 ;; more consistent for the
 ;; register allocator.
 (define_insn_and_rewrite "*cond_<optab>_nonextend<SVE_FULL_HSDI:mode><SVE_FULL_F:mode>_relaxed"
-  [(set (match_operand:SVE_FULL_F 0 "register_operand" "=&w, &w, ?&w")
+  [(set (match_operand:SVE_FULL_F 0 "register_operand")
        (unspec:SVE_FULL_F
-         [(match_operand:<SVE_FULL_HSDI:VPRED> 1 "register_operand" "Upl, Upl, Upl")
+         [(match_operand:<SVE_FULL_HSDI:VPRED> 1 "register_operand")
           (unspec:SVE_FULL_F
             [(match_operand 4)
              (const_int SVE_RELAXED_GP)
-             (match_operand:SVE_FULL_HSDI 2 "register_operand" "w, w, w")]
+             (match_operand:SVE_FULL_HSDI 2 "register_operand")]
             SVE_COND_ICVTF)
-          (match_operand:SVE_FULL_F 3 "aarch64_simd_reg_or_zero" "0, Dz, w")]
+          (match_operand:SVE_FULL_F 3 "aarch64_simd_reg_or_zero")]
          UNSPEC_SEL))]
   "TARGET_SVE && <SVE_FULL_HSDI:elem_bits> >= <SVE_FULL_F:elem_bits>"
-  "@
-   <su>cvtf\t%0.<SVE_FULL_F:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>
-   movprfx\t%0.<SVE_FULL_HSDI:Vetype>, %1/z, %2.<SVE_FULL_HSDI:Vetype>\;<su>cvtf\t%0.<SVE_FULL_F:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>
-   movprfx\t%0, %3\;<su>cvtf\t%0.<SVE_FULL_F:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>"
+  {@ [ cons: =0 , 1   , 2 , 3  ; attrs: movprfx ]
+     [ &w       , Upl , w , 0  ; *              ] <su>cvtf\t%0.<SVE_FULL_F:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>
+     [ &w       , Upl , w , Dz ; yes            ] movprfx\t%0.<SVE_FULL_HSDI:Vetype>, %1/z, %2.<SVE_FULL_HSDI:Vetype>\;<su>cvtf\t%0.<SVE_FULL_F:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>
+     [ ?&w      , Upl , w , w  ; yes            ] movprfx\t%0, %3\;<su>cvtf\t%0.<SVE_FULL_F:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>
+  }
   "&& !rtx_equal_p (operands[1], operands[4])"
   {
     operands[4] = copy_rtx (operands[1]);
   }
-  [(set_attr "movprfx" "*,yes,yes")]
 )
 
 (define_insn "*cond_<optab>_nonextend<SVE_FULL_HSDI:mode><SVE_FULL_F:mode>_strict"
-  [(set (match_operand:SVE_FULL_F 0 "register_operand" "=&w, &w, ?&w")
+  [(set (match_operand:SVE_FULL_F 0 "register_operand")
        (unspec:SVE_FULL_F
-         [(match_operand:<SVE_FULL_HSDI:VPRED> 1 "register_operand" "Upl, Upl, Upl")
+         [(match_operand:<SVE_FULL_HSDI:VPRED> 1 "register_operand")
           (unspec:SVE_FULL_F
             [(match_dup 1)
              (const_int SVE_STRICT_GP)
-             (match_operand:SVE_FULL_HSDI 2 "register_operand" "w, w, w")]
+             (match_operand:SVE_FULL_HSDI 2 "register_operand")]
             SVE_COND_ICVTF)
-          (match_operand:SVE_FULL_F 3 "aarch64_simd_reg_or_zero" "0, Dz, w")]
+          (match_operand:SVE_FULL_F 3 "aarch64_simd_reg_or_zero")]
          UNSPEC_SEL))]
   "TARGET_SVE && <SVE_FULL_HSDI:elem_bits> >= <SVE_FULL_F:elem_bits>"
-  "@
-   <su>cvtf\t%0.<SVE_FULL_F:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>
-   movprfx\t%0.<SVE_FULL_HSDI:Vetype>, %1/z, %2.<SVE_FULL_HSDI:Vetype>\;<su>cvtf\t%0.<SVE_FULL_F:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>
-   movprfx\t%0, %3\;<su>cvtf\t%0.<SVE_FULL_F:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>"
-  [(set_attr "movprfx" "*,yes,yes")]
+  {@ [ cons: =0 , 1   , 2 , 3  ; attrs: movprfx ]
+     [ &w       , Upl , w , 0  ; *              ] <su>cvtf\t%0.<SVE_FULL_F:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>
+     [ &w       , Upl , w , Dz ; yes            ] movprfx\t%0.<SVE_FULL_HSDI:Vetype>, %1/z, %2.<SVE_FULL_HSDI:Vetype>\;<su>cvtf\t%0.<SVE_FULL_F:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>
+     [ ?&w      , Upl , w , w  ; yes            ] movprfx\t%0, %3\;<su>cvtf\t%0.<SVE_FULL_F:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>
+  }
 )
 
 ;; Predicated widening integer-to-float conversion with merging.
 )
 
 (define_insn "*cond_<optab>_extend<VNx4SI_ONLY:mode><VNx2DF_ONLY:mode>"
-  [(set (match_operand:VNx2DF_ONLY 0 "register_operand" "=w, ?&w, ?&w")
+  [(set (match_operand:VNx2DF_ONLY 0 "register_operand")
        (unspec:VNx2DF_ONLY
-         [(match_operand:VNx2BI 1 "register_operand" "Upl, Upl, Upl")
+         [(match_operand:VNx2BI 1 "register_operand")
           (unspec:VNx2DF_ONLY
             [(match_dup 1)
              (match_operand:SI 4 "aarch64_sve_gp_strictness")
-             (match_operand:VNx4SI_ONLY 2 "register_operand" "w, w, w")]
+             (match_operand:VNx4SI_ONLY 2 "register_operand")]
             SVE_COND_ICVTF)
-          (match_operand:VNx2DF_ONLY 3 "aarch64_simd_reg_or_zero" "0, Dz, w")]
+          (match_operand:VNx2DF_ONLY 3 "aarch64_simd_reg_or_zero")]
          UNSPEC_SEL))]
   "TARGET_SVE"
-  "@
-   <su>cvtf\t%0.<VNx2DF_ONLY:Vetype>, %1/m, %2.<VNx4SI_ONLY:Vetype>
-   movprfx\t%0.<VNx2DF_ONLY:Vetype>, %1/z, %2.<VNx2DF_ONLY:Vetype>\;<su>cvtf\t%0.<VNx2DF_ONLY:Vetype>, %1/m, %2.<VNx4SI_ONLY:Vetype>
-   movprfx\t%0, %3\;<su>cvtf\t%0.<VNx2DF_ONLY:Vetype>, %1/m, %2.<VNx4SI_ONLY:Vetype>"
-  [(set_attr "movprfx" "*,yes,yes")]
+  {@ [ cons: =0 , 1   , 2 , 3  ; attrs: movprfx ]
+     [ w        , Upl , w , 0  ; *              ] <su>cvtf\t%0.<VNx2DF_ONLY:Vetype>, %1/m, %2.<VNx4SI_ONLY:Vetype>
+     [ ?&w      , Upl , w , Dz ; yes            ] movprfx\t%0.<VNx2DF_ONLY:Vetype>, %1/z, %2.<VNx2DF_ONLY:Vetype>\;<su>cvtf\t%0.<VNx2DF_ONLY:Vetype>, %1/m, %2.<VNx4SI_ONLY:Vetype>
+     [ ?&w      , Upl , w , w  ; yes            ] movprfx\t%0, %3\;<su>cvtf\t%0.<VNx2DF_ONLY:Vetype>, %1/m, %2.<VNx4SI_ONLY:Vetype>
+  }
 )
 
 ;; -------------------------------------------------------------------------
 
 ;; Predicated float-to-float truncation.
 (define_insn "@aarch64_sve_<optab>_trunc<SVE_FULL_SDF:mode><SVE_FULL_HSF:mode>"
-  [(set (match_operand:SVE_FULL_HSF 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_FULL_HSF 0 "register_operand")
        (unspec:SVE_FULL_HSF
-         [(match_operand:<SVE_FULL_SDF:VPRED> 1 "register_operand" "Upl, Upl")
+         [(match_operand:<SVE_FULL_SDF:VPRED> 1 "register_operand")
           (match_operand:SI 3 "aarch64_sve_gp_strictness")
-          (match_operand:SVE_FULL_SDF 2 "register_operand" "0, w")]
+          (match_operand:SVE_FULL_SDF 2 "register_operand")]
          SVE_COND_FCVT))]
   "TARGET_SVE && <SVE_FULL_SDF:elem_bits> > <SVE_FULL_HSF:elem_bits>"
-  "@
-   fcvt\t%0.<SVE_FULL_HSF:Vetype>, %1/m, %2.<SVE_FULL_SDF:Vetype>
-   movprfx\t%0, %2\;fcvt\t%0.<SVE_FULL_HSF:Vetype>, %1/m, %2.<SVE_FULL_SDF:Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1   , 2 ; attrs: movprfx ]
+     [ w        , Upl , 0 ; *              ] fcvt\t%0.<SVE_FULL_HSF:Vetype>, %1/m, %2.<SVE_FULL_SDF:Vetype>
+     [ ?&w      , Upl , w ; yes            ] movprfx\t%0, %2\;fcvt\t%0.<SVE_FULL_HSF:Vetype>, %1/m, %2.<SVE_FULL_SDF:Vetype>
+  }
 )
 
 ;; Predicated float-to-float truncation with merging.
 )
 
 (define_insn "*cond_<optab>_trunc<SVE_FULL_SDF:mode><SVE_FULL_HSF:mode>"
-  [(set (match_operand:SVE_FULL_HSF 0 "register_operand" "=w, ?&w, ?&w")
+  [(set (match_operand:SVE_FULL_HSF 0 "register_operand")
        (unspec:SVE_FULL_HSF
-         [(match_operand:<SVE_FULL_SDF:VPRED> 1 "register_operand" "Upl, Upl, Upl")
+         [(match_operand:<SVE_FULL_SDF:VPRED> 1 "register_operand")
           (unspec:SVE_FULL_HSF
             [(match_dup 1)
              (match_operand:SI 4 "aarch64_sve_gp_strictness")
-             (match_operand:SVE_FULL_SDF 2 "register_operand" "w, w, w")]
+             (match_operand:SVE_FULL_SDF 2 "register_operand")]
             SVE_COND_FCVT)
-          (match_operand:SVE_FULL_HSF 3 "aarch64_simd_reg_or_zero" "0, Dz, w")]
+          (match_operand:SVE_FULL_HSF 3 "aarch64_simd_reg_or_zero")]
          UNSPEC_SEL))]
   "TARGET_SVE && <SVE_FULL_SDF:elem_bits> > <SVE_FULL_HSF:elem_bits>"
-  "@
-   fcvt\t%0.<SVE_FULL_HSF:Vetype>, %1/m, %2.<SVE_FULL_SDF:Vetype>
-   movprfx\t%0.<SVE_FULL_SDF:Vetype>, %1/z, %2.<SVE_FULL_SDF:Vetype>\;fcvt\t%0.<SVE_FULL_HSF:Vetype>, %1/m, %2.<SVE_FULL_SDF:Vetype>
-   movprfx\t%0, %3\;fcvt\t%0.<SVE_FULL_HSF:Vetype>, %1/m, %2.<SVE_FULL_SDF:Vetype>"
-  [(set_attr "movprfx" "*,yes,yes")]
+  {@ [ cons: =0 , 1   , 2 , 3  ; attrs: movprfx ]
+     [ w        , Upl , w , 0  ; *              ] fcvt\t%0.<SVE_FULL_HSF:Vetype>, %1/m, %2.<SVE_FULL_SDF:Vetype>
+     [ ?&w      , Upl , w , Dz ; yes            ] movprfx\t%0.<SVE_FULL_SDF:Vetype>, %1/z, %2.<SVE_FULL_SDF:Vetype>\;fcvt\t%0.<SVE_FULL_HSF:Vetype>, %1/m, %2.<SVE_FULL_SDF:Vetype>
+     [ ?&w      , Upl , w , w  ; yes            ] movprfx\t%0, %3\;fcvt\t%0.<SVE_FULL_HSF:Vetype>, %1/m, %2.<SVE_FULL_SDF:Vetype>
+  }
 )
 
 ;; -------------------------------------------------------------------------
 
 ;; Predicated BFCVT.
 (define_insn "@aarch64_sve_<optab>_trunc<VNx4SF_ONLY:mode><VNx8BF_ONLY:mode>"
-  [(set (match_operand:VNx8BF_ONLY 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:VNx8BF_ONLY 0 "register_operand")
        (unspec:VNx8BF_ONLY
-         [(match_operand:VNx4BI 1 "register_operand" "Upl, Upl")
+         [(match_operand:VNx4BI 1 "register_operand")
           (match_operand:SI 3 "aarch64_sve_gp_strictness")
-          (match_operand:VNx4SF_ONLY 2 "register_operand" "0, w")]
+          (match_operand:VNx4SF_ONLY 2 "register_operand")]
          SVE_COND_FCVT))]
   "TARGET_SVE_BF16"
-  "@
-   bfcvt\t%0.h, %1/m, %2.s
-   movprfx\t%0, %2\;bfcvt\t%0.h, %1/m, %2.s"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1   , 2 ; attrs: movprfx ]
+     [ w        , Upl , 0 ; *              ] bfcvt\t%0.h, %1/m, %2.s
+     [ ?&w      , Upl , w ; yes            ] movprfx\t%0, %2\;bfcvt\t%0.h, %1/m, %2.s
+  }
 )
 
 ;; Predicated BFCVT with merging.
 )
 
 (define_insn "*cond_<optab>_trunc<VNx4SF_ONLY:mode><VNx8BF_ONLY:mode>"
-  [(set (match_operand:VNx8BF_ONLY 0 "register_operand" "=w, ?&w, ?&w")
+  [(set (match_operand:VNx8BF_ONLY 0 "register_operand")
        (unspec:VNx8BF_ONLY
-         [(match_operand:VNx4BI 1 "register_operand" "Upl, Upl, Upl")
+         [(match_operand:VNx4BI 1 "register_operand")
           (unspec:VNx8BF_ONLY
             [(match_dup 1)
              (match_operand:SI 4 "aarch64_sve_gp_strictness")
-             (match_operand:VNx4SF_ONLY 2 "register_operand" "w, w, w")]
+             (match_operand:VNx4SF_ONLY 2 "register_operand")]
             SVE_COND_FCVT)
-          (match_operand:VNx8BF_ONLY 3 "aarch64_simd_reg_or_zero" "0, Dz, w")]
+          (match_operand:VNx8BF_ONLY 3 "aarch64_simd_reg_or_zero")]
          UNSPEC_SEL))]
   "TARGET_SVE_BF16"
-  "@
-   bfcvt\t%0.h, %1/m, %2.s
-   movprfx\t%0.s, %1/z, %2.s\;bfcvt\t%0.h, %1/m, %2.s
-   movprfx\t%0, %3\;bfcvt\t%0.h, %1/m, %2.s"
-  [(set_attr "movprfx" "*,yes,yes")]
+  {@ [ cons: =0 , 1   , 2 , 3  ; attrs: movprfx ]
+     [ w        , Upl , w , 0  ; *              ] bfcvt\t%0.h, %1/m, %2.s
+     [ ?&w      , Upl , w , Dz ; yes            ] movprfx\t%0.s, %1/z, %2.s\;bfcvt\t%0.h, %1/m, %2.s
+     [ ?&w      , Upl , w , w  ; yes            ] movprfx\t%0, %3\;bfcvt\t%0.h, %1/m, %2.s
+  }
 )
 
 ;; Predicated BFCVTNT.  This doesn't give a natural aarch64_pred_*/cond_*
 ;; pair because the even elements always have to be supplied for active
 ;; elements, even if the inactive elements don't matter.
 
 ;; Predicated float-to-float extension.
 (define_insn "@aarch64_sve_<optab>_nontrunc<SVE_FULL_HSF:mode><SVE_FULL_SDF:mode>"
-  [(set (match_operand:SVE_FULL_SDF 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_FULL_SDF 0 "register_operand")
        (unspec:SVE_FULL_SDF
-         [(match_operand:<SVE_FULL_SDF:VPRED> 1 "register_operand" "Upl, Upl")
+         [(match_operand:<SVE_FULL_SDF:VPRED> 1 "register_operand")
           (match_operand:SI 3 "aarch64_sve_gp_strictness")
-          (match_operand:SVE_FULL_HSF 2 "register_operand" "0, w")]
+          (match_operand:SVE_FULL_HSF 2 "register_operand")]
          SVE_COND_FCVT))]
   "TARGET_SVE && <SVE_FULL_SDF:elem_bits> > <SVE_FULL_HSF:elem_bits>"
-  "@
-   fcvt\t%0.<SVE_FULL_SDF:Vetype>, %1/m, %2.<SVE_FULL_HSF:Vetype>
-   movprfx\t%0, %2\;fcvt\t%0.<SVE_FULL_SDF:Vetype>, %1/m, %2.<SVE_FULL_HSF:Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1   , 2 ; attrs: movprfx ]
+     [ w        , Upl , 0 ; *              ] fcvt\t%0.<SVE_FULL_SDF:Vetype>, %1/m, %2.<SVE_FULL_HSF:Vetype>
+     [ ?&w      , Upl , w ; yes            ] movprfx\t%0, %2\;fcvt\t%0.<SVE_FULL_SDF:Vetype>, %1/m, %2.<SVE_FULL_HSF:Vetype>
+  }
 )
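
;; MOVPRFX is SVE's constructive prefix: it copies (or, with /z,
;; zeroing-initializes) the destination and fuses with the following
;; destructive instruction.  Its rows keep the "?&w" destination
;; constraint: "&" earlyclobbers the output, since MOVPRFX's destination
;; must not overlap the remaining inputs, and "?" mildly disparages the
;; alternative so the allocator prefers the tied form when it can.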
 
 ;; Predicated float-to-float extension with merging.
 )
 
 (define_insn "*cond_<optab>_nontrunc<SVE_FULL_HSF:mode><SVE_FULL_SDF:mode>"
-  [(set (match_operand:SVE_FULL_SDF 0 "register_operand" "=w, ?&w, ?&w")
+  [(set (match_operand:SVE_FULL_SDF 0 "register_operand")
        (unspec:SVE_FULL_SDF
-         [(match_operand:<SVE_FULL_SDF:VPRED> 1 "register_operand" "Upl, Upl, Upl")
+         [(match_operand:<SVE_FULL_SDF:VPRED> 1 "register_operand")
           (unspec:SVE_FULL_SDF
             [(match_dup 1)
              (match_operand:SI 4 "aarch64_sve_gp_strictness")
-             (match_operand:SVE_FULL_HSF 2 "register_operand" "w, w, w")]
+             (match_operand:SVE_FULL_HSF 2 "register_operand")]
             SVE_COND_FCVT)
-          (match_operand:SVE_FULL_SDF 3 "aarch64_simd_reg_or_zero" "0, Dz, w")]
+          (match_operand:SVE_FULL_SDF 3 "aarch64_simd_reg_or_zero")]
          UNSPEC_SEL))]
   "TARGET_SVE && <SVE_FULL_SDF:elem_bits> > <SVE_FULL_HSF:elem_bits>"
-  "@
-   fcvt\t%0.<SVE_FULL_SDF:Vetype>, %1/m, %2.<SVE_FULL_HSF:Vetype>
-   movprfx\t%0.<SVE_FULL_SDF:Vetype>, %1/z, %2.<SVE_FULL_SDF:Vetype>\;fcvt\t%0.<SVE_FULL_SDF:Vetype>, %1/m, %2.<SVE_FULL_HSF:Vetype>
-   movprfx\t%0, %3\;fcvt\t%0.<SVE_FULL_SDF:Vetype>, %1/m, %2.<SVE_FULL_HSF:Vetype>"
-  [(set_attr "movprfx" "*,yes,yes")]
+  {@ [ cons: =0 , 1   , 2 , 3  ; attrs: movprfx ]
+     [ w        , Upl , w , 0  ; *              ] fcvt\t%0.<SVE_FULL_SDF:Vetype>, %1/m, %2.<SVE_FULL_HSF:Vetype>
+     [ ?&w      , Upl , w , Dz ; yes            ] movprfx\t%0.<SVE_FULL_SDF:Vetype>, %1/z, %2.<SVE_FULL_SDF:Vetype>\;fcvt\t%0.<SVE_FULL_SDF:Vetype>, %1/m, %2.<SVE_FULL_HSF:Vetype>
+     [ ?&w      , Upl , w , w  ; yes            ] movprfx\t%0, %3\;fcvt\t%0.<SVE_FULL_SDF:Vetype>, %1/m, %2.<SVE_FULL_HSF:Vetype>
+  }
 )
 
 ;; -------------------------------------------------------------------------
 ;; zeroing forms, these instructions don't operate elementwise and so
 ;; don't fit the IFN_COND model.
 (define_insn "@aarch64_brk<brk_op>"
-  [(set (match_operand:VNx16BI 0 "register_operand" "=Upa, Upa")
+  [(set (match_operand:VNx16BI 0 "register_operand")
        (unspec:VNx16BI
-         [(match_operand:VNx16BI 1 "register_operand" "Upa, Upa")
-          (match_operand:VNx16BI 2 "register_operand" "Upa, Upa")
-          (match_operand:VNx16BI 3 "aarch64_simd_reg_or_zero" "Dz, 0")]
+         [(match_operand:VNx16BI 1 "register_operand")
+          (match_operand:VNx16BI 2 "register_operand")
+          (match_operand:VNx16BI 3 "aarch64_simd_reg_or_zero")]
          SVE_BRK_UNARY))]
   "TARGET_SVE"
-  "@
-   brk<brk_op>\t%0.b, %1/z, %2.b
-   brk<brk_op>\t%0.b, %1/m, %2.b"
+  {@ [ cons: =0 , 1   , 2   , 3   ]
+     [ Upa      , Upa , Upa , Dz  ] brk<brk_op>\t%0.b, %1/z, %2.b
+     [ Upa      , Upa , Upa , 0   ] brk<brk_op>\t%0.b, %1/m, %2.b
+  }
 )
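
;; Constraint key for the predicate patterns: "Upa" is any SVE predicate
;; register (p0-p15), "Upl" one of the low predicates (p0-p7) required
;; where an instruction has only a 3-bit governing-predicate field, and
;; "Dz" matches a vector of zeros.  So in BRK above, the zeroing row
;; takes a zero third operand while the merging row ties it to the
;; destination ("0").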
 
 ;; Same, but also producing a flags result.
 )
 
 (define_insn_and_rewrite "*aarch64_sve_<inc_dec><mode>_cntp"
-  [(set (match_operand:VNx2DI 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:VNx2DI 0 "register_operand")
        (ANY_PLUS:VNx2DI
          (vec_duplicate:VNx2DI
            (zero_extend:DI
              (unspec:SI
                [(match_operand 3)
                 (const_int SVE_KNOWN_PTRUE)
-                (match_operand:<VPRED> 2 "register_operand" "Upa, Upa")]
+                (match_operand:<VPRED> 2 "register_operand")]
                UNSPEC_CNTP)))
-         (match_operand:VNx2DI_ONLY 1 "register_operand" "0, w")))]
+         (match_operand:VNx2DI_ONLY 1 "register_operand")))]
   "TARGET_SVE"
-  "@
-   <inc_dec>p\t%0.d, %2
-   movprfx\t%0, %1\;<inc_dec>p\t%0.d, %2"
+  {@ [ cons: =0 , 1 , 2   ; attrs: movprfx ]
+     [ w        , 0 , Upa ; *              ] <inc_dec>p\t%0.d, %2
+     [ ?&w      , w , Upa ; yes            ] movprfx\t%0, %1\;<inc_dec>p\t%0.d, %2
+  }
   "&& !CONSTANT_P (operands[3])"
   {
     operands[3] = CONSTM1_RTX (<VPRED>mode);
   }
-  [(set_attr "movprfx" "*,yes")]
 )
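
;; In the define_insn_and_rewrite cases only the constraint half changes:
;; the "&& !CONSTANT_P (...)" rewrite, which canonicalizes the dummy
;; governing predicate to the all-true CONSTM1_RTX, stays where it was,
;; while the old [(set_attr "movprfx" "*,yes")] line is absorbed into the
;; table's "attrs: movprfx" column.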
 
 ;; Increment a vector of SIs by the number of set bits in a predicate.
 )
 
 (define_insn_and_rewrite "*aarch64_sve_<inc_dec><mode>_cntp"
-  [(set (match_operand:VNx4SI 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:VNx4SI 0 "register_operand")
        (ANY_PLUS:VNx4SI
          (vec_duplicate:VNx4SI
            (unspec:SI
              [(match_operand 3)
               (const_int SVE_KNOWN_PTRUE)
-              (match_operand:<VPRED> 2 "register_operand" "Upa, Upa")]
+              (match_operand:<VPRED> 2 "register_operand")]
              UNSPEC_CNTP))
-         (match_operand:VNx4SI_ONLY 1 "register_operand" "0, w")))]
+         (match_operand:VNx4SI_ONLY 1 "register_operand")))]
   "TARGET_SVE"
-  "@
-   <inc_dec>p\t%0.s, %2
-   movprfx\t%0, %1\;<inc_dec>p\t%0.s, %2"
+  {@ [ cons: =0 , 1 , 2   ; attrs: movprfx ]
+     [ w        , 0 , Upa ; *              ] <inc_dec>p\t%0.s, %2
+     [ ?&w      , w , Upa ; yes            ] movprfx\t%0, %1\;<inc_dec>p\t%0.s, %2
+  }
   "&& !CONSTANT_P (operands[3])"
   {
     operands[3] = CONSTM1_RTX (<VPRED>mode);
   }
-  [(set_attr "movprfx" "*,yes")]
 )
 
 ;; Increment a vector of HIs by the number of set bits in a predicate.
 )
 
 (define_insn_and_rewrite "*aarch64_sve_<inc_dec><mode>_cntp"
-  [(set (match_operand:VNx8HI 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:VNx8HI 0 "register_operand")
        (ANY_PLUS:VNx8HI
          (vec_duplicate:VNx8HI
            (match_operator:HI 3 "subreg_lowpart_operator"
              [(unspec:SI
                 [(match_operand 4)
                  (const_int SVE_KNOWN_PTRUE)
-                 (match_operand:<VPRED> 2 "register_operand" "Upa, Upa")]
+                 (match_operand:<VPRED> 2 "register_operand")]
                 UNSPEC_CNTP)]))
-         (match_operand:VNx8HI_ONLY 1 "register_operand" "0, w")))]
+         (match_operand:VNx8HI_ONLY 1 "register_operand")))]
   "TARGET_SVE"
-  "@
-   <inc_dec>p\t%0.h, %2
-   movprfx\t%0, %1\;<inc_dec>p\t%0.h, %2"
+  {@ [ cons: =0 , 1 , 2   ; attrs: movprfx ]
+     [ w        , 0 , Upa ; *              ] <inc_dec>p\t%0.h, %2
+     [ ?&w      , w , Upa ; yes            ] movprfx\t%0, %1\;<inc_dec>p\t%0.h, %2
+  }
   "&& !CONSTANT_P (operands[4])"
   {
     operands[4] = CONSTM1_RTX (<VPRED>mode);
   }
-  [(set_attr "movprfx" "*,yes")]
 )
 
 ;; -------------------------------------------------------------------------
 )
 
 (define_insn_and_rewrite "*aarch64_sve_<inc_dec><mode>_cntp"
-  [(set (match_operand:VNx2DI 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:VNx2DI 0 "register_operand")
        (ANY_MINUS:VNx2DI
-         (match_operand:VNx2DI_ONLY 1 "register_operand" "0, w")
+         (match_operand:VNx2DI_ONLY 1 "register_operand")
          (vec_duplicate:VNx2DI
            (zero_extend:DI
              (unspec:SI
                [(match_operand 3)
                 (const_int SVE_KNOWN_PTRUE)
-                (match_operand:<VPRED> 2 "register_operand" "Upa, Upa")]
+                (match_operand:<VPRED> 2 "register_operand")]
                UNSPEC_CNTP)))))]
   "TARGET_SVE"
-  "@
-   <inc_dec>p\t%0.d, %2
-   movprfx\t%0, %1\;<inc_dec>p\t%0.d, %2"
+  {@ [ cons: =0 , 1 , 2   ; attrs: movprfx ]
+     [ w        , 0 , Upa ; *              ] <inc_dec>p\t%0.d, %2
+     [ ?&w      , w , Upa ; yes            ] movprfx\t%0, %1\;<inc_dec>p\t%0.d, %2
+  }
   "&& !CONSTANT_P (operands[3])"
   {
     operands[3] = CONSTM1_RTX (<VPRED>mode);
   }
-  [(set_attr "movprfx" "*,yes")]
 )
 
 ;; Decrement a vector of SIs by the number of set bits in a predicate.
 )
 
 (define_insn_and_rewrite "*aarch64_sve_<inc_dec><mode>_cntp"
-  [(set (match_operand:VNx4SI 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:VNx4SI 0 "register_operand")
        (ANY_MINUS:VNx4SI
-         (match_operand:VNx4SI_ONLY 1 "register_operand" "0, w")
+         (match_operand:VNx4SI_ONLY 1 "register_operand")
          (vec_duplicate:VNx4SI
            (unspec:SI
              [(match_operand 3)
               (const_int SVE_KNOWN_PTRUE)
-              (match_operand:<VPRED> 2 "register_operand" "Upa, Upa")]
+              (match_operand:<VPRED> 2 "register_operand")]
              UNSPEC_CNTP))))]
   "TARGET_SVE"
-  "@
-   <inc_dec>p\t%0.s, %2
-   movprfx\t%0, %1\;<inc_dec>p\t%0.s, %2"
+  {@ [ cons: =0 , 1 , 2   ; attrs: movprfx ]
+     [ w        , 0 , Upa ; *              ] <inc_dec>p\t%0.s, %2
+     [ ?&w      , w , Upa ; yes            ] movprfx\t%0, %1\;<inc_dec>p\t%0.s, %2
+  }
   "&& !CONSTANT_P (operands[3])"
   {
     operands[3] = CONSTM1_RTX (<VPRED>mode);
   }
-  [(set_attr "movprfx" "*,yes")]
 )
 
 ;; Decrement a vector of HIs by the number of set bits in a predicate.
 )
 
 (define_insn_and_rewrite "*aarch64_sve_<inc_dec><mode>_cntp"
-  [(set (match_operand:VNx8HI 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:VNx8HI 0 "register_operand")
        (ANY_MINUS:VNx8HI
-         (match_operand:VNx8HI_ONLY 1 "register_operand" "0, w")
+         (match_operand:VNx8HI_ONLY 1 "register_operand")
          (vec_duplicate:VNx8HI
            (match_operator:HI 3 "subreg_lowpart_operator"
              [(unspec:SI
                 [(match_operand 4)
                  (const_int SVE_KNOWN_PTRUE)
-                 (match_operand:<VPRED> 2 "register_operand" "Upa, Upa")]
+                 (match_operand:<VPRED> 2 "register_operand")]
                 UNSPEC_CNTP)]))))]
   "TARGET_SVE"
-  "@
-   <inc_dec>p\t%0.h, %2
-   movprfx\t%0, %1\;<inc_dec>p\t%0.h, %2"
+  {@ [ cons: =0 , 1 , 2   ; attrs: movprfx ]
+     [ w        , 0 , Upa ; *              ] <inc_dec>p\t%0.h, %2
+     [ ?&w      , w , Upa ; yes            ] movprfx\t%0, %1\;<inc_dec>p\t%0.h, %2
+  }
   "&& !CONSTANT_P (operands[4])"
   {
     operands[4] = CONSTM1_RTX (<VPRED>mode);
   }
-  [(set_attr "movprfx" "*,yes")]
 )
diff --git a/gcc/config/aarch64/aarch64-sve2.md b/gcc/config/aarch64/aarch64-sve2.md
index 7a77e9b7502f4334016bf029caaaf1c3b237503f..ffa964d606046cfb9004e8bbccf72685cb68b9c2 100644
 (define_insn "@aarch64_scatter_stnt<mode>"
   [(set (mem:BLK (scratch))
        (unspec:BLK
-         [(match_operand:<VPRED> 0 "register_operand" "Upl, Upl")
-          (match_operand:DI 1 "aarch64_reg_or_zero" "Z, r")
-          (match_operand:<V_INT_EQUIV> 2 "register_operand" "w, w")
-          (match_operand:SVE_FULL_SD 3 "register_operand" "w, w")]
+         [(match_operand:<VPRED> 0 "register_operand")
+          (match_operand:DI 1 "aarch64_reg_or_zero")
+          (match_operand:<V_INT_EQUIV> 2 "register_operand")
+          (match_operand:SVE_FULL_SD 3 "register_operand")]
 
          UNSPEC_STNT1_SCATTER))]
   "TARGET_SVE"
-  "@
-   stnt1<Vesize>\t%3.<Vetype>, %0, [%2.<Vetype>]
-   stnt1<Vesize>\t%3.<Vetype>, %0, [%2.<Vetype>, %1]"
+  {@ [ cons: 0 , 1 , 2 , 3  ]
+     [ Upl     , Z , w , w  ] stnt1<Vesize>\t%3.<Vetype>, %0, [%2.<Vetype>]
+     [ Upl     , r , w , w  ] stnt1<Vesize>\t%3.<Vetype>, %0, [%2.<Vetype>, %1]
+  }
 )
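
;; A store has no register output (the set destination is a scratch
;; mem:BLK), so the header here is a plain "cons: 0" with no "=" and the
;; table has no attrs column: both rows are a single STNT1 and only the
;; addressing differs.  "Z" accepts integer constant zero, selecting the
;; base-less [%2.<Vetype>] form; "r" supplies a general-register base.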
 
 ;; Truncating stores.
 (define_insn "@aarch64_scatter_stnt_<SVE_FULL_SDI:mode><SVE_PARTIAL_I:mode>"
   [(set (mem:BLK (scratch))
        (unspec:BLK
-         [(match_operand:<SVE_FULL_SDI:VPRED> 0 "register_operand" "Upl, Upl")
-          (match_operand:DI 1 "aarch64_reg_or_zero" "Z, r")
-          (match_operand:<SVE_FULL_SDI:V_INT_EQUIV> 2 "register_operand" "w, w")
+         [(match_operand:<SVE_FULL_SDI:VPRED> 0 "register_operand")
+          (match_operand:DI 1 "aarch64_reg_or_zero")
+          (match_operand:<SVE_FULL_SDI:V_INT_EQUIV> 2 "register_operand")
           (truncate:SVE_PARTIAL_I
-            (match_operand:SVE_FULL_SDI 3 "register_operand" "w, w"))]
+            (match_operand:SVE_FULL_SDI 3 "register_operand"))]
          UNSPEC_STNT1_SCATTER))]
   "TARGET_SVE2
    && (~<SVE_FULL_SDI:narrower_mask> & <SVE_PARTIAL_I:self_mask>) == 0"
-  "@
-   stnt1<SVE_PARTIAL_I:Vesize>\t%3.<SVE_FULL_SDI:Vetype>, %0, [%2.<SVE_FULL_SDI:Vetype>]
-   stnt1<SVE_PARTIAL_I:Vesize>\t%3.<SVE_FULL_SDI:Vetype>, %0, [%2.<SVE_FULL_SDI:Vetype>, %1]"
+  {@ [ cons: 0 , 1 , 2 , 3  ]
+     [ Upl     , Z , w , w  ] stnt1<SVE_PARTIAL_I:Vesize>\t%3.<SVE_FULL_SDI:Vetype>, %0, [%2.<SVE_FULL_SDI:Vetype>]
+     [ Upl     , r , w , w  ] stnt1<SVE_PARTIAL_I:Vesize>\t%3.<SVE_FULL_SDI:Vetype>, %0, [%2.<SVE_FULL_SDI:Vetype>, %1]
+  }
 )
 
 ;; =========================================================================
 ;; The 2nd and 3rd alternatives are valid for just TARGET_SVE as well but
 ;; we include them here to allow matching simpler, unpredicated RTL.
 (define_insn "*aarch64_mul_unpredicated_<mode>"
-  [(set (match_operand:SVE_I 0 "register_operand" "=w,w,?&w")
+  [(set (match_operand:SVE_I 0 "register_operand")
        (mult:SVE_I
-         (match_operand:SVE_I 1 "register_operand" "w,0,w")
-         (match_operand:SVE_I 2 "aarch64_sve_vsm_operand" "w,vsm,vsm")))]
+         (match_operand:SVE_I 1 "register_operand")
+         (match_operand:SVE_I 2 "aarch64_sve_vsm_operand")))]
   "TARGET_SVE2"
-  "@
-   mul\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>
-   mul\t%0.<Vetype>, %0.<Vetype>, #%2
-   movprfx\t%0, %1\;mul\t%0.<Vetype>, %0.<Vetype>, #%2"
-  [(set_attr "movprfx" "*,*,yes")]
+  {@ [ cons: =0 , 1 , 2   ; attrs: movprfx ]
+     [ w        , w , w   ; *              ] mul\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>
+     [ w        , 0 , vsm ; *              ] mul\t%0.<Vetype>, %0.<Vetype>, #%2
+     [ ?&w      , w , vsm ; yes            ] movprfx\t%0, %1\;mul\t%0.<Vetype>, %0.<Vetype>, #%2
+  }
 )
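
;; "vsm" (from aarch64_sve_vsm_operand) should be read as "register or
;; an immediate that SVE MUL encodes directly": the middle row ties
;; operand 1 to the destination so the destructive MUL-immediate form
;; applies, and the last row lifts that tie at the cost of a MOVPRFX.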
 
 ;; -------------------------------------------------------------------------
 ;; General predicated binary arithmetic.  All operations handled here
 ;; are commutative or have a reversed form.
 (define_insn "@aarch64_pred_<sve_int_op><mode>"
-  [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, w, ?&w")
+  [(set (match_operand:SVE_FULL_I 0 "register_operand")
        (unspec:SVE_FULL_I
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (unspec:SVE_FULL_I
-            [(match_operand:SVE_FULL_I 2 "register_operand" "0, w, w")
-             (match_operand:SVE_FULL_I 3 "register_operand" "w, 0, w")]
+            [(match_operand:SVE_FULL_I 2 "register_operand")
+             (match_operand:SVE_FULL_I 3 "register_operand")]
             SVE2_COND_INT_BINARY_REV)]
          UNSPEC_PRED_X))]
   "TARGET_SVE2"
-  "@
-   <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
-   <sve_int_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
-   movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
-  [(set_attr "movprfx" "*,*,yes")]
+  {@ [ cons: =0 , 1   , 2 , 3 ; attrs: movprfx ]
+     [ w        , Upl , 0 , w ; *              ] <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+     [ w        , Upl , w , 0 ; *              ] <sve_int_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+     [ ?&w      , Upl , w , w ; yes            ] movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+  }
 )
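
;; These tables spell out the usual destructive triple: tie operand 2 to
;; the destination (plain form), tie operand 3 instead (reversed form,
;; switching the template to <sve_int_op_rev> so the surviving source
;; becomes the second operand), or tie nothing and pay for a MOVPRFX.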
 
 ;; Predicated binary arithmetic with merging.
 
 ;; Predicated binary arithmetic, merging with the first input.
 (define_insn_and_rewrite "*cond_<sve_int_op><mode>_2"
-  [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_FULL_I 0 "register_operand")
        (unspec:SVE_FULL_I
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (unspec:SVE_FULL_I
             [(match_operand 4)
              (unspec:SVE_FULL_I
-               [(match_operand:SVE_FULL_I 2 "register_operand" "0, w")
-                (match_operand:SVE_FULL_I 3 "register_operand" "w, w")]
+               [(match_operand:SVE_FULL_I 2 "register_operand")
+                (match_operand:SVE_FULL_I 3 "register_operand")]
                SVE2_COND_INT_BINARY)]
             UNSPEC_PRED_X)
           (match_dup 2)]
          UNSPEC_SEL))]
   "TARGET_SVE2"
-  "@
-   <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
-   movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
+  {@ [ cons: =0 , 1   , 2 , 3 ; attrs: movprfx ]
+     [ w        , Upl , 0 , w ; *              ] <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+     [ ?&w      , Upl , w , w ; yes            ] movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+  }
   "&& !CONSTANT_P (operands[4])"
   {
     operands[4] = CONSTM1_RTX (<VPRED>mode);
   }
-  [(set_attr "movprfx" "*,yes")]
 )
 
 ;; Predicated binary arithmetic, merging with the second input.
 (define_insn_and_rewrite "*cond_<sve_int_op><mode>_3"
-  [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_FULL_I 0 "register_operand")
        (unspec:SVE_FULL_I
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (unspec:SVE_FULL_I
             [(match_operand 4)
              (unspec:SVE_FULL_I
-               [(match_operand:SVE_FULL_I 2 "register_operand" "w, w")
-                (match_operand:SVE_FULL_I 3 "register_operand" "0, w")]
+               [(match_operand:SVE_FULL_I 2 "register_operand")
+                (match_operand:SVE_FULL_I 3 "register_operand")]
                SVE2_COND_INT_BINARY_REV)]
             UNSPEC_PRED_X)
           (match_dup 3)]
          UNSPEC_SEL))]
   "TARGET_SVE2"
-  "@
-   <sve_int_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
-   movprfx\t%0, %3\;<sve_int_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>"
+  {@ [ cons: =0 , 1   , 2 , 3 ; attrs: movprfx ]
+     [ w        , Upl , w , 0 ; *              ] <sve_int_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+     [ ?&w      , Upl , w , w ; yes            ] movprfx\t%0, %3\;<sve_int_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+  }
   "&& !CONSTANT_P (operands[4])"
   {
     operands[4] = CONSTM1_RTX (<VPRED>mode);
   }
-  [(set_attr "movprfx" "*,yes")]
 )
 
 ;; Predicated binary operations, merging with an independent value.
 (define_insn_and_rewrite "*cond_<sve_int_op><mode>_any"
-  [(set (match_operand:SVE_FULL_I 0 "register_operand" "=&w, &w, &w, &w, ?&w")
+  [(set (match_operand:SVE_FULL_I 0 "register_operand")
        (unspec:SVE_FULL_I
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (unspec:SVE_FULL_I
             [(match_operand 5)
              (unspec:SVE_FULL_I
-               [(match_operand:SVE_FULL_I 2 "register_operand" "0, w, w, w, w")
-                (match_operand:SVE_FULL_I 3 "register_operand" "w, 0, w, w, w")]
+               [(match_operand:SVE_FULL_I 2 "register_operand")
+                (match_operand:SVE_FULL_I 3 "register_operand")]
                SVE2_COND_INT_BINARY_REV)]
             UNSPEC_PRED_X)
-          (match_operand:SVE_FULL_I 4 "aarch64_simd_reg_or_zero" "Dz, Dz, Dz, 0, w")]
+          (match_operand:SVE_FULL_I 4 "aarch64_simd_reg_or_zero")]
          UNSPEC_SEL))]
   "TARGET_SVE2
    && !rtx_equal_p (operands[2], operands[4])
    && !rtx_equal_p (operands[3], operands[4])"
-  "@
-   movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
-   movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_int_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
-   movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
-   movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
-   #"
+  {@ [ cons: =0 , 1   , 2 , 3 , 4   ]
+     [ &w       , Upl , 0 , w , Dz  ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+     [ &w       , Upl , w , 0 , Dz  ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_int_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+     [ &w       , Upl , w , w , Dz  ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+     [ &w       , Upl , w , w , 0   ] movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+     [ ?&w      , Upl , w , w , w   ] #
+  }
   "&& 1"
   {
     if (reload_completed
 ;; so there's no correctness requirement to handle merging with an
 ;; independent value.
 (define_insn_and_rewrite "*cond_<sve_int_op><mode>_z"
-  [(set (match_operand:SVE_FULL_I 0 "register_operand" "=&w, &w")
+  [(set (match_operand:SVE_FULL_I 0 "register_operand")
        (unspec:SVE_FULL_I
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (unspec:SVE_FULL_I
             [(match_operand 5)
              (unspec:SVE_FULL_I
-               [(match_operand:SVE_FULL_I 2 "register_operand" "0, w")
-                (match_operand:SVE_FULL_I 3 "register_operand" "w, w")]
+               [(match_operand:SVE_FULL_I 2 "register_operand")
+                (match_operand:SVE_FULL_I 3 "register_operand")]
                SVE2_COND_INT_BINARY_NOREV)]
             UNSPEC_PRED_X)
           (match_operand:SVE_FULL_I 4 "aarch64_simd_imm_zero")]
          UNSPEC_SEL))]
   "TARGET_SVE2"
-  "@
-   movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
-   movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
+  {@ [ cons: =0 , 1   , 2 , 3  ]
+     [ &w       , Upl , 0 , w  ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+     [ &w       , Upl , w , w  ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+  }
   "&& !CONSTANT_P (operands[5])"
   {
     operands[5] = CONSTM1_RTX (<VPRED>mode);
 
 ;; Predicated left shifts.
 (define_insn "@aarch64_pred_<sve_int_op><mode>"
-  [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, w, w, ?&w, ?&w")
+  [(set (match_operand:SVE_FULL_I 0 "register_operand")
        (unspec:SVE_FULL_I
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (unspec:SVE_FULL_I
-            [(match_operand:SVE_FULL_I 2 "register_operand" "0, 0, w, w, w")
-             (match_operand:SVE_FULL_I 3 "aarch64_sve_<lr>shift_operand" "D<lr>, w, 0, D<lr>, w")]
+            [(match_operand:SVE_FULL_I 2 "register_operand")
+             (match_operand:SVE_FULL_I 3 "aarch64_sve_<lr>shift_operand")]
             SVE2_COND_INT_SHIFT)]
          UNSPEC_PRED_X))]
   "TARGET_SVE2"
-  "@
-   <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
-   <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
-   <sve_int_op>r\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
-   movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
-   movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
-  [(set_attr "movprfx" "*,*,*,yes,yes")]
+  {@ [ cons: =0 , 1   , 2 , 3     ; attrs: movprfx ]
+     [ w        , Upl , 0 , D<lr> ; *              ] <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+     [ w        , Upl , 0 , w     ; *              ] <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+     [ w        , Upl , w , 0     ; *              ] <sve_int_op>r\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+     [ ?&w      , Upl , w , D<lr> ; yes            ] movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+     [ ?&w      , Upl , w , w     ; yes            ] movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+  }
 )
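
;; For the shifts the third column mixes "D<lr>" with registers: D<lr>
;; expands to the Dl/Dr constraints for vectors of valid left/right
;; shift immediates, letting immediate and register shifts share one
;; table.  In the *cond_*_any variants a "#" template marks rows that
;; are never output as-is; the rewrite body after "&& 1" (note its
;; reload_completed test) splits them up instead.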
 
 ;; Predicated left shifts with merging.
 
 ;; Predicated left shifts, merging with the first input.
 (define_insn_and_rewrite "*cond_<sve_int_op><mode>_2"
-  [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, w, ?&w, ?&w")
+  [(set (match_operand:SVE_FULL_I 0 "register_operand")
        (unspec:SVE_FULL_I
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (unspec:SVE_FULL_I
             [(match_operand 4)
              (unspec:SVE_FULL_I
-               [(match_operand:SVE_FULL_I 2 "register_operand" "0, 0, w, w")
-                (match_operand:SVE_FULL_I 3 "aarch64_sve_<lr>shift_operand" "D<lr>, w, D<lr>, w")]
+               [(match_operand:SVE_FULL_I 2 "register_operand")
+                (match_operand:SVE_FULL_I 3 "aarch64_sve_<lr>shift_operand")]
                SVE2_COND_INT_SHIFT)]
             UNSPEC_PRED_X)
           (match_dup 2)]
          UNSPEC_SEL))]
   "TARGET_SVE2"
-  "@
-   <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
-   <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
-   movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
-   movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
+  {@ [ cons: =0 , 1   , 2 , 3     ; attrs: movprfx ]
+     [ w        , Upl , 0 , D<lr> ; *              ] <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+     [ w        , Upl , 0 , w     ; *              ] <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+     [ ?&w      , Upl , w , D<lr> ; yes            ] movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+     [ ?&w      , Upl , w , w     ; yes            ] movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+  }
   "&& !CONSTANT_P (operands[4])"
   {
     operands[4] = CONSTM1_RTX (<VPRED>mode);
   }
-  [(set_attr "movprfx" "*,*,yes,yes")]
 )
 
 ;; Predicated left shifts, merging with the second input.
 (define_insn_and_rewrite "*cond_<sve_int_op><mode>_3"
-  [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_FULL_I 0 "register_operand")
        (unspec:SVE_FULL_I
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (unspec:SVE_FULL_I
             [(match_operand 4)
              (unspec:SVE_FULL_I
-               [(match_operand:SVE_FULL_I 2 "register_operand" "w, w")
-                (match_operand:SVE_FULL_I 3 "register_operand" "0, w")]
+               [(match_operand:SVE_FULL_I 2 "register_operand")
+                (match_operand:SVE_FULL_I 3 "register_operand")]
                SVE2_COND_INT_SHIFT)]
             UNSPEC_PRED_X)
           (match_dup 3)]
          UNSPEC_SEL))]
   "TARGET_SVE2"
-  "@
-   <sve_int_op>r\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
-   movprfx\t%0, %3\;<sve_int_op>r\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>"
+  {@ [ cons: =0 , 1   , 2 , 3 ; attrs: movprfx ]
+     [ w        , Upl , w , 0 ; *              ] <sve_int_op>r\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+     [ ?&w      , Upl , w , w ; yes            ] movprfx\t%0, %3\;<sve_int_op>r\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+  }
   "&& !CONSTANT_P (operands[4])"
   {
     operands[4] = CONSTM1_RTX (<VPRED>mode);
   }
-  [(set_attr "movprfx" "*,yes")]
 )
 
 ;; Predicated left shifts, merging with an independent value.
 (define_insn_and_rewrite "*cond_<sve_int_op><mode>_any"
-  [(set (match_operand:SVE_FULL_I 0 "register_operand" "=&w, &w, &w, &w, &w, &w, &w, ?&w, ?&w")
+  [(set (match_operand:SVE_FULL_I 0 "register_operand")
        (unspec:SVE_FULL_I
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl, Upl, Upl, Upl, Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (unspec:SVE_FULL_I
             [(match_operand 5)
              (unspec:SVE_FULL_I
-               [(match_operand:SVE_FULL_I 2 "register_operand" "0, 0, w, w, w, w, w, w, w")
-                (match_operand:SVE_FULL_I 3 "aarch64_sve_<lr>shift_operand" "D<lr>, w, 0, D<lr>, w, D<lr>, w, D<lr>, w")]
+               [(match_operand:SVE_FULL_I 2 "register_operand")
+                (match_operand:SVE_FULL_I 3 "aarch64_sve_<lr>shift_operand")]
                SVE2_COND_INT_SHIFT)]
             UNSPEC_PRED_X)
-          (match_operand:SVE_FULL_I 4 "aarch64_simd_reg_or_zero" "Dz, Dz, Dz, Dz, Dz, 0, 0, w, w")]
+          (match_operand:SVE_FULL_I 4 "aarch64_simd_reg_or_zero")]
          UNSPEC_SEL))]
   "TARGET_SVE2
    && !rtx_equal_p (operands[2], operands[4])
    && (CONSTANT_P (operands[4]) || !rtx_equal_p (operands[3], operands[4]))"
-  "@
-   movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
-   movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
-   movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_int_op>r\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
-   movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
-   movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
-   movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
-   movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
-  #
-  #"
+  {@ [ cons: =0 , 1   , 2 , 3     , 4   ]
+     [ &w       , Upl , 0 , D<lr> , Dz  ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+     [ &w       , Upl , 0 , w     , Dz  ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+     [ &w       , Upl , w , 0     , Dz  ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_int_op>r\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+     [ &w       , Upl , w , D<lr> , Dz  ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+     [ &w       , Upl , w , w     , Dz  ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+     [ &w       , Upl , w , D<lr> , 0   ] movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+     [ &w       , Upl , w , w     , 0   ] movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+     [ ?&w      , Upl , w , D<lr> , w   ] #
+     [ ?&w      , Upl , w , w     , w   ] #
+  }
   "&& 1"
   {
     if (reload_completed
 ;; -------------------------------------------------------------------------
 
 (define_insn "@aarch64_sve_<sve_int_op><mode>"
-  [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_FULL_I 0 "register_operand")
        (unspec:SVE_FULL_I
-         [(match_operand:SVE_FULL_I 2 "register_operand" "w, w")
-          (match_operand:SVE_FULL_I 3 "register_operand" "w, w")
-          (match_operand:SVE_FULL_I 1 "register_operand" "0, w")]
+         [(match_operand:SVE_FULL_I 2 "register_operand")
+          (match_operand:SVE_FULL_I 3 "register_operand")
+          (match_operand:SVE_FULL_I 1 "register_operand")]
          SVE2_INT_TERNARY))]
   "TARGET_SVE2"
-  "@
-   <sve_int_op>\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>
-   movprfx\t%0, %1\;<sve_int_op>\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+     [ w        , 0 , w , w ; *              ] <sve_int_op>\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>
+     [ ?&w      , w , w , w ; yes            ] movprfx\t%0, %1\;<sve_int_op>\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>
+  }
 )
 
 (define_insn "@aarch64_sve_<sve_int_op>_lane_<mode>"
-  [(set (match_operand:SVE_FULL_HSDI 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_FULL_HSDI 0 "register_operand")
        (unspec:SVE_FULL_HSDI
-         [(match_operand:SVE_FULL_HSDI 2 "register_operand" "w, w")
+         [(match_operand:SVE_FULL_HSDI 2 "register_operand")
           (unspec:SVE_FULL_HSDI
-            [(match_operand:SVE_FULL_HSDI 3 "register_operand" "<sve_lane_con>, <sve_lane_con>")
+            [(match_operand:SVE_FULL_HSDI 3 "register_operand")
              (match_operand:SI 4 "const_int_operand")]
             UNSPEC_SVE_LANE_SELECT)
-          (match_operand:SVE_FULL_HSDI 1 "register_operand" "0, w")]
+          (match_operand:SVE_FULL_HSDI 1 "register_operand")]
          SVE2_INT_TERNARY_LANE))]
   "TARGET_SVE2"
-  "@
-   <sve_int_op>\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>[%4]
-   movprfx\t%0, %1\;<sve_int_op>\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>[%4]"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1 , 2 , 3              ; attrs: movprfx ]
+     [ w        , 0 , w , <sve_lane_con> ; *              ] <sve_int_op>\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>[%4]
+     [ ?&w      , w , w , <sve_lane_con> ; yes            ] movprfx\t%0, %1\;<sve_int_op>\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>[%4]
+  }
 )
 
 ;; -------------------------------------------------------------------------
 ;; -------------------------------------------------------------------------
 
 (define_insn "@aarch64_sve_add_mul_lane_<mode>"
-  [(set (match_operand:SVE_FULL_HSDI 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_FULL_HSDI 0 "register_operand")
        (plus:SVE_FULL_HSDI
          (mult:SVE_FULL_HSDI
            (unspec:SVE_FULL_HSDI
-             [(match_operand:SVE_FULL_HSDI 3 "register_operand" "<sve_lane_con>, <sve_lane_con>")
+             [(match_operand:SVE_FULL_HSDI 3 "register_operand")
               (match_operand:SI 4 "const_int_operand")]
              UNSPEC_SVE_LANE_SELECT)
-           (match_operand:SVE_FULL_HSDI 2 "register_operand" "w, w"))
-         (match_operand:SVE_FULL_HSDI 1 "register_operand" "0, w")))]
+           (match_operand:SVE_FULL_HSDI 2 "register_operand"))
+         (match_operand:SVE_FULL_HSDI 1 "register_operand")))]
   "TARGET_SVE2"
-  "@
-   mla\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>[%4]
-   movprfx\t%0, %1\;mla\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>[%4]"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1 , 2 , 3              ; attrs: movprfx ]
+     [ w        , 0 , w , <sve_lane_con> ; *              ] mla\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>[%4]
+     [ ?&w      , w , w , <sve_lane_con> ; yes            ] movprfx\t%0, %1\;mla\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>[%4]
+  }
 )
 
 (define_insn "@aarch64_sve_sub_mul_lane_<mode>"
-  [(set (match_operand:SVE_FULL_HSDI 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_FULL_HSDI 0 "register_operand")
        (minus:SVE_FULL_HSDI
-         (match_operand:SVE_FULL_HSDI 1 "register_operand" "0, w")
+         (match_operand:SVE_FULL_HSDI 1 "register_operand")
          (mult:SVE_FULL_HSDI
            (unspec:SVE_FULL_HSDI
-             [(match_operand:SVE_FULL_HSDI 3 "register_operand" "<sve_lane_con>, <sve_lane_con>")
+             [(match_operand:SVE_FULL_HSDI 3 "register_operand")
               (match_operand:SI 4 "const_int_operand")]
              UNSPEC_SVE_LANE_SELECT)
-           (match_operand:SVE_FULL_HSDI 2 "register_operand" "w, w"))))]
+           (match_operand:SVE_FULL_HSDI 2 "register_operand"))))]
   "TARGET_SVE2"
-  "@
-   mls\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>[%4]
-   movprfx\t%0, %1\;mls\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>[%4]"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1 , 2 , 3              ; attrs: movprfx ]
+     [ w        , 0 , w , <sve_lane_con> ; *              ] mls\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>[%4]
+     [ ?&w      , w , w , <sve_lane_con> ; yes            ] movprfx\t%0, %1\;mls\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>[%4]
+  }
 )
 
 ;; -------------------------------------------------------------------------
 ;; -------------------------------------------------------------------------
 
 (define_insn "@aarch64_sve2_xar<mode>"
-  [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_FULL_I 0 "register_operand")
        (rotatert:SVE_FULL_I
          (xor:SVE_FULL_I
-           (match_operand:SVE_FULL_I 1 "register_operand" "%0, w")
-           (match_operand:SVE_FULL_I 2 "register_operand" "w, w"))
+           (match_operand:SVE_FULL_I 1 "register_operand")
+           (match_operand:SVE_FULL_I 2 "register_operand"))
          (match_operand:SVE_FULL_I 3 "aarch64_simd_rshift_imm")))]
   "TARGET_SVE2"
-  "@
-  xar\t%0.<Vetype>, %0.<Vetype>, %2.<Vetype>, #%3
-  movprfx\t%0, %1\;xar\t%0.<Vetype>, %0.<Vetype>, %2.<Vetype>, #%3"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1  , 2 ; attrs: movprfx ]
+     [ w        , %0 , w ; *              ] xar\t%0.<Vetype>, %0.<Vetype>, %2.<Vetype>, #%3
+     [ ?&w      , w  , w ; yes            ] movprfx\t%0, %1\;xar\t%0.<Vetype>, %0.<Vetype>, %2.<Vetype>, #%3
+  }
 )
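
;; The "%0" in XAR's first source column carries over the old "%0, w"
;; string: "%" marks operands 1 and 2 as commutative, so the allocator
;; may swap them before matching, and "0" ties the chosen operand to the
;; destination.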
 
 ;; -------------------------------------------------------------------------
 )
 
 (define_insn_and_rewrite "*aarch64_sve2_bcax<mode>"
-  [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_FULL_I 0 "register_operand")
        (xor:SVE_FULL_I
          (and:SVE_FULL_I
            (unspec:SVE_FULL_I
              [(match_operand 4)
               (not:SVE_FULL_I
-                (match_operand:SVE_FULL_I 3 "register_operand" "w, w"))]
+                (match_operand:SVE_FULL_I 3 "register_operand"))]
              UNSPEC_PRED_X)
-           (match_operand:SVE_FULL_I 2 "register_operand" "w, w"))
-         (match_operand:SVE_FULL_I 1 "register_operand" "0, w")))]
+           (match_operand:SVE_FULL_I 2 "register_operand"))
+         (match_operand:SVE_FULL_I 1 "register_operand")))]
   "TARGET_SVE2"
-  "@
-  bcax\t%0.d, %0.d, %2.d, %3.d
-  movprfx\t%0, %1\;bcax\t%0.d, %0.d, %2.d, %3.d"
+  {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+     [ w        , 0 , w , w ; *              ] bcax\t%0.d, %0.d, %2.d, %3.d
+     [ ?&w      , w , w , w ; yes            ] movprfx\t%0, %1\;bcax\t%0.d, %0.d, %2.d, %3.d
+  }
   "&& !CONSTANT_P (operands[4])"
   {
     operands[4] = CONSTM1_RTX (<VPRED>mode);
   }
-  [(set_attr "movprfx" "*,yes")]
 )
 
 ;; Unpredicated 3-way exclusive OR.
 (define_insn "@aarch64_sve2_eor3<mode>"
-  [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, w, w, ?&w")
+  [(set (match_operand:SVE_FULL_I 0 "register_operand")
        (xor:SVE_FULL_I
          (xor:SVE_FULL_I
-           (match_operand:SVE_FULL_I 1 "register_operand" "0, w, w, w")
-           (match_operand:SVE_FULL_I 2 "register_operand" "w, 0, w, w"))
-         (match_operand:SVE_FULL_I 3 "register_operand" "w, w, 0, w")))]
+           (match_operand:SVE_FULL_I 1 "register_operand")
+           (match_operand:SVE_FULL_I 2 "register_operand"))
+         (match_operand:SVE_FULL_I 3 "register_operand")))]
   "TARGET_SVE2"
-  "@
-  eor3\t%0.d, %0.d, %2.d, %3.d
-  eor3\t%0.d, %0.d, %1.d, %3.d
-  eor3\t%0.d, %0.d, %1.d, %2.d
-  movprfx\t%0, %1\;eor3\t%0.d, %0.d, %2.d, %3.d"
-  [(set_attr "movprfx" "*,*,*,yes")]
+  {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+     [ w        , 0 , w , w ; *              ] eor3\t%0.d, %0.d, %2.d, %3.d
+     [ w        , w , 0 , w ; *              ] eor3\t%0.d, %0.d, %1.d, %3.d
+     [ w        , w , w , 0 ; *              ] eor3\t%0.d, %0.d, %1.d, %2.d
+     [ ?&w      , w , w , w ; yes            ] movprfx\t%0, %1\;eor3\t%0.d, %0.d, %2.d, %3.d
+  }
 )
 
 ;; Use NBSL for vector NOR.
 (define_insn_and_rewrite "*aarch64_sve2_nor<mode>"
-  [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_FULL_I 0 "register_operand")
        (unspec:SVE_FULL_I
          [(match_operand 3)
           (and:SVE_FULL_I
             (not:SVE_FULL_I
-              (match_operand:SVE_FULL_I 1 "register_operand" "%0, w"))
+              (match_operand:SVE_FULL_I 1 "register_operand"))
             (not:SVE_FULL_I
-              (match_operand:SVE_FULL_I 2 "register_operand" "w, w")))]
+              (match_operand:SVE_FULL_I 2 "register_operand")))]
          UNSPEC_PRED_X))]
   "TARGET_SVE2"
-  "@
-  nbsl\t%0.d, %0.d, %2.d, %0.d
-  movprfx\t%0, %1\;nbsl\t%0.d, %0.d, %2.d, %0.d"
+  {@ [ cons: =0 , 1  , 2 ; attrs: movprfx ]
+     [ w        , %0 , w ; *              ] nbsl\t%0.d, %0.d, %2.d, %0.d
+     [ ?&w      , w  , w ; yes            ] movprfx\t%0, %1\;nbsl\t%0.d, %0.d, %2.d, %0.d
+  }
   "&& !CONSTANT_P (operands[3])"
   {
     operands[3] = CONSTM1_RTX (<VPRED>mode);
   }
-  [(set_attr "movprfx" "*,yes")]
 )
 
 ;; Use NBSL for vector NAND.
 (define_insn_and_rewrite "*aarch64_sve2_nand<mode>"
-  [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_FULL_I 0 "register_operand")
        (unspec:SVE_FULL_I
          [(match_operand 3)
           (ior:SVE_FULL_I
             (not:SVE_FULL_I
-              (match_operand:SVE_FULL_I 1 "register_operand" "%0, w"))
+              (match_operand:SVE_FULL_I 1 "register_operand"))
             (not:SVE_FULL_I
-              (match_operand:SVE_FULL_I 2 "register_operand" "w, w")))]
+              (match_operand:SVE_FULL_I 2 "register_operand")))]
          UNSPEC_PRED_X))]
   "TARGET_SVE2"
-  "@
-  nbsl\t%0.d, %0.d, %2.d, %2.d
-  movprfx\t%0, %1\;nbsl\t%0.d, %0.d, %2.d, %2.d"
+  {@ [ cons: =0 , 1  , 2 ; attrs: movprfx ]
+     [ w        , %0 , w ; *              ] nbsl\t%0.d, %0.d, %2.d, %2.d
+     [ ?&w      , w  , w ; yes            ] movprfx\t%0, %1\;nbsl\t%0.d, %0.d, %2.d, %2.d
+  }
   "&& !CONSTANT_P (operands[3])"
   {
     operands[3] = CONSTM1_RTX (<VPRED>mode);
   }
-  [(set_attr "movprfx" "*,yes")]
 )
 
 ;; Unpredicated bitwise select.
 )
 
 (define_insn "*aarch64_sve2_bsl<mode>"
-  [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_FULL_I 0 "register_operand")
        (xor:SVE_FULL_I
          (and:SVE_FULL_I
            (xor:SVE_FULL_I
-             (match_operand:SVE_FULL_I 1 "register_operand" "<bsl_1st>, w")
-             (match_operand:SVE_FULL_I 2 "register_operand" "<bsl_2nd>, w"))
-           (match_operand:SVE_FULL_I 3 "register_operand" "w, w"))
+             (match_operand:SVE_FULL_I 1 "register_operand")
+             (match_operand:SVE_FULL_I 2 "register_operand"))
+           (match_operand:SVE_FULL_I 3 "register_operand"))
          (match_dup BSL_DUP)))]
   "TARGET_SVE2"
-  "@
-  bsl\t%0.d, %0.d, %<bsl_dup>.d, %3.d
-  movprfx\t%0, %<bsl_mov>\;bsl\t%0.d, %0.d, %<bsl_dup>.d, %3.d"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1         , 2         , 3 ; attrs: movprfx ]
+     [ w        , <bsl_1st> , <bsl_2nd> , w ; *              ] bsl\t%0.d, %0.d, %<bsl_dup>.d, %3.d
+     [ ?&w      , w         , w         , w ; yes            ] movprfx\t%0, %<bsl_mov>\;bsl\t%0.d, %0.d, %<bsl_dup>.d, %3.d
+  }
 )
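
;; The BSL family keeps its <bsl_1st>/<bsl_2nd> attribute constraints,
;; which tie whichever of operands 1 and 2 is not the duplicated
;; (match_dup BSL_DUP) input to the destination; %<bsl_mov> and
;; %<bsl_dup> in the templates then name the tied and duplicated halves.
;; The tables simply reproduce those attributes per row.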
 
 ;; Unpredicated bitwise inverted select.
 )
 
 (define_insn_and_rewrite "*aarch64_sve2_nbsl<mode>"
-  [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_FULL_I 0 "register_operand")
        (unspec:SVE_FULL_I
          [(match_operand 4)
           (not:SVE_FULL_I
             (xor:SVE_FULL_I
               (and:SVE_FULL_I
                 (xor:SVE_FULL_I
-                  (match_operand:SVE_FULL_I 1 "register_operand" "<bsl_1st>, w")
-                  (match_operand:SVE_FULL_I 2 "register_operand" "<bsl_2nd>, w"))
-                (match_operand:SVE_FULL_I 3 "register_operand" "w, w"))
+                  (match_operand:SVE_FULL_I 1 "register_operand")
+                  (match_operand:SVE_FULL_I 2 "register_operand"))
+                (match_operand:SVE_FULL_I 3 "register_operand"))
               (match_dup BSL_DUP)))]
          UNSPEC_PRED_X))]
   "TARGET_SVE2"
-  "@
-  nbsl\t%0.d, %0.d, %<bsl_dup>.d, %3.d
-  movprfx\t%0, %<bsl_mov>\;nbsl\t%0.d, %0.d, %<bsl_dup>.d, %3.d"
+  {@ [ cons: =0 , 1         , 2         , 3 ; attrs: movprfx ]
+     [ w        , <bsl_1st> , <bsl_2nd> , w ; *              ] nbsl\t%0.d, %0.d, %<bsl_dup>.d, %3.d
+     [ ?&w      , w         , w         , w ; yes            ] movprfx\t%0, %<bsl_mov>\;nbsl\t%0.d, %0.d, %<bsl_dup>.d, %3.d
+  }
   "&& !CONSTANT_P (operands[4])"
   {
     operands[4] = CONSTM1_RTX (<VPRED>mode);
   }
-  [(set_attr "movprfx" "*,yes")]
 )
 
 ;; Unpredicated bitwise select with inverted first operand.
 )
 
 (define_insn_and_rewrite "*aarch64_sve2_bsl1n<mode>"
-  [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_FULL_I 0 "register_operand")
        (xor:SVE_FULL_I
          (and:SVE_FULL_I
            (unspec:SVE_FULL_I
              [(match_operand 4)
               (not:SVE_FULL_I
                 (xor:SVE_FULL_I
-                  (match_operand:SVE_FULL_I 1 "register_operand" "<bsl_1st>, w")
-                  (match_operand:SVE_FULL_I 2 "register_operand" "<bsl_2nd>, w")))]
+                  (match_operand:SVE_FULL_I 1 "register_operand")
+                  (match_operand:SVE_FULL_I 2 "register_operand")))]
              UNSPEC_PRED_X)
-           (match_operand:SVE_FULL_I 3 "register_operand" "w, w"))
+           (match_operand:SVE_FULL_I 3 "register_operand"))
          (match_dup BSL_DUP)))]
   "TARGET_SVE2"
-  "@
-  bsl1n\t%0.d, %0.d, %<bsl_dup>.d, %3.d
-  movprfx\t%0, %<bsl_mov>\;bsl1n\t%0.d, %0.d, %<bsl_dup>.d, %3.d"
+  {@ [ cons: =0 , 1         , 2         , 3 ; attrs: movprfx ]
+     [ w        , <bsl_1st> , <bsl_2nd> , w ; *              ] bsl1n\t%0.d, %0.d, %<bsl_dup>.d, %3.d
+     [ ?&w      , w         , w         , w ; yes            ] movprfx\t%0, %<bsl_mov>\;bsl1n\t%0.d, %0.d, %<bsl_dup>.d, %3.d
+  }
   "&& !CONSTANT_P (operands[4])"
   {
     operands[4] = CONSTM1_RTX (<VPRED>mode);
   }
-  [(set_attr "movprfx" "*,yes")]
 )
 
 ;; Unpredicated bitwise select with inverted second operand.
 )
 
 (define_insn_and_rewrite "*aarch64_sve2_bsl2n<mode>"
-  [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_FULL_I 0 "register_operand")
        (ior:SVE_FULL_I
          (and:SVE_FULL_I
-           (match_operand:SVE_FULL_I 1 "register_operand" "<bsl_1st>, w")
-           (match_operand:SVE_FULL_I 2 "register_operand" "<bsl_2nd>, w"))
+           (match_operand:SVE_FULL_I 1 "register_operand")
+           (match_operand:SVE_FULL_I 2 "register_operand"))
          (unspec:SVE_FULL_I
            [(match_operand 4)
             (and:SVE_FULL_I
               (not:SVE_FULL_I
-                (match_operand:SVE_FULL_I 3 "register_operand" "w, w"))
+                (match_operand:SVE_FULL_I 3 "register_operand"))
               (not:SVE_FULL_I
                 (match_dup BSL_DUP)))]
            UNSPEC_PRED_X)))]
   "TARGET_SVE2"
-  "@
-  bsl2n\t%0.d, %0.d, %3.d, %<bsl_dup>.d
-  movprfx\t%0, %<bsl_mov>\;bsl2n\t%0.d, %0.d, %3.d, %<bsl_dup>.d"
+  {@ [ cons: =0 , 1         , 2         , 3 ; attrs: movprfx ]
+     [ w        , <bsl_1st> , <bsl_2nd> , w ; *              ] bsl2n\t%0.d, %0.d, %3.d, %<bsl_dup>.d
+     [ ?&w      , w         , w         , w ; yes            ] movprfx\t%0, %<bsl_mov>\;bsl2n\t%0.d, %0.d, %3.d, %<bsl_dup>.d
+  }
   "&& !CONSTANT_P (operands[4])"
   {
     operands[4] = CONSTM1_RTX (<VPRED>mode);
   }
-  [(set_attr "movprfx" "*,yes")]
 )
 
 ;; Unpredicated bitwise select with inverted second operand, alternative form.
 ;; (bsl_dup ? bsl_mov : ~op3) == ((bsl_dup & bsl_mov) | (~bsl_dup & ~op3))
 (define_insn_and_rewrite "*aarch64_sve2_bsl2n<mode>"
-  [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_FULL_I 0 "register_operand")
        (ior:SVE_FULL_I
          (and:SVE_FULL_I
-           (match_operand:SVE_FULL_I 1 "register_operand" "<bsl_1st>, w")
-           (match_operand:SVE_FULL_I 2 "register_operand" "<bsl_2nd>, w"))
+           (match_operand:SVE_FULL_I 1 "register_operand")
+           (match_operand:SVE_FULL_I 2 "register_operand"))
          (unspec:SVE_FULL_I
            [(match_operand 4)
             (and:SVE_FULL_I
               (not:SVE_FULL_I
                 (match_dup BSL_DUP))
               (not:SVE_FULL_I
-                (match_operand:SVE_FULL_I 3 "register_operand" "w, w")))]
+                (match_operand:SVE_FULL_I 3 "register_operand")))]
            UNSPEC_PRED_X)))]
   "TARGET_SVE2"
-  "@
-  bsl2n\t%0.d, %0.d, %3.d, %<bsl_dup>.d
-  movprfx\t%0, %<bsl_mov>\;bsl2n\t%0.d, %0.d, %3.d, %<bsl_dup>.d"
+  {@ [ cons: =0 , 1         , 2         , 3 ; attrs: movprfx ]
+     [ w        , <bsl_1st> , <bsl_2nd> , w ; *              ] bsl2n\t%0.d, %0.d, %3.d, %<bsl_dup>.d
+     [ ?&w      , w         , w         , w ; yes            ] movprfx\t%0, %<bsl_mov>\;bsl2n\t%0.d, %0.d, %3.d, %<bsl_dup>.d
+  }
   "&& !CONSTANT_P (operands[4])"
   {
     operands[4] = CONSTM1_RTX (<VPRED>mode);
   }
-  [(set_attr "movprfx" "*,yes")]
 )
 
 ;; -------------------------------------------------------------------------
 ;; Pattern-match SSRA and USRA as a predicated operation whose predicate
 ;; isn't needed.
 (define_insn_and_rewrite "*aarch64_sve2_sra<mode>"
-  [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_FULL_I 0 "register_operand")
        (plus:SVE_FULL_I
          (unspec:SVE_FULL_I
            [(match_operand 4)
             (SHIFTRT:SVE_FULL_I
-              (match_operand:SVE_FULL_I 2 "register_operand" "w, w")
+              (match_operand:SVE_FULL_I 2 "register_operand")
               (match_operand:SVE_FULL_I 3 "aarch64_simd_rshift_imm"))]
            UNSPEC_PRED_X)
-        (match_operand:SVE_FULL_I 1 "register_operand" "0, w")))]
+        (match_operand:SVE_FULL_I 1 "register_operand")))]
   "TARGET_SVE2"
-  "@
-   <sra_op>sra\t%0.<Vetype>, %2.<Vetype>, #%3
-   movprfx\t%0, %1\;<sra_op>sra\t%0.<Vetype>, %2.<Vetype>, #%3"
+  {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+     [ w        , 0 , w ; *              ] <sra_op>sra\t%0.<Vetype>, %2.<Vetype>, #%3
+     [ ?&w      , w , w ; yes            ] movprfx\t%0, %1\;<sra_op>sra\t%0.<Vetype>, %2.<Vetype>, #%3
+  }
   "&& !CONSTANT_P (operands[4])"
   {
     operands[4] = CONSTM1_RTX (<VPRED>mode);
   }
-  [(set_attr "movprfx" "*,yes")]
 )
 
 ;; SRSRA and URSRA.
 (define_insn "@aarch64_sve_add_<sve_int_op><mode>"
-  [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_FULL_I 0 "register_operand")
        (plus:SVE_FULL_I
          (unspec:SVE_FULL_I
-           [(match_operand:SVE_FULL_I 2 "register_operand" "w, w")
+           [(match_operand:SVE_FULL_I 2 "register_operand")
             (match_operand:SVE_FULL_I 3 "aarch64_simd_rshift_imm")]
            VRSHR_N)
-        (match_operand:SVE_FULL_I 1 "register_operand" "0, w")))]
+        (match_operand:SVE_FULL_I 1 "register_operand")))]
   "TARGET_SVE2"
-  "@
-   <sur>sra\t%0.<Vetype>, %2.<Vetype>, #%3
-   movprfx\t%0, %1\;<sur>sra\t%0.<Vetype>, %2.<Vetype>, #%3"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+     [ w        , 0 , w ; *              ] <sur>sra\t%0.<Vetype>, %2.<Vetype>, #%3
+     [ ?&w      , w , w ; yes            ] movprfx\t%0, %1\;<sur>sra\t%0.<Vetype>, %2.<Vetype>, #%3
+  }
 )
 
 ;; -------------------------------------------------------------------------
 ;; Pattern-match SABA and UABA as an absolute-difference-and-accumulate
 ;; operation whose predicates aren't needed.
 (define_insn "*aarch64_sve2_<su>aba<mode>"
-  [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_FULL_I 0 "register_operand")
        (plus:SVE_FULL_I
          (minus:SVE_FULL_I
            (unspec:SVE_FULL_I
              [(match_operand 4)
               (USMAX:SVE_FULL_I
-                (match_operand:SVE_FULL_I 2 "register_operand" "w, w")
-                (match_operand:SVE_FULL_I 3 "register_operand" "w, w"))]
+                (match_operand:SVE_FULL_I 2 "register_operand")
+                (match_operand:SVE_FULL_I 3 "register_operand"))]
              UNSPEC_PRED_X)
            (unspec:SVE_FULL_I
              [(match_operand 5)
                 (match_dup 2)
                 (match_dup 3))]
              UNSPEC_PRED_X))
-         (match_operand:SVE_FULL_I 1 "register_operand" "0, w")))]
+         (match_operand:SVE_FULL_I 1 "register_operand")))]
   "TARGET_SVE2"
-  "@
-   <su>aba\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>
-   movprfx\t%0, %1\;<su>aba\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+     [ w        , 0 , w , w ; *              ] <su>aba\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>
+     [ ?&w      , w , w , w ; yes            ] movprfx\t%0, %1\;<su>aba\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>
+  }
 )
 
 ;; =========================================================================
 
 ;; Non-saturating MLA operations.
 (define_insn "@aarch64_sve_add_<sve_int_op><mode>"
-  [(set (match_operand:SVE_FULL_HSDI 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_FULL_HSDI 0 "register_operand")
        (plus:SVE_FULL_HSDI
          (unspec:SVE_FULL_HSDI
-           [(match_operand:<VNARROW> 2 "register_operand" "w, w")
-            (match_operand:<VNARROW> 3 "register_operand" "w, w")]
+           [(match_operand:<VNARROW> 2 "register_operand")
+            (match_operand:<VNARROW> 3 "register_operand")]
            SVE2_INT_ADD_BINARY_LONG)
-         (match_operand:SVE_FULL_HSDI 1 "register_operand" "0, w")))]
+         (match_operand:SVE_FULL_HSDI 1 "register_operand")))]
   "TARGET_SVE2"
-  "@
-   <sve_int_add_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>
-   movprfx\t%0, %1\;<sve_int_add_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+     [ w        , 0 , w , w ; *              ] <sve_int_add_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>
+     [ ?&w      , w , w , w ; yes            ] movprfx\t%0, %1\;<sve_int_add_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>
+  }
 )
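
;; The widening MLA/MLS patterns in the rest of this section all share
;; one shape: the accumulator (operand 1) is tied to the destination in
;; row one and freed behind a MOVPRFX in row two; only the unspec, the
;; mnemonic and the optional lane select differ.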
 
 ;; Non-saturating MLA operations with lane select.
 (define_insn "@aarch64_sve_add_<sve_int_op>_lane_<mode>"
-  [(set (match_operand:SVE_FULL_SDI 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_FULL_SDI 0 "register_operand")
        (plus:SVE_FULL_SDI
          (unspec:SVE_FULL_SDI
-           [(match_operand:<VNARROW> 2 "register_operand" "w, w")
+           [(match_operand:<VNARROW> 2 "register_operand")
             (unspec:<VNARROW>
-              [(match_operand:<VNARROW> 3 "register_operand" "<sve_lane_con>, <sve_lane_con>")
+              [(match_operand:<VNARROW> 3 "register_operand")
                (match_operand:SI 4 "const_int_operand")]
               UNSPEC_SVE_LANE_SELECT)]
            SVE2_INT_ADD_BINARY_LONG_LANE)
-         (match_operand:SVE_FULL_SDI 1 "register_operand" "0, w")))]
+         (match_operand:SVE_FULL_SDI 1 "register_operand")))]
   "TARGET_SVE2"
-  "@
-   <sve_int_add_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>[%4]
-   movprfx\t%0, %1\;<sve_int_add_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>[%4]"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1 , 2 , 3              ; attrs: movprfx ]
+     [ w        , 0 , w , <sve_lane_con> ; *              ] <sve_int_add_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>[%4]
+     [ ?&w      , w , w , <sve_lane_con> ; yes            ] movprfx\t%0, %1\;<sve_int_add_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>[%4]
+  }
 )
 
 ;; Saturating MLA operations.
 (define_insn "@aarch64_sve_qadd_<sve_int_op><mode>"
-  [(set (match_operand:SVE_FULL_HSDI 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_FULL_HSDI 0 "register_operand")
        (ss_plus:SVE_FULL_HSDI
          (unspec:SVE_FULL_HSDI
-           [(match_operand:<VNARROW> 2 "register_operand" "w, w")
-            (match_operand:<VNARROW> 3 "register_operand" "w, w")]
+           [(match_operand:<VNARROW> 2 "register_operand")
+            (match_operand:<VNARROW> 3 "register_operand")]
            SVE2_INT_QADD_BINARY_LONG)
-         (match_operand:SVE_FULL_HSDI 1 "register_operand" "0, w")))]
+         (match_operand:SVE_FULL_HSDI 1 "register_operand")))]
   "TARGET_SVE2"
-  "@
-   <sve_int_qadd_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>
-   movprfx\t%0, %1\;<sve_int_qadd_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+     [ w        , 0 , w , w ; *              ] <sve_int_qadd_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>
+     [ ?&w      , w , w , w ; yes            ] movprfx\t%0, %1\;<sve_int_qadd_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>
+  }
 )
 
 ;; Saturating MLA operations with lane select.
 (define_insn "@aarch64_sve_qadd_<sve_int_op>_lane_<mode>"
-  [(set (match_operand:SVE_FULL_SDI 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_FULL_SDI 0 "register_operand")
        (ss_plus:SVE_FULL_SDI
          (unspec:SVE_FULL_SDI
-           [(match_operand:<VNARROW> 2 "register_operand" "w, w")
+           [(match_operand:<VNARROW> 2 "register_operand")
             (unspec:<VNARROW>
-              [(match_operand:<VNARROW> 3 "register_operand" "<sve_lane_con>, <sve_lane_con>")
+              [(match_operand:<VNARROW> 3 "register_operand")
                (match_operand:SI 4 "const_int_operand")]
               UNSPEC_SVE_LANE_SELECT)]
            SVE2_INT_QADD_BINARY_LONG_LANE)
-         (match_operand:SVE_FULL_SDI 1 "register_operand" "0, w")))]
+         (match_operand:SVE_FULL_SDI 1 "register_operand")))]
   "TARGET_SVE2"
-  "@
-   <sve_int_qadd_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>[%4]
-   movprfx\t%0, %1\;<sve_int_qadd_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>[%4]"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1 , 2 , 3              ; attrs: movprfx ]
+     [ w        , 0 , w , <sve_lane_con> ; *              ] <sve_int_qadd_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>[%4]
+     [ ?&w      , w , w , <sve_lane_con> ; yes            ] movprfx\t%0, %1\;<sve_int_qadd_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>[%4]
+  }
 )
 
 ;; Non-saturating MLS operations.
 (define_insn "@aarch64_sve_sub_<sve_int_op><mode>"
-  [(set (match_operand:SVE_FULL_HSDI 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_FULL_HSDI 0 "register_operand")
        (minus:SVE_FULL_HSDI
-         (match_operand:SVE_FULL_HSDI 1 "register_operand" "0, w")
+         (match_operand:SVE_FULL_HSDI 1 "register_operand")
          (unspec:SVE_FULL_HSDI
-           [(match_operand:<VNARROW> 2 "register_operand" "w, w")
-            (match_operand:<VNARROW> 3 "register_operand" "w, w")]
+           [(match_operand:<VNARROW> 2 "register_operand")
+            (match_operand:<VNARROW> 3 "register_operand")]
            SVE2_INT_SUB_BINARY_LONG)))]
   "TARGET_SVE2"
-  "@
-   <sve_int_sub_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>
-   movprfx\t%0, %1\;<sve_int_sub_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+     [ w        , 0 , w , w ; *              ] <sve_int_sub_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>
+     [ ?&w      , w , w , w ; yes            ] movprfx\t%0, %1\;<sve_int_sub_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>
+  }
 )
 
 ;; Non-saturating MLS operations with lane select.
 (define_insn "@aarch64_sve_sub_<sve_int_op>_lane_<mode>"
-  [(set (match_operand:SVE_FULL_SDI 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_FULL_SDI 0 "register_operand")
        (minus:SVE_FULL_SDI
-         (match_operand:SVE_FULL_SDI 1 "register_operand" "0, w")
+         (match_operand:SVE_FULL_SDI 1 "register_operand")
          (unspec:SVE_FULL_SDI
-           [(match_operand:<VNARROW> 2 "register_operand" "w, w")
+           [(match_operand:<VNARROW> 2 "register_operand")
             (unspec:<VNARROW>
-              [(match_operand:<VNARROW> 3 "register_operand" "<sve_lane_con>, <sve_lane_con>")
+              [(match_operand:<VNARROW> 3 "register_operand")
                (match_operand:SI 4 "const_int_operand")]
               UNSPEC_SVE_LANE_SELECT)]
            SVE2_INT_SUB_BINARY_LONG_LANE)))]
   "TARGET_SVE2"
-  "@
-   <sve_int_sub_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>[%4]
-   movprfx\t%0, %1\;<sve_int_sub_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>[%4]"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1 , 2 , 3              ; attrs: movprfx ]
+     [ w        , 0 , w , <sve_lane_con> ; *              ] <sve_int_sub_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>[%4]
+     [ ?&w      , w , w , <sve_lane_con> ; yes            ] movprfx\t%0, %1\;<sve_int_sub_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>[%4]
+  }
 )
 
 ;; Saturating MLS operations.
 (define_insn "@aarch64_sve_qsub_<sve_int_op><mode>"
-  [(set (match_operand:SVE_FULL_HSDI 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_FULL_HSDI 0 "register_operand")
        (ss_minus:SVE_FULL_HSDI
-         (match_operand:SVE_FULL_HSDI 1 "register_operand" "0, w")
+         (match_operand:SVE_FULL_HSDI 1 "register_operand")
          (unspec:SVE_FULL_HSDI
-           [(match_operand:<VNARROW> 2 "register_operand" "w, w")
-            (match_operand:<VNARROW> 3 "register_operand" "w, w")]
+           [(match_operand:<VNARROW> 2 "register_operand")
+            (match_operand:<VNARROW> 3 "register_operand")]
            SVE2_INT_QSUB_BINARY_LONG)))]
   "TARGET_SVE2"
-  "@
-   <sve_int_qsub_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>
-   movprfx\t%0, %1\;<sve_int_qsub_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+     [ w        , 0 , w , w ; *              ] <sve_int_qsub_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>
+     [ ?&w      , w , w , w ; yes            ] movprfx\t%0, %1\;<sve_int_qsub_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>
+  }
 )
 
 ;; Saturating MLS operations with lane select.
 (define_insn "@aarch64_sve_qsub_<sve_int_op>_lane_<mode>"
-  [(set (match_operand:SVE_FULL_SDI 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_FULL_SDI 0 "register_operand")
        (ss_minus:SVE_FULL_SDI
-         (match_operand:SVE_FULL_SDI 1 "register_operand" "0, w")
+         (match_operand:SVE_FULL_SDI 1 "register_operand")
          (unspec:SVE_FULL_SDI
-           [(match_operand:<VNARROW> 2 "register_operand" "w, w")
+           [(match_operand:<VNARROW> 2 "register_operand")
             (unspec:<VNARROW>
-              [(match_operand:<VNARROW> 3 "register_operand" "<sve_lane_con>, <sve_lane_con>")
+              [(match_operand:<VNARROW> 3 "register_operand")
                (match_operand:SI 4 "const_int_operand")]
               UNSPEC_SVE_LANE_SELECT)]
            SVE2_INT_QSUB_BINARY_LONG_LANE)))]
   "TARGET_SVE2"
-  "@
-   <sve_int_qsub_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>[%4]
-   movprfx\t%0, %1\;<sve_int_qsub_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>[%4]"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1 , 2 , 3              ; attrs: movprfx ]
+     [ w        , 0 , w , <sve_lane_con> ; *              ] <sve_int_qsub_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>[%4]
+     [ ?&w      , w , w , <sve_lane_con> ; yes            ] movprfx\t%0, %1\;<sve_int_qsub_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>[%4]
+  }
 )
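
The two alternatives in these MLA/MLS tables encode the usual SVE movprfx
idiom.  In the first row the accumulator (operand 1) is constrained to "0",
i.e. tied to the destination, so the instruction updates it in place and the
movprfx attribute keeps its default ('*').  In the second row the destination
is "?&w": earlyclobber ('&') because the leading movprfx writes it before the
main instruction reads operands 2 and 3, and discouraged ('?') so the
register allocator prefers the tied form when it can.  A commented copy of
one such table, using sabalb as one concrete expansion of <sve_int_add_op>:

;; Alternative 0: accumulate in place, no prefix instruction.
;; Alternative 1: copy the accumulator first, then accumulate.
{@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
   [ w        , 0 , w , w ; *              ] sabalb\t%0.h, %2.b, %3.b
   [ ?&w      , w , w , w ; yes            ] movprfx\t%0, %1\;sabalb\t%0.h, %2.b, %3.b
}
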
 ;; -------------------------------------------------------------------------
 ;; ---- [FP] Long multiplication with accumulation
 ;; -------------------------------------------------------------------------
 
 (define_insn "@aarch64_sve_<sve_fp_op><mode>"
-  [(set (match_operand:VNx4SF_ONLY 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:VNx4SF_ONLY 0 "register_operand")
        (unspec:VNx4SF_ONLY
-         [(match_operand:<VNARROW> 1 "register_operand" "w, w")
-          (match_operand:<VNARROW> 2 "register_operand" "w, w")
-          (match_operand:VNx4SF_ONLY 3 "register_operand" "0, w")]
+         [(match_operand:<VNARROW> 1 "register_operand")
+          (match_operand:<VNARROW> 2 "register_operand")
+          (match_operand:VNx4SF_ONLY 3 "register_operand")]
          SVE2_FP_TERNARY_LONG))]
   "TARGET_SVE2"
-  "@
-   <sve_fp_op>\t%0.<Vetype>, %1.<Ventype>, %2.<Ventype>
-   movprfx\t%0, %3\;<sve_fp_op>\t%0.<Vetype>, %1.<Ventype>, %2.<Ventype>"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+     [ w        , w , w , 0 ; *              ] <sve_fp_op>\t%0.<Vetype>, %1.<Ventype>, %2.<Ventype>
+     [ ?&w      , w , w , w ; yes            ] movprfx\t%0, %3\;<sve_fp_op>\t%0.<Vetype>, %1.<Ventype>, %2.<Ventype>
+  }
 )
 
 (define_insn "@aarch64_<sve_fp_op>_lane_<mode>"
-  [(set (match_operand:VNx4SF_ONLY 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:VNx4SF_ONLY 0 "register_operand")
        (unspec:VNx4SF_ONLY
-         [(match_operand:<VNARROW> 1 "register_operand" "w, w")
+         [(match_operand:<VNARROW> 1 "register_operand")
           (unspec:<VNARROW>
-            [(match_operand:<VNARROW> 2 "register_operand" "<sve_lane_con>, <sve_lane_con>")
+            [(match_operand:<VNARROW> 2 "register_operand")
              (match_operand:SI 3 "const_int_operand")]
             UNSPEC_SVE_LANE_SELECT)
-          (match_operand:VNx4SF_ONLY 4 "register_operand" "0, w")]
+          (match_operand:VNx4SF_ONLY 4 "register_operand")]
          SVE2_FP_TERNARY_LONG_LANE))]
   "TARGET_SVE2"
-  "@
-   <sve_fp_op>\t%0.<Vetype>, %1.<Ventype>, %2.<Ventype>[%3]
-   movprfx\t%0, %4\;<sve_fp_op>\t%0.<Vetype>, %1.<Ventype>, %2.<Ventype>[%3]"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1 , 2              , 4 ; attrs: movprfx ]
+     [ w        , w , <sve_lane_con> , 0 ; *              ] <sve_fp_op>\t%0.<Vetype>, %1.<Ventype>, %2.<Ventype>[%3]
+     [ ?&w      , w , <sve_lane_con> , w ; yes            ] movprfx\t%0, %4\;<sve_fp_op>\t%0.<Vetype>, %1.<Ventype>, %2.<Ventype>[%3]
+  }
 )
 
 ;; =========================================================================
 ;; -------------------------------------------------------------------------
 
 (define_insn "@aarch64_pred_<sve_int_op><mode>"
-  [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_FULL_I 0 "register_operand")
        (unspec:SVE_FULL_I
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
-          (match_operand:SVE_FULL_I 2 "register_operand" "0, w")
-          (match_operand:SVE_FULL_I 3 "register_operand" "w, w")]
+         [(match_operand:<VPRED> 1 "register_operand")
+          (match_operand:SVE_FULL_I 2 "register_operand")
+          (match_operand:SVE_FULL_I 3 "register_operand")]
          SVE2_INT_BINARY_PAIR))]
   "TARGET_SVE2"
-  "@
-   <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
-   movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1   , 2 , 3 ; attrs: movprfx ]
+     [ w        , Upl , 0 , w ; *              ] <sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+     [ ?&w      , Upl , w , w ; yes            ] movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+  }
 )
 
 ;; -------------------------------------------------------------------------
 ;; -------------------------------------------------------------------------
 
 (define_insn "@aarch64_pred_<sve_fp_op><mode>"
-  [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_FULL_F 0 "register_operand")
        (unspec:SVE_FULL_F
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
-          (match_operand:SVE_FULL_F 2 "register_operand" "0, w")
-          (match_operand:SVE_FULL_F 3 "register_operand" "w, w")]
+         [(match_operand:<VPRED> 1 "register_operand")
+          (match_operand:SVE_FULL_F 2 "register_operand")
+          (match_operand:SVE_FULL_F 3 "register_operand")]
          SVE2_FP_BINARY_PAIR))]
   "TARGET_SVE2"
-  "@
-   <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
-   movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1   , 2 , 3 ; attrs: movprfx ]
+     [ w        , Upl , 0 , w ; *              ] <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+     [ ?&w      , Upl , w , w ; yes            ] movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+  }
 )
 
 ;; -------------------------------------------------------------------------
 ;; Predicated pairwise absolute difference and accumulate, merging with
 ;; the first input.
 (define_insn_and_rewrite "*cond_<sve_int_op><mode>_2"
-  [(set (match_operand:SVE_FULL_HSDI 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_FULL_HSDI 0 "register_operand")
        (unspec:SVE_FULL_HSDI
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (unspec:SVE_FULL_HSDI
             [(match_operand 4)
-             (match_operand:SVE_FULL_HSDI 2 "register_operand" "0, w")
-             (match_operand:<VNARROW> 3 "register_operand" "w, w")]
+             (match_operand:SVE_FULL_HSDI 2 "register_operand")
+             (match_operand:<VNARROW> 3 "register_operand")]
             SVE2_INT_BINARY_PAIR_LONG)
           (match_dup 2)]
          UNSPEC_SEL))]
   "TARGET_SVE2"
-  "@
-   <sve_int_op>\t%0.<Vetype>, %1/m, %3.<Ventype>
-   movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %3.<Ventype>"
+  {@ [ cons: =0 , 1   , 2 , 3 ; attrs: movprfx ]
+     [ w        , Upl , 0 , w ; *              ] <sve_int_op>\t%0.<Vetype>, %1/m, %3.<Ventype>
+     [ ?&w      , Upl , w , w ; yes            ] movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %3.<Ventype>
+  }
   "&& !CONSTANT_P (operands[4])"
   {
     operands[4] = CONSTM1_RTX (<VPRED>mode);
   }
-  [(set_attr "movprfx" "*,yes")]
 )
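
For define_insn_and_rewrite (and define_insn_and_split) the {@ ... } block
replaces only the output-template string; the rewrite condition and its C
body keep their positions after the block, and the old trailing
[(set_attr "movprfx" ...)] line disappears because the attribute now lives
in the table.  Schematically, with the RTL template elided:

(define_insn_and_rewrite "name"
  [ ... RTL template, constraint strings removed ... ]
  "insn condition"
  {@ [ cons: =0 , 1 ; attrs: movprfx ]
     [ w        , 0 ; *              ] tied-destination template
     [ ?&w      , w ; yes            ] movprfx template
  }
  "&& rewrite condition"
  {
    /* C code that rewrites the matched operands.  */
  }
)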
 
 ;; Predicated pairwise absolute difference and accumulate, merging with zero.
 (define_insn_and_rewrite "*cond_<sve_int_op><mode>_z"
-  [(set (match_operand:SVE_FULL_HSDI 0 "register_operand" "=&w, &w")
+  [(set (match_operand:SVE_FULL_HSDI 0 "register_operand")
        (unspec:SVE_FULL_HSDI
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (unspec:SVE_FULL_HSDI
             [(match_operand 5)
-             (match_operand:SVE_FULL_HSDI 2 "register_operand" "0, w")
-             (match_operand:<VNARROW> 3 "register_operand" "w, w")]
+             (match_operand:SVE_FULL_HSDI 2 "register_operand")
+             (match_operand:<VNARROW> 3 "register_operand")]
             SVE2_INT_BINARY_PAIR_LONG)
           (match_operand:SVE_FULL_HSDI 4 "aarch64_simd_imm_zero")]
          UNSPEC_SEL))]
   "TARGET_SVE2"
-  "@
-   movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %3.<Ventype>
-   movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %3.<Ventype>"
+  {@ [ cons: =0 , 1   , 2 , 3  ]
+     [ &w       , Upl , 0 , w  ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %3.<Ventype>
+     [ &w       , Upl , w , w  ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %3.<Ventype>
+  }
   "&& !CONSTANT_P (operands[5])"
   {
     operands[5] = CONSTM1_RTX (<VPRED>mode);
 ;; -------------------------------------------------------------------------
 
 (define_insn "@aarch64_sve_<optab><mode>"
-  [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_FULL_I 0 "register_operand")
        (unspec:SVE_FULL_I
-         [(match_operand:SVE_FULL_I 1 "register_operand" "0, w")
-          (match_operand:SVE_FULL_I 2 "register_operand" "w, w")]
+         [(match_operand:SVE_FULL_I 1 "register_operand")
+          (match_operand:SVE_FULL_I 2 "register_operand")]
          SVE2_INT_CADD))]
   "TARGET_SVE2"
-  "@
-   <sve_int_op>\t%0.<Vetype>, %0.<Vetype>, %2.<Vetype>, #<rot>
-   movprfx\t%0, %1\;<sve_int_op>\t%0.<Vetype>, %0.<Vetype>, %2.<Vetype>, #<rot>"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+     [ w        , 0 , w ; *              ] <sve_int_op>\t%0.<Vetype>, %0.<Vetype>, %2.<Vetype>, #<rot>
+     [ ?&w      , w , w ; yes            ] movprfx\t%0, %1\;<sve_int_op>\t%0.<Vetype>, %0.<Vetype>, %2.<Vetype>, #<rot>
+  }
 )
 
 ;; unpredicated optab pattern for auto-vectorizer
 ;; -------------------------------------------------------------------------
 
 (define_insn "@aarch64_sve_<optab><mode>"
-  [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_FULL_I 0 "register_operand")
        (unspec:SVE_FULL_I
-         [(match_operand:SVE_FULL_I 1 "register_operand" "0, w")
-          (match_operand:SVE_FULL_I 2 "register_operand" "w, w")
-          (match_operand:SVE_FULL_I 3 "register_operand" "w, w")]
+         [(match_operand:SVE_FULL_I 1 "register_operand")
+          (match_operand:SVE_FULL_I 2 "register_operand")
+          (match_operand:SVE_FULL_I 3 "register_operand")]
          SVE2_INT_CMLA))]
   "TARGET_SVE2"
-  "@
-   <sve_int_op>\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>, #<rot>
-   movprfx\t%0, %1\;<sve_int_op>\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>, #<rot>"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+     [ w        , 0 , w , w ; *              ] <sve_int_op>\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>, #<rot>
+     [ ?&w      , w , w , w ; yes            ] movprfx\t%0, %1\;<sve_int_op>\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>, #<rot>
+  }
 )
 
 (define_insn "@aarch64_<optab>_lane_<mode>"
-  [(set (match_operand:SVE_FULL_HSI 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_FULL_HSI 0 "register_operand")
        (unspec:SVE_FULL_HSI
-         [(match_operand:SVE_FULL_HSI 1 "register_operand" "0, w")
-          (match_operand:SVE_FULL_HSI 2 "register_operand" "w, w")
+         [(match_operand:SVE_FULL_HSI 1 "register_operand")
+          (match_operand:SVE_FULL_HSI 2 "register_operand")
           (unspec:SVE_FULL_HSI
-            [(match_operand:SVE_FULL_HSI 3 "register_operand" "<sve_lane_con>, <sve_lane_con>")
+            [(match_operand:SVE_FULL_HSI 3 "register_operand")
              (match_operand:SI 4 "const_int_operand")]
             UNSPEC_SVE_LANE_SELECT)]
          SVE2_INT_CMLA))]
   "TARGET_SVE2"
-  "@
-   <sve_int_op>\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>[%4], #<rot>
-   movprfx\t%0, %1\;<sve_int_op>\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>[%4], #<rot>"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1 , 2 , 3              ; attrs: movprfx ]
+     [ w        , 0 , w , <sve_lane_con> ; *              ] <sve_int_op>\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>[%4], #<rot>
+     [ ?&w      , w , w , <sve_lane_con> ; yes            ] movprfx\t%0, %1\;<sve_int_op>\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>[%4], #<rot>
+  }
 )
 
 ;; unpredicated optab pattern for auto-vectorizer
 ;; -------------------------------------------------------------------------
 
 (define_insn "@aarch64_sve_<optab><mode>"
-  [(set (match_operand:SVE_FULL_SDI 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_FULL_SDI 0 "register_operand")
        (unspec:SVE_FULL_SDI
-         [(match_operand:SVE_FULL_SDI 1 "register_operand" "0, w")
-          (match_operand:<VSI2QI> 2 "register_operand" "w, w")
-          (match_operand:<VSI2QI> 3 "register_operand" "w, w")]
+         [(match_operand:SVE_FULL_SDI 1 "register_operand")
+          (match_operand:<VSI2QI> 2 "register_operand")
+          (match_operand:<VSI2QI> 3 "register_operand")]
          SVE2_INT_CDOT))]
   "TARGET_SVE2"
-  "@
-   <sve_int_op>\t%0.<Vetype>, %2.<Vetype_fourth>, %3.<Vetype_fourth>, #<rot>
-   movprfx\t%0, %1\;<sve_int_op>\t%0.<Vetype>, %2.<Vetype_fourth>, %3.<Vetype_fourth>, #<rot>"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+     [ w        , 0 , w , w ; *              ] <sve_int_op>\t%0.<Vetype>, %2.<Vetype_fourth>, %3.<Vetype_fourth>, #<rot>
+     [ ?&w      , w , w , w ; yes            ] movprfx\t%0, %1\;<sve_int_op>\t%0.<Vetype>, %2.<Vetype_fourth>, %3.<Vetype_fourth>, #<rot>
+  }
 )
 
 (define_insn "@aarch64_<optab>_lane_<mode>"
-  [(set (match_operand:SVE_FULL_SDI 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:SVE_FULL_SDI 0 "register_operand")
        (unspec:SVE_FULL_SDI
-         [(match_operand:SVE_FULL_SDI 1 "register_operand" "0, w")
-          (match_operand:<VSI2QI> 2 "register_operand" "w, w")
+         [(match_operand:SVE_FULL_SDI 1 "register_operand")
+          (match_operand:<VSI2QI> 2 "register_operand")
           (unspec:<VSI2QI>
-            [(match_operand:<VSI2QI> 3 "register_operand" "<sve_lane_con>, <sve_lane_con>")
+            [(match_operand:<VSI2QI> 3 "register_operand")
              (match_operand:SI 4 "const_int_operand")]
             UNSPEC_SVE_LANE_SELECT)]
          SVE2_INT_CDOT))]
   "TARGET_SVE2"
-  "@
-   <sve_int_op>\t%0.<Vetype>, %2.<Vetype_fourth>, %3.<Vetype_fourth>[%4], #<rot>
-   movprfx\t%0, %1\;<sve_int_op>\t%0.<Vetype>, %2.<Vetype_fourth>, %3.<Vetype_fourth>[%4], #<rot>"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1 , 2 , 3              ; attrs: movprfx ]
+     [ w        , 0 , w , <sve_lane_con> ; *              ] <sve_int_op>\t%0.<Vetype>, %2.<Vetype_fourth>, %3.<Vetype_fourth>[%4], #<rot>
+     [ ?&w      , w , w , <sve_lane_con> ; yes            ] movprfx\t%0, %1\;<sve_int_op>\t%0.<Vetype>, %2.<Vetype_fourth>, %3.<Vetype_fourth>[%4], #<rot>
+  }
 )
 
 ;; =========================================================================
 ;; Predicated FCVTX (equivalent to what would be FCVTXNB, except that
 ;; it supports MOVPRFX).
 (define_insn "@aarch64_pred_<sve_fp_op><mode>"
-  [(set (match_operand:VNx4SF_ONLY 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:VNx4SF_ONLY 0 "register_operand")
        (unspec:VNx4SF_ONLY
-         [(match_operand:<VWIDE_PRED> 1 "register_operand" "Upl, Upl")
+         [(match_operand:<VWIDE_PRED> 1 "register_operand")
           (match_operand:SI 3 "aarch64_sve_gp_strictness")
-          (match_operand:<VWIDE> 2 "register_operand" "0, w")]
+          (match_operand:<VWIDE> 2 "register_operand")]
          SVE2_COND_FP_UNARY_NARROWB))]
   "TARGET_SVE2"
-  "@
-   <sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vewtype>
-   movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vewtype>"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1   , 2 ; attrs: movprfx ]
+     [ w        , Upl , 0 ; *              ] <sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vewtype>
+     [ ?&w      , Upl , w ; yes            ] movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vewtype>
+  }
 )
 
 ;; Predicated FCVTX with merging.
 )
 
 (define_insn_and_rewrite "*cond_<sve_fp_op><mode>_any_relaxed"
-  [(set (match_operand:VNx4SF_ONLY 0 "register_operand" "=&w, &w, &w")
+  [(set (match_operand:VNx4SF_ONLY 0 "register_operand")
        (unspec:VNx4SF_ONLY
-         [(match_operand:<VWIDE_PRED> 1 "register_operand" "Upl, Upl, Upl")
+         [(match_operand:<VWIDE_PRED> 1 "register_operand")
           (unspec:VNx4SF_ONLY
             [(match_operand 4)
              (const_int SVE_RELAXED_GP)
-             (match_operand:<VWIDE> 2 "register_operand" "w, w, w")]
+             (match_operand:<VWIDE> 2 "register_operand")]
             SVE2_COND_FP_UNARY_NARROWB)
-          (match_operand:VNx4SF_ONLY 3 "aarch64_simd_reg_or_zero" "0, Dz, w")]
+          (match_operand:VNx4SF_ONLY 3 "aarch64_simd_reg_or_zero")]
          UNSPEC_SEL))]
   "TARGET_SVE2 && !rtx_equal_p (operands[2], operands[3])"
-  "@
-   <sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vewtype>
-   movprfx\t%0.<Vewtype>, %1/z, %2.<Vewtype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vewtype>
-   movprfx\t%0, %3\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vewtype>"
+  {@ [ cons: =0 , 1   , 2 , 3  ; attrs: movprfx ]
+     [ &w       , Upl , w , 0  ; *              ] <sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vewtype>
+     [ &w       , Upl , w , Dz ; yes            ] movprfx\t%0.<Vewtype>, %1/z, %2.<Vewtype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vewtype>
+     [ &w       , Upl , w , w  ; yes            ] movprfx\t%0, %3\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vewtype>
+  }
   "&& !rtx_equal_p (operands[1], operands[4])"
   {
     operands[4] = copy_rtx (operands[1]);
   }
-  [(set_attr "movprfx" "*,yes,yes")]
 )
 
 (define_insn "*cond_<sve_fp_op><mode>_any_strict"
-  [(set (match_operand:VNx4SF_ONLY 0 "register_operand" "=&w, &w, &w")
+  [(set (match_operand:VNx4SF_ONLY 0 "register_operand")
        (unspec:VNx4SF_ONLY
-         [(match_operand:<VWIDE_PRED> 1 "register_operand" "Upl, Upl, Upl")
+         [(match_operand:<VWIDE_PRED> 1 "register_operand")
           (unspec:VNx4SF_ONLY
             [(match_dup 1)
              (const_int SVE_STRICT_GP)
-             (match_operand:<VWIDE> 2 "register_operand" "w, w, w")]
+             (match_operand:<VWIDE> 2 "register_operand")]
             SVE2_COND_FP_UNARY_NARROWB)
-          (match_operand:VNx4SF_ONLY 3 "aarch64_simd_reg_or_zero" "0, Dz, w")]
+          (match_operand:VNx4SF_ONLY 3 "aarch64_simd_reg_or_zero")]
          UNSPEC_SEL))]
   "TARGET_SVE2 && !rtx_equal_p (operands[2], operands[3])"
-  "@
-   <sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vewtype>
-   movprfx\t%0.<Vewtype>, %1/z, %2.<Vewtype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vewtype>
-   movprfx\t%0, %3\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vewtype>"
-  [(set_attr "movprfx" "*,yes,yes")]
+  {@ [ cons: =0 , 1   , 2 , 3  ; attrs: movprfx ]
+     [ &w       , Upl , w , 0  ; *              ] <sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vewtype>
+     [ &w       , Upl , w , Dz ; yes            ] movprfx\t%0.<Vewtype>, %1/z, %2.<Vewtype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vewtype>
+     [ &w       , Upl , w , w  ; yes            ] movprfx\t%0, %3\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vewtype>
+  }
 )
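
The three rows of these merging patterns differ only in where the inactive
lanes come from: row one merges with the destination itself (operand 3 tied
via "0"), row two merges with zero ("Dz" matches a vector of zero immediates
and is implemented with a zeroing movprfx), and row three merges with an
arbitrary register through a plain movprfx.  "Upl" restricts the governing
predicate to the lower predicate registers p0-p7, as these instructions
require.  With fcvtx substituted for <sve_fp_op>, the shape is:

{@ [ cons: =0 , 1   , 2 , 3  ; attrs: movprfx ]
   [ &w       , Upl , w , 0  ; *              ] fcvtx\t%0.s, %1/m, %2.d
   [ &w       , Upl , w , Dz ; yes            ] movprfx\t%0.d, %1/z, %2.d\;fcvtx\t%0.s, %1/m, %2.d
   [ &w       , Upl , w , w  ; yes            ] movprfx\t%0, %3\;fcvtx\t%0.s, %1/m, %2.d
}
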
 
 ;; Predicated FCVTXNT.  This doesn't give a natural aarch64_pred_*/cond_*
 
 ;; Predicated integer unary operations.
 (define_insn "@aarch64_pred_<sve_int_op><mode>"
-  [(set (match_operand:VNx4SI_ONLY 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:VNx4SI_ONLY 0 "register_operand")
        (unspec:VNx4SI_ONLY
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (unspec:VNx4SI_ONLY
-            [(match_operand:VNx4SI_ONLY 2 "register_operand" "0, w")]
+            [(match_operand:VNx4SI_ONLY 2 "register_operand")]
             SVE2_U32_UNARY)]
          UNSPEC_PRED_X))]
   "TARGET_SVE2"
-  "@
-   <sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
-   movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1   , 2 ; attrs: movprfx ]
+     [ w        , Upl , 0 ; *              ] <sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+     [ ?&w      , Upl , w ; yes            ] movprfx\t%0, %2\;<sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+  }
 )
 
 ;; Predicated integer unary operations with merging.
 )
 
 (define_insn_and_rewrite "*cond_<sve_int_op><mode>"
-  [(set (match_operand:VNx4SI_ONLY 0 "register_operand" "=w, ?&w, ?&w")
+  [(set (match_operand:VNx4SI_ONLY 0 "register_operand")
        (unspec:VNx4SI_ONLY
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (unspec:VNx4SI_ONLY
             [(match_operand 4)
              (unspec:VNx4SI_ONLY
-               [(match_operand:VNx4SI_ONLY 2 "register_operand" "w, w, w")]
+               [(match_operand:VNx4SI_ONLY 2 "register_operand")]
                SVE2_U32_UNARY)]
             UNSPEC_PRED_X)
-          (match_operand:VNx4SI_ONLY 3 "aarch64_simd_reg_or_zero" "0, Dz, w")]
+          (match_operand:VNx4SI_ONLY 3 "aarch64_simd_reg_or_zero")]
          UNSPEC_SEL))]
   "TARGET_SVE2"
-  "@
-   <sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
-   movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
-   movprfx\t%0, %3\;<sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>"
+  {@ [ cons: =0 , 1   , 2 , 3  ; attrs: movprfx ]
+     [ w        , Upl , w , 0  ; *              ] <sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+     [ ?&w      , Upl , w , Dz ; yes            ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+     [ ?&w      , Upl , w , w  ; yes            ] movprfx\t%0, %3\;<sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+  }
   "&& !CONSTANT_P (operands[4])"
   {
     operands[4] = CONSTM1_RTX (<VPRED>mode);
   }
-  [(set_attr "movprfx" "*,yes,yes")]
 )
 
 ;; -------------------------------------------------------------------------
 
 ;; Predicated FLOGB.
 (define_insn "@aarch64_pred_<sve_fp_op><mode>"
-  [(set (match_operand:<V_INT_EQUIV> 0 "register_operand" "=w, ?&w")
+  [(set (match_operand:<V_INT_EQUIV> 0 "register_operand")
        (unspec:<V_INT_EQUIV>
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (match_operand:SI 3 "aarch64_sve_gp_strictness")
-          (match_operand:SVE_FULL_F 2 "register_operand" "0, w")]
+          (match_operand:SVE_FULL_F 2 "register_operand")]
          SVE2_COND_INT_UNARY_FP))]
   "TARGET_SVE2"
-  "@
-   <sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
-   movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>"
-  [(set_attr "movprfx" "*,yes")]
+  {@ [ cons: =0 , 1   , 2 ; attrs: movprfx ]
+     [ w        , Upl , 0 ; *              ] <sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+     [ ?&w      , Upl , w ; yes            ] movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+  }
 )
 
 ;; Predicated FLOGB with merging.
 )
 
 (define_insn_and_rewrite "*cond_<sve_fp_op><mode>"
-  [(set (match_operand:<V_INT_EQUIV> 0 "register_operand" "=&w, ?&w, ?&w")
+  [(set (match_operand:<V_INT_EQUIV> 0 "register_operand")
        (unspec:<V_INT_EQUIV>
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (unspec:<V_INT_EQUIV>
             [(match_operand 4)
              (const_int SVE_RELAXED_GP)
-             (match_operand:SVE_FULL_F 2 "register_operand" "w, w, w")]
+             (match_operand:SVE_FULL_F 2 "register_operand")]
             SVE2_COND_INT_UNARY_FP)
-          (match_operand:<V_INT_EQUIV> 3 "aarch64_simd_reg_or_zero" "0, Dz, w")]
+          (match_operand:<V_INT_EQUIV> 3 "aarch64_simd_reg_or_zero")]
          UNSPEC_SEL))]
   "TARGET_SVE2 && !rtx_equal_p (operands[2], operands[3])"
-  "@
-   <sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
-   movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
-   movprfx\t%0, %3\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>"
+  {@ [ cons: =0 , 1   , 2 , 3  ; attrs: movprfx ]
+     [ &w       , Upl , w , 0  ; *              ] <sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+     [ ?&w      , Upl , w , Dz ; yes            ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+     [ ?&w      , Upl , w , w  ; yes            ] movprfx\t%0, %3\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+  }
   "&& !rtx_equal_p (operands[1], operands[4])"
   {
     operands[4] = copy_rtx (operands[1]);
   }
-  [(set_attr "movprfx" "*,yes,yes")]
 )
 
 (define_insn "*cond_<sve_fp_op><mode>_strict"
-  [(set (match_operand:<V_INT_EQUIV> 0 "register_operand" "=&w, ?&w, ?&w")
+  [(set (match_operand:<V_INT_EQUIV> 0 "register_operand")
        (unspec:<V_INT_EQUIV>
-         [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
+         [(match_operand:<VPRED> 1 "register_operand")
           (unspec:<V_INT_EQUIV>
             [(match_dup 1)
              (const_int SVE_STRICT_GP)
-             (match_operand:SVE_FULL_F 2 "register_operand" "w, w, w")]
+             (match_operand:SVE_FULL_F 2 "register_operand")]
             SVE2_COND_INT_UNARY_FP)
-          (match_operand:<V_INT_EQUIV> 3 "aarch64_simd_reg_or_zero" "0, Dz, w")]
+          (match_operand:<V_INT_EQUIV> 3 "aarch64_simd_reg_or_zero")]
          UNSPEC_SEL))]
   "TARGET_SVE2 && !rtx_equal_p (operands[2], operands[3])"
-  "@
-   <sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
-   movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
-   movprfx\t%0, %3\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>"
-  [(set_attr "movprfx" "*,yes,yes")]
+  {@ [ cons: =0 , 1   , 2 , 3  ; attrs: movprfx ]
+     [ &w       , Upl , w , 0  ; *              ] <sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+     [ ?&w      , Upl , w , Dz ; yes            ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+     [ ?&w      , Upl , w , w  ; yes            ] movprfx\t%0, %3\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+  }
 )
 
 ;; -------------------------------------------------------------------------
diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
index f76c63b6355e5dcbb79c7c3c06c057ff442bf90f..32c7adc89281b249b52ecedf2f1678749c289d18 100644
--- a/gcc/config/aarch64/aarch64.md
+++ b/gcc/config/aarch64/aarch64.md
   "")
 
 (define_insn "@ccmp<CC_ONLY:mode><GPI:mode>"
-  [(set (match_operand:CC_ONLY 1 "cc_register" "")
+  [(set (match_operand:CC_ONLY 1 "cc_register")
        (if_then_else:CC_ONLY
          (match_operator 4 "aarch64_comparison_operator"
-          [(match_operand 0 "cc_register" "")
+          [(match_operand 0 "cc_register")
            (const_int 0)])
          (compare:CC_ONLY
-           (match_operand:GPI 2 "register_operand" "r,r,r")
-           (match_operand:GPI 3 "aarch64_ccmp_operand" "r,Uss,Usn"))
+           (match_operand:GPI 2 "register_operand")
+           (match_operand:GPI 3 "aarch64_ccmp_operand"))
          (unspec:CC_ONLY
            [(match_operand 5 "immediate_operand")]
            UNSPEC_NZCV)))]
   ""
-  "@
-   ccmp\\t%<w>2, %<w>3, %k5, %m4
-   ccmp\\t%<w>2, %3, %k5, %m4
-   ccmn\\t%<w>2, #%n3, %k5, %m4"
-  [(set_attr "type" "alus_sreg,alus_imm,alus_imm")]
+  {@ [ cons: 2 , 3   ; attrs: type ]
+     [ r       , r   ; alus_sreg   ] ccmp\t%<w>2, %<w>3, %k5, %m4
+     [ r       , Uss ; alus_imm    ] ccmp\t%<w>2, %3, %k5, %m4
+     [ r       , Usn ; alus_imm    ] ccmn\t%<w>2, #%n3, %k5, %m4
+  }
 )
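
Two details show up in the ccmp conversion.  First, only operands that had
constraints get a column: the cc_register operands 0 and 1 (their empty ""
constraints are dropped altogether) do not appear, so the header starts at
cons: 2, with no '=' anywhere because the table describes no output operand.
Second, the rows are ordinary text rather than C string literals, so escapes
are written once: the old "ccmp\\t..." becomes ccmp\t... .  The same
input-only form at its simplest, sketched after the cmp<mode> conversion
elsewhere in this patch (simplified, not the exact upstream pattern):

(define_insn "cmp<mode>"
  [(set (reg:CC CC_REGNUM)
        (compare:CC (match_operand:GPI 0 "register_operand")
                    (match_operand:GPI 1 "aarch64_plus_operand")))]
  ""
  {@ [ cons: 0 , 1 ; attrs: type ]
     [ rk      , r ; alus_sreg   ] cmp\t%<w>0, %<w>1
     [ rk      , I ; alus_imm    ] cmp\t%<w>0, %1
     [ rk      , J ; alus_imm    ] cmn\t%<w>0, #%n1
  }
)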
 
 (define_insn "@ccmp<CCFP_CCFPE:mode><GPF:mode>"
 )
 
 (define_insn "@ccmp<CC_ONLY:mode><GPI:mode>_rev"
-  [(set (match_operand:CC_ONLY 1 "cc_register" "")
+  [(set (match_operand:CC_ONLY 1 "cc_register")
        (if_then_else:CC_ONLY
          (match_operator 4 "aarch64_comparison_operator"
-          [(match_operand 0 "cc_register" "")
+          [(match_operand 0 "cc_register")
            (const_int 0)])
          (unspec:CC_ONLY
            [(match_operand 5 "immediate_operand")]
            UNSPEC_NZCV)
          (compare:CC_ONLY
-           (match_operand:GPI 2 "register_operand" "r,r,r")
-           (match_operand:GPI 3 "aarch64_ccmp_operand" "r,Uss,Usn"))))]
+           (match_operand:GPI 2 "register_operand")
+           (match_operand:GPI 3 "aarch64_ccmp_operand"))))]
   ""
-  "@
-   ccmp\\t%<w>2, %<w>3, %k5, %M4
-   ccmp\\t%<w>2, %3, %k5, %M4
-   ccmn\\t%<w>2, #%n3, %k5, %M4"
-  [(set_attr "type" "alus_sreg,alus_imm,alus_imm")]
+  {@ [ cons: 2 , 3   ; attrs: type ]
+     [ r       , r   ; alus_sreg   ] ccmp\t%<w>2, %<w>3, %k5, %M4
+     [ r       , Uss ; alus_imm    ] ccmp\t%<w>2, %3, %k5, %M4
+     [ r       , Usn ; alus_imm    ] ccmn\t%<w>2, #%n3, %k5, %M4
+  }
 )
 
 (define_insn "@ccmp<CCFP_CCFPE:mode><GPF:mode>_rev"
 )
 
 (define_insn "*call_insn"
-  [(call (mem:DI (match_operand:DI 0 "aarch64_call_insn_operand" "Ucr, Usf"))
+  [(call (mem:DI (match_operand:DI 0 "aarch64_call_insn_operand"))
         (match_operand 1 "" ""))
    (unspec:DI [(match_operand:DI 2 "const_int_operand")] UNSPEC_CALLEE_ABI)
    (clobber (reg:DI LR_REGNUM))]
   ""
-  "@
-  * return aarch64_indirect_call_asm (operands[0]);
-  bl\\t%c0"
-  [(set_attr "type" "call, call")])
+  {@ [ cons: 0 ; attrs: type ]
+     [ Ucr     ; call        ] << aarch64_indirect_call_asm (operands[0]);
+     [ Usf     ; call        ] bl\t%c0
+  }
+)
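
Alternatives whose template is computed by C code use '<<' in the block, a
shorthand for the old '* return': the rest of the row is the body of a
return statement, so it still ends in ';', and because it is no longer
inside a string literal, quotes within it need no escaping.  The DFD move
pattern further down shows the richer, conditional case:

;; Old:  * return aarch64_is_mov_xn_imm (INTVAL (operands[1])) ? \"mov\\t%x0, %1\" : \"mov\\t%w0, %1\";
;; New:  << aarch64_is_mov_xn_imm (INTVAL (operands[1])) ? "mov\t%x0, %1" : "mov\t%w0, %1";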
 
 (define_expand "call_value"
   [(parallel
 
 (define_insn "*call_value_insn"
   [(set (match_operand 0 "" "")
-       (call (mem:DI (match_operand:DI 1 "aarch64_call_insn_operand" "Ucr, Usf"))
+       (call (mem:DI (match_operand:DI 1 "aarch64_call_insn_operand"))
                      (match_operand 2 "" "")))
    (unspec:DI [(match_operand:DI 3 "const_int_operand")] UNSPEC_CALLEE_ABI)
    (clobber (reg:DI LR_REGNUM))]
   ""
-  "@
-  * return aarch64_indirect_call_asm (operands[1]);
-  bl\\t%c1"
-  [(set_attr "type" "call, call")]
+  {@ [ cons: 1 ; attrs: type ]
+     [ Ucr     ; call        ] << aarch64_indirect_call_asm (operands[1]);
+     [ Usf     ; call        ] bl\t%c1
+  }
 )
 
 (define_expand "sibcall"
 )
 
 (define_insn "*mov<mode>_aarch64"
-  [(set (match_operand:HFBF 0 "nonimmediate_operand" "=w,w  ,w ,w  ,?r,?r,w,w,w  ,w  ,w,m,r,m ,r")
-       (match_operand:HFBF 1 "general_operand"      "Y ,?rY,?r,?rY, w, w,w,w,Ufc,Uvi,m,w,m,rY,r"))]
+  [(set (match_operand:HFBF 0 "nonimmediate_operand")
+       (match_operand:HFBF 1 "general_operand"))]
   "TARGET_FLOAT && (register_operand (operands[0], <MODE>mode)
     || aarch64_reg_or_fp_zero (operands[1], <MODE>mode))"
-  "@
-   movi\\t%0.4h, #0
-   fmov\\t%h0, %w1
-   dup\\t%w0.4h, %w1
-   fmov\\t%s0, %w1
-   umov\\t%w0, %1.h[0]
-   fmov\\t%w0, %s1
-   mov\\t%0.h[0], %1.h[0]
-   fmov\\t%s0, %s1
-   fmov\\t%h0, %1
-   * return aarch64_output_scalar_simd_mov_immediate (operands[1], HImode);
-   ldr\\t%h0, %1
-   str\\t%h1, %0
-   ldrh\\t%w0, %1
-   strh\\t%w1, %0
-   mov\\t%w0, %w1"
-  [(set_attr "type" "neon_move,f_mcr,neon_move,f_mcr,neon_to_gp,f_mrc,
-                    neon_move,fmov,fconsts,neon_move,f_loads,f_stores,
-                    load_4,store_4,mov_reg")
-   (set_attr "arch" "simd,fp16,simd,*,simd,*,simd,*,fp16,simd,*,*,*,*,*")]
+  {@ [ cons: =0 , 1   ; attrs: type , arch  ]
+     [ w        , Y   ; neon_move   , simd  ] movi\t%0.4h, #0
+     [ w        , ?rY ; f_mcr       , fp16  ] fmov\t%h0, %w1
+     [ w        , ?r  ; neon_move   , simd  ] dup\t%w0.4h, %w1
+     [ w        , ?rY ; f_mcr       , *     ] fmov\t%s0, %w1
+     [ ?r       , w   ; neon_to_gp  , simd  ] umov\t%w0, %1.h[0]
+     [ ?r       , w   ; f_mrc       , *     ] fmov\t%w0, %s1
+     [ w        , w   ; neon_move   , simd  ] mov\t%0.h[0], %1.h[0]
+     [ w        , w   ; fmov        , *     ] fmov\t%s0, %s1
+     [ w        , Ufc ; fconsts     , fp16  ] fmov\t%h0, %1
+     [ w        , Uvi ; neon_move   , simd  ] << aarch64_output_scalar_simd_mov_immediate (operands[1], HImode);
+     [ w        , m   ; f_loads     , *     ] ldr\t%h0, %1
+     [ m        , w   ; f_stores    , *     ] str\t%h1, %0
+     [ r        , m   ; load_4      , *     ] ldrh\t%w0, %1
+     [ m        , rY  ; store_4     , *     ] strh\t%w1, %0
+     [ r        , r   ; mov_reg     , *     ] mov\t%w0, %w1
+  }
 )
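
Where a pattern used to carry several parallel [(set_attr ...)] lists, the
block gains one attrs column per attribute, and a '*' entry keeps that
attribute's default for the alternative (the aarch64 arch attribute defaults
to "any").  For the fifteen-alternative HFBF move above this replaces two
fifteen-element comma lists that had to be counted in lockstep; an excerpt:

{@ [ cons: =0 , 1   ; attrs: type , arch ]
   [ w        , Ufc ; fconsts     , fp16 ] fmov\t%h0, %1
   [ r        , m   ; load_4      , *    ] ldrh\t%w0, %1
}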
 
 (define_insn "*mov<mode>_aarch64"
-  [(set (match_operand:SFD 0 "nonimmediate_operand" "=w,w  ,?r,w,w  ,w  ,w,m,r,m ,r,r")
-       (match_operand:SFD 1 "general_operand"      "Y ,?rY, w,w,Ufc,Uvi,m,w,m,rY,r,M"))]
+  [(set (match_operand:SFD 0 "nonimmediate_operand")
+       (match_operand:SFD 1 "general_operand"))]
   "TARGET_FLOAT && (register_operand (operands[0], <MODE>mode)
     || aarch64_reg_or_fp_zero (operands[1], <MODE>mode))"
-  "@
-   movi\\t%0.2s, #0
-   fmov\\t%s0, %w1
-   fmov\\t%w0, %s1
-   fmov\\t%s0, %s1
-   fmov\\t%s0, %1
-   * return aarch64_output_scalar_simd_mov_immediate (operands[1], SImode);
-   ldr\\t%s0, %1
-   str\\t%s1, %0
-   ldr\\t%w0, %1
-   str\\t%w1, %0
-   mov\\t%w0, %w1
-   mov\\t%w0, %1"
-  [(set_attr "type" "neon_move,f_mcr,f_mrc,fmov,fconsts,neon_move,\
-                    f_loads,f_stores,load_4,store_4,mov_reg,\
-                    fconsts")
-   (set_attr "arch" "simd,*,*,*,*,simd,*,*,*,*,*,*")]
+  {@ [ cons: =0 , 1   ; attrs: type , arch  ]
+     [ w        , Y   ; neon_move   , simd  ] movi\t%0.2s, #0
+     [ w        , ?rY ; f_mcr       , *     ] fmov\t%s0, %w1
+     [ ?r       , w   ; f_mrc       , *     ] fmov\t%w0, %s1
+     [ w        , w   ; fmov        , *     ] fmov\t%s0, %s1
+     [ w        , Ufc ; fconsts     , *     ] fmov\t%s0, %1
+     [ w        , Uvi ; neon_move   , simd  ] << aarch64_output_scalar_simd_mov_immediate (operands[1], SImode);
+     [ w        , m   ; f_loads     , *     ] ldr\t%s0, %1
+     [ m        , w   ; f_stores    , *     ] str\t%s1, %0
+     [ r        , m   ; load_4      , *     ] ldr\t%w0, %1
+     [ m        , rY  ; store_4     , *     ] str\t%w1, %0
+     [ r        , r   ; mov_reg     , *     ] mov\t%w0, %w1
+     [ r        , M   ; fconsts     , *     ] mov\t%w0, %1
+  }
 )
 
 (define_insn "*mov<mode>_aarch64"
-  [(set (match_operand:DFD 0 "nonimmediate_operand" "=w, w  ,?r,w,w  ,w  ,w,m,r,m ,r,r")
-       (match_operand:DFD 1 "general_operand"      "Y , ?rY, w,w,Ufc,Uvi,m,w,m,rY,r,O"))]
+  [(set (match_operand:DFD 0 "nonimmediate_operand")
+       (match_operand:DFD 1 "general_operand"))]
   "TARGET_FLOAT && (register_operand (operands[0], <MODE>mode)
     || aarch64_reg_or_fp_zero (operands[1], <MODE>mode))"
-  "@
-   movi\\t%d0, #0
-   fmov\\t%d0, %x1
-   fmov\\t%x0, %d1
-   fmov\\t%d0, %d1
-   fmov\\t%d0, %1
-   * return aarch64_output_scalar_simd_mov_immediate (operands[1], DImode);
-   ldr\\t%d0, %1
-   str\\t%d1, %0
-   ldr\\t%x0, %1
-   str\\t%x1, %0
-   mov\\t%x0, %x1
-   * return aarch64_is_mov_xn_imm (INTVAL (operands[1])) ? \"mov\\t%x0, %1\" : \"mov\\t%w0, %1\";"
-  [(set_attr "type" "neon_move,f_mcr,f_mrc,fmov,fconstd,neon_move,\
-                    f_loadd,f_stored,load_8,store_8,mov_reg,\
-                    fconstd")
-   (set_attr "arch" "simd,*,*,*,*,simd,*,*,*,*,*,*")]
+  {@ [ cons: =0 , 1   ; attrs: type , arch  ]
+     [ w        , Y   ; neon_move   , simd  ] movi\t%d0, #0
+     [ w        , ?rY ; f_mcr       , *     ] fmov\t%d0, %x1
+     [ ?r       , w   ; f_mrc       , *     ] fmov\t%x0, %d1
+     [ w        , w   ; fmov        , *     ] fmov\t%d0, %d1
+     [ w        , Ufc ; fconstd     , *     ] fmov\t%d0, %1
+     [ w        , Uvi ; neon_move   , simd  ] << aarch64_output_scalar_simd_mov_immediate (operands[1], DImode);
+     [ w        , m   ; f_loadd     , *     ] ldr\t%d0, %1
+     [ m        , w   ; f_stored    , *     ] str\t%d1, %0
+     [ r        , m   ; load_8      , *     ] ldr\t%x0, %1
+     [ m        , rY  ; store_8     , *     ] str\t%x1, %0
+     [ r        , r   ; mov_reg     , *     ] mov\t%x0, %x1
+     [ r        , O   ; fconstd     , *     ] << aarch64_is_mov_xn_imm (INTVAL (operands[1])) ? "mov\t%x0, %1" : "mov\t%w0, %1";
+  }
 )
 
 (define_split
 ;; Operands 1 and 3 are tied together by the final condition; so we allow
 ;; fairly lax checking on the second memory operation.
 (define_insn "load_pair_sw_<SX:mode><SX2:mode>"
-  [(set (match_operand:SX 0 "register_operand" "=r,w")
-       (match_operand:SX 1 "aarch64_mem_pair_operand" "Ump,Ump"))
-   (set (match_operand:SX2 2 "register_operand" "=r,w")
-       (match_operand:SX2 3 "memory_operand" "m,m"))]
+  [(set (match_operand:SX 0 "register_operand")
+       (match_operand:SX 1 "aarch64_mem_pair_operand"))
+   (set (match_operand:SX2 2 "register_operand")
+       (match_operand:SX2 3 "memory_operand"))]
    "rtx_equal_p (XEXP (operands[3], 0),
                 plus_constant (Pmode,
                                XEXP (operands[1], 0),
                                GET_MODE_SIZE (<SX:MODE>mode)))"
-  "@
-   ldp\\t%w0, %w2, %z1
-   ldp\\t%s0, %s2, %z1"
-  [(set_attr "type" "load_8,neon_load1_2reg")
-   (set_attr "arch" "*,fp")]
+  {@ [ cons: =0 , 1   , =2 , 3 ; attrs: type     , arch ]
+     [ r        , Ump , r  , m ; load_8          , *    ] ldp\t%w0, %w2, %z1
+     [ w        , Ump , w  , m ; neon_load1_2reg , fp   ] ldp\t%s0, %s2, %z1
+  }
 )
 
 ;; Storing different modes that can still be merged
 (define_insn "load_pair_dw_<DX:mode><DX2:mode>"
-  [(set (match_operand:DX 0 "register_operand" "=r,w")
-       (match_operand:DX 1 "aarch64_mem_pair_operand" "Ump,Ump"))
-   (set (match_operand:DX2 2 "register_operand" "=r,w")
-       (match_operand:DX2 3 "memory_operand" "m,m"))]
+  [(set (match_operand:DX 0 "register_operand")
+       (match_operand:DX 1 "aarch64_mem_pair_operand"))
+   (set (match_operand:DX2 2 "register_operand")
+       (match_operand:DX2 3 "memory_operand"))]
    "rtx_equal_p (XEXP (operands[3], 0),
                 plus_constant (Pmode,
                                XEXP (operands[1], 0),
                                GET_MODE_SIZE (<DX:MODE>mode)))"
-  "@
-   ldp\\t%x0, %x2, %z1
-   ldp\\t%d0, %d2, %z1"
-  [(set_attr "type" "load_16,neon_load1_2reg")
-   (set_attr "arch" "*,fp")]
+  {@ [ cons: =0 , 1   , =2 , 3 ; attrs: type     , arch ]
+     [ r        , Ump , r  , m ; load_16         , *    ] ldp\t%x0, %x2, %z1
+     [ w        , Ump , w  , m ; neon_load1_2reg , fp   ] ldp\t%d0, %d2, %z1
+  }
 )
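
Patterns with several outputs follow the same rule: each written operand is
flagged once in the header ('=' on the operand number, '+' likewise where
needed) rather than being repeated in every alternative, so the rows reduce
to bare constraint letters:

;; Both destination registers are outputs, hence "=0" and "=2".
{@ [ cons: =0 , 1   , =2 , 3 ; attrs: type     , arch ]
   [ r        , Ump , r  , m ; load_8          , *    ] ldp\t%w0, %w2, %z1
   [ w        , Ump , w  , m ; neon_load1_2reg , fp   ] ldp\t%s0, %s2, %z1
}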
 
 (define_insn "load_pair_dw_tftf"
 ;; Operands 0 and 2 are tied together by the final condition; so we allow
 ;; fairly lax checking on the second memory operation.
 (define_insn "store_pair_sw_<SX:mode><SX2:mode>"
-  [(set (match_operand:SX 0 "aarch64_mem_pair_operand" "=Ump,Ump")
-       (match_operand:SX 1 "aarch64_reg_zero_or_fp_zero" "rYZ,w"))
-   (set (match_operand:SX2 2 "memory_operand" "=m,m")
-       (match_operand:SX2 3 "aarch64_reg_zero_or_fp_zero" "rYZ,w"))]
+  [(set (match_operand:SX 0 "aarch64_mem_pair_operand")
+       (match_operand:SX 1 "aarch64_reg_zero_or_fp_zero"))
+   (set (match_operand:SX2 2 "memory_operand")
+       (match_operand:SX2 3 "aarch64_reg_zero_or_fp_zero"))]
    "rtx_equal_p (XEXP (operands[2], 0),
                 plus_constant (Pmode,
                                XEXP (operands[0], 0),
                                GET_MODE_SIZE (<SX:MODE>mode)))"
-  "@
-   stp\\t%w1, %w3, %z0
-   stp\\t%s1, %s3, %z0"
-  [(set_attr "type" "store_8,neon_store1_2reg")
-   (set_attr "arch" "*,fp")]
+  {@ [ cons: =0 , 1   , =2 , 3   ; attrs: type      , arch ]
+     [ Ump      , rYZ , m  , rYZ ; store_8          , *    ] stp\t%w1, %w3, %z0
+     [ Ump      , w   , m  , w   ; neon_store1_2reg , fp   ] stp\t%s1, %s3, %z0
+  }
 )
 
 ;; Storing different modes that can still be merged
 (define_insn "store_pair_dw_<DX:mode><DX2:mode>"
-  [(set (match_operand:DX 0 "aarch64_mem_pair_operand" "=Ump,Ump")
-       (match_operand:DX 1 "aarch64_reg_zero_or_fp_zero" "rYZ,w"))
-   (set (match_operand:DX2 2 "memory_operand" "=m,m")
-       (match_operand:DX2 3 "aarch64_reg_zero_or_fp_zero" "rYZ,w"))]
+  [(set (match_operand:DX 0 "aarch64_mem_pair_operand")
+       (match_operand:DX 1 "aarch64_reg_zero_or_fp_zero"))
+   (set (match_operand:DX2 2 "memory_operand")
+       (match_operand:DX2 3 "aarch64_reg_zero_or_fp_zero"))]
    "rtx_equal_p (XEXP (operands[2], 0),
                 plus_constant (Pmode,
                                XEXP (operands[0], 0),
                                GET_MODE_SIZE (<DX:MODE>mode)))"
-  "@
-   stp\\t%x1, %x3, %z0
-   stp\\t%d1, %d3, %z0"
-  [(set_attr "type" "store_16,neon_store1_2reg")
-   (set_attr "arch" "*,fp")]
+  {@ [ cons: =0 , 1   , =2 , 3   ; attrs: type      , arch ]
+     [ Ump      , rYZ , m  , rYZ ; store_16         , *    ] stp\t%x1, %x3, %z0
+     [ Ump      , w   , m  , w   ; neon_store1_2reg , fp   ] stp\t%d1, %d3, %z0
+  }
 )
 
 (define_insn "store_pair_dw_tftf"
 )
 
 (define_insn "*extendsidi2_aarch64"
-  [(set (match_operand:DI 0 "register_operand" "=r,r")
-        (sign_extend:DI (match_operand:SI 1 "nonimmediate_operand" "r,m")))]
+  [(set (match_operand:DI 0 "register_operand")
+        (sign_extend:DI (match_operand:SI 1 "nonimmediate_operand")))]
   ""
-  "@
-   sxtw\t%0, %w1
-   ldrsw\t%0, %1"
-  [(set_attr "type" "extend,load_4")]
+  {@ [ cons: =0 , 1 ; attrs: type ]
+     [ r        , r ; extend      ] sxtw\t%0, %w1
+     [ r        , m ; load_4      ] ldrsw\t%0, %1
+  }
 )
 
 (define_insn "*load_pair_extendsidi2_aarch64"
 )
 
 (define_insn "*zero_extendsidi2_aarch64"
-  [(set (match_operand:DI 0 "register_operand" "=r,r,w,w,r,w")
-        (zero_extend:DI (match_operand:SI 1 "nonimmediate_operand" "r,m,r,m,w,w")))]
-  ""
-  "@
-   uxtw\t%0, %w1
-   ldr\t%w0, %1
-   fmov\t%s0, %w1
-   ldr\t%s0, %1
-   fmov\t%w0, %s1
-   fmov\t%s0, %s1"
-  [(set_attr "type" "mov_reg,load_4,f_mcr,f_loads,f_mrc,fmov")
-   (set_attr "arch" "*,*,fp,fp,fp,fp")]
+  [(set (match_operand:DI 0 "register_operand")
+        (zero_extend:DI (match_operand:SI 1 "nonimmediate_operand")))]
+  ""
+  {@ [ cons: =0 , 1 ; attrs: type , arch ]
+     [ r        , r ; mov_reg     , *    ] uxtw\t%0, %w1
+     [ r        , m ; load_4      , *    ] ldr\t%w0, %1
+     [ w        , r ; f_mcr       , fp   ] fmov\t%s0, %w1
+     [ w        , m ; f_loads     , fp   ] ldr\t%s0, %1
+     [ r        , w ; f_mrc       , fp   ] fmov\t%w0, %s1
+     [ w        , w ; fmov        , fp   ] fmov\t%s0, %s1
+  }
 )
 
 (define_insn "*load_pair_zero_extendsidi2_aarch64"
-  [(set (match_operand:DI 0 "register_operand" "=r,w")
-       (zero_extend:DI (match_operand:SI 1 "aarch64_mem_pair_operand" "Ump,Ump")))
-   (set (match_operand:DI 2 "register_operand" "=r,w")
-       (zero_extend:DI (match_operand:SI 3 "memory_operand" "m,m")))]
+  [(set (match_operand:DI 0 "register_operand")
+       (zero_extend:DI (match_operand:SI 1 "aarch64_mem_pair_operand")))
+   (set (match_operand:DI 2 "register_operand")
+       (zero_extend:DI (match_operand:SI 3 "memory_operand")))]
   "rtx_equal_p (XEXP (operands[3], 0),
                plus_constant (Pmode,
                               XEXP (operands[1], 0),
                               GET_MODE_SIZE (SImode)))"
-  "@
-   ldp\t%w0, %w2, %z1
-   ldp\t%s0, %s2, %z1"
-  [(set_attr "type" "load_8,neon_load1_2reg")
-   (set_attr "arch" "*,fp")]
+  {@ [ cons: =0 , 1   , =2 , 3 ; attrs: type     , arch ]
+     [ r        , Ump , r  , m ; load_8          , *    ] ldp\t%w0, %w2, %z1
+     [ w        , Ump , w  , m ; neon_load1_2reg , fp   ] ldp\t%s0, %s2, %z1
+  }
 )
 
 (define_expand "<ANY_EXTEND:optab><SHORT:mode><GPI:mode>2"
 )
 
 (define_insn "*extend<SHORT:mode><GPI:mode>2_aarch64"
-  [(set (match_operand:GPI 0 "register_operand" "=r,r,r")
-        (sign_extend:GPI (match_operand:SHORT 1 "nonimmediate_operand" "r,m,w")))]
+  [(set (match_operand:GPI 0 "register_operand")
+        (sign_extend:GPI (match_operand:SHORT 1 "nonimmediate_operand")))]
   ""
-  "@
-   sxt<SHORT:size>\t%<GPI:w>0, %w1
-   ldrs<SHORT:size>\t%<GPI:w>0, %1
-   smov\t%<GPI:w>0, %1.<SHORT:size>[0]"
-  [(set_attr "type" "extend,load_4,neon_to_gp")
-   (set_attr "arch" "*,*,fp")]
+  {@ [ cons: =0 , 1 ; attrs: type , arch ]
+     [ r        , r ; extend      , *    ] sxt<SHORT:size>\t%<GPI:w>0, %w1
+     [ r        , m ; load_4      , *    ] ldrs<SHORT:size>\t%<GPI:w>0, %1
+     [ r        , w ; neon_to_gp  , fp   ] smov\t%<GPI:w>0, %1.<SHORT:size>[0]
+  }
 )
 
 (define_insn "*zero_extend<SHORT:mode><GPI:mode>2_aarch64"
-  [(set (match_operand:GPI 0 "register_operand" "=r,r,w,r")
-        (zero_extend:GPI (match_operand:SHORT 1 "nonimmediate_operand" "r,m,m,w")))]
+  [(set (match_operand:GPI 0 "register_operand")
+        (zero_extend:GPI (match_operand:SHORT 1 "nonimmediate_operand")))]
   ""
-  "@
-   and\t%<GPI:w>0, %<GPI:w>1, <SHORT:short_mask>
-   ldr<SHORT:size>\t%w0, %1
-   ldr\t%<SHORT:size>0, %1
-   umov\t%w0, %1.<SHORT:size>[0]"
-  [(set_attr "type" "logic_imm,load_4,f_loads,neon_to_gp")
-   (set_attr "arch" "*,*,fp,fp")]
+  {@ [ cons: =0 , 1 ; attrs: type , arch ]
+     [ r        , r ; logic_imm   , *    ] and\t%<GPI:w>0, %<GPI:w>1, <SHORT:short_mask>
+     [ r        , m ; load_4      , *    ] ldr<SHORT:size>\t%w0, %1
+     [ w        , m ; f_loads     , fp   ] ldr\t%<SHORT:size>0, %1
+     [ r        , w ; neon_to_gp  , fp   ] umov\t%w0, %1.<SHORT:size>[0]
+  }
 )
 
 (define_expand "<optab>qihi2"
 )
 
 (define_insn "*extendqihi2_aarch64"
-  [(set (match_operand:HI 0 "register_operand" "=r,r")
-       (sign_extend:HI (match_operand:QI 1 "nonimmediate_operand" "r,m")))]
+  [(set (match_operand:HI 0 "register_operand")
+       (sign_extend:HI (match_operand:QI 1 "nonimmediate_operand")))]
   ""
-  "@
-   sxtb\t%w0, %w1
-   ldrsb\t%w0, %1"
-  [(set_attr "type" "extend,load_4")]
+  {@ [ cons: =0 , 1 ; attrs: type ]
+     [ r        , r ; extend      ] sxtb\t%w0, %w1
+     [ r        , m ; load_4      ] ldrsb\t%w0, %1
+  }
 )
 
 (define_insn "*zero_extendqihi2_aarch64"
-  [(set (match_operand:HI 0 "register_operand" "=r,r")
-       (zero_extend:HI (match_operand:QI 1 "nonimmediate_operand" "r,m")))]
+  [(set (match_operand:HI 0 "register_operand")
+       (zero_extend:HI (match_operand:QI 1 "nonimmediate_operand")))]
   ""
-  "@
-   and\t%w0, %w1, 255
-   ldrb\t%w0, %1"
-  [(set_attr "type" "logic_imm,load_4")]
+  {@ [ cons: =0 , 1 ; attrs: type ]
+     [ r        , r ; logic_imm   ] and\t%w0, %w1, 255
+     [ r        , m ; load_4      ] ldrb\t%w0, %1
+  }
 )
 
 ;; -------------------------------------------------------------------
 
 (define_insn "*add<mode>3_aarch64"
   [(set
-    (match_operand:GPI 0 "register_operand" "=rk,rk,w,rk,r,r,rk")
+    (match_operand:GPI 0 "register_operand")
     (plus:GPI
-     (match_operand:GPI 1 "register_operand" "%rk,rk,w,rk,rk,0,rk")
-     (match_operand:GPI 2 "aarch64_pluslong_operand" "I,r,w,J,Uaa,Uai,Uav")))]
-  ""
-  "@
-  add\\t%<w>0, %<w>1, %2
-  add\\t%<w>0, %<w>1, %<w>2
-  add\\t%<rtn>0<vas>, %<rtn>1<vas>, %<rtn>2<vas>
-  sub\\t%<w>0, %<w>1, #%n2
-  #
-  * return aarch64_output_sve_scalar_inc_dec (operands[2]);
-  * return aarch64_output_sve_addvl_addpl (operands[2]);"
+     (match_operand:GPI 1 "register_operand")
+     (match_operand:GPI 2 "aarch64_pluslong_operand")))]
+  ""
+  {@ [ cons: =0 , 1   , 2   ; attrs: type , arch  ]
+     [ rk       , %rk , I   ; alu_imm     , *     ] add\t%<w>0, %<w>1, %2
+     [ rk       , rk  , r   ; alu_sreg    , *     ] add\t%<w>0, %<w>1, %<w>2
+     [ w        , w   , w   ; neon_add    , simd  ] add\t%<rtn>0<vas>, %<rtn>1<vas>, %<rtn>2<vas>
+     [ rk       , rk  , J   ; alu_imm     , *     ] sub\t%<w>0, %<w>1, #%n2
+     [ r        , rk  , Uaa ; multiple    , *     ] #
+     [ r        , 0   , Uai ; alu_imm     , sve   ] << aarch64_output_sve_scalar_inc_dec (operands[2]);
+     [ rk       , rk  , Uav ; alu_imm     , sve   ] << aarch64_output_sve_addvl_addpl (operands[2]);
+  }
   ;; The "alu_imm" types for INC/DEC and ADDVL/ADDPL are just placeholders.
-  [(set_attr "type" "alu_imm,alu_sreg,neon_add,alu_imm,multiple,alu_imm,alu_imm")
-   (set_attr "arch" "*,*,simd,*,*,sve,sve")]
 )
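
Two more pieces of old-syntax punctuation keep their meaning across the
conversion: the '%' in the first row's %rk marks operands 1 and 2 as
commutative and, as before, is written only in the first alternative; and a
bare '#' template means the alternative emits nothing itself and must be
split, which a following define_split handles.  From the table above:

{@ [ cons: =0 , 1   , 2   ; attrs: type , arch ]
   [ rk       , %rk , I   ; alu_imm     , *    ] add\t%<w>0, %<w>1, %2
   [ r        , rk  , Uaa ; multiple    , *    ] #
}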
 
 ;; zero_extend version of above
 (define_insn "*addsi3_aarch64_uxtw"
   [(set
-    (match_operand:DI 0 "register_operand" "=rk,rk,rk,r")
+    (match_operand:DI 0 "register_operand")
     (zero_extend:DI
-     (plus:SI (match_operand:SI 1 "register_operand" "%rk,rk,rk,rk")
-             (match_operand:SI 2 "aarch64_pluslong_operand" "I,r,J,Uaa"))))]
-  ""
-  "@
-  add\\t%w0, %w1, %2
-  add\\t%w0, %w1, %w2
-  sub\\t%w0, %w1, #%n2
-  #"
-  [(set_attr "type" "alu_imm,alu_sreg,alu_imm,multiple")]
+     (plus:SI (match_operand:SI 1 "register_operand")
+             (match_operand:SI 2 "aarch64_pluslong_operand"))))]
+  ""
+  {@ [ cons: =0 , 1   , 2   ; attrs: type ]
+     [ rk       , %rk , I   ; alu_imm     ] add\t%w0, %w1, %2
+     [ rk       , rk  , r   ; alu_sreg    ] add\t%w0, %w1, %w2
+     [ rk       , rk  , J   ; alu_imm     ] sub\t%w0, %w1, #%n2
+     [ r        , rk  , Uaa ; multiple    ] #
+  }
 )
 
 ;; If there's a free register, and we can load the constant with a
 ;; this pattern.
 (define_insn_and_split "*add<mode>3_poly_1"
   [(set
-    (match_operand:GPI 0 "register_operand" "=r,r,r,r,r,r,&r")
+    (match_operand:GPI 0 "register_operand")
     (plus:GPI
-     (match_operand:GPI 1 "register_operand" "%rk,rk,rk,rk,0,rk,rk")
-     (match_operand:GPI 2 "aarch64_pluslong_or_poly_operand" "I,r,J,Uaa,Uai,Uav,Uat")))]
+     (match_operand:GPI 1 "register_operand")
+     (match_operand:GPI 2 "aarch64_pluslong_or_poly_operand")))]
   "TARGET_SVE && operands[0] != stack_pointer_rtx"
-  "@
-  add\\t%<w>0, %<w>1, %2
-  add\\t%<w>0, %<w>1, %<w>2
-  sub\\t%<w>0, %<w>1, #%n2
-  #
-  * return aarch64_output_sve_scalar_inc_dec (operands[2]);
-  * return aarch64_output_sve_addvl_addpl (operands[2]);
-  #"
+  {@ [ cons: =0 , 1   , 2   ; attrs: type ]
+     [ r        , %rk , I   ; alu_imm     ] add\t%<w>0, %<w>1, %2
+     [ r        , rk  , r   ; alu_sreg    ] add\t%<w>0, %<w>1, %<w>2
+     [ r        , rk  , J   ; alu_imm     ] sub\t%<w>0, %<w>1, #%n2
+     [ r        , rk  , Uaa ; multiple    ] #
+     [ r        , 0   , Uai ; alu_imm     ] << aarch64_output_sve_scalar_inc_dec (operands[2]);
+     [ r        , rk  , Uav ; alu_imm     ] << aarch64_output_sve_addvl_addpl (operands[2]);
+     [ &r       , rk  , Uat ; multiple    ] #
+  }
   "&& epilogue_completed
    && !reg_overlap_mentioned_p (operands[0], operands[1])
    && aarch64_split_add_offset_immediate (operands[2], <MODE>mode)"
     DONE;
   }
   ;; The "alu_imm" types for INC/DEC and ADDVL/ADDPL are just placeholders.
-  [(set_attr "type" "alu_imm,alu_sreg,alu_imm,multiple,alu_imm,alu_imm,multiple")]
 )
 
 (define_split
 (define_insn "add<mode>3_compare0"
   [(set (reg:CC_NZ CC_REGNUM)
        (compare:CC_NZ
-        (plus:GPI (match_operand:GPI 1 "register_operand" "%rk,rk,rk")
-                  (match_operand:GPI 2 "aarch64_plus_operand" "r,I,J"))
+        (plus:GPI (match_operand:GPI 1 "register_operand")
+                  (match_operand:GPI 2 "aarch64_plus_operand"))
         (const_int 0)))
-   (set (match_operand:GPI 0 "register_operand" "=r,r,r")
+   (set (match_operand:GPI 0 "register_operand")
        (plus:GPI (match_dup 1) (match_dup 2)))]
   ""
-  "@
-  adds\\t%<w>0, %<w>1, %<w>2
-  adds\\t%<w>0, %<w>1, %2
-  subs\\t%<w>0, %<w>1, #%n2"
-  [(set_attr "type" "alus_sreg,alus_imm,alus_imm")]
+  {@ [ cons: =0 , 1   , 2 ; attrs: type ]
+     [ r        , %rk , r ; alus_sreg   ] adds\t%<w>0, %<w>1, %<w>2
+     [ r        , rk  , I ; alus_imm    ] adds\t%<w>0, %<w>1, %2
+     [ r        , rk  , J ; alus_imm    ] subs\t%<w>0, %<w>1, #%n2
+  }
 )
 
 ;; zero_extend version of above
 (define_insn "*addsi3_compare0_uxtw"
   [(set (reg:CC_NZ CC_REGNUM)
        (compare:CC_NZ
-        (plus:SI (match_operand:SI 1 "register_operand" "%rk,rk,rk")
-                 (match_operand:SI 2 "aarch64_plus_operand" "r,I,J"))
+        (plus:SI (match_operand:SI 1 "register_operand")
+                 (match_operand:SI 2 "aarch64_plus_operand"))
         (const_int 0)))
-   (set (match_operand:DI 0 "register_operand" "=r,r,r")
+   (set (match_operand:DI 0 "register_operand")
        (zero_extend:DI (plus:SI (match_dup 1) (match_dup 2))))]
   ""
-  "@
-  adds\\t%w0, %w1, %w2
-  adds\\t%w0, %w1, %2
-  subs\\t%w0, %w1, #%n2"
-  [(set_attr "type" "alus_sreg,alus_imm,alus_imm")]
+  {@ [ cons: =0 , 1   , 2 ; attrs: type ]
+     [ r        , %rk , r ; alus_sreg   ] adds\t%w0, %w1, %w2
+     [ r        , rk  , I ; alus_imm    ] adds\t%w0, %w1, %2
+     [ r        , rk  , J ; alus_imm    ] subs\t%w0, %w1, #%n2
+  }
 )
 
 (define_insn "*add<mode>3_compareC_cconly"
   [(set (reg:CC_C CC_REGNUM)
        (compare:CC_C
          (plus:GPI
-           (match_operand:GPI 0 "register_operand" "r,r,r")
-           (match_operand:GPI 1 "aarch64_plus_operand" "r,I,J"))
+           (match_operand:GPI 0 "register_operand")
+           (match_operand:GPI 1 "aarch64_plus_operand"))
          (match_dup 0)))]
   ""
-  "@
-  cmn\\t%<w>0, %<w>1
-  cmn\\t%<w>0, %1
-  cmp\\t%<w>0, #%n1"
-  [(set_attr "type" "alus_sreg,alus_imm,alus_imm")]
+  {@ [ cons: 0 , 1 ; attrs: type ]
+     [ r       , r ; alus_sreg   ] cmn\t%<w>0, %<w>1
+     [ r       , I ; alus_imm    ] cmn\t%<w>0, %1
+     [ r       , J ; alus_imm    ] cmp\t%<w>0, #%n1
+  }
 )
 
 (define_insn "add<mode>3_compareC"
   [(set (reg:CC_C CC_REGNUM)
        (compare:CC_C
          (plus:GPI
-           (match_operand:GPI 1 "register_operand" "rk,rk,rk")
-           (match_operand:GPI 2 "aarch64_plus_operand" "r,I,J"))
+           (match_operand:GPI 1 "register_operand")
+           (match_operand:GPI 2 "aarch64_plus_operand"))
          (match_dup 1)))
-   (set (match_operand:GPI 0 "register_operand" "=r,r,r")
+   (set (match_operand:GPI 0 "register_operand")
        (plus:GPI (match_dup 1) (match_dup 2)))]
   ""
-  "@
-  adds\\t%<w>0, %<w>1, %<w>2
-  adds\\t%<w>0, %<w>1, %2
-  subs\\t%<w>0, %<w>1, #%n2"
-  [(set_attr "type" "alus_sreg,alus_imm,alus_imm")]
+  {@ [ cons: =0 , 1  , 2 ; attrs: type ]
+     [ r        , rk , r ; alus_sreg   ] adds\t%<w>0, %<w>1, %<w>2
+     [ r        , rk , I ; alus_imm    ] adds\t%<w>0, %<w>1, %2
+     [ r        , rk , J ; alus_imm    ] subs\t%<w>0, %<w>1, #%n2
+  }
 )
 
 (define_insn "*add<mode>3_compareV_cconly_imm"
   [(set (reg:CC_V CC_REGNUM)
        (compare:CC_V
          (plus:<DWI>
-           (sign_extend:<DWI> (match_operand:GPI 0 "register_operand" "r,r"))
-           (match_operand:<DWI> 1 "const_scalar_int_operand" ""))
+           (sign_extend:<DWI> (match_operand:GPI 0 "register_operand"))
+           (match_operand:<DWI> 1 "const_scalar_int_operand"))
          (sign_extend:<DWI>
           (plus:GPI
            (match_dup 0)
-           (match_operand:GPI 2 "aarch64_plus_immediate" "I,J")))))]
+           (match_operand:GPI 2 "aarch64_plus_immediate")))))]
   "INTVAL (operands[1]) == INTVAL (operands[2])"
-  "@
-  cmn\\t%<w>0, %<w>1
-  cmp\\t%<w>0, #%n1"
+  {@ [ cons: 0 , 2  ]
+     [ r       , I  ] cmn\t%<w>0, %<w>1
+     [ r       , J  ] cmp\t%<w>0, #%n1
+  }
   [(set_attr "type" "alus_imm")]
 )
 
        (compare:CC_V
          (plus:<DWI>
            (sign_extend:<DWI>
-             (match_operand:GPI 1 "register_operand" "rk,rk"))
-           (match_operand:GPI 2 "aarch64_plus_immediate" "I,J"))
+             (match_operand:GPI 1 "register_operand"))
+           (match_operand:GPI 2 "aarch64_plus_immediate"))
          (sign_extend:<DWI>
            (plus:GPI (match_dup 1) (match_dup 2)))))
-   (set (match_operand:GPI 0 "register_operand" "=r,r")
+   (set (match_operand:GPI 0 "register_operand")
        (plus:GPI (match_dup 1) (match_dup 2)))]
    ""
-   "@
-   adds\\t%<w>0, %<w>1, %<w>2
-   subs\\t%<w>0, %<w>1, #%n2"
-  [(set_attr "type" "alus_imm,alus_imm")]
+   {@ [ cons: =0 , 1  , 2 ; attrs: type ]
+      [ r        , rk , I ; alus_imm    ] adds\t%<w>0, %<w>1, %<w>2
+      [ r        , rk , J ; alus_imm    ] subs\t%<w>0, %<w>1, #%n2
+   }
 )
 
 (define_insn "add<mode>3_compareV"
 (define_insn "*add<mode>3nr_compare0"
   [(set (reg:CC_NZ CC_REGNUM)
        (compare:CC_NZ
-        (plus:GPI (match_operand:GPI 0 "register_operand" "%r,r,r")
-                  (match_operand:GPI 1 "aarch64_plus_operand" "r,I,J"))
+        (plus:GPI (match_operand:GPI 0 "register_operand")
+                  (match_operand:GPI 1 "aarch64_plus_operand"))
         (const_int 0)))]
   ""
-  "@
-  cmn\\t%<w>0, %<w>1
-  cmn\\t%<w>0, %1
-  cmp\\t%<w>0, #%n1"
-  [(set_attr "type" "alus_sreg,alus_imm,alus_imm")]
+  {@ [ cons: 0 , 1 ; attrs: type ]
+     [ %r      , r ; alus_sreg   ] cmn\t%<w>0, %<w>1
+     [ r       , I ; alus_imm    ] cmn\t%<w>0, %1
+     [ r       , J ; alus_imm    ] cmp\t%<w>0, #%n1
+  }
 )
 
 (define_insn "aarch64_sub<mode>_compare0"
 )
 
 (define_insn "subdi3"
-  [(set (match_operand:DI 0 "register_operand" "=rk,w")
-       (minus:DI (match_operand:DI 1 "register_operand" "rk,w")
-                 (match_operand:DI 2 "register_operand" "r,w")))]
+  [(set (match_operand:DI 0 "register_operand")
+       (minus:DI (match_operand:DI 1 "register_operand")
+                 (match_operand:DI 2 "register_operand")))]
   ""
-  "@
-   sub\\t%x0, %x1, %x2
-   sub\\t%d0, %d1, %d2"
-  [(set_attr "type" "alu_sreg, neon_sub")
-   (set_attr "arch" "*,simd")]
+  {@ [ cons: =0 , 1  , 2 ; attrs: type , arch  ]
+     [ rk       , rk , r ; alu_sreg    , *     ] sub\t%x0, %x1, %x2
+     [ w        , w  , w ; neon_sub    , simd  ] sub\t%d0, %d1, %d2
+  }
 )
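
Note (not part of the patch): patterns that carried a per-alternative "arch"
attribute gain a second attrs column, where "*" keeps the default of no
architecture restriction, so subdi3 now reads:

    ;; old: (set_attr "arch" "*,simd") distributed across the alternatives
    ;; new: arch spelled out on each row
    [ rk , rk , r ; alu_sreg , *    ] sub\t%x0, %x1, %x2
    [ w  , w  , w ; neon_sub , simd ] sub\t%d0, %d1, %d2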
 
 (define_expand "subv<GPI:mode>4"
        (compare:CC_V
         (sign_extend:<DWI>
          (minus:GPI
-          (match_operand:GPI 1 "register_operand" "rk,rk")
-          (match_operand:GPI 2 "aarch64_plus_immediate" "I,J")))
+          (match_operand:GPI 1 "register_operand")
+          (match_operand:GPI 2 "aarch64_plus_immediate")))
         (minus:<DWI> (sign_extend:<DWI> (match_dup 1))
                      (match_dup 2))))
-   (set (match_operand:GPI 0 "register_operand" "=r,r")
+   (set (match_operand:GPI 0 "register_operand")
        (minus:GPI (match_dup 1) (match_dup 2)))]
   ""
-  "@
-   subs\\t%<w>0, %<w>1, %2
-   adds\\t%<w>0, %<w>1, #%n2"
+  {@ [ cons: =0 , 1  , 2  ]
+     [ r        , rk , I  ] subs\t%<w>0, %<w>1, %2
+     [ r        , rk , J  ] adds\t%<w>0, %<w>1, #%n2
+  }
   [(set_attr "type" "alus_sreg")]
 )
 
   [(set (reg:CC_V CC_REGNUM)
        (compare:CC_V
         (sign_extend:<DWI>
-         (minus:GPI (match_operand:GPI 0 "register_operand" "r,r,r")
-                    (match_operand:GPI 1 "aarch64_plus_operand" "r,I,J")))
+         (minus:GPI (match_operand:GPI 0 "register_operand")
+                    (match_operand:GPI 1 "aarch64_plus_operand")))
         (minus:<DWI> (sign_extend:<DWI> (match_dup 0))
                    (sign_extend:<DWI> (match_dup 1)))))]
   ""
-  "@
-   cmp\\t%<w>0, %<w>1
-   cmp\\t%<w>0, %1
-   cmp\\t%<w>0, #%n1"
+  {@ [ cons: 0 , 1  ]
+     [ r       , r  ] cmp\t%<w>0, %<w>1
+     [ r       , I  ] cmp\t%<w>0, %1
+     [ r       , J  ] cmp\t%<w>0, #%n1
+  }
   [(set_attr "type" "alus_sreg")]
 )
 
 (define_insn "sub<mode>3_compare1_imm"
   [(set (reg:CC CC_REGNUM)
        (compare:CC
-         (match_operand:GPI 1 "register_operand" "rk,rk")
-         (match_operand:GPI 2 "aarch64_plus_immediate" "I,J")))
-   (set (match_operand:GPI 0 "register_operand" "=r,r")
+         (match_operand:GPI 1 "register_operand")
+         (match_operand:GPI 2 "aarch64_plus_immediate")))
+   (set (match_operand:GPI 0 "register_operand")
        (plus:GPI
          (match_dup 1)
-         (match_operand:GPI 3 "aarch64_plus_immediate" "J,I")))]
+         (match_operand:GPI 3 "aarch64_plus_immediate")))]
   "UINTVAL (operands[2]) == -UINTVAL (operands[3])"
-  "@
-  subs\\t%<w>0, %<w>1, %2
-  adds\\t%<w>0, %<w>1, #%n2"
+  {@ [ cons: =0 , 1  , 2 , 3  ]
+     [ r        , rk , I , J  ] subs\t%<w>0, %<w>1, %2
+     [ r        , rk , J , I  ] adds\t%<w>0, %<w>1, #%n2
+  }
   [(set_attr "type" "alus_imm")]
 )
 
 )
 
 (define_insn "neg<mode>2"
-  [(set (match_operand:GPI 0 "register_operand" "=r,w")
-       (neg:GPI (match_operand:GPI 1 "register_operand" "r,w")))]
+  [(set (match_operand:GPI 0 "register_operand")
+       (neg:GPI (match_operand:GPI 1 "register_operand")))]
   ""
-  "@
-   neg\\t%<w>0, %<w>1
-   neg\\t%<rtn>0<vas>, %<rtn>1<vas>"
-  [(set_attr "type" "alu_sreg, neon_neg<q>")
-   (set_attr "arch" "*,simd")]
+  {@ [ cons: =0 , 1 ; attrs: type , arch  ]
+     [ r        , r ; alu_sreg    , *     ] neg\t%<w>0, %<w>1
+     [ w        , w ; neon_neg<q> , simd  ] neg\t%<rtn>0<vas>, %<rtn>1<vas>
+  }
 )
 
 ;; zero_extend version of above
 
 (define_insn "cmp<mode>"
   [(set (reg:CC CC_REGNUM)
-       (compare:CC (match_operand:GPI 0 "register_operand" "rk,rk,rk")
-                   (match_operand:GPI 1 "aarch64_plus_operand" "r,I,J")))]
+       (compare:CC (match_operand:GPI 0 "register_operand")
+                   (match_operand:GPI 1 "aarch64_plus_operand")))]
   ""
-  "@
-   cmp\\t%<w>0, %<w>1
-   cmp\\t%<w>0, %1
-   cmn\\t%<w>0, #%n1"
-  [(set_attr "type" "alus_sreg,alus_imm,alus_imm")]
+  {@ [ cons: 0 , 1 ; attrs: type ]
+     [ rk      , r ; alus_sreg   ] cmp\t%<w>0, %<w>1
+     [ rk      , I ; alus_imm    ] cmp\t%<w>0, %1
+     [ rk      , J ; alus_imm    ] cmn\t%<w>0, #%n1
+  }
 )
 
 (define_insn "fcmp<mode>"
   [(set (reg:CCFP CC_REGNUM)
-        (compare:CCFP (match_operand:GPF 0 "register_operand" "w,w")
-                     (match_operand:GPF 1 "aarch64_fp_compare_operand" "Y,w")))]
+        (compare:CCFP (match_operand:GPF 0 "register_operand")
+                     (match_operand:GPF 1 "aarch64_fp_compare_operand")))]
    "TARGET_FLOAT"
-   "@
-    fcmp\\t%<s>0, #0.0
-    fcmp\\t%<s>0, %<s>1"
+   {@ [ cons: 0 , 1  ]
+      [ w       , Y  ] fcmp\t%<s>0, #0.0
+      [ w       , w  ] fcmp\t%<s>0, %<s>1
+   }
   [(set_attr "type" "fcmp<s>")]
 )
 
 (define_insn "fcmpe<mode>"
   [(set (reg:CCFPE CC_REGNUM)
-        (compare:CCFPE (match_operand:GPF 0 "register_operand" "w,w")
-                      (match_operand:GPF 1 "aarch64_fp_compare_operand" "Y,w")))]
+        (compare:CCFPE (match_operand:GPF 0 "register_operand")
+                      (match_operand:GPF 1 "aarch64_fp_compare_operand")))]
    "TARGET_FLOAT"
-   "@
-    fcmpe\\t%<s>0, #0.0
-    fcmpe\\t%<s>0, %<s>1"
+   {@ [ cons: 0 , 1  ]
+      [ w       , Y  ] fcmpe\t%<s>0, #0.0
+      [ w       , w  ] fcmpe\t%<s>0, %<s>1
+   }
   [(set_attr "type" "fcmp<s>")]
 )
 
 )
 
 (define_insn "*cmov<mode>_insn"
-  [(set (match_operand:ALLI 0 "register_operand" "=r,r,r,r,r,r,r")
+  [(set (match_operand:ALLI 0 "register_operand")
        (if_then_else:ALLI
         (match_operator 1 "aarch64_comparison_operator"
-         [(match_operand 2 "cc_register" "") (const_int 0)])
-        (match_operand:ALLI 3 "aarch64_reg_zero_or_m1_or_1" "rZ,rZ,UsM,rZ,Ui1,UsM,Ui1")
-        (match_operand:ALLI 4 "aarch64_reg_zero_or_m1_or_1" "rZ,UsM,rZ,Ui1,rZ,UsM,Ui1")))]
+         [(match_operand 2 "cc_register") (const_int 0)])
+        (match_operand:ALLI 3 "aarch64_reg_zero_or_m1_or_1")
+        (match_operand:ALLI 4 "aarch64_reg_zero_or_m1_or_1")))]
   "!((operands[3] == const1_rtx && operands[4] == constm1_rtx)
      || (operands[3] == constm1_rtx && operands[4] == const1_rtx))"
   ;; Final two alternatives should be unreachable, but included for completeness
-  "@
-   csel\\t%<w>0, %<w>3, %<w>4, %m1
-   csinv\\t%<w>0, %<w>3, <w>zr, %m1
-   csinv\\t%<w>0, %<w>4, <w>zr, %M1
-   csinc\\t%<w>0, %<w>3, <w>zr, %m1
-   csinc\\t%<w>0, %<w>4, <w>zr, %M1
-   mov\\t%<w>0, -1
-   mov\\t%<w>0, 1"
-  [(set_attr "type" "csel, csel, csel, csel, csel, mov_imm, mov_imm")]
+  {@ [ cons: =0 , 3   , 4   ; attrs: type ]
+     [ r        , rZ  , rZ  ; csel        ] csel\t%<w>0, %<w>3, %<w>4, %m1
+     [ r        , rZ  , UsM ; csel        ] csinv\t%<w>0, %<w>3, <w>zr, %m1
+     [ r        , UsM , rZ  ; csel        ] csinv\t%<w>0, %<w>4, <w>zr, %M1
+     [ r        , rZ  , Ui1 ; csel        ] csinc\t%<w>0, %<w>3, <w>zr, %m1
+     [ r        , Ui1 , rZ  ; csel        ] csinc\t%<w>0, %<w>4, <w>zr, %M1
+     [ r        , UsM , UsM ; mov_imm     ] mov\t%<w>0, -1
+     [ r        , Ui1 , Ui1 ; mov_imm     ] mov\t%<w>0, 1
+  }
 )
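
Note (not part of the patch): operands matched without a constraint string
are simply absent from the "cons:" header, which is why *cmov<mode>_insn
lists only operands 0, 3 and 4; the match_operator and the cc_register
operand carried empty constraints in the old syntax:

    {@ [ cons: =0 , 3 , 4 ; attrs: type ]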
 
 ;; zero_extend version of above
 (define_insn "*cmovsi_insn_uxtw"
-  [(set (match_operand:DI 0 "register_operand" "=r,r,r,r,r,r,r")
+  [(set (match_operand:DI 0 "register_operand")
        (zero_extend:DI
         (if_then_else:SI
          (match_operator 1 "aarch64_comparison_operator"
-          [(match_operand 2 "cc_register" "") (const_int 0)])
-         (match_operand:SI 3 "aarch64_reg_zero_or_m1_or_1" "rZ,rZ,UsM,rZ,Ui1,UsM,Ui1")
-         (match_operand:SI 4 "aarch64_reg_zero_or_m1_or_1" "rZ,UsM,rZ,Ui1,rZ,UsM,Ui1"))))]
+          [(match_operand 2 "cc_register") (const_int 0)])
+         (match_operand:SI 3 "aarch64_reg_zero_or_m1_or_1")
+         (match_operand:SI 4 "aarch64_reg_zero_or_m1_or_1"))))]
   "!((operands[3] == const1_rtx && operands[4] == constm1_rtx)
      || (operands[3] == constm1_rtx && operands[4] == const1_rtx))"
   ;; Final two alternatives should be unreachable, but included for completeness
-  "@
-   csel\\t%w0, %w3, %w4, %m1
-   csinv\\t%w0, %w3, wzr, %m1
-   csinv\\t%w0, %w4, wzr, %M1
-   csinc\\t%w0, %w3, wzr, %m1
-   csinc\\t%w0, %w4, wzr, %M1
-   mov\\t%w0, -1
-   mov\\t%w0, 1"
-  [(set_attr "type" "csel, csel, csel, csel, csel, mov_imm, mov_imm")]
+  {@ [ cons: =0 , 3   , 4   ; attrs: type ]
+     [ r        , rZ  , rZ  ; csel        ] csel\t%w0, %w3, %w4, %m1
+     [ r        , rZ  , UsM ; csel        ] csinv\t%w0, %w3, wzr, %m1
+     [ r        , UsM , rZ  ; csel        ] csinv\t%w0, %w4, wzr, %M1
+     [ r        , rZ  , Ui1 ; csel        ] csinc\t%w0, %w3, wzr, %m1
+     [ r        , Ui1 , rZ  ; csel        ] csinc\t%w0, %w4, wzr, %M1
+     [ r        , UsM , UsM ; mov_imm     ] mov\t%w0, -1
+     [ r        , Ui1 , Ui1 ; mov_imm     ] mov\t%w0, 1
+  }
 )
 
 ;; There are two canonical forms for `cmp ? -1 : a`.
 )
 
 (define_insn "<optab><mode>3"
-  [(set (match_operand:GPI 0 "register_operand" "=r,rk,w")
-       (LOGICAL:GPI (match_operand:GPI 1 "register_operand" "%r,r,w")
-                    (match_operand:GPI 2 "aarch64_logical_operand" "r,<lconst>,w")))]
+  [(set (match_operand:GPI 0 "register_operand")
+       (LOGICAL:GPI (match_operand:GPI 1 "register_operand")
+                    (match_operand:GPI 2 "aarch64_logical_operand")))]
   ""
-  "@
-  <logical>\\t%<w>0, %<w>1, %<w>2
-  <logical>\\t%<w>0, %<w>1, %2
-  <logical>\\t%0.<Vbtype>, %1.<Vbtype>, %2.<Vbtype>"
-  [(set_attr "type" "logic_reg,logic_imm,neon_logic")
-   (set_attr "arch" "*,*,simd")]
+  {@ [ cons: =0 , 1  , 2        ; attrs: type , arch  ]
+     [ r        , %r , r        ; logic_reg   , *     ] <logical>\t%<w>0, %<w>1, %<w>2
+     [ rk       , r  , <lconst> ; logic_imm   , *     ] <logical>\t%<w>0, %<w>1, %2
+     [ w        , w  , w        ; neon_logic  , simd  ] <logical>\t%0.<Vbtype>, %1.<Vbtype>, %2.<Vbtype>
+  }
 )
 
 ;; zero_extend version of above
 (define_insn "*<optab>si3_uxtw"
-  [(set (match_operand:DI 0 "register_operand" "=r,rk")
+  [(set (match_operand:DI 0 "register_operand")
        (zero_extend:DI
-         (LOGICAL:SI (match_operand:SI 1 "register_operand" "%r,r")
-                    (match_operand:SI 2 "aarch64_logical_operand" "r,K"))))]
+         (LOGICAL:SI (match_operand:SI 1 "register_operand")
+                    (match_operand:SI 2 "aarch64_logical_operand"))))]
   ""
-  "@
-   <logical>\\t%w0, %w1, %w2
-   <logical>\\t%w0, %w1, %2"
-  [(set_attr "type" "logic_reg,logic_imm")]
+  {@ [ cons: =0 , 1  , 2 ; attrs: type ]
+     [ r        , %r , r ; logic_reg   ] <logical>\t%w0, %w1, %w2
+     [ rk       , r  , K ; logic_imm   ] <logical>\t%w0, %w1, %2
+  }
 )
 
 (define_insn "*and<mode>3_compare0"
   [(set (reg:CC_NZV CC_REGNUM)
        (compare:CC_NZV
-        (and:GPI (match_operand:GPI 1 "register_operand" "%r,r")
-                 (match_operand:GPI 2 "aarch64_logical_operand" "r,<lconst>"))
+        (and:GPI (match_operand:GPI 1 "register_operand")
+                 (match_operand:GPI 2 "aarch64_logical_operand"))
         (const_int 0)))
-   (set (match_operand:GPI 0 "register_operand" "=r,r")
+   (set (match_operand:GPI 0 "register_operand")
        (and:GPI (match_dup 1) (match_dup 2)))]
   ""
-  "@
-   ands\\t%<w>0, %<w>1, %<w>2
-   ands\\t%<w>0, %<w>1, %2"
-  [(set_attr "type" "logics_reg,logics_imm")]
+  {@ [ cons: =0 , 1  , 2        ; attrs: type ]
+     [ r        , %r , r        ; logics_reg  ] ands\t%<w>0, %<w>1, %<w>2
+     [ r        , r  , <lconst> ; logics_imm  ] ands\t%<w>0, %<w>1, %2
+  }
 )
 
 ;; zero_extend version of above
 (define_insn "*andsi3_compare0_uxtw"
   [(set (reg:CC_NZV CC_REGNUM)
        (compare:CC_NZV
-        (and:SI (match_operand:SI 1 "register_operand" "%r,r")
-                (match_operand:SI 2 "aarch64_logical_operand" "r,K"))
+        (and:SI (match_operand:SI 1 "register_operand")
+                (match_operand:SI 2 "aarch64_logical_operand"))
         (const_int 0)))
-   (set (match_operand:DI 0 "register_operand" "=r,r")
+   (set (match_operand:DI 0 "register_operand")
        (zero_extend:DI (and:SI (match_dup 1) (match_dup 2))))]
   ""
-  "@
-   ands\\t%w0, %w1, %w2
-   ands\\t%w0, %w1, %2"
-  [(set_attr "type" "logics_reg,logics_imm")]
+  {@ [ cons: =0 , 1  , 2 ; attrs: type ]
+     [ r        , %r , r ; logics_reg  ] ands\t%w0, %w1, %w2
+     [ r        , r  , K ; logics_imm  ] ands\t%w0, %w1, %2
+  }
 )
 
 (define_insn "*and_<SHIFT:optab><mode>3_compare0"
 )
 
 (define_insn "one_cmpl<mode>2"
-  [(set (match_operand:GPI 0 "register_operand" "=r,w")
-       (not:GPI (match_operand:GPI 1 "register_operand" "r,w")))]
+  [(set (match_operand:GPI 0 "register_operand")
+       (not:GPI (match_operand:GPI 1 "register_operand")))]
   ""
-  "@
-  mvn\\t%<w>0, %<w>1
-  mvn\\t%0.8b, %1.8b"
-  [(set_attr "type" "logic_reg,neon_logic")
-   (set_attr "arch" "*,simd")]
+  {@ [ cons: =0 , 1 ; attrs: type , arch  ]
+     [ r        , r ; logic_reg   , *     ] mvn\t%<w>0, %<w>1
+     [ w        , w ; neon_logic  , simd  ] mvn\t%0.8b, %1.8b
+  }
 )
 
 (define_insn "*one_cmpl_zero_extend"
 ;; Binary logical operators negating one operand, i.e. (a & !b), (a | !b).
 
 (define_insn "*<NLOGICAL:optab>_one_cmpl<mode>3"
-  [(set (match_operand:GPI 0 "register_operand" "=r,w")
-       (NLOGICAL:GPI (not:GPI (match_operand:GPI 1 "register_operand" "r,w"))
-                    (match_operand:GPI 2 "register_operand" "r,w")))]
+  [(set (match_operand:GPI 0 "register_operand")
+       (NLOGICAL:GPI (not:GPI (match_operand:GPI 1 "register_operand"))
+                    (match_operand:GPI 2 "register_operand")))]
   ""
-  "@
-  <NLOGICAL:nlogical>\\t%<w>0, %<w>2, %<w>1
-  <NLOGICAL:nlogical>\\t%0.<Vbtype>, %2.<Vbtype>, %1.<Vbtype>"
-  [(set_attr "type" "logic_reg,neon_logic")
-   (set_attr "arch" "*,simd")]
+  {@ [ cons: =0 , 1 , 2 ; attrs: type , arch  ]
+     [ r        , r , r ; logic_reg   , *     ] <NLOGICAL:nlogical>\t%<w>0, %<w>2, %<w>1
+     [ w        , w , w ; neon_logic  , simd  ] <NLOGICAL:nlogical>\t%0.<Vbtype>, %2.<Vbtype>, %1.<Vbtype>
+  }
 )
 
 (define_insn "*<NLOGICAL:optab>_one_cmplsidi3_ze"
 (define_insn "*and<mode>3nr_compare0"
   [(set (reg:CC_NZV CC_REGNUM)
        (compare:CC_NZV
-        (and:GPI (match_operand:GPI 0 "register_operand" "%r,r")
-                 (match_operand:GPI 1 "aarch64_logical_operand" "r,<lconst>"))
+        (and:GPI (match_operand:GPI 0 "register_operand")
+                 (match_operand:GPI 1 "aarch64_logical_operand"))
         (const_int 0)))]
   ""
-  "@
-   tst\\t%<w>0, %<w>1
-   tst\\t%<w>0, %1"
-  [(set_attr "type" "logics_reg,logics_imm")]
+  {@ [ cons: 0 , 1        ; attrs: type ]
+     [ %r      , r        ; logics_reg  ] tst\t%<w>0, %<w>1
+     [ r       , <lconst> ; logics_imm  ] tst\t%<w>0, %1
+  }
 )
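
Note (not part of the patch): constraint modifiers keep their usual meaning
and position: "=" moves into the header entry ("cons: =0"), while "%"
(commutative) and "&" (earlyclobber) stay inside the rows, as in the first
alternative of *and<mode>3nr_compare0 above and the "&w" rows of the shift
patterns below:

    [ %r      , r        ; logics_reg  ] tst\t%<w>0, %<w>1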
 
 (define_split
 
 ;; Logical left shift using SISD or Integer instruction
 (define_insn "*aarch64_ashl_sisd_or_int_<mode>3"
-  [(set (match_operand:GPI 0 "register_operand" "=r,r,w,w")
+  [(set (match_operand:GPI 0 "register_operand")
        (ashift:GPI
-         (match_operand:GPI 1 "register_operand" "r,r,w,w")
-         (match_operand:QI 2 "aarch64_reg_or_shift_imm_<mode>" "Us<cmode>,r,Us<cmode>,w")))]
+         (match_operand:GPI 1 "register_operand")
+         (match_operand:QI 2 "aarch64_reg_or_shift_imm_<mode>")))]
   ""
-  "@
-   lsl\t%<w>0, %<w>1, %2
-   lsl\t%<w>0, %<w>1, %<w>2
-   shl\t%<rtn>0<vas>, %<rtn>1<vas>, %2
-   ushl\t%<rtn>0<vas>, %<rtn>1<vas>, %<rtn>2<vas>"
-  [(set_attr "type" "bfx,shift_reg,neon_shift_imm<q>, neon_shift_reg<q>")
-   (set_attr "arch" "*,*,simd,simd")]
+  {@ [ cons: =0 , 1 , 2         ; attrs: type       , arch  ]
+     [ r        , r , Us<cmode> ; bfx               , *     ] lsl\t%<w>0, %<w>1, %2
+     [ r        , r , r         ; shift_reg         , *     ] lsl\t%<w>0, %<w>1, %<w>2
+     [ w        , w , Us<cmode> ; neon_shift_imm<q> , simd  ] shl\t%<rtn>0<vas>, %<rtn>1<vas>, %2
+     [ w        , w , w         ; neon_shift_reg<q> , simd  ] ushl\t%<rtn>0<vas>, %<rtn>1<vas>, %<rtn>2<vas>
+  }
 )
 
 ;; Logical right shift using SISD or Integer instruction
 (define_insn "*aarch64_lshr_sisd_or_int_<mode>3"
-  [(set (match_operand:GPI 0 "register_operand" "=r,r,w,&w,&w")
+  [(set (match_operand:GPI 0 "register_operand")
        (lshiftrt:GPI
-        (match_operand:GPI 1 "register_operand" "r,r,w,w,w")
-        (match_operand:QI 2 "aarch64_reg_or_shift_imm_<mode>"
-                             "Us<cmode>,r,Us<cmode_simd>,w,0")))]
-  ""
-  "@
-   lsr\t%<w>0, %<w>1, %2
-   lsr\t%<w>0, %<w>1, %<w>2
-   ushr\t%<rtn>0<vas>, %<rtn>1<vas>, %2
-   #
-   #"
-  [(set_attr "type" "bfx,shift_reg,neon_shift_imm<q>,neon_shift_reg<q>,neon_shift_reg<q>")
-   (set_attr "arch" "*,*,simd,simd,simd")]
+        (match_operand:GPI 1 "register_operand")
+        (match_operand:QI 2 "aarch64_reg_or_shift_imm_<mode>")))]
+  ""
+  {@ [ cons: =0 , 1 , 2              ; attrs: type       , arch  ]
+     [ r        , r , Us<cmode>      ; bfx               , *     ] lsr\t%<w>0, %<w>1, %2
+     [ r        , r , r              ; shift_reg         , *     ] lsr\t%<w>0, %<w>1, %<w>2
+     [ w        , w , Us<cmode_simd> ; neon_shift_imm<q> , simd  ] ushr\t%<rtn>0<vas>, %<rtn>1<vas>, %2
+     [ &w       , w , w              ; neon_shift_reg<q> , simd  ] #
+     [ &w       , w , 0              ; neon_shift_reg<q> , simd  ] #
+  }
 )
 
 (define_split
 
 ;; Arithmetic right shift using SISD or Integer instruction
 (define_insn "*aarch64_ashr_sisd_or_int_<mode>3"
-  [(set (match_operand:GPI 0 "register_operand" "=r,r,w,&w,&w")
+  [(set (match_operand:GPI 0 "register_operand")
        (ashiftrt:GPI
-         (match_operand:GPI 1 "register_operand" "r,r,w,w,w")
-         (match_operand:QI 2 "aarch64_reg_or_shift_imm_di"
-                              "Us<cmode>,r,Us<cmode_simd>,w,0")))]
-  ""
-  "@
-   asr\t%<w>0, %<w>1, %2
-   asr\t%<w>0, %<w>1, %<w>2
-   sshr\t%<rtn>0<vas>, %<rtn>1<vas>, %2
-   #
-   #"
-  [(set_attr "type" "bfx,shift_reg,neon_shift_imm<q>,neon_shift_reg<q>,neon_shift_reg<q>")
-   (set_attr "arch" "*,*,simd,simd,simd")]
+         (match_operand:GPI 1 "register_operand")
+         (match_operand:QI 2 "aarch64_reg_or_shift_imm_di")))]
+  ""
+  {@ [ cons: =0 , 1 , 2              ; attrs: type       , arch  ]
+     [ r        , r , Us<cmode>      ; bfx               , *     ] asr\t%<w>0, %<w>1, %2
+     [ r        , r , r              ; shift_reg         , *     ] asr\t%<w>0, %<w>1, %<w>2
+     [ w        , w , Us<cmode_simd> ; neon_shift_imm<q> , simd  ] sshr\t%<rtn>0<vas>, %<rtn>1<vas>, %2
+     [ &w       , w , w              ; neon_shift_reg<q> , simd  ] #
+     [ &w       , w , 0              ; neon_shift_reg<q> , simd  ] #
+  }
 )
 
 (define_split
 
 ;; Rotate right
 (define_insn "*ror<mode>3_insn"
-  [(set (match_operand:GPI 0 "register_operand" "=r,r")
+  [(set (match_operand:GPI 0 "register_operand")
      (rotatert:GPI
-       (match_operand:GPI 1 "register_operand" "r,r")
-       (match_operand:QI 2 "aarch64_reg_or_shift_imm_<mode>" "Us<cmode>,r")))]
+       (match_operand:GPI 1 "register_operand")
+       (match_operand:QI 2 "aarch64_reg_or_shift_imm_<mode>")))]
   ""
-  "@
-   ror\\t%<w>0, %<w>1, %2
-   ror\\t%<w>0, %<w>1, %<w>2"
-  [(set_attr "type" "rotate_imm,shift_reg")]
+  {@ [ cons: =0 , 1 , 2         ; attrs: type ]
+     [ r        , r , Us<cmode> ; rotate_imm  ] ror\t%<w>0, %<w>1, %2
+     [ r        , r , r         ; shift_reg   ] ror\t%<w>0, %<w>1, %<w>2
+  }
 )
 
 (define_insn "*rol<mode>3_insn"
 
 ;; zero_extend version of shifts
 (define_insn "*<optab>si3_insn_uxtw"
-  [(set (match_operand:DI 0 "register_operand" "=r,r")
+  [(set (match_operand:DI 0 "register_operand")
        (zero_extend:DI (SHIFT_no_rotate:SI
-        (match_operand:SI 1 "register_operand" "r,r")
-        (match_operand:QI 2 "aarch64_reg_or_shift_imm_si" "Uss,r"))))]
+        (match_operand:SI 1 "register_operand")
+        (match_operand:QI 2 "aarch64_reg_or_shift_imm_si"))))]
   ""
-  "@
-   <shift>\\t%w0, %w1, %2
-   <shift>\\t%w0, %w1, %w2"
-  [(set_attr "type" "bfx,shift_reg")]
+  {@ [ cons: =0 , 1 , 2   ; attrs: type ]
+     [ r        , r , Uss ; bfx         ] <shift>\t%w0, %w1, %2
+     [ r        , r , r   ; shift_reg   ] <shift>\t%w0, %w1, %w2
+  }
 )
 
 ;; zero_extend version of rotate right
 ;; and making r = w more expensive
 
 (define_insn "<optab>_trunc<fcvt_target><GPI:mode>2"
-  [(set (match_operand:GPI 0 "register_operand" "=w,?r")
-       (FIXUORS:GPI (match_operand:<FCVT_TARGET> 1 "register_operand" "w,w")))]
+  [(set (match_operand:GPI 0 "register_operand")
+       (FIXUORS:GPI (match_operand:<FCVT_TARGET> 1 "register_operand")))]
   "TARGET_FLOAT"
-  "@
-   fcvtz<su>\t%<s>0, %<s>1
-   fcvtz<su>\t%<w>0, %<s>1"
-  [(set_attr "type" "neon_fp_to_int_s,f_cvtf2i")
-   (set_attr "arch" "simd,fp")]
+  {@ [ cons: =0 , 1 ; attrs: type      , arch  ]
+     [ w        , w ; neon_fp_to_int_s , simd  ] fcvtz<su>\t%<s>0, %<s>1
+     [ ?r       , w ; f_cvtf2i         , fp    ] fcvtz<su>\t%<w>0, %<s>1
+  }
 )
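
Note (not part of the patch): the "?" disparagement marker likewise carries
over unchanged inside its column; here it keeps the general-register
destination of the FP-to-integer conversion less preferred than the SIMD
one for register allocation:

    [ ?r       , w ; f_cvtf2i         , fp    ] fcvtz<su>\t%<w>0, %<s>1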
 
 ;; Convert HF -> SI or DI
 
 ;; Equal width integer to fp conversion.
 (define_insn "<optab><fcvt_target><GPF:mode>2"
-  [(set (match_operand:GPF 0 "register_operand" "=w,w")
-        (FLOATUORS:GPF (match_operand:<FCVT_TARGET> 1 "register_operand" "w,?r")))]
+  [(set (match_operand:GPF 0 "register_operand")
+        (FLOATUORS:GPF (match_operand:<FCVT_TARGET> 1 "register_operand")))]
   "TARGET_FLOAT"
-  "@
-   <su_optab>cvtf\t%<GPF:s>0, %<s>1
-   <su_optab>cvtf\t%<GPF:s>0, %<w1>1"
-  [(set_attr "type" "neon_int_to_fp_<Vetype>,f_cvti2f")
-   (set_attr "arch" "simd,fp")]
+  {@ [ cons: =0 , 1  ; attrs: type             , arch  ]
+     [ w        , w  ; neon_int_to_fp_<Vetype> , simd  ] <su_optab>cvtf\t%<GPF:s>0, %<s>1
+     [ w        , ?r ; f_cvti2f                , fp    ] <su_optab>cvtf\t%<GPF:s>0, %<w1>1
+  }
 )
 
 ;; Unequal width integer to fp conversions.
 ;; Convert between fixed-point and floating-point (scalar modes)
 
 (define_insn "<FCVT_F2FIXED:fcvt_fixed_insn><GPF:mode>3"
-  [(set (match_operand:<GPF:FCVT_TARGET> 0 "register_operand" "=r, w")
-       (unspec:<GPF:FCVT_TARGET> [(match_operand:GPF 1 "register_operand" "w, w")
-                                  (match_operand:SI 2 "immediate_operand" "i, i")]
+  [(set (match_operand:<GPF:FCVT_TARGET> 0 "register_operand")
+       (unspec:<GPF:FCVT_TARGET> [(match_operand:GPF 1 "register_operand")
+                                  (match_operand:SI 2 "immediate_operand")]
         FCVT_F2FIXED))]
   ""
-  "@
-   <FCVT_F2FIXED:fcvt_fixed_insn>\t%<GPF:w1>0, %<GPF:s>1, #%2
-   <FCVT_F2FIXED:fcvt_fixed_insn>\t%<GPF:s>0, %<GPF:s>1, #%2"
-  [(set_attr "type" "f_cvtf2i, neon_fp_to_int_<GPF:Vetype>")
-   (set_attr "arch" "fp,simd")]
+  {@ [ cons: =0 , 1 , 2 ; attrs: type                 , arch  ]
+     [ r        , w , i ; f_cvtf2i                    , fp    ] <FCVT_F2FIXED:fcvt_fixed_insn>\t%<GPF:w1>0, %<GPF:s>1, #%2
+     [ w        , w , i ; neon_fp_to_int_<GPF:Vetype> , simd  ] <FCVT_F2FIXED:fcvt_fixed_insn>\t%<GPF:s>0, %<GPF:s>1, #%2
+  }
 )
 
 (define_insn "<FCVT_FIXED2F:fcvt_fixed_insn><GPI:mode>3"
-  [(set (match_operand:<GPI:FCVT_TARGET> 0 "register_operand" "=w, w")
-       (unspec:<GPI:FCVT_TARGET> [(match_operand:GPI 1 "register_operand" "r, w")
-                                  (match_operand:SI 2 "immediate_operand" "i, i")]
+  [(set (match_operand:<GPI:FCVT_TARGET> 0 "register_operand")
+       (unspec:<GPI:FCVT_TARGET> [(match_operand:GPI 1 "register_operand")
+                                  (match_operand:SI 2 "immediate_operand")]
         FCVT_FIXED2F))]
   ""
-  "@
-   <FCVT_FIXED2F:fcvt_fixed_insn>\t%<GPI:v>0, %<GPI:w>1, #%2
-   <FCVT_FIXED2F:fcvt_fixed_insn>\t%<GPI:v>0, %<GPI:v>1, #%2"
-  [(set_attr "type" "f_cvti2f, neon_int_to_fp_<GPI:Vetype>")
-   (set_attr "arch" "fp,simd")]
+  {@ [ cons: =0 , 1 , 2 ; attrs: type                 , arch  ]
+     [ w        , r , i ; f_cvti2f                    , fp    ] <FCVT_FIXED2F:fcvt_fixed_insn>\t%<GPI:v>0, %<GPI:w>1, #%2
+     [ w        , w , i ; neon_int_to_fp_<GPI:Vetype> , simd  ] <FCVT_FIXED2F:fcvt_fixed_insn>\t%<GPI:v>0, %<GPI:v>1, #%2
+  }
 )
 
 (define_insn "<FCVT_F2FIXED:fcvt_fixed_insn>hf<mode>3"
 )
 
 (define_insn "*aarch64_<optab><mode>3_cssc"
-  [(set (match_operand:GPI 0 "register_operand" "=r,r")
-        (MAXMIN:GPI (match_operand:GPI 1 "register_operand" "r,r")
-               (match_operand:GPI 2 "aarch64_<su>minmax_operand" "r,U<su>m")))]
+  [(set (match_operand:GPI 0 "register_operand")
+        (MAXMIN:GPI (match_operand:GPI 1 "register_operand")
+               (match_operand:GPI 2 "aarch64_<su>minmax_operand")))]
   "TARGET_CSSC"
-  "@
-   <optab>\\t%<w>0, %<w>1, %<w>2
-   <optab>\\t%<w>0, %<w>1, %2"
-  [(set_attr "type" "alu_sreg,alu_imm")]
+  {@ [ cons: =0 , 1 , 2      ; attrs: type ]
+     [ r        , r , r      ; alu_sreg    ] <optab>\t%<w>0, %<w>1, %<w>2
+     [ r        , r , U<su>m ; alu_imm     ] <optab>\t%<w>0, %<w>1, %2
+  }
 )
 
 (define_insn "*aarch64_<optab><mode>3_zero"
 )
 
 (define_insn "copysign<GPF:mode>3_insn"
-  [(set (match_operand:GPF 0 "register_operand" "=w,w,w,r")
-       (unspec:GPF [(match_operand:GPF 1 "register_operand" "w,0,w,r")
-                    (match_operand:GPF 2 "register_operand" "w,w,0,0")
-                    (match_operand:<V_INT_EQUIV> 3 "register_operand" "0,w,w,X")]
+  [(set (match_operand:GPF 0 "register_operand")
+       (unspec:GPF [(match_operand:GPF 1 "register_operand")
+                    (match_operand:GPF 2 "register_operand")
+                    (match_operand:<V_INT_EQUIV> 3 "register_operand")]
         UNSPEC_COPYSIGN))]
   "TARGET_SIMD"
-  "@
-   bsl\\t%0.<Vbtype>, %2.<Vbtype>, %1.<Vbtype>
-   bit\\t%0.<Vbtype>, %2.<Vbtype>, %3.<Vbtype>
-   bif\\t%0.<Vbtype>, %1.<Vbtype>, %3.<Vbtype>
-   bfxil\\t%<w1>0, %<w1>1, #0, <sizem1>"
-  [(set_attr "type" "neon_bsl<q>,neon_bsl<q>,neon_bsl<q>,bfm")]
+  {@ [ cons: =0 , 1 , 2 , 3 ; attrs: type  ]
+     [ w        , w , w , 0 ; neon_bsl<q>  ] bsl\t%0.<Vbtype>, %2.<Vbtype>, %1.<Vbtype>
+     [ w        , 0 , w , w ; neon_bsl<q>  ] bit\t%0.<Vbtype>, %2.<Vbtype>, %3.<Vbtype>
+     [ w        , w , 0 , w ; neon_bsl<q>  ] bif\t%0.<Vbtype>, %1.<Vbtype>, %3.<Vbtype>
+     [ r        , r , 0 , X ; bfm          ] bfxil\t%<w1>0, %<w1>1, #0, <sizem1>
+  }
 )
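
Note (not part of the patch): matching constraints also transfer row by row;
a digit ties an operand to the register of the numbered operand (here "0"
forces operand 2 to reuse operand 0) and "X" accepts any operand, as in the
bfxil alternative of copysign<GPF:mode>3_insn:

    [ r        , r , 0 , X ; bfm          ] bfxil\t%<w1>0, %<w1>1, #0, <sizem1>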