From: Richard Earnshaw
Date: Mon, 14 Apr 2025 15:41:16 +0000 (+0100)
Subject: aarch64: Fix up commutative and early-clobber markers on compact insns
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=f260146bc05f6fba7b2a67a62063c770588b769d;p=thirdparty%2Fgcc.git

aarch64: Fix up commutative and early-clobber markers on compact insns

For constraints there are operand modifiers and constraint qualifiers.
Operand modifiers apply to all alternatives and, in the traditional
syntax, must appear before the first alternative.  Constraint
qualifiers, on the other hand, must appear in each alternative to which
they apply.

There's no easy way to validate the distinction in the traditional md
format, but when using the new compact format we can enforce some
semantic checking of these characters to avoid some potentially
surprising code generation.

Fortunately, all of these errors are benign, but the two misplaced
early-clobber markers were quite suspicious at first sight - it's only
by luck that the second alternative does not need an early-clobber.

The syntax checking will be added in the following patch, but first
fix up the errors in aarch64.md.

gcc/
	* config/aarch64/aarch64-sve.md (@aarch64_pred_): Move
	commutative marker to the cons specification.
	(add3): Likewise.
	(@aarch64_pred_abd): Likewise.
	(@aarch64_pred_): Likewise.
	(*cond__z): Likewise.
	(3): Likewise.
	(@aarch64_pred_): Likewise.
	(*aarch64_pred_abd_relaxed): Likewise.
	(*aarch64_pred_abd_strict): Likewise.
	(@aarch64_pred_): Likewise.
	(@aarch64_pred_): Likewise.
	(@aarch64_pred_fma): Likewise.
	(@aarch64_pred_fnma): Likewise.
	(@aarch64_pred_): Likewise.
	* config/aarch64/aarch64-sve2.md (@aarch64_sve_clamp): Move
	commutative marker to the cons specification.
	(*aarch64_sve_clamp_x): Likewise.
	(@aarch64_sve_fclamp): Likewise.
	(*aarch64_sve_fclamp_x): Likewise.
	(*aarch64_sve2_nor): Likewise.
	(*aarch64_sve2_nand): Likewise.
	(*aarch64_pred_faminmax_fused): Likewise.
	* config/aarch64/aarch64.md (*loadwb_pre_pair_): Move the
	early-clobber marker to the relevant alternative.
	(*storewb_pre_pair_): Likewise.
	(*add3_aarch64): Move commutative marker to the cons
	specification.
	(*addsi3_aarch64_uxtw): Likewise.
	(*add3_poly_1): Likewise.
	(add3_compare0): Likewise.
	(*addsi3_compare0_uxtw): Likewise.
	(*add3nr_compare0): Likewise.
	(3): Likewise.
	(*si3_uxtw): Likewise.
	(*and3_compare0): Likewise.
	(*andsi3_compare0_uxtw): Likewise.
	(@aarch64_and3nr_compare0): Likewise.
---

diff --git a/gcc/config/aarch64/aarch64-sve.md b/gcc/config/aarch64/aarch64-sve.md
index f39af6e24d5..bf0e57df62d 100644
--- a/gcc/config/aarch64/aarch64-sve.md
+++ b/gcc/config/aarch64/aarch64-sve.md
@@ -3984,8 +3984,8 @@
 	     (match_operand:SVE_I_SIMD_DI 3 "aarch64_sve__operand"))]
 	  UNSPEC_PRED_X))]
   "TARGET_SVE"
-  {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
-     [ w , Upl , %0 ,  ; * ] #
+  {@ [ cons: =0 , 1 , %2 , 3 ; attrs: movprfx ]
+     [ w , Upl , 0 ,  ; * ] #
      [ w , Upl , 0 , w ; * ] \t%Z0., %1/m, %Z0., %Z3.
      [ ?&w , Upl , w ,  ; yes ] #
      [ ?&w , Upl , w , w ; yes ] movprfx\t%Z0, %Z2\;\t%Z0., %1/m, %Z0., %Z3.
@@ -4114,8 +4114,8 @@
 	  (match_operand:SVE_I 1 "register_operand")
 	  (match_operand:SVE_I 2 "aarch64_sve_add_operand")))]
   "TARGET_SVE"
-  {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
-     [ w , %0 , vsa ; * ] add\t%0., %0., #%D2
+  {@ [ cons: =0 , %1 , 2 ; attrs: movprfx ]
+     [ w , 0 , vsa ; * ] add\t%0., %0., #%D2
      [ w , 0 , vsn ; * ] sub\t%0., %0., #%N2
      [ w , 0 , vsi ; * ] << aarch64_output_sve_vector_inc_dec ("%0.", operands[2]);
      [ ?w , w , vsa ; yes ] movprfx\t%0, %1\;add\t%0., %0., #%D2
@@ -4333,8 +4333,8 @@
 	     (match_dup 3))]
 	  UNSPEC_PRED_X)))]
   "TARGET_SVE"
-  {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
-     [ w , Upl , %0 , w ; * ] abd\t%0., %1/m, %0., %3.
+  {@ [ cons: =0 , 1 , %2 , 3 ; attrs: movprfx ]
+     [ w , Upl , 0 , w ; * ] abd\t%0., %1/m, %0., %3.
      [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %2\;abd\t%0., %1/m, %0., %3.
   }
 )
@@ -4548,8 +4548,8 @@
 	     MUL_HIGHPART)]
 	  UNSPEC_PRED_X))]
   "TARGET_SVE"
-  {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
-     [ w , Upl , %0 , w ; * ] mulh\t%0., %1/m, %0., %3.
+  {@ [ cons: =0 , 1 , %2 , 3 ; attrs: movprfx ]
+     [ w , Upl , 0 , w ; * ] mulh\t%0., %1/m, %0., %3.
      [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %2\;mulh\t%0., %1/m, %0., %3.
   }
 )
@@ -4603,8 +4603,8 @@
 	   (match_operand:SVE_FULL_I 4 "aarch64_simd_imm_zero")]
 	  UNSPEC_SEL))]
   "TARGET_SVE"
-  {@ [ cons: =0 , 1 , 2 , 3 ]
-     [ &w , Upl , %0 , w ] movprfx\t%0., %1/z, %0.\;\t%0., %1/m, %0., %3.
+  {@ [ cons: =0 , 1 , %2 , 3 ]
+     [ &w , Upl , 0 , w ] movprfx\t%0., %1/z, %0.\;\t%0., %1/m, %0., %3.
      [ &w , Upl , w , w ] movprfx\t%0., %1/z, %2.\;\t%0., %1/m, %0., %3.
   }
   [(set_attr "movprfx" "yes")])
@@ -4748,8 +4748,8 @@
 	  (match_operand:SVE_I 1 "register_operand")
 	  (match_operand:SVE_I 2 "aarch64_sve_logical_operand")))]
   "TARGET_SVE"
-  {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
-     [ w , %0 , vsl ; * ] \t%0., %0., #%C2
+  {@ [ cons: =0 , %1 , 2 ; attrs: movprfx ]
+     [ w , 0 , vsl ; * ] \t%0., %0., #%C2
      [ ?w , w , vsl ; yes ] movprfx\t%0, %1\;\t%0., %0., #%C2
      [ w , w , w ; * ] \t%0.d, %1.d, %2.d
   }
@@ -5788,8 +5788,8 @@
 	   (match_operand:SVE_FULL_F 3 "aarch64_sve_float_arith_with_sub_operand")]
 	  SVE_COND_FP_ADD))]
   "TARGET_SVE"
-  {@ [ cons: =0 , 1 , 2 , 3 , 4 ; attrs: movprfx ]
-     [ w , Upl , %0 , vsA , i ; * ] fadd\t%0., %1/m, %0., #%3
+  {@ [ cons: =0 , 1 , %2 , 3 , 4 ; attrs: movprfx ]
+     [ w , Upl , 0 , vsA , i ; * ] fadd\t%0., %1/m, %0., #%3
      [ w , Upl , 0 , vsN , i ; * ] fsub\t%0., %1/m, %0., #%N3
      [ w , Upl , w , w , Z ; * ] #
      [ w , Upl , 0 , w , Ui1 ; * ] fadd\t%0., %1/m, %0., %3.
@@ -6263,8 +6263,8 @@
 	     UNSPEC_COND_FSUB)]
 	  UNSPEC_COND_FABS))]
   "TARGET_SVE"
-  {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
-     [ w , Upl , %0 , w ; * ] fabd\t%0., %1/m, %0., %3.
+  {@ [ cons: =0 , 1 , %2 , 3 ; attrs: movprfx ]
+     [ w , Upl , 0 , w ; * ] fabd\t%0., %1/m, %0., %3.
      [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %2\;fabd\t%0., %1/m, %0., %3.
   }
   "&& !rtx_equal_p (operands[1], operands[5])"
@@ -6286,8 +6286,8 @@
 	     UNSPEC_COND_FSUB)]
 	  UNSPEC_COND_FABS))]
   "TARGET_SVE"
-  {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
-     [ w , Upl , %0 , w ; * ] fabd\t%0., %1/m, %0., %3.
+  {@ [ cons: =0 , 1 , %2 , 3 ; attrs: movprfx ]
+     [ w , Upl , 0 , w ; * ] fabd\t%0., %1/m, %0., %3.
      [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %2\;fabd\t%0., %1/m, %0., %3.
  }
 )
@@ -6527,8 +6527,8 @@
 	   (match_operand:SVE_FULL_F 3 "aarch64_sve_float_mul_operand")]
 	  SVE_COND_FP_MUL))]
   "TARGET_SVE"
-  {@ [ cons: =0 , 1 , 2 , 3 , 4 ; attrs: movprfx ]
-     [ w , Upl , %0 , vsM , i ; * ] fmul\t%0., %1/m, %0., #%3
+  {@ [ cons: =0 , 1 , %2 , 3 , 4 ; attrs: movprfx ]
+     [ w , Upl , 0 , vsM , i ; * ] fmul\t%0., %1/m, %0., #%3
      [ w , Upl , w , w , Z ; * ] #
      [ w , Upl , 0 , w , Ui1 ; * ] fmul\t%0., %1/m, %0., %3.
      [ ?&w , Upl , w , vsM , i ; yes ] movprfx\t%0, %2\;fmul\t%0., %1/m, %0., #%3
@@ -6761,8 +6761,8 @@
 	   (match_operand:SVE_FULL_F 3 "aarch64_sve_float_maxmin_operand")]
 	  SVE_COND_FP_MAXMIN))]
   "TARGET_SVE"
-  {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
-     [ w , Upl , %0 , vsB ; * ] \t%0., %1/m, %0., #%3
+  {@ [ cons: =0 , 1 , %2 , 3 ; attrs: movprfx ]
+     [ w , Upl , 0 , vsB ; * ] \t%0., %1/m, %0., #%3
      [ w , Upl , 0 , w ; * ] \t%0., %1/m, %0., %3.
      [ ?&w , Upl , w , vsB ; yes ] movprfx\t%0, %2\;\t%0., %1/m, %0., #%3
      [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %2\;\t%0., %1/m, %0., %3.
@@ -7070,8 +7070,8 @@
 	     UNSPEC_PRED_X)
 	   (match_operand:SVE_I 4 "register_operand")))]
   "TARGET_SVE"
-  {@ [ cons: =0 , 1 , 2 , 3 , 4 ; attrs: movprfx ]
-     [ w , Upl , %0 , w , w ; * ] mad\t%0., %1/m, %3., %4.
+  {@ [ cons: =0 , 1 , %2 , 3 , 4 ; attrs: movprfx ]
+     [ w , Upl , 0 , w , w ; * ] mad\t%0., %1/m, %3., %4.
      [ w , Upl , w , w , 0 ; * ] mla\t%0., %1/m, %2., %3.
      [ ?&w , Upl , w , w , w ; yes ] movprfx\t%0, %4\;mla\t%0., %1/m, %2., %3.
   }
@@ -7212,8 +7212,8 @@
 	     (match_operand:SVE_I 3 "register_operand"))]
 	  UNSPEC_PRED_X)))]
   "TARGET_SVE"
-  {@ [ cons: =0 , 1 , 2 , 3 , 4 ; attrs: movprfx ]
-     [ w , Upl , %0 , w , w ; * ] msb\t%0., %1/m, %3., %4.
+  {@ [ cons: =0 , 1 , %2 , 3 , 4 ; attrs: movprfx ]
+     [ w , Upl , 0 , w , w ; * ] msb\t%0., %1/m, %3., %4.
      [ w , Upl , w , w , 0 ; * ] mls\t%0., %1/m, %2., %3.
      [ ?&w , Upl , w , w , w ; yes ] movprfx\t%0, %4\;mls\t%0., %1/m, %2., %3.
   }
@@ -7494,8 +7494,8 @@
 	   (match_operand:SVE_FULL_F_BF 4 "register_operand")]
 	  SVE_COND_FP_TERNARY))]
   "TARGET_SVE && ( || !)"
-  {@ [ cons: =0 , 1 , 2 , 3 , 4 ; attrs: movprfx , is_rev ]
-     [ w , Upl , %w , w , 0 ; * , * ] \t%0., %1/m, %2., %3.
+  {@ [ cons: =0 , 1 , %2 , 3 , 4 ; attrs: movprfx , is_rev ]
+     [ w , Upl , w , w , 0 ; * , * ] \t%0., %1/m, %2., %3.
      [ w , Upl , 0 , w , w ; * , true ] \t%0., %1/m, %3., %4.
      [ ?&w , Upl , w , w , w ; yes , * ] movprfx\t%0, %4\;\t%0., %1/m, %2., %3.
   }
diff --git a/gcc/config/aarch64/aarch64-sve2.md b/gcc/config/aarch64/aarch64-sve2.md
index 3e08e092cd0..871cf0bd2e8 100644
--- a/gcc/config/aarch64/aarch64-sve2.md
+++ b/gcc/config/aarch64/aarch64-sve2.md
@@ -784,8 +784,8 @@
 	   (match_operand:SVE_FULL_I 2 "register_operand"))
 	 (match_operand:SVE_FULL_I 3 "register_operand")))]
   "TARGET_SVE2p1_OR_SME"
-  {@ [cons: =0, 1, 2, 3; attrs: movprfx]
-     [ w, %0, w, w; * ] clamp\t%0., %2., %3.
+  {@ [cons: =0, %1, 2, 3; attrs: movprfx]
+     [ w, 0, w, w; * ] clamp\t%0., %2., %3.
      [ ?&w, w, w, w; yes ] movprfx\t%0, %1\;clamp\t%0., %2., %3.
   }
 )
@@ -804,8 +804,8 @@
 	     (match_operand:SVE_FULL_I 3 "register_operand"))]
 	  UNSPEC_PRED_X))]
   "TARGET_SVE2p1_OR_SME"
-  {@ [cons: =0, 1, 2, 3; attrs: movprfx]
-     [ w, %0, w, w; * ] #
+  {@ [cons: =0, %1, 2, 3; attrs: movprfx]
+     [ w, 0, w, w; * ] #
      [ ?&w, w, w, w; yes ] #
   }
   "&& true"
@@ -1373,8 +1373,8 @@
 	   (match_operand:SVE_CLAMP_F 3 "register_operand")]
 	  UNSPEC_FMINNM))]
   ""
-  {@ [cons: =0, 1, 2, 3; attrs: movprfx]
-     [ w, %0, w, w; * ] fclamp\t%0., %2., %3.
+  {@ [cons: =0, %1, 2, 3; attrs: movprfx]
+     [ w, 0, w, w; * ] fclamp\t%0., %2., %3.
      [ ?&w, w, w, w; yes ] movprfx\t%0, %1\;fclamp\t%0., %2., %3.
  }
 )
@@ -1393,8 +1393,8 @@
 	   (match_operand:SVE_CLAMP_F 3 "register_operand")]
 	  UNSPEC_COND_FMINNM))]
   ""
-  {@ [cons: =0, 1, 2, 3; attrs: movprfx]
-     [ w, %0, w, w; * ] #
+  {@ [cons: =0, %1, 2, 3; attrs: movprfx]
+     [ w, 0, w, w; * ] #
      [ ?&w, w, w, w; yes ] #
   }
   "&& true"
@@ -1626,8 +1626,8 @@
 	       (match_operand:SVE_FULL_I 2 "register_operand")))]
 	  UNSPEC_PRED_X))]
   "TARGET_SVE2"
-  {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
-     [ w , %0 , w ; * ] nbsl\t%0.d, %0.d, %2.d, %0.d
+  {@ [ cons: =0 , %1 , 2 ; attrs: movprfx ]
+     [ w , 0 , w ; * ] nbsl\t%0.d, %0.d, %2.d, %0.d
      [ ?&w , w , w ; yes ] movprfx\t%0, %1\;nbsl\t%0.d, %0.d, %2.d, %0.d
   }
   "&& !CONSTANT_P (operands[3])"
@@ -1648,8 +1648,8 @@
 	       (match_operand:SVE_FULL_I 2 "register_operand")))]
 	  UNSPEC_PRED_X))]
   "TARGET_SVE2"
-  {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
-     [ w , %0 , w ; * ] nbsl\t%0.d, %0.d, %2.d, %2.d
+  {@ [ cons: =0 , %1 , 2 ; attrs: movprfx ]
+     [ w , 0 , w ; * ] nbsl\t%0.d, %0.d, %2.d, %2.d
      [ ?&w , w , w ; yes ] movprfx\t%0, %1\;nbsl\t%0.d, %0.d, %2.d, %2.d
   }
   "&& !CONSTANT_P (operands[3])"
@@ -2951,8 +2951,8 @@
 	     UNSPEC_COND_FABS)]
 	  SVE_COND_SMAXMIN))]
   "TARGET_FAMINMAX && TARGET_SVE2_OR_SME2"
-  {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
-     [ w , Upl , %0 , w ; * ] \t%0., %1/m, %0., %3.
+  {@ [ cons: =0 , 1 , %2 , 3 ; attrs: movprfx ]
+     [ w , Upl , 0 , w ; * ] \t%0., %1/m, %0., %3.
      [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %2\;\t%0., %1/m, %0., %3.
   }
   "&& (!rtx_equal_p (operands[1], operands[5])
diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
index c678f7afb1a..5c30484e0c3 100644
--- a/gcc/config/aarch64/aarch64.md
+++ b/gcc/config/aarch64/aarch64.md
@@ -2182,9 +2182,9 @@
   "aarch64_mem_pair_offset (operands[4], mode)
    && known_eq (INTVAL (operands[5]),
		INTVAL (operands[4]) + GET_MODE_SIZE (mode))"
-  {@ [cons: =&0, 1, =2, =3; attrs: type ]
-     [ rk, 0, r, r; load_] ldp\t%2, %3, [%0, %4]!
-     [ rk, 0, w, w; neon_load1_2reg ] ldp\t%2, %3, [%0, %4]!
+  {@ [cons: =0, 1, =2, =3; attrs: type ]
+     [ &rk, 0, r, r; load_] ldp\t%2, %3, [%0, %4]!
+     [ rk, 0, w, w; neon_load1_2reg ] ldp\t%2, %3, [%0, %4]!
   }
 )
@@ -2238,9 +2238,9 @@
		 INTVAL (operands[4]) + GET_MODE_SIZE (mode))
    && !reg_overlap_mentioned_p (operands[0], operands[2])
    && !reg_overlap_mentioned_p (operands[0], operands[3])"
-  {@ [cons: =&0, 1, 2, 3; attrs: type ]
-     [ rk, 0, rYZ, rYZ; store_] stp\t%2, %3, [%0, %4]!
-     [ rk, 0, w, w; neon_store1_2reg ] stp\t%2, %3, [%0, %4]!
+  {@ [cons: =0, 1, 2, 3; attrs: type ]
+     [ &rk, 0, rYZ, rYZ; store_] stp\t%2, %3, [%0, %4]!
+     [ rk, 0, w, w; neon_store1_2reg ] stp\t%2, %3, [%0, %4]!
  }
 )
@@ -2486,15 +2486,15 @@
 	  (match_operand:GPI 1 "register_operand")
 	  (match_operand:GPI 2 "aarch64_pluslong_operand")))]
   ""
-  {@ [ cons: =0 , 1 , 2 ; attrs: type , arch ]
-     [ rk , %rk , I ; alu_imm , * ] add\t%0, %1, %2
-     [ rk , rk , r ; alu_sreg , * ] add\t%0, %1, %2
-     [ w , w , w ; neon_add , simd ] add\t%0, %1, %2
-     [ rk , rk , J ; alu_imm , * ] sub\t%0, %1, #%n2
-     [ r , rk , Uaa ; multiple , * ] #
-     [ r , 0 , Uai ; alu_imm , sve ] << aarch64_output_sve_scalar_inc_dec (operands[2]);
-     [ rk , rk , Uav ; alu_imm , sve ] << aarch64_output_sve_addvl_addpl (operands[2]);
-     [ rk , rk , UaV ; alu_imm , sme ] << aarch64_output_addsvl_addspl (operands[2]);
+  {@ [ cons: =0 , %1 , 2 ; attrs: type , arch ]
+     [ rk , rk , I ; alu_imm , * ] add\t%0, %1, %2
+     [ rk , rk , r ; alu_sreg , * ] add\t%0, %1, %2
+     [ w , w , w ; neon_add , simd ] add\t%0, %1, %2
+     [ rk , rk , J ; alu_imm , * ] sub\t%0, %1, #%n2
+     [ r , rk , Uaa ; multiple , * ] #
+     [ r , 0 , Uai ; alu_imm , sve ] << aarch64_output_sve_scalar_inc_dec (operands[2]);
+     [ rk , rk , Uav ; alu_imm , sve ] << aarch64_output_sve_addvl_addpl (operands[2]);
+     [ rk , rk , UaV ; alu_imm , sme ] << aarch64_output_addsvl_addspl (operands[2]);
   }
   ;; The "alu_imm" types for INC/DEC and ADDVL/ADDPL are just placeholders.
 )
@@ -2507,11 +2507,11 @@
 	  (plus:SI (match_operand:SI 1 "register_operand")
		   (match_operand:SI 2 "aarch64_pluslong_operand"))))]
   ""
-  {@ [ cons: =0 , 1 , 2 ; attrs: type ]
-     [ rk , %rk , I ; alu_imm ] add\t%w0, %w1, %2
-     [ rk , rk , r ; alu_sreg ] add\t%w0, %w1, %w2
-     [ rk , rk , J ; alu_imm ] sub\t%w0, %w1, #%n2
-     [ r , rk , Uaa ; multiple ] #
+  {@ [ cons: =0 , %1 , 2 ; attrs: type ]
+     [ rk , rk , I ; alu_imm ] add\t%w0, %w1, %2
+     [ rk , rk , r ; alu_sreg ] add\t%w0, %w1, %w2
+     [ rk , rk , J ; alu_imm ] sub\t%w0, %w1, #%n2
+     [ r , rk , Uaa ; multiple ] #
   }
 )
@@ -2580,14 +2580,14 @@
 	  (match_operand:GPI 1 "register_operand")
	  (match_operand:GPI 2 "aarch64_pluslong_or_poly_operand")))]
   "TARGET_SVE && operands[0] != stack_pointer_rtx"
-  {@ [ cons: =0 , 1 , 2 ; attrs: type ]
-     [ r , %rk , I ; alu_imm ] add\t%0, %1, %2
-     [ r , rk , r ; alu_sreg ] add\t%0, %1, %2
-     [ r , rk , J ; alu_imm ] sub\t%0, %1, #%n2
-     [ r , rk , Uaa ; multiple ] #
-     [ r , 0 , Uai ; alu_imm ] << aarch64_output_sve_scalar_inc_dec (operands[2]);
-     [ r , rk , Uav ; alu_imm ] << aarch64_output_sve_addvl_addpl (operands[2]);
-     [ &r , rk , Uat ; multiple ] #
+  {@ [ cons: =0 , %1 , 2 ; attrs: type ]
+     [ r , rk , I ; alu_imm ] add\t%0, %1, %2
+     [ r , rk , r ; alu_sreg ] add\t%0, %1, %2
+     [ r , rk , J ; alu_imm ] sub\t%0, %1, #%n2
+     [ r , rk , Uaa ; multiple ] #
+     [ r , 0 , Uai ; alu_imm ] << aarch64_output_sve_scalar_inc_dec (operands[2]);
+     [ r , rk , Uav ; alu_imm ] << aarch64_output_sve_addvl_addpl (operands[2]);
+     [ &r , rk , Uat ; multiple ] #
   }
   "&& epilogue_completed
   && !reg_overlap_mentioned_p (operands[0], operands[1])
@@ -2759,10 +2759,10 @@
   (set (match_operand:GPI 0 "register_operand")
	(plus:GPI (match_dup 1) (match_dup 2)))]
   ""
-  {@ [ cons: =0 , 1 , 2 ; attrs: type ]
-     [ r , %rk , r ; alus_sreg ] adds\t%0, %1, %2
-     [ r , rk , I ; alus_imm ] adds\t%0, %1, %2
-     [ r , rk , J ; alus_imm ] subs\t%0, %1, #%n2
+  {@ [ cons: =0 , %1 , 2 ; attrs: type ]
+     [ r , rk , r ; alus_sreg ] adds\t%0, %1, %2
+     [ r , rk , I ; alus_imm ] adds\t%0, %1, %2
+     [ r , rk , J ; alus_imm ] subs\t%0, %1, #%n2
   }
 )
@@ -2776,10 +2776,10 @@
   (set (match_operand:DI 0 "register_operand")
	(zero_extend:DI (plus:SI (match_dup 1) (match_dup 2))))]
   ""
-  {@ [ cons: =0 , 1 , 2 ; attrs: type ]
-     [ r , %rk , r ; alus_sreg ] adds\t%w0, %w1, %w2
-     [ r , rk , I ; alus_imm ] adds\t%w0, %w1, %2
-     [ r , rk , J ; alus_imm ] subs\t%w0, %w1, #%n2
+  {@ [ cons: =0 , %1 , 2 ; attrs: type ]
+     [ r , rk , r ; alus_sreg ] adds\t%w0, %w1, %w2
+     [ r , rk , I ; alus_imm ] adds\t%w0, %w1, %2
+     [ r , rk , J ; alus_imm ] subs\t%w0, %w1, #%n2
   }
 )
@@ -2980,10 +2980,10 @@
 	    (match_operand:GPI 1 "aarch64_plus_operand"))
	   (const_int 0)))]
   ""
-  {@ [ cons: 0 , 1 ; attrs: type ]
-     [ %r , r ; alus_sreg ] cmn\t%0, %1
-     [ r , I ; alus_imm ] cmn\t%0, %1
-     [ r , J ; alus_imm ] cmp\t%0, #%n1
+  {@ [ cons: %0 , 1 ; attrs: type ]
+     [ r , r ; alus_sreg ] cmn\t%0, %1
+     [ r , I ; alus_imm ] cmn\t%0, %1
+     [ r , J ; alus_imm ] cmp\t%0, #%n1
   }
 )
@@ -5091,8 +5091,8 @@
 	(LOGICAL:GPI (match_operand:GPI 1 "register_operand")
		     (match_operand:GPI 2 "aarch64_logical_operand")))]
   ""
-  {@ [ cons: =0 , 1 , 2 ; attrs: type , arch ]
-     [ r , %r , r ; logic_reg , * ] \t%0, %1, %2
+  {@ [ cons: =0 , %1 , 2 ; attrs: type , arch ]
+     [ r , r , r ; logic_reg , * ] \t%0, %1, %2
      [ rk , r ,  ; logic_imm , * ] \t%0, %1, %2
      [ w , 0 ,  ; * , sve ] \t%Z0., %Z0., #%2
      [ w , w , w ; neon_logic , simd ] \t%0., %1., %2.
@@ -5106,8 +5106,8 @@
 	(LOGICAL:SI (match_operand:SI 1 "register_operand")
		    (match_operand:SI 2 "aarch64_logical_operand"))))]
   ""
-  {@ [ cons: =0 , 1 , 2 ; attrs: type ]
-     [ r , %r , r ; logic_reg ] \t%w0, %w1, %w2
+  {@ [ cons: =0 , %1 , 2 ; attrs: type ]
+     [ r , r , r ; logic_reg ] \t%w0, %w1, %w2
      [ rk , r , K ; logic_imm ] \t%w0, %w1, %2
   }
 )
@@ -5121,8 +5121,8 @@
   (set (match_operand:GPI 0 "register_operand")
	(and:GPI (match_dup 1) (match_dup 2)))]
   ""
-  {@ [ cons: =0 , 1 , 2 ; attrs: type ]
-     [ r , %r , r ; logics_reg ] ands\t%0, %1, %2
+  {@ [ cons: =0 , %1 , 2 ; attrs: type ]
+     [ r , r , r ; logics_reg ] ands\t%0, %1, %2
      [ r , r ,  ; logics_imm ] ands\t%0, %1, %2
   }
 )
@@ -5137,8 +5137,8 @@
   (set (match_operand:DI 0 "register_operand")
	(zero_extend:DI (and:SI (match_dup 1) (match_dup 2))))]
   ""
-  {@ [ cons: =0 , 1 , 2 ; attrs: type ]
-     [ r , %r , r ; logics_reg ] ands\t%w0, %w1, %w2
+  {@ [ cons: =0 , %1 , 2 ; attrs: type ]
+     [ r , r , r ; logics_reg ] ands\t%w0, %w1, %w2
      [ r , r , K ; logics_imm ] ands\t%w0, %w1, %2
   }
 )
@@ -5722,9 +5722,9 @@
 	    (match_operand:GPI 1 "aarch64_logical_operand"))
	   (const_int 0)))]
   ""
-  {@ [ cons: 0 , 1 ; attrs: type ]
-     [ %r , r ; logics_reg ] tst\t%0, %1
-     [ r ,  ; logics_imm ] tst\t%0, %1
+  {@ [ cons: %0 , 1 ; attrs: type ]
+     [ r , r ; logics_reg ] tst\t%0, %1
+     [ r ,  ; logics_imm ] tst\t%0, %1
   }
 )
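
Editorial note: the modifier/qualifier distinction the message describes can be
seen in a minimal, hypothetical pattern (names and constraints below are made
up for illustration and are not part of the patch):

;; Traditional syntax: '%' is an operand modifier, written once in the
;; operand's constraint string so it applies to every alternative; the
;; early-clobber '&' is a qualifier and belongs only in the alternative
;; that needs it.
(define_insn "*example_add_traditional"
  [(set (match_operand:SI 0 "register_operand" "=r,&r")
	(plus:SI (match_operand:SI 1 "register_operand" "%0,r")
		 (match_operand:SI 2 "register_operand" "r,r")))]
  ""
  "add\t%0, %1, %2")

;; Compact syntax: '%' moves to the operand's column in the cons: header,
;; while '&' stays in the row of the specific alternative that needs it,
;; exactly as the fixes above rearrange them.
(define_insn "*example_add_compact"
  [(set (match_operand:SI 0 "register_operand")
	(plus:SI (match_operand:SI 1 "register_operand")
		 (match_operand:SI 2 "register_operand")))]
  ""
  {@ [ cons: =0 , %1 , 2 ]
     [ r        , 0  , r ] add\t%0, %1, %2
     [ &r       , r  , r ] add\t%0, %1, %2
  }
)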