extern void aarch64_output_patchable_area (unsigned int, bool);
+extern void aarch64_adjust_reg_alloc_order ();
+
#endif /* GCC_AARCH64_PROTOS_H */
(define_insn "*vcond_mask_<mode><vpred>"
[(set (match_operand:SVE_ALL 0 "register_operand" "=w, w, w, w, ?w, ?&w, ?&w")
(unspec:SVE_ALL
- [(match_operand:<VPRED> 3 "register_operand" "Upa, Upa, Upa, Upa, Upl, Upl, Upl")
+ [(match_operand:<VPRED> 3 "register_operand" "Upa, Upa, Upa, Upa, Upl, Upa, Upa")
(match_operand:SVE_ALL 1 "aarch64_sve_reg_or_dup_imm" "w, vss, vss, Ufc, Ufc, vss, Ufc")
(match_operand:SVE_ALL 2 "aarch64_simd_reg_or_zero" "w, 0, Dz, 0, Dz, w, w")]
UNSPEC_SEL))]
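(The constraint change above relaxes the last two alternatives of this pattern from Upl, which only accepts the low predicate registers p0-p7, to Upa, which accepts any of p0-p15, so the selector is no longer forced into a low predicate register for those alternatives.)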
return "";
}
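(The two context lines above are the tail of the function that precedes the insertion point in aarch64.cc; the new register-allocation-order code below is added just before the target-specific selftests.)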
+/* Set up the value of REG_ALLOC_ORDER from scratch.
+
+ It was previously good practice to put call-clobbered registers ahead
+ of call-preserved registers, but that isn't necessary these days.
+ IRA's model of register save/restore costs is much more sophisticated
+ than the model that a simple ordering could provide. We leave
+ HONOR_REG_ALLOC_ORDER undefined so that we can get the full benefit
+ of IRA's model.
+
+ However, it is still useful to list registers that are members of
+ multiple classes after registers that are members of fewer classes.
+ For example, we have:
+
+ - FP_LO8_REGS: v0-v7
+ - FP_LO_REGS: v0-v15
+ - FP_REGS: v0-v31
+
+ If, as a tie-breaker, we allocate FP_REGS in the order v0-v31,
+ we run the risk of starving other (lower-priority) pseudos that
+ require FP_LO8_REGS or FP_LO_REGS. Allocating FP_LO_REGS in the
+ order v0-v15 could similarly starve pseudos that require FP_LO8_REGS.
+ Allocating downwards rather than upwards avoids this problem, at least
+ in code that has reasonable register pressure.
+
+ The situation for predicate registers is similar. */
+
+void
+aarch64_adjust_reg_alloc_order ()
+{
+ for (int i = 0; i < FIRST_PSEUDO_REGISTER; ++i)
+ if (IN_RANGE (i, V0_REGNUM, V31_REGNUM))
+ reg_alloc_order[i] = V31_REGNUM - (i - V0_REGNUM);
+ else if (IN_RANGE (i, P0_REGNUM, P15_REGNUM))
+ reg_alloc_order[i] = P15_REGNUM - (i - P0_REGNUM);
+ else
+ reg_alloc_order[i] = i;
+}
+
/* Target-specific selftests. */
#if CHECKING_P
STACK_BOUNDARY / BITS_PER_UNIT) \
: (crtl->outgoing_args_size + STACK_POINTER_OFFSET))
+/* Filled in by aarch64_adjust_reg_alloc_order, which is called before
+ the first relevant use. */
+#define REG_ALLOC_ORDER {}
+#define ADJUST_REG_ALLOC_ORDER aarch64_adjust_reg_alloc_order ()
+
#endif /* GCC_AARCH64_H */
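The remaining hunks below drop `{ xfail *-*-* }` markers from SVE ACLE movprfx tests whose expected untied sequences now match under the new allocation order. As a standalone illustration (not part of the patch), the sketch below mirrors what aarch64_adjust_reg_alloc_order computes for the vector registers; the V0_REGNUM value here is a placeholder for illustration, not the real aarch64.h definition, and the predicate registers get the same treatment in the patch itself.

#include <stdio.h>

/* Illustrative placeholders; the real values live in aarch64.h.  */
#define V0_REGNUM 32
#define V31_REGNUM (V0_REGNUM + 31)
#define NUM_REGS (V31_REGNUM + 1)

int
main (void)
{
  int reg_alloc_order[NUM_REGS];

  /* Mirrors the patch's loop: vector registers are listed in
     decreasing order, everything else keeps its natural position.  */
  for (int i = 0; i < NUM_REGS; ++i)
    if (i >= V0_REGNUM && i <= V31_REGNUM)
      reg_alloc_order[i] = V31_REGNUM - (i - V0_REGNUM);
    else
      reg_alloc_order[i] = i;

  /* IRA's tie-breaker now tries v31 first and v0 last, keeping the
     low registers free for pseudos that need FP_LO8_REGS or
     FP_LO_REGS.  */
  printf ("first vector reg tried: v%d\n",
	  reg_alloc_order[V0_REGNUM] - V0_REGNUM);
  printf ("last vector reg tried:  v%d\n",
	  reg_alloc_order[V31_REGNUM] - V0_REGNUM);
  return 0;
}

Compiling and running this prints v31 first and v0 last, which is the "allocate downwards" tie-breaking behaviour that the comment in the patch describes.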
z0 = svabd_m (p0, z0, 1))
/*
-** abd_1_f16_m_untied: { xfail *-*-* }
+** abd_1_f16_m_untied:
** fmov (z[0-9]+\.h), #1\.0(?:e\+0)?
** movprfx z0, z1
** fabd z0\.h, p0/m, z0\.h, \1
z0 = svabd_m (p0, z0, 1))
/*
-** abd_1_f32_m_untied: { xfail *-*-* }
+** abd_1_f32_m_untied:
** fmov (z[0-9]+\.s), #1\.0(?:e\+0)?
** movprfx z0, z1
** fabd z0\.s, p0/m, z0\.s, \1
z0 = svabd_m (p0, z0, 1))
/*
-** abd_1_f64_m_untied: { xfail *-*-* }
+** abd_1_f64_m_untied:
** fmov (z[0-9]+\.d), #1\.0(?:e\+0)?
** movprfx z0, z1
** fabd z0\.d, p0/m, z0\.d, \1
z0 = svabd_m (p0, z0, x0))
/*
-** abd_w0_s16_m_untied: { xfail *-*-* }
+** abd_w0_s16_m_untied:
** mov (z[0-9]+\.h), w0
** movprfx z0, z1
** sabd z0\.h, p0/m, z0\.h, \1
z0 = svabd_m (p0, z0, 1))
/*
-** abd_1_s16_m_untied: { xfail *-*-* }
+** abd_1_s16_m_untied:
** mov (z[0-9]+\.h), #1
** movprfx z0, z1
** sabd z0\.h, p0/m, z0\.h, \1
z0 = svabd_m (p0, z0, 1))
/*
-** abd_1_s32_m_untied: { xfail *-*-* }
+** abd_1_s32_m_untied:
** mov (z[0-9]+\.s), #1
** movprfx z0, z1
** sabd z0\.s, p0/m, z0\.s, \1
z0 = svabd_m (p0, z0, 1))
/*
-** abd_1_s64_m_untied: { xfail *-*-* }
+** abd_1_s64_m_untied:
** mov (z[0-9]+\.d), #1
** movprfx z0, z1
** sabd z0\.d, p0/m, z0\.d, \1
z0 = svabd_m (p0, z0, x0))
/*
-** abd_w0_s8_m_untied: { xfail *-*-* }
+** abd_w0_s8_m_untied:
** mov (z[0-9]+\.b), w0
** movprfx z0, z1
** sabd z0\.b, p0/m, z0\.b, \1
z0 = svabd_m (p0, z0, 1))
/*
-** abd_1_s8_m_untied: { xfail *-*-* }
+** abd_1_s8_m_untied:
** mov (z[0-9]+\.b), #1
** movprfx z0, z1
** sabd z0\.b, p0/m, z0\.b, \1
z0 = svabd_m (p0, z0, x0))
/*
-** abd_w0_u16_m_untied: { xfail *-*-* }
+** abd_w0_u16_m_untied:
** mov (z[0-9]+\.h), w0
** movprfx z0, z1
** uabd z0\.h, p0/m, z0\.h, \1
z0 = svabd_m (p0, z0, 1))
/*
-** abd_1_u16_m_untied: { xfail *-*-* }
+** abd_1_u16_m_untied:
** mov (z[0-9]+\.h), #1
** movprfx z0, z1
** uabd z0\.h, p0/m, z0\.h, \1
z0 = svabd_m (p0, z0, 1))
/*
-** abd_1_u32_m_untied: { xfail *-*-* }
+** abd_1_u32_m_untied:
** mov (z[0-9]+\.s), #1
** movprfx z0, z1
** uabd z0\.s, p0/m, z0\.s, \1
z0 = svabd_m (p0, z0, 1))
/*
-** abd_1_u64_m_untied: { xfail *-*-* }
+** abd_1_u64_m_untied:
** mov (z[0-9]+\.d), #1
** movprfx z0, z1
** uabd z0\.d, p0/m, z0\.d, \1
z0 = svabd_m (p0, z0, x0))
/*
-** abd_w0_u8_m_untied: { xfail *-*-* }
+** abd_w0_u8_m_untied:
** mov (z[0-9]+\.b), w0
** movprfx z0, z1
** uabd z0\.b, p0/m, z0\.b, \1
z0 = svabd_m (p0, z0, 1))
/*
-** abd_1_u8_m_untied: { xfail *-*-* }
+** abd_1_u8_m_untied:
** mov (z[0-9]+\.b), #1
** movprfx z0, z1
** uabd z0\.b, p0/m, z0\.b, \1
z0 = svadd_m (p0, z0, x0))
/*
-** add_w0_s16_m_untied: { xfail *-*-* }
+** add_w0_s16_m_untied:
** mov (z[0-9]+\.h), w0
** movprfx z0, z1
** add z0\.h, p0/m, z0\.h, \1
z0 = svadd_m (p0, z0, 1))
/*
-** add_1_s16_m_untied: { xfail *-*-* }
+** add_1_s16_m_untied:
** mov (z[0-9]+\.h), #1
** movprfx z0, z1
** add z0\.h, p0/m, z0\.h, \1
z0 = svadd_m (p0, z0, 1))
/*
-** add_1_s32_m_untied: { xfail *-*-* }
+** add_1_s32_m_untied:
** mov (z[0-9]+\.s), #1
** movprfx z0, z1
** add z0\.s, p0/m, z0\.s, \1
z0 = svadd_m (p0, z0, 1))
/*
-** add_1_s64_m_untied: { xfail *-*-* }
+** add_1_s64_m_untied:
** mov (z[0-9]+\.d), #1
** movprfx z0, z1
** add z0\.d, p0/m, z0\.d, \1
z0 = svadd_m (p0, z0, x0))
/*
-** add_w0_s8_m_untied: { xfail *-*-* }
+** add_w0_s8_m_untied:
** mov (z[0-9]+\.b), w0
** movprfx z0, z1
** add z0\.b, p0/m, z0\.b, \1
z0 = svadd_m (p0, z0, 1))
/*
-** add_1_s8_m_untied: { xfail *-*-* }
+** add_1_s8_m_untied:
** mov (z[0-9]+\.b), #1
** movprfx z0, z1
** add z0\.b, p0/m, z0\.b, \1
z0 = svadd_m (p0, z0, x0))
/*
-** add_w0_u16_m_untied: { xfail *-*-* }
+** add_w0_u16_m_untied:
** mov (z[0-9]+\.h), w0
** movprfx z0, z1
** add z0\.h, p0/m, z0\.h, \1
z0 = svadd_m (p0, z0, 1))
/*
-** add_1_u16_m_untied: { xfail *-*-* }
+** add_1_u16_m_untied:
** mov (z[0-9]+\.h), #1
** movprfx z0, z1
** add z0\.h, p0/m, z0\.h, \1
z0 = svadd_m (p0, z0, 1))
/*
-** add_1_u32_m_untied: { xfail *-*-* }
+** add_1_u32_m_untied:
** mov (z[0-9]+\.s), #1
** movprfx z0, z1
** add z0\.s, p0/m, z0\.s, \1
z0 = svadd_m (p0, z0, 1))
/*
-** add_1_u64_m_untied: { xfail *-*-* }
+** add_1_u64_m_untied:
** mov (z[0-9]+\.d), #1
** movprfx z0, z1
** add z0\.d, p0/m, z0\.d, \1
z0 = svadd_m (p0, z0, x0))
/*
-** add_w0_u8_m_untied: { xfail *-*-* }
+** add_w0_u8_m_untied:
** mov (z[0-9]+\.b), w0
** movprfx z0, z1
** add z0\.b, p0/m, z0\.b, \1
z0 = svadd_m (p0, z0, 1))
/*
-** add_1_u8_m_untied: { xfail *-*-* }
+** add_1_u8_m_untied:
** mov (z[0-9]+\.b), #1
** movprfx z0, z1
** add z0\.b, p0/m, z0\.b, \1
z0 = svand_m (p0, z0, x0))
/*
-** and_w0_s16_m_untied: { xfail *-*-* }
+** and_w0_s16_m_untied:
** mov (z[0-9]+\.h), w0
** movprfx z0, z1
** and z0\.h, p0/m, z0\.h, \1
z0 = svand_m (p0, z0, 1))
/*
-** and_1_s16_m_untied: { xfail *-*-* }
+** and_1_s16_m_untied:
** mov (z[0-9]+\.h), #1
** movprfx z0, z1
** and z0\.h, p0/m, z0\.h, \1
z0 = svand_m (p0, z0, 1))
/*
-** and_1_s32_m_untied: { xfail *-*-* }
+** and_1_s32_m_untied:
** mov (z[0-9]+\.s), #1
** movprfx z0, z1
** and z0\.s, p0/m, z0\.s, \1
z0 = svand_m (p0, z0, 1))
/*
-** and_1_s64_m_untied: { xfail *-*-* }
+** and_1_s64_m_untied:
** mov (z[0-9]+\.d), #1
** movprfx z0, z1
** and z0\.d, p0/m, z0\.d, \1
z0 = svand_m (p0, z0, x0))
/*
-** and_w0_s8_m_untied: { xfail *-*-* }
+** and_w0_s8_m_untied:
** mov (z[0-9]+\.b), w0
** movprfx z0, z1
** and z0\.b, p0/m, z0\.b, \1
z0 = svand_m (p0, z0, 1))
/*
-** and_1_s8_m_untied: { xfail *-*-* }
+** and_1_s8_m_untied:
** mov (z[0-9]+\.b), #1
** movprfx z0, z1
** and z0\.b, p0/m, z0\.b, \1
z0 = svand_m (p0, z0, x0))
/*
-** and_w0_u16_m_untied: { xfail *-*-* }
+** and_w0_u16_m_untied:
** mov (z[0-9]+\.h), w0
** movprfx z0, z1
** and z0\.h, p0/m, z0\.h, \1
z0 = svand_m (p0, z0, 1))
/*
-** and_1_u16_m_untied: { xfail *-*-* }
+** and_1_u16_m_untied:
** mov (z[0-9]+\.h), #1
** movprfx z0, z1
** and z0\.h, p0/m, z0\.h, \1
z0 = svand_m (p0, z0, 1))
/*
-** and_1_u32_m_untied: { xfail *-*-* }
+** and_1_u32_m_untied:
** mov (z[0-9]+\.s), #1
** movprfx z0, z1
** and z0\.s, p0/m, z0\.s, \1
z0 = svand_m (p0, z0, 1))
/*
-** and_1_u64_m_untied: { xfail *-*-* }
+** and_1_u64_m_untied:
** mov (z[0-9]+\.d), #1
** movprfx z0, z1
** and z0\.d, p0/m, z0\.d, \1
z0 = svand_m (p0, z0, x0))
/*
-** and_w0_u8_m_untied: { xfail *-*-* }
+** and_w0_u8_m_untied:
** mov (z[0-9]+\.b), w0
** movprfx z0, z1
** and z0\.b, p0/m, z0\.b, \1
z0 = svand_m (p0, z0, 1))
/*
-** and_1_u8_m_untied: { xfail *-*-* }
+** and_1_u8_m_untied:
** mov (z[0-9]+\.b), #1
** movprfx z0, z1
** and z0\.b, p0/m, z0\.b, \1
z0 = svasr_m (p0, z0, x0))
/*
-** asr_w0_s16_m_untied: { xfail *-*-* }
+** asr_w0_s16_m_untied:
** mov (z[0-9]+\.h), w0
** movprfx z0, z1
** asr z0\.h, p0/m, z0\.h, \1
z0 = svasr_m (p0, z0, x0))
/*
-** asr_w0_s8_m_untied: { xfail *-*-* }
+** asr_w0_s8_m_untied:
** mov (z[0-9]+\.b), w0
** movprfx z0, z1
** asr z0\.b, p0/m, z0\.b, \1
z0 = svbic_m (p0, z0, x0))
/*
-** bic_w0_s16_m_untied: { xfail *-*-* }
+** bic_w0_s16_m_untied:
** mov (z[0-9]+\.h), w0
** movprfx z0, z1
** bic z0\.h, p0/m, z0\.h, \1
z0 = svbic_m (p0, z0, 1))
/*
-** bic_1_s16_m_untied: { xfail *-*-* }
+** bic_1_s16_m_untied:
** mov (z[0-9]+\.h), #-2
** movprfx z0, z1
** and z0\.h, p0/m, z0\.h, \1
z0 = svbic_z (p0, z0, x0))
/*
-** bic_w0_s16_z_untied: { xfail *-*-* }
+** bic_w0_s16_z_untied:
** mov (z[0-9]+\.h), w0
** movprfx z0\.h, p0/z, z1\.h
** bic z0\.h, p0/m, z0\.h, \1
z0 = svbic_m (p0, z0, 1))
/*
-** bic_1_s32_m_untied: { xfail *-*-* }
+** bic_1_s32_m_untied:
** mov (z[0-9]+\.s), #-2
** movprfx z0, z1
** and z0\.s, p0/m, z0\.s, \1
z0 = svbic_m (p0, z0, 1))
/*
-** bic_1_s64_m_untied: { xfail *-*-* }
+** bic_1_s64_m_untied:
** mov (z[0-9]+\.d), #-2
** movprfx z0, z1
** and z0\.d, p0/m, z0\.d, \1
z0 = svbic_m (p0, z0, x0))
/*
-** bic_w0_s8_m_untied: { xfail *-*-* }
+** bic_w0_s8_m_untied:
** mov (z[0-9]+\.b), w0
** movprfx z0, z1
** bic z0\.b, p0/m, z0\.b, \1
z0 = svbic_m (p0, z0, 1))
/*
-** bic_1_s8_m_untied: { xfail *-*-* }
+** bic_1_s8_m_untied:
** mov (z[0-9]+\.b), #-2
** movprfx z0, z1
** and z0\.b, p0/m, z0\.b, \1
z0 = svbic_z (p0, z0, x0))
/*
-** bic_w0_s8_z_untied: { xfail *-*-* }
+** bic_w0_s8_z_untied:
** mov (z[0-9]+\.b), w0
** movprfx z0\.b, p0/z, z1\.b
** bic z0\.b, p0/m, z0\.b, \1
z0 = svbic_m (p0, z0, x0))
/*
-** bic_w0_u16_m_untied: { xfail *-*-* }
+** bic_w0_u16_m_untied:
** mov (z[0-9]+\.h), w0
** movprfx z0, z1
** bic z0\.h, p0/m, z0\.h, \1
z0 = svbic_m (p0, z0, 1))
/*
-** bic_1_u16_m_untied: { xfail *-*-* }
+** bic_1_u16_m_untied:
** mov (z[0-9]+\.h), #-2
** movprfx z0, z1
** and z0\.h, p0/m, z0\.h, \1
z0 = svbic_z (p0, z0, x0))
/*
-** bic_w0_u16_z_untied: { xfail *-*-* }
+** bic_w0_u16_z_untied:
** mov (z[0-9]+\.h), w0
** movprfx z0\.h, p0/z, z1\.h
** bic z0\.h, p0/m, z0\.h, \1
z0 = svbic_m (p0, z0, 1))
/*
-** bic_1_u32_m_untied: { xfail *-*-* }
+** bic_1_u32_m_untied:
** mov (z[0-9]+\.s), #-2
** movprfx z0, z1
** and z0\.s, p0/m, z0\.s, \1
z0 = svbic_m (p0, z0, 1))
/*
-** bic_1_u64_m_untied: { xfail *-*-* }
+** bic_1_u64_m_untied:
** mov (z[0-9]+\.d), #-2
** movprfx z0, z1
** and z0\.d, p0/m, z0\.d, \1
z0 = svbic_m (p0, z0, x0))
/*
-** bic_w0_u8_m_untied: { xfail *-*-* }
+** bic_w0_u8_m_untied:
** mov (z[0-9]+\.b), w0
** movprfx z0, z1
** bic z0\.b, p0/m, z0\.b, \1
z0 = svbic_m (p0, z0, 1))
/*
-** bic_1_u8_m_untied: { xfail *-*-* }
+** bic_1_u8_m_untied:
** mov (z[0-9]+\.b), #-2
** movprfx z0, z1
** and z0\.b, p0/m, z0\.b, \1
z0 = svbic_z (p0, z0, x0))
/*
-** bic_w0_u8_z_untied: { xfail *-*-* }
+** bic_w0_u8_z_untied:
** mov (z[0-9]+\.b), w0
** movprfx z0\.b, p0/z, z1\.b
** bic z0\.b, p0/m, z0\.b, \1
z0 = svdiv_m (p0, z0, 1))
/*
-** div_1_f16_m_untied: { xfail *-*-* }
+** div_1_f16_m_untied:
** fmov (z[0-9]+\.h), #1\.0(?:e\+0)?
** movprfx z0, z1
** fdiv z0\.h, p0/m, z0\.h, \1
z0 = svdiv_m (p0, z0, 1))
/*
-** div_1_f32_m_untied: { xfail *-*-* }
+** div_1_f32_m_untied:
** fmov (z[0-9]+\.s), #1\.0(?:e\+0)?
** movprfx z0, z1
** fdiv z0\.s, p0/m, z0\.s, \1
z0 = svdiv_m (p0, z0, 1))
/*
-** div_1_f64_m_untied: { xfail *-*-* }
+** div_1_f64_m_untied:
** fmov (z[0-9]+\.d), #1\.0(?:e\+0)?
** movprfx z0, z1
** fdiv z0\.d, p0/m, z0\.d, \1
z0 = svdiv_m (p0, z0, 2))
/*
-** div_2_s32_m_untied: { xfail *-*-* }
+** div_2_s32_m_untied:
** mov (z[0-9]+\.s), #2
** movprfx z0, z1
** sdiv z0\.s, p0/m, z0\.s, \1
z0 = svdiv_m (p0, z0, 2))
/*
-** div_2_s64_m_untied: { xfail *-*-* }
+** div_2_s64_m_untied:
** mov (z[0-9]+\.d), #2
** movprfx z0, z1
** sdiv z0\.d, p0/m, z0\.d, \1
z0 = svdiv_m (p0, z0, 2))
/*
-** div_2_u32_m_untied: { xfail *-*-* }
+** div_2_u32_m_untied:
** mov (z[0-9]+\.s), #2
** movprfx z0, z1
** udiv z0\.s, p0/m, z0\.s, \1
z0 = svdiv_m (p0, z0, 2))
/*
-** div_2_u64_m_untied: { xfail *-*-* }
+** div_2_u64_m_untied:
** mov (z[0-9]+\.d), #2
** movprfx z0, z1
** udiv z0\.d, p0/m, z0\.d, \1
z0 = svdivr_m (p0, z0, 1))
/*
-** divr_1_f16_m_untied: { xfail *-*-* }
+** divr_1_f16_m_untied:
** fmov (z[0-9]+\.h), #1\.0(?:e\+0)?
** movprfx z0, z1
** fdivr z0\.h, p0/m, z0\.h, \1
z0 = svdivr_m (p0, z0, 0.5))
/*
-** divr_0p5_f16_m_untied: { xfail *-*-* }
+** divr_0p5_f16_m_untied:
** fmov (z[0-9]+\.h), #(?:0\.5|5\.0e-1)
** movprfx z0, z1
** fdivr z0\.h, p0/m, z0\.h, \1
z0 = svdivr_m (p0, z0, 1))
/*
-** divr_1_f32_m_untied: { xfail *-*-* }
+** divr_1_f32_m_untied:
** fmov (z[0-9]+\.s), #1\.0(?:e\+0)?
** movprfx z0, z1
** fdivr z0\.s, p0/m, z0\.s, \1
z0 = svdivr_m (p0, z0, 0.5))
/*
-** divr_0p5_f32_m_untied: { xfail *-*-* }
+** divr_0p5_f32_m_untied:
** fmov (z[0-9]+\.s), #(?:0\.5|5\.0e-1)
** movprfx z0, z1
** fdivr z0\.s, p0/m, z0\.s, \1
z0 = svdivr_m (p0, z0, 1))
/*
-** divr_1_f64_m_untied: { xfail *-*-* }
+** divr_1_f64_m_untied:
** fmov (z[0-9]+\.d), #1\.0(?:e\+0)?
** movprfx z0, z1
** fdivr z0\.d, p0/m, z0\.d, \1
z0 = svdivr_m (p0, z0, 0.5))
/*
-** divr_0p5_f64_m_untied: { xfail *-*-* }
+** divr_0p5_f64_m_untied:
** fmov (z[0-9]+\.d), #(?:0\.5|5\.0e-1)
** movprfx z0, z1
** fdivr z0\.d, p0/m, z0\.d, \1
z0 = svdivr_m (p0, z0, 2))
/*
-** divr_2_s32_m_untied: { xfail *-*-* }
+** divr_2_s32_m_untied:
** mov (z[0-9]+\.s), #2
** movprfx z0, z1
** sdivr z0\.s, p0/m, z0\.s, \1
z0 = svdivr_m (p0, z0, 2))
/*
-** divr_2_s64_m_untied: { xfail *-*-* }
+** divr_2_s64_m_untied:
** mov (z[0-9]+\.d), #2
** movprfx z0, z1
** sdivr z0\.d, p0/m, z0\.d, \1
z0 = svdivr_m (p0, z0, 2))
/*
-** divr_2_u32_m_untied: { xfail *-*-* }
+** divr_2_u32_m_untied:
** mov (z[0-9]+\.s), #2
** movprfx z0, z1
** udivr z0\.s, p0/m, z0\.s, \1
z0 = svdivr_m (p0, z0, 2))
/*
-** divr_2_u64_m_untied: { xfail *-*-* }
+** divr_2_u64_m_untied:
** mov (z[0-9]+\.d), #2
** movprfx z0, z1
** udivr z0\.d, p0/m, z0\.d, \1
z0 = svdot (z0, z4, x0))
/*
-** dot_w0_s32_untied: { xfail *-*-* }
+** dot_w0_s32_untied:
** mov (z[0-9]+\.b), w0
** movprfx z0, z1
** sdot z0\.s, z4\.b, \1
z0 = svdot (z0, z4, 9))
/*
-** dot_9_s32_untied: { xfail *-*-* }
+** dot_9_s32_untied:
** mov (z[0-9]+\.b), #9
** movprfx z0, z1
** sdot z0\.s, z4\.b, \1
z0 = svdot (z0, z4, x0))
/*
-** dot_w0_s64_untied: { xfail *-*-* }
+** dot_w0_s64_untied:
** mov (z[0-9]+\.h), w0
** movprfx z0, z1
** sdot z0\.d, z4\.h, \1
z0 = svdot (z0, z4, 9))
/*
-** dot_9_s64_untied: { xfail *-*-* }
+** dot_9_s64_untied:
** mov (z[0-9]+\.h), #9
** movprfx z0, z1
** sdot z0\.d, z4\.h, \1
z0 = svdot (z0, z4, x0))
/*
-** dot_w0_u32_untied: { xfail *-*-* }
+** dot_w0_u32_untied:
** mov (z[0-9]+\.b), w0
** movprfx z0, z1
** udot z0\.s, z4\.b, \1
z0 = svdot (z0, z4, 9))
/*
-** dot_9_u32_untied: { xfail *-*-* }
+** dot_9_u32_untied:
** mov (z[0-9]+\.b), #9
** movprfx z0, z1
** udot z0\.s, z4\.b, \1
z0 = svdot (z0, z4, x0))
/*
-** dot_w0_u64_untied: { xfail *-*-* }
+** dot_w0_u64_untied:
** mov (z[0-9]+\.h), w0
** movprfx z0, z1
** udot z0\.d, z4\.h, \1
z0 = svdot (z0, z4, 9))
/*
-** dot_9_u64_untied: { xfail *-*-* }
+** dot_9_u64_untied:
** mov (z[0-9]+\.h), #9
** movprfx z0, z1
** udot z0\.d, z4\.h, \1
z0 = sveor_m (p0, z0, x0))
/*
-** eor_w0_s16_m_untied: { xfail *-*-* }
+** eor_w0_s16_m_untied:
** mov (z[0-9]+\.h), w0
** movprfx z0, z1
** eor z0\.h, p0/m, z0\.h, \1
z0 = sveor_m (p0, z0, 1))
/*
-** eor_1_s16_m_untied: { xfail *-*-* }
+** eor_1_s16_m_untied:
** mov (z[0-9]+\.h), #1
** movprfx z0, z1
** eor z0\.h, p0/m, z0\.h, \1
z0 = sveor_m (p0, z0, 1))
/*
-** eor_1_s32_m_untied: { xfail *-*-* }
+** eor_1_s32_m_untied:
** mov (z[0-9]+\.s), #1
** movprfx z0, z1
** eor z0\.s, p0/m, z0\.s, \1
z0 = sveor_m (p0, z0, 1))
/*
-** eor_1_s64_m_untied: { xfail *-*-* }
+** eor_1_s64_m_untied:
** mov (z[0-9]+\.d), #1
** movprfx z0, z1
** eor z0\.d, p0/m, z0\.d, \1
z0 = sveor_m (p0, z0, x0))
/*
-** eor_w0_s8_m_untied: { xfail *-*-* }
+** eor_w0_s8_m_untied:
** mov (z[0-9]+\.b), w0
** movprfx z0, z1
** eor z0\.b, p0/m, z0\.b, \1
z0 = sveor_m (p0, z0, 1))
/*
-** eor_1_s8_m_untied: { xfail *-*-* }
+** eor_1_s8_m_untied:
** mov (z[0-9]+\.b), #1
** movprfx z0, z1
** eor z0\.b, p0/m, z0\.b, \1
z0 = sveor_m (p0, z0, x0))
/*
-** eor_w0_u16_m_untied: { xfail *-*-* }
+** eor_w0_u16_m_untied:
** mov (z[0-9]+\.h), w0
** movprfx z0, z1
** eor z0\.h, p0/m, z0\.h, \1
z0 = sveor_m (p0, z0, 1))
/*
-** eor_1_u16_m_untied: { xfail *-*-* }
+** eor_1_u16_m_untied:
** mov (z[0-9]+\.h), #1
** movprfx z0, z1
** eor z0\.h, p0/m, z0\.h, \1
z0 = sveor_m (p0, z0, 1))
/*
-** eor_1_u32_m_untied: { xfail *-*-* }
+** eor_1_u32_m_untied:
** mov (z[0-9]+\.s), #1
** movprfx z0, z1
** eor z0\.s, p0/m, z0\.s, \1
z0 = sveor_m (p0, z0, 1))
/*
-** eor_1_u64_m_untied: { xfail *-*-* }
+** eor_1_u64_m_untied:
** mov (z[0-9]+\.d), #1
** movprfx z0, z1
** eor z0\.d, p0/m, z0\.d, \1
z0 = sveor_m (p0, z0, x0))
/*
-** eor_w0_u8_m_untied: { xfail *-*-* }
+** eor_w0_u8_m_untied:
** mov (z[0-9]+\.b), w0
** movprfx z0, z1
** eor z0\.b, p0/m, z0\.b, \1
z0 = sveor_m (p0, z0, 1))
/*
-** eor_1_u8_m_untied: { xfail *-*-* }
+** eor_1_u8_m_untied:
** mov (z[0-9]+\.b), #1
** movprfx z0, z1
** eor z0\.b, p0/m, z0\.b, \1
z0 = svlsl_m (p0, z0, x0))
/*
-** lsl_w0_s16_m_untied: { xfail *-*-* }
+** lsl_w0_s16_m_untied:
** mov (z[0-9]+\.h), w0
** movprfx z0, z1
** lsl z0\.h, p0/m, z0\.h, \1
z0 = svlsl_m (p0, z0, 16))
/*
-** lsl_16_s16_m_untied: { xfail *-*-* }
+** lsl_16_s16_m_untied:
** mov (z[0-9]+\.h), #16
** movprfx z0, z1
** lsl z0\.h, p0/m, z0\.h, \1
z0 = svlsl_m (p0, z0, 32))
/*
-** lsl_32_s32_m_untied: { xfail *-*-* }
+** lsl_32_s32_m_untied:
** mov (z[0-9]+\.s), #32
** movprfx z0, z1
** lsl z0\.s, p0/m, z0\.s, \1
z0 = svlsl_m (p0, z0, 64))
/*
-** lsl_64_s64_m_untied: { xfail *-*-* }
+** lsl_64_s64_m_untied:
** mov (z[0-9]+\.d), #64
** movprfx z0, z1
** lsl z0\.d, p0/m, z0\.d, \1
z0 = svlsl_m (p0, z0, x0))
/*
-** lsl_w0_s8_m_untied: { xfail *-*-* }
+** lsl_w0_s8_m_untied:
** mov (z[0-9]+\.b), w0
** movprfx z0, z1
** lsl z0\.b, p0/m, z0\.b, \1
z0 = svlsl_m (p0, z0, 8))
/*
-** lsl_8_s8_m_untied: { xfail *-*-* }
+** lsl_8_s8_m_untied:
** mov (z[0-9]+\.b), #8
** movprfx z0, z1
** lsl z0\.b, p0/m, z0\.b, \1
z0 = svlsl_m (p0, z0, x0))
/*
-** lsl_w0_u16_m_untied: { xfail *-*-* }
+** lsl_w0_u16_m_untied:
** mov (z[0-9]+\.h), w0
** movprfx z0, z1
** lsl z0\.h, p0/m, z0\.h, \1
z0 = svlsl_m (p0, z0, 16))
/*
-** lsl_16_u16_m_untied: { xfail *-*-* }
+** lsl_16_u16_m_untied:
** mov (z[0-9]+\.h), #16
** movprfx z0, z1
** lsl z0\.h, p0/m, z0\.h, \1
z0 = svlsl_m (p0, z0, 32))
/*
-** lsl_32_u32_m_untied: { xfail *-*-* }
+** lsl_32_u32_m_untied:
** mov (z[0-9]+\.s), #32
** movprfx z0, z1
** lsl z0\.s, p0/m, z0\.s, \1
z0 = svlsl_m (p0, z0, 64))
/*
-** lsl_64_u64_m_untied: { xfail *-*-* }
+** lsl_64_u64_m_untied:
** mov (z[0-9]+\.d), #64
** movprfx z0, z1
** lsl z0\.d, p0/m, z0\.d, \1
z0 = svlsl_m (p0, z0, x0))
/*
-** lsl_w0_u8_m_untied: { xfail *-*-* }
+** lsl_w0_u8_m_untied:
** mov (z[0-9]+\.b), w0
** movprfx z0, z1
** lsl z0\.b, p0/m, z0\.b, \1
z0 = svlsl_m (p0, z0, 8))
/*
-** lsl_8_u8_m_untied: { xfail *-*-* }
+** lsl_8_u8_m_untied:
** mov (z[0-9]+\.b), #8
** movprfx z0, z1
** lsl z0\.b, p0/m, z0\.b, \1
z0 = svlsl_wide_m (p0, z0, 16))
/*
-** lsl_wide_16_s16_m_untied: { xfail *-*-* }
+** lsl_wide_16_s16_m_untied:
** mov (z[0-9]+\.d), #16
** movprfx z0, z1
** lsl z0\.h, p0/m, z0\.h, \1
z0 = svlsl_wide_z (p0, z0, 16))
/*
-** lsl_wide_16_s16_z_untied: { xfail *-*-* }
+** lsl_wide_16_s16_z_untied:
** mov (z[0-9]+\.d), #16
** movprfx z0\.h, p0/z, z1\.h
** lsl z0\.h, p0/m, z0\.h, \1
z0 = svlsl_wide_m (p0, z0, 32))
/*
-** lsl_wide_32_s32_m_untied: { xfail *-*-* }
+** lsl_wide_32_s32_m_untied:
** mov (z[0-9]+\.d), #32
** movprfx z0, z1
** lsl z0\.s, p0/m, z0\.s, \1
z0 = svlsl_wide_z (p0, z0, 32))
/*
-** lsl_wide_32_s32_z_untied: { xfail *-*-* }
+** lsl_wide_32_s32_z_untied:
** mov (z[0-9]+\.d), #32
** movprfx z0\.s, p0/z, z1\.s
** lsl z0\.s, p0/m, z0\.s, \1
z0 = svlsl_wide_m (p0, z0, 8))
/*
-** lsl_wide_8_s8_m_untied: { xfail *-*-* }
+** lsl_wide_8_s8_m_untied:
** mov (z[0-9]+\.d), #8
** movprfx z0, z1
** lsl z0\.b, p0/m, z0\.b, \1
z0 = svlsl_wide_z (p0, z0, 8))
/*
-** lsl_wide_8_s8_z_untied: { xfail *-*-* }
+** lsl_wide_8_s8_z_untied:
** mov (z[0-9]+\.d), #8
** movprfx z0\.b, p0/z, z1\.b
** lsl z0\.b, p0/m, z0\.b, \1
z0 = svlsl_wide_m (p0, z0, 16))
/*
-** lsl_wide_16_u16_m_untied: { xfail *-*-* }
+** lsl_wide_16_u16_m_untied:
** mov (z[0-9]+\.d), #16
** movprfx z0, z1
** lsl z0\.h, p0/m, z0\.h, \1
z0 = svlsl_wide_z (p0, z0, 16))
/*
-** lsl_wide_16_u16_z_untied: { xfail *-*-* }
+** lsl_wide_16_u16_z_untied:
** mov (z[0-9]+\.d), #16
** movprfx z0\.h, p0/z, z1\.h
** lsl z0\.h, p0/m, z0\.h, \1
z0 = svlsl_wide_m (p0, z0, 32))
/*
-** lsl_wide_32_u32_m_untied: { xfail *-*-* }
+** lsl_wide_32_u32_m_untied:
** mov (z[0-9]+\.d), #32
** movprfx z0, z1
** lsl z0\.s, p0/m, z0\.s, \1
z0 = svlsl_wide_z (p0, z0, 32))
/*
-** lsl_wide_32_u32_z_untied: { xfail *-*-* }
+** lsl_wide_32_u32_z_untied:
** mov (z[0-9]+\.d), #32
** movprfx z0\.s, p0/z, z1\.s
** lsl z0\.s, p0/m, z0\.s, \1
z0 = svlsl_wide_m (p0, z0, 8))
/*
-** lsl_wide_8_u8_m_untied: { xfail *-*-* }
+** lsl_wide_8_u8_m_untied:
** mov (z[0-9]+\.d), #8
** movprfx z0, z1
** lsl z0\.b, p0/m, z0\.b, \1
z0 = svlsl_wide_z (p0, z0, 8))
/*
-** lsl_wide_8_u8_z_untied: { xfail *-*-* }
+** lsl_wide_8_u8_z_untied:
** mov (z[0-9]+\.d), #8
** movprfx z0\.b, p0/z, z1\.b
** lsl z0\.b, p0/m, z0\.b, \1
z0 = svlsr_m (p0, z0, x0))
/*
-** lsr_w0_u16_m_untied: { xfail *-*-* }
+** lsr_w0_u16_m_untied:
** mov (z[0-9]+\.h), w0
** movprfx z0, z1
** lsr z0\.h, p0/m, z0\.h, \1
z0 = svlsr_m (p0, z0, x0))
/*
-** lsr_w0_u8_m_untied: { xfail *-*-* }
+** lsr_w0_u8_m_untied:
** mov (z[0-9]+\.b), w0
** movprfx z0, z1
** lsr z0\.b, p0/m, z0\.b, \1
z0 = svmad_m (p0, z0, z1, 2))
/*
-** mad_2_f16_m_untied: { xfail *-*-* }
+** mad_2_f16_m_untied:
** fmov (z[0-9]+\.h), #2\.0(?:e\+0)?
** movprfx z0, z1
** fmad z0\.h, p0/m, z2\.h, \1
z0 = svmad_m (p0, z0, z1, 2))
/*
-** mad_2_f32_m_untied: { xfail *-*-* }
+** mad_2_f32_m_untied:
** fmov (z[0-9]+\.s), #2\.0(?:e\+0)?
** movprfx z0, z1
** fmad z0\.s, p0/m, z2\.s, \1
z0 = svmad_m (p0, z0, z1, 2))
/*
-** mad_2_f64_m_untied: { xfail *-*-* }
+** mad_2_f64_m_untied:
** fmov (z[0-9]+\.d), #2\.0(?:e\+0)?
** movprfx z0, z1
** fmad z0\.d, p0/m, z2\.d, \1
z0 = svmad_m (p0, z0, z1, x0))
/*
-** mad_w0_s16_m_untied: { xfail *-*-* }
+** mad_w0_s16_m_untied:
** mov (z[0-9]+\.h), w0
** movprfx z0, z1
** mad z0\.h, p0/m, z2\.h, \1
z0 = svmad_m (p0, z0, z1, 11))
/*
-** mad_11_s16_m_untied: { xfail *-*-* }
+** mad_11_s16_m_untied:
** mov (z[0-9]+\.h), #11
** movprfx z0, z1
** mad z0\.h, p0/m, z2\.h, \1
z0 = svmad_m (p0, z0, z1, 11))
/*
-** mad_11_s32_m_untied: { xfail *-*-* }
+** mad_11_s32_m_untied:
** mov (z[0-9]+\.s), #11
** movprfx z0, z1
** mad z0\.s, p0/m, z2\.s, \1
z0 = svmad_m (p0, z0, z1, 11))
/*
-** mad_11_s64_m_untied: { xfail *-*-* }
+** mad_11_s64_m_untied:
** mov (z[0-9]+\.d), #11
** movprfx z0, z1
** mad z0\.d, p0/m, z2\.d, \1
z0 = svmad_m (p0, z0, z1, x0))
/*
-** mad_w0_s8_m_untied: { xfail *-*-* }
+** mad_w0_s8_m_untied:
** mov (z[0-9]+\.b), w0
** movprfx z0, z1
** mad z0\.b, p0/m, z2\.b, \1
z0 = svmad_m (p0, z0, z1, 11))
/*
-** mad_11_s8_m_untied: { xfail *-*-* }
+** mad_11_s8_m_untied:
** mov (z[0-9]+\.b), #11
** movprfx z0, z1
** mad z0\.b, p0/m, z2\.b, \1
z0 = svmad_m (p0, z0, z1, x0))
/*
-** mad_w0_u16_m_untied: { xfail *-*-* }
+** mad_w0_u16_m_untied:
** mov (z[0-9]+\.h), w0
** movprfx z0, z1
** mad z0\.h, p0/m, z2\.h, \1
z0 = svmad_m (p0, z0, z1, 11))
/*
-** mad_11_u16_m_untied: { xfail *-*-* }
+** mad_11_u16_m_untied:
** mov (z[0-9]+\.h), #11
** movprfx z0, z1
** mad z0\.h, p0/m, z2\.h, \1
z0 = svmad_m (p0, z0, z1, 11))
/*
-** mad_11_u32_m_untied: { xfail *-*-* }
+** mad_11_u32_m_untied:
** mov (z[0-9]+\.s), #11
** movprfx z0, z1
** mad z0\.s, p0/m, z2\.s, \1
z0 = svmad_m (p0, z0, z1, 11))
/*
-** mad_11_u64_m_untied: { xfail *-*-* }
+** mad_11_u64_m_untied:
** mov (z[0-9]+\.d), #11
** movprfx z0, z1
** mad z0\.d, p0/m, z2\.d, \1
z0 = svmad_m (p0, z0, z1, x0))
/*
-** mad_w0_u8_m_untied: { xfail *-*-* }
+** mad_w0_u8_m_untied:
** mov (z[0-9]+\.b), w0
** movprfx z0, z1
** mad z0\.b, p0/m, z2\.b, \1
z0 = svmad_m (p0, z0, z1, 11))
/*
-** mad_11_u8_m_untied: { xfail *-*-* }
+** mad_11_u8_m_untied:
** mov (z[0-9]+\.b), #11
** movprfx z0, z1
** mad z0\.b, p0/m, z2\.b, \1
z0 = svmax_m (p0, z0, x0))
/*
-** max_w0_s16_m_untied: { xfail *-*-* }
+** max_w0_s16_m_untied:
** mov (z[0-9]+\.h), w0
** movprfx z0, z1
** smax z0\.h, p0/m, z0\.h, \1
z0 = svmax_m (p0, z0, 1))
/*
-** max_1_s16_m_untied: { xfail *-*-* }
+** max_1_s16_m_untied:
** mov (z[0-9]+\.h), #1
** movprfx z0, z1
** smax z0\.h, p0/m, z0\.h, \1
z0 = svmax_m (p0, z0, 1))
/*
-** max_1_s32_m_untied: { xfail *-*-* }
+** max_1_s32_m_untied:
** mov (z[0-9]+\.s), #1
** movprfx z0, z1
** smax z0\.s, p0/m, z0\.s, \1
z0 = svmax_m (p0, z0, 1))
/*
-** max_1_s64_m_untied: { xfail *-*-* }
+** max_1_s64_m_untied:
** mov (z[0-9]+\.d), #1
** movprfx z0, z1
** smax z0\.d, p0/m, z0\.d, \1
z0 = svmax_m (p0, z0, x0))
/*
-** max_w0_s8_m_untied: { xfail *-*-* }
+** max_w0_s8_m_untied:
** mov (z[0-9]+\.b), w0
** movprfx z0, z1
** smax z0\.b, p0/m, z0\.b, \1
z0 = svmax_m (p0, z0, 1))
/*
-** max_1_s8_m_untied: { xfail *-*-* }
+** max_1_s8_m_untied:
** mov (z[0-9]+\.b), #1
** movprfx z0, z1
** smax z0\.b, p0/m, z0\.b, \1
z0 = svmax_m (p0, z0, x0))
/*
-** max_w0_u16_m_untied: { xfail *-*-* }
+** max_w0_u16_m_untied:
** mov (z[0-9]+\.h), w0
** movprfx z0, z1
** umax z0\.h, p0/m, z0\.h, \1
z0 = svmax_m (p0, z0, 1))
/*
-** max_1_u16_m_untied: { xfail *-*-* }
+** max_1_u16_m_untied:
** mov (z[0-9]+\.h), #1
** movprfx z0, z1
** umax z0\.h, p0/m, z0\.h, \1
z0 = svmax_m (p0, z0, 1))
/*
-** max_1_u32_m_untied: { xfail *-*-* }
+** max_1_u32_m_untied:
** mov (z[0-9]+\.s), #1
** movprfx z0, z1
** umax z0\.s, p0/m, z0\.s, \1
z0 = svmax_m (p0, z0, 1))
/*
-** max_1_u64_m_untied: { xfail *-*-* }
+** max_1_u64_m_untied:
** mov (z[0-9]+\.d), #1
** movprfx z0, z1
** umax z0\.d, p0/m, z0\.d, \1
z0 = svmax_m (p0, z0, x0))
/*
-** max_w0_u8_m_untied: { xfail *-*-* }
+** max_w0_u8_m_untied:
** mov (z[0-9]+\.b), w0
** movprfx z0, z1
** umax z0\.b, p0/m, z0\.b, \1
z0 = svmax_m (p0, z0, 1))
/*
-** max_1_u8_m_untied: { xfail *-*-* }
+** max_1_u8_m_untied:
** mov (z[0-9]+\.b), #1
** movprfx z0, z1
** umax z0\.b, p0/m, z0\.b, \1
z0 = svmin_m (p0, z0, x0))
/*
-** min_w0_s16_m_untied: { xfail *-*-* }
+** min_w0_s16_m_untied:
** mov (z[0-9]+\.h), w0
** movprfx z0, z1
** smin z0\.h, p0/m, z0\.h, \1
z0 = svmin_m (p0, z0, 1))
/*
-** min_1_s16_m_untied: { xfail *-*-* }
+** min_1_s16_m_untied:
** mov (z[0-9]+\.h), #1
** movprfx z0, z1
** smin z0\.h, p0/m, z0\.h, \1
z0 = svmin_m (p0, z0, 1))
/*
-** min_1_s32_m_untied: { xfail *-*-* }
+** min_1_s32_m_untied:
** mov (z[0-9]+\.s), #1
** movprfx z0, z1
** smin z0\.s, p0/m, z0\.s, \1
z0 = svmin_m (p0, z0, 1))
/*
-** min_1_s64_m_untied: { xfail *-*-* }
+** min_1_s64_m_untied:
** mov (z[0-9]+\.d), #1
** movprfx z0, z1
** smin z0\.d, p0/m, z0\.d, \1
z0 = svmin_m (p0, z0, x0))
/*
-** min_w0_s8_m_untied: { xfail *-*-* }
+** min_w0_s8_m_untied:
** mov (z[0-9]+\.b), w0
** movprfx z0, z1
** smin z0\.b, p0/m, z0\.b, \1
z0 = svmin_m (p0, z0, 1))
/*
-** min_1_s8_m_untied: { xfail *-*-* }
+** min_1_s8_m_untied:
** mov (z[0-9]+\.b), #1
** movprfx z0, z1
** smin z0\.b, p0/m, z0\.b, \1
z0 = svmin_m (p0, z0, x0))
/*
-** min_w0_u16_m_untied: { xfail *-*-* }
+** min_w0_u16_m_untied:
** mov (z[0-9]+\.h), w0
** movprfx z0, z1
** umin z0\.h, p0/m, z0\.h, \1
z0 = svmin_m (p0, z0, 1))
/*
-** min_1_u16_m_untied: { xfail *-*-* }
+** min_1_u16_m_untied:
** mov (z[0-9]+\.h), #1
** movprfx z0, z1
** umin z0\.h, p0/m, z0\.h, \1
z0 = svmin_m (p0, z0, 1))
/*
-** min_1_u32_m_untied: { xfail *-*-* }
+** min_1_u32_m_untied:
** mov (z[0-9]+\.s), #1
** movprfx z0, z1
** umin z0\.s, p0/m, z0\.s, \1
z0 = svmin_m (p0, z0, 1))
/*
-** min_1_u64_m_untied: { xfail *-*-* }
+** min_1_u64_m_untied:
** mov (z[0-9]+\.d), #1
** movprfx z0, z1
** umin z0\.d, p0/m, z0\.d, \1
z0 = svmin_m (p0, z0, x0))
/*
-** min_w0_u8_m_untied: { xfail *-*-* }
+** min_w0_u8_m_untied:
** mov (z[0-9]+\.b), w0
** movprfx z0, z1
** umin z0\.b, p0/m, z0\.b, \1
z0 = svmin_m (p0, z0, 1))
/*
-** min_1_u8_m_untied: { xfail *-*-* }
+** min_1_u8_m_untied:
** mov (z[0-9]+\.b), #1
** movprfx z0, z1
** umin z0\.b, p0/m, z0\.b, \1
z0 = svmla_m (p0, z0, z1, 2))
/*
-** mla_2_f16_m_untied: { xfail *-*-* }
+** mla_2_f16_m_untied:
** fmov (z[0-9]+\.h), #2\.0(?:e\+0)?
** movprfx z0, z1
** fmla z0\.h, p0/m, z2\.h, \1
z0 = svmla_m (p0, z0, z1, 2))
/*
-** mla_2_f32_m_untied: { xfail *-*-* }
+** mla_2_f32_m_untied:
** fmov (z[0-9]+\.s), #2\.0(?:e\+0)?
** movprfx z0, z1
** fmla z0\.s, p0/m, z2\.s, \1
z0 = svmla_m (p0, z0, z1, 2))
/*
-** mla_2_f64_m_untied: { xfail *-*-* }
+** mla_2_f64_m_untied:
** fmov (z[0-9]+\.d), #2\.0(?:e\+0)?
** movprfx z0, z1
** fmla z0\.d, p0/m, z2\.d, \1
z0 = svmla_m (p0, z0, z1, x0))
/*
-** mla_w0_s16_m_untied: { xfail *-*-* }
+** mla_w0_s16_m_untied:
** mov (z[0-9]+\.h), w0
** movprfx z0, z1
** mla z0\.h, p0/m, z2\.h, \1
z0 = svmla_m (p0, z0, z1, 11))
/*
-** mla_11_s16_m_untied: { xfail *-*-* }
+** mla_11_s16_m_untied:
** mov (z[0-9]+\.h), #11
** movprfx z0, z1
** mla z0\.h, p0/m, z2\.h, \1
z0 = svmla_m (p0, z0, z1, 11))
/*
-** mla_11_s32_m_untied: { xfail *-*-* }
+** mla_11_s32_m_untied:
** mov (z[0-9]+\.s), #11
** movprfx z0, z1
** mla z0\.s, p0/m, z2\.s, \1
z0 = svmla_m (p0, z0, z1, 11))
/*
-** mla_11_s64_m_untied: { xfail *-*-* }
+** mla_11_s64_m_untied:
** mov (z[0-9]+\.d), #11
** movprfx z0, z1
** mla z0\.d, p0/m, z2\.d, \1
z0 = svmla_m (p0, z0, z1, x0))
/*
-** mla_w0_s8_m_untied: { xfail *-*-* }
+** mla_w0_s8_m_untied:
** mov (z[0-9]+\.b), w0
** movprfx z0, z1
** mla z0\.b, p0/m, z2\.b, \1
z0 = svmla_m (p0, z0, z1, 11))
/*
-** mla_11_s8_m_untied: { xfail *-*-* }
+** mla_11_s8_m_untied:
** mov (z[0-9]+\.b), #11
** movprfx z0, z1
** mla z0\.b, p0/m, z2\.b, \1
z0 = svmla_m (p0, z0, z1, x0))
/*
-** mla_w0_u16_m_untied: { xfail *-*-* }
+** mla_w0_u16_m_untied:
** mov (z[0-9]+\.h), w0
** movprfx z0, z1
** mla z0\.h, p0/m, z2\.h, \1
z0 = svmla_m (p0, z0, z1, 11))
/*
-** mla_11_u16_m_untied: { xfail *-*-* }
+** mla_11_u16_m_untied:
** mov (z[0-9]+\.h), #11
** movprfx z0, z1
** mla z0\.h, p0/m, z2\.h, \1
z0 = svmla_m (p0, z0, z1, 11))
/*
-** mla_11_u32_m_untied: { xfail *-*-* }
+** mla_11_u32_m_untied:
** mov (z[0-9]+\.s), #11
** movprfx z0, z1
** mla z0\.s, p0/m, z2\.s, \1
z0 = svmla_m (p0, z0, z1, 11))
/*
-** mla_11_u64_m_untied: { xfail *-*-* }
+** mla_11_u64_m_untied:
** mov (z[0-9]+\.d), #11
** movprfx z0, z1
** mla z0\.d, p0/m, z2\.d, \1
z0 = svmla_m (p0, z0, z1, x0))
/*
-** mla_w0_u8_m_untied: { xfail *-*-* }
+** mla_w0_u8_m_untied:
** mov (z[0-9]+\.b), w0
** movprfx z0, z1
** mla z0\.b, p0/m, z2\.b, \1
z0 = svmla_m (p0, z0, z1, 11))
/*
-** mla_11_u8_m_untied: { xfail *-*-* }
+** mla_11_u8_m_untied:
** mov (z[0-9]+\.b), #11
** movprfx z0, z1
** mla z0\.b, p0/m, z2\.b, \1
z0 = svmls_m (p0, z0, z1, 2))
/*
-** mls_2_f16_m_untied: { xfail *-*-* }
+** mls_2_f16_m_untied:
** fmov (z[0-9]+\.h), #2\.0(?:e\+0)?
** movprfx z0, z1
** fmls z0\.h, p0/m, z2\.h, \1
z0 = svmls_m (p0, z0, z1, 2))
/*
-** mls_2_f32_m_untied: { xfail *-*-* }
+** mls_2_f32_m_untied:
** fmov (z[0-9]+\.s), #2\.0(?:e\+0)?
** movprfx z0, z1
** fmls z0\.s, p0/m, z2\.s, \1
z0 = svmls_m (p0, z0, z1, 2))
/*
-** mls_2_f64_m_untied: { xfail *-*-* }
+** mls_2_f64_m_untied:
** fmov (z[0-9]+\.d), #2\.0(?:e\+0)?
** movprfx z0, z1
** fmls z0\.d, p0/m, z2\.d, \1
z0 = svmls_m (p0, z0, z1, x0))
/*
-** mls_w0_s16_m_untied: { xfail *-*-* }
+** mls_w0_s16_m_untied:
** mov (z[0-9]+\.h), w0
** movprfx z0, z1
** mls z0\.h, p0/m, z2\.h, \1
z0 = svmls_m (p0, z0, z1, 11))
/*
-** mls_11_s16_m_untied: { xfail *-*-* }
+** mls_11_s16_m_untied:
** mov (z[0-9]+\.h), #11
** movprfx z0, z1
** mls z0\.h, p0/m, z2\.h, \1
z0 = svmls_m (p0, z0, z1, 11))
/*
-** mls_11_s32_m_untied: { xfail *-*-* }
+** mls_11_s32_m_untied:
** mov (z[0-9]+\.s), #11
** movprfx z0, z1
** mls z0\.s, p0/m, z2\.s, \1
z0 = svmls_m (p0, z0, z1, 11))
/*
-** mls_11_s64_m_untied: { xfail *-*-* }
+** mls_11_s64_m_untied:
** mov (z[0-9]+\.d), #11
** movprfx z0, z1
** mls z0\.d, p0/m, z2\.d, \1
z0 = svmls_m (p0, z0, z1, x0))
/*
-** mls_w0_s8_m_untied: { xfail *-*-* }
+** mls_w0_s8_m_untied:
** mov (z[0-9]+\.b), w0
** movprfx z0, z1
** mls z0\.b, p0/m, z2\.b, \1
z0 = svmls_m (p0, z0, z1, 11))
/*
-** mls_11_s8_m_untied: { xfail *-*-* }
+** mls_11_s8_m_untied:
** mov (z[0-9]+\.b), #11
** movprfx z0, z1
** mls z0\.b, p0/m, z2\.b, \1
z0 = svmls_m (p0, z0, z1, x0))
/*
-** mls_w0_u16_m_untied: { xfail *-*-* }
+** mls_w0_u16_m_untied:
** mov (z[0-9]+\.h), w0
** movprfx z0, z1
** mls z0\.h, p0/m, z2\.h, \1
z0 = svmls_m (p0, z0, z1, 11))
/*
-** mls_11_u16_m_untied: { xfail *-*-* }
+** mls_11_u16_m_untied:
** mov (z[0-9]+\.h), #11
** movprfx z0, z1
** mls z0\.h, p0/m, z2\.h, \1
z0 = svmls_m (p0, z0, z1, 11))
/*
-** mls_11_u32_m_untied: { xfail *-*-* }
+** mls_11_u32_m_untied:
** mov (z[0-9]+\.s), #11
** movprfx z0, z1
** mls z0\.s, p0/m, z2\.s, \1
z0 = svmls_m (p0, z0, z1, 11))
/*
-** mls_11_u64_m_untied: { xfail *-*-* }
+** mls_11_u64_m_untied:
** mov (z[0-9]+\.d), #11
** movprfx z0, z1
** mls z0\.d, p0/m, z2\.d, \1
z0 = svmls_m (p0, z0, z1, x0))
/*
-** mls_w0_u8_m_untied: { xfail *-*-* }
+** mls_w0_u8_m_untied:
** mov (z[0-9]+\.b), w0
** movprfx z0, z1
** mls z0\.b, p0/m, z2\.b, \1
z0 = svmls_m (p0, z0, z1, 11))
/*
-** mls_11_u8_m_untied: { xfail *-*-* }
+** mls_11_u8_m_untied:
** mov (z[0-9]+\.b), #11
** movprfx z0, z1
** mls z0\.b, p0/m, z2\.b, \1
z0 = svmsb_m (p0, z0, z1, 2))
/*
-** msb_2_f16_m_untied: { xfail *-*-* }
+** msb_2_f16_m_untied:
** fmov (z[0-9]+\.h), #2\.0(?:e\+0)?
** movprfx z0, z1
** fmsb z0\.h, p0/m, z2\.h, \1
z0 = svmsb_m (p0, z0, z1, 2))
/*
-** msb_2_f32_m_untied: { xfail *-*-* }
+** msb_2_f32_m_untied:
** fmov (z[0-9]+\.s), #2\.0(?:e\+0)?
** movprfx z0, z1
** fmsb z0\.s, p0/m, z2\.s, \1
z0 = svmsb_m (p0, z0, z1, 2))
/*
-** msb_2_f64_m_untied: { xfail *-*-* }
+** msb_2_f64_m_untied:
** fmov (z[0-9]+\.d), #2\.0(?:e\+0)?
** movprfx z0, z1
** fmsb z0\.d, p0/m, z2\.d, \1
z0 = svmsb_m (p0, z0, z1, x0))
/*
-** msb_w0_s16_m_untied: { xfail *-*-* }
+** msb_w0_s16_m_untied:
** mov (z[0-9]+\.h), w0
** movprfx z0, z1
** msb z0\.h, p0/m, z2\.h, \1
z0 = svmsb_m (p0, z0, z1, 11))
/*
-** msb_11_s16_m_untied: { xfail *-*-* }
+** msb_11_s16_m_untied:
** mov (z[0-9]+\.h), #11
** movprfx z0, z1
** msb z0\.h, p0/m, z2\.h, \1
z0 = svmsb_m (p0, z0, z1, 11))
/*
-** msb_11_s32_m_untied: { xfail *-*-* }
+** msb_11_s32_m_untied:
** mov (z[0-9]+\.s), #11
** movprfx z0, z1
** msb z0\.s, p0/m, z2\.s, \1
z0 = svmsb_m (p0, z0, z1, 11))
/*
-** msb_11_s64_m_untied: { xfail *-*-* }
+** msb_11_s64_m_untied:
** mov (z[0-9]+\.d), #11
** movprfx z0, z1
** msb z0\.d, p0/m, z2\.d, \1
z0 = svmsb_m (p0, z0, z1, x0))
/*
-** msb_w0_s8_m_untied: { xfail *-*-* }
+** msb_w0_s8_m_untied:
** mov (z[0-9]+\.b), w0
** movprfx z0, z1
** msb z0\.b, p0/m, z2\.b, \1
z0 = svmsb_m (p0, z0, z1, 11))
/*
-** msb_11_s8_m_untied: { xfail *-*-* }
+** msb_11_s8_m_untied:
** mov (z[0-9]+\.b), #11
** movprfx z0, z1
** msb z0\.b, p0/m, z2\.b, \1
z0 = svmsb_m (p0, z0, z1, x0))
/*
-** msb_w0_u16_m_untied: { xfail *-*-* }
+** msb_w0_u16_m_untied:
** mov (z[0-9]+\.h), w0
** movprfx z0, z1
** msb z0\.h, p0/m, z2\.h, \1
z0 = svmsb_m (p0, z0, z1, 11))
/*
-** msb_11_u16_m_untied: { xfail *-*-* }
+** msb_11_u16_m_untied:
** mov (z[0-9]+\.h), #11
** movprfx z0, z1
** msb z0\.h, p0/m, z2\.h, \1
z0 = svmsb_m (p0, z0, z1, 11))
/*
-** msb_11_u32_m_untied: { xfail *-*-* }
+** msb_11_u32_m_untied:
** mov (z[0-9]+\.s), #11
** movprfx z0, z1
** msb z0\.s, p0/m, z2\.s, \1
z0 = svmsb_m (p0, z0, z1, 11))
/*
-** msb_11_u64_m_untied: { xfail *-*-* }
+** msb_11_u64_m_untied:
** mov (z[0-9]+\.d), #11
** movprfx z0, z1
** msb z0\.d, p0/m, z2\.d, \1
z0 = svmsb_m (p0, z0, z1, x0))
/*
-** msb_w0_u8_m_untied: { xfail *-*-* }
+** msb_w0_u8_m_untied:
** mov (z[0-9]+\.b), w0
** movprfx z0, z1
** msb z0\.b, p0/m, z2\.b, \1
z0 = svmsb_m (p0, z0, z1, 11))
/*
-** msb_11_u8_m_untied: { xfail *-*-* }
+** msb_11_u8_m_untied:
** mov (z[0-9]+\.b), #11
** movprfx z0, z1
** msb z0\.b, p0/m, z2\.b, \1
z0 = svmul_m (p0, z0, 1))
/*
-** mul_1_f16_m_untied: { xfail *-*-* }
+** mul_1_f16_m_untied:
** fmov (z[0-9]+\.h), #1\.0(?:e\+0)?
** movprfx z0, z1
** fmul z0\.h, p0/m, z0\.h, \1
z0 = svmul_m (p0, z0, 1))
/*
-** mul_1_f16_m_untied: { xfail *-*-* }
+** mul_1_f16_m_untied:
** fmov (z[0-9]+\.h), #1\.0(?:e\+0)?
** movprfx z0, z1
** fmul z0\.h, p0/m, z0\.h, \1
z0 = svmul_m (p0, z0, 1))
/*
-** mul_1_f32_m_untied: { xfail *-*-* }
+** mul_1_f32_m_untied:
** fmov (z[0-9]+\.s), #1\.0(?:e\+0)?
** movprfx z0, z1
** fmul z0\.s, p0/m, z0\.s, \1
z0 = svmul_m (p0, z0, 1))
/*
-** mul_1_f32_m_untied: { xfail *-*-* }
+** mul_1_f32_m_untied:
** fmov (z[0-9]+\.s), #1\.0(?:e\+0)?
** movprfx z0, z1
** fmul z0\.s, p0/m, z0\.s, \1
z0 = svmul_m (p0, z0, 1))
/*
-** mul_1_f64_m_untied: { xfail *-*-* }
+** mul_1_f64_m_untied:
** fmov (z[0-9]+\.d), #1\.0(?:e\+0)?
** movprfx z0, z1
** fmul z0\.d, p0/m, z0\.d, \1
z0 = svmul_m (p0, z0, 1))
/*
-** mul_1_f64_m_untied: { xfail *-*-* }
+** mul_1_f64_m_untied:
** fmov (z[0-9]+\.d), #1\.0(?:e\+0)?
** movprfx z0, z1
** fmul z0\.d, p0/m, z0\.d, \1
z0 = svmul_m (p0, z0, x0))
/*
-** mul_w0_s16_m_untied: { xfail *-*-* }
+** mul_w0_s16_m_untied:
** mov (z[0-9]+\.h), w0
** movprfx z0, z1
** mul z0\.h, p0/m, z0\.h, \1
z0 = svmul_m (p0, z0, 2))
/*
-** mul_2_s16_m_untied: { xfail *-*-* }
+** mul_2_s16_m_untied:
** mov (z[0-9]+\.h), #2
** movprfx z0, z1
** mul z0\.h, p0/m, z0\.h, \1
z0 = svmul_m (p0, z0, 2))
/*
-** mul_2_s32_m_untied: { xfail *-*-* }
+** mul_2_s32_m_untied:
** mov (z[0-9]+\.s), #2
** movprfx z0, z1
** mul z0\.s, p0/m, z0\.s, \1
z0 = svmul_m (p0, z0, 2))
/*
-** mul_2_s64_m_untied: { xfail *-*-* }
+** mul_2_s64_m_untied:
** mov (z[0-9]+\.d), #2
** movprfx z0, z1
** mul z0\.d, p0/m, z0\.d, \1
z0 = svmul_m (p0, z0, x0))
/*
-** mul_w0_s8_m_untied: { xfail *-*-* }
+** mul_w0_s8_m_untied:
** mov (z[0-9]+\.b), w0
** movprfx z0, z1
** mul z0\.b, p0/m, z0\.b, \1
z0 = svmul_m (p0, z0, 2))
/*
-** mul_2_s8_m_untied: { xfail *-*-* }
+** mul_2_s8_m_untied:
** mov (z[0-9]+\.b), #2
** movprfx z0, z1
** mul z0\.b, p0/m, z0\.b, \1
z0 = svmul_m (p0, z0, x0))
/*
-** mul_w0_u16_m_untied: { xfail *-*-* }
+** mul_w0_u16_m_untied:
** mov (z[0-9]+\.h), w0
** movprfx z0, z1
** mul z0\.h, p0/m, z0\.h, \1
z0 = svmul_m (p0, z0, 2))
/*
-** mul_2_u16_m_untied: { xfail *-*-* }
+** mul_2_u16_m_untied:
** mov (z[0-9]+\.h), #2
** movprfx z0, z1
** mul z0\.h, p0/m, z0\.h, \1
z0 = svmul_m (p0, z0, 2))
/*
-** mul_2_u32_m_untied: { xfail *-*-* }
+** mul_2_u32_m_untied:
** mov (z[0-9]+\.s), #2
** movprfx z0, z1
** mul z0\.s, p0/m, z0\.s, \1
z0 = svmul_m (p0, z0, 2))
/*
-** mul_2_u64_m_untied: { xfail *-*-* }
+** mul_2_u64_m_untied:
** mov (z[0-9]+\.d), #2
** movprfx z0, z1
** mul z0\.d, p0/m, z0\.d, \1
z0 = svmul_m (p0, z0, x0))
/*
-** mul_w0_u8_m_untied: { xfail *-*-* }
+** mul_w0_u8_m_untied:
** mov (z[0-9]+\.b), w0
** movprfx z0, z1
** mul z0\.b, p0/m, z0\.b, \1
z0 = svmul_m (p0, z0, 2))
/*
-** mul_2_u8_m_untied: { xfail *-*-* }
+** mul_2_u8_m_untied:
** mov (z[0-9]+\.b), #2
** movprfx z0, z1
** mul z0\.b, p0/m, z0\.b, \1
z0 = svmulh_m (p0, z0, x0))
/*
-** mulh_w0_s16_m_untied: { xfail *-*-* }
+** mulh_w0_s16_m_untied:
** mov (z[0-9]+\.h), w0
** movprfx z0, z1
** smulh z0\.h, p0/m, z0\.h, \1
z0 = svmulh_m (p0, z0, 11))
/*
-** mulh_11_s16_m_untied: { xfail *-*-* }
+** mulh_11_s16_m_untied:
** mov (z[0-9]+\.h), #11
** movprfx z0, z1
** smulh z0\.h, p0/m, z0\.h, \1
z0 = svmulh_m (p0, z0, 11))
/*
-** mulh_11_s32_m_untied: { xfail *-*-* }
+** mulh_11_s32_m_untied:
** mov (z[0-9]+\.s), #11
** movprfx z0, z1
** smulh z0\.s, p0/m, z0\.s, \1
z0 = svmulh_m (p0, z0, 11))
/*
-** mulh_11_s64_m_untied: { xfail *-*-* }
+** mulh_11_s64_m_untied:
** mov (z[0-9]+\.d), #11
** movprfx z0, z1
** smulh z0\.d, p0/m, z0\.d, \1
z0 = svmulh_m (p0, z0, x0))
/*
-** mulh_w0_s8_m_untied: { xfail *-*-* }
+** mulh_w0_s8_m_untied:
** mov (z[0-9]+\.b), w0
** movprfx z0, z1
** smulh z0\.b, p0/m, z0\.b, \1
z0 = svmulh_m (p0, z0, 11))
/*
-** mulh_11_s8_m_untied: { xfail *-*-* }
+** mulh_11_s8_m_untied:
** mov (z[0-9]+\.b), #11
** movprfx z0, z1
** smulh z0\.b, p0/m, z0\.b, \1
z0 = svmulh_m (p0, z0, x0))
/*
-** mulh_w0_u16_m_untied: { xfail *-*-* }
+** mulh_w0_u16_m_untied:
** mov (z[0-9]+\.h), w0
** movprfx z0, z1
** umulh z0\.h, p0/m, z0\.h, \1
z0 = svmulh_m (p0, z0, 11))
/*
-** mulh_11_u16_m_untied: { xfail *-*-* }
+** mulh_11_u16_m_untied:
** mov (z[0-9]+\.h), #11
** movprfx z0, z1
** umulh z0\.h, p0/m, z0\.h, \1
z0 = svmulh_m (p0, z0, 11))
/*
-** mulh_11_u32_m_untied: { xfail *-*-* }
+** mulh_11_u32_m_untied:
** mov (z[0-9]+\.s), #11
** movprfx z0, z1
** umulh z0\.s, p0/m, z0\.s, \1
z0 = svmulh_m (p0, z0, 11))
/*
-** mulh_11_u64_m_untied: { xfail *-*-* }
+** mulh_11_u64_m_untied:
** mov (z[0-9]+\.d), #11
** movprfx z0, z1
** umulh z0\.d, p0/m, z0\.d, \1
z0 = svmulh_m (p0, z0, x0))
/*
-** mulh_w0_u8_m_untied: { xfail *-*-* }
+** mulh_w0_u8_m_untied:
** mov (z[0-9]+\.b), w0
** movprfx z0, z1
** umulh z0\.b, p0/m, z0\.b, \1
z0 = svmulh_m (p0, z0, 11))
/*
-** mulh_11_u8_m_untied: { xfail *-*-* }
+** mulh_11_u8_m_untied:
** mov (z[0-9]+\.b), #11
** movprfx z0, z1
** umulh z0\.b, p0/m, z0\.b, \1
z0 = svmulx_m (p0, z0, 1))
/*
-** mulx_1_f16_m_untied: { xfail *-*-* }
+** mulx_1_f16_m_untied:
** fmov (z[0-9]+\.h), #1\.0(?:e\+0)?
** movprfx z0, z1
** fmulx z0\.h, p0/m, z0\.h, \1
z0 = svmulx_m (p0, z0, 0.5))
/*
-** mulx_0p5_f16_m_untied: { xfail *-*-* }
+** mulx_0p5_f16_m_untied:
** fmov (z[0-9]+\.h), #(?:0\.5|5\.0e-1)
** movprfx z0, z1
** fmulx z0\.h, p0/m, z0\.h, \1
z0 = svmulx_m (p0, z0, 2))
/*
-** mulx_2_f16_m_untied: { xfail *-*-* }
+** mulx_2_f16_m_untied:
** fmov (z[0-9]+\.h), #2\.0(?:e\+0)?
** movprfx z0, z1
** fmulx z0\.h, p0/m, z0\.h, \1
z0 = svmulx_m (p0, z0, 1))
/*
-** mulx_1_f32_m_untied: { xfail *-*-* }
+** mulx_1_f32_m_untied:
** fmov (z[0-9]+\.s), #1\.0(?:e\+0)?
** movprfx z0, z1
** fmulx z0\.s, p0/m, z0\.s, \1
z0 = svmulx_m (p0, z0, 0.5))
/*
-** mulx_0p5_f32_m_untied: { xfail *-*-* }
+** mulx_0p5_f32_m_untied:
** fmov (z[0-9]+\.s), #(?:0\.5|5\.0e-1)
** movprfx z0, z1
** fmulx z0\.s, p0/m, z0\.s, \1
z0 = svmulx_m (p0, z0, 2))
/*
-** mulx_2_f32_m_untied: { xfail *-*-* }
+** mulx_2_f32_m_untied:
** fmov (z[0-9]+\.s), #2\.0(?:e\+0)?
** movprfx z0, z1
** fmulx z0\.s, p0/m, z0\.s, \1
z0 = svmulx_m (p0, z0, 1))
/*
-** mulx_1_f64_m_untied: { xfail *-*-* }
+** mulx_1_f64_m_untied:
** fmov (z[0-9]+\.d), #1\.0(?:e\+0)?
** movprfx z0, z1
** fmulx z0\.d, p0/m, z0\.d, \1
z0 = svmulx_m (p0, z0, 0.5))
/*
-** mulx_0p5_f64_m_untied: { xfail *-*-* }
+** mulx_0p5_f64_m_untied:
** fmov (z[0-9]+\.d), #(?:0\.5|5\.0e-1)
** movprfx z0, z1
** fmulx z0\.d, p0/m, z0\.d, \1
z0 = svmulx_m (p0, z0, 2))
/*
-** mulx_2_f64_m_untied: { xfail *-*-* }
+** mulx_2_f64_m_untied:
** fmov (z[0-9]+\.d), #2\.0(?:e\+0)?
** movprfx z0, z1
** fmulx z0\.d, p0/m, z0\.d, \1
z0 = svnmad_m (p0, z0, z1, 2))
/*
-** nmad_2_f16_m_untied: { xfail *-*-* }
+** nmad_2_f16_m_untied:
** fmov (z[0-9]+\.h), #2\.0(?:e\+0)?
** movprfx z0, z1
** fnmad z0\.h, p0/m, z2\.h, \1
z0 = svnmad_m (p0, z0, z1, 2))
/*
-** nmad_2_f32_m_untied: { xfail *-*-* }
+** nmad_2_f32_m_untied:
** fmov (z[0-9]+\.s), #2\.0(?:e\+0)?
** movprfx z0, z1
** fnmad z0\.s, p0/m, z2\.s, \1
z0 = svnmad_m (p0, z0, z1, 2))
/*
-** nmad_2_f64_m_untied: { xfail *-*-* }
+** nmad_2_f64_m_untied:
** fmov (z[0-9]+\.d), #2\.0(?:e\+0)?
** movprfx z0, z1
** fnmad z0\.d, p0/m, z2\.d, \1
z0 = svnmla_m (p0, z0, z1, 2))
/*
-** nmla_2_f16_m_untied: { xfail *-*-* }
+** nmla_2_f16_m_untied:
** fmov (z[0-9]+\.h), #2\.0(?:e\+0)?
** movprfx z0, z1
** fnmla z0\.h, p0/m, z2\.h, \1
z0 = svnmla_m (p0, z0, z1, 2))
/*
-** nmla_2_f32_m_untied: { xfail *-*-* }
+** nmla_2_f32_m_untied:
** fmov (z[0-9]+\.s), #2\.0(?:e\+0)?
** movprfx z0, z1
** fnmla z0\.s, p0/m, z2\.s, \1
z0 = svnmla_m (p0, z0, z1, 2))
/*
-** nmla_2_f64_m_untied: { xfail *-*-* }
+** nmla_2_f64_m_untied:
** fmov (z[0-9]+\.d), #2\.0(?:e\+0)?
** movprfx z0, z1
** fnmla z0\.d, p0/m, z2\.d, \1
z0 = svnmls_m (p0, z0, z1, 2))
/*
-** nmls_2_f16_m_untied: { xfail *-*-* }
+** nmls_2_f16_m_untied:
** fmov (z[0-9]+\.h), #2\.0(?:e\+0)?
** movprfx z0, z1
** fnmls z0\.h, p0/m, z2\.h, \1
z0 = svnmls_m (p0, z0, z1, 2))
/*
-** nmls_2_f32_m_untied: { xfail *-*-* }
+** nmls_2_f32_m_untied:
** fmov (z[0-9]+\.s), #2\.0(?:e\+0)?
** movprfx z0, z1
** fnmls z0\.s, p0/m, z2\.s, \1
z0 = svnmls_m (p0, z0, z1, 2))
/*
-** nmls_2_f64_m_untied: { xfail *-*-* }
+** nmls_2_f64_m_untied:
** fmov (z[0-9]+\.d), #2\.0(?:e\+0)?
** movprfx z0, z1
** fnmls z0\.d, p0/m, z2\.d, \1
z0 = svnmsb_m (p0, z0, z1, 2))
/*
-** nmsb_2_f16_m_untied: { xfail *-*-* }
+** nmsb_2_f16_m_untied:
** fmov (z[0-9]+\.h), #2\.0(?:e\+0)?
** movprfx z0, z1
** fnmsb z0\.h, p0/m, z2\.h, \1
z0 = svnmsb_m (p0, z0, z1, 2))
/*
-** nmsb_2_f32_m_untied: { xfail *-*-* }
+** nmsb_2_f32_m_untied:
** fmov (z[0-9]+\.s), #2\.0(?:e\+0)?
** movprfx z0, z1
** fnmsb z0\.s, p0/m, z2\.s, \1
z0 = svnmsb_m (p0, z0, z1, 2))
/*
-** nmsb_2_f64_m_untied: { xfail *-*-* }
+** nmsb_2_f64_m_untied:
** fmov (z[0-9]+\.d), #2\.0(?:e\+0)?
** movprfx z0, z1
** fnmsb z0\.d, p0/m, z2\.d, \1
z0 = svorr_m (p0, z0, x0))
/*
-** orr_w0_s16_m_untied: { xfail *-*-* }
+** orr_w0_s16_m_untied:
** mov (z[0-9]+\.h), w0
** movprfx z0, z1
** orr z0\.h, p0/m, z0\.h, \1
z0 = svorr_m (p0, z0, 1))
/*
-** orr_1_s16_m_untied: { xfail *-*-* }
+** orr_1_s16_m_untied:
** mov (z[0-9]+\.h), #1
** movprfx z0, z1
** orr z0\.h, p0/m, z0\.h, \1
z0 = svorr_m (p0, z0, 1))
/*
-** orr_1_s32_m_untied: { xfail *-*-* }
+** orr_1_s32_m_untied:
** mov (z[0-9]+\.s), #1
** movprfx z0, z1
** orr z0\.s, p0/m, z0\.s, \1
z0 = svorr_m (p0, z0, 1))
/*
-** orr_1_s64_m_untied: { xfail *-*-* }
+** orr_1_s64_m_untied:
** mov (z[0-9]+\.d), #1
** movprfx z0, z1
** orr z0\.d, p0/m, z0\.d, \1
z0 = svorr_m (p0, z0, x0))
/*
-** orr_w0_s8_m_untied: { xfail *-*-* }
+** orr_w0_s8_m_untied:
** mov (z[0-9]+\.b), w0
** movprfx z0, z1
** orr z0\.b, p0/m, z0\.b, \1
z0 = svorr_m (p0, z0, 1))
/*
-** orr_1_s8_m_untied: { xfail *-*-* }
+** orr_1_s8_m_untied:
** mov (z[0-9]+\.b), #1
** movprfx z0, z1
** orr z0\.b, p0/m, z0\.b, \1
z0 = svorr_m (p0, z0, x0))
/*
-** orr_w0_u16_m_untied: { xfail *-*-* }
+** orr_w0_u16_m_untied:
** mov (z[0-9]+\.h), w0
** movprfx z0, z1
** orr z0\.h, p0/m, z0\.h, \1
z0 = svorr_m (p0, z0, 1))
/*
-** orr_1_u16_m_untied: { xfail *-*-* }
+** orr_1_u16_m_untied:
** mov (z[0-9]+\.h), #1
** movprfx z0, z1
** orr z0\.h, p0/m, z0\.h, \1
z0 = svorr_m (p0, z0, 1))
/*
-** orr_1_u32_m_untied: { xfail *-*-* }
+** orr_1_u32_m_untied:
** mov (z[0-9]+\.s), #1
** movprfx z0, z1
** orr z0\.s, p0/m, z0\.s, \1
z0 = svorr_m (p0, z0, 1))
/*
-** orr_1_u64_m_untied: { xfail *-*-* }
+** orr_1_u64_m_untied:
** mov (z[0-9]+\.d), #1
** movprfx z0, z1
** orr z0\.d, p0/m, z0\.d, \1
z0 = svorr_m (p0, z0, x0))
/*
-** orr_w0_u8_m_untied: { xfail *-*-* }
+** orr_w0_u8_m_untied:
** mov (z[0-9]+\.b), w0
** movprfx z0, z1
** orr z0\.b, p0/m, z0\.b, \1
z0 = svorr_m (p0, z0, 1))
/*
-** orr_1_u8_m_untied: { xfail *-*-* }
+** orr_1_u8_m_untied:
** mov (z[0-9]+\.b), #1
** movprfx z0, z1
** orr z0\.b, p0/m, z0\.b, \1
z0 = svscale_m (p0, z0, x0))
/*
-** scale_w0_f16_m_untied: { xfail *-*-* }
+** scale_w0_f16_m_untied:
** mov (z[0-9]+\.h), w0
** movprfx z0, z1
** fscale z0\.h, p0/m, z0\.h, \1
z0 = svscale_m (p0, z0, 3))
/*
-** scale_3_f16_m_untied: { xfail *-*-* }
+** scale_3_f16_m_untied:
** mov (z[0-9]+\.h), #3
** movprfx z0, z1
** fscale z0\.h, p0/m, z0\.h, \1
z0 = svscale_z (p0, z0, x0))
/*
-** scale_w0_f16_z_untied: { xfail *-*-* }
+** scale_w0_f16_z_untied:
** mov (z[0-9]+\.h), w0
** movprfx z0\.h, p0/z, z1\.h
** fscale z0\.h, p0/m, z0\.h, \1
z0 = svscale_z (p0, z0, 3))
/*
-** scale_3_f16_z_untied: { xfail *-*-* }
+** scale_3_f16_z_untied:
** mov (z[0-9]+\.h), #3
** movprfx z0\.h, p0/z, z1\.h
** fscale z0\.h, p0/m, z0\.h, \1
z0 = svscale_x (p0, z0, x0))
/*
-** scale_w0_f16_x_untied: { xfail *-*-* }
+** scale_w0_f16_x_untied:
** mov (z[0-9]+\.h), w0
** movprfx z0, z1
** fscale z0\.h, p0/m, z0\.h, \1
z0 = svscale_x (p0, z0, 3))
/*
-** scale_3_f16_x_untied: { xfail *-*-* }
+** scale_3_f16_x_untied:
** mov (z[0-9]+\.h), #3
** movprfx z0, z1
** fscale z0\.h, p0/m, z0\.h, \1
z0 = svscale_m (p0, z0, 3))
/*
-** scale_3_f32_m_untied: { xfail *-*-* }
+** scale_3_f32_m_untied:
** mov (z[0-9]+\.s), #3
** movprfx z0, z1
** fscale z0\.s, p0/m, z0\.s, \1
z0 = svscale_z (p0, z0, 3))
/*
-** scale_3_f32_z_untied: { xfail *-*-* }
+** scale_3_f32_z_untied:
** mov (z[0-9]+\.s), #3
** movprfx z0\.s, p0/z, z1\.s
** fscale z0\.s, p0/m, z0\.s, \1
z0 = svscale_x (p0, z0, 3))
/*
-** scale_3_f32_x_untied: { xfail *-*-* }
+** scale_3_f32_x_untied:
** mov (z[0-9]+\.s), #3
** movprfx z0, z1
** fscale z0\.s, p0/m, z0\.s, \1
z0 = svscale_m (p0, z0, 3))
/*
-** scale_3_f64_m_untied: { xfail *-*-* }
+** scale_3_f64_m_untied:
** mov (z[0-9]+\.d), #3
** movprfx z0, z1
** fscale z0\.d, p0/m, z0\.d, \1
z0 = svscale_z (p0, z0, 3))
/*
-** scale_3_f64_z_untied: { xfail *-*-* }
+** scale_3_f64_z_untied:
** mov (z[0-9]+\.d), #3
** movprfx z0\.d, p0/z, z1\.d
** fscale z0\.d, p0/m, z0\.d, \1
z0 = svscale_x (p0, z0, 3))
/*
-** scale_3_f64_x_untied: { xfail *-*-* }
+** scale_3_f64_x_untied:
** mov (z[0-9]+\.d), #3
** movprfx z0, z1
** fscale z0\.d, p0/m, z0\.d, \1
z0 = svsub_m (p0, z0, x0))
/*
-** sub_w0_s16_m_untied: { xfail *-*-* }
+** sub_w0_s16_m_untied:
** mov (z[0-9]+\.h), w0
** movprfx z0, z1
** sub z0\.h, p0/m, z0\.h, \1
z0 = svsub_m (p0, z0, 1))
/*
-** sub_1_s16_m_untied: { xfail *-*-* }
+** sub_1_s16_m_untied:
** mov (z[0-9]+)\.b, #-1
** movprfx z0, z1
** add z0\.h, p0/m, z0\.h, \1\.h
z0 = svsub_m (p0, z0, 1))
/*
-** sub_1_s32_m_untied: { xfail *-*-* }
+** sub_1_s32_m_untied:
** mov (z[0-9]+)\.b, #-1
** movprfx z0, z1
** add z0\.s, p0/m, z0\.s, \1\.s
z0 = svsub_m (p0, z0, 1))
/*
-** sub_1_s64_m_untied: { xfail *-*-* }
+** sub_1_s64_m_untied:
** mov (z[0-9]+)\.b, #-1
** movprfx z0, z1
** add z0\.d, p0/m, z0\.d, \1\.d
z0 = svsub_m (p0, z0, x0))
/*
-** sub_w0_s8_m_untied: { xfail *-*-* }
+** sub_w0_s8_m_untied:
** mov (z[0-9]+\.b), w0
** movprfx z0, z1
** sub z0\.b, p0/m, z0\.b, \1
z0 = svsub_m (p0, z0, 1))
/*
-** sub_1_s8_m_untied: { xfail *-*-* }
+** sub_1_s8_m_untied:
** mov (z[0-9]+\.b), #-1
** movprfx z0, z1
** add z0\.b, p0/m, z0\.b, \1
z0 = svsub_m (p0, z0, x0))
/*
-** sub_w0_u16_m_untied: { xfail *-*-* }
+** sub_w0_u16_m_untied:
** mov (z[0-9]+\.h), w0
** movprfx z0, z1
** sub z0\.h, p0/m, z0\.h, \1
z0 = svsub_m (p0, z0, 1))
/*
-** sub_1_u16_m_untied: { xfail *-*-* }
+** sub_1_u16_m_untied:
** mov (z[0-9]+)\.b, #-1
** movprfx z0, z1
** add z0\.h, p0/m, z0\.h, \1\.h
z0 = svsub_m (p0, z0, 1))
/*
-** sub_1_u32_m_untied: { xfail *-*-* }
+** sub_1_u32_m_untied:
** mov (z[0-9]+)\.b, #-1
** movprfx z0, z1
** add z0\.s, p0/m, z0\.s, \1\.s
z0 = svsub_m (p0, z0, 1))
/*
-** sub_1_u64_m_untied: { xfail *-*-* }
+** sub_1_u64_m_untied:
** mov (z[0-9]+)\.b, #-1
** movprfx z0, z1
** add z0\.d, p0/m, z0\.d, \1\.d
z0 = svsub_m (p0, z0, x0))
/*
-** sub_w0_u8_m_untied: { xfail *-*-* }
+** sub_w0_u8_m_untied:
** mov (z[0-9]+\.b), w0
** movprfx z0, z1
** sub z0\.b, p0/m, z0\.b, \1
z0 = svsub_m (p0, z0, 1))
/*
-** sub_1_u8_m_untied: { xfail *-*-* }
+** sub_1_u8_m_untied:
** mov (z[0-9]+\.b), #-1
** movprfx z0, z1
** add z0\.b, p0/m, z0\.b, \1
z0 = svsubr_m (p0, z0, -1))
/*
-** subr_m1_f16_m_untied: { xfail *-*-* }
+** subr_m1_f16_m_untied:
** fmov (z[0-9]+\.h), #-1\.0(?:e\+0)?
** movprfx z0, z1
** fsubr z0\.h, p0/m, z0\.h, \1
z0 = svsubr_m (p0, z0, -1))
/*
-** subr_m1_f16_m_untied: { xfail *-*-* }
+** subr_m1_f16_m_untied:
** fmov (z[0-9]+\.h), #-1\.0(?:e\+0)?
** movprfx z0, z1
** fsubr z0\.h, p0/m, z0\.h, \1
z0 = svsubr_m (p0, z0, -1))
/*
-** subr_m1_f32_m_untied: { xfail *-*-* }
+** subr_m1_f32_m_untied:
** fmov (z[0-9]+\.s), #-1\.0(?:e\+0)?
** movprfx z0, z1
** fsubr z0\.s, p0/m, z0\.s, \1
z0 = svsubr_m (p0, z0, -1))
/*
-** subr_m1_f32_m_untied: { xfail *-*-* }
+** subr_m1_f32_m_untied:
** fmov (z[0-9]+\.s), #-1\.0(?:e\+0)?
** movprfx z0, z1
** fsubr z0\.s, p0/m, z0\.s, \1
z0 = svsubr_m (p0, z0, -1))
/*
-** subr_m1_f64_m_untied: { xfail *-*-* }
+** subr_m1_f64_m_untied:
** fmov (z[0-9]+\.d), #-1\.0(?:e\+0)?
** movprfx z0, z1
** fsubr z0\.d, p0/m, z0\.d, \1
z0 = svsubr_m (p0, z0, -1))
/*
-** subr_m1_f64_m_untied: { xfail *-*-* }
+** subr_m1_f64_m_untied:
** fmov (z[0-9]+\.d), #-1\.0(?:e\+0)?
** movprfx z0, z1
** fsubr z0\.d, p0/m, z0\.d, \1
z0 = svsubr_m (p0, z0, x0))
/*
-** subr_w0_s16_m_untied: { xfail *-*-* }
+** subr_w0_s16_m_untied:
** mov (z[0-9]+\.h), w0
** movprfx z0, z1
** subr z0\.h, p0/m, z0\.h, \1
z0 = svsubr_m (p0, z0, 1))
/*
-** subr_1_s16_m_untied: { xfail *-*-* }
+** subr_1_s16_m_untied:
** mov (z[0-9]+\.h), #1
** movprfx z0, z1
** subr z0\.h, p0/m, z0\.h, \1
z0 = svsubr_m (p0, z0, 1))
/*
-** subr_1_s32_m_untied: { xfail *-*-* }
+** subr_1_s32_m_untied:
** mov (z[0-9]+\.s), #1
** movprfx z0, z1
** subr z0\.s, p0/m, z0\.s, \1
z0 = svsubr_m (p0, z0, 1))
/*
-** subr_1_s64_m_untied: { xfail *-*-* }
+** subr_1_s64_m_untied:
** mov (z[0-9]+\.d), #1
** movprfx z0, z1
** subr z0\.d, p0/m, z0\.d, \1
z0 = svsubr_m (p0, z0, x0))
/*
-** subr_w0_s8_m_untied: { xfail *-*-* }
+** subr_w0_s8_m_untied:
** mov (z[0-9]+\.b), w0
** movprfx z0, z1
** subr z0\.b, p0/m, z0\.b, \1
z0 = svsubr_m (p0, z0, 1))
/*
-** subr_1_s8_m_untied: { xfail *-*-* }
+** subr_1_s8_m_untied:
** mov (z[0-9]+\.b), #1
** movprfx z0, z1
** subr z0\.b, p0/m, z0\.b, \1
z0 = svsubr_m (p0, z0, x0))
/*
-** subr_w0_u16_m_untied: { xfail *-*-* }
+** subr_w0_u16_m_untied:
** mov (z[0-9]+\.h), w0
** movprfx z0, z1
** subr z0\.h, p0/m, z0\.h, \1
z0 = svsubr_m (p0, z0, 1))
/*
-** subr_1_u16_m_untied: { xfail *-*-* }
+** subr_1_u16_m_untied:
** mov (z[0-9]+\.h), #1
** movprfx z0, z1
** subr z0\.h, p0/m, z0\.h, \1
z0 = svsubr_m (p0, z0, 1))
/*
-** subr_1_u32_m_untied: { xfail *-*-* }
+** subr_1_u32_m_untied:
** mov (z[0-9]+\.s), #1
** movprfx z0, z1
** subr z0\.s, p0/m, z0\.s, \1
z0 = svsubr_m (p0, z0, 1))
/*
-** subr_1_u64_m_untied: { xfail *-*-* }
+** subr_1_u64_m_untied:
** mov (z[0-9]+\.d), #1
** movprfx z0, z1
** subr z0\.d, p0/m, z0\.d, \1
z0 = svsubr_m (p0, z0, x0))
/*
-** subr_w0_u8_m_untied: { xfail *-*-* }
+** subr_w0_u8_m_untied:
** mov (z[0-9]+\.b), w0
** movprfx z0, z1
** subr z0\.b, p0/m, z0\.b, \1
z0 = svsubr_m (p0, z0, 1))
/*
-** subr_1_u8_m_untied: { xfail *-*-* }
+** subr_1_u8_m_untied:
** mov (z[0-9]+\.b), #1
** movprfx z0, z1
** subr z0\.b, p0/m, z0\.b, \1
z0 = svbcax (z1, z0, x0))
/*
-** bcax_w0_s16_untied: { xfail *-*-*}
+** bcax_w0_s16_untied:
** mov (z[0-9]+)\.h, w0
** movprfx z0, z1
** bcax z0\.d, z0\.d, (z2\.d, \1\.d|\1\.d, z2\.d)
z0 = svbcax (z1, z0, 11))
/*
-** bcax_11_s16_untied: { xfail *-*-*}
+** bcax_11_s16_untied:
** mov (z[0-9]+)\.h, #11
** movprfx z0, z1
** bcax z0\.d, z0\.d, (z2\.d, \1\.d|\1\.d, z2\.d)
z0 = svbcax (z1, z0, 11))
/*
-** bcax_11_s32_untied: { xfail *-*-*}
+** bcax_11_s32_untied:
** mov (z[0-9]+)\.s, #11
** movprfx z0, z1
** bcax z0\.d, z0\.d, (z2\.d, \1\.d|\1\.d, z2\.d)
z0 = svbcax (z1, z0, 11))
/*
-** bcax_11_s64_untied: { xfail *-*-*}
+** bcax_11_s64_untied:
** mov (z[0-9]+\.d), #11
** movprfx z0, z1
** bcax z0\.d, z0\.d, (z2\.d, \1|\1, z2\.d)
z0 = svbcax (z1, z0, x0))
/*
-** bcax_w0_s8_untied: { xfail *-*-*}
+** bcax_w0_s8_untied:
** mov (z[0-9]+)\.b, w0
** movprfx z0, z1
** bcax z0\.d, z0\.d, (z2\.d, \1\.d|\1\.d, z2\.d)
z0 = svbcax (z1, z0, 11))
/*
-** bcax_11_s8_untied: { xfail *-*-*}
+** bcax_11_s8_untied:
** mov (z[0-9]+)\.b, #11
** movprfx z0, z1
** bcax z0\.d, z0\.d, (z2\.d, \1\.d|\1\.d, z2\.d)
z0 = svbcax (z1, z0, x0))
/*
-** bcax_w0_u16_untied: { xfail *-*-*}
+** bcax_w0_u16_untied:
** mov (z[0-9]+)\.h, w0
** movprfx z0, z1
** bcax z0\.d, z0\.d, (z2\.d, \1\.d|\1\.d, z2\.d)
z0 = svbcax (z1, z0, 11))
/*
-** bcax_11_u16_untied: { xfail *-*-*}
+** bcax_11_u16_untied:
** mov (z[0-9]+)\.h, #11
** movprfx z0, z1
** bcax z0\.d, z0\.d, (z2\.d, \1\.d|\1\.d, z2\.d)
z0 = svbcax (z1, z0, 11))
/*
-** bcax_11_u32_untied: { xfail *-*-*}
+** bcax_11_u32_untied:
** mov (z[0-9]+)\.s, #11
** movprfx z0, z1
** bcax z0\.d, z0\.d, (z2\.d, \1\.d|\1\.d, z2\.d)
z0 = svbcax (z1, z0, 11))
/*
-** bcax_11_u64_untied: { xfail *-*-*}
+** bcax_11_u64_untied:
** mov (z[0-9]+\.d), #11
** movprfx z0, z1
** bcax z0\.d, z0\.d, (z2\.d, \1|\1, z2\.d)
z0 = svbcax (z1, z0, x0))
/*
-** bcax_w0_u8_untied: { xfail *-*-*}
+** bcax_w0_u8_untied:
** mov (z[0-9]+)\.b, w0
** movprfx z0, z1
** bcax z0\.d, z0\.d, (z2\.d, \1\.d|\1\.d, z2\.d)
z0 = svbcax (z1, z0, 11))
/*
-** bcax_11_u8_untied: { xfail *-*-*}
+** bcax_11_u8_untied:
** mov (z[0-9]+)\.b, #11
** movprfx z0, z1
** bcax z0\.d, z0\.d, (z2\.d, \1\.d|\1\.d, z2\.d)
z0 = svqadd_m (p0, z0, x0))
/*
-** qadd_w0_s16_m_untied: { xfail *-*-* }
+** qadd_w0_s16_m_untied:
** mov (z[0-9]+\.h), w0
** movprfx z0, z1
** sqadd z0\.h, p0/m, z0\.h, \1
z0 = svqadd_m (p0, z0, 1))
/*
-** qadd_1_s16_m_untied: { xfail *-*-* }
+** qadd_1_s16_m_untied:
** mov (z[0-9]+\.h), #1
** movprfx z0, z1
** sqadd z0\.h, p0/m, z0\.h, \1
z0 = svqadd_m (p0, z0, 1))
/*
-** qadd_1_s32_m_untied: { xfail *-*-* }
+** qadd_1_s32_m_untied:
** mov (z[0-9]+\.s), #1
** movprfx z0, z1
** sqadd z0\.s, p0/m, z0\.s, \1
z0 = svqadd_m (p0, z0, 1))
/*
-** qadd_1_s64_m_untied: { xfail *-*-* }
+** qadd_1_s64_m_untied:
** mov (z[0-9]+\.d), #1
** movprfx z0, z1
** sqadd z0\.d, p0/m, z0\.d, \1
z0 = svqadd_m (p0, z0, x0))
/*
-** qadd_w0_s8_m_untied: { xfail *-*-* }
+** qadd_w0_s8_m_untied:
** mov (z[0-9]+\.b), w0
** movprfx z0, z1
** sqadd z0\.b, p0/m, z0\.b, \1
z0 = svqadd_m (p0, z0, 1))
/*
-** qadd_1_s8_m_untied: { xfail *-*-* }
+** qadd_1_s8_m_untied:
** mov (z[0-9]+\.b), #1
** movprfx z0, z1
** sqadd z0\.b, p0/m, z0\.b, \1
z0 = svqadd_m (p0, z0, x0))
/*
-** qadd_w0_u16_m_untied: { xfail *-*-* }
+** qadd_w0_u16_m_untied:
** mov (z[0-9]+\.h), w0
** movprfx z0, z1
** uqadd z0\.h, p0/m, z0\.h, \1
z0 = svqadd_m (p0, z0, 1))
/*
-** qadd_1_u16_m_untied: { xfail *-*-* }
+** qadd_1_u16_m_untied:
** mov (z[0-9]+\.h), #1
** movprfx z0, z1
** uqadd z0\.h, p0/m, z0\.h, \1
z0 = svqadd_m (p0, z0, 1))
/*
-** qadd_1_u32_m_untied: { xfail *-*-* }
+** qadd_1_u32_m_untied:
** mov (z[0-9]+\.s), #1
** movprfx z0, z1
** uqadd z0\.s, p0/m, z0\.s, \1
z0 = svqadd_m (p0, z0, 1))
/*
-** qadd_1_u64_m_untied: { xfail *-*-* }
+** qadd_1_u64_m_untied:
** mov (z[0-9]+\.d), #1
** movprfx z0, z1
** uqadd z0\.d, p0/m, z0\.d, \1
z0 = svqadd_m (p0, z0, x0))
/*
-** qadd_w0_u8_m_untied: { xfail *-*-* }
+** qadd_w0_u8_m_untied:
** mov (z[0-9]+\.b), w0
** movprfx z0, z1
** uqadd z0\.b, p0/m, z0\.b, \1
z0 = svqadd_m (p0, z0, 1))
/*
-** qadd_1_u8_m_untied: { xfail *-*-* }
+** qadd_1_u8_m_untied:
** mov (z[0-9]+\.b), #1
** movprfx z0, z1
** uqadd z0\.b, p0/m, z0\.b, \1
z0 = svqdmlalb (z0, z4, x0))
/*
-** qdmlalb_w0_s16_untied: { xfail *-*-* }
+** qdmlalb_w0_s16_untied:
** mov (z[0-9]+\.b), w0
** movprfx z0, z1
** sqdmlalb z0\.h, z4\.b, \1
z0 = svqdmlalb (z0, z4, 11))
/*
-** qdmlalb_11_s16_untied: { xfail *-*-* }
+** qdmlalb_11_s16_untied:
** mov (z[0-9]+\.b), #11
** movprfx z0, z1
** sqdmlalb z0\.h, z4\.b, \1
z0 = svqdmlalb (z0, z4, x0))
/*
-** qdmlalb_w0_s32_untied: { xfail *-*-* }
+** qdmlalb_w0_s32_untied:
** mov (z[0-9]+\.h), w0
** movprfx z0, z1
** sqdmlalb z0\.s, z4\.h, \1
z0 = svqdmlalb (z0, z4, 11))
/*
-** qdmlalb_11_s32_untied: { xfail *-*-* }
+** qdmlalb_11_s32_untied:
** mov (z[0-9]+\.h), #11
** movprfx z0, z1
** sqdmlalb z0\.s, z4\.h, \1
z0 = svqdmlalb (z0, z4, 11))
/*
-** qdmlalb_11_s64_untied: { xfail *-*-* }
+** qdmlalb_11_s64_untied:
** mov (z[0-9]+\.s), #11
** movprfx z0, z1
** sqdmlalb z0\.d, z4\.s, \1
z0 = svqdmlalbt (z0, z4, x0))
/*
-** qdmlalbt_w0_s16_untied: { xfail *-*-*}
+** qdmlalbt_w0_s16_untied:
** mov (z[0-9]+\.b), w0
** movprfx z0, z1
** sqdmlalbt z0\.h, z4\.b, \1
z0 = svqdmlalbt (z0, z4, 11))
/*
-** qdmlalbt_11_s16_untied: { xfail *-*-*}
+** qdmlalbt_11_s16_untied:
** mov (z[0-9]+\.b), #11
** movprfx z0, z1
** sqdmlalbt z0\.h, z4\.b, \1
z0 = svqdmlalbt (z0, z4, x0))
/*
-** qdmlalbt_w0_s32_untied: { xfail *-*-*}
+** qdmlalbt_w0_s32_untied:
** mov (z[0-9]+\.h), w0
** movprfx z0, z1
** sqdmlalbt z0\.s, z4\.h, \1
z0 = svqdmlalbt (z0, z4, 11))
/*
-** qdmlalbt_11_s32_untied: { xfail *-*-*}
+** qdmlalbt_11_s32_untied:
** mov (z[0-9]+\.h), #11
** movprfx z0, z1
** sqdmlalbt z0\.s, z4\.h, \1
z0 = svqdmlalbt (z0, z4, 11))
/*
-** qdmlalbt_11_s64_untied: { xfail *-*-*}
+** qdmlalbt_11_s64_untied:
** mov (z[0-9]+\.s), #11
** movprfx z0, z1
** sqdmlalbt z0\.d, z4\.s, \1
z0 = svqsub_m (p0, z0, x0))
/*
-** qsub_w0_s16_m_untied: { xfail *-*-* }
+** qsub_w0_s16_m_untied:
** mov (z[0-9]+\.h), w0
** movprfx z0, z1
** sqsub z0\.h, p0/m, z0\.h, \1
z0 = svqsub_m (p0, z0, 1))
/*
-** qsub_1_s16_m_untied: { xfail *-*-* }
+** qsub_1_s16_m_untied:
** mov (z[0-9]+\.h), #1
** movprfx z0, z1
** sqsub z0\.h, p0/m, z0\.h, \1
z0 = svqsub_m (p0, z0, 1))
/*
-** qsub_1_s32_m_untied: { xfail *-*-* }
+** qsub_1_s32_m_untied:
** mov (z[0-9]+\.s), #1
** movprfx z0, z1
** sqsub z0\.s, p0/m, z0\.s, \1
z0 = svqsub_m (p0, z0, 1))
/*
-** qsub_1_s64_m_untied: { xfail *-*-* }
+** qsub_1_s64_m_untied:
** mov (z[0-9]+\.d), #1
** movprfx z0, z1
** sqsub z0\.d, p0/m, z0\.d, \1
z0 = svqsub_m (p0, z0, x0))
/*
-** qsub_w0_s8_m_untied: { xfail *-*-* }
+** qsub_w0_s8_m_untied:
** mov (z[0-9]+\.b), w0
** movprfx z0, z1
** sqsub z0\.b, p0/m, z0\.b, \1
z0 = svqsub_m (p0, z0, 1))
/*
-** qsub_1_s8_m_untied: { xfail *-*-* }
+** qsub_1_s8_m_untied:
** mov (z[0-9]+\.b), #1
** movprfx z0, z1
** sqsub z0\.b, p0/m, z0\.b, \1
z0 = svqsub_m (p0, z0, x0))
/*
-** qsub_w0_u16_m_untied: { xfail *-*-* }
+** qsub_w0_u16_m_untied:
** mov (z[0-9]+\.h), w0
** movprfx z0, z1
** uqsub z0\.h, p0/m, z0\.h, \1
z0 = svqsub_m (p0, z0, 1))
/*
-** qsub_1_u16_m_untied: { xfail *-*-* }
+** qsub_1_u16_m_untied:
** mov (z[0-9]+\.h), #1
** movprfx z0, z1
** uqsub z0\.h, p0/m, z0\.h, \1
z0 = svqsub_m (p0, z0, 1))
/*
-** qsub_1_u32_m_untied: { xfail *-*-* }
+** qsub_1_u32_m_untied:
** mov (z[0-9]+\.s), #1
** movprfx z0, z1
** uqsub z0\.s, p0/m, z0\.s, \1
z0 = svqsub_m (p0, z0, 1))
/*
-** qsub_1_u64_m_untied: { xfail *-*-* }
+** qsub_1_u64_m_untied:
** mov (z[0-9]+\.d), #1
** movprfx z0, z1
** uqsub z0\.d, p0/m, z0\.d, \1
z0 = svqsub_m (p0, z0, x0))
/*
-** qsub_w0_u8_m_untied: { xfail *-*-* }
+** qsub_w0_u8_m_untied:
** mov (z[0-9]+\.b), w0
** movprfx z0, z1
** uqsub z0\.b, p0/m, z0\.b, \1
z0 = svqsub_m (p0, z0, 1))
/*
-** qsub_1_u8_m_untied: { xfail *-*-* }
+** qsub_1_u8_m_untied:
** mov (z[0-9]+\.b), #1
** movprfx z0, z1
** uqsub z0\.b, p0/m, z0\.b, \1
z0 = svqsubr_m (p0, z0, x0))
/*
-** qsubr_w0_s16_m_untied: { xfail *-*-* }
+** qsubr_w0_s16_m_untied:
** mov (z[0-9]+\.h), w0
** movprfx z0, z1
** sqsubr z0\.h, p0/m, z0\.h, \1
z0 = svqsubr_m (p0, z0, 1))
/*
-** qsubr_1_s16_m_untied: { xfail *-*-* }
+** qsubr_1_s16_m_untied:
** mov (z[0-9]+\.h), #1
** movprfx z0, z1
** sqsubr z0\.h, p0/m, z0\.h, \1
z0 = svqsubr_m (p0, z0, 1))
/*
-** qsubr_1_s32_m_untied: { xfail *-*-* }
+** qsubr_1_s32_m_untied:
** mov (z[0-9]+\.s), #1
** movprfx z0, z1
** sqsubr z0\.s, p0/m, z0\.s, \1
z0 = svqsubr_m (p0, z0, 1))
/*
-** qsubr_1_s64_m_untied: { xfail *-*-* }
+** qsubr_1_s64_m_untied:
** mov (z[0-9]+\.d), #1
** movprfx z0, z1
** sqsubr z0\.d, p0/m, z0\.d, \1
z0 = svqsubr_m (p0, z0, x0))
/*
-** qsubr_w0_s8_m_untied: { xfail *-*-* }
+** qsubr_w0_s8_m_untied:
** mov (z[0-9]+\.b), w0
** movprfx z0, z1
** sqsubr z0\.b, p0/m, z0\.b, \1
z0 = svqsubr_m (p0, z0, 1))
/*
-** qsubr_1_s8_m_untied: { xfail *-*-* }
+** qsubr_1_s8_m_untied:
** mov (z[0-9]+\.b), #1
** movprfx z0, z1
** sqsubr z0\.b, p0/m, z0\.b, \1
z0 = svqsubr_m (p0, z0, x0))
/*
-** qsubr_w0_u16_m_untied: { xfail *-*-* }
+** qsubr_w0_u16_m_untied:
** mov (z[0-9]+\.h), w0
** movprfx z0, z1
** uqsubr z0\.h, p0/m, z0\.h, \1
z0 = svqsubr_m (p0, z0, 1))
/*
-** qsubr_1_u16_m_untied: { xfail *-*-* }
+** qsubr_1_u16_m_untied:
** mov (z[0-9]+\.h), #1
** movprfx z0, z1
** uqsubr z0\.h, p0/m, z0\.h, \1
z0 = svqsubr_m (p0, z0, 1))
/*
-** qsubr_1_u32_m_untied: { xfail *-*-* }
+** qsubr_1_u32_m_untied:
** mov (z[0-9]+\.s), #1
** movprfx z0, z1
** uqsubr z0\.s, p0/m, z0\.s, \1
z0 = svqsubr_m (p0, z0, 1))
/*
-** qsubr_1_u64_m_untied: { xfail *-*-* }
+** qsubr_1_u64_m_untied:
** mov (z[0-9]+\.d), #1
** movprfx z0, z1
** uqsubr z0\.d, p0/m, z0\.d, \1
z0 = svqsubr_m (p0, z0, x0))
/*
-** qsubr_w0_u8_m_untied: { xfail *-*-* }
+** qsubr_w0_u8_m_untied:
** mov (z[0-9]+\.b), w0
** movprfx z0, z1
** uqsubr z0\.b, p0/m, z0\.b, \1
z0 = svqsubr_m (p0, z0, 1))
/*
-** qsubr_1_u8_m_untied: { xfail *-*-* }
+** qsubr_1_u8_m_untied:
** mov (z[0-9]+\.b), #1
** movprfx z0, z1
** uqsubr z0\.b, p0/m, z0\.b, \1
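For reference, each directive block above is a check-function-bodies
pattern from the SVE ACLE asm tests: the "**" lines are regexps matched
against the compiled function body, and dropping "{ xfail *-*-* }" from
the name line turns an expected failure into a required pass.  A sketch
of the full form of one such test, assuming the testsuite's usual
harness header and its TEST_UNIFORM_ZX macro (the operands shown here
are illustrative, reconstructing the subr_s16 case from its hunk above):

#include "test_sve_acle.h"

/*
** subr_w0_s16_m_untied:
** mov (z[0-9]+\.h), w0
** movprfx z0, z1
** subr z0\.h, p0/m, z0\.h, \1
** ret
*/
TEST_UNIFORM_ZX (subr_w0_s16_m_untied, svint16_t, int16_t,
		 z0 = svsubr_n_s16_m (p0, z1, x0),
		 z0 = svsubr_m (p0, z1, x0))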