gimple_seq stmts = NULL;
tree pred = convert_pred (stmts, vector_type (0), 0);
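+ /* Coerce X (which may be a scalar) to the vector type of the lhs so it
+    can supply the active lanes of the VEC_COND_EXPR built below.  */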
+ x = force_vector (stmts, TREE_TYPE (lhs), x);
gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
return gimple_build_assign (lhs, VEC_COND_EXPR, pred, x, vec_inactive);
}
z0 = svmul_s16_z (p0, svdup_s16 (1), z0),
z0 = svmul_z (p0, svdup_s16 (1), z0))
+/*
+** mul_1op1n_s16_z:
+** movprfx z0\.h, p0/z, z0\.h
+** mov z0\.h, p0/m, w0
+** ret
+*/
+TEST_UNIFORM_ZX (mul_1op1n_s16_z, svint16_t, int16_t,
+ z0 = svmul_n_s16_z (p0, svdup_s16 (1), x0),
+ z0 = svmul_z (p0, svdup_s16 (1), x0))
+
/*
** mul_3_s16_z_tied1:
** mov (z[0-9]+\.h), #3
z0 = svmul_s32_z (p0, svdup_s32 (1), z0),
z0 = svmul_z (p0, svdup_s32 (1), z0))
+/*
+** mul_1op1n_s32_z:
+** movprfx z0\.s, p0/z, z0\.s
+** mov z0\.s, p0/m, w0
+** ret
+*/
+TEST_UNIFORM_ZX (mul_1op1n_s32_z, svint32_t, int32_t,
+ z0 = svmul_n_s32_z (p0, svdup_s32 (1), x0),
+ z0 = svmul_z (p0, svdup_s32 (1), x0))
+
/*
** mul_3_s32_z_tied1:
** mov (z[0-9]+\.s), #3
z0 = svmul_s64_z (p0, svdup_s64 (1), z0),
z0 = svmul_z (p0, svdup_s64 (1), z0))
+/*
+** mul_1op1n_s64_z:
+** movprfx z0\.d, p0/z, z0\.d
+** mov z0\.d, p0/m, x0
+** ret
+*/
+TEST_UNIFORM_ZX (mul_1op1n_s64_z, svint64_t, int64_t,
+ z0 = svmul_n_s64_z (p0, svdup_s64 (1), x0),
+ z0 = svmul_z (p0, svdup_s64 (1), x0))
+
/*
** mul_2_s64_z_tied1:
** movprfx z0.d, p0/z, z0.d
z0 = svmul_s8_z (p0, svdup_s8 (1), z0),
z0 = svmul_z (p0, svdup_s8 (1), z0))
+/*
+** mul_1op1n_s8_z:
+** movprfx z0\.b, p0/z, z0\.b
+** mov z0\.b, p0/m, w0
+** ret
+*/
+TEST_UNIFORM_ZX (mul_1op1n_s8_z, svint8_t, int8_t,
+ z0 = svmul_n_s8_z (p0, svdup_s8 (1), x0),
+ z0 = svmul_z (p0, svdup_s8 (1), x0))
+
/*
** mul_3_s8_z_tied1:
** mov (z[0-9]+\.b), #3
z0 = svmul_u16_z (p0, svdup_u16 (1), z0),
z0 = svmul_z (p0, svdup_u16 (1), z0))
+/*
+** mul_1op1n_u16_z:
+** movprfx z0\.h, p0/z, z0\.h
+** mov z0\.h, p0/m, w0
+** ret
+*/
+TEST_UNIFORM_ZX (mul_1op1n_u16_z, svuint16_t, uint16_t,
+ z0 = svmul_n_u16_z (p0, svdup_u16 (1), x0),
+ z0 = svmul_z (p0, svdup_u16 (1), x0))
+
/*
** mul_3_u16_z_tied1:
** mov (z[0-9]+\.h), #3
z0 = svmul_u32_z (p0, svdup_u32 (1), z0),
z0 = svmul_z (p0, svdup_u32 (1), z0))
+/*
+** mul_1op1n_u32_z:
+** movprfx z0\.s, p0/z, z0\.s
+** mov z0\.s, p0/m, w0
+** ret
+*/
+TEST_UNIFORM_ZX (mul_1op1n_u32_z, svuint32_t, uint32_t,
+ z0 = svmul_n_u32_z (p0, svdup_u32 (1), x0),
+ z0 = svmul_z (p0, svdup_u32 (1), x0))
+
/*
** mul_3_u32_z_tied1:
** mov (z[0-9]+\.s), #3
z0 = svmul_u64_z (p0, svdup_u64 (1), z0),
z0 = svmul_z (p0, svdup_u64 (1), z0))
+/*
+** mul_1op1n_u64_z:
+** movprfx z0\.d, p0/z, z0\.d
+** mov z0\.d, p0/m, x0
+** ret
+*/
+TEST_UNIFORM_ZX (mul_1op1n_u64_z, svuint64_t, uint64_t,
+ z0 = svmul_n_u64_z (p0, svdup_u64 (1), x0),
+ z0 = svmul_z (p0, svdup_u64 (1), x0))
+
/*
** mul_2_u64_z_tied1:
** movprfx z0.d, p0/z, z0.d
z0 = svmul_u8_z (p0, svdup_u8 (1), z0),
z0 = svmul_z (p0, svdup_u8 (1), z0))
+/*
+** mul_1op1n_u8_z:
+** movprfx z0\.b, p0/z, z0\.b
+** mov z0\.b, p0/m, w0
+** ret
+*/
+TEST_UNIFORM_ZX (mul_1op1n_u8_z, svuint8_t, uint8_t,
+ z0 = svmul_n_u8_z (p0, svdup_u8 (1), x0),
+ z0 = svmul_z (p0, svdup_u8 (1), x0))
+
/*
** mul_3_u8_z_tied1:
** mov (z[0-9]+\.b), #3