/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */

#include "test_sve_acle.h"

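/* svmlslt_f32 is the SVE2 ACLE floating-point multiply-subtract long (top)
   operation: the odd-indexed (top) float16 elements of the two multiplicand
   vectors are widened to float32, multiplied, and the products subtracted
   from the float32 accumulator, mapping to the FMLSLT instruction checked
   below.  The _n forms take a scalar float16 multiplier, which is expected
   to be broadcast with MOV/FMOV before the FMLSLT.  */
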
/*
** mlslt_f32_tied1:
**	fmlslt	z0\.s, z4\.h, z5\.h
**	ret
*/
TEST_DUAL_Z (mlslt_f32_tied1, svfloat32_t, svfloat16_t,
	     z0 = svmlslt_f32 (z0, z4, z5),
	     z0 = svmlslt (z0, z4, z5))

/*
** mlslt_f32_tied2:
**	mov	(z[0-9]+)\.d, z0\.d
**	movprfx	z0, z4
**	fmlslt	z0\.s, \1\.h, z1\.h
**	ret
*/
TEST_DUAL_Z_REV (mlslt_f32_tied2, svfloat32_t, svfloat16_t,
		 z0_res = svmlslt_f32 (z4, z0, z1),
		 z0_res = svmlslt (z4, z0, z1))

/*
** mlslt_f32_tied3:
**	mov	(z[0-9]+)\.d, z0\.d
**	movprfx	z0, z4
**	fmlslt	z0\.s, z1\.h, \1\.h
**	ret
*/
TEST_DUAL_Z_REV (mlslt_f32_tied3, svfloat32_t, svfloat16_t,
		 z0_res = svmlslt_f32 (z4, z1, z0),
		 z0_res = svmlslt (z4, z1, z0))

/*
** mlslt_f32_untied:
**	movprfx	z0, z1
**	fmlslt	z0\.s, z4\.h, z5\.h
**	ret
*/
TEST_DUAL_Z (mlslt_f32_untied, svfloat32_t, svfloat16_t,
	     z0 = svmlslt_f32 (z1, z4, z5),
	     z0 = svmlslt (z1, z4, z5))

/*
** mlslt_h7_f32_tied1:
**	mov	(z[0-9]+\.h), h7
**	fmlslt	z0\.s, z4\.h, \1
**	ret
*/
TEST_DUAL_ZD (mlslt_h7_f32_tied1, svfloat32_t, svfloat16_t, float16_t,
	      z0 = svmlslt_n_f32 (z0, z4, d7),
	      z0 = svmlslt (z0, z4, d7))

/*
** mlslt_h7_f32_untied:
**	mov	(z[0-9]+\.h), h7
**	movprfx	z0, z1
**	fmlslt	z0\.s, z4\.h, \1
**	ret
*/
TEST_DUAL_ZD (mlslt_h7_f32_untied, svfloat32_t, svfloat16_t, float16_t,
	      z0 = svmlslt_n_f32 (z1, z4, d7),
	      z0 = svmlslt (z1, z4, d7))

/*
** mlslt_2_f32_tied1:
**	fmov	(z[0-9]+\.h), #2\.0(?:e\+0)?
**	fmlslt	z0\.s, z4\.h, \1
**	ret
*/
TEST_DUAL_Z (mlslt_2_f32_tied1, svfloat32_t, svfloat16_t,
	     z0 = svmlslt_n_f32 (z0, z4, 2),
	     z0 = svmlslt (z0, z4, 2))

/*
** mlslt_2_f32_untied:
**	fmov	(z[0-9]+\.h), #2\.0(?:e\+0)?
**	movprfx	z0, z1
**	fmlslt	z0\.s, z4\.h, \1
**	ret
*/
TEST_DUAL_Z (mlslt_2_f32_untied, svfloat32_t, svfloat16_t,
	     z0 = svmlslt_n_f32 (z1, z4, 2),
	     z0 = svmlslt (z1, z4, 2))