/* gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/mlalt_f32.c
   From the "[AArch64] Add support for the SVE2 ACLE" commit
   (git.ipfire.org mirror of thirdparty/gcc.git).  */
1 /* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
2
3 #include "test_sve_acle.h"
4
5 /*
6 ** mlalt_f32_tied1:
7 ** fmlalt z0\.s, z4\.h, z5\.h
8 ** ret
9 */
10 TEST_DUAL_Z (mlalt_f32_tied1, svfloat32_t, svfloat16_t,
11 z0 = svmlalt_f32 (z0, z4, z5),
12 z0 = svmlalt (z0, z4, z5))
13
14 /*
15 ** mlalt_f32_tied2:
16 ** mov (z[0-9]+)\.d, z0\.d
17 ** movprfx z0, z4
18 ** fmlalt z0\.s, \1\.h, z1\.h
19 ** ret
20 */
21 TEST_DUAL_Z_REV (mlalt_f32_tied2, svfloat32_t, svfloat16_t,
22 z0_res = svmlalt_f32 (z4, z0, z1),
23 z0_res = svmlalt (z4, z0, z1))
24
25 /*
26 ** mlalt_f32_tied3:
27 ** mov (z[0-9]+)\.d, z0\.d
28 ** movprfx z0, z4
29 ** fmlalt z0\.s, z1\.h, \1\.h
30 ** ret
31 */
32 TEST_DUAL_Z_REV (mlalt_f32_tied3, svfloat32_t, svfloat16_t,
33 z0_res = svmlalt_f32 (z4, z1, z0),
34 z0_res = svmlalt (z4, z1, z0))
35
36 /*
37 ** mlalt_f32_untied:
38 ** movprfx z0, z1
39 ** fmlalt z0\.s, z4\.h, z5\.h
40 ** ret
41 */
42 TEST_DUAL_Z (mlalt_f32_untied, svfloat32_t, svfloat16_t,
43 z0 = svmlalt_f32 (z1, z4, z5),
44 z0 = svmlalt (z1, z4, z5))
45
46 /*
47 ** mlalt_h7_f32_tied1:
48 ** mov (z[0-9]+\.h), h7
49 ** fmlalt z0\.s, z4\.h, \1
50 ** ret
51 */
52 TEST_DUAL_ZD (mlalt_h7_f32_tied1, svfloat32_t, svfloat16_t, float16_t,
53 z0 = svmlalt_n_f32 (z0, z4, d7),
54 z0 = svmlalt (z0, z4, d7))
55
56 /*
57 ** mlalt_h7_f32_untied:
58 ** mov (z[0-9]+\.h), h7
59 ** movprfx z0, z1
60 ** fmlalt z0\.s, z4\.h, \1
61 ** ret
62 */
63 TEST_DUAL_ZD (mlalt_h7_f32_untied, svfloat32_t, svfloat16_t, float16_t,
64 z0 = svmlalt_n_f32 (z1, z4, d7),
65 z0 = svmlalt (z1, z4, d7))
66
67 /*
68 ** mlalt_2_f32_tied1:
69 ** fmov (z[0-9]+\.h), #2\.0(?:e\+0)?
70 ** fmlalt z0\.s, z4\.h, \1
71 ** ret
72 */
73 TEST_DUAL_Z (mlalt_2_f32_tied1, svfloat32_t, svfloat16_t,
74 z0 = svmlalt_n_f32 (z0, z4, 2),
75 z0 = svmlalt (z0, z4, 2))
76
77 /*
78 ** mlalt_2_f32_untied:
79 ** fmov (z[0-9]+\.h), #2\.0(?:e\+0)?
80 ** movprfx z0, z1
81 ** fmlalt z0\.s, z4\.h, \1
82 ** ret
83 */
84 TEST_DUAL_Z (mlalt_2_f32_untied, svfloat32_t, svfloat16_t,
85 z0 = svmlalt_n_f32 (z1, z4, 2),
86 z0 = svmlalt (z1, z4, 2))