]> git.ipfire.org Git - thirdparty/gcc.git/blob - gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/addhnt_s32.c
[AArch64] Add support for the SVE2 ACLE
[thirdparty/gcc.git] / gcc / testsuite / gcc.target / aarch64 / sve2 / acle / asm / addhnt_s32.c
1 /* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
/* NOTE: the "**"-prefixed comments below are regular expressions that
   check-function-bodies matches against the assembly emitted for each
   named test function.  They are assertions, not documentation -- do
   not edit them without updating the expected code generation.  */
2
3 #include "test_sve_acle.h"
4
/* Tied form: the intrinsic's first (destination) argument is the same
   register as the result (z0), so the expected output is a single
   ADDHNT with no register move.  The alternation accepts either
   operand order for the two source vectors.  */
5 /*
6 ** addhnt_s32_tied1:
7 ** addhnt z0\.h, (z4\.s, z5\.s|z5\.s, z4\.s)
8 ** ret
9 */
10 TEST_DUAL_Z (addhnt_s32_tied1, svint16_t, svint32_t,
11 z0 = svaddhnt_s32 (z0, z4, z5),
12 z0 = svaddhnt (z0, z4, z5))
13
/* Second source argument shares a register with the result.  No "**"
   assembly pattern is given, so check-function-bodies only verifies
   that the function compiles -- there is no single output sequence the
   register allocator should be required to pick here.  */
14 /* Bad RA choice: no preferred output sequence. */
15 TEST_DUAL_Z_REV (addhnt_s32_tied2, svint16_t, svint32_t,
16 z0_res = svaddhnt_s32 (z4, z0, z1),
17 z0_res = svaddhnt (z4, z0, z1))
18
/* Same as tied2, but with the result register appearing as the third
   argument instead of the second.  Again only compilation is checked;
   no preferred register-allocation outcome is asserted.  */
19 /* Bad RA choice: no preferred output sequence. */
20 TEST_DUAL_Z_REV (addhnt_s32_tied3, svint16_t, svint32_t,
21 z0_res = svaddhnt_s32 (z4, z1, z0),
22 z0_res = svaddhnt (z4, z1, z0))
23
/* Untied form: the destination argument (z1) differs from the result
   register (z0), so a MOV is needed in addition to the ADDHNT.  The
   pattern's top-level alternation accepts the move either before the
   ADDHNT (copy z1 into z0 first) or after it (compute into z1, then
   copy to z0).  */
24 /*
25 ** addhnt_s32_untied:
26 ** (
27 ** mov z0\.d, z1\.d
28 ** addhnt z0\.h, (z4\.s, z5\.s|z5\.s, z4\.s)
29 ** |
30 ** addhnt z1\.h, (z4\.s, z5\.s|z5\.s, z4\.s)
31 ** mov z0\.d, z1\.d
32 ** )
33 ** ret
34 */
35 TEST_DUAL_Z (addhnt_s32_untied, svint16_t, svint32_t,
36 z0 = svaddhnt_s32 (z1, z4, z5),
37 z0 = svaddhnt (z1, z4, z5))
38
/* _n (scalar-operand) form, tied destination: the scalar in w0 must
   first be broadcast into a vector register (the MOV), and the back-
   reference \1 requires the ADDHNT to use that same broadcast
   register.  */
39 /*
40 ** addhnt_w0_s32_tied1:
41 ** mov (z[0-9]+\.s), w0
42 ** addhnt z0\.h, (z4\.s, \1|\1, z4\.s)
43 ** ret
44 */
45 TEST_DUAL_ZX (addhnt_w0_s32_tied1, svint16_t, svint32_t, int32_t,
46 z0 = svaddhnt_n_s32 (z0, z4, x0),
47 z0 = svaddhnt (z0, z4, x0))
48
/* _n form with an untied destination: expects the scalar broadcast
   (MOV from w0) followed by either ordering of the z1->z0 copy around
   the ADDHNT, as in addhnt_s32_untied above.  */
49 /*
50 ** addhnt_w0_s32_untied:
51 ** mov (z[0-9]+\.s), w0
52 ** (
53 ** mov z0\.d, z1\.d
54 ** addhnt z0\.h, (z4\.s, \1|\1, z4\.s)
55 ** |
56 ** addhnt z1\.h, (z4\.s, \1|\1, z4\.s)
57 ** mov z0\.d, z1\.d
58 ** )
59 ** ret
60 */
61 TEST_DUAL_ZX (addhnt_w0_s32_untied, svint16_t, svint32_t, int32_t,
62 z0 = svaddhnt_n_s32 (z1, z4, x0),
63 z0 = svaddhnt (z1, z4, x0))
64
/* _n form with a constant operand, tied destination: the immediate 11
   is expected to be materialized into a vector register with MOV
   (presumably ADDHNT has no immediate form -- the pattern asserts the
   MOV is present), then used by the ADDHNT via the \1 back-reference.  */
65 /*
66 ** addhnt_11_s32_tied1:
67 ** mov (z[0-9]+\.s), #11
68 ** addhnt z0\.h, (z4\.s, \1|\1, z4\.s)
69 ** ret
70 */
71 TEST_DUAL_Z (addhnt_11_s32_tied1, svint16_t, svint32_t,
72 z0 = svaddhnt_n_s32 (z0, z4, 11),
73 z0 = svaddhnt (z0, z4, 11))
74
/* _n constant form with an untied destination: combines the #11
   vector materialization with either ordering of the z1->z0 copy
   around the ADDHNT.  */
75 /*
76 ** addhnt_11_s32_untied:
77 ** mov (z[0-9]+\.s), #11
78 ** (
79 ** mov z0\.d, z1\.d
80 ** addhnt z0\.h, (z4\.s, \1|\1, z4\.s)
81 ** |
82 ** addhnt z1\.h, (z4\.s, \1|\1, z4\.s)
83 ** mov z0\.d, z1\.d
84 ** )
85 ** ret
86 */
87 TEST_DUAL_Z (addhnt_11_s32_untied, svint16_t, svint32_t,
88 z0 = svaddhnt_n_s32 (z1, z4, 11),
89 z0 = svaddhnt (z1, z4, 11))