/* Machine description for AArch64 architecture.
   Copyright (C) 2009-2022 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

/* Important note about Carry generation in AArch64.

   Unlike some architectures, the C flag generated by a subtract
   operation, or a simple compare operation is set to 1 if the result
   does not overflow in an unsigned sense.  That is, if there is no
   borrow needed from a higher word.  That means that overflow from
   addition will set C, but overflow from a subtraction will clear C.
   We use CC_Cmode to represent detection of overflow from addition as
   CCmode is used for 'normal' compare (subtraction) operations.  For
   ADC, the representation becomes more complex still, since we cannot
   use the normal idiom of comparing the result to one of the input
   operands; instead we use CC_ADCmode to represent this case.  */
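/* Illustrative sketch (an addition, not from the upstream file): for a
   compare such as "subs xzr, x0, x1", C is 1 when x0 >= x1 as unsigned
   values (no borrow), so an unsigned "lower than" test maps to the
   carry-clear (LO/CC) condition.  Conversely, "adds x0, x0, x1" sets C
   only when the addition wraps past 2^64, which is the situation that
   CC_Cmode describes.  */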
CC_MODE (CCFP);
CC_MODE (CCFPE);
CC_MODE (CC_SWP);
CC_MODE (CC_NZC);   /* Only N, Z and C bits of condition flags are valid.
                       (Used with SVE predicate tests.)  */
CC_MODE (CC_NZ);    /* Only N and Z bits of condition flags are valid.  */
CC_MODE (CC_Z);     /* Only Z bit of condition flags is valid.  */
CC_MODE (CC_C);     /* C represents unsigned overflow of a simple addition.  */
CC_MODE (CC_ADC);   /* Unsigned overflow from an ADC (add with carry).  */
CC_MODE (CC_V);     /* Only V bit of condition flags is valid.  */

/* Half-precision floating point for __fp16.  */
FLOAT_MODE (HF, 2, 0);
ADJUST_FLOAT_FORMAT (HF, &ieee_half_format);

/* Vector modes.  */

VECTOR_BOOL_MODE (VNx16BI, 16, 2);
VECTOR_BOOL_MODE (VNx8BI, 8, 2);
VECTOR_BOOL_MODE (VNx4BI, 4, 2);
VECTOR_BOOL_MODE (VNx2BI, 2, 2);

ADJUST_NUNITS (VNx16BI, aarch64_sve_vg * 8);
ADJUST_NUNITS (VNx8BI, aarch64_sve_vg * 4);
ADJUST_NUNITS (VNx4BI, aarch64_sve_vg * 2);
ADJUST_NUNITS (VNx2BI, aarch64_sve_vg);

ADJUST_ALIGNMENT (VNx16BI, 2);
ADJUST_ALIGNMENT (VNx8BI, 2);
ADJUST_ALIGNMENT (VNx4BI, 2);
ADJUST_ALIGNMENT (VNx2BI, 2);
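
/* Illustrative sketch (an addition, not from the upstream file): these are
   the SVE predicate modes, one boolean per byte of vector data.  With
   -msve-vector-bits=256, aarch64_sve_vg is 4, so VNx16BI has 32 elements
   and occupies the 4 bytes of a predicate register, while VNx2BI has 4
   elements, one per 64-bit vector lane.  */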

/* Bfloat16 modes.  */
FLOAT_MODE (BF, 2, 0);
ADJUST_FLOAT_FORMAT (BF, &arm_bfloat_half_format);

VECTOR_MODES (INT, 8);        /*       V8QI V4HI V2SI.  */
VECTOR_MODES (INT, 16);       /* V16QI V8HI V4SI V2DI.  */
VECTOR_MODES (FLOAT, 8);      /*                 V2SF.  */
VECTOR_MODES (FLOAT, 16);     /*            V4SF V2DF.  */
VECTOR_MODE (FLOAT, DF, 1);   /*                 V1DF.  */
VECTOR_MODE (FLOAT, HF, 2);   /*                 V2HF.  */

/* Oct Int: 256-bit integer mode needed for 32-byte vector arguments.  */
INT_MODE (OI, 32);

/* Opaque integer modes for 3 or 4 Neon q-registers / 6 or 8 Neon d-registers
   (2 d-regs = 1 q-reg = TImode).  */
INT_MODE (CI, 48);
INT_MODE (XI, 64);

/* V8DI mode.  */
VECTOR_MODE_WITH_PREFIX (V, INT, DI, 8, 5);

ADJUST_ALIGNMENT (V8DI, 8);

/* Define Advanced SIMD modes for structures of 2, 3 and 4 d-registers.  */
#define ADV_SIMD_D_REG_STRUCT_MODES(NVECS, VB, VH, VS, VD) \
  VECTOR_MODES_WITH_PREFIX (V##NVECS##x, INT, 8, 3); \
  VECTOR_MODES_WITH_PREFIX (V##NVECS##x, FLOAT, 8, 3); \
  VECTOR_MODE_WITH_PREFIX (V##NVECS##x, FLOAT, DF, 1, 3); \
  VECTOR_MODE_WITH_PREFIX (V##NVECS##x, INT, DI, 1, 3); \
  \
  ADJUST_NUNITS (VB##QI, NVECS * 8); \
  ADJUST_NUNITS (VH##HI, NVECS * 4); \
  ADJUST_NUNITS (VS##SI, NVECS * 2); \
  ADJUST_NUNITS (VD##DI, NVECS); \
  ADJUST_NUNITS (VH##BF, NVECS * 4); \
  ADJUST_NUNITS (VH##HF, NVECS * 4); \
  ADJUST_NUNITS (VS##SF, NVECS * 2); \
  ADJUST_NUNITS (VD##DF, NVECS); \
  \
  ADJUST_ALIGNMENT (VB##QI, 8); \
  ADJUST_ALIGNMENT (VH##HI, 8); \
  ADJUST_ALIGNMENT (VS##SI, 8); \
  ADJUST_ALIGNMENT (VD##DI, 8); \
  ADJUST_ALIGNMENT (VH##BF, 8); \
  ADJUST_ALIGNMENT (VH##HF, 8); \
  ADJUST_ALIGNMENT (VS##SF, 8); \
  ADJUST_ALIGNMENT (VD##DF, 8);

ADV_SIMD_D_REG_STRUCT_MODES (2, V2x8, V2x4, V2x2, V2x1)
ADV_SIMD_D_REG_STRUCT_MODES (3, V3x8, V3x4, V3x2, V3x1)
ADV_SIMD_D_REG_STRUCT_MODES (4, V4x8, V4x4, V4x2, V4x1)
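
/* Illustrative sketch (an addition, not from the upstream file): with
   NVECS = 2, the invocation above creates modes such as V2x8QI and V2x1DI.
   Each is 16 bytes in total but only 8-byte aligned, matching a pair of
   d-registers as loaded by LD2 and roughly corresponding to Neon tuple
   types such as int8x8x2_t.  */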

/* Define Advanced SIMD modes for structures of 2, 3 and 4 q-registers.  */
#define ADV_SIMD_Q_REG_STRUCT_MODES(NVECS, VB, VH, VS, VD) \
  VECTOR_MODES_WITH_PREFIX (V##NVECS##x, INT, 16, 3); \
  VECTOR_MODES_WITH_PREFIX (V##NVECS##x, FLOAT, 16, 3); \
  \
  ADJUST_NUNITS (VB##QI, NVECS * 16); \
  ADJUST_NUNITS (VH##HI, NVECS * 8); \
  ADJUST_NUNITS (VS##SI, NVECS * 4); \
  ADJUST_NUNITS (VD##DI, NVECS * 2); \
  ADJUST_NUNITS (VH##BF, NVECS * 8); \
  ADJUST_NUNITS (VH##HF, NVECS * 8); \
  ADJUST_NUNITS (VS##SF, NVECS * 4); \
  ADJUST_NUNITS (VD##DF, NVECS * 2); \
  \
  ADJUST_ALIGNMENT (VB##QI, 16); \
  ADJUST_ALIGNMENT (VH##HI, 16); \
  ADJUST_ALIGNMENT (VS##SI, 16); \
  ADJUST_ALIGNMENT (VD##DI, 16); \
  ADJUST_ALIGNMENT (VH##BF, 16); \
  ADJUST_ALIGNMENT (VH##HF, 16); \
  ADJUST_ALIGNMENT (VS##SF, 16); \
  ADJUST_ALIGNMENT (VD##DF, 16);

ADV_SIMD_Q_REG_STRUCT_MODES (2, V2x16, V2x8, V2x4, V2x2)
ADV_SIMD_Q_REG_STRUCT_MODES (3, V3x16, V3x8, V3x4, V3x2)
ADV_SIMD_Q_REG_STRUCT_MODES (4, V4x16, V4x8, V4x4, V4x2)

/* Define SVE modes for NVECS vectors.  VB, VH, VS and VD are the prefixes
   for 8-bit, 16-bit, 32-bit and 64-bit elements respectively.  It isn't
   strictly necessary to set the alignment here, since the default would
   be clamped to BIGGEST_ALIGNMENT anyhow, but it seems clearer.  */
#define SVE_MODES(NVECS, VB, VH, VS, VD) \
  VECTOR_MODES_WITH_PREFIX (VNx, INT, 16 * NVECS, NVECS == 1 ? 1 : 4); \
  VECTOR_MODES_WITH_PREFIX (VNx, FLOAT, 16 * NVECS, NVECS == 1 ? 1 : 4); \
  \
  ADJUST_NUNITS (VB##QI, aarch64_sve_vg * NVECS * 8); \
  ADJUST_NUNITS (VH##HI, aarch64_sve_vg * NVECS * 4); \
  ADJUST_NUNITS (VS##SI, aarch64_sve_vg * NVECS * 2); \
  ADJUST_NUNITS (VD##DI, aarch64_sve_vg * NVECS); \
  ADJUST_NUNITS (VH##BF, aarch64_sve_vg * NVECS * 4); \
  ADJUST_NUNITS (VH##HF, aarch64_sve_vg * NVECS * 4); \
  ADJUST_NUNITS (VS##SF, aarch64_sve_vg * NVECS * 2); \
  ADJUST_NUNITS (VD##DF, aarch64_sve_vg * NVECS); \
  \
  ADJUST_ALIGNMENT (VB##QI, 16); \
  ADJUST_ALIGNMENT (VH##HI, 16); \
  ADJUST_ALIGNMENT (VS##SI, 16); \
  ADJUST_ALIGNMENT (VD##DI, 16); \
  ADJUST_ALIGNMENT (VH##BF, 16); \
  ADJUST_ALIGNMENT (VH##HF, 16); \
  ADJUST_ALIGNMENT (VS##SF, 16); \
  ADJUST_ALIGNMENT (VD##DF, 16);

/* Give SVE vectors the names normally used for 256-bit vectors.
   The actual number depends on command-line flags.  */
SVE_MODES (1, VNx16, VNx8, VNx4, VNx2)
SVE_MODES (2, VNx32, VNx16, VNx8, VNx4)
SVE_MODES (3, VNx48, VNx24, VNx12, VNx6)
SVE_MODES (4, VNx64, VNx32, VNx16, VNx8)
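
/* Illustrative sketch (an addition, not from the upstream file): SVE_MODES
   with NVECS = 2 creates tuple modes such as VNx32QI, which describes two
   full SVE vectors of bytes, the kind of register pair that an LD2B load
   or an svint8x2_t value occupies.  Its element count, aarch64_sve_vg * 16,
   is only a compile-time constant when -msve-vector-bits= pins down an
   exact vector length.  */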

/* Partial SVE vectors:

      VNx2QI VNx4QI VNx8QI
      VNx2HI VNx4HI
      VNx2SI

   In memory they occupy contiguous locations, in the same way as fixed-length
   vectors.  E.g. VNx8QImode is half the size of VNx16QImode.

   Passing 2 as the final argument ensures that the modes come after all
   other single-vector modes in the GET_MODE_WIDER chain, so that we never
   pick them in preference to a full vector mode.  */
VECTOR_MODES_WITH_PREFIX (VNx, INT, 2, 2);
VECTOR_MODES_WITH_PREFIX (VNx, INT, 4, 2);
VECTOR_MODES_WITH_PREFIX (VNx, INT, 8, 2);
VECTOR_MODES_WITH_PREFIX (VNx, FLOAT, 4, 2);
VECTOR_MODES_WITH_PREFIX (VNx, FLOAT, 8, 2);

ADJUST_NUNITS (VNx2QI, aarch64_sve_vg);
ADJUST_NUNITS (VNx2HI, aarch64_sve_vg);
ADJUST_NUNITS (VNx2SI, aarch64_sve_vg);
ADJUST_NUNITS (VNx2HF, aarch64_sve_vg);
ADJUST_NUNITS (VNx2BF, aarch64_sve_vg);
ADJUST_NUNITS (VNx2SF, aarch64_sve_vg);

ADJUST_NUNITS (VNx4QI, aarch64_sve_vg * 2);
ADJUST_NUNITS (VNx4HI, aarch64_sve_vg * 2);
ADJUST_NUNITS (VNx4HF, aarch64_sve_vg * 2);
ADJUST_NUNITS (VNx4BF, aarch64_sve_vg * 2);

ADJUST_NUNITS (VNx8QI, aarch64_sve_vg * 4);

ADJUST_ALIGNMENT (VNx2QI, 1);
ADJUST_ALIGNMENT (VNx4QI, 1);
ADJUST_ALIGNMENT (VNx8QI, 1);

ADJUST_ALIGNMENT (VNx2HI, 2);
ADJUST_ALIGNMENT (VNx4HI, 2);
ADJUST_ALIGNMENT (VNx2HF, 2);
ADJUST_ALIGNMENT (VNx2BF, 2);
ADJUST_ALIGNMENT (VNx4HF, 2);
ADJUST_ALIGNMENT (VNx4BF, 2);

ADJUST_ALIGNMENT (VNx2SI, 4);
ADJUST_ALIGNMENT (VNx2SF, 4);
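
/* Illustrative sketch (an addition, not from the upstream file): with
   -msve-vector-bits=256 (aarch64_sve_vg == 4), VNx4HI has 8 halfword
   elements and takes 16 contiguous bytes in memory, half the 32 bytes of
   the full VNx8HI; in a register its elements would normally sit in the
   low halves of the vector's 32-bit containers.  */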

/* Quad float: 128-bit floating mode for long doubles.  */
FLOAT_MODE (TF, 16, ieee_quad_format);

/* A 4-tuple of SVE vectors with the maximum -msve-vector-bits= setting.
   Note that this is a limit only on the compile-time sizes of modes;
   it is not a limit on the runtime sizes, since VL-agnostic code
   must work with arbitrary vector lengths.  */
#define MAX_BITSIZE_MODE_ANY_MODE (2048 * 4)

/* Coefficient 1 is multiplied by the number of 128-bit chunks in an
   SVE vector (referred to as "VQ") minus one.  */
#define NUM_POLY_INT_COEFFS 2
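
/* Illustrative sketch (an addition, not from the upstream file): with two
   coefficients, the byte size of VNx16QImode is the poly_int 16 + 16*x,
   where x stands for VQ - 1; that evaluates to 16 bytes for 128-bit
   vectors (VQ == 1) and 64 bytes for 512-bit vectors (VQ == 4).  */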