/* Machine description for AArch64 architecture.
   Copyright (C) 2009-2022 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

/* Important note about Carry generation in AArch64.

   Unlike some architectures, the C flag generated by a subtract
   operation, or a simple compare operation is set to 1 if the result
   does not overflow in an unsigned sense.  That is, if there is no
   borrow needed from a higher word.  That means that overflow from
   addition will set C, but overflow from a subtraction will clear C.
   We use CC_Cmode to represent detection of overflow from addition as
   CCmode is used for 'normal' compare (subtraction) operations.  For
   ADC, the representation becomes more complex still, since we cannot
   use the normal idiom of comparing the result to one of the input
   operands; instead we use CC_ADCmode to represent this case.  */
CC_MODE (CCFP);    /* Floating-point compare.  NOTE(review): the split
		      between CCFP and CCFPE is presumably quiet vs.
		      exception-raising FP compares -- confirm against
		      SELECT_CC_MODE in aarch64.cc.  */
CC_MODE (CCFPE);   /* Floating-point compare, exceptions variant
		      (see note above).  */
CC_MODE (CC_SWP);  /* NOTE(review): name suggests a compare with the
		      operands swapped relative to CCmode -- confirm
		      against the users in aarch64.md/aarch64.cc.  */
CC_MODE (CC_NZC);  /* Only N, Z and C bits of condition flags are valid.
		      (Used with SVE predicate tests.)  */
CC_MODE (CC_NZ);   /* Only N and Z bits of condition flags are valid.  */
CC_MODE (CC_Z);    /* Only Z bit of condition flags is valid.  */
CC_MODE (CC_C);    /* C represents unsigned overflow of a simple addition.  */
CC_MODE (CC_ADC);  /* Unsigned overflow from an ADC (add with carry).  */
CC_MODE (CC_V);    /* Only V bit of condition flags is valid.  */

/* Half-precision floating point for __fp16: a 2-byte scalar mode.  The
   dummy format argument (0) is overridden by the ADJUST_FLOAT_FORMAT
   that follows, which selects IEEE binary16.  */
FLOAT_MODE (HF, 2, 0);
ADJUST_FLOAT_FORMAT (HF, &ieee_half_format);

/* Vector modes.  */

/* SVE predicate modes.  The nominal element counts (16, 8, 4 and 2
   booleans packed into 2 bytes) correspond to a 128-bit data vector;
   the ADJUST_NUNITS calls below rescale each element count by the
   runtime number of 64-bit granules per vector (aarch64_sve_vg), and
   ADJUST_ALIGNMENT gives all four modes 2-byte alignment.  */
VECTOR_BOOL_MODE (VNx16BI, 16, 2);
VECTOR_BOOL_MODE (VNx8BI, 8, 2);
VECTOR_BOOL_MODE (VNx4BI, 4, 2);
VECTOR_BOOL_MODE (VNx2BI, 2, 2);

ADJUST_NUNITS (VNx16BI, aarch64_sve_vg * 8);
ADJUST_NUNITS (VNx8BI, aarch64_sve_vg * 4);
ADJUST_NUNITS (VNx4BI, aarch64_sve_vg * 2);
ADJUST_NUNITS (VNx2BI, aarch64_sve_vg);

ADJUST_ALIGNMENT (VNx16BI, 2);
ADJUST_ALIGNMENT (VNx8BI, 2);
ADJUST_ALIGNMENT (VNx4BI, 2);
ADJUST_ALIGNMENT (VNx2BI, 2);

/* Bfloat16 modes: a 2-byte scalar whose dummy format argument (0) is
   overridden by the ADJUST_FLOAT_FORMAT below (Arm's bfloat16
   format).  */
FLOAT_MODE (BF, 2, 0);
ADJUST_FLOAT_FORMAT (BF, &arm_bfloat_half_format);

/* Fixed-length Advanced SIMD vector modes (8- and 16-byte vectors).  */
VECTOR_MODES (INT, 8);        /* V8QI V4HI V2SI.  */
VECTOR_MODES (INT, 16);       /* V16QI V8HI V4SI V2DI.  */
VECTOR_MODES (FLOAT, 8);      /* V2SF.  */
VECTOR_MODES (FLOAT, 16);     /* V4SF V2DF.  */
VECTOR_MODE (FLOAT, DF, 1);   /* V1DF.  */
VECTOR_MODE (FLOAT, HF, 2);   /* V2HF.  */

/* Oct Int: 256-bit integer mode needed for 32-byte vector arguments.  */
INT_MODE (OI, 32);

/* Opaque integer modes for 3 or 4 Neon q-registers / 6 or 8 Neon d-registers
   (2 d-regs = 1 q-reg = TImode).  */
INT_MODE (CI, 48);
INT_MODE (XI, 64);

/* V8DI mode: eight DI (64-bit) elements, 64 bytes total, with its
   alignment capped to 8 bytes below.  NOTE(review): the final argument
   (5) is the ORDER parameter of VECTOR_MODE_WITH_PREFIX -- confirm its
   exact meaning against machmode.def.  */
VECTOR_MODE_WITH_PREFIX (V, INT, DI, 8, 5);

ADJUST_ALIGNMENT (V8DI, 8);

/* Define Advanced SIMD modes for structures of 2, 3 and 4 d-registers.
   VB, VH, VS and VD are the mode-name prefixes for 8-bit, 16-bit,
   32-bit and 64-bit elements respectively, so e.g.
   ADV_SIMD_D_REG_STRUCT_MODES (2, V2x8, V2x4, V2x2, V2x1) produces
   V2x8QI, V2x4HI, ..., V2x1DF.  ADJUST_NUNITS fixes each tuple's total
   lane count to NVECS 64-bit vectors' worth of elements, and
   ADJUST_ALIGNMENT gives every tuple mode d-register (8-byte)
   alignment.  */
#define ADV_SIMD_D_REG_STRUCT_MODES(NVECS, VB, VH, VS, VD) \
  VECTOR_MODES_WITH_PREFIX (V##NVECS##x, INT, 8, 3); \
  VECTOR_MODES_WITH_PREFIX (V##NVECS##x, FLOAT, 8, 3); \
  VECTOR_MODE_WITH_PREFIX (V##NVECS##x, FLOAT, DF, 1, 3); \
  VECTOR_MODE_WITH_PREFIX (V##NVECS##x, INT, DI, 1, 3); \
\
  ADJUST_NUNITS (VB##QI, NVECS * 8); \
  ADJUST_NUNITS (VH##HI, NVECS * 4); \
  ADJUST_NUNITS (VS##SI, NVECS * 2); \
  ADJUST_NUNITS (VD##DI, NVECS); \
  ADJUST_NUNITS (VH##BF, NVECS * 4); \
  ADJUST_NUNITS (VH##HF, NVECS * 4); \
  ADJUST_NUNITS (VS##SF, NVECS * 2); \
  ADJUST_NUNITS (VD##DF, NVECS); \
\
  ADJUST_ALIGNMENT (VB##QI, 8); \
  ADJUST_ALIGNMENT (VH##HI, 8); \
  ADJUST_ALIGNMENT (VS##SI, 8); \
  ADJUST_ALIGNMENT (VD##DI, 8); \
  ADJUST_ALIGNMENT (VH##BF, 8); \
  ADJUST_ALIGNMENT (VH##HF, 8); \
  ADJUST_ALIGNMENT (VS##SF, 8); \
  ADJUST_ALIGNMENT (VD##DF, 8);

ADV_SIMD_D_REG_STRUCT_MODES (2, V2x8, V2x4, V2x2, V2x1)
ADV_SIMD_D_REG_STRUCT_MODES (3, V3x8, V3x4, V3x2, V3x1)
ADV_SIMD_D_REG_STRUCT_MODES (4, V4x8, V4x4, V4x2, V4x1)

/* Define Advanced SIMD modes for structures of 2, 3 and 4 q-registers.
   As for ADV_SIMD_D_REG_STRUCT_MODES, VB/VH/VS/VD are the mode-name
   prefixes for 8-bit, 16-bit, 32-bit and 64-bit elements, but here
   each constituent vector is 128 bits, so the lane counts are doubled
   and the tuples get q-register (16-byte) alignment.  */
#define ADV_SIMD_Q_REG_STRUCT_MODES(NVECS, VB, VH, VS, VD) \
  VECTOR_MODES_WITH_PREFIX (V##NVECS##x, INT, 16, 3); \
  VECTOR_MODES_WITH_PREFIX (V##NVECS##x, FLOAT, 16, 3); \
\
  ADJUST_NUNITS (VB##QI, NVECS * 16); \
  ADJUST_NUNITS (VH##HI, NVECS * 8); \
  ADJUST_NUNITS (VS##SI, NVECS * 4); \
  ADJUST_NUNITS (VD##DI, NVECS * 2); \
  ADJUST_NUNITS (VH##BF, NVECS * 8); \
  ADJUST_NUNITS (VH##HF, NVECS * 8); \
  ADJUST_NUNITS (VS##SF, NVECS * 4); \
  ADJUST_NUNITS (VD##DF, NVECS * 2); \
\
  ADJUST_ALIGNMENT (VB##QI, 16); \
  ADJUST_ALIGNMENT (VH##HI, 16); \
  ADJUST_ALIGNMENT (VS##SI, 16); \
  ADJUST_ALIGNMENT (VD##DI, 16); \
  ADJUST_ALIGNMENT (VH##BF, 16); \
  ADJUST_ALIGNMENT (VH##HF, 16); \
  ADJUST_ALIGNMENT (VS##SF, 16); \
  ADJUST_ALIGNMENT (VD##DF, 16);

ADV_SIMD_Q_REG_STRUCT_MODES (2, V2x16, V2x8, V2x4, V2x2)
ADV_SIMD_Q_REG_STRUCT_MODES (3, V3x16, V3x8, V3x4, V3x2)
ADV_SIMD_Q_REG_STRUCT_MODES (4, V4x16, V4x8, V4x4, V4x2)

/* Define SVE modes for NVECS vectors.  VB, VH, VS and VD are the prefixes
   for 8-bit, 16-bit, 32-bit and 64-bit elements respectively.  It isn't
   strictly necessary to set the alignment here, since the default would
   be clamped to BIGGEST_ALIGNMENT anyhow, but it seems clearer.

   The final VECTOR_MODES_WITH_PREFIX argument (1 for single vectors,
   4 for tuples) is an ordering key: single-vector modes sort before
   the partial modes (which use 2, see below) and tuple modes.  Each
   ADJUST_NUNITS makes the element count a runtime multiple of
   aarch64_sve_vg, the number of 64-bit granules per SVE vector.  */
#define SVE_MODES(NVECS, VB, VH, VS, VD) \
  VECTOR_MODES_WITH_PREFIX (VNx, INT, 16 * NVECS, NVECS == 1 ? 1 : 4); \
  VECTOR_MODES_WITH_PREFIX (VNx, FLOAT, 16 * NVECS, NVECS == 1 ? 1 : 4); \
\
  ADJUST_NUNITS (VB##QI, aarch64_sve_vg * NVECS * 8); \
  ADJUST_NUNITS (VH##HI, aarch64_sve_vg * NVECS * 4); \
  ADJUST_NUNITS (VS##SI, aarch64_sve_vg * NVECS * 2); \
  ADJUST_NUNITS (VD##DI, aarch64_sve_vg * NVECS); \
  ADJUST_NUNITS (VH##BF, aarch64_sve_vg * NVECS * 4); \
  ADJUST_NUNITS (VH##HF, aarch64_sve_vg * NVECS * 4); \
  ADJUST_NUNITS (VS##SF, aarch64_sve_vg * NVECS * 2); \
  ADJUST_NUNITS (VD##DF, aarch64_sve_vg * NVECS); \
\
  ADJUST_ALIGNMENT (VB##QI, 16); \
  ADJUST_ALIGNMENT (VH##HI, 16); \
  ADJUST_ALIGNMENT (VS##SI, 16); \
  ADJUST_ALIGNMENT (VD##DI, 16); \
  ADJUST_ALIGNMENT (VH##BF, 16); \
  ADJUST_ALIGNMENT (VH##HF, 16); \
  ADJUST_ALIGNMENT (VS##SF, 16); \
  ADJUST_ALIGNMENT (VD##DF, 16);

/* Give SVE vectors the names normally used for 256-bit vectors.
   The actual number depends on command-line flags.  */
SVE_MODES (1, VNx16, VNx8, VNx4, VNx2)
SVE_MODES (2, VNx32, VNx16, VNx8, VNx4)
SVE_MODES (3, VNx48, VNx24, VNx12, VNx6)
SVE_MODES (4, VNx64, VNx32, VNx16, VNx8)

/* Partial SVE vectors:

      VNx2QI VNx4QI VNx8QI
      VNx2HI VNx4HI
      VNx2SI

   In memory they occupy contiguous locations, in the same way as fixed-length
   vectors.  E.g. VNx8QImode is half the size of VNx16QImode.

   Passing 2 as the final argument ensures that the modes come after all
   other single-vector modes in the GET_MODE_WIDER chain, so that we never
   pick them in preference to a full vector mode.  */
VECTOR_MODES_WITH_PREFIX (VNx, INT, 2, 2);
VECTOR_MODES_WITH_PREFIX (VNx, INT, 4, 2);
VECTOR_MODES_WITH_PREFIX (VNx, INT, 8, 2);
VECTOR_MODES_WITH_PREFIX (VNx, FLOAT, 4, 2);
VECTOR_MODES_WITH_PREFIX (VNx, FLOAT, 8, 2);

/* Element counts scale with the runtime vector length: one element per
   64-bit granule for the VNx2 modes, two for VNx4, four for VNx8.  */
ADJUST_NUNITS (VNx2QI, aarch64_sve_vg);
ADJUST_NUNITS (VNx2HI, aarch64_sve_vg);
ADJUST_NUNITS (VNx2SI, aarch64_sve_vg);
ADJUST_NUNITS (VNx2HF, aarch64_sve_vg);
ADJUST_NUNITS (VNx2BF, aarch64_sve_vg);
ADJUST_NUNITS (VNx2SF, aarch64_sve_vg);

ADJUST_NUNITS (VNx4QI, aarch64_sve_vg * 2);
ADJUST_NUNITS (VNx4HI, aarch64_sve_vg * 2);
ADJUST_NUNITS (VNx4HF, aarch64_sve_vg * 2);
ADJUST_NUNITS (VNx4BF, aarch64_sve_vg * 2);

ADJUST_NUNITS (VNx8QI, aarch64_sve_vg * 4);

/* Unlike full SVE vectors, partial vectors only get the alignment of
   their element type (they are laid out like fixed-length vectors,
   per the comment above).  */
ADJUST_ALIGNMENT (VNx2QI, 1);
ADJUST_ALIGNMENT (VNx4QI, 1);
ADJUST_ALIGNMENT (VNx8QI, 1);

ADJUST_ALIGNMENT (VNx2HI, 2);
ADJUST_ALIGNMENT (VNx4HI, 2);
ADJUST_ALIGNMENT (VNx2HF, 2);
ADJUST_ALIGNMENT (VNx2BF, 2);
ADJUST_ALIGNMENT (VNx4HF, 2);
ADJUST_ALIGNMENT (VNx4BF, 2);

ADJUST_ALIGNMENT (VNx2SI, 4);
ADJUST_ALIGNMENT (VNx2SF, 4);

/* Quad float: 128-bit (IEEE binary128) floating mode for long doubles.  */
FLOAT_MODE (TF, 16, ieee_quad_format);

/* A 4-tuple of SVE vectors with the maximum -msve-vector-bits= setting.
   Note that this is a limit only on the compile-time sizes of modes;
   it is not a limit on the runtime sizes, since VL-agnostic code
   must work with arbitrary vector lengths.  */
#define MAX_BITSIZE_MODE_ANY_MODE (2048 * 4)

/* Mode sizes are represented as poly_ints with two coefficients:
   coefficient 0 is the constant part, and coefficient 1 is multiplied
   by the number of 128-bit chunks in an SVE vector (referred to as
   "VQ") minus one.  */
#define NUM_POLY_INT_COEFFS 2