/* Machine mode switch for RISC-V 'V' Extension for GNU compiler.
   Copyright (C) 2022-2023 Free Software Foundation, Inc.
   Contributed by Ju-Zhe Zhong (juzhe.zhong@rivai.ai), RiVAI Technologies Ltd.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* This file enables or disables the RVV modes according to '-march'.  */

/* According to the rvv-intrinsic doc and the RISC-V 'V' Extension ISA
   document:
   https://github.com/riscv-non-isa/rvv-intrinsic-doc/blob/master/rvv-intrinsic-rfc.md.
   https://github.com/riscv/riscv-v-spec/blob/master/v-spec.adoc.

   Data Types
   Encode SEW and LMUL into data types.
   We enforce the constraint LMUL >= SEW/ELEN in the implementation.
   There are the following data types for MIN_VLEN > 32.

   Note: N/A means the corresponding vector type is disabled.

   |Types   |LMUL=1|LMUL=2 |LMUL=4 |LMUL=8 |LMUL=1/2|LMUL=1/4|LMUL=1/8|
   |int64_t |VNx1DI|VNx2DI |VNx4DI |VNx8DI |N/A     |N/A     |N/A     |
   |uint64_t|VNx1DI|VNx2DI |VNx4DI |VNx8DI |N/A     |N/A     |N/A     |
   |int32_t |VNx2SI|VNx4SI |VNx8SI |VNx16SI|VNx1SI  |N/A     |N/A     |
   |uint32_t|VNx2SI|VNx4SI |VNx8SI |VNx16SI|VNx1SI  |N/A     |N/A     |
   |int16_t |VNx4HI|VNx8HI |VNx16HI|VNx32HI|VNx2HI  |VNx1HI  |N/A     |
   |uint16_t|VNx4HI|VNx8HI |VNx16HI|VNx32HI|VNx2HI  |VNx1HI  |N/A     |
   |int8_t  |VNx8QI|VNx16QI|VNx32QI|VNx64QI|VNx4QI  |VNx2QI  |VNx1QI  |
   |uint8_t |VNx8QI|VNx16QI|VNx32QI|VNx64QI|VNx4QI  |VNx2QI  |VNx1QI  |
   |float64 |VNx1DF|VNx2DF |VNx4DF |VNx8DF |N/A     |N/A     |N/A     |
   |float32 |VNx2SF|VNx4SF |VNx8SF |VNx16SF|VNx1SF  |N/A     |N/A     |
   |float16 |VNx4HF|VNx8HF |VNx16HF|VNx32HF|VNx2HF  |VNx1HF  |N/A     |
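
   For example, with MIN_VLEN = 64 an int32_t vector at LMUL = 1 is
   VNx2SI: one vector register holds at least VLEN/SEW = 64/32 = 2
   elements, and each doubling of LMUL doubles the subpart count, so
   LMUL = 8 gives VNx16SI.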

   Mask Types
   Encode the ratio of SEW/LMUL into the mask types.  There are the
   following mask types.

   n = SEW/LMUL

   |Types|n=1    |n=2    |n=4    |n=8   |n=16  |n=32  |n=64  |
   |bool |VNx64BI|VNx32BI|VNx16BI|VNx8BI|VNx4BI|VNx2BI|VNx1BI|
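
   For example, SEW = 8 at LMUL = 8 gives n = 1, so its mask type is
   VNx64BI (one mask bit per element of VNx64QI), while SEW = 64 at
   LMUL = 1 gives n = 64 and the sparsest mask type VNx1BI.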

   There are the following data types for MIN_VLEN = 32.

   |Types   |LMUL=1|LMUL=2|LMUL=4 |LMUL=8 |LMUL=1/2|LMUL=1/4|LMUL=1/8|
   |int64_t |N/A   |N/A   |N/A    |N/A    |N/A     |N/A     |N/A     |
   |uint64_t|N/A   |N/A   |N/A    |N/A    |N/A     |N/A     |N/A     |
   |int32_t |VNx1SI|VNx2SI|VNx4SI |VNx8SI |N/A     |N/A     |N/A     |
   |uint32_t|VNx1SI|VNx2SI|VNx4SI |VNx8SI |N/A     |N/A     |N/A     |
   |int16_t |VNx2HI|VNx4HI|VNx8HI |VNx16HI|VNx1HI  |N/A     |N/A     |
   |uint16_t|VNx2HI|VNx4HI|VNx8HI |VNx16HI|VNx1HI  |N/A     |N/A     |
   |int8_t  |VNx4QI|VNx8QI|VNx16QI|VNx32QI|VNx2QI  |VNx1QI  |N/A     |
   |uint8_t |VNx4QI|VNx8QI|VNx16QI|VNx32QI|VNx2QI  |VNx1QI  |N/A     |
   |float64 |N/A   |N/A   |N/A    |N/A    |N/A     |N/A     |N/A     |
   |float32 |VNx1SF|VNx2SF|VNx4SF |VNx8SF |N/A     |N/A     |N/A     |
   |float16 |VNx2HF|VNx4HF|VNx8HF |VNx16HF|VNx1HF  |N/A     |N/A     |

   Mask Types
   Encode the ratio of SEW/LMUL into the mask types.  There are the
   following mask types.

   n = SEW/LMUL

   |Types|n=1    |n=2    |n=4   |n=8   |n=16  |n=32  |n=64|
   |bool |VNx32BI|VNx16BI|VNx8BI|VNx4BI|VNx2BI|VNx1BI|N/A |

   TODO: FP16 vectors need support for 'zvfh'; we don't support them yet.  */

/* Return 'REQUIREMENT' for machine_mode 'MODE'.
   For example: 'MODE' = VNx64BImode needs TARGET_MIN_VLEN > 32.  */
#ifndef ENTRY
#define ENTRY(MODE, REQUIREMENT, VLMUL_FOR_MIN_VLEN32, RATIO_FOR_MIN_VLEN32, \
              VLMUL_FOR_MIN_VLEN64, RATIO_FOR_MIN_VLEN64)
#endif
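
/* A minimal sketch of how a consumer uses this file: define ENTRY,
   include the file, and let every ENTRY line expand in place (the
   X-macro pattern).  Modeled on the mode predicate in riscv.cc; the
   exact body there may differ:

     bool
     riscv_v_ext_vector_mode_p (machine_mode mode)
     {
     #define ENTRY(MODE, REQUIREMENT, ...) \
       case MODE##mode:                    \
         return REQUIREMENT;
       switch (mode)
         {
     #include "riscv-vector-switch.def"
         default:
           return false;
         }
     }

   ENTRY is #undef'ed at the end of this file, so consumers need not
   clean up the definition themselves.  */
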
/* Flag of FP32 vector.  */
#ifndef TARGET_VECTOR_FP32
#define TARGET_VECTOR_FP32 \
  (TARGET_HARD_FLOAT && (TARGET_VECTOR_ELEN_FP_32 || TARGET_VECTOR_ELEN_FP_64))
#endif
/* Flag of FP64 vector.  */
#ifndef TARGET_VECTOR_FP64
#define TARGET_VECTOR_FP64 \
  (TARGET_DOUBLE_FLOAT && TARGET_VECTOR_ELEN_FP_64 && (TARGET_MIN_VLEN > 32))
#endif
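
/* Illustrative examples of how '-march' drives these flags, assuming
   the standard extension semantics: -march=rv64gcv implies ELEN = 64
   and MIN_VLEN >= 128, so both TARGET_VECTOR_FP32 and
   TARGET_VECTOR_FP64 are true, while -march=rv32gc_zve32f has
   MIN_VLEN = 32 and no 64-bit vector elements, so only
   TARGET_VECTOR_FP32 holds.  */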

/* Mask modes.  Disable VNx64BImode when TARGET_MIN_VLEN == 32.  */
ENTRY (VNx64BI, TARGET_MIN_VLEN > 32, LMUL_RESERVED, 0, LMUL_8, 1)
ENTRY (VNx32BI, true, LMUL_8, 1, LMUL_4, 2)
ENTRY (VNx16BI, true, LMUL_4, 2, LMUL_2, 4)
ENTRY (VNx8BI, true, LMUL_2, 4, LMUL_1, 8)
ENTRY (VNx4BI, true, LMUL_1, 8, LMUL_F2, 16)
ENTRY (VNx2BI, true, LMUL_F2, 16, LMUL_F4, 32)
ENTRY (VNx1BI, true, LMUL_F4, 32, LMUL_F8, 64)
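
/* Reading a row, e.g. VNx32BI: on a MIN_VLEN == 32 target it requires
   LMUL = 8 (SEW/LMUL ratio 1), while on a MIN_VLEN == 64 target it
   needs only LMUL = 4 (ratio 2).  The pair LMUL_RESERVED, 0 marks a
   combination that is disabled for that MIN_VLEN.  */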

/* SEW = 8.  Disable VNx64QImode when TARGET_MIN_VLEN == 32.  */
ENTRY (VNx64QI, TARGET_MIN_VLEN > 32, LMUL_RESERVED, 0, LMUL_8, 1)
ENTRY (VNx32QI, true, LMUL_8, 1, LMUL_4, 2)
ENTRY (VNx16QI, true, LMUL_4, 2, LMUL_2, 4)
ENTRY (VNx8QI, true, LMUL_2, 4, LMUL_1, 8)
ENTRY (VNx4QI, true, LMUL_1, 8, LMUL_F2, 16)
ENTRY (VNx2QI, true, LMUL_F2, 16, LMUL_F4, 32)
ENTRY (VNx1QI, true, LMUL_F4, 32, LMUL_F8, 64)

/* SEW = 16.  Disable VNx32HImode when TARGET_MIN_VLEN == 32.  */
ENTRY (VNx32HI, TARGET_MIN_VLEN > 32, LMUL_RESERVED, 0, LMUL_8, 2)
ENTRY (VNx16HI, true, LMUL_8, 2, LMUL_4, 4)
ENTRY (VNx8HI, true, LMUL_4, 4, LMUL_2, 8)
ENTRY (VNx4HI, true, LMUL_2, 8, LMUL_1, 16)
ENTRY (VNx2HI, true, LMUL_1, 16, LMUL_F2, 32)
ENTRY (VNx1HI, true, LMUL_F2, 32, LMUL_F4, 64)

/* TODO: Disable all FP16 vectors; enable them when 'zvfh' is supported.  */
ENTRY (VNx32HF, false, LMUL_RESERVED, 0, LMUL_8, 2)
ENTRY (VNx16HF, false, LMUL_8, 2, LMUL_4, 4)
ENTRY (VNx8HF, false, LMUL_4, 4, LMUL_2, 8)
ENTRY (VNx4HF, false, LMUL_2, 8, LMUL_1, 16)
ENTRY (VNx2HF, false, LMUL_1, 16, LMUL_F2, 32)
ENTRY (VNx1HF, false, LMUL_F2, 32, LMUL_F4, 64)

/* SEW = 32.  Disable VNx16SImode when TARGET_MIN_VLEN == 32.
   For single-precision floating-point, we also need TARGET_VECTOR_FP32.  */
ENTRY (VNx16SI, TARGET_MIN_VLEN > 32, LMUL_RESERVED, 0, LMUL_8, 4)
ENTRY (VNx8SI, true, LMUL_8, 4, LMUL_4, 8)
ENTRY (VNx4SI, true, LMUL_4, 8, LMUL_2, 16)
ENTRY (VNx2SI, true, LMUL_2, 16, LMUL_1, 32)
ENTRY (VNx1SI, true, LMUL_1, 32, LMUL_F2, 64)

ENTRY (VNx16SF, TARGET_VECTOR_FP32 && (TARGET_MIN_VLEN > 32), LMUL_RESERVED, 0,
       LMUL_8, 4)
ENTRY (VNx8SF, TARGET_VECTOR_FP32, LMUL_8, 4, LMUL_4, 8)
ENTRY (VNx4SF, TARGET_VECTOR_FP32, LMUL_4, 8, LMUL_2, 16)
ENTRY (VNx2SF, TARGET_VECTOR_FP32, LMUL_2, 16, LMUL_1, 32)
ENTRY (VNx1SF, TARGET_VECTOR_FP32, LMUL_1, 32, LMUL_F2, 64)

/* SEW = 64.  Enable when TARGET_MIN_VLEN > 32.
   For double-precision floating-point, we also need TARGET_VECTOR_FP64.  */
ENTRY (VNx8DI, TARGET_MIN_VLEN > 32, LMUL_RESERVED, 0, LMUL_8, 8)
ENTRY (VNx4DI, TARGET_MIN_VLEN > 32, LMUL_RESERVED, 0, LMUL_4, 16)
ENTRY (VNx2DI, TARGET_MIN_VLEN > 32, LMUL_RESERVED, 0, LMUL_2, 32)
ENTRY (VNx1DI, TARGET_MIN_VLEN > 32, LMUL_RESERVED, 0, LMUL_1, 64)

ENTRY (VNx8DF, TARGET_VECTOR_FP64 && (TARGET_MIN_VLEN > 32), LMUL_RESERVED, 0,
       LMUL_8, 8)
ENTRY (VNx4DF, TARGET_VECTOR_FP64, LMUL_RESERVED, 0, LMUL_4, 16)
ENTRY (VNx2DF, TARGET_VECTOR_FP64, LMUL_RESERVED, 0, LMUL_2, 32)
ENTRY (VNx1DF, TARGET_VECTOR_FP64, LMUL_RESERVED, 0, LMUL_1, 64)

#undef TARGET_VECTOR_FP32
#undef TARGET_VECTOR_FP64
#undef ENTRY