/* Machine mode switch for RISC-V 'V' Extension for GNU compiler.
   Copyright (C) 2022-2023 Free Software Foundation, Inc.
   Contributed by Ju-Zhe Zhong (juzhe.zhong@rivai.ai), RiVAI Technologies Ltd.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

/* This file enables or disables the RVV modes according to '-march'.  */

/* According to the RVV intrinsic document and the RISC-V 'V' extension ISA
   specification:
   https://github.com/riscv-non-isa/rvv-intrinsic-doc/blob/master/rvv-intrinsic-rfc.md
   https://github.com/riscv/riscv-v-spec/blob/master/v-spec.adoc

   Data Types
   Encode SEW and LMUL into data types.
   We enforce the constraint LMUL >= SEW/ELEN in the implementation.
   These are the data types for MIN_VLEN > 32.

   Note: N/A means the corresponding vector type is disabled.

   |Types   |LMUL=1|LMUL=2 |LMUL=4 |LMUL=8 |LMUL=1/2|LMUL=1/4|LMUL=1/8|
   |int64_t |VNx1DI|VNx2DI |VNx4DI |VNx8DI |N/A     |N/A     |N/A     |
   |uint64_t|VNx1DI|VNx2DI |VNx4DI |VNx8DI |N/A     |N/A     |N/A     |
   |int32_t |VNx2SI|VNx4SI |VNx8SI |VNx16SI|VNx1SI  |N/A     |N/A     |
   |uint32_t|VNx2SI|VNx4SI |VNx8SI |VNx16SI|VNx1SI  |N/A     |N/A     |
   |int16_t |VNx4HI|VNx8HI |VNx16HI|VNx32HI|VNx2HI  |VNx1HI  |N/A     |
   |uint16_t|VNx4HI|VNx8HI |VNx16HI|VNx32HI|VNx2HI  |VNx1HI  |N/A     |
   |int8_t  |VNx8QI|VNx16QI|VNx32QI|VNx64QI|VNx4QI  |VNx2QI  |VNx1QI  |
   |uint8_t |VNx8QI|VNx16QI|VNx32QI|VNx64QI|VNx4QI  |VNx2QI  |VNx1QI  |
   |float64 |VNx1DF|VNx2DF |VNx4DF |VNx8DF |N/A     |N/A     |N/A     |
   |float32 |VNx2SF|VNx4SF |VNx8SF |VNx16SF|VNx1SF  |N/A     |N/A     |
   |float16 |VNx4HF|VNx8HF |VNx16HF|VNx32HF|VNx2HF  |VNx1HF  |N/A     |

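   As a worked example of how a mode name encodes SEW and LMUL (editorial
   arithmetic, not taken from the table itself): the number of elements is
   nunits = MIN_VLEN * LMUL / SEW.  With MIN_VLEN = 64, SEW = 32 and
   LMUL = 1 this gives 64 * 1 / 32 = 2 elements, i.e. VNx2SI, matching the
   int32_t/LMUL=1 cell above; LMUL = 8 gives 64 * 8 / 32 = 16 elements,
   i.e. VNx16SI.
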
   Mask Types
   Encode the ratio SEW/LMUL into the mask types.
   These are the mask types.

   n = SEW/LMUL

   |Types|n=1    |n=2    |n=4    |n=8   |n=16  |n=32  |n=64  |
   |bool |VNx64BI|VNx32BI|VNx16BI|VNx8BI|VNx4BI|VNx2BI|VNx1BI|

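   For example (again editorial arithmetic): comparing int32_t vectors held
   at LMUL = 1/2 yields a mask with ratio n = SEW/LMUL = 32 / (1/2) = 64,
   which is VNx1BImode in the MIN_VLEN > 32 table above.
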
   These are the data types for MIN_VLEN = 32.

   |Types   |LMUL=1|LMUL=2|LMUL=4 |LMUL=8 |LMUL=1/2|LMUL=1/4|LMUL=1/8|
   |int64_t |N/A   |N/A   |N/A    |N/A    |N/A     |N/A     |N/A     |
   |uint64_t|N/A   |N/A   |N/A    |N/A    |N/A     |N/A     |N/A     |
   |int32_t |VNx1SI|VNx2SI|VNx4SI |VNx8SI |N/A     |N/A     |N/A     |
   |uint32_t|VNx1SI|VNx2SI|VNx4SI |VNx8SI |N/A     |N/A     |N/A     |
   |int16_t |VNx2HI|VNx4HI|VNx8HI |VNx16HI|VNx1HI  |N/A     |N/A     |
   |uint16_t|VNx2HI|VNx4HI|VNx8HI |VNx16HI|VNx1HI  |N/A     |N/A     |
   |int8_t  |VNx4QI|VNx8QI|VNx16QI|VNx32QI|VNx2QI  |VNx1QI  |N/A     |
   |uint8_t |VNx4QI|VNx8QI|VNx16QI|VNx32QI|VNx2QI  |VNx1QI  |N/A     |
   |float64 |N/A   |N/A   |N/A    |N/A    |N/A     |N/A     |N/A     |
   |float32 |VNx1SF|VNx2SF|VNx4SF |VNx8SF |N/A     |N/A     |N/A     |
   |float16 |VNx2HF|VNx4HF|VNx8HF |VNx16HF|VNx1HF  |N/A     |N/A     |

   Mask Types
   Encode the ratio SEW/LMUL into the mask types.
   These are the mask types.

   n = SEW/LMUL

   |Types|n=1    |n=2    |n=4   |n=8   |n=16  |n=32  |n=64|
   |bool |VNx32BI|VNx16BI|VNx8BI|VNx4BI|VNx2BI|VNx1BI|N/A |

   Note: the FP16 vector modes below additionally require the 'zvfh' or
   'zvfhmin' extension (TARGET_VECTOR_ELEN_FP_16).  */

/* Return 'REQUIREMENT' for machine_mode 'MODE'.
   For example: 'MODE' = VNx64BImode needs TARGET_MIN_VLEN > 32.  */
#ifndef ENTRY
#define ENTRY(MODE, REQUIREMENT, VLMUL_FOR_MIN_VLEN32, RATIO_FOR_MIN_VLEN32,   \
	      VLMUL_FOR_MIN_VLEN64, RATIO_FOR_MIN_VLEN64,                      \
	      VLMUL_FOR_MIN_VLEN128, RATIO_FOR_MIN_VLEN128)
#endif
#ifndef TUPLE_ENTRY
#define TUPLE_ENTRY(MODE, REQUIREMENT, SUBPART_MODE, NF, VLMUL_FOR_MIN_VLEN32, \
		    RATIO_FOR_MIN_VLEN32, VLMUL_FOR_MIN_VLEN64,                \
		    RATIO_FOR_MIN_VLEN64, VLMUL_FOR_MIN_VLEN128,               \
		    RATIO_FOR_MIN_VLEN128)
#endif
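
/* A consumer defines ENTRY and/or TUPLE_ENTRY before including this file,
   and each macro then expands once per mode listed below.  A minimal sketch
   of the pattern (modelled on the mode predicates in riscv.cc; treat the
   exact function shown here as illustrative rather than authoritative):

     bool
     riscv_v_ext_vector_mode_p (machine_mode mode)
     {
     #define ENTRY(MODE, REQUIREMENT, ...)                                    \
       case MODE##mode:                                                       \
	 return REQUIREMENT;
       switch (mode)
	 {
     #include "riscv-vector-switch.def"
	 default:
	   return false;
	 }
       return false;
     }

   Both macros are #undef'd at the end of this file, so every inclusion site
   supplies its own expansion.  */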

/* Mask modes.  Disable VNx64BImode when TARGET_MIN_VLEN == 32.  */
ENTRY (VNx128BI, TARGET_MIN_VLEN >= 128, LMUL_RESERVED, 0, LMUL_RESERVED, 0, LMUL_8, 1)
ENTRY (VNx64BI, TARGET_MIN_VLEN > 32, LMUL_RESERVED, 0, LMUL_8, 1, LMUL_4, 2)
ENTRY (VNx32BI, true, LMUL_8, 1, LMUL_4, 2, LMUL_2, 4)
ENTRY (VNx16BI, true, LMUL_4, 2, LMUL_2, 4, LMUL_1, 8)
ENTRY (VNx8BI, true, LMUL_2, 4, LMUL_1, 8, LMUL_F2, 16)
ENTRY (VNx4BI, true, LMUL_1, 8, LMUL_F2, 16, LMUL_F4, 32)
ENTRY (VNx2BI, true, LMUL_F2, 16, LMUL_F4, 32, LMUL_F8, 64)
ENTRY (VNx1BI, TARGET_MIN_VLEN < 128, LMUL_F4, 32, LMUL_F8, 64, LMUL_RESERVED, 0)
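
/* Reading an entry, e.g. the VNx32BI row above (an editorial walkthrough of
   the macro parameters, not extra data): the pairs are (VLMUL, RATIO) for
   TARGET_MIN_VLEN of 32, 64 and 128 respectively, so VNx32BImode occupies
   LMUL_8 with SEW/LMUL ratio 1 when MIN_VLEN == 32, LMUL_4 with ratio 2
   when MIN_VLEN == 64, and LMUL_2 with ratio 4 when MIN_VLEN == 128.  */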

/* SEW = 8.  Disable VNx64QImode when TARGET_MIN_VLEN == 32.  */
ENTRY (VNx128QI, TARGET_MIN_VLEN >= 128, LMUL_RESERVED, 0, LMUL_RESERVED, 0, LMUL_8, 1)
ENTRY (VNx64QI, TARGET_MIN_VLEN > 32, LMUL_RESERVED, 0, LMUL_8, 1, LMUL_4, 2)
ENTRY (VNx32QI, true, LMUL_8, 1, LMUL_4, 2, LMUL_2, 4)
ENTRY (VNx16QI, true, LMUL_4, 2, LMUL_2, 4, LMUL_1, 8)
ENTRY (VNx8QI, true, LMUL_2, 4, LMUL_1, 8, LMUL_F2, 16)
ENTRY (VNx4QI, true, LMUL_1, 8, LMUL_F2, 16, LMUL_F4, 32)
ENTRY (VNx2QI, true, LMUL_F2, 16, LMUL_F4, 32, LMUL_F8, 64)
ENTRY (VNx1QI, TARGET_MIN_VLEN < 128, LMUL_F4, 32, LMUL_F8, 64, LMUL_RESERVED, 0)

/* SEW = 16.  Disable VNx32HImode when TARGET_MIN_VLEN == 32.  */
ENTRY (VNx64HI, TARGET_MIN_VLEN >= 128, LMUL_RESERVED, 0, LMUL_RESERVED, 0, LMUL_8, 2)
ENTRY (VNx32HI, TARGET_MIN_VLEN > 32, LMUL_RESERVED, 0, LMUL_8, 2, LMUL_4, 4)
ENTRY (VNx16HI, true, LMUL_8, 2, LMUL_4, 4, LMUL_2, 8)
ENTRY (VNx8HI, true, LMUL_4, 4, LMUL_2, 8, LMUL_1, 16)
ENTRY (VNx4HI, true, LMUL_2, 8, LMUL_1, 16, LMUL_F2, 32)
ENTRY (VNx2HI, true, LMUL_1, 16, LMUL_F2, 32, LMUL_F4, 64)
ENTRY (VNx1HI, TARGET_MIN_VLEN < 128, LMUL_F2, 32, LMUL_F4, 64, LMUL_RESERVED, 0)

/* SEW = 16 for floating point.  Enabled when 'zvfh' or 'zvfhmin' is given.  */
ENTRY (VNx64HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 128, \
       LMUL_RESERVED, 0, LMUL_RESERVED, 0, LMUL_8, 2)
ENTRY (VNx32HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32, \
       LMUL_RESERVED, 0, LMUL_8, 2, LMUL_4, 4)
ENTRY (VNx16HF, TARGET_VECTOR_ELEN_FP_16, \
       LMUL_8, 2, LMUL_4, 4, LMUL_2, 8)
ENTRY (VNx8HF, TARGET_VECTOR_ELEN_FP_16, \
       LMUL_4, 4, LMUL_2, 8, LMUL_1, 16)
ENTRY (VNx4HF, TARGET_VECTOR_ELEN_FP_16, \
       LMUL_2, 8, LMUL_1, 16, LMUL_F2, 32)
ENTRY (VNx2HF, TARGET_VECTOR_ELEN_FP_16, \
       LMUL_1, 16, LMUL_F2, 32, LMUL_F4, 64)
ENTRY (VNx1HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN < 128, \
       LMUL_F2, 32, LMUL_F4, 64, LMUL_RESERVED, 0)

/* SEW = 32.  Disable VNx16SImode when TARGET_MIN_VLEN == 32.
   For single-precision floating-point, we need TARGET_VECTOR_ELEN_FP_32 to be
   true.  */
ENTRY (VNx32SI, TARGET_MIN_VLEN >= 128, LMUL_RESERVED, 0, LMUL_RESERVED, 0, LMUL_8, 4)
ENTRY (VNx16SI, TARGET_MIN_VLEN > 32, LMUL_RESERVED, 0, LMUL_8, 4, LMUL_4, 8)
ENTRY (VNx8SI, true, LMUL_8, 4, LMUL_4, 8, LMUL_2, 16)
ENTRY (VNx4SI, true, LMUL_4, 8, LMUL_2, 16, LMUL_1, 32)
ENTRY (VNx2SI, true, LMUL_2, 16, LMUL_1, 32, LMUL_F2, 64)
ENTRY (VNx1SI, TARGET_MIN_VLEN < 128, LMUL_1, 32, LMUL_F2, 64, LMUL_RESERVED, 0)

ENTRY (VNx32SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 128, LMUL_RESERVED, 0, LMUL_RESERVED, 0, LMUL_8, 4)
ENTRY (VNx16SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32, LMUL_RESERVED, 0,
       LMUL_8, 4, LMUL_4, 8)
ENTRY (VNx8SF, TARGET_VECTOR_ELEN_FP_32, LMUL_8, 4, LMUL_4, 8, LMUL_2, 16)
ENTRY (VNx4SF, TARGET_VECTOR_ELEN_FP_32, LMUL_4, 8, LMUL_2, 16, LMUL_1, 32)
ENTRY (VNx2SF, TARGET_VECTOR_ELEN_FP_32, LMUL_2, 16, LMUL_1, 32, LMUL_F2, 64)
ENTRY (VNx1SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN < 128, LMUL_1, 32, LMUL_F2, 64, LMUL_RESERVED, 0)

/* SEW = 64.  Enable when TARGET_VECTOR_ELEN_64 is true.
   For double-precision floating-point, we need TARGET_VECTOR_ELEN_FP_64 to be
   true.  */
ENTRY (VNx16DI, TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 128, LMUL_RESERVED, 0, LMUL_RESERVED, 0, LMUL_8, 8)
ENTRY (VNx8DI, TARGET_VECTOR_ELEN_64, LMUL_RESERVED, 0, LMUL_8, 8, LMUL_4, 16)
ENTRY (VNx4DI, TARGET_VECTOR_ELEN_64, LMUL_RESERVED, 0, LMUL_4, 16, LMUL_2, 32)
ENTRY (VNx2DI, TARGET_VECTOR_ELEN_64, LMUL_RESERVED, 0, LMUL_2, 32, LMUL_1, 64)
ENTRY (VNx1DI, TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN < 128, LMUL_RESERVED, 0, LMUL_1, 64, LMUL_RESERVED, 0)

ENTRY (VNx16DF, TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 128, LMUL_RESERVED, 0, LMUL_RESERVED, 0, LMUL_8, 8)
ENTRY (VNx8DF, TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN > 32, LMUL_RESERVED, 0,
       LMUL_8, 8, LMUL_4, 16)
ENTRY (VNx4DF, TARGET_VECTOR_ELEN_FP_64, LMUL_RESERVED, 0, LMUL_4, 16, LMUL_2, 32)
ENTRY (VNx2DF, TARGET_VECTOR_ELEN_FP_64, LMUL_RESERVED, 0, LMUL_2, 32, LMUL_1, 64)
ENTRY (VNx1DF, TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN < 128, LMUL_RESERVED, 0, LMUL_1, 64, LMUL_RESERVED, 0)

/* Enable or disable the tuple modes.  BASE_MODE is the base vector mode of a
   tuple mode; for example, the BASE_MODE of VNx2x1SImode is VNx1SImode.  All
   tuple modes must satisfy NF * LMUL (BASE_MODE) <= 8.  */
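
/* As a worked check of the NF * LMUL <= 8 rule (editorial arithmetic based
   on the entries below): with MIN_VLEN = 64, VNx32QI occupies LMUL_4, so
   VNx2x32QI needs 2 * 4 = 8 <= 8 and is enabled at TARGET_MIN_VLEN >= 64,
   whereas VNx3x32QI would need 3 * 4 = 12 > 8 and is therefore only enabled
   at TARGET_MIN_VLEN >= 128, where the base mode drops to LMUL_2.  */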

/* Tuple modes for EEW = 8.  */
TUPLE_ENTRY (VNx2x64QI, TARGET_MIN_VLEN >= 128, VNx64QI, 2, LMUL_RESERVED, 0, LMUL_RESERVED, 0, LMUL_4, 2)
TUPLE_ENTRY (VNx2x32QI, TARGET_MIN_VLEN >= 64, VNx32QI, 2, LMUL_RESERVED, 0, LMUL_4, 2, LMUL_2, 4)
TUPLE_ENTRY (VNx3x32QI, TARGET_MIN_VLEN >= 128, VNx32QI, 3, LMUL_RESERVED, 0, LMUL_RESERVED, 0, LMUL_2, 4)
TUPLE_ENTRY (VNx4x32QI, TARGET_MIN_VLEN >= 128, VNx32QI, 4, LMUL_RESERVED, 0, LMUL_RESERVED, 0, LMUL_2, 4)
TUPLE_ENTRY (VNx2x16QI, true, VNx16QI, 2, LMUL_4, 2, LMUL_2, 4, LMUL_1, 8)
TUPLE_ENTRY (VNx3x16QI, TARGET_MIN_VLEN >= 64, VNx16QI, 3, LMUL_RESERVED, 0, LMUL_2, 4, LMUL_1, 8)
TUPLE_ENTRY (VNx4x16QI, TARGET_MIN_VLEN >= 64, VNx16QI, 4, LMUL_RESERVED, 0, LMUL_2, 4, LMUL_1, 8)
TUPLE_ENTRY (VNx5x16QI, TARGET_MIN_VLEN >= 128, VNx16QI, 5, LMUL_RESERVED, 0, LMUL_RESERVED, 0, LMUL_1, 8)
TUPLE_ENTRY (VNx6x16QI, TARGET_MIN_VLEN >= 128, VNx16QI, 6, LMUL_RESERVED, 0, LMUL_RESERVED, 0, LMUL_1, 8)
TUPLE_ENTRY (VNx7x16QI, TARGET_MIN_VLEN >= 128, VNx16QI, 7, LMUL_RESERVED, 0, LMUL_RESERVED, 0, LMUL_1, 8)
TUPLE_ENTRY (VNx8x16QI, TARGET_MIN_VLEN >= 128, VNx16QI, 8, LMUL_RESERVED, 0, LMUL_RESERVED, 0, LMUL_1, 8)
TUPLE_ENTRY (VNx2x8QI, true, VNx8QI, 2, LMUL_2, 4, LMUL_1, 8, LMUL_F2, 16)
TUPLE_ENTRY (VNx3x8QI, true, VNx8QI, 3, LMUL_2, 4, LMUL_1, 8, LMUL_F2, 16)
TUPLE_ENTRY (VNx4x8QI, true, VNx8QI, 4, LMUL_2, 4, LMUL_1, 8, LMUL_F2, 16)
TUPLE_ENTRY (VNx5x8QI, TARGET_MIN_VLEN >= 64, VNx8QI, 5, LMUL_RESERVED, 0, LMUL_1, 8, LMUL_F2, 16)
TUPLE_ENTRY (VNx6x8QI, TARGET_MIN_VLEN >= 64, VNx8QI, 6, LMUL_RESERVED, 0, LMUL_1, 8, LMUL_F2, 16)
TUPLE_ENTRY (VNx7x8QI, TARGET_MIN_VLEN >= 64, VNx8QI, 7, LMUL_RESERVED, 0, LMUL_1, 8, LMUL_F2, 16)
TUPLE_ENTRY (VNx8x8QI, TARGET_MIN_VLEN >= 64, VNx8QI, 8, LMUL_RESERVED, 0, LMUL_1, 8, LMUL_F2, 16)
TUPLE_ENTRY (VNx2x4QI, true, VNx4QI, 2, LMUL_1, 8, LMUL_F2, 16, LMUL_F4, 32)
TUPLE_ENTRY (VNx3x4QI, true, VNx4QI, 3, LMUL_1, 8, LMUL_F2, 16, LMUL_F4, 32)
TUPLE_ENTRY (VNx4x4QI, true, VNx4QI, 4, LMUL_1, 8, LMUL_F2, 16, LMUL_F4, 32)
TUPLE_ENTRY (VNx5x4QI, true, VNx4QI, 5, LMUL_1, 8, LMUL_F2, 16, LMUL_F4, 32)
TUPLE_ENTRY (VNx6x4QI, true, VNx4QI, 6, LMUL_1, 8, LMUL_F2, 16, LMUL_F4, 32)
TUPLE_ENTRY (VNx7x4QI, true, VNx4QI, 7, LMUL_1, 8, LMUL_F2, 16, LMUL_F4, 32)
TUPLE_ENTRY (VNx8x4QI, true, VNx4QI, 8, LMUL_1, 8, LMUL_F2, 16, LMUL_F4, 32)
TUPLE_ENTRY (VNx2x2QI, true, VNx2QI, 2, LMUL_F2, 16, LMUL_F4, 32, LMUL_F8, 64)
TUPLE_ENTRY (VNx3x2QI, true, VNx2QI, 3, LMUL_F2, 16, LMUL_F4, 32, LMUL_F8, 64)
TUPLE_ENTRY (VNx4x2QI, true, VNx2QI, 4, LMUL_F2, 16, LMUL_F4, 32, LMUL_F8, 64)
TUPLE_ENTRY (VNx5x2QI, true, VNx2QI, 5, LMUL_F2, 16, LMUL_F4, 32, LMUL_F8, 64)
TUPLE_ENTRY (VNx6x2QI, true, VNx2QI, 6, LMUL_F2, 16, LMUL_F4, 32, LMUL_F8, 64)
TUPLE_ENTRY (VNx7x2QI, true, VNx2QI, 7, LMUL_F2, 16, LMUL_F4, 32, LMUL_F8, 64)
TUPLE_ENTRY (VNx8x2QI, true, VNx2QI, 8, LMUL_F2, 16, LMUL_F4, 32, LMUL_F8, 64)
TUPLE_ENTRY (VNx2x1QI, TARGET_MIN_VLEN < 128, VNx1QI, 2, LMUL_F4, 32, LMUL_F8, 64, LMUL_RESERVED, 0)
TUPLE_ENTRY (VNx3x1QI, TARGET_MIN_VLEN < 128, VNx1QI, 3, LMUL_F4, 32, LMUL_F8, 64, LMUL_RESERVED, 0)
TUPLE_ENTRY (VNx4x1QI, TARGET_MIN_VLEN < 128, VNx1QI, 4, LMUL_F4, 32, LMUL_F8, 64, LMUL_RESERVED, 0)
TUPLE_ENTRY (VNx5x1QI, TARGET_MIN_VLEN < 128, VNx1QI, 5, LMUL_F4, 32, LMUL_F8, 64, LMUL_RESERVED, 0)
TUPLE_ENTRY (VNx6x1QI, TARGET_MIN_VLEN < 128, VNx1QI, 6, LMUL_F4, 32, LMUL_F8, 64, LMUL_RESERVED, 0)
TUPLE_ENTRY (VNx7x1QI, TARGET_MIN_VLEN < 128, VNx1QI, 7, LMUL_F4, 32, LMUL_F8, 64, LMUL_RESERVED, 0)
TUPLE_ENTRY (VNx8x1QI, TARGET_MIN_VLEN < 128, VNx1QI, 8, LMUL_F4, 32, LMUL_F8, 64, LMUL_RESERVED, 0)

/* Tuple modes for EEW = 16.  */
TUPLE_ENTRY (VNx2x32HI, TARGET_MIN_VLEN >= 128, VNx32HI, 2, LMUL_RESERVED, 0, LMUL_RESERVED, 0, LMUL_4, 4)
TUPLE_ENTRY (VNx2x16HI, TARGET_MIN_VLEN >= 64, VNx16HI, 2, LMUL_RESERVED, 0, LMUL_4, 4, LMUL_2, 8)
TUPLE_ENTRY (VNx3x16HI, TARGET_MIN_VLEN >= 128, VNx16HI, 3, LMUL_RESERVED, 0, LMUL_RESERVED, 0, LMUL_2, 8)
TUPLE_ENTRY (VNx4x16HI, TARGET_MIN_VLEN >= 128, VNx16HI, 4, LMUL_RESERVED, 0, LMUL_RESERVED, 0, LMUL_2, 8)
TUPLE_ENTRY (VNx2x8HI, true, VNx8HI, 2, LMUL_4, 4, LMUL_2, 8, LMUL_1, 16)
TUPLE_ENTRY (VNx3x8HI, TARGET_MIN_VLEN >= 64, VNx8HI, 3, LMUL_RESERVED, 0, LMUL_2, 8, LMUL_1, 16)
TUPLE_ENTRY (VNx4x8HI, TARGET_MIN_VLEN >= 64, VNx8HI, 4, LMUL_RESERVED, 0, LMUL_2, 8, LMUL_1, 16)
TUPLE_ENTRY (VNx5x8HI, TARGET_MIN_VLEN >= 128, VNx8HI, 5, LMUL_RESERVED, 0, LMUL_RESERVED, 0, LMUL_1, 16)
TUPLE_ENTRY (VNx6x8HI, TARGET_MIN_VLEN >= 128, VNx8HI, 6, LMUL_RESERVED, 0, LMUL_RESERVED, 0, LMUL_1, 16)
TUPLE_ENTRY (VNx7x8HI, TARGET_MIN_VLEN >= 128, VNx8HI, 7, LMUL_RESERVED, 0, LMUL_RESERVED, 0, LMUL_1, 16)
TUPLE_ENTRY (VNx8x8HI, TARGET_MIN_VLEN >= 128, VNx8HI, 8, LMUL_RESERVED, 0, LMUL_RESERVED, 0, LMUL_1, 16)
TUPLE_ENTRY (VNx2x4HI, true, VNx4HI, 2, LMUL_2, 8, LMUL_1, 16, LMUL_F2, 32)
TUPLE_ENTRY (VNx3x4HI, true, VNx4HI, 3, LMUL_2, 8, LMUL_1, 16, LMUL_F2, 32)
TUPLE_ENTRY (VNx4x4HI, true, VNx4HI, 4, LMUL_2, 8, LMUL_1, 16, LMUL_F2, 32)
TUPLE_ENTRY (VNx5x4HI, TARGET_MIN_VLEN >= 64, VNx4HI, 5, LMUL_RESERVED, 0, LMUL_1, 16, LMUL_F2, 32)
TUPLE_ENTRY (VNx6x4HI, TARGET_MIN_VLEN >= 64, VNx4HI, 6, LMUL_RESERVED, 0, LMUL_1, 16, LMUL_F2, 32)
TUPLE_ENTRY (VNx7x4HI, TARGET_MIN_VLEN >= 64, VNx4HI, 7, LMUL_RESERVED, 0, LMUL_1, 16, LMUL_F2, 32)
TUPLE_ENTRY (VNx8x4HI, TARGET_MIN_VLEN >= 64, VNx4HI, 8, LMUL_RESERVED, 0, LMUL_1, 16, LMUL_F2, 32)
TUPLE_ENTRY (VNx2x2HI, true, VNx2HI, 2, LMUL_1, 16, LMUL_F2, 32, LMUL_F4, 64)
TUPLE_ENTRY (VNx3x2HI, true, VNx2HI, 3, LMUL_1, 16, LMUL_F2, 32, LMUL_F4, 64)
TUPLE_ENTRY (VNx4x2HI, true, VNx2HI, 4, LMUL_1, 16, LMUL_F2, 32, LMUL_F4, 64)
TUPLE_ENTRY (VNx5x2HI, true, VNx2HI, 5, LMUL_1, 16, LMUL_F2, 32, LMUL_F4, 64)
TUPLE_ENTRY (VNx6x2HI, true, VNx2HI, 6, LMUL_1, 16, LMUL_F2, 32, LMUL_F4, 64)
TUPLE_ENTRY (VNx7x2HI, true, VNx2HI, 7, LMUL_1, 16, LMUL_F2, 32, LMUL_F4, 64)
TUPLE_ENTRY (VNx8x2HI, true, VNx2HI, 8, LMUL_1, 16, LMUL_F2, 32, LMUL_F4, 64)
TUPLE_ENTRY (VNx2x1HI, TARGET_MIN_VLEN < 128, VNx1HI, 2, LMUL_F2, 32, LMUL_F4, 64, LMUL_RESERVED, 0)
TUPLE_ENTRY (VNx3x1HI, TARGET_MIN_VLEN < 128, VNx1HI, 3, LMUL_F2, 32, LMUL_F4, 64, LMUL_RESERVED, 0)
TUPLE_ENTRY (VNx4x1HI, TARGET_MIN_VLEN < 128, VNx1HI, 4, LMUL_F2, 32, LMUL_F4, 64, LMUL_RESERVED, 0)
TUPLE_ENTRY (VNx5x1HI, TARGET_MIN_VLEN < 128, VNx1HI, 5, LMUL_F2, 32, LMUL_F4, 64, LMUL_RESERVED, 0)
TUPLE_ENTRY (VNx6x1HI, TARGET_MIN_VLEN < 128, VNx1HI, 6, LMUL_F2, 32, LMUL_F4, 64, LMUL_RESERVED, 0)
TUPLE_ENTRY (VNx7x1HI, TARGET_MIN_VLEN < 128, VNx1HI, 7, LMUL_F2, 32, LMUL_F4, 64, LMUL_RESERVED, 0)
TUPLE_ENTRY (VNx8x1HI, TARGET_MIN_VLEN < 128, VNx1HI, 8, LMUL_F2, 32, LMUL_F4, 64, LMUL_RESERVED, 0)
TUPLE_ENTRY (VNx2x32HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 128, VNx32HF, 2, LMUL_RESERVED, 0, LMUL_RESERVED, 0, LMUL_4, 4)
TUPLE_ENTRY (VNx2x16HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 64, VNx16HF, 2, LMUL_RESERVED, 0, LMUL_4, 4, LMUL_2, 8)
TUPLE_ENTRY (VNx3x16HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 128, VNx16HF, 3, LMUL_RESERVED, 0, LMUL_RESERVED, 0, LMUL_2, 8)
TUPLE_ENTRY (VNx4x16HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 128, VNx16HF, 4, LMUL_RESERVED, 0, LMUL_RESERVED, 0, LMUL_2, 8)
TUPLE_ENTRY (VNx2x8HF, TARGET_VECTOR_ELEN_FP_16, VNx8HF, 2, LMUL_4, 4, LMUL_2, 8, LMUL_1, 16)
TUPLE_ENTRY (VNx3x8HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 64, VNx8HF, 3, LMUL_RESERVED, 0, LMUL_2, 8, LMUL_1, 16)
TUPLE_ENTRY (VNx4x8HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 64, VNx8HF, 4, LMUL_RESERVED, 0, LMUL_2, 8, LMUL_1, 16)
TUPLE_ENTRY (VNx5x8HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 128, VNx8HF, 5, LMUL_RESERVED, 0, LMUL_RESERVED, 0, LMUL_1, 16)
TUPLE_ENTRY (VNx6x8HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 128, VNx8HF, 6, LMUL_RESERVED, 0, LMUL_RESERVED, 0, LMUL_1, 16)
TUPLE_ENTRY (VNx7x8HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 128, VNx8HF, 7, LMUL_RESERVED, 0, LMUL_RESERVED, 0, LMUL_1, 16)
TUPLE_ENTRY (VNx8x8HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 128, VNx8HF, 8, LMUL_RESERVED, 0, LMUL_RESERVED, 0, LMUL_1, 16)
TUPLE_ENTRY (VNx2x4HF, TARGET_VECTOR_ELEN_FP_16, VNx4HF, 2, LMUL_2, 8, LMUL_1, 16, LMUL_F2, 32)
TUPLE_ENTRY (VNx3x4HF, TARGET_VECTOR_ELEN_FP_16, VNx4HF, 3, LMUL_2, 8, LMUL_1, 16, LMUL_F2, 32)
TUPLE_ENTRY (VNx4x4HF, TARGET_VECTOR_ELEN_FP_16, VNx4HF, 4, LMUL_2, 8, LMUL_1, 16, LMUL_F2, 32)
TUPLE_ENTRY (VNx5x4HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 64, VNx4HF, 5, LMUL_RESERVED, 0, LMUL_1, 16, LMUL_F2, 32)
TUPLE_ENTRY (VNx6x4HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 64, VNx4HF, 6, LMUL_RESERVED, 0, LMUL_1, 16, LMUL_F2, 32)
TUPLE_ENTRY (VNx7x4HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 64, VNx4HF, 7, LMUL_RESERVED, 0, LMUL_1, 16, LMUL_F2, 32)
TUPLE_ENTRY (VNx8x4HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 64, VNx4HF, 8, LMUL_RESERVED, 0, LMUL_1, 16, LMUL_F2, 32)
TUPLE_ENTRY (VNx2x2HF, TARGET_VECTOR_ELEN_FP_16, VNx2HF, 2, LMUL_1, 16, LMUL_F2, 32, LMUL_F4, 64)
TUPLE_ENTRY (VNx3x2HF, TARGET_VECTOR_ELEN_FP_16, VNx2HF, 3, LMUL_1, 16, LMUL_F2, 32, LMUL_F4, 64)
TUPLE_ENTRY (VNx4x2HF, TARGET_VECTOR_ELEN_FP_16, VNx2HF, 4, LMUL_1, 16, LMUL_F2, 32, LMUL_F4, 64)
TUPLE_ENTRY (VNx5x2HF, TARGET_VECTOR_ELEN_FP_16, VNx2HF, 5, LMUL_1, 16, LMUL_F2, 32, LMUL_F4, 64)
TUPLE_ENTRY (VNx6x2HF, TARGET_VECTOR_ELEN_FP_16, VNx2HF, 6, LMUL_1, 16, LMUL_F2, 32, LMUL_F4, 64)
TUPLE_ENTRY (VNx7x2HF, TARGET_VECTOR_ELEN_FP_16, VNx2HF, 7, LMUL_1, 16, LMUL_F2, 32, LMUL_F4, 64)
TUPLE_ENTRY (VNx8x2HF, TARGET_VECTOR_ELEN_FP_16, VNx2HF, 8, LMUL_1, 16, LMUL_F2, 32, LMUL_F4, 64)
TUPLE_ENTRY (VNx2x1HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN < 128, VNx1HF, 2, LMUL_F2, 32, LMUL_F4, 64, LMUL_RESERVED, 0)
TUPLE_ENTRY (VNx3x1HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN < 128, VNx1HF, 3, LMUL_F2, 32, LMUL_F4, 64, LMUL_RESERVED, 0)
TUPLE_ENTRY (VNx4x1HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN < 128, VNx1HF, 4, LMUL_F2, 32, LMUL_F4, 64, LMUL_RESERVED, 0)
TUPLE_ENTRY (VNx5x1HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN < 128, VNx1HF, 5, LMUL_F2, 32, LMUL_F4, 64, LMUL_RESERVED, 0)
TUPLE_ENTRY (VNx6x1HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN < 128, VNx1HF, 6, LMUL_F2, 32, LMUL_F4, 64, LMUL_RESERVED, 0)
TUPLE_ENTRY (VNx7x1HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN < 128, VNx1HF, 7, LMUL_F2, 32, LMUL_F4, 64, LMUL_RESERVED, 0)
TUPLE_ENTRY (VNx8x1HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN < 128, VNx1HF, 8, LMUL_F2, 32, LMUL_F4, 64, LMUL_RESERVED, 0)

/* Tuple modes for EEW = 32.  */
TUPLE_ENTRY (VNx2x16SI, TARGET_MIN_VLEN >= 128, VNx16SI, 2, LMUL_RESERVED, 0, LMUL_RESERVED, 0, LMUL_4, 8)
TUPLE_ENTRY (VNx2x8SI, TARGET_MIN_VLEN >= 64, VNx8SI, 2, LMUL_RESERVED, 0, LMUL_4, 8, LMUL_2, 16)
TUPLE_ENTRY (VNx3x8SI, TARGET_MIN_VLEN >= 128, VNx8SI, 3, LMUL_RESERVED, 0, LMUL_RESERVED, 0, LMUL_2, 16)
TUPLE_ENTRY (VNx4x8SI, TARGET_MIN_VLEN >= 128, VNx8SI, 4, LMUL_RESERVED, 0, LMUL_RESERVED, 0, LMUL_2, 16)
TUPLE_ENTRY (VNx2x4SI, true, VNx4SI, 2, LMUL_4, 8, LMUL_2, 16, LMUL_1, 32)
TUPLE_ENTRY (VNx3x4SI, TARGET_MIN_VLEN >= 64, VNx4SI, 3, LMUL_RESERVED, 0, LMUL_2, 16, LMUL_1, 32)
TUPLE_ENTRY (VNx4x4SI, TARGET_MIN_VLEN >= 64, VNx4SI, 4, LMUL_RESERVED, 0, LMUL_2, 16, LMUL_1, 32)
TUPLE_ENTRY (VNx5x4SI, TARGET_MIN_VLEN >= 128, VNx4SI, 5, LMUL_RESERVED, 0, LMUL_RESERVED, 0, LMUL_1, 32)
TUPLE_ENTRY (VNx6x4SI, TARGET_MIN_VLEN >= 128, VNx4SI, 6, LMUL_RESERVED, 0, LMUL_RESERVED, 0, LMUL_1, 32)
TUPLE_ENTRY (VNx7x4SI, TARGET_MIN_VLEN >= 128, VNx4SI, 7, LMUL_RESERVED, 0, LMUL_RESERVED, 0, LMUL_1, 32)
TUPLE_ENTRY (VNx8x4SI, TARGET_MIN_VLEN >= 128, VNx4SI, 8, LMUL_RESERVED, 0, LMUL_RESERVED, 0, LMUL_1, 32)
TUPLE_ENTRY (VNx2x2SI, true, VNx2SI, 2, LMUL_2, 16, LMUL_1, 32, LMUL_F2, 64)
TUPLE_ENTRY (VNx3x2SI, true, VNx2SI, 3, LMUL_2, 16, LMUL_1, 32, LMUL_F2, 64)
TUPLE_ENTRY (VNx4x2SI, true, VNx2SI, 4, LMUL_2, 16, LMUL_1, 32, LMUL_F2, 64)
TUPLE_ENTRY (VNx5x2SI, TARGET_MIN_VLEN >= 64, VNx2SI, 5, LMUL_RESERVED, 0, LMUL_1, 32, LMUL_F2, 64)
TUPLE_ENTRY (VNx6x2SI, TARGET_MIN_VLEN >= 64, VNx2SI, 6, LMUL_RESERVED, 0, LMUL_1, 32, LMUL_F2, 64)
TUPLE_ENTRY (VNx7x2SI, TARGET_MIN_VLEN >= 64, VNx2SI, 7, LMUL_RESERVED, 0, LMUL_1, 32, LMUL_F2, 64)
TUPLE_ENTRY (VNx8x2SI, TARGET_MIN_VLEN >= 64, VNx2SI, 8, LMUL_RESERVED, 0, LMUL_1, 32, LMUL_F2, 64)
TUPLE_ENTRY (VNx2x1SI, TARGET_MIN_VLEN < 128, VNx1SI, 2, LMUL_1, 32, LMUL_F2, 64, LMUL_RESERVED, 0)
TUPLE_ENTRY (VNx3x1SI, TARGET_MIN_VLEN < 128, VNx1SI, 3, LMUL_1, 32, LMUL_F2, 64, LMUL_RESERVED, 0)
TUPLE_ENTRY (VNx4x1SI, TARGET_MIN_VLEN < 128, VNx1SI, 4, LMUL_1, 32, LMUL_F2, 64, LMUL_RESERVED, 0)
TUPLE_ENTRY (VNx5x1SI, TARGET_MIN_VLEN < 128, VNx1SI, 5, LMUL_1, 32, LMUL_F2, 64, LMUL_RESERVED, 0)
TUPLE_ENTRY (VNx6x1SI, TARGET_MIN_VLEN < 128, VNx1SI, 6, LMUL_1, 32, LMUL_F2, 64, LMUL_RESERVED, 0)
TUPLE_ENTRY (VNx7x1SI, TARGET_MIN_VLEN < 128, VNx1SI, 7, LMUL_1, 32, LMUL_F2, 64, LMUL_RESERVED, 0)
TUPLE_ENTRY (VNx8x1SI, TARGET_MIN_VLEN < 128, VNx1SI, 8, LMUL_1, 32, LMUL_F2, 64, LMUL_RESERVED, 0)
TUPLE_ENTRY (VNx2x16SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 128, VNx16SF, 2, LMUL_RESERVED, 0, LMUL_RESERVED, 0, LMUL_4, 8)
TUPLE_ENTRY (VNx2x8SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 64, VNx8SF, 2, LMUL_RESERVED, 0, LMUL_4, 8, LMUL_2, 16)
TUPLE_ENTRY (VNx3x8SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 128, VNx8SF, 3, LMUL_RESERVED, 0, LMUL_RESERVED, 0, LMUL_2, 16)
TUPLE_ENTRY (VNx4x8SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 128, VNx8SF, 4, LMUL_RESERVED, 0, LMUL_RESERVED, 0, LMUL_2, 16)
TUPLE_ENTRY (VNx2x4SF, TARGET_VECTOR_ELEN_FP_32, VNx4SF, 2, LMUL_4, 8, LMUL_2, 16, LMUL_1, 32)
TUPLE_ENTRY (VNx3x4SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 64, VNx4SF, 3, LMUL_RESERVED, 0, LMUL_2, 16, LMUL_1, 32)
TUPLE_ENTRY (VNx4x4SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 64, VNx4SF, 4, LMUL_RESERVED, 0, LMUL_2, 16, LMUL_1, 32)
TUPLE_ENTRY (VNx5x4SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 128, VNx4SF, 5, LMUL_RESERVED, 0, LMUL_RESERVED, 0, LMUL_1, 32)
TUPLE_ENTRY (VNx6x4SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 128, VNx4SF, 6, LMUL_RESERVED, 0, LMUL_RESERVED, 0, LMUL_1, 32)
TUPLE_ENTRY (VNx7x4SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 128, VNx4SF, 7, LMUL_RESERVED, 0, LMUL_RESERVED, 0, LMUL_1, 32)
TUPLE_ENTRY (VNx8x4SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 128, VNx4SF, 8, LMUL_RESERVED, 0, LMUL_RESERVED, 0, LMUL_1, 32)
TUPLE_ENTRY (VNx2x2SF, TARGET_VECTOR_ELEN_FP_32, VNx2SF, 2, LMUL_2, 16, LMUL_1, 32, LMUL_F2, 64)
TUPLE_ENTRY (VNx3x2SF, TARGET_VECTOR_ELEN_FP_32, VNx2SF, 3, LMUL_2, 16, LMUL_1, 32, LMUL_F2, 64)
TUPLE_ENTRY (VNx4x2SF, TARGET_VECTOR_ELEN_FP_32, VNx2SF, 4, LMUL_2, 16, LMUL_1, 32, LMUL_F2, 64)
TUPLE_ENTRY (VNx5x2SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 64, VNx2SF, 5, LMUL_RESERVED, 0, LMUL_1, 32, LMUL_F2, 64)
TUPLE_ENTRY (VNx6x2SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 64, VNx2SF, 6, LMUL_RESERVED, 0, LMUL_1, 32, LMUL_F2, 64)
TUPLE_ENTRY (VNx7x2SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 64, VNx2SF, 7, LMUL_RESERVED, 0, LMUL_1, 32, LMUL_F2, 64)
TUPLE_ENTRY (VNx8x2SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 64, VNx2SF, 8, LMUL_RESERVED, 0, LMUL_1, 32, LMUL_F2, 64)
TUPLE_ENTRY (VNx2x1SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN < 128, VNx1SF, 2, LMUL_1, 32, LMUL_F2, 64, LMUL_RESERVED, 0)
TUPLE_ENTRY (VNx3x1SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN < 128, VNx1SF, 3, LMUL_1, 32, LMUL_F2, 64, LMUL_RESERVED, 0)
TUPLE_ENTRY (VNx4x1SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN < 128, VNx1SF, 4, LMUL_1, 32, LMUL_F2, 64, LMUL_RESERVED, 0)
TUPLE_ENTRY (VNx5x1SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN < 128, VNx1SF, 5, LMUL_1, 32, LMUL_F2, 64, LMUL_RESERVED, 0)
TUPLE_ENTRY (VNx6x1SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN < 128, VNx1SF, 6, LMUL_1, 32, LMUL_F2, 64, LMUL_RESERVED, 0)
TUPLE_ENTRY (VNx7x1SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN < 128, VNx1SF, 7, LMUL_1, 32, LMUL_F2, 64, LMUL_RESERVED, 0)
TUPLE_ENTRY (VNx8x1SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN < 128, VNx1SF, 8, LMUL_1, 32, LMUL_F2, 64, LMUL_RESERVED, 0)

/* Tuple modes for EEW = 64.  */
TUPLE_ENTRY (VNx2x8DI, TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 128, VNx8DI, 2, LMUL_RESERVED, 0, LMUL_RESERVED, 0, LMUL_4, 16)
TUPLE_ENTRY (VNx2x4DI, TARGET_VECTOR_ELEN_64, VNx4DI, 2, LMUL_RESERVED, 0, LMUL_4, 16, LMUL_2, 32)
TUPLE_ENTRY (VNx3x4DI, TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 128, VNx4DI, 3, LMUL_RESERVED, 0, LMUL_RESERVED, 0, LMUL_2, 32)
TUPLE_ENTRY (VNx4x4DI, TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 128, VNx4DI, 4, LMUL_RESERVED, 0, LMUL_RESERVED, 0, LMUL_2, 32)
TUPLE_ENTRY (VNx2x2DI, TARGET_VECTOR_ELEN_64, VNx2DI, 2, LMUL_RESERVED, 0, LMUL_2, 32, LMUL_1, 64)
TUPLE_ENTRY (VNx3x2DI, TARGET_VECTOR_ELEN_64, VNx2DI, 3, LMUL_RESERVED, 0, LMUL_2, 32, LMUL_1, 64)
TUPLE_ENTRY (VNx4x2DI, TARGET_VECTOR_ELEN_64, VNx2DI, 4, LMUL_RESERVED, 0, LMUL_2, 32, LMUL_1, 64)
TUPLE_ENTRY (VNx5x2DI, TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 128, VNx2DI, 5, LMUL_RESERVED, 0, LMUL_RESERVED, 0, LMUL_1, 64)
TUPLE_ENTRY (VNx6x2DI, TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 128, VNx2DI, 6, LMUL_RESERVED, 0, LMUL_RESERVED, 0, LMUL_1, 64)
TUPLE_ENTRY (VNx7x2DI, TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 128, VNx2DI, 7, LMUL_RESERVED, 0, LMUL_RESERVED, 0, LMUL_1, 64)
TUPLE_ENTRY (VNx8x2DI, TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 128, VNx2DI, 8, LMUL_RESERVED, 0, LMUL_RESERVED, 0, LMUL_1, 64)
TUPLE_ENTRY (VNx2x1DI, TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN < 128, VNx1DI, 2, LMUL_RESERVED, 0, LMUL_1, 64, LMUL_RESERVED, 0)
TUPLE_ENTRY (VNx3x1DI, TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN < 128, VNx1DI, 3, LMUL_RESERVED, 0, LMUL_1, 64, LMUL_RESERVED, 0)
TUPLE_ENTRY (VNx4x1DI, TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN < 128, VNx1DI, 4, LMUL_RESERVED, 0, LMUL_1, 64, LMUL_RESERVED, 0)
TUPLE_ENTRY (VNx5x1DI, TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN < 128, VNx1DI, 5, LMUL_RESERVED, 0, LMUL_1, 64, LMUL_RESERVED, 0)
TUPLE_ENTRY (VNx6x1DI, TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN < 128, VNx1DI, 6, LMUL_RESERVED, 0, LMUL_1, 64, LMUL_RESERVED, 0)
TUPLE_ENTRY (VNx7x1DI, TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN < 128, VNx1DI, 7, LMUL_RESERVED, 0, LMUL_1, 64, LMUL_RESERVED, 0)
TUPLE_ENTRY (VNx8x1DI, TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN < 128, VNx1DI, 8, LMUL_RESERVED, 0, LMUL_1, 64, LMUL_RESERVED, 0)
TUPLE_ENTRY (VNx2x8DF, TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 128, VNx8DF, 2, LMUL_RESERVED, 0, LMUL_RESERVED, 0, LMUL_4, 16)
TUPLE_ENTRY (VNx2x4DF, TARGET_VECTOR_ELEN_FP_64, VNx4DF, 2, LMUL_RESERVED, 0, LMUL_4, 16, LMUL_2, 32)
TUPLE_ENTRY (VNx3x4DF, TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 128, VNx4DF, 3, LMUL_RESERVED, 0, LMUL_RESERVED, 0, LMUL_2, 32)
TUPLE_ENTRY (VNx4x4DF, TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 128, VNx4DF, 4, LMUL_RESERVED, 0, LMUL_RESERVED, 0, LMUL_2, 32)
TUPLE_ENTRY (VNx2x2DF, TARGET_VECTOR_ELEN_FP_64, VNx2DF, 2, LMUL_RESERVED, 0, LMUL_2, 32, LMUL_1, 64)
TUPLE_ENTRY (VNx3x2DF, TARGET_VECTOR_ELEN_FP_64, VNx2DF, 3, LMUL_RESERVED, 0, LMUL_2, 32, LMUL_1, 64)
TUPLE_ENTRY (VNx4x2DF, TARGET_VECTOR_ELEN_FP_64, VNx2DF, 4, LMUL_RESERVED, 0, LMUL_2, 32, LMUL_1, 64)
TUPLE_ENTRY (VNx5x2DF, TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 128, VNx2DF, 5, LMUL_RESERVED, 0, LMUL_RESERVED, 0, LMUL_1, 64)
TUPLE_ENTRY (VNx6x2DF, TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 128, VNx2DF, 6, LMUL_RESERVED, 0, LMUL_RESERVED, 0, LMUL_1, 64)
TUPLE_ENTRY (VNx7x2DF, TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 128, VNx2DF, 7, LMUL_RESERVED, 0, LMUL_RESERVED, 0, LMUL_1, 64)
TUPLE_ENTRY (VNx8x2DF, TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 128, VNx2DF, 8, LMUL_RESERVED, 0, LMUL_RESERVED, 0, LMUL_1, 64)
TUPLE_ENTRY (VNx2x1DF, TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN < 128, VNx1DF, 2, LMUL_RESERVED, 0, LMUL_1, 64, LMUL_RESERVED, 0)
TUPLE_ENTRY (VNx3x1DF, TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN < 128, VNx1DF, 3, LMUL_RESERVED, 0, LMUL_1, 64, LMUL_RESERVED, 0)
TUPLE_ENTRY (VNx4x1DF, TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN < 128, VNx1DF, 4, LMUL_RESERVED, 0, LMUL_1, 64, LMUL_RESERVED, 0)
TUPLE_ENTRY (VNx5x1DF, TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN < 128, VNx1DF, 5, LMUL_RESERVED, 0, LMUL_1, 64, LMUL_RESERVED, 0)
TUPLE_ENTRY (VNx6x1DF, TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN < 128, VNx1DF, 6, LMUL_RESERVED, 0, LMUL_1, 64, LMUL_RESERVED, 0)
TUPLE_ENTRY (VNx7x1DF, TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN < 128, VNx1DF, 7, LMUL_RESERVED, 0, LMUL_1, 64, LMUL_RESERVED, 0)
TUPLE_ENTRY (VNx8x1DF, TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN < 128, VNx1DF, 8, LMUL_RESERVED, 0, LMUL_1, 64, LMUL_RESERVED, 0)

#undef ENTRY
#undef TUPLE_ENTRY