// SPDX-License-Identifier: GPL-2.0
/*
 * RSS and Classifier helpers for Marvell PPv2 Network Controller
 *
 * Copyright (C) 2014 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 */

#include "mvpp2.h"
#include "mvpp2_cls.h"
#include "mvpp2_prs.h"

#define MVPP2_DEF_FLOW(_type, _id, _opts, _ri, _ri_mask)	\
{								\
	.flow_type = _type,					\
	.flow_id = _id,						\
	.supported_hash_opts = _opts,				\
	.prs_ri = {						\
		.ri = _ri,					\
		.ri_mask = _ri_mask				\
	}							\
}

static const struct mvpp2_cls_flow cls_flows[MVPP2_N_PRS_FLOWS] = {
	/* TCP over IPv4 flows, not fragmented, no vlan tag */
	MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_UNTAG,
		       MVPP22_CLS_HEK_IP4_5T,
		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
		       MVPP2_PRS_RI_L4_TCP,
		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),

	MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_UNTAG,
		       MVPP22_CLS_HEK_IP4_5T,
		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
		       MVPP2_PRS_RI_L4_TCP,
		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),

	MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_UNTAG,
		       MVPP22_CLS_HEK_IP4_5T,
		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
		       MVPP2_PRS_RI_L4_TCP,
		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),

	/* TCP over IPv4 flows, not fragmented, with vlan tag */
	MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_TAG,
		       MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_TAGGED,
		       MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_TCP,
		       MVPP2_PRS_IP_MASK),

	MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_TAG,
		       MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_TAGGED,
		       MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_TCP,
		       MVPP2_PRS_IP_MASK),

	MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_TAG,
		       MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_TAGGED,
		       MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_TCP,
		       MVPP2_PRS_IP_MASK),

	/* TCP over IPv4 flows, fragmented, no vlan tag */
	MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
		       MVPP22_CLS_HEK_IP4_2T,
		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
		       MVPP2_PRS_RI_L4_TCP,
		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),

	MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
		       MVPP22_CLS_HEK_IP4_2T,
		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
		       MVPP2_PRS_RI_L4_TCP,
		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),

	MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
		       MVPP22_CLS_HEK_IP4_2T,
		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
		       MVPP2_PRS_RI_L4_TCP,
		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),

	/* TCP over IPv4 flows, fragmented, with vlan tag */
	MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_TAG,
		       MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
		       MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_TCP,
		       MVPP2_PRS_IP_MASK),

	MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_TAG,
		       MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
		       MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_TCP,
		       MVPP2_PRS_IP_MASK),

	MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_TAG,
		       MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
		       MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_TCP,
		       MVPP2_PRS_IP_MASK),

	/* UDP over IPv4 flows, not fragmented, no vlan tag */
	MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_UNTAG,
		       MVPP22_CLS_HEK_IP4_5T,
		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
		       MVPP2_PRS_RI_L4_UDP,
		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),

	MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_UNTAG,
		       MVPP22_CLS_HEK_IP4_5T,
		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
		       MVPP2_PRS_RI_L4_UDP,
		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),

	MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_UNTAG,
		       MVPP22_CLS_HEK_IP4_5T,
		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
		       MVPP2_PRS_RI_L4_UDP,
		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),

	/* UDP over IPv4 flows, not fragmented, with vlan tag */
	MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_TAG,
		       MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_TAGGED,
		       MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_UDP,
		       MVPP2_PRS_IP_MASK),

	MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_TAG,
		       MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_TAGGED,
		       MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_UDP,
		       MVPP2_PRS_IP_MASK),

	MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_TAG,
		       MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_TAGGED,
		       MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_UDP,
		       MVPP2_PRS_IP_MASK),

	/* UDP over IPv4 flows, fragmented, no vlan tag */
	MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
		       MVPP22_CLS_HEK_IP4_2T,
		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
		       MVPP2_PRS_RI_L4_UDP,
		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),

	MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
		       MVPP22_CLS_HEK_IP4_2T,
		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
		       MVPP2_PRS_RI_L4_UDP,
		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),

	MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
		       MVPP22_CLS_HEK_IP4_2T,
		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
		       MVPP2_PRS_RI_L4_UDP,
		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),

	/* UDP over IPv4 flows, fragmented, with vlan tag */
	MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_TAG,
		       MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
		       MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_UDP,
		       MVPP2_PRS_IP_MASK),

	MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_TAG,
		       MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
		       MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_UDP,
		       MVPP2_PRS_IP_MASK),

	MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_TAG,
		       MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
		       MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_UDP,
		       MVPP2_PRS_IP_MASK),

	/* TCP over IPv6 flows, not fragmented, no vlan tag */
	MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_NF_UNTAG,
		       MVPP22_CLS_HEK_IP6_5T,
		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
		       MVPP2_PRS_RI_L4_TCP,
		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),

	MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_NF_UNTAG,
		       MVPP22_CLS_HEK_IP6_5T,
		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
		       MVPP2_PRS_RI_L4_TCP,
		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),

	/* TCP over IPv6 flows, not fragmented, with vlan tag */
	MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_NF_TAG,
		       MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_TAGGED,
		       MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_L4_TCP,
		       MVPP2_PRS_IP_MASK),

	MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_NF_TAG,
		       MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_TAGGED,
		       MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_L4_TCP,
		       MVPP2_PRS_IP_MASK),

	/* TCP over IPv6 flows, fragmented, no vlan tag */
	MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_FRAG_UNTAG,
		       MVPP22_CLS_HEK_IP6_2T,
		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
		       MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP,
		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),

	MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_FRAG_UNTAG,
		       MVPP22_CLS_HEK_IP6_2T,
		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
		       MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP,
		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),

	/* TCP over IPv6 flows, fragmented, with vlan tag */
	MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_FRAG_TAG,
		       MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_TAGGED,
		       MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_IP_FRAG_TRUE |
		       MVPP2_PRS_RI_L4_TCP,
		       MVPP2_PRS_IP_MASK),

	MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_FRAG_TAG,
		       MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_TAGGED,
		       MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_IP_FRAG_TRUE |
		       MVPP2_PRS_RI_L4_TCP,
		       MVPP2_PRS_IP_MASK),

	/* UDP over IPv6 flows, not fragmented, no vlan tag */
	MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_NF_UNTAG,
		       MVPP22_CLS_HEK_IP6_5T,
		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
		       MVPP2_PRS_RI_L4_UDP,
		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),

	MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_NF_UNTAG,
		       MVPP22_CLS_HEK_IP6_5T,
		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
		       MVPP2_PRS_RI_L4_UDP,
		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),

	/* UDP over IPv6 flows, not fragmented, with vlan tag */
	MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_NF_TAG,
		       MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_TAGGED,
		       MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_L4_UDP,
		       MVPP2_PRS_IP_MASK),

	MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_NF_TAG,
		       MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_TAGGED,
		       MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_L4_UDP,
		       MVPP2_PRS_IP_MASK),

	/* UDP over IPv6 flows, fragmented, no vlan tag */
	MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_FRAG_UNTAG,
		       MVPP22_CLS_HEK_IP6_2T,
		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
		       MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP,
		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),

	MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_FRAG_UNTAG,
		       MVPP22_CLS_HEK_IP6_2T,
		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
		       MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP,
		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),

	/* UDP over IPv6 flows, fragmented, with vlan tag */
	MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_FRAG_TAG,
		       MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_TAGGED,
		       MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_IP_FRAG_TRUE |
		       MVPP2_PRS_RI_L4_UDP,
		       MVPP2_PRS_IP_MASK),

	MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_FRAG_TAG,
		       MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_TAGGED,
		       MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_IP_FRAG_TRUE |
		       MVPP2_PRS_RI_L4_UDP,
		       MVPP2_PRS_IP_MASK),

	/* IPv4 flows, no vlan tag */
	MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_UNTAG,
		       MVPP22_CLS_HEK_IP4_2T,
		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4,
		       MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
	MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_UNTAG,
		       MVPP22_CLS_HEK_IP4_2T,
		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT,
		       MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
	MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_UNTAG,
		       MVPP22_CLS_HEK_IP4_2T,
		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER,
		       MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),

	/* IPv4 flows, with vlan tag */
	MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_TAG,
		       MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
		       MVPP2_PRS_RI_L3_IP4,
		       MVPP2_PRS_RI_L3_PROTO_MASK),
	MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_TAG,
		       MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
		       MVPP2_PRS_RI_L3_IP4_OPT,
		       MVPP2_PRS_RI_L3_PROTO_MASK),
	MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_TAG,
		       MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
		       MVPP2_PRS_RI_L3_IP4_OTHER,
		       MVPP2_PRS_RI_L3_PROTO_MASK),

	/* IPv6 flows, no vlan tag */
	MVPP2_DEF_FLOW(MVPP22_FLOW_IP6, MVPP2_FL_IP6_UNTAG,
		       MVPP22_CLS_HEK_IP6_2T,
		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6,
		       MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
	MVPP2_DEF_FLOW(MVPP22_FLOW_IP6, MVPP2_FL_IP6_UNTAG,
		       MVPP22_CLS_HEK_IP6_2T,
		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT,
		       MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),

	/* IPv6 flows, with vlan tag */
	MVPP2_DEF_FLOW(MVPP22_FLOW_IP6, MVPP2_FL_IP6_TAG,
		       MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_TAGGED,
		       MVPP2_PRS_RI_L3_IP6,
		       MVPP2_PRS_RI_L3_PROTO_MASK),
	MVPP2_DEF_FLOW(MVPP22_FLOW_IP6, MVPP2_FL_IP6_TAG,
		       MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_TAGGED,
		       MVPP2_PRS_RI_L3_IP6_EXT,
		       MVPP2_PRS_RI_L3_PROTO_MASK),

	/* Non IP flow, no vlan tag */
	MVPP2_DEF_FLOW(MVPP22_FLOW_ETHERNET, MVPP2_FL_NON_IP_UNTAG,
		       0,
		       MVPP2_PRS_RI_VLAN_NONE,
		       MVPP2_PRS_RI_VLAN_MASK),
	/* Non IP flow, with vlan tag */
	MVPP2_DEF_FLOW(MVPP22_FLOW_ETHERNET, MVPP2_FL_NON_IP_TAG,
		       MVPP22_CLS_HEK_OPT_VLAN,
		       0, 0),
};
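
/* Each ethtool flow type above expands into several classifier flows: one
 * per L3 variant (plain, with options or extensions, "other"), both tagged
 * and untagged, fragmented and non-fragmented. MVPP22_FLOW_TCP4, for
 * example, spans twelve entries: 3 L3 variants x {untagged, tagged} x
 * {non-fragmented, fragmented}.
 */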

u32 mvpp2_cls_flow_hits(struct mvpp2 *priv, int index)
{
	mvpp2_write(priv, MVPP2_CTRS_IDX, index);

	return mvpp2_read(priv, MVPP2_CLS_FLOW_TBL_HIT_CTR);
}

void mvpp2_cls_flow_read(struct mvpp2 *priv, int index,
			 struct mvpp2_cls_flow_entry *fe)
{
	fe->index = index;
	mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, index);
	fe->data[0] = mvpp2_read(priv, MVPP2_CLS_FLOW_TBL0_REG);
	fe->data[1] = mvpp2_read(priv, MVPP2_CLS_FLOW_TBL1_REG);
	fe->data[2] = mvpp2_read(priv, MVPP2_CLS_FLOW_TBL2_REG);
}

/* Update classification flow table registers */
static void mvpp2_cls_flow_write(struct mvpp2 *priv,
				 struct mvpp2_cls_flow_entry *fe)
{
	mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
}

u32 mvpp2_cls_lookup_hits(struct mvpp2 *priv, int index)
{
	mvpp2_write(priv, MVPP2_CTRS_IDX, index);

	return mvpp2_read(priv, MVPP2_CLS_DEC_TBL_HIT_CTR);
}

void mvpp2_cls_lookup_read(struct mvpp2 *priv, int lkpid, int way,
			   struct mvpp2_cls_lookup_entry *le)
{
	u32 val;

	val = (way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | lkpid;
	mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
	le->way = way;
	le->lkpid = lkpid;
	le->data = mvpp2_read(priv, MVPP2_CLS_LKP_TBL_REG);
}

/* Update classification lookup table register */
static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
				   struct mvpp2_cls_lookup_entry *le)
{
	u32 val;

	val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
	mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
	mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data);
}

/* Operations on flow entry */
static int mvpp2_cls_flow_hek_num_get(struct mvpp2_cls_flow_entry *fe)
{
	return fe->data[1] & MVPP2_CLS_FLOW_TBL1_N_FIELDS_MASK;
}

static void mvpp2_cls_flow_hek_num_set(struct mvpp2_cls_flow_entry *fe,
				       int num_of_fields)
{
	fe->data[1] &= ~MVPP2_CLS_FLOW_TBL1_N_FIELDS_MASK;
	fe->data[1] |= MVPP2_CLS_FLOW_TBL1_N_FIELDS(num_of_fields);
}

static int mvpp2_cls_flow_hek_get(struct mvpp2_cls_flow_entry *fe,
				  int field_index)
{
	return (fe->data[2] >> MVPP2_CLS_FLOW_TBL2_FLD_OFFS(field_index)) &
		MVPP2_CLS_FLOW_TBL2_FLD_MASK;
}

static void mvpp2_cls_flow_hek_set(struct mvpp2_cls_flow_entry *fe,
				   int field_index, int field_id)
{
	fe->data[2] &= ~MVPP2_CLS_FLOW_TBL2_FLD(field_index,
						MVPP2_CLS_FLOW_TBL2_FLD_MASK);
	fe->data[2] |= MVPP2_CLS_FLOW_TBL2_FLD(field_index, field_id);
}

static void mvpp2_cls_flow_eng_set(struct mvpp2_cls_flow_entry *fe,
				   int engine)
{
	fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_ENG(MVPP2_CLS_FLOW_TBL0_ENG_MASK);
	fe->data[0] |= MVPP2_CLS_FLOW_TBL0_ENG(engine);
}

int mvpp2_cls_flow_eng_get(struct mvpp2_cls_flow_entry *fe)
{
	return (fe->data[0] >> MVPP2_CLS_FLOW_TBL0_OFFS) &
		MVPP2_CLS_FLOW_TBL0_ENG_MASK;
}

static void mvpp2_cls_flow_port_id_sel(struct mvpp2_cls_flow_entry *fe,
				       bool from_packet)
{
	if (from_packet)
		fe->data[0] |= MVPP2_CLS_FLOW_TBL0_PORT_ID_SEL;
	else
		fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_PORT_ID_SEL;
}

static void mvpp2_cls_flow_last_set(struct mvpp2_cls_flow_entry *fe,
				    bool is_last)
{
	fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_LAST;
	fe->data[0] |= !!is_last;
}

static void mvpp2_cls_flow_pri_set(struct mvpp2_cls_flow_entry *fe, int prio)
{
	fe->data[1] &= ~MVPP2_CLS_FLOW_TBL1_PRIO(MVPP2_CLS_FLOW_TBL1_PRIO_MASK);
	fe->data[1] |= MVPP2_CLS_FLOW_TBL1_PRIO(prio);
}

static void mvpp2_cls_flow_port_add(struct mvpp2_cls_flow_entry *fe,
				    u32 port)
{
	fe->data[0] |= MVPP2_CLS_FLOW_TBL0_PORT_ID(port);
}

static void mvpp2_cls_flow_port_remove(struct mvpp2_cls_flow_entry *fe,
				       u32 port)
{
	fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_PORT_ID(port);
}

static void mvpp2_cls_flow_lu_type_set(struct mvpp2_cls_flow_entry *fe,
				       u8 lu_type)
{
	fe->data[1] &= ~MVPP2_CLS_FLOW_TBL1_LU_TYPE(MVPP2_CLS_LU_TYPE_MASK);
	fe->data[1] |= MVPP2_CLS_FLOW_TBL1_LU_TYPE(lu_type);
}
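
/* Summary of the flow table entry layout manipulated by the helpers above:
 * data[0] carries the engine, port map, port-id source and LAST bit,
 * data[1] the priority, lookup type and number of HEK fields, and data[2]
 * the HEK field IDs themselves.
 */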

/* Initialize the parser entry for the given flow */
static void mvpp2_cls_flow_prs_init(struct mvpp2 *priv,
				    const struct mvpp2_cls_flow *flow)
{
	mvpp2_prs_add_flow(priv, flow->flow_id, flow->prs_ri.ri,
			   flow->prs_ri.ri_mask);
}

/* Initialize the Lookup Id table entry for the given flow */
static void mvpp2_cls_flow_lkp_init(struct mvpp2 *priv,
				    const struct mvpp2_cls_flow *flow)
{
	struct mvpp2_cls_lookup_entry le;

	le.way = 0;
	le.lkpid = flow->flow_id;

	/* The default RxQ for this port is set in the C2 lookup */
	le.data = 0;

	/* Point to the first lookup in the sequence for this flow, which is
	 * the C2 lookup.
	 */
	le.data |= MVPP2_CLS_LKP_FLOW_PTR(MVPP2_CLS_FLT_FIRST(flow->flow_id));

	/* CLS is always enabled, RSS is enabled/disabled in C2 lookup */
	le.data |= MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;

	mvpp2_cls_lookup_write(priv, &le);
}
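
/* The resulting lookup chain: the parser assigns a flow id (lkpid), the
 * lookup table entry written above points at the first flow table entry
 * for that id (the C2 lookup), and the flow table is then walked entry by
 * entry until one with the LAST bit set is reached.
 */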

static void mvpp2_cls_c2_write(struct mvpp2 *priv,
			       struct mvpp2_cls_c2_entry *c2)
{
	u32 val;

	mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, c2->index);

	val = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_INV);
	if (c2->valid)
		val &= ~MVPP22_CLS_C2_TCAM_INV_BIT;
	else
		val |= MVPP22_CLS_C2_TCAM_INV_BIT;
	mvpp2_write(priv, MVPP22_CLS_C2_TCAM_INV, val);

	mvpp2_write(priv, MVPP22_CLS_C2_ACT, c2->act);

	mvpp2_write(priv, MVPP22_CLS_C2_ATTR0, c2->attr[0]);
	mvpp2_write(priv, MVPP22_CLS_C2_ATTR1, c2->attr[1]);
	mvpp2_write(priv, MVPP22_CLS_C2_ATTR2, c2->attr[2]);
	mvpp2_write(priv, MVPP22_CLS_C2_ATTR3, c2->attr[3]);

	mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA0, c2->tcam[0]);
	mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA1, c2->tcam[1]);
	mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA2, c2->tcam[2]);
	mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA3, c2->tcam[3]);
	/* Writing TCAM_DATA4 flushes writes to TCAM_DATA0-4 and INV to HW */
	mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA4, c2->tcam[4]);
}

void mvpp2_cls_c2_read(struct mvpp2 *priv, int index,
		       struct mvpp2_cls_c2_entry *c2)
{
	u32 val;

	mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, index);

	c2->index = index;

	c2->tcam[0] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA0);
	c2->tcam[1] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA1);
	c2->tcam[2] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA2);
	c2->tcam[3] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA3);
	c2->tcam[4] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA4);

	c2->act = mvpp2_read(priv, MVPP22_CLS_C2_ACT);

	c2->attr[0] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR0);
	c2->attr[1] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR1);
	c2->attr[2] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR2);
	c2->attr[3] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR3);

	val = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_INV);
	c2->valid = !(val & MVPP22_CLS_C2_TCAM_INV_BIT);
}

static int mvpp2_cls_ethtool_flow_to_type(int flow_type)
{
	switch (flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS)) {
	case ETHER_FLOW:
		return MVPP22_FLOW_ETHERNET;
	case TCP_V4_FLOW:
		return MVPP22_FLOW_TCP4;
	case TCP_V6_FLOW:
		return MVPP22_FLOW_TCP6;
	case UDP_V4_FLOW:
		return MVPP22_FLOW_UDP4;
	case UDP_V6_FLOW:
		return MVPP22_FLOW_UDP6;
	case IPV4_FLOW:
		return MVPP22_FLOW_IP4;
	case IPV6_FLOW:
		return MVPP22_FLOW_IP6;
	default:
		return -EOPNOTSUPP;
	}
}
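
/* This mapping ties the ethtool uAPI to the classifier: a rule installed
 * with, e.g., "ethtool -N <iface> flow-type tcp4 ..." reaches the driver
 * as TCP_V4_FLOW and is handled as MVPP22_FLOW_TCP4.
 */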

static int mvpp2_cls_c2_port_flow_index(struct mvpp2_port *port, int loc)
{
	return MVPP22_CLS_C2_RFS_LOC(port->id, loc);
}

/* Initialize the flow table entries for the given flow */
static void mvpp2_cls_flow_init(struct mvpp2 *priv,
				const struct mvpp2_cls_flow *flow)
{
	struct mvpp2_cls_flow_entry fe;
	int i, pri = 0;

	/* Assign default values to all entries in the flow */
	for (i = MVPP2_CLS_FLT_FIRST(flow->flow_id);
	     i <= MVPP2_CLS_FLT_LAST(flow->flow_id); i++) {
		memset(&fe, 0, sizeof(fe));
		fe.index = i;
		mvpp2_cls_flow_pri_set(&fe, pri++);

		if (i == MVPP2_CLS_FLT_LAST(flow->flow_id))
			mvpp2_cls_flow_last_set(&fe, 1);

		mvpp2_cls_flow_write(priv, &fe);
	}

	/* RSS config C2 lookup */
	mvpp2_cls_flow_read(priv, MVPP2_CLS_FLT_C2_RSS_ENTRY(flow->flow_id),
			    &fe);

	mvpp2_cls_flow_eng_set(&fe, MVPP22_CLS_ENGINE_C2);
	mvpp2_cls_flow_port_id_sel(&fe, true);
	mvpp2_cls_flow_lu_type_set(&fe, MVPP22_CLS_LU_TYPE_ALL);

	/* Add all ports */
	for (i = 0; i < MVPP2_MAX_PORTS; i++)
		mvpp2_cls_flow_port_add(&fe, BIT(i));

	mvpp2_cls_flow_write(priv, &fe);

	/* C3Hx lookups */
	for (i = 0; i < MVPP2_MAX_PORTS; i++) {
		mvpp2_cls_flow_read(priv,
				    MVPP2_CLS_FLT_HASH_ENTRY(i, flow->flow_id),
				    &fe);

		/* Set a default engine. Will be overwritten when setting the
		 * real HEK parameters
		 */
		mvpp2_cls_flow_eng_set(&fe, MVPP22_CLS_ENGINE_C3HA);
		mvpp2_cls_flow_port_id_sel(&fe, true);
		mvpp2_cls_flow_port_add(&fe, BIT(i));

		mvpp2_cls_flow_write(priv, &fe);
	}
}
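
/* Each flow thus owns a small block of flow table entries: per-port RFS
 * entries, a C2 entry used for the RSS decision, and one C3Hx hash entry
 * per port; the MVPP2_CLS_FLT_* index helpers place each lookup within
 * that block.
 */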

/* Adds a field to the Header Extracted Key generation parameters */
static int mvpp2_flow_add_hek_field(struct mvpp2_cls_flow_entry *fe,
				    u32 field_id)
{
	int nb_fields = mvpp2_cls_flow_hek_num_get(fe);

	if (nb_fields == MVPP2_FLOW_N_FIELDS)
		return -EINVAL;

	mvpp2_cls_flow_hek_set(fe, nb_fields, field_id);

	mvpp2_cls_flow_hek_num_set(fe, nb_fields + 1);

	return 0;
}

static int mvpp2_flow_set_hek_fields(struct mvpp2_cls_flow_entry *fe,
				     unsigned long hash_opts)
{
	u32 field_id;
	int i;

	/* Clear old fields */
	mvpp2_cls_flow_hek_num_set(fe, 0);
	fe->data[2] = 0;

	for_each_set_bit(i, &hash_opts, MVPP22_CLS_HEK_N_FIELDS) {
		switch (BIT(i)) {
		case MVPP22_CLS_HEK_OPT_MAC_DA:
			field_id = MVPP22_CLS_FIELD_MAC_DA;
			break;
		case MVPP22_CLS_HEK_OPT_VLAN:
			field_id = MVPP22_CLS_FIELD_VLAN;
			break;
		case MVPP22_CLS_HEK_OPT_VLAN_PRI:
			field_id = MVPP22_CLS_FIELD_VLAN_PRI;
			break;
		case MVPP22_CLS_HEK_OPT_IP4SA:
			field_id = MVPP22_CLS_FIELD_IP4SA;
			break;
		case MVPP22_CLS_HEK_OPT_IP4DA:
			field_id = MVPP22_CLS_FIELD_IP4DA;
			break;
		case MVPP22_CLS_HEK_OPT_IP6SA:
			field_id = MVPP22_CLS_FIELD_IP6SA;
			break;
		case MVPP22_CLS_HEK_OPT_IP6DA:
			field_id = MVPP22_CLS_FIELD_IP6DA;
			break;
		case MVPP22_CLS_HEK_OPT_L4SIP:
			field_id = MVPP22_CLS_FIELD_L4SIP;
			break;
		case MVPP22_CLS_HEK_OPT_L4DIP:
			field_id = MVPP22_CLS_FIELD_L4DIP;
			break;
		default:
			return -EINVAL;
		}
		if (mvpp2_flow_add_hek_field(fe, field_id))
			return -EINVAL;
	}

	return 0;
}

/* Returns the size, in bits, of the corresponding HEK field */
static int mvpp2_cls_hek_field_size(u32 field)
{
	switch (field) {
	case MVPP22_CLS_HEK_OPT_MAC_DA:
		return 48;
	case MVPP22_CLS_HEK_OPT_VLAN:
		return 12;
	case MVPP22_CLS_HEK_OPT_VLAN_PRI:
		return 3;
	case MVPP22_CLS_HEK_OPT_IP4SA:
	case MVPP22_CLS_HEK_OPT_IP4DA:
		return 32;
	case MVPP22_CLS_HEK_OPT_IP6SA:
	case MVPP22_CLS_HEK_OPT_IP6DA:
		return 128;
	case MVPP22_CLS_HEK_OPT_L4SIP:
	case MVPP22_CLS_HEK_OPT_L4DIP:
		return 16;
	default:
		return -1;
	}
}
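
/* These sizes matter when picking an engine: an IPv4 5-tuple key (IP4SA +
 * IP4DA + L4SIP + L4DIP) is 32 + 32 + 16 + 16 = 96 bits, already beyond
 * the 64-bit TCAM limit of the C2 engine noted in
 * mvpp2_cls_rfs_parse_rule(), which is why full-tuple hashing goes through
 * the C3Hx engines.
 */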

const struct mvpp2_cls_flow *mvpp2_cls_flow_get(int flow)
{
	if (flow >= MVPP2_N_PRS_FLOWS)
		return NULL;

	return &cls_flows[flow];
}

/* Set the hash generation options for the given traffic flow.
 * One traffic flow (in the ethtool sense) has multiple classification
 * flows, to handle specific cases such as fragmentation, or the presence
 * of a VLAN / DSA Tag.
 *
 * Each of these individual flows has different constraints; for example,
 * we can't hash fragmented packets on L4 data (else we would risk packet
 * re-ordering), so each classification flow masks the requested options
 * with the ones it supports.
 */
static int mvpp2_port_rss_hash_opts_set(struct mvpp2_port *port, int flow_type,
					u16 requested_opts)
{
	const struct mvpp2_cls_flow *flow;
	struct mvpp2_cls_flow_entry fe;
	int i, engine, flow_index;
	u16 hash_opts;

	for_each_cls_flow_id_with_type(i, flow_type) {
		flow = mvpp2_cls_flow_get(i);
		if (!flow)
			return -EINVAL;

		flow_index = MVPP2_CLS_FLT_HASH_ENTRY(port->id, flow->flow_id);

		mvpp2_cls_flow_read(port->priv, flow_index, &fe);

		hash_opts = flow->supported_hash_opts & requested_opts;

		/* Use the C3HB engine to access L4 info; this adds L4 info
		 * to the hash parameters.
		 */
		if (hash_opts & MVPP22_CLS_HEK_L4_OPTS)
			engine = MVPP22_CLS_ENGINE_C3HB;
		else
			engine = MVPP22_CLS_ENGINE_C3HA;

		if (mvpp2_flow_set_hek_fields(&fe, hash_opts))
			return -EINVAL;

		mvpp2_cls_flow_eng_set(&fe, engine);

		mvpp2_cls_flow_write(port->priv, &fe);
	}

	return 0;
}
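
/* For instance, requesting MVPP22_CLS_HEK_IP4_5T on MVPP22_FLOW_TCP4
 * programs the full 5-tuple into the non-fragmented classifier flows,
 * while the fragmented ones, which only support MVPP22_CLS_HEK_IP4_2T,
 * are masked down to the 2-tuple so fragments are never hashed on L4
 * ports.
 */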

u16 mvpp2_flow_get_hek_fields(struct mvpp2_cls_flow_entry *fe)
{
	u16 hash_opts = 0;
	int n_fields, i, field;

	n_fields = mvpp2_cls_flow_hek_num_get(fe);

	for (i = 0; i < n_fields; i++) {
		field = mvpp2_cls_flow_hek_get(fe, i);

		switch (field) {
		case MVPP22_CLS_FIELD_MAC_DA:
			hash_opts |= MVPP22_CLS_HEK_OPT_MAC_DA;
			break;
		case MVPP22_CLS_FIELD_VLAN:
			hash_opts |= MVPP22_CLS_HEK_OPT_VLAN;
			break;
		case MVPP22_CLS_FIELD_VLAN_PRI:
			hash_opts |= MVPP22_CLS_HEK_OPT_VLAN_PRI;
			break;
		case MVPP22_CLS_FIELD_L3_PROTO:
			hash_opts |= MVPP22_CLS_HEK_OPT_L3_PROTO;
			break;
		case MVPP22_CLS_FIELD_IP4SA:
			hash_opts |= MVPP22_CLS_HEK_OPT_IP4SA;
			break;
		case MVPP22_CLS_FIELD_IP4DA:
			hash_opts |= MVPP22_CLS_HEK_OPT_IP4DA;
			break;
		case MVPP22_CLS_FIELD_IP6SA:
			hash_opts |= MVPP22_CLS_HEK_OPT_IP6SA;
			break;
		case MVPP22_CLS_FIELD_IP6DA:
			hash_opts |= MVPP22_CLS_HEK_OPT_IP6DA;
			break;
		case MVPP22_CLS_FIELD_L4SIP:
			hash_opts |= MVPP22_CLS_HEK_OPT_L4SIP;
			break;
		case MVPP22_CLS_FIELD_L4DIP:
			hash_opts |= MVPP22_CLS_HEK_OPT_L4DIP;
			break;
		default:
			break;
		}
	}
	return hash_opts;
}

/* Returns the hash opts for this flow. There are several classifier flows
 * for one traffic flow; this returns an aggregation of all their
 * configurations.
 */
static u16 mvpp2_port_rss_hash_opts_get(struct mvpp2_port *port, int flow_type)
{
	const struct mvpp2_cls_flow *flow;
	struct mvpp2_cls_flow_entry fe;
	int i, flow_index;
	u16 hash_opts = 0;

	for_each_cls_flow_id_with_type(i, flow_type) {
		flow = mvpp2_cls_flow_get(i);
		if (!flow)
			return 0;

		flow_index = MVPP2_CLS_FLT_HASH_ENTRY(port->id, flow->flow_id);

		mvpp2_cls_flow_read(port->priv, flow_index, &fe);

		hash_opts |= mvpp2_flow_get_hek_fields(&fe);
	}

	return hash_opts;
}

static void mvpp2_cls_port_init_flows(struct mvpp2 *priv)
{
	const struct mvpp2_cls_flow *flow;
	int i;

	for (i = 0; i < MVPP2_N_PRS_FLOWS; i++) {
		flow = mvpp2_cls_flow_get(i);
		if (!flow)
			break;

		mvpp2_cls_flow_prs_init(priv, flow);
		mvpp2_cls_flow_lkp_init(priv, flow);
		mvpp2_cls_flow_init(priv, flow);
	}
}

static void mvpp2_port_c2_cls_init(struct mvpp2_port *port)
{
	struct mvpp2_cls_c2_entry c2;
	u8 qh, ql, pmap;

	memset(&c2, 0, sizeof(c2));

	c2.index = MVPP22_CLS_C2_RSS_ENTRY(port->id);

	pmap = BIT(port->id);
	c2.tcam[4] = MVPP22_CLS_C2_PORT_ID(pmap);
	c2.tcam[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_PORT_ID(pmap));

	/* Match on Lookup Type */
	c2.tcam[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_LU_TYPE(MVPP2_CLS_LU_TYPE_MASK));
	c2.tcam[4] |= MVPP22_CLS_C2_LU_TYPE(MVPP22_CLS_LU_TYPE_ALL);

	/* Update RSS status after matching this entry */
	c2.act = MVPP22_CLS_C2_ACT_RSS_EN(MVPP22_C2_UPD_LOCK);

	/* Mark packet as "forwarded to software", needed for RSS */
	c2.act |= MVPP22_CLS_C2_ACT_FWD(MVPP22_C2_FWD_SW_LOCK);

	/* Configure the default rx queue: update Queue Low and Queue High,
	 * but don't lock, since the rx queue selection might be overridden
	 * by RSS
	 */
	c2.act |= MVPP22_CLS_C2_ACT_QHIGH(MVPP22_C2_UPD) |
		  MVPP22_CLS_C2_ACT_QLOW(MVPP22_C2_UPD);

	qh = (port->first_rxq >> 3) & MVPP22_CLS_C2_ATTR0_QHIGH_MASK;
	ql = port->first_rxq & MVPP22_CLS_C2_ATTR0_QLOW_MASK;

	c2.attr[0] = MVPP22_CLS_C2_ATTR0_QHIGH(qh) |
		     MVPP22_CLS_C2_ATTR0_QLOW(ql);

	c2.valid = true;

	mvpp2_cls_c2_write(port->priv, &c2);
}
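
/* The destination queue is programmed as a split number: the low 3 bits go
 * into QLOW and the remaining bits into QHIGH, so e.g. a first_rxq of 12
 * yields qh = 1 and ql = 4.
 */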

/* Classifier default initialization */
void mvpp2_cls_init(struct mvpp2 *priv)
{
	struct mvpp2_cls_lookup_entry le;
	struct mvpp2_cls_flow_entry fe;
	struct mvpp2_cls_c2_entry c2;
	int index;

	/* Enable classifier */
	mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);

	/* Clear classifier flow table */
	memset(&fe.data, 0, sizeof(fe.data));
	for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
		fe.index = index;
		mvpp2_cls_flow_write(priv, &fe);
	}

	/* Clear classifier lookup table */
	le.data = 0;
	for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
		le.lkpid = index;
		le.way = 0;
		mvpp2_cls_lookup_write(priv, &le);

		le.way = 1;
		mvpp2_cls_lookup_write(priv, &le);
	}

	/* Clear C2 TCAM engine table */
	memset(&c2, 0, sizeof(c2));
	c2.valid = false;
	for (index = 0; index < MVPP22_CLS_C2_N_ENTRIES; index++) {
		c2.index = index;
		mvpp2_cls_c2_write(priv, &c2);
	}

	/* Disable the FIFO stages in C2 engine, which are only used in BIST
	 * mode
	 */
	mvpp2_write(priv, MVPP22_CLS_C2_TCAM_CTRL,
		    MVPP22_CLS_C2_TCAM_BYPASS_FIFO);

	mvpp2_cls_port_init_flows(priv);
}

void mvpp2_cls_port_config(struct mvpp2_port *port)
{
	struct mvpp2_cls_lookup_entry le;
	u32 val;

	/* Set way for the port */
	val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG);
	val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val);

	/* Pick the entry to be accessed in the lookup ID decoding table
	 * according to the way and lkpid.
	 */
	le.lkpid = port->id;
	le.way = 0;
	le.data = 0;

	/* Set initial CPU queue for receiving packets */
	le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
	le.data |= port->first_rxq;

	/* Disable classification engines */
	le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;

	/* Update lookup ID table entry */
	mvpp2_cls_lookup_write(port->priv, &le);

	mvpp2_port_c2_cls_init(port);
}

u32 mvpp2_cls_c2_hit_count(struct mvpp2 *priv, int c2_index)
{
	mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, c2_index);

	return mvpp2_read(priv, MVPP22_CLS_C2_HIT_CTR);
}

static void mvpp2_rss_port_c2_enable(struct mvpp2_port *port, u32 ctx)
{
	struct mvpp2_cls_c2_entry c2;
	u8 qh, ql;

	mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2);

	/* The RxQ number is used to select the RSS table. In that case, we
	 * set it to be the ctx number.
	 */
	qh = (ctx >> 3) & MVPP22_CLS_C2_ATTR0_QHIGH_MASK;
	ql = ctx & MVPP22_CLS_C2_ATTR0_QLOW_MASK;

	c2.attr[0] = MVPP22_CLS_C2_ATTR0_QHIGH(qh) |
		     MVPP22_CLS_C2_ATTR0_QLOW(ql);

	c2.attr[2] |= MVPP22_CLS_C2_ATTR2_RSS_EN;

	mvpp2_cls_c2_write(port->priv, &c2);
}

static void mvpp2_rss_port_c2_disable(struct mvpp2_port *port)
{
	struct mvpp2_cls_c2_entry c2;
	u8 qh, ql;

	mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2);

	/* Reset the default destination RxQ to the port's first rx queue. */
	qh = (port->first_rxq >> 3) & MVPP22_CLS_C2_ATTR0_QHIGH_MASK;
	ql = port->first_rxq & MVPP22_CLS_C2_ATTR0_QLOW_MASK;

	c2.attr[0] = MVPP22_CLS_C2_ATTR0_QHIGH(qh) |
		     MVPP22_CLS_C2_ATTR0_QLOW(ql);

	c2.attr[2] &= ~MVPP22_CLS_C2_ATTR2_RSS_EN;

	mvpp2_cls_c2_write(port->priv, &c2);
}

static inline int mvpp22_rss_ctx(struct mvpp2_port *port, int port_rss_ctx)
{
	return port->rss_ctx[port_rss_ctx];
}

int mvpp22_port_rss_enable(struct mvpp2_port *port)
{
	if (mvpp22_rss_ctx(port, 0) < 0)
		return -EINVAL;

	mvpp2_rss_port_c2_enable(port, mvpp22_rss_ctx(port, 0));

	return 0;
}

int mvpp22_port_rss_disable(struct mvpp2_port *port)
{
	if (mvpp22_rss_ctx(port, 0) < 0)
		return -EINVAL;

	mvpp2_rss_port_c2_disable(port);

	return 0;
}

static void mvpp22_port_c2_lookup_disable(struct mvpp2_port *port, int entry)
{
	struct mvpp2_cls_c2_entry c2;

	mvpp2_cls_c2_read(port->priv, entry, &c2);

	/* Clear the port map so that the entry doesn't match anymore */
	c2.tcam[4] &= ~(MVPP22_CLS_C2_PORT_ID(BIT(port->id)));

	mvpp2_cls_c2_write(port->priv, &c2);
}

/* Set CPU queue number for oversize packets */
void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
{
	u32 val;

	mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id),
		    port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);

	mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id),
		    (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));

	val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
	val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
}

static int mvpp2_port_c2_tcam_rule_add(struct mvpp2_port *port,
				       struct mvpp2_rfs_rule *rule)
{
	struct flow_action_entry *act;
	struct mvpp2_cls_c2_entry c2;
	u8 qh, ql, pmap;
	int index, ctx;

	if (!flow_action_basic_hw_stats_check(&rule->flow->action, NULL))
		return -EOPNOTSUPP;

	memset(&c2, 0, sizeof(c2));

	index = mvpp2_cls_c2_port_flow_index(port, rule->loc);
	if (index < 0)
		return -EINVAL;
	c2.index = index;

	act = &rule->flow->action.entries[0];

	rule->c2_index = c2.index;

	c2.tcam[3] = (rule->c2_tcam & 0xffff) |
		     ((rule->c2_tcam_mask & 0xffff) << 16);
	c2.tcam[2] = ((rule->c2_tcam >> 16) & 0xffff) |
		     (((rule->c2_tcam_mask >> 16) & 0xffff) << 16);
	c2.tcam[1] = ((rule->c2_tcam >> 32) & 0xffff) |
		     (((rule->c2_tcam_mask >> 32) & 0xffff) << 16);
	c2.tcam[0] = ((rule->c2_tcam >> 48) & 0xffff) |
		     (((rule->c2_tcam_mask >> 48) & 0xffff) << 16);

	pmap = BIT(port->id);
	c2.tcam[4] = MVPP22_CLS_C2_PORT_ID(pmap);
	c2.tcam[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_PORT_ID(pmap));

	/* Match on Lookup Type */
	c2.tcam[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_LU_TYPE(MVPP2_CLS_LU_TYPE_MASK));
	c2.tcam[4] |= MVPP22_CLS_C2_LU_TYPE(rule->loc);

	if (act->id == FLOW_ACTION_DROP) {
		c2.act = MVPP22_CLS_C2_ACT_COLOR(MVPP22_C2_COL_RED_LOCK);
	} else {
		/* We want to keep the default color derived from the Header
		 * Parser drop entries, for VLAN and MAC filtering. This will
		 * assign a default color of Green or Red, and we want matches
		 * with a non-drop action to keep that color.
		 */
		c2.act = MVPP22_CLS_C2_ACT_COLOR(MVPP22_C2_COL_NO_UPD_LOCK);

		/* Update RSS status after matching this entry */
		if (act->queue.ctx)
			c2.attr[2] |= MVPP22_CLS_C2_ATTR2_RSS_EN;

		/* Always lock the RSS_EN decision. We might have high prio
		 * rules steering to an RXQ, and a lower one steering to RSS;
		 * we don't want the low prio RSS rule to overwrite this flag.
		 */
		c2.act |= MVPP22_CLS_C2_ACT_RSS_EN(MVPP22_C2_UPD_LOCK);

		/* Mark packet as "forwarded to software", needed for RSS */
		c2.act |= MVPP22_CLS_C2_ACT_FWD(MVPP22_C2_FWD_SW_LOCK);

		c2.act |= MVPP22_CLS_C2_ACT_QHIGH(MVPP22_C2_UPD_LOCK) |
			  MVPP22_CLS_C2_ACT_QLOW(MVPP22_C2_UPD_LOCK);

		if (act->queue.ctx) {
			/* Get the global ctx number */
			ctx = mvpp22_rss_ctx(port, act->queue.ctx);
			if (ctx < 0)
				return -EINVAL;

			qh = (ctx >> 3) & MVPP22_CLS_C2_ATTR0_QHIGH_MASK;
			ql = ctx & MVPP22_CLS_C2_ATTR0_QLOW_MASK;
		} else {
			qh = ((act->queue.index + port->first_rxq) >> 3) &
			     MVPP22_CLS_C2_ATTR0_QHIGH_MASK;
			ql = (act->queue.index + port->first_rxq) &
			     MVPP22_CLS_C2_ATTR0_QLOW_MASK;
		}

		c2.attr[0] = MVPP22_CLS_C2_ATTR0_QHIGH(qh) |
			     MVPP22_CLS_C2_ATTR0_QLOW(ql);
	}

	c2.valid = true;

	mvpp2_cls_c2_write(port->priv, &c2);

	return 0;
}
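
/* In the C2 TCAM words written above, each 32-bit tcam[] word carries 16
 * bits of key in its low half and the matching 16 bits of mask in its high
 * half, with tcam[0] holding the most significant key bits.
 */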

static int mvpp2_port_c2_rfs_rule_insert(struct mvpp2_port *port,
					 struct mvpp2_rfs_rule *rule)
{
	return mvpp2_port_c2_tcam_rule_add(port, rule);
}

static int mvpp2_port_cls_rfs_rule_remove(struct mvpp2_port *port,
					  struct mvpp2_rfs_rule *rule)
{
	const struct mvpp2_cls_flow *flow;
	struct mvpp2_cls_flow_entry fe;
	int index, i;

	for_each_cls_flow_id_containing_type(i, rule->flow_type) {
		flow = mvpp2_cls_flow_get(i);
		if (!flow)
			return 0;

		index = MVPP2_CLS_FLT_C2_RFS(port->id, flow->flow_id, rule->loc);

		mvpp2_cls_flow_read(port->priv, index, &fe);
		mvpp2_cls_flow_port_remove(&fe, BIT(port->id));
		mvpp2_cls_flow_write(port->priv, &fe);
	}

	if (rule->c2_index >= 0)
		mvpp22_port_c2_lookup_disable(port, rule->c2_index);

	return 0;
}

static int mvpp2_port_flt_rfs_rule_insert(struct mvpp2_port *port,
					  struct mvpp2_rfs_rule *rule)
{
	const struct mvpp2_cls_flow *flow;
	struct mvpp2 *priv = port->priv;
	struct mvpp2_cls_flow_entry fe;
	int index, ret, i;

	if (rule->engine != MVPP22_CLS_ENGINE_C2)
		return -EOPNOTSUPP;

	ret = mvpp2_port_c2_rfs_rule_insert(port, rule);
	if (ret)
		return ret;

	for_each_cls_flow_id_containing_type(i, rule->flow_type) {
		flow = mvpp2_cls_flow_get(i);
		if (!flow)
			return 0;

		if ((rule->hek_fields & flow->supported_hash_opts) != rule->hek_fields)
			continue;

		index = MVPP2_CLS_FLT_C2_RFS(port->id, flow->flow_id, rule->loc);

		mvpp2_cls_flow_read(priv, index, &fe);
		mvpp2_cls_flow_eng_set(&fe, rule->engine);
		mvpp2_cls_flow_port_id_sel(&fe, true);
		mvpp2_flow_set_hek_fields(&fe, rule->hek_fields);
		mvpp2_cls_flow_lu_type_set(&fe, rule->loc);
		mvpp2_cls_flow_port_add(&fe, 0xf);

		mvpp2_cls_flow_write(priv, &fe);
	}

	return 0;
}

static int mvpp2_cls_c2_build_match(struct mvpp2_rfs_rule *rule)
{
	struct flow_rule *flow = rule->flow;
	int offs = 0;

	/* The order of insertion in C2 tcam must match the order in which
	 * the fields are found in the header
	 */
	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(flow, &match);
		if (match.mask->vlan_id) {
			rule->hek_fields |= MVPP22_CLS_HEK_OPT_VLAN;

			rule->c2_tcam |= ((u64)match.key->vlan_id) << offs;
			rule->c2_tcam_mask |= ((u64)match.mask->vlan_id) << offs;

			/* Don't update the offset yet */
		}

		if (match.mask->vlan_priority) {
			rule->hek_fields |= MVPP22_CLS_HEK_OPT_VLAN_PRI;

			/* VLAN pri is always at offset 13 relative to the
			 * current offset
			 */
			rule->c2_tcam |= ((u64)match.key->vlan_priority) <<
				(offs + 13);
			rule->c2_tcam_mask |= ((u64)match.mask->vlan_priority) <<
				(offs + 13);
		}

		if (match.mask->vlan_dei)
			return -EOPNOTSUPP;

		/* vlan id and prio always seem to take a full 16-bit slot in
		 * the Header Extracted Key.
		 */
		offs += 16;
	}

	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(flow, &match);
		if (match.mask->src) {
			rule->hek_fields |= MVPP22_CLS_HEK_OPT_L4SIP;

			rule->c2_tcam |= ((u64)ntohs(match.key->src)) << offs;
			rule->c2_tcam_mask |= ((u64)ntohs(match.mask->src)) << offs;
			offs += mvpp2_cls_hek_field_size(MVPP22_CLS_HEK_OPT_L4SIP);
		}

		if (match.mask->dst) {
			rule->hek_fields |= MVPP22_CLS_HEK_OPT_L4DIP;

			rule->c2_tcam |= ((u64)ntohs(match.key->dst)) << offs;
			rule->c2_tcam_mask |= ((u64)ntohs(match.mask->dst)) << offs;
			offs += mvpp2_cls_hek_field_size(MVPP22_CLS_HEK_OPT_L4DIP);
		}
	}

	if (hweight16(rule->hek_fields) > MVPP2_FLOW_N_FIELDS)
		return -EOPNOTSUPP;

	return 0;
}
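
/* Example layout: for a rule matching both a VLAN ID and an L4 destination
 * port, the VLAN takes the first 16-bit slot (offs 0..15) and the port the
 * next one (offs 16..31), mirroring their order in the packet headers.
 */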

static int mvpp2_cls_rfs_parse_rule(struct mvpp2_rfs_rule *rule)
{
	struct flow_rule *flow = rule->flow;
	struct flow_action_entry *act;

	if (!flow_action_basic_hw_stats_check(&rule->flow->action, NULL))
		return -EOPNOTSUPP;

	act = &flow->action.entries[0];
	if (act->id != FLOW_ACTION_QUEUE && act->id != FLOW_ACTION_DROP)
		return -EOPNOTSUPP;

	/* When both an RSS context and a queue index are set, the index is
	 * considered as an offset to be added to the indirection table
	 * entries. We don't support this, so reject this rule.
	 */
	if (act->queue.ctx && act->queue.index)
		return -EOPNOTSUPP;

	/* For now, only use the C2 engine which has a HEK size limited to 64
	 * bits for TCAM matching.
	 */
	rule->engine = MVPP22_CLS_ENGINE_C2;

	if (mvpp2_cls_c2_build_match(rule))
		return -EINVAL;

	return 0;
}

int mvpp2_ethtool_cls_rule_get(struct mvpp2_port *port,
			       struct ethtool_rxnfc *rxnfc)
{
	struct mvpp2_ethtool_fs *efs;

	if (rxnfc->fs.location >= MVPP2_N_RFS_ENTRIES_PER_FLOW)
		return -EINVAL;

	efs = port->rfs_rules[rxnfc->fs.location];
	if (!efs)
		return -ENOENT;

	memcpy(rxnfc, &efs->rxnfc, sizeof(efs->rxnfc));

	return 0;
}

int mvpp2_ethtool_cls_rule_ins(struct mvpp2_port *port,
			       struct ethtool_rxnfc *info)
{
	struct ethtool_rx_flow_spec_input input = {};
	struct ethtool_rx_flow_rule *ethtool_rule;
	struct mvpp2_ethtool_fs *efs, *old_efs;
	int ret = 0;

	if (info->fs.location >= MVPP2_N_RFS_ENTRIES_PER_FLOW)
		return -EINVAL;

	efs = kzalloc(sizeof(*efs), GFP_KERNEL);
	if (!efs)
		return -ENOMEM;

	input.fs = &info->fs;

	/* We need to manually set the rss_ctx, since this info isn't present
	 * in info->fs
	 */
	if (info->fs.flow_type & FLOW_RSS)
		input.rss_ctx = info->rss_context;

	ethtool_rule = ethtool_rx_flow_rule_create(&input);
	if (IS_ERR(ethtool_rule)) {
		ret = PTR_ERR(ethtool_rule);
		goto clean_rule;
	}

	efs->rule.flow = ethtool_rule->rule;
	efs->rule.flow_type = mvpp2_cls_ethtool_flow_to_type(info->fs.flow_type);
	if (efs->rule.flow_type < 0) {
		ret = efs->rule.flow_type;
		goto clean_eth_rule;
	}

	ret = mvpp2_cls_rfs_parse_rule(&efs->rule);
	if (ret)
		goto clean_eth_rule;

	efs->rule.loc = info->fs.location;

	/* Replace an already existing rule */
	if (port->rfs_rules[efs->rule.loc]) {
		old_efs = port->rfs_rules[efs->rule.loc];
		ret = mvpp2_port_cls_rfs_rule_remove(port, &old_efs->rule);
		if (ret)
			goto clean_eth_rule;
		kfree(old_efs);
		port->n_rfs_rules--;
	}

	ret = mvpp2_port_flt_rfs_rule_insert(port, &efs->rule);
	if (ret)
		goto clean_eth_rule;

	ethtool_rx_flow_rule_destroy(ethtool_rule);
	efs->rule.flow = NULL;

	memcpy(&efs->rxnfc, info, sizeof(*info));
	port->rfs_rules[efs->rule.loc] = efs;
	port->n_rfs_rules++;

	return ret;

clean_eth_rule:
	ethtool_rx_flow_rule_destroy(ethtool_rule);
clean_rule:
	kfree(efs);
	return ret;
}

int mvpp2_ethtool_cls_rule_del(struct mvpp2_port *port,
			       struct ethtool_rxnfc *info)
{
	struct mvpp2_ethtool_fs *efs;
	int ret;

	if (info->fs.location >= MVPP2_N_RFS_ENTRIES_PER_FLOW)
		return -EINVAL;

	efs = port->rfs_rules[info->fs.location];
	if (!efs)
		return -EINVAL;

	/* Remove the rule from the engines. */
	ret = mvpp2_port_cls_rfs_rule_remove(port, &efs->rule);
	if (ret)
		return ret;

	port->n_rfs_rules--;
	port->rfs_rules[info->fs.location] = NULL;
	kfree(efs);

	return 0;
}

static inline u32 mvpp22_rxfh_indir(struct mvpp2_port *port, u32 rxq)
{
	int nrxqs, cpu, cpus = num_possible_cpus();

	/* Number of RXQs per CPU */
	nrxqs = port->nrxqs / cpus;

	/* CPU that will handle this rx queue */
	cpu = rxq / nrxqs;

	if (!cpu_online(cpu))
		return port->first_rxq;

	/* Indirection to better distribute the packets across the CPUs when
	 * configuring the RSS queues.
	 */
	return port->first_rxq + ((rxq * nrxqs + rxq / cpus) % port->nrxqs);
}
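
/* Worked example, assuming a port with nrxqs = 16 spread over 4 online
 * CPUs (so 4 RXQs per CPU): entry rxq = 5 is handled by cpu 1 and maps to
 * first_rxq + ((5 * 4 + 5 / 4) % 16) = first_rxq + 5.
 */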

static void mvpp22_rss_fill_table(struct mvpp2_port *port,
				  struct mvpp2_rss_table *table,
				  u32 rss_ctx)
{
	struct mvpp2 *priv = port->priv;
	int i;

	for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++) {
		u32 sel = MVPP22_RSS_INDEX_TABLE(rss_ctx) |
			  MVPP22_RSS_INDEX_TABLE_ENTRY(i);
		mvpp2_write(priv, MVPP22_RSS_INDEX, sel);

		mvpp2_write(priv, MVPP22_RSS_TABLE_ENTRY,
			    mvpp22_rxfh_indir(port, table->indir[i]));
	}
}

static int mvpp22_rss_context_create(struct mvpp2_port *port, u32 *rss_ctx)
{
	struct mvpp2 *priv = port->priv;
	u32 ctx;

	/* Find the first free RSS table */
	for (ctx = 0; ctx < MVPP22_N_RSS_TABLES; ctx++) {
		if (!priv->rss_tables[ctx])
			break;
	}

	if (ctx == MVPP22_N_RSS_TABLES)
		return -EINVAL;

	priv->rss_tables[ctx] = kzalloc(sizeof(*priv->rss_tables[ctx]),
					GFP_KERNEL);
	if (!priv->rss_tables[ctx])
		return -ENOMEM;

	*rss_ctx = ctx;

	/* Set the table width: replace the whole classifier Rx queue number
	 * with the ones configured in RSS table entries.
	 */
	mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_TABLE(ctx));
	mvpp2_write(priv, MVPP22_RSS_WIDTH, 8);

	mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_QUEUE(ctx));
	mvpp2_write(priv, MVPP22_RXQ2RSS_TABLE, MVPP22_RSS_TABLE_POINTER(ctx));

	return 0;
}

int mvpp22_port_rss_ctx_create(struct mvpp2_port *port, u32 *port_ctx)
{
	u32 rss_ctx;
	int ret, i;

	ret = mvpp22_rss_context_create(port, &rss_ctx);
	if (ret)
		return ret;

	/* Find the first available context number in the port, starting from 1.
	 * Context 0 on each port is reserved for the default context.
	 */
	for (i = 1; i < MVPP22_N_RSS_TABLES; i++) {
		if (port->rss_ctx[i] < 0)
			break;
	}

	if (i == MVPP22_N_RSS_TABLES)
		return -EINVAL;

	port->rss_ctx[i] = rss_ctx;
	*port_ctx = i;

	return 0;
}

static struct mvpp2_rss_table *mvpp22_rss_table_get(struct mvpp2 *priv,
						    int rss_ctx)
{
	if (rss_ctx < 0 || rss_ctx >= MVPP22_N_RSS_TABLES)
		return NULL;

	return priv->rss_tables[rss_ctx];
}

int mvpp22_port_rss_ctx_delete(struct mvpp2_port *port, u32 port_ctx)
{
	struct mvpp2 *priv = port->priv;
	struct ethtool_rxnfc *rxnfc;
	int i, rss_ctx, ret;

	rss_ctx = mvpp22_rss_ctx(port, port_ctx);

	if (rss_ctx < 0 || rss_ctx >= MVPP22_N_RSS_TABLES)
		return -EINVAL;

	/* Invalidate any active classification rule that uses this context */
	for (i = 0; i < MVPP2_N_RFS_ENTRIES_PER_FLOW; i++) {
		if (!port->rfs_rules[i])
			continue;

		rxnfc = &port->rfs_rules[i]->rxnfc;
		if (!(rxnfc->fs.flow_type & FLOW_RSS) ||
		    rxnfc->rss_context != port_ctx)
			continue;

		ret = mvpp2_ethtool_cls_rule_del(port, rxnfc);
		if (ret) {
			netdev_warn(port->dev,
				    "couldn't remove classification rule %d associated to this context",
				    rxnfc->fs.location);
		}
	}

	kfree(priv->rss_tables[rss_ctx]);

	priv->rss_tables[rss_ctx] = NULL;
	port->rss_ctx[port_ctx] = -1;

	return 0;
}

int mvpp22_port_rss_ctx_indir_set(struct mvpp2_port *port, u32 port_ctx,
				  const u32 *indir)
{
	int rss_ctx = mvpp22_rss_ctx(port, port_ctx);
	struct mvpp2_rss_table *rss_table = mvpp22_rss_table_get(port->priv,
								 rss_ctx);

	if (!rss_table)
		return -EINVAL;

	memcpy(rss_table->indir, indir,
	       MVPP22_RSS_TABLE_ENTRIES * sizeof(rss_table->indir[0]));

	mvpp22_rss_fill_table(port, rss_table, rss_ctx);

	return 0;
}

int mvpp22_port_rss_ctx_indir_get(struct mvpp2_port *port, u32 port_ctx,
				  u32 *indir)
{
	int rss_ctx = mvpp22_rss_ctx(port, port_ctx);
	struct mvpp2_rss_table *rss_table = mvpp22_rss_table_get(port->priv,
								 rss_ctx);

	if (!rss_table)
		return -EINVAL;

	memcpy(indir, rss_table->indir,
	       MVPP22_RSS_TABLE_ENTRIES * sizeof(rss_table->indir[0]));

	return 0;
}

int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, struct ethtool_rxnfc *info)
{
	u16 hash_opts = 0;
	u32 flow_type;

	flow_type = mvpp2_cls_ethtool_flow_to_type(info->flow_type);

	switch (flow_type) {
	case MVPP22_FLOW_TCP4:
	case MVPP22_FLOW_UDP4:
	case MVPP22_FLOW_TCP6:
	case MVPP22_FLOW_UDP6:
		if (info->data & RXH_L4_B_0_1)
			hash_opts |= MVPP22_CLS_HEK_OPT_L4SIP;
		if (info->data & RXH_L4_B_2_3)
			hash_opts |= MVPP22_CLS_HEK_OPT_L4DIP;
		/* Fallthrough */
	case MVPP22_FLOW_IP4:
	case MVPP22_FLOW_IP6:
		if (info->data & RXH_L2DA)
			hash_opts |= MVPP22_CLS_HEK_OPT_MAC_DA;
		if (info->data & RXH_VLAN)
			hash_opts |= MVPP22_CLS_HEK_OPT_VLAN;
		if (info->data & RXH_L3_PROTO)
			hash_opts |= MVPP22_CLS_HEK_OPT_L3_PROTO;
		if (info->data & RXH_IP_SRC)
			hash_opts |= (MVPP22_CLS_HEK_OPT_IP4SA |
				      MVPP22_CLS_HEK_OPT_IP6SA);
		if (info->data & RXH_IP_DST)
			hash_opts |= (MVPP22_CLS_HEK_OPT_IP4DA |
				      MVPP22_CLS_HEK_OPT_IP6DA);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return mvpp2_port_rss_hash_opts_set(port, flow_type, hash_opts);
}

int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port, struct ethtool_rxnfc *info)
{
	unsigned long hash_opts;
	u32 flow_type;
	int i;

	flow_type = mvpp2_cls_ethtool_flow_to_type(info->flow_type);

	hash_opts = mvpp2_port_rss_hash_opts_get(port, flow_type);
	info->data = 0;

	for_each_set_bit(i, &hash_opts, MVPP22_CLS_HEK_N_FIELDS) {
		switch (BIT(i)) {
		case MVPP22_CLS_HEK_OPT_MAC_DA:
			info->data |= RXH_L2DA;
			break;
		case MVPP22_CLS_HEK_OPT_VLAN:
			info->data |= RXH_VLAN;
			break;
		case MVPP22_CLS_HEK_OPT_L3_PROTO:
			info->data |= RXH_L3_PROTO;
			break;
		case MVPP22_CLS_HEK_OPT_IP4SA:
		case MVPP22_CLS_HEK_OPT_IP6SA:
			info->data |= RXH_IP_SRC;
			break;
		case MVPP22_CLS_HEK_OPT_IP4DA:
		case MVPP22_CLS_HEK_OPT_IP6DA:
			info->data |= RXH_IP_DST;
			break;
		case MVPP22_CLS_HEK_OPT_L4SIP:
			info->data |= RXH_L4_B_0_1;
			break;
		case MVPP22_CLS_HEK_OPT_L4DIP:
			info->data |= RXH_L4_B_2_3;
			break;
		default:
			return -EINVAL;
		}
	}
	return 0;
}

int mvpp22_port_rss_init(struct mvpp2_port *port)
{
	struct mvpp2_rss_table *table;
	u32 context = 0;
	int i, ret;

	for (i = 0; i < MVPP22_N_RSS_TABLES; i++)
		port->rss_ctx[i] = -1;

	ret = mvpp22_rss_context_create(port, &context);
	if (ret)
		return ret;

	table = mvpp22_rss_table_get(port->priv, context);
	if (!table)
		return -EINVAL;

	port->rss_ctx[0] = context;

	/* Configure the first table to evenly distribute the packets across
	 * real Rx Queues. The table entries map a hash to a port Rx Queue.
	 */
	for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++)
		table->indir[i] = ethtool_rxfh_indir_default(i, port->nrxqs);

	mvpp22_rss_fill_table(port, table, mvpp22_rss_ctx(port, 0));

	/* Configure default flows */
	mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_IP4, MVPP22_CLS_HEK_IP4_2T);
	mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_IP6, MVPP22_CLS_HEK_IP6_2T);
	mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_TCP4, MVPP22_CLS_HEK_IP4_5T);
	mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_TCP6, MVPP22_CLS_HEK_IP6_5T);
	mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_UDP4, MVPP22_CLS_HEK_IP4_5T);
	mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_UDP6, MVPP22_CLS_HEK_IP6_5T);

	return 0;
}
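
/* The configuration set up here is exposed through the standard ethtool
 * interface; as a sketch (the device name is a placeholder):
 *
 *   ethtool -x eth0                        # dump the RSS indirection table
 *   ethtool -X eth0 equal 4                # spread flows over 4 queues
 *   ethtool -N eth0 rx-flow-hash tcp4 sdfn # hash TCP4 on addresses + ports
 *
 * These land in mvpp2_ethtool_rxfh_get()/_set() and the rss_ctx helpers
 * above via the driver's ethtool_ops.
 */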