/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
#include "en.h"
#include "en_tc.h"

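/*
 * One offloaded TC flower filter: hashed in priv->fs.tc.ht by the
 * cls_flower cookie, holding the hardware rule installed for it.
 */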
struct mlx5e_tc_flow {
	struct rhash_head	node;
	u64			cookie;
	struct mlx5_flow_rule	*rule;
};

#define MLX5E_TC_TABLE_NUM_ENTRIES 1024
#define MLX5E_TC_TABLE_NUM_GROUPS 4

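/*
 * Install a rule in the TC flow table, creating the table on first use.
 * Forwarded flows are steered to the vlan flow table; dropped flows use a
 * flow counter as their destination so hardware stats can be read back.
 */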
static struct mlx5_flow_rule *mlx5e_tc_add_flow(struct mlx5e_priv *priv,
						u32 *match_c, u32 *match_v,
						u32 action, u32 flow_tag)
{
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_flow_destination dest = { 0 };
	struct mlx5_fc *counter = NULL;
	struct mlx5_flow_rule *rule;
	bool table_created = false;

	if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest.ft = priv->fs.vlan.ft.t;
	} else {
		counter = mlx5_fc_create(dev, true);
		if (IS_ERR(counter))
			return ERR_CAST(counter);

		dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest.counter = counter;
	}

	if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
		priv->fs.tc.t =
			mlx5_create_auto_grouped_flow_table(priv->fs.ns,
							    MLX5E_TC_PRIO,
							    MLX5E_TC_TABLE_NUM_ENTRIES,
							    MLX5E_TC_TABLE_NUM_GROUPS,
							    0);
		if (IS_ERR(priv->fs.tc.t)) {
			netdev_err(priv->netdev,
				   "Failed to create tc offload table\n");
			rule = ERR_CAST(priv->fs.tc.t);
			goto err_create_ft;
		}

		table_created = true;
	}

	rule = mlx5_add_flow_rule(priv->fs.tc.t, MLX5_MATCH_OUTER_HEADERS,
				  match_c, match_v,
				  action, flow_tag,
				  &dest);

	if (IS_ERR(rule))
		goto err_add_rule;

	return rule;

err_add_rule:
	if (table_created) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}
err_create_ft:
	mlx5_fc_destroy(dev, counter);

	return rule;
}

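/* Remove a rule and its counter; drop the TC flow table once it is empty. */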
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5_flow_rule *rule)
{
	struct mlx5_fc *counter = NULL;

	counter = mlx5_flow_rule_counter(rule);

	mlx5_del_flow_rule(rule);

	mlx5_fc_destroy(priv->mdev, counter);

	if (!mlx5e_tc_num_filters(priv)) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}
}

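/*
 * Translate the cls_flower match (control/basic/eth/IPv4/IPv6/ports keys
 * and masks) into the device's fte_match_param layout.  Filters using any
 * other dissector key are rejected with -EOPNOTSUPP.
 */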
static int parse_cls_flower(struct mlx5e_priv *priv,
			    u32 *match_c, u32 *match_v,
			    struct tc_cls_flower_offload *f)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, match_c, outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, match_v, outer_headers);
	u16 addr_type = 0;
	u8 ip_proto = 0;

	if (f->dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS))) {
		netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
			    f->dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->key);
		addr_type = key->addr_type;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->key);
		struct flow_dissector_key_basic *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->mask);
		ip_proto = key->ip_proto;

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
			 ntohs(mask->n_proto));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
			 ntohs(key->n_proto));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
			 mask->ip_proto);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
			 key->ip_proto);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_dissector_key_eth_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->key);
		struct flow_dissector_key_eth_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->mask);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     dmac_47_16),
				mask->dst);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     dmac_47_16),
				key->dst);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     smac_47_16),
				mask->src);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     smac_47_16),
				key->src);
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &mask->src, sizeof(mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &key->src, sizeof(key->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &mask->dst, sizeof(mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &key->dst, sizeof(key->dst));
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_dissector_key_ipv6_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv6_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &mask->src, sizeof(mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &key->src, sizeof(key->src));

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &mask->dst, sizeof(mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &key->dst, sizeof(key->dst));
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_dissector_key_ports *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->key);
		struct flow_dissector_key_ports *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->mask);
		switch (ip_proto) {
		case IPPROTO_TCP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_dport, ntohs(key->dst));
			break;

		case IPPROTO_UDP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_dport, ntohs(key->dst));
			break;
		default:
			netdev_err(priv->netdev,
				   "Only UDP and TCP transport are supported\n");
			return -EINVAL;
		}
	}

	return 0;
}

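/*
 * Map the filter's TC actions onto a flow context: gact drop becomes
 * DROP (plus COUNT when the device has flow counters), skbedit mark
 * becomes FWD_DEST with the mark carried in the flow tag.  Any other
 * action, or more than one action, is rejected.
 */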
static int parse_tc_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
			    u32 *action, u32 *flow_tag)
{
	const struct tc_action *a;

	if (tc_no_actions(exts))
		return -EINVAL;

	*flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
	*action = 0;

	tc_for_each_action(a, exts) {
		/* Only support a single action per rule */
		if (*action)
			return -EINVAL;

		if (is_tcf_gact_shot(a)) {
			*action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
			if (MLX5_CAP_FLOWTABLE(priv->mdev,
					       flow_table_properties_nic_receive.flow_counter))
				*action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
			continue;
		}

		if (is_tcf_skbedit_mark(a)) {
			u32 mark = tcf_skbedit_mark(a);

			if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
				netdev_warn(priv->netdev, "Bad flow mark - only 16 bit is supported: 0x%x\n",
					    mark);
				return -EINVAL;
			}

			*flow_tag = mark;
			*action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
			continue;
		}

		return -EINVAL;
	}

	return 0;
}

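/*
 * Add or replace an offloaded flower filter.  When a flow with the same
 * cookie already exists, the new rule is installed first and the old rule
 * is removed only if that succeeds.
 */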
int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
			   struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	u32 *match_c;
	u32 *match_v;
	int err = 0;
	u32 flow_tag;
	u32 action;
	struct mlx5e_tc_flow *flow;
	struct mlx5_flow_rule *old = NULL;

	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
				      tc->ht_params);
	if (flow)
		old = flow->rule;
	else
		flow = kzalloc(sizeof(*flow), GFP_KERNEL);

	match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
	match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
	if (!match_c || !match_v || !flow) {
		err = -ENOMEM;
		goto err_free;
	}

	flow->cookie = f->cookie;

	err = parse_cls_flower(priv, match_c, match_v, f);
	if (err < 0)
		goto err_free;

	err = parse_tc_actions(priv, f->exts, &action, &flow_tag);
	if (err < 0)
		goto err_free;

	err = rhashtable_insert_fast(&tc->ht, &flow->node,
				     tc->ht_params);
	if (err)
		goto err_free;

	flow->rule = mlx5e_tc_add_flow(priv, match_c, match_v, action,
				       flow_tag);
	if (IS_ERR(flow->rule)) {
		err = PTR_ERR(flow->rule);
		goto err_hash_del;
	}

	if (old)
		mlx5e_tc_del_flow(priv, old);

	goto out;

err_hash_del:
	rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params);

err_free:
	if (!old)
		kfree(flow);
out:
	kfree(match_c);
	kfree(match_v);
	return err;
}

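/* Remove the offloaded flower filter identified by the cls_flower cookie. */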
int mlx5e_delete_flower(struct mlx5e_priv *priv,
			struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_flow *flow;
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
				      tc->ht_params);
	if (!flow)
		return -EINVAL;

	rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params);

	mlx5e_tc_del_flow(priv, flow->rule);

	kfree(flow);

	return 0;
}

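/*
 * Feed the cached hardware counter of an offloaded filter (bytes, packets,
 * last use) back into its TC action stats.
 */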
int mlx5e_stats_flower(struct mlx5e_priv *priv,
		       struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	struct mlx5e_tc_flow *flow;
	struct tc_action *a;
	struct mlx5_fc *counter;
	u64 bytes;
	u64 packets;
	u64 lastuse;

	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
				      tc->ht_params);
	if (!flow)
		return -EINVAL;

	counter = mlx5_flow_rule_counter(flow->rule);
	if (!counter)
		return 0;

	mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);

	tc_for_each_action(a, f->exts)
		tcf_action_stats_update(a, bytes, packets, lastuse);

	return 0;
}

static const struct rhashtable_params mlx5e_tc_flow_ht_params = {
	.head_offset = offsetof(struct mlx5e_tc_flow, node),
	.key_offset = offsetof(struct mlx5e_tc_flow, cookie),
	.key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
	.automatic_shrinking = true,
};

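/* Initialize the flow hashtable; the TC flow table itself is created lazily. */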
int mlx5e_tc_init(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	tc->ht_params = mlx5e_tc_flow_ht_params;
	return rhashtable_init(&tc->ht, &tc->ht_params);
}

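/* rhashtable_free_and_destroy() callback: release one remaining flow. */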
static void _mlx5e_tc_del_flow(void *ptr, void *arg)
{
	struct mlx5e_tc_flow *flow = ptr;
	struct mlx5e_priv *priv = arg;

	mlx5e_tc_del_flow(priv, flow->rule);
	kfree(flow);
}

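/* Free all offloaded flows and destroy the TC flow table, if any. */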
void mlx5e_tc_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, priv);

	if (!IS_ERR_OR_NULL(tc->t)) {
		mlx5_destroy_flow_table(tc->t);
		tc->t = NULL;
	}
}