// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>

#include "spectrum.h"
#include "core_acl_flex_keys.h"

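/* Translate the flower rule's TC actions into mlxsw ACL rule actions.
 * A count action is implicitly prepended when the first action requests
 * immediate (or "any") HW stats, so counters are later available to
 * mlxsw_sp_flower_stats().
 */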
static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
                                         struct mlxsw_sp_acl_block *block,
                                         struct mlxsw_sp_acl_rule_info *rulei,
                                         struct flow_action *flow_action,
                                         struct netlink_ext_ack *extack)
{
        const struct flow_action_entry *act;
        int mirror_act_count = 0;
        int err, i;

        if (!flow_action_has_entries(flow_action))
                return 0;
        if (!flow_action_mixed_hw_stats_check(flow_action, extack))
                return -EOPNOTSUPP;

        act = flow_action_first_entry_get(flow_action);
        if (act->hw_stats == FLOW_ACTION_HW_STATS_ANY ||
            act->hw_stats == FLOW_ACTION_HW_STATS_IMMEDIATE) {
                /* Count action is inserted first */
                err = mlxsw_sp_acl_rulei_act_count(mlxsw_sp, rulei, extack);
                if (err)
                        return err;
        } else if (act->hw_stats != FLOW_ACTION_HW_STATS_DISABLED &&
                   act->hw_stats != FLOW_ACTION_HW_STATS_DONT_CARE) {
                NL_SET_ERR_MSG_MOD(extack, "Unsupported action HW stats type");
                return -EOPNOTSUPP;
        }

        flow_action_for_each(i, act, flow_action) {
                switch (act->id) {
                case FLOW_ACTION_ACCEPT:
                        err = mlxsw_sp_acl_rulei_act_terminate(rulei);
                        if (err) {
                                NL_SET_ERR_MSG_MOD(extack, "Cannot append terminate action");
                                return err;
                        }
                        break;
                case FLOW_ACTION_DROP: {
                        bool ingress;

                        if (mlxsw_sp_acl_block_is_mixed_bound(block)) {
                                NL_SET_ERR_MSG_MOD(extack, "Drop action is not supported when block is bound to ingress and egress");
                                return -EOPNOTSUPP;
                        }
                        ingress = mlxsw_sp_acl_block_is_ingress_bound(block);
                        err = mlxsw_sp_acl_rulei_act_drop(rulei, ingress,
                                                          act->cookie, extack);
                        if (err) {
                                NL_SET_ERR_MSG_MOD(extack, "Cannot append drop action");
                                return err;
                        }

                        /* Forbid block with this rulei to be bound
                         * to ingress/egress in future. Ingress rule is
                         * a blocker for egress and vice versa.
                         */
                        if (ingress)
                                rulei->egress_bind_blocker = 1;
                        else
                                rulei->ingress_bind_blocker = 1;
                        }
                        break;
                case FLOW_ACTION_TRAP:
                        err = mlxsw_sp_acl_rulei_act_trap(rulei);
                        if (err) {
                                NL_SET_ERR_MSG_MOD(extack, "Cannot append trap action");
                                return err;
                        }
                        break;
                case FLOW_ACTION_GOTO: {
                        u32 chain_index = act->chain_index;
                        struct mlxsw_sp_acl_ruleset *ruleset;
                        u16 group_id;

                        ruleset = mlxsw_sp_acl_ruleset_lookup(mlxsw_sp, block,
                                                              chain_index,
                                                              MLXSW_SP_ACL_PROFILE_FLOWER);
                        if (IS_ERR(ruleset))
                                return PTR_ERR(ruleset);

                        group_id = mlxsw_sp_acl_ruleset_group_id(ruleset);
                        err = mlxsw_sp_acl_rulei_act_jump(rulei, group_id);
                        if (err) {
                                NL_SET_ERR_MSG_MOD(extack, "Cannot append jump action");
                                return err;
                        }
                        }
                        break;
                case FLOW_ACTION_REDIRECT: {
                        struct net_device *out_dev;
                        struct mlxsw_sp_fid *fid;
                        u16 fid_index;

                        if (mlxsw_sp_acl_block_is_egress_bound(block)) {
                                NL_SET_ERR_MSG_MOD(extack, "Redirect action is not supported on egress");
                                return -EOPNOTSUPP;
                        }

                        /* Forbid block with this rulei to be bound
                         * to egress in future.
                         */
                        rulei->egress_bind_blocker = 1;

                        fid = mlxsw_sp_acl_dummy_fid(mlxsw_sp);
                        fid_index = mlxsw_sp_fid_index(fid);
                        err = mlxsw_sp_acl_rulei_act_fid_set(mlxsw_sp, rulei,
                                                             fid_index, extack);
                        if (err)
                                return err;

                        out_dev = act->dev;
                        err = mlxsw_sp_acl_rulei_act_fwd(mlxsw_sp, rulei,
                                                         out_dev, extack);
                        if (err)
                                return err;
                        }
                        break;
                case FLOW_ACTION_MIRRED: {
                        struct net_device *out_dev = act->dev;

                        if (mirror_act_count++) {
                                NL_SET_ERR_MSG_MOD(extack, "Multiple mirror actions per rule are not supported");
                                return -EOPNOTSUPP;
                        }

                        err = mlxsw_sp_acl_rulei_act_mirror(mlxsw_sp, rulei,
                                                            block, out_dev,
                                                            extack);
                        if (err)
                                return err;
                        }
                        break;
                case FLOW_ACTION_VLAN_MANGLE: {
                        u16 proto = be16_to_cpu(act->vlan.proto);
                        u8 prio = act->vlan.prio;
                        u16 vid = act->vlan.vid;

                        err = mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei,
                                                          act->id, vid,
                                                          proto, prio, extack);
                        if (err)
                                return err;
                        break;
                        }
                case FLOW_ACTION_PRIORITY:
                        err = mlxsw_sp_acl_rulei_act_priority(mlxsw_sp, rulei,
                                                              act->priority,
                                                              extack);
                        if (err)
                                return err;
                        break;
                case FLOW_ACTION_MANGLE: {
                        enum flow_action_mangle_base htype = act->mangle.htype;
                        __be32 be_mask = (__force __be32) act->mangle.mask;
                        __be32 be_val = (__force __be32) act->mangle.val;
                        u32 offset = act->mangle.offset;
                        u32 mask = be32_to_cpu(be_mask);
                        u32 val = be32_to_cpu(be_val);

                        err = mlxsw_sp_acl_rulei_act_mangle(mlxsw_sp, rulei,
                                                            htype, offset,
                                                            mask, val, extack);
                        if (err)
                                return err;
                        break;
                        }
                default:
                        NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
                        dev_err(mlxsw_sp->bus_info->dev, "Unsupported action\n");
                        return -EOPNOTSUPP;
                }
        }
        return 0;
}

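/* Match on the ingress port carried in the META key. The ifindex must be
 * fully masked and refer to an mlxsw port on the same device; it is
 * translated into a SRC_SYS_PORT key/mask pair.
 */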
static int mlxsw_sp_flower_parse_meta(struct mlxsw_sp_acl_rule_info *rulei,
                                      struct flow_cls_offload *f,
                                      struct mlxsw_sp_acl_block *block)
{
        struct flow_rule *rule = flow_cls_offload_flow_rule(f);
        struct mlxsw_sp_port *mlxsw_sp_port;
        struct net_device *ingress_dev;
        struct flow_match_meta match;

        if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
                return 0;

        flow_rule_match_meta(rule, &match);
        if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
                NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported ingress ifindex mask");
                return -EINVAL;
        }

        ingress_dev = __dev_get_by_index(block->net,
                                         match.key->ingress_ifindex);
        if (!ingress_dev) {
                NL_SET_ERR_MSG_MOD(f->common.extack, "Can't find specified ingress port to match on");
                return -EINVAL;
        }

        if (!mlxsw_sp_port_dev_check(ingress_dev)) {
                NL_SET_ERR_MSG_MOD(f->common.extack, "Can't match on non-mlxsw ingress port");
                return -EINVAL;
        }

        mlxsw_sp_port = netdev_priv(ingress_dev);
        if (mlxsw_sp_port->mlxsw_sp != block->mlxsw_sp) {
                NL_SET_ERR_MSG_MOD(f->common.extack, "Can't match on a port from different device");
                return -EINVAL;
        }

        mlxsw_sp_acl_rulei_keymask_u32(rulei,
                                       MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
                                       mlxsw_sp_port->local_port,
                                       0xFFFFFFFF);
        return 0;
}

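/* Match on IPv4 source/destination addresses as 32-bit key/mask buffers. */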
static void mlxsw_sp_flower_parse_ipv4(struct mlxsw_sp_acl_rule_info *rulei,
                                       struct flow_cls_offload *f)
{
        struct flow_match_ipv4_addrs match;

        flow_rule_match_ipv4_addrs(f->rule, &match);

        mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
                                       (char *) &match.key->src,
                                       (char *) &match.mask->src, 4);
        mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31,
                                       (char *) &match.key->dst,
                                       (char *) &match.mask->dst, 4);
}

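/* Match on IPv6 addresses. Each 128-bit address is split into four
 * 32-bit flex-key elements, from bits 96-127 down to bits 0-31.
 */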
static void mlxsw_sp_flower_parse_ipv6(struct mlxsw_sp_acl_rule_info *rulei,
                                       struct flow_cls_offload *f)
{
        struct flow_match_ipv6_addrs match;

        flow_rule_match_ipv6_addrs(f->rule, &match);

        mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_96_127,
                                       &match.key->src.s6_addr[0x0],
                                       &match.mask->src.s6_addr[0x0], 4);
        mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_64_95,
                                       &match.key->src.s6_addr[0x4],
                                       &match.mask->src.s6_addr[0x4], 4);
        mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_32_63,
                                       &match.key->src.s6_addr[0x8],
                                       &match.mask->src.s6_addr[0x8], 4);
        mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
                                       &match.key->src.s6_addr[0xC],
                                       &match.mask->src.s6_addr[0xC], 4);
        mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_96_127,
                                       &match.key->dst.s6_addr[0x0],
                                       &match.mask->dst.s6_addr[0x0], 4);
        mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_64_95,
                                       &match.key->dst.s6_addr[0x4],
                                       &match.mask->dst.s6_addr[0x4], 4);
        mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_32_63,
                                       &match.key->dst.s6_addr[0x8],
                                       &match.mask->dst.s6_addr[0x8], 4);
        mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31,
                                       &match.key->dst.s6_addr[0xC],
                                       &match.mask->dst.s6_addr[0xC], 4);
}

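/* Match on L4 source/destination ports; only valid when the rule also
 * matches on IP protocol TCP or UDP.
 */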
static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp,
                                       struct mlxsw_sp_acl_rule_info *rulei,
                                       struct flow_cls_offload *f,
                                       u8 ip_proto)
{
        const struct flow_rule *rule = flow_cls_offload_flow_rule(f);
        struct flow_match_ports match;

        if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS))
                return 0;

        if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
                NL_SET_ERR_MSG_MOD(f->common.extack, "Only UDP and TCP keys are supported");
                dev_err(mlxsw_sp->bus_info->dev, "Only UDP and TCP keys are supported\n");
                return -EINVAL;
        }

        flow_rule_match_ports(rule, &match);
        mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_DST_L4_PORT,
                                       ntohs(match.key->dst),
                                       ntohs(match.mask->dst));
        mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_SRC_L4_PORT,
                                       ntohs(match.key->src),
                                       ntohs(match.mask->src));
        return 0;
}

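/* Match on TCP flags; only valid for TCP rules. Matching on the
 * reserved flag bits is rejected.
 */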
static int mlxsw_sp_flower_parse_tcp(struct mlxsw_sp *mlxsw_sp,
                                     struct mlxsw_sp_acl_rule_info *rulei,
                                     struct flow_cls_offload *f,
                                     u8 ip_proto)
{
        const struct flow_rule *rule = flow_cls_offload_flow_rule(f);
        struct flow_match_tcp match;

        if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP))
                return 0;

        if (ip_proto != IPPROTO_TCP) {
                NL_SET_ERR_MSG_MOD(f->common.extack, "TCP keys supported only for TCP");
                dev_err(mlxsw_sp->bus_info->dev, "TCP keys supported only for TCP\n");
                return -EINVAL;
        }

        flow_rule_match_tcp(rule, &match);

        if (match.mask->flags & htons(0x0E00)) {
                NL_SET_ERR_MSG_MOD(f->common.extack, "TCP flags match not supported on reserved bits");
                dev_err(mlxsw_sp->bus_info->dev, "TCP flags match not supported on reserved bits\n");
                return -EINVAL;
        }

        mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_TCP_FLAGS,
                                       ntohs(match.key->flags),
                                       ntohs(match.mask->flags));
        return 0;
}

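/* Match on TTL/hop limit and TOS. The TOS byte is split into ECN (low
 * two bits) and DSCP (upper six bits) key elements; only IPv4 and IPv6
 * EtherTypes are accepted.
 */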
static int mlxsw_sp_flower_parse_ip(struct mlxsw_sp *mlxsw_sp,
                                    struct mlxsw_sp_acl_rule_info *rulei,
                                    struct flow_cls_offload *f,
                                    u16 n_proto)
{
        const struct flow_rule *rule = flow_cls_offload_flow_rule(f);
        struct flow_match_ip match;

        if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP))
                return 0;

        if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6) {
                NL_SET_ERR_MSG_MOD(f->common.extack, "IP keys supported only for IPv4/6");
                dev_err(mlxsw_sp->bus_info->dev, "IP keys supported only for IPv4/6\n");
                return -EINVAL;
        }

        flow_rule_match_ip(rule, &match);

        mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_TTL_,
                                       match.key->ttl, match.mask->ttl);

        mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_ECN,
                                       match.key->tos & 0x3,
                                       match.mask->tos & 0x3);

        mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_DSCP,
                                       match.key->tos >> 2,
                                       match.mask->tos >> 2);

        return 0;
}

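/* Top-level parser: reject rules using any dissector key the driver
 * cannot offload, translate each supported match into flex-key elements
 * and finally parse the actions. Matching on VLAN keys is only allowed
 * on ingress and marks the rule as an egress-bind blocker.
 */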
static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
                                 struct mlxsw_sp_acl_block *block,
                                 struct mlxsw_sp_acl_rule_info *rulei,
                                 struct flow_cls_offload *f)
{
        struct flow_rule *rule = flow_cls_offload_flow_rule(f);
        struct flow_dissector *dissector = rule->match.dissector;
        u16 n_proto_mask = 0;
        u16 n_proto_key = 0;
        u16 addr_type = 0;
        u8 ip_proto = 0;
        int err;

        if (dissector->used_keys &
            ~(BIT(FLOW_DISSECTOR_KEY_META) |
              BIT(FLOW_DISSECTOR_KEY_CONTROL) |
              BIT(FLOW_DISSECTOR_KEY_BASIC) |
              BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_PORTS) |
              BIT(FLOW_DISSECTOR_KEY_TCP) |
              BIT(FLOW_DISSECTOR_KEY_IP) |
              BIT(FLOW_DISSECTOR_KEY_VLAN))) {
                dev_err(mlxsw_sp->bus_info->dev, "Unsupported key\n");
                NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported key");
                return -EOPNOTSUPP;
        }

        mlxsw_sp_acl_rulei_priority(rulei, f->common.prio);

        err = mlxsw_sp_flower_parse_meta(rulei, f, block);
        if (err)
                return err;

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
                struct flow_match_control match;

                flow_rule_match_control(rule, &match);
                addr_type = match.key->addr_type;
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
                struct flow_match_basic match;

                flow_rule_match_basic(rule, &match);
                n_proto_key = ntohs(match.key->n_proto);
                n_proto_mask = ntohs(match.mask->n_proto);

                if (n_proto_key == ETH_P_ALL) {
                        n_proto_key = 0;
                        n_proto_mask = 0;
                }
                mlxsw_sp_acl_rulei_keymask_u32(rulei,
                                               MLXSW_AFK_ELEMENT_ETHERTYPE,
                                               n_proto_key, n_proto_mask);

                ip_proto = match.key->ip_proto;
                mlxsw_sp_acl_rulei_keymask_u32(rulei,
                                               MLXSW_AFK_ELEMENT_IP_PROTO,
                                               match.key->ip_proto,
                                               match.mask->ip_proto);
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
                struct flow_match_eth_addrs match;

                flow_rule_match_eth_addrs(rule, &match);
                mlxsw_sp_acl_rulei_keymask_buf(rulei,
                                               MLXSW_AFK_ELEMENT_DMAC_32_47,
                                               match.key->dst,
                                               match.mask->dst, 2);
                mlxsw_sp_acl_rulei_keymask_buf(rulei,
                                               MLXSW_AFK_ELEMENT_DMAC_0_31,
                                               match.key->dst + 2,
                                               match.mask->dst + 2, 4);
                mlxsw_sp_acl_rulei_keymask_buf(rulei,
                                               MLXSW_AFK_ELEMENT_SMAC_32_47,
                                               match.key->src,
                                               match.mask->src, 2);
                mlxsw_sp_acl_rulei_keymask_buf(rulei,
                                               MLXSW_AFK_ELEMENT_SMAC_0_31,
                                               match.key->src + 2,
                                               match.mask->src + 2, 4);
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
                struct flow_match_vlan match;

                flow_rule_match_vlan(rule, &match);
                if (mlxsw_sp_acl_block_is_egress_bound(block)) {
                        NL_SET_ERR_MSG_MOD(f->common.extack, "vlan_id key is not supported on egress");
                        return -EOPNOTSUPP;
                }

                /* Forbid block with this rulei to be bound
                 * to egress in future.
                 */
                rulei->egress_bind_blocker = 1;

                if (match.mask->vlan_id != 0)
                        mlxsw_sp_acl_rulei_keymask_u32(rulei,
                                                       MLXSW_AFK_ELEMENT_VID,
                                                       match.key->vlan_id,
                                                       match.mask->vlan_id);
                if (match.mask->vlan_priority != 0)
                        mlxsw_sp_acl_rulei_keymask_u32(rulei,
                                                       MLXSW_AFK_ELEMENT_PCP,
                                                       match.key->vlan_priority,
                                                       match.mask->vlan_priority);
        }

        if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
                mlxsw_sp_flower_parse_ipv4(rulei, f);

        if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS)
                mlxsw_sp_flower_parse_ipv6(rulei, f);

        err = mlxsw_sp_flower_parse_ports(mlxsw_sp, rulei, f, ip_proto);
        if (err)
                return err;
        err = mlxsw_sp_flower_parse_tcp(mlxsw_sp, rulei, f, ip_proto);
        if (err)
                return err;

        err = mlxsw_sp_flower_parse_ip(mlxsw_sp, rulei, f, n_proto_key & n_proto_mask);
        if (err)
                return err;

        return mlxsw_sp_flower_parse_actions(mlxsw_sp, block, rulei,
                                             &f->rule->action,
                                             f->common.extack);
}

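/* FLOW_CLS_REPLACE handler: get (or create) the ruleset for the chain,
 * create a rule keyed by the flower cookie, parse matches and actions
 * into it, commit and add it. The ruleset reference obtained here is
 * put again before returning on success.
 */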
int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
                            struct mlxsw_sp_acl_block *block,
                            struct flow_cls_offload *f)
{
        struct mlxsw_sp_acl_rule_info *rulei;
        struct mlxsw_sp_acl_ruleset *ruleset;
        struct mlxsw_sp_acl_rule *rule;
        int err;

        ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
                                           f->common.chain_index,
                                           MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
        if (IS_ERR(ruleset))
                return PTR_ERR(ruleset);

        rule = mlxsw_sp_acl_rule_create(mlxsw_sp, ruleset, f->cookie, NULL,
                                        f->common.extack);
        if (IS_ERR(rule)) {
                err = PTR_ERR(rule);
                goto err_rule_create;
        }

        rulei = mlxsw_sp_acl_rule_rulei(rule);
        err = mlxsw_sp_flower_parse(mlxsw_sp, block, rulei, f);
        if (err)
                goto err_flower_parse;

        err = mlxsw_sp_acl_rulei_commit(rulei);
        if (err)
                goto err_rulei_commit;

        err = mlxsw_sp_acl_rule_add(mlxsw_sp, rule);
        if (err)
                goto err_rule_add;

        mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
        return 0;

err_rule_add:
err_rulei_commit:
err_flower_parse:
        mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
err_rule_create:
        mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
        return err;
}

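/* FLOW_CLS_DESTROY handler: look up the rule by cookie and tear it
 * down; bails out silently if the ruleset cannot be found.
 */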
void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp,
                             struct mlxsw_sp_acl_block *block,
                             struct flow_cls_offload *f)
{
        struct mlxsw_sp_acl_ruleset *ruleset;
        struct mlxsw_sp_acl_rule *rule;

        ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
                                           f->common.chain_index,
                                           MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
        if (IS_ERR(ruleset))
                return;

        rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
        if (rule) {
                mlxsw_sp_acl_rule_del(mlxsw_sp, rule);
                mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
        }

        mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
}

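/* FLOW_CLS_STATS handler: fetch packet/byte counters and the last-use
 * timestamp for the rule and report them via flow_stats_update().
 */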
int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
                          struct mlxsw_sp_acl_block *block,
                          struct flow_cls_offload *f)
{
        enum flow_action_hw_stats used_hw_stats = FLOW_ACTION_HW_STATS_DISABLED;
        struct mlxsw_sp_acl_ruleset *ruleset;
        struct mlxsw_sp_acl_rule *rule;
        u64 packets;
        u64 lastuse;
        u64 bytes;
        int err;

        ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
                                           f->common.chain_index,
                                           MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
        if (WARN_ON(IS_ERR(ruleset)))
                return -EINVAL;

        rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
        if (!rule)
                return -EINVAL;

        err = mlxsw_sp_acl_rule_get_stats(mlxsw_sp, rule, &packets, &bytes,
                                          &lastuse, &used_hw_stats);
        if (err)
                goto err_rule_get_stats;

        flow_stats_update(&f->stats, bytes, packets, lastuse, used_hw_stats);

        mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
        return 0;

err_rule_get_stats:
        mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
        return err;
}

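/* FLOW_CLS_TMPLT_CREATE handler: parse the template into a scratch rule
 * info to learn which key elements it uses, then get the ruleset with
 * that element-usage hint. The ruleset reference is deliberately kept
 * until the template is destroyed.
 */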
int mlxsw_sp_flower_tmplt_create(struct mlxsw_sp *mlxsw_sp,
                                 struct mlxsw_sp_acl_block *block,
                                 struct flow_cls_offload *f)
{
        struct mlxsw_sp_acl_ruleset *ruleset;
        struct mlxsw_sp_acl_rule_info rulei;
        int err;

        memset(&rulei, 0, sizeof(rulei));
        err = mlxsw_sp_flower_parse(mlxsw_sp, block, &rulei, f);
        if (err)
                return err;
        ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
                                           f->common.chain_index,
                                           MLXSW_SP_ACL_PROFILE_FLOWER,
                                           &rulei.values.elusage);

        /* keep the reference to the ruleset */
        return PTR_ERR_OR_ZERO(ruleset);
}

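/* FLOW_CLS_TMPLT_DESTROY handler: drop both the lookup reference and
 * the reference kept by mlxsw_sp_flower_tmplt_create().
 */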
void mlxsw_sp_flower_tmplt_destroy(struct mlxsw_sp *mlxsw_sp,
                                   struct mlxsw_sp_acl_block *block,
                                   struct flow_cls_offload *f)
{
        struct mlxsw_sp_acl_ruleset *ruleset;

        ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
                                           f->common.chain_index,
                                           MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
        if (IS_ERR(ruleset))
                return;
        /* put the reference to the ruleset kept in create */
        mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
        mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
}