// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>

#include "spectrum.h"
#include "core_acl_flex_keys.h"

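/* Translate the flower action list (flow_action) into mlxsw ACL rule
 * actions on the rule info. Unsupported actions and unsupported
 * hardware-stats types are rejected with an extack message.
 */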
static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_acl_block *block,
					 struct mlxsw_sp_acl_rule_info *rulei,
					 struct flow_action *flow_action,
					 struct netlink_ext_ack *extack)
{
	const struct flow_action_entry *act;
	int mirror_act_count = 0;
	int err, i;

	if (!flow_action_has_entries(flow_action))
		return 0;
	if (!flow_action_mixed_hw_stats_check(flow_action, extack))
		return -EOPNOTSUPP;

	act = flow_action_first_entry_get(flow_action);
	if (act->hw_stats == FLOW_ACTION_HW_STATS_ANY ||
	    act->hw_stats == FLOW_ACTION_HW_STATS_IMMEDIATE) {
		/* Count action is inserted first */
		err = mlxsw_sp_acl_rulei_act_count(mlxsw_sp, rulei, extack);
		if (err)
			return err;
	} else if (act->hw_stats != FLOW_ACTION_HW_STATS_DISABLED &&
		   act->hw_stats != FLOW_ACTION_HW_STATS_DONT_CARE) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported action HW stats type");
		return -EOPNOTSUPP;
	}

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_ACCEPT:
			err = mlxsw_sp_acl_rulei_act_terminate(rulei);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append terminate action");
				return err;
			}
			break;
		case FLOW_ACTION_DROP: {
			bool ingress;

			if (mlxsw_sp_acl_block_is_mixed_bound(block)) {
				NL_SET_ERR_MSG_MOD(extack, "Drop action is not supported when block is bound to ingress and egress");
				return -EOPNOTSUPP;
			}
			ingress = mlxsw_sp_acl_block_is_ingress_bound(block);
			err = mlxsw_sp_acl_rulei_act_drop(rulei, ingress,
							  act->cookie, extack);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append drop action");
				return err;
			}

			/* Forbid block with this rulei to be bound
			 * to ingress/egress in future. Ingress rule is
			 * a blocker for egress and vice versa.
			 */
			if (ingress)
				rulei->egress_bind_blocker = 1;
			else
				rulei->ingress_bind_blocker = 1;
			}
			break;
		case FLOW_ACTION_TRAP:
			err = mlxsw_sp_acl_rulei_act_trap(rulei);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append trap action");
				return err;
			}
			break;
		case FLOW_ACTION_GOTO: {
			u32 chain_index = act->chain_index;
			struct mlxsw_sp_acl_ruleset *ruleset;
			u16 group_id;

			ruleset = mlxsw_sp_acl_ruleset_lookup(mlxsw_sp, block,
							      chain_index,
							      MLXSW_SP_ACL_PROFILE_FLOWER);
			if (IS_ERR(ruleset))
				return PTR_ERR(ruleset);

			group_id = mlxsw_sp_acl_ruleset_group_id(ruleset);
			err = mlxsw_sp_acl_rulei_act_jump(rulei, group_id);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append jump action");
				return err;
			}
			}
			break;
		case FLOW_ACTION_REDIRECT: {
			struct net_device *out_dev;
			struct mlxsw_sp_fid *fid;
			u16 fid_index;

			if (mlxsw_sp_acl_block_is_egress_bound(block)) {
				NL_SET_ERR_MSG_MOD(extack, "Redirect action is not supported on egress");
				return -EOPNOTSUPP;
			}

			/* Forbid block with this rulei to be bound
			 * to egress in future.
			 */
			rulei->egress_bind_blocker = 1;

			fid = mlxsw_sp_acl_dummy_fid(mlxsw_sp);
			fid_index = mlxsw_sp_fid_index(fid);
			err = mlxsw_sp_acl_rulei_act_fid_set(mlxsw_sp, rulei,
							     fid_index, extack);
			if (err)
				return err;

			out_dev = act->dev;
			err = mlxsw_sp_acl_rulei_act_fwd(mlxsw_sp, rulei,
							 out_dev, extack);
			if (err)
				return err;
			}
			break;
		case FLOW_ACTION_MIRRED: {
			struct net_device *out_dev = act->dev;

			if (mirror_act_count++) {
				NL_SET_ERR_MSG_MOD(extack, "Multiple mirror actions per rule are not supported");
				return -EOPNOTSUPP;
			}

			err = mlxsw_sp_acl_rulei_act_mirror(mlxsw_sp, rulei,
							    block, out_dev,
							    extack);
			if (err)
				return err;
			}
			break;
		case FLOW_ACTION_VLAN_MANGLE: {
			u16 proto = be16_to_cpu(act->vlan.proto);
			u8 prio = act->vlan.prio;
			u16 vid = act->vlan.vid;

			err = mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei,
							  act->id, vid,
							  proto, prio, extack);
			if (err)
				return err;
			}
			break;
		case FLOW_ACTION_PRIORITY:
			err = mlxsw_sp_acl_rulei_act_priority(mlxsw_sp, rulei,
							      act->priority,
							      extack);
			if (err)
				return err;
			break;
		case FLOW_ACTION_MANGLE: {
			enum flow_action_mangle_base htype = act->mangle.htype;
			__be32 be_mask = (__force __be32) act->mangle.mask;
			__be32 be_val = (__force __be32) act->mangle.val;
			u32 offset = act->mangle.offset;
			u32 mask = be32_to_cpu(be_mask);
			u32 val = be32_to_cpu(be_val);

			err = mlxsw_sp_acl_rulei_act_mangle(mlxsw_sp, rulei,
							    htype, offset,
							    mask, val, extack);
			if (err)
				return err;
			}
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
			dev_err(mlxsw_sp->bus_info->dev, "Unsupported action\n");
			return -EOPNOTSUPP;
		}
	}
	return 0;
}

static int mlxsw_sp_flower_parse_meta(struct mlxsw_sp_acl_rule_info *rulei,
				      struct flow_cls_offload *f,
				      struct mlxsw_sp_acl_block *block)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *ingress_dev;
	struct flow_match_meta match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
		return 0;

	flow_rule_match_meta(rule, &match);
	if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported ingress ifindex mask");
		return -EINVAL;
	}

	ingress_dev = __dev_get_by_index(block->net,
					 match.key->ingress_ifindex);
	if (!ingress_dev) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Can't find specified ingress port to match on");
		return -EINVAL;
	}

	if (!mlxsw_sp_port_dev_check(ingress_dev)) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Can't match on non-mlxsw ingress port");
		return -EINVAL;
	}

	mlxsw_sp_port = netdev_priv(ingress_dev);
	if (mlxsw_sp_port->mlxsw_sp != block->mlxsw_sp) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Can't match on a port from different device");
		return -EINVAL;
	}

	mlxsw_sp_acl_rulei_keymask_u32(rulei,
				       MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
				       mlxsw_sp_port->local_port,
				       0xFFFFFFFF);
	return 0;
}

static void mlxsw_sp_flower_parse_ipv4(struct mlxsw_sp_acl_rule_info *rulei,
				       struct flow_cls_offload *f)
{
	struct flow_match_ipv4_addrs match;

	flow_rule_match_ipv4_addrs(f->rule, &match);

	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
				       (char *) &match.key->src,
				       (char *) &match.mask->src, 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31,
				       (char *) &match.key->dst,
				       (char *) &match.mask->dst, 4);
}

static void mlxsw_sp_flower_parse_ipv6(struct mlxsw_sp_acl_rule_info *rulei,
				       struct flow_cls_offload *f)
{
	struct flow_match_ipv6_addrs match;

	flow_rule_match_ipv6_addrs(f->rule, &match);

	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_96_127,
				       &match.key->src.s6_addr[0x0],
				       &match.mask->src.s6_addr[0x0], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_64_95,
				       &match.key->src.s6_addr[0x4],
				       &match.mask->src.s6_addr[0x4], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_32_63,
				       &match.key->src.s6_addr[0x8],
				       &match.mask->src.s6_addr[0x8], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
				       &match.key->src.s6_addr[0xC],
				       &match.mask->src.s6_addr[0xC], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_96_127,
				       &match.key->dst.s6_addr[0x0],
				       &match.mask->dst.s6_addr[0x0], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_64_95,
				       &match.key->dst.s6_addr[0x4],
				       &match.mask->dst.s6_addr[0x4], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_32_63,
				       &match.key->dst.s6_addr[0x8],
				       &match.mask->dst.s6_addr[0x8], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31,
				       &match.key->dst.s6_addr[0xC],
				       &match.mask->dst.s6_addr[0xC], 4);
}

static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_acl_rule_info *rulei,
				       struct flow_cls_offload *f,
				       u8 ip_proto)
{
	const struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_match_ports match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS))
		return 0;

	if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Only UDP and TCP keys are supported");
		dev_err(mlxsw_sp->bus_info->dev, "Only UDP and TCP keys are supported\n");
		return -EINVAL;
	}

	flow_rule_match_ports(rule, &match);
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_DST_L4_PORT,
				       ntohs(match.key->dst),
				       ntohs(match.mask->dst));
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_SRC_L4_PORT,
				       ntohs(match.key->src),
				       ntohs(match.mask->src));
	return 0;
}

static int mlxsw_sp_flower_parse_tcp(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_rule_info *rulei,
				     struct flow_cls_offload *f,
				     u8 ip_proto)
{
	const struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_match_tcp match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP))
		return 0;

	if (ip_proto != IPPROTO_TCP) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "TCP keys supported only for TCP");
		dev_err(mlxsw_sp->bus_info->dev, "TCP keys supported only for TCP\n");
		return -EINVAL;
	}

	flow_rule_match_tcp(rule, &match);

	if (match.mask->flags & htons(0x0E00)) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "TCP flags match not supported on reserved bits");
		dev_err(mlxsw_sp->bus_info->dev, "TCP flags match not supported on reserved bits\n");
		return -EINVAL;
	}

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_TCP_FLAGS,
				       ntohs(match.key->flags),
				       ntohs(match.mask->flags));
	return 0;
}

static int mlxsw_sp_flower_parse_ip(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_acl_rule_info *rulei,
				    struct flow_cls_offload *f,
				    u16 n_proto)
{
	const struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_match_ip match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP))
		return 0;

	if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "IP keys supported only for IPv4/6");
		dev_err(mlxsw_sp->bus_info->dev, "IP keys supported only for IPv4/6\n");
		return -EINVAL;
	}

	flow_rule_match_ip(rule, &match);

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_TTL_,
				       match.key->ttl, match.mask->ttl);

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_ECN,
				       match.key->tos & 0x3,
				       match.mask->tos & 0x3);

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_DSCP,
				       match.key->tos >> 2,
				       match.mask->tos >> 2);

	return 0;
}

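/* Top-level match parser: validate the dissector keys used by the rule,
 * translate each supported key into ACL key/mask elements and then parse
 * the rule's actions.
 */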
static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_block *block,
				 struct mlxsw_sp_acl_rule_info *rulei,
				 struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_dissector *dissector = rule->match.dissector;
	u16 n_proto_mask = 0;
	u16 n_proto_key = 0;
	u16 addr_type = 0;
	u8 ip_proto = 0;
	int err;

	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_META) |
	      BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_TCP) |
	      BIT(FLOW_DISSECTOR_KEY_IP) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN))) {
		dev_err(mlxsw_sp->bus_info->dev, "Unsupported key\n");
		NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported key");
		return -EOPNOTSUPP;
	}

	mlxsw_sp_acl_rulei_priority(rulei, f->common.prio);

	err = mlxsw_sp_flower_parse_meta(rulei, f, block);
	if (err)
		return err;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		addr_type = match.key->addr_type;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		n_proto_key = ntohs(match.key->n_proto);
		n_proto_mask = ntohs(match.mask->n_proto);

		if (n_proto_key == ETH_P_ALL) {
			n_proto_key = 0;
			n_proto_mask = 0;
		}
		mlxsw_sp_acl_rulei_keymask_u32(rulei,
					       MLXSW_AFK_ELEMENT_ETHERTYPE,
					       n_proto_key, n_proto_mask);

		ip_proto = match.key->ip_proto;
		mlxsw_sp_acl_rulei_keymask_u32(rulei,
					       MLXSW_AFK_ELEMENT_IP_PROTO,
					       match.key->ip_proto,
					       match.mask->ip_proto);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_DMAC_32_47,
					       match.key->dst,
					       match.mask->dst, 2);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_DMAC_0_31,
					       match.key->dst + 2,
					       match.mask->dst + 2, 4);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_SMAC_32_47,
					       match.key->src,
					       match.mask->src, 2);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_SMAC_0_31,
					       match.key->src + 2,
					       match.mask->src + 2, 4);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(rule, &match);
		if (mlxsw_sp_acl_block_is_egress_bound(block)) {
			NL_SET_ERR_MSG_MOD(f->common.extack, "vlan_id key is not supported on egress");
			return -EOPNOTSUPP;
		}

		/* Forbid block with this rulei to be bound
		 * to egress in future.
		 */
		rulei->egress_bind_blocker = 1;

		if (match.mask->vlan_id != 0)
			mlxsw_sp_acl_rulei_keymask_u32(rulei,
						       MLXSW_AFK_ELEMENT_VID,
						       match.key->vlan_id,
						       match.mask->vlan_id);
		if (match.mask->vlan_priority != 0)
			mlxsw_sp_acl_rulei_keymask_u32(rulei,
						       MLXSW_AFK_ELEMENT_PCP,
						       match.key->vlan_priority,
						       match.mask->vlan_priority);
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
		mlxsw_sp_flower_parse_ipv4(rulei, f);

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS)
		mlxsw_sp_flower_parse_ipv6(rulei, f);

	err = mlxsw_sp_flower_parse_ports(mlxsw_sp, rulei, f, ip_proto);
	if (err)
		return err;
	err = mlxsw_sp_flower_parse_tcp(mlxsw_sp, rulei, f, ip_proto);
	if (err)
		return err;

	err = mlxsw_sp_flower_parse_ip(mlxsw_sp, rulei, f, n_proto_key & n_proto_mask);
	if (err)
		return err;

	return mlxsw_sp_flower_parse_actions(mlxsw_sp, block, rulei,
					     &f->rule->action,
					     f->common.extack);
}

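/* Offload a flower rule: get (or create) the ruleset for the chain,
 * create the rule, parse matches and actions into it, commit it and add
 * it to hardware. On failure the rule and the ruleset reference are
 * released.
 */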
int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_acl_block *block,
			    struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_rule_info *rulei;
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;
	int err;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (IS_ERR(ruleset))
		return PTR_ERR(ruleset);

	rule = mlxsw_sp_acl_rule_create(mlxsw_sp, ruleset, f->cookie, NULL,
					f->common.extack);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto err_rule_create;
	}

	rulei = mlxsw_sp_acl_rule_rulei(rule);
	err = mlxsw_sp_flower_parse(mlxsw_sp, block, rulei, f);
	if (err)
		goto err_flower_parse;

	err = mlxsw_sp_acl_rulei_commit(rulei);
	if (err)
		goto err_rulei_commit;

	err = mlxsw_sp_acl_rule_add(mlxsw_sp, rule);
	if (err)
		goto err_rule_add;

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return 0;

err_rule_add:
err_rulei_commit:
err_flower_parse:
	mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
err_rule_create:
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return err;
}

void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_acl_block *block,
			     struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (IS_ERR(ruleset))
		return;

	rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
	if (rule) {
		mlxsw_sp_acl_rule_del(mlxsw_sp, rule);
		mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
	}

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
}

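/* Report hardware counters (packets, bytes, last-use) for an offloaded
 * flower rule back to the TC core via flow_stats_update().
 */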
int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
			  struct mlxsw_sp_acl_block *block,
			  struct flow_cls_offload *f)
{
	enum flow_action_hw_stats used_hw_stats = FLOW_ACTION_HW_STATS_DISABLED;
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;
	u64 packets;
	u64 lastuse;
	u64 bytes;
	int err;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (WARN_ON(IS_ERR(ruleset)))
		return -EINVAL;

	rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
	if (!rule)
		return -EINVAL;

	err = mlxsw_sp_acl_rule_get_stats(mlxsw_sp, rule, &packets, &bytes,
					  &lastuse, &used_hw_stats);
	if (err)
		goto err_rule_get_stats;

	flow_stats_update(&f->stats, bytes, packets, lastuse, used_hw_stats);

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return 0;

err_rule_get_stats:
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return err;
}

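/* A flower template only declares which elements will be used; parse it
 * into a temporary rule info and take a ruleset reference that is kept
 * until the template is destroyed.
 */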
int mlxsw_sp_flower_tmplt_create(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_block *block,
				 struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule_info rulei;
	int err;

	memset(&rulei, 0, sizeof(rulei));
	err = mlxsw_sp_flower_parse(mlxsw_sp, block, &rulei, f);
	if (err)
		return err;
	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER,
					   &rulei.values.elusage);

	/* keep the reference to the ruleset */
	return PTR_ERR_OR_ZERO(ruleset);
}

void mlxsw_sp_flower_tmplt_destroy(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_acl_block *block,
				   struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (IS_ERR(ruleset))
		return;
	/* put the reference to the ruleset kept in create */
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
}