// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>

#include "spectrum.h"
#include "core_acl_flex_keys.h"

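/* Translate the flower action list into mlxsw ACL rule actions. When the
 * rule requests hardware stats, a count action is appended first so that
 * packets are counted before any terminating action takes effect.
 * Unsupported actions and mixed HW stats types are rejected with an
 * extack message.
 */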
static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_acl_block *block,
					 struct mlxsw_sp_acl_rule_info *rulei,
					 struct flow_action *flow_action,
					 struct netlink_ext_ack *extack)
{
	const struct flow_action_entry *act;
	int mirror_act_count = 0;
	int err, i;

	if (!flow_action_has_entries(flow_action))
		return 0;
	if (!flow_action_mixed_hw_stats_check(flow_action, extack))
		return -EOPNOTSUPP;

	act = flow_action_first_entry_get(flow_action);
	if (act->hw_stats == FLOW_ACTION_HW_STATS_ANY ||
	    act->hw_stats == FLOW_ACTION_HW_STATS_IMMEDIATE) {
		/* Count action is inserted first */
		err = mlxsw_sp_acl_rulei_act_count(mlxsw_sp, rulei, extack);
		if (err)
			return err;
	} else if (act->hw_stats != FLOW_ACTION_HW_STATS_DISABLED) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported action HW stats type");
		return -EOPNOTSUPP;
	}

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_ACCEPT:
			err = mlxsw_sp_acl_rulei_act_terminate(rulei);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append terminate action");
				return err;
			}
			break;
		case FLOW_ACTION_DROP: {
			bool ingress;

			if (mlxsw_sp_acl_block_is_mixed_bound(block)) {
				NL_SET_ERR_MSG_MOD(extack, "Drop action is not supported when block is bound to ingress and egress");
				return -EOPNOTSUPP;
			}
			ingress = mlxsw_sp_acl_block_is_ingress_bound(block);
			err = mlxsw_sp_acl_rulei_act_drop(rulei, ingress,
							  act->cookie, extack);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append drop action");
				return err;
			}

			/* Forbid block with this rulei to be bound
			 * to ingress/egress in future. Ingress rule is
			 * a blocker for egress and vice versa.
			 */
			rulei->egress_bind_blocker = 1;
			rulei->ingress_bind_blocker = 1;
			break;
		}
		case FLOW_ACTION_TRAP:
			err = mlxsw_sp_acl_rulei_act_trap(rulei);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append trap action");
				return err;
			}
			break;
		case FLOW_ACTION_GOTO: {
			u32 chain_index = act->chain_index;
			struct mlxsw_sp_acl_ruleset *ruleset;
			u16 group_id;

			ruleset = mlxsw_sp_acl_ruleset_lookup(mlxsw_sp, block,
							      chain_index,
							      MLXSW_SP_ACL_PROFILE_FLOWER);
			if (IS_ERR(ruleset))
				return PTR_ERR(ruleset);

			group_id = mlxsw_sp_acl_ruleset_group_id(ruleset);
			err = mlxsw_sp_acl_rulei_act_jump(rulei, group_id);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append jump action");
				return err;
			}
			break;
		}
		case FLOW_ACTION_REDIRECT: {
			struct net_device *out_dev;
			struct mlxsw_sp_fid *fid;
			u16 fid_index;

			if (mlxsw_sp_acl_block_is_egress_bound(block)) {
				NL_SET_ERR_MSG_MOD(extack, "Redirect action is not supported on egress");
				return -EOPNOTSUPP;
			}

			/* Forbid block with this rulei to be bound
			 * to egress in future.
			 */
			rulei->egress_bind_blocker = 1;

			fid = mlxsw_sp_acl_dummy_fid(mlxsw_sp);
			fid_index = mlxsw_sp_fid_index(fid);
			err = mlxsw_sp_acl_rulei_act_fid_set(mlxsw_sp, rulei,
							     fid_index, extack);
			if (err)
				return err;

			out_dev = act->dev;
			err = mlxsw_sp_acl_rulei_act_fwd(mlxsw_sp, rulei,
							 out_dev, extack);
			if (err)
				return err;
			break;
		}
		case FLOW_ACTION_MIRRED: {
			struct net_device *out_dev = act->dev;

			if (mirror_act_count++) {
				NL_SET_ERR_MSG_MOD(extack, "Multiple mirror actions per rule are not supported");
				return -EOPNOTSUPP;
			}

			err = mlxsw_sp_acl_rulei_act_mirror(mlxsw_sp, rulei,
							    block, out_dev,
							    extack);
			if (err)
				return err;
			break;
		}
		case FLOW_ACTION_VLAN_MANGLE: {
			u16 proto = be16_to_cpu(act->vlan.proto);
			u8 prio = act->vlan.prio;
			u16 vid = act->vlan.vid;

			err = mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei,
							  act->id, vid,
							  proto, prio, extack);
			if (err)
				return err;
			break;
		}
		case FLOW_ACTION_PRIORITY:
			err = mlxsw_sp_acl_rulei_act_priority(mlxsw_sp, rulei,
							      act->priority,
							      extack);
			if (err)
				return err;
			break;
		case FLOW_ACTION_MANGLE: {
			enum flow_action_mangle_base htype = act->mangle.htype;
			__be32 be_mask = (__force __be32) act->mangle.mask;
			__be32 be_val = (__force __be32) act->mangle.val;
			u32 offset = act->mangle.offset;
			u32 mask = be32_to_cpu(be_mask);
			u32 val = be32_to_cpu(be_val);

			err = mlxsw_sp_acl_rulei_act_mangle(mlxsw_sp, rulei,
							    htype, offset,
							    mask, val, extack);
			if (err)
				return err;
			break;
		}
		default:
			NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
			dev_err(mlxsw_sp->bus_info->dev, "Unsupported action\n");
			return -EOPNOTSUPP;
		}
	}
	return 0;
}

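/* Match on the ingress port carried in the META dissector key. Only an
 * exact ifindex mask is supported, and the referenced netdev must be an
 * mlxsw port on the same device as the block the rule is installed on.
 */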
static int mlxsw_sp_flower_parse_meta(struct mlxsw_sp_acl_rule_info *rulei,
				      struct flow_cls_offload *f,
				      struct mlxsw_sp_acl_block *block)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *ingress_dev;
	struct flow_match_meta match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
		return 0;

	flow_rule_match_meta(rule, &match);
	if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported ingress ifindex mask");
		return -EINVAL;
	}

	ingress_dev = __dev_get_by_index(block->net,
					 match.key->ingress_ifindex);
	if (!ingress_dev) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Can't find specified ingress port to match on");
		return -EINVAL;
	}

	if (!mlxsw_sp_port_dev_check(ingress_dev)) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Can't match on non-mlxsw ingress port");
		return -EINVAL;
	}

	mlxsw_sp_port = netdev_priv(ingress_dev);
	if (mlxsw_sp_port->mlxsw_sp != block->mlxsw_sp) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Can't match on a port from different device");
		return -EINVAL;
	}

	mlxsw_sp_acl_rulei_keymask_u32(rulei,
				       MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
				       mlxsw_sp_port->local_port,
				       0xFFFFFFFF);
	return 0;
}

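/* An IPv4 address fits into a single 32-bit flex-key element per
 * direction.
 */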
static void mlxsw_sp_flower_parse_ipv4(struct mlxsw_sp_acl_rule_info *rulei,
				       struct flow_cls_offload *f)
{
	struct flow_match_ipv4_addrs match;

	flow_rule_match_ipv4_addrs(f->rule, &match);

	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
				       (char *) &match.key->src,
				       (char *) &match.mask->src, 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31,
				       (char *) &match.key->dst,
				       (char *) &match.mask->dst, 4);
}

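/* An IPv6 address does not fit into one flex-key element, so each 128-bit
 * address is split into four 32-bit chunks, most-significant chunk first.
 */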
static void mlxsw_sp_flower_parse_ipv6(struct mlxsw_sp_acl_rule_info *rulei,
				       struct flow_cls_offload *f)
{
	struct flow_match_ipv6_addrs match;

	flow_rule_match_ipv6_addrs(f->rule, &match);

	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_96_127,
				       &match.key->src.s6_addr[0x0],
				       &match.mask->src.s6_addr[0x0], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_64_95,
				       &match.key->src.s6_addr[0x4],
				       &match.mask->src.s6_addr[0x4], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_32_63,
				       &match.key->src.s6_addr[0x8],
				       &match.mask->src.s6_addr[0x8], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
				       &match.key->src.s6_addr[0xC],
				       &match.mask->src.s6_addr[0xC], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_96_127,
				       &match.key->dst.s6_addr[0x0],
				       &match.mask->dst.s6_addr[0x0], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_64_95,
				       &match.key->dst.s6_addr[0x4],
				       &match.mask->dst.s6_addr[0x4], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_32_63,
				       &match.key->dst.s6_addr[0x8],
				       &match.mask->dst.s6_addr[0x8], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31,
				       &match.key->dst.s6_addr[0xC],
				       &match.mask->dst.s6_addr[0xC], 4);
}

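/* L4 ports are only meaningful when the rule also matches on TCP or UDP,
 * so a PORTS key with any other ip_proto is rejected.
 */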
static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_acl_rule_info *rulei,
				       struct flow_cls_offload *f,
				       u8 ip_proto)
{
	const struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_match_ports match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS))
		return 0;

	if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Only UDP and TCP keys are supported");
		dev_err(mlxsw_sp->bus_info->dev, "Only UDP and TCP keys are supported\n");
		return -EINVAL;
	}

	flow_rule_match_ports(rule, &match);
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_DST_L4_PORT,
				       ntohs(match.key->dst),
				       ntohs(match.mask->dst));
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_SRC_L4_PORT,
				       ntohs(match.key->src),
				       ntohs(match.mask->src));
	return 0;
}

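/* The dissector carries TCP flags in a 16-bit field, but bits 0x0e00 are
 * reserved in the TCP header, so matching on them is rejected rather than
 * silently ignored.
 */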
static int mlxsw_sp_flower_parse_tcp(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_rule_info *rulei,
				     struct flow_cls_offload *f,
				     u8 ip_proto)
{
	const struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_match_tcp match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP))
		return 0;

	if (ip_proto != IPPROTO_TCP) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "TCP keys supported only for TCP");
		dev_err(mlxsw_sp->bus_info->dev, "TCP keys supported only for TCP\n");
		return -EINVAL;
	}

	flow_rule_match_tcp(rule, &match);

	if (match.mask->flags & htons(0x0E00)) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "TCP flags match not supported on reserved bits");
		dev_err(mlxsw_sp->bus_info->dev, "TCP flags match not supported on reserved bits\n");
		return -EINVAL;
	}

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_TCP_FLAGS,
				       ntohs(match.key->flags),
				       ntohs(match.mask->flags));
	return 0;
}

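/* The IP dissector key carries TTL/hop-limit and TOS. TOS is split into
 * its ECN part (low two bits) and DSCP part (high six bits), which map to
 * separate flex-key elements.
 */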
static int mlxsw_sp_flower_parse_ip(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_acl_rule_info *rulei,
				    struct flow_cls_offload *f,
				    u16 n_proto)
{
	const struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_match_ip match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP))
		return 0;

	if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "IP keys supported only for IPv4/6");
		dev_err(mlxsw_sp->bus_info->dev, "IP keys supported only for IPv4/6\n");
		return -EINVAL;
	}

	flow_rule_match_ip(rule, &match);

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_TTL_,
				       match.key->ttl, match.mask->ttl);

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_ECN,
				       match.key->tos & 0x3,
				       match.mask->tos & 0x3);

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_DSCP,
				       match.key->tos >> 2,
				       match.mask->tos >> 2);

	return 0;
}

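/* Top-level match parser: reject dissector keys the device cannot
 * offload, translate each supported key into flex-key elements and
 * finally hand the action list over to mlxsw_sp_flower_parse_actions().
 */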
static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_block *block,
				 struct mlxsw_sp_acl_rule_info *rulei,
				 struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_dissector *dissector = rule->match.dissector;
	u16 n_proto_mask = 0;
	u16 n_proto_key = 0;
	u16 addr_type = 0;
	u8 ip_proto = 0;
	int err;

	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_META) |
	      BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_TCP) |
	      BIT(FLOW_DISSECTOR_KEY_IP) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN))) {
		dev_err(mlxsw_sp->bus_info->dev, "Unsupported key\n");
		NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported key");
		return -EOPNOTSUPP;
	}

	mlxsw_sp_acl_rulei_priority(rulei, f->common.prio);

	err = mlxsw_sp_flower_parse_meta(rulei, f, block);
	if (err)
		return err;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		addr_type = match.key->addr_type;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		n_proto_key = ntohs(match.key->n_proto);
		n_proto_mask = ntohs(match.mask->n_proto);

		if (n_proto_key == ETH_P_ALL) {
			n_proto_key = 0;
			n_proto_mask = 0;
		}
		mlxsw_sp_acl_rulei_keymask_u32(rulei,
					       MLXSW_AFK_ELEMENT_ETHERTYPE,
					       n_proto_key, n_proto_mask);

		ip_proto = match.key->ip_proto;
		mlxsw_sp_acl_rulei_keymask_u32(rulei,
					       MLXSW_AFK_ELEMENT_IP_PROTO,
					       match.key->ip_proto,
					       match.mask->ip_proto);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_DMAC_32_47,
					       match.key->dst,
					       match.mask->dst, 2);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_DMAC_0_31,
					       match.key->dst + 2,
					       match.mask->dst + 2, 4);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_SMAC_32_47,
					       match.key->src,
					       match.mask->src, 2);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_SMAC_0_31,
					       match.key->src + 2,
					       match.mask->src + 2, 4);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(rule, &match);
		if (mlxsw_sp_acl_block_is_egress_bound(block)) {
			NL_SET_ERR_MSG_MOD(f->common.extack, "vlan_id key is not supported on egress");
			return -EOPNOTSUPP;
		}

		/* Forbid block with this rulei to be bound
		 * to egress in future.
		 */
		rulei->egress_bind_blocker = 1;

		if (match.mask->vlan_id != 0)
			mlxsw_sp_acl_rulei_keymask_u32(rulei,
						       MLXSW_AFK_ELEMENT_VID,
						       match.key->vlan_id,
						       match.mask->vlan_id);
		if (match.mask->vlan_priority != 0)
			mlxsw_sp_acl_rulei_keymask_u32(rulei,
						       MLXSW_AFK_ELEMENT_PCP,
						       match.key->vlan_priority,
						       match.mask->vlan_priority);
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
		mlxsw_sp_flower_parse_ipv4(rulei, f);

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS)
		mlxsw_sp_flower_parse_ipv6(rulei, f);

	err = mlxsw_sp_flower_parse_ports(mlxsw_sp, rulei, f, ip_proto);
	if (err)
		return err;
	err = mlxsw_sp_flower_parse_tcp(mlxsw_sp, rulei, f, ip_proto);
	if (err)
		return err;

	err = mlxsw_sp_flower_parse_ip(mlxsw_sp, rulei, f,
				       n_proto_key & n_proto_mask);
	if (err)
		return err;

	return mlxsw_sp_flower_parse_actions(mlxsw_sp, block, rulei,
					     &f->rule->action,
					     f->common.extack);
}

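/* FLOW_CLS_REPLACE handler: get the ruleset for the chain, create a rule
 * keyed by the flower cookie, parse matches and actions into it and
 * commit it to hardware. The ruleset reference taken here is dropped
 * again before returning; the rule holds its own reference.
 *
 * A rule typically reaches this path via tc flower, e.g. (hypothetical
 * port name):
 *   tc filter add dev swp1 ingress protocol ip flower skip_sw \
 *      dst_ip 192.0.2.1 action drop
 */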
int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_acl_block *block,
			    struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_rule_info *rulei;
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;
	int err;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (IS_ERR(ruleset))
		return PTR_ERR(ruleset);

	rule = mlxsw_sp_acl_rule_create(mlxsw_sp, ruleset, f->cookie, NULL,
					f->common.extack);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto err_rule_create;
	}

	rulei = mlxsw_sp_acl_rule_rulei(rule);
	err = mlxsw_sp_flower_parse(mlxsw_sp, block, rulei, f);
	if (err)
		goto err_flower_parse;

	err = mlxsw_sp_acl_rulei_commit(rulei);
	if (err)
		goto err_rulei_commit;

	err = mlxsw_sp_acl_rule_add(mlxsw_sp, rule);
	if (err)
		goto err_rule_add;

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return 0;

err_rule_add:
err_rulei_commit:
err_flower_parse:
	mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
err_rule_create:
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return err;
}

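/* FLOW_CLS_DESTROY handler: look the rule up by its flower cookie and
 * remove it from hardware before destroying it.
 */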
void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_acl_block *block,
			     struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (IS_ERR(ruleset))
		return;

	rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
	if (rule) {
		mlxsw_sp_acl_rule_del(mlxsw_sp, rule);
		mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
	}

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
}

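/* FLOW_CLS_STATS handler: read packet and byte counters and the last-use
 * timestamp for the rule and report them through flow_stats_update().
 */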
int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
			  struct mlxsw_sp_acl_block *block,
			  struct flow_cls_offload *f)
{
	enum flow_action_hw_stats used_hw_stats = FLOW_ACTION_HW_STATS_DISABLED;
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;
	u64 packets;
	u64 lastuse;
	u64 bytes;
	int err;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (WARN_ON(IS_ERR(ruleset)))
		return -EINVAL;

	rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
	if (!rule)
		return -EINVAL;

	err = mlxsw_sp_acl_rule_get_stats(mlxsw_sp, rule, &packets, &bytes,
					  &lastuse, &used_hw_stats);
	if (err)
		goto err_rule_get_stats;

	flow_stats_update(&f->stats, bytes, packets, lastuse, used_hw_stats);

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return 0;

err_rule_get_stats:
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return err;
}

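/* FLOW_CLS_TMPLT_CREATE handler: parse the template into a scratch rule
 * info only to learn which flex-key elements it uses, then take a ruleset
 * reference matching that element usage. The reference is deliberately
 * kept until the template is destroyed.
 */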
int mlxsw_sp_flower_tmplt_create(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_block *block,
				 struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule_info rulei;
	int err;

	memset(&rulei, 0, sizeof(rulei));
	err = mlxsw_sp_flower_parse(mlxsw_sp, block, &rulei, f);
	if (err)
		return err;
	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER,
					   &rulei.values.elusage);

	/* keep the reference to the ruleset */
	return PTR_ERR_OR_ZERO(ruleset);
}

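/* FLOW_CLS_TMPLT_DESTROY handler: drop both the reference taken by the
 * lookup here and the one kept by mlxsw_sp_flower_tmplt_create().
 */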
void mlxsw_sp_flower_tmplt_destroy(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_acl_block *block,
				   struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (IS_ERR(ruleset))
		return;
	/* put the reference to the ruleset kept in create */
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
}