// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
#include <trace/events/mlxsw.h>

#include "reg.h"
#include "core.h"
#include "resources.h"
#include "spectrum.h"
#include "spectrum_acl_tcam.h"
#include "core_acl_flex_keys.h"
size_t mlxsw_sp_acl_tcam_priv_size(struct mlxsw_sp *mlxsw_sp)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	return ops->priv_size;
}
#define MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_DFLT 5000 /* ms */
#define MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_MIN 3000 /* ms */
#define MLXSW_SP_ACL_TCAM_VREGION_REHASH_CREDITS 100 /* number of entries */
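/* A vregion is rehashed periodically, every vregion_rehash_intrvl
 * milliseconds. Each rehash work iteration may migrate at most
 * MLXSW_SP_ACL_TCAM_VREGION_REHASH_CREDITS entries before it has to
 * reschedule itself, so a single rehash cannot monopolize the workqueue.
 */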
int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_acl_tcam *tcam)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	u64 max_tcam_regions;
	u64 max_regions;
	u64 max_groups;
	size_t alloc_size;
	int err;

	mutex_init(&tcam->lock);
	tcam->vregion_rehash_intrvl =
			MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_DFLT;
	INIT_LIST_HEAD(&tcam->vregion_list);

	max_tcam_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core,
					      ACL_MAX_TCAM_REGIONS);
	max_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_REGIONS);

	/* Use 1:1 mapping between ACL region and TCAM region */
	if (max_tcam_regions < max_regions)
		max_regions = max_tcam_regions;

	alloc_size = sizeof(tcam->used_regions[0]) * BITS_TO_LONGS(max_regions);
	tcam->used_regions = kzalloc(alloc_size, GFP_KERNEL);
	if (!tcam->used_regions)
		return -ENOMEM;
	tcam->max_regions = max_regions;

	max_groups = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_GROUPS);
	alloc_size = sizeof(tcam->used_groups[0]) * BITS_TO_LONGS(max_groups);
	tcam->used_groups = kzalloc(alloc_size, GFP_KERNEL);
	if (!tcam->used_groups) {
		err = -ENOMEM;
		goto err_alloc_used_groups;
	}
	tcam->max_groups = max_groups;
	tcam->max_group_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
						  ACL_MAX_GROUP_SIZE);

	err = ops->init(mlxsw_sp, tcam->priv, tcam);
	if (err)
		goto err_tcam_init;

	return 0;

err_tcam_init:
	kfree(tcam->used_groups);
err_alloc_used_groups:
	kfree(tcam->used_regions);
	return err;
}
void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_acl_tcam *tcam)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	mutex_destroy(&tcam->lock);
	ops->fini(mlxsw_sp, tcam->priv);
	kfree(tcam->used_groups);
	kfree(tcam->used_regions);
}
int mlxsw_sp_acl_tcam_priority_get(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_acl_rule_info *rulei,
				   u32 *priority, bool fillup_priority)
{
	u64 max_priority;

	if (!fillup_priority) {
		*priority = 0;
		return 0;
	}

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, KVD_SIZE))
		return -EIO;

	/* Priority range is 1..cap_kvd_size-1. */
	max_priority = MLXSW_CORE_RES_GET(mlxsw_sp->core, KVD_SIZE) - 1;
	if (rulei->priority >= max_priority)
		return -EINVAL;

	/* Unlike in TC, in HW, higher number means higher priority. */
	*priority = max_priority - rulei->priority;
	return 0;
}
static int mlxsw_sp_acl_tcam_region_id_get(struct mlxsw_sp_acl_tcam *tcam,
					   u16 *p_id)
{
	u16 id;

	id = find_first_zero_bit(tcam->used_regions, tcam->max_regions);
	if (id < tcam->max_regions) {
		__set_bit(id, tcam->used_regions);
		*p_id = id;
		return 0;
	}
	return -ENOBUFS;
}
static void mlxsw_sp_acl_tcam_region_id_put(struct mlxsw_sp_acl_tcam *tcam,
					    u16 id)
{
	__clear_bit(id, tcam->used_regions);
}
static int mlxsw_sp_acl_tcam_group_id_get(struct mlxsw_sp_acl_tcam *tcam,
					  u16 *p_id)
{
	u16 id;

	id = find_first_zero_bit(tcam->used_groups, tcam->max_groups);
	if (id < tcam->max_groups) {
		__set_bit(id, tcam->used_groups);
		*p_id = id;
		return 0;
	}
	return -ENOBUFS;
}
static void mlxsw_sp_acl_tcam_group_id_put(struct mlxsw_sp_acl_tcam *tcam,
					   u16 id)
{
	__clear_bit(id, tcam->used_groups);
}
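/* The objects below come in two flavors: the "virtual" ones (vgroup, vregion,
 * vchunk, ventry) track what the user asked for, while the non-virtual ones
 * (group, region, chunk, entry) mirror what is currently programmed in the
 * hardware TCAM. During rehash a vregion temporarily owns two regions
 * (region and region2) and its ventries are migrated from one to the other.
 */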
struct mlxsw_sp_acl_tcam_pattern {
	const enum mlxsw_afk_element *elements;
	unsigned int elements_count;
};
struct mlxsw_sp_acl_tcam_group {
	struct mlxsw_sp_acl_tcam *tcam;
	u16 id;
	struct mutex lock; /* guards region list updates */
	struct list_head region_list;
	unsigned int region_count;
};
struct mlxsw_sp_acl_tcam_vgroup {
	struct mlxsw_sp_acl_tcam_group group;
	struct list_head vregion_list;
	struct rhashtable vchunk_ht;
	const struct mlxsw_sp_acl_tcam_pattern *patterns;
	unsigned int patterns_count;
	bool tmplt_elusage_set;
	struct mlxsw_afk_element_usage tmplt_elusage;
	bool vregion_rehash_enabled;
};
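/* State of an in-progress rehash of a single vregion. A non-NULL hints_priv
 * means a rehash was started and not yet finished; current_vchunk,
 * start_ventry and stop_ventry remember where an interrupted migration
 * (out of credits, or rolling back) should resume.
 */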
struct mlxsw_sp_acl_tcam_rehash_ctx {
	void *hints_priv;
	bool this_is_rollback;
	struct mlxsw_sp_acl_tcam_vchunk *current_vchunk; /* vchunk being
							  * currently migrated.
							  */
	struct mlxsw_sp_acl_tcam_ventry *start_ventry; /* ventry to start
							* migration from within
							* the vchunk being
							* currently migrated.
							*/
	struct mlxsw_sp_acl_tcam_ventry *stop_ventry; /* ventry to stop
						       * migration at within the
						       * vchunk being currently
						       * migrated.
						       */
};
struct mlxsw_sp_acl_tcam_vregion {
	struct mutex lock; /* Protects consistency of region, region2 pointers
			    * and vchunk_list.
			    */
	struct mlxsw_sp_acl_tcam_region *region;
	struct mlxsw_sp_acl_tcam_region *region2; /* Used during migration */
	struct list_head list; /* Member of a TCAM group */
	struct list_head tlist; /* Member of a TCAM */
	struct list_head vchunk_list; /* List of vchunks under this vregion */
	struct mlxsw_afk_key_info *key_info;
	struct mlxsw_sp_acl_tcam *tcam;
	struct mlxsw_sp_acl_tcam_vgroup *vgroup;
	struct {
		struct delayed_work dw;
		struct mlxsw_sp_acl_tcam_rehash_ctx ctx;
	} rehash;
	struct mlxsw_sp *mlxsw_sp;
	unsigned int ref_count;
};
struct mlxsw_sp_acl_tcam_vchunk;
struct mlxsw_sp_acl_tcam_chunk {
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;
	struct mlxsw_sp_acl_tcam_region *region;
	unsigned long priv[];
	/* priv has to be always the last item */
};
struct mlxsw_sp_acl_tcam_vchunk {
	struct mlxsw_sp_acl_tcam_chunk *chunk;
	struct mlxsw_sp_acl_tcam_chunk *chunk2; /* Used during migration */
	struct list_head list; /* Member of a TCAM vregion */
	struct rhash_head ht_node; /* Member of a chunk HT */
	struct list_head ventry_list;
	unsigned int priority; /* Priority within the vregion and group */
	struct mlxsw_sp_acl_tcam_vgroup *vgroup;
	struct mlxsw_sp_acl_tcam_vregion *vregion;
	unsigned int ref_count;
};
struct mlxsw_sp_acl_tcam_entry {
	struct mlxsw_sp_acl_tcam_ventry *ventry;
	struct mlxsw_sp_acl_tcam_chunk *chunk;
	unsigned long priv[];
	/* priv has to be always the last item */
};
struct mlxsw_sp_acl_tcam_ventry {
	struct mlxsw_sp_acl_tcam_entry *entry;
	struct list_head list; /* Member of a TCAM vchunk */
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;
	struct mlxsw_sp_acl_rule_info *rulei;
};
static const struct rhashtable_params mlxsw_sp_acl_tcam_vchunk_ht_params = {
	.key_len = sizeof(unsigned int),
	.key_offset = offsetof(struct mlxsw_sp_acl_tcam_vchunk, priority),
	.head_offset = offsetof(struct mlxsw_sp_acl_tcam_vchunk, ht_node),
	.automatic_shrinking = true,
};
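/* Write the ordered list of regions of a group to the PAGT register. When two
 * consecutive regions belong to the same vregion (which happens while the
 * vregion is being migrated during rehash), the first one is packed with the
 * "multi" flag set so both regions are looked up.
 */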
static int mlxsw_sp_acl_tcam_group_update(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_acl_tcam_group *group)
{
	struct mlxsw_sp_acl_tcam_region *region;
	char pagt_pl[MLXSW_REG_PAGT_LEN];
	int acl_index = 0;

	mlxsw_reg_pagt_pack(pagt_pl, group->id);
	list_for_each_entry(region, &group->region_list, list) {
		bool multi = false;

		/* Check if the next entry in the list has the same vregion. */
		if (region->list.next != &group->region_list &&
		    list_next_entry(region, list)->vregion == region->vregion)
			multi = true;
		mlxsw_reg_pagt_acl_id_pack(pagt_pl, acl_index++,
					   region->id, multi);
	}
	mlxsw_reg_pagt_size_set(pagt_pl, acl_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pagt), pagt_pl);
}
static int
mlxsw_sp_acl_tcam_group_add(struct mlxsw_sp_acl_tcam *tcam,
			    struct mlxsw_sp_acl_tcam_group *group)
{
	int err;

	group->tcam = tcam;
	mutex_init(&group->lock);
	INIT_LIST_HEAD(&group->region_list);

	err = mlxsw_sp_acl_tcam_group_id_get(tcam, &group->id);
	if (err)
		return err;

	return 0;
}
static void mlxsw_sp_acl_tcam_group_del(struct mlxsw_sp_acl_tcam_group *group)
{
	struct mlxsw_sp_acl_tcam *tcam = group->tcam;

	mutex_destroy(&group->lock);
	mlxsw_sp_acl_tcam_group_id_put(tcam, group->id);
	WARN_ON(!list_empty(&group->region_list));
}
static int
mlxsw_sp_acl_tcam_vgroup_add(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_acl_tcam *tcam,
			     struct mlxsw_sp_acl_tcam_vgroup *vgroup,
			     const struct mlxsw_sp_acl_tcam_pattern *patterns,
			     unsigned int patterns_count,
			     struct mlxsw_afk_element_usage *tmplt_elusage,
			     bool vregion_rehash_enabled)
{
	int err;

	vgroup->patterns = patterns;
	vgroup->patterns_count = patterns_count;
	vgroup->vregion_rehash_enabled = vregion_rehash_enabled;

	if (tmplt_elusage) {
		vgroup->tmplt_elusage_set = true;
		memcpy(&vgroup->tmplt_elusage, tmplt_elusage,
		       sizeof(vgroup->tmplt_elusage));
	}
	INIT_LIST_HEAD(&vgroup->vregion_list);

	err = mlxsw_sp_acl_tcam_group_add(tcam, &vgroup->group);
	if (err)
		return err;

	err = rhashtable_init(&vgroup->vchunk_ht,
			      &mlxsw_sp_acl_tcam_vchunk_ht_params);
	if (err)
		goto err_rhashtable_init;

	return 0;

err_rhashtable_init:
	mlxsw_sp_acl_tcam_group_del(&vgroup->group);
	return err;
}
static void
mlxsw_sp_acl_tcam_vgroup_del(struct mlxsw_sp_acl_tcam_vgroup *vgroup)
{
	rhashtable_destroy(&vgroup->vchunk_ht);
	mlxsw_sp_acl_tcam_group_del(&vgroup->group);
	WARN_ON(!list_empty(&vgroup->vregion_list));
}
static int
mlxsw_sp_acl_tcam_group_bind(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_acl_tcam_group *group,
			     struct mlxsw_sp_port *mlxsw_sp_port,
			     bool ingress)
{
	char ppbt_pl[MLXSW_REG_PPBT_LEN];

	mlxsw_reg_ppbt_pack(ppbt_pl, ingress ? MLXSW_REG_PXBT_E_IACL :
					       MLXSW_REG_PXBT_E_EACL,
			    MLXSW_REG_PXBT_OP_BIND, mlxsw_sp_port->local_port,
			    group->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl);
}
static void
mlxsw_sp_acl_tcam_group_unbind(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_tcam_group *group,
			       struct mlxsw_sp_port *mlxsw_sp_port,
			       bool ingress)
{
	char ppbt_pl[MLXSW_REG_PPBT_LEN];

	mlxsw_reg_ppbt_pack(ppbt_pl, ingress ? MLXSW_REG_PXBT_E_IACL :
					       MLXSW_REG_PXBT_E_EACL,
			    MLXSW_REG_PXBT_OP_UNBIND, mlxsw_sp_port->local_port,
			    group->id);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl);
}
static u16
mlxsw_sp_acl_tcam_group_id(struct mlxsw_sp_acl_tcam_group *group)
{
	return group->id;
}
static unsigned int
mlxsw_sp_acl_tcam_vregion_prio(struct mlxsw_sp_acl_tcam_vregion *vregion)
{
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;

	if (list_empty(&vregion->vchunk_list))
		return 0;
	/* As a priority of a vregion, return priority of the first vchunk */
	vchunk = list_first_entry(&vregion->vchunk_list,
				  typeof(*vchunk), list);
	return vchunk->priority;
}
static unsigned int
mlxsw_sp_acl_tcam_vregion_max_prio(struct mlxsw_sp_acl_tcam_vregion *vregion)
{
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;

	if (list_empty(&vregion->vchunk_list))
		return 0;
	vchunk = list_last_entry(&vregion->vchunk_list,
				 typeof(*vchunk), list);
	return vchunk->priority;
}
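/* Regions within a group and vregions within a vgroup are kept sorted by the
 * priority of their first vchunk; the two helpers above return the priority
 * of the first and the last vchunk of a vregion, and the attach functions
 * below use them to find the insertion point.
 */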
static int
mlxsw_sp_acl_tcam_group_region_attach(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_acl_tcam_group *group,
				      struct mlxsw_sp_acl_tcam_region *region,
				      unsigned int priority,
				      struct mlxsw_sp_acl_tcam_region *next_region)
{
	struct mlxsw_sp_acl_tcam_region *region2;
	struct list_head *pos;
	int err;

	mutex_lock(&group->lock);
	if (group->region_count == group->tcam->max_group_size) {
		err = -ENOBUFS;
		goto err_region_count_check;
	}

	if (next_region) {
		/* If the next region is defined, place the new one
		 * before it. The next one is a sibling.
		 */
		pos = &next_region->list;
	} else {
		/* Position the region inside the list according to priority */
		list_for_each(pos, &group->region_list) {
			region2 = list_entry(pos, typeof(*region2), list);
			if (mlxsw_sp_acl_tcam_vregion_prio(region2->vregion) >
			    priority)
				break;
		}
	}
	list_add_tail(&region->list, pos);
	region->group = group;

	err = mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
	if (err)
		goto err_group_update;

	group->region_count++;
	mutex_unlock(&group->lock);
	return 0;

err_group_update:
	list_del(&region->list);
err_region_count_check:
	mutex_unlock(&group->lock);
	return err;
}
static void
mlxsw_sp_acl_tcam_group_region_detach(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_acl_tcam_region *region)
{
	struct mlxsw_sp_acl_tcam_group *group = region->group;

	mutex_lock(&group->lock);
	list_del(&region->list);
	group->region_count--;
	mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
	mutex_unlock(&group->lock);
}
static int
mlxsw_sp_acl_tcam_vgroup_vregion_attach(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_acl_tcam_vgroup *vgroup,
					struct mlxsw_sp_acl_tcam_vregion *vregion,
					unsigned int priority)
{
	struct mlxsw_sp_acl_tcam_vregion *vregion2;
	struct list_head *pos;
	int err;

	/* Position the vregion inside the list according to priority */
	list_for_each(pos, &vgroup->vregion_list) {
		vregion2 = list_entry(pos, typeof(*vregion2), list);
		if (mlxsw_sp_acl_tcam_vregion_prio(vregion2) > priority)
			break;
	}
	list_add_tail(&vregion->list, pos);

	err = mlxsw_sp_acl_tcam_group_region_attach(mlxsw_sp, &vgroup->group,
						    vregion->region,
						    priority, NULL);
	if (err)
		goto err_region_attach;

	return 0;

err_region_attach:
	list_del(&vregion->list);
	return err;
}
static void
mlxsw_sp_acl_tcam_vgroup_vregion_detach(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_acl_tcam_vregion *vregion)
{
	list_del(&vregion->list);
	if (vregion->region2)
		mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp,
						      vregion->region2);
	mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp, vregion->region);
}
static struct mlxsw_sp_acl_tcam_vregion *
mlxsw_sp_acl_tcam_vgroup_vregion_find(struct mlxsw_sp_acl_tcam_vgroup *vgroup,
				      unsigned int priority,
				      struct mlxsw_afk_element_usage *elusage,
				      bool *p_need_split)
{
	struct mlxsw_sp_acl_tcam_vregion *vregion, *vregion2;
	struct list_head *pos;
	bool issubset;

	list_for_each(pos, &vgroup->vregion_list) {
		vregion = list_entry(pos, typeof(*vregion), list);

		/* First, check if the requested priority does not rather belong
		 * under some of the next vregions.
		 */
		if (pos->next != &vgroup->vregion_list) { /* not last */
			vregion2 = list_entry(pos->next, typeof(*vregion2),
					      list);
			if (priority >=
			    mlxsw_sp_acl_tcam_vregion_prio(vregion2))
				continue;
		}

		issubset = mlxsw_afk_key_info_subset(vregion->key_info,
						     elusage);

		/* If requested element usage would not fit and the priority
		 * is lower than the currently inspected vregion we cannot
		 * use this region, so return NULL to indicate new vregion has
		 * to be created.
		 */
		if (!issubset &&
		    priority < mlxsw_sp_acl_tcam_vregion_prio(vregion))
			return NULL;

		/* If requested element usage would not fit and the priority
		 * is higher than the currently inspected vregion we cannot
		 * use this vregion. There is still some hope that the next
		 * vregion would be the fit. So let it be processed and
		 * eventually break at the check right above this.
		 */
		if (!issubset &&
		    priority > mlxsw_sp_acl_tcam_vregion_max_prio(vregion))
			continue;

		/* Indicate if the vregion needs to be split in order to add
		 * the requested priority. Split is needed when requested
		 * element usage won't fit into the found vregion.
		 */
		*p_need_split = !issubset;
		return vregion;
	}
	return NULL; /* New vregion has to be created. */
}
static void
mlxsw_sp_acl_tcam_vgroup_use_patterns(struct mlxsw_sp_acl_tcam_vgroup *vgroup,
				      struct mlxsw_afk_element_usage *elusage,
				      struct mlxsw_afk_element_usage *out)
{
	const struct mlxsw_sp_acl_tcam_pattern *pattern;
	int i;

	/* In case the template is set, we don't have to look up the pattern
	 * and just use the template.
	 */
	if (vgroup->tmplt_elusage_set) {
		memcpy(out, &vgroup->tmplt_elusage, sizeof(*out));
		WARN_ON(!mlxsw_afk_element_usage_subset(elusage, out));
		return;
	}

	for (i = 0; i < vgroup->patterns_count; i++) {
		pattern = &vgroup->patterns[i];
		mlxsw_afk_element_usage_fill(out, pattern->elements,
					     pattern->elements_count);
		if (mlxsw_afk_element_usage_subset(elusage, out))
			return;
	}
	memcpy(out, elusage, sizeof(*out));
}
static int
mlxsw_sp_acl_tcam_region_alloc(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_tcam_region *region)
{
	struct mlxsw_afk_key_info *key_info = region->key_info;
	char ptar_pl[MLXSW_REG_PTAR_LEN];
	unsigned int encodings_count;
	int i;
	int err;

	mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_ALLOC,
			    region->key_type,
			    MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT,
			    region->id, region->tcam_region_info);
	encodings_count = mlxsw_afk_key_info_blocks_count_get(key_info);
	for (i = 0; i < encodings_count; i++) {
		u16 encoding;

		encoding = mlxsw_afk_key_info_block_encoding_get(key_info, i);
		mlxsw_reg_ptar_key_id_pack(ptar_pl, i, encoding);
	}
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
	if (err)
		return err;
	mlxsw_reg_ptar_unpack(ptar_pl, region->tcam_region_info);
	return 0;
}
static void
mlxsw_sp_acl_tcam_region_free(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_tcam_region *region)
{
	char ptar_pl[MLXSW_REG_PTAR_LEN];

	mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_FREE,
			    region->key_type, 0, region->id,
			    region->tcam_region_info);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
}
static int
mlxsw_sp_acl_tcam_region_enable(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_tcam_region *region)
{
	char pacl_pl[MLXSW_REG_PACL_LEN];

	mlxsw_reg_pacl_pack(pacl_pl, region->id, true,
			    region->tcam_region_info);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pacl), pacl_pl);
}
static void
mlxsw_sp_acl_tcam_region_disable(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_region *region)
{
	char pacl_pl[MLXSW_REG_PACL_LEN];

	mlxsw_reg_pacl_pack(pacl_pl, region->id, false,
			    region->tcam_region_info);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pacl), pacl_pl);
}
static struct mlxsw_sp_acl_tcam_region *
mlxsw_sp_acl_tcam_region_create(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_tcam *tcam,
				struct mlxsw_sp_acl_tcam_vregion *vregion,
				void *hints_priv)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	struct mlxsw_sp_acl_tcam_region *region;
	int err;

	region = kzalloc(sizeof(*region) + ops->region_priv_size, GFP_KERNEL);
	if (!region)
		return ERR_PTR(-ENOMEM);
	region->mlxsw_sp = mlxsw_sp;
	region->vregion = vregion;
	region->key_info = vregion->key_info;

	err = mlxsw_sp_acl_tcam_region_id_get(tcam, &region->id);
	if (err)
		goto err_region_id_get;

	err = ops->region_associate(mlxsw_sp, region);
	if (err)
		goto err_tcam_region_associate;

	region->key_type = ops->key_type;
	err = mlxsw_sp_acl_tcam_region_alloc(mlxsw_sp, region);
	if (err)
		goto err_tcam_region_alloc;

	err = mlxsw_sp_acl_tcam_region_enable(mlxsw_sp, region);
	if (err)
		goto err_tcam_region_enable;

	err = ops->region_init(mlxsw_sp, region->priv, tcam->priv,
			       region, hints_priv);
	if (err)
		goto err_tcam_region_init;

	return region;

err_tcam_region_init:
	mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
err_tcam_region_enable:
	mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
err_tcam_region_alloc:
err_tcam_region_associate:
	mlxsw_sp_acl_tcam_region_id_put(tcam, region->id);
err_region_id_get:
	kfree(region);
	return ERR_PTR(err);
}
static void
mlxsw_sp_acl_tcam_region_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_region *region)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	ops->region_fini(mlxsw_sp, region->priv);
	mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
	mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
	mlxsw_sp_acl_tcam_region_id_put(region->group->tcam,
					region->id);
	kfree(region);
}
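/* Periodic rehash: each vregion that supports it owns a delayed work which
 * re-creates the backing region using fresh placement hints from the
 * low-level ops and migrates all entries over, a limited number of entries
 * per work iteration.
 */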
static void
mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(struct mlxsw_sp_acl_tcam_vregion *vregion)
{
	unsigned long interval = vregion->tcam->vregion_rehash_intrvl;

	if (!interval)
		return;
	mlxsw_core_schedule_dw(&vregion->rehash.dw,
			       msecs_to_jiffies(interval));
}
static void
mlxsw_sp_acl_tcam_vregion_rehash(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_vregion *vregion,
				 int *credits);
static void mlxsw_sp_acl_tcam_vregion_rehash_work(struct work_struct *work)
{
	struct mlxsw_sp_acl_tcam_vregion *vregion =
		container_of(work, struct mlxsw_sp_acl_tcam_vregion,
			     rehash.dw.work);
	int credits = MLXSW_SP_ACL_TCAM_VREGION_REHASH_CREDITS;

	mlxsw_sp_acl_tcam_vregion_rehash(vregion->mlxsw_sp, vregion, &credits);
	if (credits < 0)
		/* Rehash gone out of credits so it was interrupted.
		 * Schedule the work as soon as possible to continue.
		 */
		mlxsw_core_schedule_dw(&vregion->rehash.dw, 0);
	else
		mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(vregion);
}
static void
mlxsw_sp_acl_tcam_rehash_ctx_vchunk_changed(struct mlxsw_sp_acl_tcam_vchunk *vchunk)
{
	struct mlxsw_sp_acl_tcam_vregion *vregion = vchunk->vregion;

	/* If a rule was added or deleted from vchunk which is currently
	 * under rehash migration, we have to reset the ventry pointers
	 * to make sure all rules are properly migrated.
	 */
	if (vregion->rehash.ctx.current_vchunk == vchunk) {
		vregion->rehash.ctx.start_ventry = NULL;
		vregion->rehash.ctx.stop_ventry = NULL;
	}
}
static void
mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(struct mlxsw_sp_acl_tcam_vregion *vregion)
{
	/* If a chunk was added or deleted from vregion we have to reset
	 * the current chunk pointer to make sure all chunks
	 * are properly migrated.
	 */
	vregion->rehash.ctx.current_vchunk = NULL;
}
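/* A vregion is created on demand for the first vchunk that needs it and is
 * reference counted; it owns the key_info, the backing region(s) and the
 * optional rehash delayed work.
 */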
static struct mlxsw_sp_acl_tcam_vregion *
mlxsw_sp_acl_tcam_vregion_create(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_vgroup *vgroup,
				 unsigned int priority,
				 struct mlxsw_afk_element_usage *elusage)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
	struct mlxsw_sp_acl_tcam *tcam = vgroup->group.tcam;
	struct mlxsw_sp_acl_tcam_vregion *vregion;
	int err;

	vregion = kzalloc(sizeof(*vregion), GFP_KERNEL);
	if (!vregion)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&vregion->vchunk_list);
	mutex_init(&vregion->lock);
	vregion->tcam = tcam;
	vregion->mlxsw_sp = mlxsw_sp;
	vregion->vgroup = vgroup;
	vregion->ref_count = 1;

	vregion->key_info = mlxsw_afk_key_info_get(afk, elusage);
	if (IS_ERR(vregion->key_info)) {
		err = PTR_ERR(vregion->key_info);
		goto err_key_info_get;
	}

	vregion->region = mlxsw_sp_acl_tcam_region_create(mlxsw_sp, tcam,
							  vregion, NULL);
	if (IS_ERR(vregion->region)) {
		err = PTR_ERR(vregion->region);
		goto err_region_create;
	}

	err = mlxsw_sp_acl_tcam_vgroup_vregion_attach(mlxsw_sp, vgroup, vregion,
						      priority);
	if (err)
		goto err_vgroup_vregion_attach;

	if (vgroup->vregion_rehash_enabled && ops->region_rehash_hints_get) {
		/* Create the delayed work for vregion periodic rehash */
		INIT_DELAYED_WORK(&vregion->rehash.dw,
				  mlxsw_sp_acl_tcam_vregion_rehash_work);
		mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(vregion);
		mutex_lock(&tcam->lock);
		list_add_tail(&vregion->tlist, &tcam->vregion_list);
		mutex_unlock(&tcam->lock);
	}

	return vregion;

err_vgroup_vregion_attach:
	mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, vregion->region);
err_region_create:
	mlxsw_afk_key_info_put(vregion->key_info);
err_key_info_get:
	kfree(vregion);
	return ERR_PTR(err);
}
static void
mlxsw_sp_acl_tcam_vregion_destroy(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_acl_tcam_vregion *vregion)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	struct mlxsw_sp_acl_tcam_vgroup *vgroup = vregion->vgroup;
	struct mlxsw_sp_acl_tcam *tcam = vregion->tcam;

	if (vgroup->vregion_rehash_enabled && ops->region_rehash_hints_get) {
		mutex_lock(&tcam->lock);
		list_del(&vregion->tlist);
		mutex_unlock(&tcam->lock);
		cancel_delayed_work_sync(&vregion->rehash.dw);
	}
	mlxsw_sp_acl_tcam_vgroup_vregion_detach(mlxsw_sp, vregion);
	if (vregion->region2)
		mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, vregion->region2);
	mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, vregion->region);
	mlxsw_afk_key_info_put(vregion->key_info);
	mutex_destroy(&vregion->lock);
	kfree(vregion);
}
u32 mlxsw_sp_acl_tcam_vregion_rehash_intrvl_get(struct mlxsw_sp *mlxsw_sp,
						struct mlxsw_sp_acl_tcam *tcam)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	u32 vregion_rehash_intrvl;

	if (WARN_ON(!ops->region_rehash_hints_get))
		return 0;
	vregion_rehash_intrvl = tcam->vregion_rehash_intrvl;
	return vregion_rehash_intrvl;
}
int mlxsw_sp_acl_tcam_vregion_rehash_intrvl_set(struct mlxsw_sp *mlxsw_sp,
						struct mlxsw_sp_acl_tcam *tcam,
						u32 val)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	struct mlxsw_sp_acl_tcam_vregion *vregion;

	if (val < MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_MIN && val)
		return -EINVAL;
	if (WARN_ON(!ops->region_rehash_hints_get))
		return -EOPNOTSUPP;
	tcam->vregion_rehash_intrvl = val;
	mutex_lock(&tcam->lock);
	list_for_each_entry(vregion, &tcam->vregion_list, tlist) {
		if (val)
			mlxsw_core_schedule_dw(&vregion->rehash.dw, 0);
		else
			cancel_delayed_work_sync(&vregion->rehash.dw);
	}
	mutex_unlock(&tcam->lock);
	return 0;
}
static struct mlxsw_sp_acl_tcam_vregion *
mlxsw_sp_acl_tcam_vregion_get(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_tcam_vgroup *vgroup,
			      unsigned int priority,
			      struct mlxsw_afk_element_usage *elusage)
{
	struct mlxsw_afk_element_usage vregion_elusage;
	struct mlxsw_sp_acl_tcam_vregion *vregion;
	bool need_split;

	vregion = mlxsw_sp_acl_tcam_vgroup_vregion_find(vgroup, priority,
							elusage, &need_split);
	if (vregion) {
		if (need_split) {
			/* According to priority, new vchunk should belong to
			 * an existing vregion. However, this vchunk needs
			 * elements that vregion does not contain. We need
			 * to split the existing vregion into two and create
			 * a new vregion for the new vchunk in between.
			 * This is not supported now.
			 */
			return ERR_PTR(-EOPNOTSUPP);
		}
		vregion->ref_count++;
		return vregion;
	}

	mlxsw_sp_acl_tcam_vgroup_use_patterns(vgroup, elusage,
					      &vregion_elusage);

	return mlxsw_sp_acl_tcam_vregion_create(mlxsw_sp, vgroup, priority,
						&vregion_elusage);
}
static void
mlxsw_sp_acl_tcam_vregion_put(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_tcam_vregion *vregion)
{
	if (--vregion->ref_count)
		return;
	mlxsw_sp_acl_tcam_vregion_destroy(mlxsw_sp, vregion);
}
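/* A chunk is the per-region instantiation of a vchunk: it carries the
 * low-level ops private data for the vchunk's priority within one particular
 * region. During migration a vchunk temporarily has two chunks (chunk and
 * chunk2).
 */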
static struct mlxsw_sp_acl_tcam_chunk *
mlxsw_sp_acl_tcam_chunk_create(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_tcam_vchunk *vchunk,
			       struct mlxsw_sp_acl_tcam_region *region)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	struct mlxsw_sp_acl_tcam_chunk *chunk;

	chunk = kzalloc(sizeof(*chunk) + ops->chunk_priv_size, GFP_KERNEL);
	if (!chunk)
		return ERR_PTR(-ENOMEM);
	chunk->vchunk = vchunk;
	chunk->region = region;

	ops->chunk_init(region->priv, chunk->priv, vchunk->priority);
	return chunk;
}
static void
mlxsw_sp_acl_tcam_chunk_destroy(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_tcam_chunk *chunk)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	ops->chunk_fini(chunk->priv);
	kfree(chunk);
}

static struct mlxsw_sp_acl_tcam_vchunk *
mlxsw_sp_acl_tcam_vchunk_create(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_tcam_vgroup *vgroup,
				unsigned int priority,
				struct mlxsw_afk_element_usage *elusage)
{
	struct mlxsw_sp_acl_tcam_vregion *vregion;
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;
	int err;

	if (priority == MLXSW_SP_ACL_TCAM_CATCHALL_PRIO)
		return ERR_PTR(-EINVAL);

	vchunk = kzalloc(sizeof(*vchunk), GFP_KERNEL);
	if (!vchunk)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&vchunk->ventry_list);
	vchunk->priority = priority;
	vchunk->vgroup = vgroup;
	vchunk->ref_count = 1;

	vregion = mlxsw_sp_acl_tcam_vregion_get(mlxsw_sp, vgroup,
						priority, elusage);
	if (IS_ERR(vregion)) {
		err = PTR_ERR(vregion);
		goto err_vregion_get;
	}

	vchunk->vregion = vregion;

	err = rhashtable_insert_fast(&vgroup->vchunk_ht, &vchunk->ht_node,
				     mlxsw_sp_acl_tcam_vchunk_ht_params);
	if (err)
		goto err_rhashtable_insert;

	mutex_lock(&vregion->lock);
	vchunk->chunk = mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, vchunk,
						       vchunk->vregion->region);
	if (IS_ERR(vchunk->chunk)) {
		mutex_unlock(&vregion->lock);
		err = PTR_ERR(vchunk->chunk);
		goto err_chunk_create;
	}

	mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(vregion);
	list_add_tail(&vchunk->list, &vregion->vchunk_list);
	mutex_unlock(&vregion->lock);

	return vchunk;

err_chunk_create:
	rhashtable_remove_fast(&vgroup->vchunk_ht, &vchunk->ht_node,
			       mlxsw_sp_acl_tcam_vchunk_ht_params);
err_rhashtable_insert:
	mlxsw_sp_acl_tcam_vregion_put(mlxsw_sp, vregion);
err_vregion_get:
	kfree(vchunk);
	return ERR_PTR(err);
}
static void
mlxsw_sp_acl_tcam_vchunk_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_vchunk *vchunk)
{
	struct mlxsw_sp_acl_tcam_vregion *vregion = vchunk->vregion;
	struct mlxsw_sp_acl_tcam_vgroup *vgroup = vchunk->vgroup;

	mutex_lock(&vregion->lock);
	mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(vregion);
	list_del(&vchunk->list);
	if (vchunk->chunk2)
		mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk2);
	mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk);
	mutex_unlock(&vregion->lock);
	rhashtable_remove_fast(&vgroup->vchunk_ht, &vchunk->ht_node,
			       mlxsw_sp_acl_tcam_vchunk_ht_params);
	mlxsw_sp_acl_tcam_vregion_put(mlxsw_sp, vchunk->vregion);
	kfree(vchunk);
}
static struct mlxsw_sp_acl_tcam_vchunk *
mlxsw_sp_acl_tcam_vchunk_get(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_acl_tcam_vgroup *vgroup,
			     unsigned int priority,
			     struct mlxsw_afk_element_usage *elusage)
{
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;

	vchunk = rhashtable_lookup_fast(&vgroup->vchunk_ht, &priority,
					mlxsw_sp_acl_tcam_vchunk_ht_params);
	if (vchunk) {
		if (WARN_ON(!mlxsw_afk_key_info_subset(vchunk->vregion->key_info,
						       elusage)))
			return ERR_PTR(-EINVAL);
		vchunk->ref_count++;
		return vchunk;
	}
	return mlxsw_sp_acl_tcam_vchunk_create(mlxsw_sp, vgroup,
					       priority, elusage);
}
static void
mlxsw_sp_acl_tcam_vchunk_put(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_acl_tcam_vchunk *vchunk)
{
	if (--vchunk->ref_count)
		return;
	mlxsw_sp_acl_tcam_vchunk_destroy(mlxsw_sp, vchunk);
}
static struct mlxsw_sp_acl_tcam_entry *
mlxsw_sp_acl_tcam_entry_create(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_tcam_ventry *ventry,
			       struct mlxsw_sp_acl_tcam_chunk *chunk)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	struct mlxsw_sp_acl_tcam_entry *entry;
	int err;

	entry = kzalloc(sizeof(*entry) + ops->entry_priv_size, GFP_KERNEL);
	if (!entry)
		return ERR_PTR(-ENOMEM);
	entry->ventry = ventry;
	entry->chunk = chunk;

	err = ops->entry_add(mlxsw_sp, chunk->region->priv, chunk->priv,
			     entry->priv, ventry->rulei);
	if (err)
		goto err_entry_add;

	return entry;

err_entry_add:
	kfree(entry);
	return ERR_PTR(err);
}
static void mlxsw_sp_acl_tcam_entry_destroy(struct mlxsw_sp *mlxsw_sp,
					    struct mlxsw_sp_acl_tcam_entry *entry)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	ops->entry_del(mlxsw_sp, entry->chunk->region->priv,
		       entry->chunk->priv, entry->priv);
	kfree(entry);
}
static int
mlxsw_sp_acl_tcam_entry_action_replace(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_acl_tcam_region *region,
				       struct mlxsw_sp_acl_tcam_entry *entry,
				       struct mlxsw_sp_acl_rule_info *rulei)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	return ops->entry_action_replace(mlxsw_sp, region->priv,
					 entry->priv, rulei);
}
static int
mlxsw_sp_acl_tcam_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_tcam_entry *entry,
				     bool *activity)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	return ops->entry_activity_get(mlxsw_sp, entry->chunk->region->priv,
				       entry->priv, activity);
}
static int mlxsw_sp_acl_tcam_ventry_add(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_acl_tcam_vgroup *vgroup,
					struct mlxsw_sp_acl_tcam_ventry *ventry,
					struct mlxsw_sp_acl_rule_info *rulei)
{
	struct mlxsw_sp_acl_tcam_vregion *vregion;
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;
	int err;

	vchunk = mlxsw_sp_acl_tcam_vchunk_get(mlxsw_sp, vgroup, rulei->priority,
					      &rulei->values.elusage);
	if (IS_ERR(vchunk))
		return PTR_ERR(vchunk);

	ventry->vchunk = vchunk;
	ventry->rulei = rulei;
	vregion = vchunk->vregion;

	mutex_lock(&vregion->lock);
	ventry->entry = mlxsw_sp_acl_tcam_entry_create(mlxsw_sp, ventry,
						       vchunk->chunk);
	if (IS_ERR(ventry->entry)) {
		mutex_unlock(&vregion->lock);
		err = PTR_ERR(ventry->entry);
		goto err_entry_create;
	}

	list_add_tail(&ventry->list, &vchunk->ventry_list);
	mlxsw_sp_acl_tcam_rehash_ctx_vchunk_changed(vchunk);
	mutex_unlock(&vregion->lock);

	return 0;

err_entry_create:
	mlxsw_sp_acl_tcam_vchunk_put(mlxsw_sp, vchunk);
	return err;
}
static void mlxsw_sp_acl_tcam_ventry_del(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_acl_tcam_ventry *ventry)
{
	struct mlxsw_sp_acl_tcam_vchunk *vchunk = ventry->vchunk;
	struct mlxsw_sp_acl_tcam_vregion *vregion = vchunk->vregion;

	mutex_lock(&vregion->lock);
	mlxsw_sp_acl_tcam_rehash_ctx_vchunk_changed(vchunk);
	list_del(&ventry->list);
	mlxsw_sp_acl_tcam_entry_destroy(mlxsw_sp, ventry->entry);
	mutex_unlock(&vregion->lock);
	mlxsw_sp_acl_tcam_vchunk_put(mlxsw_sp, vchunk);
}
static int
mlxsw_sp_acl_tcam_ventry_action_replace(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_acl_tcam_ventry *ventry,
					struct mlxsw_sp_acl_rule_info *rulei)
{
	struct mlxsw_sp_acl_tcam_vchunk *vchunk = ventry->vchunk;

	return mlxsw_sp_acl_tcam_entry_action_replace(mlxsw_sp,
						      vchunk->vregion->region,
						      ventry->entry, rulei);
}
static int
mlxsw_sp_acl_tcam_ventry_activity_get(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_acl_tcam_ventry *ventry,
				      bool *activity)
{
	return mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp,
						    ventry->entry, activity);
}
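/* Migration helpers. A ventry is moved to a different chunk by creating the
 * entry in the destination chunk first and only then destroying the old one.
 * Each attempted move consumes one credit from the rehash budget; when the
 * budget runs out, the migration is left to be continued by the next work
 * iteration.
 */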
static int
mlxsw_sp_acl_tcam_ventry_migrate(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_ventry *ventry,
				 struct mlxsw_sp_acl_tcam_chunk *chunk,
				 int *credits)
{
	struct mlxsw_sp_acl_tcam_entry *new_entry;

	/* First check if the entry is not already where we want it to be. */
	if (ventry->entry->chunk == chunk)
		return 0;

	if (--(*credits) < 0)
		return 0;

	new_entry = mlxsw_sp_acl_tcam_entry_create(mlxsw_sp, ventry, chunk);
	if (IS_ERR(new_entry))
		return PTR_ERR(new_entry);
	mlxsw_sp_acl_tcam_entry_destroy(mlxsw_sp, ventry->entry);
	ventry->entry = new_entry;
	return 0;
}
static int
mlxsw_sp_acl_tcam_vchunk_migrate_start(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_acl_tcam_vchunk *vchunk,
				       struct mlxsw_sp_acl_tcam_region *region,
				       struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
{
	struct mlxsw_sp_acl_tcam_chunk *new_chunk;

	new_chunk = mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, vchunk, region);
	if (IS_ERR(new_chunk))
		return PTR_ERR(new_chunk);
	vchunk->chunk2 = vchunk->chunk;
	vchunk->chunk = new_chunk;
	ctx->current_vchunk = vchunk;
	ctx->start_ventry = NULL;
	ctx->stop_ventry = NULL;
	return 0;
}
static void
mlxsw_sp_acl_tcam_vchunk_migrate_end(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_tcam_vchunk *vchunk,
				     struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
{
	mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk2);
	vchunk->chunk2 = NULL;
	ctx->current_vchunk = NULL;
}
static int
mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_tcam_vchunk *vchunk,
				     struct mlxsw_sp_acl_tcam_region *region,
				     struct mlxsw_sp_acl_tcam_rehash_ctx *ctx,
				     int *credits)
{
	struct mlxsw_sp_acl_tcam_ventry *ventry;
	int err;

	if (vchunk->chunk->region != region) {
		err = mlxsw_sp_acl_tcam_vchunk_migrate_start(mlxsw_sp, vchunk,
							     region, ctx);
		if (err)
			return err;
	} else if (!vchunk->chunk2) {
		/* The chunk is already as it should be, nothing to do. */
		return 0;
	}

	/* If the migration got interrupted, we have the ventry to start from
	 * stored in context.
	 */
	if (ctx->start_ventry)
		ventry = ctx->start_ventry;
	else
		ventry = list_first_entry(&vchunk->ventry_list,
					  typeof(*ventry), list);

	list_for_each_entry_from(ventry, &vchunk->ventry_list, list) {
		/* During rollback, once we reach the ventry that failed
		 * to migrate, we are done.
		 */
		if (ventry == ctx->stop_ventry)
			break;

		err = mlxsw_sp_acl_tcam_ventry_migrate(mlxsw_sp, ventry,
						       vchunk->chunk, credits);
		if (err) {
			if (ctx->this_is_rollback) {
				/* Save the ventry which we ended with and try
				 * to continue later on.
				 */
				ctx->start_ventry = ventry;
				return err;
			}
			/* Swap the chunk and chunk2 pointers so the follow-up
			 * rollback call will see the original chunk pointer
			 * in vchunk->chunk2.
			 */
			swap(vchunk->chunk, vchunk->chunk2);
			/* The rollback has to be done from beginning of the
			 * chunk, that is why we have to null the start_ventry.
			 * However, we know where to stop the rollback,
			 * at the current ventry.
			 */
			ctx->start_ventry = NULL;
			ctx->stop_ventry = ventry;
			return err;
		} else if (*credits < 0) {
			/* We are out of credits, the rest of the ventries
			 * will be migrated later. Save the ventry
			 * which we ended with.
			 */
			ctx->start_ventry = ventry;
			return 0;
		}
	}

	mlxsw_sp_acl_tcam_vchunk_migrate_end(mlxsw_sp, vchunk, ctx);
	return 0;
}
static int
mlxsw_sp_acl_tcam_vchunk_migrate_all(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_tcam_vregion *vregion,
				     struct mlxsw_sp_acl_tcam_rehash_ctx *ctx,
				     int *credits)
{
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;
	int err;

	/* If the migration got interrupted, we have the vchunk
	 * we are working on stored in context.
	 */
	if (ctx->current_vchunk)
		vchunk = ctx->current_vchunk;
	else
		vchunk = list_first_entry(&vregion->vchunk_list,
					  typeof(*vchunk), list);

	list_for_each_entry_from(vchunk, &vregion->vchunk_list, list) {
		err = mlxsw_sp_acl_tcam_vchunk_migrate_one(mlxsw_sp, vchunk,
							   vregion->region,
							   ctx, credits);
		if (err || *credits < 0)
			return err;
	}
	return 0;
}
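/* Migrate a whole vregion to its new region. If the forward migration fails,
 * the region pointers are swapped back and the same machinery is re-run as a
 * rollback, moving already-migrated entries back to the original region.
 */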
static int
mlxsw_sp_acl_tcam_vregion_migrate(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_acl_tcam_vregion *vregion,
				  struct mlxsw_sp_acl_tcam_rehash_ctx *ctx,
				  int *credits)
{
	int err, err2;

	trace_mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion);
	mutex_lock(&vregion->lock);
	err = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion,
						   ctx, credits);
	if (err) {
		/* In case migration was not successful, we need to swap
		 * so the original region pointer is assigned again
		 * to vregion->region.
		 */
		swap(vregion->region, vregion->region2);
		ctx->current_vchunk = NULL;
		ctx->this_is_rollback = true;
		err2 = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion,
							    ctx, credits);
		if (err2) {
			trace_mlxsw_sp_acl_tcam_vregion_rehash_rollback_failed(mlxsw_sp,
									       vregion);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to rollback during vregion migration fail\n");
			/* Let the rollback to be continued later on. */
		}
	}
	mutex_unlock(&vregion->lock);
	trace_mlxsw_sp_acl_tcam_vregion_migrate_end(mlxsw_sp, vregion);
	return err;
}
static bool
mlxsw_sp_acl_tcam_vregion_rehash_in_progress(const struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
{
	return ctx->hints_priv;
}
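/* Rehash is split into start/end phases around the migration: start fetches
 * placement hints, creates the replacement region and attaches it to the
 * group next to the old one; end detaches and destroys the region that is
 * left unused and releases the hints.
 */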
static int
mlxsw_sp_acl_tcam_vregion_rehash_start(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_acl_tcam_vregion *vregion,
				       struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	unsigned int priority = mlxsw_sp_acl_tcam_vregion_prio(vregion);
	struct mlxsw_sp_acl_tcam_region *new_region;
	void *hints_priv;
	int err;

	trace_mlxsw_sp_acl_tcam_vregion_rehash(mlxsw_sp, vregion);

	hints_priv = ops->region_rehash_hints_get(vregion->region->priv);
	if (IS_ERR(hints_priv))
		return PTR_ERR(hints_priv);

	new_region = mlxsw_sp_acl_tcam_region_create(mlxsw_sp, vregion->tcam,
						     vregion, hints_priv);
	if (IS_ERR(new_region)) {
		err = PTR_ERR(new_region);
		goto err_region_create;
	}

	/* vregion->region contains the pointer to the new region
	 * we are going to migrate to.
	 */
	vregion->region2 = vregion->region;
	vregion->region = new_region;
	err = mlxsw_sp_acl_tcam_group_region_attach(mlxsw_sp,
						    vregion->region2->group,
						    new_region, priority,
						    vregion->region2);
	if (err)
		goto err_group_region_attach;

	ctx->hints_priv = hints_priv;
	ctx->this_is_rollback = false;

	return 0;

err_group_region_attach:
	vregion->region = vregion->region2;
	vregion->region2 = NULL;
	mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, new_region);
err_region_create:
	ops->region_rehash_hints_put(hints_priv);
	return err;
}
static void
mlxsw_sp_acl_tcam_vregion_rehash_end(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_tcam_vregion *vregion,
				     struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
{
	struct mlxsw_sp_acl_tcam_region *unused_region = vregion->region2;
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	vregion->region2 = NULL;
	mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp, unused_region);
	mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, unused_region);
	ops->region_rehash_hints_put(ctx->hints_priv);
	ctx->hints_priv = NULL;
}
static void
mlxsw_sp_acl_tcam_vregion_rehash(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_vregion *vregion,
				 int *credits)
{
	struct mlxsw_sp_acl_tcam_rehash_ctx *ctx = &vregion->rehash.ctx;
	int err;

	/* Check if the previous rehash work was interrupted
	 * which means we have to continue it now.
	 * If not, start a new rehash.
	 */
	if (!mlxsw_sp_acl_tcam_vregion_rehash_in_progress(ctx)) {
		err = mlxsw_sp_acl_tcam_vregion_rehash_start(mlxsw_sp,
							     vregion, ctx);
		if (err) {
			dev_err(mlxsw_sp->bus_info->dev, "Failed get rehash hints\n");
			return;
		}
	}

	err = mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion,
						ctx, credits);
	if (err)
		dev_err(mlxsw_sp->bus_info->dev, "Failed to migrate vregion\n");

	if (*credits >= 0)
		mlxsw_sp_acl_tcam_vregion_rehash_end(mlxsw_sp, vregion, ctx);
}
static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = {
	MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
	MLXSW_AFK_ELEMENT_DMAC_32_47,
	MLXSW_AFK_ELEMENT_DMAC_0_31,
	MLXSW_AFK_ELEMENT_SMAC_32_47,
	MLXSW_AFK_ELEMENT_SMAC_0_31,
	MLXSW_AFK_ELEMENT_ETHERTYPE,
	MLXSW_AFK_ELEMENT_IP_PROTO,
	MLXSW_AFK_ELEMENT_SRC_IP_0_31,
	MLXSW_AFK_ELEMENT_DST_IP_0_31,
	MLXSW_AFK_ELEMENT_DST_L4_PORT,
	MLXSW_AFK_ELEMENT_SRC_L4_PORT,
	MLXSW_AFK_ELEMENT_VID,
	MLXSW_AFK_ELEMENT_PCP,
	MLXSW_AFK_ELEMENT_TCP_FLAGS,
	MLXSW_AFK_ELEMENT_IP_TTL_,
	MLXSW_AFK_ELEMENT_IP_ECN,
	MLXSW_AFK_ELEMENT_IP_DSCP,
};
static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv6[] = {
	MLXSW_AFK_ELEMENT_ETHERTYPE,
	MLXSW_AFK_ELEMENT_IP_PROTO,
	MLXSW_AFK_ELEMENT_SRC_IP_96_127,
	MLXSW_AFK_ELEMENT_SRC_IP_64_95,
	MLXSW_AFK_ELEMENT_SRC_IP_32_63,
	MLXSW_AFK_ELEMENT_SRC_IP_0_31,
	MLXSW_AFK_ELEMENT_DST_IP_96_127,
	MLXSW_AFK_ELEMENT_DST_IP_64_95,
	MLXSW_AFK_ELEMENT_DST_IP_32_63,
	MLXSW_AFK_ELEMENT_DST_IP_0_31,
	MLXSW_AFK_ELEMENT_DST_L4_PORT,
	MLXSW_AFK_ELEMENT_SRC_L4_PORT,
};
static const struct mlxsw_sp_acl_tcam_pattern mlxsw_sp_acl_tcam_patterns[] = {
	{
		.elements = mlxsw_sp_acl_tcam_pattern_ipv4,
		.elements_count = ARRAY_SIZE(mlxsw_sp_acl_tcam_pattern_ipv4),
	},
	{
		.elements = mlxsw_sp_acl_tcam_pattern_ipv6,
		.elements_count = ARRAY_SIZE(mlxsw_sp_acl_tcam_pattern_ipv6),
	},
};
#define MLXSW_SP_ACL_TCAM_PATTERNS_COUNT \
	ARRAY_SIZE(mlxsw_sp_acl_tcam_patterns)
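/* Profile ops glue. Two TCAM client profiles are implemented below: flower
 * (TC flower classifier offload) and MR (multicast router). Both are thin
 * wrappers that translate the generic ruleset/rule callbacks into vgroup and
 * ventry operations.
 */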
struct mlxsw_sp_acl_tcam_flower_ruleset {
	struct mlxsw_sp_acl_tcam_vgroup vgroup;
};
struct mlxsw_sp_acl_tcam_flower_rule {
	struct mlxsw_sp_acl_tcam_ventry ventry;
};
static int
mlxsw_sp_acl_tcam_flower_ruleset_add(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_tcam *tcam,
				     void *ruleset_priv,
				     struct mlxsw_afk_element_usage *tmplt_elusage)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

	return mlxsw_sp_acl_tcam_vgroup_add(mlxsw_sp, tcam, &ruleset->vgroup,
					    mlxsw_sp_acl_tcam_patterns,
					    MLXSW_SP_ACL_TCAM_PATTERNS_COUNT,
					    tmplt_elusage, true);
}
static void
mlxsw_sp_acl_tcam_flower_ruleset_del(struct mlxsw_sp *mlxsw_sp,
				     void *ruleset_priv)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

	mlxsw_sp_acl_tcam_vgroup_del(&ruleset->vgroup);
}
static int
mlxsw_sp_acl_tcam_flower_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
				      void *ruleset_priv,
				      struct mlxsw_sp_port *mlxsw_sp_port,
				      bool ingress)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

	return mlxsw_sp_acl_tcam_group_bind(mlxsw_sp, &ruleset->vgroup.group,
					    mlxsw_sp_port, ingress);
}
static void
mlxsw_sp_acl_tcam_flower_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
					void *ruleset_priv,
					struct mlxsw_sp_port *mlxsw_sp_port,
					bool ingress)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

	mlxsw_sp_acl_tcam_group_unbind(mlxsw_sp, &ruleset->vgroup.group,
				       mlxsw_sp_port, ingress);
}
static u16
mlxsw_sp_acl_tcam_flower_ruleset_group_id(void *ruleset_priv)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

	return mlxsw_sp_acl_tcam_group_id(&ruleset->vgroup.group);
}
static int
mlxsw_sp_acl_tcam_flower_rule_add(struct mlxsw_sp *mlxsw_sp,
				  void *ruleset_priv, void *rule_priv,
				  struct mlxsw_sp_acl_rule_info *rulei)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
	struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;

	return mlxsw_sp_acl_tcam_ventry_add(mlxsw_sp, &ruleset->vgroup,
					    &rule->ventry, rulei);
}
static void
mlxsw_sp_acl_tcam_flower_rule_del(struct mlxsw_sp *mlxsw_sp, void *rule_priv)
{
	struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;

	mlxsw_sp_acl_tcam_ventry_del(mlxsw_sp, &rule->ventry);
}
static int
mlxsw_sp_acl_tcam_flower_rule_action_replace(struct mlxsw_sp *mlxsw_sp,
					     void *rule_priv,
					     struct mlxsw_sp_acl_rule_info *rulei)
{
	return -EOPNOTSUPP;
}
static int
mlxsw_sp_acl_tcam_flower_rule_activity_get(struct mlxsw_sp *mlxsw_sp,
					   void *rule_priv, bool *activity)
{
	struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;

	return mlxsw_sp_acl_tcam_ventry_activity_get(mlxsw_sp, &rule->ventry,
						     activity);
}
static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_flower_ops = {
	.ruleset_priv_size	= sizeof(struct mlxsw_sp_acl_tcam_flower_ruleset),
	.ruleset_add		= mlxsw_sp_acl_tcam_flower_ruleset_add,
	.ruleset_del		= mlxsw_sp_acl_tcam_flower_ruleset_del,
	.ruleset_bind		= mlxsw_sp_acl_tcam_flower_ruleset_bind,
	.ruleset_unbind		= mlxsw_sp_acl_tcam_flower_ruleset_unbind,
	.ruleset_group_id	= mlxsw_sp_acl_tcam_flower_ruleset_group_id,
	.rule_priv_size		= sizeof(struct mlxsw_sp_acl_tcam_flower_rule),
	.rule_add		= mlxsw_sp_acl_tcam_flower_rule_add,
	.rule_del		= mlxsw_sp_acl_tcam_flower_rule_del,
	.rule_action_replace	= mlxsw_sp_acl_tcam_flower_rule_action_replace,
	.rule_activity_get	= mlxsw_sp_acl_tcam_flower_rule_activity_get,
};
struct mlxsw_sp_acl_tcam_mr_ruleset {
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;
	struct mlxsw_sp_acl_tcam_vgroup vgroup;
};
struct mlxsw_sp_acl_tcam_mr_rule {
	struct mlxsw_sp_acl_tcam_ventry ventry;
};
static int
mlxsw_sp_acl_tcam_mr_ruleset_add(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam *tcam,
				 void *ruleset_priv,
				 struct mlxsw_afk_element_usage *tmplt_elusage)
{
	struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;
	int err;

	err = mlxsw_sp_acl_tcam_vgroup_add(mlxsw_sp, tcam, &ruleset->vgroup,
					   mlxsw_sp_acl_tcam_patterns,
					   MLXSW_SP_ACL_TCAM_PATTERNS_COUNT,
					   tmplt_elusage, false);
	if (err)
		return err;

	/* For most of the TCAM clients it would make sense to take a tcam chunk
	 * only when the first rule is written. This is not the case for
	 * multicast router as it is required to bind the multicast router to a
	 * specific ACL Group ID which must exist in HW before multicast router
	 * is initialized.
	 */
	ruleset->vchunk = mlxsw_sp_acl_tcam_vchunk_get(mlxsw_sp,
						       &ruleset->vgroup, 1,
						       tmplt_elusage);
	if (IS_ERR(ruleset->vchunk)) {
		err = PTR_ERR(ruleset->vchunk);
		goto err_chunk_get;
	}

	return 0;

err_chunk_get:
	mlxsw_sp_acl_tcam_vgroup_del(&ruleset->vgroup);
	return err;
}
static void
mlxsw_sp_acl_tcam_mr_ruleset_del(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv)
{
	struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;

	mlxsw_sp_acl_tcam_vchunk_put(mlxsw_sp, ruleset->vchunk);
	mlxsw_sp_acl_tcam_vgroup_del(&ruleset->vgroup);
}
static int
mlxsw_sp_acl_tcam_mr_ruleset_bind(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
				  struct mlxsw_sp_port *mlxsw_sp_port,
				  bool ingress)
{
	/* Binding is done when initializing multicast router */
	return 0;
}
static void
mlxsw_sp_acl_tcam_mr_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
				    void *ruleset_priv,
				    struct mlxsw_sp_port *mlxsw_sp_port,
				    bool ingress)
{
}
static u16
mlxsw_sp_acl_tcam_mr_ruleset_group_id(void *ruleset_priv)
{
	struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;

	return mlxsw_sp_acl_tcam_group_id(&ruleset->vgroup.group);
}
static int
mlxsw_sp_acl_tcam_mr_rule_add(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
			      void *rule_priv,
			      struct mlxsw_sp_acl_rule_info *rulei)
{
	struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;
	struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;

	return mlxsw_sp_acl_tcam_ventry_add(mlxsw_sp, &ruleset->vgroup,
					    &rule->ventry, rulei);
}
static void
mlxsw_sp_acl_tcam_mr_rule_del(struct mlxsw_sp *mlxsw_sp, void *rule_priv)
{
	struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;

	mlxsw_sp_acl_tcam_ventry_del(mlxsw_sp, &rule->ventry);
}
static int
mlxsw_sp_acl_tcam_mr_rule_action_replace(struct mlxsw_sp *mlxsw_sp,
					 void *rule_priv,
					 struct mlxsw_sp_acl_rule_info *rulei)
{
	struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;

	return mlxsw_sp_acl_tcam_ventry_action_replace(mlxsw_sp, &rule->ventry,
						       rulei);
}
*mlxsw_sp
,
1793 void *rule_priv
, bool *activity
)
1795 struct mlxsw_sp_acl_tcam_mr_rule
*rule
= rule_priv
;
1797 return mlxsw_sp_acl_tcam_ventry_activity_get(mlxsw_sp
, &rule
->ventry
,
static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_mr_ops = {
	.ruleset_priv_size	= sizeof(struct mlxsw_sp_acl_tcam_mr_ruleset),
	.ruleset_add		= mlxsw_sp_acl_tcam_mr_ruleset_add,
	.ruleset_del		= mlxsw_sp_acl_tcam_mr_ruleset_del,
	.ruleset_bind		= mlxsw_sp_acl_tcam_mr_ruleset_bind,
	.ruleset_unbind		= mlxsw_sp_acl_tcam_mr_ruleset_unbind,
	.ruleset_group_id	= mlxsw_sp_acl_tcam_mr_ruleset_group_id,
	.rule_priv_size		= sizeof(struct mlxsw_sp_acl_tcam_mr_rule),
	.rule_add		= mlxsw_sp_acl_tcam_mr_rule_add,
	.rule_del		= mlxsw_sp_acl_tcam_mr_rule_del,
	.rule_action_replace	= mlxsw_sp_acl_tcam_mr_rule_action_replace,
	.rule_activity_get	= mlxsw_sp_acl_tcam_mr_rule_activity_get,
};
static const struct mlxsw_sp_acl_profile_ops *
mlxsw_sp_acl_tcam_profile_ops_arr[] = {
	[MLXSW_SP_ACL_PROFILE_FLOWER] = &mlxsw_sp_acl_tcam_flower_ops,
	[MLXSW_SP_ACL_PROFILE_MR] = &mlxsw_sp_acl_tcam_mr_ops,
};
const struct mlxsw_sp_acl_profile_ops *
mlxsw_sp_acl_tcam_profile_ops(struct mlxsw_sp *mlxsw_sp,
			      enum mlxsw_sp_acl_profile profile)
{
	const struct mlxsw_sp_acl_profile_ops *ops;

	if (WARN_ON(profile >= ARRAY_SIZE(mlxsw_sp_acl_tcam_profile_ops_arr)))
		return NULL;
	ops = mlxsw_sp_acl_tcam_profile_ops_arr[profile];
	if (WARN_ON(!ops))
		return NULL;
	return ops;
}