// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
#include <trace/events/mlxsw.h>

#include "reg.h"
#include "core.h"
#include "resources.h"
#include "spectrum.h"
#include "spectrum_acl_tcam.h"
#include "core_acl_flex_keys.h"

size_t mlxsw_sp_acl_tcam_priv_size(struct mlxsw_sp *mlxsw_sp)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	return ops->priv_size;
}

#define MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_DFLT 5000 /* ms */
#define MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_MIN 3000 /* ms */
#define MLXSW_SP_ACL_TCAM_VREGION_REHASH_CREDITS 100 /* number of entries */

int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_acl_tcam *tcam)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	u64 max_tcam_regions;
	u64 max_regions;
	u64 max_groups;
	size_t alloc_size;
	int err;

	mutex_init(&tcam->lock);
	tcam->vregion_rehash_intrvl =
			MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_DFLT;
	INIT_LIST_HEAD(&tcam->vregion_list);

	max_tcam_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core,
					      ACL_MAX_TCAM_REGIONS);
	max_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_REGIONS);

	/* Use 1:1 mapping between ACL region and TCAM region */
	if (max_tcam_regions < max_regions)
		max_regions = max_tcam_regions;

	alloc_size = sizeof(tcam->used_regions[0]) * BITS_TO_LONGS(max_regions);
	tcam->used_regions = kzalloc(alloc_size, GFP_KERNEL);
	if (!tcam->used_regions)
		return -ENOMEM;
	tcam->max_regions = max_regions;

	max_groups = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_GROUPS);
	alloc_size = sizeof(tcam->used_groups[0]) * BITS_TO_LONGS(max_groups);
	tcam->used_groups = kzalloc(alloc_size, GFP_KERNEL);
	if (!tcam->used_groups) {
		err = -ENOMEM;
		goto err_alloc_used_groups;
	}
	tcam->max_groups = max_groups;
	tcam->max_group_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
						  ACL_MAX_GROUP_SIZE);

	err = ops->init(mlxsw_sp, tcam->priv, tcam);
	if (err)
		goto err_tcam_init;

	return 0;

err_tcam_init:
	kfree(tcam->used_groups);
err_alloc_used_groups:
	kfree(tcam->used_regions);
	return err;
}

void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_acl_tcam *tcam)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	mutex_destroy(&tcam->lock);
	ops->fini(mlxsw_sp, tcam->priv);
	kfree(tcam->used_groups);
	kfree(tcam->used_regions);
}

int mlxsw_sp_acl_tcam_priority_get(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_acl_rule_info *rulei,
				   u32 *priority, bool fillup_priority)
{
	u64 max_priority;

	if (!fillup_priority) {
		*priority = 0;
		return 0;
	}

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, KVD_SIZE))
		return -EIO;

	/* Priority range is 1..cap_kvd_size-1. */
	max_priority = MLXSW_CORE_RES_GET(mlxsw_sp->core, KVD_SIZE) - 1;
	if (rulei->priority >= max_priority)
		return -EINVAL;

	/* Unlike in TC, in HW, higher number means higher priority. */
	*priority = max_priority - rulei->priority;
	return 0;
}
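
/* A minimal worked example of the inversion above, assuming a hypothetical
 * cap_kvd_size of 1024 (so max_priority == 1023): a rule added with TC
 * priority 10 is installed with HW priority 1013, while a rule with TC
 * priority 20 gets HW priority 1003, so it correctly loses to the first
 * one in the HW lookup:
 *
 *	u32 hw_prio;
 *	int err;
 *
 *	rulei->priority = 10;
 *	err = mlxsw_sp_acl_tcam_priority_get(mlxsw_sp, rulei, &hw_prio, true);
 *	// err == 0, hw_prio == 1023 - 10 == 1013
 */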

static int mlxsw_sp_acl_tcam_region_id_get(struct mlxsw_sp_acl_tcam *tcam,
					   u16 *p_id)
{
	u16 id;

	id = find_first_zero_bit(tcam->used_regions, tcam->max_regions);
	if (id < tcam->max_regions) {
		__set_bit(id, tcam->used_regions);
		*p_id = id;
		return 0;
	}
	return -ENOBUFS;
}

static void mlxsw_sp_acl_tcam_region_id_put(struct mlxsw_sp_acl_tcam *tcam,
					    u16 id)
{
	__clear_bit(id, tcam->used_regions);
}

static int mlxsw_sp_acl_tcam_group_id_get(struct mlxsw_sp_acl_tcam *tcam,
					  u16 *p_id)
{
	u16 id;

	id = find_first_zero_bit(tcam->used_groups, tcam->max_groups);
	if (id < tcam->max_groups) {
		__set_bit(id, tcam->used_groups);
		*p_id = id;
		return 0;
	}
	return -ENOBUFS;
}

static void mlxsw_sp_acl_tcam_group_id_put(struct mlxsw_sp_acl_tcam *tcam,
					   u16 id)
{
	__clear_bit(id, tcam->used_groups);
}
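
/* Both allocators above implement the same lowest-free-ID scheme on top of a
 * plain bitmap: find_first_zero_bit() scans for the first unused ID and
 * __set_bit()/__clear_bit() mark it used/free (the non-atomic variants are
 * fine here because callers serialize on higher-level locks). A sketch of
 * the allocation pattern, with hypothetical values for illustration:
 *
 *	u16 id;
 *
 *	// with max_regions == 4 and used_regions == 0b0101,
 *	// find_first_zero_bit() returns 1:
 *	if (!mlxsw_sp_acl_tcam_region_id_get(tcam, &id))
 *		... use id (== 1), used_regions becomes 0b0111 ...
 *	mlxsw_sp_acl_tcam_region_id_put(tcam, id);	// back to 0b0101
 */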

struct mlxsw_sp_acl_tcam_pattern {
	const enum mlxsw_afk_element *elements;
	unsigned int elements_count;
};

struct mlxsw_sp_acl_tcam_group {
	struct mlxsw_sp_acl_tcam *tcam;
	u16 id;
	struct mutex lock; /* guards region list updates */
	struct list_head region_list;
	unsigned int region_count;
};

struct mlxsw_sp_acl_tcam_vgroup {
	struct mlxsw_sp_acl_tcam_group group;
	struct list_head vregion_list;
	struct rhashtable vchunk_ht;
	const struct mlxsw_sp_acl_tcam_pattern *patterns;
	unsigned int patterns_count;
	bool tmplt_elusage_set;
	struct mlxsw_afk_element_usage tmplt_elusage;
	bool vregion_rehash_enabled;
};

struct mlxsw_sp_acl_tcam_rehash_ctx {
	void *hints_priv;
	bool this_is_rollback;
	struct mlxsw_sp_acl_tcam_vchunk *current_vchunk; /* vchunk being
							  * currently migrated.
							  */
	struct mlxsw_sp_acl_tcam_ventry *start_ventry; /* ventry to start
							* migration from in
							* a vchunk being
							* currently migrated.
							*/
	struct mlxsw_sp_acl_tcam_ventry *stop_ventry; /* ventry to stop
						       * migration at
						       * a vchunk being
						       * currently migrated.
						       */
};

struct mlxsw_sp_acl_tcam_vregion {
	struct mutex lock; /* Protects consistency of region, region2 pointers
			    * and vchunk_list.
			    */
	struct mlxsw_sp_acl_tcam_region *region;
	struct mlxsw_sp_acl_tcam_region *region2; /* Used during migration */
	struct list_head list; /* Member of a TCAM group */
	struct list_head tlist; /* Member of a TCAM */
	struct list_head vchunk_list; /* List of vchunks under this vregion */
	struct mlxsw_afk_key_info *key_info;
	struct mlxsw_sp_acl_tcam *tcam;
	struct mlxsw_sp_acl_tcam_vgroup *vgroup;
	struct {
		struct delayed_work dw;
		struct mlxsw_sp_acl_tcam_rehash_ctx ctx;
	} rehash;
	struct mlxsw_sp *mlxsw_sp;
	unsigned int ref_count;
};

struct mlxsw_sp_acl_tcam_vchunk;

struct mlxsw_sp_acl_tcam_chunk {
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;
	struct mlxsw_sp_acl_tcam_region *region;
	unsigned long priv[];
	/* priv always has to be the last item */
};

struct mlxsw_sp_acl_tcam_vchunk {
	struct mlxsw_sp_acl_tcam_chunk *chunk;
	struct mlxsw_sp_acl_tcam_chunk *chunk2; /* Used during migration */
	struct list_head list; /* Member of a TCAM vregion */
	struct rhash_head ht_node; /* Member of a chunk HT */
	struct list_head ventry_list;
	unsigned int priority; /* Priority within the vregion and group */
	struct mlxsw_sp_acl_tcam_vgroup *vgroup;
	struct mlxsw_sp_acl_tcam_vregion *vregion;
	unsigned int ref_count;
};

struct mlxsw_sp_acl_tcam_entry {
	struct mlxsw_sp_acl_tcam_ventry *ventry;
	struct mlxsw_sp_acl_tcam_chunk *chunk;
	unsigned long priv[];
	/* priv always has to be the last item */
};

struct mlxsw_sp_acl_tcam_ventry {
	struct mlxsw_sp_acl_tcam_entry *entry;
	struct list_head list; /* Member of a TCAM vchunk */
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;
	struct mlxsw_sp_acl_rule_info *rulei;
};

static const struct rhashtable_params mlxsw_sp_acl_tcam_vchunk_ht_params = {
	.key_len = sizeof(unsigned int),
	.key_offset = offsetof(struct mlxsw_sp_acl_tcam_vchunk, priority),
	.head_offset = offsetof(struct mlxsw_sp_acl_tcam_vchunk, ht_node),
	.automatic_shrinking = true,
};
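
/* The hash table above keys vchunks directly by their priority field, so a
 * vchunk lookup is a plain rhashtable lookup with a pointer to the priority
 * value; this is exactly what mlxsw_sp_acl_tcam_vchunk_get() below relies
 * on. A minimal sketch of the lookup idiom:
 *
 *	unsigned int priority = 10;
 *	struct mlxsw_sp_acl_tcam_vchunk *vchunk;
 *
 *	vchunk = rhashtable_lookup_fast(&vgroup->vchunk_ht, &priority,
 *					mlxsw_sp_acl_tcam_vchunk_ht_params);
 *	// NULL if no vchunk with this priority exists in the vgroup
 */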

static int mlxsw_sp_acl_tcam_group_update(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_acl_tcam_group *group)
{
	struct mlxsw_sp_acl_tcam_region *region;
	char pagt_pl[MLXSW_REG_PAGT_LEN];
	int acl_index = 0;

	mlxsw_reg_pagt_pack(pagt_pl, group->id);
	list_for_each_entry(region, &group->region_list, list) {
		bool multi = false;

		/* Check if the next entry in the list has the same vregion. */
		if (region->list.next != &group->region_list &&
		    list_next_entry(region, list)->vregion == region->vregion)
			multi = true;
		mlxsw_reg_pagt_acl_id_pack(pagt_pl, acl_index++,
					   region->id, multi);
	}
	mlxsw_reg_pagt_size_set(pagt_pl, acl_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pagt), pagt_pl);
}
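
/* The "multi" flag marks a region whose successor in the group belongs to
 * the same vregion; it only becomes true while a vregion is mid-rehash and
 * briefly owns two regions. For example, with regions A, B1, B2 and C in a
 * group, where B1 and B2 are the two regions of one vregion being rehashed
 * (the new one is attached directly before the old one), the PAGT entries
 * would be packed as:
 *
 *	index 0: A  (multi = false)
 *	index 1: B1 (multi = true)	// next entry shares B1's vregion
 *	index 2: B2 (multi = false)
 *	index 3: C  (multi = false)
 */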

static int
mlxsw_sp_acl_tcam_group_add(struct mlxsw_sp_acl_tcam *tcam,
			    struct mlxsw_sp_acl_tcam_group *group)
{
	int err;

	group->tcam = tcam;
	mutex_init(&group->lock);
	INIT_LIST_HEAD(&group->region_list);

	err = mlxsw_sp_acl_tcam_group_id_get(tcam, &group->id);
	if (err)
		return err;

	return 0;
}

static void mlxsw_sp_acl_tcam_group_del(struct mlxsw_sp_acl_tcam_group *group)
{
	struct mlxsw_sp_acl_tcam *tcam = group->tcam;

	mutex_destroy(&group->lock);
	mlxsw_sp_acl_tcam_group_id_put(tcam, group->id);
	WARN_ON(!list_empty(&group->region_list));
}

static int
mlxsw_sp_acl_tcam_vgroup_add(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_acl_tcam *tcam,
			     struct mlxsw_sp_acl_tcam_vgroup *vgroup,
			     const struct mlxsw_sp_acl_tcam_pattern *patterns,
			     unsigned int patterns_count,
			     struct mlxsw_afk_element_usage *tmplt_elusage,
			     bool vregion_rehash_enabled)
{
	int err;

	vgroup->patterns = patterns;
	vgroup->patterns_count = patterns_count;
	vgroup->vregion_rehash_enabled = vregion_rehash_enabled;

	if (tmplt_elusage) {
		vgroup->tmplt_elusage_set = true;
		memcpy(&vgroup->tmplt_elusage, tmplt_elusage,
		       sizeof(vgroup->tmplt_elusage));
	}
	INIT_LIST_HEAD(&vgroup->vregion_list);

	err = mlxsw_sp_acl_tcam_group_add(tcam, &vgroup->group);
	if (err)
		return err;

	err = rhashtable_init(&vgroup->vchunk_ht,
			      &mlxsw_sp_acl_tcam_vchunk_ht_params);
	if (err)
		goto err_rhashtable_init;

	return 0;

err_rhashtable_init:
	mlxsw_sp_acl_tcam_group_del(&vgroup->group);
	return err;
}

static void
mlxsw_sp_acl_tcam_vgroup_del(struct mlxsw_sp_acl_tcam_vgroup *vgroup)
{
	rhashtable_destroy(&vgroup->vchunk_ht);
	mlxsw_sp_acl_tcam_group_del(&vgroup->group);
	WARN_ON(!list_empty(&vgroup->vregion_list));
}

static int
mlxsw_sp_acl_tcam_group_bind(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_acl_tcam_group *group,
			     struct mlxsw_sp_port *mlxsw_sp_port,
			     bool ingress)
{
	char ppbt_pl[MLXSW_REG_PPBT_LEN];

	mlxsw_reg_ppbt_pack(ppbt_pl, ingress ? MLXSW_REG_PXBT_E_IACL :
					       MLXSW_REG_PXBT_E_EACL,
			    MLXSW_REG_PXBT_OP_BIND, mlxsw_sp_port->local_port,
			    group->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl);
}

static void
mlxsw_sp_acl_tcam_group_unbind(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_tcam_group *group,
			       struct mlxsw_sp_port *mlxsw_sp_port,
			       bool ingress)
{
	char ppbt_pl[MLXSW_REG_PPBT_LEN];

	mlxsw_reg_ppbt_pack(ppbt_pl, ingress ? MLXSW_REG_PXBT_E_IACL :
					       MLXSW_REG_PXBT_E_EACL,
			    MLXSW_REG_PXBT_OP_UNBIND, mlxsw_sp_port->local_port,
			    group->id);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl);
}

static u16
mlxsw_sp_acl_tcam_group_id(struct mlxsw_sp_acl_tcam_group *group)
{
	return group->id;
}

static unsigned int
mlxsw_sp_acl_tcam_vregion_prio(struct mlxsw_sp_acl_tcam_vregion *vregion)
{
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;

	if (list_empty(&vregion->vchunk_list))
		return 0;
	/* As the priority of a vregion, return the priority of its first vchunk */
	vchunk = list_first_entry(&vregion->vchunk_list,
				  typeof(*vchunk), list);
	return vchunk->priority;
}

static unsigned int
mlxsw_sp_acl_tcam_vregion_max_prio(struct mlxsw_sp_acl_tcam_vregion *vregion)
{
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;

	if (list_empty(&vregion->vchunk_list))
		return 0;
	vchunk = list_last_entry(&vregion->vchunk_list,
				 typeof(*vchunk), list);
	return vchunk->priority;
}

static int
mlxsw_sp_acl_tcam_group_region_attach(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_acl_tcam_group *group,
				      struct mlxsw_sp_acl_tcam_region *region,
				      unsigned int priority,
				      struct mlxsw_sp_acl_tcam_region *next_region)
{
	struct mlxsw_sp_acl_tcam_region *region2;
	struct list_head *pos;
	int err;

	mutex_lock(&group->lock);
	if (group->region_count == group->tcam->max_group_size) {
		err = -ENOBUFS;
		goto err_region_count_check;
	}

	if (next_region) {
		/* If the next region is defined, place the new one
		 * before it. The next one is a sibling.
		 */
		pos = &next_region->list;
	} else {
		/* Position the region inside the list according to priority */
		list_for_each(pos, &group->region_list) {
			region2 = list_entry(pos, typeof(*region2), list);
			if (mlxsw_sp_acl_tcam_vregion_prio(region2->vregion) >
			    priority)
				break;
		}
	}
	list_add_tail(&region->list, pos);
	region->group = group;

	err = mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
	if (err)
		goto err_group_update;

	group->region_count++;
	mutex_unlock(&group->lock);
	return 0;

err_group_update:
	list_del(&region->list);
err_region_count_check:
	mutex_unlock(&group->lock);
	return err;
}

static void
mlxsw_sp_acl_tcam_group_region_detach(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_acl_tcam_region *region)
{
	struct mlxsw_sp_acl_tcam_group *group = region->group;

	mutex_lock(&group->lock);
	list_del(&region->list);
	group->region_count--;
	mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
	mutex_unlock(&group->lock);
}

static int
mlxsw_sp_acl_tcam_vgroup_vregion_attach(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_acl_tcam_vgroup *vgroup,
					struct mlxsw_sp_acl_tcam_vregion *vregion,
					unsigned int priority)
{
	struct mlxsw_sp_acl_tcam_vregion *vregion2;
	struct list_head *pos;
	int err;

	/* Position the vregion inside the list according to priority */
	list_for_each(pos, &vgroup->vregion_list) {
		vregion2 = list_entry(pos, typeof(*vregion2), list);
		if (mlxsw_sp_acl_tcam_vregion_prio(vregion2) > priority)
			break;
	}
	list_add_tail(&vregion->list, pos);

	err = mlxsw_sp_acl_tcam_group_region_attach(mlxsw_sp, &vgroup->group,
						    vregion->region,
						    priority, NULL);
	if (err)
		goto err_region_attach;

	return 0;

err_region_attach:
	list_del(&vregion->list);
	return err;
}

static void
mlxsw_sp_acl_tcam_vgroup_vregion_detach(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_acl_tcam_vregion *vregion)
{
	list_del(&vregion->list);
	if (vregion->region2)
		mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp,
						      vregion->region2);
	mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp, vregion->region);
}

static struct mlxsw_sp_acl_tcam_vregion *
mlxsw_sp_acl_tcam_vgroup_vregion_find(struct mlxsw_sp_acl_tcam_vgroup *vgroup,
				      unsigned int priority,
				      struct mlxsw_afk_element_usage *elusage,
				      bool *p_need_split)
{
	struct mlxsw_sp_acl_tcam_vregion *vregion, *vregion2;
	struct list_head *pos;
	bool issubset;

	list_for_each(pos, &vgroup->vregion_list) {
		vregion = list_entry(pos, typeof(*vregion), list);

		/* First, check whether the requested priority does not
		 * actually belong under one of the following vregions.
		 */
		if (pos->next != &vgroup->vregion_list) { /* not last */
			vregion2 = list_entry(pos->next, typeof(*vregion2),
					      list);
			if (priority >=
			    mlxsw_sp_acl_tcam_vregion_prio(vregion2))
				continue;
		}

		issubset = mlxsw_afk_key_info_subset(vregion->key_info,
						     elusage);

		/* If the requested element usage would not fit and the
		 * priority is lower than that of the currently inspected
		 * vregion, we cannot use this vregion, so return NULL to
		 * indicate a new vregion has to be created.
		 */
		if (!issubset &&
		    priority < mlxsw_sp_acl_tcam_vregion_prio(vregion))
			return NULL;

		/* If the requested element usage would not fit and the
		 * priority is higher than that of the currently inspected
		 * vregion, we cannot use this vregion either. There is
		 * still some hope that the next vregion might fit, so let
		 * it be processed and eventually break at the check above.
		 */
		if (!issubset &&
		    priority > mlxsw_sp_acl_tcam_vregion_max_prio(vregion))
			continue;

		/* Indicate whether the vregion needs to be split in order to
		 * add the requested priority. A split is needed when the
		 * requested element usage won't fit into the found vregion.
		 */
		*p_need_split = !issubset;
		return vregion;
	}
	return NULL; /* New vregion has to be created. */
}
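
/* A worked example of the walk above, with hypothetical numbers: suppose the
 * vgroup holds two vregions, V1 covering vchunk priorities 10..20 with an
 * L2-only key and V2 covering 30..40 with an L2+IPv4 key. A request for
 * priority 25 whose elements are a subset of V1's key is placed into V1
 * (priorities 21..29 are "under" V1 because V2 only starts at 30). The same
 * priority 25 with IPv6 elements matches neither key: it is past V1's max
 * priority (continue) and below V2's first priority (return NULL), so a new
 * vregion gets created. Priority 15 with IPv6 elements would instead need
 * V1 to be split, which mlxsw_sp_acl_tcam_vregion_get() below rejects with
 * -EOPNOTSUPP.
 */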

static void
mlxsw_sp_acl_tcam_vgroup_use_patterns(struct mlxsw_sp_acl_tcam_vgroup *vgroup,
				      struct mlxsw_afk_element_usage *elusage,
				      struct mlxsw_afk_element_usage *out)
{
	const struct mlxsw_sp_acl_tcam_pattern *pattern;
	int i;

	/* If the template is set, we don't have to look up the pattern,
	 * just use the template.
	 */
	if (vgroup->tmplt_elusage_set) {
		memcpy(out, &vgroup->tmplt_elusage, sizeof(*out));
		WARN_ON(!mlxsw_afk_element_usage_subset(elusage, out));
		return;
	}

	for (i = 0; i < vgroup->patterns_count; i++) {
		pattern = &vgroup->patterns[i];
		mlxsw_afk_element_usage_fill(out, pattern->elements,
					     pattern->elements_count);
		if (mlxsw_afk_element_usage_subset(elusage, out))
			return;
	}
	memcpy(out, elusage, sizeof(*out));
}
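
/* The pattern lookup above is first-fit: the first pattern whose element set
 * is a superset of the requested usage wins, and only if none matches does
 * the vregion get keyed on the exact requested elements. With the two
 * patterns defined near the bottom of this file, a rule matching on
 * SRC_IP_0_31 + IP_PROTO selects the IPv4 pattern (it contains both
 * elements), while a rule matching on SRC_IP_96_127 falls through to the
 * IPv6 pattern. Keying vregions on whole patterns instead of exact usage
 * makes it likelier that later rules with slightly different fields can
 * share an existing vregion.
 */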

static int
mlxsw_sp_acl_tcam_region_alloc(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_tcam_region *region)
{
	struct mlxsw_afk_key_info *key_info = region->key_info;
	char ptar_pl[MLXSW_REG_PTAR_LEN];
	unsigned int encodings_count;
	int i;
	int err;

	mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_ALLOC,
			    region->key_type,
			    MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT,
			    region->id, region->tcam_region_info);
	encodings_count = mlxsw_afk_key_info_blocks_count_get(key_info);
	for (i = 0; i < encodings_count; i++) {
		u16 encoding;

		encoding = mlxsw_afk_key_info_block_encoding_get(key_info, i);
		mlxsw_reg_ptar_key_id_pack(ptar_pl, i, encoding);
	}
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
	if (err)
		return err;
	mlxsw_reg_ptar_unpack(ptar_pl, region->tcam_region_info);
	return 0;
}

static void
mlxsw_sp_acl_tcam_region_free(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_tcam_region *region)
{
	char ptar_pl[MLXSW_REG_PTAR_LEN];

	mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_FREE,
			    region->key_type, 0, region->id,
			    region->tcam_region_info);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
}

static int
mlxsw_sp_acl_tcam_region_enable(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_tcam_region *region)
{
	char pacl_pl[MLXSW_REG_PACL_LEN];

	mlxsw_reg_pacl_pack(pacl_pl, region->id, true,
			    region->tcam_region_info);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pacl), pacl_pl);
}

static void
mlxsw_sp_acl_tcam_region_disable(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_region *region)
{
	char pacl_pl[MLXSW_REG_PACL_LEN];

	mlxsw_reg_pacl_pack(pacl_pl, region->id, false,
			    region->tcam_region_info);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pacl), pacl_pl);
}

static struct mlxsw_sp_acl_tcam_region *
mlxsw_sp_acl_tcam_region_create(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_tcam *tcam,
				struct mlxsw_sp_acl_tcam_vregion *vregion,
				void *hints_priv)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	struct mlxsw_sp_acl_tcam_region *region;
	int err;

	region = kzalloc(sizeof(*region) + ops->region_priv_size, GFP_KERNEL);
	if (!region)
		return ERR_PTR(-ENOMEM);
	region->mlxsw_sp = mlxsw_sp;
	region->vregion = vregion;
	region->key_info = vregion->key_info;

	err = mlxsw_sp_acl_tcam_region_id_get(tcam, &region->id);
	if (err)
		goto err_region_id_get;

	err = ops->region_associate(mlxsw_sp, region);
	if (err)
		goto err_tcam_region_associate;

	region->key_type = ops->key_type;
	err = mlxsw_sp_acl_tcam_region_alloc(mlxsw_sp, region);
	if (err)
		goto err_tcam_region_alloc;

	err = mlxsw_sp_acl_tcam_region_enable(mlxsw_sp, region);
	if (err)
		goto err_tcam_region_enable;

	err = ops->region_init(mlxsw_sp, region->priv, tcam->priv,
			       region, hints_priv);
	if (err)
		goto err_tcam_region_init;

	return region;

err_tcam_region_init:
	mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
err_tcam_region_enable:
	mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
err_tcam_region_alloc:
err_tcam_region_associate:
	mlxsw_sp_acl_tcam_region_id_put(tcam, region->id);
err_region_id_get:
	kfree(region);
	return ERR_PTR(err);
}

static void
mlxsw_sp_acl_tcam_region_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_region *region)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	ops->region_fini(mlxsw_sp, region->priv);
	mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
	mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
	mlxsw_sp_acl_tcam_region_id_put(region->group->tcam,
					region->id);
	kfree(region);
}

static void
mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(struct mlxsw_sp_acl_tcam_vregion *vregion)
{
	unsigned long interval = vregion->tcam->vregion_rehash_intrvl;

	if (!interval)
		return;
	mlxsw_core_schedule_dw(&vregion->rehash.dw,
			       msecs_to_jiffies(interval));
}

static void
mlxsw_sp_acl_tcam_vregion_rehash(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_vregion *vregion,
				 int *credits);

static void mlxsw_sp_acl_tcam_vregion_rehash_work(struct work_struct *work)
{
	struct mlxsw_sp_acl_tcam_vregion *vregion =
		container_of(work, struct mlxsw_sp_acl_tcam_vregion,
			     rehash.dw.work);
	int credits = MLXSW_SP_ACL_TCAM_VREGION_REHASH_CREDITS;

	mlxsw_sp_acl_tcam_vregion_rehash(vregion->mlxsw_sp, vregion, &credits);
	if (credits < 0)
		/* The rehash ran out of credits, so it was interrupted.
		 * Schedule the work as soon as possible to continue.
		 */
		mlxsw_core_schedule_dw(&vregion->rehash.dw, 0);
	else
		mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(vregion);
}
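
/* The credit budget bounds how much work one invocation of the rehash worker
 * may do: each migrated entry costs one credit (see
 * mlxsw_sp_acl_tcam_ventry_migrate()), so a vregion with, say, 250 entries
 * is migrated over three worker runs of up to
 * MLXSW_SP_ACL_TCAM_VREGION_REHASH_CREDITS == 100 entries each. A negative
 * balance means the run was cut short, so the work is rescheduled with zero
 * delay to continue; otherwise the next full rehash is scheduled one
 * interval (5000 ms by default) later. Keeping each run short avoids
 * starving rule insertions, which share vregion->lock with the migration.
 */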

static void
mlxsw_sp_acl_tcam_rehash_ctx_vchunk_changed(struct mlxsw_sp_acl_tcam_vchunk *vchunk)
{
	struct mlxsw_sp_acl_tcam_vregion *vregion = vchunk->vregion;

	/* If a rule was added or deleted from vchunk which is currently
	 * under rehash migration, we have to reset the ventry pointers
	 * to make sure all rules are properly migrated.
	 */
	if (vregion->rehash.ctx.current_vchunk == vchunk) {
		vregion->rehash.ctx.start_ventry = NULL;
		vregion->rehash.ctx.stop_ventry = NULL;
	}
}

static void
mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(struct mlxsw_sp_acl_tcam_vregion *vregion)
{
	/* If a chunk was added or deleted from vregion we have to reset
	 * the current chunk pointer to make sure all chunks
	 * are properly migrated.
	 */
	vregion->rehash.ctx.current_vchunk = NULL;
}

static struct mlxsw_sp_acl_tcam_vregion *
mlxsw_sp_acl_tcam_vregion_create(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_vgroup *vgroup,
				 unsigned int priority,
				 struct mlxsw_afk_element_usage *elusage)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
	struct mlxsw_sp_acl_tcam *tcam = vgroup->group.tcam;
	struct mlxsw_sp_acl_tcam_vregion *vregion;
	int err;

	vregion = kzalloc(sizeof(*vregion), GFP_KERNEL);
	if (!vregion)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&vregion->vchunk_list);
	mutex_init(&vregion->lock);
	vregion->tcam = tcam;
	vregion->mlxsw_sp = mlxsw_sp;
	vregion->vgroup = vgroup;
	vregion->ref_count = 1;

	vregion->key_info = mlxsw_afk_key_info_get(afk, elusage);
	if (IS_ERR(vregion->key_info)) {
		err = PTR_ERR(vregion->key_info);
		goto err_key_info_get;
	}

	vregion->region = mlxsw_sp_acl_tcam_region_create(mlxsw_sp, tcam,
							  vregion, NULL);
	if (IS_ERR(vregion->region)) {
		err = PTR_ERR(vregion->region);
		goto err_region_create;
	}

	err = mlxsw_sp_acl_tcam_vgroup_vregion_attach(mlxsw_sp, vgroup, vregion,
						      priority);
	if (err)
		goto err_vgroup_vregion_attach;

	if (vgroup->vregion_rehash_enabled && ops->region_rehash_hints_get) {
		/* Create the delayed work for vregion periodic rehash */
		INIT_DELAYED_WORK(&vregion->rehash.dw,
				  mlxsw_sp_acl_tcam_vregion_rehash_work);
		mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(vregion);
		mutex_lock(&tcam->lock);
		list_add_tail(&vregion->tlist, &tcam->vregion_list);
		mutex_unlock(&tcam->lock);
	}

	return vregion;

err_vgroup_vregion_attach:
	mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, vregion->region);
err_region_create:
	mlxsw_afk_key_info_put(vregion->key_info);
err_key_info_get:
	kfree(vregion);
	return ERR_PTR(err);
}

static void
mlxsw_sp_acl_tcam_vregion_destroy(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_acl_tcam_vregion *vregion)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	struct mlxsw_sp_acl_tcam_vgroup *vgroup = vregion->vgroup;
	struct mlxsw_sp_acl_tcam *tcam = vregion->tcam;

	if (vgroup->vregion_rehash_enabled && ops->region_rehash_hints_get) {
		mutex_lock(&tcam->lock);
		list_del(&vregion->tlist);
		mutex_unlock(&tcam->lock);
		cancel_delayed_work_sync(&vregion->rehash.dw);
	}
	mlxsw_sp_acl_tcam_vgroup_vregion_detach(mlxsw_sp, vregion);
	if (vregion->region2)
		mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, vregion->region2);
	mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, vregion->region);
	mlxsw_afk_key_info_put(vregion->key_info);
	mutex_destroy(&vregion->lock);
	kfree(vregion);
}

u32 mlxsw_sp_acl_tcam_vregion_rehash_intrvl_get(struct mlxsw_sp *mlxsw_sp,
						struct mlxsw_sp_acl_tcam *tcam)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	u32 vregion_rehash_intrvl;

	if (WARN_ON(!ops->region_rehash_hints_get))
		return 0;
	vregion_rehash_intrvl = tcam->vregion_rehash_intrvl;
	return vregion_rehash_intrvl;
}

int mlxsw_sp_acl_tcam_vregion_rehash_intrvl_set(struct mlxsw_sp *mlxsw_sp,
						struct mlxsw_sp_acl_tcam *tcam,
						u32 val)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	struct mlxsw_sp_acl_tcam_vregion *vregion;

	if (val < MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_MIN && val)
		return -EINVAL;
	if (WARN_ON(!ops->region_rehash_hints_get))
		return -EOPNOTSUPP;
	tcam->vregion_rehash_intrvl = val;
	mutex_lock(&tcam->lock);
	list_for_each_entry(vregion, &tcam->vregion_list, tlist) {
		if (val)
			mlxsw_core_schedule_dw(&vregion->rehash.dw, 0);
		else
			cancel_delayed_work_sync(&vregion->rehash.dw);
	}
	mutex_unlock(&tcam->lock);
	return 0;
}
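
/* The interval setter accepts either zero, which disables periodic rehash
 * entirely (the work scheduler above bails out on a zero interval), or any
 * value of at least MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_MIN. For
 * illustration:
 *
 *	mlxsw_sp_acl_tcam_vregion_rehash_intrvl_set(mlxsw_sp, tcam, 0);
 *	// rehash disabled, pending works cancelled
 *	mlxsw_sp_acl_tcam_vregion_rehash_intrvl_set(mlxsw_sp, tcam, 1000);
 *	// -EINVAL, below the 3000 ms minimum
 *	mlxsw_sp_acl_tcam_vregion_rehash_intrvl_set(mlxsw_sp, tcam, 4000);
 *	// accepted; every vregion's work is kicked to pick up the value
 */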

static struct mlxsw_sp_acl_tcam_vregion *
mlxsw_sp_acl_tcam_vregion_get(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_tcam_vgroup *vgroup,
			      unsigned int priority,
			      struct mlxsw_afk_element_usage *elusage)
{
	struct mlxsw_afk_element_usage vregion_elusage;
	struct mlxsw_sp_acl_tcam_vregion *vregion;
	bool need_split;

	vregion = mlxsw_sp_acl_tcam_vgroup_vregion_find(vgroup, priority,
							elusage, &need_split);
	if (vregion) {
		if (need_split) {
			/* According to the priority, the new vchunk should
			 * belong to an existing vregion. However, this vchunk
			 * needs elements that the vregion does not contain.
			 * We would need to split the existing vregion into
			 * two and create a new vregion for the new vchunk
			 * in between. This is not supported yet.
			 */
			return ERR_PTR(-EOPNOTSUPP);
		}
		vregion->ref_count++;
		return vregion;
	}

	mlxsw_sp_acl_tcam_vgroup_use_patterns(vgroup, elusage,
					      &vregion_elusage);

	return mlxsw_sp_acl_tcam_vregion_create(mlxsw_sp, vgroup, priority,
						&vregion_elusage);
}

static void
mlxsw_sp_acl_tcam_vregion_put(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_tcam_vregion *vregion)
{
	if (--vregion->ref_count)
		return;
	mlxsw_sp_acl_tcam_vregion_destroy(mlxsw_sp, vregion);
}

static struct mlxsw_sp_acl_tcam_chunk *
mlxsw_sp_acl_tcam_chunk_create(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_tcam_vchunk *vchunk,
			       struct mlxsw_sp_acl_tcam_region *region)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	struct mlxsw_sp_acl_tcam_chunk *chunk;

	chunk = kzalloc(sizeof(*chunk) + ops->chunk_priv_size, GFP_KERNEL);
	if (!chunk)
		return ERR_PTR(-ENOMEM);
	chunk->vchunk = vchunk;
	chunk->region = region;

	ops->chunk_init(region->priv, chunk->priv, vchunk->priority);
	return chunk;
}

static void
mlxsw_sp_acl_tcam_chunk_destroy(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_tcam_chunk *chunk)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	ops->chunk_fini(chunk->priv);
	kfree(chunk);
}

static struct mlxsw_sp_acl_tcam_vchunk *
mlxsw_sp_acl_tcam_vchunk_create(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_tcam_vgroup *vgroup,
				unsigned int priority,
				struct mlxsw_afk_element_usage *elusage)
{
	struct mlxsw_sp_acl_tcam_vregion *vregion;
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;
	int err;

	if (priority == MLXSW_SP_ACL_TCAM_CATCHALL_PRIO)
		return ERR_PTR(-EINVAL);

	vchunk = kzalloc(sizeof(*vchunk), GFP_KERNEL);
	if (!vchunk)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&vchunk->ventry_list);
	vchunk->priority = priority;
	vchunk->vgroup = vgroup;
	vchunk->ref_count = 1;

	vregion = mlxsw_sp_acl_tcam_vregion_get(mlxsw_sp, vgroup,
						priority, elusage);
	if (IS_ERR(vregion)) {
		err = PTR_ERR(vregion);
		goto err_vregion_get;
	}

	vchunk->vregion = vregion;

	err = rhashtable_insert_fast(&vgroup->vchunk_ht, &vchunk->ht_node,
				     mlxsw_sp_acl_tcam_vchunk_ht_params);
	if (err)
		goto err_rhashtable_insert;

	mutex_lock(&vregion->lock);
	vchunk->chunk = mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, vchunk,
						       vchunk->vregion->region);
	if (IS_ERR(vchunk->chunk)) {
		mutex_unlock(&vregion->lock);
		err = PTR_ERR(vchunk->chunk);
		goto err_chunk_create;
	}

	mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(vregion);
	list_add_tail(&vchunk->list, &vregion->vchunk_list);
	mutex_unlock(&vregion->lock);

	return vchunk;

err_chunk_create:
	rhashtable_remove_fast(&vgroup->vchunk_ht, &vchunk->ht_node,
			       mlxsw_sp_acl_tcam_vchunk_ht_params);
err_rhashtable_insert:
	mlxsw_sp_acl_tcam_vregion_put(mlxsw_sp, vregion);
err_vregion_get:
	kfree(vchunk);
	return ERR_PTR(err);
}

static void
mlxsw_sp_acl_tcam_vchunk_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_vchunk *vchunk)
{
	struct mlxsw_sp_acl_tcam_vregion *vregion = vchunk->vregion;
	struct mlxsw_sp_acl_tcam_vgroup *vgroup = vchunk->vgroup;

	mutex_lock(&vregion->lock);
	mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(vregion);
	list_del(&vchunk->list);
	if (vchunk->chunk2)
		mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk2);
	mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk);
	mutex_unlock(&vregion->lock);
	rhashtable_remove_fast(&vgroup->vchunk_ht, &vchunk->ht_node,
			       mlxsw_sp_acl_tcam_vchunk_ht_params);
	mlxsw_sp_acl_tcam_vregion_put(mlxsw_sp, vchunk->vregion);
	kfree(vchunk);
}

static struct mlxsw_sp_acl_tcam_vchunk *
mlxsw_sp_acl_tcam_vchunk_get(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_acl_tcam_vgroup *vgroup,
			     unsigned int priority,
			     struct mlxsw_afk_element_usage *elusage)
{
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;

	vchunk = rhashtable_lookup_fast(&vgroup->vchunk_ht, &priority,
					mlxsw_sp_acl_tcam_vchunk_ht_params);
	if (vchunk) {
		if (WARN_ON(!mlxsw_afk_key_info_subset(vchunk->vregion->key_info,
						       elusage)))
			return ERR_PTR(-EINVAL);
		vchunk->ref_count++;
		return vchunk;
	}
	return mlxsw_sp_acl_tcam_vchunk_create(mlxsw_sp, vgroup,
					       priority, elusage);
}

static void
mlxsw_sp_acl_tcam_vchunk_put(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_acl_tcam_vchunk *vchunk)
{
	if (--vchunk->ref_count)
		return;
	mlxsw_sp_acl_tcam_vchunk_destroy(mlxsw_sp, vchunk);
}

static struct mlxsw_sp_acl_tcam_entry *
mlxsw_sp_acl_tcam_entry_create(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_tcam_ventry *ventry,
			       struct mlxsw_sp_acl_tcam_chunk *chunk)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	struct mlxsw_sp_acl_tcam_entry *entry;
	int err;

	entry = kzalloc(sizeof(*entry) + ops->entry_priv_size, GFP_KERNEL);
	if (!entry)
		return ERR_PTR(-ENOMEM);
	entry->ventry = ventry;
	entry->chunk = chunk;

	err = ops->entry_add(mlxsw_sp, chunk->region->priv, chunk->priv,
			     entry->priv, ventry->rulei);
	if (err)
		goto err_entry_add;

	return entry;

err_entry_add:
	kfree(entry);
	return ERR_PTR(err);
}

static void mlxsw_sp_acl_tcam_entry_destroy(struct mlxsw_sp *mlxsw_sp,
					    struct mlxsw_sp_acl_tcam_entry *entry)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	ops->entry_del(mlxsw_sp, entry->chunk->region->priv,
		       entry->chunk->priv, entry->priv);
	kfree(entry);
}

static int
mlxsw_sp_acl_tcam_entry_action_replace(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_acl_tcam_region *region,
				       struct mlxsw_sp_acl_tcam_entry *entry,
				       struct mlxsw_sp_acl_rule_info *rulei)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	return ops->entry_action_replace(mlxsw_sp, region->priv,
					 entry->priv, rulei);
}

static int
mlxsw_sp_acl_tcam_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_tcam_entry *entry,
				     bool *activity)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	return ops->entry_activity_get(mlxsw_sp, entry->chunk->region->priv,
				       entry->priv, activity);
}

static int mlxsw_sp_acl_tcam_ventry_add(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_acl_tcam_vgroup *vgroup,
					struct mlxsw_sp_acl_tcam_ventry *ventry,
					struct mlxsw_sp_acl_rule_info *rulei)
{
	struct mlxsw_sp_acl_tcam_vregion *vregion;
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;
	int err;

	vchunk = mlxsw_sp_acl_tcam_vchunk_get(mlxsw_sp, vgroup, rulei->priority,
					      &rulei->values.elusage);
	if (IS_ERR(vchunk))
		return PTR_ERR(vchunk);

	ventry->vchunk = vchunk;
	ventry->rulei = rulei;
	vregion = vchunk->vregion;

	mutex_lock(&vregion->lock);
	ventry->entry = mlxsw_sp_acl_tcam_entry_create(mlxsw_sp, ventry,
						       vchunk->chunk);
	if (IS_ERR(ventry->entry)) {
		mutex_unlock(&vregion->lock);
		err = PTR_ERR(ventry->entry);
		goto err_entry_create;
	}

	list_add_tail(&ventry->list, &vchunk->ventry_list);
	mlxsw_sp_acl_tcam_rehash_ctx_vchunk_changed(vchunk);
	mutex_unlock(&vregion->lock);

	return 0;

err_entry_create:
	mlxsw_sp_acl_tcam_vchunk_put(mlxsw_sp, vchunk);
	return err;
}

static void mlxsw_sp_acl_tcam_ventry_del(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_acl_tcam_ventry *ventry)
{
	struct mlxsw_sp_acl_tcam_vchunk *vchunk = ventry->vchunk;
	struct mlxsw_sp_acl_tcam_vregion *vregion = vchunk->vregion;

	mutex_lock(&vregion->lock);
	mlxsw_sp_acl_tcam_rehash_ctx_vchunk_changed(vchunk);
	list_del(&ventry->list);
	mlxsw_sp_acl_tcam_entry_destroy(mlxsw_sp, ventry->entry);
	mutex_unlock(&vregion->lock);
	mlxsw_sp_acl_tcam_vchunk_put(mlxsw_sp, vchunk);
}

static int
mlxsw_sp_acl_tcam_ventry_action_replace(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_acl_tcam_ventry *ventry,
					struct mlxsw_sp_acl_rule_info *rulei)
{
	struct mlxsw_sp_acl_tcam_vchunk *vchunk = ventry->vchunk;

	return mlxsw_sp_acl_tcam_entry_action_replace(mlxsw_sp,
						      vchunk->vregion->region,
						      ventry->entry, rulei);
}

static int
mlxsw_sp_acl_tcam_ventry_activity_get(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_acl_tcam_ventry *ventry,
				      bool *activity)
{
	return mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp,
						    ventry->entry, activity);
}

static int
mlxsw_sp_acl_tcam_ventry_migrate(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_ventry *ventry,
				 struct mlxsw_sp_acl_tcam_chunk *chunk,
				 int *credits)
{
	struct mlxsw_sp_acl_tcam_entry *new_entry;

	/* First, check whether the entry is already where we want it to be. */
	if (ventry->entry->chunk == chunk)
		return 0;

	if (--(*credits) < 0)
		return 0;

	new_entry = mlxsw_sp_acl_tcam_entry_create(mlxsw_sp, ventry, chunk);
	if (IS_ERR(new_entry))
		return PTR_ERR(new_entry);
	mlxsw_sp_acl_tcam_entry_destroy(mlxsw_sp, ventry->entry);
	ventry->entry = new_entry;
	return 0;
}
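
/* Note the make-before-break order above: the entry is first created in the
 * destination chunk and only then destroyed in the source one, so at every
 * point during migration the rule is present in at least one region of the
 * vregion. Together with the "multi" sibling marking in
 * mlxsw_sp_acl_tcam_group_update(), this is what lets the rehash run
 * without dropping traffic that the rule should match.
 */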

static int
mlxsw_sp_acl_tcam_vchunk_migrate_start(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_acl_tcam_vchunk *vchunk,
				       struct mlxsw_sp_acl_tcam_region *region,
				       struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
{
	struct mlxsw_sp_acl_tcam_chunk *new_chunk;

	new_chunk = mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, vchunk, region);
	if (IS_ERR(new_chunk))
		return PTR_ERR(new_chunk);
	vchunk->chunk2 = vchunk->chunk;
	vchunk->chunk = new_chunk;
	ctx->current_vchunk = vchunk;
	ctx->start_ventry = NULL;
	ctx->stop_ventry = NULL;
	return 0;
}

static void
mlxsw_sp_acl_tcam_vchunk_migrate_end(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_tcam_vchunk *vchunk,
				     struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
{
	mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk2);
	vchunk->chunk2 = NULL;
	ctx->current_vchunk = NULL;
}

static int
mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_tcam_vchunk *vchunk,
				     struct mlxsw_sp_acl_tcam_region *region,
				     struct mlxsw_sp_acl_tcam_rehash_ctx *ctx,
				     int *credits)
{
	struct mlxsw_sp_acl_tcam_ventry *ventry;
	int err;

	if (vchunk->chunk->region != region) {
		err = mlxsw_sp_acl_tcam_vchunk_migrate_start(mlxsw_sp, vchunk,
							     region, ctx);
		if (err)
			return err;
	} else if (!vchunk->chunk2) {
		/* The chunk is already as it should be, nothing to do. */
		return 0;
	}

	/* If the migration got interrupted, we have the ventry to start from
	 * stored in the context.
	 */
	if (ctx->start_ventry)
		ventry = ctx->start_ventry;
	else
		ventry = list_first_entry(&vchunk->ventry_list,
					  typeof(*ventry), list);

	list_for_each_entry_from(ventry, &vchunk->ventry_list, list) {
		/* During rollback, once we reach the ventry that failed
		 * to migrate, we are done.
		 */
		if (ventry == ctx->stop_ventry)
			break;

		err = mlxsw_sp_acl_tcam_ventry_migrate(mlxsw_sp, ventry,
						       vchunk->chunk, credits);
		if (err) {
			if (ctx->this_is_rollback) {
				/* Save the ventry which we ended with and try
				 * to continue later on.
				 */
				ctx->start_ventry = ventry;
				return err;
			}
			/* Swap the chunk and chunk2 pointers so the follow-up
			 * rollback call will see the original chunk pointer
			 * in vchunk->chunk.
			 */
			swap(vchunk->chunk, vchunk->chunk2);
			/* The rollback has to be done from the beginning of
			 * the chunk, which is why we have to NULL the
			 * start_ventry. However, we know where to stop the
			 * rollback: at the current ventry.
			 */
			ctx->start_ventry = NULL;
			ctx->stop_ventry = ventry;
			return err;
		} else if (*credits < 0) {
			/* We are out of credits, the rest of the ventries
			 * will be migrated later. Save the ventry
			 * which we ended with.
			 */
			ctx->start_ventry = ventry;
			return 0;
		}
	}

	mlxsw_sp_acl_tcam_vchunk_migrate_end(mlxsw_sp, vchunk, ctx);
	return 0;
}
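
/* A short walkthrough of the failure path above, with a hypothetical vchunk
 * holding ventries e1..e4: migration into the new chunk succeeds for e1 and
 * e2 but fails at e3. At that point vchunk->chunk (new) and vchunk->chunk2
 * (old) are swapped back, start_ventry is cleared and stop_ventry = e3, so
 * the rollback pass re-runs the same loop from e1, moving e1 and e2 back
 * into the old chunk, and stops when it reaches e3, which never left it.
 * If the rollback itself runs out of credits or fails, start_ventry
 * remembers where to pick it up on the next worker run.
 */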

static int
mlxsw_sp_acl_tcam_vchunk_migrate_all(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_tcam_vregion *vregion,
				     struct mlxsw_sp_acl_tcam_rehash_ctx *ctx,
				     int *credits)
{
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;
	int err;

	/* If the migration got interrupted, we have the vchunk
	 * we are working on stored in the context.
	 */
	if (ctx->current_vchunk)
		vchunk = ctx->current_vchunk;
	else
		vchunk = list_first_entry(&vregion->vchunk_list,
					  typeof(*vchunk), list);

	list_for_each_entry_from(vchunk, &vregion->vchunk_list, list) {
		err = mlxsw_sp_acl_tcam_vchunk_migrate_one(mlxsw_sp, vchunk,
							   vregion->region,
							   ctx, credits);
		if (err || *credits < 0)
			return err;
	}
	return 0;
}

static int
mlxsw_sp_acl_tcam_vregion_migrate(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_acl_tcam_vregion *vregion,
				  struct mlxsw_sp_acl_tcam_rehash_ctx *ctx,
				  int *credits)
{
	int err, err2;

	trace_mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion);
	mutex_lock(&vregion->lock);
	err = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion,
						   ctx, credits);
	if (err) {
		/* In case the migration was not successful, we need to swap
		 * so the original region pointer is assigned again to
		 * vregion->region.
		 */
		swap(vregion->region, vregion->region2);
		ctx->current_vchunk = NULL;
		ctx->this_is_rollback = true;
		err2 = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion,
							    ctx, credits);
		if (err2) {
			trace_mlxsw_sp_acl_tcam_vregion_rehash_rollback_failed(mlxsw_sp,
									       vregion);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to rollback during vregion migration failure\n");
			/* Let the rollback be continued later on. */
		}
	}
	mutex_unlock(&vregion->lock);
	trace_mlxsw_sp_acl_tcam_vregion_migrate_end(mlxsw_sp, vregion);
	return err;
}

static bool
mlxsw_sp_acl_tcam_vregion_rehash_in_progress(const struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
{
	return ctx->hints_priv;
}

static int
mlxsw_sp_acl_tcam_vregion_rehash_start(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_acl_tcam_vregion *vregion,
				       struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	unsigned int priority = mlxsw_sp_acl_tcam_vregion_prio(vregion);
	struct mlxsw_sp_acl_tcam_region *new_region;
	void *hints_priv;
	int err;

	trace_mlxsw_sp_acl_tcam_vregion_rehash(mlxsw_sp, vregion);

	hints_priv = ops->region_rehash_hints_get(vregion->region->priv);
	if (IS_ERR(hints_priv))
		return PTR_ERR(hints_priv);

	new_region = mlxsw_sp_acl_tcam_region_create(mlxsw_sp, vregion->tcam,
						     vregion, hints_priv);
	if (IS_ERR(new_region)) {
		err = PTR_ERR(new_region);
		goto err_region_create;
	}

	/* vregion->region contains the pointer to the new region
	 * we are going to migrate to.
	 */
	vregion->region2 = vregion->region;
	vregion->region = new_region;
	err = mlxsw_sp_acl_tcam_group_region_attach(mlxsw_sp,
						    vregion->region2->group,
						    new_region, priority,
						    vregion->region2);
	if (err)
		goto err_group_region_attach;

	ctx->hints_priv = hints_priv;
	ctx->this_is_rollback = false;

	return 0;

err_group_region_attach:
	vregion->region = vregion->region2;
	vregion->region2 = NULL;
	mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, new_region);
err_region_create:
	ops->region_rehash_hints_put(hints_priv);
	return err;
}
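
/* After a successful rehash start, the vregion briefly owns two regions:
 * vregion->region points at the freshly created region (laid out according
 * to the hints) and vregion->region2 at the old one, with the new region
 * attached in the group directly before the old one as its sibling. The
 * lifecycle over one rehash cycle is thus:
 *
 *	rehash_start:	region = NEW, region2 = OLD, both in the group
 *	migrate:	entries move OLD -> NEW, possibly over several
 *			worker runs
 *	rehash_end:	region2 detached and destroyed, region2 = NULL
 */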

static void
mlxsw_sp_acl_tcam_vregion_rehash_end(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_tcam_vregion *vregion,
				     struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
{
	struct mlxsw_sp_acl_tcam_region *unused_region = vregion->region2;
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	vregion->region2 = NULL;
	mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp, unused_region);
	mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, unused_region);
	ops->region_rehash_hints_put(ctx->hints_priv);
	ctx->hints_priv = NULL;
}

static void
mlxsw_sp_acl_tcam_vregion_rehash(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_vregion *vregion,
				 int *credits)
{
	struct mlxsw_sp_acl_tcam_rehash_ctx *ctx = &vregion->rehash.ctx;
	int err;

	/* Check if the previous rehash work was interrupted,
	 * which means we have to continue it now.
	 * If not, start a new rehash.
	 */
	if (!mlxsw_sp_acl_tcam_vregion_rehash_in_progress(ctx)) {
		err = mlxsw_sp_acl_tcam_vregion_rehash_start(mlxsw_sp,
							     vregion, ctx);
		if (err) {
			if (err != -EAGAIN)
				dev_err(mlxsw_sp->bus_info->dev, "Failed to get rehash hints\n");
			return;
		}
	}

	err = mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion,
						ctx, credits);
	if (err)
		dev_err(mlxsw_sp->bus_info->dev, "Failed to migrate vregion\n");

	if (*credits >= 0)
		mlxsw_sp_acl_tcam_vregion_rehash_end(mlxsw_sp, vregion, ctx);
}

static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = {
	MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
	MLXSW_AFK_ELEMENT_DMAC_32_47,
	MLXSW_AFK_ELEMENT_DMAC_0_31,
	MLXSW_AFK_ELEMENT_SMAC_32_47,
	MLXSW_AFK_ELEMENT_SMAC_0_31,
	MLXSW_AFK_ELEMENT_ETHERTYPE,
	MLXSW_AFK_ELEMENT_IP_PROTO,
	MLXSW_AFK_ELEMENT_SRC_IP_0_31,
	MLXSW_AFK_ELEMENT_DST_IP_0_31,
	MLXSW_AFK_ELEMENT_DST_L4_PORT,
	MLXSW_AFK_ELEMENT_SRC_L4_PORT,
	MLXSW_AFK_ELEMENT_VID,
	MLXSW_AFK_ELEMENT_PCP,
	MLXSW_AFK_ELEMENT_TCP_FLAGS,
	MLXSW_AFK_ELEMENT_IP_TTL_,
	MLXSW_AFK_ELEMENT_IP_ECN,
	MLXSW_AFK_ELEMENT_IP_DSCP,
};

static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv6[] = {
	MLXSW_AFK_ELEMENT_ETHERTYPE,
	MLXSW_AFK_ELEMENT_IP_PROTO,
	MLXSW_AFK_ELEMENT_SRC_IP_96_127,
	MLXSW_AFK_ELEMENT_SRC_IP_64_95,
	MLXSW_AFK_ELEMENT_SRC_IP_32_63,
	MLXSW_AFK_ELEMENT_SRC_IP_0_31,
	MLXSW_AFK_ELEMENT_DST_IP_96_127,
	MLXSW_AFK_ELEMENT_DST_IP_64_95,
	MLXSW_AFK_ELEMENT_DST_IP_32_63,
	MLXSW_AFK_ELEMENT_DST_IP_0_31,
	MLXSW_AFK_ELEMENT_DST_L4_PORT,
	MLXSW_AFK_ELEMENT_SRC_L4_PORT,
};

static const struct mlxsw_sp_acl_tcam_pattern mlxsw_sp_acl_tcam_patterns[] = {
	{
		.elements = mlxsw_sp_acl_tcam_pattern_ipv4,
		.elements_count = ARRAY_SIZE(mlxsw_sp_acl_tcam_pattern_ipv4),
	},
	{
		.elements = mlxsw_sp_acl_tcam_pattern_ipv6,
		.elements_count = ARRAY_SIZE(mlxsw_sp_acl_tcam_pattern_ipv6),
	},
};

#define MLXSW_SP_ACL_TCAM_PATTERNS_COUNT \
	ARRAY_SIZE(mlxsw_sp_acl_tcam_patterns)

struct mlxsw_sp_acl_tcam_flower_ruleset {
	struct mlxsw_sp_acl_tcam_vgroup vgroup;
};

struct mlxsw_sp_acl_tcam_flower_rule {
	struct mlxsw_sp_acl_tcam_ventry ventry;
};

static int
mlxsw_sp_acl_tcam_flower_ruleset_add(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_tcam *tcam,
				     void *ruleset_priv,
				     struct mlxsw_afk_element_usage *tmplt_elusage)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

	return mlxsw_sp_acl_tcam_vgroup_add(mlxsw_sp, tcam, &ruleset->vgroup,
					    mlxsw_sp_acl_tcam_patterns,
					    MLXSW_SP_ACL_TCAM_PATTERNS_COUNT,
					    tmplt_elusage, true);
}

static void
mlxsw_sp_acl_tcam_flower_ruleset_del(struct mlxsw_sp *mlxsw_sp,
				     void *ruleset_priv)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

	mlxsw_sp_acl_tcam_vgroup_del(&ruleset->vgroup);
}

static int
mlxsw_sp_acl_tcam_flower_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
				      void *ruleset_priv,
				      struct mlxsw_sp_port *mlxsw_sp_port,
				      bool ingress)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

	return mlxsw_sp_acl_tcam_group_bind(mlxsw_sp, &ruleset->vgroup.group,
					    mlxsw_sp_port, ingress);
}

static void
mlxsw_sp_acl_tcam_flower_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
					void *ruleset_priv,
					struct mlxsw_sp_port *mlxsw_sp_port,
					bool ingress)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

	mlxsw_sp_acl_tcam_group_unbind(mlxsw_sp, &ruleset->vgroup.group,
				       mlxsw_sp_port, ingress);
}

static u16
mlxsw_sp_acl_tcam_flower_ruleset_group_id(void *ruleset_priv)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

	return mlxsw_sp_acl_tcam_group_id(&ruleset->vgroup.group);
}

static int
mlxsw_sp_acl_tcam_flower_rule_add(struct mlxsw_sp *mlxsw_sp,
				  void *ruleset_priv, void *rule_priv,
				  struct mlxsw_sp_acl_rule_info *rulei)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
	struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;

	return mlxsw_sp_acl_tcam_ventry_add(mlxsw_sp, &ruleset->vgroup,
					    &rule->ventry, rulei);
}

static void
mlxsw_sp_acl_tcam_flower_rule_del(struct mlxsw_sp *mlxsw_sp, void *rule_priv)
{
	struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;

	mlxsw_sp_acl_tcam_ventry_del(mlxsw_sp, &rule->ventry);
}

static int
mlxsw_sp_acl_tcam_flower_rule_action_replace(struct mlxsw_sp *mlxsw_sp,
					     void *rule_priv,
					     struct mlxsw_sp_acl_rule_info *rulei)
{
	return -EOPNOTSUPP;
}

static int
mlxsw_sp_acl_tcam_flower_rule_activity_get(struct mlxsw_sp *mlxsw_sp,
					   void *rule_priv, bool *activity)
{
	struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;

	return mlxsw_sp_acl_tcam_ventry_activity_get(mlxsw_sp, &rule->ventry,
						     activity);
}

static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_flower_ops = {
	.ruleset_priv_size = sizeof(struct mlxsw_sp_acl_tcam_flower_ruleset),
	.ruleset_add = mlxsw_sp_acl_tcam_flower_ruleset_add,
	.ruleset_del = mlxsw_sp_acl_tcam_flower_ruleset_del,
	.ruleset_bind = mlxsw_sp_acl_tcam_flower_ruleset_bind,
	.ruleset_unbind = mlxsw_sp_acl_tcam_flower_ruleset_unbind,
	.ruleset_group_id = mlxsw_sp_acl_tcam_flower_ruleset_group_id,
	.rule_priv_size = sizeof(struct mlxsw_sp_acl_tcam_flower_rule),
	.rule_add = mlxsw_sp_acl_tcam_flower_rule_add,
	.rule_del = mlxsw_sp_acl_tcam_flower_rule_del,
	.rule_action_replace = mlxsw_sp_acl_tcam_flower_rule_action_replace,
	.rule_activity_get = mlxsw_sp_acl_tcam_flower_rule_activity_get,
};

struct mlxsw_sp_acl_tcam_mr_ruleset {
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;
	struct mlxsw_sp_acl_tcam_vgroup vgroup;
};

struct mlxsw_sp_acl_tcam_mr_rule {
	struct mlxsw_sp_acl_tcam_ventry ventry;
};

static int
mlxsw_sp_acl_tcam_mr_ruleset_add(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam *tcam,
				 void *ruleset_priv,
				 struct mlxsw_afk_element_usage *tmplt_elusage)
{
	struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;
	int err;

	err = mlxsw_sp_acl_tcam_vgroup_add(mlxsw_sp, tcam, &ruleset->vgroup,
					   mlxsw_sp_acl_tcam_patterns,
					   MLXSW_SP_ACL_TCAM_PATTERNS_COUNT,
					   tmplt_elusage, false);
	if (err)
		return err;

	/* For most TCAM clients it would make sense to take a TCAM chunk
	 * only when the first rule is written. This is not the case for
	 * the multicast router, as it has to be bound to a specific ACL
	 * group ID which must exist in HW before the multicast router is
	 * initialized.
	 */
	ruleset->vchunk = mlxsw_sp_acl_tcam_vchunk_get(mlxsw_sp,
						       &ruleset->vgroup, 1,
						       tmplt_elusage);
	if (IS_ERR(ruleset->vchunk)) {
		err = PTR_ERR(ruleset->vchunk);
		goto err_chunk_get;
	}

	return 0;

err_chunk_get:
	mlxsw_sp_acl_tcam_vgroup_del(&ruleset->vgroup);
	return err;
}

static void
mlxsw_sp_acl_tcam_mr_ruleset_del(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv)
{
	struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;

	mlxsw_sp_acl_tcam_vchunk_put(mlxsw_sp, ruleset->vchunk);
	mlxsw_sp_acl_tcam_vgroup_del(&ruleset->vgroup);
}

static int
mlxsw_sp_acl_tcam_mr_ruleset_bind(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
				  struct mlxsw_sp_port *mlxsw_sp_port,
				  bool ingress)
{
	/* Binding is done when initializing the multicast router */
	return 0;
}

static void
mlxsw_sp_acl_tcam_mr_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
				    void *ruleset_priv,
				    struct mlxsw_sp_port *mlxsw_sp_port,
				    bool ingress)
{
}

static u16
mlxsw_sp_acl_tcam_mr_ruleset_group_id(void *ruleset_priv)
{
	struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;

	return mlxsw_sp_acl_tcam_group_id(&ruleset->vgroup.group);
}

static int
mlxsw_sp_acl_tcam_mr_rule_add(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
			      void *rule_priv,
			      struct mlxsw_sp_acl_rule_info *rulei)
{
	struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;
	struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;

	return mlxsw_sp_acl_tcam_ventry_add(mlxsw_sp, &ruleset->vgroup,
					    &rule->ventry, rulei);
}

static void
mlxsw_sp_acl_tcam_mr_rule_del(struct mlxsw_sp *mlxsw_sp, void *rule_priv)
{
	struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;

	mlxsw_sp_acl_tcam_ventry_del(mlxsw_sp, &rule->ventry);
}

static int
mlxsw_sp_acl_tcam_mr_rule_action_replace(struct mlxsw_sp *mlxsw_sp,
					 void *rule_priv,
					 struct mlxsw_sp_acl_rule_info *rulei)
{
	struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;

	return mlxsw_sp_acl_tcam_ventry_action_replace(mlxsw_sp, &rule->ventry,
						       rulei);
}

static int
mlxsw_sp_acl_tcam_mr_rule_activity_get(struct mlxsw_sp *mlxsw_sp,
				       void *rule_priv, bool *activity)
{
	struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;

	return mlxsw_sp_acl_tcam_ventry_activity_get(mlxsw_sp, &rule->ventry,
						     activity);
}

static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_mr_ops = {
	.ruleset_priv_size = sizeof(struct mlxsw_sp_acl_tcam_mr_ruleset),
	.ruleset_add = mlxsw_sp_acl_tcam_mr_ruleset_add,
	.ruleset_del = mlxsw_sp_acl_tcam_mr_ruleset_del,
	.ruleset_bind = mlxsw_sp_acl_tcam_mr_ruleset_bind,
	.ruleset_unbind = mlxsw_sp_acl_tcam_mr_ruleset_unbind,
	.ruleset_group_id = mlxsw_sp_acl_tcam_mr_ruleset_group_id,
	.rule_priv_size = sizeof(struct mlxsw_sp_acl_tcam_mr_rule),
	.rule_add = mlxsw_sp_acl_tcam_mr_rule_add,
	.rule_del = mlxsw_sp_acl_tcam_mr_rule_del,
	.rule_action_replace = mlxsw_sp_acl_tcam_mr_rule_action_replace,
	.rule_activity_get = mlxsw_sp_acl_tcam_mr_rule_activity_get,
};

static const struct mlxsw_sp_acl_profile_ops *
mlxsw_sp_acl_tcam_profile_ops_arr[] = {
	[MLXSW_SP_ACL_PROFILE_FLOWER] = &mlxsw_sp_acl_tcam_flower_ops,
	[MLXSW_SP_ACL_PROFILE_MR] = &mlxsw_sp_acl_tcam_mr_ops,
};

const struct mlxsw_sp_acl_profile_ops *
mlxsw_sp_acl_tcam_profile_ops(struct mlxsw_sp *mlxsw_sp,
			      enum mlxsw_sp_acl_profile profile)
{
	const struct mlxsw_sp_acl_profile_ops *ops;

	if (WARN_ON(profile >= ARRAY_SIZE(mlxsw_sp_acl_tcam_profile_ops_arr)))
		return NULL;
	ops = mlxsw_sp_acl_tcam_profile_ops_arr[profile];
	if (WARN_ON(!ops))
		return NULL;
	return ops;
}