]> git.ipfire.org Git - thirdparty/linux.git/blob - drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
Merge tag 'driver-core-5.3-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git...
[thirdparty/linux.git] / drivers / net / ethernet / mellanox / mlxsw / spectrum_buffers.c
1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
3
4 #include <linux/kernel.h>
5 #include <linux/types.h>
6 #include <linux/dcbnl.h>
7 #include <linux/if_ether.h>
8 #include <linux/list.h>
9 #include <linux/netlink.h>
10
11 #include "spectrum.h"
12 #include "core.h"
13 #include "port.h"
14 #include "reg.h"
15
/* Cached state of one shared buffer pool (SBPR register). */
struct mlxsw_sp_sb_pr {
	enum mlxsw_reg_sbpr_mode mode;	/* static or dynamic thresholding */
	u32 size;			/* pool size, in cells */
	u8 freeze_mode:1,	/* forbid changing 'mode' via devlink */
	   freeze_size:1;	/* forbid changing 'size' via devlink */
};
22
/* Occupancy sample as unpacked from the device: current level and
 * maximum (watermark).
 * NOTE(review): the "mlxsw_cp_" prefix looks like a typo for "mlxsw_sp_",
 * but renaming would touch every user of the struct; kept as-is.
 */
struct mlxsw_cp_sb_occ {
	u32 cur;
	u32 max;
};
27
/* Cached per-{port, PG/TC} binding state (SBCM register). */
struct mlxsw_sp_sb_cm {
	u32 min_buff;
	u32 max_buff;
	u16 pool_index;			/* index into sb_vals->pool_dess */
	struct mlxsw_cp_sb_occ occ;	/* last queried occupancy */
	u8 freeze_pool:1,	/* forbid rebinding to another pool */
	   freeze_thresh:1;	/* forbid changing the threshold */
};

/* Sentinel meaning "infinite" size / threshold when writing SBPR / SBCM. */
#define MLXSW_SP_SB_INFI -1U
/* Cached per-{port, pool} quota state (SBPM register). */
struct mlxsw_sp_sb_pm {
	u32 min_buff;
	u32 max_buff;
	struct mlxsw_cp_sb_occ occ;	/* last queried occupancy */
};
44
/* One SBMM (port-independent multicast buffer) configuration entry. */
struct mlxsw_sp_sb_mm {
	u32 min_buff;
	u32 max_buff;
	u16 pool_index;	/* index into sb_vals->pool_dess */
};
50
/* Maps a flat pool index (as exposed to devlink) to the per-direction
 * pool number used by the SBxx registers.
 */
struct mlxsw_sp_sb_pool_des {
	enum mlxsw_reg_sbxx_dir dir;
	u8 pool;
};

/* Well-known flat indices into the pool descriptor arrays below. */
#define MLXSW_SP_SB_POOL_ING		0
#define MLXSW_SP_SB_POOL_EGR		4
#define MLXSW_SP_SB_POOL_EGR_MC		8
#define MLXSW_SP_SB_POOL_ING_CPU	9
#define MLXSW_SP_SB_POOL_EGR_CPU	10
61
/* Spectrum-1 pool descriptors. Order must match the MLXSW_SP_SB_POOL_*
 * indices above; e.g. flat index 8 is egress pool 15 (the MC pool).
 */
static const struct mlxsw_sp_sb_pool_des mlxsw_sp1_sb_pool_dess[] = {
	{MLXSW_REG_SBXX_DIR_INGRESS, 0},
	{MLXSW_REG_SBXX_DIR_INGRESS, 1},
	{MLXSW_REG_SBXX_DIR_INGRESS, 2},
	{MLXSW_REG_SBXX_DIR_INGRESS, 3},
	{MLXSW_REG_SBXX_DIR_EGRESS, 0},
	{MLXSW_REG_SBXX_DIR_EGRESS, 1},
	{MLXSW_REG_SBXX_DIR_EGRESS, 2},
	{MLXSW_REG_SBXX_DIR_EGRESS, 3},
	{MLXSW_REG_SBXX_DIR_EGRESS, 15},
	{MLXSW_REG_SBXX_DIR_INGRESS, 4},
	{MLXSW_REG_SBXX_DIR_EGRESS, 4},
};
75
/* Spectrum-2 pool descriptors; currently identical to Spectrum-1, but kept
 * separate so the generations can diverge independently.
 */
static const struct mlxsw_sp_sb_pool_des mlxsw_sp2_sb_pool_dess[] = {
	{MLXSW_REG_SBXX_DIR_INGRESS, 0},
	{MLXSW_REG_SBXX_DIR_INGRESS, 1},
	{MLXSW_REG_SBXX_DIR_INGRESS, 2},
	{MLXSW_REG_SBXX_DIR_INGRESS, 3},
	{MLXSW_REG_SBXX_DIR_EGRESS, 0},
	{MLXSW_REG_SBXX_DIR_EGRESS, 1},
	{MLXSW_REG_SBXX_DIR_EGRESS, 2},
	{MLXSW_REG_SBXX_DIR_EGRESS, 3},
	{MLXSW_REG_SBXX_DIR_EGRESS, 15},
	{MLXSW_REG_SBXX_DIR_INGRESS, 4},
	{MLXSW_REG_SBXX_DIR_EGRESS, 4},
};
89
#define MLXSW_SP_SB_ING_TC_COUNT 8
#define MLXSW_SP_SB_EG_TC_COUNT 16

/* Per-port shared buffer state. */
struct mlxsw_sp_sb_port {
	struct mlxsw_sp_sb_cm ing_cms[MLXSW_SP_SB_ING_TC_COUNT];	/* per ingress PG */
	struct mlxsw_sp_sb_cm eg_cms[MLXSW_SP_SB_EG_TC_COUNT];		/* per egress TC */
	struct mlxsw_sp_sb_pm *pms;	/* one entry per pool; allocated at init */
};
98
/* Top-level shared buffer state, hanging off struct mlxsw_sp. */
struct mlxsw_sp_sb {
	struct mlxsw_sp_sb_pr *prs;	/* one entry per pool */
	struct mlxsw_sp_sb_port *ports;	/* indexed by local port */
	u32 cell_size;			/* buffer cell size, in bytes */
	u32 max_headroom_cells;		/* headroom limit, in cells */
	u64 sb_size;			/* total shared buffer size, in bytes */
};
106
/* Per-ASIC-generation shared buffer defaults; instantiated once for
 * Spectrum-1 and once for Spectrum-2 at the bottom of this file.
 */
struct mlxsw_sp_sb_vals {
	unsigned int pool_count;			/* length of pool_dess, pms, prs */
	const struct mlxsw_sp_sb_pool_des *pool_dess;	/* flat index -> (dir, pool) */
	const struct mlxsw_sp_sb_pm *pms;		/* per-port pool quotas */
	const struct mlxsw_sp_sb_pm *pms_cpu;		/* CPU port pool quotas */
	const struct mlxsw_sp_sb_pr *prs;		/* pool defaults */
	const struct mlxsw_sp_sb_mm *mms;		/* MC buffer defaults */
	const struct mlxsw_sp_sb_cm *cms_ingress;	/* per-PG ingress bindings */
	const struct mlxsw_sp_sb_cm *cms_egress;	/* per-TC egress bindings */
	const struct mlxsw_sp_sb_cm *cms_cpu;		/* CPU port TC bindings */
	unsigned int mms_count;
	unsigned int cms_ingress_count;
	unsigned int cms_egress_count;
	unsigned int cms_cpu_count;
};
122
123 u32 mlxsw_sp_cells_bytes(const struct mlxsw_sp *mlxsw_sp, u32 cells)
124 {
125 return mlxsw_sp->sb->cell_size * cells;
126 }
127
/* Convert a byte count to cells, rounding up so the bytes always fit. */
u32 mlxsw_sp_bytes_cells(const struct mlxsw_sp *mlxsw_sp, u32 bytes)
{
	return DIV_ROUND_UP(bytes, mlxsw_sp->sb->cell_size);
}
132
/* Maximum per-port headroom, in cells; computed once at init from the
 * MAX_HEADROOM_SIZE resource.
 */
u32 mlxsw_sp_sb_max_headroom_cells(const struct mlxsw_sp *mlxsw_sp)
{
	return mlxsw_sp->sb->max_headroom_cells;
}
137
/* Look up the cached pool entry for a flat pool index. No bounds check;
 * callers pass indices below sb_vals->pool_count.
 */
static struct mlxsw_sp_sb_pr *mlxsw_sp_sb_pr_get(struct mlxsw_sp *mlxsw_sp,
						 u16 pool_index)
{
	return &mlxsw_sp->sb->prs[pool_index];
}
143
144 static bool mlxsw_sp_sb_cm_exists(u8 pg_buff, enum mlxsw_reg_sbxx_dir dir)
145 {
146 if (dir == MLXSW_REG_SBXX_DIR_INGRESS)
147 return pg_buff < MLXSW_SP_SB_ING_TC_COUNT;
148 else
149 return pg_buff < MLXSW_SP_SB_EG_TC_COUNT;
150 }
151
152 static struct mlxsw_sp_sb_cm *mlxsw_sp_sb_cm_get(struct mlxsw_sp *mlxsw_sp,
153 u8 local_port, u8 pg_buff,
154 enum mlxsw_reg_sbxx_dir dir)
155 {
156 struct mlxsw_sp_sb_port *sb_port = &mlxsw_sp->sb->ports[local_port];
157
158 WARN_ON(!mlxsw_sp_sb_cm_exists(pg_buff, dir));
159 if (dir == MLXSW_REG_SBXX_DIR_INGRESS)
160 return &sb_port->ing_cms[pg_buff];
161 else
162 return &sb_port->eg_cms[pg_buff];
163 }
164
/* Look up the cached PM entry for a {port, pool} pair. */
static struct mlxsw_sp_sb_pm *mlxsw_sp_sb_pm_get(struct mlxsw_sp *mlxsw_sp,
						 u8 local_port, u16 pool_index)
{
	return &mlxsw_sp->sb->ports[local_port].pms[pool_index];
}
170
/* Write pool configuration (SBPR) to the device, then mirror it into the
 * software cache. 'size' is in cells; when 'infi_size' is set the device
 * gets the infinite-size flag and the cache records the whole shared
 * buffer size instead, so devlink reports a concrete number.
 */
static int mlxsw_sp_sb_pr_write(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
				enum mlxsw_reg_sbpr_mode mode,
				u32 size, bool infi_size)
{
	const struct mlxsw_sp_sb_pool_des *des =
		&mlxsw_sp->sb_vals->pool_dess[pool_index];
	char sbpr_pl[MLXSW_REG_SBPR_LEN];
	struct mlxsw_sp_sb_pr *pr;
	int err;

	mlxsw_reg_sbpr_pack(sbpr_pl, des->pool, des->dir, mode,
			    size, infi_size);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpr), sbpr_pl);
	if (err)
		return err;

	/* Cache is only updated after the register write succeeded. */
	if (infi_size)
		size = mlxsw_sp_bytes_cells(mlxsw_sp, mlxsw_sp->sb->sb_size);
	pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
	pr->mode = mode;
	pr->size = size;
	return 0;
}
194
/* Bind a {port, PG/TC} to a pool with the given quotas (SBCM register),
 * then mirror the result into the software cache. When 'infi_max' is set
 * the device gets the infinite-threshold flag and the cache records the
 * whole shared buffer size in cells instead. The cache is skipped for
 * PG/TC numbers that have no entry in this direction (e.g. ingress PG 8+).
 */
static int mlxsw_sp_sb_cm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				u8 pg_buff, u32 min_buff, u32 max_buff,
				bool infi_max, u16 pool_index)
{
	const struct mlxsw_sp_sb_pool_des *des =
		&mlxsw_sp->sb_vals->pool_dess[pool_index];
	char sbcm_pl[MLXSW_REG_SBCM_LEN];
	struct mlxsw_sp_sb_cm *cm;
	int err;

	mlxsw_reg_sbcm_pack(sbcm_pl, local_port, pg_buff, des->dir,
			    min_buff, max_buff, infi_max, des->pool);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbcm), sbcm_pl);
	if (err)
		return err;

	if (mlxsw_sp_sb_cm_exists(pg_buff, des->dir)) {
		if (infi_max)
			max_buff = mlxsw_sp_bytes_cells(mlxsw_sp,
							mlxsw_sp->sb->sb_size);

		cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, pg_buff,
					des->dir);
		cm->min_buff = min_buff;
		cm->max_buff = max_buff;
		cm->pool_index = pool_index;
	}
	return 0;
}
224
/* Write per-{port, pool} quotas (SBPM register), then mirror them into
 * the software cache. The cache is only updated on success.
 */
static int mlxsw_sp_sb_pm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				u16 pool_index, u32 min_buff, u32 max_buff)
{
	const struct mlxsw_sp_sb_pool_des *des =
		&mlxsw_sp->sb_vals->pool_dess[pool_index];
	char sbpm_pl[MLXSW_REG_SBPM_LEN];
	struct mlxsw_sp_sb_pm *pm;
	int err;

	mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir, false,
			    min_buff, max_buff);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl);
	if (err)
		return err;

	pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool_index);
	pm->min_buff = min_buff;
	pm->max_buff = max_buff;
	return 0;
}
245
/* Queue an SBPM query that clears the occupancy watermark for one
 * {port, pool} (fourth pack argument set, unlike the plain query below).
 * The transaction is added to 'bulk_list'; no unpack callback is needed.
 */
static int mlxsw_sp_sb_pm_occ_clear(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u16 pool_index, struct list_head *bulk_list)
{
	const struct mlxsw_sp_sb_pool_des *des =
		&mlxsw_sp->sb_vals->pool_dess[pool_index];
	char sbpm_pl[MLXSW_REG_SBPM_LEN];

	mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir,
			    true, 0, 0);
	return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl,
				     bulk_list, NULL, 0);
}
258
/* Completion callback for an SBPM occupancy query: unpack the response
 * into the cached PM entry passed via 'cb_priv'.
 */
static void mlxsw_sp_sb_pm_occ_query_cb(struct mlxsw_core *mlxsw_core,
					char *sbpm_pl, size_t sbpm_pl_len,
					unsigned long cb_priv)
{
	struct mlxsw_sp_sb_pm *pm = (struct mlxsw_sp_sb_pm *) cb_priv;

	mlxsw_reg_sbpm_unpack(sbpm_pl, &pm->occ.cur, &pm->occ.max);
}
267
/* Queue an SBPM occupancy query for one {port, pool}; the result lands in
 * the cached PM entry via mlxsw_sp_sb_pm_occ_query_cb once the bulk list
 * is executed by the caller.
 */
static int mlxsw_sp_sb_pm_occ_query(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u16 pool_index, struct list_head *bulk_list)
{
	const struct mlxsw_sp_sb_pool_des *des =
		&mlxsw_sp->sb_vals->pool_dess[pool_index];
	char sbpm_pl[MLXSW_REG_SBPM_LEN];
	struct mlxsw_sp_sb_pm *pm;

	pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool_index);
	mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir,
			    false, 0, 0);
	return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl,
				     bulk_list,
				     mlxsw_sp_sb_pm_occ_query_cb,
				     (unsigned long) pm);
}
284
/* 1/4 of a headroom necessary for 100Gbps port and 100m cable. */
#define MLXSW_SP_PB_HEADROOM 25632
#define MLXSW_SP_PB_UNUSED 8

/* Configure the port's headroom buffers (PBMC register): buffer 0 gets a
 * headroom scaled by the port width, buffer 9 one MTU; all other buffers
 * are set to zero size, buffer 8 is skipped entirely (unused), and the
 * port shared buffer is zeroed.
 */
static int mlxsw_sp_port_pb_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	const u32 pbs[] = {
		[0] = MLXSW_SP_PB_HEADROOM * mlxsw_sp_port->mapping.width,
		[9] = MLXSW_PORT_MAX_MTU,
	};
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	int i;

	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port,
			    0xffff, 0xffff / 2);
	for (i = 0; i < ARRAY_SIZE(pbs); i++) {
		u16 size = mlxsw_sp_bytes_cells(mlxsw_sp, pbs[i]);

		if (i == MLXSW_SP_PB_UNUSED)
			continue;
		mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, i, size);
	}
	mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl,
					 MLXSW_REG_PBMC_PORT_SHARED_BUF_IDX, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
}
312
/* Map all eight switch priorities of the port to headroom buffer 0
 * (PPTB register).
 */
static int mlxsw_sp_port_pb_prio_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	char pptb_pl[MLXSW_REG_PPTB_LEN];
	int i;

	mlxsw_reg_pptb_pack(pptb_pl, mlxsw_sp_port->local_port);
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
		mlxsw_reg_pptb_prio_to_buff_pack(pptb_pl, i, 0);
	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb),
			       pptb_pl);
}
324
/* Initialize the port headroom: buffer sizes first, then the
 * priority-to-buffer mapping.
 */
static int mlxsw_sp_port_headroom_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_port_pb_init(mlxsw_sp_port);
	return err ? err : mlxsw_sp_port_pb_prio_init(mlxsw_sp_port);
}
334
335 static int mlxsw_sp_sb_port_init(struct mlxsw_sp *mlxsw_sp,
336 struct mlxsw_sp_sb_port *sb_port)
337 {
338 struct mlxsw_sp_sb_pm *pms;
339
340 pms = kcalloc(mlxsw_sp->sb_vals->pool_count, sizeof(*pms),
341 GFP_KERNEL);
342 if (!pms)
343 return -ENOMEM;
344 sb_port->pms = pms;
345 return 0;
346 }
347
/* Free one port's per-pool PM array. */
static void mlxsw_sp_sb_port_fini(struct mlxsw_sp_sb_port *sb_port)
{
	kfree(sb_port->pms);
}
352
/* Allocate the per-port array, the per-pool PR cache, and each port's PM
 * array. On failure, already-initialized ports are torn down in reverse
 * order before freeing the top-level allocations.
 */
static int mlxsw_sp_sb_ports_init(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	struct mlxsw_sp_sb_pr *prs;
	int i;
	int err;

	mlxsw_sp->sb->ports = kcalloc(max_ports,
				      sizeof(struct mlxsw_sp_sb_port),
				      GFP_KERNEL);
	if (!mlxsw_sp->sb->ports)
		return -ENOMEM;

	prs = kcalloc(mlxsw_sp->sb_vals->pool_count, sizeof(*prs),
		      GFP_KERNEL);
	if (!prs) {
		err = -ENOMEM;
		goto err_alloc_prs;
	}
	mlxsw_sp->sb->prs = prs;

	for (i = 0; i < max_ports; i++) {
		err = mlxsw_sp_sb_port_init(mlxsw_sp, &mlxsw_sp->sb->ports[i]);
		if (err)
			goto err_sb_port_init;
	}

	return 0;

err_sb_port_init:
	/* Unwind only the ports initialized before the failure. */
	for (i--; i >= 0; i--)
		mlxsw_sp_sb_port_fini(&mlxsw_sp->sb->ports[i]);
	kfree(mlxsw_sp->sb->prs);
err_alloc_prs:
	kfree(mlxsw_sp->sb->ports);
	return err;
}
390
/* Tear down everything mlxsw_sp_sb_ports_init() allocated, ports in
 * reverse order.
 */
static void mlxsw_sp_sb_ports_fini(struct mlxsw_sp *mlxsw_sp)
{
	int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	int i;

	for (i = max_ports - 1; i >= 0; i--)
		mlxsw_sp_sb_port_fini(&mlxsw_sp->sb->ports[i]);
	kfree(mlxsw_sp->sb->prs);
	kfree(mlxsw_sp->sb->ports);
}
401
/* Initializer for a pool default; freeze bits default to 0 (changeable). */
#define MLXSW_SP_SB_PR(_mode, _size)	\
	{				\
		.mode = _mode,		\
		.size = _size,		\
	}

/* As above, but also setting the devlink freeze bits explicitly. */
#define MLXSW_SP_SB_PR_EXT(_mode, _size, _freeze_mode, _freeze_size)	\
	{								\
		.mode = _mode,						\
		.size = _size,						\
		.freeze_mode = _freeze_mode,				\
		.freeze_size = _freeze_size,				\
	}
415
#define MLXSW_SP1_SB_PR_INGRESS_SIZE	12440000
#define MLXSW_SP1_SB_PR_EGRESS_SIZE	13232000
#define MLXSW_SP1_SB_PR_CPU_SIZE	(256 * 1000)

/* Spectrum-1 pool defaults; sizes in bytes, converted to cells at init.
 * Order according to mlxsw_sp1_sb_pool_dess.
 */
static const struct mlxsw_sp_sb_pr mlxsw_sp1_sb_prs[] = {
	/* Ingress pools 0-3; only pool 0 is sized. */
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
		       MLXSW_SP1_SB_PR_INGRESS_SIZE),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	/* Egress pools 0-3; only pool 0 is sized, its mode is frozen. */
	MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC,
			   MLXSW_SP1_SB_PR_EGRESS_SIZE, true, false),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	/* Egress MC pool: infinite size, fully frozen. */
	MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_STATIC, MLXSW_SP_SB_INFI,
			   true, true),
	/* Dedicated CPU traffic pools, ingress then egress. */
	MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC,
			   MLXSW_SP1_SB_PR_CPU_SIZE, true, false),
	MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC,
			   MLXSW_SP1_SB_PR_CPU_SIZE, true, false),
};
439
#define MLXSW_SP2_SB_PR_INGRESS_SIZE	35297568
#define MLXSW_SP2_SB_PR_EGRESS_SIZE	35297568
#define MLXSW_SP2_SB_PR_CPU_SIZE	(256 * 1000)

/* Spectrum-2 pool defaults; unused pools are static/zero here, unlike
 * Spectrum-1 where they are dynamic/zero.
 * Order according to mlxsw_sp2_sb_pool_dess.
 */
static const struct mlxsw_sp_sb_pr mlxsw_sp2_sb_prs[] = {
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
		       MLXSW_SP2_SB_PR_INGRESS_SIZE),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
	MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC,
			   MLXSW_SP2_SB_PR_EGRESS_SIZE, true, false),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
	/* Egress MC pool: infinite size, fully frozen. */
	MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_STATIC, MLXSW_SP_SB_INFI,
			   true, true),
	/* Dedicated CPU traffic pools, ingress then egress. */
	MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC,
			   MLXSW_SP2_SB_PR_CPU_SIZE, true, false),
	MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC,
			   MLXSW_SP2_SB_PR_CPU_SIZE, true, false),
};
463
/* Program all pool defaults into the device. Sizes in the tables are in
 * bytes and converted to cells here; MLXSW_SP_SB_INFI entries are written
 * with the infinite-size flag instead of a concrete size.
 */
static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp,
				const struct mlxsw_sp_sb_pr *prs,
				size_t prs_len)
{
	int i;
	int err;

	for (i = 0; i < prs_len; i++) {
		u32 size = prs[i].size;
		u32 size_cells;

		if (size == MLXSW_SP_SB_INFI) {
			err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, prs[i].mode,
						   0, true);
		} else {
			size_cells = mlxsw_sp_bytes_cells(mlxsw_sp, size);
			err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, prs[i].mode,
						   size_cells, false);
		}
		if (err)
			return err;
	}
	return 0;
}
488
/* Initializer for a CM entry bound to an arbitrary pool. */
#define MLXSW_SP_SB_CM(_min_buff, _max_buff, _pool)	\
	{						\
		.min_buff = _min_buff,			\
		.max_buff = _max_buff,			\
		.pool_index = _pool,			\
	}

/* CM entry bound to the main ingress pool. */
#define MLXSW_SP_SB_CM_ING(_min_buff, _max_buff)	\
	{						\
		.min_buff = _min_buff,			\
		.max_buff = _max_buff,			\
		.pool_index = MLXSW_SP_SB_POOL_ING,	\
	}

/* CM entry bound to the main egress pool. */
#define MLXSW_SP_SB_CM_EGR(_min_buff, _max_buff)	\
	{						\
		.min_buff = _min_buff,			\
		.max_buff = _max_buff,			\
		.pool_index = MLXSW_SP_SB_POOL_EGR,	\
	}

/* CM entry bound to the egress MC pool; binding and threshold are frozen
 * so devlink cannot change them.
 */
#define MLXSW_SP_SB_CM_EGR_MC(_min_buff, _max_buff)	\
	{						\
		.min_buff = _min_buff,			\
		.max_buff = _max_buff,			\
		.pool_index = MLXSW_SP_SB_POOL_EGR_MC,	\
		.freeze_pool = true,			\
		.freeze_thresh = true,			\
	}
518
/* Spectrum-1 ingress defaults: PG 0 carries traffic, PGs 1-7 get minimal
 * dynamic thresholds, PG 8 does not exist, and the last entry covers the
 * CPU-bound PG 9 from the dedicated ingress CPU pool.
 */
static const struct mlxsw_sp_sb_cm mlxsw_sp1_sb_cms_ingress[] = {
	MLXSW_SP_SB_CM_ING(10000, 8),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, 0), /* dummy, this PG does not exist */
	MLXSW_SP_SB_CM(10000, 8, MLXSW_SP_SB_POOL_ING_CPU),
};
531
/* Spectrum-2 ingress defaults; same layout as Spectrum-1 but PG 0 uses a
 * zero minimum with a different dynamic threshold.
 */
static const struct mlxsw_sp_sb_cm mlxsw_sp2_sb_cms_ingress[] = {
	MLXSW_SP_SB_CM_ING(0, 7),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, 0), /* dummy, this PG does not exist */
	MLXSW_SP_SB_CM(10000, 8, MLXSW_SP_SB_POOL_ING_CPU),
};
544
/* Spectrum-1 egress defaults: TCs 0-7 are unicast, TCs 8-15 are their MC
 * counterparts (infinite threshold from the frozen MC pool), and the last
 * entry is TC 16 (the "stack" TC).
 */
static const struct mlxsw_sp_sb_cm mlxsw_sp1_sb_cms_egress[] = {
	MLXSW_SP_SB_CM_EGR(1500, 9),
	MLXSW_SP_SB_CM_EGR(1500, 9),
	MLXSW_SP_SB_CM_EGR(1500, 9),
	MLXSW_SP_SB_CM_EGR(1500, 9),
	MLXSW_SP_SB_CM_EGR(1500, 9),
	MLXSW_SP_SB_CM_EGR(1500, 9),
	MLXSW_SP_SB_CM_EGR(1500, 9),
	MLXSW_SP_SB_CM_EGR(1500, 9),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR(1, 0xff),
};
564
/* Spectrum-2 egress defaults; same layout as Spectrum-1 with different
 * unicast quotas.
 */
static const struct mlxsw_sp_sb_cm mlxsw_sp2_sb_cms_egress[] = {
	MLXSW_SP_SB_CM_EGR(0, 7),
	MLXSW_SP_SB_CM_EGR(0, 7),
	MLXSW_SP_SB_CM_EGR(0, 7),
	MLXSW_SP_SB_CM_EGR(0, 7),
	MLXSW_SP_SB_CM_EGR(0, 7),
	MLXSW_SP_SB_CM_EGR(0, 7),
	MLXSW_SP_SB_CM_EGR(0, 7),
	MLXSW_SP_SB_CM_EGR(0, 7),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR(1, 0xff),
};
584
/* Default CM entry for CPU port TCs that carry no configured traffic. */
#define MLXSW_SP_CPU_PORT_SB_CM MLXSW_SP_SB_CM(0, 0, MLXSW_SP_SB_POOL_EGR_CPU)

/* CPU (local port 0) egress defaults: selected TCs get a real quota from
 * the egress CPU pool, the rest are zeroed.
 */
static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = {
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
	MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
	MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
	MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
	MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
};
621
622 static bool
623 mlxsw_sp_sb_pool_is_static(struct mlxsw_sp *mlxsw_sp, u16 pool_index)
624 {
625 struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
626
627 return pr->mode == MLXSW_REG_SBPR_MODE_STATIC;
628 }
629
/* Program a table of CM defaults for one port and direction. Ingress PG 8
 * is skipped (no such PG); entries whose pool direction does not match
 * 'dir' trigger a WARN and are skipped. min_buff is always converted from
 * bytes to cells; max_buff only when the bound pool is static (dynamic
 * pools take an alpha value, and MLXSW_SP_SB_INFI maps to the
 * infinite-threshold flag).
 */
static int __mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				  enum mlxsw_reg_sbxx_dir dir,
				  const struct mlxsw_sp_sb_cm *cms,
				  size_t cms_len)
{
	const struct mlxsw_sp_sb_vals *sb_vals = mlxsw_sp->sb_vals;
	int i;
	int err;

	for (i = 0; i < cms_len; i++) {
		const struct mlxsw_sp_sb_cm *cm;
		u32 min_buff;
		u32 max_buff;

		if (i == 8 && dir == MLXSW_REG_SBXX_DIR_INGRESS)
			continue; /* PG number 8 does not exist, skip it */
		cm = &cms[i];
		if (WARN_ON(sb_vals->pool_dess[cm->pool_index].dir != dir))
			continue;

		min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, cm->min_buff);
		max_buff = cm->max_buff;
		if (max_buff == MLXSW_SP_SB_INFI) {
			err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, i,
						   min_buff, 0,
						   true, cm->pool_index);
		} else {
			if (mlxsw_sp_sb_pool_is_static(mlxsw_sp,
						       cm->pool_index))
				max_buff = mlxsw_sp_bytes_cells(mlxsw_sp,
								max_buff);
			err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, i,
						   min_buff, max_buff,
						   false, cm->pool_index);
		}
		if (err)
			return err;
	}
	return 0;
}
670
671 static int mlxsw_sp_port_sb_cms_init(struct mlxsw_sp_port *mlxsw_sp_port)
672 {
673 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
674 int err;
675
676 err = __mlxsw_sp_sb_cms_init(mlxsw_sp,
677 mlxsw_sp_port->local_port,
678 MLXSW_REG_SBXX_DIR_INGRESS,
679 mlxsw_sp->sb_vals->cms_ingress,
680 mlxsw_sp->sb_vals->cms_ingress_count);
681 if (err)
682 return err;
683 return __mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp,
684 mlxsw_sp_port->local_port,
685 MLXSW_REG_SBXX_DIR_EGRESS,
686 mlxsw_sp->sb_vals->cms_egress,
687 mlxsw_sp->sb_vals->cms_egress_count);
688 }
689
/* Program the CPU port's (local port 0) egress CM defaults. */
static int mlxsw_sp_cpu_port_sb_cms_init(struct mlxsw_sp *mlxsw_sp)
{
	return __mlxsw_sp_sb_cms_init(mlxsw_sp, 0, MLXSW_REG_SBXX_DIR_EGRESS,
				      mlxsw_sp->sb_vals->cms_cpu,
				      mlxsw_sp->sb_vals->cms_cpu_count);
}
696
/* Initializer for a per-{port, pool} quota entry. */
#define MLXSW_SP_SB_PM(_min_buff, _max_buff)	\
	{					\
		.min_buff = _min_buff,		\
		.max_buff = _max_buff,		\
	}

/* Spectrum-1 per-port pool quotas.
 * Order according to mlxsw_sp1_sb_pool_dess.
 */
static const struct mlxsw_sp_sb_pm mlxsw_sp1_sb_pms[] = {
	/* Ingress pools 0-3. */
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	/* Egress pools 0-3. */
	MLXSW_SP_SB_PM(0, 7),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	/* Egress MC pool (static: values in bytes). */
	MLXSW_SP_SB_PM(10000, 90000),
	/* CPU traffic pools, ingress then egress. */
	MLXSW_SP_SB_PM(0, 8), /* 50% occupancy */
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
};
717
718 /* Order according to mlxsw_sp2_sb_pool_dess */
719 static const struct mlxsw_sp_sb_pm mlxsw_sp2_sb_pms[] = {
720 MLXSW_SP_SB_PM(0, 7),
721 MLXSW_SP_SB_PM(0, 0),
722 MLXSW_SP_SB_PM(0, 0),
723 MLXSW_SP_SB_PM(0, 0),
724 MLXSW_SP_SB_PM(0, 7),
725 MLXSW_SP_SB_PM(0, 0),
726 MLXSW_SP_SB_PM(0, 0),
727 MLXSW_SP_SB_PM(0, 0),
728 MLXSW_SP_SB_PM(10000, 90000),
729 MLXSW_SP_SB_PM(0, 8), /* 50% occupancy */
730 MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
731 };
732
/* CPU port pool quotas, shared by both ASIC generations; only the egress
 * MC and egress CPU pools get a non-zero quota.
 * Order according to mlxsw_sp*_sb_pool_dess.
 */
static const struct mlxsw_sp_sb_pm mlxsw_sp_cpu_port_sb_pms[] = {
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 90000),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
};
747
/* Program a table of per-pool quotas for one port. min_buff is converted
 * from bytes to cells; max_buff only for static pools (dynamic pools take
 * an alpha value). With 'skip_ingress' set, ingress pools are left alone —
 * used for the CPU port, whose ingress quotas stay at defaults.
 */
static int mlxsw_sp_sb_pms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				const struct mlxsw_sp_sb_pm *pms,
				bool skip_ingress)
{
	int i, err;

	for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) {
		const struct mlxsw_sp_sb_pm *pm = &pms[i];
		const struct mlxsw_sp_sb_pool_des *des;
		u32 max_buff;
		u32 min_buff;

		des = &mlxsw_sp->sb_vals->pool_dess[i];
		if (skip_ingress && des->dir == MLXSW_REG_SBXX_DIR_INGRESS)
			continue;

		min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, pm->min_buff);
		max_buff = pm->max_buff;
		if (mlxsw_sp_sb_pool_is_static(mlxsw_sp, i))
			max_buff = mlxsw_sp_bytes_cells(mlxsw_sp, max_buff);
		err = mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, i, min_buff,
					   max_buff);
		if (err)
			return err;
	}
	return 0;
}
775
/* Program a front-panel port's pool quotas for both directions. */
static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	return mlxsw_sp_sb_pms_init(mlxsw_sp, mlxsw_sp_port->local_port,
				    mlxsw_sp->sb_vals->pms, false);
}
783
/* Program the CPU port's pool quotas; ingress pools are skipped. */
static int mlxsw_sp_cpu_port_sb_pms_init(struct mlxsw_sp *mlxsw_sp)
{
	return mlxsw_sp_sb_pms_init(mlxsw_sp, 0, mlxsw_sp->sb_vals->pms_cpu,
				    true);
}
789
/* Initializer for an SBMM entry; all MC buffers use the main egress pool. */
#define MLXSW_SP_SB_MM(_min_buff, _max_buff)		\
	{						\
		.min_buff = _min_buff,			\
		.max_buff = _max_buff,			\
		.pool_index = MLXSW_SP_SB_POOL_EGR,	\
	}

/* Port-independent MC buffer defaults, one entry per switch priority. */
static const struct mlxsw_sp_sb_mm mlxsw_sp_sb_mms[] = {
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
};
814
/* Program the port-independent MC buffer defaults (SBMM register), one
 * register write per entry.
 */
static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp)
{
	char sbmm_pl[MLXSW_REG_SBMM_LEN];
	int i;
	int err;

	for (i = 0; i < mlxsw_sp->sb_vals->mms_count; i++) {
		const struct mlxsw_sp_sb_pool_des *des;
		const struct mlxsw_sp_sb_mm *mc;
		u32 min_buff;

		mc = &mlxsw_sp->sb_vals->mms[i];
		des = &mlxsw_sp->sb_vals->pool_dess[mc->pool_index];
		/* All pools used by sb_mm's are initialized using dynamic
		 * thresholds, therefore 'max_buff' isn't specified in cells.
		 */
		min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, mc->min_buff);
		mlxsw_reg_sbmm_pack(sbmm_pl, i, min_buff, mc->max_buff,
				    des->pool);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbmm), sbmm_pl);
		if (err)
			return err;
	}
	return 0;
}
840
/* Count ingress and egress pools for devlink registration. Counters are
 * incremented, not assigned — the caller must pass them zero-initialized.
 */
static void mlxsw_sp_pool_count(struct mlxsw_sp *mlxsw_sp,
				u16 *p_ingress_len, u16 *p_egress_len)
{
	int i;

	for (i = 0; i < mlxsw_sp->sb_vals->pool_count; ++i) {
		if (mlxsw_sp->sb_vals->pool_dess[i].dir ==
		    MLXSW_REG_SBXX_DIR_INGRESS)
			(*p_ingress_len)++;
		else
			(*p_egress_len)++;
	}

	WARN(*p_egress_len == 0, "No egress pools\n");
}
856
/* Spectrum-1 shared buffer defaults bundle, referenced from the chip
 * profile; the CPU port tables are shared with Spectrum-2.
 */
const struct mlxsw_sp_sb_vals mlxsw_sp1_sb_vals = {
	.pool_count = ARRAY_SIZE(mlxsw_sp1_sb_pool_dess),
	.pool_dess = mlxsw_sp1_sb_pool_dess,
	.pms = mlxsw_sp1_sb_pms,
	.pms_cpu = mlxsw_sp_cpu_port_sb_pms,
	.prs = mlxsw_sp1_sb_prs,
	.mms = mlxsw_sp_sb_mms,
	.cms_ingress = mlxsw_sp1_sb_cms_ingress,
	.cms_egress = mlxsw_sp1_sb_cms_egress,
	.cms_cpu = mlxsw_sp_cpu_port_sb_cms,
	.mms_count = ARRAY_SIZE(mlxsw_sp_sb_mms),
	.cms_ingress_count = ARRAY_SIZE(mlxsw_sp1_sb_cms_ingress),
	.cms_egress_count = ARRAY_SIZE(mlxsw_sp1_sb_cms_egress),
	.cms_cpu_count = ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms),
};
872
/* Spectrum-2 shared buffer defaults bundle, referenced from the chip
 * profile; the CPU port tables are shared with Spectrum-1.
 */
const struct mlxsw_sp_sb_vals mlxsw_sp2_sb_vals = {
	.pool_count = ARRAY_SIZE(mlxsw_sp2_sb_pool_dess),
	.pool_dess = mlxsw_sp2_sb_pool_dess,
	.pms = mlxsw_sp2_sb_pms,
	.pms_cpu = mlxsw_sp_cpu_port_sb_pms,
	.prs = mlxsw_sp2_sb_prs,
	.mms = mlxsw_sp_sb_mms,
	.cms_ingress = mlxsw_sp2_sb_cms_ingress,
	.cms_egress = mlxsw_sp2_sb_cms_egress,
	.cms_cpu = mlxsw_sp_cpu_port_sb_cms,
	.mms_count = ARRAY_SIZE(mlxsw_sp_sb_mms),
	.cms_ingress_count = ARRAY_SIZE(mlxsw_sp2_sb_cms_ingress),
	.cms_egress_count = ARRAY_SIZE(mlxsw_sp2_sb_cms_egress),
	.cms_cpu_count = ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms),
};
888
/* Driver-wide shared buffer initialization: validate device resources,
 * allocate the software caches, program pool/CM/PM/MM defaults into the
 * device, and register the shared buffer with devlink. On failure the
 * goto ladder unwinds only allocations — register writes need no undo.
 */
int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
{
	u32 max_headroom_size;
	u16 ing_pool_count = 0;
	u16 eg_pool_count = 0;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, CELL_SIZE))
		return -EIO;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_BUFFER_SIZE))
		return -EIO;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_HEADROOM_SIZE))
		return -EIO;

	mlxsw_sp->sb = kzalloc(sizeof(*mlxsw_sp->sb), GFP_KERNEL);
	if (!mlxsw_sp->sb)
		return -ENOMEM;
	mlxsw_sp->sb->cell_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, CELL_SIZE);
	mlxsw_sp->sb->sb_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
						   MAX_BUFFER_SIZE);
	max_headroom_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
					       MAX_HEADROOM_SIZE);
	/* Round down, because this limit must not be overstepped. */
	mlxsw_sp->sb->max_headroom_cells = max_headroom_size /
						mlxsw_sp->sb->cell_size;

	err = mlxsw_sp_sb_ports_init(mlxsw_sp);
	if (err)
		goto err_sb_ports_init;
	err = mlxsw_sp_sb_prs_init(mlxsw_sp, mlxsw_sp->sb_vals->prs,
				   mlxsw_sp->sb_vals->pool_count);
	if (err)
		goto err_sb_prs_init;
	err = mlxsw_sp_cpu_port_sb_cms_init(mlxsw_sp);
	if (err)
		goto err_sb_cpu_port_sb_cms_init;
	err = mlxsw_sp_cpu_port_sb_pms_init(mlxsw_sp);
	if (err)
		goto err_sb_cpu_port_pms_init;
	err = mlxsw_sp_sb_mms_init(mlxsw_sp);
	if (err)
		goto err_sb_mms_init;
	mlxsw_sp_pool_count(mlxsw_sp, &ing_pool_count, &eg_pool_count);
	err = devlink_sb_register(priv_to_devlink(mlxsw_sp->core), 0,
				  mlxsw_sp->sb->sb_size,
				  ing_pool_count,
				  eg_pool_count,
				  MLXSW_SP_SB_ING_TC_COUNT,
				  MLXSW_SP_SB_EG_TC_COUNT);
	if (err)
		goto err_devlink_sb_register;

	return 0;

err_devlink_sb_register:
err_sb_mms_init:
err_sb_cpu_port_pms_init:
err_sb_cpu_port_sb_cms_init:
err_sb_prs_init:
	mlxsw_sp_sb_ports_fini(mlxsw_sp);
err_sb_ports_init:
	kfree(mlxsw_sp->sb);
	return err;
}
955
/* Reverse of mlxsw_sp_buffers_init(): unregister from devlink, then free
 * the software caches.
 */
void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp)
{
	devlink_sb_unregister(priv_to_devlink(mlxsw_sp->core), 0);
	mlxsw_sp_sb_ports_fini(mlxsw_sp);
	kfree(mlxsw_sp->sb);
}
962
/* Per-port buffer initialization: headroom first, then the shared buffer
 * CM bindings and pool quotas.
 */
int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_port_headroom_init(mlxsw_sp_port);
	if (err)
		return err;
	err = mlxsw_sp_port_sb_cms_init(mlxsw_sp_port);
	if (err)
		return err;
	return mlxsw_sp_port_sb_pms_init(mlxsw_sp_port);
}
977
/* devlink op: report one pool's type, size (converted from cells to
 * bytes), threshold type and cell size from the software cache — no
 * device access.
 */
int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
			 unsigned int sb_index, u16 pool_index,
			 struct devlink_sb_pool_info *pool_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	enum mlxsw_reg_sbxx_dir dir;
	struct mlxsw_sp_sb_pr *pr;

	dir = mlxsw_sp->sb_vals->pool_dess[pool_index].dir;
	pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
	pool_info->pool_type = (enum devlink_sb_pool_type) dir;
	pool_info->size = mlxsw_sp_cells_bytes(mlxsw_sp, pr->size);
	pool_info->threshold_type = (enum devlink_sb_threshold_type) pr->mode;
	pool_info->cell_size = mlxsw_sp->sb->cell_size;
	return 0;
}
994
995 int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core,
996 unsigned int sb_index, u16 pool_index, u32 size,
997 enum devlink_sb_threshold_type threshold_type,
998 struct netlink_ext_ack *extack)
999 {
1000 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
1001 u32 pool_size = mlxsw_sp_bytes_cells(mlxsw_sp, size);
1002 const struct mlxsw_sp_sb_pr *pr;
1003 enum mlxsw_reg_sbpr_mode mode;
1004
1005 mode = (enum mlxsw_reg_sbpr_mode) threshold_type;
1006 pr = &mlxsw_sp->sb_vals->prs[pool_index];
1007
1008 if (size > MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE)) {
1009 NL_SET_ERR_MSG_MOD(extack, "Exceeded shared buffer size");
1010 return -EINVAL;
1011 }
1012
1013 if (pr->freeze_mode && pr->mode != mode) {
1014 NL_SET_ERR_MSG_MOD(extack, "Changing this pool's threshold type is forbidden");
1015 return -EINVAL;
1016 };
1017
1018 if (pr->freeze_size && pr->size != size) {
1019 NL_SET_ERR_MSG_MOD(extack, "Changing this pool's size is forbidden");
1020 return -EINVAL;
1021 };
1022
1023 return mlxsw_sp_sb_pr_write(mlxsw_sp, pool_index, mode,
1024 pool_size, false);
1025 }
1026
1027 #define MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET (-2) /* 3->1, 16->14 */
1028
1029 static u32 mlxsw_sp_sb_threshold_out(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
1030 u32 max_buff)
1031 {
1032 struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
1033
1034 if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC)
1035 return max_buff - MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
1036 return mlxsw_sp_cells_bytes(mlxsw_sp, max_buff);
1037 }
1038
1039 static int mlxsw_sp_sb_threshold_in(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
1040 u32 threshold, u32 *p_max_buff,
1041 struct netlink_ext_ack *extack)
1042 {
1043 struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
1044
1045 if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC) {
1046 int val;
1047
1048 val = threshold + MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
1049 if (val < MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN ||
1050 val > MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX) {
1051 NL_SET_ERR_MSG_MOD(extack, "Invalid dynamic threshold value");
1052 return -EINVAL;
1053 }
1054 *p_max_buff = val;
1055 } else {
1056 *p_max_buff = mlxsw_sp_bytes_cells(mlxsw_sp, threshold);
1057 }
1058 return 0;
1059 }
1060
1061 int mlxsw_sp_sb_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
1062 unsigned int sb_index, u16 pool_index,
1063 u32 *p_threshold)
1064 {
1065 struct mlxsw_sp_port *mlxsw_sp_port =
1066 mlxsw_core_port_driver_priv(mlxsw_core_port);
1067 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1068 u8 local_port = mlxsw_sp_port->local_port;
1069 struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
1070 pool_index);
1071
1072 *p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, pool_index,
1073 pm->max_buff);
1074 return 0;
1075 }
1076
1077 int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port,
1078 unsigned int sb_index, u16 pool_index,
1079 u32 threshold, struct netlink_ext_ack *extack)
1080 {
1081 struct mlxsw_sp_port *mlxsw_sp_port =
1082 mlxsw_core_port_driver_priv(mlxsw_core_port);
1083 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1084 u8 local_port = mlxsw_sp_port->local_port;
1085 u32 max_buff;
1086 int err;
1087
1088 err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool_index,
1089 threshold, &max_buff, extack);
1090 if (err)
1091 return err;
1092
1093 return mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, pool_index,
1094 0, max_buff);
1095 }
1096
1097 int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port,
1098 unsigned int sb_index, u16 tc_index,
1099 enum devlink_sb_pool_type pool_type,
1100 u16 *p_pool_index, u32 *p_threshold)
1101 {
1102 struct mlxsw_sp_port *mlxsw_sp_port =
1103 mlxsw_core_port_driver_priv(mlxsw_core_port);
1104 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1105 u8 local_port = mlxsw_sp_port->local_port;
1106 u8 pg_buff = tc_index;
1107 enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
1108 struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
1109 pg_buff, dir);
1110
1111 *p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, cm->pool_index,
1112 cm->max_buff);
1113 *p_pool_index = cm->pool_index;
1114 return 0;
1115 }
1116
/* Devlink callback: bind TC @tc_index of this port to pool @pool_index
 * with the given threshold.
 *
 * Rejected when:
 *  - the requested pool's direction disagrees with @pool_type,
 *  - the TC's default binding has a frozen pool and it would change,
 *  - the TC's default binding has a frozen threshold and it would change.
 *
 * Returns 0 on success or a negative errno; extack carries the reason.
 */
int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
				 unsigned int sb_index, u16 tc_index,
				 enum devlink_sb_pool_type pool_type,
				 u16 pool_index, u32 threshold,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
		mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	const struct mlxsw_sp_sb_cm *cm;
	u8 pg_buff = tc_index; /* TC index doubles as the PG buffer index */
	enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
	u32 max_buff;
	int err;

	/* A TC may only be bound to a pool of matching direction. */
	if (dir != mlxsw_sp->sb_vals->pool_dess[pool_index].dir) {
		NL_SET_ERR_MSG_MOD(extack, "Binding egress TC to ingress pool and vice versa is forbidden");
		return -EINVAL;
	}

	/* Look up the driver's default CM entry for this TC to consult
	 * its freeze flags.
	 */
	if (dir == MLXSW_REG_SBXX_DIR_INGRESS)
		cm = &mlxsw_sp->sb_vals->cms_ingress[tc_index];
	else
		cm = &mlxsw_sp->sb_vals->cms_egress[tc_index];

	if (cm->freeze_pool && cm->pool_index != pool_index) {
		NL_SET_ERR_MSG_MOD(extack, "Binding this TC to a different pool is forbidden");
		return -EINVAL;
	}

	if (cm->freeze_thresh && cm->max_buff != threshold) {
		NL_SET_ERR_MSG_MOD(extack, "Changing this TC's threshold is forbidden");
		return -EINVAL;
	}

	/* Validate and translate the devlink threshold into max_buff. */
	err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool_index,
				       threshold, &max_buff, extack);
	if (err)
		return err;

	return mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, pg_buff,
				    0, max_buff, false, pool_index);
}
1161
/* Maximum number of ports that fit in one SBSR query: each port
 * contributes one record per ingress TC and one per egress TC.
 */
#define MASKED_COUNT_MAX \
	(MLXSW_REG_SBSR_REC_MAX_COUNT / \
	 (MLXSW_SP_SB_ING_TC_COUNT + MLXSW_SP_SB_EG_TC_COUNT))

/* Context passed (packed into an unsigned long) from
 * mlxsw_sp_sb_occ_snapshot() to mlxsw_sp_sb_sr_occ_query_cb().
 */
struct mlxsw_sp_sb_sr_occ_query_cb_ctx {
	u8 masked_count;	/* number of ports covered by this query */
	u8 local_port_1;	/* first local port of this batch */
};
1170
/* Completion callback for an SBSR occupancy query issued by
 * mlxsw_sp_sb_occ_snapshot(). Unpacks the returned records into the
 * per-port CM occupancy caches.
 *
 * NOTE(review): the record order consumed here (all ingress TCs for each
 * port in the batch, then all egress TCs) must mirror the mask layout
 * packed in mlxsw_sp_sb_occ_snapshot() — keep the two in sync.
 */
static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core *mlxsw_core,
					char *sbsr_pl, size_t sbsr_pl_len,
					unsigned long cb_priv)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
	u8 masked_count;
	u8 local_port;
	int rec_index = 0;	/* running index into the SBSR records */
	struct mlxsw_sp_sb_cm *cm;
	int i;

	/* cb_priv is the cb_ctx struct packed into an unsigned long. */
	memcpy(&cb_ctx, &cb_priv, sizeof(cb_ctx));

	/* First pass: ingress TC records for each port of the batch. */
	masked_count = 0;
	for (local_port = cb_ctx.local_port_1;
	     local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;	/* unused ports carry no records */
		for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++) {
			cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i,
						MLXSW_REG_SBXX_DIR_INGRESS);
			mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++,
						  &cm->occ.cur, &cm->occ.max);
		}
		if (++masked_count == cb_ctx.masked_count)
			break;
	}
	/* Second pass: egress TC records, same ports in the same order. */
	masked_count = 0;
	for (local_port = cb_ctx.local_port_1;
	     local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;
		for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++) {
			cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i,
						MLXSW_REG_SBXX_DIR_EGRESS);
			mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++,
						  &cm->occ.cur, &cm->occ.max);
		}
		if (++masked_count == cb_ctx.masked_count)
			break;
	}
}
1214
/* Devlink callback: take an occupancy snapshot of the shared buffer.
 *
 * Ports are queried in batches of up to MASKED_COUNT_MAX (limited by the
 * number of records one SBSR response can carry). For every batch one
 * SBSR query is issued (results unpacked by mlxsw_sp_sb_sr_occ_query_cb),
 * plus one SBPM occupancy query per (port, pool). All transactions are
 * collected on bulk_list and waited for at the end.
 *
 * Returns 0 on success or a negative errno.
 */
int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
			     unsigned int sb_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
	unsigned long cb_priv;
	LIST_HEAD(bulk_list);
	char *sbsr_pl;
	u8 masked_count;	/* ports packed into the current batch */
	u8 local_port_1;	/* first port of the current batch */
	u8 local_port = 0;	/* pre-incremented below, so batch 1 starts at port 1 */
	int i;
	int err;
	int err2;

	sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL);
	if (!sbsr_pl)
		return -ENOMEM;

next_batch:
	local_port++;
	local_port_1 = local_port;
	masked_count = 0;
	/* clr_prt_qnt = false: read occupancy without clearing maxima. */
	mlxsw_reg_sbsr_pack(sbsr_pl, false);
	/* Request every ingress PG and egress TC for the masked ports. */
	for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++)
		mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
	for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++)
		mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
	for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;	/* skip unused port numbers */
		mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1);
		mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
		/* Per-pool occupancy is fetched separately via SBPM. */
		for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) {
			err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i,
						       &bulk_list);
			if (err)
				goto out;
		}
		if (++masked_count == MASKED_COUNT_MAX)
			goto do_query;	/* batch full; flush it */
	}

do_query:
	/* Pack the batch context into cb_priv for the completion callback. */
	cb_ctx.masked_count = masked_count;
	cb_ctx.local_port_1 = local_port_1;
	memcpy(&cb_priv, &cb_ctx, sizeof(cb_ctx));
	err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl,
				    &bulk_list, mlxsw_sp_sb_sr_occ_query_cb,
				    cb_priv);
	if (err)
		goto out;
	if (local_port < mlxsw_core_max_ports(mlxsw_core))
		goto next_batch;	/* more ports remain */

out:
	/* Wait for all queued transactions even on error. */
	err2 = mlxsw_reg_trans_bulk_wait(&bulk_list);
	if (!err)
		err = err2;
	kfree(sbsr_pl);
	return err;
}
1277
/* Devlink callback: clear the recorded maximum occupancy values.
 *
 * Mirrors mlxsw_sp_sb_occ_snapshot()'s batching, but packs SBSR with
 * clr_prt_qnt = true so the device resets its maxima, issues SBPM clears
 * per (port, pool), and discards the returned records (NULL callback).
 *
 * Returns 0 on success or a negative errno.
 */
int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
			      unsigned int sb_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	LIST_HEAD(bulk_list);
	char *sbsr_pl;
	unsigned int masked_count;	/* ports packed into the current batch */
	u8 local_port = 0;	/* pre-incremented below, so batch 1 starts at port 1 */
	int i;
	int err;
	int err2;

	sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL);
	if (!sbsr_pl)
		return -ENOMEM;

next_batch:
	local_port++;
	masked_count = 0;
	/* clr_prt_qnt = true: reading also clears the stored maxima. */
	mlxsw_reg_sbsr_pack(sbsr_pl, true);
	for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++)
		mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
	for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++)
		mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
	for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;	/* skip unused port numbers */
		mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1);
		mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
		for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) {
			err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i,
						       &bulk_list);
			if (err)
				goto out;
		}
		if (++masked_count == MASKED_COUNT_MAX)
			goto do_query;	/* batch full; flush it */
	}

do_query:
	/* No callback: the response payload is not needed for clearing. */
	err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl,
				    &bulk_list, NULL, 0);
	if (err)
		goto out;
	if (local_port < mlxsw_core_max_ports(mlxsw_core))
		goto next_batch;	/* more ports remain */

out:
	/* Wait for all queued transactions even on error. */
	err2 = mlxsw_reg_trans_bulk_wait(&bulk_list);
	if (!err)
		err = err2;
	kfree(sbsr_pl);
	return err;
}
1332
1333 int mlxsw_sp_sb_occ_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
1334 unsigned int sb_index, u16 pool_index,
1335 u32 *p_cur, u32 *p_max)
1336 {
1337 struct mlxsw_sp_port *mlxsw_sp_port =
1338 mlxsw_core_port_driver_priv(mlxsw_core_port);
1339 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1340 u8 local_port = mlxsw_sp_port->local_port;
1341 struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
1342 pool_index);
1343
1344 *p_cur = mlxsw_sp_cells_bytes(mlxsw_sp, pm->occ.cur);
1345 *p_max = mlxsw_sp_cells_bytes(mlxsw_sp, pm->occ.max);
1346 return 0;
1347 }
1348
1349 int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port,
1350 unsigned int sb_index, u16 tc_index,
1351 enum devlink_sb_pool_type pool_type,
1352 u32 *p_cur, u32 *p_max)
1353 {
1354 struct mlxsw_sp_port *mlxsw_sp_port =
1355 mlxsw_core_port_driver_priv(mlxsw_core_port);
1356 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1357 u8 local_port = mlxsw_sp_port->local_port;
1358 u8 pg_buff = tc_index;
1359 enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
1360 struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
1361 pg_buff, dir);
1362
1363 *p_cur = mlxsw_sp_cells_bytes(mlxsw_sp, cm->occ.cur);
1364 *p_max = mlxsw_sp_cells_bytes(mlxsw_sp, cm->occ.max);
1365 return 0;
1366 }