]> git.ipfire.org Git - thirdparty/kernel/stable.git/blame - drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
Merge tag 'kvm-x86-generic-6.8' of https://github.com/kvm-x86/linux into HEAD
[thirdparty/kernel/stable.git] / drivers / net / ethernet / marvell / octeontx2 / af / rvu_devlink.c
CommitLineData
fae06da4 1// SPDX-License-Identifier: GPL-2.0
c7cd6c5a 2/* Marvell RVU Admin Function Devlink
fae06da4
GC
3 *
4 * Copyright (C) 2020 Marvell.
5 *
6 */
7
f1168d1e
GC
8#include<linux/bitfield.h>
9
fae06da4 10#include "rvu.h"
f1168d1e
GC
11#include "rvu_reg.h"
12#include "rvu_struct.h"
ef83e186 13#include "rvu_npc_hash.h"
fae06da4
GC
14
15#define DRV_NAME "octeontx2-af"
16
/* Open a named pair plus its object nest in a devlink fmsg stream;
 * closed by rvu_report_pair_end().
 */
static void rvu_report_pair_start(struct devlink_fmsg *fmsg, const char *name)
{
	devlink_fmsg_pair_nest_start(fmsg, name);
	devlink_fmsg_obj_nest_start(fmsg);
}
22
/* Close the object and pair nests opened by rvu_report_pair_start(). */
static void rvu_report_pair_end(struct devlink_fmsg *fmsg)
{
	devlink_fmsg_obj_nest_end(fmsg);
	devlink_fmsg_pair_nest_end(fmsg);
}
28
29static bool rvu_common_request_irq(struct rvu *rvu, int offset,
30 const char *name, irq_handler_t fn)
31{
32 struct rvu_devlink *rvu_dl = rvu->rvu_dl;
33 int rc;
34
6dc9a23e 35 sprintf(&rvu->irq_name[offset * NAME_SIZE], "%s", name);
f1168d1e
GC
36 rc = request_irq(pci_irq_vector(rvu->pdev, offset), fn, 0,
37 &rvu->irq_name[offset * NAME_SIZE], rvu_dl);
38 if (rc)
39 dev_warn(rvu->dev, "Failed to register %s irq\n", name);
40 else
41 rvu->irq_allocated[offset] = true;
42
43 return rvu->irq_allocated[offset];
44}
45
5ed66306
GC
46static void rvu_nix_intr_work(struct work_struct *work)
47{
48 struct rvu_nix_health_reporters *rvu_nix_health_reporter;
49
50 rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, intr_work);
51 devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_intr_reporter,
52 "NIX_AF_RVU Error",
53 rvu_nix_health_reporter->nix_event_ctx);
54}
55
56static irqreturn_t rvu_nix_af_rvu_intr_handler(int irq, void *rvu_irq)
57{
58 struct rvu_nix_event_ctx *nix_event_context;
59 struct rvu_devlink *rvu_dl = rvu_irq;
60 struct rvu *rvu;
61 int blkaddr;
62 u64 intr;
63
64 rvu = rvu_dl->rvu;
65 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
66 if (blkaddr < 0)
67 return IRQ_NONE;
68
69 nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
70 intr = rvu_read64(rvu, blkaddr, NIX_AF_RVU_INT);
71 nix_event_context->nix_af_rvu_int = intr;
72
73 /* Clear interrupts */
74 rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT, intr);
75 rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1C, ~0ULL);
76 queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->intr_work);
77
78 return IRQ_HANDLED;
79}
80
81static void rvu_nix_gen_work(struct work_struct *work)
82{
83 struct rvu_nix_health_reporters *rvu_nix_health_reporter;
84
85 rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, gen_work);
86 devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_gen_reporter,
87 "NIX_AF_GEN Error",
88 rvu_nix_health_reporter->nix_event_ctx);
89}
90
91static irqreturn_t rvu_nix_af_rvu_gen_handler(int irq, void *rvu_irq)
92{
93 struct rvu_nix_event_ctx *nix_event_context;
94 struct rvu_devlink *rvu_dl = rvu_irq;
95 struct rvu *rvu;
96 int blkaddr;
97 u64 intr;
98
99 rvu = rvu_dl->rvu;
100 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
101 if (blkaddr < 0)
102 return IRQ_NONE;
103
104 nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
105 intr = rvu_read64(rvu, blkaddr, NIX_AF_GEN_INT);
106 nix_event_context->nix_af_rvu_gen = intr;
107
108 /* Clear interrupts */
109 rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT, intr);
110 rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1C, ~0ULL);
111 queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->gen_work);
112
113 return IRQ_HANDLED;
114}
115
116static void rvu_nix_err_work(struct work_struct *work)
117{
118 struct rvu_nix_health_reporters *rvu_nix_health_reporter;
119
120 rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, err_work);
121 devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_err_reporter,
122 "NIX_AF_ERR Error",
123 rvu_nix_health_reporter->nix_event_ctx);
124}
125
126static irqreturn_t rvu_nix_af_rvu_err_handler(int irq, void *rvu_irq)
127{
128 struct rvu_nix_event_ctx *nix_event_context;
129 struct rvu_devlink *rvu_dl = rvu_irq;
130 struct rvu *rvu;
131 int blkaddr;
132 u64 intr;
133
134 rvu = rvu_dl->rvu;
135 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
136 if (blkaddr < 0)
137 return IRQ_NONE;
138
139 nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
140 intr = rvu_read64(rvu, blkaddr, NIX_AF_ERR_INT);
141 nix_event_context->nix_af_rvu_err = intr;
142
143 /* Clear interrupts */
144 rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT, intr);
145 rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1C, ~0ULL);
146 queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->err_work);
147
148 return IRQ_HANDLED;
149}
150
151static void rvu_nix_ras_work(struct work_struct *work)
152{
153 struct rvu_nix_health_reporters *rvu_nix_health_reporter;
154
155 rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, ras_work);
156 devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_ras_reporter,
157 "NIX_AF_RAS Error",
158 rvu_nix_health_reporter->nix_event_ctx);
159}
160
161static irqreturn_t rvu_nix_af_rvu_ras_handler(int irq, void *rvu_irq)
162{
163 struct rvu_nix_event_ctx *nix_event_context;
164 struct rvu_devlink *rvu_dl = rvu_irq;
165 struct rvu *rvu;
166 int blkaddr;
167 u64 intr;
168
169 rvu = rvu_dl->rvu;
170 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
171 if (blkaddr < 0)
172 return IRQ_NONE;
173
174 nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
175 intr = rvu_read64(rvu, blkaddr, NIX_AF_ERR_INT);
176 nix_event_context->nix_af_rvu_ras = intr;
177
178 /* Clear interrupts */
179 rvu_write64(rvu, blkaddr, NIX_AF_RAS, intr);
180 rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1C, ~0ULL);
181 queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->ras_work);
182
183 return IRQ_HANDLED;
184}
185
186static void rvu_nix_unregister_interrupts(struct rvu *rvu)
187{
188 struct rvu_devlink *rvu_dl = rvu->rvu_dl;
189 int offs, i, blkaddr;
190
191 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
192 if (blkaddr < 0)
193 return;
194
195 offs = rvu_read64(rvu, blkaddr, NIX_PRIV_AF_INT_CFG) & 0x3ff;
196 if (!offs)
197 return;
198
199 rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1C, ~0ULL);
200 rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1C, ~0ULL);
201 rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1C, ~0ULL);
202 rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1C, ~0ULL);
203
204 if (rvu->irq_allocated[offs + NIX_AF_INT_VEC_RVU]) {
205 free_irq(pci_irq_vector(rvu->pdev, offs + NIX_AF_INT_VEC_RVU),
206 rvu_dl);
207 rvu->irq_allocated[offs + NIX_AF_INT_VEC_RVU] = false;
208 }
209
210 for (i = NIX_AF_INT_VEC_AF_ERR; i < NIX_AF_INT_VEC_CNT; i++)
211 if (rvu->irq_allocated[offs + i]) {
212 free_irq(pci_irq_vector(rvu->pdev, offs + i), rvu_dl);
213 rvu->irq_allocated[offs + i] = false;
214 }
215}
216
217static int rvu_nix_register_interrupts(struct rvu *rvu)
218{
219 int blkaddr, base;
220 bool rc;
221
222 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
223 if (blkaddr < 0)
224 return blkaddr;
225
226 /* Get NIX AF MSIX vectors offset. */
227 base = rvu_read64(rvu, blkaddr, NIX_PRIV_AF_INT_CFG) & 0x3ff;
228 if (!base) {
229 dev_warn(rvu->dev,
230 "Failed to get NIX%d NIX_AF_INT vector offsets\n",
231 blkaddr - BLKADDR_NIX0);
232 return 0;
233 }
234 /* Register and enable NIX_AF_RVU_INT interrupt */
235 rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_RVU,
236 "NIX_AF_RVU_INT",
237 rvu_nix_af_rvu_intr_handler);
238 if (!rc)
239 goto err;
240 rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1S, ~0ULL);
241
242 /* Register and enable NIX_AF_GEN_INT interrupt */
243 rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_GEN,
244 "NIX_AF_GEN_INT",
245 rvu_nix_af_rvu_gen_handler);
246 if (!rc)
247 goto err;
248 rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1S, ~0ULL);
249
250 /* Register and enable NIX_AF_ERR_INT interrupt */
251 rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_AF_ERR,
252 "NIX_AF_ERR_INT",
253 rvu_nix_af_rvu_err_handler);
254 if (!rc)
255 goto err;
256 rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1S, ~0ULL);
257
258 /* Register and enable NIX_AF_RAS interrupt */
259 rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_POISON,
260 "NIX_AF_RAS",
261 rvu_nix_af_rvu_ras_handler);
262 if (!rc)
263 goto err;
264 rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1S, ~0ULL);
265
266 return 0;
267err:
268 rvu_nix_unregister_interrupts(rvu);
269 return rc;
270}
271
272static int rvu_nix_report_show(struct devlink_fmsg *fmsg, void *ctx,
273 enum nix_af_rvu_health health_reporter)
274{
275 struct rvu_nix_event_ctx *nix_event_context;
276 u64 intr_val;
5ed66306
GC
277
278 nix_event_context = ctx;
279 switch (health_reporter) {
280 case NIX_AF_RVU_INTR:
281 intr_val = nix_event_context->nix_af_rvu_int;
d8cf03fc
PK
282 rvu_report_pair_start(fmsg, "NIX_AF_RVU");
283 devlink_fmsg_u64_pair_put(fmsg, "\tNIX RVU Interrupt Reg ",
284 nix_event_context->nix_af_rvu_int);
285 if (intr_val & BIT_ULL(0))
286 devlink_fmsg_string_put(fmsg, "\n\tUnmap Slot Error");
287 rvu_report_pair_end(fmsg);
5ed66306
GC
288 break;
289 case NIX_AF_RVU_GEN:
290 intr_val = nix_event_context->nix_af_rvu_gen;
d8cf03fc
PK
291 rvu_report_pair_start(fmsg, "NIX_AF_GENERAL");
292 devlink_fmsg_u64_pair_put(fmsg, "\tNIX General Interrupt Reg ",
293 nix_event_context->nix_af_rvu_gen);
294 if (intr_val & BIT_ULL(0))
295 devlink_fmsg_string_put(fmsg, "\n\tRx multicast pkt drop");
296 if (intr_val & BIT_ULL(1))
297 devlink_fmsg_string_put(fmsg, "\n\tRx mirror pkt drop");
298 if (intr_val & BIT_ULL(4))
299 devlink_fmsg_string_put(fmsg, "\n\tSMQ flush done");
300 rvu_report_pair_end(fmsg);
5ed66306
GC
301 break;
302 case NIX_AF_RVU_ERR:
303 intr_val = nix_event_context->nix_af_rvu_err;
d8cf03fc
PK
304 rvu_report_pair_start(fmsg, "NIX_AF_ERR");
305 devlink_fmsg_u64_pair_put(fmsg, "\tNIX Error Interrupt Reg ",
306 nix_event_context->nix_af_rvu_err);
307 if (intr_val & BIT_ULL(14))
308 devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_AQ_INST_S read");
309 if (intr_val & BIT_ULL(13))
310 devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_AQ_RES_S write");
311 if (intr_val & BIT_ULL(12))
312 devlink_fmsg_string_put(fmsg, "\n\tAQ Doorbell Error");
313 if (intr_val & BIT_ULL(6))
314 devlink_fmsg_string_put(fmsg, "\n\tRx on unmapped PF_FUNC");
315 if (intr_val & BIT_ULL(5))
316 devlink_fmsg_string_put(fmsg, "\n\tRx multicast replication error");
317 if (intr_val & BIT_ULL(4))
318 devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_RX_MCE_S read");
319 if (intr_val & BIT_ULL(3))
320 devlink_fmsg_string_put(fmsg, "\n\tFault on multicast WQE read");
321 if (intr_val & BIT_ULL(2))
322 devlink_fmsg_string_put(fmsg, "\n\tFault on mirror WQE read");
323 if (intr_val & BIT_ULL(1))
324 devlink_fmsg_string_put(fmsg, "\n\tFault on mirror pkt write");
325 if (intr_val & BIT_ULL(0))
326 devlink_fmsg_string_put(fmsg, "\n\tFault on multicast pkt write");
327 rvu_report_pair_end(fmsg);
5ed66306
GC
328 break;
329 case NIX_AF_RVU_RAS:
330 intr_val = nix_event_context->nix_af_rvu_err;
d8cf03fc
PK
331 rvu_report_pair_start(fmsg, "NIX_AF_RAS");
332 devlink_fmsg_u64_pair_put(fmsg, "\tNIX RAS Interrupt Reg ",
333 nix_event_context->nix_af_rvu_err);
334 devlink_fmsg_string_put(fmsg, "\n\tPoison Data on:");
335 if (intr_val & BIT_ULL(34))
336 devlink_fmsg_string_put(fmsg, "\n\tNIX_AQ_INST_S");
337 if (intr_val & BIT_ULL(33))
338 devlink_fmsg_string_put(fmsg, "\n\tNIX_AQ_RES_S");
339 if (intr_val & BIT_ULL(32))
340 devlink_fmsg_string_put(fmsg, "\n\tHW ctx");
341 if (intr_val & BIT_ULL(4))
342 devlink_fmsg_string_put(fmsg, "\n\tPacket from mirror buffer");
343 if (intr_val & BIT_ULL(3))
344 devlink_fmsg_string_put(fmsg, "\n\tPacket from multicast buffer");
345 if (intr_val & BIT_ULL(2))
346 devlink_fmsg_string_put(fmsg, "\n\tWQE read from mirror buffer");
347 if (intr_val & BIT_ULL(1))
348 devlink_fmsg_string_put(fmsg, "\n\tWQE read from multicast buffer");
349 if (intr_val & BIT_ULL(0))
350 devlink_fmsg_string_put(fmsg, "\n\tNIX_RX_MCE_S read");
351 rvu_report_pair_end(fmsg);
5ed66306
GC
352 break;
353 default:
354 return -EINVAL;
355 }
356
357 return 0;
358}
359
360static int rvu_hw_nix_intr_dump(struct devlink_health_reporter *reporter,
361 struct devlink_fmsg *fmsg, void *ctx,
362 struct netlink_ext_ack *netlink_extack)
363{
364 struct rvu *rvu = devlink_health_reporter_priv(reporter);
365 struct rvu_devlink *rvu_dl = rvu->rvu_dl;
366 struct rvu_nix_event_ctx *nix_ctx;
367
368 nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
369
370 return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_INTR) :
371 rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_INTR);
372}
373
374static int rvu_hw_nix_intr_recover(struct devlink_health_reporter *reporter,
375 void *ctx, struct netlink_ext_ack *netlink_extack)
376{
377 struct rvu *rvu = devlink_health_reporter_priv(reporter);
378 struct rvu_nix_event_ctx *nix_event_ctx = ctx;
379 int blkaddr;
380
381 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
382 if (blkaddr < 0)
383 return blkaddr;
384
385 if (nix_event_ctx->nix_af_rvu_int)
386 rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1S, ~0ULL);
387
388 return 0;
389}
390
391static int rvu_hw_nix_gen_dump(struct devlink_health_reporter *reporter,
392 struct devlink_fmsg *fmsg, void *ctx,
393 struct netlink_ext_ack *netlink_extack)
394{
395 struct rvu *rvu = devlink_health_reporter_priv(reporter);
396 struct rvu_devlink *rvu_dl = rvu->rvu_dl;
397 struct rvu_nix_event_ctx *nix_ctx;
398
399 nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
400
401 return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_GEN) :
402 rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_GEN);
403}
404
405static int rvu_hw_nix_gen_recover(struct devlink_health_reporter *reporter,
406 void *ctx, struct netlink_ext_ack *netlink_extack)
407{
408 struct rvu *rvu = devlink_health_reporter_priv(reporter);
409 struct rvu_nix_event_ctx *nix_event_ctx = ctx;
410 int blkaddr;
411
412 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
413 if (blkaddr < 0)
414 return blkaddr;
415
416 if (nix_event_ctx->nix_af_rvu_gen)
417 rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1S, ~0ULL);
418
419 return 0;
420}
421
422static int rvu_hw_nix_err_dump(struct devlink_health_reporter *reporter,
423 struct devlink_fmsg *fmsg, void *ctx,
424 struct netlink_ext_ack *netlink_extack)
425{
426 struct rvu *rvu = devlink_health_reporter_priv(reporter);
427 struct rvu_devlink *rvu_dl = rvu->rvu_dl;
428 struct rvu_nix_event_ctx *nix_ctx;
429
430 nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
431
432 return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_ERR) :
433 rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_ERR);
434}
435
436static int rvu_hw_nix_err_recover(struct devlink_health_reporter *reporter,
437 void *ctx, struct netlink_ext_ack *netlink_extack)
438{
439 struct rvu *rvu = devlink_health_reporter_priv(reporter);
440 struct rvu_nix_event_ctx *nix_event_ctx = ctx;
441 int blkaddr;
442
443 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
444 if (blkaddr < 0)
445 return blkaddr;
446
447 if (nix_event_ctx->nix_af_rvu_err)
448 rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1S, ~0ULL);
449
450 return 0;
451}
452
453static int rvu_hw_nix_ras_dump(struct devlink_health_reporter *reporter,
454 struct devlink_fmsg *fmsg, void *ctx,
455 struct netlink_ext_ack *netlink_extack)
456{
457 struct rvu *rvu = devlink_health_reporter_priv(reporter);
458 struct rvu_devlink *rvu_dl = rvu->rvu_dl;
459 struct rvu_nix_event_ctx *nix_ctx;
460
461 nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
462
463 return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_RAS) :
464 rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_RAS);
465}
466
467static int rvu_hw_nix_ras_recover(struct devlink_health_reporter *reporter,
468 void *ctx, struct netlink_ext_ack *netlink_extack)
469{
470 struct rvu *rvu = devlink_health_reporter_priv(reporter);
471 struct rvu_nix_event_ctx *nix_event_ctx = ctx;
472 int blkaddr;
473
474 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
475 if (blkaddr < 0)
476 return blkaddr;
477
478 if (nix_event_ctx->nix_af_rvu_int)
479 rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1S, ~0ULL);
480
481 return 0;
482}
483
/* Instantiate the devlink_health_reporter_ops tables
 * (rvu_hw_nix_<name>_reporter_ops) from the <name>_dump/<name>_recover
 * callbacks defined above.  NOTE(review): RVU_REPORTERS() is presumably
 * defined in rvu.h — confirm there.
 */
RVU_REPORTERS(hw_nix_intr);
RVU_REPORTERS(hw_nix_gen);
RVU_REPORTERS(hw_nix_err);
RVU_REPORTERS(hw_nix_ras);

/* Forward declaration for use later in this file. */
static void rvu_nix_health_reporters_destroy(struct rvu_devlink *rvu_dl);
490
/* Allocate the NIX health-reporter bundle: the cached event context, the
 * four devlink health reporters (intr/gen/err/ras), the workqueue used to
 * defer reporting out of hard-IRQ context, and the associated work items.
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): on a mid-function failure the objects created so far stay
 * attached to rvu_dl and are not unwound here — presumably the caller's
 * teardown path reclaims them; verify against the caller.
 */
static int rvu_nix_register_reporters(struct rvu_devlink *rvu_dl)
{
	struct rvu_nix_health_reporters *rvu_reporters;
	struct rvu_nix_event_ctx *nix_event_context;
	struct rvu *rvu = rvu_dl->rvu;

	rvu_reporters = kzalloc(sizeof(*rvu_reporters), GFP_KERNEL);
	if (!rvu_reporters)
		return -ENOMEM;

	/* Publish the bundle before populating it so later lookups through
	 * rvu_dl see the same object.
	 */
	rvu_dl->rvu_nix_health_reporter = rvu_reporters;
	nix_event_context = kzalloc(sizeof(*nix_event_context), GFP_KERNEL);
	if (!nix_event_context)
		return -ENOMEM;

	rvu_reporters->nix_event_ctx = nix_event_context;
	rvu_reporters->rvu_hw_nix_intr_reporter =
		devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_intr_reporter_ops, 0, rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_nix_intr_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_nix_intr reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_nix_intr_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_nix_intr_reporter);
	}

	rvu_reporters->rvu_hw_nix_gen_reporter =
		devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_gen_reporter_ops, 0, rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_nix_gen_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_nix_gen reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_nix_gen_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_nix_gen_reporter);
	}

	rvu_reporters->rvu_hw_nix_err_reporter =
		devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_err_reporter_ops, 0, rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_nix_err_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_nix_err reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_nix_err_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_nix_err_reporter);
	}

	rvu_reporters->rvu_hw_nix_ras_reporter =
		devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_ras_reporter_ops, 0, rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_nix_ras_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_nix_ras reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_nix_ras_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_nix_ras_reporter);
	}

	rvu_dl->devlink_wq = create_workqueue("rvu_devlink_wq");
	if (!rvu_dl->devlink_wq)
		return -ENOMEM;

	INIT_WORK(&rvu_reporters->intr_work, rvu_nix_intr_work);
	INIT_WORK(&rvu_reporters->gen_work, rvu_nix_gen_work);
	INIT_WORK(&rvu_reporters->err_work, rvu_nix_err_work);
	INIT_WORK(&rvu_reporters->ras_work, rvu_nix_ras_work);

	return 0;
}
550
551static int rvu_nix_health_reporters_create(struct rvu_devlink *rvu_dl)
552{
553 struct rvu *rvu = rvu_dl->rvu;
554 int err;
555
556 err = rvu_nix_register_reporters(rvu_dl);
557 if (err) {
558 dev_warn(rvu->dev, "Failed to create nix reporter, err =%d\n",
559 err);
560 return err;
561 }
562 rvu_nix_register_interrupts(rvu);
563
564 return 0;
565}
566
567static void rvu_nix_health_reporters_destroy(struct rvu_devlink *rvu_dl)
568{
569 struct rvu_nix_health_reporters *nix_reporters;
570 struct rvu *rvu = rvu_dl->rvu;
571
572 nix_reporters = rvu_dl->rvu_nix_health_reporter;
573
574 if (!nix_reporters->rvu_hw_nix_ras_reporter)
575 return;
576 if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_intr_reporter))
577 devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_intr_reporter);
578
579 if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_gen_reporter))
580 devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_gen_reporter);
581
582 if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_err_reporter))
583 devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_err_reporter);
584
585 if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_ras_reporter))
586 devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_ras_reporter);
587
588 rvu_nix_unregister_interrupts(rvu);
589 kfree(rvu_dl->rvu_nix_health_reporter->nix_event_ctx);
590 kfree(rvu_dl->rvu_nix_health_reporter);
591}
592
f1168d1e
GC
593static void rvu_npa_intr_work(struct work_struct *work)
594{
595 struct rvu_npa_health_reporters *rvu_npa_health_reporter;
596
597 rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, intr_work);
598 devlink_health_report(rvu_npa_health_reporter->rvu_hw_npa_intr_reporter,
599 "NPA_AF_RVU Error",
600 rvu_npa_health_reporter->npa_event_ctx);
601}
602
603static irqreturn_t rvu_npa_af_rvu_intr_handler(int irq, void *rvu_irq)
604{
605 struct rvu_npa_event_ctx *npa_event_context;
606 struct rvu_devlink *rvu_dl = rvu_irq;
607 struct rvu *rvu;
608 int blkaddr;
609 u64 intr;
610
611 rvu = rvu_dl->rvu;
612 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
613 if (blkaddr < 0)
614 return IRQ_NONE;
615
616 npa_event_context = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
617 intr = rvu_read64(rvu, blkaddr, NPA_AF_RVU_INT);
618 npa_event_context->npa_af_rvu_int = intr;
619
620 /* Clear interrupts */
621 rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT, intr);
622 rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1C, ~0ULL);
623 queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_npa_health_reporter->intr_work);
624
625 return IRQ_HANDLED;
626}
627
628static void rvu_npa_gen_work(struct work_struct *work)
629{
630 struct rvu_npa_health_reporters *rvu_npa_health_reporter;
631
632 rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, gen_work);
633 devlink_health_report(rvu_npa_health_reporter->rvu_hw_npa_gen_reporter,
634 "NPA_AF_GEN Error",
635 rvu_npa_health_reporter->npa_event_ctx);
636}
637
638static irqreturn_t rvu_npa_af_gen_intr_handler(int irq, void *rvu_irq)
639{
640 struct rvu_npa_event_ctx *npa_event_context;
641 struct rvu_devlink *rvu_dl = rvu_irq;
642 struct rvu *rvu;
643 int blkaddr;
644 u64 intr;
645
646 rvu = rvu_dl->rvu;
647 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
648 if (blkaddr < 0)
649 return IRQ_NONE;
650
651 npa_event_context = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
652 intr = rvu_read64(rvu, blkaddr, NPA_AF_GEN_INT);
653 npa_event_context->npa_af_rvu_gen = intr;
654
655 /* Clear interrupts */
656 rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT, intr);
657 rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1C, ~0ULL);
658 queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_npa_health_reporter->gen_work);
659
660 return IRQ_HANDLED;
661}
662
663static void rvu_npa_err_work(struct work_struct *work)
664{
665 struct rvu_npa_health_reporters *rvu_npa_health_reporter;
666
667 rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, err_work);
668 devlink_health_report(rvu_npa_health_reporter->rvu_hw_npa_err_reporter,
669 "NPA_AF_ERR Error",
670 rvu_npa_health_reporter->npa_event_ctx);
671}
672
673static irqreturn_t rvu_npa_af_err_intr_handler(int irq, void *rvu_irq)
674{
675 struct rvu_npa_event_ctx *npa_event_context;
676 struct rvu_devlink *rvu_dl = rvu_irq;
677 struct rvu *rvu;
678 int blkaddr;
679 u64 intr;
680
681 rvu = rvu_dl->rvu;
682 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
683 if (blkaddr < 0)
684 return IRQ_NONE;
685 npa_event_context = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
686 intr = rvu_read64(rvu, blkaddr, NPA_AF_ERR_INT);
687 npa_event_context->npa_af_rvu_err = intr;
688
689 /* Clear interrupts */
690 rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT, intr);
691 rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1C, ~0ULL);
692 queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_npa_health_reporter->err_work);
693
694 return IRQ_HANDLED;
695}
696
697static void rvu_npa_ras_work(struct work_struct *work)
698{
699 struct rvu_npa_health_reporters *rvu_npa_health_reporter;
700
701 rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, ras_work);
702 devlink_health_report(rvu_npa_health_reporter->rvu_hw_npa_ras_reporter,
703 "HW NPA_AF_RAS Error reported",
704 rvu_npa_health_reporter->npa_event_ctx);
705}
706
707static irqreturn_t rvu_npa_af_ras_intr_handler(int irq, void *rvu_irq)
708{
709 struct rvu_npa_event_ctx *npa_event_context;
710 struct rvu_devlink *rvu_dl = rvu_irq;
711 struct rvu *rvu;
712 int blkaddr;
713 u64 intr;
714
715 rvu = rvu_dl->rvu;
716 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
717 if (blkaddr < 0)
718 return IRQ_NONE;
719
720 npa_event_context = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
721 intr = rvu_read64(rvu, blkaddr, NPA_AF_RAS);
722 npa_event_context->npa_af_rvu_ras = intr;
723
724 /* Clear interrupts */
725 rvu_write64(rvu, blkaddr, NPA_AF_RAS, intr);
726 rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1C, ~0ULL);
727 queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_npa_health_reporter->ras_work);
728
729 return IRQ_HANDLED;
730}
731
732static void rvu_npa_unregister_interrupts(struct rvu *rvu)
733{
734 struct rvu_devlink *rvu_dl = rvu->rvu_dl;
735 int i, offs, blkaddr;
736 u64 reg;
737
738 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
739 if (blkaddr < 0)
740 return;
741
742 reg = rvu_read64(rvu, blkaddr, NPA_PRIV_AF_INT_CFG);
743 offs = reg & 0x3FF;
744
745 rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1C, ~0ULL);
746 rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1C, ~0ULL);
747 rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1C, ~0ULL);
748 rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1C, ~0ULL);
749
750 for (i = 0; i < NPA_AF_INT_VEC_CNT; i++)
751 if (rvu->irq_allocated[offs + i]) {
752 free_irq(pci_irq_vector(rvu->pdev, offs + i), rvu_dl);
753 rvu->irq_allocated[offs + i] = false;
754 }
755}
756
757static int rvu_npa_register_interrupts(struct rvu *rvu)
758{
759 int blkaddr, base;
760 bool rc;
761
762 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
763 if (blkaddr < 0)
764 return blkaddr;
765
766 /* Get NPA AF MSIX vectors offset. */
767 base = rvu_read64(rvu, blkaddr, NPA_PRIV_AF_INT_CFG) & 0x3ff;
768 if (!base) {
769 dev_warn(rvu->dev,
770 "Failed to get NPA_AF_INT vector offsets\n");
771 return 0;
772 }
773
774 /* Register and enable NPA_AF_RVU_INT interrupt */
775 rc = rvu_common_request_irq(rvu, base + NPA_AF_INT_VEC_RVU,
776 "NPA_AF_RVU_INT",
777 rvu_npa_af_rvu_intr_handler);
778 if (!rc)
779 goto err;
780 rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1S, ~0ULL);
781
782 /* Register and enable NPA_AF_GEN_INT interrupt */
783 rc = rvu_common_request_irq(rvu, base + NPA_AF_INT_VEC_GEN,
784 "NPA_AF_RVU_GEN",
785 rvu_npa_af_gen_intr_handler);
786 if (!rc)
787 goto err;
788 rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1S, ~0ULL);
789
790 /* Register and enable NPA_AF_ERR_INT interrupt */
791 rc = rvu_common_request_irq(rvu, base + NPA_AF_INT_VEC_AF_ERR,
792 "NPA_AF_ERR_INT",
793 rvu_npa_af_err_intr_handler);
794 if (!rc)
795 goto err;
796 rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1S, ~0ULL);
797
798 /* Register and enable NPA_AF_RAS interrupt */
799 rc = rvu_common_request_irq(rvu, base + NPA_AF_INT_VEC_POISON,
800 "NPA_AF_RAS",
801 rvu_npa_af_ras_intr_handler);
802 if (!rc)
803 goto err;
804 rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1S, ~0ULL);
805
806 return 0;
807err:
808 rvu_npa_unregister_interrupts(rvu);
809 return rc;
810}
811
/* Format one NPA interrupt class from the cached event context @ctx into
 * @fmsg: the raw cause register followed by one line per decoded bit.
 * Returns 0 on success, -EINVAL for an unknown @health_reporter.
 */
static int rvu_npa_report_show(struct devlink_fmsg *fmsg, void *ctx,
			       enum npa_af_rvu_health health_reporter)
{
	struct rvu_npa_event_ctx *npa_event_context;
	unsigned int alloc_dis, free_dis;
	u64 intr_val;

	npa_event_context = ctx;
	switch (health_reporter) {
	case NPA_AF_RVU_GEN:
		intr_val = npa_event_context->npa_af_rvu_gen;
		rvu_report_pair_start(fmsg, "NPA_AF_GENERAL");
		devlink_fmsg_u64_pair_put(fmsg, "\tNPA General Interrupt Reg ",
					  npa_event_context->npa_af_rvu_gen);
		if (intr_val & BIT_ULL(32))
			devlink_fmsg_string_put(fmsg, "\n\tUnmap PF Error");

		/* Bits [15:0]: per-source "free disable" flags. */
		free_dis = FIELD_GET(GENMASK(15, 0), intr_val);
		if (free_dis & BIT(NPA_INPQ_NIX0_RX))
			devlink_fmsg_string_put(fmsg, "\n\tNIX0: free disabled RX");
		if (free_dis & BIT(NPA_INPQ_NIX0_TX))
			devlink_fmsg_string_put(fmsg, "\n\tNIX0:free disabled TX");
		if (free_dis & BIT(NPA_INPQ_NIX1_RX))
			devlink_fmsg_string_put(fmsg, "\n\tNIX1: free disabled RX");
		if (free_dis & BIT(NPA_INPQ_NIX1_TX))
			devlink_fmsg_string_put(fmsg, "\n\tNIX1:free disabled TX");
		if (free_dis & BIT(NPA_INPQ_SSO))
			devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for SSO");
		if (free_dis & BIT(NPA_INPQ_TIM))
			devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for TIM");
		if (free_dis & BIT(NPA_INPQ_DPI))
			devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for DPI");
		if (free_dis & BIT(NPA_INPQ_AURA_OP))
			devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for AURA");

		/* Bits [31:16]: per-source "alloc disable" flags. */
		alloc_dis = FIELD_GET(GENMASK(31, 16), intr_val);
		if (alloc_dis & BIT(NPA_INPQ_NIX0_RX))
			devlink_fmsg_string_put(fmsg, "\n\tNIX0: alloc disabled RX");
		if (alloc_dis & BIT(NPA_INPQ_NIX0_TX))
			devlink_fmsg_string_put(fmsg, "\n\tNIX0:alloc disabled TX");
		if (alloc_dis & BIT(NPA_INPQ_NIX1_RX))
			devlink_fmsg_string_put(fmsg, "\n\tNIX1: alloc disabled RX");
		if (alloc_dis & BIT(NPA_INPQ_NIX1_TX))
			devlink_fmsg_string_put(fmsg, "\n\tNIX1:alloc disabled TX");
		if (alloc_dis & BIT(NPA_INPQ_SSO))
			devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for SSO");
		if (alloc_dis & BIT(NPA_INPQ_TIM))
			devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for TIM");
		if (alloc_dis & BIT(NPA_INPQ_DPI))
			devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for DPI");
		if (alloc_dis & BIT(NPA_INPQ_AURA_OP))
			devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for AURA");

		rvu_report_pair_end(fmsg);
		break;
	case NPA_AF_RVU_ERR:
		rvu_report_pair_start(fmsg, "NPA_AF_ERR");
		devlink_fmsg_u64_pair_put(fmsg, "\tNPA Error Interrupt Reg ",
					  npa_event_context->npa_af_rvu_err);
		if (npa_event_context->npa_af_rvu_err & BIT_ULL(14))
			devlink_fmsg_string_put(fmsg, "\n\tFault on NPA_AQ_INST_S read");
		if (npa_event_context->npa_af_rvu_err & BIT_ULL(13))
			devlink_fmsg_string_put(fmsg, "\n\tFault on NPA_AQ_RES_S write");
		if (npa_event_context->npa_af_rvu_err & BIT_ULL(12))
			devlink_fmsg_string_put(fmsg, "\n\tAQ Doorbell Error");
		rvu_report_pair_end(fmsg);
		break;
	case NPA_AF_RVU_RAS:
		rvu_report_pair_start(fmsg, "NPA_AF_RVU_RAS");
		devlink_fmsg_u64_pair_put(fmsg, "\tNPA RAS Interrupt Reg ",
					  npa_event_context->npa_af_rvu_ras);
		if (npa_event_context->npa_af_rvu_ras & BIT_ULL(34))
			devlink_fmsg_string_put(fmsg, "\n\tPoison data on NPA_AQ_INST_S");
		if (npa_event_context->npa_af_rvu_ras & BIT_ULL(33))
			devlink_fmsg_string_put(fmsg, "\n\tPoison data on NPA_AQ_RES_S");
		if (npa_event_context->npa_af_rvu_ras & BIT_ULL(32))
			devlink_fmsg_string_put(fmsg, "\n\tPoison data on HW context");
		rvu_report_pair_end(fmsg);
		break;
	case NPA_AF_RVU_INTR:
		rvu_report_pair_start(fmsg, "NPA_AF_RVU");
		devlink_fmsg_u64_pair_put(fmsg, "\tNPA RVU Interrupt Reg ",
					  npa_event_context->npa_af_rvu_int);
		if (npa_event_context->npa_af_rvu_int & BIT_ULL(0))
			devlink_fmsg_string_put(fmsg, "\n\tUnmap Slot Error");
		rvu_report_pair_end(fmsg);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
905
906static int rvu_hw_npa_intr_dump(struct devlink_health_reporter *reporter,
907 struct devlink_fmsg *fmsg, void *ctx,
908 struct netlink_ext_ack *netlink_extack)
909{
910 struct rvu *rvu = devlink_health_reporter_priv(reporter);
911 struct rvu_devlink *rvu_dl = rvu->rvu_dl;
912 struct rvu_npa_event_ctx *npa_ctx;
913
914 npa_ctx = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
915
916 return ctx ? rvu_npa_report_show(fmsg, ctx, NPA_AF_RVU_INTR) :
917 rvu_npa_report_show(fmsg, npa_ctx, NPA_AF_RVU_INTR);
918}
919
920static int rvu_hw_npa_intr_recover(struct devlink_health_reporter *reporter,
921 void *ctx, struct netlink_ext_ack *netlink_extack)
922{
923 struct rvu *rvu = devlink_health_reporter_priv(reporter);
924 struct rvu_npa_event_ctx *npa_event_ctx = ctx;
925 int blkaddr;
926
927 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
928 if (blkaddr < 0)
929 return blkaddr;
930
931 if (npa_event_ctx->npa_af_rvu_int)
932 rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1S, ~0ULL);
933
934 return 0;
935}
936
937static int rvu_hw_npa_gen_dump(struct devlink_health_reporter *reporter,
938 struct devlink_fmsg *fmsg, void *ctx,
939 struct netlink_ext_ack *netlink_extack)
940{
941 struct rvu *rvu = devlink_health_reporter_priv(reporter);
942 struct rvu_devlink *rvu_dl = rvu->rvu_dl;
943 struct rvu_npa_event_ctx *npa_ctx;
944
945 npa_ctx = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
946
947 return ctx ? rvu_npa_report_show(fmsg, ctx, NPA_AF_RVU_GEN) :
948 rvu_npa_report_show(fmsg, npa_ctx, NPA_AF_RVU_GEN);
949}
950
951static int rvu_hw_npa_gen_recover(struct devlink_health_reporter *reporter,
952 void *ctx, struct netlink_ext_ack *netlink_extack)
953{
954 struct rvu *rvu = devlink_health_reporter_priv(reporter);
955 struct rvu_npa_event_ctx *npa_event_ctx = ctx;
956 int blkaddr;
957
958 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
959 if (blkaddr < 0)
960 return blkaddr;
961
962 if (npa_event_ctx->npa_af_rvu_gen)
963 rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1S, ~0ULL);
964
965 return 0;
966}
967
968static int rvu_hw_npa_err_dump(struct devlink_health_reporter *reporter,
969 struct devlink_fmsg *fmsg, void *ctx,
970 struct netlink_ext_ack *netlink_extack)
971{
972 struct rvu *rvu = devlink_health_reporter_priv(reporter);
973 struct rvu_devlink *rvu_dl = rvu->rvu_dl;
974 struct rvu_npa_event_ctx *npa_ctx;
975
976 npa_ctx = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
977
978 return ctx ? rvu_npa_report_show(fmsg, ctx, NPA_AF_RVU_ERR) :
979 rvu_npa_report_show(fmsg, npa_ctx, NPA_AF_RVU_ERR);
980}
981
982static int rvu_hw_npa_err_recover(struct devlink_health_reporter *reporter,
983 void *ctx, struct netlink_ext_ack *netlink_extack)
984{
985 struct rvu *rvu = devlink_health_reporter_priv(reporter);
986 struct rvu_npa_event_ctx *npa_event_ctx = ctx;
987 int blkaddr;
988
989 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
990 if (blkaddr < 0)
991 return blkaddr;
992
993 if (npa_event_ctx->npa_af_rvu_err)
994 rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1S, ~0ULL);
995
996 return 0;
997}
998
999static int rvu_hw_npa_ras_dump(struct devlink_health_reporter *reporter,
1000 struct devlink_fmsg *fmsg, void *ctx,
1001 struct netlink_ext_ack *netlink_extack)
1002{
1003 struct rvu *rvu = devlink_health_reporter_priv(reporter);
1004 struct rvu_devlink *rvu_dl = rvu->rvu_dl;
1005 struct rvu_npa_event_ctx *npa_ctx;
1006
1007 npa_ctx = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
1008
1009 return ctx ? rvu_npa_report_show(fmsg, ctx, NPA_AF_RVU_RAS) :
1010 rvu_npa_report_show(fmsg, npa_ctx, NPA_AF_RVU_RAS);
1011}
1012
1013static int rvu_hw_npa_ras_recover(struct devlink_health_reporter *reporter,
1014 void *ctx, struct netlink_ext_ack *netlink_extack)
1015{
1016 struct rvu *rvu = devlink_health_reporter_priv(reporter);
1017 struct rvu_npa_event_ctx *npa_event_ctx = ctx;
1018 int blkaddr;
1019
1020 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
1021 if (blkaddr < 0)
1022 return blkaddr;
1023
1024 if (npa_event_ctx->npa_af_rvu_ras)
1025 rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1S, ~0ULL);
1026
1027 return 0;
1028}
1029
/* Instantiate the devlink health reporter ops structures
 * (rvu_<name>_reporter_ops wiring the _dump/_recover callbacks above)
 * for each NPA error class.
 */
RVU_REPORTERS(hw_npa_intr);
RVU_REPORTERS(hw_npa_gen);
RVU_REPORTERS(hw_npa_err);
RVU_REPORTERS(hw_npa_ras);

/* Forward declaration: used by the error paths of the create side. */
static void rvu_npa_health_reporters_destroy(struct rvu_devlink *rvu_dl);
1036
/* Allocate the NPA reporter bookkeeping and event context, create the
 * four NPA devlink health reporters (intr/gen/err/ras) and the workqueue
 * used to defer reporting out of IRQ context, then initialise the
 * per-reporter work items.
 *
 * NOTE(review): on a mid-function failure nothing allocated so far is
 * freed here; cleanup is expected from the caller's error path via
 * rvu_npa_health_reporters_destroy() -- verify that path handles every
 * partial state (it bails out early when the last reporter is absent).
 */
static int rvu_npa_register_reporters(struct rvu_devlink *rvu_dl)
{
	struct rvu_npa_health_reporters *rvu_reporters;
	struct rvu_npa_event_ctx *npa_event_context;
	struct rvu *rvu = rvu_dl->rvu;

	rvu_reporters = kzalloc(sizeof(*rvu_reporters), GFP_KERNEL);
	if (!rvu_reporters)
		return -ENOMEM;

	/* Publish the struct first so the destroy path can find it. */
	rvu_dl->rvu_npa_health_reporter = rvu_reporters;
	npa_event_context = kzalloc(sizeof(*npa_event_context), GFP_KERNEL);
	if (!npa_event_context)
		return -ENOMEM;

	rvu_reporters->npa_event_ctx = npa_event_context;
	rvu_reporters->rvu_hw_npa_intr_reporter =
		devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_intr_reporter_ops, 0, rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_npa_intr_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_npa_intr reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_npa_intr_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_npa_intr_reporter);
	}

	rvu_reporters->rvu_hw_npa_gen_reporter =
		devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_gen_reporter_ops, 0, rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_npa_gen_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_npa_gen reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_npa_gen_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_npa_gen_reporter);
	}

	rvu_reporters->rvu_hw_npa_err_reporter =
		devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_err_reporter_ops, 0, rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_npa_err_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_npa_err reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_npa_err_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_npa_err_reporter);
	}

	rvu_reporters->rvu_hw_npa_ras_reporter =
		devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_ras_reporter_ops, 0, rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_npa_ras_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_npa_ras reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_npa_ras_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_npa_ras_reporter);
	}

	/* Workqueue on which the IRQ handlers queue the report work. */
	rvu_dl->devlink_wq = create_workqueue("rvu_devlink_wq");
	if (!rvu_dl->devlink_wq)
		return -ENOMEM;

	INIT_WORK(&rvu_reporters->intr_work, rvu_npa_intr_work);
	INIT_WORK(&rvu_reporters->err_work, rvu_npa_err_work);
	INIT_WORK(&rvu_reporters->gen_work, rvu_npa_gen_work);
	INIT_WORK(&rvu_reporters->ras_work, rvu_npa_ras_work);

	return 0;
}
1096
1097static int rvu_npa_health_reporters_create(struct rvu_devlink *rvu_dl)
1098{
1099 struct rvu *rvu = rvu_dl->rvu;
1100 int err;
1101
1102 err = rvu_npa_register_reporters(rvu_dl);
1103 if (err) {
1104 dev_warn(rvu->dev, "Failed to create npa reporter, err =%d\n",
1105 err);
1106 return err;
1107 }
1108 rvu_npa_register_interrupts(rvu);
1109
1110 return 0;
1111}
1112
1113static void rvu_npa_health_reporters_destroy(struct rvu_devlink *rvu_dl)
1114{
1115 struct rvu_npa_health_reporters *npa_reporters;
1116 struct rvu *rvu = rvu_dl->rvu;
1117
1118 npa_reporters = rvu_dl->rvu_npa_health_reporter;
1119
1120 if (!npa_reporters->rvu_hw_npa_ras_reporter)
1121 return;
1122 if (!IS_ERR_OR_NULL(npa_reporters->rvu_hw_npa_intr_reporter))
1123 devlink_health_reporter_destroy(npa_reporters->rvu_hw_npa_intr_reporter);
1124
1125 if (!IS_ERR_OR_NULL(npa_reporters->rvu_hw_npa_gen_reporter))
1126 devlink_health_reporter_destroy(npa_reporters->rvu_hw_npa_gen_reporter);
1127
1128 if (!IS_ERR_OR_NULL(npa_reporters->rvu_hw_npa_err_reporter))
1129 devlink_health_reporter_destroy(npa_reporters->rvu_hw_npa_err_reporter);
1130
1131 if (!IS_ERR_OR_NULL(npa_reporters->rvu_hw_npa_ras_reporter))
1132 devlink_health_reporter_destroy(npa_reporters->rvu_hw_npa_ras_reporter);
1133
1134 rvu_npa_unregister_interrupts(rvu);
1135 kfree(rvu_dl->rvu_npa_health_reporter->npa_event_ctx);
1136 kfree(rvu_dl->rvu_npa_health_reporter);
1137}
1138
1139static int rvu_health_reporters_create(struct rvu *rvu)
1140{
1141 struct rvu_devlink *rvu_dl;
5ed66306 1142 int err;
f1168d1e
GC
1143
1144 rvu_dl = rvu->rvu_dl;
5ed66306
GC
1145 err = rvu_npa_health_reporters_create(rvu_dl);
1146 if (err)
1147 return err;
1148
1149 return rvu_nix_health_reporters_create(rvu_dl);
f1168d1e
GC
1150}
1151
1152static void rvu_health_reporters_destroy(struct rvu *rvu)
1153{
1154 struct rvu_devlink *rvu_dl;
1155
1156 if (!rvu->rvu_dl)
1157 return;
1158
1159 rvu_dl = rvu->rvu_dl;
1160 rvu_npa_health_reporters_destroy(rvu_dl);
5ed66306 1161 rvu_nix_health_reporters_destroy(rvu_dl);
f1168d1e
GC
1162}
1163
76660df2
SG
1164/* Devlink Params APIs */
1165static int rvu_af_dl_dwrr_mtu_validate(struct devlink *devlink, u32 id,
1166 union devlink_param_value val,
1167 struct netlink_ext_ack *extack)
1168{
1169 struct rvu_devlink *rvu_dl = devlink_priv(devlink);
1170 struct rvu *rvu = rvu_dl->rvu;
1171 int dwrr_mtu = val.vu32;
1172 struct nix_txsch *txsch;
1173 struct nix_hw *nix_hw;
1174
1175 if (!rvu->hw->cap.nix_common_dwrr_mtu) {
1176 NL_SET_ERR_MSG_MOD(extack,
1177 "Setting DWRR_MTU is not supported on this silicon");
1178 return -EOPNOTSUPP;
1179 }
1180
1181 if ((dwrr_mtu > 65536 || !is_power_of_2(dwrr_mtu)) &&
1182 (dwrr_mtu != 9728 && dwrr_mtu != 10240)) {
1183 NL_SET_ERR_MSG_MOD(extack,
1184 "Invalid, supported MTUs are 0,2,4,8.16,32,64....4K,8K,32K,64K and 9728, 10240");
1185 return -EINVAL;
1186 }
1187
1188 nix_hw = get_nix_hw(rvu->hw, BLKADDR_NIX0);
1189 if (!nix_hw)
1190 return -ENODEV;
1191
1192 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
1193 if (rvu_rsrc_free_count(&txsch->schq) != txsch->schq.max) {
1194 NL_SET_ERR_MSG_MOD(extack,
1195 "Changing DWRR MTU is not supported when there are active NIXLFs");
1196 NL_SET_ERR_MSG_MOD(extack,
8578880d 1197 "Make sure none of the PF/VF interfaces are initialized and retry");
76660df2
SG
1198 return -EOPNOTSUPP;
1199 }
1200
1201 return 0;
1202}
1203
1204static int rvu_af_dl_dwrr_mtu_set(struct devlink *devlink, u32 id,
1205 struct devlink_param_gset_ctx *ctx)
1206{
1207 struct rvu_devlink *rvu_dl = devlink_priv(devlink);
1208 struct rvu *rvu = rvu_dl->rvu;
1209 u64 dwrr_mtu;
1210
1211 dwrr_mtu = convert_bytes_to_dwrr_mtu(ctx->val.vu32);
bbba125e
SG
1212 rvu_write64(rvu, BLKADDR_NIX0,
1213 nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_RPM), dwrr_mtu);
76660df2
SG
1214
1215 return 0;
1216}
1217
1218static int rvu_af_dl_dwrr_mtu_get(struct devlink *devlink, u32 id,
1219 struct devlink_param_gset_ctx *ctx)
1220{
1221 struct rvu_devlink *rvu_dl = devlink_priv(devlink);
1222 struct rvu *rvu = rvu_dl->rvu;
1223 u64 dwrr_mtu;
1224
1225 if (!rvu->hw->cap.nix_common_dwrr_mtu)
1226 return -EOPNOTSUPP;
1227
bbba125e
SG
1228 dwrr_mtu = rvu_read64(rvu, BLKADDR_NIX0,
1229 nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_RPM));
76660df2
SG
1230 ctx->val.vu32 = convert_dwrr_mtu_to_bytes(dwrr_mtu);
1231
1232 return 0;
1233}
1234
/* Driver-specific devlink param IDs; numbering starts past the generic
 * devlink parameter ID space.
 */
enum rvu_af_dl_param_id {
	RVU_AF_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
	RVU_AF_DEVLINK_PARAM_ID_DWRR_MTU,
	RVU_AF_DEVLINK_PARAM_ID_NPC_EXACT_FEATURE_DISABLE,
	RVU_AF_DEVLINK_PARAM_ID_NPC_MCAM_ZONE_PERCENT,
};
1241
ef83e186
RK
1242static int rvu_af_npc_exact_feature_get(struct devlink *devlink, u32 id,
1243 struct devlink_param_gset_ctx *ctx)
1244{
1245 struct rvu_devlink *rvu_dl = devlink_priv(devlink);
1246 struct rvu *rvu = rvu_dl->rvu;
1247 bool enabled;
1248
1249 enabled = rvu_npc_exact_has_match_table(rvu);
1250
1251 snprintf(ctx->val.vstr, sizeof(ctx->val.vstr), "%s",
1252 enabled ? "enabled" : "disabled");
1253
1254 return 0;
1255}
1256
1257static int rvu_af_npc_exact_feature_disable(struct devlink *devlink, u32 id,
1258 struct devlink_param_gset_ctx *ctx)
1259{
1260 struct rvu_devlink *rvu_dl = devlink_priv(devlink);
1261 struct rvu *rvu = rvu_dl->rvu;
1262
1263 rvu_npc_exact_disable_feature(rvu);
1264
1265 return 0;
1266}
1267
1268static int rvu_af_npc_exact_feature_validate(struct devlink *devlink, u32 id,
1269 union devlink_param_value val,
1270 struct netlink_ext_ack *extack)
1271{
1272 struct rvu_devlink *rvu_dl = devlink_priv(devlink);
1273 struct rvu *rvu = rvu_dl->rvu;
1274 u64 enable;
1275
1276 if (kstrtoull(val.vstr, 10, &enable)) {
1277 NL_SET_ERR_MSG_MOD(extack,
1278 "Only 1 value is supported");
1279 return -EINVAL;
1280 }
1281
1282 if (enable != 1) {
1283 NL_SET_ERR_MSG_MOD(extack,
1284 "Only disabling exact match feature is supported");
1285 return -EINVAL;
1286 }
1287
1288 if (rvu_npc_exact_can_disable_feature(rvu))
1289 return 0;
1290
1291 NL_SET_ERR_MSG_MOD(extack,
1292 "Can't disable exact match feature; Please try before any configuration");
1293 return -EFAULT;
1294}
1295
09de114c
NM
1296static int rvu_af_dl_npc_mcam_high_zone_percent_get(struct devlink *devlink, u32 id,
1297 struct devlink_param_gset_ctx *ctx)
1298{
1299 struct rvu_devlink *rvu_dl = devlink_priv(devlink);
1300 struct rvu *rvu = rvu_dl->rvu;
1301 struct npc_mcam *mcam;
1302 u32 percent;
1303
1304 mcam = &rvu->hw->mcam;
1305 percent = (mcam->hprio_count * 100) / mcam->bmap_entries;
1306 ctx->val.vu8 = (u8)percent;
1307
1308 return 0;
1309}
1310
1311static int rvu_af_dl_npc_mcam_high_zone_percent_set(struct devlink *devlink, u32 id,
1312 struct devlink_param_gset_ctx *ctx)
1313{
1314 struct rvu_devlink *rvu_dl = devlink_priv(devlink);
1315 struct rvu *rvu = rvu_dl->rvu;
1316 struct npc_mcam *mcam;
1317 u32 percent;
1318
1319 percent = ctx->val.vu8;
1320 mcam = &rvu->hw->mcam;
1321 mcam->hprio_count = (mcam->bmap_entries * percent) / 100;
1322 mcam->hprio_end = mcam->hprio_count;
1323 mcam->lprio_count = (mcam->bmap_entries - mcam->hprio_count) / 2;
1324 mcam->lprio_start = mcam->bmap_entries - mcam->lprio_count;
1325
1326 return 0;
1327}
1328
1329static int rvu_af_dl_npc_mcam_high_zone_percent_validate(struct devlink *devlink, u32 id,
1330 union devlink_param_value val,
1331 struct netlink_ext_ack *extack)
1332{
1333 struct rvu_devlink *rvu_dl = devlink_priv(devlink);
1334 struct rvu *rvu = rvu_dl->rvu;
1335 struct npc_mcam *mcam;
1336
1337 /* The percent of high prio zone must range from 12% to 100% of unreserved mcam space */
1338 if (val.vu8 < 12 || val.vu8 > 100) {
1339 NL_SET_ERR_MSG_MOD(extack,
1340 "mcam high zone percent must be between 12% to 100%");
1341 return -EINVAL;
1342 }
1343
1344 /* Do not allow user to modify the high priority zone entries while mcam entries
1345 * have already been assigned.
1346 */
1347 mcam = &rvu->hw->mcam;
1348 if (mcam->bmap_fcnt < mcam->bmap_entries) {
1349 NL_SET_ERR_MSG_MOD(extack,
1350 "mcam entries have already been assigned, can't resize");
1351 return -EPERM;
1352 }
1353
1354 return 0;
1355}
1356
76660df2
SG
/* Devlink params registered on every AF devlink instance. */
static const struct devlink_param rvu_af_dl_params[] = {
	DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_DWRR_MTU,
			     "dwrr_mtu", DEVLINK_PARAM_TYPE_U32,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     rvu_af_dl_dwrr_mtu_get, rvu_af_dl_dwrr_mtu_set,
			     rvu_af_dl_dwrr_mtu_validate),
};
1364
/* Devlink params registered only when the NPC exact-match table exists
 * (CN10K-B silicon); registration is conditional in rvu_register_dl().
 */
static const struct devlink_param rvu_af_dl_param_exact_match[] = {
	DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NPC_EXACT_FEATURE_DISABLE,
			     "npc_exact_feature_disable", DEVLINK_PARAM_TYPE_STRING,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     rvu_af_npc_exact_feature_get,
			     rvu_af_npc_exact_feature_disable,
			     rvu_af_npc_exact_feature_validate),
	DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NPC_MCAM_ZONE_PERCENT,
			     "npc_mcam_high_zone_percent", DEVLINK_PARAM_TYPE_U8,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     rvu_af_dl_npc_mcam_high_zone_percent_get,
			     rvu_af_dl_npc_mcam_high_zone_percent_set,
			     rvu_af_dl_npc_mcam_high_zone_percent_validate),
};
1379
1380/* Devlink switch mode */
23109f8d
SS
1381static int rvu_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
1382{
1383 struct rvu_devlink *rvu_dl = devlink_priv(devlink);
1384 struct rvu *rvu = rvu_dl->rvu;
1385 struct rvu_switch *rswitch;
1386
1387 rswitch = &rvu->rswitch;
1388 *mode = rswitch->mode;
1389
1390 return 0;
1391}
1392
1393static int rvu_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
1394 struct netlink_ext_ack *extack)
1395{
1396 struct rvu_devlink *rvu_dl = devlink_priv(devlink);
1397 struct rvu *rvu = rvu_dl->rvu;
1398 struct rvu_switch *rswitch;
1399
1400 rswitch = &rvu->rswitch;
1401 switch (mode) {
1402 case DEVLINK_ESWITCH_MODE_LEGACY:
1403 case DEVLINK_ESWITCH_MODE_SWITCHDEV:
1404 if (rswitch->mode == mode)
1405 return 0;
1406 rswitch->mode = mode;
1407 if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
1408 rvu_switch_enable(rvu);
1409 else
1410 rvu_switch_disable(rvu);
1411 break;
1412 default:
1413 return -EINVAL;
1414 }
1415
1416 return 0;
1417}
1418
fae06da4 1419static const struct devlink_ops rvu_devlink_ops = {
23109f8d
SS
1420 .eswitch_mode_get = rvu_devlink_eswitch_mode_get,
1421 .eswitch_mode_set = rvu_devlink_eswitch_mode_set,
fae06da4
GC
1422};
1423
/* Allocate and set up the AF devlink instance: create the health
 * reporters, register the devlink params (exact-match params only on
 * CN10K-B), and finally publish the instance to user space.
 *
 * devlink_register() is deliberately last so user space never observes a
 * partially initialised instance; each error path unwinds everything
 * created before it.
 *
 * Returns 0 on success or a negative errno.
 */
int rvu_register_dl(struct rvu *rvu)
{
	struct rvu_devlink *rvu_dl;
	struct devlink *dl;
	int err;

	dl = devlink_alloc(&rvu_devlink_ops, sizeof(struct rvu_devlink),
			   rvu->dev);
	if (!dl) {
		dev_warn(rvu->dev, "devlink_alloc failed\n");
		return -ENOMEM;
	}

	/* Cross-link the devlink private data and the AF state. */
	rvu_dl = devlink_priv(dl);
	rvu_dl->dl = dl;
	rvu_dl->rvu = rvu;
	rvu->rvu_dl = rvu_dl;

	err = rvu_health_reporters_create(rvu);
	if (err) {
		dev_err(rvu->dev,
			"devlink health reporter creation failed with error %d\n", err);
		goto err_dl_health;
	}

	err = devlink_params_register(dl, rvu_af_dl_params, ARRAY_SIZE(rvu_af_dl_params));
	if (err) {
		dev_err(rvu->dev,
			"devlink params register failed with error %d", err);
		goto err_dl_health;
	}

	/* Register exact match devlink only for CN10K-B */
	if (!rvu_npc_exact_has_match_table(rvu))
		goto done;

	err = devlink_params_register(dl, rvu_af_dl_param_exact_match,
				      ARRAY_SIZE(rvu_af_dl_param_exact_match));
	if (err) {
		dev_err(rvu->dev,
			"devlink exact match params register failed with error %d", err);
		goto err_dl_exact_match;
	}

done:
	devlink_register(dl);
	return 0;

err_dl_exact_match:
	devlink_params_unregister(dl, rvu_af_dl_params, ARRAY_SIZE(rvu_af_dl_params));

err_dl_health:
	rvu_health_reporters_destroy(rvu);
	devlink_free(dl);
	return err;
}
1480
1481void rvu_unregister_dl(struct rvu *rvu)
1482{
1483 struct rvu_devlink *rvu_dl = rvu->rvu_dl;
1484 struct devlink *dl = rvu_dl->dl;
1485
1d264db4 1486 devlink_unregister(dl);
917d5e04
RK
1487
1488 devlink_params_unregister(dl, rvu_af_dl_params, ARRAY_SIZE(rvu_af_dl_params));
1489
1490 /* Unregister exact match devlink only for CN10K-B */
1491 if (rvu_npc_exact_has_match_table(rvu))
1492 devlink_params_unregister(dl, rvu_af_dl_param_exact_match,
1493 ARRAY_SIZE(rvu_af_dl_param_exact_match));
1494
f1168d1e 1495 rvu_health_reporters_destroy(rvu);
fae06da4 1496 devlink_free(dl);
fae06da4 1497}