// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Admin Function Devlink
 *
 * Copyright (C) 2020 Marvell.
 *
 */

#include <linux/bitfield.h>

#include "rvu_struct.h"
#include "rvu_npc_hash.h"

#define DRV_NAME "octeontx2-af"
/* Open a named pair followed by an object nest in a devlink fmsg.
 * Must be balanced by a later rvu_report_pair_end() on the same fmsg.
 */
static void rvu_report_pair_start(struct devlink_fmsg *fmsg, const char *name)
{
	devlink_fmsg_pair_nest_start(fmsg, name);
	devlink_fmsg_obj_nest_start(fmsg);
}
/* Close the object nest and the enclosing pair opened by
 * rvu_report_pair_start(). Nests are closed in reverse order of opening.
 */
static void rvu_report_pair_end(struct devlink_fmsg *fmsg)
{
	devlink_fmsg_obj_nest_end(fmsg);
	devlink_fmsg_pair_nest_end(fmsg);
}
29 static bool rvu_common_request_irq(struct rvu
*rvu
, int offset
,
30 const char *name
, irq_handler_t fn
)
32 struct rvu_devlink
*rvu_dl
= rvu
->rvu_dl
;
35 sprintf(&rvu
->irq_name
[offset
* NAME_SIZE
], "%s", name
);
36 rc
= request_irq(pci_irq_vector(rvu
->pdev
, offset
), fn
, 0,
37 &rvu
->irq_name
[offset
* NAME_SIZE
], rvu_dl
);
39 dev_warn(rvu
->dev
, "Failed to register %s irq\n", name
);
41 rvu
->irq_allocated
[offset
] = true;
43 return rvu
->irq_allocated
[offset
];
46 static void rvu_nix_intr_work(struct work_struct
*work
)
48 struct rvu_nix_health_reporters
*rvu_nix_health_reporter
;
50 rvu_nix_health_reporter
= container_of(work
, struct rvu_nix_health_reporters
, intr_work
);
51 devlink_health_report(rvu_nix_health_reporter
->rvu_hw_nix_intr_reporter
,
53 rvu_nix_health_reporter
->nix_event_ctx
);
56 static irqreturn_t
rvu_nix_af_rvu_intr_handler(int irq
, void *rvu_irq
)
58 struct rvu_nix_event_ctx
*nix_event_context
;
59 struct rvu_devlink
*rvu_dl
= rvu_irq
;
65 blkaddr
= rvu_get_blkaddr(rvu
, BLKTYPE_NIX
, 0);
69 nix_event_context
= rvu_dl
->rvu_nix_health_reporter
->nix_event_ctx
;
70 intr
= rvu_read64(rvu
, blkaddr
, NIX_AF_RVU_INT
);
71 nix_event_context
->nix_af_rvu_int
= intr
;
73 /* Clear interrupts */
74 rvu_write64(rvu
, blkaddr
, NIX_AF_RVU_INT
, intr
);
75 rvu_write64(rvu
, blkaddr
, NIX_AF_RVU_INT_ENA_W1C
, ~0ULL);
76 queue_work(rvu_dl
->devlink_wq
, &rvu_dl
->rvu_nix_health_reporter
->intr_work
);
81 static void rvu_nix_gen_work(struct work_struct
*work
)
83 struct rvu_nix_health_reporters
*rvu_nix_health_reporter
;
85 rvu_nix_health_reporter
= container_of(work
, struct rvu_nix_health_reporters
, gen_work
);
86 devlink_health_report(rvu_nix_health_reporter
->rvu_hw_nix_gen_reporter
,
88 rvu_nix_health_reporter
->nix_event_ctx
);
91 static irqreturn_t
rvu_nix_af_rvu_gen_handler(int irq
, void *rvu_irq
)
93 struct rvu_nix_event_ctx
*nix_event_context
;
94 struct rvu_devlink
*rvu_dl
= rvu_irq
;
100 blkaddr
= rvu_get_blkaddr(rvu
, BLKTYPE_NIX
, 0);
104 nix_event_context
= rvu_dl
->rvu_nix_health_reporter
->nix_event_ctx
;
105 intr
= rvu_read64(rvu
, blkaddr
, NIX_AF_GEN_INT
);
106 nix_event_context
->nix_af_rvu_gen
= intr
;
108 /* Clear interrupts */
109 rvu_write64(rvu
, blkaddr
, NIX_AF_GEN_INT
, intr
);
110 rvu_write64(rvu
, blkaddr
, NIX_AF_GEN_INT_ENA_W1C
, ~0ULL);
111 queue_work(rvu_dl
->devlink_wq
, &rvu_dl
->rvu_nix_health_reporter
->gen_work
);
116 static void rvu_nix_err_work(struct work_struct
*work
)
118 struct rvu_nix_health_reporters
*rvu_nix_health_reporter
;
120 rvu_nix_health_reporter
= container_of(work
, struct rvu_nix_health_reporters
, err_work
);
121 devlink_health_report(rvu_nix_health_reporter
->rvu_hw_nix_err_reporter
,
123 rvu_nix_health_reporter
->nix_event_ctx
);
126 static irqreturn_t
rvu_nix_af_rvu_err_handler(int irq
, void *rvu_irq
)
128 struct rvu_nix_event_ctx
*nix_event_context
;
129 struct rvu_devlink
*rvu_dl
= rvu_irq
;
135 blkaddr
= rvu_get_blkaddr(rvu
, BLKTYPE_NIX
, 0);
139 nix_event_context
= rvu_dl
->rvu_nix_health_reporter
->nix_event_ctx
;
140 intr
= rvu_read64(rvu
, blkaddr
, NIX_AF_ERR_INT
);
141 nix_event_context
->nix_af_rvu_err
= intr
;
143 /* Clear interrupts */
144 rvu_write64(rvu
, blkaddr
, NIX_AF_ERR_INT
, intr
);
145 rvu_write64(rvu
, blkaddr
, NIX_AF_ERR_INT_ENA_W1C
, ~0ULL);
146 queue_work(rvu_dl
->devlink_wq
, &rvu_dl
->rvu_nix_health_reporter
->err_work
);
151 static void rvu_nix_ras_work(struct work_struct
*work
)
153 struct rvu_nix_health_reporters
*rvu_nix_health_reporter
;
155 rvu_nix_health_reporter
= container_of(work
, struct rvu_nix_health_reporters
, ras_work
);
156 devlink_health_report(rvu_nix_health_reporter
->rvu_hw_nix_ras_reporter
,
158 rvu_nix_health_reporter
->nix_event_ctx
);
161 static irqreturn_t
rvu_nix_af_rvu_ras_handler(int irq
, void *rvu_irq
)
163 struct rvu_nix_event_ctx
*nix_event_context
;
164 struct rvu_devlink
*rvu_dl
= rvu_irq
;
170 blkaddr
= rvu_get_blkaddr(rvu
, BLKTYPE_NIX
, 0);
174 nix_event_context
= rvu_dl
->rvu_nix_health_reporter
->nix_event_ctx
;
175 intr
= rvu_read64(rvu
, blkaddr
, NIX_AF_ERR_INT
);
176 nix_event_context
->nix_af_rvu_ras
= intr
;
178 /* Clear interrupts */
179 rvu_write64(rvu
, blkaddr
, NIX_AF_RAS
, intr
);
180 rvu_write64(rvu
, blkaddr
, NIX_AF_RAS_ENA_W1C
, ~0ULL);
181 queue_work(rvu_dl
->devlink_wq
, &rvu_dl
->rvu_nix_health_reporter
->ras_work
);
186 static void rvu_nix_unregister_interrupts(struct rvu
*rvu
)
188 struct rvu_devlink
*rvu_dl
= rvu
->rvu_dl
;
189 int offs
, i
, blkaddr
;
191 blkaddr
= rvu_get_blkaddr(rvu
, BLKTYPE_NIX
, 0);
195 offs
= rvu_read64(rvu
, blkaddr
, NIX_PRIV_AF_INT_CFG
) & 0x3ff;
199 rvu_write64(rvu
, blkaddr
, NIX_AF_RVU_INT_ENA_W1C
, ~0ULL);
200 rvu_write64(rvu
, blkaddr
, NIX_AF_GEN_INT_ENA_W1C
, ~0ULL);
201 rvu_write64(rvu
, blkaddr
, NIX_AF_ERR_INT_ENA_W1C
, ~0ULL);
202 rvu_write64(rvu
, blkaddr
, NIX_AF_RAS_ENA_W1C
, ~0ULL);
204 if (rvu
->irq_allocated
[offs
+ NIX_AF_INT_VEC_RVU
]) {
205 free_irq(pci_irq_vector(rvu
->pdev
, offs
+ NIX_AF_INT_VEC_RVU
),
207 rvu
->irq_allocated
[offs
+ NIX_AF_INT_VEC_RVU
] = false;
210 for (i
= NIX_AF_INT_VEC_AF_ERR
; i
< NIX_AF_INT_VEC_CNT
; i
++)
211 if (rvu
->irq_allocated
[offs
+ i
]) {
212 free_irq(pci_irq_vector(rvu
->pdev
, offs
+ i
), rvu_dl
);
213 rvu
->irq_allocated
[offs
+ i
] = false;
217 static int rvu_nix_register_interrupts(struct rvu
*rvu
)
222 blkaddr
= rvu_get_blkaddr(rvu
, BLKTYPE_NIX
, 0);
226 /* Get NIX AF MSIX vectors offset. */
227 base
= rvu_read64(rvu
, blkaddr
, NIX_PRIV_AF_INT_CFG
) & 0x3ff;
230 "Failed to get NIX%d NIX_AF_INT vector offsets\n",
231 blkaddr
- BLKADDR_NIX0
);
234 /* Register and enable NIX_AF_RVU_INT interrupt */
235 rc
= rvu_common_request_irq(rvu
, base
+ NIX_AF_INT_VEC_RVU
,
237 rvu_nix_af_rvu_intr_handler
);
240 rvu_write64(rvu
, blkaddr
, NIX_AF_RVU_INT_ENA_W1S
, ~0ULL);
242 /* Register and enable NIX_AF_GEN_INT interrupt */
243 rc
= rvu_common_request_irq(rvu
, base
+ NIX_AF_INT_VEC_GEN
,
245 rvu_nix_af_rvu_gen_handler
);
248 rvu_write64(rvu
, blkaddr
, NIX_AF_GEN_INT_ENA_W1S
, ~0ULL);
250 /* Register and enable NIX_AF_ERR_INT interrupt */
251 rc
= rvu_common_request_irq(rvu
, base
+ NIX_AF_INT_VEC_AF_ERR
,
253 rvu_nix_af_rvu_err_handler
);
256 rvu_write64(rvu
, blkaddr
, NIX_AF_ERR_INT_ENA_W1S
, ~0ULL);
258 /* Register and enable NIX_AF_RAS interrupt */
259 rc
= rvu_common_request_irq(rvu
, base
+ NIX_AF_INT_VEC_POISON
,
261 rvu_nix_af_rvu_ras_handler
);
264 rvu_write64(rvu
, blkaddr
, NIX_AF_RAS_ENA_W1S
, ~0ULL);
268 rvu_nix_unregister_interrupts(rvu
);
272 static int rvu_nix_report_show(struct devlink_fmsg
*fmsg
, void *ctx
,
273 enum nix_af_rvu_health health_reporter
)
275 struct rvu_nix_event_ctx
*nix_event_context
;
278 nix_event_context
= ctx
;
279 switch (health_reporter
) {
280 case NIX_AF_RVU_INTR
:
281 intr_val
= nix_event_context
->nix_af_rvu_int
;
282 rvu_report_pair_start(fmsg
, "NIX_AF_RVU");
283 devlink_fmsg_u64_pair_put(fmsg
, "\tNIX RVU Interrupt Reg ",
284 nix_event_context
->nix_af_rvu_int
);
285 if (intr_val
& BIT_ULL(0))
286 devlink_fmsg_string_put(fmsg
, "\n\tUnmap Slot Error");
287 rvu_report_pair_end(fmsg
);
290 intr_val
= nix_event_context
->nix_af_rvu_gen
;
291 rvu_report_pair_start(fmsg
, "NIX_AF_GENERAL");
292 devlink_fmsg_u64_pair_put(fmsg
, "\tNIX General Interrupt Reg ",
293 nix_event_context
->nix_af_rvu_gen
);
294 if (intr_val
& BIT_ULL(0))
295 devlink_fmsg_string_put(fmsg
, "\n\tRx multicast pkt drop");
296 if (intr_val
& BIT_ULL(1))
297 devlink_fmsg_string_put(fmsg
, "\n\tRx mirror pkt drop");
298 if (intr_val
& BIT_ULL(4))
299 devlink_fmsg_string_put(fmsg
, "\n\tSMQ flush done");
300 rvu_report_pair_end(fmsg
);
303 intr_val
= nix_event_context
->nix_af_rvu_err
;
304 rvu_report_pair_start(fmsg
, "NIX_AF_ERR");
305 devlink_fmsg_u64_pair_put(fmsg
, "\tNIX Error Interrupt Reg ",
306 nix_event_context
->nix_af_rvu_err
);
307 if (intr_val
& BIT_ULL(14))
308 devlink_fmsg_string_put(fmsg
, "\n\tFault on NIX_AQ_INST_S read");
309 if (intr_val
& BIT_ULL(13))
310 devlink_fmsg_string_put(fmsg
, "\n\tFault on NIX_AQ_RES_S write");
311 if (intr_val
& BIT_ULL(12))
312 devlink_fmsg_string_put(fmsg
, "\n\tAQ Doorbell Error");
313 if (intr_val
& BIT_ULL(6))
314 devlink_fmsg_string_put(fmsg
, "\n\tRx on unmapped PF_FUNC");
315 if (intr_val
& BIT_ULL(5))
316 devlink_fmsg_string_put(fmsg
, "\n\tRx multicast replication error");
317 if (intr_val
& BIT_ULL(4))
318 devlink_fmsg_string_put(fmsg
, "\n\tFault on NIX_RX_MCE_S read");
319 if (intr_val
& BIT_ULL(3))
320 devlink_fmsg_string_put(fmsg
, "\n\tFault on multicast WQE read");
321 if (intr_val
& BIT_ULL(2))
322 devlink_fmsg_string_put(fmsg
, "\n\tFault on mirror WQE read");
323 if (intr_val
& BIT_ULL(1))
324 devlink_fmsg_string_put(fmsg
, "\n\tFault on mirror pkt write");
325 if (intr_val
& BIT_ULL(0))
326 devlink_fmsg_string_put(fmsg
, "\n\tFault on multicast pkt write");
327 rvu_report_pair_end(fmsg
);
330 intr_val
= nix_event_context
->nix_af_rvu_err
;
331 rvu_report_pair_start(fmsg
, "NIX_AF_RAS");
332 devlink_fmsg_u64_pair_put(fmsg
, "\tNIX RAS Interrupt Reg ",
333 nix_event_context
->nix_af_rvu_err
);
334 devlink_fmsg_string_put(fmsg
, "\n\tPoison Data on:");
335 if (intr_val
& BIT_ULL(34))
336 devlink_fmsg_string_put(fmsg
, "\n\tNIX_AQ_INST_S");
337 if (intr_val
& BIT_ULL(33))
338 devlink_fmsg_string_put(fmsg
, "\n\tNIX_AQ_RES_S");
339 if (intr_val
& BIT_ULL(32))
340 devlink_fmsg_string_put(fmsg
, "\n\tHW ctx");
341 if (intr_val
& BIT_ULL(4))
342 devlink_fmsg_string_put(fmsg
, "\n\tPacket from mirror buffer");
343 if (intr_val
& BIT_ULL(3))
344 devlink_fmsg_string_put(fmsg
, "\n\tPacket from multicast buffer");
345 if (intr_val
& BIT_ULL(2))
346 devlink_fmsg_string_put(fmsg
, "\n\tWQE read from mirror buffer");
347 if (intr_val
& BIT_ULL(1))
348 devlink_fmsg_string_put(fmsg
, "\n\tWQE read from multicast buffer");
349 if (intr_val
& BIT_ULL(0))
350 devlink_fmsg_string_put(fmsg
, "\n\tNIX_RX_MCE_S read");
351 rvu_report_pair_end(fmsg
);
360 static int rvu_hw_nix_intr_dump(struct devlink_health_reporter
*reporter
,
361 struct devlink_fmsg
*fmsg
, void *ctx
,
362 struct netlink_ext_ack
*netlink_extack
)
364 struct rvu
*rvu
= devlink_health_reporter_priv(reporter
);
365 struct rvu_devlink
*rvu_dl
= rvu
->rvu_dl
;
366 struct rvu_nix_event_ctx
*nix_ctx
;
368 nix_ctx
= rvu_dl
->rvu_nix_health_reporter
->nix_event_ctx
;
370 return ctx
? rvu_nix_report_show(fmsg
, ctx
, NIX_AF_RVU_INTR
) :
371 rvu_nix_report_show(fmsg
, nix_ctx
, NIX_AF_RVU_INTR
);
374 static int rvu_hw_nix_intr_recover(struct devlink_health_reporter
*reporter
,
375 void *ctx
, struct netlink_ext_ack
*netlink_extack
)
377 struct rvu
*rvu
= devlink_health_reporter_priv(reporter
);
378 struct rvu_nix_event_ctx
*nix_event_ctx
= ctx
;
381 blkaddr
= rvu_get_blkaddr(rvu
, BLKTYPE_NIX
, 0);
385 if (nix_event_ctx
->nix_af_rvu_int
)
386 rvu_write64(rvu
, blkaddr
, NIX_AF_RVU_INT_ENA_W1S
, ~0ULL);
391 static int rvu_hw_nix_gen_dump(struct devlink_health_reporter
*reporter
,
392 struct devlink_fmsg
*fmsg
, void *ctx
,
393 struct netlink_ext_ack
*netlink_extack
)
395 struct rvu
*rvu
= devlink_health_reporter_priv(reporter
);
396 struct rvu_devlink
*rvu_dl
= rvu
->rvu_dl
;
397 struct rvu_nix_event_ctx
*nix_ctx
;
399 nix_ctx
= rvu_dl
->rvu_nix_health_reporter
->nix_event_ctx
;
401 return ctx
? rvu_nix_report_show(fmsg
, ctx
, NIX_AF_RVU_GEN
) :
402 rvu_nix_report_show(fmsg
, nix_ctx
, NIX_AF_RVU_GEN
);
405 static int rvu_hw_nix_gen_recover(struct devlink_health_reporter
*reporter
,
406 void *ctx
, struct netlink_ext_ack
*netlink_extack
)
408 struct rvu
*rvu
= devlink_health_reporter_priv(reporter
);
409 struct rvu_nix_event_ctx
*nix_event_ctx
= ctx
;
412 blkaddr
= rvu_get_blkaddr(rvu
, BLKTYPE_NIX
, 0);
416 if (nix_event_ctx
->nix_af_rvu_gen
)
417 rvu_write64(rvu
, blkaddr
, NIX_AF_GEN_INT_ENA_W1S
, ~0ULL);
422 static int rvu_hw_nix_err_dump(struct devlink_health_reporter
*reporter
,
423 struct devlink_fmsg
*fmsg
, void *ctx
,
424 struct netlink_ext_ack
*netlink_extack
)
426 struct rvu
*rvu
= devlink_health_reporter_priv(reporter
);
427 struct rvu_devlink
*rvu_dl
= rvu
->rvu_dl
;
428 struct rvu_nix_event_ctx
*nix_ctx
;
430 nix_ctx
= rvu_dl
->rvu_nix_health_reporter
->nix_event_ctx
;
432 return ctx
? rvu_nix_report_show(fmsg
, ctx
, NIX_AF_RVU_ERR
) :
433 rvu_nix_report_show(fmsg
, nix_ctx
, NIX_AF_RVU_ERR
);
436 static int rvu_hw_nix_err_recover(struct devlink_health_reporter
*reporter
,
437 void *ctx
, struct netlink_ext_ack
*netlink_extack
)
439 struct rvu
*rvu
= devlink_health_reporter_priv(reporter
);
440 struct rvu_nix_event_ctx
*nix_event_ctx
= ctx
;
443 blkaddr
= rvu_get_blkaddr(rvu
, BLKTYPE_NIX
, 0);
447 if (nix_event_ctx
->nix_af_rvu_err
)
448 rvu_write64(rvu
, blkaddr
, NIX_AF_ERR_INT_ENA_W1S
, ~0ULL);
453 static int rvu_hw_nix_ras_dump(struct devlink_health_reporter
*reporter
,
454 struct devlink_fmsg
*fmsg
, void *ctx
,
455 struct netlink_ext_ack
*netlink_extack
)
457 struct rvu
*rvu
= devlink_health_reporter_priv(reporter
);
458 struct rvu_devlink
*rvu_dl
= rvu
->rvu_dl
;
459 struct rvu_nix_event_ctx
*nix_ctx
;
461 nix_ctx
= rvu_dl
->rvu_nix_health_reporter
->nix_event_ctx
;
463 return ctx
? rvu_nix_report_show(fmsg
, ctx
, NIX_AF_RVU_RAS
) :
464 rvu_nix_report_show(fmsg
, nix_ctx
, NIX_AF_RVU_RAS
);
467 static int rvu_hw_nix_ras_recover(struct devlink_health_reporter
*reporter
,
468 void *ctx
, struct netlink_ext_ack
*netlink_extack
)
470 struct rvu
*rvu
= devlink_health_reporter_priv(reporter
);
471 struct rvu_nix_event_ctx
*nix_event_ctx
= ctx
;
474 blkaddr
= rvu_get_blkaddr(rvu
, BLKTYPE_NIX
, 0);
478 if (nix_event_ctx
->nix_af_rvu_int
)
479 rvu_write64(rvu
, blkaddr
, NIX_AF_RAS_ENA_W1S
, ~0ULL);
484 RVU_REPORTERS(hw_nix_intr
);
485 RVU_REPORTERS(hw_nix_gen
);
486 RVU_REPORTERS(hw_nix_err
);
487 RVU_REPORTERS(hw_nix_ras
);
489 static void rvu_nix_health_reporters_destroy(struct rvu_devlink
*rvu_dl
);
491 static int rvu_nix_register_reporters(struct rvu_devlink
*rvu_dl
)
493 struct rvu_nix_health_reporters
*rvu_reporters
;
494 struct rvu_nix_event_ctx
*nix_event_context
;
495 struct rvu
*rvu
= rvu_dl
->rvu
;
497 rvu_reporters
= kzalloc(sizeof(*rvu_reporters
), GFP_KERNEL
);
501 rvu_dl
->rvu_nix_health_reporter
= rvu_reporters
;
502 nix_event_context
= kzalloc(sizeof(*nix_event_context
), GFP_KERNEL
);
503 if (!nix_event_context
)
506 rvu_reporters
->nix_event_ctx
= nix_event_context
;
507 rvu_reporters
->rvu_hw_nix_intr_reporter
=
508 devlink_health_reporter_create(rvu_dl
->dl
, &rvu_hw_nix_intr_reporter_ops
, 0, rvu
);
509 if (IS_ERR(rvu_reporters
->rvu_hw_nix_intr_reporter
)) {
510 dev_warn(rvu
->dev
, "Failed to create hw_nix_intr reporter, err=%ld\n",
511 PTR_ERR(rvu_reporters
->rvu_hw_nix_intr_reporter
));
512 return PTR_ERR(rvu_reporters
->rvu_hw_nix_intr_reporter
);
515 rvu_reporters
->rvu_hw_nix_gen_reporter
=
516 devlink_health_reporter_create(rvu_dl
->dl
, &rvu_hw_nix_gen_reporter_ops
, 0, rvu
);
517 if (IS_ERR(rvu_reporters
->rvu_hw_nix_gen_reporter
)) {
518 dev_warn(rvu
->dev
, "Failed to create hw_nix_gen reporter, err=%ld\n",
519 PTR_ERR(rvu_reporters
->rvu_hw_nix_gen_reporter
));
520 return PTR_ERR(rvu_reporters
->rvu_hw_nix_gen_reporter
);
523 rvu_reporters
->rvu_hw_nix_err_reporter
=
524 devlink_health_reporter_create(rvu_dl
->dl
, &rvu_hw_nix_err_reporter_ops
, 0, rvu
);
525 if (IS_ERR(rvu_reporters
->rvu_hw_nix_err_reporter
)) {
526 dev_warn(rvu
->dev
, "Failed to create hw_nix_err reporter, err=%ld\n",
527 PTR_ERR(rvu_reporters
->rvu_hw_nix_err_reporter
));
528 return PTR_ERR(rvu_reporters
->rvu_hw_nix_err_reporter
);
531 rvu_reporters
->rvu_hw_nix_ras_reporter
=
532 devlink_health_reporter_create(rvu_dl
->dl
, &rvu_hw_nix_ras_reporter_ops
, 0, rvu
);
533 if (IS_ERR(rvu_reporters
->rvu_hw_nix_ras_reporter
)) {
534 dev_warn(rvu
->dev
, "Failed to create hw_nix_ras reporter, err=%ld\n",
535 PTR_ERR(rvu_reporters
->rvu_hw_nix_ras_reporter
));
536 return PTR_ERR(rvu_reporters
->rvu_hw_nix_ras_reporter
);
539 rvu_dl
->devlink_wq
= create_workqueue("rvu_devlink_wq");
540 if (!rvu_dl
->devlink_wq
)
543 INIT_WORK(&rvu_reporters
->intr_work
, rvu_nix_intr_work
);
544 INIT_WORK(&rvu_reporters
->gen_work
, rvu_nix_gen_work
);
545 INIT_WORK(&rvu_reporters
->err_work
, rvu_nix_err_work
);
546 INIT_WORK(&rvu_reporters
->ras_work
, rvu_nix_ras_work
);
551 static int rvu_nix_health_reporters_create(struct rvu_devlink
*rvu_dl
)
553 struct rvu
*rvu
= rvu_dl
->rvu
;
556 err
= rvu_nix_register_reporters(rvu_dl
);
558 dev_warn(rvu
->dev
, "Failed to create nix reporter, err =%d\n",
562 rvu_nix_register_interrupts(rvu
);
567 static void rvu_nix_health_reporters_destroy(struct rvu_devlink
*rvu_dl
)
569 struct rvu_nix_health_reporters
*nix_reporters
;
570 struct rvu
*rvu
= rvu_dl
->rvu
;
572 nix_reporters
= rvu_dl
->rvu_nix_health_reporter
;
574 if (!nix_reporters
->rvu_hw_nix_ras_reporter
)
576 if (!IS_ERR_OR_NULL(nix_reporters
->rvu_hw_nix_intr_reporter
))
577 devlink_health_reporter_destroy(nix_reporters
->rvu_hw_nix_intr_reporter
);
579 if (!IS_ERR_OR_NULL(nix_reporters
->rvu_hw_nix_gen_reporter
))
580 devlink_health_reporter_destroy(nix_reporters
->rvu_hw_nix_gen_reporter
);
582 if (!IS_ERR_OR_NULL(nix_reporters
->rvu_hw_nix_err_reporter
))
583 devlink_health_reporter_destroy(nix_reporters
->rvu_hw_nix_err_reporter
);
585 if (!IS_ERR_OR_NULL(nix_reporters
->rvu_hw_nix_ras_reporter
))
586 devlink_health_reporter_destroy(nix_reporters
->rvu_hw_nix_ras_reporter
);
588 rvu_nix_unregister_interrupts(rvu
);
589 kfree(rvu_dl
->rvu_nix_health_reporter
->nix_event_ctx
);
590 kfree(rvu_dl
->rvu_nix_health_reporter
);
593 static void rvu_npa_intr_work(struct work_struct
*work
)
595 struct rvu_npa_health_reporters
*rvu_npa_health_reporter
;
597 rvu_npa_health_reporter
= container_of(work
, struct rvu_npa_health_reporters
, intr_work
);
598 devlink_health_report(rvu_npa_health_reporter
->rvu_hw_npa_intr_reporter
,
600 rvu_npa_health_reporter
->npa_event_ctx
);
603 static irqreturn_t
rvu_npa_af_rvu_intr_handler(int irq
, void *rvu_irq
)
605 struct rvu_npa_event_ctx
*npa_event_context
;
606 struct rvu_devlink
*rvu_dl
= rvu_irq
;
612 blkaddr
= rvu_get_blkaddr(rvu
, BLKTYPE_NPA
, 0);
616 npa_event_context
= rvu_dl
->rvu_npa_health_reporter
->npa_event_ctx
;
617 intr
= rvu_read64(rvu
, blkaddr
, NPA_AF_RVU_INT
);
618 npa_event_context
->npa_af_rvu_int
= intr
;
620 /* Clear interrupts */
621 rvu_write64(rvu
, blkaddr
, NPA_AF_RVU_INT
, intr
);
622 rvu_write64(rvu
, blkaddr
, NPA_AF_RVU_INT_ENA_W1C
, ~0ULL);
623 queue_work(rvu_dl
->devlink_wq
, &rvu_dl
->rvu_npa_health_reporter
->intr_work
);
628 static void rvu_npa_gen_work(struct work_struct
*work
)
630 struct rvu_npa_health_reporters
*rvu_npa_health_reporter
;
632 rvu_npa_health_reporter
= container_of(work
, struct rvu_npa_health_reporters
, gen_work
);
633 devlink_health_report(rvu_npa_health_reporter
->rvu_hw_npa_gen_reporter
,
635 rvu_npa_health_reporter
->npa_event_ctx
);
638 static irqreturn_t
rvu_npa_af_gen_intr_handler(int irq
, void *rvu_irq
)
640 struct rvu_npa_event_ctx
*npa_event_context
;
641 struct rvu_devlink
*rvu_dl
= rvu_irq
;
647 blkaddr
= rvu_get_blkaddr(rvu
, BLKTYPE_NPA
, 0);
651 npa_event_context
= rvu_dl
->rvu_npa_health_reporter
->npa_event_ctx
;
652 intr
= rvu_read64(rvu
, blkaddr
, NPA_AF_GEN_INT
);
653 npa_event_context
->npa_af_rvu_gen
= intr
;
655 /* Clear interrupts */
656 rvu_write64(rvu
, blkaddr
, NPA_AF_GEN_INT
, intr
);
657 rvu_write64(rvu
, blkaddr
, NPA_AF_GEN_INT_ENA_W1C
, ~0ULL);
658 queue_work(rvu_dl
->devlink_wq
, &rvu_dl
->rvu_npa_health_reporter
->gen_work
);
663 static void rvu_npa_err_work(struct work_struct
*work
)
665 struct rvu_npa_health_reporters
*rvu_npa_health_reporter
;
667 rvu_npa_health_reporter
= container_of(work
, struct rvu_npa_health_reporters
, err_work
);
668 devlink_health_report(rvu_npa_health_reporter
->rvu_hw_npa_err_reporter
,
670 rvu_npa_health_reporter
->npa_event_ctx
);
673 static irqreturn_t
rvu_npa_af_err_intr_handler(int irq
, void *rvu_irq
)
675 struct rvu_npa_event_ctx
*npa_event_context
;
676 struct rvu_devlink
*rvu_dl
= rvu_irq
;
682 blkaddr
= rvu_get_blkaddr(rvu
, BLKTYPE_NPA
, 0);
685 npa_event_context
= rvu_dl
->rvu_npa_health_reporter
->npa_event_ctx
;
686 intr
= rvu_read64(rvu
, blkaddr
, NPA_AF_ERR_INT
);
687 npa_event_context
->npa_af_rvu_err
= intr
;
689 /* Clear interrupts */
690 rvu_write64(rvu
, blkaddr
, NPA_AF_ERR_INT
, intr
);
691 rvu_write64(rvu
, blkaddr
, NPA_AF_ERR_INT_ENA_W1C
, ~0ULL);
692 queue_work(rvu_dl
->devlink_wq
, &rvu_dl
->rvu_npa_health_reporter
->err_work
);
697 static void rvu_npa_ras_work(struct work_struct
*work
)
699 struct rvu_npa_health_reporters
*rvu_npa_health_reporter
;
701 rvu_npa_health_reporter
= container_of(work
, struct rvu_npa_health_reporters
, ras_work
);
702 devlink_health_report(rvu_npa_health_reporter
->rvu_hw_npa_ras_reporter
,
703 "HW NPA_AF_RAS Error reported",
704 rvu_npa_health_reporter
->npa_event_ctx
);
707 static irqreturn_t
rvu_npa_af_ras_intr_handler(int irq
, void *rvu_irq
)
709 struct rvu_npa_event_ctx
*npa_event_context
;
710 struct rvu_devlink
*rvu_dl
= rvu_irq
;
716 blkaddr
= rvu_get_blkaddr(rvu
, BLKTYPE_NPA
, 0);
720 npa_event_context
= rvu_dl
->rvu_npa_health_reporter
->npa_event_ctx
;
721 intr
= rvu_read64(rvu
, blkaddr
, NPA_AF_RAS
);
722 npa_event_context
->npa_af_rvu_ras
= intr
;
724 /* Clear interrupts */
725 rvu_write64(rvu
, blkaddr
, NPA_AF_RAS
, intr
);
726 rvu_write64(rvu
, blkaddr
, NPA_AF_RAS_ENA_W1C
, ~0ULL);
727 queue_work(rvu_dl
->devlink_wq
, &rvu_dl
->rvu_npa_health_reporter
->ras_work
);
732 static void rvu_npa_unregister_interrupts(struct rvu
*rvu
)
734 struct rvu_devlink
*rvu_dl
= rvu
->rvu_dl
;
735 int i
, offs
, blkaddr
;
738 blkaddr
= rvu_get_blkaddr(rvu
, BLKTYPE_NPA
, 0);
742 reg
= rvu_read64(rvu
, blkaddr
, NPA_PRIV_AF_INT_CFG
);
745 rvu_write64(rvu
, blkaddr
, NPA_AF_RVU_INT_ENA_W1C
, ~0ULL);
746 rvu_write64(rvu
, blkaddr
, NPA_AF_GEN_INT_ENA_W1C
, ~0ULL);
747 rvu_write64(rvu
, blkaddr
, NPA_AF_ERR_INT_ENA_W1C
, ~0ULL);
748 rvu_write64(rvu
, blkaddr
, NPA_AF_RAS_ENA_W1C
, ~0ULL);
750 for (i
= 0; i
< NPA_AF_INT_VEC_CNT
; i
++)
751 if (rvu
->irq_allocated
[offs
+ i
]) {
752 free_irq(pci_irq_vector(rvu
->pdev
, offs
+ i
), rvu_dl
);
753 rvu
->irq_allocated
[offs
+ i
] = false;
757 static int rvu_npa_register_interrupts(struct rvu
*rvu
)
762 blkaddr
= rvu_get_blkaddr(rvu
, BLKTYPE_NPA
, 0);
766 /* Get NPA AF MSIX vectors offset. */
767 base
= rvu_read64(rvu
, blkaddr
, NPA_PRIV_AF_INT_CFG
) & 0x3ff;
770 "Failed to get NPA_AF_INT vector offsets\n");
774 /* Register and enable NPA_AF_RVU_INT interrupt */
775 rc
= rvu_common_request_irq(rvu
, base
+ NPA_AF_INT_VEC_RVU
,
777 rvu_npa_af_rvu_intr_handler
);
780 rvu_write64(rvu
, blkaddr
, NPA_AF_RVU_INT_ENA_W1S
, ~0ULL);
782 /* Register and enable NPA_AF_GEN_INT interrupt */
783 rc
= rvu_common_request_irq(rvu
, base
+ NPA_AF_INT_VEC_GEN
,
785 rvu_npa_af_gen_intr_handler
);
788 rvu_write64(rvu
, blkaddr
, NPA_AF_GEN_INT_ENA_W1S
, ~0ULL);
790 /* Register and enable NPA_AF_ERR_INT interrupt */
791 rc
= rvu_common_request_irq(rvu
, base
+ NPA_AF_INT_VEC_AF_ERR
,
793 rvu_npa_af_err_intr_handler
);
796 rvu_write64(rvu
, blkaddr
, NPA_AF_ERR_INT_ENA_W1S
, ~0ULL);
798 /* Register and enable NPA_AF_RAS interrupt */
799 rc
= rvu_common_request_irq(rvu
, base
+ NPA_AF_INT_VEC_POISON
,
801 rvu_npa_af_ras_intr_handler
);
804 rvu_write64(rvu
, blkaddr
, NPA_AF_RAS_ENA_W1S
, ~0ULL);
808 rvu_npa_unregister_interrupts(rvu
);
812 static int rvu_npa_report_show(struct devlink_fmsg
*fmsg
, void *ctx
,
813 enum npa_af_rvu_health health_reporter
)
815 struct rvu_npa_event_ctx
*npa_event_context
;
816 unsigned int alloc_dis
, free_dis
;
819 npa_event_context
= ctx
;
820 switch (health_reporter
) {
822 intr_val
= npa_event_context
->npa_af_rvu_gen
;
823 rvu_report_pair_start(fmsg
, "NPA_AF_GENERAL");
824 devlink_fmsg_u64_pair_put(fmsg
, "\tNPA General Interrupt Reg ",
825 npa_event_context
->npa_af_rvu_gen
);
826 if (intr_val
& BIT_ULL(32))
827 devlink_fmsg_string_put(fmsg
, "\n\tUnmap PF Error");
829 free_dis
= FIELD_GET(GENMASK(15, 0), intr_val
);
830 if (free_dis
& BIT(NPA_INPQ_NIX0_RX
))
831 devlink_fmsg_string_put(fmsg
, "\n\tNIX0: free disabled RX");
832 if (free_dis
& BIT(NPA_INPQ_NIX0_TX
))
833 devlink_fmsg_string_put(fmsg
, "\n\tNIX0:free disabled TX");
834 if (free_dis
& BIT(NPA_INPQ_NIX1_RX
))
835 devlink_fmsg_string_put(fmsg
, "\n\tNIX1: free disabled RX");
836 if (free_dis
& BIT(NPA_INPQ_NIX1_TX
))
837 devlink_fmsg_string_put(fmsg
, "\n\tNIX1:free disabled TX");
838 if (free_dis
& BIT(NPA_INPQ_SSO
))
839 devlink_fmsg_string_put(fmsg
, "\n\tFree Disabled for SSO");
840 if (free_dis
& BIT(NPA_INPQ_TIM
))
841 devlink_fmsg_string_put(fmsg
, "\n\tFree Disabled for TIM");
842 if (free_dis
& BIT(NPA_INPQ_DPI
))
843 devlink_fmsg_string_put(fmsg
, "\n\tFree Disabled for DPI");
844 if (free_dis
& BIT(NPA_INPQ_AURA_OP
))
845 devlink_fmsg_string_put(fmsg
, "\n\tFree Disabled for AURA");
847 alloc_dis
= FIELD_GET(GENMASK(31, 16), intr_val
);
848 if (alloc_dis
& BIT(NPA_INPQ_NIX0_RX
))
849 devlink_fmsg_string_put(fmsg
, "\n\tNIX0: alloc disabled RX");
850 if (alloc_dis
& BIT(NPA_INPQ_NIX0_TX
))
851 devlink_fmsg_string_put(fmsg
, "\n\tNIX0:alloc disabled TX");
852 if (alloc_dis
& BIT(NPA_INPQ_NIX1_RX
))
853 devlink_fmsg_string_put(fmsg
, "\n\tNIX1: alloc disabled RX");
854 if (alloc_dis
& BIT(NPA_INPQ_NIX1_TX
))
855 devlink_fmsg_string_put(fmsg
, "\n\tNIX1:alloc disabled TX");
856 if (alloc_dis
& BIT(NPA_INPQ_SSO
))
857 devlink_fmsg_string_put(fmsg
, "\n\tAlloc Disabled for SSO");
858 if (alloc_dis
& BIT(NPA_INPQ_TIM
))
859 devlink_fmsg_string_put(fmsg
, "\n\tAlloc Disabled for TIM");
860 if (alloc_dis
& BIT(NPA_INPQ_DPI
))
861 devlink_fmsg_string_put(fmsg
, "\n\tAlloc Disabled for DPI");
862 if (alloc_dis
& BIT(NPA_INPQ_AURA_OP
))
863 devlink_fmsg_string_put(fmsg
, "\n\tAlloc Disabled for AURA");
865 rvu_report_pair_end(fmsg
);
868 rvu_report_pair_start(fmsg
, "NPA_AF_ERR");
869 devlink_fmsg_u64_pair_put(fmsg
, "\tNPA Error Interrupt Reg ",
870 npa_event_context
->npa_af_rvu_err
);
871 if (npa_event_context
->npa_af_rvu_err
& BIT_ULL(14))
872 devlink_fmsg_string_put(fmsg
, "\n\tFault on NPA_AQ_INST_S read");
873 if (npa_event_context
->npa_af_rvu_err
& BIT_ULL(13))
874 devlink_fmsg_string_put(fmsg
, "\n\tFault on NPA_AQ_RES_S write");
875 if (npa_event_context
->npa_af_rvu_err
& BIT_ULL(12))
876 devlink_fmsg_string_put(fmsg
, "\n\tAQ Doorbell Error");
877 rvu_report_pair_end(fmsg
);
880 rvu_report_pair_start(fmsg
, "NPA_AF_RVU_RAS");
881 devlink_fmsg_u64_pair_put(fmsg
, "\tNPA RAS Interrupt Reg ",
882 npa_event_context
->npa_af_rvu_ras
);
883 if (npa_event_context
->npa_af_rvu_ras
& BIT_ULL(34))
884 devlink_fmsg_string_put(fmsg
, "\n\tPoison data on NPA_AQ_INST_S");
885 if (npa_event_context
->npa_af_rvu_ras
& BIT_ULL(33))
886 devlink_fmsg_string_put(fmsg
, "\n\tPoison data on NPA_AQ_RES_S");
887 if (npa_event_context
->npa_af_rvu_ras
& BIT_ULL(32))
888 devlink_fmsg_string_put(fmsg
, "\n\tPoison data on HW context");
889 rvu_report_pair_end(fmsg
);
891 case NPA_AF_RVU_INTR
:
892 rvu_report_pair_start(fmsg
, "NPA_AF_RVU");
893 devlink_fmsg_u64_pair_put(fmsg
, "\tNPA RVU Interrupt Reg ",
894 npa_event_context
->npa_af_rvu_int
);
895 if (npa_event_context
->npa_af_rvu_int
& BIT_ULL(0))
896 devlink_fmsg_string_put(fmsg
, "\n\tUnmap Slot Error");
897 rvu_report_pair_end(fmsg
);
906 static int rvu_hw_npa_intr_dump(struct devlink_health_reporter
*reporter
,
907 struct devlink_fmsg
*fmsg
, void *ctx
,
908 struct netlink_ext_ack
*netlink_extack
)
910 struct rvu
*rvu
= devlink_health_reporter_priv(reporter
);
911 struct rvu_devlink
*rvu_dl
= rvu
->rvu_dl
;
912 struct rvu_npa_event_ctx
*npa_ctx
;
914 npa_ctx
= rvu_dl
->rvu_npa_health_reporter
->npa_event_ctx
;
916 return ctx
? rvu_npa_report_show(fmsg
, ctx
, NPA_AF_RVU_INTR
) :
917 rvu_npa_report_show(fmsg
, npa_ctx
, NPA_AF_RVU_INTR
);
920 static int rvu_hw_npa_intr_recover(struct devlink_health_reporter
*reporter
,
921 void *ctx
, struct netlink_ext_ack
*netlink_extack
)
923 struct rvu
*rvu
= devlink_health_reporter_priv(reporter
);
924 struct rvu_npa_event_ctx
*npa_event_ctx
= ctx
;
927 blkaddr
= rvu_get_blkaddr(rvu
, BLKTYPE_NPA
, 0);
931 if (npa_event_ctx
->npa_af_rvu_int
)
932 rvu_write64(rvu
, blkaddr
, NPA_AF_RVU_INT_ENA_W1S
, ~0ULL);
937 static int rvu_hw_npa_gen_dump(struct devlink_health_reporter
*reporter
,
938 struct devlink_fmsg
*fmsg
, void *ctx
,
939 struct netlink_ext_ack
*netlink_extack
)
941 struct rvu
*rvu
= devlink_health_reporter_priv(reporter
);
942 struct rvu_devlink
*rvu_dl
= rvu
->rvu_dl
;
943 struct rvu_npa_event_ctx
*npa_ctx
;
945 npa_ctx
= rvu_dl
->rvu_npa_health_reporter
->npa_event_ctx
;
947 return ctx
? rvu_npa_report_show(fmsg
, ctx
, NPA_AF_RVU_GEN
) :
948 rvu_npa_report_show(fmsg
, npa_ctx
, NPA_AF_RVU_GEN
);
951 static int rvu_hw_npa_gen_recover(struct devlink_health_reporter
*reporter
,
952 void *ctx
, struct netlink_ext_ack
*netlink_extack
)
954 struct rvu
*rvu
= devlink_health_reporter_priv(reporter
);
955 struct rvu_npa_event_ctx
*npa_event_ctx
= ctx
;
958 blkaddr
= rvu_get_blkaddr(rvu
, BLKTYPE_NPA
, 0);
962 if (npa_event_ctx
->npa_af_rvu_gen
)
963 rvu_write64(rvu
, blkaddr
, NPA_AF_GEN_INT_ENA_W1S
, ~0ULL);
968 static int rvu_hw_npa_err_dump(struct devlink_health_reporter
*reporter
,
969 struct devlink_fmsg
*fmsg
, void *ctx
,
970 struct netlink_ext_ack
*netlink_extack
)
972 struct rvu
*rvu
= devlink_health_reporter_priv(reporter
);
973 struct rvu_devlink
*rvu_dl
= rvu
->rvu_dl
;
974 struct rvu_npa_event_ctx
*npa_ctx
;
976 npa_ctx
= rvu_dl
->rvu_npa_health_reporter
->npa_event_ctx
;
978 return ctx
? rvu_npa_report_show(fmsg
, ctx
, NPA_AF_RVU_ERR
) :
979 rvu_npa_report_show(fmsg
, npa_ctx
, NPA_AF_RVU_ERR
);
982 static int rvu_hw_npa_err_recover(struct devlink_health_reporter
*reporter
,
983 void *ctx
, struct netlink_ext_ack
*netlink_extack
)
985 struct rvu
*rvu
= devlink_health_reporter_priv(reporter
);
986 struct rvu_npa_event_ctx
*npa_event_ctx
= ctx
;
989 blkaddr
= rvu_get_blkaddr(rvu
, BLKTYPE_NPA
, 0);
993 if (npa_event_ctx
->npa_af_rvu_err
)
994 rvu_write64(rvu
, blkaddr
, NPA_AF_ERR_INT_ENA_W1S
, ~0ULL);
999 static int rvu_hw_npa_ras_dump(struct devlink_health_reporter
*reporter
,
1000 struct devlink_fmsg
*fmsg
, void *ctx
,
1001 struct netlink_ext_ack
*netlink_extack
)
1003 struct rvu
*rvu
= devlink_health_reporter_priv(reporter
);
1004 struct rvu_devlink
*rvu_dl
= rvu
->rvu_dl
;
1005 struct rvu_npa_event_ctx
*npa_ctx
;
1007 npa_ctx
= rvu_dl
->rvu_npa_health_reporter
->npa_event_ctx
;
1009 return ctx
? rvu_npa_report_show(fmsg
, ctx
, NPA_AF_RVU_RAS
) :
1010 rvu_npa_report_show(fmsg
, npa_ctx
, NPA_AF_RVU_RAS
);
1013 static int rvu_hw_npa_ras_recover(struct devlink_health_reporter
*reporter
,
1014 void *ctx
, struct netlink_ext_ack
*netlink_extack
)
1016 struct rvu
*rvu
= devlink_health_reporter_priv(reporter
);
1017 struct rvu_npa_event_ctx
*npa_event_ctx
= ctx
;
1020 blkaddr
= rvu_get_blkaddr(rvu
, BLKTYPE_NPA
, 0);
1024 if (npa_event_ctx
->npa_af_rvu_ras
)
1025 rvu_write64(rvu
, blkaddr
, NPA_AF_RAS_ENA_W1S
, ~0ULL);
1030 RVU_REPORTERS(hw_npa_intr
);
1031 RVU_REPORTERS(hw_npa_gen
);
1032 RVU_REPORTERS(hw_npa_err
);
1033 RVU_REPORTERS(hw_npa_ras
);
1035 static void rvu_npa_health_reporters_destroy(struct rvu_devlink
*rvu_dl
);
/* Allocate the NPA health-reporter bookkeeping, create the four devlink
 * health reporters (intr/gen/err/ras), create the shared devlink workqueue
 * and initialize the work items that queue reports from IRQ context.
 *
 * Returns 0 on success or a negative errno. Partially-created state is
 * left in place on failure; callers unwind via
 * rvu_npa_health_reporters_destroy().
 */
static int rvu_npa_register_reporters(struct rvu_devlink *rvu_dl)
{
	struct rvu_npa_health_reporters *rvu_reporters;
	struct rvu_npa_event_ctx *npa_event_context;
	struct rvu *rvu = rvu_dl->rvu;

	rvu_reporters = kzalloc(sizeof(*rvu_reporters), GFP_KERNEL);
	if (!rvu_reporters)
		return -ENOMEM;

	rvu_dl->rvu_npa_health_reporter = rvu_reporters;
	npa_event_context = kzalloc(sizeof(*npa_event_context), GFP_KERNEL);
	if (!npa_event_context)
		return -ENOMEM;

	rvu_reporters->npa_event_ctx = npa_event_context;
	rvu_reporters->rvu_hw_npa_intr_reporter =
		devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_intr_reporter_ops, 0, rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_npa_intr_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_npa_intr reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_npa_intr_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_npa_intr_reporter);
	}

	rvu_reporters->rvu_hw_npa_gen_reporter =
		devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_gen_reporter_ops, 0, rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_npa_gen_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_npa_gen reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_npa_gen_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_npa_gen_reporter);
	}

	rvu_reporters->rvu_hw_npa_err_reporter =
		devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_err_reporter_ops, 0, rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_npa_err_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_npa_err reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_npa_err_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_npa_err_reporter);
	}

	rvu_reporters->rvu_hw_npa_ras_reporter =
		devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_ras_reporter_ops, 0, rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_npa_ras_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_npa_ras reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_npa_ras_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_npa_ras_reporter);
	}

	/* Workqueue shared by all deferred health-report work items */
	rvu_dl->devlink_wq = create_workqueue("rvu_devlink_wq");
	if (!rvu_dl->devlink_wq)
		return -ENOMEM;

	INIT_WORK(&rvu_reporters->intr_work, rvu_npa_intr_work);
	INIT_WORK(&rvu_reporters->err_work, rvu_npa_err_work);
	INIT_WORK(&rvu_reporters->gen_work, rvu_npa_gen_work);
	INIT_WORK(&rvu_reporters->ras_work, rvu_npa_ras_work);

	return 0;
}
/* Create the NPA health reporters and, on success, register the NPA AF
 * interrupts that feed them.
 *
 * Returns 0 on success or the negative errno from reporter registration.
 */
static int rvu_npa_health_reporters_create(struct rvu_devlink *rvu_dl)
{
	struct rvu *rvu = rvu_dl->rvu;
	int err;

	err = rvu_npa_register_reporters(rvu_dl);
	if (err) {
		dev_warn(rvu->dev, "Failed to create npa reporter, err =%d\n",
			 err);
		return err;
	}
	rvu_npa_register_interrupts(rvu);

	return 0;
}
/* Tear down the NPA health reporters, unregister the NPA AF interrupts and
 * free the reporter bookkeeping. Safe against partially-created state:
 * bails out early if creation never got as far as the last (ras) reporter,
 * and each destroy is guarded with IS_ERR_OR_NULL().
 */
static void rvu_npa_health_reporters_destroy(struct rvu_devlink *rvu_dl)
{
	struct rvu_npa_health_reporters *npa_reporters;
	struct rvu *rvu = rvu_dl->rvu;

	npa_reporters = rvu_dl->rvu_npa_health_reporter;

	/* ras is created last; if it is absent, creation did not complete */
	if (!npa_reporters->rvu_hw_npa_ras_reporter)
		return;
	if (!IS_ERR_OR_NULL(npa_reporters->rvu_hw_npa_intr_reporter))
		devlink_health_reporter_destroy(npa_reporters->rvu_hw_npa_intr_reporter);

	if (!IS_ERR_OR_NULL(npa_reporters->rvu_hw_npa_gen_reporter))
		devlink_health_reporter_destroy(npa_reporters->rvu_hw_npa_gen_reporter);

	if (!IS_ERR_OR_NULL(npa_reporters->rvu_hw_npa_err_reporter))
		devlink_health_reporter_destroy(npa_reporters->rvu_hw_npa_err_reporter);

	if (!IS_ERR_OR_NULL(npa_reporters->rvu_hw_npa_ras_reporter))
		devlink_health_reporter_destroy(npa_reporters->rvu_hw_npa_ras_reporter);

	rvu_npa_unregister_interrupts(rvu);
	kfree(rvu_dl->rvu_npa_health_reporter->npa_event_ctx);
	kfree(rvu_dl->rvu_npa_health_reporter);
}
1139 static int rvu_health_reporters_create(struct rvu
*rvu
)
1141 struct rvu_devlink
*rvu_dl
;
1144 rvu_dl
= rvu
->rvu_dl
;
1145 err
= rvu_npa_health_reporters_create(rvu_dl
);
1149 return rvu_nix_health_reporters_create(rvu_dl
);
/* Destroy all devlink health reporters (NPA then NIX). A no-op when the
 * devlink private state was never attached to the rvu instance.
 */
static void rvu_health_reporters_destroy(struct rvu *rvu)
{
	struct rvu_devlink *rvu_dl;

	if (!rvu->rvu_dl)
		return;

	rvu_dl = rvu->rvu_dl;
	rvu_npa_health_reporters_destroy(rvu_dl);
	rvu_nix_health_reporters_destroy(rvu_dl);
}
1164 /* Devlink Params APIs */
1165 static int rvu_af_dl_dwrr_mtu_validate(struct devlink
*devlink
, u32 id
,
1166 union devlink_param_value val
,
1167 struct netlink_ext_ack
*extack
)
1169 struct rvu_devlink
*rvu_dl
= devlink_priv(devlink
);
1170 struct rvu
*rvu
= rvu_dl
->rvu
;
1171 int dwrr_mtu
= val
.vu32
;
1172 struct nix_txsch
*txsch
;
1173 struct nix_hw
*nix_hw
;
1175 if (!rvu
->hw
->cap
.nix_common_dwrr_mtu
) {
1176 NL_SET_ERR_MSG_MOD(extack
,
1177 "Setting DWRR_MTU is not supported on this silicon");
1181 if ((dwrr_mtu
> 65536 || !is_power_of_2(dwrr_mtu
)) &&
1182 (dwrr_mtu
!= 9728 && dwrr_mtu
!= 10240)) {
1183 NL_SET_ERR_MSG_MOD(extack
,
1184 "Invalid, supported MTUs are 0,2,4,8.16,32,64....4K,8K,32K,64K and 9728, 10240");
1188 nix_hw
= get_nix_hw(rvu
->hw
, BLKADDR_NIX0
);
1192 txsch
= &nix_hw
->txsch
[NIX_TXSCH_LVL_SMQ
];
1193 if (rvu_rsrc_free_count(&txsch
->schq
) != txsch
->schq
.max
) {
1194 NL_SET_ERR_MSG_MOD(extack
,
1195 "Changing DWRR MTU is not supported when there are active NIXLFs");
1196 NL_SET_ERR_MSG_MOD(extack
,
1197 "Make sure none of the PF/VF interfaces are initialized and retry");
/* devlink param "dwrr_mtu" set callback: convert the byte value supplied by
 * userspace into the hardware DWRR-MTU encoding and program the NIX0
 * register selected by nix_get_dwrr_mtu_reg() for the RPM link type.
 */
static int rvu_af_dl_dwrr_mtu_set(struct devlink *devlink, u32 id,
				  struct devlink_param_gset_ctx *ctx)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;
	u64 dwrr_mtu;

	dwrr_mtu = convert_bytes_to_dwrr_mtu(ctx->val.vu32);
	rvu_write64(rvu, BLKADDR_NIX0,
		    nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_RPM), dwrr_mtu);

	return 0;
}
/* devlink param "dwrr_mtu" get callback: read the hardware DWRR-MTU
 * register and report it to userspace converted back to bytes.
 * Returns -EOPNOTSUPP on silicon without the common DWRR MTU capability.
 */
static int rvu_af_dl_dwrr_mtu_get(struct devlink *devlink, u32 id,
				  struct devlink_param_gset_ctx *ctx)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;
	u64 dwrr_mtu;

	if (!rvu->hw->cap.nix_common_dwrr_mtu)
		return -EOPNOTSUPP;

	dwrr_mtu = rvu_read64(rvu, BLKADDR_NIX0,
			      nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_RPM));
	ctx->val.vu32 = convert_dwrr_mtu_to_bytes(dwrr_mtu);

	return 0;
}
/* Driver-specific devlink parameter IDs; numbering starts past the generic
 * devlink parameter ID space to avoid collisions.
 */
enum rvu_af_dl_param_id {
	RVU_AF_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
	RVU_AF_DEVLINK_PARAM_ID_DWRR_MTU,
	RVU_AF_DEVLINK_PARAM_ID_NPC_EXACT_FEATURE_DISABLE,
	RVU_AF_DEVLINK_PARAM_ID_NPC_MCAM_ZONE_PERCENT,
};
/* devlink param "npc_exact_feature_disable" get callback: report the
 * current state of the NPC exact-match feature as the string
 * "enabled"/"disabled".
 */
static int rvu_af_npc_exact_feature_get(struct devlink *devlink, u32 id,
					struct devlink_param_gset_ctx *ctx)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;
	bool enabled;

	enabled = rvu_npc_exact_has_match_table(rvu);

	snprintf(ctx->val.vstr, sizeof(ctx->val.vstr), "%s",
		 enabled ? "enabled" : "disabled");

	return 0;
}
1257 static int rvu_af_npc_exact_feature_disable(struct devlink
*devlink
, u32 id
,
1258 struct devlink_param_gset_ctx
*ctx
)
1260 struct rvu_devlink
*rvu_dl
= devlink_priv(devlink
);
1261 struct rvu
*rvu
= rvu_dl
->rvu
;
1263 rvu_npc_exact_disable_feature(rvu
);
/* devlink param "npc_exact_feature_disable" validate callback.
 *
 * Only the value "1" (disable) is meaningful: the string must parse as an
 * unsigned decimal and equal 1, and the feature must still be in a state
 * where disabling is allowed (before any exact-match configuration).
 *
 * NOTE(review): the "enable != 1" rejection branch is restored here from
 * the surrounding error messages — confirm against the upstream source.
 */
static int rvu_af_npc_exact_feature_validate(struct devlink *devlink, u32 id,
					     union devlink_param_value val,
					     struct netlink_ext_ack *extack)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;
	u64 enable;

	if (kstrtoull(val.vstr, 10, &enable)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only 1 value is supported");
		return -EINVAL;
	}

	if (enable != 1) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only disabling exact match feature is supported");
		return -EINVAL;
	}

	if (rvu_npc_exact_can_disable_feature(rvu))
		return 0;

	NL_SET_ERR_MSG_MOD(extack,
			   "Can't disable exact match feature; Please try before any configuration");
	return -EFAULT;
}
/* devlink param "npc_mcam_high_zone_percent" get callback: report the
 * current high-priority zone size as a percentage of the MCAM bitmap
 * entries (integer division, rounded down).
 */
static int rvu_af_dl_npc_mcam_high_zone_percent_get(struct devlink *devlink, u32 id,
						    struct devlink_param_gset_ctx *ctx)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;
	struct npc_mcam *mcam;
	u32 percent;

	mcam = &rvu->hw->mcam;
	percent = (mcam->hprio_count * 100) / mcam->bmap_entries;
	ctx->val.vu8 = (u8)percent;

	return 0;
}
/* devlink param "npc_mcam_high_zone_percent" set callback: resize the MCAM
 * priority zones. The high-priority zone takes the requested percentage of
 * the bitmap entries; half of the remainder becomes the low-priority zone,
 * placed at the top of the bitmap. Range/occupancy checks are done in the
 * validate callback, not here.
 */
static int rvu_af_dl_npc_mcam_high_zone_percent_set(struct devlink *devlink, u32 id,
						    struct devlink_param_gset_ctx *ctx)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;
	struct npc_mcam *mcam;
	u32 percent;

	percent = ctx->val.vu8;
	mcam = &rvu->hw->mcam;
	mcam->hprio_count = (mcam->bmap_entries * percent) / 100;
	mcam->hprio_end = mcam->hprio_count;
	mcam->lprio_count = (mcam->bmap_entries - mcam->hprio_count) / 2;
	mcam->lprio_start = mcam->bmap_entries - mcam->lprio_count;

	return 0;
}
/* devlink param "npc_mcam_high_zone_percent" validate callback: the value
 * must lie in [12, 100] and no MCAM entries may be allocated yet (the free
 * count must equal the total), since resizing the zones would invalidate
 * existing allocations.
 */
static int rvu_af_dl_npc_mcam_high_zone_percent_validate(struct devlink *devlink, u32 id,
							 union devlink_param_value val,
							 struct netlink_ext_ack *extack)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;
	struct npc_mcam *mcam;

	/* The percent of high prio zone must range from 12% to 100% of unreserved mcam space */
	if (val.vu8 < 12 || val.vu8 > 100) {
		NL_SET_ERR_MSG_MOD(extack,
				   "mcam high zone percent must be between 12% to 100%");
		return -EINVAL;
	}

	/* Do not allow user to modify the high priority zone entries while mcam entries
	 * have already been assigned.
	 */
	mcam = &rvu->hw->mcam;
	if (mcam->bmap_fcnt < mcam->bmap_entries) {
		NL_SET_ERR_MSG_MOD(extack,
				   "mcam entries have already been assigned, can't resize");
		return -EPERM;
	}

	return 0;
}
/* Devlink parameters always registered for the AF device:
 * currently only the runtime "dwrr_mtu" knob.
 */
static const struct devlink_param rvu_af_dl_params[] = {
	DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_DWRR_MTU,
			     "dwrr_mtu", DEVLINK_PARAM_TYPE_U32,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     rvu_af_dl_dwrr_mtu_get, rvu_af_dl_dwrr_mtu_set,
			     rvu_af_dl_dwrr_mtu_validate),
};
/* Devlink parameters registered only when the NPC exact-match table exists
 * (see rvu_register_dl(): CN10K-B only).
 */
static const struct devlink_param rvu_af_dl_param_exact_match[] = {
	DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NPC_EXACT_FEATURE_DISABLE,
			     "npc_exact_feature_disable", DEVLINK_PARAM_TYPE_STRING,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     rvu_af_npc_exact_feature_get,
			     rvu_af_npc_exact_feature_disable,
			     rvu_af_npc_exact_feature_validate),
	DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NPC_MCAM_ZONE_PERCENT,
			     "npc_mcam_high_zone_percent", DEVLINK_PARAM_TYPE_U8,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     rvu_af_dl_npc_mcam_high_zone_percent_get,
			     rvu_af_dl_npc_mcam_high_zone_percent_set,
			     rvu_af_dl_npc_mcam_high_zone_percent_validate),
};
1380 /* Devlink switch mode */
1381 static int rvu_devlink_eswitch_mode_get(struct devlink
*devlink
, u16
*mode
)
1383 struct rvu_devlink
*rvu_dl
= devlink_priv(devlink
);
1384 struct rvu
*rvu
= rvu_dl
->rvu
;
1385 struct rvu_switch
*rswitch
;
1387 rswitch
= &rvu
->rswitch
;
1388 *mode
= rswitch
->mode
;
/* devlink eswitch mode "set": switch between legacy and switchdev modes.
 * A request for the current mode is a no-op; switchdev enables the RVU
 * switch, legacy disables it. Unknown modes are rejected with -EINVAL.
 */
static int rvu_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
					struct netlink_ext_ack *extack)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;
	struct rvu_switch *rswitch;

	rswitch = &rvu->rswitch;
	switch (mode) {
	case DEVLINK_ESWITCH_MODE_LEGACY:
	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
		if (rswitch->mode == mode)
			return 0;	/* already in the requested mode */
		rswitch->mode = mode;
		if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
			rvu_switch_enable(rvu);
		else
			rvu_switch_disable(rvu);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
/* Devlink operations for the AF device: only eswitch mode get/set are
 * implemented here; health reporters and params are registered separately.
 */
static const struct devlink_ops rvu_devlink_ops = {
	.eswitch_mode_get = rvu_devlink_eswitch_mode_get,
	.eswitch_mode_set = rvu_devlink_eswitch_mode_set,
};
/* Allocate and register the devlink instance for the AF device: create the
 * health reporters, register the common params, conditionally register the
 * exact-match params (CN10K-B only), then publish the devlink instance.
 *
 * Returns 0 on success or a negative errno; on failure all previously
 * created state is unwound.
 *
 * NOTE(review): some error-path lines (gotos, devlink_free) are restored
 * from the visible labels/cleanup calls — confirm against upstream.
 */
int rvu_register_dl(struct rvu *rvu)
{
	struct rvu_devlink *rvu_dl;
	struct devlink *dl;
	int err;

	dl = devlink_alloc(&rvu_devlink_ops, sizeof(struct rvu_devlink),
			   rvu->dev);
	if (!dl) {
		dev_warn(rvu->dev, "devlink_alloc failed\n");
		return -ENOMEM;
	}

	rvu_dl = devlink_priv(dl);
	rvu_dl->dl = dl;
	rvu_dl->rvu = rvu;
	rvu->rvu_dl = rvu_dl;

	err = rvu_health_reporters_create(rvu);
	if (err) {
		dev_err(rvu->dev,
			"devlink health reporter creation failed with error %d\n", err);
		goto err_dl_health;
	}

	err = devlink_params_register(dl, rvu_af_dl_params, ARRAY_SIZE(rvu_af_dl_params));
	if (err) {
		dev_err(rvu->dev,
			"devlink params register failed with error %d", err);
		goto err_dl_health;
	}

	/* Register exact match devlink only for CN10K-B */
	if (!rvu_npc_exact_has_match_table(rvu))
		goto done;

	err = devlink_params_register(dl, rvu_af_dl_param_exact_match,
				      ARRAY_SIZE(rvu_af_dl_param_exact_match));
	if (err) {
		dev_err(rvu->dev,
			"devlink exact match params register failed with error %d", err);
		goto err_dl_exact_match;
	}

done:
	devlink_register(dl);
	return 0;

err_dl_exact_match:
	devlink_params_unregister(dl, rvu_af_dl_params, ARRAY_SIZE(rvu_af_dl_params));

err_dl_health:
	rvu_health_reporters_destroy(rvu);
	devlink_free(dl);
	return err;
}
1481 void rvu_unregister_dl(struct rvu
*rvu
)
1483 struct rvu_devlink
*rvu_dl
= rvu
->rvu_dl
;
1484 struct devlink
*dl
= rvu_dl
->dl
;
1486 devlink_unregister(dl
);
1488 devlink_params_unregister(dl
, rvu_af_dl_params
, ARRAY_SIZE(rvu_af_dl_params
));
1490 /* Unregister exact match devlink only for CN10K-B */
1491 if (rvu_npc_exact_has_match_table(rvu
))
1492 devlink_params_unregister(dl
, rvu_af_dl_param_exact_match
,
1493 ARRAY_SIZE(rvu_af_dl_param_exact_match
));
1495 rvu_health_reporters_destroy(rvu
);