/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: Main component of the bnxt_re driver
 */
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <net/dcbnl.h>
#include <net/ipv6.h>
#include <net/addrconf.h>
#include <linux/if_ether.h>
#include <linux/auxiliary_bus.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>

#include "bnxt_ulp.h"
#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_sp.h"
#include "qplib_fp.h"
#include "qplib_rcfw.h"
#include "bnxt_re.h"
#include "ib_verbs.h"
#include <rdma/bnxt_re-abi.h>
#include "bnxt.h"
#include "hw_counters.h"
static char version[] =
		BNXT_RE_DESC "\n";

MODULE_AUTHOR("Eddie Wai <eddie.wai@broadcom.com>");
MODULE_DESCRIPTION(BNXT_RE_DESC);
MODULE_LICENSE("Dual BSD/GPL");

static DEFINE_MUTEX(bnxt_re_mutex);
static void bnxt_re_stop_irq(void *handle);
static void bnxt_re_dev_stop(struct bnxt_re_dev *rdev);
static int bnxt_re_netdev_event(struct notifier_block *notifier,
				unsigned long event, void *ptr);
static struct bnxt_re_dev *bnxt_re_from_netdev(struct net_device *netdev);
static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev);
static int bnxt_re_hwrm_qcaps(struct bnxt_re_dev *rdev);

static int bnxt_re_hwrm_qcfg(struct bnxt_re_dev *rdev, u32 *db_len,
			     u32 *offset);
static void bnxt_re_set_db_offset(struct bnxt_re_dev *rdev)
{
	struct bnxt_qplib_chip_ctx *cctx;
	struct bnxt_en_dev *en_dev;
	struct bnxt_qplib_res *res;
	u32 l2db_len = 0;
	u32 offset = 0;
	u32 barlen;
	int rc;

	res = &rdev->qplib_res;
	en_dev = rdev->en_dev;
	cctx = rdev->chip_ctx;

	rc = bnxt_re_hwrm_qcfg(rdev, &l2db_len, &offset);
	if (rc)
		dev_info(rdev_to_dev(rdev),
			 "Couldn't get DB bar size, Low latency framework is disabled\n");
	/* set register offsets for both UC and WC */
	res->dpi_tbl.ucreg.offset = res->is_vf ? BNXT_QPLIB_DBR_VF_DB_OFFSET :
						 BNXT_QPLIB_DBR_PF_DB_OFFSET;
	res->dpi_tbl.wcreg.offset = res->dpi_tbl.ucreg.offset;

	/* If WC mapping is disabled by L2 driver then en_dev->l2_db_size
	 * is equal to the DB-Bar actual size. This indicates that L2
	 * is mapping entire bar as UC-. RoCE driver can't enable WC mapping
	 * in such cases and DB-push will be disabled.
	 */
	barlen = pci_resource_len(res->pdev, RCFW_DBR_PCI_BAR_REGION);
	if (cctx->modes.db_push && l2db_len && en_dev->l2_db_size != barlen) {
		res->dpi_tbl.wcreg.offset = en_dev->l2_db_size;
		dev_info(rdev_to_dev(rdev), "Low latency framework is enabled\n");
	}
}
static void bnxt_re_set_drv_mode(struct bnxt_re_dev *rdev, u8 mode)
{
	struct bnxt_qplib_chip_ctx *cctx;

	cctx = rdev->chip_ctx;
	cctx->modes.wqe_mode = bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
			       mode : BNXT_QPLIB_WQE_MODE_STATIC;
	if (bnxt_re_hwrm_qcaps(rdev))
		dev_err(rdev_to_dev(rdev),
			"Failed to query hwrm qcaps\n");
}
static void bnxt_re_destroy_chip_ctx(struct bnxt_re_dev *rdev)
{
	struct bnxt_qplib_chip_ctx *chip_ctx;

	if (!rdev->chip_ctx)
		return;
	chip_ctx = rdev->chip_ctx;
	rdev->chip_ctx = NULL;
	rdev->rcfw.res = NULL;
	rdev->qplib_res.cctx = NULL;
	rdev->qplib_res.pdev = NULL;
	rdev->qplib_res.netdev = NULL;
	kfree(chip_ctx);
}
static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev, u8 wqe_mode)
{
	struct bnxt_qplib_chip_ctx *chip_ctx;
	struct bnxt_en_dev *en_dev;
	int rc;

	en_dev = rdev->en_dev;

	chip_ctx = kzalloc(sizeof(*chip_ctx), GFP_KERNEL);
	if (!chip_ctx)
		return -ENOMEM;
	chip_ctx->chip_num = en_dev->chip_num;
	chip_ctx->hw_stats_size = en_dev->hw_ring_stats_size;

	rdev->chip_ctx = chip_ctx;
	/* rest members to follow eventually */

	rdev->qplib_res.cctx = rdev->chip_ctx;
	rdev->rcfw.res = &rdev->qplib_res;
	rdev->qplib_res.dattr = &rdev->dev_attr;
	rdev->qplib_res.is_vf = BNXT_EN_VF(en_dev);

	bnxt_re_set_drv_mode(rdev, wqe_mode);

	bnxt_re_set_db_offset(rdev);
	rc = bnxt_qplib_map_db_bar(&rdev->qplib_res);
	if (rc)
		return rc;

	if (bnxt_qplib_determine_atomics(en_dev->pdev))
		ibdev_info(&rdev->ibdev,
			   "platform doesn't support global atomics.");
	return 0;
}
/* SR-IOV helper functions */

static void bnxt_re_get_sriov_func_type(struct bnxt_re_dev *rdev)
{
	if (BNXT_EN_VF(rdev->en_dev))
		rdev->is_virtfn = 1;
}
/* Set the maximum number of each resource that the driver actually wants
 * to allocate. This may be up to the maximum number the firmware has
 * reserved for the function. The driver may choose to allocate fewer
 * resources than the firmware maximum.
 */
static void bnxt_re_limit_pf_res(struct bnxt_re_dev *rdev)
{
	struct bnxt_qplib_dev_attr *attr;
	struct bnxt_qplib_ctx *ctx;
	int i;

	attr = &rdev->dev_attr;
	ctx = &rdev->qplib_ctx;

	ctx->qpc_count = min_t(u32, BNXT_RE_MAX_QPC_COUNT,
			       attr->max_qp);
	ctx->mrw_count = BNXT_RE_MAX_MRW_COUNT_256K;
	/* Use max_mr from fw since max_mrw does not get set */
	ctx->mrw_count = min_t(u32, ctx->mrw_count, attr->max_mr);
	ctx->srqc_count = min_t(u32, BNXT_RE_MAX_SRQC_COUNT,
				attr->max_srq);
	ctx->cq_count = min_t(u32, BNXT_RE_MAX_CQ_COUNT, attr->max_cq);
	if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx))
		for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
			rdev->qplib_ctx.tqm_ctx.qcount[i] =
			rdev->dev_attr.tqm_alloc_reqs[i];
}
static void bnxt_re_limit_vf_res(struct bnxt_qplib_ctx *qplib_ctx, u32 num_vf)
{
	struct bnxt_qplib_vf_res *vf_res;
	u32 mrws = 0;
	u32 vf_pct;
	u32 nvfs;

	vf_res = &qplib_ctx->vf_res;
	/*
	 * Reserve a set of resources for the PF. Divide the remaining
	 * resources among the VFs
	 */
	vf_pct = 100 - BNXT_RE_PCT_RSVD_FOR_PF;
	nvfs = num_vf;
	num_vf = 100 * num_vf;
	vf_res->max_qp_per_vf = (qplib_ctx->qpc_count * vf_pct) / num_vf;
	vf_res->max_srq_per_vf = (qplib_ctx->srqc_count * vf_pct) / num_vf;
	vf_res->max_cq_per_vf = (qplib_ctx->cq_count * vf_pct) / num_vf;
	/*
	 * The driver allows many more MRs than other resources. If the
	 * firmware does also, then reserve a fixed amount for the PF and
	 * divide the rest among VFs. VFs may use many MRs for NFS
	 * mounts, ISER, NVME applications, etc. If the firmware severely
	 * restricts the number of MRs, then let PF have half and divide
	 * the rest among VFs, as for the other resource types.
	 */
	if (qplib_ctx->mrw_count < BNXT_RE_MAX_MRW_COUNT_64K) {
		mrws = qplib_ctx->mrw_count * vf_pct;
		nvfs = num_vf;
	} else {
		mrws = qplib_ctx->mrw_count - BNXT_RE_RESVD_MR_FOR_PF;
	}
	vf_res->max_mrw_per_vf = (mrws / nvfs);
	vf_res->max_gid_per_vf = BNXT_RE_MAX_GID_PER_VF;
}
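
/* Worked example of the split above, with illustrative numbers that are
 * not firmware-derived: if BNXT_RE_PCT_RSVD_FOR_PF were 10, qpc_count
 * were 65536 and num_vf were 2, then vf_pct = 90 and the scaled divisor
 * num_vf = 200, so each VF could create up to (65536 * 90) / 200 = 29491
 * QPs while the PF keeps its reserved share. The MR split applies the
 * same arithmetic unless the firmware cap is below
 * BNXT_RE_MAX_MRW_COUNT_64K.
 */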
static void bnxt_re_set_resource_limits(struct bnxt_re_dev *rdev)
{
	u32 num_vfs;

	memset(&rdev->qplib_ctx.vf_res, 0, sizeof(struct bnxt_qplib_vf_res));
	bnxt_re_limit_pf_res(rdev);

	num_vfs = bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
		  BNXT_RE_GEN_P5_MAX_VF : rdev->num_vfs;
	if (num_vfs)
		bnxt_re_limit_vf_res(&rdev->qplib_ctx, num_vfs);
}
static void bnxt_re_vf_res_config(struct bnxt_re_dev *rdev)
{
	if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags))
		return;

	rdev->num_vfs = pci_sriov_get_totalvfs(rdev->en_dev->pdev);
	if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx)) {
		bnxt_re_set_resource_limits(rdev);
		bnxt_qplib_set_func_resources(&rdev->qplib_res, &rdev->rcfw,
					      &rdev->qplib_ctx);
	}
}
static void bnxt_re_shutdown(struct auxiliary_device *adev)
{
	struct bnxt_re_dev *rdev = auxiliary_get_drvdata(adev);

	if (!rdev)
		return;
	ib_unregister_device(&rdev->ibdev);
	bnxt_re_dev_uninit(rdev);
}
static void bnxt_re_stop_irq(void *handle)
{
	struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle;
	struct bnxt_qplib_rcfw *rcfw = &rdev->rcfw;
	struct bnxt_qplib_nq *nq;
	int indx;

	for (indx = BNXT_RE_NQ_IDX; indx < rdev->num_msix; indx++) {
		nq = &rdev->nq[indx - 1];
		bnxt_qplib_nq_stop_irq(nq, false);
	}

	bnxt_qplib_rcfw_stop_irq(rcfw, false);
}
static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent)
{
	struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle;
	struct bnxt_msix_entry *msix_ent = rdev->en_dev->msix_entries;
	struct bnxt_qplib_rcfw *rcfw = &rdev->rcfw;
	struct bnxt_qplib_nq *nq;
	int indx, rc;

	if (!ent) {
		/* Not setting the f/w timeout bit in rcfw.
		 * During the driver unload the first command
		 * to f/w will timeout and that will set the
		 * timeout bit.
		 */
		ibdev_err(&rdev->ibdev, "Failed to re-start IRQs\n");
		return;
	}

	/* Vectors may change after restart, so update with new vectors
	 * in the device structure.
	 */
	for (indx = 0; indx < rdev->num_msix; indx++)
		rdev->en_dev->msix_entries[indx].vector = ent[indx].vector;

	rc = bnxt_qplib_rcfw_start_irq(rcfw, msix_ent[BNXT_RE_AEQ_IDX].vector,
				       false);
	if (rc)
		ibdev_warn(&rdev->ibdev, "Failed to reinit CREQ\n");
	for (indx = BNXT_RE_NQ_IDX; indx < rdev->num_msix; indx++) {
		nq = &rdev->nq[indx - 1];
		rc = bnxt_qplib_nq_start_irq(nq, indx - 1,
					     msix_ent[indx].vector, false);
		if (rc)
			ibdev_warn(&rdev->ibdev, "Failed to reinit NQ index %d\n",
				   indx - 1);
	}
}
static struct bnxt_ulp_ops bnxt_re_ulp_ops = {
	.ulp_irq_stop = bnxt_re_stop_irq,
	.ulp_irq_restart = bnxt_re_start_irq
};
/* RoCE -> Net driver */

static int bnxt_re_register_netdev(struct bnxt_re_dev *rdev)
{
	struct bnxt_en_dev *en_dev;
	int rc;

	en_dev = rdev->en_dev;

	rc = bnxt_register_dev(en_dev, &bnxt_re_ulp_ops, rdev);
	if (rc)
		return rc;
	rdev->qplib_res.pdev = rdev->en_dev->pdev;
	return 0;
}
static void bnxt_re_init_hwrm_hdr(struct input *hdr, u16 opcd)
{
	hdr->req_type = cpu_to_le16(opcd);
	hdr->cmpl_ring = cpu_to_le16(-1);
	hdr->target_id = cpu_to_le16(-1);
}
static void bnxt_re_fill_fw_msg(struct bnxt_fw_msg *fw_msg, void *msg,
				int msg_len, void *resp, int resp_max_len,
				int timeout)
{
	fw_msg->msg = msg;
	fw_msg->msg_len = msg_len;
	fw_msg->resp = resp;
	fw_msg->resp_max_len = resp_max_len;
	fw_msg->timeout = timeout;
}
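
/* Every HWRM exchange in this file follows the same three-step pattern:
 * initialize the request header with the opcode via
 * bnxt_re_init_hwrm_hdr(), describe the request/response buffers with
 * bnxt_re_fill_fw_msg(), then issue the message through bnxt_send_msg().
 * bnxt_re_hwrm_qcfg() below is a minimal instance of the pattern.
 */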
/* Query device config using common hwrm */
static int bnxt_re_hwrm_qcfg(struct bnxt_re_dev *rdev, u32 *db_len,
			     u32 *offset)
{
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct hwrm_func_qcfg_output resp = {0};
	struct hwrm_func_qcfg_input req = {0};
	struct bnxt_fw_msg fw_msg = {};
	int rc;

	bnxt_re_init_hwrm_hdr((void *)&req, HWRM_FUNC_QCFG);
	req.fid = cpu_to_le16(0xffff);
	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
	rc = bnxt_send_msg(en_dev, &fw_msg);
	if (!rc) {
		*db_len = PAGE_ALIGN(le16_to_cpu(resp.l2_doorbell_bar_size_kb) * 1024);
		*offset = PAGE_ALIGN(le16_to_cpu(resp.legacy_l2_db_size_kb) * 1024);
	}
	return rc;
}
/* Query function capabilities using common hwrm */
int bnxt_re_hwrm_qcaps(struct bnxt_re_dev *rdev)
{
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct hwrm_func_qcaps_output resp = {};
	struct hwrm_func_qcaps_input req = {};
	struct bnxt_qplib_chip_ctx *cctx;
	struct bnxt_fw_msg fw_msg = {};
	int rc;

	cctx = rdev->chip_ctx;
	bnxt_re_init_hwrm_hdr((void *)&req, HWRM_FUNC_QCAPS);
	req.fid = cpu_to_le16(0xffff);
	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);

	rc = bnxt_send_msg(en_dev, &fw_msg);
	if (rc)
		return rc;
	cctx->modes.db_push = le32_to_cpu(resp.flags) & FUNC_QCAPS_RESP_FLAGS_WCB_PUSH_MODE;

	cctx->modes.dbr_pacing =
		le32_to_cpu(resp.flags_ext2) &
		FUNC_QCAPS_RESP_FLAGS_EXT2_DBR_PACING_EXT_SUPPORTED;
	return 0;
}
static int bnxt_re_hwrm_dbr_pacing_qcfg(struct bnxt_re_dev *rdev)
{
	struct hwrm_func_dbr_pacing_qcfg_output resp = {};
	struct hwrm_func_dbr_pacing_qcfg_input req = {};
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct bnxt_qplib_chip_ctx *cctx;
	struct bnxt_fw_msg fw_msg = {};
	int rc;

	cctx = rdev->chip_ctx;
	bnxt_re_init_hwrm_hdr((void *)&req, HWRM_FUNC_DBR_PACING_QCFG);
	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
	rc = bnxt_send_msg(en_dev, &fw_msg);
	if (rc)
		return rc;

	if ((le32_to_cpu(resp.dbr_stat_db_fifo_reg) &
	     FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_MASK) ==
	     FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_GRC)
		cctx->dbr_stat_db_fifo =
			le32_to_cpu(resp.dbr_stat_db_fifo_reg) &
			~FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_MASK;
	return 0;
}
/* Update the pacing tunable parameters to the default values */
static void bnxt_re_set_default_pacing_data(struct bnxt_re_dev *rdev)
{
	struct bnxt_qplib_db_pacing_data *pacing_data = rdev->qplib_res.pacing_data;

	pacing_data->do_pacing = rdev->pacing.dbr_def_do_pacing;
	pacing_data->pacing_th = rdev->pacing.pacing_algo_th;
	pacing_data->alarm_th =
		pacing_data->pacing_th * BNXT_RE_PACING_ALARM_TH_MULTIPLE;
}
static void __wait_for_fifo_occupancy_below_th(struct bnxt_re_dev *rdev)
{
	u32 read_val, fifo_occup;

	/* loop shouldn't run infinitely as the occupancy usually goes
	 * below pacing algo threshold as soon as pacing kicks in.
	 */
	while (1) {
		read_val = readl(rdev->en_dev->bar0 + rdev->pacing.dbr_db_fifo_reg_off);
		fifo_occup = BNXT_RE_MAX_FIFO_DEPTH -
			     ((read_val & BNXT_RE_DB_FIFO_ROOM_MASK) >>
			      BNXT_RE_DB_FIFO_ROOM_SHIFT);
		/* Fifo occupancy cannot be greater than the MAX FIFO depth */
		if (fifo_occup > BNXT_RE_MAX_FIFO_DEPTH)
			break;

		if (fifo_occup < rdev->qplib_res.pacing_data->pacing_th)
			break;
	}
}
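
/* Occupancy math above, with illustrative numbers that are not taken
 * from the actual register map: if BNXT_RE_MAX_FIFO_DEPTH were 0x2c00
 * entries and the extracted "room" field were 0x2000, then
 * fifo_occup = 0x2c00 - 0x2000 = 0xc00 entries in flight; the loop
 * exits once that figure drops below pacing_th, or on an out-of-range
 * read that indicates the register is no longer accessible.
 */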
static void bnxt_re_db_fifo_check(struct work_struct *work)
{
	struct bnxt_re_dev *rdev = container_of(work, struct bnxt_re_dev,
						dbq_fifo_check_work);
	struct bnxt_qplib_db_pacing_data *pacing_data;
	u32 pacing_save;

	if (!mutex_trylock(&rdev->pacing.dbq_lock))
		return;
	pacing_data = rdev->qplib_res.pacing_data;
	pacing_save = rdev->pacing.do_pacing_save;
	__wait_for_fifo_occupancy_below_th(rdev);
	cancel_delayed_work_sync(&rdev->dbq_pacing_work);
	if (pacing_save > rdev->pacing.dbr_def_do_pacing) {
		/* Double the do_pacing value during the congestion */
		pacing_save = pacing_save << 1;
	} else {
		/*
		 * when a new congestion is detected increase the do_pacing
		 * by 8 times. And also increase the pacing_th by 4 times. The
		 * reason to increase pacing_th is to give more space for the
		 * queue to oscillate down without getting empty, but also more
		 * room for the queue to increase without causing another alarm.
		 */
		pacing_save = pacing_save << 3;
		pacing_data->pacing_th = rdev->pacing.pacing_algo_th * 4;
	}

	if (pacing_save > BNXT_RE_MAX_DBR_DO_PACING)
		pacing_save = BNXT_RE_MAX_DBR_DO_PACING;

	pacing_data->do_pacing = pacing_save;
	rdev->pacing.do_pacing_save = pacing_data->do_pacing;
	pacing_data->alarm_th =
		pacing_data->pacing_th * BNXT_RE_PACING_ALARM_TH_MULTIPLE;
	schedule_delayed_work(&rdev->dbq_pacing_work,
			      msecs_to_jiffies(rdev->pacing.dbq_pacing_time));
	rdev->stats.pacing.alerts++;
	mutex_unlock(&rdev->pacing.dbq_lock);
}
static void bnxt_re_pacing_timer_exp(struct work_struct *work)
{
	struct bnxt_re_dev *rdev = container_of(work, struct bnxt_re_dev,
						dbq_pacing_work.work);
	struct bnxt_qplib_db_pacing_data *pacing_data;
	u32 read_val, fifo_occup;

	if (!mutex_trylock(&rdev->pacing.dbq_lock))
		return;

	pacing_data = rdev->qplib_res.pacing_data;
	read_val = readl(rdev->en_dev->bar0 + rdev->pacing.dbr_db_fifo_reg_off);
	fifo_occup = BNXT_RE_MAX_FIFO_DEPTH -
		     ((read_val & BNXT_RE_DB_FIFO_ROOM_MASK) >>
		      BNXT_RE_DB_FIFO_ROOM_SHIFT);

	if (fifo_occup > pacing_data->pacing_th)
		goto restart_timer;

	/*
	 * Instead of immediately going back to the default do_pacing
	 * reduce it by 1/8 times and restart the timer.
	 */
	pacing_data->do_pacing = pacing_data->do_pacing - (pacing_data->do_pacing >> 3);
	pacing_data->do_pacing = max_t(u32, rdev->pacing.dbr_def_do_pacing, pacing_data->do_pacing);
	if (pacing_data->do_pacing <= rdev->pacing.dbr_def_do_pacing) {
		bnxt_re_set_default_pacing_data(rdev);
		rdev->stats.pacing.complete++;
		goto dbq_unlock;
	}

restart_timer:
	schedule_delayed_work(&rdev->dbq_pacing_work,
			      msecs_to_jiffies(rdev->pacing.dbq_pacing_time));
	rdev->stats.pacing.resched++;
dbq_unlock:
	rdev->pacing.do_pacing_save = pacing_data->do_pacing;
	mutex_unlock(&rdev->pacing.dbq_lock);
}
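
/* Illustrative decay for the 1/8 reduction above: starting from a
 * congested do_pacing of 1024, successive timer expirations yield
 * 1024 - (1024 >> 3) = 896, then 896 - (896 >> 3) = 784, and so on,
 * until the value falls back to dbr_def_do_pacing and
 * bnxt_re_set_default_pacing_data() restores the defaults.
 */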
void bnxt_re_pacing_alert(struct bnxt_re_dev *rdev)
{
	struct bnxt_qplib_db_pacing_data *pacing_data;

	if (!rdev->pacing.dbr_pacing)
		return;
	mutex_lock(&rdev->pacing.dbq_lock);
	pacing_data = rdev->qplib_res.pacing_data;

	/*
	 * Increase the alarm_th to max so that other user lib instances do not
	 * keep alerting the driver.
	 */
	pacing_data->alarm_th = BNXT_RE_MAX_FIFO_DEPTH;
	pacing_data->do_pacing = BNXT_RE_MAX_DBR_DO_PACING;
	cancel_work_sync(&rdev->dbq_fifo_check_work);
	schedule_work(&rdev->dbq_fifo_check_work);
	mutex_unlock(&rdev->pacing.dbq_lock);
}
static int bnxt_re_initialize_dbr_pacing(struct bnxt_re_dev *rdev)
{
	if (bnxt_re_hwrm_dbr_pacing_qcfg(rdev))
		return -EIO;

	/* Allocate a page for app use */
	rdev->pacing.dbr_page = (void *)__get_free_page(GFP_KERNEL);
	if (!rdev->pacing.dbr_page)
		return -ENOMEM;

	memset((u8 *)rdev->pacing.dbr_page, 0, PAGE_SIZE);
	rdev->qplib_res.pacing_data = (struct bnxt_qplib_db_pacing_data *)rdev->pacing.dbr_page;

	/* MAP HW window 2 for reading db fifo depth */
	writel(rdev->chip_ctx->dbr_stat_db_fifo & BNXT_GRC_BASE_MASK,
	       rdev->en_dev->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
	rdev->pacing.dbr_db_fifo_reg_off =
		(rdev->chip_ctx->dbr_stat_db_fifo & BNXT_GRC_OFFSET_MASK) +
		 BNXT_RE_GRC_FIFO_REG_BASE;
	rdev->pacing.dbr_bar_addr =
		pci_resource_start(rdev->qplib_res.pdev, 0) + rdev->pacing.dbr_db_fifo_reg_off;

	rdev->pacing.pacing_algo_th = BNXT_RE_PACING_ALGO_THRESHOLD;
	rdev->pacing.dbq_pacing_time = BNXT_RE_DBR_PACING_TIME;
	rdev->pacing.dbr_def_do_pacing = BNXT_RE_DBR_DO_PACING_NO_CONGESTION;
	rdev->pacing.do_pacing_save = rdev->pacing.dbr_def_do_pacing;
	rdev->qplib_res.pacing_data->fifo_max_depth = BNXT_RE_MAX_FIFO_DEPTH;
	rdev->qplib_res.pacing_data->fifo_room_mask = BNXT_RE_DB_FIFO_ROOM_MASK;
	rdev->qplib_res.pacing_data->fifo_room_shift = BNXT_RE_DB_FIFO_ROOM_SHIFT;
	rdev->qplib_res.pacing_data->grc_reg_offset = rdev->pacing.dbr_db_fifo_reg_off;
	bnxt_re_set_default_pacing_data(rdev);
	/* Initialize worker for DBR Pacing */
	INIT_WORK(&rdev->dbq_fifo_check_work, bnxt_re_db_fifo_check);
	INIT_DELAYED_WORK(&rdev->dbq_pacing_work, bnxt_re_pacing_timer_exp);
	return 0;
}
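
/* Sketch of the window math above, with an illustrative (not
 * chip-accurate) value: for dbr_stat_db_fifo = 0x001e8090, the
 * BNXT_GRC_BASE_MASK portion selects the GRC base that is programmed
 * into HW window 2, while the BNXT_GRC_OFFSET_MASK remainder plus
 * BNXT_RE_GRC_FIFO_REG_BASE becomes the BAR0 offset that the pacing
 * workers later readl().
 */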
static void bnxt_re_deinitialize_dbr_pacing(struct bnxt_re_dev *rdev)
{
	cancel_work_sync(&rdev->dbq_fifo_check_work);
	cancel_delayed_work_sync(&rdev->dbq_pacing_work);
	if (rdev->pacing.dbr_page)
		free_page((u64)rdev->pacing.dbr_page);

	rdev->pacing.dbr_page = NULL;
	rdev->pacing.dbr_pacing = false;
}
static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev,
				 u16 fw_ring_id, int type)
{
	struct bnxt_en_dev *en_dev;
	struct hwrm_ring_free_input req = {};
	struct hwrm_ring_free_output resp;
	struct bnxt_fw_msg fw_msg = {};
	int rc = -EINVAL;

	if (!rdev)
		return rc;

	en_dev = rdev->en_dev;

	if (!en_dev)
		return rc;

	if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags))
		return 0;

	bnxt_re_init_hwrm_hdr((void *)&req, HWRM_RING_FREE);
	req.ring_type = type;
	req.ring_id = cpu_to_le16(fw_ring_id);
	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
	rc = bnxt_send_msg(en_dev, &fw_msg);
	if (rc)
		ibdev_err(&rdev->ibdev, "Failed to free HW ring:%d :%#x",
			  req.ring_id, rc);
	return rc;
}
static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev,
				  struct bnxt_re_ring_attr *ring_attr,
				  u16 *fw_ring_id)
{
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct hwrm_ring_alloc_input req = {};
	struct hwrm_ring_alloc_output resp;
	struct bnxt_fw_msg fw_msg = {};
	int rc = -EINVAL;

	if (!en_dev)
		return rc;

	bnxt_re_init_hwrm_hdr((void *)&req, HWRM_RING_ALLOC);
	req.page_tbl_addr = cpu_to_le64(ring_attr->dma_arr[0]);
	if (ring_attr->pages > 1) {
		/* Page size is in log2 units */
		req.page_size = BNXT_PAGE_SHIFT;
		req.page_tbl_depth = 1;
	}
	/* Association of ring index with doorbell index and MSIX number */
	req.logical_id = cpu_to_le16(ring_attr->lrid);
	req.length = cpu_to_le32(ring_attr->depth + 1);
	req.ring_type = ring_attr->type;
	req.int_mode = ring_attr->mode;
	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
	rc = bnxt_send_msg(en_dev, &fw_msg);
	if (!rc)
		*fw_ring_id = le16_to_cpu(resp.ring_id);

	return rc;
}
static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev,
				      u32 fw_stats_ctx_id)
{
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct hwrm_stat_ctx_free_input req = {};
	struct hwrm_stat_ctx_free_output resp = {};
	struct bnxt_fw_msg fw_msg = {};
	int rc = -EINVAL;

	if (!en_dev)
		return rc;

	if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags))
		return 0;

	bnxt_re_init_hwrm_hdr((void *)&req, HWRM_STAT_CTX_FREE);
	req.stat_ctx_id = cpu_to_le32(fw_stats_ctx_id);
	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
	rc = bnxt_send_msg(en_dev, &fw_msg);
	if (rc)
		ibdev_err(&rdev->ibdev, "Failed to free HW stats context %#x",
			  rc);

	return rc;
}
*rdev
,
744 u32
*fw_stats_ctx_id
)
746 struct bnxt_qplib_chip_ctx
*chip_ctx
= rdev
->chip_ctx
;
747 struct hwrm_stat_ctx_alloc_output resp
= {};
748 struct hwrm_stat_ctx_alloc_input req
= {};
749 struct bnxt_en_dev
*en_dev
= rdev
->en_dev
;
750 struct bnxt_fw_msg fw_msg
= {};
753 *fw_stats_ctx_id
= INVALID_STATS_CTX_ID
;
758 bnxt_re_init_hwrm_hdr((void *)&req
, HWRM_STAT_CTX_ALLOC
);
759 req
.update_period_ms
= cpu_to_le32(1000);
760 req
.stats_dma_addr
= cpu_to_le64(dma_map
);
761 req
.stats_dma_length
= cpu_to_le16(chip_ctx
->hw_stats_size
);
762 req
.stat_ctx_flags
= STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE
;
763 bnxt_re_fill_fw_msg(&fw_msg
, (void *)&req
, sizeof(req
), (void *)&resp
,
764 sizeof(resp
), DFLT_HWRM_CMD_TIMEOUT
);
765 rc
= bnxt_send_msg(en_dev
, &fw_msg
);
767 *fw_stats_ctx_id
= le32_to_cpu(resp
.stat_ctx_id
);
772 static void bnxt_re_disassociate_ucontext(struct ib_ucontext
*ibcontext
)
778 static struct bnxt_re_dev
*bnxt_re_from_netdev(struct net_device
*netdev
)
780 struct ib_device
*ibdev
=
781 ib_device_get_by_netdev(netdev
, RDMA_DRIVER_BNXT_RE
);
785 return container_of(ibdev
, struct bnxt_re_dev
, ibdev
);
static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
			   char *buf)
{
	struct bnxt_re_dev *rdev =
		rdma_device_to_drv_device(device, struct bnxt_re_dev, ibdev);

	return sysfs_emit(buf, "0x%x\n", rdev->en_dev->pdev->vendor);
}
static DEVICE_ATTR_RO(hw_rev);

static ssize_t hca_type_show(struct device *device,
			     struct device_attribute *attr, char *buf)
{
	struct bnxt_re_dev *rdev =
		rdma_device_to_drv_device(device, struct bnxt_re_dev, ibdev);

	return sysfs_emit(buf, "%s\n", rdev->ibdev.node_desc);
}
static DEVICE_ATTR_RO(hca_type);
static struct attribute *bnxt_re_attributes[] = {
	&dev_attr_hw_rev.attr,
	&dev_attr_hca_type.attr,
	NULL
};

static const struct attribute_group bnxt_re_dev_attr_group = {
	.attrs = bnxt_re_attributes,
};
static const struct ib_device_ops bnxt_re_dev_ops = {
	.owner = THIS_MODULE,
	.driver_id = RDMA_DRIVER_BNXT_RE,
	.uverbs_abi_ver = BNXT_RE_ABI_VERSION,

	.add_gid = bnxt_re_add_gid,
	.alloc_hw_port_stats = bnxt_re_ib_alloc_hw_port_stats,
	.alloc_mr = bnxt_re_alloc_mr,
	.alloc_pd = bnxt_re_alloc_pd,
	.alloc_ucontext = bnxt_re_alloc_ucontext,
	.create_ah = bnxt_re_create_ah,
	.create_cq = bnxt_re_create_cq,
	.create_qp = bnxt_re_create_qp,
	.create_srq = bnxt_re_create_srq,
	.create_user_ah = bnxt_re_create_ah,
	.dealloc_pd = bnxt_re_dealloc_pd,
	.dealloc_ucontext = bnxt_re_dealloc_ucontext,
	.del_gid = bnxt_re_del_gid,
	.dereg_mr = bnxt_re_dereg_mr,
	.destroy_ah = bnxt_re_destroy_ah,
	.destroy_cq = bnxt_re_destroy_cq,
	.destroy_qp = bnxt_re_destroy_qp,
	.destroy_srq = bnxt_re_destroy_srq,
	.device_group = &bnxt_re_dev_attr_group,
	.disassociate_ucontext = bnxt_re_disassociate_ucontext,
	.get_dev_fw_str = bnxt_re_query_fw_str,
	.get_dma_mr = bnxt_re_get_dma_mr,
	.get_hw_stats = bnxt_re_ib_get_hw_stats,
	.get_link_layer = bnxt_re_get_link_layer,
	.get_port_immutable = bnxt_re_get_port_immutable,
	.map_mr_sg = bnxt_re_map_mr_sg,
	.mmap = bnxt_re_mmap,
	.mmap_free = bnxt_re_mmap_free,
	.modify_qp = bnxt_re_modify_qp,
	.modify_srq = bnxt_re_modify_srq,
	.poll_cq = bnxt_re_poll_cq,
	.post_recv = bnxt_re_post_recv,
	.post_send = bnxt_re_post_send,
	.post_srq_recv = bnxt_re_post_srq_recv,
	.query_ah = bnxt_re_query_ah,
	.query_device = bnxt_re_query_device,
	.query_pkey = bnxt_re_query_pkey,
	.query_port = bnxt_re_query_port,
	.query_qp = bnxt_re_query_qp,
	.query_srq = bnxt_re_query_srq,
	.reg_user_mr = bnxt_re_reg_user_mr,
	.reg_user_mr_dmabuf = bnxt_re_reg_user_mr_dmabuf,
	.req_notify_cq = bnxt_re_req_notify_cq,
	.resize_cq = bnxt_re_resize_cq,
	INIT_RDMA_OBJ_SIZE(ib_ah, bnxt_re_ah, ib_ah),
	INIT_RDMA_OBJ_SIZE(ib_cq, bnxt_re_cq, ib_cq),
	INIT_RDMA_OBJ_SIZE(ib_pd, bnxt_re_pd, ib_pd),
	INIT_RDMA_OBJ_SIZE(ib_qp, bnxt_re_qp, ib_qp),
	INIT_RDMA_OBJ_SIZE(ib_srq, bnxt_re_srq, ib_srq),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, bnxt_re_ucontext, ib_uctx),
};
static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
{
	struct ib_device *ibdev = &rdev->ibdev;
	int ret;

	/* ib device init */
	ibdev->node_type = RDMA_NODE_IB_CA;
	strscpy(ibdev->node_desc, BNXT_RE_DESC " HCA",
		strlen(BNXT_RE_DESC) + 5);
	ibdev->phys_port_cnt = 1;

	addrconf_addr_eui48((u8 *)&ibdev->node_guid, rdev->netdev->dev_addr);

	ibdev->num_comp_vectors = rdev->num_msix - 1;
	ibdev->dev.parent = &rdev->en_dev->pdev->dev;
	ibdev->local_dma_lkey = BNXT_QPLIB_RSVD_LKEY;

	if (IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS))
		ibdev->driver_def = bnxt_re_uapi_defs;

	ib_set_device_ops(ibdev, &bnxt_re_dev_ops);
	ret = ib_device_set_netdev(&rdev->ibdev, rdev->netdev, 1);
	if (ret)
		return ret;

	dma_set_max_seg_size(&rdev->en_dev->pdev->dev, UINT_MAX);
	ibdev->uverbs_cmd_mask |= BIT_ULL(IB_USER_VERBS_CMD_POLL_CQ);
	return ib_register_device(ibdev, "bnxt_re%d", &rdev->en_dev->pdev->dev);
}
static struct bnxt_re_dev *bnxt_re_dev_add(struct bnxt_aux_priv *aux_priv,
					   struct bnxt_en_dev *en_dev)
{
	struct bnxt_re_dev *rdev;

	/* Allocate bnxt_re_dev instance here */
	rdev = ib_alloc_device(bnxt_re_dev, ibdev);
	if (!rdev) {
		ibdev_err(NULL, "%s: bnxt_re_dev allocation failure!",
			  ROCE_DRV_MODULE_NAME);
		return NULL;
	}
	/* Default values */
	rdev->nb.notifier_call = NULL;
	rdev->netdev = en_dev->net;
	rdev->en_dev = en_dev;
	rdev->id = rdev->en_dev->pdev->devfn;
	INIT_LIST_HEAD(&rdev->qp_list);
	mutex_init(&rdev->qp_lock);
	mutex_init(&rdev->pacing.dbq_lock);
	atomic_set(&rdev->stats.res.qp_count, 0);
	atomic_set(&rdev->stats.res.cq_count, 0);
	atomic_set(&rdev->stats.res.srq_count, 0);
	atomic_set(&rdev->stats.res.mr_count, 0);
	atomic_set(&rdev->stats.res.mw_count, 0);
	atomic_set(&rdev->stats.res.ah_count, 0);
	atomic_set(&rdev->stats.res.pd_count, 0);
	rdev->cosq[0] = 0xFFFF;
	rdev->cosq[1] = 0xFFFF;

	return rdev;
}
static int bnxt_re_handle_unaffi_async_event(struct creq_func_event
					     *unaffi_async)
{
	switch (unaffi_async->event) {
	case CREQ_FUNC_EVENT_EVENT_TX_WQE_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_TX_DATA_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_RX_WQE_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_RX_DATA_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CQ_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_TQM_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCQ_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCS_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCC_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCM_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_TIM_ERROR:
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
static int bnxt_re_handle_qp_async_event(struct creq_qp_event *qp_event,
					 struct bnxt_re_qp *qp)
{
	struct bnxt_re_srq *srq = container_of(qp->qplib_qp.srq, struct bnxt_re_srq,
					       qplib_srq);
	struct creq_qp_error_notification *err_event;
	struct ib_event event = {};
	unsigned int flags;

	if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR &&
	    rdma_is_kernel_res(&qp->ib_qp.res)) {
		flags = bnxt_re_lock_cqs(qp);
		bnxt_qplib_add_flush_qp(&qp->qplib_qp);
		bnxt_re_unlock_cqs(qp, flags);
	}

	event.device = &qp->rdev->ibdev;
	event.element.qp = &qp->ib_qp;
	event.event = IB_EVENT_QP_FATAL;

	err_event = (struct creq_qp_error_notification *)qp_event;

	switch (err_event->req_err_state_reason) {
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_OPCODE_ERROR:
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_TIMEOUT_RETRY_LIMIT:
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_RNR_TIMEOUT_RETRY_LIMIT:
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_NAK_ARRIVAL_2:
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_NAK_ARRIVAL_3:
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_INVALID_READ_RESP:
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_ILLEGAL_BIND:
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_ILLEGAL_FAST_REG:
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_ILLEGAL_INVALIDATE:
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_RETRAN_LOCAL_ERROR:
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_AV_DOMAIN_ERROR:
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_PROD_WQE_MSMTCH_ERROR:
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_PSN_RANGE_CHECK_ERROR:
		event.event = IB_EVENT_QP_ACCESS_ERR;
		break;
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_NAK_ARRIVAL_1:
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_NAK_ARRIVAL_4:
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_READ_RESP_LENGTH:
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_WQE_FORMAT_ERROR:
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_ORRQ_FORMAT_ERROR:
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_INVALID_AVID_ERROR:
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_SERV_TYPE_ERROR:
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_INVALID_OP_ERROR:
		event.event = IB_EVENT_QP_REQ_ERR;
		break;
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_RX_MEMORY_ERROR:
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_TX_MEMORY_ERROR:
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_CMP_ERROR:
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_CQ_LOAD_ERROR:
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_TX_PCI_ERROR:
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_RX_PCI_ERROR:
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_RETX_SETUP_ERROR:
		event.event = IB_EVENT_QP_FATAL;
		break;
	default:
		break;
	}

	switch (err_event->res_err_state_reason) {
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_EXCEED_MAX:
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_PAYLOAD_LENGTH_MISMATCH:
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_PSN_SEQ_ERROR_RETRY_LIMIT:
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_RX_INVALID_R_KEY:
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_RX_DOMAIN_ERROR:
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_RX_NO_PERMISSION:
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_RX_RANGE_ERROR:
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_TX_INVALID_R_KEY:
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_TX_DOMAIN_ERROR:
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_TX_NO_PERMISSION:
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_TX_RANGE_ERROR:
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_UNALIGN_ATOMIC:
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_PSN_NOT_FOUND:
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_INVALID_DUP_RKEY:
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_IRRQ_FORMAT_ERROR:
		event.event = IB_EVENT_QP_ACCESS_ERR;
		break;
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_EXCEEDS_WQE:
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_WQE_FORMAT_ERROR:
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_UNSUPPORTED_OPCODE:
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_REM_INVALIDATE:
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_OPCODE_ERROR:
		event.event = IB_EVENT_QP_REQ_ERR;
		break;
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_IRRQ_OFLOW:
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_CMP_ERROR:
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_CQ_LOAD_ERROR:
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_TX_PCI_ERROR:
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_RX_PCI_ERROR:
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_MEMORY_ERROR:
		event.event = IB_EVENT_QP_FATAL;
		break;
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_SRQ_LOAD_ERROR:
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_SRQ_ERROR:
		if (srq)
			event.event = IB_EVENT_SRQ_ERR;
		break;
	default:
		break;
	}

	if (err_event->res_err_state_reason || err_event->req_err_state_reason) {
		ibdev_dbg(&qp->rdev->ibdev,
			  "%s %s qp_id: %d cons (%d %d) req (%d %d) res (%d %d)\n",
			  __func__, rdma_is_kernel_res(&qp->ib_qp.res) ? "kernel" : "user",
			  qp->qplib_qp.id,
			  err_event->sq_cons_idx,
			  err_event->rq_cons_idx,
			  err_event->req_slow_path_state,
			  err_event->req_err_state_reason,
			  err_event->res_slow_path_state,
			  err_event->res_err_state_reason);
	} else {
		if (srq)
			event.event = IB_EVENT_QP_LAST_WQE_REACHED;
	}

	if (event.event == IB_EVENT_SRQ_ERR && srq->ib_srq.event_handler) {
		(*srq->ib_srq.event_handler)(&event,
					     srq->ib_srq.srq_context);
	} else if (event.device && qp->ib_qp.event_handler) {
		qp->ib_qp.event_handler(&event, qp->ib_qp.qp_context);
	}

	return 0;
}
static int bnxt_re_handle_cq_async_error(void *event, struct bnxt_re_cq *cq)
{
	struct creq_cq_error_notification *cqerr;
	struct ib_event ibevent = {};

	cqerr = event;
	switch (cqerr->cq_err_reason) {
	case CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_REQ_CQ_INVALID_ERROR:
	case CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_REQ_CQ_OVERFLOW_ERROR:
	case CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_REQ_CQ_LOAD_ERROR:
	case CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_RES_CQ_INVALID_ERROR:
	case CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_RES_CQ_OVERFLOW_ERROR:
	case CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_RES_CQ_LOAD_ERROR:
		ibevent.event = IB_EVENT_CQ_ERR;
		break;
	default:
		break;
	}

	if (ibevent.event == IB_EVENT_CQ_ERR && cq->ib_cq.event_handler) {
		ibevent.element.cq = &cq->ib_cq;
		ibevent.device = &cq->rdev->ibdev;

		ibdev_dbg(&cq->rdev->ibdev,
			  "%s err reason %d\n", __func__, cqerr->cq_err_reason);
		cq->ib_cq.event_handler(&ibevent, cq->ib_cq.cq_context);
	}

	return 0;
}
static int bnxt_re_handle_affi_async_event(struct creq_qp_event *affi_async,
					   void *obj)
{
	struct bnxt_qplib_qp *lib_qp;
	struct bnxt_qplib_cq *lib_cq;
	struct bnxt_re_qp *qp;
	struct bnxt_re_cq *cq;
	int rc = 0;
	u8 event;

	if (!obj)
		return rc; /* QP was already dead, still return success */

	event = affi_async->event;
	switch (event) {
	case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION:
		lib_qp = obj;
		qp = container_of(lib_qp, struct bnxt_re_qp, qplib_qp);
		rc = bnxt_re_handle_qp_async_event(affi_async, qp);
		break;
	case CREQ_QP_EVENT_EVENT_CQ_ERROR_NOTIFICATION:
		lib_cq = obj;
		cq = container_of(lib_cq, struct bnxt_re_cq, qplib_cq);
		rc = bnxt_re_handle_cq_async_error(affi_async, cq);
		break;
	default:
		rc = -EINVAL;
	}
	return rc;
}
static int bnxt_re_aeq_handler(struct bnxt_qplib_rcfw *rcfw,
			       void *aeqe, void *obj)
{
	struct creq_qp_event *affi_async;
	struct creq_func_event *unaffi_async;
	u8 type;
	int rc;

	type = ((struct creq_base *)aeqe)->type;
	if (type == CREQ_BASE_TYPE_FUNC_EVENT) {
		unaffi_async = aeqe;
		rc = bnxt_re_handle_unaffi_async_event(unaffi_async);
	} else {
		affi_async = aeqe;
		rc = bnxt_re_handle_affi_async_event(affi_async, obj);
	}

	return rc;
}
static int bnxt_re_srqn_handler(struct bnxt_qplib_nq *nq,
				struct bnxt_qplib_srq *handle, u8 event)
{
	struct bnxt_re_srq *srq = container_of(handle, struct bnxt_re_srq,
					       qplib_srq);
	struct ib_event ib_event;

	ib_event.device = &srq->rdev->ibdev;
	ib_event.element.srq = &srq->ib_srq;

	if (srq->ib_srq.event_handler) {
		if (event == NQ_SRQ_EVENT_EVENT_SRQ_THRESHOLD_EVENT)
			ib_event.event = IB_EVENT_SRQ_LIMIT_REACHED;

		(*srq->ib_srq.event_handler)(&ib_event,
					     srq->ib_srq.srq_context);
	}

	return 0;
}
static int bnxt_re_cqn_handler(struct bnxt_qplib_nq *nq,
			       struct bnxt_qplib_cq *handle)
{
	struct bnxt_re_cq *cq = container_of(handle, struct bnxt_re_cq,
					     qplib_cq);

	if (cq->ib_cq.comp_handler) {
		/* Lock comp_handler? */
		(*cq->ib_cq.comp_handler)(&cq->ib_cq, cq->ib_cq.cq_context);
	}

	return 0;
}
#define BNXT_RE_GEN_P5_PF_NQ_DB		0x10000
#define BNXT_RE_GEN_P5_VF_NQ_DB		0x4000
static u32 bnxt_re_get_nqdb_offset(struct bnxt_re_dev *rdev, u16 indx)
{
	return bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
		(rdev->is_virtfn ? BNXT_RE_GEN_P5_VF_NQ_DB :
				   BNXT_RE_GEN_P5_PF_NQ_DB) :
		rdev->en_dev->msix_entries[indx].db_offset;
}
static void bnxt_re_cleanup_res(struct bnxt_re_dev *rdev)
{
	int i;

	for (i = 1; i < rdev->num_msix; i++)
		bnxt_qplib_disable_nq(&rdev->nq[i - 1]);

	if (rdev->qplib_res.rcfw)
		bnxt_qplib_cleanup_res(&rdev->qplib_res);
}
static int bnxt_re_init_res(struct bnxt_re_dev *rdev)
{
	int num_vec_enabled = 0;
	int rc = 0, i;
	u32 db_offt;

	bnxt_qplib_init_res(&rdev->qplib_res);

	for (i = 1; i < rdev->num_msix; i++) {
		db_offt = bnxt_re_get_nqdb_offset(rdev, i);
		rc = bnxt_qplib_enable_nq(rdev->en_dev->pdev, &rdev->nq[i - 1],
					  i - 1, rdev->en_dev->msix_entries[i].vector,
					  db_offt, &bnxt_re_cqn_handler,
					  &bnxt_re_srqn_handler);
		if (rc) {
			ibdev_err(&rdev->ibdev,
				  "Failed to enable NQ with rc = 0x%x", rc);
			goto fail;
		}
		num_vec_enabled++;
	}
	return 0;
fail:
	for (i = num_vec_enabled; i >= 0; i--)
		bnxt_qplib_disable_nq(&rdev->nq[i]);
	return rc;
}
static void bnxt_re_free_nq_res(struct bnxt_re_dev *rdev)
{
	u8 type;
	int i;

	for (i = 0; i < rdev->num_msix - 1; i++) {
		type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
		bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id, type);
		bnxt_qplib_free_nq(&rdev->nq[i]);
		rdev->nq[i].res = NULL;
	}
}
static void bnxt_re_free_res(struct bnxt_re_dev *rdev)
{
	bnxt_re_free_nq_res(rdev);

	if (rdev->qplib_res.dpi_tbl.max) {
		bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
				       &rdev->dpi_privileged);
	}
	if (rdev->qplib_res.rcfw) {
		bnxt_qplib_free_res(&rdev->qplib_res);
		rdev->qplib_res.rcfw = NULL;
	}
}
static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
{
	struct bnxt_re_ring_attr rattr = {};
	int num_vec_created = 0;
	int rc, i;
	u8 type;

	/* Configure and allocate resources for qplib */
	rdev->qplib_res.rcfw = &rdev->rcfw;
	rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr);
	if (rc)
		goto fail;

	rc = bnxt_qplib_alloc_res(&rdev->qplib_res, rdev->en_dev->pdev,
				  rdev->netdev, &rdev->dev_attr);
	if (rc)
		goto fail;

	rc = bnxt_qplib_alloc_dpi(&rdev->qplib_res,
				  &rdev->dpi_privileged,
				  rdev, BNXT_QPLIB_DPI_TYPE_KERNEL);
	if (rc)
		goto dealloc_res;

	for (i = 0; i < rdev->num_msix - 1; i++) {
		struct bnxt_qplib_nq *nq;

		nq = &rdev->nq[i];
		nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT;
		rc = bnxt_qplib_alloc_nq(&rdev->qplib_res, &rdev->nq[i]);
		if (rc) {
			ibdev_err(&rdev->ibdev, "Alloc Failed NQ%d rc:%#x",
				  i, rc);
			goto free_nq;
		}
		type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
		rattr.dma_arr = nq->hwq.pbl[PBL_LVL_0].pg_map_arr;
		rattr.pages = nq->hwq.pbl[rdev->nq[i].hwq.level].pg_count;
		rattr.type = type;
		rattr.mode = RING_ALLOC_REQ_INT_MODE_MSIX;
		rattr.depth = BNXT_QPLIB_NQE_MAX_CNT - 1;
		rattr.lrid = rdev->en_dev->msix_entries[i + 1].ring_idx;
		rc = bnxt_re_net_ring_alloc(rdev, &rattr, &nq->ring_id);
		if (rc) {
			ibdev_err(&rdev->ibdev,
				  "Failed to allocate NQ fw id with rc = 0x%x",
				  rc);
			bnxt_qplib_free_nq(&rdev->nq[i]);
			goto free_nq;
		}
		num_vec_created++;
	}
	return 0;
free_nq:
	for (i = num_vec_created - 1; i >= 0; i--) {
		type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
		bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id, type);
		bnxt_qplib_free_nq(&rdev->nq[i]);
	}
	bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
			       &rdev->dpi_privileged);
dealloc_res:
	bnxt_qplib_free_res(&rdev->qplib_res);

fail:
	rdev->qplib_res.rcfw = NULL;
	return rc;
}
static void bnxt_re_dispatch_event(struct ib_device *ibdev, struct ib_qp *qp,
				   u8 port_num, enum ib_event_type event)
{
	struct ib_event ib_event;

	ib_event.device = ibdev;
	if (qp) {
		ib_event.element.qp = qp;
		ib_event.event = event;
		if (qp->event_handler)
			qp->event_handler(&ib_event, qp->qp_context);

	} else {
		ib_event.element.port_num = port_num;
		ib_event.event = event;
		ib_dispatch_event(&ib_event);
	}
}
static bool bnxt_re_is_qp1_or_shadow_qp(struct bnxt_re_dev *rdev,
					struct bnxt_re_qp *qp)
{
	return (qp->ib_qp.qp_type == IB_QPT_GSI) ||
	       (qp == rdev->gsi_ctx.gsi_sqp);
}
static void bnxt_re_dev_stop(struct bnxt_re_dev *rdev)
{
	int mask = IB_QP_STATE;
	struct ib_qp_attr qp_attr;
	struct bnxt_re_qp *qp;

	qp_attr.qp_state = IB_QPS_ERR;
	mutex_lock(&rdev->qp_lock);
	list_for_each_entry(qp, &rdev->qp_list, list) {
		/* Modify the state of all QPs except QP1/Shadow QP */
		if (!bnxt_re_is_qp1_or_shadow_qp(rdev, qp)) {
			if (qp->qplib_qp.state !=
			    CMDQ_MODIFY_QP_NEW_STATE_RESET &&
			    qp->qplib_qp.state !=
			    CMDQ_MODIFY_QP_NEW_STATE_ERR) {
				bnxt_re_dispatch_event(&rdev->ibdev, &qp->ib_qp,
						       1, IB_EVENT_QP_FATAL);
				bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, mask,
						  NULL);
			}
		}
	}
	mutex_unlock(&rdev->qp_lock);
}
static int bnxt_re_update_gid(struct bnxt_re_dev *rdev)
{
	struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
	struct bnxt_qplib_gid gid;
	u16 gid_idx, index;
	int rc = 0;

	if (!ib_device_try_get(&rdev->ibdev))
		return 0;

	for (index = 0; index < sgid_tbl->active; index++) {
		gid_idx = sgid_tbl->hw_id[index];

		if (!memcmp(&sgid_tbl->tbl[index], &bnxt_qplib_gid_zero,
			    sizeof(bnxt_qplib_gid_zero)))
			continue;
		/* need to modify the VLAN enable setting of non VLAN GID only
		 * as setting is done for VLAN GID while adding GID
		 */
		if (sgid_tbl->vlan[index])
			continue;

		memcpy(&gid, &sgid_tbl->tbl[index], sizeof(gid));

		rc = bnxt_qplib_update_sgid(sgid_tbl, &gid, gid_idx,
					    rdev->qplib_res.netdev->dev_addr);
	}

	ib_device_put(&rdev->ibdev);
	return rc;
}
static u32 bnxt_re_get_priority_mask(struct bnxt_re_dev *rdev)
{
	u32 prio_map = 0, tmp_map = 0;
	struct net_device *netdev;
	struct dcb_app app = {};

	netdev = rdev->netdev;

	app.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE;
	app.protocol = ETH_P_IBOE;
	tmp_map = dcb_ieee_getapp_mask(netdev, &app);
	prio_map = tmp_map;

	app.selector = IEEE_8021QAZ_APP_SEL_DGRAM;
	app.protocol = ROCE_V2_UDP_DPORT;
	tmp_map = dcb_ieee_getapp_mask(netdev, &app);
	prio_map |= tmp_map;

	return prio_map;
}
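
/* Illustrative result for the two lookups above: if the DCB app table
 * maps the RoCE ethertype or the RoCE v2 UDP port to priority 3,
 * dcb_ieee_getapp_mask() reports BIT(3) and prio_map ends up 0x08;
 * with no matching app entries it stays 0 and bnxt_re_setup_qos()
 * leaves priority VLAN tagging disabled.
 */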
static int bnxt_re_setup_qos(struct bnxt_re_dev *rdev)
{
	u8 prio_map = 0;

	/* Get priority for roce */
	prio_map = bnxt_re_get_priority_mask(rdev);

	if (prio_map == rdev->cur_prio_map)
		return 0;
	rdev->cur_prio_map = prio_map;
	/* Actual priorities are not programmed as they are already
	 * done by L2 driver; just enable or disable priority vlan tagging
	 */
	if ((prio_map == 0 && rdev->qplib_res.prio) ||
	    (prio_map != 0 && !rdev->qplib_res.prio)) {
		rdev->qplib_res.prio = prio_map;
		bnxt_re_update_gid(rdev);
	}

	return 0;
}
*rdev
)
1488 struct bnxt_en_dev
*en_dev
= rdev
->en_dev
;
1489 struct hwrm_ver_get_output resp
= {};
1490 struct hwrm_ver_get_input req
= {};
1491 struct bnxt_qplib_chip_ctx
*cctx
;
1492 struct bnxt_fw_msg fw_msg
= {};
1495 bnxt_re_init_hwrm_hdr((void *)&req
, HWRM_VER_GET
);
1496 req
.hwrm_intf_maj
= HWRM_VERSION_MAJOR
;
1497 req
.hwrm_intf_min
= HWRM_VERSION_MINOR
;
1498 req
.hwrm_intf_upd
= HWRM_VERSION_UPDATE
;
1499 bnxt_re_fill_fw_msg(&fw_msg
, (void *)&req
, sizeof(req
), (void *)&resp
,
1500 sizeof(resp
), DFLT_HWRM_CMD_TIMEOUT
);
1501 rc
= bnxt_send_msg(en_dev
, &fw_msg
);
1503 ibdev_err(&rdev
->ibdev
, "Failed to query HW version, rc = 0x%x",
1508 cctx
= rdev
->chip_ctx
;
1509 cctx
->hwrm_intf_ver
=
1510 (u64
)le16_to_cpu(resp
.hwrm_intf_major
) << 48 |
1511 (u64
)le16_to_cpu(resp
.hwrm_intf_minor
) << 32 |
1512 (u64
)le16_to_cpu(resp
.hwrm_intf_build
) << 16 |
1513 le16_to_cpu(resp
.hwrm_intf_patch
);
1515 cctx
->hwrm_cmd_max_timeout
= le16_to_cpu(resp
.max_req_timeout
);
1517 if (!cctx
->hwrm_cmd_max_timeout
)
1518 cctx
->hwrm_cmd_max_timeout
= RCFW_FW_STALL_MAX_TIMEOUT
;
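
/* Packing example for hwrm_intf_ver above: an illustrative firmware
 * interface version 1.10.2.95 is stored as
 * (1ULL << 48) | (10ULL << 32) | (2ULL << 16) | 95, so interface
 * versions can be compared with a single 64-bit comparison.
 */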
static int bnxt_re_ib_init(struct bnxt_re_dev *rdev)
{
	int rc;
	u32 event;

	/* Register ib dev */
	rc = bnxt_re_register_ib(rdev);
	if (rc) {
		pr_err("Failed to register with IB: %#x\n", rc);
		return rc;
	}
	dev_info(rdev_to_dev(rdev), "Device registered with IB successfully");
	set_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags);

	event = netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev) ?
		IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;

	bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, event);

	return rc;
}
static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev)
{
	u8 type;
	int rc;

	if (test_and_clear_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags))
		cancel_delayed_work_sync(&rdev->worker);

	if (test_and_clear_bit(BNXT_RE_FLAG_RESOURCES_INITIALIZED,
			       &rdev->flags))
		bnxt_re_cleanup_res(rdev);
	if (test_and_clear_bit(BNXT_RE_FLAG_RESOURCES_ALLOCATED, &rdev->flags))
		bnxt_re_free_res(rdev);

	if (test_and_clear_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags)) {
		rc = bnxt_qplib_deinit_rcfw(&rdev->rcfw);
		if (rc)
			ibdev_warn(&rdev->ibdev,
				   "Failed to deinitialize RCFW: %#x", rc);
		bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id);
		bnxt_qplib_free_ctx(&rdev->qplib_res, &rdev->qplib_ctx);
		bnxt_qplib_disable_rcfw_channel(&rdev->rcfw);
		type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
		bnxt_re_net_ring_free(rdev, rdev->rcfw.creq.ring_id, type);
		bnxt_qplib_free_rcfw_channel(&rdev->rcfw);
	}

	rdev->num_msix = 0;

	if (rdev->pacing.dbr_pacing)
		bnxt_re_deinitialize_dbr_pacing(rdev);

	bnxt_re_destroy_chip_ctx(rdev);
	if (test_and_clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags))
		bnxt_unregister_dev(rdev->en_dev);
}
/* worker thread for polling periodic events. Now used for QoS programming */
static void bnxt_re_worker(struct work_struct *work)
{
	struct bnxt_re_dev *rdev = container_of(work, struct bnxt_re_dev,
						worker.work);

	bnxt_re_setup_qos(rdev);
	schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000));
}
static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 wqe_mode)
{
	struct bnxt_re_ring_attr rattr = {};
	struct bnxt_qplib_creq_ctx *creq;
	u32 db_offt;
	int vid;
	u8 type;
	int rc;

	/* Registered a new RoCE device instance to netdev */
	rc = bnxt_re_register_netdev(rdev);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to register with netdev: %#x\n", rc);
		return -EINVAL;
	}
	set_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags);

	rc = bnxt_re_setup_chip_ctx(rdev, wqe_mode);
	if (rc) {
		bnxt_unregister_dev(rdev->en_dev);
		clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags);
		ibdev_err(&rdev->ibdev, "Failed to get chip context\n");
		return -EINVAL;
	}

	/* Check whether VF or PF */
	bnxt_re_get_sriov_func_type(rdev);

	if (!rdev->en_dev->ulp_tbl->msix_requested) {
		ibdev_err(&rdev->ibdev,
			  "Failed to get MSI-X vectors: %#x\n", rc);
		rc = -EINVAL;
		goto fail;
	}
	ibdev_dbg(&rdev->ibdev, "Got %d MSI-X vectors\n",
		  rdev->en_dev->ulp_tbl->msix_requested);
	rdev->num_msix = rdev->en_dev->ulp_tbl->msix_requested;

	bnxt_re_query_hwrm_intf_version(rdev);

	/* Establish RCFW Communication Channel to initialize the context
	 * memory for the function and all child VFs
	 */
	rc = bnxt_qplib_alloc_rcfw_channel(&rdev->qplib_res, &rdev->rcfw,
					   &rdev->qplib_ctx,
					   BNXT_RE_MAX_QPC_COUNT);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to allocate RCFW Channel: %#x\n", rc);
		goto fail;
	}

	type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
	creq = &rdev->rcfw.creq;
	rattr.dma_arr = creq->hwq.pbl[PBL_LVL_0].pg_map_arr;
	rattr.pages = creq->hwq.pbl[creq->hwq.level].pg_count;
	rattr.type = type;
	rattr.mode = RING_ALLOC_REQ_INT_MODE_MSIX;
	rattr.depth = BNXT_QPLIB_CREQE_MAX_CNT - 1;
	rattr.lrid = rdev->en_dev->msix_entries[BNXT_RE_AEQ_IDX].ring_idx;
	rc = bnxt_re_net_ring_alloc(rdev, &rattr, &creq->ring_id);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to allocate CREQ: %#x\n", rc);
		goto free_rcfw;
	}
	db_offt = bnxt_re_get_nqdb_offset(rdev, BNXT_RE_AEQ_IDX);
	vid = rdev->en_dev->msix_entries[BNXT_RE_AEQ_IDX].vector;
	rc = bnxt_qplib_enable_rcfw_channel(&rdev->rcfw,
					    vid, db_offt,
					    &bnxt_re_aeq_handler);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to enable RCFW channel: %#x\n",
			  rc);
		goto free_ring;
	}

	if (bnxt_qplib_dbr_pacing_en(rdev->chip_ctx)) {
		rc = bnxt_re_initialize_dbr_pacing(rdev);
		if (!rc) {
			rdev->pacing.dbr_pacing = true;
		} else {
			ibdev_err(&rdev->ibdev,
				  "DBR pacing disabled with error : %d\n", rc);
			rdev->pacing.dbr_pacing = false;
		}
	}
	rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr);
	if (rc)
		goto disable_rcfw;

	bnxt_re_set_resource_limits(rdev);

	rc = bnxt_qplib_alloc_ctx(&rdev->qplib_res, &rdev->qplib_ctx, 0,
				  bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx));
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to allocate QPLIB context: %#x\n", rc);
		goto disable_rcfw;
	}
	rc = bnxt_re_net_stats_ctx_alloc(rdev,
					 rdev->qplib_ctx.stats.dma_map,
					 &rdev->qplib_ctx.stats.fw_id);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to allocate stats context: %#x\n", rc);
		goto free_ctx;
	}

	rc = bnxt_qplib_init_rcfw(&rdev->rcfw, &rdev->qplib_ctx,
				  rdev->is_virtfn);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to initialize RCFW: %#x\n", rc);
		goto free_sctx;
	}
	set_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags);

	/* Resources based on the 'new' device caps */
	rc = bnxt_re_alloc_res(rdev);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to allocate resources: %#x\n", rc);
		goto fail;
	}
	set_bit(BNXT_RE_FLAG_RESOURCES_ALLOCATED, &rdev->flags);
	rc = bnxt_re_init_res(rdev);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to initialize resources: %#x\n", rc);
		goto fail;
	}

	set_bit(BNXT_RE_FLAG_RESOURCES_INITIALIZED, &rdev->flags);

	if (!rdev->is_virtfn) {
		rc = bnxt_re_setup_qos(rdev);
		if (rc)
			ibdev_info(&rdev->ibdev,
				   "RoCE priority not yet configured\n");

		INIT_DELAYED_WORK(&rdev->worker, bnxt_re_worker);
		set_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags);
		schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000));
		/*
		 * Use the total VF count since the actual VF count may not be
		 * available at this point.
		 */
		bnxt_re_vf_res_config(rdev);
	}

	return 0;
free_sctx:
	bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id);
free_ctx:
	bnxt_qplib_free_ctx(&rdev->qplib_res, &rdev->qplib_ctx);
disable_rcfw:
	bnxt_qplib_disable_rcfw_channel(&rdev->rcfw);
free_ring:
	type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
	bnxt_re_net_ring_free(rdev, rdev->rcfw.creq.ring_id, type);
free_rcfw:
	bnxt_qplib_free_rcfw_channel(&rdev->rcfw);
fail:
	bnxt_re_dev_uninit(rdev);

	return rc;
}
static int bnxt_re_add_device(struct auxiliary_device *adev, u8 wqe_mode)
{
	struct bnxt_aux_priv *aux_priv =
		container_of(adev, struct bnxt_aux_priv, aux_dev);
	struct bnxt_en_dev *en_dev;
	struct bnxt_re_dev *rdev;
	int rc;

	/* en_dev should never be NULL as long as adev and aux_dev are valid. */
	en_dev = aux_priv->edev;

	rdev = bnxt_re_dev_add(aux_priv, en_dev);
	if (!rdev || !rdev_to_dev(rdev)) {
		rc = -ENOMEM;
		goto exit;
	}

	rc = bnxt_re_dev_init(rdev, wqe_mode);
	if (rc)
		goto re_dev_dealloc;

	rc = bnxt_re_ib_init(rdev);
	if (rc) {
		pr_err("Failed to register with IB: %s",
		       aux_priv->aux_dev.name);
		goto re_dev_uninit;
	}
	auxiliary_set_drvdata(adev, rdev);

	return 0;

re_dev_uninit:
	bnxt_re_dev_uninit(rdev);
re_dev_dealloc:
	ib_dealloc_device(&rdev->ibdev);
exit:
	return rc;
}
static void bnxt_re_setup_cc(struct bnxt_re_dev *rdev, bool enable)
{
	struct bnxt_qplib_cc_param cc_param = {};

	/* Do not enable congestion control on VFs */
	if (rdev->is_virtfn)
		return;

	/* Currently enabling only for GenP5 adapters */
	if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx))
		return;

	if (enable) {
		cc_param.enable = 1;
		cc_param.cc_mode = CMDQ_MODIFY_ROCE_CC_CC_MODE_PROBABILISTIC_CC_MODE;
	}

	cc_param.mask = (CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_CC_MODE |
			 CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_ENABLE_CC |
			 CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TOS_ECN);

	if (bnxt_qplib_modify_cc(&rdev->qplib_res, &cc_param))
		ibdev_err(&rdev->ibdev, "Failed to setup CC enable = %d\n", enable);
}
/*
 * "Notifier chain callback can be invoked for the same chain from
 * different CPUs at the same time".
 *
 * For cases when the netdev is already present, our call to the
 * register_netdevice_notifier() will actually get the rtnl_lock()
 * before sending NETDEV_REGISTER and (if up) NETDEV_UP
 * events.
 *
 * But for cases when the netdev is not already present, the notifier
 * chain is subjected to be invoked from different CPUs simultaneously.
 *
 * This is protected by the netdev_mutex.
 */
static int bnxt_re_netdev_event(struct notifier_block *notifier,
				unsigned long event, void *ptr)
{
	struct net_device *real_dev, *netdev = netdev_notifier_info_to_dev(ptr);
	struct bnxt_re_dev *rdev;

	real_dev = rdma_vlan_dev_real_dev(netdev);
	if (!real_dev)
		real_dev = netdev;

	if (real_dev != netdev)
		goto exit;

	rdev = bnxt_re_from_netdev(real_dev);
	if (!rdev)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UP:
	case NETDEV_DOWN:
	case NETDEV_CHANGE:
		bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1,
				       netif_carrier_ok(real_dev) ?
				       IB_EVENT_PORT_ACTIVE :
				       IB_EVENT_PORT_ERR);
		break;
	default:
		break;
	}
	ib_device_put(&rdev->ibdev);
exit:
	return NOTIFY_DONE;
}
1874 static void bnxt_re_remove(struct auxiliary_device
*adev
)
1876 struct bnxt_re_dev
*rdev
= auxiliary_get_drvdata(adev
);
1881 mutex_lock(&bnxt_re_mutex
);
1882 if (rdev
->nb
.notifier_call
) {
1883 unregister_netdevice_notifier(&rdev
->nb
);
1884 rdev
->nb
.notifier_call
= NULL
;
1886 /* If notifier is null, we should have already done a
1887 * clean up before coming here.
1891 bnxt_re_setup_cc(rdev
, false);
1892 ib_unregister_device(&rdev
->ibdev
);
1893 bnxt_re_dev_uninit(rdev
);
1894 ib_dealloc_device(&rdev
->ibdev
);
1896 mutex_unlock(&bnxt_re_mutex
);
static int bnxt_re_probe(struct auxiliary_device *adev,
			 const struct auxiliary_device_id *id)
{
	struct bnxt_re_dev *rdev;
	int rc;

	mutex_lock(&bnxt_re_mutex);
	rc = bnxt_re_add_device(adev, BNXT_QPLIB_WQE_MODE_STATIC);
	if (rc) {
		mutex_unlock(&bnxt_re_mutex);
		return rc;
	}

	rdev = auxiliary_get_drvdata(adev);

	rdev->nb.notifier_call = bnxt_re_netdev_event;
	rc = register_netdevice_notifier(&rdev->nb);
	if (rc) {
		rdev->nb.notifier_call = NULL;
		pr_err("%s: Cannot register to netdevice_notifier",
		       ROCE_DRV_MODULE_NAME);
		goto err;
	}

	bnxt_re_setup_cc(rdev, true);
	mutex_unlock(&bnxt_re_mutex);
	return 0;

err:
	mutex_unlock(&bnxt_re_mutex);
	bnxt_re_remove(adev);

	return rc;
}
static int bnxt_re_suspend(struct auxiliary_device *adev, pm_message_t state)
{
	struct bnxt_re_dev *rdev = auxiliary_get_drvdata(adev);

	if (!rdev)
		return 0;

	mutex_lock(&bnxt_re_mutex);
	/* L2 driver may invoke this callback during device error/crash or
	 * device reset. The current RoCE driver doesn't recover the device
	 * in case of error. Handle the error by dispatching fatal events to
	 * all QPs, i.e. by calling bnxt_re_dev_stop, and release the MSI-X
	 * vectors, as the L2 driver wants to modify the MSI-X table.
	 */

	ibdev_info(&rdev->ibdev, "Handle device suspend call");
	/* Check the current device state from bnxt_en_dev and move the
	 * device to detached state if FW_FATAL_COND is set.
	 * This prevents more commands to HW during clean-up,
	 * in case the device is already in error.
	 */
	if (test_bit(BNXT_STATE_FW_FATAL_COND, &rdev->en_dev->en_state))
		set_bit(ERR_DEVICE_DETACHED, &rdev->rcfw.cmdq.flags);

	bnxt_re_dev_stop(rdev);
	bnxt_re_stop_irq(rdev);
	/* Move the device states to detached and avoid sending any more
	 * commands to HW
	 */
	set_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags);
	set_bit(ERR_DEVICE_DETACHED, &rdev->rcfw.cmdq.flags);
	wake_up_all(&rdev->rcfw.cmdq.waitq);
	mutex_unlock(&bnxt_re_mutex);

	return 0;
}
static int bnxt_re_resume(struct auxiliary_device *adev)
{
	struct bnxt_re_dev *rdev = auxiliary_get_drvdata(adev);

	if (!rdev)
		return 0;

	mutex_lock(&bnxt_re_mutex);
	/* L2 driver may invoke this callback during device recovery, resume
	 * or reset. The current RoCE driver doesn't recover the device in
	 * case of error. Handle the error by dispatching fatal events to
	 * all QPs, i.e. by calling bnxt_re_dev_stop, and release the MSI-X
	 * vectors, as the L2 driver wants to modify the MSI-X table.
	 */

	ibdev_info(&rdev->ibdev, "Handle device resume call");
	mutex_unlock(&bnxt_re_mutex);

	return 0;
}
static const struct auxiliary_device_id bnxt_re_id_table[] = {
	{ .name = BNXT_ADEV_NAME ".rdma", },
	{},
};

MODULE_DEVICE_TABLE(auxiliary, bnxt_re_id_table);
static struct auxiliary_driver bnxt_re_driver = {
	.name = "rdma",
	.probe = bnxt_re_probe,
	.remove = bnxt_re_remove,
	.shutdown = bnxt_re_shutdown,
	.suspend = bnxt_re_suspend,
	.resume = bnxt_re_resume,
	.id_table = bnxt_re_id_table,
};
static int __init bnxt_re_mod_init(void)
{
	int rc;

	pr_info("%s: %s", ROCE_DRV_MODULE_NAME, version);
	rc = auxiliary_driver_register(&bnxt_re_driver);
	if (rc) {
		pr_err("%s: Failed to register auxiliary driver\n",
		       ROCE_DRV_MODULE_NAME);
		return rc;
	}
	return 0;
}
static void __exit bnxt_re_mod_exit(void)
{
	auxiliary_driver_unregister(&bnxt_re_driver);
}

module_init(bnxt_re_mod_init);
module_exit(bnxt_re_mod_exit);