From: Linus Torvalds
Date: Sat, 4 Oct 2025 01:35:22 +0000 (-0700)
Subject: Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=2ccb4d203fe4bec72fb333ccc2feb71a462c188d;p=thirdparty%2Fkernel%2Fstable.git

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma updates from Jason Gunthorpe:
 "A new Pensando ionic driver, new Gen 3 HW support for Intel irdma, and
  lots of small bnxt_re improvements.

  - Small bug fixes and improvements to hfi1, efa, mlx5, erdma, rdmavt,
    siw

  - Allow userspace access to IB service records through the rdmacm

  - Optimize dma mapping for erdma

  - Fix shutdown of the GSI QP in mana

  - Support relaxed ordering MR and fix a corruption bug with mlx5 DMA
    Data Direct

  - Many improvements to bnxt_re:
      - Debugging features and counters
      - Improve performance of some commands
      - Change flow_label reporting in completions
      - Mirror vnic
      - RDMA flow support

  - New RDMA driver for Pensando Ethernet devices: ionic

  - Gen 3 hardware support for the Intel irdma driver

  - Fix rdma routing resolution with VRFs"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (85 commits)
  RDMA/ionic: Fix memory leak of admin q_wr
  RDMA/siw: Always report immediate post SQ errors
  RDMA/bnxt_re: improve clarity in ALLOC_PAGE handler
  RDMA/irdma: Remove unused struct irdma_cq fields
  RDMA/irdma: Fix positive vs negative error codes in irdma_post_send()
  RDMA/bnxt_re: Remove non-statistics counters from hw_counters
  RDMA/bnxt_re: Add debugfs info entry for device and resource information
  RDMA/bnxt_re: Fix incorrect errno used in function comments
  RDMA: Use %pe format specifier for error pointers
  RDMA/ionic: Use ether_addr_copy instead of memcpy
  RDMA/ionic: Fix build failure on SPARC due to xchg() operand size
  RDMA/rxe: Fix race in do_task() when draining
  IB/sa: Fix sa_local_svc_timeout_ms read race
  IB/ipoib: Ignore L3 master device
  RDMA/core: Use route entry flag to decide on loopback traffic
  RDMA/core: Resolve MAC of next-hop device without ARP support
  RDMA/core: Squash a single user static function
  RDMA/irdma: Update Kconfig
  RDMA/irdma: Extend CQE Error and Flush Handling for GEN3 Devices
  RDMA/irdma: Add Atomic Operations support
  ...
---

2ccb4d203fe4bec72fb333ccc2feb71a462c188d
diff --cc Documentation/networking/device_drivers/ethernet/index.rst
index 0b0a3eef6aae1,1fabfe02eb128..7cfcd183054f8
--- a/Documentation/networking/device_drivers/ethernet/index.rst
+++ b/Documentation/networking/device_drivers/ethernet/index.rst
@@@ -50,7 -50,7 +50,8 @@@ Contents
     neterion/s2io
     netronome/nfp
     pensando/ionic
+    pensando/ionic_rdma
 +   qualcomm/ppe/ppe
     smsc/smc9
     stmicro/stmmac
     ti/cpsw
diff --cc drivers/infiniband/hw/bnxt_re/main.c
index df7cf8d68e273,d822cdc82e659..b13810572c2e8
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@@ -2017,28 -2045,72 +2045,94 @@@ static void bnxt_re_free_nqr_mem(struc
  	rdev->nqr = NULL;
  }
  
 +/* When DEL_GID fails, driver is not freeing GID ctx memory.
 + * To avoid the memory leak, free the memory during unload
 + */
 +static void bnxt_re_free_gid_ctx(struct bnxt_re_dev *rdev)
 +{
 +	struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
 +	struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
 +	int i;
 +
 +	if (!sgid_tbl->active)
 +		return;
 +
 +	ctx_tbl = sgid_tbl->ctx;
 +	for (i = 0; i < sgid_tbl->max; i++) {
 +		if (sgid_tbl->hw_id[i] == 0xFFFF)
 +			continue;
 +
 +		ctx = ctx_tbl[i];
 +		kfree(ctx);
 +	}
 +}
 +
+ static int bnxt_re_get_stats_ctx(struct bnxt_re_dev *rdev)
+ {
+ 	struct bnxt_qplib_ctx *hctx = &rdev->qplib_ctx;
+ 	struct bnxt_qplib_res *res = &rdev->qplib_res;
+ 	int rc;
+ 
+ 	rc = bnxt_qplib_alloc_stats_ctx(res->pdev, res->cctx, &hctx->stats);
+ 	if (rc)
+ 		return rc;
+ 
+ 	rc = bnxt_re_net_stats_ctx_alloc(rdev, &hctx->stats);
+ 	if (rc)
+ 		goto free_stat_mem;
+ 
+ 	return 0;
+ free_stat_mem:
+ 	bnxt_qplib_free_stats_ctx(res->pdev, &hctx->stats);
+ 
+ 	return rc;
+ }
+ 
+ static int bnxt_re_get_stats3_ctx(struct bnxt_re_dev *rdev)
+ {
+ 	struct bnxt_qplib_ctx *hctx = &rdev->qplib_ctx;
+ 	struct bnxt_qplib_res *res = &rdev->qplib_res;
+ 	int rc;
+ 
+ 	if (!rdev->rcfw.roce_mirror)
+ 		return 0;
+ 
+ 	rc = bnxt_qplib_alloc_stats_ctx(res->pdev, res->cctx, &hctx->stats3);
+ 	if (rc)
+ 		return rc;
+ 
+ 	rc = bnxt_re_net_stats_ctx_alloc(rdev, &hctx->stats3);
+ 	if (rc)
+ 		goto free_stat_mem;
+ 
+ 	return 0;
+ free_stat_mem:
+ 	bnxt_qplib_free_stats_ctx(res->pdev, &hctx->stats3);
+ 
+ 	return rc;
+ }
+ 
+ static void bnxt_re_put_stats3_ctx(struct bnxt_re_dev *rdev)
+ {
+ 	struct bnxt_qplib_ctx *hctx = &rdev->qplib_ctx;
+ 	struct bnxt_qplib_res *res = &rdev->qplib_res;
+ 
+ 	if (!rdev->rcfw.roce_mirror)
+ 		return;
+ 
+ 	bnxt_re_net_stats_ctx_free(rdev, hctx->stats3.fw_id);
+ 	bnxt_qplib_free_stats_ctx(res->pdev, &hctx->stats3);
+ }
+ 
+ static void bnxt_re_put_stats_ctx(struct bnxt_re_dev *rdev)
+ {
+ 	struct bnxt_qplib_ctx *hctx = &rdev->qplib_ctx;
+ 	struct bnxt_qplib_res *res = &rdev->qplib_res;
+ 
+ 	bnxt_re_net_stats_ctx_free(rdev, hctx->stats.fw_id);
+ 	bnxt_qplib_free_stats_ctx(res->pdev, &hctx->stats);
+ }
+ 
  static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev, u8 op_type)
  {
  	u8 type;
@@@ -2049,10 -2121,8 +2143,9 @@@
  	bnxt_re_net_unregister_async_event(rdev);
  	bnxt_re_uninit_dcb_wq(rdev);
  
- 	if (test_and_clear_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags))
- 		cancel_delayed_work_sync(&rdev->worker);
+ 	bnxt_re_put_stats3_ctx(rdev);
  
 +	bnxt_re_free_gid_ctx(rdev);
  	if (test_and_clear_bit(BNXT_RE_FLAG_RESOURCES_INITIALIZED, &rdev->flags))
  		bnxt_re_cleanup_res(rdev);
diff --cc drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 1d0e0e7362bdc,751840fff0dcd..3fc33b1b4dfb1
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@@ -9703,8 -9672,8 +9703,10 @@@ static int __bnxt_hwrm_func_qcaps(struc
  		bp->fw_cap |= BNXT_FW_CAP_ROCE_VF_RESC_MGMT_SUPPORTED;
  
  	flags_ext3 = le32_to_cpu(resp->flags_ext3);
 +	if (flags_ext3 & FUNC_QCAPS_RESP_FLAGS_EXT3_ROCE_VF_DYN_ALLOC_SUPPORT)
 +		bp->fw_cap |= BNXT_FW_CAP_ROCE_VF_DYN_ALLOC_SUPPORT;
+ 	if (flags_ext3 & FUNC_QCAPS_RESP_FLAGS_EXT3_MIRROR_ON_ROCE_SUPPORTED)
+ 		bp->fw_cap |= BNXT_FW_CAP_MIRROR_ON_ROCE;
  
  	bp->tx_push_thresh = 0;
  	if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&