From: Mustafa Ismail
Date: Wed, 27 Aug 2025 15:25:36 +0000 (-0500)
Subject: RDMA/irdma: Introduce GEN3 vPort driver support
X-Git-Tag: v6.18-rc1~102^2~26
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=2ad49ae7330b8a456edf639c92241a343641a763;p=thirdparty%2Flinux.git

RDMA/irdma: Introduce GEN3 vPort driver support

In the IPU model, a function can host one or more logical network
endpoints called vPorts. Each vPort may be associated with either a
physical or an internal communication port, and can be RDMA capable.
A vPort features a netdev and, if RDMA capable, must have an
associated ib_dev.

This change introduces a GEN3 auxiliary vPort driver responsible for
registering a verbs device for every RDMA-capable vPort. Additionally,
the UAPI is updated to prevent the binding of GEN3 devices to older
user-space providers.

Signed-off-by: Mustafa Ismail
Signed-off-by: Tatyana Nikolova
Link: https://patch.msgid.link/20250827152545.2056-8-tatyana.e.nikolova@intel.com
Tested-by: Jacob Moroni
Signed-off-by: Leon Romanovsky
---

diff --git a/drivers/infiniband/hw/irdma/main.c b/drivers/infiniband/hw/irdma/main.c
index 162f1fab32b58..95957d52883dd 100644
--- a/drivers/infiniband/hw/irdma/main.c
+++ b/drivers/infiniband/hw/irdma/main.c
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
 /* Copyright (c) 2015 - 2021 Intel Corporation */
 #include "main.h"
+#include
 
 MODULE_ALIAS("i40iw");
 MODULE_DESCRIPTION("Intel(R) Ethernet Protocol Driver for RDMA");
@@ -46,6 +47,113 @@ void irdma_log_invalid_mtu(u16 mtu, struct irdma_sc_dev *dev)
 		ibdev_warn(to_ibdev(dev), "MTU setting [%d] too low for RDMA traffic. Minimum MTU is 1280 for IPv6\n",
 			   mtu);
 }
 
+static void ig3rdma_idc_vport_event_handler(struct iidc_rdma_vport_dev_info *cdev_info,
+					    struct iidc_rdma_event *event)
+{
+	struct irdma_device *iwdev = auxiliary_get_drvdata(cdev_info->adev);
+	struct irdma_l2params l2params = {};
+
+	if (*event->type & BIT(IIDC_RDMA_EVENT_AFTER_MTU_CHANGE)) {
+		ibdev_dbg(&iwdev->ibdev, "CLNT: new MTU = %d\n", iwdev->netdev->mtu);
+		if (iwdev->vsi.mtu != iwdev->netdev->mtu) {
+			l2params.mtu = iwdev->netdev->mtu;
+			l2params.mtu_changed = true;
+			irdma_log_invalid_mtu(l2params.mtu, &iwdev->rf->sc_dev);
+			irdma_change_l2params(&iwdev->vsi, &l2params);
+		}
+	}
+}
+
+static int ig3rdma_vport_probe(struct auxiliary_device *aux_dev,
+			       const struct auxiliary_device_id *id)
+{
+	struct iidc_rdma_vport_auxiliary_dev *idc_adev =
+		container_of(aux_dev, struct iidc_rdma_vport_auxiliary_dev, adev);
+	struct auxiliary_device *aux_core_dev = idc_adev->vdev_info->core_adev;
+	struct irdma_pci_f *rf = auxiliary_get_drvdata(aux_core_dev);
+	struct irdma_l2params l2params = {};
+	struct irdma_device *iwdev;
+	int err;
+
+	if (!rf) {
+		WARN_ON_ONCE(1);
+		return -ENOMEM;
+	}
+	iwdev = ib_alloc_device(irdma_device, ibdev);
+	/* Fill iwdev info */
+	iwdev->is_vport = true;
+	iwdev->rf = rf;
+	iwdev->vport_id = idc_adev->vdev_info->vport_id;
+	iwdev->netdev = idc_adev->vdev_info->netdev;
+	iwdev->init_state = INITIAL_STATE;
+	iwdev->roce_cwnd = IRDMA_ROCE_CWND_DEFAULT;
+	iwdev->roce_ackcreds = IRDMA_ROCE_ACKCREDS_DEFAULT;
+	iwdev->rcv_wnd = IRDMA_CM_DEFAULT_RCV_WND_SCALED;
+	iwdev->rcv_wscale = IRDMA_CM_DEFAULT_RCV_WND_SCALE;
+	iwdev->roce_mode = true;
+	iwdev->push_mode = false;
+
+	l2params.mtu = iwdev->netdev->mtu;
+
+	err = irdma_rt_init_hw(iwdev, &l2params);
+	if (err)
+		goto err_rt_init;
+
+	err = irdma_ib_register_device(iwdev);
+	if (err)
+		goto err_ibreg;
+
+	auxiliary_set_drvdata(aux_dev, iwdev);
+
+	ibdev_dbg(&iwdev->ibdev,
+		  "INIT: Gen[%d] vport[%d] probe success. dev_name = %s, core_dev_name = %s, netdev=%s\n",
+		  rf->rdma_ver, idc_adev->vdev_info->vport_id,
+		  dev_name(&aux_dev->dev),
+		  dev_name(&idc_adev->vdev_info->core_adev->dev),
+		  netdev_name(idc_adev->vdev_info->netdev));
+
+	return 0;
+err_ibreg:
+	irdma_rt_deinit_hw(iwdev);
+err_rt_init:
+	ib_dealloc_device(&iwdev->ibdev);
+
+	return err;
+}
+
+static void ig3rdma_vport_remove(struct auxiliary_device *aux_dev)
+{
+	struct iidc_rdma_vport_auxiliary_dev *idc_adev =
+		container_of(aux_dev, struct iidc_rdma_vport_auxiliary_dev, adev);
+	struct irdma_device *iwdev = auxiliary_get_drvdata(aux_dev);
+
+	ibdev_dbg(&iwdev->ibdev,
+		  "INIT: Gen[%d] dev_name = %s, core_dev_name = %s, netdev=%s\n",
+		  iwdev->rf->rdma_ver, dev_name(&aux_dev->dev),
+		  dev_name(&idc_adev->vdev_info->core_adev->dev),
+		  netdev_name(idc_adev->vdev_info->netdev));
+
+	irdma_ib_unregister_device(iwdev);
+}
+
+static const struct auxiliary_device_id ig3rdma_vport_auxiliary_id_table[] = {
+	{.name = "idpf.8086.rdma.vdev", },
+	{},
+};
+
+MODULE_DEVICE_TABLE(auxiliary, ig3rdma_vport_auxiliary_id_table);
+
+static struct iidc_rdma_vport_auxiliary_drv ig3rdma_vport_auxiliary_drv = {
+	.adrv = {
+		.name = "vdev",
+		.id_table = ig3rdma_vport_auxiliary_id_table,
+		.probe = ig3rdma_vport_probe,
+		.remove = ig3rdma_vport_remove,
+	},
+	.event_handler = ig3rdma_idc_vport_event_handler,
+};
+
+
 static int __init irdma_init_module(void)
 {
 	int ret;
@@ -74,6 +182,17 @@ static int __init irdma_init_module(void)
 		return ret;
 	}
+
+	ret = auxiliary_driver_register(&ig3rdma_vport_auxiliary_drv.adrv);
+	if (ret) {
+		auxiliary_driver_unregister(&ig3rdma_core_auxiliary_drv.adrv);
+		auxiliary_driver_unregister(&icrdma_core_auxiliary_drv.adrv);
+		auxiliary_driver_unregister(&i40iw_auxiliary_drv);
+		pr_err("Failed ig3rdma vport auxiliary_driver_register() ret=%d\n",
+		       ret);
+
+		return ret;
+	}
 
 	irdma_register_notifiers();
 
 	return 0;
@@ -85,6 +204,7 @@ static void __exit irdma_exit_module(void)
 	auxiliary_driver_unregister(&icrdma_core_auxiliary_drv.adrv);
 	auxiliary_driver_unregister(&i40iw_auxiliary_drv);
 	auxiliary_driver_unregister(&ig3rdma_core_auxiliary_drv.adrv);
+	auxiliary_driver_unregister(&ig3rdma_vport_auxiliary_drv.adrv);
 }
 
 module_init(irdma_init_module);
diff --git a/drivers/infiniband/hw/irdma/main.h b/drivers/infiniband/hw/irdma/main.h
index 7300f8ab49cab..c64d772dc6446 100644
--- a/drivers/infiniband/hw/irdma/main.h
+++ b/drivers/infiniband/hw/irdma/main.h
@@ -354,12 +354,14 @@ struct irdma_device {
 	u32 rcv_wnd;
 	u16 mac_ip_table_idx;
 	u16 vsi_num;
+	u16 vport_id;
 	u8 rcv_wscale;
 	u8 iw_status;
 	bool roce_mode:1;
 	bool roce_dcqcn_en:1;
 	bool dcb_vlan_mode:1;
 	bool iw_ooo:1;
+	bool is_vport:1;
 	enum init_completion_state init_state;
 
 	wait_queue_head_t suspend_wq;
diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index 894c1f7bcb431..7f59bdd85da08 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -292,6 +292,10 @@ static int irdma_alloc_ucontext(struct ib_ucontext *uctx,
 	ucontext->iwdev = iwdev;
 	ucontext->abi_ver = req.userspace_ver;
 
+	if (!(req.comp_mask & IRDMA_SUPPORT_WQE_FORMAT_V2) &&
+	    uk_attrs->hw_rev >= IRDMA_GEN_3)
+		return -EOPNOTSUPP;
+
 	if (req.comp_mask & IRDMA_ALLOC_UCTX_USE_RAW_ATTR)
 		ucontext->use_raw_attrs = true;
 
@@ -4891,5 +4895,9 @@ void irdma_ib_dealloc_device(struct ib_device *ibdev)
 	struct irdma_device *iwdev = to_iwdev(ibdev);
 
 	irdma_rt_deinit_hw(iwdev);
-	irdma_ctrl_deinit_hw(iwdev->rf);
+	if (!iwdev->is_vport) {
+		irdma_ctrl_deinit_hw(iwdev->rf);
+		if (iwdev->rf->vchnl_wq)
+			destroy_workqueue(iwdev->rf->vchnl_wq);
+	}
 }
diff --git a/include/uapi/rdma/irdma-abi.h b/include/uapi/rdma/irdma-abi.h
index bb18f15489e37..4e42054cca332 100644
--- a/include/uapi/rdma/irdma-abi.h
+++ b/include/uapi/rdma/irdma-abi.h
@@ -25,6 +25,7 @@ enum irdma_memreg_type {
 enum {
 	IRDMA_ALLOC_UCTX_USE_RAW_ATTR = 1 << 0,
 	IRDMA_ALLOC_UCTX_MIN_HW_WQ_SIZE = 1 << 1,
+	IRDMA_SUPPORT_WQE_FORMAT_V2 = 1 << 3,
 };
 
 struct irdma_alloc_ucontext_req {
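
For illustration of the UAPI change above: a user-space provider that does
understand the GEN3 WQE format would set IRDMA_SUPPORT_WQE_FORMAT_V2 in the
alloc_ucontext comp_mask; otherwise irdma_alloc_ucontext() now returns
-EOPNOTSUPP when hw_rev >= IRDMA_GEN_3, which is how older providers are kept
from binding to GEN3 devices. The snippet below is a minimal user-space
sketch, not part of this patch; irdma_build_alloc_ucontext_req() is a
hypothetical helper, and only the field and flag names come from
include/uapi/rdma/irdma-abi.h.

#include <string.h>
#include <rdma/irdma-abi.h>	/* struct irdma_alloc_ucontext_req, IRDMA_SUPPORT_WQE_FORMAT_V2 */

/*
 * Hypothetical helper: fill the vendor-specific part of the
 * alloc_ucontext request before it is handed to the kernel.
 */
static void irdma_build_alloc_ucontext_req(struct irdma_alloc_ucontext_req *req,
					   __u8 abi_ver)
{
	memset(req, 0, sizeof(*req));
	req->userspace_ver = abi_ver;
	/* Advertise that this provider understands the GEN3 WQE layout. */
	req->comp_mask |= IRDMA_SUPPORT_WQE_FORMAT_V2;
}

This follows the existing comp_mask negotiation pattern (as with
IRDMA_ALLOC_UCTX_USE_RAW_ATTR): the kernel only tests the bits it knows
about, so older kernels simply ignore the new flag.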