RDMA/mana_ib: Add device-memory support
Author:     Konstantin Taranov <kotaranov@microsoft.com>
AuthorDate: Tue, 27 Jan 2026 08:26:49 +0000 (00:26 -0800)
Commit:     Leon Romanovsky <leon@kernel.org>
CommitDate: Tue, 27 Jan 2026 14:16:11 +0000 (09:16 -0500)
Introduce a basic device memory (DM) implementation that enables
creating and registering device memory, and using the associated
memory keys for networking operations.

Signed-off-by: Konstantin Taranov <kotaranov@microsoft.com>
Link: https://patch.msgid.link/20260127082649.429018-1-kotaranov@linux.microsoft.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
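
For context (not part of this patch): a minimal userspace sketch of how
these verbs are reached through rdma-core, assuming an already-opened
ibv_context (ctx) and ibv_pd (pd); error handling is trimmed and the
function name is hypothetical.

#include <infiniband/verbs.h>

/* Sketch only: allocate device memory and register it as an MR.
 * ibv_alloc_dm() and ibv_reg_dm_mr() map onto the alloc_dm and
 * reg_dm_mr device ops added by this patch. */
static struct ibv_mr *dm_mr_example(struct ibv_context *ctx,
				    struct ibv_pd *pd, size_t len)
{
	struct ibv_alloc_dm_attr dm_attr = { .length = len };
	struct ibv_dm *dm = ibv_alloc_dm(ctx, &dm_attr);

	if (!dm)
		return NULL;

	/* DM MRs are zero-based; the returned lkey/rkey is then usable
	 * in work requests like any other memory key. */
	return ibv_reg_dm_mr(pd, dm, 0, len,
			     IBV_ACCESS_ZERO_BASED | IBV_ACCESS_LOCAL_WRITE);
}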
drivers/infiniband/hw/mana/device.c
drivers/infiniband/hw/mana/mana_ib.h
drivers/infiniband/hw/mana/mr.c
include/net/mana/gdma.h

diff --git a/drivers/infiniband/hw/mana/device.c b/drivers/infiniband/hw/mana/device.c
index bdeddb642b8770ea165fe296417956961dea3fa8..ccc2279ca63c0b717bcf8056a68b4c871021dcd3 100644
--- a/drivers/infiniband/hw/mana/device.c
+++ b/drivers/infiniband/hw/mana/device.c
@@ -69,6 +69,12 @@ static const struct ib_device_ops mana_ib_device_stats_ops = {
        .alloc_hw_device_stats = mana_ib_alloc_hw_device_stats,
 };
 
+const struct ib_device_ops mana_ib_dev_dm_ops = {
+       .alloc_dm = mana_ib_alloc_dm,
+       .dealloc_dm = mana_ib_dealloc_dm,
+       .reg_dm_mr = mana_ib_reg_dm_mr,
+};
+
 static int mana_ib_netdev_event(struct notifier_block *this,
                                unsigned long event, void *ptr)
 {
@@ -139,6 +145,7 @@ static int mana_ib_probe(struct auxiliary_device *adev,
                ib_set_device_ops(&dev->ib_dev, &mana_ib_stats_ops);
                if (dev->adapter_caps.feature_flags & MANA_IB_FEATURE_DEV_COUNTERS_SUPPORT)
                        ib_set_device_ops(&dev->ib_dev, &mana_ib_device_stats_ops);
+               ib_set_device_ops(&dev->ib_dev, &mana_ib_dev_dm_ops);
 
                ret = mana_ib_create_eqs(dev);
                if (ret) {
diff --git a/drivers/infiniband/hw/mana/mana_ib.h b/drivers/infiniband/hw/mana/mana_ib.h
index 9d36232ed880dcb7a5050680196ca50f71d5ae5c..e447acfd2071a439bf9f962b2ff6e5aff2a9a084 100644
--- a/drivers/infiniband/hw/mana/mana_ib.h
+++ b/drivers/infiniband/hw/mana/mana_ib.h
@@ -131,6 +131,11 @@ struct mana_ib_mr {
        mana_handle_t mr_handle;
 };
 
+struct mana_ib_dm {
+       struct ib_dm ibdm;
+       mana_handle_t dm_handle;
+};
+
 struct mana_ib_cq {
        struct ib_cq ibcq;
        struct mana_ib_queue queue;
@@ -735,4 +740,11 @@ struct ib_mr *mana_ib_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 start, u64 leng
                                         u64 iova, int fd, int mr_access_flags,
                                         struct ib_dmah *dmah,
                                         struct uverbs_attr_bundle *attrs);
+
+struct ib_dm *mana_ib_alloc_dm(struct ib_device *dev, struct ib_ucontext *context,
+                              struct ib_dm_alloc_attr *attr, struct uverbs_attr_bundle *attrs);
+int mana_ib_dealloc_dm(struct ib_dm *dm, struct uverbs_attr_bundle *attrs);
+struct ib_mr *mana_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm, struct ib_dm_mr_attr *attr,
+                               struct uverbs_attr_bundle *attrs);
+
 #endif
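
The new struct mana_ib_dm follows the driver's usual embedding pattern:
the core ib_dm sits as the first member and the handlers in mr.c recover
the wrapper with container_of(). A hypothetical helper (not in this
patch) makes the pattern explicit:

/* Illustration only; mr.c below open-codes this container_of(). */
static inline struct mana_ib_dm *to_mana_dm(struct ib_dm *ibdm)
{
	return container_of(ibdm, struct mana_ib_dm, ibdm);
}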
diff --git a/drivers/infiniband/hw/mana/mr.c b/drivers/infiniband/hw/mana/mr.c
index 3d0245a4c1edc61a5cc5d245d7656351dd24dcab..f979f26adc3bd00b2dd109c392c43884f4237fdb 100644
--- a/drivers/infiniband/hw/mana/mr.c
+++ b/drivers/infiniband/hw/mana/mr.c
@@ -40,6 +40,7 @@ static int mana_ib_gd_create_mr(struct mana_ib_dev *dev, struct mana_ib_mr *mr,
 
        mana_gd_init_req_hdr(&req.hdr, GDMA_CREATE_MR, sizeof(req),
                             sizeof(resp));
+       req.hdr.req.msg_version = GDMA_MESSAGE_V2;
        req.pd_handle = mr_params->pd_handle;
        req.mr_type = mr_params->mr_type;
 
@@ -55,6 +56,12 @@ static int mana_ib_gd_create_mr(struct mana_ib_dev *dev, struct mana_ib_mr *mr,
                req.zbva.dma_region_handle = mr_params->zbva.dma_region_handle;
                req.zbva.access_flags = mr_params->zbva.access_flags;
                break;
+       case GDMA_MR_TYPE_DM:
+               req.da_ext.length = mr_params->da.length;
+               req.da.dm_handle = mr_params->da.dm_handle;
+               req.da.offset = mr_params->da.offset;
+               req.da.access_flags = mr_params->da.access_flags;
+               break;
        default:
                ibdev_dbg(&dev->ib_dev,
                          "invalid param (GDMA_MR_TYPE) passed, type %d\n",
@@ -317,3 +324,126 @@ int mana_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
 
        return 0;
 }
+
+static int mana_ib_gd_alloc_dm(struct mana_ib_dev *mdev, struct mana_ib_dm *dm,
+                              struct ib_dm_alloc_attr *attr)
+{
+       struct gdma_context *gc = mdev_to_gc(mdev);
+       struct gdma_alloc_dm_resp resp = {};
+       struct gdma_alloc_dm_req req = {};
+       int err;
+
+       mana_gd_init_req_hdr(&req.hdr, GDMA_ALLOC_DM, sizeof(req), sizeof(resp));
+       req.length = attr->length;
+       req.alignment = attr->alignment;
+       req.flags = attr->flags;
+
+       err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
+       if (err || resp.hdr.status) {
+               if (!err)
+                       err = -EPROTO;
+
+               return err;
+       }
+
+       dm->dm_handle = resp.dm_handle;
+
+       return 0;
+}
+
+struct ib_dm *mana_ib_alloc_dm(struct ib_device *ibdev,
+                              struct ib_ucontext *context,
+                              struct ib_dm_alloc_attr *attr,
+                              struct uverbs_attr_bundle *attrs)
+{
+       struct mana_ib_dev *dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
+       struct mana_ib_dm *dm;
+       int err;
+
+       dm = kzalloc(sizeof(*dm), GFP_KERNEL);
+       if (!dm)
+               return ERR_PTR(-ENOMEM);
+
+       err = mana_ib_gd_alloc_dm(dev, dm, attr);
+       if (err)
+               goto err_free;
+
+       return &dm->ibdm;
+
+err_free:
+       kfree(dm);
+       return ERR_PTR(err);
+}
+
+static int mana_ib_gd_destroy_dm(struct mana_ib_dev *mdev, struct mana_ib_dm *dm)
+{
+       struct gdma_context *gc = mdev_to_gc(mdev);
+       struct gdma_destroy_dm_resp resp = {};
+       struct gdma_destroy_dm_req req = {};
+       int err;
+
+       mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_DM, sizeof(req), sizeof(resp));
+       req.dm_handle = dm->dm_handle;
+
+       err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
+       if (err || resp.hdr.status) {
+               if (!err)
+                       err = -EPROTO;
+
+               return err;
+       }
+
+       return 0;
+}
+
+int mana_ib_dealloc_dm(struct ib_dm *ibdm, struct uverbs_attr_bundle *attrs)
+{
+       struct mana_ib_dev *dev = container_of(ibdm->device, struct mana_ib_dev, ib_dev);
+       struct mana_ib_dm *dm = container_of(ibdm, struct mana_ib_dm, ibdm);
+       int err;
+
+       err = mana_ib_gd_destroy_dm(dev, dm);
+       if (err)
+               return err;
+
+       kfree(dm);
+       return 0;
+}
+
+struct ib_mr *mana_ib_reg_dm_mr(struct ib_pd *ibpd, struct ib_dm *ibdm,
+                               struct ib_dm_mr_attr *attr,
+                               struct uverbs_attr_bundle *attrs)
+{
+       struct mana_ib_dev *dev = container_of(ibpd->device, struct mana_ib_dev, ib_dev);
+       struct mana_ib_dm *mana_dm = container_of(ibdm, struct mana_ib_dm, ibdm);
+       struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);
+       struct gdma_create_mr_params mr_params = {};
+       struct mana_ib_mr *mr;
+       int err;
+
+       attr->access_flags &= ~IB_ACCESS_OPTIONAL;
+       if (attr->access_flags & ~VALID_MR_FLAGS)
+               return ERR_PTR(-EOPNOTSUPP);
+
+       mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+       if (!mr)
+               return ERR_PTR(-ENOMEM);
+
+       mr_params.pd_handle = pd->pd_handle;
+       mr_params.mr_type = GDMA_MR_TYPE_DM;
+       mr_params.da.dm_handle = mana_dm->dm_handle;
+       mr_params.da.offset = attr->offset;
+       mr_params.da.length = attr->length;
+       mr_params.da.access_flags =
+               mana_ib_verbs_to_gdma_access_flags(attr->access_flags);
+
+       err = mana_ib_gd_create_mr(dev, mr, &mr_params);
+       if (err)
+               goto err_free;
+
+       return &mr->ibmr;
+
+err_free:
+       kfree(mr);
+       return ERR_PTR(err);
+}
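
Both mana_ib_gd_alloc_dm() and mana_ib_gd_destroy_dm() repeat the
driver's usual completion check: a transport error keeps its errno,
while a firmware-reported status with no transport error is mapped to
-EPROTO. A hypothetical helper capturing that pattern, for illustration
only:

/* Not part of this patch; mirrors the checks open-coded above. */
static int mana_ib_gd_check_resp(int err, const struct gdma_resp_hdr *hdr)
{
	if (err)
		return err;			/* transport failure */
	return hdr->status ? -EPROTO : 0;	/* HW reported an error */
}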
diff --git a/include/net/mana/gdma.h b/include/net/mana/gdma.h
index eaa27483f99b2b5c7b55a26d5c3cd42dc4cf5870..8649eb789c0e07df29fae04dc5df3d7fafbabe93 100644
--- a/include/net/mana/gdma.h
+++ b/include/net/mana/gdma.h
@@ -35,6 +35,8 @@ enum gdma_request_type {
        GDMA_CREATE_MR                  = 31,
        GDMA_DESTROY_MR                 = 32,
        GDMA_QUERY_HWC_TIMEOUT          = 84, /* 0x54 */
+       GDMA_ALLOC_DM                   = 96, /* 0x60 */
+       GDMA_DESTROY_DM                 = 97, /* 0x61 */
 };
 
 #define GDMA_RESOURCE_DOORBELL_PAGE    27
@@ -861,6 +863,8 @@ enum gdma_mr_type {
        GDMA_MR_TYPE_GVA = 2,
        /* Guest zero-based address MRs */
        GDMA_MR_TYPE_ZBVA = 4,
+       /* Device address MRs */
+       GDMA_MR_TYPE_DM = 5,
 };
 
 struct gdma_create_mr_params {
@@ -876,6 +880,12 @@ struct gdma_create_mr_params {
                        u64 dma_region_handle;
                        enum gdma_mr_access_flags access_flags;
                } zbva;
+               struct {
+                       u64 dm_handle;
+                       u64 offset;
+                       u64 length;
+                       enum gdma_mr_access_flags access_flags;
+               } da;
        };
 };
 
@@ -890,13 +900,23 @@ struct gdma_create_mr_request {
                        u64 dma_region_handle;
                        u64 virtual_address;
                        enum gdma_mr_access_flags access_flags;
-               } gva;
+               } __packed gva;
                struct {
                        u64 dma_region_handle;
                        enum gdma_mr_access_flags access_flags;
-               } zbva;
-       };
+               } __packed zbva;
+               struct {
+                       u64 dm_handle;
+                       u64 offset;
+                       enum gdma_mr_access_flags access_flags;
+               } __packed da;
+       } __packed;
        u32 reserved_2;
+       union {
+               struct {
+                       u64 length;
+               } da_ext;
+       };
 };/* HW DATA */
 
 struct gdma_create_mr_response {
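
Layout note: the new da_ext union begins after reserved_2, i.e. past the
end of the version-1 gdma_create_mr_request, which is why
mana_ib_gd_create_mr() above now stamps GDMA_MESSAGE_V2; an older parser
still sees the original layout. A hedged compile-time check (illustration
only; assumes <linux/build_bug.h> and <linux/stddef.h>):

/* Illustration only: the V2 extension must start after the V1 tail. */
static_assert(offsetof(struct gdma_create_mr_request, da_ext) >
	      offsetof(struct gdma_create_mr_request, reserved_2));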
@@ -915,6 +935,27 @@ struct gdma_destroy_mr_response {
        struct gdma_resp_hdr hdr;
 };/* HW DATA */
 
+struct gdma_alloc_dm_req {
+       struct gdma_req_hdr hdr;
+       u64 length;
+       u32 alignment;
+       u32 flags;
+}; /* HW DATA */
+
+struct gdma_alloc_dm_resp {
+       struct gdma_resp_hdr hdr;
+       u64 dm_handle;
+}; /* HW DATA */
+
+struct gdma_destroy_dm_req {
+       struct gdma_req_hdr hdr;
+       u64 dm_handle;
+}; /* HW DATA */
+
+struct gdma_destroy_dm_resp {
+       struct gdma_resp_hdr hdr;
+}; /* HW DATA */
+
 int mana_gd_verify_vf_version(struct pci_dev *pdev);
 
 int mana_gd_register_device(struct gdma_dev *gd);