mana_gd_init_req_hdr(&req.hdr, GDMA_CREATE_MR, sizeof(req),
sizeof(resp));
+ req.hdr.req.msg_version = GDMA_MESSAGE_V2;
req.pd_handle = mr_params->pd_handle;
req.mr_type = mr_params->mr_type;
req.zbva.dma_region_handle = mr_params->zbva.dma_region_handle;
req.zbva.access_flags = mr_params->zbva.access_flags;
break;
+ case GDMA_MR_TYPE_DM:
+ req.da_ext.length = mr_params->da.length;
+ req.da.dm_handle = mr_params->da.dm_handle;
+ req.da.offset = mr_params->da.offset;
+ req.da.access_flags = mr_params->da.access_flags;
+ break;
default:
ibdev_dbg(&dev->ib_dev,
"invalid param (GDMA_MR_TYPE) passed, type %d\n",
req.mr_type);
return -EINVAL;
}

return 0;
}
+
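+/* Allocate device memory on the HW and return its DM handle (GDMA_ALLOC_DM). */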
+static int mana_ib_gd_alloc_dm(struct mana_ib_dev *mdev, struct mana_ib_dm *dm,
+ struct ib_dm_alloc_attr *attr)
+{
+ struct gdma_context *gc = mdev_to_gc(mdev);
+ struct gdma_alloc_dm_resp resp = {};
+ struct gdma_alloc_dm_req req = {};
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, GDMA_ALLOC_DM, sizeof(req), sizeof(resp));
+ req.length = attr->length;
+ req.alignment = attr->alignment;
+ req.flags = attr->flags;
+
+ err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
+ if (err || resp.hdr.status) {
+ if (!err)
+ err = -EPROTO;
+
+ return err;
+ }
+
+ dm->dm_handle = resp.dm_handle;
+
+ return 0;
+}
+
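+/* Handle the ib_alloc_dm verb: create a mana_ib_dm object backed by HW device memory. */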
+struct ib_dm *mana_ib_alloc_dm(struct ib_device *ibdev,
+ struct ib_ucontext *context,
+ struct ib_dm_alloc_attr *attr,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct mana_ib_dev *dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
+ struct mana_ib_dm *dm;
+ int err;
+
+ dm = kzalloc(sizeof(*dm), GFP_KERNEL);
+ if (!dm)
+ return ERR_PTR(-ENOMEM);
+
+ err = mana_ib_gd_alloc_dm(dev, dm, attr);
+ if (err)
+ goto err_free;
+
+ return &dm->ibdm;
+
+err_free:
+ kfree(dm);
+ return ERR_PTR(err);
+}
+
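+/* Release the HW device memory behind a DM object (GDMA_DESTROY_DM). */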
+static int mana_ib_gd_destroy_dm(struct mana_ib_dev *mdev, struct mana_ib_dm *dm)
+{
+ struct gdma_context *gc = mdev_to_gc(mdev);
+ struct gdma_destroy_dm_resp resp = {};
+ struct gdma_destroy_dm_req req = {};
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_DM, sizeof(req), sizeof(resp));
+ req.dm_handle = dm->dm_handle;
+
+ err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
+ if (err || resp.hdr.status) {
+ if (!err)
+ err = -EPROTO;
+
+ return err;
+ }
+
+ return 0;
+}
+
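+/* Handle the ib_dealloc_dm verb: destroy the HW resource, then free the driver state. */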
+int mana_ib_dealloc_dm(struct ib_dm *ibdm, struct uverbs_attr_bundle *attrs)
+{
+ struct mana_ib_dev *dev = container_of(ibdm->device, struct mana_ib_dev, ib_dev);
+ struct mana_ib_dm *dm = container_of(ibdm, struct mana_ib_dm, ibdm);
+ int err;
+
+ err = mana_ib_gd_destroy_dm(dev, dm);
+ if (err)
+ return err;
+
+ kfree(dm);
+ return 0;
+}
+
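+/* Handle the ib_reg_dm_mr verb: register an MR of type GDMA_MR_TYPE_DM over a DM allocation. */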
+struct ib_mr *mana_ib_reg_dm_mr(struct ib_pd *ibpd, struct ib_dm *ibdm,
+ struct ib_dm_mr_attr *attr,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct mana_ib_dev *dev = container_of(ibpd->device, struct mana_ib_dev, ib_dev);
+ struct mana_ib_dm *mana_dm = container_of(ibdm, struct mana_ib_dm, ibdm);
+ struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);
+ struct gdma_create_mr_params mr_params = {};
+ struct mana_ib_mr *mr;
+ int err;
+
+ attr->access_flags &= ~IB_ACCESS_OPTIONAL;
+ if (attr->access_flags & ~VALID_MR_FLAGS)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ mr_params.pd_handle = pd->pd_handle;
+ mr_params.mr_type = GDMA_MR_TYPE_DM;
+ mr_params.da.dm_handle = mana_dm->dm_handle;
+ mr_params.da.offset = attr->offset;
+ mr_params.da.length = attr->length;
+ mr_params.da.access_flags =
+ mana_ib_verbs_to_gdma_access_flags(attr->access_flags);
+
+ err = mana_ib_gd_create_mr(dev, mr, &mr_params);
+ if (err)
+ goto err_free;
+
+ return &mr->ibmr;
+
+err_free:
+ kfree(mr);
+ return ERR_PTR(err);
+}
GDMA_CREATE_MR = 31,
GDMA_DESTROY_MR = 32,
GDMA_QUERY_HWC_TIMEOUT = 84, /* 0x54 */
+ GDMA_ALLOC_DM = 96, /* 0x60 */
+ GDMA_DESTROY_DM = 97, /* 0x61 */
};
#define GDMA_RESOURCE_DOORBELL_PAGE 27
GDMA_MR_TYPE_GVA = 2,
/* Guest zero-based address MRs */
GDMA_MR_TYPE_ZBVA = 4,
+ /* Device address MRs */
+ GDMA_MR_TYPE_DM = 5,
};
struct gdma_create_mr_params {
u64 dma_region_handle;
enum gdma_mr_access_flags access_flags;
} zbva;
+ struct {
+ u64 dm_handle;
+ u64 offset;
+ u64 length;
+ enum gdma_mr_access_flags access_flags;
+ } da;
};
};
u64 dma_region_handle;
u64 virtual_address;
enum gdma_mr_access_flags access_flags;
- } gva;
+ } __packed gva;
struct {
u64 dma_region_handle;
enum gdma_mr_access_flags access_flags;
- } zbva;
- };
+ } __packed zbva;
+ struct {
+ u64 dm_handle;
+ u64 offset;
+ enum gdma_mr_access_flags access_flags;
+ } __packed da;
+ } __packed;
u32 reserved_2;
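+ /* Trailing V2 request extension; currently only used for GDMA_MR_TYPE_DM. */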
+ union {
+ struct {
+ u64 length;
+ } da_ext;
+ };
};/* HW DATA */
struct gdma_create_mr_response {
struct gdma_resp_hdr hdr;
};/* HW DATA */
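+/* HW request/response formats for device memory (DM) allocation and teardown. */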
+struct gdma_alloc_dm_req {
+ struct gdma_req_hdr hdr;
+ u64 length;
+ u32 alignment;
+ u32 flags;
+}; /* HW DATA */
+
+struct gdma_alloc_dm_resp {
+ struct gdma_resp_hdr hdr;
+ u64 dm_handle;
+}; /* HW DATA */
+
+struct gdma_destroy_dm_req {
+ struct gdma_req_hdr hdr;
+ u64 dm_handle;
+}; /* HW DATA */
+
+struct gdma_destroy_dm_resp {
+ struct gdma_resp_hdr hdr;
+}; /* HW DATA */
+
int mana_gd_verify_vf_version(struct pci_dev *pdev);
int mana_gd_register_device(struct gdma_dev *gd);
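
For reference, the new callbacks are reached through the standard libibverbs device-memory API. Below is a minimal user-space sketch (not part of the patch), assuming a device context ctx and protection domain pd have already been set up, and that DM MRs are registered zero-based as ibv_reg_dm_mr() expects:

#include <infiniband/verbs.h>

/* Example only: allocate a small device-memory buffer and register an MR over it. */
static struct ibv_mr *example_dm_mr(struct ibv_context *ctx, struct ibv_pd *pd,
				    struct ibv_dm **dm_out)
{
	struct ibv_alloc_dm_attr dm_attr = { .length = 4096, .log_align_req = 0 };
	struct ibv_dm *dm;
	struct ibv_mr *mr;

	dm = ibv_alloc_dm(ctx, &dm_attr);
	if (!dm)
		return NULL;

	/* Offsets into a DM MR are zero-based within the DM allocation. */
	mr = ibv_reg_dm_mr(pd, dm, 0, dm_attr.length,
			   IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE |
			   IBV_ACCESS_ZERO_BASED);
	if (!mr) {
		ibv_free_dm(dm);
		return NULL;
	}

	*dm_out = dm;
	return mr;	/* caller later: ibv_dereg_mr(mr); ibv_free_dm(dm); */
}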