.uverbs_abi_ver = MANA_IB_UVERBS_ABI_VERSION,
.add_gid = mana_ib_gd_add_gid,
+ .alloc_mw = mana_ib_alloc_mw,
.alloc_pd = mana_ib_alloc_pd,
.alloc_ucontext = mana_ib_alloc_ucontext,
.create_ah = mana_ib_create_ah,
.create_qp = mana_ib_create_qp,
.create_rwq_ind_table = mana_ib_create_rwq_ind_table,
.create_wq = mana_ib_create_wq,
+ .dealloc_mw = mana_ib_dealloc_mw,
.dealloc_pd = mana_ib_dealloc_pd,
.dealloc_ucontext = mana_ib_dealloc_ucontext,
.del_gid = mana_ib_gd_del_gid,
INIT_RDMA_OBJ_SIZE(ib_ah, mana_ib_ah, ibah),
INIT_RDMA_OBJ_SIZE(ib_cq, mana_ib_cq, ibcq),
+ INIT_RDMA_OBJ_SIZE(ib_mw, mana_ib_mw, ibmw),
INIT_RDMA_OBJ_SIZE(ib_pd, mana_ib_pd, ibpd),
INIT_RDMA_OBJ_SIZE(ib_qp, mana_ib_qp, ibqp),
INIT_RDMA_OBJ_SIZE(ib_ucontext, mana_ib_ucontext, ibucontext),
dma_addr_t dma_handle;
};
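+/* Memory window state; the HW object is a GDMA MR handle of an MW type */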
+struct mana_ib_mw {
+ struct ib_mw ibmw;
+ mana_handle_t mw_handle;
+};
+
struct mana_ib_mr {
struct ib_mr ibmr;
struct ib_umem *umem;
int mana_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int mana_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
+int mana_ib_alloc_mw(struct ib_mw *mw, struct ib_udata *udata);
+int mana_ib_dealloc_mw(struct ib_mw *mw);
+
struct ib_mr *mana_ib_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 start, u64 length,
u64 iova, int fd, int mr_access_flags,
struct ib_dmah *dmah,
#include "mana_ib.h"
#define VALID_MR_FLAGS (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ |\
- IB_ACCESS_REMOTE_ATOMIC | IB_ZERO_BASED)
+ IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND | IB_ZERO_BASED)
#define VALID_DMA_MR_FLAGS (IB_ACCESS_LOCAL_WRITE)
if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
flags |= GDMA_ACCESS_FLAG_REMOTE_ATOMIC;
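+ /* Allow this MR to be the target of memory-window binds */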
+ if (access_flags & IB_ACCESS_MW_BIND)
+ flags |= GDMA_ACCESS_FLAG_BIND_MW;
+
return flags;
}
return ERR_PTR(err);
}
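+/*
+ * Memory windows are created through the same GDMA_CREATE_MR command as
+ * MRs; the V2 request distinguishes them from plain MRs via mr_type
+ * (GDMA_MR_TYPE_MW1/MW2).
+ */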
+static int mana_ib_gd_create_mw(struct mana_ib_dev *dev, struct mana_ib_pd *pd, struct ib_mw *ibmw)
+{
+ struct mana_ib_mw *mw = container_of(ibmw, struct mana_ib_mw, ibmw);
+ struct gdma_context *gc = mdev_to_gc(dev);
+ struct gdma_create_mr_response resp = {};
+ struct gdma_create_mr_request req = {};
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, GDMA_CREATE_MR, sizeof(req), sizeof(resp));
+ req.hdr.req.msg_version = GDMA_MESSAGE_V2;
+ req.pd_handle = pd->pd_handle;
+
+ switch (mw->ibmw.type) {
+ case IB_MW_TYPE_1:
+ req.mr_type = GDMA_MR_TYPE_MW1;
+ break;
+ case IB_MW_TYPE_2:
+ req.mr_type = GDMA_MR_TYPE_MW2;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
+ if (err || resp.hdr.status) {
+ ibdev_dbg(&dev->ib_dev, "Failed to create mw %d, %u\n", err, resp.hdr.status);
+ if (!err)
+ err = -EPROTO;
+ return err;
+ }
+
+ mw->ibmw.rkey = resp.rkey;
+ mw->mw_handle = resp.mr_handle;
+
+ return 0;
+}
+
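+/*
+ * .alloc_mw verb: the ib_mw itself is allocated by the RDMA core (see
+ * INIT_RDMA_OBJ_SIZE for ib_mw), so only the HW object is created here.
+ */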
+int mana_ib_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
+{
+ struct mana_ib_dev *mdev = container_of(ibmw->device, struct mana_ib_dev, ib_dev);
+ struct mana_ib_pd *pd = container_of(ibmw->pd, struct mana_ib_pd, ibpd);
+
+ return mana_ib_gd_create_mw(mdev, pd, ibmw);
+}
+
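+/*
+ * The MW handle comes from GDMA_CREATE_MR, so teardown reuses
+ * mana_ib_gd_destroy_mr() on that handle.
+ */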
+int mana_ib_dealloc_mw(struct ib_mw *ibmw)
+{
+ struct mana_ib_dev *dev = container_of(ibmw->device, struct mana_ib_dev, ib_dev);
+ struct mana_ib_mw *mw = container_of(ibmw, struct mana_ib_mw, ibmw);
+
+ return mana_ib_gd_destroy_mr(dev, mw->mw_handle);
+}
+
int mana_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
struct mana_ib_mr *mr = container_of(ibmr, struct mana_ib_mr, ibmr);
GDMA_ACCESS_FLAG_REMOTE_READ = BIT_ULL(2),
GDMA_ACCESS_FLAG_REMOTE_WRITE = BIT_ULL(3),
GDMA_ACCESS_FLAG_REMOTE_ATOMIC = BIT_ULL(4),
+ GDMA_ACCESS_FLAG_BIND_MW = BIT_ULL(5),
};
/* GDMA_CREATE_DMA_REGION */
GDMA_MR_TYPE_ZBVA = 4,
/* Device address MRs */
GDMA_MR_TYPE_DM = 5,
+ /* Memory Window type 1 */
+ GDMA_MR_TYPE_MW1 = 6,
+ /* Memory Window type 2 */
+ GDMA_MR_TYPE_MW2 = 7,
};
struct gdma_create_mr_params {