static int alloc_path_reqs(struct rtrs_clt_path *clt_path)
{
+ struct ib_device *ib_dev = clt_path->s.dev->ib_dev;
struct rtrs_clt_io_req *req;
+ enum ib_mr_type mr_type;
int i, err = -ENOMEM;
clt_path->reqs = kcalloc(clt_path->queue_depth, sizeof(*clt_path->reqs),
			 GFP_KERNEL);
if (!clt_path->reqs)
return -ENOMEM;
+ if (ib_dev->attrs.kernel_cap_flags & IBK_SG_GAPS_REG)
+ mr_type = IB_MR_TYPE_SG_GAPS;
+ else
+ mr_type = IB_MR_TYPE_MEM_REG;
+
for (i = 0; i < clt_path->queue_depth; ++i) {
req = &clt_path->reqs[i];
req->iu = rtrs_iu_alloc(1, clt_path->max_hdr_size, GFP_KERNEL,
			clt_path->s.dev->ib_dev, DMA_TO_DEVICE,
			rtrs_clt_rdma_done);
if (!req->sge)
goto out;
- req->mr = ib_alloc_mr(clt_path->s.dev->ib_pd,
- IB_MR_TYPE_MEM_REG,
+ req->mr = ib_alloc_mr(clt_path->s.dev->ib_pd, mr_type,
clt_path->max_pages_per_mr);
if (IS_ERR(req->mr)) {
err = PTR_ERR(req->mr);
static int map_cont_bufs(struct rtrs_srv_path *srv_path)
{
+ struct ib_device *ib_dev = srv_path->s.dev->ib_dev;
struct rtrs_srv_sess *srv = srv_path->srv;
struct rtrs_path *ss = &srv_path->s;
int i, err, mrs_num;
unsigned int chunk_bits;
+ enum ib_mr_type mr_type;
int chunks_per_mr = 1;
- struct ib_mr *mr;
struct sg_table *sgt;
+ struct ib_mr *mr;
/*
* Here we map queue_depth chunks to MR. Firstly we have to
* figure out how many chunks we can map per MR.
*/
err = -EINVAL;
goto free_sg;
}
- mr = ib_alloc_mr(srv_path->s.dev->ib_pd, IB_MR_TYPE_MEM_REG,
- nr_sgt);
+
+ if (ib_dev->attrs.kernel_cap_flags & IBK_SG_GAPS_REG)
+ mr_type = IB_MR_TYPE_SG_GAPS;
+ else
+ mr_type = IB_MR_TYPE_MEM_REG;
+
+ mr = ib_alloc_mr(srv_path->s.dev->ib_pd, mr_type, nr_sgt);
if (IS_ERR(mr)) {
err = PTR_ERR(mr);
goto unmap_sg;
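
Both hunks apply the same rule: if the device advertises IBK_SG_GAPS_REG in ib_device_attr.kernel_cap_flags, allocate IB_MR_TYPE_SG_GAPS MRs, otherwise fall back to IB_MR_TYPE_MEM_REG. Below is a minimal sketch of that selection factored into a shared helper; the helper name rtrs_mr_type() is hypothetical and not part of the patch, while the attribute, the capability flag, the MR types, and ib_alloc_mr() are the existing ib_verbs APIs the diff already uses.

#include <rdma/ib_verbs.h>

/*
 * Hypothetical helper (not in the patch): choose the MR type once based on
 * the device capability instead of open-coding the check in both
 * alloc_path_reqs() and map_cont_bufs().
 */
static enum ib_mr_type rtrs_mr_type(struct ib_device *ib_dev)
{
	/*
	 * SG_GAPS MRs can register scatterlists whose entries are not
	 * page-aligned or contiguous, so prefer them when supported.
	 */
	if (ib_dev->attrs.kernel_cap_flags & IBK_SG_GAPS_REG)
		return IB_MR_TYPE_SG_GAPS;

	return IB_MR_TYPE_MEM_REG;
}

With such a helper, the client-side allocation would read req->mr = ib_alloc_mr(clt_path->s.dev->ib_pd, rtrs_mr_type(ib_dev), clt_path->max_pages_per_mr); and the server-side call would pass rtrs_mr_type(ib_dev) to ib_alloc_mr() in the same way.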