*cqe_size = ucmd.cqe_size;
- cq->buf.umem =
- ib_umem_get(&dev->ib_dev, ucmd.buf_addr,
- entries * ucmd.cqe_size, IB_ACCESS_LOCAL_WRITE);
- if (IS_ERR(cq->buf.umem)) {
- err = PTR_ERR(cq->buf.umem);
- return err;
- }
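+ /*
+  * ib_core may have attached a umem to this CQ already; only pin the
+  * user buffer when none is present. From here on the umem is owned
+  * and released by ib_core (see the err_umem path below).
+  */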
+ if (!cq->ibcq.umem)
+ cq->ibcq.umem = ib_umem_get(&dev->ib_dev, ucmd.buf_addr,
+ entries * ucmd.cqe_size,
+ IB_ACCESS_LOCAL_WRITE);
+ if (IS_ERR(cq->ibcq.umem))
+ return PTR_ERR(cq->ibcq.umem);
page_size = mlx5_umem_find_best_cq_quantized_pgoff(
- cq->buf.umem, cqc, log_page_size, MLX5_ADAPTER_PAGE_SHIFT,
+ cq->ibcq.umem, cqc, log_page_size, MLX5_ADAPTER_PAGE_SHIFT,
page_offset, 64, &page_offset_quantized);
if (!page_size) {
err = -EINVAL;
goto err_umem;
}
- ncont = ib_umem_num_dma_blocks(cq->buf.umem, page_size);
+ ncont = ib_umem_num_dma_blocks(cq->ibcq.umem, page_size);
mlx5_ib_dbg(
dev,
"addr 0x%llx, size %u, npages %zu, page_size %lu, ncont %d\n",
ucmd.buf_addr, entries * ucmd.cqe_size,
- ib_umem_num_pages(cq->buf.umem), page_size, ncont);
+ ib_umem_num_pages(cq->ibcq.umem), page_size, ncont);
*inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * ncont;
}
pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, *cqb, pas);
- mlx5_ib_populate_pas(cq->buf.umem, page_size, pas, 0);
+ mlx5_ib_populate_pas(cq->ibcq.umem, page_size, pas, 0);
cqc = MLX5_ADDR_OF(create_cq_in, *cqb, cq_context);
MLX5_SET(cqc, cqc, log_page_size,
mlx5_ib_db_unmap_user(context, &cq->db);
err_umem:
- ib_umem_release(cq->buf.umem);
+ /* UMEM is released by ib_core */
return err;
}
udata, struct mlx5_ib_ucontext, ibucontext);
mlx5_ib_db_unmap_user(context, &cq->db);
- ib_umem_release(cq->buf.umem);
}
static void init_cq_frag_buf(struct mlx5_ib_cq_buf *buf)
cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}
-int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
- struct uverbs_attr_bundle *attrs)
+int mlx5_ib_create_user_cq(struct ib_cq *ibcq,
+ const struct ib_cq_init_attr *attr,
+ struct uverbs_attr_bundle *attrs)
{
struct ib_udata *udata = &attrs->driver_udata;
struct ib_device *ibdev = ibcq->device;
int eqn;
int err;
- if (entries < 0 ||
- (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))))
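+ /* attr->cqe is unsigned, so the old negative-entries check is unnecessary */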
+ if (attr->cqe > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)))
return -EINVAL;
if (check_cq_create_flags(attr->flags))
cq->ibcq.cqe = entries - 1;
mutex_init(&cq->resize_mutex);
spin_lock_init(&cq->lock);
- cq->resize_buf = NULL;
- cq->resize_umem = NULL;
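+ /* resize_buf/resize_umem start out NULL: ib_core allocates the CQ zeroed */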
if (attr->flags & IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION)
cq->private_flags |= MLX5_IB_CQ_PR_TIMESTAMP_COMPLETION;
INIT_LIST_HEAD(&cq->list_send_qp);
INIT_LIST_HEAD(&cq->list_recv_qp);
- if (udata) {
- err = create_cq_user(dev, udata, cq, entries, &cqb, &cqe_size,
- &index, &inlen, attrs);
- if (err)
- return err;
- } else {
- cqe_size = cache_line_size() == 128 ? 128 : 64;
- err = create_cq_kernel(dev, cq, entries, cqe_size, &cqb,
- &index, &inlen);
- if (err)
- return err;
-
- INIT_WORK(&cq->notify_work, notify_soft_wc_handler);
- }
+ err = create_cq_user(dev, udata, cq, entries, &cqb, &cqe_size, &index,
+ &inlen, attrs);
+ if (err)
+ return err;
err = mlx5_comp_eqn_get(dev->mdev, vector, &eqn);
if (err)
if (attr->flags & IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN)
MLX5_SET(cqc, cqc, oi, 1);
- if (udata) {
- cq->mcq.comp = mlx5_add_cq_to_tasklet;
- cq->mcq.tasklet_ctx.comp = mlx5_ib_cq_comp;
- } else {
- cq->mcq.comp = mlx5_ib_cq_comp;
- }
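+ /* User CQ completions are always dispatched through the EQ tasklet */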
+ cq->mcq.comp = mlx5_add_cq_to_tasklet;
+ cq->mcq.tasklet_ctx.comp = mlx5_ib_cq_comp;
err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen, out, sizeof(out));
if (err)
INIT_LIST_HEAD(&cq->wc_list);
- if (udata)
- if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof(__u32))) {
- err = -EFAULT;
- goto err_cmd;
- }
-
+ if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof(__u32))) {
+ err = -EFAULT;
+ goto err_cmd;
+ }
kvfree(cqb);
return 0;
err_cqb:
kvfree(cqb);
- if (udata)
- destroy_cq_user(cq, udata);
- else
- destroy_cq_kernel(dev, cq);
+ destroy_cq_user(cq, udata);
+ return err;
+}
+
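+ /*
+  * Kernel-consumer CQ creation: the buffer is allocated by the driver
+  * (create_cq_kernel) and completions bypass the user-CQ tasklet.
+  */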
+int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_device *ibdev = ibcq->device;
+ int entries = attr->cqe;
+ int vector = attr->comp_vector;
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ struct mlx5_ib_cq *cq = to_mcq(ibcq);
+ u32 out[MLX5_ST_SZ_DW(create_cq_out)];
+ int index;
+ int inlen;
+ u32 *cqb = NULL;
+ void *cqc;
+ int cqe_size;
+ int eqn;
+ int err;
+
+ if (attr->cqe > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)))
+ return -EINVAL;
+
+ entries = roundup_pow_of_two(entries + 1);
+ if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)))
+ return -EINVAL;
+
+ cq->ibcq.cqe = entries - 1;
+ mutex_init(&cq->resize_mutex);
+ spin_lock_init(&cq->lock);
+ INIT_LIST_HEAD(&cq->list_send_qp);
+ INIT_LIST_HEAD(&cq->list_recv_qp);
+
+ cqe_size = cache_line_size() == 128 ? 128 : 64;
+ err = create_cq_kernel(dev, cq, entries, cqe_size, &cqb, &index,
+ &inlen);
+ if (err)
+ return err;
+
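+ /* Soft completions queued on wc_list are signalled from notify_work */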
+ INIT_WORK(&cq->notify_work, notify_soft_wc_handler);
+
+ err = mlx5_comp_eqn_get(dev->mdev, vector, &eqn);
+ if (err)
+ goto err_cqb;
+
+ cq->cqe_size = cqe_size;
+
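+ /* Fill the CQ context: CQE size/padding, log size, UAR page, EQN and doorbell */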
+ cqc = MLX5_ADDR_OF(create_cq_in, cqb, cq_context);
+ MLX5_SET(cqc, cqc, cqe_sz,
+ cqe_sz_to_mlx_sz(cqe_size,
+ cq->private_flags &
+ MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD));
+ MLX5_SET(cqc, cqc, log_cq_size, ilog2(entries));
+ MLX5_SET(cqc, cqc, uar_page, index);
+ MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
+ MLX5_SET64(cqc, cqc, dbr_addr, cq->db.dma);
+
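+ /* Kernel CQs complete directly from the EQ handler, without the tasklet */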
+ cq->mcq.comp = mlx5_ib_cq_comp;
+
+ err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen, out,
+ sizeof(out));
+ if (err)
+ goto err_cqb;
+
+ mlx5_ib_dbg(dev, "cqn 0x%x\n", cq->mcq.cqn);
+ cq->mcq.event = mlx5_ib_cq_event;
+
+ INIT_LIST_HEAD(&cq->wc_list);
+ kvfree(cqb);
+ return 0;
+
+err_cqb:
+ kvfree(cqb);
+ destroy_cq_kernel(dev, cq);
return err;
}
if (udata) {
cq->ibcq.cqe = entries - 1;
- ib_umem_release(cq->buf.umem);
- cq->buf.umem = cq->resize_umem;
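+ /* Drop the old umem and hand the resize umem over to ibcq */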
+ ib_umem_release(cq->ibcq.umem);
+ cq->ibcq.umem = cq->resize_umem;
cq->resize_umem = NULL;
} else {
struct mlx5_ib_cq_buf tbuf;