if (ret)
return ret;
+ if (!cmd.cqe)
+ return -EINVAL;
+
cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
if (IS_ERR(cq))
return PTR_ERR(cq);
}
}
-int bnxt_re_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
+int bnxt_re_resize_cq(struct ib_cq *ibcq, unsigned int cqe,
+ struct ib_udata *udata)
{
struct bnxt_qplib_sg_info sg_info = {};
struct bnxt_qplib_dpi *orig_dpi = NULL;
}
/* Check the requested cq depth out of supported depth */
- if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
- ibdev_err(&rdev->ibdev, "Resize CQ %#x failed - out of range cqe %d",
- cq->qplib_cq.id, cqe);
+ if (cqe > dev_attr->max_cq_wqes)
return -EINVAL;
- }
uctx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
entries = bnxt_re_init_depth(cqe + 1, uctx);
struct uverbs_attr_bundle *attrs);
int bnxt_re_create_user_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
struct uverbs_attr_bundle *attrs);
-int bnxt_re_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
+int bnxt_re_resize_cq(struct ib_cq *ibcq, unsigned int cqe,
+ struct ib_udata *udata);
int bnxt_re_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
int bnxt_re_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
int bnxt_re_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
* @entries: desired cq size
* @udata: user data
*/
-static int irdma_resize_cq(struct ib_cq *ibcq, int entries,
+static int irdma_resize_cq(struct ib_cq *ibcq, unsigned int entries,
struct ib_udata *udata)
{
#define IRDMA_RESIZE_CQ_MIN_REQ_LEN offsetofend(struct irdma_resize_cq_req, user_cq_buffer)
++cq->mcq.cons_index;
}
-int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
+int mlx4_ib_resize_cq(struct ib_cq *ibcq, unsigned int entries,
+ struct ib_udata *udata)
{
struct mlx4_ib_dev *dev = to_mdev(ibcq->device);
struct mlx4_ib_cq *cq = to_mcq(ibcq);
int err;
mutex_lock(&cq->resize_mutex);
- if (entries < 1 || entries > dev->dev->caps.max_cqes) {
+ if (entries > dev->dev->caps.max_cqes) {
err = -EINVAL;
goto out;
}
int mlx4_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
unsigned int *sg_offset);
int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
-int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
+int mlx4_ib_resize_cq(struct ib_cq *ibcq, unsigned int entries,
+ struct ib_udata *udata);
int mlx4_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
struct uverbs_attr_bundle *attrs);
int mlx4_ib_create_user_cq(struct ib_cq *ibcq,
return 0;
}
-int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
+int mlx5_ib_resize_cq(struct ib_cq *ibcq, unsigned int entries,
+ struct ib_udata *udata)
{
struct mlx5_ib_dev *dev = to_mdev(ibcq->device);
struct mlx5_ib_cq *cq = to_mcq(ibcq);
return -ENOSYS;
}
- if (entries < 1 ||
- entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))) {
- mlx5_ib_warn(dev, "wrong entries number %d, max %d\n",
- entries,
- 1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz));
+ if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)))
return -EINVAL;
- }
entries = roundup_pow_of_two(entries + 1);
if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)) + 1)
void mlx5_ib_post_destroy_cq(struct ib_cq *cq);
int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
-int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
+int mlx5_ib_resize_cq(struct ib_cq *ibcq, unsigned int entries,
+ struct ib_udata *udata);
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int access_flags,
return 0;
}
-static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
+static int mthca_resize_cq(struct ib_cq *ibcq, unsigned int entries,
+ struct ib_udata *udata)
{
struct mthca_dev *dev = to_mdev(ibcq->device);
struct mthca_cq *cq = to_mcq(ibcq);
u32 lkey;
int ret;
- if (entries < 1 || entries > dev->limits.max_cqes)
+ if (entries > dev->limits.max_cqes)
return -EINVAL;
mutex_lock(&cq->mutex);
return status;
}
-int ocrdma_resize_cq(struct ib_cq *ibcq, int new_cnt,
+int ocrdma_resize_cq(struct ib_cq *ibcq, unsigned int new_cnt,
 		     struct ib_udata *udata)
 {
-	int status = 0;
 	struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
-	if (new_cnt < 1 || new_cnt > cq->max_hw_cqe) {
-		status = -EINVAL;
-		return status;
-	}
+	/* The zero-size case (old "new_cnt < 1") is now rejected by the
+	 * uverbs core before the driver is called, so only the device's
+	 * upper bound needs checking here.
+	 */
+	if (new_cnt > cq->max_hw_cqe)
+		return -EINVAL;
+
 	ibcq->cqe = new_cnt;
-	return status;
+	return 0;
 }
static void ocrdma_flush_cq(struct ocrdma_cq *cq)
int ocrdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
struct uverbs_attr_bundle *attrs);
-int ocrdma_resize_cq(struct ib_cq *, int cqe, struct ib_udata *);
+int ocrdma_resize_cq(struct ib_cq *, unsigned int cqe, struct ib_udata *);
int ocrdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
int ocrdma_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *attrs,
*
* Return: 0 for success.
*/
-int rvt_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
+int rvt_resize_cq(struct ib_cq *ibcq, unsigned int cqe, struct ib_udata *udata)
{
struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
u32 head, tail, n;
struct rvt_k_cq_wc *k_wc = NULL;
struct rvt_k_cq_wc *old_k_wc = NULL;
- if (cqe < 1 || cqe > rdi->dparms.props.max_cqe)
+ if (cqe > rdi->dparms.props.max_cqe)
return -EINVAL;
/*
struct uverbs_attr_bundle *attrs);
int rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
int rvt_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags);
-int rvt_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
+int rvt_resize_cq(struct ib_cq *ibcq, unsigned int cqe, struct ib_udata *udata);
int rvt_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
int rvt_driver_cq_init(void);
void rvt_cq_exit(void);
#include "rxe_loc.h"
#include "rxe_queue.h"
-int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq,
- int cqe, int comp_vector)
-{
- int count;
-
- if (cqe <= 0) {
- rxe_dbg_dev(rxe, "cqe(%d) <= 0\n", cqe);
- goto err1;
- }
-
- if (cqe > rxe->attr.max_cqe) {
- rxe_dbg_dev(rxe, "cqe(%d) > max_cqe(%d)\n",
- cqe, rxe->attr.max_cqe);
- goto err1;
- }
-
- if (cq) {
- count = queue_count(cq->queue, QUEUE_TYPE_TO_CLIENT);
- if (cqe < count) {
- rxe_dbg_cq(cq, "cqe(%d) < current # elements in queue (%d)\n",
- cqe, count);
- goto err1;
- }
- }
-
- return 0;
-
-err1:
- return -EINVAL;
-}
-
int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
int comp_vector, struct ib_udata *udata,
struct rxe_create_cq_resp __user *uresp)
struct rxe_av *rxe_get_av(struct rxe_pkt_info *pkt, struct rxe_ah **ahp);
/* rxe_cq.c */
-int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq,
- int cqe, int comp_vector);
-
int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
int comp_vector, struct ib_udata *udata,
struct rxe_create_cq_resp __user *uresp);
goto err_out;
}
- err = rxe_cq_chk_attr(rxe, NULL, attr->cqe, attr->comp_vector);
- if (err) {
- rxe_dbg_dev(rxe, "bad init attributes, err = %d\n", err);
- goto err_out;
- }
+ if (attr->cqe > rxe->attr.max_cqe)
+ return -EINVAL;
err = rxe_add_to_pool(&rxe->cq_pool, cq);
if (err) {
return err;
}
-static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
+static int rxe_resize_cq(struct ib_cq *ibcq, unsigned int cqe,
+ struct ib_udata *udata)
{
struct rxe_cq *cq = to_rcq(ibcq);
struct rxe_dev *rxe = to_rdev(ibcq->device);
uresp = udata->outbuf;
}
- err = rxe_cq_chk_attr(rxe, cq, cqe, 0);
- if (err) {
- rxe_dbg_cq(cq, "bad attr, err = %d\n", err);
- goto err_out;
- }
+ if (cqe > rxe->attr.max_cqe ||
+ cqe < queue_count(cq->queue, QUEUE_TYPE_TO_CLIENT))
+ return -EINVAL;
err = rxe_cq_resize_queue(cq, cqe, uresp, udata);
if (err) {
struct uverbs_attr_bundle *attrs);
int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period);
int (*destroy_cq)(struct ib_cq *cq, struct ib_udata *udata);
- int (*resize_user_cq)(struct ib_cq *cq, int cqe,
+ int (*resize_user_cq)(struct ib_cq *cq, unsigned int cqe,
struct ib_udata *udata);
/*
* pre_destroy_cq - Prevent a cq from generating any new work