+++ /dev/null
-From 036428f1dfbbe9833e79755c0c8e6b3caccb2abb Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Fri, 3 Mar 2023 16:16:24 -0600
-Subject: RDMA/rxe: Add error messages
-
-From: Bob Pearson <rpearsonhpe@gmail.com>
-
-[ Upstream commit 5bf944f24129cbc4b5828348bdce2db94ca9fbd6 ]
-
-This patch adds error and debug messages so that every interaction
-with rdma-core through a verbs API call or a completion error return
-will generate at least one error message backed up by debug messages
-with more detail.
-
-With dynamic debugging one can follow up after seeing an error message
-by turning on the appropriate debug messages.
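-
-For example, assuming debugfs is mounted at /sys/kernel/debug and
-CONFIG_DYNAMIC_DEBUG is enabled, all rxe debug output can be turned
-on with:
-
-	echo 'module rdma_rxe +p' > /sys/kernel/debug/dynamic_debug/control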
-
-Link: https://lore.kernel.org/r/20230303221623.8053-5-rpearsonhpe@gmail.com
-Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
-Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
-Stable-dep-of: 78b26a335310 ("RDMA/rxe: Remove tasklet call from rxe_cq.c")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- drivers/infiniband/sw/rxe/rxe_comp.c | 4 +
- drivers/infiniband/sw/rxe/rxe_loc.h | 1 -
- drivers/infiniband/sw/rxe/rxe_mr.c | 13 -
- drivers/infiniband/sw/rxe/rxe_resp.c | 4 +
- drivers/infiniband/sw/rxe/rxe_verbs.c | 828 +++++++++++++++++++-------
- 5 files changed, 609 insertions(+), 241 deletions(-)
-
-diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
-index 20737fec392bf..876057e3ee3c1 100644
---- a/drivers/infiniband/sw/rxe/rxe_comp.c
-+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
-@@ -428,6 +428,10 @@ static void make_send_cqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
- uwc->wc_flags = IB_WC_WITH_IMM;
- uwc->byte_len = wqe->dma.length;
- }
-+ } else {
-+ if (wqe->status != IB_WC_WR_FLUSH_ERR)
-+ rxe_err_qp(qp, "non-flush error status = %d",
-+ wqe->status);
- }
- }
-
-diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
-index 1bb0cb479eb12..839de34cf4c94 100644
---- a/drivers/infiniband/sw/rxe/rxe_loc.h
-+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
-@@ -80,7 +80,6 @@ int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length);
- int advance_dma_data(struct rxe_dma_info *dma, unsigned int length);
- int rxe_invalidate_mr(struct rxe_qp *qp, u32 key);
- int rxe_reg_fast_mr(struct rxe_qp *qp, struct rxe_send_wqe *wqe);
--int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
- void rxe_mr_cleanup(struct rxe_pool_elem *elem);
-
- /* rxe_mw.c */
-diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
-index b10aa1580a644..1e17f8086d59a 100644
---- a/drivers/infiniband/sw/rxe/rxe_mr.c
-+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
-@@ -722,19 +722,6 @@ int rxe_reg_fast_mr(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
- return 0;
- }
-
--int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
--{
-- struct rxe_mr *mr = to_rmr(ibmr);
--
-- /* See IBA 10.6.7.2.6 */
-- if (atomic_read(&mr->num_mw) > 0)
-- return -EINVAL;
--
-- rxe_cleanup(mr);
-- kfree_rcu(mr);
-- return 0;
--}
--
- void rxe_mr_cleanup(struct rxe_pool_elem *elem)
- {
- struct rxe_mr *mr = container_of(elem, typeof(*mr), elem);
-diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
-index 0cc1ba91d48cc..4217eec03a943 100644
---- a/drivers/infiniband/sw/rxe/rxe_resp.c
-+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
-@@ -1151,6 +1151,10 @@ static enum resp_states do_complete(struct rxe_qp *qp,
-
- wc->port_num = qp->attr.port_num;
- }
-+ } else {
-+ if (wc->status != IB_WC_WR_FLUSH_ERR)
-+ rxe_err_qp(qp, "non-flush error status = %d",
-+ wc->status);
- }
-
- /* have copy for srq and reference for !srq */
-diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
-index f178d0773ff20..84b53c070fc55 100644
---- a/drivers/infiniband/sw/rxe/rxe_verbs.c
-+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
-@@ -12,31 +12,48 @@
- #include "rxe_queue.h"
- #include "rxe_hw_counters.h"
-
--static int rxe_query_device(struct ib_device *dev,
-+static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr);
-+
-+/* dev */
-+static int rxe_query_device(struct ib_device *ibdev,
- struct ib_device_attr *attr,
-- struct ib_udata *uhw)
-+ struct ib_udata *udata)
- {
-- struct rxe_dev *rxe = to_rdev(dev);
-+ struct rxe_dev *rxe = to_rdev(ibdev);
-+ int err;
-+
-+ if (udata->inlen || udata->outlen) {
-+ rxe_dbg_dev(rxe, "malformed udata");
-+ err = -EINVAL;
-+ goto err_out;
-+ }
-
-- if (uhw->inlen || uhw->outlen)
-- return -EINVAL;
-+ memcpy(attr, &rxe->attr, sizeof(*attr));
-
-- *attr = rxe->attr;
- return 0;
-+
-+err_out:
-+ rxe_err_dev(rxe, "returned err = %d", err);
-+ return err;
- }
-
--static int rxe_query_port(struct ib_device *dev,
-+static int rxe_query_port(struct ib_device *ibdev,
- u32 port_num, struct ib_port_attr *attr)
- {
-- struct rxe_dev *rxe = to_rdev(dev);
-- int rc;
-+ struct rxe_dev *rxe = to_rdev(ibdev);
-+ int err, ret;
-
-- /* *attr being zeroed by the caller, avoid zeroing it here */
-- *attr = rxe->port.attr;
-+ if (port_num != 1) {
-+ err = -EINVAL;
-+ rxe_dbg_dev(rxe, "bad port_num = %d", port_num);
-+ goto err_out;
-+ }
-+
-+ memcpy(attr, &rxe->port.attr, sizeof(*attr));
-
- mutex_lock(&rxe->usdev_lock);
-- rc = ib_get_eth_speed(dev, port_num, &attr->active_speed,
-- &attr->active_width);
-+ ret = ib_get_eth_speed(ibdev, port_num, &attr->active_speed,
-+ &attr->active_width);
-
- if (attr->state == IB_PORT_ACTIVE)
- attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
-@@ -47,27 +64,45 @@ static int rxe_query_port(struct ib_device *dev,
-
- mutex_unlock(&rxe->usdev_lock);
-
-- return rc;
-+ return ret;
-+
-+err_out:
-+ rxe_err_dev(rxe, "returned err = %d", err);
-+ return err;
- }
-
--static int rxe_query_pkey(struct ib_device *device,
-+static int rxe_query_pkey(struct ib_device *ibdev,
- u32 port_num, u16 index, u16 *pkey)
- {
-- if (index > 0)
-- return -EINVAL;
-+ struct rxe_dev *rxe = to_rdev(ibdev);
-+ int err;
-+
-+ if (index != 0) {
-+ err = -EINVAL;
-+ rxe_dbg_dev(rxe, "bad pkey index = %d", index);
-+ goto err_out;
-+ }
-
- *pkey = IB_DEFAULT_PKEY_FULL;
- return 0;
-+
-+err_out:
-+ rxe_err_dev(rxe, "returned err = %d", err);
-+ return err;
- }
-
--static int rxe_modify_device(struct ib_device *dev,
-+static int rxe_modify_device(struct ib_device *ibdev,
- int mask, struct ib_device_modify *attr)
- {
-- struct rxe_dev *rxe = to_rdev(dev);
-+ struct rxe_dev *rxe = to_rdev(ibdev);
-+ int err;
-
- if (mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
-- IB_DEVICE_MODIFY_NODE_DESC))
-- return -EOPNOTSUPP;
-+ IB_DEVICE_MODIFY_NODE_DESC)) {
-+ err = -EOPNOTSUPP;
-+ rxe_dbg_dev(rxe, "unsupported mask = 0x%x", mask);
-+ goto err_out;
-+ }
-
- if (mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID)
- rxe->attr.sys_image_guid = cpu_to_be64(attr->sys_image_guid);
-@@ -78,16 +113,33 @@ static int rxe_modify_device(struct ib_device *dev,
- }
-
- return 0;
-+
-+err_out:
-+ rxe_err_dev(rxe, "returned err = %d", err);
-+ return err;
- }
-
--static int rxe_modify_port(struct ib_device *dev,
-- u32 port_num, int mask, struct ib_port_modify *attr)
-+static int rxe_modify_port(struct ib_device *ibdev, u32 port_num,
-+ int mask, struct ib_port_modify *attr)
- {
-- struct rxe_dev *rxe = to_rdev(dev);
-+ struct rxe_dev *rxe = to_rdev(ibdev);
- struct rxe_port *port;
-+ int err;
-
-- port = &rxe->port;
-+ if (port_num != 1) {
-+ err = -EINVAL;
-+ rxe_dbg_dev(rxe, "bad port_num = %d", port_num);
-+ goto err_out;
-+ }
-
-+ //TODO is shutdown useful
-+ if (mask & ~(IB_PORT_RESET_QKEY_CNTR)) {
-+ err = -EOPNOTSUPP;
-+ rxe_dbg_dev(rxe, "unsupported mask = 0x%x", mask);
-+ goto err_out;
-+ }
-+
-+ port = &rxe->port;
- port->attr.port_cap_flags |= attr->set_port_cap_mask;
- port->attr.port_cap_flags &= ~attr->clr_port_cap_mask;
-
-@@ -95,73 +147,125 @@ static int rxe_modify_port(struct ib_device *dev,
- port->attr.qkey_viol_cntr = 0;
-
- return 0;
--}
-
--static enum rdma_link_layer rxe_get_link_layer(struct ib_device *dev,
-- u32 port_num)
--{
-- return IB_LINK_LAYER_ETHERNET;
-+err_out:
-+ rxe_err_dev(rxe, "returned err = %d", err);
-+ return err;
- }
-
--static int rxe_alloc_ucontext(struct ib_ucontext *ibuc, struct ib_udata *udata)
-+static enum rdma_link_layer rxe_get_link_layer(struct ib_device *ibdev,
-+ u32 port_num)
- {
-- struct rxe_dev *rxe = to_rdev(ibuc->device);
-- struct rxe_ucontext *uc = to_ruc(ibuc);
-+ struct rxe_dev *rxe = to_rdev(ibdev);
-+ int err;
-
-- return rxe_add_to_pool(&rxe->uc_pool, uc);
--}
-+ if (port_num != 1) {
-+ err = -EINVAL;
-+ rxe_dbg_dev(rxe, "bad port_num = %d", port_num);
-+ goto err_out;
-+ }
-
--static void rxe_dealloc_ucontext(struct ib_ucontext *ibuc)
--{
-- struct rxe_ucontext *uc = to_ruc(ibuc);
-+ return IB_LINK_LAYER_ETHERNET;
-
-- rxe_cleanup(uc);
-+err_out:
-+ rxe_err_dev(rxe, "returned err = %d", err);
-+ return err;
- }
-
--static int rxe_port_immutable(struct ib_device *dev, u32 port_num,
-+static int rxe_port_immutable(struct ib_device *ibdev, u32 port_num,
- struct ib_port_immutable *immutable)
- {
-+ struct rxe_dev *rxe = to_rdev(ibdev);
-+ struct ib_port_attr attr = {};
- int err;
-- struct ib_port_attr attr;
-
-- immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
-+ if (port_num != 1) {
-+ err = -EINVAL;
-+ rxe_dbg_dev(rxe, "bad port_num = %d", port_num);
-+ goto err_out;
-+ }
-
-- err = ib_query_port(dev, port_num, &attr);
-+ err = ib_query_port(ibdev, port_num, &attr);
- if (err)
-- return err;
-+ goto err_out;
-
-+ immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
- immutable->pkey_tbl_len = attr.pkey_tbl_len;
- immutable->gid_tbl_len = attr.gid_tbl_len;
- immutable->max_mad_size = IB_MGMT_MAD_SIZE;
-
- return 0;
-+
-+err_out:
-+ rxe_err_dev(rxe, "returned err = %d", err);
-+ return err;
- }
-
-+/* uc */
-+static int rxe_alloc_ucontext(struct ib_ucontext *ibuc, struct ib_udata *udata)
-+{
-+ struct rxe_dev *rxe = to_rdev(ibuc->device);
-+ struct rxe_ucontext *uc = to_ruc(ibuc);
-+ int err;
-+
-+ err = rxe_add_to_pool(&rxe->uc_pool, uc);
-+ if (err)
-+ rxe_err_dev(rxe, "unable to create uc");
-+
-+ return err;
-+}
-+
-+static void rxe_dealloc_ucontext(struct ib_ucontext *ibuc)
-+{
-+ struct rxe_ucontext *uc = to_ruc(ibuc);
-+ int err;
-+
-+ err = rxe_cleanup(uc);
-+ if (err)
-+ rxe_err_uc(uc, "cleanup failed, err = %d", err);
-+}
-+
-+/* pd */
- static int rxe_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
- {
- struct rxe_dev *rxe = to_rdev(ibpd->device);
- struct rxe_pd *pd = to_rpd(ibpd);
-+ int err;
-+
-+ err = rxe_add_to_pool(&rxe->pd_pool, pd);
-+ if (err) {
-+ rxe_dbg_dev(rxe, "unable to alloc pd");
-+ goto err_out;
-+ }
-
-- return rxe_add_to_pool(&rxe->pd_pool, pd);
-+ return 0;
-+
-+err_out:
-+ rxe_err_dev(rxe, "returned err = %d", err);
-+ return err;
- }
-
- static int rxe_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
- {
- struct rxe_pd *pd = to_rpd(ibpd);
-+ int err;
-+
-+ err = rxe_cleanup(pd);
-+ if (err)
-+ rxe_err_pd(pd, "cleanup failed, err = %d", err);
-
-- rxe_cleanup(pd);
- return 0;
- }
-
-+/* ah */
- static int rxe_create_ah(struct ib_ah *ibah,
- struct rdma_ah_init_attr *init_attr,
- struct ib_udata *udata)
--
- {
- struct rxe_dev *rxe = to_rdev(ibah->device);
- struct rxe_ah *ah = to_rah(ibah);
- struct rxe_create_ah_resp __user *uresp = NULL;
-- int err;
-+ int err, cleanup_err;
-
- if (udata) {
- /* test if new user provider */
-@@ -174,16 +278,18 @@ static int rxe_create_ah(struct ib_ah *ibah,
-
- err = rxe_add_to_pool_ah(&rxe->ah_pool, ah,
- init_attr->flags & RDMA_CREATE_AH_SLEEPABLE);
-- if (err)
-- return err;
-+ if (err) {
-+ rxe_dbg_dev(rxe, "unable to create ah");
-+ goto err_out;
-+ }
-
- /* create index > 0 */
- ah->ah_num = ah->elem.index;
-
- err = rxe_ah_chk_attr(ah, init_attr->ah_attr);
- if (err) {
-- rxe_cleanup(ah);
-- return err;
-+ rxe_dbg_ah(ah, "bad attr");
-+ goto err_cleanup;
- }
-
- if (uresp) {
-@@ -191,8 +297,9 @@ static int rxe_create_ah(struct ib_ah *ibah,
- err = copy_to_user(&uresp->ah_num, &ah->ah_num,
- sizeof(uresp->ah_num));
- if (err) {
-- rxe_cleanup(ah);
-- return -EFAULT;
-+ err = -EFAULT;
-+ rxe_dbg_ah(ah, "unable to copy to user");
-+ goto err_cleanup;
- }
- } else if (ah->is_user) {
- /* only if old user provider */
-@@ -203,19 +310,34 @@ static int rxe_create_ah(struct ib_ah *ibah,
- rxe_finalize(ah);
-
- return 0;
-+
-+err_cleanup:
-+ cleanup_err = rxe_cleanup(ah);
-+ if (cleanup_err)
-+ rxe_err_ah(ah, "cleanup failed, err = %d", cleanup_err);
-+err_out:
-+ rxe_err_ah(ah, "returned err = %d", err);
-+ return err;
- }
-
- static int rxe_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
- {
-- int err;
- struct rxe_ah *ah = to_rah(ibah);
-+ int err;
-
- err = rxe_ah_chk_attr(ah, attr);
-- if (err)
-- return err;
-+ if (err) {
-+ rxe_dbg_ah(ah, "bad attr");
-+ goto err_out;
-+ }
-
- rxe_init_av(attr, &ah->av);
-+
- return 0;
-+
-+err_out:
-+ rxe_err_ah(ah, "returned err = %d", err);
-+ return err;
- }
-
- static int rxe_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
-@@ -225,92 +347,77 @@ static int rxe_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
- memset(attr, 0, sizeof(*attr));
- attr->type = ibah->type;
- rxe_av_to_attr(&ah->av, attr);
-+
- return 0;
- }
-
- static int rxe_destroy_ah(struct ib_ah *ibah, u32 flags)
- {
- struct rxe_ah *ah = to_rah(ibah);
-+ int err;
-
-- rxe_cleanup_ah(ah, flags & RDMA_DESTROY_AH_SLEEPABLE);
--
-- return 0;
--}
--
--static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
--{
-- int i;
-- u32 length;
-- struct rxe_recv_wqe *recv_wqe;
-- int num_sge = ibwr->num_sge;
-- int full;
--
-- full = queue_full(rq->queue, QUEUE_TYPE_FROM_ULP);
-- if (unlikely(full))
-- return -ENOMEM;
--
-- if (unlikely(num_sge > rq->max_sge))
-- return -EINVAL;
--
-- length = 0;
-- for (i = 0; i < num_sge; i++)
-- length += ibwr->sg_list[i].length;
--
-- recv_wqe = queue_producer_addr(rq->queue, QUEUE_TYPE_FROM_ULP);
-- recv_wqe->wr_id = ibwr->wr_id;
--
-- memcpy(recv_wqe->dma.sge, ibwr->sg_list,
-- num_sge * sizeof(struct ib_sge));
--
-- recv_wqe->dma.length = length;
-- recv_wqe->dma.resid = length;
-- recv_wqe->dma.num_sge = num_sge;
-- recv_wqe->dma.cur_sge = 0;
-- recv_wqe->dma.sge_offset = 0;
--
-- queue_advance_producer(rq->queue, QUEUE_TYPE_FROM_ULP);
-+ err = rxe_cleanup_ah(ah, flags & RDMA_DESTROY_AH_SLEEPABLE);
-+ if (err)
-+ rxe_err_ah(ah, "cleanup failed, err = %d", err);
-
- return 0;
- }
-
-+/* srq */
- static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init,
- struct ib_udata *udata)
- {
-- int err;
- struct rxe_dev *rxe = to_rdev(ibsrq->device);
- struct rxe_pd *pd = to_rpd(ibsrq->pd);
- struct rxe_srq *srq = to_rsrq(ibsrq);
- struct rxe_create_srq_resp __user *uresp = NULL;
-+ int err, cleanup_err;
-
- if (udata) {
-- if (udata->outlen < sizeof(*uresp))
-- return -EINVAL;
-+ if (udata->outlen < sizeof(*uresp)) {
-+ err = -EINVAL;
-+ rxe_err_dev(rxe, "malformed udata");
-+ goto err_out;
-+ }
- uresp = udata->outbuf;
- }
-
-- if (init->srq_type != IB_SRQT_BASIC)
-- return -EOPNOTSUPP;
-+ if (init->srq_type != IB_SRQT_BASIC) {
-+ err = -EOPNOTSUPP;
-+ rxe_dbg_dev(rxe, "srq type = %d, not supported",
-+ init->srq_type);
-+ goto err_out;
-+ }
-
- err = rxe_srq_chk_init(rxe, init);
-- if (err)
-- return err;
-+ if (err) {
-+ rxe_dbg_dev(rxe, "invalid init attributes");
-+ goto err_out;
-+ }
-
- err = rxe_add_to_pool(&rxe->srq_pool, srq);
-- if (err)
-- return err;
-+ if (err) {
-+ rxe_dbg_dev(rxe, "unable to create srq, err = %d", err);
-+ goto err_out;
-+ }
-
- rxe_get(pd);
- srq->pd = pd;
-
- err = rxe_srq_from_init(rxe, srq, init, udata, uresp);
-- if (err)
-+ if (err) {
-+ rxe_dbg_srq(srq, "create srq failed, err = %d", err);
- goto err_cleanup;
-+ }
-
- return 0;
-
- err_cleanup:
-- rxe_cleanup(srq);
--
-+ cleanup_err = rxe_cleanup(srq);
-+ if (cleanup_err)
-+ rxe_err_srq(srq, "cleanup failed, err = %d", cleanup_err);
-+err_out:
-+ rxe_err_dev(rxe, "returned err = %d", err);
- return err;
- }
-
-@@ -318,46 +425,64 @@ static int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
- enum ib_srq_attr_mask mask,
- struct ib_udata *udata)
- {
-- int err;
- struct rxe_srq *srq = to_rsrq(ibsrq);
- struct rxe_dev *rxe = to_rdev(ibsrq->device);
-- struct rxe_modify_srq_cmd ucmd = {};
-+ struct rxe_modify_srq_cmd cmd = {};
-+ int err;
-
- if (udata) {
-- if (udata->inlen < sizeof(ucmd))
-- return -EINVAL;
-+ if (udata->inlen < sizeof(cmd)) {
-+ err = -EINVAL;
-+ rxe_dbg_srq(srq, "malformed udata");
-+ goto err_out;
-+ }
-
-- err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
-- if (err)
-- return err;
-+ err = ib_copy_from_udata(&cmd, udata, sizeof(cmd));
-+ if (err) {
-+ err = -EFAULT;
-+ rxe_dbg_srq(srq, "unable to read udata");
-+ goto err_out;
-+ }
- }
-
- err = rxe_srq_chk_attr(rxe, srq, attr, mask);
-- if (err)
-- return err;
-+ if (err) {
-+ rxe_dbg_srq(srq, "bad init attributes");
-+ goto err_out;
-+ }
-+
-+ err = rxe_srq_from_attr(rxe, srq, attr, mask, &cmd, udata);
-+ if (err) {
-+ rxe_dbg_srq(srq, "bad attr");
-+ goto err_out;
-+ }
-+
-+ return 0;
-
-- return rxe_srq_from_attr(rxe, srq, attr, mask, &ucmd, udata);
-+err_out:
-+ rxe_err_srq(srq, "returned err = %d", err);
-+ return err;
- }
-
- static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
- {
- struct rxe_srq *srq = to_rsrq(ibsrq);
-+ int err;
-
-- if (srq->error)
-- return -EINVAL;
-+ if (srq->error) {
-+ err = -EINVAL;
-+ rxe_dbg_srq(srq, "srq in error state");
-+ goto err_out;
-+ }
-
- attr->max_wr = srq->rq.queue->buf->index_mask;
- attr->max_sge = srq->rq.max_sge;
- attr->srq_limit = srq->limit;
- return 0;
--}
--
--static int rxe_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
--{
-- struct rxe_srq *srq = to_rsrq(ibsrq);
-
-- rxe_cleanup(srq);
-- return 0;
-+err_out:
-+ rxe_err_srq(srq, "returned err = %d", err);
-+ return err;
- }
-
- static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
-@@ -378,76 +503,116 @@ static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
-
- spin_unlock_irqrestore(&srq->rq.producer_lock, flags);
-
-- if (err)
-+ if (err) {
- *bad_wr = wr;
-+ rxe_err_srq(srq, "returned err = %d", err);
-+ }
-
- return err;
- }
-
-+static int rxe_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
-+{
-+ struct rxe_srq *srq = to_rsrq(ibsrq);
-+ int err;
-+
-+ err = rxe_cleanup(srq);
-+ if (err)
-+ rxe_err_srq(srq, "cleanup failed, err = %d", err);
-+
-+ return 0;
-+}
-+
-+/* qp */
- static int rxe_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init,
- struct ib_udata *udata)
- {
-- int err;
- struct rxe_dev *rxe = to_rdev(ibqp->device);
- struct rxe_pd *pd = to_rpd(ibqp->pd);
- struct rxe_qp *qp = to_rqp(ibqp);
- struct rxe_create_qp_resp __user *uresp = NULL;
-+ int err, cleanup_err;
-
- if (udata) {
-- if (udata->outlen < sizeof(*uresp))
-- return -EINVAL;
-- uresp = udata->outbuf;
-- }
--
-- if (init->create_flags)
-- return -EOPNOTSUPP;
--
-- err = rxe_qp_chk_init(rxe, init);
-- if (err)
-- return err;
-+ if (udata->inlen) {
-+ err = -EINVAL;
-+ rxe_dbg_dev(rxe, "malformed udata, err = %d", err);
-+ goto err_out;
-+ }
-
-- if (udata) {
-- if (udata->inlen)
-- return -EINVAL;
-+ if (udata->outlen < sizeof(*uresp)) {
-+ err = -EINVAL;
-+ rxe_dbg_dev(rxe, "malformed udata, err = %d", err);
-+ goto err_out;
-+ }
-
- qp->is_user = true;
-+ uresp = udata->outbuf;
- } else {
- qp->is_user = false;
- }
-
-+ if (init->create_flags) {
-+ err = -EOPNOTSUPP;
-+ rxe_dbg_dev(rxe, "unsupported create_flags, err = %d", err);
-+ goto err_out;
-+ }
-+
-+ err = rxe_qp_chk_init(rxe, init);
-+ if (err) {
-+ rxe_dbg_dev(rxe, "bad init attr, err = %d", err);
-+ goto err_out;
-+ }
-+
- err = rxe_add_to_pool(&rxe->qp_pool, qp);
-- if (err)
-- return err;
-+ if (err) {
-+ rxe_dbg_dev(rxe, "unable to create qp, err = %d", err);
-+ goto err_out;
-+ }
-
- err = rxe_qp_from_init(rxe, qp, pd, init, uresp, ibqp->pd, udata);
-- if (err)
-- goto qp_init;
-+ if (err) {
-+ rxe_dbg_qp(qp, "create qp failed, err = %d", err);
-+ goto err_cleanup;
-+ }
-
- rxe_finalize(qp);
- return 0;
-
--qp_init:
-- rxe_cleanup(qp);
-+err_cleanup:
-+ cleanup_err = rxe_cleanup(qp);
-+ if (cleanup_err)
-+ rxe_err_qp(qp, "cleanup failed, err = %d", cleanup_err);
-+err_out:
-+ rxe_err_dev(rxe, "returned err = %d", err);
- return err;
- }
-
- static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
- int mask, struct ib_udata *udata)
- {
-- int err;
- struct rxe_dev *rxe = to_rdev(ibqp->device);
- struct rxe_qp *qp = to_rqp(ibqp);
-+ int err;
-
-- if (mask & ~IB_QP_ATTR_STANDARD_BITS)
-- return -EOPNOTSUPP;
-+ if (mask & ~IB_QP_ATTR_STANDARD_BITS) {
-+ err = -EOPNOTSUPP;
-+ rxe_dbg_qp(qp, "unsupported mask = 0x%x, err = %d",
-+ mask, err);
-+ goto err_out;
-+ }
-
- err = rxe_qp_chk_attr(rxe, qp, attr, mask);
-- if (err)
-- return err;
-+ if (err) {
-+ rxe_dbg_qp(qp, "bad mask/attr, err = %d", err);
-+ goto err_out;
-+ }
-
- err = rxe_qp_from_attr(qp, attr, mask, udata);
-- if (err)
-- return err;
-+ if (err) {
-+ rxe_dbg_qp(qp, "modify qp failed, err = %d", err);
-+ goto err_out;
-+ }
-
- if ((mask & IB_QP_AV) && (attr->ah_attr.ah_flags & IB_AH_GRH))
- qp->src_port = rdma_get_udp_sport(attr->ah_attr.grh.flow_label,
-@@ -455,6 +620,10 @@ static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
- qp->attr.dest_qp_num);
-
- return 0;
-+
-+err_out:
-+ rxe_err_qp(qp, "returned err = %d", err);
-+ return err;
- }
-
- static int rxe_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
-@@ -471,38 +640,59 @@ static int rxe_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
- static int rxe_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
- {
- struct rxe_qp *qp = to_rqp(ibqp);
-- int ret;
-+ int err;
-+
-+ err = rxe_qp_chk_destroy(qp);
-+ if (err) {
-+ rxe_dbg_qp(qp, "unable to destroy qp, err = %d", err);
-+ goto err_out;
-+ }
-
-- ret = rxe_qp_chk_destroy(qp);
-- if (ret)
-- return ret;
-+ err = rxe_cleanup(qp);
-+ if (err)
-+ rxe_err_qp(qp, "cleanup failed, err = %d", err);
-
-- rxe_cleanup(qp);
- return 0;
-+
-+err_out:
-+ rxe_err_qp(qp, "returned err = %d", err);
-+ return err;
- }
-
-+/* send wr */
- static int validate_send_wr(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
- unsigned int mask, unsigned int length)
- {
- int num_sge = ibwr->num_sge;
- struct rxe_sq *sq = &qp->sq;
-
-- if (unlikely(num_sge > sq->max_sge))
-- return -EINVAL;
-+ if (unlikely(num_sge > sq->max_sge)) {
-+ rxe_dbg_qp(qp, "num_sge > max_sge");
-+ goto err_out;
-+ }
-
- if (unlikely(mask & WR_ATOMIC_MASK)) {
-- if (length < 8)
-- return -EINVAL;
-+ if (length != 8) {
-+ rxe_dbg_qp(qp, "atomic length != 8");
-+ goto err_out;
-+ }
-
-- if (atomic_wr(ibwr)->remote_addr & 0x7)
-- return -EINVAL;
-+ if (atomic_wr(ibwr)->remote_addr & 0x7) {
-+ rxe_dbg_qp(qp, "misaligned atomic address");
-+ goto err_out;
-+ }
- }
-
- if (unlikely((ibwr->send_flags & IB_SEND_INLINE) &&
-- (length > sq->max_inline)))
-- return -EINVAL;
-+ (length > sq->max_inline))) {
-+ rxe_dbg_qp(qp, "inline length too big");
-+ goto err_out;
-+ }
-
- return 0;
-+
-+err_out:
-+ return -EINVAL;
- }
-
- static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
-@@ -550,12 +740,12 @@ static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
- break;
- case IB_WR_LOCAL_INV:
- wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
-- break;
-+ break;
- case IB_WR_REG_MR:
- wr->wr.reg.mr = reg_wr(ibwr)->mr;
- wr->wr.reg.key = reg_wr(ibwr)->key;
- wr->wr.reg.access = reg_wr(ibwr)->access;
-- break;
-+ break;
- default:
- break;
- }
-@@ -624,9 +814,9 @@ static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
- spin_lock_irqsave(&qp->sq.sq_lock, flags);
-
- full = queue_full(sq->queue, QUEUE_TYPE_FROM_ULP);
--
- if (unlikely(full)) {
- spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
-+ rxe_dbg_qp(qp, "queue full");
- return -ENOMEM;
- }
-
-@@ -652,6 +842,7 @@ static int rxe_post_send_kernel(struct rxe_qp *qp, const struct ib_send_wr *wr,
- while (wr) {
- mask = wr_opcode_mask(wr->opcode, qp);
- if (unlikely(!mask)) {
-+ rxe_dbg_qp(qp, "bad wr opcode for qp");
- err = -EINVAL;
- *bad_wr = wr;
- break;
-@@ -659,6 +850,7 @@ static int rxe_post_send_kernel(struct rxe_qp *qp, const struct ib_send_wr *wr,
-
- if (unlikely((wr->send_flags & IB_SEND_INLINE) &&
- !(mask & WR_INLINE_MASK))) {
-+ rxe_dbg_qp(qp, "opcode doesn't support inline data");
- err = -EINVAL;
- *bad_wr = wr;
- break;
-@@ -669,17 +861,26 @@ static int rxe_post_send_kernel(struct rxe_qp *qp, const struct ib_send_wr *wr,
- length = 0;
- for (i = 0; i < wr->num_sge; i++)
- length += wr->sg_list[i].length;
-+ if (length > 1<<31) {
-+ err = -EINVAL;
-+ rxe_dbg_qp(qp, "message length too long");
-+ *bad_wr = wr;
-+ break;
-+ }
-
- err = post_one_send(qp, wr, mask, length);
--
- if (err) {
- *bad_wr = wr;
- break;
- }
-+
- wr = next;
- }
-
-- rxe_sched_task(&qp->req.task);
-+ /* if we didn't post anything there's nothing to do */
-+ if (!err)
-+ rxe_sched_task(&qp->req.task);
-+
- if (unlikely(qp->req.state == QP_STATE_ERROR))
- rxe_sched_task(&qp->comp.task);
-
-@@ -690,23 +891,90 @@ static int rxe_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
- const struct ib_send_wr **bad_wr)
- {
- struct rxe_qp *qp = to_rqp(ibqp);
-+ int err;
-
- if (unlikely(!qp->valid)) {
- *bad_wr = wr;
-- return -EINVAL;
-+ err = -EINVAL;
-+ rxe_dbg_qp(qp, "qp destroyed");
-+ goto err_out;
- }
-
- if (unlikely(qp->req.state < QP_STATE_READY)) {
- *bad_wr = wr;
-- return -EINVAL;
-+ err = -EINVAL;
-+ rxe_dbg_qp(qp, "qp not ready to send");
-+ goto err_out;
- }
-
- if (qp->is_user) {
- /* Utilize process context to do protocol processing */
- rxe_run_task(&qp->req.task);
-- return 0;
-- } else
-- return rxe_post_send_kernel(qp, wr, bad_wr);
-+ } else {
-+ err = rxe_post_send_kernel(qp, wr, bad_wr);
-+ if (err)
-+ goto err_out;
-+ }
-+
-+ return 0;
-+
-+err_out:
-+ rxe_err_qp(qp, "returned err = %d", err);
-+ return err;
-+}
-+
-+/* recv wr */
-+static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
-+{
-+ int i;
-+ unsigned long length;
-+ struct rxe_recv_wqe *recv_wqe;
-+ int num_sge = ibwr->num_sge;
-+ int full;
-+ int err;
-+
-+ full = queue_full(rq->queue, QUEUE_TYPE_FROM_ULP);
-+ if (unlikely(full)) {
-+ err = -ENOMEM;
-+ rxe_dbg("queue full");
-+ goto err_out;
-+ }
-+
-+ if (unlikely(num_sge > rq->max_sge)) {
-+ err = -EINVAL;
-+ rxe_dbg("bad num_sge > max_sge");
-+ goto err_out;
-+ }
-+
-+ length = 0;
-+ for (i = 0; i < num_sge; i++)
-+ length += ibwr->sg_list[i].length;
-+
-+ /* IBA max message size is 2^31 */
-+ if (length >= (1UL<<31)) {
-+ err = -EINVAL;
-+ rxe_dbg("message length too long");
-+ goto err_out;
-+ }
-+
-+ recv_wqe = queue_producer_addr(rq->queue, QUEUE_TYPE_FROM_ULP);
-+
-+ recv_wqe->wr_id = ibwr->wr_id;
-+ recv_wqe->dma.length = length;
-+ recv_wqe->dma.resid = length;
-+ recv_wqe->dma.num_sge = num_sge;
-+ recv_wqe->dma.cur_sge = 0;
-+ recv_wqe->dma.sge_offset = 0;
-+ memcpy(recv_wqe->dma.sge, ibwr->sg_list,
-+ num_sge * sizeof(struct ib_sge));
-+
-+ queue_advance_producer(rq->queue, QUEUE_TYPE_FROM_ULP);
-+
-+ return 0;
-+
-+err_out:
-+ rxe_dbg("returned err = %d", err);
-+ return err;
- }
-
- static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
-@@ -719,12 +987,16 @@ static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
-
- if (unlikely((qp_state(qp) < IB_QPS_INIT) || !qp->valid)) {
- *bad_wr = wr;
-- return -EINVAL;
-+ err = -EINVAL;
-+ rxe_dbg_qp(qp, "qp destroyed or not ready to post recv");
-+ goto err_out;
- }
-
- if (unlikely(qp->srq)) {
- *bad_wr = wr;
-- return -EINVAL;
-+ err = -EINVAL;
-+ rxe_dbg_qp(qp, "use post_srq_recv instead");
-+ goto err_out;
- }
-
- spin_lock_irqsave(&rq->producer_lock, flags);
-@@ -743,73 +1015,101 @@ static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
- if (qp->resp.state == QP_STATE_ERROR)
- rxe_sched_task(&qp->resp.task);
-
-+err_out:
-+ if (err)
-+ rxe_err_qp(qp, "returned err = %d", err);
-+
- return err;
- }
-
-+/* cq */
- static int rxe_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
- struct ib_udata *udata)
- {
-- int err;
- struct ib_device *dev = ibcq->device;
- struct rxe_dev *rxe = to_rdev(dev);
- struct rxe_cq *cq = to_rcq(ibcq);
- struct rxe_create_cq_resp __user *uresp = NULL;
-+ int err, cleanup_err;
-
- if (udata) {
-- if (udata->outlen < sizeof(*uresp))
-- return -EINVAL;
-+ if (udata->outlen < sizeof(*uresp)) {
-+ err = -EINVAL;
-+ rxe_dbg_dev(rxe, "malformed udata, err = %d", err);
-+ goto err_out;
-+ }
- uresp = udata->outbuf;
- }
-
-- if (attr->flags)
-- return -EOPNOTSUPP;
-+ if (attr->flags) {
-+ err = -EOPNOTSUPP;
-+ rxe_dbg_dev(rxe, "bad attr->flags, err = %d", err);
-+ goto err_out;
-+ }
-
- err = rxe_cq_chk_attr(rxe, NULL, attr->cqe, attr->comp_vector);
-- if (err)
-- return err;
-+ if (err) {
-+ rxe_dbg_dev(rxe, "bad init attributes, err = %d", err);
-+ goto err_out;
-+ }
-+
-+ err = rxe_add_to_pool(&rxe->cq_pool, cq);
-+ if (err) {
-+ rxe_dbg_dev(rxe, "unable to create cq, err = %d", err);
-+ goto err_out;
-+ }
-
- err = rxe_cq_from_init(rxe, cq, attr->cqe, attr->comp_vector, udata,
- uresp);
-- if (err)
-- return err;
--
-- return rxe_add_to_pool(&rxe->cq_pool, cq);
--}
--
--static int rxe_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
--{
-- struct rxe_cq *cq = to_rcq(ibcq);
--
-- /* See IBA C11-17: The CI shall return an error if this Verb is
-- * invoked while a Work Queue is still associated with the CQ.
-- */
-- if (atomic_read(&cq->num_wq))
-- return -EINVAL;
--
-- rxe_cq_disable(cq);
-+ if (err) {
-+ rxe_dbg_cq(cq, "create cq failed, err = %d", err);
-+ goto err_cleanup;
-+ }
-
-- rxe_cleanup(cq);
- return 0;
-+
-+err_cleanup:
-+ cleanup_err = rxe_cleanup(cq);
-+ if (cleanup_err)
-+ rxe_err_cq(cq, "cleanup failed, err = %d", cleanup_err);
-+err_out:
-+ rxe_err_dev(rxe, "returned err = %d", err);
-+ return err;
- }
-
- static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
- {
-- int err;
- struct rxe_cq *cq = to_rcq(ibcq);
- struct rxe_dev *rxe = to_rdev(ibcq->device);
- struct rxe_resize_cq_resp __user *uresp = NULL;
-+ int err;
-
- if (udata) {
-- if (udata->outlen < sizeof(*uresp))
-- return -EINVAL;
-+ if (udata->outlen < sizeof(*uresp)) {
-+ err = -EINVAL;
-+ rxe_dbg_cq(cq, "malformed udata");
-+ goto err_out;
-+ }
- uresp = udata->outbuf;
- }
-
- err = rxe_cq_chk_attr(rxe, cq, cqe, 0);
-- if (err)
-- return err;
-+ if (err) {
-+ rxe_dbg_cq(cq, "bad attr, err = %d", err);
-+ goto err_out;
-+ }
-
-- return rxe_cq_resize_queue(cq, cqe, uresp, udata);
-+ err = rxe_cq_resize_queue(cq, cqe, uresp, udata);
-+ if (err) {
-+ rxe_dbg_cq(cq, "resize cq failed, err = %d", err);
-+ goto err_out;
-+ }
-+
-+ return 0;
-+
-+err_out:
-+ rxe_err_cq(cq, "returned err = %d", err);
-+ return err;
- }
-
- static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
-@@ -823,7 +1123,7 @@ static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
- for (i = 0; i < num_entries; i++) {
- cqe = queue_head(cq->queue, QUEUE_TYPE_TO_ULP);
- if (!cqe)
-- break;
-+ break; /* queue empty */
-
- memcpy(wc++, &cqe->ibwc, sizeof(*wc));
- queue_advance_consumer(cq->queue, QUEUE_TYPE_TO_ULP);
-@@ -864,6 +1164,34 @@ static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
- return ret;
- }
-
-+static int rxe_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
-+{
-+ struct rxe_cq *cq = to_rcq(ibcq);
-+ int err;
-+
-+ /* See IBA C11-17: The CI shall return an error if this Verb is
-+ * invoked while a Work Queue is still associated with the CQ.
-+ */
-+ if (atomic_read(&cq->num_wq)) {
-+ err = -EINVAL;
-+ rxe_dbg_cq(cq, "still in use");
-+ goto err_out;
-+ }
-+
-+ rxe_cq_disable(cq);
-+
-+ err = rxe_cleanup(cq);
-+ if (err)
-+ rxe_err_cq(cq, "cleanup failed, err = %d", err);
-+
-+ return 0;
-+
-+err_out:
-+ rxe_err_cq(cq, "returned err = %d", err);
-+ return err;
-+}
-+
-+/* mr */
- static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
- {
- struct rxe_dev *rxe = to_rdev(ibpd->device);
-@@ -874,12 +1202,15 @@ static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
- mr = kzalloc(sizeof(*mr), GFP_KERNEL);
- if (!mr) {
- err = -ENOMEM;
-+ rxe_dbg_dev(rxe, "no memory for mr");
- goto err_out;
- }
-
- err = rxe_add_to_pool(&rxe->mr_pool, mr);
-- if (err)
-+ if (err) {
-+ rxe_dbg_dev(rxe, "unable to create mr");
- goto err_free;
-+ }
-
- rxe_get(pd);
- mr->ibmr.pd = ibpd;
-@@ -892,46 +1223,53 @@ static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
- err_free:
- kfree(mr);
- err_out:
-+ rxe_err_pd(pd, "returned err = %d", err);
- return ERR_PTR(err);
- }
-
--static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
-- u64 start,
-- u64 length,
-- u64 iova,
-- int access, struct ib_udata *udata)
-+static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd, u64 start,
-+ u64 length, u64 iova, int access,
-+ struct ib_udata *udata)
- {
-- int err;
- struct rxe_dev *rxe = to_rdev(ibpd->device);
- struct rxe_pd *pd = to_rpd(ibpd);
- struct rxe_mr *mr;
-+ int err, cleanup_err;
-
- mr = kzalloc(sizeof(*mr), GFP_KERNEL);
- if (!mr) {
- err = -ENOMEM;
-+ rxe_dbg_pd(pd, "no memory for mr");
- goto err_out;
- }
-
- err = rxe_add_to_pool(&rxe->mr_pool, mr);
-- if (err)
-+ if (err) {
-+ rxe_dbg_pd(pd, "unable to create mr");
- goto err_free;
-+ }
-
- rxe_get(pd);
- mr->ibmr.pd = ibpd;
- mr->ibmr.device = ibpd->device;
-
- err = rxe_mr_init_user(rxe, start, length, iova, access, mr);
-- if (err)
-+ if (err) {
-+ rxe_dbg_mr(mr, "reg_user_mr failed, err = %d", err);
- goto err_cleanup;
-+ }
-
- rxe_finalize(mr);
- return &mr->ibmr;
-
- err_cleanup:
-- rxe_cleanup(mr);
-+ cleanup_err = rxe_cleanup(mr);
-+ if (cleanup_err)
-+ rxe_err_mr(mr, "cleanup failed, err = %d", cleanup_err);
- err_free:
- kfree(mr);
- err_out:
-+ rxe_err_pd(pd, "returned err = %d", err);
- return ERR_PTR(err);
- }
-
-@@ -941,40 +1279,76 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
- struct rxe_dev *rxe = to_rdev(ibpd->device);
- struct rxe_pd *pd = to_rpd(ibpd);
- struct rxe_mr *mr;
-- int err;
-+ int err, cleanup_err;
-
-- if (mr_type != IB_MR_TYPE_MEM_REG)
-- return ERR_PTR(-EINVAL);
-+ if (mr_type != IB_MR_TYPE_MEM_REG) {
-+ err = -EINVAL;
-+ rxe_dbg_pd(pd, "mr type %d not supported, err = %d",
-+ mr_type, err);
-+ goto err_out;
-+ }
-
- mr = kzalloc(sizeof(*mr), GFP_KERNEL);
- if (!mr) {
- err = -ENOMEM;
-+ rxe_dbg_mr(mr, "no memory for mr");
- goto err_out;
- }
-
- err = rxe_add_to_pool(&rxe->mr_pool, mr);
-- if (err)
-+ if (err) {
-+ rxe_dbg_mr(mr, "unable to create mr, err = %d", err);
- goto err_free;
-+ }
-
- rxe_get(pd);
- mr->ibmr.pd = ibpd;
- mr->ibmr.device = ibpd->device;
-
- err = rxe_mr_init_fast(max_num_sg, mr);
-- if (err)
-+ if (err) {
-+ rxe_dbg_mr(mr, "alloc_mr failed, err = %d", err);
- goto err_cleanup;
-+ }
-
- rxe_finalize(mr);
- return &mr->ibmr;
-
- err_cleanup:
-- rxe_cleanup(mr);
-+ cleanup_err = rxe_cleanup(mr);
-+ if (cleanup_err)
-+ rxe_err_mr(mr, "cleanup failed, err = %d", err);
- err_free:
- kfree(mr);
- err_out:
-+ rxe_err_pd(pd, "returned err = %d", err);
- return ERR_PTR(err);
- }
-
-+static int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
-+{
-+ struct rxe_mr *mr = to_rmr(ibmr);
-+ int err, cleanup_err;
-+
-+ /* See IBA 10.6.7.2.6 */
-+ if (atomic_read(&mr->num_mw) > 0) {
-+ err = -EINVAL;
-+ rxe_dbg_mr(mr, "mr has mw's bound");
-+ goto err_out;
-+ }
-+
-+ cleanup_err = rxe_cleanup(mr);
-+ if (cleanup_err)
-+ rxe_err_mr(mr, "cleanup failed, err = %d", cleanup_err);
-+
-+ kfree_rcu(mr);
-+ return 0;
-+
-+err_out:
-+ rxe_err_mr(mr, "returned err = %d", err);
-+ return err;
-+}
-+
- static ssize_t parent_show(struct device *device,
- struct device_attribute *attr, char *buf)
- {
---
-2.39.2
-
+++ /dev/null
-From b9ae89a1169d2759138ce6300d0c5bc2c0855bce Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Fri, 3 Mar 2023 16:16:22 -0600
-Subject: RDMA/rxe: Change rxe_dbg to rxe_dbg_dev
-
-From: Bob Pearson <rpearsonhpe@gmail.com>
-
-[ Upstream commit a9fb3287211e64b94ceb2b6b4791cc2b829d0d56 ]
-
-Replace the name rxe_dbg with rxe_dbg_dev, which better matches
-the remaining rxe_dbg_xxx macros for debug messages that take an
-rxe device parameter. Reuse the name rxe_dbg for debug messages
-that do not have an rxe device parameter.
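-
-After this patch the two forms are, as defined in rxe.h below:
-
-	rxe_dbg(fmt, ...)		/* no rxe device available */
-	rxe_dbg_dev(rxe, fmt, ...)	/* takes an rxe device */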
-
-Link: https://lore.kernel.org/r/20230303221623.8053-3-rpearsonhpe@gmail.com
-Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
-Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
-Stable-dep-of: 78b26a335310 ("RDMA/rxe: Remove tasklet call from rxe_cq.c")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- drivers/infiniband/sw/rxe/rxe.c | 2 +-
- drivers/infiniband/sw/rxe/rxe.h | 3 ++-
- drivers/infiniband/sw/rxe/rxe_cq.c | 6 +++---
- drivers/infiniband/sw/rxe/rxe_icrc.c | 4 ++--
- drivers/infiniband/sw/rxe/rxe_mmap.c | 6 +++---
- drivers/infiniband/sw/rxe/rxe_net.c | 4 ++--
- drivers/infiniband/sw/rxe/rxe_qp.c | 16 ++++++++--------
- drivers/infiniband/sw/rxe/rxe_srq.c | 6 +++---
- drivers/infiniband/sw/rxe/rxe_verbs.c | 2 +-
- 9 files changed, 25 insertions(+), 24 deletions(-)
-
-diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
-index a3f05fdd9fac2..d57ba7a5964b9 100644
---- a/drivers/infiniband/sw/rxe/rxe.c
-+++ b/drivers/infiniband/sw/rxe/rxe.c
-@@ -187,7 +187,7 @@ static int rxe_newlink(const char *ibdev_name, struct net_device *ndev)
- rxe = rxe_get_dev_from_net(ndev);
- if (rxe) {
- ib_device_put(&rxe->ib_dev);
-- rxe_dbg(rxe, "already configured on %s\n", ndev->name);
-+ rxe_dbg_dev(rxe, "already configured on %s\n", ndev->name);
- err = -EEXIST;
- goto err;
- }
-diff --git a/drivers/infiniband/sw/rxe/rxe.h b/drivers/infiniband/sw/rxe/rxe.h
-index 2415f3704f576..0757acc381038 100644
---- a/drivers/infiniband/sw/rxe/rxe.h
-+++ b/drivers/infiniband/sw/rxe/rxe.h
-@@ -38,7 +38,8 @@
-
- #define RXE_ROCE_V2_SPORT (0xc000)
-
--#define rxe_dbg(rxe, fmt, ...) ibdev_dbg(&(rxe)->ib_dev, \
-+#define rxe_dbg(fmt, ...) pr_debug("%s: " fmt "\n", __func__, ##__VA_ARGS__)
-+#define rxe_dbg_dev(rxe, fmt, ...) ibdev_dbg(&(rxe)->ib_dev, \
- "%s: " fmt, __func__, ##__VA_ARGS__)
- #define rxe_dbg_uc(uc, fmt, ...) ibdev_dbg((uc)->ibuc.device, \
- "uc#%d %s: " fmt, (uc)->elem.index, __func__, ##__VA_ARGS__)
-diff --git a/drivers/infiniband/sw/rxe/rxe_cq.c b/drivers/infiniband/sw/rxe/rxe_cq.c
-index 1df186534639a..22fbc198e5d17 100644
---- a/drivers/infiniband/sw/rxe/rxe_cq.c
-+++ b/drivers/infiniband/sw/rxe/rxe_cq.c
-@@ -14,12 +14,12 @@ int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq,
- int count;
-
- if (cqe <= 0) {
-- rxe_dbg(rxe, "cqe(%d) <= 0\n", cqe);
-+ rxe_dbg_dev(rxe, "cqe(%d) <= 0\n", cqe);
- goto err1;
- }
-
- if (cqe > rxe->attr.max_cqe) {
-- rxe_dbg(rxe, "cqe(%d) > max_cqe(%d)\n",
-+ rxe_dbg_dev(rxe, "cqe(%d) > max_cqe(%d)\n",
- cqe, rxe->attr.max_cqe);
- goto err1;
- }
-@@ -65,7 +65,7 @@ int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
- cq->queue = rxe_queue_init(rxe, &cqe,
- sizeof(struct rxe_cqe), type);
- if (!cq->queue) {
-- rxe_dbg(rxe, "unable to create cq\n");
-+ rxe_dbg_dev(rxe, "unable to create cq\n");
- return -ENOMEM;
- }
-
-diff --git a/drivers/infiniband/sw/rxe/rxe_icrc.c b/drivers/infiniband/sw/rxe/rxe_icrc.c
-index 71bc2c1895888..fdf5f08cd8f17 100644
---- a/drivers/infiniband/sw/rxe/rxe_icrc.c
-+++ b/drivers/infiniband/sw/rxe/rxe_icrc.c
-@@ -21,7 +21,7 @@ int rxe_icrc_init(struct rxe_dev *rxe)
-
- tfm = crypto_alloc_shash("crc32", 0, 0);
- if (IS_ERR(tfm)) {
-- rxe_dbg(rxe, "failed to init crc32 algorithm err: %ld\n",
-+ rxe_dbg_dev(rxe, "failed to init crc32 algorithm err: %ld\n",
- PTR_ERR(tfm));
- return PTR_ERR(tfm);
- }
-@@ -51,7 +51,7 @@ static __be32 rxe_crc32(struct rxe_dev *rxe, __be32 crc, void *next, size_t len)
- *(__be32 *)shash_desc_ctx(shash) = crc;
- err = crypto_shash_update(shash, next, len);
- if (unlikely(err)) {
-- rxe_dbg(rxe, "failed crc calculation, err: %d\n", err);
-+ rxe_dbg_dev(rxe, "failed crc calculation, err: %d\n", err);
- return (__force __be32)crc32_le((__force u32)crc, next, len);
- }
-
-diff --git a/drivers/infiniband/sw/rxe/rxe_mmap.c b/drivers/infiniband/sw/rxe/rxe_mmap.c
-index a47d72dbc5376..6b7f2bd698799 100644
---- a/drivers/infiniband/sw/rxe/rxe_mmap.c
-+++ b/drivers/infiniband/sw/rxe/rxe_mmap.c
-@@ -79,7 +79,7 @@ int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
-
- /* Don't allow a mmap larger than the object. */
- if (size > ip->info.size) {
-- rxe_dbg(rxe, "mmap region is larger than the object!\n");
-+ rxe_dbg_dev(rxe, "mmap region is larger than the object!\n");
- spin_unlock_bh(&rxe->pending_lock);
- ret = -EINVAL;
- goto done;
-@@ -87,7 +87,7 @@ int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
-
- goto found_it;
- }
-- rxe_dbg(rxe, "unable to find pending mmap info\n");
-+ rxe_dbg_dev(rxe, "unable to find pending mmap info\n");
- spin_unlock_bh(&rxe->pending_lock);
- ret = -EINVAL;
- goto done;
-@@ -98,7 +98,7 @@ int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
-
- ret = remap_vmalloc_range(vma, ip->obj, 0);
- if (ret) {
-- rxe_dbg(rxe, "err %d from remap_vmalloc_range\n", ret);
-+ rxe_dbg_dev(rxe, "err %d from remap_vmalloc_range\n", ret);
- goto done;
- }
-
-diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
-index e02e1624bcf4d..a2ace42e95366 100644
---- a/drivers/infiniband/sw/rxe/rxe_net.c
-+++ b/drivers/infiniband/sw/rxe/rxe_net.c
-@@ -596,7 +596,7 @@ static int rxe_notify(struct notifier_block *not_blk,
- rxe_port_down(rxe);
- break;
- case NETDEV_CHANGEMTU:
-- rxe_dbg(rxe, "%s changed mtu to %d\n", ndev->name, ndev->mtu);
-+ rxe_dbg_dev(rxe, "%s changed mtu to %d\n", ndev->name, ndev->mtu);
- rxe_set_mtu(rxe, ndev->mtu);
- break;
- case NETDEV_CHANGE:
-@@ -608,7 +608,7 @@ static int rxe_notify(struct notifier_block *not_blk,
- case NETDEV_CHANGENAME:
- case NETDEV_FEAT_CHANGE:
- default:
-- rxe_dbg(rxe, "ignoring netdev event = %ld for %s\n",
-+ rxe_dbg_dev(rxe, "ignoring netdev event = %ld for %s\n",
- event, ndev->name);
- break;
- }
-diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
-index ab72db68b58f6..c954dd9394baf 100644
---- a/drivers/infiniband/sw/rxe/rxe_qp.c
-+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
-@@ -19,33 +19,33 @@ static int rxe_qp_chk_cap(struct rxe_dev *rxe, struct ib_qp_cap *cap,
- int has_srq)
- {
- if (cap->max_send_wr > rxe->attr.max_qp_wr) {
-- rxe_dbg(rxe, "invalid send wr = %u > %d\n",
-+ rxe_dbg_dev(rxe, "invalid send wr = %u > %d\n",
- cap->max_send_wr, rxe->attr.max_qp_wr);
- goto err1;
- }
-
- if (cap->max_send_sge > rxe->attr.max_send_sge) {
-- rxe_dbg(rxe, "invalid send sge = %u > %d\n",
-+ rxe_dbg_dev(rxe, "invalid send sge = %u > %d\n",
- cap->max_send_sge, rxe->attr.max_send_sge);
- goto err1;
- }
-
- if (!has_srq) {
- if (cap->max_recv_wr > rxe->attr.max_qp_wr) {
-- rxe_dbg(rxe, "invalid recv wr = %u > %d\n",
-+ rxe_dbg_dev(rxe, "invalid recv wr = %u > %d\n",
- cap->max_recv_wr, rxe->attr.max_qp_wr);
- goto err1;
- }
-
- if (cap->max_recv_sge > rxe->attr.max_recv_sge) {
-- rxe_dbg(rxe, "invalid recv sge = %u > %d\n",
-+ rxe_dbg_dev(rxe, "invalid recv sge = %u > %d\n",
- cap->max_recv_sge, rxe->attr.max_recv_sge);
- goto err1;
- }
- }
-
- if (cap->max_inline_data > rxe->max_inline_data) {
-- rxe_dbg(rxe, "invalid max inline data = %u > %d\n",
-+ rxe_dbg_dev(rxe, "invalid max inline data = %u > %d\n",
- cap->max_inline_data, rxe->max_inline_data);
- goto err1;
- }
-@@ -73,7 +73,7 @@ int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init)
- }
-
- if (!init->recv_cq || !init->send_cq) {
-- rxe_dbg(rxe, "missing cq\n");
-+ rxe_dbg_dev(rxe, "missing cq\n");
- goto err1;
- }
-
-@@ -82,14 +82,14 @@ int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init)
-
- if (init->qp_type == IB_QPT_GSI) {
- if (!rdma_is_port_valid(&rxe->ib_dev, port_num)) {
-- rxe_dbg(rxe, "invalid port = %d\n", port_num);
-+ rxe_dbg_dev(rxe, "invalid port = %d\n", port_num);
- goto err1;
- }
-
- port = &rxe->port;
-
- if (init->qp_type == IB_QPT_GSI && port->qp_gsi_index) {
-- rxe_dbg(rxe, "GSI QP exists for port %d\n", port_num);
-+ rxe_dbg_dev(rxe, "GSI QP exists for port %d\n", port_num);
- goto err1;
- }
- }
-diff --git a/drivers/infiniband/sw/rxe/rxe_srq.c b/drivers/infiniband/sw/rxe/rxe_srq.c
-index 82e37a41ced40..27ca82ec0826b 100644
---- a/drivers/infiniband/sw/rxe/rxe_srq.c
-+++ b/drivers/infiniband/sw/rxe/rxe_srq.c
-@@ -13,13 +13,13 @@ int rxe_srq_chk_init(struct rxe_dev *rxe, struct ib_srq_init_attr *init)
- struct ib_srq_attr *attr = &init->attr;
-
- if (attr->max_wr > rxe->attr.max_srq_wr) {
-- rxe_dbg(rxe, "max_wr(%d) > max_srq_wr(%d)\n",
-+ rxe_dbg_dev(rxe, "max_wr(%d) > max_srq_wr(%d)\n",
- attr->max_wr, rxe->attr.max_srq_wr);
- goto err1;
- }
-
- if (attr->max_wr <= 0) {
-- rxe_dbg(rxe, "max_wr(%d) <= 0\n", attr->max_wr);
-+ rxe_dbg_dev(rxe, "max_wr(%d) <= 0\n", attr->max_wr);
- goto err1;
- }
-
-@@ -27,7 +27,7 @@ int rxe_srq_chk_init(struct rxe_dev *rxe, struct ib_srq_init_attr *init)
- attr->max_wr = RXE_MIN_SRQ_WR;
-
- if (attr->max_sge > rxe->attr.max_srq_sge) {
-- rxe_dbg(rxe, "max_sge(%d) > max_srq_sge(%d)\n",
-+ rxe_dbg_dev(rxe, "max_sge(%d) > max_srq_sge(%d)\n",
- attr->max_sge, rxe->attr.max_srq_sge);
- goto err1;
- }
-diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
-index e14050a692766..f178d0773ff20 100644
---- a/drivers/infiniband/sw/rxe/rxe_verbs.c
-+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
-@@ -1095,7 +1095,7 @@ int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name)
-
- err = ib_register_device(dev, ibdev_name, NULL);
- if (err)
-- rxe_dbg(rxe, "failed with error %d\n", err);
-+ rxe_dbg_dev(rxe, "failed with error %d\n", err);
-
- /*
- * Note that rxe may be invalid at this point if another thread
---
-2.39.2
-
+++ /dev/null
-From 8b9e9c0fd0e17166cb1df8471c0281d4d2cd63c2 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Wed, 29 Mar 2023 21:14:01 +0300
-Subject: RDMA/rxe: Clean kzalloc failure paths
-
-From: Leon Romanovsky <leonro@nvidia.com>
-
-[ Upstream commit b6ba68555d75fd99f7daa9c5a5e476f8635cb155 ]
-
-There is no need to print any debug messages after failure to
-allocate memory, because kernel will print OOM dumps anyway.
-
-Together with removal of these messages, remove useless goto jumps.
-
-Fixes: 5bf944f24129 ("RDMA/rxe: Add error messages")
-Reported-by: Dan Carpenter <error27@gmail.com>
-Link: https://lore.kernel.org/all/ea43486f-43dd-4054-b1d5-3a0d202be621@kili.mountain
-Link: https://lore.kernel.org/r/d3cedf723b84e73e8062a67b7489d33802bafba2.1680113597.git.leon@kernel.org
-Reviewed-by: Bob Pearson <rpearsonhpe@gmail.com>
-Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- drivers/infiniband/sw/rxe/rxe_queue.c | 5 ++---
- drivers/infiniband/sw/rxe/rxe_verbs.c | 27 +++++++--------------------
- 2 files changed, 9 insertions(+), 23 deletions(-)
-
-diff --git a/drivers/infiniband/sw/rxe/rxe_queue.c b/drivers/infiniband/sw/rxe/rxe_queue.c
-index d6dbf5a0058dc..9611ee191a46d 100644
---- a/drivers/infiniband/sw/rxe/rxe_queue.c
-+++ b/drivers/infiniband/sw/rxe/rxe_queue.c
-@@ -61,11 +61,11 @@ struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe, int *num_elem,
-
- /* num_elem == 0 is allowed, but uninteresting */
- if (*num_elem < 0)
-- goto err1;
-+ return NULL;
-
- q = kzalloc(sizeof(*q), GFP_KERNEL);
- if (!q)
-- goto err1;
-+ return NULL;
-
- q->rxe = rxe;
- q->type = type;
-@@ -100,7 +100,6 @@ struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe, int *num_elem,
-
- err2:
- kfree(q);
--err1:
- return NULL;
- }
-
-diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
-index 090d5bfb1e18d..4e2db7c2e4ed7 100644
---- a/drivers/infiniband/sw/rxe/rxe_verbs.c
-+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
-@@ -1198,11 +1198,8 @@ static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
- int err;
-
- mr = kzalloc(sizeof(*mr), GFP_KERNEL);
-- if (!mr) {
-- err = -ENOMEM;
-- rxe_dbg_dev(rxe, "no memory for mr");
-- goto err_out;
-- }
-+ if (!mr)
-+ return ERR_PTR(-ENOMEM);
-
- err = rxe_add_to_pool(&rxe->mr_pool, mr);
- if (err) {
-@@ -1220,7 +1217,6 @@ static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
-
- err_free:
- kfree(mr);
--err_out:
- rxe_err_pd(pd, "returned err = %d", err);
- return ERR_PTR(err);
- }
-@@ -1235,11 +1231,8 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd, u64 start,
- int err, cleanup_err;
-
- mr = kzalloc(sizeof(*mr), GFP_KERNEL);
-- if (!mr) {
-- err = -ENOMEM;
-- rxe_dbg_pd(pd, "no memory for mr");
-- goto err_out;
-- }
-+ if (!mr)
-+ return ERR_PTR(-ENOMEM);
-
- err = rxe_add_to_pool(&rxe->mr_pool, mr);
- if (err) {
-@@ -1266,7 +1259,6 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd, u64 start,
- rxe_err_mr(mr, "cleanup failed, err = %d", cleanup_err);
- err_free:
- kfree(mr);
--err_out:
- rxe_err_pd(pd, "returned err = %d", err);
- return ERR_PTR(err);
- }
-@@ -1287,17 +1279,12 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
- }
-
- mr = kzalloc(sizeof(*mr), GFP_KERNEL);
-- if (!mr) {
-- err = -ENOMEM;
-- rxe_dbg_mr(mr, "no memory for mr");
-- goto err_out;
-- }
-+ if (!mr)
-+ return ERR_PTR(-ENOMEM);
-
- err = rxe_add_to_pool(&rxe->mr_pool, mr);
-- if (err) {
-- rxe_dbg_mr(mr, "unable to create mr, err = %d", err);
-+ if (err)
- goto err_free;
-- }
-
- rxe_get(pd);
- mr->ibmr.pd = ibpd;
---
-2.39.2
-
Stable-dep-of: b2b1ddc45745 ("RDMA/rxe: Fix the error "trying to register non-static key in rxe_cleanup_task"")
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
- drivers/infiniband/sw/rxe/rxe_comp.c | 3 +--
- drivers/infiniband/sw/rxe/rxe_loc.h | 6 +++---
- drivers/infiniband/sw/rxe/rxe_req.c | 3 +--
- drivers/infiniband/sw/rxe/rxe_resp.c | 3 +--
- drivers/infiniband/sw/rxe/rxe_task.c | 11 ++++++-----
- drivers/infiniband/sw/rxe/rxe_task.h | 9 +++++----
+ drivers/infiniband/sw/rxe/rxe_comp.c | 3 +--
+ drivers/infiniband/sw/rxe/rxe_loc.h | 6 +++---
+ drivers/infiniband/sw/rxe/rxe_req.c | 3 +--
+ drivers/infiniband/sw/rxe/rxe_resp.c | 3 +--
+ drivers/infiniband/sw/rxe/rxe_task.c | 11 ++++++-----
+ drivers/infiniband/sw/rxe/rxe_task.h | 9 +++++----
6 files changed, 17 insertions(+), 18 deletions(-)
-diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
-index 876057e3ee3c1..cbfa16b3a4902 100644
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
-@@ -575,9 +575,8 @@ static void free_pkt(struct rxe_pkt_info *pkt)
+@@ -571,9 +571,8 @@ static void free_pkt(struct rxe_pkt_info
ib_device_put(dev);
}
struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
struct rxe_send_wqe *wqe = NULL;
struct sk_buff *skb = NULL;
-diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
-index 839de34cf4c94..804b15e929dd9 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
-@@ -170,9 +170,9 @@ void rxe_srq_cleanup(struct rxe_pool_elem *elem);
+@@ -171,9 +171,9 @@ void rxe_srq_cleanup(struct rxe_pool_ele
void rxe_dealloc(struct ib_device *ib_dev);
/* rxe_icrc.c */
int rxe_icrc_init(struct rxe_dev *rxe);
-diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
-index 899c8779f8001..f2dc2d191e16f 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
-@@ -635,9 +635,8 @@ static int rxe_do_local_ops(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
+@@ -635,9 +635,8 @@ static int rxe_do_local_ops(struct rxe_q
return 0;
}
struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
struct rxe_pkt_info pkt;
struct sk_buff *skb;
-diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
-index 4217eec03a943..7cb1b962d6652 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
-@@ -1443,9 +1443,8 @@ static void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify)
+@@ -1439,9 +1439,8 @@ static void rxe_drain_req_pkts(struct rx
queue_advance_consumer(q, q->type);
}
struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
enum resp_states state;
struct rxe_pkt_info *pkt = NULL;
-diff --git a/drivers/infiniband/sw/rxe/rxe_task.c b/drivers/infiniband/sw/rxe/rxe_task.c
-index 60b90e33a8849..959cc6229a34e 100644
--- a/drivers/infiniband/sw/rxe/rxe_task.c
+++ b/drivers/infiniband/sw/rxe/rxe_task.c
@@ -11,7 +11,7 @@ int __rxe_do_task(struct rxe_task *task)
;
task->ret = ret;
-@@ -29,7 +29,7 @@ static void do_task(struct tasklet_struct *t)
+@@ -29,7 +29,7 @@ static void do_task(struct tasklet_struc
int cont;
int ret;
struct rxe_task *task = from_tasklet(task, t, tasklet);
unsigned int iterations = RXE_MAX_ITERATIONS;
spin_lock_bh(&task->lock);
-@@ -54,7 +54,7 @@ static void do_task(struct tasklet_struct *t)
+@@ -54,7 +54,7 @@ static void do_task(struct tasklet_struc
do {
cont = 0;
spin_lock_bh(&task->lock);
switch (task->state) {
-@@ -91,9 +91,10 @@ static void do_task(struct tasklet_struct *t)
+@@ -91,9 +91,10 @@ static void do_task(struct tasklet_struc
task->ret = ret;
}
task->func = func;
task->destroyed = false;
-diff --git a/drivers/infiniband/sw/rxe/rxe_task.h b/drivers/infiniband/sw/rxe/rxe_task.h
-index 7b88129702ac6..41efd5fd49b03 100644
--- a/drivers/infiniband/sw/rxe/rxe_task.h
+++ b/drivers/infiniband/sw/rxe/rxe_task.h
@@ -22,18 +22,19 @@ struct rxe_task {
/* cleanup task */
void rxe_cleanup_task(struct rxe_task *task);
---
-2.39.2
-
+++ /dev/null
-From c5ef84e44c55d95b83ad936094e6d2e181d0f796 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Fri, 3 Mar 2023 16:16:23 -0600
-Subject: RDMA/rxe: Extend dbg log messages to err and info
-
-From: Bob Pearson <rpearsonhpe@gmail.com>
-
-[ Upstream commit 9ac01f434a1eb56ea94611bd75cf62fa276b41f4 ]
-
-Extend the dbg log messages (e.g. rxe_dbg_xxx) to include
-err and info types. rxe.c is modified to use these new log
-messages as examples.
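-
-For example (illustration only, not part of this patch), a check
-that currently reports at debug level with
-
-	rxe_dbg_qp(qp, "bad wr opcode = %d", wr->opcode);
-
-can report at error severity, rate limited, with
-
-	rxe_err_qp(qp, "bad wr opcode = %d", wr->opcode);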
-
-Link: https://lore.kernel.org/r/20230303221623.8053-4-rpearsonhpe@gmail.com
-Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
-Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
-Stable-dep-of: 78b26a335310 ("RDMA/rxe: Remove tasklet call from rxe_cq.c")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- drivers/infiniband/sw/rxe/rxe.c | 8 ++++---
- drivers/infiniband/sw/rxe/rxe.h | 42 +++++++++++++++++++++++++++++++++
- 2 files changed, 47 insertions(+), 3 deletions(-)
-
-diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
-index d57ba7a5964b9..7a7e713de52db 100644
---- a/drivers/infiniband/sw/rxe/rxe.c
-+++ b/drivers/infiniband/sw/rxe/rxe.c
-@@ -160,6 +160,8 @@ void rxe_set_mtu(struct rxe_dev *rxe, unsigned int ndev_mtu)
-
- port->attr.active_mtu = mtu;
- port->mtu_cap = ib_mtu_enum_to_int(mtu);
-+
-+ rxe_info_dev(rxe, "Set mtu to %d", port->mtu_cap);
- }
-
- /* called by ifc layer to create new rxe device.
-@@ -179,7 +181,7 @@ static int rxe_newlink(const char *ibdev_name, struct net_device *ndev)
- int err = 0;
-
- if (is_vlan_dev(ndev)) {
-- pr_err("rxe creation allowed on top of a real device only\n");
-+ rxe_err("rxe creation allowed on top of a real device only");
- err = -EPERM;
- goto err;
- }
-@@ -187,14 +189,14 @@ static int rxe_newlink(const char *ibdev_name, struct net_device *ndev)
- rxe = rxe_get_dev_from_net(ndev);
- if (rxe) {
- ib_device_put(&rxe->ib_dev);
-- rxe_dbg_dev(rxe, "already configured on %s\n", ndev->name);
-+ rxe_err_dev(rxe, "already configured on %s", ndev->name);
- err = -EEXIST;
- goto err;
- }
-
- err = rxe_net_add(ibdev_name, ndev);
- if (err) {
-- pr_debug("failed to add %s\n", ndev->name);
-+ rxe_err("failed to add %s\n", ndev->name);
- goto err;
- }
- err:
-diff --git a/drivers/infiniband/sw/rxe/rxe.h b/drivers/infiniband/sw/rxe/rxe.h
-index 0757acc381038..bd8a8ea4ea8fd 100644
---- a/drivers/infiniband/sw/rxe/rxe.h
-+++ b/drivers/infiniband/sw/rxe/rxe.h
-@@ -58,6 +58,48 @@
- #define rxe_dbg_mw(mw, fmt, ...) ibdev_dbg((mw)->ibmw.device, \
- "mw#%d %s: " fmt, (mw)->elem.index, __func__, ##__VA_ARGS__)
-
-+#define rxe_err(fmt, ...) pr_err_ratelimited("%s: " fmt "\n", __func__, \
-+ ##__VA_ARGS__)
-+#define rxe_err_dev(rxe, fmt, ...) ibdev_err_ratelimited(&(rxe)->ib_dev, \
-+ "%s: " fmt, __func__, ##__VA_ARGS__)
-+#define rxe_err_uc(uc, fmt, ...) ibdev_err_ratelimited((uc)->ibuc.device, \
-+ "uc#%d %s: " fmt, (uc)->elem.index, __func__, ##__VA_ARGS__)
-+#define rxe_err_pd(pd, fmt, ...) ibdev_err_ratelimited((pd)->ibpd.device, \
-+ "pd#%d %s: " fmt, (pd)->elem.index, __func__, ##__VA_ARGS__)
-+#define rxe_err_ah(ah, fmt, ...) ibdev_err_ratelimited((ah)->ibah.device, \
-+ "ah#%d %s: " fmt, (ah)->elem.index, __func__, ##__VA_ARGS__)
-+#define rxe_err_srq(srq, fmt, ...) ibdev_err_ratelimited((srq)->ibsrq.device, \
-+ "srq#%d %s: " fmt, (srq)->elem.index, __func__, ##__VA_ARGS__)
-+#define rxe_err_qp(qp, fmt, ...) ibdev_err_ratelimited((qp)->ibqp.device, \
-+ "qp#%d %s: " fmt, (qp)->elem.index, __func__, ##__VA_ARGS__)
-+#define rxe_err_cq(cq, fmt, ...) ibdev_err_ratelimited((cq)->ibcq.device, \
-+ "cq#%d %s: " fmt, (cq)->elem.index, __func__, ##__VA_ARGS__)
-+#define rxe_err_mr(mr, fmt, ...) ibdev_err_ratelimited((mr)->ibmr.device, \
-+ "mr#%d %s: " fmt, (mr)->elem.index, __func__, ##__VA_ARGS__)
-+#define rxe_err_mw(mw, fmt, ...) ibdev_err_ratelimited((mw)->ibmw.device, \
-+ "mw#%d %s: " fmt, (mw)->elem.index, __func__, ##__VA_ARGS__)
-+
-+#define rxe_info(fmt, ...) pr_info_ratelimited("%s: " fmt "\n", __func__, \
-+ ##__VA_ARGS__)
-+#define rxe_info_dev(rxe, fmt, ...) ibdev_info_ratelimited(&(rxe)->ib_dev, \
-+ "%s: " fmt, __func__, ##__VA_ARGS__)
-+#define rxe_info_uc(uc, fmt, ...) ibdev_info_ratelimited((uc)->ibuc.device, \
-+ "uc#%d %s: " fmt, (uc)->elem.index, __func__, ##__VA_ARGS__)
-+#define rxe_info_pd(pd, fmt, ...) ibdev_info_ratelimited((pd)->ibpd.device, \
-+ "pd#%d %s: " fmt, (pd)->elem.index, __func__, ##__VA_ARGS__)
-+#define rxe_info_ah(ah, fmt, ...) ibdev_info_ratelimited((ah)->ibah.device, \
-+ "ah#%d %s: " fmt, (ah)->elem.index, __func__, ##__VA_ARGS__)
-+#define rxe_info_srq(srq, fmt, ...) ibdev_info_ratelimited((srq)->ibsrq.device, \
-+ "srq#%d %s: " fmt, (srq)->elem.index, __func__, ##__VA_ARGS__)
-+#define rxe_info_qp(qp, fmt, ...) ibdev_info_ratelimited((qp)->ibqp.device, \
-+ "qp#%d %s: " fmt, (qp)->elem.index, __func__, ##__VA_ARGS__)
-+#define rxe_info_cq(cq, fmt, ...) ibdev_info_ratelimited((cq)->ibcq.device, \
-+ "cq#%d %s: " fmt, (cq)->elem.index, __func__, ##__VA_ARGS__)
-+#define rxe_info_mr(mr, fmt, ...) ibdev_info_ratelimited((mr)->ibmr.device, \
-+ "mr#%d %s: " fmt, (mr)->elem.index, __func__, ##__VA_ARGS__)
-+#define rxe_info_mw(mw, fmt, ...) ibdev_info_ratelimited((mw)->ibmw.device, \
-+ "mw#%d %s: " fmt, (mw)->elem.index, __func__, ##__VA_ARGS__)
-+
- /* responder states */
- enum resp_states {
- RESPST_NONE,
---
-2.39.2
-
+++ /dev/null
-From 331410a2122fb6497edde368205f3d369dfa8031 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Mon, 13 Feb 2023 16:55:52 -0600
-Subject: RDMA/rxe: Remove rxe_alloc()
-
-From: Bob Pearson <rpearsonhpe@gmail.com>
-
-[ Upstream commit 72a03627443d5bc7032ab98bd784740cd8a76f8a ]
-
-Currently all the object types in the rxe driver are allocated in
-rdma-core except for MRs. By moving the kzalloc() call outside of
-the pool code, the rxe_alloc() subroutine can be eliminated and code
-checking for MR as a special case can be removed.
-
-This patch moves the kzalloc() and kfree_rcu() calls into the mr
-registration and destruction verbs. It removes that code from
-rxe_pool.c including the rxe_alloc() subroutine which is no longer
-used.
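-
-The resulting allocation pattern in the MR verbs is, in condensed
-form (a sketch of the change below, with the error labels inlined):
-
-	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
-	if (!mr)
-		return ERR_PTR(-ENOMEM);
-
-	err = rxe_add_to_pool(&rxe->mr_pool, mr);
-	if (err) {
-		kfree(mr);
-		return ERR_PTR(err);
-	}
-
-with the matching destroy path calling kfree_rcu(mr) after
-rxe_cleanup(mr).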
-
-Link: https://lore.kernel.org/r/20230213225551.12437-1-rpearsonhpe@gmail.com
-Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
-Reviewed-by: Devesh Sharma <devesh.s.sharma@oracle.com>
-Reviewed-by: Zhu Yanjun <yanjun.zhu@linux.dev>
-Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
-Stable-dep-of: 78b26a335310 ("RDMA/rxe: Remove tasklet call from rxe_cq.c")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- drivers/infiniband/sw/rxe/rxe_mr.c | 2 +-
- drivers/infiniband/sw/rxe/rxe_pool.c | 46 ---------------------
- drivers/infiniband/sw/rxe/rxe_pool.h | 3 --
- drivers/infiniband/sw/rxe/rxe_verbs.c | 59 +++++++++++++++++++--------
- 4 files changed, 44 insertions(+), 66 deletions(-)
-
-diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
-index 5e9a03831bf9f..b10aa1580a644 100644
---- a/drivers/infiniband/sw/rxe/rxe_mr.c
-+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
-@@ -731,7 +731,7 @@ int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
- return -EINVAL;
-
- rxe_cleanup(mr);
--
-+ kfree_rcu(mr);
- return 0;
- }
-
-diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
-index 1151c0b5cceab..6215c6de3a840 100644
---- a/drivers/infiniband/sw/rxe/rxe_pool.c
-+++ b/drivers/infiniband/sw/rxe/rxe_pool.c
-@@ -116,55 +116,12 @@ void rxe_pool_cleanup(struct rxe_pool *pool)
- WARN_ON(!xa_empty(&pool->xa));
- }
-
--void *rxe_alloc(struct rxe_pool *pool)
--{
-- struct rxe_pool_elem *elem;
-- void *obj;
-- int err;
--
-- if (WARN_ON(!(pool->type == RXE_TYPE_MR)))
-- return NULL;
--
-- if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
-- goto err_cnt;
--
-- obj = kzalloc(pool->elem_size, GFP_KERNEL);
-- if (!obj)
-- goto err_cnt;
--
-- elem = (struct rxe_pool_elem *)((u8 *)obj + pool->elem_offset);
--
-- elem->pool = pool;
-- elem->obj = obj;
-- kref_init(&elem->ref_cnt);
-- init_completion(&elem->complete);
--
-- /* allocate index in array but leave pointer as NULL so it
-- * can't be looked up until rxe_finalize() is called
-- */
-- err = xa_alloc_cyclic(&pool->xa, &elem->index, NULL, pool->limit,
-- &pool->next, GFP_KERNEL);
-- if (err < 0)
-- goto err_free;
--
-- return obj;
--
--err_free:
-- kfree(obj);
--err_cnt:
-- atomic_dec(&pool->num_elem);
-- return NULL;
--}
--
- int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem,
- bool sleepable)
- {
- int err;
- gfp_t gfp_flags;
-
-- if (WARN_ON(pool->type == RXE_TYPE_MR))
-- return -EINVAL;
--
- if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
- goto err_cnt;
-
-@@ -275,9 +232,6 @@ int __rxe_cleanup(struct rxe_pool_elem *elem, bool sleepable)
- if (pool->cleanup)
- pool->cleanup(elem);
-
-- if (pool->type == RXE_TYPE_MR)
-- kfree_rcu(elem->obj);
--
- atomic_dec(&pool->num_elem);
-
- return err;
-diff --git a/drivers/infiniband/sw/rxe/rxe_pool.h b/drivers/infiniband/sw/rxe/rxe_pool.h
-index 9d83cb32092ff..b42e26427a702 100644
---- a/drivers/infiniband/sw/rxe/rxe_pool.h
-+++ b/drivers/infiniband/sw/rxe/rxe_pool.h
-@@ -54,9 +54,6 @@ void rxe_pool_init(struct rxe_dev *rxe, struct rxe_pool *pool,
- /* free resources from object pool */
- void rxe_pool_cleanup(struct rxe_pool *pool);
-
--/* allocate an object from pool */
--void *rxe_alloc(struct rxe_pool *pool);
--
- /* connect already allocated object to pool */
- int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem,
- bool sleepable);
-diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
-index a3aee247aa157..e14050a692766 100644
---- a/drivers/infiniband/sw/rxe/rxe_verbs.c
-+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
-@@ -869,10 +869,17 @@ static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
- struct rxe_dev *rxe = to_rdev(ibpd->device);
- struct rxe_pd *pd = to_rpd(ibpd);
- struct rxe_mr *mr;
-+ int err;
-
-- mr = rxe_alloc(&rxe->mr_pool);
-- if (!mr)
-- return ERR_PTR(-ENOMEM);
-+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
-+ if (!mr) {
-+ err = -ENOMEM;
-+ goto err_out;
-+ }
-+
-+ err = rxe_add_to_pool(&rxe->mr_pool, mr);
-+ if (err)
-+ goto err_free;
-
- rxe_get(pd);
- mr->ibmr.pd = ibpd;
-@@ -880,8 +887,12 @@ static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
-
- rxe_mr_init_dma(access, mr);
- rxe_finalize(mr);
--
- return &mr->ibmr;
-+
-+err_free:
-+ kfree(mr);
-+err_out:
-+ return ERR_PTR(err);
- }
-
- static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
-@@ -895,9 +906,15 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
- struct rxe_pd *pd = to_rpd(ibpd);
- struct rxe_mr *mr;
-
-- mr = rxe_alloc(&rxe->mr_pool);
-- if (!mr)
-- return ERR_PTR(-ENOMEM);
-+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
-+ if (!mr) {
-+ err = -ENOMEM;
-+ goto err_out;
-+ }
-+
-+ err = rxe_add_to_pool(&rxe->mr_pool, mr);
-+ if (err)
-+ goto err_free;
-
- rxe_get(pd);
- mr->ibmr.pd = ibpd;
-@@ -905,14 +922,16 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
-
- err = rxe_mr_init_user(rxe, start, length, iova, access, mr);
- if (err)
-- goto err1;
-+ goto err_cleanup;
-
- rxe_finalize(mr);
--
- return &mr->ibmr;
-
--err1:
-+err_cleanup:
- rxe_cleanup(mr);
-+err_free:
-+ kfree(mr);
-+err_out:
- return ERR_PTR(err);
- }
-
-@@ -927,9 +946,15 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
- if (mr_type != IB_MR_TYPE_MEM_REG)
- return ERR_PTR(-EINVAL);
-
-- mr = rxe_alloc(&rxe->mr_pool);
-- if (!mr)
-- return ERR_PTR(-ENOMEM);
-+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
-+ if (!mr) {
-+ err = -ENOMEM;
-+ goto err_out;
-+ }
-+
-+ err = rxe_add_to_pool(&rxe->mr_pool, mr);
-+ if (err)
-+ goto err_free;
-
- rxe_get(pd);
- mr->ibmr.pd = ibpd;
-@@ -937,14 +962,16 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
-
- err = rxe_mr_init_fast(max_num_sg, mr);
- if (err)
-- goto err1;
-+ goto err_cleanup;
-
- rxe_finalize(mr);
--
- return &mr->ibmr;
-
--err1:
-+err_cleanup:
- rxe_cleanup(mr);
-+err_free:
-+ kfree(mr);
-+err_out:
- return ERR_PTR(err);
- }
-
---
-2.39.2
-
-From b08cdff2bf716dd2ab4b9f93ed67a6f1492fde13 Mon Sep 17 00:00:00 2001
+From a107e8f6f8d74622dbe2aea50868f9326390200c Mon Sep 17 00:00:00 2001
From: Sasha Levin <sashal@kernel.org>
Date: Mon, 27 Mar 2023 16:56:44 -0500
Subject: RDMA/rxe: Remove tasklet call from rxe_cq.c
Signed-off-by: Leon Romanovsky <leon@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
- drivers/infiniband/sw/rxe/rxe_cq.c | 32 +++------------------------
- drivers/infiniband/sw/rxe/rxe_verbs.c | 2 --
- drivers/infiniband/sw/rxe/rxe_verbs.h | 2 --
+ drivers/infiniband/sw/rxe/rxe_cq.c | 32 +++-----------------------------
+ drivers/infiniband/sw/rxe/rxe_verbs.c | 2 --
+ drivers/infiniband/sw/rxe/rxe_verbs.h | 2 --
3 files changed, 3 insertions(+), 33 deletions(-)
-diff --git a/drivers/infiniband/sw/rxe/rxe_cq.c b/drivers/infiniband/sw/rxe/rxe_cq.c
-index 22fbc198e5d17..519ddec29b4ba 100644
--- a/drivers/infiniband/sw/rxe/rxe_cq.c
+++ b/drivers/infiniband/sw/rxe/rxe_cq.c
-@@ -39,21 +39,6 @@ int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq,
+@@ -39,21 +39,6 @@ err1:
return -EINVAL;
}
int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
int comp_vector, struct ib_udata *udata,
struct rxe_create_cq_resp __user *uresp)
-@@ -79,10 +64,6 @@ int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
+@@ -79,10 +64,6 @@ int rxe_cq_from_init(struct rxe_dev *rxe
cq->is_user = uresp;
spin_lock_init(&cq->cq_lock);
cq->ibcq.cqe = cqe;
return 0;
-@@ -103,6 +84,7 @@ int rxe_cq_resize_queue(struct rxe_cq *cq, int cqe,
+@@ -103,6 +84,7 @@ int rxe_cq_resize_queue(struct rxe_cq *c
return err;
}
int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
{
struct ib_event ev;
-@@ -135,21 +117,13 @@ int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
+@@ -135,21 +117,13 @@ int rxe_cq_post(struct rxe_cq *cq, struc
if ((cq->notify == IB_CQ_NEXT_COMP) ||
(cq->notify == IB_CQ_SOLICITED && solicited)) {
cq->notify = 0;
void rxe_cq_cleanup(struct rxe_pool_elem *elem)
{
struct rxe_cq *cq = container_of(elem, typeof(*cq), elem);
-diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
-index 84b53c070fc55..090d5bfb1e18d 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
-@@ -1178,8 +1178,6 @@ static int rxe_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
- goto err_out;
- }
+@@ -786,8 +786,6 @@ static int rxe_destroy_cq(struct ib_cq *
+ if (atomic_read(&cq->num_wq))
+ return -EINVAL;
- rxe_cq_disable(cq);
-
- err = rxe_cleanup(cq);
- if (err)
- rxe_err_cq(cq, "cleanup failed, err = %d", err);
-diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
-index c269ae2a32243..d812093a39166 100644
+ rxe_cleanup(cq);
+ return 0;
+ }
--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
@@ -63,9 +63,7 @@ struct rxe_cq {
atomic_t num_wq;
};
---
-2.39.2
-
fs-ntfs3-fix-oob-read-in-indx_insert_into_buffer.patch
fs-ntfs3-fix-slab-out-of-bounds-read-in-hdr_delete_d.patch
iommu-mediatek-set-dma_mask-for-pgtable_pa_35_en.patch
-rdma-rxe-remove-rxe_alloc.patch
-rdma-rxe-change-rxe_dbg-to-rxe_dbg_dev.patch
-rdma-rxe-extend-dbg-log-messages-to-err-and-info.patch
-rdma-rxe-add-error-messages.patch
rdma-rxe-remove-tasklet-call-from-rxe_cq.c.patch
power-supply-generic-adc-battery-fix-unit-scaling.patch
clk-add-missing-of_node_put-in-assigned-clocks-prope.patch
-rdma-rxe-clean-kzalloc-failure-paths.patch
rdma-siw-remove-namespace-check-from-siw_netdev_even.patch
clk-qcom-gcc-sm6115-mark-rcgs-shared-where-applicabl.patch
power-supply-rk817-fix-low-soc-bugs.patch
+++ /dev/null
-From 50c048f946dc9c385b791e397bec6ed1f848c525 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Fri, 3 Mar 2023 16:16:24 -0600
-Subject: RDMA/rxe: Add error messages
-
-From: Bob Pearson <rpearsonhpe@gmail.com>
-
-[ Upstream commit 5bf944f24129cbc4b5828348bdce2db94ca9fbd6 ]
-
-This patch adds error and debug messages so that every interaction
-with rdma-core through a verbs API call or a completion error return
-will generate at least one error message backed up by debug messages
-with more detail.
-
-With dynamic debugging one can follow up after seeing an error message
-by turning on the appropriate debug messages.
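-
-(With CONFIG_DYNAMIC_DEBUG this is done by writing, for example,
-"file rxe_verbs.c +p" to /sys/kernel/debug/dynamic_debug/control.)
-The scheme pairs a detailed rxe_dbg_xxx() message at the point of
-failure with one rate-limited rxe_err_xxx() message at the exit
-label, roughly (a condensed sketch, not a literal excerpt):
-
-	err = rxe_cq_chk_attr(rxe, NULL, attr->cqe, attr->comp_vector);
-	if (err) {
-		rxe_dbg_dev(rxe, "bad init attributes, err = %d", err);
-		goto err_out;
-	}
-
-	return 0;
-
-err_out:
-	rxe_err_dev(rxe, "returned err = %d", err);
-	return err;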
-
-Link: https://lore.kernel.org/r/20230303221623.8053-5-rpearsonhpe@gmail.com
-Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
-Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
-Stable-dep-of: 78b26a335310 ("RDMA/rxe: Remove tasklet call from rxe_cq.c")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- drivers/infiniband/sw/rxe/rxe_comp.c | 4 +
- drivers/infiniband/sw/rxe/rxe_loc.h | 1 -
- drivers/infiniband/sw/rxe/rxe_mr.c | 13 -
- drivers/infiniband/sw/rxe/rxe_resp.c | 4 +
- drivers/infiniband/sw/rxe/rxe_verbs.c | 828 +++++++++++++++++++-------
- 5 files changed, 609 insertions(+), 241 deletions(-)
-
-diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
-index 20737fec392bf..876057e3ee3c1 100644
---- a/drivers/infiniband/sw/rxe/rxe_comp.c
-+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
-@@ -428,6 +428,10 @@ static void make_send_cqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
- uwc->wc_flags = IB_WC_WITH_IMM;
- uwc->byte_len = wqe->dma.length;
- }
-+ } else {
-+ if (wqe->status != IB_WC_WR_FLUSH_ERR)
-+ rxe_err_qp(qp, "non-flush error status = %d",
-+ wqe->status);
- }
- }
-
-diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
-index 1bb0cb479eb12..839de34cf4c94 100644
---- a/drivers/infiniband/sw/rxe/rxe_loc.h
-+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
-@@ -80,7 +80,6 @@ int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length);
- int advance_dma_data(struct rxe_dma_info *dma, unsigned int length);
- int rxe_invalidate_mr(struct rxe_qp *qp, u32 key);
- int rxe_reg_fast_mr(struct rxe_qp *qp, struct rxe_send_wqe *wqe);
--int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
- void rxe_mr_cleanup(struct rxe_pool_elem *elem);
-
- /* rxe_mw.c */
-diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
-index b10aa1580a644..1e17f8086d59a 100644
---- a/drivers/infiniband/sw/rxe/rxe_mr.c
-+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
-@@ -722,19 +722,6 @@ int rxe_reg_fast_mr(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
- return 0;
- }
-
--int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
--{
-- struct rxe_mr *mr = to_rmr(ibmr);
--
-- /* See IBA 10.6.7.2.6 */
-- if (atomic_read(&mr->num_mw) > 0)
-- return -EINVAL;
--
-- rxe_cleanup(mr);
-- kfree_rcu(mr);
-- return 0;
--}
--
- void rxe_mr_cleanup(struct rxe_pool_elem *elem)
- {
- struct rxe_mr *mr = container_of(elem, typeof(*mr), elem);
-diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
-index 0cc1ba91d48cc..4217eec03a943 100644
---- a/drivers/infiniband/sw/rxe/rxe_resp.c
-+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
-@@ -1151,6 +1151,10 @@ static enum resp_states do_complete(struct rxe_qp *qp,
-
- wc->port_num = qp->attr.port_num;
- }
-+ } else {
-+ if (wc->status != IB_WC_WR_FLUSH_ERR)
-+ rxe_err_qp(qp, "non-flush error status = %d",
-+ wc->status);
- }
-
- /* have copy for srq and reference for !srq */
-diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
-index f178d0773ff20..84b53c070fc55 100644
---- a/drivers/infiniband/sw/rxe/rxe_verbs.c
-+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
-@@ -12,31 +12,48 @@
- #include "rxe_queue.h"
- #include "rxe_hw_counters.h"
-
--static int rxe_query_device(struct ib_device *dev,
-+static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr);
-+
-+/* dev */
-+static int rxe_query_device(struct ib_device *ibdev,
- struct ib_device_attr *attr,
-- struct ib_udata *uhw)
-+ struct ib_udata *udata)
- {
-- struct rxe_dev *rxe = to_rdev(dev);
-+ struct rxe_dev *rxe = to_rdev(ibdev);
-+ int err;
-+
-+ if (udata->inlen || udata->outlen) {
-+ rxe_dbg_dev(rxe, "malformed udata");
-+ err = -EINVAL;
-+ goto err_out;
-+ }
-
-- if (uhw->inlen || uhw->outlen)
-- return -EINVAL;
-+ memcpy(attr, &rxe->attr, sizeof(*attr));
-
-- *attr = rxe->attr;
- return 0;
-+
-+err_out:
-+ rxe_err_dev(rxe, "returned err = %d", err);
-+ return err;
- }
-
--static int rxe_query_port(struct ib_device *dev,
-+static int rxe_query_port(struct ib_device *ibdev,
- u32 port_num, struct ib_port_attr *attr)
- {
-- struct rxe_dev *rxe = to_rdev(dev);
-- int rc;
-+ struct rxe_dev *rxe = to_rdev(ibdev);
-+ int err, ret;
-
-- /* *attr being zeroed by the caller, avoid zeroing it here */
-- *attr = rxe->port.attr;
-+ if (port_num != 1) {
-+ err = -EINVAL;
-+ rxe_dbg_dev(rxe, "bad port_num = %d", port_num);
-+ goto err_out;
-+ }
-+
-+ memcpy(attr, &rxe->port.attr, sizeof(*attr));
-
- mutex_lock(&rxe->usdev_lock);
-- rc = ib_get_eth_speed(dev, port_num, &attr->active_speed,
-- &attr->active_width);
-+ ret = ib_get_eth_speed(ibdev, port_num, &attr->active_speed,
-+ &attr->active_width);
-
- if (attr->state == IB_PORT_ACTIVE)
- attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
-@@ -47,27 +64,45 @@ static int rxe_query_port(struct ib_device *dev,
-
- mutex_unlock(&rxe->usdev_lock);
-
-- return rc;
-+ return ret;
-+
-+err_out:
-+ rxe_err_dev(rxe, "returned err = %d", err);
-+ return err;
- }
-
--static int rxe_query_pkey(struct ib_device *device,
-+static int rxe_query_pkey(struct ib_device *ibdev,
- u32 port_num, u16 index, u16 *pkey)
- {
-- if (index > 0)
-- return -EINVAL;
-+ struct rxe_dev *rxe = to_rdev(ibdev);
-+ int err;
-+
-+ if (index != 0) {
-+ err = -EINVAL;
-+ rxe_dbg_dev(rxe, "bad pkey index = %d", index);
-+ goto err_out;
-+ }
-
- *pkey = IB_DEFAULT_PKEY_FULL;
- return 0;
-+
-+err_out:
-+ rxe_err_dev(rxe, "returned err = %d", err);
-+ return err;
- }
-
--static int rxe_modify_device(struct ib_device *dev,
-+static int rxe_modify_device(struct ib_device *ibdev,
- int mask, struct ib_device_modify *attr)
- {
-- struct rxe_dev *rxe = to_rdev(dev);
-+ struct rxe_dev *rxe = to_rdev(ibdev);
-+ int err;
-
- if (mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
-- IB_DEVICE_MODIFY_NODE_DESC))
-- return -EOPNOTSUPP;
-+ IB_DEVICE_MODIFY_NODE_DESC)) {
-+ err = -EOPNOTSUPP;
-+ rxe_dbg_dev(rxe, "unsupported mask = 0x%x", mask);
-+ goto err_out;
-+ }
-
- if (mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID)
- rxe->attr.sys_image_guid = cpu_to_be64(attr->sys_image_guid);
-@@ -78,16 +113,33 @@ static int rxe_modify_device(struct ib_device *dev,
- }
-
- return 0;
-+
-+err_out:
-+ rxe_err_dev(rxe, "returned err = %d", err);
-+ return err;
- }
-
--static int rxe_modify_port(struct ib_device *dev,
-- u32 port_num, int mask, struct ib_port_modify *attr)
-+static int rxe_modify_port(struct ib_device *ibdev, u32 port_num,
-+ int mask, struct ib_port_modify *attr)
- {
-- struct rxe_dev *rxe = to_rdev(dev);
-+ struct rxe_dev *rxe = to_rdev(ibdev);
- struct rxe_port *port;
-+ int err;
-
-- port = &rxe->port;
-+ if (port_num != 1) {
-+ err = -EINVAL;
-+ rxe_dbg_dev(rxe, "bad port_num = %d", port_num);
-+ goto err_out;
-+ }
-
-+ //TODO is shutdown useful
-+ if (mask & ~(IB_PORT_RESET_QKEY_CNTR)) {
-+ err = -EOPNOTSUPP;
-+ rxe_dbg_dev(rxe, "unsupported mask = 0x%x", mask);
-+ goto err_out;
-+ }
-+
-+ port = &rxe->port;
- port->attr.port_cap_flags |= attr->set_port_cap_mask;
- port->attr.port_cap_flags &= ~attr->clr_port_cap_mask;
-
-@@ -95,73 +147,125 @@ static int rxe_modify_port(struct ib_device *dev,
- port->attr.qkey_viol_cntr = 0;
-
- return 0;
--}
-
--static enum rdma_link_layer rxe_get_link_layer(struct ib_device *dev,
-- u32 port_num)
--{
-- return IB_LINK_LAYER_ETHERNET;
-+err_out:
-+ rxe_err_dev(rxe, "returned err = %d", err);
-+ return err;
- }
-
--static int rxe_alloc_ucontext(struct ib_ucontext *ibuc, struct ib_udata *udata)
-+static enum rdma_link_layer rxe_get_link_layer(struct ib_device *ibdev,
-+ u32 port_num)
- {
-- struct rxe_dev *rxe = to_rdev(ibuc->device);
-- struct rxe_ucontext *uc = to_ruc(ibuc);
-+ struct rxe_dev *rxe = to_rdev(ibdev);
-+ int err;
-
-- return rxe_add_to_pool(&rxe->uc_pool, uc);
--}
-+ if (port_num != 1) {
-+ err = -EINVAL;
-+ rxe_dbg_dev(rxe, "bad port_num = %d", port_num);
-+ goto err_out;
-+ }
-
--static void rxe_dealloc_ucontext(struct ib_ucontext *ibuc)
--{
-- struct rxe_ucontext *uc = to_ruc(ibuc);
-+ return IB_LINK_LAYER_ETHERNET;
-
-- rxe_cleanup(uc);
-+err_out:
-+ rxe_err_dev(rxe, "returned err = %d", err);
-+ return err;
- }
-
--static int rxe_port_immutable(struct ib_device *dev, u32 port_num,
-+static int rxe_port_immutable(struct ib_device *ibdev, u32 port_num,
- struct ib_port_immutable *immutable)
- {
-+ struct rxe_dev *rxe = to_rdev(ibdev);
-+ struct ib_port_attr attr = {};
- int err;
-- struct ib_port_attr attr;
-
-- immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
-+ if (port_num != 1) {
-+ err = -EINVAL;
-+ rxe_dbg_dev(rxe, "bad port_num = %d", port_num);
-+ goto err_out;
-+ }
-
-- err = ib_query_port(dev, port_num, &attr);
-+ err = ib_query_port(ibdev, port_num, &attr);
- if (err)
-- return err;
-+ goto err_out;
-
-+ immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
- immutable->pkey_tbl_len = attr.pkey_tbl_len;
- immutable->gid_tbl_len = attr.gid_tbl_len;
- immutable->max_mad_size = IB_MGMT_MAD_SIZE;
-
- return 0;
-+
-+err_out:
-+ rxe_err_dev(rxe, "returned err = %d", err);
-+ return err;
- }
-
-+/* uc */
-+static int rxe_alloc_ucontext(struct ib_ucontext *ibuc, struct ib_udata *udata)
-+{
-+ struct rxe_dev *rxe = to_rdev(ibuc->device);
-+ struct rxe_ucontext *uc = to_ruc(ibuc);
-+ int err;
-+
-+ err = rxe_add_to_pool(&rxe->uc_pool, uc);
-+ if (err)
-+ rxe_err_dev(rxe, "unable to create uc");
-+
-+ return err;
-+}
-+
-+static void rxe_dealloc_ucontext(struct ib_ucontext *ibuc)
-+{
-+ struct rxe_ucontext *uc = to_ruc(ibuc);
-+ int err;
-+
-+ err = rxe_cleanup(uc);
-+ if (err)
-+ rxe_err_uc(uc, "cleanup failed, err = %d", err);
-+}
-+
-+/* pd */
- static int rxe_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
- {
- struct rxe_dev *rxe = to_rdev(ibpd->device);
- struct rxe_pd *pd = to_rpd(ibpd);
-+ int err;
-+
-+ err = rxe_add_to_pool(&rxe->pd_pool, pd);
-+ if (err) {
-+ rxe_dbg_dev(rxe, "unable to alloc pd");
-+ goto err_out;
-+ }
-
-- return rxe_add_to_pool(&rxe->pd_pool, pd);
-+ return 0;
-+
-+err_out:
-+ rxe_err_dev(rxe, "returned err = %d", err);
-+ return err;
- }
-
- static int rxe_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
- {
- struct rxe_pd *pd = to_rpd(ibpd);
-+ int err;
-+
-+ err = rxe_cleanup(pd);
-+ if (err)
-+ rxe_err_pd(pd, "cleanup failed, err = %d", err);
-
-- rxe_cleanup(pd);
- return 0;
- }
-
-+/* ah */
- static int rxe_create_ah(struct ib_ah *ibah,
- struct rdma_ah_init_attr *init_attr,
- struct ib_udata *udata)
--
- {
- struct rxe_dev *rxe = to_rdev(ibah->device);
- struct rxe_ah *ah = to_rah(ibah);
- struct rxe_create_ah_resp __user *uresp = NULL;
-- int err;
-+ int err, cleanup_err;
-
- if (udata) {
- /* test if new user provider */
-@@ -174,16 +278,18 @@ static int rxe_create_ah(struct ib_ah *ibah,
-
- err = rxe_add_to_pool_ah(&rxe->ah_pool, ah,
- init_attr->flags & RDMA_CREATE_AH_SLEEPABLE);
-- if (err)
-- return err;
-+ if (err) {
-+ rxe_dbg_dev(rxe, "unable to create ah");
-+ goto err_out;
-+ }
-
- /* create index > 0 */
- ah->ah_num = ah->elem.index;
-
- err = rxe_ah_chk_attr(ah, init_attr->ah_attr);
- if (err) {
-- rxe_cleanup(ah);
-- return err;
-+ rxe_dbg_ah(ah, "bad attr");
-+ goto err_cleanup;
- }
-
- if (uresp) {
-@@ -191,8 +297,9 @@ static int rxe_create_ah(struct ib_ah *ibah,
- err = copy_to_user(&uresp->ah_num, &ah->ah_num,
- sizeof(uresp->ah_num));
- if (err) {
-- rxe_cleanup(ah);
-- return -EFAULT;
-+ err = -EFAULT;
-+ rxe_dbg_ah(ah, "unable to copy to user");
-+ goto err_cleanup;
- }
- } else if (ah->is_user) {
- /* only if old user provider */
-@@ -203,19 +310,34 @@ static int rxe_create_ah(struct ib_ah *ibah,
- rxe_finalize(ah);
-
- return 0;
-+
-+err_cleanup:
-+ cleanup_err = rxe_cleanup(ah);
-+ if (cleanup_err)
-+ rxe_err_ah(ah, "cleanup failed, err = %d", cleanup_err);
-+err_out:
-+ rxe_err_ah(ah, "returned err = %d", err);
-+ return err;
- }
-
- static int rxe_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
- {
-- int err;
- struct rxe_ah *ah = to_rah(ibah);
-+ int err;
-
- err = rxe_ah_chk_attr(ah, attr);
-- if (err)
-- return err;
-+ if (err) {
-+ rxe_dbg_ah(ah, "bad attr");
-+ goto err_out;
-+ }
-
- rxe_init_av(attr, &ah->av);
-+
- return 0;
-+
-+err_out:
-+ rxe_err_ah(ah, "returned err = %d", err);
-+ return err;
- }
-
- static int rxe_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
-@@ -225,92 +347,77 @@ static int rxe_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
- memset(attr, 0, sizeof(*attr));
- attr->type = ibah->type;
- rxe_av_to_attr(&ah->av, attr);
-+
- return 0;
- }
-
- static int rxe_destroy_ah(struct ib_ah *ibah, u32 flags)
- {
- struct rxe_ah *ah = to_rah(ibah);
-+ int err;
-
-- rxe_cleanup_ah(ah, flags & RDMA_DESTROY_AH_SLEEPABLE);
--
-- return 0;
--}
--
--static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
--{
-- int i;
-- u32 length;
-- struct rxe_recv_wqe *recv_wqe;
-- int num_sge = ibwr->num_sge;
-- int full;
--
-- full = queue_full(rq->queue, QUEUE_TYPE_FROM_ULP);
-- if (unlikely(full))
-- return -ENOMEM;
--
-- if (unlikely(num_sge > rq->max_sge))
-- return -EINVAL;
--
-- length = 0;
-- for (i = 0; i < num_sge; i++)
-- length += ibwr->sg_list[i].length;
--
-- recv_wqe = queue_producer_addr(rq->queue, QUEUE_TYPE_FROM_ULP);
-- recv_wqe->wr_id = ibwr->wr_id;
--
-- memcpy(recv_wqe->dma.sge, ibwr->sg_list,
-- num_sge * sizeof(struct ib_sge));
--
-- recv_wqe->dma.length = length;
-- recv_wqe->dma.resid = length;
-- recv_wqe->dma.num_sge = num_sge;
-- recv_wqe->dma.cur_sge = 0;
-- recv_wqe->dma.sge_offset = 0;
--
-- queue_advance_producer(rq->queue, QUEUE_TYPE_FROM_ULP);
-+ err = rxe_cleanup_ah(ah, flags & RDMA_DESTROY_AH_SLEEPABLE);
-+ if (err)
-+ rxe_err_ah(ah, "cleanup failed, err = %d", err);
-
- return 0;
- }
-
-+/* srq */
- static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init,
- struct ib_udata *udata)
- {
-- int err;
- struct rxe_dev *rxe = to_rdev(ibsrq->device);
- struct rxe_pd *pd = to_rpd(ibsrq->pd);
- struct rxe_srq *srq = to_rsrq(ibsrq);
- struct rxe_create_srq_resp __user *uresp = NULL;
-+ int err, cleanup_err;
-
- if (udata) {
-- if (udata->outlen < sizeof(*uresp))
-- return -EINVAL;
-+ if (udata->outlen < sizeof(*uresp)) {
-+ err = -EINVAL;
-+ rxe_err_dev(rxe, "malformed udata");
-+ goto err_out;
-+ }
- uresp = udata->outbuf;
- }
-
-- if (init->srq_type != IB_SRQT_BASIC)
-- return -EOPNOTSUPP;
-+ if (init->srq_type != IB_SRQT_BASIC) {
-+ err = -EOPNOTSUPP;
-+ rxe_dbg_dev(rxe, "srq type = %d, not supported",
-+ init->srq_type);
-+ goto err_out;
-+ }
-
- err = rxe_srq_chk_init(rxe, init);
-- if (err)
-- return err;
-+ if (err) {
-+ rxe_dbg_dev(rxe, "invalid init attributes");
-+ goto err_out;
-+ }
-
- err = rxe_add_to_pool(&rxe->srq_pool, srq);
-- if (err)
-- return err;
-+ if (err) {
-+ rxe_dbg_dev(rxe, "unable to create srq, err = %d", err);
-+ goto err_out;
-+ }
-
- rxe_get(pd);
- srq->pd = pd;
-
- err = rxe_srq_from_init(rxe, srq, init, udata, uresp);
-- if (err)
-+ if (err) {
-+ rxe_dbg_srq(srq, "create srq failed, err = %d", err);
- goto err_cleanup;
-+ }
-
- return 0;
-
- err_cleanup:
-- rxe_cleanup(srq);
--
-+ cleanup_err = rxe_cleanup(srq);
-+ if (cleanup_err)
-+ rxe_err_srq(srq, "cleanup failed, err = %d", cleanup_err);
-+err_out:
-+ rxe_err_dev(rxe, "returned err = %d", err);
- return err;
- }
-
-@@ -318,46 +425,64 @@ static int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
- enum ib_srq_attr_mask mask,
- struct ib_udata *udata)
- {
-- int err;
- struct rxe_srq *srq = to_rsrq(ibsrq);
- struct rxe_dev *rxe = to_rdev(ibsrq->device);
-- struct rxe_modify_srq_cmd ucmd = {};
-+ struct rxe_modify_srq_cmd cmd = {};
-+ int err;
-
- if (udata) {
-- if (udata->inlen < sizeof(ucmd))
-- return -EINVAL;
-+ if (udata->inlen < sizeof(cmd)) {
-+ err = -EINVAL;
-+ rxe_dbg_srq(srq, "malformed udata");
-+ goto err_out;
-+ }
-
-- err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
-- if (err)
-- return err;
-+ err = ib_copy_from_udata(&cmd, udata, sizeof(cmd));
-+ if (err) {
-+ err = -EFAULT;
-+ rxe_dbg_srq(srq, "unable to read udata");
-+ goto err_out;
-+ }
- }
-
- err = rxe_srq_chk_attr(rxe, srq, attr, mask);
-- if (err)
-- return err;
-+ if (err) {
-+ rxe_dbg_srq(srq, "bad init attributes");
-+ goto err_out;
-+ }
-+
-+ err = rxe_srq_from_attr(rxe, srq, attr, mask, &cmd, udata);
-+ if (err) {
-+ rxe_dbg_srq(srq, "bad attr");
-+ goto err_out;
-+ }
-+
-+ return 0;
-
-- return rxe_srq_from_attr(rxe, srq, attr, mask, &ucmd, udata);
-+err_out:
-+ rxe_err_srq(srq, "returned err = %d", err);
-+ return err;
- }
-
- static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
- {
- struct rxe_srq *srq = to_rsrq(ibsrq);
-+ int err;
-
-- if (srq->error)
-- return -EINVAL;
-+ if (srq->error) {
-+ err = -EINVAL;
-+ rxe_dbg_srq(srq, "srq in error state");
-+ goto err_out;
-+ }
-
- attr->max_wr = srq->rq.queue->buf->index_mask;
- attr->max_sge = srq->rq.max_sge;
- attr->srq_limit = srq->limit;
- return 0;
--}
--
--static int rxe_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
--{
-- struct rxe_srq *srq = to_rsrq(ibsrq);
-
-- rxe_cleanup(srq);
-- return 0;
-+err_out:
-+ rxe_err_srq(srq, "returned err = %d", err);
-+ return err;
- }
-
- static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
-@@ -378,76 +503,116 @@ static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
-
- spin_unlock_irqrestore(&srq->rq.producer_lock, flags);
-
-- if (err)
-+ if (err) {
- *bad_wr = wr;
-+ rxe_err_srq(srq, "returned err = %d", err);
-+ }
-
- return err;
- }
-
-+static int rxe_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
-+{
-+ struct rxe_srq *srq = to_rsrq(ibsrq);
-+ int err;
-+
-+ err = rxe_cleanup(srq);
-+ if (err)
-+ rxe_err_srq(srq, "cleanup failed, err = %d", err);
-+
-+ return 0;
-+}
-+
-+/* qp */
- static int rxe_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init,
- struct ib_udata *udata)
- {
-- int err;
- struct rxe_dev *rxe = to_rdev(ibqp->device);
- struct rxe_pd *pd = to_rpd(ibqp->pd);
- struct rxe_qp *qp = to_rqp(ibqp);
- struct rxe_create_qp_resp __user *uresp = NULL;
-+ int err, cleanup_err;
-
- if (udata) {
-- if (udata->outlen < sizeof(*uresp))
-- return -EINVAL;
-- uresp = udata->outbuf;
-- }
--
-- if (init->create_flags)
-- return -EOPNOTSUPP;
--
-- err = rxe_qp_chk_init(rxe, init);
-- if (err)
-- return err;
-+ if (udata->inlen) {
-+ err = -EINVAL;
-+ rxe_dbg_dev(rxe, "malformed udata, err = %d", err);
-+ goto err_out;
-+ }
-
-- if (udata) {
-- if (udata->inlen)
-- return -EINVAL;
-+ if (udata->outlen < sizeof(*uresp)) {
-+ err = -EINVAL;
-+ rxe_dbg_dev(rxe, "malformed udata, err = %d", err);
-+ goto err_out;
-+ }
-
- qp->is_user = true;
-+ uresp = udata->outbuf;
- } else {
- qp->is_user = false;
- }
-
-+ if (init->create_flags) {
-+ err = -EOPNOTSUPP;
-+ rxe_dbg_dev(rxe, "unsupported create_flags, err = %d", err);
-+ goto err_out;
-+ }
-+
-+ err = rxe_qp_chk_init(rxe, init);
-+ if (err) {
-+ rxe_dbg_dev(rxe, "bad init attr, err = %d", err);
-+ goto err_out;
-+ }
-+
- err = rxe_add_to_pool(&rxe->qp_pool, qp);
-- if (err)
-- return err;
-+ if (err) {
-+ rxe_dbg_dev(rxe, "unable to create qp, err = %d", err);
-+ goto err_out;
-+ }
-
- err = rxe_qp_from_init(rxe, qp, pd, init, uresp, ibqp->pd, udata);
-- if (err)
-- goto qp_init;
-+ if (err) {
-+ rxe_dbg_qp(qp, "create qp failed, err = %d", err);
-+ goto err_cleanup;
-+ }
-
- rxe_finalize(qp);
- return 0;
-
--qp_init:
-- rxe_cleanup(qp);
-+err_cleanup:
-+ cleanup_err = rxe_cleanup(qp);
-+ if (cleanup_err)
-+ rxe_err_qp(qp, "cleanup failed, err = %d", cleanup_err);
-+err_out:
-+ rxe_err_dev(rxe, "returned err = %d", err);
- return err;
- }
-
- static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
- int mask, struct ib_udata *udata)
- {
-- int err;
- struct rxe_dev *rxe = to_rdev(ibqp->device);
- struct rxe_qp *qp = to_rqp(ibqp);
-+ int err;
-
-- if (mask & ~IB_QP_ATTR_STANDARD_BITS)
-- return -EOPNOTSUPP;
-+ if (mask & ~IB_QP_ATTR_STANDARD_BITS) {
-+ err = -EOPNOTSUPP;
-+ rxe_dbg_qp(qp, "unsupported mask = 0x%x, err = %d",
-+ mask, err);
-+ goto err_out;
-+ }
-
- err = rxe_qp_chk_attr(rxe, qp, attr, mask);
-- if (err)
-- return err;
-+ if (err) {
-+ rxe_dbg_qp(qp, "bad mask/attr, err = %d", err);
-+ goto err_out;
-+ }
-
- err = rxe_qp_from_attr(qp, attr, mask, udata);
-- if (err)
-- return err;
-+ if (err) {
-+ rxe_dbg_qp(qp, "modify qp failed, err = %d", err);
-+ goto err_out;
-+ }
-
- if ((mask & IB_QP_AV) && (attr->ah_attr.ah_flags & IB_AH_GRH))
- qp->src_port = rdma_get_udp_sport(attr->ah_attr.grh.flow_label,
-@@ -455,6 +620,10 @@ static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
- qp->attr.dest_qp_num);
-
- return 0;
-+
-+err_out:
-+ rxe_err_qp(qp, "returned err = %d", err);
-+ return err;
- }
-
- static int rxe_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
-@@ -471,38 +640,59 @@ static int rxe_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
- static int rxe_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
- {
- struct rxe_qp *qp = to_rqp(ibqp);
-- int ret;
-+ int err;
-+
-+ err = rxe_qp_chk_destroy(qp);
-+ if (err) {
-+ rxe_dbg_qp(qp, "unable to destroy qp, err = %d", err);
-+ goto err_out;
-+ }
-
-- ret = rxe_qp_chk_destroy(qp);
-- if (ret)
-- return ret;
-+ err = rxe_cleanup(qp);
-+ if (err)
-+ rxe_err_qp(qp, "cleanup failed, err = %d", err);
-
-- rxe_cleanup(qp);
- return 0;
-+
-+err_out:
-+ rxe_err_qp(qp, "returned err = %d", err);
-+ return err;
- }
-
-+/* send wr */
- static int validate_send_wr(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
- unsigned int mask, unsigned int length)
- {
- int num_sge = ibwr->num_sge;
- struct rxe_sq *sq = &qp->sq;
-
-- if (unlikely(num_sge > sq->max_sge))
-- return -EINVAL;
-+ if (unlikely(num_sge > sq->max_sge)) {
-+ rxe_dbg_qp(qp, "num_sge > max_sge");
-+ goto err_out;
-+ }
-
- if (unlikely(mask & WR_ATOMIC_MASK)) {
-- if (length < 8)
-- return -EINVAL;
-+ if (length != 8) {
-+ rxe_dbg_qp(qp, "atomic length != 8");
-+ goto err_out;
-+ }
-
-- if (atomic_wr(ibwr)->remote_addr & 0x7)
-- return -EINVAL;
-+ if (atomic_wr(ibwr)->remote_addr & 0x7) {
-+ rxe_dbg_qp(qp, "misaligned atomic address");
-+ goto err_out;
-+ }
- }
-
- if (unlikely((ibwr->send_flags & IB_SEND_INLINE) &&
-- (length > sq->max_inline)))
-- return -EINVAL;
-+ (length > sq->max_inline))) {
-+ rxe_dbg_qp(qp, "inline length too big");
-+ goto err_out;
-+ }
-
- return 0;
-+
-+err_out:
-+ return -EINVAL;
- }
-
- static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
-@@ -550,12 +740,12 @@ static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
- break;
- case IB_WR_LOCAL_INV:
- wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
-- break;
-+ break;
- case IB_WR_REG_MR:
- wr->wr.reg.mr = reg_wr(ibwr)->mr;
- wr->wr.reg.key = reg_wr(ibwr)->key;
- wr->wr.reg.access = reg_wr(ibwr)->access;
-- break;
-+ break;
- default:
- break;
- }
-@@ -624,9 +814,9 @@ static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
- spin_lock_irqsave(&qp->sq.sq_lock, flags);
-
- full = queue_full(sq->queue, QUEUE_TYPE_FROM_ULP);
--
- if (unlikely(full)) {
- spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
-+ rxe_dbg_qp(qp, "queue full");
- return -ENOMEM;
- }
-
-@@ -652,6 +842,7 @@ static int rxe_post_send_kernel(struct rxe_qp *qp, const struct ib_send_wr *wr,
- while (wr) {
- mask = wr_opcode_mask(wr->opcode, qp);
- if (unlikely(!mask)) {
-+ rxe_dbg_qp(qp, "bad wr opcode for qp");
- err = -EINVAL;
- *bad_wr = wr;
- break;
-@@ -659,6 +850,7 @@ static int rxe_post_send_kernel(struct rxe_qp *qp, const struct ib_send_wr *wr,
-
- if (unlikely((wr->send_flags & IB_SEND_INLINE) &&
- !(mask & WR_INLINE_MASK))) {
-+ rxe_dbg_qp(qp, "opcode doesn't support inline data");
- err = -EINVAL;
- *bad_wr = wr;
- break;
-@@ -669,17 +861,26 @@ static int rxe_post_send_kernel(struct rxe_qp *qp, const struct ib_send_wr *wr,
- length = 0;
- for (i = 0; i < wr->num_sge; i++)
- length += wr->sg_list[i].length;
-+ if (length > 1<<31) {
-+ err = -EINVAL;
-+ rxe_dbg_qp(qp, "message length too long");
-+ *bad_wr = wr;
-+ break;
-+ }
-
- err = post_one_send(qp, wr, mask, length);
--
- if (err) {
- *bad_wr = wr;
- break;
- }
-+
- wr = next;
- }
-
-- rxe_sched_task(&qp->req.task);
-+ /* if we didn't post anything there's nothing to do */
-+ if (!err)
-+ rxe_sched_task(&qp->req.task);
-+
- if (unlikely(qp->req.state == QP_STATE_ERROR))
- rxe_sched_task(&qp->comp.task);
-
-@@ -690,23 +891,90 @@ static int rxe_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
- const struct ib_send_wr **bad_wr)
- {
- struct rxe_qp *qp = to_rqp(ibqp);
-+ int err;
-
- if (unlikely(!qp->valid)) {
- *bad_wr = wr;
-- return -EINVAL;
-+ err = -EINVAL;
-+ rxe_dbg_qp(qp, "qp destroyed");
-+ goto err_out;
- }
-
- if (unlikely(qp->req.state < QP_STATE_READY)) {
- *bad_wr = wr;
-- return -EINVAL;
-+ err = -EINVAL;
-+ rxe_dbg_qp(qp, "qp not ready to send");
-+ goto err_out;
- }
-
- if (qp->is_user) {
- /* Utilize process context to do protocol processing */
- rxe_run_task(&qp->req.task);
-- return 0;
-- } else
-- return rxe_post_send_kernel(qp, wr, bad_wr);
-+ } else {
-+ err = rxe_post_send_kernel(qp, wr, bad_wr);
-+ if (err)
-+ goto err_out;
-+ }
-+
-+ return 0;
-+
-+err_out:
-+ rxe_err_qp(qp, "returned err = %d", err);
-+ return err;
-+}
-+
-+/* recv wr */
-+static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
-+{
-+ int i;
-+ unsigned long length;
-+ struct rxe_recv_wqe *recv_wqe;
-+ int num_sge = ibwr->num_sge;
-+ int full;
-+ int err;
-+
-+ full = queue_full(rq->queue, QUEUE_TYPE_FROM_ULP);
-+ if (unlikely(full)) {
-+ err = -ENOMEM;
-+ rxe_dbg("queue full");
-+ goto err_out;
-+ }
-+
-+ if (unlikely(num_sge > rq->max_sge)) {
-+ err = -EINVAL;
-+ rxe_dbg("bad num_sge > max_sge");
-+ goto err_out;
-+ }
-+
-+ length = 0;
-+ for (i = 0; i < num_sge; i++)
-+ length += ibwr->sg_list[i].length;
-+
-+ /* IBA max message size is 2^31 */
-+ if (length >= (1UL<<31)) {
-+ err = -EINVAL;
-+ rxe_dbg("message length too long");
-+ goto err_out;
-+ }
-+
-+ recv_wqe = queue_producer_addr(rq->queue, QUEUE_TYPE_FROM_ULP);
-+
-+ recv_wqe->wr_id = ibwr->wr_id;
-+ recv_wqe->dma.length = length;
-+ recv_wqe->dma.resid = length;
-+ recv_wqe->dma.num_sge = num_sge;
-+ recv_wqe->dma.cur_sge = 0;
-+ recv_wqe->dma.sge_offset = 0;
-+ memcpy(recv_wqe->dma.sge, ibwr->sg_list,
-+ num_sge * sizeof(struct ib_sge));
-+
-+ queue_advance_producer(rq->queue, QUEUE_TYPE_FROM_ULP);
-+
-+ return 0;
-+
-+err_out:
-+ rxe_dbg("returned err = %d", err);
-+ return err;
- }
-
- static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
-@@ -719,12 +987,16 @@ static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
-
- if (unlikely((qp_state(qp) < IB_QPS_INIT) || !qp->valid)) {
- *bad_wr = wr;
-- return -EINVAL;
-+ err = -EINVAL;
-+ rxe_dbg_qp(qp, "qp destroyed or not ready to post recv");
-+ goto err_out;
- }
-
- if (unlikely(qp->srq)) {
- *bad_wr = wr;
-- return -EINVAL;
-+ err = -EINVAL;
-+ rxe_dbg_qp(qp, "use post_srq_recv instead");
-+ goto err_out;
- }
-
- spin_lock_irqsave(&rq->producer_lock, flags);
-@@ -743,73 +1015,101 @@ static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
- if (qp->resp.state == QP_STATE_ERROR)
- rxe_sched_task(&qp->resp.task);
-
-+err_out:
-+ if (err)
-+ rxe_err_qp(qp, "returned err = %d", err);
-+
- return err;
- }
-
-+/* cq */
- static int rxe_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
- struct ib_udata *udata)
- {
-- int err;
- struct ib_device *dev = ibcq->device;
- struct rxe_dev *rxe = to_rdev(dev);
- struct rxe_cq *cq = to_rcq(ibcq);
- struct rxe_create_cq_resp __user *uresp = NULL;
-+ int err, cleanup_err;
-
- if (udata) {
-- if (udata->outlen < sizeof(*uresp))
-- return -EINVAL;
-+ if (udata->outlen < sizeof(*uresp)) {
-+ err = -EINVAL;
-+ rxe_dbg_dev(rxe, "malformed udata, err = %d", err);
-+ goto err_out;
-+ }
- uresp = udata->outbuf;
- }
-
-- if (attr->flags)
-- return -EOPNOTSUPP;
-+ if (attr->flags) {
-+ err = -EOPNOTSUPP;
-+ rxe_dbg_dev(rxe, "bad attr->flags, err = %d", err);
-+ goto err_out;
-+ }
-
- err = rxe_cq_chk_attr(rxe, NULL, attr->cqe, attr->comp_vector);
-- if (err)
-- return err;
-+ if (err) {
-+ rxe_dbg_dev(rxe, "bad init attributes, err = %d", err);
-+ goto err_out;
-+ }
-+
-+ err = rxe_add_to_pool(&rxe->cq_pool, cq);
-+ if (err) {
-+ rxe_dbg_dev(rxe, "unable to create cq, err = %d", err);
-+ goto err_out;
-+ }
-
- err = rxe_cq_from_init(rxe, cq, attr->cqe, attr->comp_vector, udata,
- uresp);
-- if (err)
-- return err;
--
-- return rxe_add_to_pool(&rxe->cq_pool, cq);
--}
--
--static int rxe_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
--{
-- struct rxe_cq *cq = to_rcq(ibcq);
--
-- /* See IBA C11-17: The CI shall return an error if this Verb is
-- * invoked while a Work Queue is still associated with the CQ.
-- */
-- if (atomic_read(&cq->num_wq))
-- return -EINVAL;
--
-- rxe_cq_disable(cq);
-+ if (err) {
-+ rxe_dbg_cq(cq, "create cq failed, err = %d", err);
-+ goto err_cleanup;
-+ }
-
-- rxe_cleanup(cq);
- return 0;
-+
-+err_cleanup:
-+ cleanup_err = rxe_cleanup(cq);
-+ if (cleanup_err)
-+ rxe_err_cq(cq, "cleanup failed, err = %d", cleanup_err);
-+err_out:
-+ rxe_err_dev(rxe, "returned err = %d", err);
-+ return err;
- }
-
- static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
- {
-- int err;
- struct rxe_cq *cq = to_rcq(ibcq);
- struct rxe_dev *rxe = to_rdev(ibcq->device);
- struct rxe_resize_cq_resp __user *uresp = NULL;
-+ int err;
-
- if (udata) {
-- if (udata->outlen < sizeof(*uresp))
-- return -EINVAL;
-+ if (udata->outlen < sizeof(*uresp)) {
-+ err = -EINVAL;
-+ rxe_dbg_cq(cq, "malformed udata");
-+ goto err_out;
-+ }
- uresp = udata->outbuf;
- }
-
- err = rxe_cq_chk_attr(rxe, cq, cqe, 0);
-- if (err)
-- return err;
-+ if (err) {
-+ rxe_dbg_cq(cq, "bad attr, err = %d", err);
-+ goto err_out;
-+ }
-
-- return rxe_cq_resize_queue(cq, cqe, uresp, udata);
-+ err = rxe_cq_resize_queue(cq, cqe, uresp, udata);
-+ if (err) {
-+ rxe_dbg_cq(cq, "resize cq failed, err = %d", err);
-+ goto err_out;
-+ }
-+
-+ return 0;
-+
-+err_out:
-+ rxe_err_cq(cq, "returned err = %d", err);
-+ return err;
- }
-
- static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
-@@ -823,7 +1123,7 @@ static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
- for (i = 0; i < num_entries; i++) {
- cqe = queue_head(cq->queue, QUEUE_TYPE_TO_ULP);
- if (!cqe)
-- break;
-+ break; /* queue empty */
-
- memcpy(wc++, &cqe->ibwc, sizeof(*wc));
- queue_advance_consumer(cq->queue, QUEUE_TYPE_TO_ULP);
-@@ -864,6 +1164,34 @@ static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
- return ret;
- }
-
-+static int rxe_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
-+{
-+ struct rxe_cq *cq = to_rcq(ibcq);
-+ int err;
-+
-+ /* See IBA C11-17: The CI shall return an error if this Verb is
-+ * invoked while a Work Queue is still associated with the CQ.
-+ */
-+ if (atomic_read(&cq->num_wq)) {
-+ err = -EINVAL;
-+ rxe_dbg_cq(cq, "still in use");
-+ goto err_out;
-+ }
-+
-+ rxe_cq_disable(cq);
-+
-+ err = rxe_cleanup(cq);
-+ if (err)
-+ rxe_err_cq(cq, "cleanup failed, err = %d", err);
-+
-+ return 0;
-+
-+err_out:
-+ rxe_err_cq(cq, "returned err = %d", err);
-+ return err;
-+}
-+
-+/* mr */
- static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
- {
- struct rxe_dev *rxe = to_rdev(ibpd->device);
-@@ -874,12 +1202,15 @@ static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
- mr = kzalloc(sizeof(*mr), GFP_KERNEL);
- if (!mr) {
- err = -ENOMEM;
-+ rxe_dbg_dev(rxe, "no memory for mr");
- goto err_out;
- }
-
- err = rxe_add_to_pool(&rxe->mr_pool, mr);
-- if (err)
-+ if (err) {
-+ rxe_dbg_dev(rxe, "unable to create mr");
- goto err_free;
-+ }
-
- rxe_get(pd);
- mr->ibmr.pd = ibpd;
-@@ -892,46 +1223,53 @@ static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
- err_free:
- kfree(mr);
- err_out:
-+ rxe_err_pd(pd, "returned err = %d", err);
- return ERR_PTR(err);
- }
-
--static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
-- u64 start,
-- u64 length,
-- u64 iova,
-- int access, struct ib_udata *udata)
-+static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd, u64 start,
-+ u64 length, u64 iova, int access,
-+ struct ib_udata *udata)
- {
-- int err;
- struct rxe_dev *rxe = to_rdev(ibpd->device);
- struct rxe_pd *pd = to_rpd(ibpd);
- struct rxe_mr *mr;
-+ int err, cleanup_err;
-
- mr = kzalloc(sizeof(*mr), GFP_KERNEL);
- if (!mr) {
- err = -ENOMEM;
-+ rxe_dbg_pd(pd, "no memory for mr");
- goto err_out;
- }
-
- err = rxe_add_to_pool(&rxe->mr_pool, mr);
-- if (err)
-+ if (err) {
-+ rxe_dbg_pd(pd, "unable to create mr");
- goto err_free;
-+ }
-
- rxe_get(pd);
- mr->ibmr.pd = ibpd;
- mr->ibmr.device = ibpd->device;
-
- err = rxe_mr_init_user(rxe, start, length, iova, access, mr);
-- if (err)
-+ if (err) {
-+ rxe_dbg_mr(mr, "reg_user_mr failed, err = %d", err);
- goto err_cleanup;
-+ }
-
- rxe_finalize(mr);
- return &mr->ibmr;
-
- err_cleanup:
-- rxe_cleanup(mr);
-+ cleanup_err = rxe_cleanup(mr);
-+ if (cleanup_err)
-+ rxe_err_mr(mr, "cleanup failed, err = %d", cleanup_err);
- err_free:
- kfree(mr);
- err_out:
-+ rxe_err_pd(pd, "returned err = %d", err);
- return ERR_PTR(err);
- }
-
-@@ -941,40 +1279,76 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
- struct rxe_dev *rxe = to_rdev(ibpd->device);
- struct rxe_pd *pd = to_rpd(ibpd);
- struct rxe_mr *mr;
-- int err;
-+ int err, cleanup_err;
-
-- if (mr_type != IB_MR_TYPE_MEM_REG)
-- return ERR_PTR(-EINVAL);
-+ if (mr_type != IB_MR_TYPE_MEM_REG) {
-+ err = -EINVAL;
-+ rxe_dbg_pd(pd, "mr type %d not supported, err = %d",
-+ mr_type, err);
-+ goto err_out;
-+ }
-
- mr = kzalloc(sizeof(*mr), GFP_KERNEL);
- if (!mr) {
- err = -ENOMEM;
-+ rxe_dbg_mr(mr, "no memory for mr");
- goto err_out;
- }
-
- err = rxe_add_to_pool(&rxe->mr_pool, mr);
-- if (err)
-+ if (err) {
-+ rxe_dbg_mr(mr, "unable to create mr, err = %d", err);
- goto err_free;
-+ }
-
- rxe_get(pd);
- mr->ibmr.pd = ibpd;
- mr->ibmr.device = ibpd->device;
-
- err = rxe_mr_init_fast(max_num_sg, mr);
-- if (err)
-+ if (err) {
-+ rxe_dbg_mr(mr, "alloc_mr failed, err = %d", err);
- goto err_cleanup;
-+ }
-
- rxe_finalize(mr);
- return &mr->ibmr;
-
- err_cleanup:
-- rxe_cleanup(mr);
-+ cleanup_err = rxe_cleanup(mr);
-+ if (cleanup_err)
-+ rxe_err_mr(mr, "cleanup failed, err = %d", err);
- err_free:
- kfree(mr);
- err_out:
-+ rxe_err_pd(pd, "returned err = %d", err);
- return ERR_PTR(err);
- }
-
-+static int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
-+{
-+ struct rxe_mr *mr = to_rmr(ibmr);
-+ int err, cleanup_err;
-+
-+ /* See IBA 10.6.7.2.6 */
-+ if (atomic_read(&mr->num_mw) > 0) {
-+ err = -EINVAL;
-+ rxe_dbg_mr(mr, "mr has mw's bound");
-+ goto err_out;
-+ }
-+
-+ cleanup_err = rxe_cleanup(mr);
-+ if (cleanup_err)
-+ rxe_err_mr(mr, "cleanup failed, err = %d", cleanup_err);
-+
-+ kfree_rcu(mr);
-+ return 0;
-+
-+err_out:
-+ rxe_err_mr(mr, "returned err = %d", err);
-+ return err;
-+}
-+
- static ssize_t parent_show(struct device *device,
- struct device_attribute *attr, char *buf)
- {
---
-2.39.2
-
+++ /dev/null
-From c476b6405d404bebae518870115d3e422712b8ae Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Fri, 3 Mar 2023 16:16:22 -0600
-Subject: RDMA/rxe: Change rxe_dbg to rxe_dbg_dev
-
-From: Bob Pearson <rpearsonhpe@gmail.com>
-
-[ Upstream commit a9fb3287211e64b94ceb2b6b4791cc2b829d0d56 ]
-
-Replace the name rxe_dbg with rxe_dbg_dev, which better matches
-the remaining rxe_dbg_xxx macros for debug messages with a
-rxe device parameter. Reuse the name rxe_dbg for debug messages
-which do not have a rxe device parameter.
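-
-After this change the two forms are used as follows (an illustrative
-sketch; the messages are taken from call sites in this series):
-
-	rxe_dbg("queue full");		/* no rxe device in scope */
-	rxe_dbg_dev(rxe, "already configured on %s\n", ndev->name);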
-
-Link: https://lore.kernel.org/r/20230303221623.8053-3-rpearsonhpe@gmail.com
-Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
-Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
-Stable-dep-of: 78b26a335310 ("RDMA/rxe: Remove tasklet call from rxe_cq.c")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- drivers/infiniband/sw/rxe/rxe.c | 2 +-
- drivers/infiniband/sw/rxe/rxe.h | 3 ++-
- drivers/infiniband/sw/rxe/rxe_cq.c | 6 +++---
- drivers/infiniband/sw/rxe/rxe_icrc.c | 4 ++--
- drivers/infiniband/sw/rxe/rxe_mmap.c | 6 +++---
- drivers/infiniband/sw/rxe/rxe_net.c | 4 ++--
- drivers/infiniband/sw/rxe/rxe_qp.c | 16 ++++++++--------
- drivers/infiniband/sw/rxe/rxe_srq.c | 6 +++---
- drivers/infiniband/sw/rxe/rxe_verbs.c | 2 +-
- 9 files changed, 25 insertions(+), 24 deletions(-)
-
-diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
-index a3f05fdd9fac2..d57ba7a5964b9 100644
---- a/drivers/infiniband/sw/rxe/rxe.c
-+++ b/drivers/infiniband/sw/rxe/rxe.c
-@@ -187,7 +187,7 @@ static int rxe_newlink(const char *ibdev_name, struct net_device *ndev)
- rxe = rxe_get_dev_from_net(ndev);
- if (rxe) {
- ib_device_put(&rxe->ib_dev);
-- rxe_dbg(rxe, "already configured on %s\n", ndev->name);
-+ rxe_dbg_dev(rxe, "already configured on %s\n", ndev->name);
- err = -EEXIST;
- goto err;
- }
-diff --git a/drivers/infiniband/sw/rxe/rxe.h b/drivers/infiniband/sw/rxe/rxe.h
-index 2415f3704f576..0757acc381038 100644
---- a/drivers/infiniband/sw/rxe/rxe.h
-+++ b/drivers/infiniband/sw/rxe/rxe.h
-@@ -38,7 +38,8 @@
-
- #define RXE_ROCE_V2_SPORT (0xc000)
-
--#define rxe_dbg(rxe, fmt, ...) ibdev_dbg(&(rxe)->ib_dev, \
-+#define rxe_dbg(fmt, ...) pr_debug("%s: " fmt "\n", __func__, ##__VA_ARGS__)
-+#define rxe_dbg_dev(rxe, fmt, ...) ibdev_dbg(&(rxe)->ib_dev, \
- "%s: " fmt, __func__, ##__VA_ARGS__)
- #define rxe_dbg_uc(uc, fmt, ...) ibdev_dbg((uc)->ibuc.device, \
- "uc#%d %s: " fmt, (uc)->elem.index, __func__, ##__VA_ARGS__)
-diff --git a/drivers/infiniband/sw/rxe/rxe_cq.c b/drivers/infiniband/sw/rxe/rxe_cq.c
-index 1df186534639a..22fbc198e5d17 100644
---- a/drivers/infiniband/sw/rxe/rxe_cq.c
-+++ b/drivers/infiniband/sw/rxe/rxe_cq.c
-@@ -14,12 +14,12 @@ int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq,
- int count;
-
- if (cqe <= 0) {
-- rxe_dbg(rxe, "cqe(%d) <= 0\n", cqe);
-+ rxe_dbg_dev(rxe, "cqe(%d) <= 0\n", cqe);
- goto err1;
- }
-
- if (cqe > rxe->attr.max_cqe) {
-- rxe_dbg(rxe, "cqe(%d) > max_cqe(%d)\n",
-+ rxe_dbg_dev(rxe, "cqe(%d) > max_cqe(%d)\n",
- cqe, rxe->attr.max_cqe);
- goto err1;
- }
-@@ -65,7 +65,7 @@ int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
- cq->queue = rxe_queue_init(rxe, &cqe,
- sizeof(struct rxe_cqe), type);
- if (!cq->queue) {
-- rxe_dbg(rxe, "unable to create cq\n");
-+ rxe_dbg_dev(rxe, "unable to create cq\n");
- return -ENOMEM;
- }
-
-diff --git a/drivers/infiniband/sw/rxe/rxe_icrc.c b/drivers/infiniband/sw/rxe/rxe_icrc.c
-index 71bc2c1895888..fdf5f08cd8f17 100644
---- a/drivers/infiniband/sw/rxe/rxe_icrc.c
-+++ b/drivers/infiniband/sw/rxe/rxe_icrc.c
-@@ -21,7 +21,7 @@ int rxe_icrc_init(struct rxe_dev *rxe)
-
- tfm = crypto_alloc_shash("crc32", 0, 0);
- if (IS_ERR(tfm)) {
-- rxe_dbg(rxe, "failed to init crc32 algorithm err: %ld\n",
-+ rxe_dbg_dev(rxe, "failed to init crc32 algorithm err: %ld\n",
- PTR_ERR(tfm));
- return PTR_ERR(tfm);
- }
-@@ -51,7 +51,7 @@ static __be32 rxe_crc32(struct rxe_dev *rxe, __be32 crc, void *next, size_t len)
- *(__be32 *)shash_desc_ctx(shash) = crc;
- err = crypto_shash_update(shash, next, len);
- if (unlikely(err)) {
-- rxe_dbg(rxe, "failed crc calculation, err: %d\n", err);
-+ rxe_dbg_dev(rxe, "failed crc calculation, err: %d\n", err);
- return (__force __be32)crc32_le((__force u32)crc, next, len);
- }
-
-diff --git a/drivers/infiniband/sw/rxe/rxe_mmap.c b/drivers/infiniband/sw/rxe/rxe_mmap.c
-index a47d72dbc5376..6b7f2bd698799 100644
---- a/drivers/infiniband/sw/rxe/rxe_mmap.c
-+++ b/drivers/infiniband/sw/rxe/rxe_mmap.c
-@@ -79,7 +79,7 @@ int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
-
- /* Don't allow a mmap larger than the object. */
- if (size > ip->info.size) {
-- rxe_dbg(rxe, "mmap region is larger than the object!\n");
-+ rxe_dbg_dev(rxe, "mmap region is larger than the object!\n");
- spin_unlock_bh(&rxe->pending_lock);
- ret = -EINVAL;
- goto done;
-@@ -87,7 +87,7 @@ int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
-
- goto found_it;
- }
-- rxe_dbg(rxe, "unable to find pending mmap info\n");
-+ rxe_dbg_dev(rxe, "unable to find pending mmap info\n");
- spin_unlock_bh(&rxe->pending_lock);
- ret = -EINVAL;
- goto done;
-@@ -98,7 +98,7 @@ int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
-
- ret = remap_vmalloc_range(vma, ip->obj, 0);
- if (ret) {
-- rxe_dbg(rxe, "err %d from remap_vmalloc_range\n", ret);
-+ rxe_dbg_dev(rxe, "err %d from remap_vmalloc_range\n", ret);
- goto done;
- }
-
-diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
-index e02e1624bcf4d..a2ace42e95366 100644
---- a/drivers/infiniband/sw/rxe/rxe_net.c
-+++ b/drivers/infiniband/sw/rxe/rxe_net.c
-@@ -596,7 +596,7 @@ static int rxe_notify(struct notifier_block *not_blk,
- rxe_port_down(rxe);
- break;
- case NETDEV_CHANGEMTU:
-- rxe_dbg(rxe, "%s changed mtu to %d\n", ndev->name, ndev->mtu);
-+ rxe_dbg_dev(rxe, "%s changed mtu to %d\n", ndev->name, ndev->mtu);
- rxe_set_mtu(rxe, ndev->mtu);
- break;
- case NETDEV_CHANGE:
-@@ -608,7 +608,7 @@ static int rxe_notify(struct notifier_block *not_blk,
- case NETDEV_CHANGENAME:
- case NETDEV_FEAT_CHANGE:
- default:
-- rxe_dbg(rxe, "ignoring netdev event = %ld for %s\n",
-+ rxe_dbg_dev(rxe, "ignoring netdev event = %ld for %s\n",
- event, ndev->name);
- break;
- }
-diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
-index ab72db68b58f6..c954dd9394baf 100644
---- a/drivers/infiniband/sw/rxe/rxe_qp.c
-+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
-@@ -19,33 +19,33 @@ static int rxe_qp_chk_cap(struct rxe_dev *rxe, struct ib_qp_cap *cap,
- int has_srq)
- {
- if (cap->max_send_wr > rxe->attr.max_qp_wr) {
-- rxe_dbg(rxe, "invalid send wr = %u > %d\n",
-+ rxe_dbg_dev(rxe, "invalid send wr = %u > %d\n",
- cap->max_send_wr, rxe->attr.max_qp_wr);
- goto err1;
- }
-
- if (cap->max_send_sge > rxe->attr.max_send_sge) {
-- rxe_dbg(rxe, "invalid send sge = %u > %d\n",
-+ rxe_dbg_dev(rxe, "invalid send sge = %u > %d\n",
- cap->max_send_sge, rxe->attr.max_send_sge);
- goto err1;
- }
-
- if (!has_srq) {
- if (cap->max_recv_wr > rxe->attr.max_qp_wr) {
-- rxe_dbg(rxe, "invalid recv wr = %u > %d\n",
-+ rxe_dbg_dev(rxe, "invalid recv wr = %u > %d\n",
- cap->max_recv_wr, rxe->attr.max_qp_wr);
- goto err1;
- }
-
- if (cap->max_recv_sge > rxe->attr.max_recv_sge) {
-- rxe_dbg(rxe, "invalid recv sge = %u > %d\n",
-+ rxe_dbg_dev(rxe, "invalid recv sge = %u > %d\n",
- cap->max_recv_sge, rxe->attr.max_recv_sge);
- goto err1;
- }
- }
-
- if (cap->max_inline_data > rxe->max_inline_data) {
-- rxe_dbg(rxe, "invalid max inline data = %u > %d\n",
-+ rxe_dbg_dev(rxe, "invalid max inline data = %u > %d\n",
- cap->max_inline_data, rxe->max_inline_data);
- goto err1;
- }
-@@ -73,7 +73,7 @@ int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init)
- }
-
- if (!init->recv_cq || !init->send_cq) {
-- rxe_dbg(rxe, "missing cq\n");
-+ rxe_dbg_dev(rxe, "missing cq\n");
- goto err1;
- }
-
-@@ -82,14 +82,14 @@ int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init)
-
- if (init->qp_type == IB_QPT_GSI) {
- if (!rdma_is_port_valid(&rxe->ib_dev, port_num)) {
-- rxe_dbg(rxe, "invalid port = %d\n", port_num);
-+ rxe_dbg_dev(rxe, "invalid port = %d\n", port_num);
- goto err1;
- }
-
- port = &rxe->port;
-
- if (init->qp_type == IB_QPT_GSI && port->qp_gsi_index) {
-- rxe_dbg(rxe, "GSI QP exists for port %d\n", port_num);
-+ rxe_dbg_dev(rxe, "GSI QP exists for port %d\n", port_num);
- goto err1;
- }
- }
-diff --git a/drivers/infiniband/sw/rxe/rxe_srq.c b/drivers/infiniband/sw/rxe/rxe_srq.c
-index 82e37a41ced40..27ca82ec0826b 100644
---- a/drivers/infiniband/sw/rxe/rxe_srq.c
-+++ b/drivers/infiniband/sw/rxe/rxe_srq.c
-@@ -13,13 +13,13 @@ int rxe_srq_chk_init(struct rxe_dev *rxe, struct ib_srq_init_attr *init)
- struct ib_srq_attr *attr = &init->attr;
-
- if (attr->max_wr > rxe->attr.max_srq_wr) {
-- rxe_dbg(rxe, "max_wr(%d) > max_srq_wr(%d)\n",
-+ rxe_dbg_dev(rxe, "max_wr(%d) > max_srq_wr(%d)\n",
- attr->max_wr, rxe->attr.max_srq_wr);
- goto err1;
- }
-
- if (attr->max_wr <= 0) {
-- rxe_dbg(rxe, "max_wr(%d) <= 0\n", attr->max_wr);
-+ rxe_dbg_dev(rxe, "max_wr(%d) <= 0\n", attr->max_wr);
- goto err1;
- }
-
-@@ -27,7 +27,7 @@ int rxe_srq_chk_init(struct rxe_dev *rxe, struct ib_srq_init_attr *init)
- attr->max_wr = RXE_MIN_SRQ_WR;
-
- if (attr->max_sge > rxe->attr.max_srq_sge) {
-- rxe_dbg(rxe, "max_sge(%d) > max_srq_sge(%d)\n",
-+ rxe_dbg_dev(rxe, "max_sge(%d) > max_srq_sge(%d)\n",
- attr->max_sge, rxe->attr.max_srq_sge);
- goto err1;
- }
-diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
-index e14050a692766..f178d0773ff20 100644
---- a/drivers/infiniband/sw/rxe/rxe_verbs.c
-+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
-@@ -1095,7 +1095,7 @@ int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name)
-
- err = ib_register_device(dev, ibdev_name, NULL);
- if (err)
-- rxe_dbg(rxe, "failed with error %d\n", err);
-+ rxe_dbg_dev(rxe, "failed with error %d\n", err);
-
- /*
- * Note that rxe may be invalid at this point if another thread
---
-2.39.2
-
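A minimal sketch of the rxe_dbg_dev() macro that the hunks above switch to,
assuming it mirrors the rxe_err_dev() definition added by the "Extend dbg log
messages" patch later in this file, with ibdev_dbg() in place of
ibdev_err_ratelimited():

	/* device-scoped debug message, prefixed with the ibdev name and __func__ */
	#define rxe_dbg_dev(rxe, fmt, ...) ibdev_dbg(&(rxe)->ib_dev, \
			"%s: " fmt, __func__, ##__VA_ARGS__)
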
+++ /dev/null
-From 003c4979d16994ce678244c27d97ac5996e3e671 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Wed, 29 Mar 2023 21:14:01 +0300
-Subject: RDMA/rxe: Clean kzalloc failure paths
-
-From: Leon Romanovsky <leonro@nvidia.com>
-
-[ Upstream commit b6ba68555d75fd99f7daa9c5a5e476f8635cb155 ]
-
-There is no need to print any debug messages after a failure to
-allocate memory, because the kernel will print OOM dumps anyway.
-
-Together with the removal of these messages, remove the useless
-goto jumps.
-
-Fixes: 5bf944f24129 ("RDMA/rxe: Add error messages")
-Reported-by: Dan Carpenter <error27@gmail.com>
-Link: https://lore.kernel.org/all/ea43486f-43dd-4054-b1d5-3a0d202be621@kili.mountain
-Link: https://lore.kernel.org/r/d3cedf723b84e73e8062a67b7489d33802bafba2.1680113597.git.leon@kernel.org
-Reviewed-by: Bob Pearson <rpearsonhpe@gmail.com>
-Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- drivers/infiniband/sw/rxe/rxe_queue.c | 5 ++---
- drivers/infiniband/sw/rxe/rxe_verbs.c | 27 +++++++--------------------
- 2 files changed, 9 insertions(+), 23 deletions(-)
-
-diff --git a/drivers/infiniband/sw/rxe/rxe_queue.c b/drivers/infiniband/sw/rxe/rxe_queue.c
-index d6dbf5a0058dc..9611ee191a46d 100644
---- a/drivers/infiniband/sw/rxe/rxe_queue.c
-+++ b/drivers/infiniband/sw/rxe/rxe_queue.c
-@@ -61,11 +61,11 @@ struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe, int *num_elem,
-
- /* num_elem == 0 is allowed, but uninteresting */
- if (*num_elem < 0)
-- goto err1;
-+ return NULL;
-
- q = kzalloc(sizeof(*q), GFP_KERNEL);
- if (!q)
-- goto err1;
-+ return NULL;
-
- q->rxe = rxe;
- q->type = type;
-@@ -100,7 +100,6 @@ struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe, int *num_elem,
-
- err2:
- kfree(q);
--err1:
- return NULL;
- }
-
-diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
-index 090d5bfb1e18d..4e2db7c2e4ed7 100644
---- a/drivers/infiniband/sw/rxe/rxe_verbs.c
-+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
-@@ -1198,11 +1198,8 @@ static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
- int err;
-
- mr = kzalloc(sizeof(*mr), GFP_KERNEL);
-- if (!mr) {
-- err = -ENOMEM;
-- rxe_dbg_dev(rxe, "no memory for mr");
-- goto err_out;
-- }
-+ if (!mr)
-+ return ERR_PTR(-ENOMEM);
-
- err = rxe_add_to_pool(&rxe->mr_pool, mr);
- if (err) {
-@@ -1220,7 +1217,6 @@ static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
-
- err_free:
- kfree(mr);
--err_out:
- rxe_err_pd(pd, "returned err = %d", err);
- return ERR_PTR(err);
- }
-@@ -1235,11 +1231,8 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd, u64 start,
- int err, cleanup_err;
-
- mr = kzalloc(sizeof(*mr), GFP_KERNEL);
-- if (!mr) {
-- err = -ENOMEM;
-- rxe_dbg_pd(pd, "no memory for mr");
-- goto err_out;
-- }
-+ if (!mr)
-+ return ERR_PTR(-ENOMEM);
-
- err = rxe_add_to_pool(&rxe->mr_pool, mr);
- if (err) {
-@@ -1266,7 +1259,6 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd, u64 start,
- rxe_err_mr(mr, "cleanup failed, err = %d", cleanup_err);
- err_free:
- kfree(mr);
--err_out:
- rxe_err_pd(pd, "returned err = %d", err);
- return ERR_PTR(err);
- }
-@@ -1287,17 +1279,12 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
- }
-
- mr = kzalloc(sizeof(*mr), GFP_KERNEL);
-- if (!mr) {
-- err = -ENOMEM;
-- rxe_dbg_mr(mr, "no memory for mr");
-- goto err_out;
-- }
-+ if (!mr)
-+ return ERR_PTR(-ENOMEM);
-
- err = rxe_add_to_pool(&rxe->mr_pool, mr);
-- if (err) {
-- rxe_dbg_mr(mr, "unable to create mr, err = %d", err);
-+ if (err)
- goto err_free;
-- }
-
- rxe_get(pd);
- mr->ibmr.pd = ibpd;
---
-2.39.2
-
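A before/after sketch of the pattern the patch above applies, taken from the
rxe_get_dma_mr() hunk (variable names as in that hunk):

	/* before: extra debug message and goto on allocation failure */
	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr) {
		err = -ENOMEM;
		rxe_dbg_dev(rxe, "no memory for mr");
		goto err_out;
	}

	/* after: the kernel's OOM dump already reports the failure */
	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);
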
Stable-dep-of: b2b1ddc45745 ("RDMA/rxe: Fix the error "trying to register non-static key in rxe_cleanup_task"")
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
- drivers/infiniband/sw/rxe/rxe_comp.c | 3 +--
- drivers/infiniband/sw/rxe/rxe_loc.h | 6 +++---
- drivers/infiniband/sw/rxe/rxe_req.c | 3 +--
- drivers/infiniband/sw/rxe/rxe_resp.c | 3 +--
- drivers/infiniband/sw/rxe/rxe_task.c | 11 ++++++-----
- drivers/infiniband/sw/rxe/rxe_task.h | 9 +++++----
+ drivers/infiniband/sw/rxe/rxe_comp.c | 3 +--
+ drivers/infiniband/sw/rxe/rxe_loc.h | 6 +++---
+ drivers/infiniband/sw/rxe/rxe_req.c | 3 +--
+ drivers/infiniband/sw/rxe/rxe_resp.c | 3 +--
+ drivers/infiniband/sw/rxe/rxe_task.c | 11 ++++++-----
+ drivers/infiniband/sw/rxe/rxe_task.h | 9 +++++----
6 files changed, 17 insertions(+), 18 deletions(-)
-diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
-index 876057e3ee3c1..cbfa16b3a4902 100644
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
-@@ -575,9 +575,8 @@ static void free_pkt(struct rxe_pkt_info *pkt)
+@@ -571,9 +571,8 @@ static void free_pkt(struct rxe_pkt_info
ib_device_put(dev);
}
struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
struct rxe_send_wqe *wqe = NULL;
struct sk_buff *skb = NULL;
-diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
-index 839de34cf4c94..804b15e929dd9 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
-@@ -170,9 +170,9 @@ void rxe_srq_cleanup(struct rxe_pool_elem *elem);
+@@ -171,9 +171,9 @@ void rxe_srq_cleanup(struct rxe_pool_ele
void rxe_dealloc(struct ib_device *ib_dev);
/* rxe_icrc.c */
int rxe_icrc_init(struct rxe_dev *rxe);
-diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
-index 899c8779f8001..f2dc2d191e16f 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
-@@ -635,9 +635,8 @@ static int rxe_do_local_ops(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
+@@ -635,9 +635,8 @@ static int rxe_do_local_ops(struct rxe_q
return 0;
}
struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
struct rxe_pkt_info pkt;
struct sk_buff *skb;
-diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
-index 4217eec03a943..7cb1b962d6652 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
-@@ -1443,9 +1443,8 @@ static void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify)
+@@ -1439,9 +1439,8 @@ static void rxe_drain_req_pkts(struct rx
queue_advance_consumer(q, q->type);
}
struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
enum resp_states state;
struct rxe_pkt_info *pkt = NULL;
-diff --git a/drivers/infiniband/sw/rxe/rxe_task.c b/drivers/infiniband/sw/rxe/rxe_task.c
-index 60b90e33a8849..959cc6229a34e 100644
--- a/drivers/infiniband/sw/rxe/rxe_task.c
+++ b/drivers/infiniband/sw/rxe/rxe_task.c
@@ -11,7 +11,7 @@ int __rxe_do_task(struct rxe_task *task)
;
task->ret = ret;
-@@ -29,7 +29,7 @@ static void do_task(struct tasklet_struct *t)
+@@ -29,7 +29,7 @@ static void do_task(struct tasklet_struc
int cont;
int ret;
struct rxe_task *task = from_tasklet(task, t, tasklet);
unsigned int iterations = RXE_MAX_ITERATIONS;
spin_lock_bh(&task->lock);
-@@ -54,7 +54,7 @@ static void do_task(struct tasklet_struct *t)
+@@ -54,7 +54,7 @@ static void do_task(struct tasklet_struc
do {
cont = 0;
spin_lock_bh(&task->lock);
switch (task->state) {
-@@ -91,9 +91,10 @@ static void do_task(struct tasklet_struct *t)
+@@ -91,9 +91,10 @@ static void do_task(struct tasklet_struc
task->ret = ret;
}
task->func = func;
task->destroyed = false;
-diff --git a/drivers/infiniband/sw/rxe/rxe_task.h b/drivers/infiniband/sw/rxe/rxe_task.h
-index 7b88129702ac6..41efd5fd49b03 100644
--- a/drivers/infiniband/sw/rxe/rxe_task.h
+++ b/drivers/infiniband/sw/rxe/rxe_task.h
@@ -22,18 +22,19 @@ struct rxe_task {
/* cleanup task */
void rxe_cleanup_task(struct rxe_task *task);
---
-2.39.2
-
+++ /dev/null
-From 70ece68f6a3aa7cc1608da69e564c7d2dd11b30a Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Fri, 3 Mar 2023 16:16:23 -0600
-Subject: RDMA/rxe: Extend dbg log messages to err and info
-
-From: Bob Pearson <rpearsonhpe@gmail.com>
-
-[ Upstream commit 9ac01f434a1eb56ea94611bd75cf62fa276b41f4 ]
-
-Extend the dbg log messages (e.g. rxe_dbg_xxx) to include
-err and info types. rxe.c is modified to use these new log
-messages as examples.
-
-Link: https://lore.kernel.org/r/20230303221623.8053-4-rpearsonhpe@gmail.com
-Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
-Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
-Stable-dep-of: 78b26a335310 ("RDMA/rxe: Remove tasklet call from rxe_cq.c")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- drivers/infiniband/sw/rxe/rxe.c | 8 ++++---
- drivers/infiniband/sw/rxe/rxe.h | 42 +++++++++++++++++++++++++++++++++
- 2 files changed, 47 insertions(+), 3 deletions(-)
-
-diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
-index d57ba7a5964b9..7a7e713de52db 100644
---- a/drivers/infiniband/sw/rxe/rxe.c
-+++ b/drivers/infiniband/sw/rxe/rxe.c
-@@ -160,6 +160,8 @@ void rxe_set_mtu(struct rxe_dev *rxe, unsigned int ndev_mtu)
-
- port->attr.active_mtu = mtu;
- port->mtu_cap = ib_mtu_enum_to_int(mtu);
-+
-+ rxe_info_dev(rxe, "Set mtu to %d", port->mtu_cap);
- }
-
- /* called by ifc layer to create new rxe device.
-@@ -179,7 +181,7 @@ static int rxe_newlink(const char *ibdev_name, struct net_device *ndev)
- int err = 0;
-
- if (is_vlan_dev(ndev)) {
-- pr_err("rxe creation allowed on top of a real device only\n");
-+ rxe_err("rxe creation allowed on top of a real device only");
- err = -EPERM;
- goto err;
- }
-@@ -187,14 +189,14 @@ static int rxe_newlink(const char *ibdev_name, struct net_device *ndev)
- rxe = rxe_get_dev_from_net(ndev);
- if (rxe) {
- ib_device_put(&rxe->ib_dev);
-- rxe_dbg_dev(rxe, "already configured on %s\n", ndev->name);
-+ rxe_err_dev(rxe, "already configured on %s", ndev->name);
- err = -EEXIST;
- goto err;
- }
-
- err = rxe_net_add(ibdev_name, ndev);
- if (err) {
-- pr_debug("failed to add %s\n", ndev->name);
-+ rxe_err("failed to add %s\n", ndev->name);
- goto err;
- }
- err:
-diff --git a/drivers/infiniband/sw/rxe/rxe.h b/drivers/infiniband/sw/rxe/rxe.h
-index 0757acc381038..bd8a8ea4ea8fd 100644
---- a/drivers/infiniband/sw/rxe/rxe.h
-+++ b/drivers/infiniband/sw/rxe/rxe.h
-@@ -58,6 +58,48 @@
- #define rxe_dbg_mw(mw, fmt, ...) ibdev_dbg((mw)->ibmw.device, \
- "mw#%d %s: " fmt, (mw)->elem.index, __func__, ##__VA_ARGS__)
-
-+#define rxe_err(fmt, ...) pr_err_ratelimited("%s: " fmt "\n", __func__, \
-+ ##__VA_ARGS__)
-+#define rxe_err_dev(rxe, fmt, ...) ibdev_err_ratelimited(&(rxe)->ib_dev, \
-+ "%s: " fmt, __func__, ##__VA_ARGS__)
-+#define rxe_err_uc(uc, fmt, ...) ibdev_err_ratelimited((uc)->ibuc.device, \
-+ "uc#%d %s: " fmt, (uc)->elem.index, __func__, ##__VA_ARGS__)
-+#define rxe_err_pd(pd, fmt, ...) ibdev_err_ratelimited((pd)->ibpd.device, \
-+ "pd#%d %s: " fmt, (pd)->elem.index, __func__, ##__VA_ARGS__)
-+#define rxe_err_ah(ah, fmt, ...) ibdev_err_ratelimited((ah)->ibah.device, \
-+ "ah#%d %s: " fmt, (ah)->elem.index, __func__, ##__VA_ARGS__)
-+#define rxe_err_srq(srq, fmt, ...) ibdev_err_ratelimited((srq)->ibsrq.device, \
-+ "srq#%d %s: " fmt, (srq)->elem.index, __func__, ##__VA_ARGS__)
-+#define rxe_err_qp(qp, fmt, ...) ibdev_err_ratelimited((qp)->ibqp.device, \
-+ "qp#%d %s: " fmt, (qp)->elem.index, __func__, ##__VA_ARGS__)
-+#define rxe_err_cq(cq, fmt, ...) ibdev_err_ratelimited((cq)->ibcq.device, \
-+ "cq#%d %s: " fmt, (cq)->elem.index, __func__, ##__VA_ARGS__)
-+#define rxe_err_mr(mr, fmt, ...) ibdev_err_ratelimited((mr)->ibmr.device, \
-+ "mr#%d %s: " fmt, (mr)->elem.index, __func__, ##__VA_ARGS__)
-+#define rxe_err_mw(mw, fmt, ...) ibdev_err_ratelimited((mw)->ibmw.device, \
-+ "mw#%d %s: " fmt, (mw)->elem.index, __func__, ##__VA_ARGS__)
-+
-+#define rxe_info(fmt, ...) pr_info_ratelimited("%s: " fmt "\n", __func__, \
-+ ##__VA_ARGS__)
-+#define rxe_info_dev(rxe, fmt, ...) ibdev_info_ratelimited(&(rxe)->ib_dev, \
-+ "%s: " fmt, __func__, ##__VA_ARGS__)
-+#define rxe_info_uc(uc, fmt, ...) ibdev_info_ratelimited((uc)->ibuc.device, \
-+ "uc#%d %s: " fmt, (uc)->elem.index, __func__, ##__VA_ARGS__)
-+#define rxe_info_pd(pd, fmt, ...) ibdev_info_ratelimited((pd)->ibpd.device, \
-+ "pd#%d %s: " fmt, (pd)->elem.index, __func__, ##__VA_ARGS__)
-+#define rxe_info_ah(ah, fmt, ...) ibdev_info_ratelimited((ah)->ibah.device, \
-+ "ah#%d %s: " fmt, (ah)->elem.index, __func__, ##__VA_ARGS__)
-+#define rxe_info_srq(srq, fmt, ...) ibdev_info_ratelimited((srq)->ibsrq.device, \
-+ "srq#%d %s: " fmt, (srq)->elem.index, __func__, ##__VA_ARGS__)
-+#define rxe_info_qp(qp, fmt, ...) ibdev_info_ratelimited((qp)->ibqp.device, \
-+ "qp#%d %s: " fmt, (qp)->elem.index, __func__, ##__VA_ARGS__)
-+#define rxe_info_cq(cq, fmt, ...) ibdev_info_ratelimited((cq)->ibcq.device, \
-+ "cq#%d %s: " fmt, (cq)->elem.index, __func__, ##__VA_ARGS__)
-+#define rxe_info_mr(mr, fmt, ...) ibdev_info_ratelimited((mr)->ibmr.device, \
-+ "mr#%d %s: " fmt, (mr)->elem.index, __func__, ##__VA_ARGS__)
-+#define rxe_info_mw(mw, fmt, ...) ibdev_info_ratelimited((mw)->ibmw.device, \
-+ "mw#%d %s: " fmt, (mw)->elem.index, __func__, ##__VA_ARGS__)
-+
- /* responder states */
- enum resp_states {
- RESPST_NONE,
---
-2.39.2
-
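The new macros follow the existing rxe_dbg_* pattern: ratelimited, tagged with
__func__, and, for pool objects, with the object's pool index. A short usage
sketch; the first call is taken from the rxe_set_mtu() hunk above, the second
is a hypothetical example of an object-scoped variant:

	/* device-scoped info message */
	rxe_info_dev(rxe, "Set mtu to %d", port->mtu_cap);

	/* object-scoped error message, rendered as e.g. "qp#5 <func>: ..." */
	rxe_err_qp(qp, "invalid state = %d", state);
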
Signed-off-by: Leon Romanovsky <leon@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
- drivers/infiniband/sw/rxe/rxe_cq.c | 32 +++------------------------
- drivers/infiniband/sw/rxe/rxe_verbs.c | 2 --
- drivers/infiniband/sw/rxe/rxe_verbs.h | 2 --
+ drivers/infiniband/sw/rxe/rxe_cq.c | 32 +++-----------------------------
+ drivers/infiniband/sw/rxe/rxe_verbs.c | 2 --
+ drivers/infiniband/sw/rxe/rxe_verbs.h | 2 --
3 files changed, 3 insertions(+), 33 deletions(-)
-diff --git a/drivers/infiniband/sw/rxe/rxe_cq.c b/drivers/infiniband/sw/rxe/rxe_cq.c
-index 22fbc198e5d17..519ddec29b4ba 100644
--- a/drivers/infiniband/sw/rxe/rxe_cq.c
+++ b/drivers/infiniband/sw/rxe/rxe_cq.c
-@@ -39,21 +39,6 @@ int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq,
+@@ -39,21 +39,6 @@ err1:
return -EINVAL;
}
int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
int comp_vector, struct ib_udata *udata,
struct rxe_create_cq_resp __user *uresp)
-@@ -79,10 +64,6 @@ int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
+@@ -79,10 +64,6 @@ int rxe_cq_from_init(struct rxe_dev *rxe
cq->is_user = uresp;
spin_lock_init(&cq->cq_lock);
cq->ibcq.cqe = cqe;
return 0;
-@@ -103,6 +84,7 @@ int rxe_cq_resize_queue(struct rxe_cq *cq, int cqe,
+@@ -103,6 +84,7 @@ int rxe_cq_resize_queue(struct rxe_cq *c
return err;
}
int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
{
struct ib_event ev;
-@@ -135,21 +117,13 @@ int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
+@@ -135,21 +117,13 @@ int rxe_cq_post(struct rxe_cq *cq, struc
if ((cq->notify == IB_CQ_NEXT_COMP) ||
(cq->notify == IB_CQ_SOLICITED && solicited)) {
cq->notify = 0;
void rxe_cq_cleanup(struct rxe_pool_elem *elem)
{
struct rxe_cq *cq = container_of(elem, typeof(*cq), elem);
-diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
-index 84b53c070fc55..090d5bfb1e18d 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
-@@ -1178,8 +1178,6 @@ static int rxe_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
- goto err_out;
- }
+@@ -786,8 +786,6 @@ static int rxe_destroy_cq(struct ib_cq *
+ if (atomic_read(&cq->num_wq))
+ return -EINVAL;
- rxe_cq_disable(cq);
-
- err = rxe_cleanup(cq);
- if (err)
- rxe_err_cq(cq, "cleanup failed, err = %d", err);
-diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
-index c269ae2a32243..d812093a39166 100644
+ rxe_cleanup(cq);
+ return 0;
+ }
--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
@@ -63,9 +63,7 @@ struct rxe_cq {
atomic_t num_wq;
};
---
-2.39.2
-
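After the refresh above, rxe_destroy_cq() reduces to a work-queue check plus
rxe_cleanup(). A sketch of the resulting function, assuming the standard verbs
signature and the to_rcq() helper (both truncated or implied in the hunk
context):

	static int rxe_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
	{
		struct rxe_cq *cq = to_rcq(ibcq);

		/* refuse to destroy a CQ with completion work still queued */
		if (atomic_read(&cq->num_wq))
			return -EINVAL;

		rxe_cleanup(cq);
		return 0;
	}
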
fs-ntfs3-fix-oob-read-in-indx_insert_into_buffer.patch
fs-ntfs3-fix-slab-out-of-bounds-read-in-hdr_delete_d.patch
iommu-mediatek-set-dma_mask-for-pgtable_pa_35_en.patch
-rdma-rxe-change-rxe_dbg-to-rxe_dbg_dev.patch
-rdma-rxe-extend-dbg-log-messages-to-err-and-info.patch
-rdma-rxe-add-error-messages.patch
rdma-rxe-remove-tasklet-call-from-rxe_cq.c.patch
power-supply-generic-adc-battery-fix-unit-scaling.patch
clk-add-missing-of_node_put-in-assigned-clocks-prope.patch
-rdma-rxe-clean-kzalloc-failure-paths.patch
rdma-siw-remove-namespace-check-from-siw_netdev_even.patch
clk-qcom-gcc-sm6115-mark-rcgs-shared-where-applicabl.patch
power-supply-rk817-fix-low-soc-bugs.patch