git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
RDMA/irdma: Remove doorbell elision logic
author: Jacob Moroni <jmoroni@google.com>
Tue, 25 Nov 2025 02:53:49 +0000 (20:53 -0600)
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 18 Dec 2025 13:03:19 +0000 (14:03 +0100)
[ Upstream commit 62356fccb195f83d2ceafc787c5ba87ebbe5edfe ]

In some cases, this logic can result in doorbell writes being
skipped when they should not have been (at least on GEN3 HW),
so remove it. This also means that the mb() can be safely
downgraded to dma_wmb().

Fixes: 551c46edc769 ("RDMA/irdma: Add user/kernel shared libraries")
Signed-off-by: Jacob Moroni <jmoroni@google.com>
Signed-off-by: Tatyana Nikolova <tatyana.e.nikolova@intel.com>
Link: https://patch.msgid.link/20251125025350.180-9-tatyana.e.nikolova@intel.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
drivers/infiniband/hw/irdma/puda.c
drivers/infiniband/hw/irdma/uk.c
drivers/infiniband/hw/irdma/user.h

index 694e5a9ed15d0fa633827ec2ae56a96328e1b5da..9cd14a50f1a9373eb194ecb157817ff4195cb120 100644 (file)
@@ -685,7 +685,6 @@ static int irdma_puda_qp_create(struct irdma_puda_rsrc *rsrc)
        ukqp->rq_size = rsrc->rq_size;
 
        IRDMA_RING_INIT(ukqp->sq_ring, ukqp->sq_size);
-       IRDMA_RING_INIT(ukqp->initial_ring, ukqp->sq_size);
        IRDMA_RING_INIT(ukqp->rq_ring, ukqp->rq_size);
        ukqp->wqe_alloc_db = qp->pd->dev->wqe_alloc_db;
 
index ce1ae10c30fcadee179accf3717e5924ae9f0334..d5568584ad5e3286b36656f947f3ea2bfb70785a 100644 (file)
@@ -114,33 +114,8 @@ void irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx)
  */
 void irdma_uk_qp_post_wr(struct irdma_qp_uk *qp)
 {
-       u64 temp;
-       u32 hw_sq_tail;
-       u32 sw_sq_head;
-
-       /* valid bit is written and loads completed before reading shadow */
-       mb();
-
-       /* read the doorbell shadow area */
-       get_64bit_val(qp->shadow_area, 0, &temp);
-
-       hw_sq_tail = (u32)FIELD_GET(IRDMA_QP_DBSA_HW_SQ_TAIL, temp);
-       sw_sq_head = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
-       if (sw_sq_head != qp->initial_ring.head) {
-               if (sw_sq_head != hw_sq_tail) {
-                       if (sw_sq_head > qp->initial_ring.head) {
-                               if (hw_sq_tail >= qp->initial_ring.head &&
-                                   hw_sq_tail < sw_sq_head)
-                                       writel(qp->qp_id, qp->wqe_alloc_db);
-                       } else {
-                               if (hw_sq_tail >= qp->initial_ring.head ||
-                                   hw_sq_tail < sw_sq_head)
-                                       writel(qp->qp_id, qp->wqe_alloc_db);
-                       }
-               }
-       }
-
-       qp->initial_ring.head = qp->sq_ring.head;
+       dma_wmb();
+       writel(qp->qp_id, qp->wqe_alloc_db);
 }
 
 /**
@@ -1574,7 +1549,6 @@ static void irdma_setup_connection_wqes(struct irdma_qp_uk *qp,
        qp->conn_wqes = move_cnt;
        IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->sq_ring, move_cnt);
        IRDMA_RING_MOVE_TAIL_BY_COUNT(qp->sq_ring, move_cnt);
-       IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->initial_ring, move_cnt);
 }
 
 /**
@@ -1719,7 +1693,6 @@ int irdma_uk_qp_init(struct irdma_qp_uk *qp, struct irdma_qp_uk_init_info *info)
        qp->max_sq_frag_cnt = info->max_sq_frag_cnt;
        sq_ring_size = qp->sq_size << info->sq_shift;
        IRDMA_RING_INIT(qp->sq_ring, sq_ring_size);
-       IRDMA_RING_INIT(qp->initial_ring, sq_ring_size);
        if (info->first_sq_wq) {
                irdma_setup_connection_wqes(qp, info);
                qp->swqe_polarity = 1;
index ab57f689827a024fbbde40b3487809baa60a8b8f..aeebf768174abd047c3dff40ac3c578c40c0ab0a 100644 (file)
@@ -456,7 +456,6 @@ struct irdma_srq_uk {
        struct irdma_uk_attrs *uk_attrs;
        __le64 *shadow_area;
        struct irdma_ring srq_ring;
-       struct irdma_ring initial_ring;
        u32 srq_id;
        u32 srq_size;
        u32 max_srq_frag_cnt;