RDMA/rxe: Move code to check if drained to subroutine
author Bob Pearson <rpearsonhpe@gmail.com>
Wed, 5 Apr 2023 04:26:10 +0000 (23:26 -0500)
committer Jason Gunthorpe <jgg@nvidia.com>
Mon, 17 Apr 2023 19:01:44 +0000 (16:01 -0300)
Move two blocks of code in rxe_comp.c and rxe_req.c to subroutines that
check if draining is complete in the SQD state and, if so, generate an
SQ_DRAINED event.

Link: https://lore.kernel.org/r/20230405042611.6467-4-rpearsonhpe@gmail.com
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
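
[Not part of the patch] For context, the SQ_DRAINED event that these new
subroutines generate is what a user-space verbs application waits for after
asking for a drain: it moves the QP to SQD with ibv_modify_qp() and then
blocks on the device's async event queue until IBV_EVENT_SQ_DRAINED arrives.
The sketch below is illustrative only; the drain_send_queue() helper and the
ctx/qp arguments are not from this series, while ibv_modify_qp(),
ibv_get_async_event() and ibv_ack_async_event() are the standard libibverbs
calls. Either the completer (comp_check_sq_drain_done()) or the requester
(req_check_sq_drain_done()) path can be the one that detects the completed
drain, which is why both blocks are factored out the same way.

#include <infiniband/verbs.h>

/* Illustrative helper (not in the patch): drain a QP's send queue and wait
 * for the async SQ_DRAINED event raised by the provider (rxe here).
 */
static int drain_send_queue(struct ibv_context *ctx, struct ibv_qp *qp)
{
	struct ibv_qp_attr attr = { .qp_state = IBV_QPS_SQD };
	struct ibv_async_event ev;

	/* RTS -> SQD; the provider starts draining outstanding send WQEs */
	if (ibv_modify_qp(qp, &attr, IBV_QP_STATE))
		return -1;

	/* Block until the SQ_DRAINED event for this QP arrives */
	for (;;) {
		int done;

		if (ibv_get_async_event(ctx, &ev))
			return -1;
		done = (ev.event_type == IBV_EVENT_SQ_DRAINED &&
			ev.element.qp == qp);
		ibv_ack_async_event(&ev);
		if (done)
			return 0;
	}
}
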
drivers/infiniband/sw/rxe/rxe_comp.c
drivers/infiniband/sw/rxe/rxe_req.c

diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
index 979990734e0cbbf307eac8819e33442be1a12384..1ccae8cff3596c3ac792b3df06a775a78d836261 100644
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -477,20 +477,8 @@ static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
        }
 }
 
-static inline enum comp_state complete_ack(struct rxe_qp *qp,
-                                          struct rxe_pkt_info *pkt,
-                                          struct rxe_send_wqe *wqe)
+static void comp_check_sq_drain_done(struct rxe_qp *qp)
 {
-       if (wqe->has_rd_atomic) {
-               wqe->has_rd_atomic = 0;
-               atomic_inc(&qp->req.rd_atomic);
-               if (qp->req.need_rd_atomic) {
-                       qp->comp.timeout_retry = 0;
-                       qp->req.need_rd_atomic = 0;
-                       rxe_sched_task(&qp->req.task);
-               }
-       }
-
        if (unlikely(qp_state(qp) == IB_QPS_SQD)) {
                /* state_lock used by requester & completer */
                spin_lock_bh(&qp->state_lock);
@@ -507,10 +495,27 @@ static inline enum comp_state complete_ack(struct rxe_qp *qp,
                                qp->ibqp.event_handler(&ev,
                                        qp->ibqp.qp_context);
                        }
-               } else {
-                       spin_unlock_bh(&qp->state_lock);
+                       return;
                }
+               spin_unlock_bh(&qp->state_lock);
        }
+}
+
+static inline enum comp_state complete_ack(struct rxe_qp *qp,
+                                          struct rxe_pkt_info *pkt,
+                                          struct rxe_send_wqe *wqe)
+{
+       if (wqe->has_rd_atomic) {
+               wqe->has_rd_atomic = 0;
+               atomic_inc(&qp->req.rd_atomic);
+               if (qp->req.need_rd_atomic) {
+                       qp->comp.timeout_retry = 0;
+                       qp->req.need_rd_atomic = 0;
+                       rxe_sched_task(&qp->req.task);
+               }
+       }
+
+       comp_check_sq_drain_done(qp);
 
        do_complete(qp, wqe);
 
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index 8a8242512f2a5962d488a523c1ca9968da46a33b..f329038efbc8ae029ebdf451d5860491422e8b5f 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -108,17 +108,12 @@ void rnr_nak_timer(struct timer_list *t)
        rxe_sched_task(&qp->req.task);
 }
 
-static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
+static void req_check_sq_drain_done(struct rxe_qp *qp)
 {
-       struct rxe_send_wqe *wqe;
        struct rxe_queue *q = qp->sq.queue;
        unsigned int index = qp->req.wqe_index;
-       unsigned int cons;
-       unsigned int prod;
-
-       wqe = queue_head(q, QUEUE_TYPE_FROM_CLIENT);
-       cons = queue_get_consumer(q, QUEUE_TYPE_FROM_CLIENT);
-       prod = queue_get_producer(q, QUEUE_TYPE_FROM_CLIENT);
+       unsigned int cons = queue_get_consumer(q, QUEUE_TYPE_FROM_CLIENT);
+       struct rxe_send_wqe *wqe = queue_addr_from_index(q, cons);
 
        if (unlikely(qp_state(qp) == IB_QPS_SQD)) {
                /* check to see if we are drained;
@@ -126,18 +121,14 @@ static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
                 */
                spin_lock_bh(&qp->state_lock);
                do {
-                       if (!qp->attr.sq_draining) {
+                       if (!qp->attr.sq_draining)
                                /* comp just finished */
-                               spin_unlock_bh(&qp->state_lock);
                                break;
-                       }
 
                        if (wqe && ((index != cons) ||
-                               (wqe->state != wqe_state_posted))) {
+                               (wqe->state != wqe_state_posted)))
                                /* comp not done yet */
-                               spin_unlock_bh(&qp->state_lock);
                                break;
-                       }
 
                        qp->attr.sq_draining = 0;
                        spin_unlock_bh(&qp->state_lock);
@@ -151,9 +142,22 @@ static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
                                qp->ibqp.event_handler(&ev,
                                        qp->ibqp.qp_context);
                        }
+                       return;
                } while (0);
+               spin_unlock_bh(&qp->state_lock);
        }
+}
 
+static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
+{
+       struct rxe_send_wqe *wqe;
+       struct rxe_queue *q = qp->sq.queue;
+       unsigned int index = qp->req.wqe_index;
+       unsigned int prod;
+
+       req_check_sq_drain_done(qp);
+
+       prod = queue_get_producer(q, QUEUE_TYPE_FROM_CLIENT);
        if (index == prod)
                return NULL;