git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
RDMA/rxe: Merge normal and retry atomic flows
author: Bob Pearson <rpearsonhpe@gmail.com>
Mon, 6 Jun 2022 14:38:37 +0000 (09:38 -0500)
committer: Jason Gunthorpe <jgg@nvidia.com>
Thu, 30 Jun 2022 17:00:21 +0000 (14:00 -0300)
Make the execution of the atomic operation in rxe_atomic_reply()
conditional on res->replay and make duplicate_request() call into
rxe_atomic_reply() to merge the two flows. This is modeled on the behavior
of read reply. Delete the skb from the atomic responder resource since it
is no longer used. Adjust the reference counting of the qp in
send_atomic_ack() for this flow.

Fixes: 8700e3e7c485 ("Soft RoCE driver")
Link: https://lore.kernel.org/r/20220606143836.3323-6-rpearsonhpe@gmail.com
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
drivers/infiniband/sw/rxe/rxe_qp.c
drivers/infiniband/sw/rxe/rxe_resp.c
drivers/infiniband/sw/rxe/rxe_verbs.h

index 22e9b85344c350491029b40f1c1153ee9daad03c..8355a5b1cb609ff70f903e4644c108a180694b97 100644 (file)
@@ -129,8 +129,6 @@ static void free_rd_atomic_resources(struct rxe_qp *qp)
 
 void free_rd_atomic_resource(struct rxe_qp *qp, struct resp_res *res)
 {
-       if (res->type == RXE_ATOMIC_MASK)
-               kfree_skb(res->atomic.skb);
        res->type = 0;
 }
 
index 5399e2a571b54f1bc9a445602a374333abc6158b..1212bec3ee3df9e71e85522f28d55f08af2b5eed 100644 (file)
@@ -586,40 +586,43 @@ static enum resp_states rxe_atomic_reply(struct rxe_qp *qp,
                qp->resp.res = res;
        }
 
-       if (mr->state != RXE_MR_STATE_VALID) {
-               ret = RESPST_ERR_RKEY_VIOLATION;
-               goto out;
-       }
+       if (!res->replay) {
+               if (mr->state != RXE_MR_STATE_VALID) {
+                       ret = RESPST_ERR_RKEY_VIOLATION;
+                       goto out;
+               }
 
-       vaddr = iova_to_vaddr(mr, qp->resp.va + qp->resp.offset, sizeof(u64));
+               vaddr = iova_to_vaddr(mr, qp->resp.va + qp->resp.offset,
+                                       sizeof(u64));
 
-       /* check vaddr is 8 bytes aligned. */
-       if (!vaddr || (uintptr_t)vaddr & 7) {
-               ret = RESPST_ERR_MISALIGNED_ATOMIC;
-               goto out;
-       }
+               /* check vaddr is 8 bytes aligned. */
+               if (!vaddr || (uintptr_t)vaddr & 7) {
+                       ret = RESPST_ERR_MISALIGNED_ATOMIC;
+                       goto out;
+               }
 
-       spin_lock_bh(&atomic_ops_lock);
-       res->atomic.orig_val = value = *vaddr;
+               spin_lock_bh(&atomic_ops_lock);
+               res->atomic.orig_val = value = *vaddr;
 
-       if (pkt->opcode == IB_OPCODE_RC_COMPARE_SWAP) {
-               if (value == atmeth_comp(pkt))
-                       value = atmeth_swap_add(pkt);
-       } else {
-               value += atmeth_swap_add(pkt);
-       }
+               if (pkt->opcode == IB_OPCODE_RC_COMPARE_SWAP) {
+                       if (value == atmeth_comp(pkt))
+                               value = atmeth_swap_add(pkt);
+               } else {
+                       value += atmeth_swap_add(pkt);
+               }
 
-       *vaddr = value;
-       spin_unlock_bh(&atomic_ops_lock);
+               *vaddr = value;
+               spin_unlock_bh(&atomic_ops_lock);
 
-       qp->resp.msn++;
+               qp->resp.msn++;
 
-       /* next expected psn, read handles this separately */
-       qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
-       qp->resp.ack_psn = qp->resp.psn;
+               /* next expected psn, read handles this separately */
+               qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
+               qp->resp.ack_psn = qp->resp.psn;
 
-       qp->resp.opcode = pkt->opcode;
-       qp->resp.status = IB_WC_SUCCESS;
+               qp->resp.opcode = pkt->opcode;
+               qp->resp.status = IB_WC_SUCCESS;
+       }
 
        ret = RESPST_ACKNOWLEDGE;
 out:
@@ -1056,7 +1059,6 @@ static int send_atomic_ack(struct rxe_qp *qp, u8 syndrome, u32 psn)
        int err = 0;
        struct rxe_pkt_info ack_pkt;
        struct sk_buff *skb;
-       struct resp_res *res = qp->resp.res;
 
        skb = prepare_ack_packet(qp, &ack_pkt, IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE,
                                 0, psn, syndrome);
@@ -1065,15 +1067,9 @@ static int send_atomic_ack(struct rxe_qp *qp, u8 syndrome, u32 psn)
                goto out;
        }
 
-       skb_get(skb);
-
-       res->atomic.skb = skb;
-
        err = rxe_xmit_packet(qp, &ack_pkt, skb);
-       if (err) {
-               pr_err_ratelimited("Failed sending ack\n");
-               rxe_put(qp);
-       }
+       if (err)
+               pr_err_ratelimited("Failed sending atomic ack\n");
 
        /* have to clear this since it is used to trigger
         * long read replies
@@ -1201,14 +1197,11 @@ static enum resp_states duplicate_request(struct rxe_qp *qp,
                /* Find the operation in our list of responder resources. */
                res = find_resource(qp, pkt->psn);
                if (res) {
-                       skb_get(res->atomic.skb);
-                       /* Resend the result. */
-                       rc = rxe_xmit_packet(qp, pkt, res->atomic.skb);
-                       if (rc) {
-                               pr_err("Failed resending result. This flow is not handled - skb ignored\n");
-                               rc = RESPST_CLEANUP;
-                               goto out;
-                       }
+                       res->replay = 1;
+                       res->cur_psn = pkt->psn;
+                       qp->resp.res = res;
+                       rc = RESPST_ATOMIC_REPLY;
+                       goto out;
                }
 
                /* Resource not found. Class D error. Drop the request. */
index 5ee0f2599896e07010f0b17b6afd1675f664f076..0c01c7f58d4366559340532a9648fa2f15f8e7e9 100644 (file)
@@ -155,7 +155,6 @@ struct resp_res {
 
        union {
                struct {
-                       struct sk_buff  *skb;
                        u64             orig_val;
                } atomic;
                struct {