--- /dev/null
+From 2b23b6097303ed0ba5f4bc036a1c07b6027af5c6 Mon Sep 17 00:00:00 2001
+From: Bob Pearson <rpearsonhpe@gmail.com>
+Date: Fri, 29 Mar 2024 09:55:04 -0500
+Subject: RDMA/rxe: Fix seg fault in rxe_comp_queue_pkt
+
+From: Bob Pearson <rpearsonhpe@gmail.com>
+
+commit 2b23b6097303ed0ba5f4bc036a1c07b6027af5c6 upstream.
+
+In rxe_comp_queue_pkt() an incoming response packet skb is enqueued to
+the resp_pkts queue and then a decision is made whether to run the
+completer task inline or schedule it. Finally, the skb is dereferenced
+to bump a 'hw' performance counter. This is wrong because, if the
+completer task is already running in a separate thread, it may have
+already processed and freed the skb, which can cause a seg fault. This
+has been observed infrequently in testing at high scale.
+
+This patch fixes this by delaying the enqueue of the packet until
+after the counter is accessed.
+
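+For clarity, here is a sketch of rxe_comp_queue_pkt() as it reads after
+this change (the parameter list is completed from the truncated hunk
+header below, so treat the signature as an assumption rather than a
+quote of the file):
+
+  void rxe_comp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
+  {
+          int must_sched;
+
+          /* Read everything needed from the skb before publishing it
+           * on resp_pkts; once it is queued, the completer task may
+           * have already processed and freed it.
+           */
+          must_sched = skb_queue_len(&qp->resp_pkts) > 0;
+          if (must_sched != 0)
+                  rxe_counter_inc(SKB_TO_PKT(skb)->rxe,
+                                  RXE_CNT_COMPLETER_SCHED);
+
+          skb_queue_tail(&qp->resp_pkts, skb);
+
+          rxe_run_task(&qp->comp.task, must_sched);
+  }
+
+Note that the scheduling decision is unchanged in effect: the queue
+length is now sampled before the enqueue, so the threshold moves from
+"> 1" to "> 0".
+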
+Link: https://lore.kernel.org/r/20240329145513.35381-4-rpearsonhpe@gmail.com
+Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
+Fixes: 0b1e5b99a48b ("IB/rxe: Add port protocol stats")
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+[Sherry: backport to fix CVE-2024-38544. Fixed a conflict caused by the
+missing commit dccb23f6c312 ("RDMA/rxe: Split rxe_run_task() into two
+subroutines"), which is not necessary to backport]
+Signed-off-by: Sherry Yang <sherry.yang@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/infiniband/sw/rxe/rxe_comp.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/infiniband/sw/rxe/rxe_comp.c
++++ b/drivers/infiniband/sw/rxe/rxe_comp.c
+@@ -150,12 +150,12 @@ void rxe_comp_queue_pkt(struct rxe_qp *q
+ {
+ int must_sched;
+
+- skb_queue_tail(&qp->resp_pkts, skb);
+-
+- must_sched = skb_queue_len(&qp->resp_pkts) > 1;
++ must_sched = skb_queue_len(&qp->resp_pkts) > 0;
+ if (must_sched != 0)
+ rxe_counter_inc(SKB_TO_PKT(skb)->rxe, RXE_CNT_COMPLETER_SCHED);
+
++ skb_queue_tail(&qp->resp_pkts, skb);
++
+ rxe_run_task(&qp->comp.task, must_sched);
+ }
+