From 5b1058c9eafcafe88a8bd4c664fc904a44d21aca Mon Sep 17 00:00:00 2001
From: Mike Marciniszyn <mike.marciniszyn@intel.com>
Date: Mon, 24 Jun 2019 12:14:29 -0400
Subject: IB/hfi1: Avoid hardlockup with flushlist_lock

commit cf131a81967583ae737df6383a0893b9fee75b4e upstream.

Heavy contention of the sde flushlist_lock can cause hard lockups at
extreme scale when the flushing logic is under stress.

Mitigate by replacing the item-at-a-time copy to the local list with
an O(1) list_splice_init() and by using the high-priority work queue
to do the flushes.

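To make the splice concrete, here is a minimal sketch built only on
<linux/list.h>; "struct item" and both helpers are illustrative
stand-ins, not the driver's own types:

	#include <linux/list.h>

	struct item {
		struct list_head list;
	};

	/* O(n): each entry is unlinked and relinked individually,
	 * all while the caller holds the lock.
	 */
	static void move_one_at_a_time(struct list_head *src,
				       struct list_head *dst)
	{
		struct item *pos, *next;

		list_for_each_entry_safe(pos, next, src, list) {
			list_del_init(&pos->list);
			list_add_tail(&pos->list, dst);
		}
	}

	/* O(1): relink the head pointers in constant time, then
	 * reinitialize src as an empty list.
	 */
	static void move_all_at_once(struct list_head *src,
				     struct list_head *dst)
	{
		list_splice_init(src, dst);
	}
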
Ported to linux-4.14.y.

Fixes: 7724105686e7 ("IB/hfi1: add driver files")
Cc: <stable@vger.kernel.org>
Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 drivers/infiniband/hw/hfi1/sdma.c | 9 +++------
 1 file changed, 3 insertions(+), 6 deletions(-)

diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index 88e326d6cc49..d648a4167832 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -410,10 +410,7 @@ static void sdma_flush(struct sdma_engine *sde)
 	sdma_flush_descq(sde);
 	spin_lock_irqsave(&sde->flushlist_lock, flags);
 	/* copy flush list */
-	list_for_each_entry_safe(txp, txp_next, &sde->flushlist, list) {
-		list_del_init(&txp->list);
-		list_add_tail(&txp->list, &flushlist);
-	}
+	list_splice_init(&sde->flushlist, &flushlist);
 	spin_unlock_irqrestore(&sde->flushlist_lock, flags);
 	/* flush from flush list */
 	list_for_each_entry_safe(txp, txp_next, &flushlist, list)
@@ -2426,7 +2423,7 @@ int sdma_send_txreq(struct sdma_engine *sde,
 		wait->tx_count++;
 		wait->count += tx->num_desc;
 	}
-	schedule_work(&sde->flush_worker);
+	queue_work_on(sde->cpu, system_highpri_wq, &sde->flush_worker);
 	ret = -ECOMM;
 	goto unlock;
 nodesc:
@@ -2526,7 +2523,7 @@ int sdma_send_txlist(struct sdma_engine *sde, struct iowait *wait,
 		}
 	}
 	spin_unlock(&sde->flushlist_lock);
-	schedule_work(&sde->flush_worker);
+	queue_work_on(sde->cpu, system_highpri_wq, &sde->flush_worker);
 	ret = -ECOMM;
 	goto update_tail;
 nodesc:
--
2.20.1
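
For background on the second half of the change: schedule_work() queues
on the shared, normal-priority system_wq, while queue_work_on() lets the
caller pick both the CPU and the workqueue, here the WQ_HIGHPRI
system_highpri_wq, so the flush keeps its CPU affinity and is serviced
by the high-priority worker pool. A minimal sketch of the pattern, with
a hypothetical handler standing in for the driver's flush worker:

	#include <linux/workqueue.h>

	/* Hypothetical handler; in hfi1 this role is played by the
	 * function wired to sde->flush_worker.
	 */
	static void my_flush_fn(struct work_struct *work)
	{
		/* drain pending work here */
	}

	static DECLARE_WORK(my_flush_work, my_flush_fn);

	static void kick_flush(int cpu)
	{
		/*
		 * schedule_work(&my_flush_work) would run this at
		 * normal priority on system_wq; queueing on
		 * system_highpri_wq instead keeps the flush from
		 * being starved behind ordinary per-CPU work.
		 */
		queue_work_on(cpu, system_highpri_wq, &my_flush_work);
	}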