From 5b1058c9eafcafe88a8bd4c664fc904a44d21aca Mon Sep 17 00:00:00 2001
From: Mike Marciniszyn <mike.marciniszyn@intel.com>
Date: Mon, 24 Jun 2019 12:14:29 -0400
Subject: IB/hfi1: Avoid hardlockup with flushlist_lock

commit cf131a81967583ae737df6383a0893b9fee75b4e upstream.

Heavy contention of the sde flushlist_lock can cause hard lockups at
extreme scale when the flushing logic is under stress.

Mitigate by replacing the item at a time copy to the local list with
an O(1) list_splice_init() and using the high priority work queue to
do the flushes.

Ported to linux-4.14.y.

Fixes: 7724105686e7 ("IB/hfi1: add driver files")
Cc: <stable@vger.kernel.org>
Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 drivers/infiniband/hw/hfi1/sdma.c | 9 +++------
 1 file changed, 3 insertions(+), 6 deletions(-)

diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index 88e326d6cc49..d648a4167832 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -410,10 +410,7 @@ static void sdma_flush(struct sdma_engine *sde)
 	sdma_flush_descq(sde);
 	spin_lock_irqsave(&sde->flushlist_lock, flags);
 	/* copy flush list */
-	list_for_each_entry_safe(txp, txp_next, &sde->flushlist, list) {
-		list_del_init(&txp->list);
-		list_add_tail(&txp->list, &flushlist);
-	}
+	list_splice_init(&sde->flushlist, &flushlist);
 	spin_unlock_irqrestore(&sde->flushlist_lock, flags);
 	/* flush from flush list */
 	list_for_each_entry_safe(txp, txp_next, &flushlist, list)
@@ -2426,7 +2423,7 @@ int sdma_send_txreq(struct sdma_engine *sde,
 		wait->tx_count++;
 		wait->count += tx->num_desc;
 	}
-	schedule_work(&sde->flush_worker);
+	queue_work_on(sde->cpu, system_highpri_wq, &sde->flush_worker);
 	ret = -ECOMM;
 	goto unlock;
 nodesc:
@@ -2526,7 +2523,7 @@ int sdma_send_txlist(struct sdma_engine *sde, struct iowait *wait,
 		}
 	}
 	spin_unlock(&sde->flushlist_lock);
-	schedule_work(&sde->flush_worker);
+	queue_work_on(sde->cpu, system_highpri_wq, &sde->flush_worker);
 	ret = -ECOMM;
 	goto update_tail;
 nodesc:
-- 
2.20.1
