From 26bff1bd74a4f7417509a83295614e9dab995b2a Mon Sep 17 00:00:00 2001
From: Raju Rangoju <rajur@chelsio.com>
Date: Mon, 23 Apr 2018 21:42:37 +0530
Subject: RDMA/cxgb4: release hw resources on device removal

From: Raju Rangoju <rajur@chelsio.com>

commit 26bff1bd74a4f7417509a83295614e9dab995b2a upstream.

The c4iw_rdev_close() logic was not releasing all the hw
resources (PBL and RQT memory) during the device removal
event (driver unload / system reboot). This can cause a panic
in gen_pool_destroy().

The module remove function will now wait for all the hw
resources to be released during the device removal event.
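
The scheme is a kref-plus-completion handshake per pool: the kref
starts at one for the pool itself, every successful allocation takes
a reference, every free drops one, and whichever put ends up last
runs the release callback that destroys the gen_pool and completes
the completion c4iw_rdev_close() is waiting on. A minimal sketch of
the pattern (the demo_pool names are illustrative, not taken from
the driver):

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/completion.h>

struct demo_pool {
	struct kref kref;		/* 1 for the pool + 1 per allocation */
	struct completion compl;	/* fired by the final kref_put() */
};

static void demo_pool_release(struct kref *kref)
{
	struct demo_pool *p = container_of(kref, struct demo_pool, kref);

	/* No allocations remain; the backing store can go away now. */
	complete(&p->compl);
}

static void demo_pool_init(struct demo_pool *p)
{
	kref_init(&p->kref);		/* refcount starts at 1 */
	init_completion(&p->compl);
}

static void demo_alloc(struct demo_pool *p)
{
	kref_get(&p->kref);		/* pin the pool per live object */
}

static void demo_free(struct demo_pool *p)
{
	kref_put(&p->kref, demo_pool_release);
}

static void demo_pool_teardown(struct demo_pool *p)
{
	demo_free(p);			/* drop the pool's own reference */
	wait_for_completion(&p->compl);	/* block until the last free */
}

Either side may finish last: if teardown runs while frees are still
queued (here, on free_workq), the final free performs the actual
destruction and the close path simply blocks until it has happened.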

Fixes: c12a67fe ("iw_cxgb4: free EQ queue memory on last deref")
Signed-off-by: Raju Rangoju <rajur@chelsio.com>
Reviewed-by: Steve Wise <swise@opengridcomputing.com>
Cc: stable@vger.kernel.org
Signed-off-by: Doug Ledford <dledford@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 drivers/infiniband/hw/cxgb4/device.c   |    9 ++++++++-
 drivers/infiniband/hw/cxgb4/iw_cxgb4.h |    4 ++++
 drivers/infiniband/hw/cxgb4/resource.c |   26 ++++++++++++++++++++++++--
 3 files changed, 36 insertions(+), 3 deletions(-)

--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -884,6 +884,11 @@ static int c4iw_rdev_open(struct c4iw_rd
 
 	rdev->status_page->db_off = 0;
 
+	init_completion(&rdev->rqt_compl);
+	init_completion(&rdev->pbl_compl);
+	kref_init(&rdev->rqt_kref);
+	kref_init(&rdev->pbl_kref);
+
 	return 0;
 err_free_status_page_and_wr_log:
 	if (c4iw_wr_log && rdev->wr_log)
@@ -902,13 +907,15 @@ destroy_resource:
 
 static void c4iw_rdev_close(struct c4iw_rdev *rdev)
 {
-	destroy_workqueue(rdev->free_workq);
 	kfree(rdev->wr_log);
 	c4iw_release_dev_ucontext(rdev, &rdev->uctx);
 	free_page((unsigned long)rdev->status_page);
 	c4iw_pblpool_destroy(rdev);
 	c4iw_rqtpool_destroy(rdev);
+	wait_for_completion(&rdev->pbl_compl);
+	wait_for_completion(&rdev->rqt_compl);
 	c4iw_ocqp_pool_destroy(rdev);
+	destroy_workqueue(rdev->free_workq);
 	c4iw_destroy_resource(&rdev->resource);
 }
 
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -185,6 +185,10 @@ struct c4iw_rdev {
 	struct wr_log_entry *wr_log;
 	int wr_log_size;
 	struct workqueue_struct *free_workq;
+	struct completion rqt_compl;
+	struct completion pbl_compl;
+	struct kref rqt_kref;
+	struct kref pbl_kref;
 };
 
 static inline int c4iw_fatal_error(struct c4iw_rdev *rdev)
--- a/drivers/infiniband/hw/cxgb4/resource.c
+++ b/drivers/infiniband/hw/cxgb4/resource.c
@@ -260,12 +260,22 @@ u32 c4iw_pblpool_alloc(struct c4iw_rdev
 		rdev->stats.pbl.cur += roundup(size, 1 << MIN_PBL_SHIFT);
 		if (rdev->stats.pbl.cur > rdev->stats.pbl.max)
 			rdev->stats.pbl.max = rdev->stats.pbl.cur;
+		kref_get(&rdev->pbl_kref);
 	} else
 		rdev->stats.pbl.fail++;
 	mutex_unlock(&rdev->stats.lock);
 	return (u32)addr;
 }
 
+static void destroy_pblpool(struct kref *kref)
+{
+	struct c4iw_rdev *rdev;
+
+	rdev = container_of(kref, struct c4iw_rdev, pbl_kref);
+	gen_pool_destroy(rdev->pbl_pool);
+	complete(&rdev->pbl_compl);
+}
+
 void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
 {
 	pr_debug("%s addr 0x%x size %d\n", __func__, addr, size);
@@ -273,6 +283,7 @@ void c4iw_pblpool_free(struct c4iw_rdev
 	rdev->stats.pbl.cur -= roundup(size, 1 << MIN_PBL_SHIFT);
 	mutex_unlock(&rdev->stats.lock);
 	gen_pool_free(rdev->pbl_pool, (unsigned long)addr, size);
+	kref_put(&rdev->pbl_kref, destroy_pblpool);
 }
 
 int c4iw_pblpool_create(struct c4iw_rdev *rdev)
@@ -310,7 +321,7 @@ int c4iw_pblpool_create(struct c4iw_rdev
 
 void c4iw_pblpool_destroy(struct c4iw_rdev *rdev)
 {
-	gen_pool_destroy(rdev->pbl_pool);
+	kref_put(&rdev->pbl_kref, destroy_pblpool);
 }
 
 /*
@@ -331,12 +342,22 @@ u32 c4iw_rqtpool_alloc(struct c4iw_rdev
 		rdev->stats.rqt.cur += roundup(size << 6, 1 << MIN_RQT_SHIFT);
 		if (rdev->stats.rqt.cur > rdev->stats.rqt.max)
 			rdev->stats.rqt.max = rdev->stats.rqt.cur;
+		kref_get(&rdev->rqt_kref);
 	} else
 		rdev->stats.rqt.fail++;
 	mutex_unlock(&rdev->stats.lock);
 	return (u32)addr;
 }
 
+static void destroy_rqtpool(struct kref *kref)
+{
+	struct c4iw_rdev *rdev;
+
+	rdev = container_of(kref, struct c4iw_rdev, rqt_kref);
+	gen_pool_destroy(rdev->rqt_pool);
+	complete(&rdev->rqt_compl);
+}
+
 void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
 {
 	pr_debug("%s addr 0x%x size %d\n", __func__, addr, size << 6);
@@ -344,6 +365,7 @@ void c4iw_rqtpool_free(struct c4iw_rdev
 	rdev->stats.rqt.cur -= roundup(size << 6, 1 << MIN_RQT_SHIFT);
 	mutex_unlock(&rdev->stats.lock);
 	gen_pool_free(rdev->rqt_pool, (unsigned long)addr, size << 6);
+	kref_put(&rdev->rqt_kref, destroy_rqtpool);
 }
 
 int c4iw_rqtpool_create(struct c4iw_rdev *rdev)
@@ -380,7 +402,7 @@ int c4iw_rqtpool_create(struct c4iw_rdev
 
 void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev)
 {
-	gen_pool_destroy(rdev->rqt_pool);
+	kref_put(&rdev->rqt_kref, destroy_rqtpool);
 }
 
 /*
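
For context on the panic itself: gen_pool_destroy() BUG()s if the
pool still contains live allocations. A hypothetical sketch of the
bad ordering the patch eliminates (broken_teardown is illustrative,
not driver code):

#include <linux/genalloc.h>

static void broken_teardown(struct gen_pool *pool, unsigned long addr,
			    size_t size)
{
	/* addr/size is still allocated from the pool, e.g. because a
	 * deferred queue free has not run yet...
	 */
	gen_pool_destroy(pool);			/* BUG_ON: pool not empty */
	gen_pool_free(pool, addr, size);	/* never reached */
}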