/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx4/qp.h>
#include <linux/mlx4/srq.h>
#include <linux/slab.h>

#include "mlx4_ib.h"
#include <rdma/mlx4-abi.h>

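/* Return a pointer to SRQ WQE number @n within the SRQ buffer. */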
static void *get_wqe(struct mlx4_ib_srq *srq, int n)
{
	return mlx4_buf_offset(&srq->buf, n << srq->msrq.wqe_shift);
}

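/*
 * SRQ event handler: translate an mlx4 hardware event into the
 * corresponding ib_event and dispatch it to the consumer's callback.
 */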
static void mlx4_ib_srq_event(struct mlx4_srq *srq, enum mlx4_event type)
{
	struct ib_event event;
	struct ib_srq *ibsrq = &to_mibsrq(srq)->ibsrq;

	if (ibsrq->event_handler) {
		event.device      = ibsrq->device;
		event.element.srq = ibsrq;
		switch (type) {
		case MLX4_EVENT_TYPE_SRQ_LIMIT:
			event.event = IB_EVENT_SRQ_LIMIT_REACHED;
			break;
		case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
			event.event = IB_EVENT_SRQ_ERR;
			break;
		default:
			pr_warn("Unexpected event type %d on SRQ %06x\n",
				type, srq->srqn);
			return;
		}

		ibsrq->event_handler(&event, ibsrq->srq_context);
	}
}

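/*
 * Create an SRQ.  For userspace SRQs the WQE buffer and doorbell record
 * are supplied by the caller through @udata; for kernel SRQs they are
 * allocated here.  In both cases the MTTs describing the buffer are
 * written and the SRQ is then allocated in firmware via mlx4_srq_alloc().
 */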
struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
				  struct ib_srq_init_attr *init_attr,
				  struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(pd->device);
	struct mlx4_ib_srq *srq;
	struct mlx4_wqe_srq_next_seg *next;
	struct mlx4_wqe_data_seg *scatter;
	u32 cqn;
	u16 xrcdn;
	int desc_size;
	int buf_size;
	int err;
	int i;

	/* Sanity check SRQ size before proceeding */
	if (init_attr->attr.max_wr >= dev->dev->caps.max_srq_wqes ||
	    init_attr->attr.max_sge > dev->dev->caps.max_srq_sge)
		return ERR_PTR(-EINVAL);

	/* Zero-initialize so the error paths can safely test srq->umem. */
	srq = kzalloc(sizeof(*srq), GFP_KERNEL);
	if (!srq)
		return ERR_PTR(-ENOMEM);

	mutex_init(&srq->mutex);
	spin_lock_init(&srq->lock);
	srq->msrq.max = roundup_pow_of_two(init_attr->attr.max_wr + 1);
	srq->msrq.max_gs = init_attr->attr.max_sge;

	desc_size = max(32UL,
			roundup_pow_of_two(sizeof (struct mlx4_wqe_srq_next_seg) +
					   srq->msrq.max_gs *
					   sizeof (struct mlx4_wqe_data_seg)));
	srq->msrq.wqe_shift = ilog2(desc_size);

	buf_size = srq->msrq.max * desc_size;

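	/*
	 * Userspace SRQ: the consumer allocated the WQE buffer and the
	 * doorbell record; pin the buffer and map the doorbell page.
	 */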
	if (udata) {
		struct mlx4_ib_create_srq ucmd;

		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
			err = -EFAULT;
			goto err_srq;
		}

		srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
					buf_size, 0, 0);
		if (IS_ERR(srq->umem)) {
			err = PTR_ERR(srq->umem);
			goto err_srq;
		}

		err = mlx4_mtt_init(dev->dev, ib_umem_page_count(srq->umem),
				    srq->umem->page_shift, &srq->mtt);
		if (err)
			goto err_buf;

		err = mlx4_ib_umem_write_mtt(dev, &srq->mtt, srq->umem);
		if (err)
			goto err_mtt;

		err = mlx4_ib_db_map_user(to_mucontext(pd->uobject->context),
					  ucmd.db_addr, &srq->db);
		if (err)
			goto err_mtt;
	} else {
		/*
		 * Kernel SRQ: allocate the doorbell record and the WQE
		 * buffer ourselves.
		 */
		err = mlx4_db_alloc(dev->dev, &srq->db, 0);
		if (err)
			goto err_srq;

		*srq->db.db = 0;

		if (mlx4_buf_alloc(dev->dev, buf_size, PAGE_SIZE * 2,
				   &srq->buf)) {
			err = -ENOMEM;
			goto err_db;
		}

		srq->head = 0;
		srq->tail = srq->msrq.max - 1;
		srq->wqe_ctr = 0;

		/*
		 * Chain each WQE to its successor to form the free list,
		 * and mark every scatter entry invalid so unused entries
		 * are ignored by the HCA.
		 */
		for (i = 0; i < srq->msrq.max; ++i) {
			next = get_wqe(srq, i);
			next->next_wqe_index =
				cpu_to_be16((i + 1) & (srq->msrq.max - 1));

			for (scatter = (void *) (next + 1);
			     (void *) scatter < (void *) next + desc_size;
			     ++scatter)
				scatter->lkey = cpu_to_be32(MLX4_INVALID_LKEY);
		}

		err = mlx4_mtt_init(dev->dev, srq->buf.npages, srq->buf.page_shift,
				    &srq->mtt);
		if (err)
			goto err_buf;

		err = mlx4_buf_write_mtt(dev->dev, &srq->mtt, &srq->buf);
		if (err)
			goto err_mtt;

		srq->wrid = kvmalloc_array(srq->msrq.max,
					   sizeof(u64), GFP_KERNEL);
		if (!srq->wrid) {
			err = -ENOMEM;
			goto err_mtt;
		}
	}

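	/*
	 * XRC SRQs are tied to a CQ and an XRC domain; basic SRQs get
	 * CQ number 0 and the device's reserved XRC domain.
	 */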
	cqn = ib_srq_has_cq(init_attr->srq_type) ?
		to_mcq(init_attr->ext.cq)->mcq.cqn : 0;
	xrcdn = (init_attr->srq_type == IB_SRQT_XRC) ?
		to_mxrcd(init_attr->ext.xrc.xrcd)->xrcdn :
		(u16) dev->dev->caps.reserved_xrcds;
	err = mlx4_srq_alloc(dev->dev, to_mpd(pd)->pdn, cqn, xrcdn, &srq->mtt,
			     srq->db.dma, &srq->msrq);
	if (err)
		goto err_wrid;

	srq->msrq.event = mlx4_ib_srq_event;
	srq->ibsrq.ext.xrc.srq_num = srq->msrq.srqn;

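	/* Return the SRQ number to userspace. */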
	if (udata)
		if (ib_copy_to_udata(udata, &srq->msrq.srqn, sizeof (__u32))) {
			err = -EFAULT;
			goto err_wrid;
		}

	init_attr->attr.max_wr = srq->msrq.max - 1;

	return &srq->ibsrq;

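	/*
	 * Error unwind: release resources in the reverse order of
	 * allocation, honoring the userspace/kernel split above.
	 */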
err_wrid:
	if (udata)
		mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &srq->db);
	else
		kvfree(srq->wrid);

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &srq->mtt);

err_buf:
	if (srq->umem)
		ib_umem_release(srq->umem);
	else
		mlx4_buf_free(dev->dev, buf_size, &srq->buf);

err_db:
	if (!udata)
		mlx4_db_free(dev->dev, &srq->db);

err_srq:
	kfree(srq);

	return ERR_PTR(err);
}

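/*
 * Modify an SRQ: only arming the limit watermark is supported;
 * resizing is not.
 */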
int mlx4_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibsrq->device);
	struct mlx4_ib_srq *srq = to_msrq(ibsrq);
	int ret;

	/* We don't support resizing SRQs (yet?) */
	if (attr_mask & IB_SRQ_MAX_WR)
		return -EINVAL;

	if (attr_mask & IB_SRQ_LIMIT) {
		if (attr->srq_limit >= srq->msrq.max)
			return -EINVAL;

		mutex_lock(&srq->mutex);
		ret = mlx4_srq_arm(dev->dev, &srq->msrq, attr->srq_limit);
		mutex_unlock(&srq->mutex);

		if (ret)
			return ret;
	}

	return 0;
}

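/*
 * Query an SRQ: fetch the limit watermark from firmware; the sizes
 * come from the driver's cached values.
 */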
int mlx4_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
{
	struct mlx4_ib_dev *dev = to_mdev(ibsrq->device);
	struct mlx4_ib_srq *srq = to_msrq(ibsrq);
	int ret;
	int limit_watermark;

	ret = mlx4_srq_query(dev->dev, &srq->msrq, &limit_watermark);
	if (ret)
		return ret;

	srq_attr->srq_limit = limit_watermark;
	srq_attr->max_wr = srq->msrq.max - 1;
	srq_attr->max_sge = srq->msrq.max_gs;

	return 0;
}

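/*
 * Destroy an SRQ: free it in firmware, then release the MTTs, the WQE
 * buffer, and the doorbell, distinguishing userspace from kernel SRQs.
 */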
int mlx4_ib_destroy_srq(struct ib_srq *srq)
{
	struct mlx4_ib_dev *dev = to_mdev(srq->device);
	struct mlx4_ib_srq *msrq = to_msrq(srq);

	mlx4_srq_free(dev->dev, &msrq->msrq);
	mlx4_mtt_cleanup(dev->dev, &msrq->mtt);

	if (srq->uobject) {
		mlx4_ib_db_unmap_user(to_mucontext(srq->uobject->context), &msrq->db);
		ib_umem_release(msrq->umem);
	} else {
		kvfree(msrq->wrid);
		mlx4_buf_free(dev->dev, msrq->msrq.max << msrq->msrq.wqe_shift,
			      &msrq->buf);
		mlx4_db_free(dev->dev, &msrq->db);
	}

	kfree(msrq);

	return 0;
}

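/*
 * Return a completed WQE to the SRQ free list by linking it after the
 * current tail.
 */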
void mlx4_ib_free_srq_wqe(struct mlx4_ib_srq *srq, int wqe_index)
{
	struct mlx4_wqe_srq_next_seg *next;

	/* always called with interrupts disabled. */
	spin_lock(&srq->lock);

	next = get_wqe(srq, srq->tail);
	next->next_wqe_index = cpu_to_be16(wqe_index);
	srq->tail = wqe_index;

	spin_unlock(&srq->lock);
}

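/*
 * Post a chain of receive work requests: take WQEs from the free list,
 * fill in the scatter entries, and update the doorbell record once for
 * the whole chain.
 */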
int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
			  const struct ib_recv_wr **bad_wr)
{
	struct mlx4_ib_srq *srq = to_msrq(ibsrq);
	struct mlx4_wqe_srq_next_seg *next;
	struct mlx4_wqe_data_seg *scat;
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;
	struct mlx4_ib_dev *mdev = to_mdev(ibsrq->device);

	spin_lock_irqsave(&srq->lock, flags);
	if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
		err = -EIO;
		*bad_wr = wr;
		nreq = 0;
		goto out;
	}

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (unlikely(wr->num_sge > srq->msrq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		/* head == tail means the free list is exhausted: SRQ is full. */
		if (unlikely(srq->head == srq->tail)) {
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		srq->wrid[srq->head] = wr->wr_id;

		next = get_wqe(srq, srq->head);
		srq->head = be16_to_cpu(next->next_wqe_index);
		scat = (struct mlx4_wqe_data_seg *) (next + 1);

		for (i = 0; i < wr->num_sge; ++i) {
			scat[i].byte_count = cpu_to_be32(wr->sg_list[i].length);
			scat[i].lkey = cpu_to_be32(wr->sg_list[i].lkey);
			scat[i].addr = cpu_to_be64(wr->sg_list[i].addr);
		}

		/* Terminate a short scatter list with an invalid-lkey sentinel. */
		if (i < srq->msrq.max_gs) {
			scat[i].byte_count = 0;
			scat[i].lkey = cpu_to_be32(MLX4_INVALID_LKEY);
			scat[i].addr = 0;
		}
	}

	if (likely(nreq)) {
		srq->wqe_ctr += nreq;

		/*
		 * Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();

		*srq->db.db = cpu_to_be32(srq->wqe_ctr);
	}
out:

	spin_unlock_irqrestore(&srq->lock, flags);

	return err;
}