/*
 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_umem.h>
#include <rdma/ib_smi.h>

#include "qib.h"

/* Fast memory region */
struct qib_fmr {
	struct ib_fmr ibfmr;
	u8 page_shift;
	struct qib_mregion mr;        /* must be last */
};

static inline struct qib_fmr *to_ifmr(struct ib_fmr *ibfmr)
{
	return container_of(ibfmr, struct qib_fmr, ibfmr);
}

/**
 * qib_get_dma_mr - get a DMA memory region
 * @pd: protection domain for this memory region
 * @acc: access flags
 *
 * Returns the memory region on success, otherwise returns an errno.
 * Note that all DMA addresses should be created via the
 * struct ib_dma_mapping_ops functions (see qib_dma.c).
 */
struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct qib_ibdev *dev = to_idev(pd->device);
	struct qib_mr *mr;
	struct ib_mr *ret;
	unsigned long flags;

	if (to_ipd(pd)->user) {
		ret = ERR_PTR(-EPERM);
		goto bail;
	}

	mr = kzalloc(sizeof *mr, GFP_KERNEL);
	if (!mr) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	mr->mr.access_flags = acc;
	atomic_set(&mr->mr.refcount, 0);

	spin_lock_irqsave(&dev->lk_table.lock, flags);
	if (!dev->dma_mr)
		dev->dma_mr = &mr->mr;
	spin_unlock_irqrestore(&dev->lk_table.lock, flags);

	ret = &mr->ibmr;

bail:
	return ret;
}

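/*
 * Allocate a qib_mr with enough two-level map entries to describe
 * "count" segments (QIB_SEGSZ segments per first-level table) and
 * reserve an lkey for it in the device's lkey table.
 */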
static struct qib_mr *alloc_mr(int count, struct qib_lkey_table *lk_table)
{
	struct qib_mr *mr;
	int m, i = 0;

	/* Allocate struct plus pointers to first level page tables. */
	m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ;
	mr = kmalloc(sizeof *mr + m * sizeof mr->mr.map[0], GFP_KERNEL);
	if (!mr)
		goto done;

	/* Allocate first level page tables. */
	for (; i < m; i++) {
		mr->mr.map[i] = kmalloc(sizeof *mr->mr.map[0], GFP_KERNEL);
		if (!mr->mr.map[i])
			goto bail;
	}
	mr->mr.mapsz = m;
	mr->mr.max_segs = count;

	/*
	 * ib_reg_phys_mr() will initialize mr->ibmr except for
	 * lkey and rkey.
	 */
	if (!qib_alloc_lkey(lk_table, &mr->mr))
		goto bail;
	mr->ibmr.lkey = mr->mr.lkey;
	mr->ibmr.rkey = mr->mr.lkey;

	atomic_set(&mr->mr.refcount, 0);
	goto done;

bail:
	while (i)
		kfree(mr->mr.map[--i]);
	kfree(mr);
	mr = NULL;

done:
	return mr;
}

/**
 * qib_reg_phys_mr - register a physical memory region
 * @pd: protection domain for this memory region
 * @buffer_list: pointer to the list of physical buffers to register
 * @num_phys_buf: the number of physical buffers to register
 * @acc: access flags for this memory region
 * @iova_start: the starting address passed over IB which maps to this MR
 *
 * Returns the memory region on success, otherwise returns an errno.
 */
struct ib_mr *qib_reg_phys_mr(struct ib_pd *pd,
			      struct ib_phys_buf *buffer_list,
			      int num_phys_buf, int acc, u64 *iova_start)
{
	struct qib_mr *mr;
	int n, m, i;
	struct ib_mr *ret;

	mr = alloc_mr(num_phys_buf, &to_idev(pd->device)->lk_table);
	if (mr == NULL) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	mr->mr.pd = pd;
	mr->mr.user_base = *iova_start;
	mr->mr.iova = *iova_start;
	mr->mr.length = 0;
	mr->mr.offset = 0;
	mr->mr.access_flags = acc;
	mr->umem = NULL;

	m = 0;
	n = 0;
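	/* Copy the physical buffer list into the two-level segment map. */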
	for (i = 0; i < num_phys_buf; i++) {
		mr->mr.map[m]->segs[n].vaddr = (void *) buffer_list[i].addr;
		mr->mr.map[m]->segs[n].length = buffer_list[i].size;
		mr->mr.length += buffer_list[i].size;
		n++;
		if (n == QIB_SEGSZ) {
			m++;
			n = 0;
		}
	}

	ret = &mr->ibmr;

bail:
	return ret;
}

/**
 * qib_reg_user_mr - register a userspace memory region
 * @pd: protection domain for this memory region
 * @start: starting userspace address
 * @length: length of region to register
 * @virt_addr: virtual address to use (from HCA's point of view)
 * @mr_access_flags: access flags for this memory region
 * @udata: unused by the QLogic_IB driver
 *
 * Returns the memory region on success, otherwise returns an errno.
 */
struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
			      u64 virt_addr, int mr_access_flags,
			      struct ib_udata *udata)
{
	struct qib_mr *mr;
	struct ib_umem *umem;
	struct ib_umem_chunk *chunk;
	int n, m, i;
	struct ib_mr *ret;

	if (length == 0) {
		ret = ERR_PTR(-EINVAL);
		goto bail;
	}

	umem = ib_umem_get(pd->uobject->context, start, length,
			   mr_access_flags, 0);
	if (IS_ERR(umem))
		return (void *) umem;

	n = 0;
	list_for_each_entry(chunk, &umem->chunk_list, list)
		n += chunk->nents;

	mr = alloc_mr(n, &to_idev(pd->device)->lk_table);
	if (!mr) {
		ret = ERR_PTR(-ENOMEM);
		ib_umem_release(umem);
		goto bail;
	}

	mr->mr.pd = pd;
	mr->mr.user_base = start;
	mr->mr.iova = virt_addr;
	mr->mr.length = length;
	mr->mr.offset = umem->offset;
	mr->mr.access_flags = mr_access_flags;
	mr->umem = umem;

	m = 0;
	n = 0;
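	/* Record the kernel virtual address of each pinned page. */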
	list_for_each_entry(chunk, &umem->chunk_list, list) {
		for (i = 0; i < chunk->nents; i++) {
			void *vaddr;

			vaddr = page_address(sg_page(&chunk->page_list[i]));
			if (!vaddr) {
				ret = ERR_PTR(-EINVAL);
				goto bail;
			}
			mr->mr.map[m]->segs[n].vaddr = vaddr;
			mr->mr.map[m]->segs[n].length = umem->page_size;
			n++;
			if (n == QIB_SEGSZ) {
				m++;
				n = 0;
			}
		}
	}
	ret = &mr->ibmr;

bail:
	return ret;
}

/**
 * qib_dereg_mr - unregister and free a memory region
 * @ibmr: the memory region to free
 *
 * Returns 0 on success.
 *
 * Note that this is called to free MRs created by qib_get_dma_mr(),
 * qib_reg_phys_mr(), qib_reg_user_mr() or qib_alloc_fast_reg_mr().
 */
int qib_dereg_mr(struct ib_mr *ibmr)
{
	struct qib_mr *mr = to_imr(ibmr);
	struct qib_ibdev *dev = to_idev(ibmr->device);
	int ret;
	int i;

	ret = qib_free_lkey(dev, &mr->mr);
	if (ret)
		return ret;

	i = mr->mr.mapsz;
	while (i)
		kfree(mr->mr.map[--i]);
	if (mr->umem)
		ib_umem_release(mr->umem);
	kfree(mr);
	return 0;
}

/*
 * Allocate a memory region usable with the
 * IB_WR_FAST_REG_MR send work request.
 *
 * Return the memory region on success, otherwise return an errno.
 */
struct ib_mr *qib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len)
{
	struct qib_mr *mr;

	mr = alloc_mr(max_page_list_len, &to_idev(pd->device)->lk_table);
	if (mr == NULL)
		return ERR_PTR(-ENOMEM);

	mr->mr.pd = pd;
	mr->mr.user_base = 0;
	mr->mr.iova = 0;
	mr->mr.length = 0;
	mr->mr.offset = 0;
	mr->mr.access_flags = 0;
	mr->umem = NULL;

	return &mr->ibmr;
}

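/*
 * Allocate the page list used by an IB_WR_FAST_REG_MR work request.
 * The list is limited to a single page worth of u64 page addresses.
 */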
struct ib_fast_reg_page_list *
qib_alloc_fast_reg_page_list(struct ib_device *ibdev, int page_list_len)
{
	unsigned size = page_list_len * sizeof(u64);
	struct ib_fast_reg_page_list *pl;

	if (size > PAGE_SIZE)
		return ERR_PTR(-EINVAL);

	pl = kmalloc(sizeof *pl, GFP_KERNEL);
	if (!pl)
		return ERR_PTR(-ENOMEM);

	pl->page_list = kmalloc(size, GFP_KERNEL);
	if (!pl->page_list)
		goto err_free;

	return pl;

err_free:
	kfree(pl);
	return ERR_PTR(-ENOMEM);
}

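/* Free a page list allocated by qib_alloc_fast_reg_page_list(). */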
void qib_free_fast_reg_page_list(struct ib_fast_reg_page_list *pl)
{
	kfree(pl->page_list);
	kfree(pl);
}

/**
 * qib_alloc_fmr - allocate a fast memory region
 * @pd: the protection domain for this memory region
 * @mr_access_flags: access flags for this memory region
 * @fmr_attr: fast memory region attributes
 *
 * Returns the memory region on success, otherwise returns an errno.
 */
struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
			     struct ib_fmr_attr *fmr_attr)
{
	struct qib_fmr *fmr;
	int m, i = 0;
	struct ib_fmr *ret;

	/* Allocate struct plus pointers to first level page tables. */
	m = (fmr_attr->max_pages + QIB_SEGSZ - 1) / QIB_SEGSZ;
	fmr = kmalloc(sizeof *fmr + m * sizeof fmr->mr.map[0], GFP_KERNEL);
	if (!fmr)
		goto bail;

	/* Allocate first level page tables. */
	for (; i < m; i++) {
		fmr->mr.map[i] = kmalloc(sizeof *fmr->mr.map[0],
					 GFP_KERNEL);
		if (!fmr->mr.map[i])
			goto bail;
	}
	fmr->mr.mapsz = m;

	/*
	 * ib_alloc_fmr() will initialize fmr->ibfmr except for lkey &
	 * rkey.
	 */
	if (!qib_alloc_lkey(&to_idev(pd->device)->lk_table, &fmr->mr))
		goto bail;
	fmr->ibfmr.rkey = fmr->mr.lkey;
	fmr->ibfmr.lkey = fmr->mr.lkey;
	/*
	 * Resources are allocated but no valid mapping (RKEY can't be
	 * used).
	 */
	fmr->mr.pd = pd;
	fmr->mr.user_base = 0;
	fmr->mr.iova = 0;
	fmr->mr.length = 0;
	fmr->mr.offset = 0;
	fmr->mr.access_flags = mr_access_flags;
	fmr->mr.max_segs = fmr_attr->max_pages;
	fmr->page_shift = fmr_attr->page_shift;

	atomic_set(&fmr->mr.refcount, 0);
	ret = &fmr->ibfmr;
	goto done;

bail:
	while (i)
		kfree(fmr->mr.map[--i]);
	kfree(fmr);
	ret = ERR_PTR(-ENOMEM);

done:
	return ret;
}

/**
 * qib_map_phys_fmr - set up a fast memory region
 * @ibfmr: the fast memory region to set up
 * @page_list: the list of pages to associate with the fast memory region
 * @list_len: the number of pages to associate with the fast memory region
 * @iova: the virtual address of the start of the fast memory region
 *
 * This may be called from interrupt context.
 */
int qib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
		     int list_len, u64 iova)
{
	struct qib_fmr *fmr = to_ifmr(ibfmr);
	struct qib_lkey_table *rkt;
	unsigned long flags;
	int m, n, i;
	u32 ps;
	int ret;

	if (atomic_read(&fmr->mr.refcount))
		return -EBUSY;

	if (list_len > fmr->mr.max_segs) {
		ret = -EINVAL;
		goto bail;
	}
	rkt = &to_idev(ibfmr->device)->lk_table;
	spin_lock_irqsave(&rkt->lock, flags);
	fmr->mr.user_base = iova;
	fmr->mr.iova = iova;
	ps = 1 << fmr->page_shift;
	fmr->mr.length = list_len * ps;
	m = 0;
	n = 0;
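	/* Install the new page list; each segment covers one FMR page. */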
	for (i = 0; i < list_len; i++) {
		fmr->mr.map[m]->segs[n].vaddr = (void *) page_list[i];
		fmr->mr.map[m]->segs[n].length = ps;
		if (++n == QIB_SEGSZ) {
			m++;
			n = 0;
		}
	}
	spin_unlock_irqrestore(&rkt->lock, flags);
	ret = 0;

bail:
	return ret;
}

/**
 * qib_unmap_fmr - unmap fast memory regions
 * @fmr_list: the list of fast memory regions to unmap
 *
 * Returns 0 on success.
 */
int qib_unmap_fmr(struct list_head *fmr_list)
{
	struct qib_fmr *fmr;
	struct qib_lkey_table *rkt;
	unsigned long flags;

	list_for_each_entry(fmr, fmr_list, ibfmr.list) {
		rkt = &to_idev(fmr->ibfmr.device)->lk_table;
		spin_lock_irqsave(&rkt->lock, flags);
		fmr->mr.user_base = 0;
		fmr->mr.iova = 0;
		fmr->mr.length = 0;
		spin_unlock_irqrestore(&rkt->lock, flags);
	}
	return 0;
}

/**
 * qib_dealloc_fmr - deallocate a fast memory region
 * @ibfmr: the fast memory region to deallocate
 *
 * Returns 0 on success.
 */
int qib_dealloc_fmr(struct ib_fmr *ibfmr)
{
	struct qib_fmr *fmr = to_ifmr(ibfmr);
	int ret;
	int i;

	ret = qib_free_lkey(to_idev(ibfmr->device), &fmr->mr);
	if (ret)
		return ret;

	i = fmr->mr.mapsz;
	while (i)
		kfree(fmr->mr.map[--i]);
	kfree(fmr);
	return 0;
}