RDMA/rxe: Fix memleak in rxe_mem_init_user
drivers/infiniband/sw/rxe/rxe_mr.c

/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *	Redistribution and use in source and binary forms, with or
 *	without modification, are permitted provided that the following
 *	conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "rxe.h"
#include "rxe_loc.h"

/*
 * lfsr (linear feedback shift register) with period 255
 */
static u8 rxe_get_key(void)
{
	static u32 key = 1;

	key = key << 1;

	key |= (0 != (key & 0x100)) ^ (0 != (key & 0x10))
		^ (0 != (key & 0x80)) ^ (0 != (key & 0x40));

	key &= 0xff;

	return key;
}

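/*
 * Check that [iova, iova + length - 1] lies inside the registered
 * region. For example (values illustrative only): an MR registered
 * with iova 0x10000 and length 0x4000 accepts iova 0x13e00 with
 * length 0x200 (it ends exactly at the limit), but rejects iova
 * 0x13f00 with the same length, since 0x13f00 > 0x10000 + 0x4000 - 0x200.
 * DMA memory regions have no bounds and always pass.
 */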
int mem_check_range(struct rxe_mem *mem, u64 iova, size_t length)
{
	switch (mem->type) {
	case RXE_MEM_TYPE_DMA:
		return 0;

	case RXE_MEM_TYPE_MR:
	case RXE_MEM_TYPE_FMR:
		if (iova < mem->iova ||
		    length > mem->length ||
		    iova > mem->iova + mem->length - length)
			return -EFAULT;
		return 0;

	default:
		return -EFAULT;
	}
}

#define IB_ACCESS_REMOTE	(IB_ACCESS_REMOTE_READ		\
				 | IB_ACCESS_REMOTE_WRITE	\
				 | IB_ACCESS_REMOTE_ATOMIC)

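/*
 * The lkey/rkey are built from the pool index of the memory object in
 * the upper bits and an 8-bit key from rxe_get_key() in the low byte;
 * lookup_mem() recovers the index again with (key >> 8). As an
 * illustration (index value hypothetical): index 0x201 combined with
 * key byte 0x5a gives lkey 0x2015a. The rkey is only set (to the same
 * value as the lkey) when remote access was requested, otherwise it is 0.
 */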
static void rxe_mem_init(int access, struct rxe_mem *mem)
{
	u32 lkey = mem->pelem.index << 8 | rxe_get_key();
	u32 rkey = (access & IB_ACCESS_REMOTE) ? lkey : 0;

	if (mem->pelem.pool->type == RXE_TYPE_MR) {
		mem->ibmr.lkey = lkey;
		mem->ibmr.rkey = rkey;
	}

	mem->lkey = lkey;
	mem->rkey = rkey;
	mem->state = RXE_MEM_STATE_INVALID;
	mem->type = RXE_MEM_TYPE_NONE;
	mem->map_shift = ilog2(RXE_BUF_PER_MAP);
}

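/*
 * Pool element cleanup: releases the pinned user memory (if any) and
 * frees the two-level map table allocated by rxe_mem_alloc().
 */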
void rxe_mem_cleanup(struct rxe_pool_entry *arg)
{
	struct rxe_mem *mem = container_of(arg, typeof(*mem), pelem);
	int i;

	if (mem->umem)
		ib_umem_release(mem->umem);

	if (mem->map) {
		for (i = 0; i < mem->num_map; i++)
			kfree(mem->map[i]);

		kfree(mem->map);
	}
}

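/*
 * Allocate the two-level buffer table: an array of num_map pointers,
 * each pointing at a struct rxe_map that holds RXE_BUF_PER_MAP
 * physical-buffer descriptors. For example, if RXE_BUF_PER_MAP were
 * 256 (the real value is defined elsewhere in the driver), describing
 * 1000 buffers would take num_map = 4 maps with room for 1024 buffers.
 */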
static int rxe_mem_alloc(struct rxe_dev *rxe, struct rxe_mem *mem, int num_buf)
{
	int i;
	int num_map;
	struct rxe_map **map = mem->map;

	num_map = (num_buf + RXE_BUF_PER_MAP - 1) / RXE_BUF_PER_MAP;

	mem->map = kmalloc_array(num_map, sizeof(*map), GFP_KERNEL);
	if (!mem->map)
		goto err1;

	for (i = 0; i < num_map; i++) {
		mem->map[i] = kmalloc(sizeof(**map), GFP_KERNEL);
		if (!mem->map[i])
			goto err2;
	}

	BUILD_BUG_ON(!is_power_of_2(RXE_BUF_PER_MAP));

	mem->map_shift = ilog2(RXE_BUF_PER_MAP);
	mem->map_mask = RXE_BUF_PER_MAP - 1;

	mem->num_buf = num_buf;
	mem->num_map = num_map;
	mem->max_buf = num_map * RXE_BUF_PER_MAP;

	return 0;

err2:
	for (i--; i >= 0; i--)
		kfree(mem->map[i]);

	kfree(mem->map);
err1:
	return -ENOMEM;
}

int rxe_mem_init_dma(struct rxe_dev *rxe, struct rxe_pd *pd,
		     int access, struct rxe_mem *mem)
{
	rxe_mem_init(access, mem);

	mem->pd = pd;
	mem->access = access;
	mem->state = RXE_MEM_STATE_VALID;
	mem->type = RXE_MEM_TYPE_DMA;

	return 0;
}

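/*
 * Register a user memory region: pin the pages with ib_umem_get(),
 * then record the kernel virtual address and size of every scatterlist
 * entry in the map table. If anything fails after the pages have been
 * pinned, the umem is released before returning so the pin is not leaked.
 */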
int rxe_mem_init_user(struct rxe_dev *rxe, struct rxe_pd *pd, u64 start,
		      u64 length, u64 iova, int access, struct ib_udata *udata,
		      struct rxe_mem *mem)
{
	int entry;
	struct rxe_map **map;
	struct rxe_phys_buf *buf = NULL;
	struct ib_umem *umem;
	struct scatterlist *sg;
	int num_buf;
	void *vaddr;
	int err;

	umem = ib_umem_get(pd->ibpd.uobject->context, start, length, access, 0);
	if (IS_ERR(umem)) {
		pr_warn("err %d from rxe_umem_get\n",
			(int)PTR_ERR(umem));
		err = -EINVAL;
		goto err1;
	}

	mem->umem = umem;
	num_buf = umem->nmap;

	rxe_mem_init(access, mem);

	err = rxe_mem_alloc(rxe, mem, num_buf);
	if (err) {
		pr_warn("err %d from rxe_mem_alloc\n", err);
		ib_umem_release(umem);
		goto err1;
	}

	mem->page_shift = umem->page_shift;
	mem->page_mask = BIT(umem->page_shift) - 1;

	num_buf = 0;
	map = mem->map;
	if (length > 0) {
		buf = map[0]->buf;

		for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
			vaddr = page_address(sg_page(sg));
			if (!vaddr) {
				pr_warn("null vaddr\n");
				ib_umem_release(umem);
				err = -ENOMEM;
				goto err1;
			}

			buf->addr = (uintptr_t)vaddr;
			buf->size = BIT(umem->page_shift);
			num_buf++;
			buf++;

			if (num_buf >= RXE_BUF_PER_MAP) {
				map++;
				buf = map[0]->buf;
				num_buf = 0;
			}
		}
	}

	mem->pd = pd;
	mem->umem = umem;
	mem->access = access;
	mem->length = length;
	mem->iova = iova;
	mem->va = start;
	mem->offset = ib_umem_offset(umem);
	mem->state = RXE_MEM_STATE_VALID;
	mem->type = RXE_MEM_TYPE_MR;

	return 0;

err1:
	return err;
}

int rxe_mem_init_fast(struct rxe_dev *rxe, struct rxe_pd *pd,
		      int max_pages, struct rxe_mem *mem)
{
	int err;

	rxe_mem_init(0, mem);

	/* In fastreg, we also set the rkey */
	mem->ibmr.rkey = mem->ibmr.lkey;

	err = rxe_mem_alloc(rxe, mem, max_pages);
	if (err)
		goto err1;

	mem->pd = pd;
	mem->max_buf = max_pages;
	mem->state = RXE_MEM_STATE_FREE;
	mem->type = RXE_MEM_TYPE_MR;

	return 0;

err1:
	return err;
}

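/*
 * Translate an iova within the region into a (map index, buffer index,
 * offset-within-buffer) triple. When every buffer is the same page size
 * this is pure shifting and masking; a worked example with hypothetical
 * values: page_shift = 12 and RXE_BUF_PER_MAP = 256 (map_mask = 0xff,
 * map_shift = 8) turn a byte offset of 0x123456 into offset 0x456,
 * buffer index 0x23 and map index 1. Otherwise (page_shift == 0) the
 * buffer sizes are walked one by one.
 */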
static void lookup_iova(
	struct rxe_mem	*mem,
	u64		iova,
	int		*m_out,
	int		*n_out,
	size_t		*offset_out)
{
	size_t offset = iova - mem->iova + mem->offset;
	int map_index;
	int buf_index;
	u64 length;

	if (likely(mem->page_shift)) {
		*offset_out = offset & mem->page_mask;
		offset >>= mem->page_shift;
		*n_out = offset & mem->map_mask;
		*m_out = offset >> mem->map_shift;
	} else {
		map_index = 0;
		buf_index = 0;

		length = mem->map[map_index]->buf[buf_index].size;

		while (offset >= length) {
			offset -= length;
			buf_index++;

			if (buf_index == RXE_BUF_PER_MAP) {
				map_index++;
				buf_index = 0;
			}
			length = mem->map[map_index]->buf[buf_index].size;
		}

		*m_out = map_index;
		*n_out = buf_index;
		*offset_out = offset;
	}
}

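/*
 * Return a kernel virtual address for an iova inside a valid region.
 * The mapping is only good up to the end of the buffer that contains
 * the iova; accesses that would cross into the next buffer are refused
 * here (rxe_mem_copy() handles copies that span buffers).
 */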
void *iova_to_vaddr(struct rxe_mem *mem, u64 iova, int length)
{
	size_t offset;
	int m, n;
	void *addr;

	if (mem->state != RXE_MEM_STATE_VALID) {
		pr_warn("mem not in valid state\n");
		addr = NULL;
		goto out;
	}

	if (!mem->map) {
		addr = (void *)(uintptr_t)iova;
		goto out;
	}

	if (mem_check_range(mem, iova, length)) {
		pr_warn("range violation\n");
		addr = NULL;
		goto out;
	}

	lookup_iova(mem, iova, &m, &n, &offset);

	if (offset + length > mem->map[m]->buf[n].size) {
		pr_warn("crosses page boundary\n");
		addr = NULL;
		goto out;
	}

	addr = (void *)(uintptr_t)mem->map[m]->buf[n].addr + offset;

out:
	return addr;
}

/* copy data from a range (vaddr, vaddr+length-1) to or from
 * a mem object starting at iova. Compute an incremental crc32
 * if crcp is not NULL. The caller must hold a reference to mem.
 */
int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr, int length,
		 enum copy_direction dir, u32 *crcp)
{
	int err;
	int bytes;
	u8 *va;
	struct rxe_map **map;
	struct rxe_phys_buf *buf;
	int m;
	int i;
	size_t offset;
	u32 crc = crcp ? (*crcp) : 0;

	if (length == 0)
		return 0;

	if (mem->type == RXE_MEM_TYPE_DMA) {
		u8 *src, *dest;

		src = (dir == to_mem_obj) ?
			addr : ((void *)(uintptr_t)iova);

		dest = (dir == to_mem_obj) ?
			((void *)(uintptr_t)iova) : addr;

		memcpy(dest, src, length);

		if (crcp)
			*crcp = rxe_crc32(to_rdev(mem->pd->ibpd.device),
					  *crcp, dest, length);

		return 0;
	}

	WARN_ON_ONCE(!mem->map);

	err = mem_check_range(mem, iova, length);
	if (err) {
		err = -EFAULT;
		goto err1;
	}

	lookup_iova(mem, iova, &m, &i, &offset);

	map = mem->map + m;
	buf = map[0]->buf + i;

	while (length > 0) {
		u8 *src, *dest;

		va = (u8 *)(uintptr_t)buf->addr + offset;
		src = (dir == to_mem_obj) ? addr : va;
		dest = (dir == to_mem_obj) ? va : addr;

		bytes = buf->size - offset;

		if (bytes > length)
			bytes = length;

		memcpy(dest, src, bytes);

		if (crcp)
			crc = rxe_crc32(to_rdev(mem->pd->ibpd.device),
					crc, dest, bytes);

		length -= bytes;
		addr += bytes;

		offset = 0;
		buf++;
		i++;

		if (i == RXE_BUF_PER_MAP) {
			i = 0;
			map++;
			buf = map[0]->buf;
		}
	}

	if (crcp)
		*crcp = crc;

	return 0;

err1:
	return err;
}

/* copy data in or out of a wqe, i.e. sg list
 * under the control of a dma descriptor
 */
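/* The walk resumes from dma->cur_sge/sge_offset and, on success, these
 * and dma->resid are advanced past the bytes just copied, so repeated
 * calls consume the sg list incrementally.
 */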
int copy_data(
	struct rxe_dev		*rxe,
	struct rxe_pd		*pd,
	int			access,
	struct rxe_dma_info	*dma,
	void			*addr,
	int			length,
	enum copy_direction	dir,
	u32			*crcp)
{
	int bytes;
	struct rxe_sge *sge = &dma->sge[dma->cur_sge];
	int offset = dma->sge_offset;
	int resid = dma->resid;
	struct rxe_mem *mem = NULL;
	u64 iova;
	int err;

	if (length == 0)
		return 0;

	if (length > resid) {
		err = -EINVAL;
		goto err2;
	}

	if (sge->length && (offset < sge->length)) {
		mem = lookup_mem(pd, access, sge->lkey, lookup_local);
		if (!mem) {
			err = -EINVAL;
			goto err1;
		}
	}

	while (length > 0) {
		bytes = length;

		if (offset >= sge->length) {
			if (mem) {
				rxe_drop_ref(mem);
				mem = NULL;
			}
			sge++;
			dma->cur_sge++;
			offset = 0;

			if (dma->cur_sge >= dma->num_sge) {
				err = -ENOSPC;
				goto err2;
			}

			if (sge->length) {
				mem = lookup_mem(pd, access, sge->lkey,
						 lookup_local);
				if (!mem) {
					err = -EINVAL;
					goto err1;
				}
			} else {
				continue;
			}
		}

		if (bytes > sge->length - offset)
			bytes = sge->length - offset;

		if (bytes > 0) {
			iova = sge->addr + offset;

			err = rxe_mem_copy(mem, iova, addr, bytes, dir, crcp);
			if (err)
				goto err2;

			offset += bytes;
			resid -= bytes;
			length -= bytes;
			addr += bytes;
		}
	}

	dma->sge_offset = offset;
	dma->resid = resid;

	if (mem)
		rxe_drop_ref(mem);

	return 0;

err2:
	if (mem)
		rxe_drop_ref(mem);
err1:
	return err;
}

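/*
 * Skip over 'length' bytes of the sg list without copying anything,
 * updating dma->cur_sge, dma->sge_offset and dma->resid just as
 * copy_data() would.
 */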
int advance_dma_data(struct rxe_dma_info *dma, unsigned int length)
{
	struct rxe_sge *sge = &dma->sge[dma->cur_sge];
	int offset = dma->sge_offset;
	int resid = dma->resid;

	while (length) {
		unsigned int bytes;

		if (offset >= sge->length) {
			sge++;
			dma->cur_sge++;
			offset = 0;
			if (dma->cur_sge >= dma->num_sge)
				return -ENOSPC;
		}

		bytes = length;

		if (bytes > sge->length - offset)
			bytes = sge->length - offset;

		offset += bytes;
		resid -= bytes;
		length -= bytes;
	}

	dma->sge_offset = offset;
	dma->resid = resid;

	return 0;
}

/* (1) find the mem (mr or mw) corresponding to lkey/rkey
 *     depending on lookup_type
 * (2) verify that the (qp) pd matches the mem pd
 * (3) verify that the mem can support the requested access
 * (4) verify that mem state is valid
 */
struct rxe_mem *lookup_mem(struct rxe_pd *pd, int access, u32 key,
			   enum lookup_type type)
{
	struct rxe_mem *mem;
	struct rxe_dev *rxe = to_rdev(pd->ibpd.device);
	int index = key >> 8;

	if (index >= RXE_MIN_MR_INDEX && index <= RXE_MAX_MR_INDEX) {
		mem = rxe_pool_get_index(&rxe->mr_pool, index);
		if (!mem)
			goto err1;
	} else {
		goto err1;
	}

	if ((type == lookup_local && mem->lkey != key) ||
	    (type == lookup_remote && mem->rkey != key))
		goto err2;

	if (mem->pd != pd)
		goto err2;

	if (access && !(access & mem->access))
		goto err2;

	if (mem->state != RXE_MEM_STATE_VALID)
		goto err2;

	return mem;

err2:
	rxe_drop_ref(mem);
err1:
	return NULL;
}

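/*
 * Populate an already-allocated buffer table from an array of page
 * addresses, each of size 1 << mem->page_shift, and mark the region
 * valid with the given iova and a length of num_pages pages.
 */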
int rxe_mem_map_pages(struct rxe_dev *rxe, struct rxe_mem *mem,
		      u64 *page, int num_pages, u64 iova)
{
	int i;
	int num_buf;
	int err;
	struct rxe_map **map;
	struct rxe_phys_buf *buf;
	int page_size;

	if (num_pages > mem->max_buf) {
		err = -EINVAL;
		goto err1;
	}

	num_buf = 0;
	page_size = 1 << mem->page_shift;
	map = mem->map;
	buf = map[0]->buf;

	for (i = 0; i < num_pages; i++) {
		buf->addr = *page++;
		buf->size = page_size;
		buf++;
		num_buf++;

		if (num_buf == RXE_BUF_PER_MAP) {
			map++;
			buf = map[0]->buf;
			num_buf = 0;
		}
	}

	mem->iova = iova;
	mem->va = iova;
	mem->length = num_pages << mem->page_shift;
	mem->state = RXE_MEM_STATE_VALID;

	return 0;

err1:
	return err;
}