/*
 *  IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 *  MR/MW functions
 *
 *  Authors: Dietmar Decker <ddecker@de.ibm.com>
 *           Christoph Raisch <raisch@de.ibm.com>
 *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
 *
 *  Copyright (c) 2005 IBM Corporation
 *
 *  All rights reserved.
 *
 *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
 *  BSD.
 *
 * OpenIB BSD License
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials
 * provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/slab.h>
#include <rdma/ib_umem.h>

#include "ehca_iverbs.h"
#include "ehca_mrmw.h"
#include "hcp_if.h"
#include "hipz_hw.h"
#define NUM_CHUNKS(length, chunk_size) \
	(((length) + (chunk_size - 1)) / (chunk_size))
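/*
 * Illustrative worked example, not part of the original driver: NUM_CHUNKS
 * rounds up, so a region of 0x2001 bytes split into 0x1000-byte chunks needs
 * NUM_CHUNKS(0x2001, 0x1000) = (0x2001 + 0xfff) / 0x1000 = 3 chunks.
 */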
/* max number of rpages (per hcall register_rpages) */
#define MAX_RPAGES 512

/* DMEM toleration management */
#define EHCA_SECTSHIFT        SECTION_SIZE_BITS
#define EHCA_SECTSIZE          (1UL << EHCA_SECTSHIFT)
#define EHCA_HUGEPAGESHIFT     34
#define EHCA_HUGEPAGE_SIZE     (1UL << EHCA_HUGEPAGESHIFT)
#define EHCA_HUGEPAGE_PFN_MASK ((EHCA_HUGEPAGE_SIZE - 1) >> PAGE_SHIFT)
#define EHCA_INVAL_ADDR        0xFFFFFFFFFFFFFFFFULL
#define EHCA_DIR_INDEX_SHIFT 13                   /* 8k Entries in 64k block */
#define EHCA_TOP_INDEX_SHIFT (EHCA_DIR_INDEX_SHIFT * 2)
#define EHCA_MAP_ENTRIES (1 << EHCA_DIR_INDEX_SHIFT)
#define EHCA_TOP_MAP_SIZE (0x10000)               /* currently fixed map size */
#define EHCA_DIR_MAP_SIZE (0x10000)
#define EHCA_ENT_MAP_SIZE (0x10000)
#define EHCA_INDEX_MASK (EHCA_MAP_ENTRIES - 1)
static unsigned long ehca_mr_len;

/*
 * Memory map data structures
 */
struct ehca_dir_bmap {
	u64 ent[EHCA_MAP_ENTRIES];
};
struct ehca_top_bmap {
	struct ehca_dir_bmap *dir[EHCA_MAP_ENTRIES];
};
struct ehca_bmap {
	struct ehca_top_bmap *top[EHCA_MAP_ENTRIES];
};

static struct ehca_bmap *ehca_bmap;
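/*
 * Illustrative example, not part of the original driver: the bus map is a
 * three-level radix tree over memory sections. A section number n (a physical
 * address shifted right by EHCA_SECTSHIFT) decomposes as
 *   top = n >> EHCA_TOP_INDEX_SHIFT                       (bits 26 and up)
 *   dir = (n >> EHCA_DIR_INDEX_SHIFT) & EHCA_INDEX_MASK   (bits 13..25)
 *   idx = n & EHCA_INDEX_MASK                             (bits 0..12)
 * and is looked up via ehca_bmap->top[top]->dir[dir]->ent[idx].
 */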
static struct kmem_cache *mr_cache;
static struct kmem_cache *mw_cache;
enum ehca_mr_pgsize {
	EHCA_MR_PGSIZE4K  = 0x1000L,
	EHCA_MR_PGSIZE64K = 0x10000L,
	EHCA_MR_PGSIZE1M  = 0x100000L,
	EHCA_MR_PGSIZE16M = 0x1000000L
};

#define EHCA_MR_PGSHIFT4K  12
#define EHCA_MR_PGSHIFT64K 16
#define EHCA_MR_PGSHIFT1M  20
#define EHCA_MR_PGSHIFT16M 24
static u64 ehca_map_vaddr(void *caddr);

static u32 ehca_encode_hwpage_size(u32 pgsize)
{
	int log = ilog2(pgsize);
	WARN_ON(log < 12 || log > 24 || log & 3);
	return (log - 12) / 4;
}
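/*
 * Illustrative worked example, not part of the original driver: the firmware
 * encodes the four supported page sizes as 0..3, so
 * ehca_encode_hwpage_size(EHCA_MR_PGSIZE64K) gives log = 16 and
 * (16 - 12) / 4 = 1, while EHCA_MR_PGSIZE16M gives log = 24 and result 3.
 */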
static u64 ehca_get_max_hwpage_size(struct ehca_shca *shca)
{
	return 1UL << ilog2(shca->hca_cap_mr_pgsize);
}
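/*
 * Illustrative example, not part of the original driver: hca_cap_mr_pgsize
 * is a bitmask of supported page sizes. If the firmware were to report
 * 0x1111000 (4K | 64K | 1M | 16M), ilog2() picks the highest set bit (24),
 * so the largest supported hwpage size, 16M, is returned.
 */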
static struct ehca_mr *ehca_mr_new(void)
{
	struct ehca_mr *me;

	me = kmem_cache_zalloc(mr_cache, GFP_KERNEL);
	if (me)
		spin_lock_init(&me->mrlock);
	else
		ehca_gen_err("alloc failed");

	return me;
}

static void ehca_mr_delete(struct ehca_mr *me)
{
	kmem_cache_free(mr_cache, me);
}

static struct ehca_mw *ehca_mw_new(void)
{
	struct ehca_mw *me;

	me = kmem_cache_zalloc(mw_cache, GFP_KERNEL);
	if (me)
		spin_lock_init(&me->mwlock);
	else
		ehca_gen_err("alloc failed");

	return me;
}

static void ehca_mw_delete(struct ehca_mw *me)
{
	kmem_cache_free(mw_cache, me);
}

/*----------------------------------------------------------------------*/
struct ib_mr *ehca_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
{
	struct ib_mr *ib_mr;
	int ret;
	struct ehca_mr *e_maxmr;
	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
	struct ehca_shca *shca =
		container_of(pd->device, struct ehca_shca, ib_device);

	if (shca->maxmr) {
		e_maxmr = ehca_mr_new();
		if (!e_maxmr) {
			ehca_err(&shca->ib_device, "out of memory");
			ib_mr = ERR_PTR(-ENOMEM);
			goto get_dma_mr_exit0;
		}

		ret = ehca_reg_maxmr(shca, e_maxmr,
				     (void *)ehca_map_vaddr((void *)KERNELBASE),
				     mr_access_flags, e_pd,
				     &e_maxmr->ib.ib_mr.lkey,
				     &e_maxmr->ib.ib_mr.rkey);
		if (ret) {
			ehca_mr_delete(e_maxmr);
			ib_mr = ERR_PTR(ret);
			goto get_dma_mr_exit0;
		}
		ib_mr = &e_maxmr->ib.ib_mr;
	} else {
		ehca_err(&shca->ib_device, "no internal max-MR exist!");
		ib_mr = ERR_PTR(-EINVAL);
		goto get_dma_mr_exit0;
	}

get_dma_mr_exit0:
	if (IS_ERR(ib_mr))
		ehca_err(&shca->ib_device, "h_ret=%li pd=%p mr_access_flags=%x",
			 PTR_ERR(ib_mr), pd, mr_access_flags);
	return ib_mr;
} /* end ehca_get_dma_mr() */
/*----------------------------------------------------------------------*/

struct ib_mr *ehca_reg_phys_mr(struct ib_pd *pd,
			       struct ib_phys_buf *phys_buf_array,
			       int num_phys_buf,
			       int mr_access_flags,
			       u64 *iova_start)
{
	struct ib_mr *ib_mr;
	int ret;
	struct ehca_mr *e_mr;
	struct ehca_shca *shca =
		container_of(pd->device, struct ehca_shca, ib_device);
	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);

	u64 size;

	if ((num_phys_buf <= 0) || !phys_buf_array) {
		ehca_err(pd->device, "bad input values: num_phys_buf=%x "
			 "phys_buf_array=%p", num_phys_buf, phys_buf_array);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_phys_mr_exit0;
	}
	if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
	    ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
		/*
		 * Remote Write Access requires Local Write Access
		 * Remote Atomic Access requires Local Write Access
		 */
		ehca_err(pd->device, "bad input values: mr_access_flags=%x",
			 mr_access_flags);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_phys_mr_exit0;
	}

	/* check physical buffer list and calculate size */
	ret = ehca_mr_chk_buf_and_calc_size(phys_buf_array, num_phys_buf,
					    iova_start, &size);
	if (ret) {
		ib_mr = ERR_PTR(ret);
		goto reg_phys_mr_exit0;
	}
	if ((size == 0) ||
	    (((u64)iova_start + size) < (u64)iova_start)) {
		ehca_err(pd->device, "bad input values: size=%llx iova_start=%p",
			 size, iova_start);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_phys_mr_exit0;
	}

	e_mr = ehca_mr_new();
	if (!e_mr) {
		ehca_err(pd->device, "out of memory");
		ib_mr = ERR_PTR(-ENOMEM);
		goto reg_phys_mr_exit0;
	}

	/* register MR on HCA */
	if (ehca_mr_is_maxmr(size, iova_start)) {
		e_mr->flags |= EHCA_MR_FLAG_MAXMR;
		ret = ehca_reg_maxmr(shca, e_mr, iova_start, mr_access_flags,
				     e_pd, &e_mr->ib.ib_mr.lkey,
				     &e_mr->ib.ib_mr.rkey);
		if (ret) {
			ib_mr = ERR_PTR(ret);
			goto reg_phys_mr_exit1;
		}
	} else {
		struct ehca_mr_pginfo pginfo;
		u32 num_kpages;
		u32 num_hwpages;
		u64 hw_pgsize;

		num_kpages = NUM_CHUNKS(((u64)iova_start % PAGE_SIZE) + size,
					PAGE_SIZE);
		/* for kernel space we try most possible pgsize */
		hw_pgsize = ehca_get_max_hwpage_size(shca);
		num_hwpages = NUM_CHUNKS(((u64)iova_start % hw_pgsize) + size,
					 hw_pgsize);
		memset(&pginfo, 0, sizeof(pginfo));
		pginfo.type = EHCA_MR_PGI_PHYS;
		pginfo.num_kpages = num_kpages;
		pginfo.hwpage_size = hw_pgsize;
		pginfo.num_hwpages = num_hwpages;
		pginfo.u.phy.num_phys_buf = num_phys_buf;
		pginfo.u.phy.phys_buf_array = phys_buf_array;
		pginfo.next_hwpage =
			((u64)iova_start & ~PAGE_MASK) / hw_pgsize;

		ret = ehca_reg_mr(shca, e_mr, iova_start, size, mr_access_flags,
				  e_pd, &pginfo, &e_mr->ib.ib_mr.lkey,
				  &e_mr->ib.ib_mr.rkey, EHCA_REG_MR);
		if (ret) {
			ib_mr = ERR_PTR(ret);
			goto reg_phys_mr_exit1;
		}
	}

	/* successful registration of all pages */
	return &e_mr->ib.ib_mr;

reg_phys_mr_exit1:
	ehca_mr_delete(e_mr);
reg_phys_mr_exit0:
	if (IS_ERR(ib_mr))
		ehca_err(pd->device, "h_ret=%li pd=%p phys_buf_array=%p "
			 "num_phys_buf=%x mr_access_flags=%x iova_start=%p",
			 PTR_ERR(ib_mr), pd, phys_buf_array,
			 num_phys_buf, mr_access_flags, iova_start);
	return ib_mr;
} /* end ehca_reg_phys_mr() */
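/*
 * Illustrative worked example, not part of the original driver: for
 * iova_start = 0x2ff00 and size = 0x6000, the 4K kpage count is
 * NUM_CHUNKS(0x2ff00 % 0x1000 + 0x6000, 0x1000) = NUM_CHUNKS(0x6f00, 0x1000)
 * = 7, while with a 64K hwpage it is NUM_CHUNKS(0xff00 + 0x6000, 0x10000) = 2.
 */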
/*----------------------------------------------------------------------*/

struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
			       u64 virt, int mr_access_flags,
			       struct ib_udata *udata)
{
	struct ib_mr *ib_mr;
	struct ehca_mr *e_mr;
	struct ehca_shca *shca =
		container_of(pd->device, struct ehca_shca, ib_device);
	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
	struct ehca_mr_pginfo pginfo;
	int ret, page_shift;
	u32 num_kpages;
	u32 num_hwpages;
	u64 hwpage_size;

	if (!pd) {
		ehca_gen_err("bad pd=%p", pd);
		return ERR_PTR(-EFAULT);
	}

	if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
	    ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
		/*
		 * Remote Write Access requires Local Write Access
		 * Remote Atomic Access requires Local Write Access
		 */
		ehca_err(pd->device, "bad input values: mr_access_flags=%x",
			 mr_access_flags);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_user_mr_exit0;
	}

	if (length == 0 || virt + length < virt) {
		ehca_err(pd->device, "bad input values: length=%llx "
			 "virt_base=%llx", length, virt);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_user_mr_exit0;
	}

	e_mr = ehca_mr_new();
	if (!e_mr) {
		ehca_err(pd->device, "out of memory");
		ib_mr = ERR_PTR(-ENOMEM);
		goto reg_user_mr_exit0;
	}

	e_mr->umem = ib_umem_get(pd->uobject->context, start, length,
				 mr_access_flags, 0);
	if (IS_ERR(e_mr->umem)) {
		ib_mr = (void *)e_mr->umem;
		goto reg_user_mr_exit1;
	}

	if (e_mr->umem->page_size != PAGE_SIZE) {
		ehca_err(pd->device, "page size not supported, "
			 "e_mr->umem->page_size=%x", e_mr->umem->page_size);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_user_mr_exit2;
	}

	/* determine number of MR pages */
	num_kpages = NUM_CHUNKS((virt % PAGE_SIZE) + length, PAGE_SIZE);
	/* select proper hw_pgsize */
	page_shift = PAGE_SHIFT;
	if (e_mr->umem->hugetlb) {
		/* determine page_shift, clamp between 4K and 16M */
		page_shift = (fls64(length - 1) + 3) & ~3;
		page_shift = min(max(page_shift, EHCA_MR_PGSHIFT4K),
				 EHCA_MR_PGSHIFT16M);
	}
	hwpage_size = 1UL << page_shift;

	/* now that we have the desired page size, shift until it's
	 * supported, too. 4K is always supported, so this terminates.
	 */
	while (!(hwpage_size & shca->hca_cap_mr_pgsize))
		hwpage_size >>= 4;

reg_user_mr_fallback:
	num_hwpages = NUM_CHUNKS((virt % hwpage_size) + length, hwpage_size);
	/* register MR on HCA */
	memset(&pginfo, 0, sizeof(pginfo));
	pginfo.type = EHCA_MR_PGI_USER;
	pginfo.hwpage_size = hwpage_size;
	pginfo.num_kpages = num_kpages;
	pginfo.num_hwpages = num_hwpages;
	pginfo.u.usr.region = e_mr->umem;
	pginfo.next_hwpage = e_mr->umem->offset / hwpage_size;
	pginfo.u.usr.next_chunk = list_prepare_entry(pginfo.u.usr.next_chunk,
						     (&e_mr->umem->chunk_list),
						     list);

	ret = ehca_reg_mr(shca, e_mr, (u64 *)virt, length, mr_access_flags,
			  e_pd, &pginfo, &e_mr->ib.ib_mr.lkey,
			  &e_mr->ib.ib_mr.rkey, EHCA_REG_MR);
	if (ret == -EINVAL && pginfo.hwpage_size > PAGE_SIZE) {
		ehca_warn(pd->device, "failed to register mr "
			  "with hwpage_size=%llx", hwpage_size);
		ehca_info(pd->device, "try to register mr with "
			  "kpage_size=%lx", PAGE_SIZE);
		/*
		 * this means kpages are not contiguous for a hw page
		 * try kernel page size as fallback solution
		 */
		hwpage_size = PAGE_SIZE;
		goto reg_user_mr_fallback;
	}
	if (ret) {
		ib_mr = ERR_PTR(ret);
		goto reg_user_mr_exit2;
	}

	/* successful registration of all pages */
	return &e_mr->ib.ib_mr;

reg_user_mr_exit2:
	ib_umem_release(e_mr->umem);
reg_user_mr_exit1:
	ehca_mr_delete(e_mr);
reg_user_mr_exit0:
	if (IS_ERR(ib_mr))
		ehca_err(pd->device, "rc=%li pd=%p mr_access_flags=%x udata=%p",
			 PTR_ERR(ib_mr), pd, mr_access_flags, udata);
	return ib_mr;
} /* end ehca_reg_user_mr() */
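/*
 * Illustrative worked example, not part of the original driver: for a
 * hugetlb region of length 5M (0x500000), fls64(0x4fffff) = 23, so
 * page_shift = (23 + 3) & ~3 = 24 and a 16M hwpage is tried first; if
 * registration then fails with -EINVAL, the code above falls back to
 * PAGE_SIZE via reg_user_mr_fallback.
 */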
/*----------------------------------------------------------------------*/

int ehca_rereg_phys_mr(struct ib_mr *mr,
		       int mr_rereg_mask,
		       struct ib_pd *pd,
		       struct ib_phys_buf *phys_buf_array,
		       int num_phys_buf,
		       int mr_access_flags,
		       u64 *iova_start)
{
	int ret;

	struct ehca_shca *shca =
		container_of(mr->device, struct ehca_shca, ib_device);
	struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
	u64 new_size;
	u64 *new_start;
	u32 new_acl;
	struct ehca_pd *new_pd;
	u32 tmp_lkey, tmp_rkey;
	unsigned long sl_flags;
	u32 num_kpages = 0;
	u32 num_hwpages = 0;
	struct ehca_mr_pginfo pginfo;

	if (!(mr_rereg_mask & IB_MR_REREG_TRANS)) {
		/* TODO not supported, because PHYP rereg hCall needs pages */
		ehca_err(mr->device, "rereg without IB_MR_REREG_TRANS not "
			 "supported yet, mr_rereg_mask=%x", mr_rereg_mask);
		ret = -EINVAL;
		goto rereg_phys_mr_exit0;
	}

	if (mr_rereg_mask & IB_MR_REREG_PD) {
		if (!pd) {
			ehca_err(mr->device, "rereg with bad pd, pd=%p "
				 "mr_rereg_mask=%x", pd, mr_rereg_mask);
			ret = -EINVAL;
			goto rereg_phys_mr_exit0;
		}
	}

	if ((mr_rereg_mask &
	     ~(IB_MR_REREG_TRANS | IB_MR_REREG_PD | IB_MR_REREG_ACCESS)) ||
	    (mr_rereg_mask == 0)) {
		ret = -EINVAL;
		goto rereg_phys_mr_exit0;
	}

	/* check other parameters */
	if (e_mr == shca->maxmr) {
		/* should be impossible, however reject to be sure */
		ehca_err(mr->device, "rereg internal max-MR impossible, mr=%p "
			 "shca->maxmr=%p mr->lkey=%x",
			 mr, shca->maxmr, mr->lkey);
		ret = -EINVAL;
		goto rereg_phys_mr_exit0;
	}
	if (mr_rereg_mask & IB_MR_REREG_TRANS) { /* transl., i.e. addr/size */
		if (e_mr->flags & EHCA_MR_FLAG_FMR) {
			ehca_err(mr->device, "not supported for FMR, mr=%p "
				 "flags=%x", mr, e_mr->flags);
			ret = -EINVAL;
			goto rereg_phys_mr_exit0;
		}
		if (!phys_buf_array || num_phys_buf <= 0) {
			ehca_err(mr->device, "bad input values mr_rereg_mask=%x"
				 " phys_buf_array=%p num_phys_buf=%x",
				 mr_rereg_mask, phys_buf_array, num_phys_buf);
			ret = -EINVAL;
			goto rereg_phys_mr_exit0;
		}
	}
	if ((mr_rereg_mask & IB_MR_REREG_ACCESS) &&	/* change ACL */
	    (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
	      !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
	     ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
	      !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)))) {
		/*
		 * Remote Write Access requires Local Write Access
		 * Remote Atomic Access requires Local Write Access
		 */
		ehca_err(mr->device, "bad input values: mr_rereg_mask=%x "
			 "mr_access_flags=%x", mr_rereg_mask, mr_access_flags);
		ret = -EINVAL;
		goto rereg_phys_mr_exit0;
	}

	/* set requested values dependent on rereg request */
	spin_lock_irqsave(&e_mr->mrlock, sl_flags);
	new_start = e_mr->start;
	new_size = e_mr->size;
	new_acl = e_mr->acl;
	new_pd = container_of(mr->pd, struct ehca_pd, ib_pd);

	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
		u64 hw_pgsize = ehca_get_max_hwpage_size(shca);

		new_start = iova_start;	/* change address */
		/* check physical buffer list and calculate size */
		ret = ehca_mr_chk_buf_and_calc_size(phys_buf_array,
						    num_phys_buf, iova_start,
						    &new_size);
		if (ret)
			goto rereg_phys_mr_exit1;
		if ((new_size == 0) ||
		    (((u64)iova_start + new_size) < (u64)iova_start)) {
			ehca_err(mr->device, "bad input values: new_size=%llx "
				 "iova_start=%p", new_size, iova_start);
			ret = -EINVAL;
			goto rereg_phys_mr_exit1;
		}
		num_kpages = NUM_CHUNKS(((u64)new_start % PAGE_SIZE) +
					new_size, PAGE_SIZE);
		num_hwpages = NUM_CHUNKS(((u64)new_start % hw_pgsize) +
					 new_size, hw_pgsize);
		memset(&pginfo, 0, sizeof(pginfo));
		pginfo.type = EHCA_MR_PGI_PHYS;
		pginfo.num_kpages = num_kpages;
		pginfo.hwpage_size = hw_pgsize;
		pginfo.num_hwpages = num_hwpages;
		pginfo.u.phy.num_phys_buf = num_phys_buf;
		pginfo.u.phy.phys_buf_array = phys_buf_array;
		pginfo.next_hwpage =
			((u64)iova_start & ~PAGE_MASK) / hw_pgsize;
	}
	if (mr_rereg_mask & IB_MR_REREG_ACCESS)
		new_acl = mr_access_flags;
	if (mr_rereg_mask & IB_MR_REREG_PD)
		new_pd = container_of(pd, struct ehca_pd, ib_pd);

	ret = ehca_rereg_mr(shca, e_mr, new_start, new_size, new_acl,
			    new_pd, &pginfo, &tmp_lkey, &tmp_rkey);
	if (ret)
		goto rereg_phys_mr_exit1;

	/* successful reregistration */
	if (mr_rereg_mask & IB_MR_REREG_PD)
		mr->pd = pd;
	mr->lkey = tmp_lkey;
	mr->rkey = tmp_rkey;

rereg_phys_mr_exit1:
	spin_unlock_irqrestore(&e_mr->mrlock, sl_flags);
rereg_phys_mr_exit0:
	if (ret)
		ehca_err(mr->device, "ret=%i mr=%p mr_rereg_mask=%x pd=%p "
			 "phys_buf_array=%p num_phys_buf=%x mr_access_flags=%x "
			 "iova_start=%p",
			 ret, mr, mr_rereg_mask, pd, phys_buf_array,
			 num_phys_buf, mr_access_flags, iova_start);
	return ret;
} /* end ehca_rereg_phys_mr() */
/*----------------------------------------------------------------------*/

int ehca_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
{
	int ret = 0;
	u64 h_ret;
	struct ehca_shca *shca =
		container_of(mr->device, struct ehca_shca, ib_device);
	struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
	unsigned long sl_flags;
	struct ehca_mr_hipzout_parms hipzout;

	if ((e_mr->flags & EHCA_MR_FLAG_FMR)) {
		ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p "
			 "e_mr->flags=%x", mr, e_mr, e_mr->flags);
		ret = -EINVAL;
		goto query_mr_exit0;
	}

	memset(mr_attr, 0, sizeof(struct ib_mr_attr));
	spin_lock_irqsave(&e_mr->mrlock, sl_flags);

	h_ret = hipz_h_query_mr(shca->ipz_hca_handle, e_mr, &hipzout);
	if (h_ret != H_SUCCESS) {
		ehca_err(mr->device, "hipz_mr_query failed, h_ret=%lli mr=%p "
			 "hca_hndl=%llx mr_hndl=%llx lkey=%x",
			 h_ret, mr, shca->ipz_hca_handle.handle,
			 e_mr->ipz_mr_handle.handle, mr->lkey);
		ret = ehca2ib_return_code(h_ret);
		goto query_mr_exit1;
	}
	mr_attr->pd = mr->pd;
	mr_attr->device_virt_addr = hipzout.vaddr;
	mr_attr->size = hipzout.len;
	mr_attr->lkey = hipzout.lkey;
	mr_attr->rkey = hipzout.rkey;
	ehca_mrmw_reverse_map_acl(&hipzout.acl, &mr_attr->mr_access_flags);

query_mr_exit1:
	spin_unlock_irqrestore(&e_mr->mrlock, sl_flags);
query_mr_exit0:
	if (ret)
		ehca_err(mr->device, "ret=%i mr=%p mr_attr=%p",
			 ret, mr, mr_attr);
	return ret;
} /* end ehca_query_mr() */
/*----------------------------------------------------------------------*/

int ehca_dereg_mr(struct ib_mr *mr)
{
	int ret = 0;
	u64 h_ret;
	struct ehca_shca *shca =
		container_of(mr->device, struct ehca_shca, ib_device);
	struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);

	if ((e_mr->flags & EHCA_MR_FLAG_FMR)) {
		ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p "
			 "e_mr->flags=%x", mr, e_mr, e_mr->flags);
		ret = -EINVAL;
		goto dereg_mr_exit0;
	} else if (e_mr == shca->maxmr) {
		/* should be impossible, however reject to be sure */
		ehca_err(mr->device, "dereg internal max-MR impossible, mr=%p "
			 "shca->maxmr=%p mr->lkey=%x",
			 mr, shca->maxmr, mr->lkey);
		ret = -EINVAL;
		goto dereg_mr_exit0;
	}

	/* TODO: BUSY: MR still has bound window(s) */
	h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
	if (h_ret != H_SUCCESS) {
		ehca_err(mr->device, "hipz_free_mr failed, h_ret=%lli shca=%p "
			 "e_mr=%p hca_hndl=%llx mr_hndl=%llx mr->lkey=%x",
			 h_ret, shca, e_mr, shca->ipz_hca_handle.handle,
			 e_mr->ipz_mr_handle.handle, mr->lkey);
		ret = ehca2ib_return_code(h_ret);
		goto dereg_mr_exit0;
	}

	if (e_mr->umem)
		ib_umem_release(e_mr->umem);

	/* successful deregistration */
	ehca_mr_delete(e_mr);

dereg_mr_exit0:
	if (ret)
		ehca_err(mr->device, "ret=%i mr=%p", ret, mr);
	return ret;
} /* end ehca_dereg_mr() */
/*----------------------------------------------------------------------*/

struct ib_mw *ehca_alloc_mw(struct ib_pd *pd)
{
	struct ib_mw *ib_mw;
	u64 h_ret;
	struct ehca_mw *e_mw;
	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
	struct ehca_shca *shca =
		container_of(pd->device, struct ehca_shca, ib_device);
	struct ehca_mw_hipzout_parms hipzout;

	e_mw = ehca_mw_new();
	if (!e_mw) {
		ib_mw = ERR_PTR(-ENOMEM);
		goto alloc_mw_exit0;
	}

	h_ret = hipz_h_alloc_resource_mw(shca->ipz_hca_handle, e_mw,
					 e_pd->fw_pd, &hipzout);
	if (h_ret != H_SUCCESS) {
		ehca_err(pd->device, "hipz_mw_allocate failed, h_ret=%lli "
			 "shca=%p hca_hndl=%llx mw=%p",
			 h_ret, shca, shca->ipz_hca_handle.handle, e_mw);
		ib_mw = ERR_PTR(ehca2ib_return_code(h_ret));
		goto alloc_mw_exit1;
	}
	/* successful MW allocation */
	e_mw->ipz_mw_handle = hipzout.handle;
	e_mw->ib_mw.rkey = hipzout.rkey;
	return &e_mw->ib_mw;

alloc_mw_exit1:
	ehca_mw_delete(e_mw);
alloc_mw_exit0:
	if (IS_ERR(ib_mw))
		ehca_err(pd->device, "h_ret=%li pd=%p", PTR_ERR(ib_mw), pd);
	return ib_mw;
} /* end ehca_alloc_mw() */
/*----------------------------------------------------------------------*/

int ehca_bind_mw(struct ib_qp *qp,
		 struct ib_mw *mw,
		 struct ib_mw_bind *mw_bind)
{
	/* TODO: not supported up to now */
	ehca_gen_err("bind MW currently not supported by HCAD");

	return -EPERM;
} /* end ehca_bind_mw() */
/*----------------------------------------------------------------------*/

int ehca_dealloc_mw(struct ib_mw *mw)
{
	u64 h_ret;
	struct ehca_shca *shca =
		container_of(mw->device, struct ehca_shca, ib_device);
	struct ehca_mw *e_mw = container_of(mw, struct ehca_mw, ib_mw);

	h_ret = hipz_h_free_resource_mw(shca->ipz_hca_handle, e_mw);
	if (h_ret != H_SUCCESS) {
		ehca_err(mw->device, "hipz_free_mw failed, h_ret=%lli shca=%p "
			 "mw=%p rkey=%x hca_hndl=%llx mw_hndl=%llx",
			 h_ret, shca, mw, mw->rkey, shca->ipz_hca_handle.handle,
			 e_mw->ipz_mw_handle.handle);
		return ehca2ib_return_code(h_ret);
	}
	/* successful deallocation */
	ehca_mw_delete(e_mw);
	return 0;
} /* end ehca_dealloc_mw() */
/*----------------------------------------------------------------------*/

struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd,
			      int mr_access_flags,
			      struct ib_fmr_attr *fmr_attr)
{
	struct ib_fmr *ib_fmr;
	struct ehca_shca *shca =
		container_of(pd->device, struct ehca_shca, ib_device);
	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
	struct ehca_mr *e_fmr;
	int ret;
	u32 tmp_lkey, tmp_rkey;
	struct ehca_mr_pginfo pginfo;
	u64 hw_pgsize;

	/* check other parameters */
	if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
	    ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
		/*
		 * Remote Write Access requires Local Write Access
		 * Remote Atomic Access requires Local Write Access
		 */
		ehca_err(pd->device, "bad input values: mr_access_flags=%x",
			 mr_access_flags);
		ib_fmr = ERR_PTR(-EINVAL);
		goto alloc_fmr_exit0;
	}
	if (mr_access_flags & IB_ACCESS_MW_BIND) {
		ehca_err(pd->device, "bad input values: mr_access_flags=%x",
			 mr_access_flags);
		ib_fmr = ERR_PTR(-EINVAL);
		goto alloc_fmr_exit0;
	}
	if ((fmr_attr->max_pages == 0) || (fmr_attr->max_maps == 0)) {
		ehca_err(pd->device, "bad input values: fmr_attr->max_pages=%x "
			 "fmr_attr->max_maps=%x fmr_attr->page_shift=%x",
			 fmr_attr->max_pages, fmr_attr->max_maps,
			 fmr_attr->page_shift);
		ib_fmr = ERR_PTR(-EINVAL);
		goto alloc_fmr_exit0;
	}

	hw_pgsize = 1 << fmr_attr->page_shift;
	if (!(hw_pgsize & shca->hca_cap_mr_pgsize)) {
		ehca_err(pd->device, "unsupported fmr_attr->page_shift=%x",
			 fmr_attr->page_shift);
		ib_fmr = ERR_PTR(-EINVAL);
		goto alloc_fmr_exit0;
	}

	e_fmr = ehca_mr_new();
	if (!e_fmr) {
		ib_fmr = ERR_PTR(-ENOMEM);
		goto alloc_fmr_exit0;
	}
	e_fmr->flags |= EHCA_MR_FLAG_FMR;

	/* register MR on HCA */
	memset(&pginfo, 0, sizeof(pginfo));
	pginfo.hwpage_size = hw_pgsize;
	/*
	 * pginfo.num_hwpages==0, ie register_rpages() will not be called
	 * but deferred to map_phys_fmr()
	 */
	ret = ehca_reg_mr(shca, e_fmr, NULL,
			  fmr_attr->max_pages * (1 << fmr_attr->page_shift),
			  mr_access_flags, e_pd, &pginfo,
			  &tmp_lkey, &tmp_rkey, EHCA_REG_MR);
	if (ret) {
		ib_fmr = ERR_PTR(ret);
		goto alloc_fmr_exit1;
	}

	/* successful */
	e_fmr->hwpage_size = hw_pgsize;
	e_fmr->fmr_page_size = 1 << fmr_attr->page_shift;
	e_fmr->fmr_max_pages = fmr_attr->max_pages;
	e_fmr->fmr_max_maps = fmr_attr->max_maps;
	e_fmr->fmr_map_cnt = 0;
	return &e_fmr->ib.ib_fmr;

alloc_fmr_exit1:
	ehca_mr_delete(e_fmr);
alloc_fmr_exit0:
	return ib_fmr;
} /* end ehca_alloc_fmr() */
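/*
 * Illustrative worked example, not part of the original driver: for
 * fmr_attr->page_shift = 16, hw_pgsize = 1 << 16 = 0x10000 (64K); the
 * allocation is rejected unless bit 0x10000 is set in the
 * shca->hca_cap_mr_pgsize capability mask checked above.
 */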
/*----------------------------------------------------------------------*/

int ehca_map_phys_fmr(struct ib_fmr *fmr,
		      u64 *page_list,
		      int list_len,
		      u64 iova)
{
	int ret;
	struct ehca_shca *shca =
		container_of(fmr->device, struct ehca_shca, ib_device);
	struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);
	struct ehca_pd *e_pd = container_of(fmr->pd, struct ehca_pd, ib_pd);
	struct ehca_mr_pginfo pginfo;
	u32 tmp_lkey, tmp_rkey;

	if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
		ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x",
			 e_fmr, e_fmr->flags);
		ret = -EINVAL;
		goto map_phys_fmr_exit0;
	}
	ret = ehca_fmr_check_page_list(e_fmr, page_list, list_len);
	if (ret)
		goto map_phys_fmr_exit0;
	if (iova % e_fmr->fmr_page_size) {
		/* only whole-numbered pages */
		ehca_err(fmr->device, "bad iova, iova=%llx fmr_page_size=%x",
			 iova, e_fmr->fmr_page_size);
		ret = -EINVAL;
		goto map_phys_fmr_exit0;
	}
	if (e_fmr->fmr_map_cnt >= e_fmr->fmr_max_maps) {
		/* HCAD does not limit the maps, however trace this anyway */
		ehca_info(fmr->device, "map limit exceeded, fmr=%p "
			  "e_fmr->fmr_map_cnt=%x e_fmr->fmr_max_maps=%x",
			  fmr, e_fmr->fmr_map_cnt, e_fmr->fmr_max_maps);
	}

	memset(&pginfo, 0, sizeof(pginfo));
	pginfo.type = EHCA_MR_PGI_FMR;
	pginfo.num_kpages = list_len;
	pginfo.hwpage_size = e_fmr->hwpage_size;
	pginfo.num_hwpages =
		list_len * e_fmr->fmr_page_size / pginfo.hwpage_size;
	pginfo.u.fmr.page_list = page_list;
	pginfo.next_hwpage =
		(iova & (e_fmr->fmr_page_size-1)) / pginfo.hwpage_size;
	pginfo.u.fmr.fmr_pgsize = e_fmr->fmr_page_size;

	ret = ehca_rereg_mr(shca, e_fmr, (u64 *)iova,
			    list_len * e_fmr->fmr_page_size,
			    e_fmr->acl, e_pd, &pginfo, &tmp_lkey, &tmp_rkey);
	if (ret)
		goto map_phys_fmr_exit0;

	/* successful reregistration */
	e_fmr->fmr_map_cnt++;
	e_fmr->ib.ib_fmr.lkey = tmp_lkey;
	e_fmr->ib.ib_fmr.rkey = tmp_rkey;
	return 0;

map_phys_fmr_exit0:
	if (ret)
		ehca_err(fmr->device, "ret=%i fmr=%p page_list=%p list_len=%x "
			 "iova=%llx", ret, fmr, page_list, list_len, iova);
	return ret;
} /* end ehca_map_phys_fmr() */
/*----------------------------------------------------------------------*/

int ehca_unmap_fmr(struct list_head *fmr_list)
{
	int ret = 0;
	struct ib_fmr *ib_fmr;
	struct ehca_shca *shca = NULL;
	struct ehca_shca *prev_shca;
	struct ehca_mr *e_fmr;
	u32 num_fmr = 0;
	u32 unmap_fmr_cnt = 0;

	/* check all FMR belong to same SHCA, and check internal flag */
	list_for_each_entry(ib_fmr, fmr_list, list) {
		prev_shca = shca;
		if (!ib_fmr) {
			ehca_gen_err("bad fmr=%p in list", ib_fmr);
			ret = -EINVAL;
			goto unmap_fmr_exit0;
		}
		shca = container_of(ib_fmr->device, struct ehca_shca,
				    ib_device);
		e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr);
		if ((shca != prev_shca) && prev_shca) {
			ehca_err(&shca->ib_device, "SHCA mismatch, shca=%p "
				 "prev_shca=%p e_fmr=%p",
				 shca, prev_shca, e_fmr);
			ret = -EINVAL;
			goto unmap_fmr_exit0;
		}
		if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
			ehca_err(&shca->ib_device, "not a FMR, e_fmr=%p "
				 "e_fmr->flags=%x", e_fmr, e_fmr->flags);
			ret = -EINVAL;
			goto unmap_fmr_exit0;
		}
		num_fmr++;
	}

	/* loop over all FMRs to unmap */
	list_for_each_entry(ib_fmr, fmr_list, list) {
		unmap_fmr_cnt++;
		e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr);
		shca = container_of(ib_fmr->device, struct ehca_shca,
				    ib_device);
		ret = ehca_unmap_one_fmr(shca, e_fmr);
		if (ret) {
			/* unmap failed, stop unmapping of rest of FMRs */
			ehca_err(&shca->ib_device, "unmap of one FMR failed, "
				 "stop rest, e_fmr=%p num_fmr=%x "
				 "unmap_fmr_cnt=%x lkey=%x", e_fmr, num_fmr,
				 unmap_fmr_cnt, e_fmr->ib.ib_fmr.lkey);
			goto unmap_fmr_exit0;
		}
	}

unmap_fmr_exit0:
	if (ret)
		ehca_gen_err("ret=%i fmr_list=%p num_fmr=%x unmap_fmr_cnt=%x",
			     ret, fmr_list, num_fmr, unmap_fmr_cnt);
	return ret;
} /* end ehca_unmap_fmr() */
/*----------------------------------------------------------------------*/

int ehca_dealloc_fmr(struct ib_fmr *fmr)
{
	int ret;
	u64 h_ret;
	struct ehca_shca *shca =
		container_of(fmr->device, struct ehca_shca, ib_device);
	struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);

	if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
		ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x",
			 e_fmr, e_fmr->flags);
		ret = -EINVAL;
		goto free_fmr_exit0;
	}

	h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
	if (h_ret != H_SUCCESS) {
		ehca_err(fmr->device, "hipz_free_mr failed, h_ret=%lli e_fmr=%p "
			 "hca_hndl=%llx fmr_hndl=%llx fmr->lkey=%x",
			 h_ret, e_fmr, shca->ipz_hca_handle.handle,
			 e_fmr->ipz_mr_handle.handle, fmr->lkey);
		ret = ehca2ib_return_code(h_ret);
		goto free_fmr_exit0;
	}
	/* successful deregistration */
	ehca_mr_delete(e_fmr);
	return 0;

free_fmr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%i fmr=%p", ret, fmr);
	return ret;
} /* end ehca_dealloc_fmr() */
/*----------------------------------------------------------------------*/

static int ehca_reg_bmap_mr_rpages(struct ehca_shca *shca,
				   struct ehca_mr *e_mr,
				   struct ehca_mr_pginfo *pginfo);

int ehca_reg_mr(struct ehca_shca *shca,
		struct ehca_mr *e_mr,
		u64 *iova_start,
		u64 size,
		int acl,
		struct ehca_pd *e_pd,
		struct ehca_mr_pginfo *pginfo,
		u32 *lkey, /*OUT*/
		u32 *rkey, /*OUT*/
		enum ehca_reg_type reg_type)
{
	int ret;
	u64 h_ret;
	u32 hipz_acl;
	struct ehca_mr_hipzout_parms hipzout;

	ehca_mrmw_map_acl(acl, &hipz_acl);
	ehca_mrmw_set_pgsize_hipz_acl(pginfo->hwpage_size, &hipz_acl);
	if (ehca_use_hp_mr == 1)
		hipz_acl |= 0x00000001;

	h_ret = hipz_h_alloc_resource_mr(shca->ipz_hca_handle, e_mr,
					 (u64)iova_start, size, hipz_acl,
					 e_pd->fw_pd, &hipzout);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "hipz_alloc_mr failed, h_ret=%lli "
			 "hca_hndl=%llx", h_ret, shca->ipz_hca_handle.handle);
		ret = ehca2ib_return_code(h_ret);
		goto ehca_reg_mr_exit0;
	}

	e_mr->ipz_mr_handle = hipzout.handle;

	if (reg_type == EHCA_REG_BUSMAP_MR)
		ret = ehca_reg_bmap_mr_rpages(shca, e_mr, pginfo);
	else if (reg_type == EHCA_REG_MR)
		ret = ehca_reg_mr_rpages(shca, e_mr, pginfo);
	else
		ret = -EINVAL;

	if (ret)
		goto ehca_reg_mr_exit1;

	/* successful registration */
	e_mr->num_kpages = pginfo->num_kpages;
	e_mr->num_hwpages = pginfo->num_hwpages;
	e_mr->hwpage_size = pginfo->hwpage_size;
	e_mr->start = iova_start;
	e_mr->size = size;
	e_mr->acl = acl;
	*lkey = hipzout.lkey;
	*rkey = hipzout.rkey;
	return 0;

ehca_reg_mr_exit1:
	h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "h_ret=%lli shca=%p e_mr=%p "
			 "iova_start=%p size=%llx acl=%x e_pd=%p lkey=%x "
			 "pginfo=%p num_kpages=%llx num_hwpages=%llx ret=%i",
			 h_ret, shca, e_mr, iova_start, size, acl, e_pd,
			 hipzout.lkey, pginfo, pginfo->num_kpages,
			 pginfo->num_hwpages, ret);
		ehca_err(&shca->ib_device, "internal error in ehca_reg_mr, "
			 "not recoverable");
	}
ehca_reg_mr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p "
			 "iova_start=%p size=%llx acl=%x e_pd=%p pginfo=%p "
			 "num_kpages=%llx num_hwpages=%llx",
			 ret, shca, e_mr, iova_start, size, acl, e_pd, pginfo,
			 pginfo->num_kpages, pginfo->num_hwpages);
	return ret;
} /* end ehca_reg_mr() */
/*----------------------------------------------------------------------*/

int ehca_reg_mr_rpages(struct ehca_shca *shca,
		       struct ehca_mr *e_mr,
		       struct ehca_mr_pginfo *pginfo)
{
	int ret = 0;
	u64 h_ret;
	u32 rnum;
	u64 rpage;
	u32 i;
	u64 *kpage;

	if (!pginfo->num_hwpages) /* in case of fmr */
		return 0;

	kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	if (!kpage) {
		ehca_err(&shca->ib_device, "kpage alloc failed");
		ret = -ENOMEM;
		goto ehca_reg_mr_rpages_exit0;
	}

	/* max MAX_RPAGES ehca mr pages per register call */
	for (i = 0; i < NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES); i++) {

		if (i == NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES) - 1) {
			rnum = pginfo->num_hwpages % MAX_RPAGES; /* last shot */
			if (rnum == 0)
				rnum = MAX_RPAGES;      /* last shot is full */
		} else
			rnum = MAX_RPAGES;

		ret = ehca_set_pagebuf(pginfo, rnum, kpage);
		if (ret) {
			ehca_err(&shca->ib_device, "ehca_set_pagebuf "
				 "bad rc, ret=%i rnum=%x kpage=%p",
				 ret, rnum, kpage);
			goto ehca_reg_mr_rpages_exit1;
		}

		if (rnum > 1) {
			rpage = virt_to_abs(kpage);
			if (!rpage) {
				ehca_err(&shca->ib_device, "kpage=%p i=%x",
					 kpage, i);
				ret = -EFAULT;
				goto ehca_reg_mr_rpages_exit1;
			}
		} else
			rpage = *kpage;

		h_ret = hipz_h_register_rpage_mr(
			shca->ipz_hca_handle, e_mr,
			ehca_encode_hwpage_size(pginfo->hwpage_size),
			0, rpage, rnum);

		if (i == NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES) - 1) {
			/*
			 * check for 'registration complete'==H_SUCCESS
			 * and for 'page registered'==H_PAGE_REGISTERED
			 */
			if (h_ret != H_SUCCESS) {
				ehca_err(&shca->ib_device, "last "
					 "hipz_reg_rpage_mr failed, h_ret=%lli "
					 "e_mr=%p i=%x hca_hndl=%llx mr_hndl=%llx"
					 " lkey=%x", h_ret, e_mr, i,
					 shca->ipz_hca_handle.handle,
					 e_mr->ipz_mr_handle.handle,
					 e_mr->ib.ib_mr.lkey);
				ret = ehca2ib_return_code(h_ret);
				break;
			} else
				ret = 0;
		} else if (h_ret != H_PAGE_REGISTERED) {
			ehca_err(&shca->ib_device, "hipz_reg_rpage_mr failed, "
				 "h_ret=%lli e_mr=%p i=%x lkey=%x hca_hndl=%llx "
				 "mr_hndl=%llx", h_ret, e_mr, i,
				 e_mr->ib.ib_mr.lkey,
				 shca->ipz_hca_handle.handle,
				 e_mr->ipz_mr_handle.handle);
			ret = ehca2ib_return_code(h_ret);
			break;
		} else
			ret = 0;
	} /* end for(i) */

ehca_reg_mr_rpages_exit1:
	ehca_free_fw_ctrlblock(kpage);
ehca_reg_mr_rpages_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p pginfo=%p "
			 "num_kpages=%llx num_hwpages=%llx", ret, shca, e_mr,
			 pginfo, pginfo->num_kpages, pginfo->num_hwpages);
	return ret;
} /* end ehca_reg_mr_rpages() */
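/*
 * Illustrative worked example, not part of the original driver: registering
 * 1300 hwpages takes NUM_CHUNKS(1300, MAX_RPAGES) = 3 hcalls with
 * rnum = 512, 512 and 1300 % 512 = 276; only the final call is expected to
 * return H_SUCCESS, the earlier ones H_PAGE_REGISTERED.
 */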
/*----------------------------------------------------------------------*/

inline int ehca_rereg_mr_rereg1(struct ehca_shca *shca,
				struct ehca_mr *e_mr,
				u64 *iova_start,
				u64 size,
				u32 acl,
				struct ehca_pd *e_pd,
				struct ehca_mr_pginfo *pginfo,
				u32 *lkey, /*OUT*/
				u32 *rkey) /*OUT*/
{
	int ret;
	u64 h_ret;
	u32 hipz_acl;
	u64 *kpage;
	u64 rpage;
	struct ehca_mr_pginfo pginfo_save;
	struct ehca_mr_hipzout_parms hipzout;

	ehca_mrmw_map_acl(acl, &hipz_acl);
	ehca_mrmw_set_pgsize_hipz_acl(pginfo->hwpage_size, &hipz_acl);

	kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	if (!kpage) {
		ehca_err(&shca->ib_device, "kpage alloc failed");
		ret = -ENOMEM;
		goto ehca_rereg_mr_rereg1_exit0;
	}

	pginfo_save = *pginfo;
	ret = ehca_set_pagebuf(pginfo, pginfo->num_hwpages, kpage);
	if (ret) {
		ehca_err(&shca->ib_device, "set pagebuf failed, e_mr=%p "
			 "pginfo=%p type=%x num_kpages=%llx num_hwpages=%llx "
			 "kpage=%p", e_mr, pginfo, pginfo->type,
			 pginfo->num_kpages, pginfo->num_hwpages, kpage);
		goto ehca_rereg_mr_rereg1_exit1;
	}
	rpage = virt_to_abs(kpage);
	if (!rpage) {
		ehca_err(&shca->ib_device, "kpage=%p", kpage);
		ret = -EFAULT;
		goto ehca_rereg_mr_rereg1_exit1;
	}
	h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_mr,
				      (u64)iova_start, size, hipz_acl,
				      e_pd->fw_pd, rpage, &hipzout);
	if (h_ret != H_SUCCESS) {
		/*
		 * reregistration unsuccessful, try it again with the 3 hCalls,
		 * e.g. this is required in case H_MR_CONDITION
		 * (MW bound or MR is shared)
		 */
		ehca_warn(&shca->ib_device, "hipz_h_reregister_pmr failed "
			  "(Rereg1), h_ret=%lli e_mr=%p", h_ret, e_mr);
		*pginfo = pginfo_save;
		ret = -EAGAIN;
	} else if ((u64 *)hipzout.vaddr != iova_start) {
		ehca_err(&shca->ib_device, "PHYP changed iova_start in "
			 "rereg_pmr, iova_start=%p iova_start_out=%llx e_mr=%p "
			 "mr_handle=%llx lkey=%x lkey_out=%x", iova_start,
			 hipzout.vaddr, e_mr, e_mr->ipz_mr_handle.handle,
			 e_mr->ib.ib_mr.lkey, hipzout.lkey);
		ret = -EFAULT;
	} else {
		/*
		 * successful reregistration
		 * note: start and start_out are identical for eServer HCAs
		 */
		e_mr->num_kpages = pginfo->num_kpages;
		e_mr->num_hwpages = pginfo->num_hwpages;
		e_mr->hwpage_size = pginfo->hwpage_size;
		e_mr->start = iova_start;
		e_mr->size = size;
		e_mr->acl = acl;
		*lkey = hipzout.lkey;
		*rkey = hipzout.rkey;
	}

ehca_rereg_mr_rereg1_exit1:
	ehca_free_fw_ctrlblock(kpage);
ehca_rereg_mr_rereg1_exit0:
	if ( ret && (ret != -EAGAIN) )
		ehca_err(&shca->ib_device, "ret=%i lkey=%x rkey=%x "
			 "pginfo=%p num_kpages=%llx num_hwpages=%llx",
			 ret, *lkey, *rkey, pginfo, pginfo->num_kpages,
			 pginfo->num_hwpages);
	return ret;
} /* end ehca_rereg_mr_rereg1() */
/*----------------------------------------------------------------------*/

int ehca_rereg_mr(struct ehca_shca *shca,
		  struct ehca_mr *e_mr,
		  u64 *iova_start,
		  u64 size,
		  int acl,
		  struct ehca_pd *e_pd,
		  struct ehca_mr_pginfo *pginfo,
		  u32 *lkey,
		  u32 *rkey)
{
	int ret = 0;
	u64 h_ret;
	int rereg_1_hcall = 1; /* 1: use hipz_h_reregister_pmr directly */
	int rereg_3_hcall = 0; /* 1: use 3 hipz calls for reregistration */

	/* first determine reregistration hCall(s) */
	if ((pginfo->num_hwpages > MAX_RPAGES) ||
	    (e_mr->num_hwpages > MAX_RPAGES) ||
	    (pginfo->num_hwpages > e_mr->num_hwpages)) {
		ehca_dbg(&shca->ib_device, "Rereg3 case, "
			 "pginfo->num_hwpages=%llx e_mr->num_hwpages=%x",
			 pginfo->num_hwpages, e_mr->num_hwpages);
		rereg_1_hcall = 0;
		rereg_3_hcall = 1;
	}

	if (e_mr->flags & EHCA_MR_FLAG_MAXMR) { /* check for max-MR */
		rereg_1_hcall = 0;
		rereg_3_hcall = 1;
		e_mr->flags &= ~EHCA_MR_FLAG_MAXMR;
		ehca_err(&shca->ib_device, "Rereg MR for max-MR! e_mr=%p",
			 e_mr);
	}

	if (rereg_1_hcall) {
		ret = ehca_rereg_mr_rereg1(shca, e_mr, iova_start, size,
					   acl, e_pd, pginfo, lkey, rkey);
		if (ret) {
			if (ret == -EAGAIN)
				rereg_3_hcall = 1;
			else
				goto ehca_rereg_mr_exit0;
		}
	}

	if (rereg_3_hcall) {
		struct ehca_mr save_mr;

		/* first deregister old MR */
		h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
		if (h_ret != H_SUCCESS) {
			ehca_err(&shca->ib_device, "hipz_free_mr failed, "
				 "h_ret=%lli e_mr=%p hca_hndl=%llx mr_hndl=%llx "
				 "mr->lkey=%x",
				 h_ret, e_mr, shca->ipz_hca_handle.handle,
				 e_mr->ipz_mr_handle.handle,
				 e_mr->ib.ib_mr.lkey);
			ret = ehca2ib_return_code(h_ret);
			goto ehca_rereg_mr_exit0;
		}
		/* clean ehca_mr_t, without changing struct ib_mr and lock */
		save_mr = *e_mr;
		ehca_mr_deletenew(e_mr);

		/* set some MR values */
		e_mr->flags = save_mr.flags;
		e_mr->hwpage_size = save_mr.hwpage_size;
		e_mr->fmr_page_size = save_mr.fmr_page_size;
		e_mr->fmr_max_pages = save_mr.fmr_max_pages;
		e_mr->fmr_max_maps = save_mr.fmr_max_maps;
		e_mr->fmr_map_cnt = save_mr.fmr_map_cnt;

		ret = ehca_reg_mr(shca, e_mr, iova_start, size, acl,
				  e_pd, pginfo, lkey, rkey, EHCA_REG_MR);
		if (ret) {
			u32 offset = (u64)(&e_mr->flags) - (u64)e_mr;
			memcpy(&e_mr->flags, &(save_mr.flags),
			       sizeof(struct ehca_mr) - offset);
			goto ehca_rereg_mr_exit0;
		}
	}

ehca_rereg_mr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p "
			 "iova_start=%p size=%llx acl=%x e_pd=%p pginfo=%p "
			 "num_kpages=%llx lkey=%x rkey=%x rereg_1_hcall=%x "
			 "rereg_3_hcall=%x", ret, shca, e_mr, iova_start, size,
			 acl, e_pd, pginfo, pginfo->num_kpages, *lkey, *rkey,
			 rereg_1_hcall, rereg_3_hcall);
	return ret;
} /* end ehca_rereg_mr() */
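/*
 * Illustrative example, not part of the original driver: re-registering an
 * MR that grew from 400 to 600 hwpages exceeds MAX_RPAGES (512), so the
 * single reregister hcall cannot be used and the deregister-and-reregister
 * ("Rereg3") path above is taken instead.
 */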
/*----------------------------------------------------------------------*/

int ehca_unmap_one_fmr(struct ehca_shca *shca,
		       struct ehca_mr *e_fmr)
{
	int ret = 0;
	u64 h_ret;
	struct ehca_pd *e_pd =
		container_of(e_fmr->ib.ib_fmr.pd, struct ehca_pd, ib_pd);
	struct ehca_mr save_fmr;
	u32 tmp_lkey, tmp_rkey;
	struct ehca_mr_pginfo pginfo;
	struct ehca_mr_hipzout_parms hipzout;
	struct ehca_mr save_mr;

	if (e_fmr->fmr_max_pages <= MAX_RPAGES) {
		/*
		 * note: after using rereg hcall with len=0,
		 * rereg hcall must be used again for registering pages
		 */
		h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_fmr, 0,
					      0, 0, e_pd->fw_pd, 0, &hipzout);
		if (h_ret == H_SUCCESS) {
			/* successful reregistration */
			e_fmr->start = NULL;
			e_fmr->size = 0;
			tmp_lkey = hipzout.lkey;
			tmp_rkey = hipzout.rkey;
			return 0;
		}
		/*
		 * should not happen, because length checked above,
		 * FMRs are not shared and no MW bound to FMRs
		 */
		ehca_err(&shca->ib_device, "hipz_reregister_pmr failed "
			 "(Rereg1), h_ret=%lli e_fmr=%p hca_hndl=%llx "
			 "mr_hndl=%llx lkey=%x lkey_out=%x",
			 h_ret, e_fmr, shca->ipz_hca_handle.handle,
			 e_fmr->ipz_mr_handle.handle,
			 e_fmr->ib.ib_fmr.lkey, hipzout.lkey);
		/* try free and rereg */
	}

	/* first free old FMR */
	h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "hipz_free_mr failed, "
			 "h_ret=%lli e_fmr=%p hca_hndl=%llx mr_hndl=%llx "
			 "lkey=%x",
			 h_ret, e_fmr, shca->ipz_hca_handle.handle,
			 e_fmr->ipz_mr_handle.handle,
			 e_fmr->ib.ib_fmr.lkey);
		ret = ehca2ib_return_code(h_ret);
		goto ehca_unmap_one_fmr_exit0;
	}
	/* clean ehca_mr_t, without changing lock */
	save_fmr = *e_fmr;
	ehca_mr_deletenew(e_fmr);

	/* set some MR values */
	e_fmr->flags = save_fmr.flags;
	e_fmr->hwpage_size = save_fmr.hwpage_size;
	e_fmr->fmr_page_size = save_fmr.fmr_page_size;
	e_fmr->fmr_max_pages = save_fmr.fmr_max_pages;
	e_fmr->fmr_max_maps = save_fmr.fmr_max_maps;
	e_fmr->fmr_map_cnt = save_fmr.fmr_map_cnt;
	e_fmr->acl = save_fmr.acl;

	memset(&pginfo, 0, sizeof(pginfo));
	pginfo.type = EHCA_MR_PGI_FMR;
	ret = ehca_reg_mr(shca, e_fmr, NULL,
			  (e_fmr->fmr_max_pages * e_fmr->fmr_page_size),
			  e_fmr->acl, e_pd, &pginfo, &tmp_lkey,
			  &tmp_rkey, EHCA_REG_MR);
	if (ret) {
		u32 offset = (u64)(&e_fmr->flags) - (u64)e_fmr;
		memcpy(&e_fmr->flags, &(save_mr.flags),
		       sizeof(struct ehca_mr) - offset);
	}

ehca_unmap_one_fmr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%i tmp_lkey=%x tmp_rkey=%x "
			 "fmr_max_pages=%x",
			 ret, tmp_lkey, tmp_rkey, e_fmr->fmr_max_pages);
	return ret;
} /* end ehca_unmap_one_fmr() */
/*----------------------------------------------------------------------*/

int ehca_reg_smr(struct ehca_shca *shca,
		 struct ehca_mr *e_origmr,
		 struct ehca_mr *e_newmr,
		 u64 *iova_start,
		 int acl,
		 struct ehca_pd *e_pd,
		 u32 *lkey, /*OUT*/
		 u32 *rkey) /*OUT*/
{
	int ret = 0;
	u64 h_ret;
	u32 hipz_acl;
	struct ehca_mr_hipzout_parms hipzout;

	ehca_mrmw_map_acl(acl, &hipz_acl);
	ehca_mrmw_set_pgsize_hipz_acl(e_origmr->hwpage_size, &hipz_acl);

	h_ret = hipz_h_register_smr(shca->ipz_hca_handle, e_newmr, e_origmr,
				    (u64)iova_start, hipz_acl, e_pd->fw_pd,
				    &hipzout);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lli "
			 "shca=%p e_origmr=%p e_newmr=%p iova_start=%p acl=%x "
			 "e_pd=%p hca_hndl=%llx mr_hndl=%llx lkey=%x",
			 h_ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd,
			 shca->ipz_hca_handle.handle,
			 e_origmr->ipz_mr_handle.handle,
			 e_origmr->ib.ib_mr.lkey);
		ret = ehca2ib_return_code(h_ret);
		goto ehca_reg_smr_exit0;
	}
	/* successful registration */
	e_newmr->num_kpages = e_origmr->num_kpages;
	e_newmr->num_hwpages = e_origmr->num_hwpages;
	e_newmr->hwpage_size = e_origmr->hwpage_size;
	e_newmr->start = iova_start;
	e_newmr->size = e_origmr->size;
	e_newmr->acl = acl;
	e_newmr->ipz_mr_handle = hipzout.handle;
	*lkey = hipzout.lkey;
	*rkey = hipzout.rkey;
	return 0;

ehca_reg_smr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%i shca=%p e_origmr=%p "
			 "e_newmr=%p iova_start=%p acl=%x e_pd=%p",
			 ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd);
	return ret;
} /* end ehca_reg_smr() */
/*----------------------------------------------------------------------*/
static inline void *ehca_calc_sectbase(int top, int dir, int idx)
{
	unsigned long ret = idx;
	ret |= dir << EHCA_DIR_INDEX_SHIFT;
	ret |= top << EHCA_TOP_INDEX_SHIFT;
	return abs_to_virt(ret << SECTION_SIZE_BITS);
}
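/*
 * Illustrative worked example, not part of the original driver, assuming
 * SECTION_SIZE_BITS == 24 (16M sections, as commonly configured on ppc64):
 * top=0, dir=1, idx=2 yields section number (1 << 13) | 2 = 0x2002, i.e. the
 * kernel-mapped virtual address of physical 0x2002 << 24 = 0x2002000000.
 */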
#define ehca_bmap_valid(entry) \
	((u64)entry != (u64)EHCA_INVAL_ADDR)
static u64 ehca_reg_mr_section(int top, int dir, int idx, u64 *kpage,
			       struct ehca_shca *shca, struct ehca_mr *mr,
			       struct ehca_mr_pginfo *pginfo)
{
	u64 h_ret = 0;
	unsigned long page = 0;
	u64 rpage = virt_to_abs(kpage);
	int page_count;

	void *sectbase = ehca_calc_sectbase(top, dir, idx);
	if ((unsigned long)sectbase & (pginfo->hwpage_size - 1)) {
		ehca_err(&shca->ib_device, "reg_mr_section will probably fail:"
					   "hwpage_size does not fit to "
					   "section start address");
	}
	page_count = EHCA_SECTSIZE / pginfo->hwpage_size;

	while (page < page_count) {
		u64 rnum;
		for (rnum = 0; (rnum < MAX_RPAGES) && (page < page_count);
		     rnum++) {
			void *pg = sectbase + ((page++) * pginfo->hwpage_size);
			kpage[rnum] = virt_to_abs(pg);
		}

		h_ret = hipz_h_register_rpage_mr(shca->ipz_hca_handle, mr,
			ehca_encode_hwpage_size(pginfo->hwpage_size),
			0, rpage, rnum);

		if ((h_ret != H_SUCCESS) && (h_ret != H_PAGE_REGISTERED)) {
			ehca_err(&shca->ib_device, "register_rpage_mr failed");
			return h_ret;
		}
	}
	return h_ret;
}
static u64 ehca_reg_mr_sections(int top, int dir, u64 *kpage,
				struct ehca_shca *shca, struct ehca_mr *mr,
				struct ehca_mr_pginfo *pginfo)
{
	u64 hret = H_SUCCESS;
	int idx;

	for (idx = 0; idx < EHCA_MAP_ENTRIES; idx++) {
		if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]->ent[idx]))
			continue;

		hret = ehca_reg_mr_section(top, dir, idx, kpage, shca, mr,
					   pginfo);
		if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
			return hret;
	}
	return hret;
}
ehca_reg_mr_dir_sections(int top
, u64
*kpage
, struct ehca_shca
*shca
,
1597 struct ehca_mr_pginfo
*pginfo
)
1599 u64 hret
= H_SUCCESS
;
1602 for (dir
= 0; dir
< EHCA_MAP_ENTRIES
; dir
++) {
1603 if (!ehca_bmap_valid(ehca_bmap
->top
[top
]->dir
[dir
]))
1606 hret
= ehca_reg_mr_sections(top
, dir
, kpage
, shca
, mr
, pginfo
);
1607 if ((hret
!= H_SUCCESS
) && (hret
!= H_PAGE_REGISTERED
))
/* register internal max-MR to internal SHCA */
int ehca_reg_internal_maxmr(
	struct ehca_shca *shca,
	struct ehca_pd *e_pd,
	struct ehca_mr **e_maxmr)  /*OUT*/
{
	int ret;
	struct ehca_mr *e_mr;
	u64 *iova_start;
	u64 size_maxmr;
	struct ehca_mr_pginfo pginfo;
	struct ib_phys_buf ib_pbuf;
	u32 num_kpages;
	u32 num_hwpages;
	u64 hw_pgsize;

	if (!ehca_bmap) {
		ret = -EFAULT;
		goto ehca_reg_internal_maxmr_exit0;
	}

	e_mr = ehca_mr_new();
	if (!e_mr) {
		ehca_err(&shca->ib_device, "out of memory");
		ret = -ENOMEM;
		goto ehca_reg_internal_maxmr_exit0;
	}
	e_mr->flags |= EHCA_MR_FLAG_MAXMR;

	/* register internal max-MR on HCA */
	size_maxmr = ehca_mr_len;
	iova_start = (u64 *)ehca_map_vaddr((void *)KERNELBASE);
	ib_pbuf.addr = 0;
	ib_pbuf.size = size_maxmr;
	num_kpages = NUM_CHUNKS(((u64)iova_start % PAGE_SIZE) + size_maxmr,
				PAGE_SIZE);
	hw_pgsize = ehca_get_max_hwpage_size(shca);
	num_hwpages = NUM_CHUNKS(((u64)iova_start % hw_pgsize) + size_maxmr,
				 hw_pgsize);

	memset(&pginfo, 0, sizeof(pginfo));
	pginfo.type = EHCA_MR_PGI_PHYS;
	pginfo.num_kpages = num_kpages;
	pginfo.num_hwpages = num_hwpages;
	pginfo.hwpage_size = hw_pgsize;
	pginfo.u.phy.num_phys_buf = 1;
	pginfo.u.phy.phys_buf_array = &ib_pbuf;

	ret = ehca_reg_mr(shca, e_mr, iova_start, size_maxmr, 0, e_pd,
			  &pginfo, &e_mr->ib.ib_mr.lkey,
			  &e_mr->ib.ib_mr.rkey, EHCA_REG_BUSMAP_MR);
	if (ret) {
		ehca_err(&shca->ib_device, "reg of internal max MR failed, "
			 "e_mr=%p iova_start=%p size_maxmr=%llx num_kpages=%x "
			 "num_hwpages=%x", e_mr, iova_start, size_maxmr,
			 num_kpages, num_hwpages);
		goto ehca_reg_internal_maxmr_exit1;
	}

	/* successful registration of all pages */
	e_mr->ib.ib_mr.device = e_pd->ib_pd.device;
	e_mr->ib.ib_mr.pd = &e_pd->ib_pd;
	e_mr->ib.ib_mr.uobject = NULL;
	atomic_inc(&(e_pd->ib_pd.usecnt));
	atomic_set(&(e_mr->ib.ib_mr.usecnt), 0);
	*e_maxmr = e_mr;
	return 0;

ehca_reg_internal_maxmr_exit1:
	ehca_mr_delete(e_mr);
ehca_reg_internal_maxmr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%i shca=%p e_pd=%p e_maxmr=%p",
			 ret, shca, e_pd, e_maxmr);
	return ret;
} /* end ehca_reg_internal_maxmr() */
/*----------------------------------------------------------------------*/

int ehca_reg_maxmr(struct ehca_shca *shca,
		   struct ehca_mr *e_newmr,
		   u64 *iova_start,
		   int acl,
		   struct ehca_pd *e_pd,
		   u32 *lkey,
		   u32 *rkey)
{
	u64 h_ret;
	struct ehca_mr *e_origmr = shca->maxmr;
	u32 hipz_acl;
	struct ehca_mr_hipzout_parms hipzout;

	ehca_mrmw_map_acl(acl, &hipz_acl);
	ehca_mrmw_set_pgsize_hipz_acl(e_origmr->hwpage_size, &hipz_acl);

	h_ret = hipz_h_register_smr(shca->ipz_hca_handle, e_newmr, e_origmr,
				    (u64)iova_start, hipz_acl, e_pd->fw_pd,
				    &hipzout);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lli "
			 "e_origmr=%p hca_hndl=%llx mr_hndl=%llx lkey=%x",
			 h_ret, e_origmr, shca->ipz_hca_handle.handle,
			 e_origmr->ipz_mr_handle.handle,
			 e_origmr->ib.ib_mr.lkey);
		return ehca2ib_return_code(h_ret);
	}
	/* successful registration */
	e_newmr->num_kpages = e_origmr->num_kpages;
	e_newmr->num_hwpages = e_origmr->num_hwpages;
	e_newmr->hwpage_size = e_origmr->hwpage_size;
	e_newmr->start = iova_start;
	e_newmr->size = e_origmr->size;
	e_newmr->acl = acl;
	e_newmr->ipz_mr_handle = hipzout.handle;
	*lkey = hipzout.lkey;
	*rkey = hipzout.rkey;
	return 0;
} /* end ehca_reg_maxmr() */
/*----------------------------------------------------------------------*/

int ehca_dereg_internal_maxmr(struct ehca_shca *shca)
{
	int ret;
	struct ehca_mr *e_maxmr;
	struct ib_pd *ib_pd;

	if (!shca->maxmr) {
		ehca_err(&shca->ib_device, "bad call, shca=%p", shca);
		ret = -EINVAL;
		goto ehca_dereg_internal_maxmr_exit0;
	}

	e_maxmr = shca->maxmr;
	ib_pd = e_maxmr->ib.ib_mr.pd;
	shca->maxmr = NULL; /* remove internal max-MR indication from SHCA */

	ret = ehca_dereg_mr(&e_maxmr->ib.ib_mr);
	if (ret) {
		ehca_err(&shca->ib_device, "dereg internal max-MR failed, "
			 "ret=%i e_maxmr=%p shca=%p lkey=%x",
			 ret, e_maxmr, shca, e_maxmr->ib.ib_mr.lkey);
		shca->maxmr = e_maxmr;
		goto ehca_dereg_internal_maxmr_exit0;
	}

	atomic_dec(&ib_pd->usecnt);

ehca_dereg_internal_maxmr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%i shca=%p shca->maxmr=%p",
			 ret, shca, shca->maxmr);
	return ret;
} /* end ehca_dereg_internal_maxmr() */
/*----------------------------------------------------------------------*/

/*
 * check physical buffer array of MR verbs for validness and
 * calculates MR size
 */
int ehca_mr_chk_buf_and_calc_size(struct ib_phys_buf *phys_buf_array,
				  int num_phys_buf,
				  u64 *iova_start,
				  u64 *size)
{
	struct ib_phys_buf *pbuf = phys_buf_array;
	u64 size_count = 0;
	u32 i;

	if (num_phys_buf == 0) {
		ehca_gen_err("bad phys buf array len, num_phys_buf=0");
		return -EINVAL;
	}
	/* check first buffer */
	if (((u64)iova_start & ~PAGE_MASK) != (pbuf->addr & ~PAGE_MASK)) {
		ehca_gen_err("iova_start/addr mismatch, iova_start=%p "
			     "pbuf->addr=%llx pbuf->size=%llx",
			     iova_start, pbuf->addr, pbuf->size);
		return -EINVAL;
	}
	if (((pbuf->addr + pbuf->size) % PAGE_SIZE) &&
	    (num_phys_buf > 1)) {
		ehca_gen_err("addr/size mismatch in 1st buf, pbuf->addr=%llx "
			     "pbuf->size=%llx", pbuf->addr, pbuf->size);
		return -EINVAL;
	}

	for (i = 0; i < num_phys_buf; i++) {
		if ((i > 0) && (pbuf->addr % PAGE_SIZE)) {
			ehca_gen_err("bad address, i=%x pbuf->addr=%llx "
				     "pbuf->size=%llx",
				     i, pbuf->addr, pbuf->size);
			return -EINVAL;
		}
		if (((i > 0) &&	/* not 1st */
		     (i < (num_phys_buf - 1)) &&	/* not last */
		     (pbuf->size % PAGE_SIZE)) || (pbuf->size == 0)) {
			ehca_gen_err("bad size, i=%x pbuf->size=%llx",
				     i, pbuf->size);
			return -EINVAL;
		}
		size_count += pbuf->size;
		pbuf++;
	}

	*size = size_count;
	return 0;
} /* end ehca_mr_chk_buf_and_calc_size() */
/*----------------------------------------------------------------------*/

/* check page list of map FMR verb for validness */
int ehca_fmr_check_page_list(struct ehca_mr *e_fmr,
			     u64 *page_list,
			     int list_len)
{
	u32 i;
	u64 *page;

	if ((list_len == 0) || (list_len > e_fmr->fmr_max_pages)) {
		ehca_gen_err("bad list_len, list_len=%x "
			     "e_fmr->fmr_max_pages=%x fmr=%p",
			     list_len, e_fmr->fmr_max_pages, e_fmr);
		return -EINVAL;
	}

	/* each page must be aligned */
	page = page_list;
	for (i = 0; i < list_len; i++) {
		if (*page % e_fmr->fmr_page_size) {
			ehca_gen_err("bad page, i=%x *page=%llx page=%p fmr=%p "
				     "fmr_page_size=%x", i, *page, page, e_fmr,
				     e_fmr->fmr_page_size);
			return -EINVAL;
		}
		page++;
	}

	return 0;
} /* end ehca_fmr_check_page_list() */
/*----------------------------------------------------------------------*/

/* PAGE_SIZE >= pginfo->hwpage_size */
static int ehca_set_pagebuf_user1(struct ehca_mr_pginfo *pginfo,
				  u32 number,
				  u64 *kpage)
{
	int ret = 0;
	struct ib_umem_chunk *prev_chunk;
	struct ib_umem_chunk *chunk;
	u64 pgaddr;
	u32 i = 0;
	u32 j = 0;
	int hwpages_per_kpage = PAGE_SIZE / pginfo->hwpage_size;

	/* loop over desired chunk entries */
	chunk      = pginfo->u.usr.next_chunk;
	prev_chunk = pginfo->u.usr.next_chunk;
	list_for_each_entry_continue(
		chunk, (&(pginfo->u.usr.region->chunk_list)), list) {
		for (i = pginfo->u.usr.next_nmap; i < chunk->nmap; ) {
			pgaddr = page_to_pfn(sg_page(&chunk->page_list[i]))
				<< PAGE_SHIFT;
			*kpage = phys_to_abs(pgaddr +
					     (pginfo->next_hwpage *
					      pginfo->hwpage_size));
			if ( !(*kpage) ) {
				ehca_gen_err("pgaddr=%llx "
					     "chunk->page_list[i]=%llx "
					     "i=%x next_hwpage=%llx",
					     pgaddr, (u64)sg_dma_address(
						     &chunk->page_list[i]),
					     i, pginfo->next_hwpage);
				return -EFAULT;
			}
			(pginfo->hwpage_cnt)++;
			(pginfo->next_hwpage)++;
			kpage++;
			if (pginfo->next_hwpage % hwpages_per_kpage == 0) {
				(pginfo->kpage_cnt)++;
				(pginfo->u.usr.next_nmap)++;
				pginfo->next_hwpage = 0;
				i++;
			}
			j++;
			if (j >= number) break;
		}
		if ((pginfo->u.usr.next_nmap >= chunk->nmap) &&
		    (j >= number)) {
			pginfo->u.usr.next_nmap = 0;
			prev_chunk = chunk;
			break;
		} else if (pginfo->u.usr.next_nmap >= chunk->nmap) {
			pginfo->u.usr.next_nmap = 0;
			prev_chunk = chunk;
		} else if (j >= number)
			break;
		else
			prev_chunk = chunk;
	}
	pginfo->u.usr.next_chunk =
		list_prepare_entry(prev_chunk,
				   (&(pginfo->u.usr.region->chunk_list)),
				   list);
	return ret;
}
/*
 * check given pages for contiguous layout
 * last page addr is returned in prev_pgaddr for further check
 */
static int ehca_check_kpages_per_ate(struct scatterlist *page_list,
				     int start_idx, int end_idx,
				     u64 *prev_pgaddr)
{
	int t;
	for (t = start_idx; t <= end_idx; t++) {
		u64 pgaddr = page_to_pfn(sg_page(&page_list[t])) << PAGE_SHIFT;
		if (ehca_debug_level >= 3)
			ehca_gen_dbg("chunk_page=%llx value=%016llx", pgaddr,
				     *(u64 *)abs_to_virt(phys_to_abs(pgaddr)));
		if (pgaddr - PAGE_SIZE != *prev_pgaddr) {
			ehca_gen_err("uncontiguous page found pgaddr=%llx "
				     "prev_pgaddr=%llx page_list_i=%x",
				     pgaddr, *prev_pgaddr, t);
			return -EINVAL;
		}
		*prev_pgaddr = pgaddr;
	}
	return 0;
}
/* PAGE_SIZE < pginfo->hwpage_size */
static int ehca_set_pagebuf_user2(struct ehca_mr_pginfo *pginfo,
				  u32 number,
				  u64 *kpage)
{
	int ret = 0;
	struct ib_umem_chunk *prev_chunk;
	struct ib_umem_chunk *chunk;
	u64 pgaddr, prev_pgaddr;
	u32 i = 0;
	u32 j = 0;
	int kpages_per_hwpage = pginfo->hwpage_size / PAGE_SIZE;
	int nr_kpages = kpages_per_hwpage;

	/* loop over desired chunk entries */
	chunk      = pginfo->u.usr.next_chunk;
	prev_chunk = pginfo->u.usr.next_chunk;
	list_for_each_entry_continue(
		chunk, (&(pginfo->u.usr.region->chunk_list)), list) {
		for (i = pginfo->u.usr.next_nmap; i < chunk->nmap; ) {
			if (nr_kpages == kpages_per_hwpage) {
				pgaddr = ( page_to_pfn(sg_page(&chunk->page_list[i]))
					   << PAGE_SHIFT );
				*kpage = phys_to_abs(pgaddr);
				if ( !(*kpage) ) {
					ehca_gen_err("pgaddr=%llx i=%x",
						     pgaddr, i);
					ret = -EFAULT;
					return ret;
				}
				/*
				 * The first page in a hwpage must be aligned;
				 * the first MR page is exempt from this rule.
				 */
				if (pgaddr & (pginfo->hwpage_size - 1)) {
					if (pginfo->hwpage_cnt) {
						ehca_gen_err(
							"invalid alignment "
							"pgaddr=%llx i=%x "
							"mr_pgsize=%llx",
							pgaddr, i,
							pginfo->hwpage_size);
						ret = -EFAULT;
						return ret;
					}
					/* first MR page */
					pginfo->kpage_cnt =
						(pgaddr &
						 (pginfo->hwpage_size - 1)) >>
						PAGE_SHIFT;
					nr_kpages -= pginfo->kpage_cnt;
					*kpage = phys_to_abs(
						pgaddr &
						~(pginfo->hwpage_size - 1));
				}
				if (ehca_debug_level >= 3) {
					u64 val = *(u64 *)abs_to_virt(
						phys_to_abs(pgaddr));
					ehca_gen_dbg("kpage=%llx chunk_page=%llx "
						     "value=%016llx",
						     *kpage, pgaddr, val);
				}
				prev_pgaddr = pgaddr;
				i++;
				pginfo->kpage_cnt++;
				pginfo->u.usr.next_nmap++;
				nr_kpages--;
				if (!nr_kpages)
					goto next_kpage;
				continue;
			}
			if (i + nr_kpages > chunk->nmap) {
				ret = ehca_check_kpages_per_ate(
					chunk->page_list, i,
					chunk->nmap - 1, &prev_pgaddr);
				if (ret) return ret;
				pginfo->kpage_cnt += chunk->nmap - i;
				pginfo->u.usr.next_nmap += chunk->nmap - i;
				nr_kpages -= chunk->nmap - i;
				break;
			}

			ret = ehca_check_kpages_per_ate(chunk->page_list, i,
							i + nr_kpages - 1,
							&prev_pgaddr);
			if (ret) return ret;
			i += nr_kpages;
			pginfo->kpage_cnt += nr_kpages;
			pginfo->u.usr.next_nmap += nr_kpages;
next_kpage:
			nr_kpages = kpages_per_hwpage;
			(pginfo->hwpage_cnt)++;
			kpage++;
			j++;
			if (j >= number) break;
		}
		if ((pginfo->u.usr.next_nmap >= chunk->nmap) &&
		    (j >= number)) {
			pginfo->u.usr.next_nmap = 0;
			prev_chunk = chunk;
			break;
		} else if (pginfo->u.usr.next_nmap >= chunk->nmap) {
			pginfo->u.usr.next_nmap = 0;
			prev_chunk = chunk;
		} else if (j >= number)
			break;
		else
			prev_chunk = chunk;
	}
	pginfo->u.usr.next_chunk =
		list_prepare_entry(prev_chunk,
				   (&(pginfo->u.usr.region->chunk_list)),
				   list);
	return ret;
}
static int ehca_set_pagebuf_phys(struct ehca_mr_pginfo *pginfo,
				 u32 number, u64 *kpage)
{
	int ret = 0;
	struct ib_phys_buf *pbuf;
	u64 num_hw, offs_hw;
	u32 i = 0;

	/* loop over desired phys_buf_array entries */
	while (i < number) {
		pbuf   = pginfo->u.phy.phys_buf_array + pginfo->u.phy.next_buf;
		num_hw  = NUM_CHUNKS((pbuf->addr % pginfo->hwpage_size) +
				     pbuf->size, pginfo->hwpage_size);
		offs_hw = (pbuf->addr & ~(pginfo->hwpage_size - 1)) /
			pginfo->hwpage_size;
		while (pginfo->next_hwpage < offs_hw + num_hw) {
			/* sanity check */
			if ((pginfo->kpage_cnt >= pginfo->num_kpages) ||
			    (pginfo->hwpage_cnt >= pginfo->num_hwpages)) {
				ehca_gen_err("kpage_cnt >= num_kpages, "
					     "kpage_cnt=%llx num_kpages=%llx "
					     "hwpage_cnt=%llx "
					     "num_hwpages=%llx i=%x",
					     pginfo->kpage_cnt,
					     pginfo->num_kpages,
					     pginfo->hwpage_cnt,
					     pginfo->num_hwpages, i);
				return -EFAULT;
			}
			*kpage = phys_to_abs(
				(pbuf->addr & ~(pginfo->hwpage_size - 1)) +
				(pginfo->next_hwpage * pginfo->hwpage_size));
			if (!(*kpage) && pbuf->addr) {
				ehca_gen_err("pbuf->addr=%llx pbuf->size=%llx "
					     "next_hwpage=%llx", pbuf->addr,
					     pbuf->size, pginfo->next_hwpage);
				return -EFAULT;
			}
			(pginfo->hwpage_cnt)++;
			(pginfo->next_hwpage)++;
			if (PAGE_SIZE >= pginfo->hwpage_size) {
				if (pginfo->next_hwpage %
				    (PAGE_SIZE / pginfo->hwpage_size) == 0)
					(pginfo->kpage_cnt)++;
			} else
				pginfo->kpage_cnt += pginfo->hwpage_size /
					PAGE_SIZE;
			kpage++;
			i++;
			if (i >= number) break;
		}
		if (pginfo->next_hwpage >= offs_hw + num_hw) {
			(pginfo->u.phy.next_buf)++;
			pginfo->next_hwpage = 0;
		}
	}
	return ret;
}
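/*
 * Worked example for the hwpage bookkeeping above (illustrative values):
 * a phys_buf with addr = 0x1801000 and size = 0x5000 under a 64 KiB
 * hwpage_size gives
 *	num_hw  = NUM_CHUNKS(0x1000 + 0x5000, 0x10000) = 1
 *	offs_hw = (0x1801000 & ~0xffff) / 0x10000     = 0x180
 * so exactly one hardware page, starting at the 64 KiB boundary
 * 0x1800000, covers the whole buffer.
 */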
static int ehca_set_pagebuf_fmr(struct ehca_mr_pginfo *pginfo,
				u32 number, u64 *kpage)
{
	int ret = 0;
	u64 *fmrlist;
	u32 i;

	/* loop over desired page_list entries */
	fmrlist = pginfo->u.fmr.page_list + pginfo->u.fmr.next_listelem;
	for (i = 0; i < number; i++) {
		*kpage = phys_to_abs((*fmrlist & ~(pginfo->hwpage_size - 1)) +
				     pginfo->next_hwpage * pginfo->hwpage_size);
		if (!(*kpage)) {
			ehca_gen_err("*fmrlist=%llx fmrlist=%p "
				     "next_listelem=%llx next_hwpage=%llx",
				     *fmrlist, fmrlist,
				     pginfo->u.fmr.next_listelem,
				     pginfo->next_hwpage);
			return -EFAULT;
		}
		(pginfo->hwpage_cnt)++;
		if (pginfo->u.fmr.fmr_pgsize >= pginfo->hwpage_size) {
			if (pginfo->next_hwpage %
			    (pginfo->u.fmr.fmr_pgsize /
			     pginfo->hwpage_size) == 0) {
				(pginfo->kpage_cnt)++;
				(pginfo->u.fmr.next_listelem)++;
				fmrlist++;
				pginfo->next_hwpage = 0;
			} else
				(pginfo->next_hwpage)++;
		} else {
			unsigned int cnt_per_hwpage = pginfo->hwpage_size /
				pginfo->u.fmr.fmr_pgsize;
			unsigned int j;
			u64 prev = *kpage;
			/* check if adrs are contiguous */
			for (j = 1; j < cnt_per_hwpage; j++) {
				u64 p = phys_to_abs(fmrlist[j] &
						    ~(pginfo->hwpage_size - 1));
				if (prev + pginfo->u.fmr.fmr_pgsize != p) {
					ehca_gen_err("uncontiguous fmr pages "
						     "found prev=%llx p=%llx "
						     "idx=%x", prev, p, i + j);
					return -EINVAL;
				}
				prev = p;
			}
			pginfo->kpage_cnt += cnt_per_hwpage;
			pginfo->u.fmr.next_listelem += cnt_per_hwpage;
			fmrlist += cnt_per_hwpage;
		}
		kpage++;
	}
	return ret;
}
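/*
 * Worked example for the small-fmr-page branch above (illustrative
 * values): fmr_pgsize = 4 KiB with hwpage_size = 64 KiB yields
 * cnt_per_hwpage = 16, so sixteen consecutive page_list entries must
 * form one physically contiguous 64 KiB block; entry j must equal
 * prev + fmr_pgsize, otherwise the "uncontiguous fmr pages" error
 * fires.
 */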
/* setup page buffer from page info */
int ehca_set_pagebuf(struct ehca_mr_pginfo *pginfo,
		     u32 number,
		     u64 *kpage)
{
	int ret;

	switch (pginfo->type) {
	case EHCA_MR_PGI_PHYS:
		ret = ehca_set_pagebuf_phys(pginfo, number, kpage);
		break;
	case EHCA_MR_PGI_USER:
		ret = PAGE_SIZE >= pginfo->hwpage_size ?
			ehca_set_pagebuf_user1(pginfo, number, kpage) :
			ehca_set_pagebuf_user2(pginfo, number, kpage);
		break;
	case EHCA_MR_PGI_FMR:
		ret = ehca_set_pagebuf_fmr(pginfo, number, kpage);
		break;
	default:
		ehca_gen_err("bad pginfo->type=%x", pginfo->type);
		ret = -EFAULT;
		break;
	}
	return ret;
} /* end ehca_set_pagebuf() */
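/*
 * Caller sketch (compiled out, illustrative only): MR registration paths
 * drain pginfo in batches of at most MAX_RPAGES hwpage addresses per
 * register_rpages hcall; the kpage buffer is a firmware control block,
 * and the hcall named below is just one of the users in this driver.
 */
#if 0
	u64 *kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	int ret = ehca_set_pagebuf(pginfo, MAX_RPAGES, kpage);
	/* on success, hand kpage to e.g. hipz_h_register_rpage_mr() */
#endif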
/*----------------------------------------------------------------------*/

/*
 * check MR if it is a max-MR, i.e. uses whole memory
 * in case it's a max-MR 1 is returned, else 0
 */
int ehca_mr_is_maxmr(u64 size,
		     u64 *iova_start)
{
	/* a MR is treated as max-MR only if it fits following: */
	if ((size == ehca_mr_len) &&
	    (iova_start == (void *)ehca_map_vaddr((void *)KERNELBASE))) {
		ehca_gen_dbg("this is a max-MR");
		return 1;
	} else
		return 0;
} /* end ehca_mr_is_maxmr() */
/*----------------------------------------------------------------------*/

/* map access control for MR/MW. This routine is used for MR and MW. */
void ehca_mrmw_map_acl(int ib_acl,
		       u32 *hipz_acl)
{
	*hipz_acl = 0;
	if (ib_acl & IB_ACCESS_REMOTE_READ)
		*hipz_acl |= HIPZ_ACCESSCTRL_R_READ;
	if (ib_acl & IB_ACCESS_REMOTE_WRITE)
		*hipz_acl |= HIPZ_ACCESSCTRL_R_WRITE;
	if (ib_acl & IB_ACCESS_REMOTE_ATOMIC)
		*hipz_acl |= HIPZ_ACCESSCTRL_R_ATOMIC;
	if (ib_acl & IB_ACCESS_LOCAL_WRITE)
		*hipz_acl |= HIPZ_ACCESSCTRL_L_WRITE;
	if (ib_acl & IB_ACCESS_MW_BIND)
		*hipz_acl |= HIPZ_ACCESSCTRL_MW_BIND;
} /* end ehca_mrmw_map_acl() */

/*----------------------------------------------------------------------*/

/* sets page size in hipz access control for MR/MW. */
void ehca_mrmw_set_pgsize_hipz_acl(u32 pgsize, u32 *hipz_acl) /*INOUT*/
{
	*hipz_acl |= (ehca_encode_hwpage_size(pgsize) << 24);
} /* end ehca_mrmw_set_pgsize_hipz_acl() */
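/*
 * Usage sketch (compiled out, illustrative only): build a hipz access
 * word for a 64 KiB-paged MR with local write and remote read rights.
 * ehca_encode_hwpage_size(EHCA_MR_PGSIZE64K) is (16 - 12) / 4 = 1, so
 * ehca_mrmw_set_pgsize_hipz_acl() ORs 1 << 24 into the word.
 */
#if 0
	u32 hipz_acl;
	ehca_mrmw_map_acl(IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ,
			  &hipz_acl);
	ehca_mrmw_set_pgsize_hipz_acl(EHCA_MR_PGSIZE64K, &hipz_acl);
#endif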
/*----------------------------------------------------------------------*/

/*
 * reverse map access control for MR/MW.
 * This routine is used for MR and MW.
 */
void ehca_mrmw_reverse_map_acl(const u32 *hipz_acl,
			       int *ib_acl) /*OUT*/
{
	*ib_acl = 0;
	if (*hipz_acl & HIPZ_ACCESSCTRL_R_READ)
		*ib_acl |= IB_ACCESS_REMOTE_READ;
	if (*hipz_acl & HIPZ_ACCESSCTRL_R_WRITE)
		*ib_acl |= IB_ACCESS_REMOTE_WRITE;
	if (*hipz_acl & HIPZ_ACCESSCTRL_R_ATOMIC)
		*ib_acl |= IB_ACCESS_REMOTE_ATOMIC;
	if (*hipz_acl & HIPZ_ACCESSCTRL_L_WRITE)
		*ib_acl |= IB_ACCESS_LOCAL_WRITE;
	if (*hipz_acl & HIPZ_ACCESSCTRL_MW_BIND)
		*ib_acl |= IB_ACCESS_MW_BIND;
} /* end ehca_mrmw_reverse_map_acl() */
/*----------------------------------------------------------------------*/

/*
 * MR destructor and constructor
 * used in Reregister MR verb, sets all fields in ehca_mr_t to 0,
 * except struct ib_mr and spinlock
 */
void ehca_mr_deletenew(struct ehca_mr *mr)
{
	mr->flags = 0;
	mr->num_kpages = 0;
	mr->num_hwpages = 0;
	mr->acl = 0;
	mr->start = NULL;
	mr->fmr_page_size = 0;
	mr->fmr_max_pages = 0;
	mr->fmr_max_maps = 0;
	mr->fmr_map_cnt = 0;
	memset(&mr->ipz_mr_handle, 0, sizeof(mr->ipz_mr_handle));
	memset(&mr->galpas, 0, sizeof(mr->galpas));
} /* end ehca_mr_deletenew() */
int ehca_init_mrmw_cache(void)
{
	mr_cache = kmem_cache_create("ehca_cache_mr",
				     sizeof(struct ehca_mr), 0,
				     SLAB_HWCACHE_ALIGN,
				     NULL);
	if (!mr_cache)
		return -ENOMEM;
	mw_cache = kmem_cache_create("ehca_cache_mw",
				     sizeof(struct ehca_mw), 0,
				     SLAB_HWCACHE_ALIGN,
				     NULL);
	if (!mw_cache) {
		kmem_cache_destroy(mr_cache);
		mr_cache = NULL;
		return -ENOMEM;
	}
	return 0;
}

void ehca_cleanup_mrmw_cache(void)
{
	if (mr_cache)
		kmem_cache_destroy(mr_cache);
	if (mw_cache)
		kmem_cache_destroy(mw_cache);
}
static inline int ehca_init_top_bmap(struct ehca_top_bmap *ehca_top_bmap,
				     int dir)
{
	if (!ehca_bmap_valid(ehca_top_bmap->dir[dir])) {
		ehca_top_bmap->dir[dir] =
			kmalloc(sizeof(struct ehca_dir_bmap), GFP_KERNEL);
		if (!ehca_top_bmap->dir[dir])
			return -ENOMEM;
		/* Set map block to 0xFF according to EHCA_INVAL_ADDR */
		memset(ehca_top_bmap->dir[dir], 0xFF, EHCA_ENT_MAP_SIZE);
	}
	return 0;
}

static inline int ehca_init_bmap(struct ehca_bmap *ehca_bmap, int top, int dir)
{
	if (!ehca_bmap_valid(ehca_bmap->top[top])) {
		ehca_bmap->top[top] =
			kmalloc(sizeof(struct ehca_top_bmap), GFP_KERNEL);
		if (!ehca_bmap->top[top])
			return -ENOMEM;
		/* Set map block to 0xFF according to EHCA_INVAL_ADDR */
		memset(ehca_bmap->top[top], 0xFF, EHCA_DIR_MAP_SIZE);
	}
	return ehca_init_top_bmap(ehca_bmap->top[top], dir);
}
static inline int ehca_calc_index(unsigned long i, unsigned long s)
{
	return (i >> s) & EHCA_INDEX_MASK;
}
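/*
 * Worked example (assuming SECTION_SIZE_BITS is 24, i.e. 16 MiB memory
 * sections, as on ppc64): for a section number i the busmap levels
 * decompose as
 *	top = (i >> 26) & EHCA_INDEX_MASK
 *	dir = (i >> 13) & EHCA_INDEX_MASK
 *	idx =  i        & EHCA_INDEX_MASK
 * so one dir block spans 8192 sections (128 GiB) and one top entry
 * spans 8192 dir blocks (1 PiB). ehca_update_busmap() passes section
 * numbers; ehca_map_vaddr() passes byte addresses and therefore adds
 * EHCA_SECTSHIFT to each shift.
 */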
void ehca_destroy_busmap(void)
{
	int top, dir;

	if (!ehca_bmap)
		return;

	for (top = 0; top < EHCA_MAP_ENTRIES; top++) {
		if (!ehca_bmap_valid(ehca_bmap->top[top]))
			continue;
		for (dir = 0; dir < EHCA_MAP_ENTRIES; dir++) {
			if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]))
				continue;

			kfree(ehca_bmap->top[top]->dir[dir]);
		}

		kfree(ehca_bmap->top[top]);
	}

	kfree(ehca_bmap);
	ehca_bmap = NULL;
}
static int ehca_update_busmap(unsigned long pfn, unsigned long nr_pages)
{
	unsigned long i, start_section, end_section;
	int top, dir, idx;

	if (!nr_pages)
		return 0;

	if (!ehca_bmap) {
		ehca_bmap = kmalloc(sizeof(struct ehca_bmap), GFP_KERNEL);
		if (!ehca_bmap)
			return -ENOMEM;
		/* Set map block to 0xFF according to EHCA_INVAL_ADDR */
		memset(ehca_bmap, 0xFF, EHCA_TOP_MAP_SIZE);
	}

	start_section = phys_to_abs(pfn * PAGE_SIZE) / EHCA_SECTSIZE;
	end_section = phys_to_abs((pfn + nr_pages) * PAGE_SIZE) / EHCA_SECTSIZE;
	for (i = start_section; i < end_section; i++) {
		int ret;
		top = ehca_calc_index(i, EHCA_TOP_INDEX_SHIFT);
		dir = ehca_calc_index(i, EHCA_DIR_INDEX_SHIFT);
		idx = i & EHCA_INDEX_MASK;

		ret = ehca_init_bmap(ehca_bmap, top, dir);
		if (ret) {
			ehca_destroy_busmap();
			return ret;
		}
		ehca_bmap->top[top]->dir[dir]->ent[idx] = ehca_mr_len;
		ehca_mr_len += EHCA_SECTSIZE;
	}
	return 0;
}
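/*
 * Example of the linearization above (illustrative): each EHCA_SECTSIZE
 * section of present RAM is assigned the next free offset in a dense
 * pseudo-bus address space, so two RAM ranges separated by a memory
 * hole still receive consecutive ehca_mr_len offsets; ehca_map_vaddr()
 * later replays this per-section lookup for every address.
 */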
static int ehca_is_hugepage(unsigned long pfn)
{
	int page_order;

	if (pfn & EHCA_HUGEPAGE_PFN_MASK)
		return 0;

	page_order = compound_order(pfn_to_page(pfn));
	if (page_order + PAGE_SHIFT != EHCA_HUGEPAGESHIFT)
		return 0;

	return 1;
}
static int ehca_create_busmap_callback(unsigned long initial_pfn,
				       unsigned long total_nr_pages, void *arg)
{
	int ret;
	unsigned long pfn, start_pfn, end_pfn, nr_pages;

	if ((total_nr_pages * PAGE_SIZE) < EHCA_HUGEPAGE_SIZE)
		return ehca_update_busmap(initial_pfn, total_nr_pages);

	/* Given chunk is >= 16GB -> check for hugepages */
	start_pfn = initial_pfn;
	end_pfn = initial_pfn + total_nr_pages;
	pfn = start_pfn;

	while (pfn < end_pfn) {
		if (ehca_is_hugepage(pfn)) {
			/* Add mem found in front of the hugepage */
			nr_pages = pfn - start_pfn;
			ret = ehca_update_busmap(start_pfn, nr_pages);
			if (ret)
				return ret;
			/* Skip the hugepage */
			pfn += (EHCA_HUGEPAGE_SIZE / PAGE_SIZE);
			start_pfn = pfn;
		} else
			pfn += (EHCA_SECTSIZE / PAGE_SIZE);
	}

	/* Add mem found behind the hugepage(s) */
	nr_pages = pfn - start_pfn;
	return ehca_update_busmap(start_pfn, nr_pages);
}

int ehca_create_busmap(void)
{
	int ret;

	ehca_mr_len = 0;
	ret = walk_system_ram_range(0, 1ULL << MAX_PHYSMEM_BITS, NULL,
				    ehca_create_busmap_callback);
	return ret;
}
static int ehca_reg_bmap_mr_rpages(struct ehca_shca *shca,
				   struct ehca_mr *e_mr,
				   struct ehca_mr_pginfo *pginfo)
{
	int top;
	u64 hret = H_SUCCESS;
	u64 *kpage;

	kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	if (!kpage) {
		ehca_err(&shca->ib_device, "kpage alloc failed");
		return -ENOMEM;
	}
	for (top = 0; top < EHCA_MAP_ENTRIES; top++) {
		if (!ehca_bmap_valid(ehca_bmap->top[top]))
			continue;
		hret = ehca_reg_mr_dir_sections(top, kpage, shca, e_mr, pginfo);
		if ((hret != H_PAGE_REGISTERED) && (hret != H_SUCCESS))
			break;
	}

	ehca_free_fw_ctrlblock(kpage);

	if (hret == H_SUCCESS)
		return 0; /* Everything is fine */
	else {
		ehca_err(&shca->ib_device, "ehca_reg_bmap_mr_rpages failed, "
			 "h_ret=%lli e_mr=%p top=%x lkey=%x "
			 "hca_hndl=%llx mr_hndl=%llx", hret, e_mr, top,
			 e_mr->ib.ib_mr.lkey,
			 shca->ipz_hca_handle.handle,
			 e_mr->ipz_mr_handle.handle);
		return ehca2ib_return_code(hret);
	}
}
static u64 ehca_map_vaddr(void *caddr)
{
	int top, dir, idx;
	unsigned long abs_addr, offset;
	u64 entry;

	if (!ehca_bmap)
		return EHCA_INVAL_ADDR;

	abs_addr = virt_to_abs(caddr);
	top = ehca_calc_index(abs_addr, EHCA_TOP_INDEX_SHIFT + EHCA_SECTSHIFT);
	if (!ehca_bmap_valid(ehca_bmap->top[top]))
		return EHCA_INVAL_ADDR;

	dir = ehca_calc_index(abs_addr, EHCA_DIR_INDEX_SHIFT + EHCA_SECTSHIFT);
	if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]))
		return EHCA_INVAL_ADDR;

	idx = ehca_calc_index(abs_addr, EHCA_SECTSHIFT);

	entry = ehca_bmap->top[top]->dir[dir]->ent[idx];
	if (ehca_bmap_valid(entry)) {
		offset = (unsigned long)caddr & (EHCA_SECTSIZE - 1);
		return entry | offset;
	} else
		return EHCA_INVAL_ADDR;
}
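/*
 * Translation sketch (illustrative): for an address whose absolute
 * address falls in section i, the lookup above walks
 * top -> dir -> ent exactly as ehca_update_busmap() filled them in, and
 * returns the section's pseudo-bus base ORed with the offset of caddr
 * within its EHCA_SECTSIZE section; any invalid level yields
 * EHCA_INVAL_ADDR, which the DMA ops below report as a mapping error.
 */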
static int ehca_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
{
	return dma_addr == EHCA_INVAL_ADDR;
}

static u64 ehca_dma_map_single(struct ib_device *dev, void *cpu_addr,
			       size_t size, enum dma_data_direction direction)
{
	if (cpu_addr)
		return ehca_map_vaddr(cpu_addr);
	else
		return EHCA_INVAL_ADDR;
}

static void ehca_dma_unmap_single(struct ib_device *dev, u64 addr, size_t size,
				  enum dma_data_direction direction)
{
	/* This is only a stub; nothing to be done here */
}

static u64 ehca_dma_map_page(struct ib_device *dev, struct page *page,
			     unsigned long offset, size_t size,
			     enum dma_data_direction direction)
{
	u64 addr;

	if (offset + size > PAGE_SIZE)
		return EHCA_INVAL_ADDR;

	addr = ehca_map_vaddr(page_address(page));
	if (!ehca_dma_mapping_error(dev, addr))
		addr += offset;

	return addr;
}

static void ehca_dma_unmap_page(struct ib_device *dev, u64 addr, size_t size,
				enum dma_data_direction direction)
{
	/* This is only a stub; nothing to be done here */
}
static int ehca_dma_map_sg(struct ib_device *dev, struct scatterlist *sgl,
			   int nents, enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		u64 addr;
		addr = ehca_map_vaddr(sg_virt(sg));
		if (ehca_dma_mapping_error(dev, addr))
			return 0;

		sg->dma_address = addr;
		sg->dma_length = sg->length;
	}
	return nents;
}

static void ehca_dma_unmap_sg(struct ib_device *dev, struct scatterlist *sg,
			      int nents, enum dma_data_direction direction)
{
	/* This is only a stub; nothing to be done here */
}

static u64 ehca_dma_address(struct ib_device *dev, struct scatterlist *sg)
{
	return sg->dma_address;
}
static unsigned int ehca_dma_len(struct ib_device *dev, struct scatterlist *sg)
{
	return sg->length;
}

static void ehca_dma_sync_single_for_cpu(struct ib_device *dev, u64 addr,
					 size_t size,
					 enum dma_data_direction dir)
{
	dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
}

static void ehca_dma_sync_single_for_device(struct ib_device *dev, u64 addr,
					    size_t size,
					    enum dma_data_direction dir)
{
	dma_sync_single_for_device(dev->dma_device, addr, size, dir);
}
static void *ehca_dma_alloc_coherent(struct ib_device *dev, size_t size,
				     u64 *dma_handle, gfp_t flag)
{
	struct page *p;
	void *addr = NULL;
	u64 dma_addr;

	p = alloc_pages(flag, get_order(size));
	if (p) {
		addr = page_address(p);
		dma_addr = ehca_map_vaddr(addr);
		if (ehca_dma_mapping_error(dev, dma_addr)) {
			free_pages((unsigned long)addr, get_order(size));
			return NULL;
		}
		if (dma_handle)
			*dma_handle = dma_addr;
		return addr;
	}
	return NULL;
}

static void ehca_dma_free_coherent(struct ib_device *dev, size_t size,
				   void *cpu_addr, u64 dma_handle)
{
	if (cpu_addr && size)
		free_pages((unsigned long)cpu_addr, get_order(size));
}
struct ib_dma_mapping_ops ehca_dma_mapping_ops = {
	.mapping_error          = ehca_dma_mapping_error,
	.map_single             = ehca_dma_map_single,
	.unmap_single           = ehca_dma_unmap_single,
	.map_page               = ehca_dma_map_page,
	.unmap_page             = ehca_dma_unmap_page,
	.map_sg                 = ehca_dma_map_sg,
	.unmap_sg               = ehca_dma_unmap_sg,
	.dma_address            = ehca_dma_address,
	.dma_len                = ehca_dma_len,
	.sync_single_for_cpu    = ehca_dma_sync_single_for_cpu,
	.sync_single_for_device = ehca_dma_sync_single_for_device,
	.alloc_coherent         = ehca_dma_alloc_coherent,
	.free_coherent          = ehca_dma_free_coherent,
};
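/*
 * Usage sketch (compiled out, illustrative only): the IB core dispatches
 * through these ops via the ib_dma_* wrappers whenever a device installs
 * ib_dma_mapping_ops, so for eHCA a mapping request resolves to the
 * pseudo-bus address built by ehca_create_busmap(); buf and len below
 * are hypothetical.
 */
#if 0
	u64 dma = ib_dma_map_single(&shca->ib_device, buf, len,
				    DMA_TO_DEVICE);
	if (ib_dma_mapping_error(&shca->ib_device, dma))
		; /* address not covered by the busmap */
#endif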