/*
 * IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 * Authors: Dietmar Decker <ddecker@de.ibm.com>
 *          Christoph Raisch <raisch@de.ibm.com>
 *
 * Copyright (c) 2005 IBM Corporation
 *
 * All rights reserved.
 *
 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
 * BSD.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <rdma/ib_umem.h>

#include <asm/current.h>

#include "ehca_iverbs.h"
#include "ehca_mrmw.h"

static struct kmem_cache *mr_cache;
static struct kmem_cache *mw_cache;
static struct ehca_mr *ehca_mr_new(void)
{
	struct ehca_mr *me;

	me = kmem_cache_zalloc(mr_cache, GFP_KERNEL);
	if (me)
		spin_lock_init(&me->mrlock);
	else
		ehca_gen_err("alloc failed");

	return me;
}

static void ehca_mr_delete(struct ehca_mr *me)
{
	kmem_cache_free(mr_cache, me);
}
static struct ehca_mw *ehca_mw_new(void)
{
	struct ehca_mw *me;

	me = kmem_cache_zalloc(mw_cache, GFP_KERNEL);
	if (me)
		spin_lock_init(&me->mwlock);
	else
		ehca_gen_err("alloc failed");

	return me;
}

static void ehca_mw_delete(struct ehca_mw *me)
{
	kmem_cache_free(mw_cache, me);
}

/*----------------------------------------------------------------------*/
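/*
 * ib_get_dma_mr() verb: hand out the SHCA's internal max-MR, which covers
 * all of kernel memory, so callers get a DMA-capable lkey/rkey without a
 * separate page registration.
 */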
struct ib_mr *ehca_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
{
	struct ib_mr *ib_mr;
	int ret;
	struct ehca_mr *e_maxmr;
	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
	struct ehca_shca *shca =
		container_of(pd->device, struct ehca_shca, ib_device);

	if (shca->maxmr) {
		e_maxmr = ehca_mr_new();
		if (!e_maxmr) {
			ehca_err(&shca->ib_device, "out of memory");
			ib_mr = ERR_PTR(-ENOMEM);
			goto get_dma_mr_exit0;
		}

		ret = ehca_reg_maxmr(shca, e_maxmr, (u64 *)KERNELBASE,
				     mr_access_flags, e_pd,
				     &e_maxmr->ib.ib_mr.lkey,
				     &e_maxmr->ib.ib_mr.rkey);
		if (ret) {
			ib_mr = ERR_PTR(ret);
			goto get_dma_mr_exit0;
		}
		ib_mr = &e_maxmr->ib.ib_mr;
	} else {
		ehca_err(&shca->ib_device, "no internal max-MR exist!");
		ib_mr = ERR_PTR(-EINVAL);
		goto get_dma_mr_exit0;
	}

get_dma_mr_exit0:
	if (IS_ERR(ib_mr))
		ehca_err(&shca->ib_device, "rc=%lx pd=%p mr_access_flags=%x ",
			 PTR_ERR(ib_mr), pd, mr_access_flags);
	return ib_mr;
} /* end ehca_get_dma_mr() */

/*----------------------------------------------------------------------*/
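/*
 * ib_reg_phys_mr() verb: validate the physical buffer list and access flags,
 * then register the region with the HCA either as a max-MR or through
 * ehca_reg_mr() with a physical-buffer page info descriptor.
 */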
struct ib_mr *ehca_reg_phys_mr(struct ib_pd *pd,
			       struct ib_phys_buf *phys_buf_array,
			       int num_phys_buf,
			       int mr_access_flags,
			       u64 *iova_start)
{
	struct ib_mr *ib_mr;
	int ret;
	struct ehca_mr *e_mr;
	struct ehca_shca *shca =
		container_of(pd->device, struct ehca_shca, ib_device);
	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);

	u64 size;
	struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
	u32 num_pages_mr;
	u32 num_pages_4k; /* 4k portion "pages" */

	if ((num_phys_buf <= 0) || !phys_buf_array) {
		ehca_err(pd->device, "bad input values: num_phys_buf=%x "
			 "phys_buf_array=%p", num_phys_buf, phys_buf_array);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_phys_mr_exit0;
	}
	if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
	    ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
		/*
		 * Remote Write Access requires Local Write Access
		 * Remote Atomic Access requires Local Write Access
		 */
		ehca_err(pd->device, "bad input values: mr_access_flags=%x",
			 mr_access_flags);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_phys_mr_exit0;
	}

	/* check physical buffer list and calculate size */
	ret = ehca_mr_chk_buf_and_calc_size(phys_buf_array, num_phys_buf,
					    iova_start, &size);
	if (ret) {
		ib_mr = ERR_PTR(ret);
		goto reg_phys_mr_exit0;
	}
	if ((size == 0) ||
	    (((u64)iova_start + size) < (u64)iova_start)) {
		ehca_err(pd->device, "bad input values: size=%lx iova_start=%p",
			 size, iova_start);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_phys_mr_exit0;
	}

	e_mr = ehca_mr_new();
	if (!e_mr) {
		ehca_err(pd->device, "out of memory");
		ib_mr = ERR_PTR(-ENOMEM);
		goto reg_phys_mr_exit0;
	}

	/* determine number of MR pages */
	num_pages_mr = ((((u64)iova_start % PAGE_SIZE) + size +
			 PAGE_SIZE - 1) / PAGE_SIZE);
	num_pages_4k = ((((u64)iova_start % EHCA_PAGESIZE) + size +
			 EHCA_PAGESIZE - 1) / EHCA_PAGESIZE);

	/* register MR on HCA */
	if (ehca_mr_is_maxmr(size, iova_start)) {
		e_mr->flags |= EHCA_MR_FLAG_MAXMR;
		ret = ehca_reg_maxmr(shca, e_mr, iova_start, mr_access_flags,
				     e_pd, &e_mr->ib.ib_mr.lkey,
				     &e_mr->ib.ib_mr.rkey);
		if (ret) {
			ib_mr = ERR_PTR(ret);
			goto reg_phys_mr_exit1;
		}
	} else {
		pginfo.type = EHCA_MR_PGI_PHYS;
		pginfo.num_pages = num_pages_mr;
		pginfo.num_4k = num_pages_4k;
		pginfo.num_phys_buf = num_phys_buf;
		pginfo.phys_buf_array = phys_buf_array;
		pginfo.next_4k = (((u64)iova_start & ~PAGE_MASK) /
				  EHCA_PAGESIZE);

		ret = ehca_reg_mr(shca, e_mr, iova_start, size, mr_access_flags,
				  e_pd, &pginfo, &e_mr->ib.ib_mr.lkey,
				  &e_mr->ib.ib_mr.rkey);
		if (ret) {
			ib_mr = ERR_PTR(ret);
			goto reg_phys_mr_exit1;
		}
	}

	/* successful registration of all pages */
	return &e_mr->ib.ib_mr;

reg_phys_mr_exit1:
	ehca_mr_delete(e_mr);
reg_phys_mr_exit0:
	if (IS_ERR(ib_mr))
		ehca_err(pd->device, "rc=%lx pd=%p phys_buf_array=%p "
			 "num_phys_buf=%x mr_access_flags=%x iova_start=%p",
			 PTR_ERR(ib_mr), pd, phys_buf_array,
			 num_phys_buf, mr_access_flags, iova_start);
	return ib_mr;
} /* end ehca_reg_phys_mr() */

/*----------------------------------------------------------------------*/
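/*
 * ib_reg_user_mr() verb: pin the user memory via ib_umem_get() and register
 * its pages with the HCA using a user-type page info descriptor.
 */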
struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, u64 virt,
			       int mr_access_flags, struct ib_udata *udata)
{
	struct ib_mr *ib_mr;
	struct ehca_mr *e_mr;
	struct ehca_shca *shca =
		container_of(pd->device, struct ehca_shca, ib_device);
	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
	struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
	int ret;
	u32 num_pages_mr;
	u32 num_pages_4k; /* 4k portion "pages" */

	if (!pd) {
		ehca_gen_err("bad pd=%p", pd);
		return ERR_PTR(-EFAULT);
	}

	if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
	    ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
		/*
		 * Remote Write Access requires Local Write Access
		 * Remote Atomic Access requires Local Write Access
		 */
		ehca_err(pd->device, "bad input values: mr_access_flags=%x",
			 mr_access_flags);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_user_mr_exit0;
	}

	if (length == 0 || virt + length < virt) {
		ehca_err(pd->device, "bad input values: length=%lx "
			 "virt_base=%lx", length, virt);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_user_mr_exit0;
	}

	e_mr = ehca_mr_new();
	if (!e_mr) {
		ehca_err(pd->device, "out of memory");
		ib_mr = ERR_PTR(-ENOMEM);
		goto reg_user_mr_exit0;
	}

	e_mr->umem = ib_umem_get(pd->uobject->context, start, length,
				 mr_access_flags);
	if (IS_ERR(e_mr->umem)) {
		ib_mr = (void *) e_mr->umem;
		goto reg_user_mr_exit1;
	}

	if (e_mr->umem->page_size != PAGE_SIZE) {
		ehca_err(pd->device, "page size not supported, "
			 "e_mr->umem->page_size=%x", e_mr->umem->page_size);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_user_mr_exit2;
	}

	/* determine number of MR pages */
	num_pages_mr = (((virt % PAGE_SIZE) + length + PAGE_SIZE - 1) /
			PAGE_SIZE);
	num_pages_4k = (((virt % EHCA_PAGESIZE) + length + EHCA_PAGESIZE - 1) /
			EHCA_PAGESIZE);

	/* register MR on HCA */
	pginfo.type = EHCA_MR_PGI_USER;
	pginfo.num_pages = num_pages_mr;
	pginfo.num_4k = num_pages_4k;
	pginfo.region = e_mr->umem;
	pginfo.next_4k = e_mr->umem->offset / EHCA_PAGESIZE;
	pginfo.next_chunk = list_prepare_entry(pginfo.next_chunk,
					       (&e_mr->umem->chunk_list),
					       list);

	ret = ehca_reg_mr(shca, e_mr, (u64 *) virt, length, mr_access_flags, e_pd,
			  &pginfo, &e_mr->ib.ib_mr.lkey, &e_mr->ib.ib_mr.rkey);
	if (ret) {
		ib_mr = ERR_PTR(ret);
		goto reg_user_mr_exit2;
	}

	/* successful registration of all pages */
	return &e_mr->ib.ib_mr;

reg_user_mr_exit2:
	ib_umem_release(e_mr->umem);
reg_user_mr_exit1:
	ehca_mr_delete(e_mr);
reg_user_mr_exit0:
	if (IS_ERR(ib_mr))
		ehca_err(pd->device, "rc=%lx pd=%p mr_access_flags=%x"
			 " udata=%p",
			 PTR_ERR(ib_mr), pd, mr_access_flags, udata);
	return ib_mr;
} /* end ehca_reg_user_mr() */

/*----------------------------------------------------------------------*/
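/*
 * ib_rereg_phys_mr() verb: re-register an existing MR with new translation,
 * access rights and/or PD; only requests that include IB_MR_REREG_TRANS are
 * supported because the PHYP rereg hCall needs the page list.
 */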
int ehca_rereg_phys_mr(struct ib_mr *mr,
		       int mr_rereg_mask,
		       struct ib_pd *pd,
		       struct ib_phys_buf *phys_buf_array,
		       int num_phys_buf,
		       int mr_access_flags,
		       u64 *iova_start)
{
	int ret;
	struct ehca_shca *shca =
		container_of(mr->device, struct ehca_shca, ib_device);
	struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
	struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
	u64 new_size;
	u64 *new_start;
	u32 new_acl;
	struct ehca_pd *new_pd;
	u32 tmp_lkey, tmp_rkey;
	unsigned long sl_flags;
	u32 num_pages_mr = 0;
	u32 num_pages_4k = 0; /* 4k portion "pages" */
	struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
	u32 cur_pid = current->tgid;

	if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
	    (my_pd->ownpid != cur_pid)) {
		ehca_err(mr->device, "Invalid caller pid=%x ownpid=%x",
			 cur_pid, my_pd->ownpid);
		ret = -EINVAL;
		goto rereg_phys_mr_exit0;
	}

	if (!(mr_rereg_mask & IB_MR_REREG_TRANS)) {
		/* TODO not supported, because PHYP rereg hCall needs pages */
		ehca_err(mr->device, "rereg without IB_MR_REREG_TRANS not "
			 "supported yet, mr_rereg_mask=%x", mr_rereg_mask);
		ret = -EINVAL;
		goto rereg_phys_mr_exit0;
	}

	if (mr_rereg_mask & IB_MR_REREG_PD) {
		if (!pd) {
			ehca_err(mr->device, "rereg with bad pd, pd=%p "
				 "mr_rereg_mask=%x", pd, mr_rereg_mask);
			ret = -EINVAL;
			goto rereg_phys_mr_exit0;
		}
	}

	if ((mr_rereg_mask &
	     ~(IB_MR_REREG_TRANS | IB_MR_REREG_PD | IB_MR_REREG_ACCESS)) ||
	    (mr_rereg_mask == 0)) {
		ret = -EINVAL;
		goto rereg_phys_mr_exit0;
	}

	/* check other parameters */
	if (e_mr == shca->maxmr) {
		/* should be impossible, however reject to be sure */
		ehca_err(mr->device, "rereg internal max-MR impossible, mr=%p "
			 "shca->maxmr=%p mr->lkey=%x",
			 mr, shca->maxmr, mr->lkey);
		ret = -EINVAL;
		goto rereg_phys_mr_exit0;
	}
	if (mr_rereg_mask & IB_MR_REREG_TRANS) { /* transl., i.e. addr/size */
		if (e_mr->flags & EHCA_MR_FLAG_FMR) {
			ehca_err(mr->device, "not supported for FMR, mr=%p "
				 "flags=%x", mr, e_mr->flags);
			ret = -EINVAL;
			goto rereg_phys_mr_exit0;
		}
		if (!phys_buf_array || num_phys_buf <= 0) {
			ehca_err(mr->device, "bad input values: mr_rereg_mask=%x"
				 " phys_buf_array=%p num_phys_buf=%x",
				 mr_rereg_mask, phys_buf_array, num_phys_buf);
			ret = -EINVAL;
			goto rereg_phys_mr_exit0;
		}
	}
	if ((mr_rereg_mask & IB_MR_REREG_ACCESS) &&	/* change ACL */
	    (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
	      !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
	     ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
	      !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)))) {
		/*
		 * Remote Write Access requires Local Write Access
		 * Remote Atomic Access requires Local Write Access
		 */
		ehca_err(mr->device, "bad input values: mr_rereg_mask=%x "
			 "mr_access_flags=%x", mr_rereg_mask, mr_access_flags);
		ret = -EINVAL;
		goto rereg_phys_mr_exit0;
	}

	/* set requested values dependent on rereg request */
	spin_lock_irqsave(&e_mr->mrlock, sl_flags);
	new_start = e_mr->start;	/* new == old address */
	new_size  = e_mr->size;	/* new == old length */
	new_acl   = e_mr->acl;	/* new == old access control */
	new_pd = container_of(mr->pd, struct ehca_pd, ib_pd); /* new == old PD */

	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
		new_start = iova_start;	/* change address */
		/* check physical buffer list and calculate size */
		ret = ehca_mr_chk_buf_and_calc_size(phys_buf_array,
						    num_phys_buf, iova_start,
						    &new_size);
		if (ret)
			goto rereg_phys_mr_exit1;
		if ((new_size == 0) ||
		    (((u64)iova_start + new_size) < (u64)iova_start)) {
			ehca_err(mr->device, "bad input values: new_size=%lx "
				 "iova_start=%p", new_size, iova_start);
			ret = -EINVAL;
			goto rereg_phys_mr_exit1;
		}
		num_pages_mr = ((((u64)new_start % PAGE_SIZE) + new_size +
				 PAGE_SIZE - 1) / PAGE_SIZE);
		num_pages_4k = ((((u64)new_start % EHCA_PAGESIZE) + new_size +
				 EHCA_PAGESIZE - 1) / EHCA_PAGESIZE);
		pginfo.type = EHCA_MR_PGI_PHYS;
		pginfo.num_pages = num_pages_mr;
		pginfo.num_4k = num_pages_4k;
		pginfo.num_phys_buf = num_phys_buf;
		pginfo.phys_buf_array = phys_buf_array;
		pginfo.next_4k = (((u64)iova_start & ~PAGE_MASK) /
				  EHCA_PAGESIZE);
	}
	if (mr_rereg_mask & IB_MR_REREG_ACCESS)
		new_acl = mr_access_flags;
	if (mr_rereg_mask & IB_MR_REREG_PD)
		new_pd = container_of(pd, struct ehca_pd, ib_pd);

	ret = ehca_rereg_mr(shca, e_mr, new_start, new_size, new_acl,
			    new_pd, &pginfo, &tmp_lkey, &tmp_rkey);
	if (ret)
		goto rereg_phys_mr_exit1;

	/* successful reregistration */
	if (mr_rereg_mask & IB_MR_REREG_PD)
		mr->pd = pd;
	mr->lkey = tmp_lkey;
	mr->rkey = tmp_rkey;

rereg_phys_mr_exit1:
	spin_unlock_irqrestore(&e_mr->mrlock, sl_flags);
rereg_phys_mr_exit0:
	if (ret)
		ehca_err(mr->device, "ret=%x mr=%p mr_rereg_mask=%x pd=%p "
			 "phys_buf_array=%p num_phys_buf=%x mr_access_flags=%x "
			 "iova_start=%p",
			 ret, mr, mr_rereg_mask, pd, phys_buf_array,
			 num_phys_buf, mr_access_flags, iova_start);
	return ret;
} /* end ehca_rereg_phys_mr() */

/*----------------------------------------------------------------------*/
int ehca_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
{
	int ret = 0;
	u64 h_ret;
	struct ehca_shca *shca =
		container_of(mr->device, struct ehca_shca, ib_device);
	struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
	struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
	u32 cur_pid = current->tgid;
	unsigned long sl_flags;
	struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};

	if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
	    (my_pd->ownpid != cur_pid)) {
		ehca_err(mr->device, "Invalid caller pid=%x ownpid=%x",
			 cur_pid, my_pd->ownpid);
		ret = -EINVAL;
		goto query_mr_exit0;
	}

	if ((e_mr->flags & EHCA_MR_FLAG_FMR)) {
		ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p "
			 "e_mr->flags=%x", mr, e_mr, e_mr->flags);
		ret = -EINVAL;
		goto query_mr_exit0;
	}

	memset(mr_attr, 0, sizeof(struct ib_mr_attr));
	spin_lock_irqsave(&e_mr->mrlock, sl_flags);

	h_ret = hipz_h_query_mr(shca->ipz_hca_handle, e_mr, &hipzout);
	if (h_ret != H_SUCCESS) {
		ehca_err(mr->device, "hipz_mr_query failed, h_ret=%lx mr=%p "
			 "hca_hndl=%lx mr_hndl=%lx lkey=%x",
			 h_ret, mr, shca->ipz_hca_handle.handle,
			 e_mr->ipz_mr_handle.handle, mr->lkey);
		ret = ehca_mrmw_map_hrc_query_mr(h_ret);
		goto query_mr_exit1;
	}
	mr_attr->pd               = mr->pd;
	mr_attr->device_virt_addr = hipzout.vaddr;
	mr_attr->size             = hipzout.len;
	mr_attr->lkey             = hipzout.lkey;
	mr_attr->rkey             = hipzout.rkey;
	ehca_mrmw_reverse_map_acl(&hipzout.acl, &mr_attr->mr_access_flags);

query_mr_exit1:
	spin_unlock_irqrestore(&e_mr->mrlock, sl_flags);
query_mr_exit0:
	if (ret)
		ehca_err(mr->device, "ret=%x mr=%p mr_attr=%p",
			 ret, mr, mr_attr);
	return ret;
} /* end ehca_query_mr() */

/*----------------------------------------------------------------------*/
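/*
 * ib_dereg_mr() verb: free the HCA resource and the driver's MR object;
 * FMRs and the internal max-MR are rejected here.
 */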
int ehca_dereg_mr(struct ib_mr *mr)
{
	int ret = 0;
	u64 h_ret;
	struct ehca_shca *shca =
		container_of(mr->device, struct ehca_shca, ib_device);
	struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
	struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
	u32 cur_pid = current->tgid;

	if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
	    (my_pd->ownpid != cur_pid)) {
		ehca_err(mr->device, "Invalid caller pid=%x ownpid=%x",
			 cur_pid, my_pd->ownpid);
		ret = -EINVAL;
		goto dereg_mr_exit0;
	}

	if ((e_mr->flags & EHCA_MR_FLAG_FMR)) {
		ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p "
			 "e_mr->flags=%x", mr, e_mr, e_mr->flags);
		ret = -EINVAL;
		goto dereg_mr_exit0;
	} else if (e_mr == shca->maxmr) {
		/* should be impossible, however reject to be sure */
		ehca_err(mr->device, "dereg internal max-MR impossible, mr=%p "
			 "shca->maxmr=%p mr->lkey=%x",
			 mr, shca->maxmr, mr->lkey);
		ret = -EINVAL;
		goto dereg_mr_exit0;
	}

	/* TODO: BUSY: MR still has bound window(s) */
	h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
	if (h_ret != H_SUCCESS) {
		ehca_err(mr->device, "hipz_free_mr failed, h_ret=%lx shca=%p "
			 "e_mr=%p hca_hndl=%lx mr_hndl=%lx mr->lkey=%x",
			 h_ret, shca, e_mr, shca->ipz_hca_handle.handle,
			 e_mr->ipz_mr_handle.handle, mr->lkey);
		ret = ehca_mrmw_map_hrc_free_mr(h_ret);
		goto dereg_mr_exit0;
	}

	if (e_mr->umem)
		ib_umem_release(e_mr->umem);

	/* successful deregistration */
	ehca_mr_delete(e_mr);

dereg_mr_exit0:
	if (ret)
		ehca_err(mr->device, "ret=%x mr=%p", ret, mr);
	return ret;
} /* end ehca_dereg_mr() */

/*----------------------------------------------------------------------*/
struct ib_mw *ehca_alloc_mw(struct ib_pd *pd)
{
	struct ib_mw *ib_mw;
	u64 h_ret;
	struct ehca_mw *e_mw;
	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
	struct ehca_shca *shca =
		container_of(pd->device, struct ehca_shca, ib_device);
	struct ehca_mw_hipzout_parms hipzout = {{0},0};

	e_mw = ehca_mw_new();
	if (!e_mw) {
		ib_mw = ERR_PTR(-ENOMEM);
		goto alloc_mw_exit0;
	}

	h_ret = hipz_h_alloc_resource_mw(shca->ipz_hca_handle, e_mw,
					 e_pd->fw_pd, &hipzout);
	if (h_ret != H_SUCCESS) {
		ehca_err(pd->device, "hipz_mw_allocate failed, h_ret=%lx "
			 "shca=%p hca_hndl=%lx mw=%p",
			 h_ret, shca, shca->ipz_hca_handle.handle, e_mw);
		ib_mw = ERR_PTR(ehca_mrmw_map_hrc_alloc(h_ret));
		goto alloc_mw_exit1;
	}
	/* successful MW allocation */
	e_mw->ipz_mw_handle = hipzout.handle;
	e_mw->ib_mw.rkey    = hipzout.rkey;
	return &e_mw->ib_mw;

alloc_mw_exit1:
	ehca_mw_delete(e_mw);
alloc_mw_exit0:
	if (IS_ERR(ib_mw))
		ehca_err(pd->device, "rc=%lx pd=%p", PTR_ERR(ib_mw), pd);
	return ib_mw;
} /* end ehca_alloc_mw() */

/*----------------------------------------------------------------------*/
int ehca_bind_mw(struct ib_qp *qp,
		 struct ib_mw *mw,
		 struct ib_mw_bind *mw_bind)
{
	/* TODO: not supported up to now */
	ehca_gen_err("bind MW currently not supported by HCAD");

	return -EPERM;
} /* end ehca_bind_mw() */

/*----------------------------------------------------------------------*/
int ehca_dealloc_mw(struct ib_mw *mw)
{
	u64 h_ret;
	struct ehca_shca *shca =
		container_of(mw->device, struct ehca_shca, ib_device);
	struct ehca_mw *e_mw = container_of(mw, struct ehca_mw, ib_mw);

	h_ret = hipz_h_free_resource_mw(shca->ipz_hca_handle, e_mw);
	if (h_ret != H_SUCCESS) {
		ehca_err(mw->device, "hipz_free_mw failed, h_ret=%lx shca=%p "
			 "mw=%p rkey=%x hca_hndl=%lx mw_hndl=%lx",
			 h_ret, shca, mw, mw->rkey, shca->ipz_hca_handle.handle,
			 e_mw->ipz_mw_handle.handle);
		return ehca_mrmw_map_hrc_free_mw(h_ret);
	}
	/* successful deallocation */
	ehca_mw_delete(e_mw);
	return 0;
} /* end ehca_dealloc_mw() */

/*----------------------------------------------------------------------*/
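/*
 * ib_alloc_fmr() verb: validate access flags and FMR attributes, then
 * register an (initially empty) MR of maximum FMR size on the HCA.
 */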
struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd,
			      int mr_access_flags,
			      struct ib_fmr_attr *fmr_attr)
{
	struct ib_fmr *ib_fmr;
	struct ehca_shca *shca =
		container_of(pd->device, struct ehca_shca, ib_device);
	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
	struct ehca_mr *e_fmr;
	int ret;
	u32 tmp_lkey, tmp_rkey;
	struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};

	/* check other parameters */
	if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
	    ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
		/*
		 * Remote Write Access requires Local Write Access
		 * Remote Atomic Access requires Local Write Access
		 */
		ehca_err(pd->device, "bad input values: mr_access_flags=%x",
			 mr_access_flags);
		ib_fmr = ERR_PTR(-EINVAL);
		goto alloc_fmr_exit0;
	}
	if (mr_access_flags & IB_ACCESS_MW_BIND) {
		ehca_err(pd->device, "bad input values: mr_access_flags=%x",
			 mr_access_flags);
		ib_fmr = ERR_PTR(-EINVAL);
		goto alloc_fmr_exit0;
	}
	if ((fmr_attr->max_pages == 0) || (fmr_attr->max_maps == 0)) {
		ehca_err(pd->device, "bad input values: fmr_attr->max_pages=%x "
			 "fmr_attr->max_maps=%x fmr_attr->page_shift=%x",
			 fmr_attr->max_pages, fmr_attr->max_maps,
			 fmr_attr->page_shift);
		ib_fmr = ERR_PTR(-EINVAL);
		goto alloc_fmr_exit0;
	}
	if (((1 << fmr_attr->page_shift) != EHCA_PAGESIZE) &&
	    ((1 << fmr_attr->page_shift) != PAGE_SIZE)) {
		ehca_err(pd->device, "unsupported fmr_attr->page_shift=%x",
			 fmr_attr->page_shift);
		ib_fmr = ERR_PTR(-EINVAL);
		goto alloc_fmr_exit0;
	}

	e_fmr = ehca_mr_new();
	if (!e_fmr) {
		ib_fmr = ERR_PTR(-ENOMEM);
		goto alloc_fmr_exit0;
	}
	e_fmr->flags |= EHCA_MR_FLAG_FMR;

	/* register MR on HCA */
	ret = ehca_reg_mr(shca, e_fmr, NULL,
			  fmr_attr->max_pages * (1 << fmr_attr->page_shift),
			  mr_access_flags, e_pd, &pginfo,
			  &tmp_lkey, &tmp_rkey);
	if (ret) {
		ib_fmr = ERR_PTR(ret);
		goto alloc_fmr_exit1;
	}

	/* successful */
	e_fmr->fmr_page_size = 1 << fmr_attr->page_shift;
	e_fmr->fmr_max_pages = fmr_attr->max_pages;
	e_fmr->fmr_max_maps = fmr_attr->max_maps;
	e_fmr->fmr_map_cnt = 0;
	return &e_fmr->ib.ib_fmr;

alloc_fmr_exit1:
	ehca_mr_delete(e_fmr);
alloc_fmr_exit0:
	if (IS_ERR(ib_fmr))
		ehca_err(pd->device, "rc=%lx pd=%p mr_access_flags=%x "
			 "fmr_attr=%p", PTR_ERR(ib_fmr), pd,
			 mr_access_flags, fmr_attr);
	return ib_fmr;
} /* end ehca_alloc_fmr() */

/*----------------------------------------------------------------------*/
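/*
 * ib_map_phys_fmr() verb: map a page list into an allocated FMR by
 * re-registering the underlying MR with the new translation.
 */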
int ehca_map_phys_fmr(struct ib_fmr *fmr,
		      u64 *page_list,
		      int list_len,
		      u64 iova)
{
	int ret;
	struct ehca_shca *shca =
		container_of(fmr->device, struct ehca_shca, ib_device);
	struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);
	struct ehca_pd *e_pd = container_of(fmr->pd, struct ehca_pd, ib_pd);
	struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
	u32 tmp_lkey, tmp_rkey;

	if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
		ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x",
			 e_fmr, e_fmr->flags);
		ret = -EINVAL;
		goto map_phys_fmr_exit0;
	}
	ret = ehca_fmr_check_page_list(e_fmr, page_list, list_len);
	if (ret)
		goto map_phys_fmr_exit0;
	if (iova % e_fmr->fmr_page_size) {
		/* only whole-numbered pages */
		ehca_err(fmr->device, "bad iova, iova=%lx fmr_page_size=%x",
			 iova, e_fmr->fmr_page_size);
		ret = -EINVAL;
		goto map_phys_fmr_exit0;
	}
	if (e_fmr->fmr_map_cnt >= e_fmr->fmr_max_maps) {
		/* HCAD does not limit the maps, however trace this anyway */
		ehca_info(fmr->device, "map limit exceeded, fmr=%p "
			  "e_fmr->fmr_map_cnt=%x e_fmr->fmr_max_maps=%x",
			  fmr, e_fmr->fmr_map_cnt, e_fmr->fmr_max_maps);
	}

	pginfo.type = EHCA_MR_PGI_FMR;
	pginfo.num_pages = list_len;
	pginfo.num_4k = list_len * (e_fmr->fmr_page_size / EHCA_PAGESIZE);
	pginfo.page_list = page_list;
	pginfo.next_4k = ((iova & (e_fmr->fmr_page_size-1)) /
			  EHCA_PAGESIZE);

	ret = ehca_rereg_mr(shca, e_fmr, (u64 *)iova,
			    list_len * e_fmr->fmr_page_size,
			    e_fmr->acl, e_pd, &pginfo, &tmp_lkey, &tmp_rkey);
	if (ret)
		goto map_phys_fmr_exit0;

	/* successful reregistration */
	e_fmr->fmr_map_cnt++;
	e_fmr->ib.ib_fmr.lkey = tmp_lkey;
	e_fmr->ib.ib_fmr.rkey = tmp_rkey;
	return 0;

map_phys_fmr_exit0:
	if (ret)
		ehca_err(fmr->device, "ret=%x fmr=%p page_list=%p list_len=%x "
			 "iova=%lx",
			 ret, fmr, page_list, list_len, iova);
	return ret;
} /* end ehca_map_phys_fmr() */

/*----------------------------------------------------------------------*/
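/*
 * ib_unmap_fmr() verb: unmap a list of FMRs; all entries must belong to the
 * same SHCA, and unmapping stops at the first failure.
 */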
int ehca_unmap_fmr(struct list_head *fmr_list)
{
	int ret = 0;
	struct ib_fmr *ib_fmr;
	struct ehca_shca *shca = NULL;
	struct ehca_shca *prev_shca;
	struct ehca_mr *e_fmr;
	u32 num_fmr = 0;
	u32 unmap_fmr_cnt = 0;

	/* check all FMR belong to same SHCA, and check internal flag */
	list_for_each_entry(ib_fmr, fmr_list, list) {
		prev_shca = shca;
		if (!ib_fmr) {
			ehca_gen_err("bad fmr=%p in list", ib_fmr);
			ret = -EINVAL;
			goto unmap_fmr_exit0;
		}
		shca = container_of(ib_fmr->device, struct ehca_shca,
				    ib_device);
		e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr);
		if ((shca != prev_shca) && prev_shca) {
			ehca_err(&shca->ib_device, "SHCA mismatch, shca=%p "
				 "prev_shca=%p e_fmr=%p",
				 shca, prev_shca, e_fmr);
			ret = -EINVAL;
			goto unmap_fmr_exit0;
		}
		if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
			ehca_err(&shca->ib_device, "not a FMR, e_fmr=%p "
				 "e_fmr->flags=%x", e_fmr, e_fmr->flags);
			ret = -EINVAL;
			goto unmap_fmr_exit0;
		}
		num_fmr++;
	}

	/* loop over all FMRs to unmap */
	list_for_each_entry(ib_fmr, fmr_list, list) {
		unmap_fmr_cnt++;
		e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr);
		shca = container_of(ib_fmr->device, struct ehca_shca,
				    ib_device);
		ret = ehca_unmap_one_fmr(shca, e_fmr);
		if (ret) {
			/* unmap failed, stop unmapping of rest of FMRs */
			ehca_err(&shca->ib_device, "unmap of one FMR failed, "
				 "stop rest, e_fmr=%p num_fmr=%x "
				 "unmap_fmr_cnt=%x lkey=%x", e_fmr, num_fmr,
				 unmap_fmr_cnt, e_fmr->ib.ib_fmr.lkey);
			goto unmap_fmr_exit0;
		}
	}

unmap_fmr_exit0:
	if (ret)
		ehca_gen_err("ret=%x fmr_list=%p num_fmr=%x unmap_fmr_cnt=%x",
			     ret, fmr_list, num_fmr, unmap_fmr_cnt);
	return ret;
} /* end ehca_unmap_fmr() */

/*----------------------------------------------------------------------*/
int ehca_dealloc_fmr(struct ib_fmr *fmr)
{
	int ret;
	u64 h_ret;
	struct ehca_shca *shca =
		container_of(fmr->device, struct ehca_shca, ib_device);
	struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);

	if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
		ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x",
			 e_fmr, e_fmr->flags);
		ret = -EINVAL;
		goto free_fmr_exit0;
	}

	h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
	if (h_ret != H_SUCCESS) {
		ehca_err(fmr->device, "hipz_free_mr failed, h_ret=%lx e_fmr=%p "
			 "hca_hndl=%lx fmr_hndl=%lx fmr->lkey=%x",
			 h_ret, e_fmr, shca->ipz_hca_handle.handle,
			 e_fmr->ipz_mr_handle.handle, fmr->lkey);
		ret = ehca_mrmw_map_hrc_free_mr(h_ret);
		goto free_fmr_exit0;
	}
	/* successful deregistration */
	ehca_mr_delete(e_fmr);
	return 0;

free_fmr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%x fmr=%p", ret, fmr);
	return ret;
} /* end ehca_dealloc_fmr() */

/*----------------------------------------------------------------------*/
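/*
 * internal helper: allocate the MR resource on the HCA and register all of
 * its pages via ehca_reg_mr_rpages(); on rpage failure the resource is
 * freed again.
 */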
int ehca_reg_mr(struct ehca_shca *shca,
		struct ehca_mr *e_mr,
		u64 *iova_start,
		u64 size,
		int acl,
		struct ehca_pd *e_pd,
		struct ehca_mr_pginfo *pginfo,
		u32 *lkey, /*OUT*/
		u32 *rkey) /*OUT*/
{
	int ret;
	u64 h_ret;
	u32 hipz_acl;
	struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};

	ehca_mrmw_map_acl(acl, &hipz_acl);
	ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);
	if (ehca_use_hp_mr == 1)
		hipz_acl |= 0x00000001;

	h_ret = hipz_h_alloc_resource_mr(shca->ipz_hca_handle, e_mr,
					 (u64)iova_start, size, hipz_acl,
					 e_pd->fw_pd, &hipzout);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "hipz_alloc_mr failed, h_ret=%lx "
			 "hca_hndl=%lx", h_ret, shca->ipz_hca_handle.handle);
		ret = ehca_mrmw_map_hrc_alloc(h_ret);
		goto ehca_reg_mr_exit0;
	}

	e_mr->ipz_mr_handle = hipzout.handle;

	ret = ehca_reg_mr_rpages(shca, e_mr, pginfo);
	if (ret)
		goto ehca_reg_mr_exit1;

	/* successful registration */
	e_mr->num_pages = pginfo->num_pages;
	e_mr->num_4k    = pginfo->num_4k;
	e_mr->start     = iova_start;
	e_mr->size      = size;
	e_mr->acl       = acl;
	*lkey = hipzout.lkey;
	*rkey = hipzout.rkey;
	return 0;

ehca_reg_mr_exit1:
	h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "h_ret=%lx shca=%p e_mr=%p "
			 "iova_start=%p size=%lx acl=%x e_pd=%p lkey=%x "
			 "pginfo=%p num_pages=%lx num_4k=%lx ret=%x",
			 h_ret, shca, e_mr, iova_start, size, acl, e_pd,
			 hipzout.lkey, pginfo, pginfo->num_pages,
			 pginfo->num_4k, ret);
		ehca_err(&shca->ib_device, "internal error in ehca_reg_mr, "
			 "not recoverable");
	}
ehca_reg_mr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p "
			 "iova_start=%p size=%lx acl=%x e_pd=%p pginfo=%p "
			 "num_pages=%lx num_4k=%lx",
			 ret, shca, e_mr, iova_start, size, acl, e_pd, pginfo,
			 pginfo->num_pages, pginfo->num_4k);
	return ret;
} /* end ehca_reg_mr() */

/*----------------------------------------------------------------------*/
int ehca_reg_mr_rpages(struct ehca_shca *shca,
		       struct ehca_mr *e_mr,
		       struct ehca_mr_pginfo *pginfo)
{
	int ret = 0;
	u64 h_ret;
	u32 rnum;
	u64 rpage;
	u32 i;
	u64 *kpage;

	kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	if (!kpage) {
		ehca_err(&shca->ib_device, "kpage alloc failed");
		ret = -ENOMEM;
		goto ehca_reg_mr_rpages_exit0;
	}

	/* max 512 pages per shot */
	for (i = 0; i < ((pginfo->num_4k + 512 - 1) / 512); i++) {

		if (i == ((pginfo->num_4k + 512 - 1) / 512) - 1) {
			rnum = pginfo->num_4k % 512; /* last shot */
			if (rnum == 0)
				rnum = 512;      /* last shot is full */
		} else
			rnum = 512;

		if (rnum > 1) {
			ret = ehca_set_pagebuf(e_mr, pginfo, rnum, kpage);
			if (ret) {
				ehca_err(&shca->ib_device, "ehca_set_pagebuf "
					 "bad rc, ret=%x rnum=%x kpage=%p",
					 ret, rnum, kpage);
				ret = -EFAULT;
				goto ehca_reg_mr_rpages_exit1;
			}
			rpage = virt_to_abs(kpage);
			if (!rpage) {
				ehca_err(&shca->ib_device, "kpage=%p i=%x",
					 kpage, i);
				ret = -EFAULT;
				goto ehca_reg_mr_rpages_exit1;
			}
		} else {  /* rnum==1 */
			ret = ehca_set_pagebuf_1(e_mr, pginfo, &rpage);
			if (ret) {
				ehca_err(&shca->ib_device, "ehca_set_pagebuf_1 "
					 "bad rc, ret=%x i=%x", ret, i);
				ret = -EFAULT;
				goto ehca_reg_mr_rpages_exit1;
			}
		}

		h_ret = hipz_h_register_rpage_mr(shca->ipz_hca_handle, e_mr,
						 0, /* pagesize 4k */
						 0, rpage, rnum);

		if (i == ((pginfo->num_4k + 512 - 1) / 512) - 1) {
			/*
			 * check for 'registration complete'==H_SUCCESS
			 * and for 'page registered'==H_PAGE_REGISTERED
			 */
			if (h_ret != H_SUCCESS) {
				ehca_err(&shca->ib_device, "last "
					 "hipz_reg_rpage_mr failed, h_ret=%lx "
					 "e_mr=%p i=%x hca_hndl=%lx mr_hndl=%lx"
					 " lkey=%x", h_ret, e_mr, i,
					 shca->ipz_hca_handle.handle,
					 e_mr->ipz_mr_handle.handle,
					 e_mr->ib.ib_mr.lkey);
				ret = ehca_mrmw_map_hrc_rrpg_last(h_ret);
				break;
			} else
				ret = 0;
		} else if (h_ret != H_PAGE_REGISTERED) {
			ehca_err(&shca->ib_device, "hipz_reg_rpage_mr failed, "
				 "h_ret=%lx e_mr=%p i=%x lkey=%x hca_hndl=%lx "
				 "mr_hndl=%lx", h_ret, e_mr, i,
				 e_mr->ib.ib_mr.lkey,
				 shca->ipz_hca_handle.handle,
				 e_mr->ipz_mr_handle.handle);
			ret = ehca_mrmw_map_hrc_rrpg_notlast(h_ret);
			break;
		} else
			ret = 0;
	}

ehca_reg_mr_rpages_exit1:
	ehca_free_fw_ctrlblock(kpage);
ehca_reg_mr_rpages_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p pginfo=%p "
			 "num_pages=%lx num_4k=%lx", ret, shca, e_mr, pginfo,
			 pginfo->num_pages, pginfo->num_4k);
	return ret;
} /* end ehca_reg_mr_rpages() */

/*----------------------------------------------------------------------*/
inline int ehca_rereg_mr_rereg1(struct ehca_shca *shca,
				struct ehca_mr *e_mr,
				u64 *iova_start,
				u64 size,
				u32 acl,
				struct ehca_pd *e_pd,
				struct ehca_mr_pginfo *pginfo,
				u32 *lkey, /*OUT*/
				u32 *rkey) /*OUT*/
{
	int ret;
	u64 h_ret;
	u32 hipz_acl;
	u64 *kpage;
	u64 rpage;
	struct ehca_mr_pginfo pginfo_save;
	struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};

	ehca_mrmw_map_acl(acl, &hipz_acl);
	ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);

	kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	if (!kpage) {
		ehca_err(&shca->ib_device, "kpage alloc failed");
		ret = -ENOMEM;
		goto ehca_rereg_mr_rereg1_exit0;
	}

	pginfo_save = *pginfo;
	ret = ehca_set_pagebuf(e_mr, pginfo, pginfo->num_4k, kpage);
	if (ret) {
		ehca_err(&shca->ib_device, "set pagebuf failed, e_mr=%p "
			 "pginfo=%p type=%x num_pages=%lx num_4k=%lx kpage=%p",
			 e_mr, pginfo, pginfo->type, pginfo->num_pages,
			 pginfo->num_4k, kpage);
		goto ehca_rereg_mr_rereg1_exit1;
	}
	rpage = virt_to_abs(kpage);
	if (!rpage) {
		ehca_err(&shca->ib_device, "kpage=%p", kpage);
		ret = -EFAULT;
		goto ehca_rereg_mr_rereg1_exit1;
	}
	h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_mr,
				      (u64)iova_start, size, hipz_acl,
				      e_pd->fw_pd, rpage, &hipzout);
	if (h_ret != H_SUCCESS) {
		/*
		 * reregistration unsuccessful, try it again with the 3 hCalls,
		 * e.g. this is required in case H_MR_CONDITION
		 * (MW bound or MR is shared)
		 */
		ehca_warn(&shca->ib_device, "hipz_h_reregister_pmr failed "
			  "(Rereg1), h_ret=%lx e_mr=%p", h_ret, e_mr);
		*pginfo = pginfo_save;
		ret = -EAGAIN;
	} else if ((u64 *)hipzout.vaddr != iova_start) {
		ehca_err(&shca->ib_device, "PHYP changed iova_start in "
			 "rereg_pmr, iova_start=%p iova_start_out=%lx e_mr=%p "
			 "mr_handle=%lx lkey=%x lkey_out=%x", iova_start,
			 hipzout.vaddr, e_mr, e_mr->ipz_mr_handle.handle,
			 e_mr->ib.ib_mr.lkey, hipzout.lkey);
		ret = -EFAULT;
	} else {
		/*
		 * successful reregistration
		 * note: start and start_out are identical for eServer HCAs
		 */
		e_mr->num_pages = pginfo->num_pages;
		e_mr->num_4k    = pginfo->num_4k;
		e_mr->start     = iova_start;
		e_mr->size      = size;
		e_mr->acl       = acl;
		*lkey = hipzout.lkey;
		*rkey = hipzout.rkey;
	}

ehca_rereg_mr_rereg1_exit1:
	ehca_free_fw_ctrlblock(kpage);
ehca_rereg_mr_rereg1_exit0:
	if ( ret && (ret != -EAGAIN) )
		ehca_err(&shca->ib_device, "ret=%x lkey=%x rkey=%x "
			 "pginfo=%p num_pages=%lx num_4k=%lx",
			 ret, *lkey, *rkey, pginfo, pginfo->num_pages,
			 pginfo->num_4k);
	return ret;
} /* end ehca_rereg_mr_rereg1() */

/*----------------------------------------------------------------------*/
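/*
 * internal helper: re-register an MR, preferring the single
 * hipz_h_reregister_pmr hCall (Rereg1) and falling back to
 * deregister/register (Rereg3) when Rereg1 is not possible.
 */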
int ehca_rereg_mr(struct ehca_shca *shca,
		  struct ehca_mr *e_mr,
		  u64 *iova_start,
		  u64 size,
		  int acl,
		  struct ehca_pd *e_pd,
		  struct ehca_mr_pginfo *pginfo,
		  u32 *lkey,
		  u32 *rkey)
{
	int ret = 0;
	u64 h_ret;
	int rereg_1_hcall = 1; /* 1: use hipz_h_reregister_pmr directly */
	int rereg_3_hcall = 0; /* 1: use 3 hipz calls for reregistration */

	/* first determine reregistration hCall(s) */
	if ((pginfo->num_4k > 512) || (e_mr->num_4k > 512) ||
	    (pginfo->num_4k > e_mr->num_4k)) {
		ehca_dbg(&shca->ib_device, "Rereg3 case, pginfo->num_4k=%lx "
			 "e_mr->num_4k=%x", pginfo->num_4k, e_mr->num_4k);
		rereg_1_hcall = 0;
		rereg_3_hcall = 1;
	}

	if (e_mr->flags & EHCA_MR_FLAG_MAXMR) {	/* check for max-MR */
		rereg_1_hcall = 0;
		rereg_3_hcall = 1;
		e_mr->flags &= ~EHCA_MR_FLAG_MAXMR;
		ehca_err(&shca->ib_device, "Rereg MR for max-MR! e_mr=%p",
			 e_mr);
	}

	if (rereg_1_hcall) {
		ret = ehca_rereg_mr_rereg1(shca, e_mr, iova_start, size,
					   acl, e_pd, pginfo, lkey, rkey);
		if (ret) {
			if (ret == -EAGAIN)
				rereg_3_hcall = 1;
			else
				goto ehca_rereg_mr_exit0;
		}
	}

	if (rereg_3_hcall) {
		struct ehca_mr save_mr;

		/* first deregister old MR */
		h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
		if (h_ret != H_SUCCESS) {
			ehca_err(&shca->ib_device, "hipz_free_mr failed, "
				 "h_ret=%lx e_mr=%p hca_hndl=%lx mr_hndl=%lx "
				 "mr->lkey=%x",
				 h_ret, e_mr, shca->ipz_hca_handle.handle,
				 e_mr->ipz_mr_handle.handle,
				 e_mr->ib.ib_mr.lkey);
			ret = ehca_mrmw_map_hrc_free_mr(h_ret);
			goto ehca_rereg_mr_exit0;
		}
		/* clean ehca_mr_t, without changing struct ib_mr and lock */
		save_mr = *e_mr;
		ehca_mr_deletenew(e_mr);

		/* set some MR values */
		e_mr->flags = save_mr.flags;
		e_mr->fmr_page_size = save_mr.fmr_page_size;
		e_mr->fmr_max_pages = save_mr.fmr_max_pages;
		e_mr->fmr_max_maps = save_mr.fmr_max_maps;
		e_mr->fmr_map_cnt = save_mr.fmr_map_cnt;

		ret = ehca_reg_mr(shca, e_mr, iova_start, size, acl,
				  e_pd, pginfo, lkey, rkey);
		if (ret) {
			u32 offset = (u64)(&e_mr->flags) - (u64)e_mr;
			memcpy(&e_mr->flags, &(save_mr.flags),
			       sizeof(struct ehca_mr) - offset);
			goto ehca_rereg_mr_exit0;
		}
	}

ehca_rereg_mr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p "
			 "iova_start=%p size=%lx acl=%x e_pd=%p pginfo=%p "
			 "num_pages=%lx lkey=%x rkey=%x rereg_1_hcall=%x "
			 "rereg_3_hcall=%x", ret, shca, e_mr, iova_start, size,
			 acl, e_pd, pginfo, pginfo->num_pages, *lkey, *rkey,
			 rereg_1_hcall, rereg_3_hcall);
	return ret;
} /* end ehca_rereg_mr() */

/*----------------------------------------------------------------------*/
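/*
 * internal helper: unmap one FMR, either by a length-0 reregistration hCall
 * or, for large FMRs, by freeing and re-registering the MR resource.
 */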
int ehca_unmap_one_fmr(struct ehca_shca *shca,
		       struct ehca_mr *e_fmr)
{
	int ret = 0;
	u64 h_ret;
	int rereg_1_hcall = 1; /* 1: use hipz_mr_reregister directly */
	int rereg_3_hcall = 0; /* 1: use 3 hipz calls for unmapping */
	struct ehca_pd *e_pd =
		container_of(e_fmr->ib.ib_fmr.pd, struct ehca_pd, ib_pd);
	struct ehca_mr save_fmr;
	u32 tmp_lkey, tmp_rkey;
	struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
	struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};

	/* first check if reregistration hCall can be used for unmap */
	if (e_fmr->fmr_max_pages > 512) {
		rereg_1_hcall = 0;
		rereg_3_hcall = 1;
	}

	if (rereg_1_hcall) {
		/*
		 * note: after using rereg hcall with len=0,
		 * rereg hcall must be used again for registering pages
		 */
		h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_fmr, 0,
					      0, 0, e_pd->fw_pd, 0, &hipzout);
		if (h_ret != H_SUCCESS) {
			/*
			 * should not happen, because length checked above,
			 * FMRs are not shared and no MW bound to FMRs
			 */
			ehca_err(&shca->ib_device, "hipz_reregister_pmr failed "
				 "(Rereg1), h_ret=%lx e_fmr=%p hca_hndl=%lx "
				 "mr_hndl=%lx lkey=%x lkey_out=%x",
				 h_ret, e_fmr, shca->ipz_hca_handle.handle,
				 e_fmr->ipz_mr_handle.handle,
				 e_fmr->ib.ib_fmr.lkey, hipzout.lkey);
			rereg_3_hcall = 1;
		} else {
			/* successful reregistration */
			e_fmr->start = NULL;
			e_fmr->size = 0;
			tmp_lkey = hipzout.lkey;
			tmp_rkey = hipzout.rkey;
		}
	}

	if (rereg_3_hcall) {
		struct ehca_mr save_mr;

		/* first free old FMR */
		h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
		if (h_ret != H_SUCCESS) {
			ehca_err(&shca->ib_device, "hipz_free_mr failed, "
				 "h_ret=%lx e_fmr=%p hca_hndl=%lx mr_hndl=%lx "
				 "lkey=%x",
				 h_ret, e_fmr, shca->ipz_hca_handle.handle,
				 e_fmr->ipz_mr_handle.handle,
				 e_fmr->ib.ib_fmr.lkey);
			ret = ehca_mrmw_map_hrc_free_mr(h_ret);
			goto ehca_unmap_one_fmr_exit0;
		}
		/* clean ehca_mr_t, without changing lock */
		save_fmr = *e_fmr;
		ehca_mr_deletenew(e_fmr);

		/* set some MR values */
		e_fmr->flags = save_fmr.flags;
		e_fmr->fmr_page_size = save_fmr.fmr_page_size;
		e_fmr->fmr_max_pages = save_fmr.fmr_max_pages;
		e_fmr->fmr_max_maps = save_fmr.fmr_max_maps;
		e_fmr->fmr_map_cnt = save_fmr.fmr_map_cnt;
		e_fmr->acl = save_fmr.acl;

		pginfo.type = EHCA_MR_PGI_FMR;
		pginfo.num_pages = 0;
		pginfo.num_4k = 0;
		ret = ehca_reg_mr(shca, e_fmr, NULL,
				  (e_fmr->fmr_max_pages * e_fmr->fmr_page_size),
				  e_fmr->acl, e_pd, &pginfo, &tmp_lkey,
				  &tmp_rkey);
		if (ret) {
			u32 offset = (u64)(&e_fmr->flags) - (u64)e_fmr;
			memcpy(&e_fmr->flags, &(save_mr.flags),
			       sizeof(struct ehca_mr) - offset);
			goto ehca_unmap_one_fmr_exit0;
		}
	}

ehca_unmap_one_fmr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%x tmp_lkey=%x tmp_rkey=%x "
			 "fmr_max_pages=%x rereg_1_hcall=%x rereg_3_hcall=%x",
			 ret, tmp_lkey, tmp_rkey, e_fmr->fmr_max_pages,
			 rereg_1_hcall, rereg_3_hcall);
	return ret;
} /* end ehca_unmap_one_fmr() */

/*----------------------------------------------------------------------*/
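/*
 * internal helper: register a shared MR (SMR) that reuses the pages of an
 * existing MR under new access rights.
 */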
int ehca_reg_smr(struct ehca_shca *shca,
		 struct ehca_mr *e_origmr,
		 struct ehca_mr *e_newmr,
		 u64 *iova_start,
		 int acl,
		 struct ehca_pd *e_pd,
		 u32 *lkey, /*OUT*/
		 u32 *rkey) /*OUT*/
{
	int ret = 0;
	u64 h_ret;
	u32 hipz_acl;
	struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};

	ehca_mrmw_map_acl(acl, &hipz_acl);
	ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);

	h_ret = hipz_h_register_smr(shca->ipz_hca_handle, e_newmr, e_origmr,
				    (u64)iova_start, hipz_acl, e_pd->fw_pd,
				    &hipzout);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lx "
			 "shca=%p e_origmr=%p e_newmr=%p iova_start=%p acl=%x "
			 "e_pd=%p hca_hndl=%lx mr_hndl=%lx lkey=%x",
			 h_ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd,
			 shca->ipz_hca_handle.handle,
			 e_origmr->ipz_mr_handle.handle,
			 e_origmr->ib.ib_mr.lkey);
		ret = ehca_mrmw_map_hrc_reg_smr(h_ret);
		goto ehca_reg_smr_exit0;
	}
	/* successful registration */
	e_newmr->num_pages     = e_origmr->num_pages;
	e_newmr->num_4k        = e_origmr->num_4k;
	e_newmr->start         = iova_start;
	e_newmr->size          = e_origmr->size;
	e_newmr->acl           = acl;
	e_newmr->ipz_mr_handle = hipzout.handle;
	*lkey = hipzout.lkey;
	*rkey = hipzout.rkey;
	return 0;

ehca_reg_smr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%x shca=%p e_origmr=%p "
			 "e_newmr=%p iova_start=%p acl=%x e_pd=%p",
			 ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd);
	return ret;
} /* end ehca_reg_smr() */

/*----------------------------------------------------------------------*/
/* register internal max-MR to internal SHCA */
int ehca_reg_internal_maxmr(
	struct ehca_shca *shca,
	struct ehca_pd *e_pd,
	struct ehca_mr **e_maxmr)  /*OUT*/
{
	int ret;
	struct ehca_mr *e_mr;
	u64 *iova_start;
	u64 size_maxmr;
	struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
	struct ib_phys_buf ib_pbuf;
	u32 num_pages_mr;
	u32 num_pages_4k; /* 4k portion "pages" */

	e_mr = ehca_mr_new();
	if (!e_mr) {
		ehca_err(&shca->ib_device, "out of memory");
		ret = -ENOMEM;
		goto ehca_reg_internal_maxmr_exit0;
	}
	e_mr->flags |= EHCA_MR_FLAG_MAXMR;

	/* register internal max-MR on HCA */
	size_maxmr = (u64)high_memory - PAGE_OFFSET;
	iova_start = (u64 *)KERNELBASE;
	ib_pbuf.addr = 0;
	ib_pbuf.size = size_maxmr;
	num_pages_mr = ((((u64)iova_start % PAGE_SIZE) + size_maxmr +
			 PAGE_SIZE - 1) / PAGE_SIZE);
	num_pages_4k = ((((u64)iova_start % EHCA_PAGESIZE) + size_maxmr +
			 EHCA_PAGESIZE - 1) / EHCA_PAGESIZE);

	pginfo.type = EHCA_MR_PGI_PHYS;
	pginfo.num_pages = num_pages_mr;
	pginfo.num_4k = num_pages_4k;
	pginfo.num_phys_buf = 1;
	pginfo.phys_buf_array = &ib_pbuf;

	ret = ehca_reg_mr(shca, e_mr, iova_start, size_maxmr, 0, e_pd,
			  &pginfo, &e_mr->ib.ib_mr.lkey,
			  &e_mr->ib.ib_mr.rkey);
	if (ret) {
		ehca_err(&shca->ib_device, "reg of internal max MR failed, "
			 "e_mr=%p iova_start=%p size_maxmr=%lx num_pages_mr=%x "
			 "num_pages_4k=%x", e_mr, iova_start, size_maxmr,
			 num_pages_mr, num_pages_4k);
		goto ehca_reg_internal_maxmr_exit1;
	}

	/* successful registration of all pages */
	e_mr->ib.ib_mr.device = e_pd->ib_pd.device;
	e_mr->ib.ib_mr.pd = &e_pd->ib_pd;
	e_mr->ib.ib_mr.uobject = NULL;
	atomic_inc(&(e_pd->ib_pd.usecnt));
	atomic_set(&(e_mr->ib.ib_mr.usecnt), 0);
	*e_maxmr = e_mr;
	return 0;

ehca_reg_internal_maxmr_exit1:
	ehca_mr_delete(e_mr);
ehca_reg_internal_maxmr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%x shca=%p e_pd=%p e_maxmr=%p",
			 ret, shca, e_pd, e_maxmr);
	return ret;
} /* end ehca_reg_internal_maxmr() */

/*----------------------------------------------------------------------*/
int ehca_reg_maxmr(struct ehca_shca *shca,
		   struct ehca_mr *e_newmr,
		   u64 *iova_start,
		   int acl,
		   struct ehca_pd *e_pd,
		   u32 *lkey,
		   u32 *rkey)
{
	u64 h_ret;
	struct ehca_mr *e_origmr = shca->maxmr;
	u32 hipz_acl;
	struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};

	ehca_mrmw_map_acl(acl, &hipz_acl);
	ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);

	h_ret = hipz_h_register_smr(shca->ipz_hca_handle, e_newmr, e_origmr,
				    (u64)iova_start, hipz_acl, e_pd->fw_pd,
				    &hipzout);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lx "
			 "e_origmr=%p hca_hndl=%lx mr_hndl=%lx lkey=%x",
			 h_ret, e_origmr, shca->ipz_hca_handle.handle,
			 e_origmr->ipz_mr_handle.handle,
			 e_origmr->ib.ib_mr.lkey);
		return ehca_mrmw_map_hrc_reg_smr(h_ret);
	}
	/* successful registration */
	e_newmr->num_pages     = e_origmr->num_pages;
	e_newmr->num_4k        = e_origmr->num_4k;
	e_newmr->start         = iova_start;
	e_newmr->size          = e_origmr->size;
	e_newmr->acl           = acl;
	e_newmr->ipz_mr_handle = hipzout.handle;
	*lkey = hipzout.lkey;
	*rkey = hipzout.rkey;
	return 0;
} /* end ehca_reg_maxmr() */

/*----------------------------------------------------------------------*/
int ehca_dereg_internal_maxmr(struct ehca_shca *shca)
{
	int ret;
	struct ehca_mr *e_maxmr;
	struct ib_pd *ib_pd;

	if (!shca->maxmr) {
		ehca_err(&shca->ib_device, "bad call, shca=%p", shca);
		ret = -EINVAL;
		goto ehca_dereg_internal_maxmr_exit0;
	}

	e_maxmr = shca->maxmr;
	ib_pd = e_maxmr->ib.ib_mr.pd;
	shca->maxmr = NULL; /* remove internal max-MR indication from SHCA */

	ret = ehca_dereg_mr(&e_maxmr->ib.ib_mr);
	if (ret) {
		ehca_err(&shca->ib_device, "dereg internal max-MR failed, "
			 "ret=%x e_maxmr=%p shca=%p lkey=%x",
			 ret, e_maxmr, shca, e_maxmr->ib.ib_mr.lkey);
		shca->maxmr = e_maxmr;
		goto ehca_dereg_internal_maxmr_exit0;
	}

	atomic_dec(&ib_pd->usecnt);

ehca_dereg_internal_maxmr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%x shca=%p shca->maxmr=%p",
			 ret, shca, shca->maxmr);
	return ret;
} /* end ehca_dereg_internal_maxmr() */

/*----------------------------------------------------------------------*/
/*
 * check physical buffer array of MR verbs for validness and
 * calculates MR size
 */
int ehca_mr_chk_buf_and_calc_size(struct ib_phys_buf *phys_buf_array,
				  int num_phys_buf,
				  u64 *iova_start,
				  u64 *size)
{
	struct ib_phys_buf *pbuf = phys_buf_array;
	u64 size_count = 0;
	u32 i;

	if (num_phys_buf == 0) {
		ehca_gen_err("bad phys buf array len, num_phys_buf=0");
		return -EINVAL;
	}
	/* check first buffer */
	if (((u64)iova_start & ~PAGE_MASK) != (pbuf->addr & ~PAGE_MASK)) {
		ehca_gen_err("iova_start/addr mismatch, iova_start=%p "
			     "pbuf->addr=%lx pbuf->size=%lx",
			     iova_start, pbuf->addr, pbuf->size);
		return -EINVAL;
	}
	if (((pbuf->addr + pbuf->size) % PAGE_SIZE) &&
	    (num_phys_buf > 1)) {
		ehca_gen_err("addr/size mismatch in 1st buf, pbuf->addr=%lx "
			     "pbuf->size=%lx", pbuf->addr, pbuf->size);
		return -EINVAL;
	}

	for (i = 0; i < num_phys_buf; i++) {
		if ((i > 0) && (pbuf->addr % PAGE_SIZE)) {
			ehca_gen_err("bad address, i=%x pbuf->addr=%lx "
				     "pbuf->size=%lx",
				     i, pbuf->addr, pbuf->size);
			return -EINVAL;
		}
		if (((i > 0) &&	/* not 1st */
		     (i < (num_phys_buf - 1)) &&	/* not last */
		     (pbuf->size % PAGE_SIZE)) || (pbuf->size == 0)) {
			ehca_gen_err("bad size, i=%x pbuf->size=%lx",
				     i, pbuf->size);
			return -EINVAL;
		}
		size_count += pbuf->size;
		pbuf++;
	}

	*size = size_count;
	return 0;
} /* end ehca_mr_chk_buf_and_calc_size() */

/*----------------------------------------------------------------------*/
/* check page list of map FMR verb for validness */
int ehca_fmr_check_page_list(struct ehca_mr *e_fmr,
			     u64 *page_list,
			     int list_len)
{
	u32 i;
	u64 *page;

	if ((list_len == 0) || (list_len > e_fmr->fmr_max_pages)) {
		ehca_gen_err("bad list_len, list_len=%x "
			     "e_fmr->fmr_max_pages=%x fmr=%p",
			     list_len, e_fmr->fmr_max_pages, e_fmr);
		return -EINVAL;
	}

	/* each page must be aligned */
	page = page_list;
	for (i = 0; i < list_len; i++) {
		if (*page % e_fmr->fmr_page_size) {
			ehca_gen_err("bad page, i=%x *page=%lx page=%p fmr=%p "
				     "fmr_page_size=%x", i, *page, page, e_fmr,
				     e_fmr->fmr_page_size);
			return -EINVAL;
		}
		page++;
	}

	return 0;
} /* end ehca_fmr_check_page_list() */

/*----------------------------------------------------------------------*/
/* setup page buffer from page info */
int ehca_set_pagebuf(struct ehca_mr *e_mr,
		     struct ehca_mr_pginfo *pginfo,
		     u32 number,
		     u64 *kpage)
{
	int ret = 0;
	struct ib_umem_chunk *prev_chunk;
	struct ib_umem_chunk *chunk;
	struct ib_phys_buf *pbuf;
	u64 *fmrlist;
	u64 num4k, pgaddr, offs4k;
	u32 i = 0;
	u32 j = 0;

	if (pginfo->type == EHCA_MR_PGI_PHYS) {
		/* loop over desired phys_buf_array entries */
		while (i < number) {
			pbuf   = pginfo->phys_buf_array + pginfo->next_buf;
			num4k  = ((pbuf->addr % EHCA_PAGESIZE) + pbuf->size +
				  EHCA_PAGESIZE - 1) / EHCA_PAGESIZE;
			offs4k = (pbuf->addr & ~PAGE_MASK) / EHCA_PAGESIZE;
			while (pginfo->next_4k < offs4k + num4k) {
				if ((pginfo->page_cnt >= pginfo->num_pages) ||
				    (pginfo->page_4k_cnt >= pginfo->num_4k)) {
					ehca_gen_err("page_cnt >= num_pages, "
						     "page_cnt=%lx "
						     "num_pages=%lx "
						     "page_4k_cnt=%lx "
						     "num_4k=%lx i=%x",
						     pginfo->page_cnt,
						     pginfo->num_pages,
						     pginfo->page_4k_cnt,
						     pginfo->num_4k, i);
					ret = -EFAULT;
					goto ehca_set_pagebuf_exit0;
				}
				*kpage = phys_to_abs(
					(pbuf->addr & EHCA_PAGEMASK)
					+ (pginfo->next_4k * EHCA_PAGESIZE));
				if ( !(*kpage) && pbuf->addr ) {
					ehca_gen_err("pbuf->addr=%lx "
						     "pbuf->size=%lx "
						     "next_4k=%lx", pbuf->addr,
						     pbuf->size,
						     pginfo->next_4k);
					ret = -EFAULT;
					goto ehca_set_pagebuf_exit0;
				}
				(pginfo->page_4k_cnt)++;
				(pginfo->next_4k)++;
				if (pginfo->next_4k %
				    (PAGE_SIZE / EHCA_PAGESIZE) == 0)
					(pginfo->page_cnt)++;
				kpage++;
				i++;
				if (i >= number) break;
			}
			if (pginfo->next_4k >= offs4k + num4k) {
				(pginfo->next_buf)++;
				pginfo->next_4k = 0;
			}
		}
	} else if (pginfo->type == EHCA_MR_PGI_USER) {
		/* loop over desired chunk entries */
		chunk      = pginfo->next_chunk;
		prev_chunk = pginfo->next_chunk;
		list_for_each_entry_continue(chunk,
					     (&(pginfo->region->chunk_list)),
					     list) {
			for (i = pginfo->next_nmap; i < chunk->nmap; ) {
				pgaddr = ( page_to_pfn(chunk->page_list[i].page)
					   << PAGE_SHIFT );
				*kpage = phys_to_abs(pgaddr +
						     (pginfo->next_4k *
						      EHCA_PAGESIZE));
				if ( !(*kpage) ) {
					ehca_gen_err("pgaddr=%lx "
						     "chunk->page_list[i]=%lx "
						     "i=%x next_4k=%lx mr=%p",
						     pgaddr,
						     (u64)sg_dma_address(
							     &chunk->
							     page_list[i]),
						     i, pginfo->next_4k, e_mr);
					ret = -EFAULT;
					goto ehca_set_pagebuf_exit0;
				}
				(pginfo->page_4k_cnt)++;
				(pginfo->next_4k)++;
				kpage++;
				if (pginfo->next_4k %
				    (PAGE_SIZE / EHCA_PAGESIZE) == 0) {
					(pginfo->page_cnt)++;
					(pginfo->next_nmap)++;
					pginfo->next_4k = 0;
					i++;
				}
				j++;
				if (j >= number) break;
			}
			if ((pginfo->next_nmap >= chunk->nmap) &&
			    (j >= number)) {
				pginfo->next_nmap = 0;
				prev_chunk = chunk;
				break;
			} else if (pginfo->next_nmap >= chunk->nmap) {
				pginfo->next_nmap = 0;
				prev_chunk = chunk;
			} else if (j >= number)
				break;
			else
				prev_chunk = chunk;
		}
		pginfo->next_chunk =
			list_prepare_entry(prev_chunk,
					   (&(pginfo->region->chunk_list)),
					   list);
	} else if (pginfo->type == EHCA_MR_PGI_FMR) {
		/* loop over desired page_list entries */
		fmrlist = pginfo->page_list + pginfo->next_listelem;
		for (i = 0; i < number; i++) {
			*kpage = phys_to_abs((*fmrlist & EHCA_PAGEMASK) +
					     pginfo->next_4k * EHCA_PAGESIZE);
			if ( !(*kpage) ) {
				ehca_gen_err("*fmrlist=%lx fmrlist=%p "
					     "next_listelem=%lx next_4k=%lx",
					     *fmrlist, fmrlist,
					     pginfo->next_listelem,
					     pginfo->next_4k);
				ret = -EFAULT;
				goto ehca_set_pagebuf_exit0;
			}
			(pginfo->page_4k_cnt)++;
			(pginfo->next_4k)++;
			kpage++;
			if (pginfo->next_4k %
			    (e_mr->fmr_page_size / EHCA_PAGESIZE) == 0) {
				(pginfo->page_cnt)++;
				(pginfo->next_listelem)++;
				fmrlist++;
				pginfo->next_4k = 0;
			}
		}
	} else {
		ehca_gen_err("bad pginfo->type=%x", pginfo->type);
		ret = -EFAULT;
		goto ehca_set_pagebuf_exit0;
	}

ehca_set_pagebuf_exit0:
	if (ret)
		ehca_gen_err("ret=%x e_mr=%p pginfo=%p type=%x num_pages=%lx "
			     "num_4k=%lx next_buf=%lx next_4k=%lx number=%x "
			     "kpage=%p page_cnt=%lx page_4k_cnt=%lx i=%x "
			     "next_listelem=%lx region=%p next_chunk=%p "
			     "next_nmap=%lx", ret, e_mr, pginfo, pginfo->type,
			     pginfo->num_pages, pginfo->num_4k,
			     pginfo->next_buf, pginfo->next_4k, number, kpage,
			     pginfo->page_cnt, pginfo->page_4k_cnt, i,
			     pginfo->next_listelem, pginfo->region,
			     pginfo->next_chunk, pginfo->next_nmap);
	return ret;
} /* end ehca_set_pagebuf() */

/*----------------------------------------------------------------------*/
/* setup 1 page from page info page buffer */
int ehca_set_pagebuf_1(struct ehca_mr *e_mr,
		       struct ehca_mr_pginfo *pginfo,
		       u64 *rpage)
{
	int ret = 0;
	struct ib_phys_buf *tmp_pbuf;
	u64 *fmrlist;
	struct ib_umem_chunk *chunk;
	struct ib_umem_chunk *prev_chunk;
	u64 pgaddr, num4k, offs4k;

	if (pginfo->type == EHCA_MR_PGI_PHYS) {
		if ((pginfo->page_cnt >= pginfo->num_pages) ||
		    (pginfo->page_4k_cnt >= pginfo->num_4k)) {
			ehca_gen_err("page_cnt >= num_pages, page_cnt=%lx "
				     "num_pages=%lx page_4k_cnt=%lx num_4k=%lx",
				     pginfo->page_cnt, pginfo->num_pages,
				     pginfo->page_4k_cnt, pginfo->num_4k);
			ret = -EFAULT;
			goto ehca_set_pagebuf_1_exit0;
		}
		tmp_pbuf = pginfo->phys_buf_array + pginfo->next_buf;
		num4k  = ((tmp_pbuf->addr % EHCA_PAGESIZE) + tmp_pbuf->size +
			  EHCA_PAGESIZE - 1) / EHCA_PAGESIZE;
		offs4k = (tmp_pbuf->addr & ~PAGE_MASK) / EHCA_PAGESIZE;
		*rpage = phys_to_abs((tmp_pbuf->addr & EHCA_PAGEMASK) +
				     (pginfo->next_4k * EHCA_PAGESIZE));
		if ( !(*rpage) && tmp_pbuf->addr ) {
			ehca_gen_err("tmp_pbuf->addr=%lx"
				     " tmp_pbuf->size=%lx next_4k=%lx",
				     tmp_pbuf->addr, tmp_pbuf->size,
				     pginfo->next_4k);
			ret = -EFAULT;
			goto ehca_set_pagebuf_1_exit0;
		}
		(pginfo->page_4k_cnt)++;
		(pginfo->next_4k)++;
		if (pginfo->next_4k % (PAGE_SIZE / EHCA_PAGESIZE) == 0)
			(pginfo->page_cnt)++;
		if (pginfo->next_4k >= offs4k + num4k) {
			(pginfo->next_buf)++;
			pginfo->next_4k = 0;
		}
	} else if (pginfo->type == EHCA_MR_PGI_USER) {
		chunk      = pginfo->next_chunk;
		prev_chunk = pginfo->next_chunk;
		list_for_each_entry_continue(chunk,
					     (&(pginfo->region->chunk_list)),
					     list) {
			pgaddr = ( page_to_pfn(chunk->page_list[
						       pginfo->next_nmap].page)
				   << PAGE_SHIFT);
			*rpage = phys_to_abs(pgaddr +
					     (pginfo->next_4k * EHCA_PAGESIZE));
			if ( !(*rpage) ) {
				ehca_gen_err("pgaddr=%lx chunk->page_list[]=%lx"
					     " next_nmap=%lx next_4k=%lx mr=%p",
					     pgaddr, (u64)sg_dma_address(
						     &chunk->page_list[
							     pginfo->
							     next_nmap]),
					     pginfo->next_nmap, pginfo->next_4k,
					     e_mr);
				ret = -EFAULT;
				goto ehca_set_pagebuf_1_exit0;
			}
			(pginfo->page_4k_cnt)++;
			(pginfo->next_4k)++;
			if (pginfo->next_4k %
			    (PAGE_SIZE / EHCA_PAGESIZE) == 0) {
				(pginfo->page_cnt)++;
				(pginfo->next_nmap)++;
				pginfo->next_4k = 0;
			}
			if (pginfo->next_nmap >= chunk->nmap) {
				pginfo->next_nmap = 0;
				prev_chunk = chunk;
			}
			break;
		}
		pginfo->next_chunk =
			list_prepare_entry(prev_chunk,
					   (&(pginfo->region->chunk_list)),
					   list);
	} else if (pginfo->type == EHCA_MR_PGI_FMR) {
		fmrlist = pginfo->page_list + pginfo->next_listelem;
		*rpage = phys_to_abs((*fmrlist & EHCA_PAGEMASK) +
				     pginfo->next_4k * EHCA_PAGESIZE);
		if ( !(*rpage) ) {
			ehca_gen_err("*fmrlist=%lx fmrlist=%p "
				     "next_listelem=%lx next_4k=%lx",
				     *fmrlist, fmrlist, pginfo->next_listelem,
				     pginfo->next_4k);
			ret = -EFAULT;
			goto ehca_set_pagebuf_1_exit0;
		}
		(pginfo->page_4k_cnt)++;
		(pginfo->next_4k)++;
		if (pginfo->next_4k %
		    (e_mr->fmr_page_size / EHCA_PAGESIZE) == 0) {
			(pginfo->page_cnt)++;
			(pginfo->next_listelem)++;
			pginfo->next_4k = 0;
		}
	} else {
		ehca_gen_err("bad pginfo->type=%x", pginfo->type);
		ret = -EFAULT;
		goto ehca_set_pagebuf_1_exit0;
	}

ehca_set_pagebuf_1_exit0:
	if (ret)
		ehca_gen_err("ret=%x e_mr=%p pginfo=%p type=%x num_pages=%lx "
			     "num_4k=%lx next_buf=%lx next_4k=%lx rpage=%p "
			     "page_cnt=%lx page_4k_cnt=%lx next_listelem=%lx "
			     "region=%p next_chunk=%p next_nmap=%lx", ret, e_mr,
			     pginfo, pginfo->type, pginfo->num_pages,
			     pginfo->num_4k, pginfo->next_buf, pginfo->next_4k,
			     rpage, pginfo->page_cnt, pginfo->page_4k_cnt,
			     pginfo->next_listelem, pginfo->region,
			     pginfo->next_chunk, pginfo->next_nmap);
	return ret;
} /* end ehca_set_pagebuf_1() */

/*----------------------------------------------------------------------*/
/*
 * check MR if it is a max-MR, i.e. uses whole memory
 * in case it's a max-MR 1 is returned, else 0
 */
int ehca_mr_is_maxmr(u64 size,
		     u64 *iova_start)
{
	/* a MR is treated as max-MR only if it fits following: */
	if ((size == ((u64)high_memory - PAGE_OFFSET)) &&
	    (iova_start == (void*)KERNELBASE)) {
		ehca_gen_dbg("this is a max-MR");
		return 1;
	} else
		return 0;
} /* end ehca_mr_is_maxmr() */

/*----------------------------------------------------------------------*/
/* map access control for MR/MW. This routine is used for MR and MW. */
void ehca_mrmw_map_acl(int ib_acl,
		       u32 *hipz_acl)
{
	*hipz_acl = 0;
	if (ib_acl & IB_ACCESS_REMOTE_READ)
		*hipz_acl |= HIPZ_ACCESSCTRL_R_READ;
	if (ib_acl & IB_ACCESS_REMOTE_WRITE)
		*hipz_acl |= HIPZ_ACCESSCTRL_R_WRITE;
	if (ib_acl & IB_ACCESS_REMOTE_ATOMIC)
		*hipz_acl |= HIPZ_ACCESSCTRL_R_ATOMIC;
	if (ib_acl & IB_ACCESS_LOCAL_WRITE)
		*hipz_acl |= HIPZ_ACCESSCTRL_L_WRITE;
	if (ib_acl & IB_ACCESS_MW_BIND)
		*hipz_acl |= HIPZ_ACCESSCTRL_MW_BIND;
} /* end ehca_mrmw_map_acl() */

/*----------------------------------------------------------------------*/
/* sets page size in hipz access control for MR/MW. */
void ehca_mrmw_set_pgsize_hipz_acl(u32 *hipz_acl) /*INOUT*/
{
	return; /* HCA supports only 4k */
} /* end ehca_mrmw_set_pgsize_hipz_acl() */

/*----------------------------------------------------------------------*/
/*
 * reverse map access control for MR/MW.
 * This routine is used for MR and MW.
 */
void ehca_mrmw_reverse_map_acl(const u32 *hipz_acl,
			       int *ib_acl) /*OUT*/
{
	*ib_acl = 0;
	if (*hipz_acl & HIPZ_ACCESSCTRL_R_READ)
		*ib_acl |= IB_ACCESS_REMOTE_READ;
	if (*hipz_acl & HIPZ_ACCESSCTRL_R_WRITE)
		*ib_acl |= IB_ACCESS_REMOTE_WRITE;
	if (*hipz_acl & HIPZ_ACCESSCTRL_R_ATOMIC)
		*ib_acl |= IB_ACCESS_REMOTE_ATOMIC;
	if (*hipz_acl & HIPZ_ACCESSCTRL_L_WRITE)
		*ib_acl |= IB_ACCESS_LOCAL_WRITE;
	if (*hipz_acl & HIPZ_ACCESSCTRL_MW_BIND)
		*ib_acl |= IB_ACCESS_MW_BIND;
} /* end ehca_mrmw_reverse_map_acl() */

/*----------------------------------------------------------------------*/
/*
 * map HIPZ rc to IB retcodes for MR/MW allocations
 * Used for hipz_mr_reg_alloc and hipz_mw_alloc.
 */
int ehca_mrmw_map_hrc_alloc(const u64 hipz_rc)
{
	switch (hipz_rc) {
	case H_SUCCESS:	             /* successful completion */
		return 0;
	case H_ADAPTER_PARM:         /* invalid adapter handle */
	case H_RT_PARM:              /* invalid resource type */
	case H_NOT_ENOUGH_RESOURCES: /* insufficient resources */
	case H_MLENGTH_PARM:         /* invalid memory length */
	case H_MEM_ACCESS_PARM:      /* invalid access controls */
	case H_CONSTRAINED:          /* resource constraint */
		return -EINVAL;
	case H_BUSY:                 /* long busy */
		return -EBUSY;
	default:
		return -EINVAL;
	}
} /* end ehca_mrmw_map_hrc_alloc() */

/*----------------------------------------------------------------------*/
/*
 * map HIPZ rc to IB retcodes for MR register rpage
 * Used for hipz_h_register_rpage_mr at registering last page
 */
int ehca_mrmw_map_hrc_rrpg_last(const u64 hipz_rc)
{
	switch (hipz_rc) {
	case H_SUCCESS:         /* registration complete */
		return 0;
	case H_PAGE_REGISTERED:	/* page registered */
	case H_ADAPTER_PARM:    /* invalid adapter handle */
	case H_RH_PARM:         /* invalid resource handle */
/*	case H_QT_PARM:            invalid queue type */
	case H_PARAMETER:       /*
				 * invalid logical address,
				 * or count zero or greater 512
				 */
	case H_TABLE_FULL:      /* page table full */
	case H_HARDWARE:        /* HCA not operational */
		return -EINVAL;
	case H_BUSY:            /* long busy */
		return -EBUSY;
	default:
		return -EINVAL;
	}
} /* end ehca_mrmw_map_hrc_rrpg_last() */

/*----------------------------------------------------------------------*/
/*
 * map HIPZ rc to IB retcodes for MR register rpage
 * Used for hipz_h_register_rpage_mr at registering one page, but not last page
 */
int ehca_mrmw_map_hrc_rrpg_notlast(const u64 hipz_rc)
{
	switch (hipz_rc) {
	case H_PAGE_REGISTERED:	/* page registered */
		return 0;
	case H_SUCCESS:         /* registration complete */
	case H_ADAPTER_PARM:    /* invalid adapter handle */
	case H_RH_PARM:         /* invalid resource handle */
/*	case H_QT_PARM:            invalid queue type */
	case H_PARAMETER:       /*
				 * invalid logical address,
				 * or count zero or greater 512
				 */
	case H_TABLE_FULL:      /* page table full */
	case H_HARDWARE:        /* HCA not operational */
		return -EINVAL;
	case H_BUSY:            /* long busy */
		return -EBUSY;
	default:
		return -EINVAL;
	}
} /* end ehca_mrmw_map_hrc_rrpg_notlast() */

/*----------------------------------------------------------------------*/
/* map HIPZ rc to IB retcodes for MR query. Used for hipz_mr_query. */
int ehca_mrmw_map_hrc_query_mr(const u64 hipz_rc)
{
	switch (hipz_rc) {
	case H_SUCCESS:	     /* successful completion */
		return 0;
	case H_ADAPTER_PARM: /* invalid adapter handle */
	case H_RH_PARM:      /* invalid resource handle */
		return -EINVAL;
	case H_BUSY:         /* long busy */
		return -EBUSY;
	default:
		return -EINVAL;
	}
} /* end ehca_mrmw_map_hrc_query_mr() */

/*----------------------------------------------------------------------*/
/*----------------------------------------------------------------------*/
/*
 * map HIPZ rc to IB retcodes for freeing MR resource
 * Used for hipz_h_free_resource_mr
 */
int ehca_mrmw_map_hrc_free_mr(const u64 hipz_rc)
{
	switch (hipz_rc) {
	case H_SUCCESS:      /* resource freed */
		return 0;
	case H_ADAPTER_PARM: /* invalid adapter handle */
	case H_RH_PARM:      /* invalid resource handle */
	case H_R_STATE:      /* invalid resource state */
	case H_HARDWARE:     /* HCA not operational */
		return -EINVAL;
	case H_RESOURCE:     /* Resource in use */
	case H_BUSY:         /* long busy */
		return -EBUSY;
	default:
		return -EINVAL;
	}
} /* end ehca_mrmw_map_hrc_free_mr() */

/*----------------------------------------------------------------------*/
/*
 * map HIPZ rc to IB retcodes for freeing MW resource
 * Used for hipz_h_free_resource_mw
 */
int ehca_mrmw_map_hrc_free_mw(const u64 hipz_rc)
{
	switch (hipz_rc) {
	case H_SUCCESS:      /* resource freed */
		return 0;
	case H_ADAPTER_PARM: /* invalid adapter handle */
	case H_RH_PARM:      /* invalid resource handle */
	case H_R_STATE:      /* invalid resource state */
	case H_HARDWARE:     /* HCA not operational */
		return -EINVAL;
	case H_RESOURCE:     /* Resource in use */
	case H_BUSY:         /* long busy */
		return -EBUSY;
	default:
		return -EINVAL;
	}
} /* end ehca_mrmw_map_hrc_free_mw() */

/*----------------------------------------------------------------------*/
/*
 * map HIPZ rc to IB retcodes for SMR registrations
 * Used for hipz_h_register_smr.
 */
int ehca_mrmw_map_hrc_reg_smr(const u64 hipz_rc)
{
	switch (hipz_rc) {
	case H_SUCCESS:	             /* successful completion */
		return 0;
	case H_ADAPTER_PARM:         /* invalid adapter handle */
	case H_RH_PARM:              /* invalid resource handle */
	case H_MEM_PARM:             /* invalid MR virtual address */
	case H_MEM_ACCESS_PARM:      /* invalid access controls */
	case H_NOT_ENOUGH_RESOURCES: /* insufficient resources */
		return -EINVAL;
	case H_BUSY:                 /* long busy */
		return -EBUSY;
	default:
		return -EINVAL;
	}
} /* end ehca_mrmw_map_hrc_reg_smr() */

/*----------------------------------------------------------------------*/
/*
 * MR destructor and constructor
 * used in Reregister MR verb, sets all fields in ehca_mr_t to 0,
 * except struct ib_mr and spinlock
 */
void ehca_mr_deletenew(struct ehca_mr *mr)
{
	mr->flags         = 0;
	mr->num_pages     = 0;
	mr->num_4k        = 0;
	mr->acl           = 0;
	mr->start         = NULL;
	mr->fmr_page_size = 0;
	mr->fmr_max_pages = 0;
	mr->fmr_max_maps  = 0;
	mr->fmr_map_cnt   = 0;
	memset(&mr->ipz_mr_handle, 0, sizeof(mr->ipz_mr_handle));
	memset(&mr->galpas, 0, sizeof(mr->galpas));
	mr->nr_of_pages   = 0;
	mr->pagearray     = NULL;
} /* end ehca_mr_deletenew() */
int ehca_init_mrmw_cache(void)
{
	mr_cache = kmem_cache_create("ehca_cache_mr",
				     sizeof(struct ehca_mr), 0,
				     SLAB_HWCACHE_ALIGN,
				     NULL, NULL);
	if (!mr_cache)
		return -ENOMEM;
	mw_cache = kmem_cache_create("ehca_cache_mw",
				     sizeof(struct ehca_mw), 0,
				     SLAB_HWCACHE_ALIGN,
				     NULL, NULL);
	if (!mw_cache) {
		kmem_cache_destroy(mr_cache);
		mr_cache = NULL;
		return -ENOMEM;
	}
	return 0;
}
void ehca_cleanup_mrmw_cache(void)
{
	if (mr_cache)
		kmem_cache_destroy(mr_cache);
	if (mw_cache)
		kmem_cache_destroy(mw_cache);
}