/*
 *  IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 *  MR/MW functions
 *
 *  Authors: Dietmar Decker <ddecker@de.ibm.com>
 *           Christoph Raisch <raisch@de.ibm.com>
 *
 *  Copyright (c) 2005 IBM Corporation
 *
 *  All rights reserved.
 *
 *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
 *  BSD.
 *
 * OpenIB BSD License
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials
 * provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <asm/current.h>

#include "ehca_iverbs.h"
#include "ehca_mrmw.h"
#include "hcp_if.h"
#include "hipz_hw.h"

static struct kmem_cache *mr_cache;
static struct kmem_cache *mw_cache;

static struct ehca_mr *ehca_mr_new(void)
{
	struct ehca_mr *me;

	me = kmem_cache_alloc(mr_cache, GFP_KERNEL);
	if (me) {
		memset(me, 0, sizeof(struct ehca_mr));
		spin_lock_init(&me->mrlock);
	} else
		ehca_gen_err("alloc failed");

	return me;
}

static void ehca_mr_delete(struct ehca_mr *me)
{
	kmem_cache_free(mr_cache, me);
}

static struct ehca_mw *ehca_mw_new(void)
{
	struct ehca_mw *me;

	me = kmem_cache_alloc(mw_cache, GFP_KERNEL);
	if (me) {
		memset(me, 0, sizeof(struct ehca_mw));
		spin_lock_init(&me->mwlock);
	} else
		ehca_gen_err("alloc failed");

	return me;
}

static void ehca_mw_delete(struct ehca_mw *me)
{
	kmem_cache_free(mw_cache, me);
}

/*----------------------------------------------------------------------*/
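
/*
 * Note (added for clarity, not from the original sources): the four helpers
 * above are simple slab-backed constructors/destructors and callers are
 * expected to pair them, e.g. in a sketch:
 *
 *	struct ehca_mr *mr = ehca_mr_new();
 *	if (!mr)
 *		return -ENOMEM;
 *	...use mr...
 *	ehca_mr_delete(mr);
 *
 * The caches themselves are created in ehca_init_mrmw_cache() at the end of
 * this file.
 */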

struct ib_mr *ehca_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
{
	struct ib_mr *ib_mr;
	int ret;
	struct ehca_mr *e_maxmr;
	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
	struct ehca_shca *shca =
		container_of(pd->device, struct ehca_shca, ib_device);

	if (shca->maxmr) {
		e_maxmr = ehca_mr_new();
		if (!e_maxmr) {
			ehca_err(&shca->ib_device, "out of memory");
			ib_mr = ERR_PTR(-ENOMEM);
			goto get_dma_mr_exit0;
		}

		ret = ehca_reg_maxmr(shca, e_maxmr, (u64 *)KERNELBASE,
				     mr_access_flags, e_pd,
				     &e_maxmr->ib.ib_mr.lkey,
				     &e_maxmr->ib.ib_mr.rkey);
		if (ret) {
			ib_mr = ERR_PTR(ret);
			goto get_dma_mr_exit0;
		}
		ib_mr = &e_maxmr->ib.ib_mr;
	} else {
		ehca_err(&shca->ib_device, "no internal max-MR exist!");
		ib_mr = ERR_PTR(-EINVAL);
		goto get_dma_mr_exit0;
	}

get_dma_mr_exit0:
	if (IS_ERR(ib_mr))
		ehca_err(&shca->ib_device, "rc=%lx pd=%p mr_access_flags=%x ",
			 PTR_ERR(ib_mr), pd, mr_access_flags);
	return ib_mr;
} /* end ehca_get_dma_mr() */

/*----------------------------------------------------------------------*/
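
/*
 * Note (added for clarity): ehca_get_dma_mr() does not register new pages;
 * it hands out a shared registration of the internal max-MR that covers
 * kernel memory starting at KERNELBASE, so it only succeeds once
 * shca->maxmr has been set up by ehca_reg_internal_maxmr().
 */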

struct ib_mr *ehca_reg_phys_mr(struct ib_pd *pd,
			       struct ib_phys_buf *phys_buf_array,
			       int num_phys_buf,
			       int mr_access_flags,
			       u64 *iova_start)
{
	struct ib_mr *ib_mr;
	int ret;
	struct ehca_mr *e_mr;
	struct ehca_shca *shca =
		container_of(pd->device, struct ehca_shca, ib_device);
	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);

	u64 size;
	struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
	u32 num_pages_mr;
	u32 num_pages_4k; /* 4k portion "pages" */

	if ((num_phys_buf <= 0) || !phys_buf_array) {
		ehca_err(pd->device, "bad input values: num_phys_buf=%x "
			 "phys_buf_array=%p", num_phys_buf, phys_buf_array);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_phys_mr_exit0;
	}
	if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
	    ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
		/*
		 * Remote Write Access requires Local Write Access
		 * Remote Atomic Access requires Local Write Access
		 */
		ehca_err(pd->device, "bad input values: mr_access_flags=%x",
			 mr_access_flags);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_phys_mr_exit0;
	}

	/* check physical buffer list and calculate size */
	ret = ehca_mr_chk_buf_and_calc_size(phys_buf_array, num_phys_buf,
					    iova_start, &size);
	if (ret) {
		ib_mr = ERR_PTR(ret);
		goto reg_phys_mr_exit0;
	}
	if ((size == 0) ||
	    (((u64)iova_start + size) < (u64)iova_start)) {
		ehca_err(pd->device, "bad input values: size=%lx iova_start=%p",
			 size, iova_start);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_phys_mr_exit0;
	}

	e_mr = ehca_mr_new();
	if (!e_mr) {
		ehca_err(pd->device, "out of memory");
		ib_mr = ERR_PTR(-ENOMEM);
		goto reg_phys_mr_exit0;
	}

	/* determine number of MR pages */
	num_pages_mr = ((((u64)iova_start % PAGE_SIZE) + size +
			 PAGE_SIZE - 1) / PAGE_SIZE);
	num_pages_4k = ((((u64)iova_start % EHCA_PAGESIZE) + size +
			 EHCA_PAGESIZE - 1) / EHCA_PAGESIZE);

	/* register MR on HCA */
	if (ehca_mr_is_maxmr(size, iova_start)) {
		e_mr->flags |= EHCA_MR_FLAG_MAXMR;
		ret = ehca_reg_maxmr(shca, e_mr, iova_start, mr_access_flags,
				     e_pd, &e_mr->ib.ib_mr.lkey,
				     &e_mr->ib.ib_mr.rkey);
		if (ret) {
			ib_mr = ERR_PTR(ret);
			goto reg_phys_mr_exit1;
		}
	} else {
		pginfo.type           = EHCA_MR_PGI_PHYS;
		pginfo.num_pages      = num_pages_mr;
		pginfo.num_4k         = num_pages_4k;
		pginfo.num_phys_buf   = num_phys_buf;
		pginfo.phys_buf_array = phys_buf_array;
		pginfo.next_4k        = (((u64)iova_start & ~PAGE_MASK) /
					 EHCA_PAGESIZE);

		ret = ehca_reg_mr(shca, e_mr, iova_start, size, mr_access_flags,
				  e_pd, &pginfo, &e_mr->ib.ib_mr.lkey,
				  &e_mr->ib.ib_mr.rkey);
		if (ret) {
			ib_mr = ERR_PTR(ret);
			goto reg_phys_mr_exit1;
		}
	}

	/* successful registration of all pages */
	return &e_mr->ib.ib_mr;

reg_phys_mr_exit1:
	ehca_mr_delete(e_mr);
reg_phys_mr_exit0:
	if (IS_ERR(ib_mr))
		ehca_err(pd->device, "rc=%lx pd=%p phys_buf_array=%p "
			 "num_phys_buf=%x mr_access_flags=%x iova_start=%p",
			 PTR_ERR(ib_mr), pd, phys_buf_array,
			 num_phys_buf, mr_access_flags, iova_start);
	return ib_mr;
} /* end ehca_reg_phys_mr() */

/*----------------------------------------------------------------------*/
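
/*
 * Illustrative arithmetic (not from the original source): with a 4K
 * PAGE_SIZE and EHCA_PAGESIZE, a buffer of size 0x2000 starting at
 * iova_start 0x1003 needs
 *	num_pages_mr = ((0x1003 % 0x1000) + 0x2000 + 0xfff) / 0x1000 = 3
 * pages, because the unaligned start pulls in one extra page.
 */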

struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd,
			       struct ib_umem *region,
			       int mr_access_flags,
			       struct ib_udata *udata)
{
	struct ib_mr *ib_mr;
	struct ehca_mr *e_mr;
	struct ehca_shca *shca =
		container_of(pd->device, struct ehca_shca, ib_device);
	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
	struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
	int ret;
	u32 num_pages_mr;
	u32 num_pages_4k; /* 4k portion "pages" */

	if (!pd) {
		ehca_gen_err("bad pd=%p", pd);
		return ERR_PTR(-EFAULT);
	}
	if (!region) {
		ehca_err(pd->device, "bad input values: region=%p", region);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_user_mr_exit0;
	}
	if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
	    ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
		/*
		 * Remote Write Access requires Local Write Access
		 * Remote Atomic Access requires Local Write Access
		 */
		ehca_err(pd->device, "bad input values: mr_access_flags=%x",
			 mr_access_flags);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_user_mr_exit0;
	}
	if (region->page_size != PAGE_SIZE) {
		ehca_err(pd->device, "page size not supported, "
			 "region->page_size=%x", region->page_size);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_user_mr_exit0;
	}

	if ((region->length == 0) ||
	    ((region->virt_base + region->length) < region->virt_base)) {
		ehca_err(pd->device, "bad input values: length=%lx "
			 "virt_base=%lx", region->length, region->virt_base);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_user_mr_exit0;
	}

	e_mr = ehca_mr_new();
	if (!e_mr) {
		ehca_err(pd->device, "out of memory");
		ib_mr = ERR_PTR(-ENOMEM);
		goto reg_user_mr_exit0;
	}

	/* determine number of MR pages */
	num_pages_mr = (((region->virt_base % PAGE_SIZE) + region->length +
			 PAGE_SIZE - 1) / PAGE_SIZE);
	num_pages_4k = (((region->virt_base % EHCA_PAGESIZE) + region->length +
			 EHCA_PAGESIZE - 1) / EHCA_PAGESIZE);

	/* register MR on HCA */
	pginfo.type       = EHCA_MR_PGI_USER;
	pginfo.num_pages  = num_pages_mr;
	pginfo.num_4k     = num_pages_4k;
	pginfo.region     = region;
	pginfo.next_4k    = region->offset / EHCA_PAGESIZE;
	pginfo.next_chunk = list_prepare_entry(pginfo.next_chunk,
					       (&region->chunk_list),
					       list);

	ret = ehca_reg_mr(shca, e_mr, (u64 *)region->virt_base,
			  region->length, mr_access_flags, e_pd, &pginfo,
			  &e_mr->ib.ib_mr.lkey, &e_mr->ib.ib_mr.rkey);
	if (ret) {
		ib_mr = ERR_PTR(ret);
		goto reg_user_mr_exit1;
	}

	/* successful registration of all pages */
	return &e_mr->ib.ib_mr;

reg_user_mr_exit1:
	ehca_mr_delete(e_mr);
reg_user_mr_exit0:
	if (IS_ERR(ib_mr))
		ehca_err(pd->device, "rc=%lx pd=%p region=%p mr_access_flags=%x"
			 " udata=%p",
			 PTR_ERR(ib_mr), pd, region, mr_access_flags, udata);
	return ib_mr;
} /* end ehca_reg_user_mr() */

/*----------------------------------------------------------------------*/
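
/*
 * Note (added for clarity): for user MRs the page info walks the umem
 * chunk list of the region; pginfo.next_4k starts at
 * region->offset / EHCA_PAGESIZE so the first hardware page lines up with
 * the user buffer offset inside its first kernel page.
 */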

int ehca_rereg_phys_mr(struct ib_mr *mr,
		       int mr_rereg_mask,
		       struct ib_pd *pd,
		       struct ib_phys_buf *phys_buf_array,
		       int num_phys_buf,
		       int mr_access_flags,
		       u64 *iova_start)
{
	int ret;

	struct ehca_shca *shca =
		container_of(mr->device, struct ehca_shca, ib_device);
	struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
	struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
	u64 new_size;
	u64 *new_start;
	u32 new_acl;
	struct ehca_pd *new_pd;
	u32 tmp_lkey, tmp_rkey;
	unsigned long sl_flags;
	u32 num_pages_mr = 0;
	u32 num_pages_4k = 0; /* 4k portion "pages" */
	struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
	u32 cur_pid = current->tgid;

	if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
	    (my_pd->ownpid != cur_pid)) {
		ehca_err(mr->device, "Invalid caller pid=%x ownpid=%x",
			 cur_pid, my_pd->ownpid);
		ret = -EINVAL;
		goto rereg_phys_mr_exit0;
	}

	if (!(mr_rereg_mask & IB_MR_REREG_TRANS)) {
		/* TODO not supported, because PHYP rereg hCall needs pages */
		ehca_err(mr->device, "rereg without IB_MR_REREG_TRANS not "
			 "supported yet, mr_rereg_mask=%x", mr_rereg_mask);
		ret = -EINVAL;
		goto rereg_phys_mr_exit0;
	}

	if (mr_rereg_mask & IB_MR_REREG_PD) {
		if (!pd) {
			ehca_err(mr->device, "rereg with bad pd, pd=%p "
				 "mr_rereg_mask=%x", pd, mr_rereg_mask);
			ret = -EINVAL;
			goto rereg_phys_mr_exit0;
		}
	}

	if ((mr_rereg_mask &
	     ~(IB_MR_REREG_TRANS | IB_MR_REREG_PD | IB_MR_REREG_ACCESS)) ||
	    (mr_rereg_mask == 0)) {
		ret = -EINVAL;
		goto rereg_phys_mr_exit0;
	}

	/* check other parameters */
	if (e_mr == shca->maxmr) {
		/* should be impossible, however reject to be sure */
		ehca_err(mr->device, "rereg internal max-MR impossible, mr=%p "
			 "shca->maxmr=%p mr->lkey=%x",
			 mr, shca->maxmr, mr->lkey);
		ret = -EINVAL;
		goto rereg_phys_mr_exit0;
	}
	if (mr_rereg_mask & IB_MR_REREG_TRANS) { /* transl., i.e. addr/size */
		if (e_mr->flags & EHCA_MR_FLAG_FMR) {
			ehca_err(mr->device, "not supported for FMR, mr=%p "
				 "flags=%x", mr, e_mr->flags);
			ret = -EINVAL;
			goto rereg_phys_mr_exit0;
		}
		if (!phys_buf_array || num_phys_buf <= 0) {
			ehca_err(mr->device, "bad input values: mr_rereg_mask=%x"
				 " phys_buf_array=%p num_phys_buf=%x",
				 mr_rereg_mask, phys_buf_array, num_phys_buf);
			ret = -EINVAL;
			goto rereg_phys_mr_exit0;
		}
	}
	if ((mr_rereg_mask & IB_MR_REREG_ACCESS) &&	/* change ACL */
	    (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
	      !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
	     ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
	      !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)))) {
		/*
		 * Remote Write Access requires Local Write Access
		 * Remote Atomic Access requires Local Write Access
		 */
		ehca_err(mr->device, "bad input values: mr_rereg_mask=%x "
			 "mr_access_flags=%x", mr_rereg_mask, mr_access_flags);
		ret = -EINVAL;
		goto rereg_phys_mr_exit0;
	}

	/* set requested values dependent on rereg request */
	spin_lock_irqsave(&e_mr->mrlock, sl_flags);
	new_start = e_mr->start;  /* new == old address */
	new_size  = e_mr->size;	  /* new == old length */
	new_acl   = e_mr->acl;	  /* new == old access control */
	new_pd    = container_of(mr->pd,struct ehca_pd,ib_pd); /*new == old PD*/

	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
		new_start = iova_start;	/* change address */
		/* check physical buffer list and calculate size */
		ret = ehca_mr_chk_buf_and_calc_size(phys_buf_array,
						    num_phys_buf, iova_start,
						    &new_size);
		if (ret)
			goto rereg_phys_mr_exit1;
		if ((new_size == 0) ||
		    (((u64)iova_start + new_size) < (u64)iova_start)) {
			ehca_err(mr->device, "bad input values: new_size=%lx "
				 "iova_start=%p", new_size, iova_start);
			ret = -EINVAL;
			goto rereg_phys_mr_exit1;
		}
		num_pages_mr = ((((u64)new_start % PAGE_SIZE) + new_size +
				 PAGE_SIZE - 1) / PAGE_SIZE);
		num_pages_4k = ((((u64)new_start % EHCA_PAGESIZE) + new_size +
				 EHCA_PAGESIZE - 1) / EHCA_PAGESIZE);
		pginfo.type           = EHCA_MR_PGI_PHYS;
		pginfo.num_pages      = num_pages_mr;
		pginfo.num_4k         = num_pages_4k;
		pginfo.num_phys_buf   = num_phys_buf;
		pginfo.phys_buf_array = phys_buf_array;
		pginfo.next_4k        = (((u64)iova_start & ~PAGE_MASK) /
					 EHCA_PAGESIZE);
	}
	if (mr_rereg_mask & IB_MR_REREG_ACCESS)
		new_acl = mr_access_flags;
	if (mr_rereg_mask & IB_MR_REREG_PD)
		new_pd = container_of(pd, struct ehca_pd, ib_pd);

	ret = ehca_rereg_mr(shca, e_mr, new_start, new_size, new_acl,
			    new_pd, &pginfo, &tmp_lkey, &tmp_rkey);
	if (ret)
		goto rereg_phys_mr_exit1;

	/* successful reregistration */
	if (mr_rereg_mask & IB_MR_REREG_PD)
		mr->pd = pd;
	mr->lkey = tmp_lkey;
	mr->rkey = tmp_rkey;

rereg_phys_mr_exit1:
	spin_unlock_irqrestore(&e_mr->mrlock, sl_flags);
rereg_phys_mr_exit0:
	if (ret)
		ehca_err(mr->device, "ret=%x mr=%p mr_rereg_mask=%x pd=%p "
			 "phys_buf_array=%p num_phys_buf=%x mr_access_flags=%x "
			 "iova_start=%p",
			 ret, mr, mr_rereg_mask, pd, phys_buf_array,
			 num_phys_buf, mr_access_flags, iova_start);
	return ret;
} /* end ehca_rereg_phys_mr() */

/*----------------------------------------------------------------------*/
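
/*
 * Note (added for clarity): ehca_rereg_phys_mr() starts from the old MR
 * values (start, size, acl, pd) and only overrides those selected by
 * mr_rereg_mask, so e.g. a mask of IB_MR_REREG_TRANS | IB_MR_REREG_ACCESS
 * keeps the old PD while changing address/size and access rights.
 */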

int ehca_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
{
	int ret = 0;
	u64 h_ret;
	struct ehca_shca *shca =
		container_of(mr->device, struct ehca_shca, ib_device);
	struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
	struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
	u32 cur_pid = current->tgid;
	unsigned long sl_flags;
	struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};

	if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
	    (my_pd->ownpid != cur_pid)) {
		ehca_err(mr->device, "Invalid caller pid=%x ownpid=%x",
			 cur_pid, my_pd->ownpid);
		ret = -EINVAL;
		goto query_mr_exit0;
	}

	if ((e_mr->flags & EHCA_MR_FLAG_FMR)) {
		ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p "
			 "e_mr->flags=%x", mr, e_mr, e_mr->flags);
		ret = -EINVAL;
		goto query_mr_exit0;
	}

	memset(mr_attr, 0, sizeof(struct ib_mr_attr));
	spin_lock_irqsave(&e_mr->mrlock, sl_flags);

	h_ret = hipz_h_query_mr(shca->ipz_hca_handle, e_mr, &hipzout);
	if (h_ret != H_SUCCESS) {
		ehca_err(mr->device, "hipz_mr_query failed, h_ret=%lx mr=%p "
			 "hca_hndl=%lx mr_hndl=%lx lkey=%x",
			 h_ret, mr, shca->ipz_hca_handle.handle,
			 e_mr->ipz_mr_handle.handle, mr->lkey);
		ret = ehca_mrmw_map_hrc_query_mr(h_ret);
		goto query_mr_exit1;
	}
	mr_attr->pd               = mr->pd;
	mr_attr->device_virt_addr = hipzout.vaddr;
	mr_attr->size             = hipzout.len;
	mr_attr->lkey             = hipzout.lkey;
	mr_attr->rkey             = hipzout.rkey;
	ehca_mrmw_reverse_map_acl(&hipzout.acl, &mr_attr->mr_access_flags);

query_mr_exit1:
	spin_unlock_irqrestore(&e_mr->mrlock, sl_flags);
query_mr_exit0:
	if (ret)
		ehca_err(mr->device, "ret=%x mr=%p mr_attr=%p",
			 ret, mr, mr_attr);
	return ret;
} /* end ehca_query_mr() */

/*----------------------------------------------------------------------*/

int ehca_dereg_mr(struct ib_mr *mr)
{
	int ret = 0;
	u64 h_ret;
	struct ehca_shca *shca =
		container_of(mr->device, struct ehca_shca, ib_device);
	struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
	struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
	u32 cur_pid = current->tgid;

	if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
	    (my_pd->ownpid != cur_pid)) {
		ehca_err(mr->device, "Invalid caller pid=%x ownpid=%x",
			 cur_pid, my_pd->ownpid);
		ret = -EINVAL;
		goto dereg_mr_exit0;
	}

	if ((e_mr->flags & EHCA_MR_FLAG_FMR)) {
		ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p "
			 "e_mr->flags=%x", mr, e_mr, e_mr->flags);
		ret = -EINVAL;
		goto dereg_mr_exit0;
	} else if (e_mr == shca->maxmr) {
		/* should be impossible, however reject to be sure */
		ehca_err(mr->device, "dereg internal max-MR impossible, mr=%p "
			 "shca->maxmr=%p mr->lkey=%x",
			 mr, shca->maxmr, mr->lkey);
		ret = -EINVAL;
		goto dereg_mr_exit0;
	}

	/* TODO: BUSY: MR still has bound window(s) */
	h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
	if (h_ret != H_SUCCESS) {
		ehca_err(mr->device, "hipz_free_mr failed, h_ret=%lx shca=%p "
			 "e_mr=%p hca_hndl=%lx mr_hndl=%lx mr->lkey=%x",
			 h_ret, shca, e_mr, shca->ipz_hca_handle.handle,
			 e_mr->ipz_mr_handle.handle, mr->lkey);
		ret = ehca_mrmw_map_hrc_free_mr(h_ret);
		goto dereg_mr_exit0;
	}

	/* successful deregistration */
	ehca_mr_delete(e_mr);

dereg_mr_exit0:
	if (ret)
		ehca_err(mr->device, "ret=%x mr=%p", ret, mr);
	return ret;
} /* end ehca_dereg_mr() */

/*----------------------------------------------------------------------*/

struct ib_mw *ehca_alloc_mw(struct ib_pd *pd)
{
	struct ib_mw *ib_mw;
	u64 h_ret;
	struct ehca_mw *e_mw;
	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
	struct ehca_shca *shca =
		container_of(pd->device, struct ehca_shca, ib_device);
	struct ehca_mw_hipzout_parms hipzout = {{0},0};

	e_mw = ehca_mw_new();
	if (!e_mw) {
		ib_mw = ERR_PTR(-ENOMEM);
		goto alloc_mw_exit0;
	}

	h_ret = hipz_h_alloc_resource_mw(shca->ipz_hca_handle, e_mw,
					 e_pd->fw_pd, &hipzout);
	if (h_ret != H_SUCCESS) {
		ehca_err(pd->device, "hipz_mw_allocate failed, h_ret=%lx "
			 "shca=%p hca_hndl=%lx mw=%p",
			 h_ret, shca, shca->ipz_hca_handle.handle, e_mw);
		ib_mw = ERR_PTR(ehca_mrmw_map_hrc_alloc(h_ret));
		goto alloc_mw_exit1;
	}
	/* successful MW allocation */
	e_mw->ipz_mw_handle = hipzout.handle;
	e_mw->ib_mw.rkey    = hipzout.rkey;
	return &e_mw->ib_mw;

alloc_mw_exit1:
	ehca_mw_delete(e_mw);
alloc_mw_exit0:
	if (IS_ERR(ib_mw))
		ehca_err(pd->device, "rc=%lx pd=%p", PTR_ERR(ib_mw), pd);
	return ib_mw;
} /* end ehca_alloc_mw() */

/*----------------------------------------------------------------------*/

int ehca_bind_mw(struct ib_qp *qp,
		 struct ib_mw *mw,
		 struct ib_mw_bind *mw_bind)
{
	/* TODO: not supported up to now */
	ehca_gen_err("bind MW currently not supported by HCAD");

	return -EPERM;
} /* end ehca_bind_mw() */

/*----------------------------------------------------------------------*/

int ehca_dealloc_mw(struct ib_mw *mw)
{
	u64 h_ret;
	struct ehca_shca *shca =
		container_of(mw->device, struct ehca_shca, ib_device);
	struct ehca_mw *e_mw = container_of(mw, struct ehca_mw, ib_mw);

	h_ret = hipz_h_free_resource_mw(shca->ipz_hca_handle, e_mw);
	if (h_ret != H_SUCCESS) {
		ehca_err(mw->device, "hipz_free_mw failed, h_ret=%lx shca=%p "
			 "mw=%p rkey=%x hca_hndl=%lx mw_hndl=%lx",
			 h_ret, shca, mw, mw->rkey, shca->ipz_hca_handle.handle,
			 e_mw->ipz_mw_handle.handle);
		return ehca_mrmw_map_hrc_free_mw(h_ret);
	}
	/* successful deallocation */
	ehca_mw_delete(e_mw);
	return 0;
} /* end ehca_dealloc_mw() */

/*----------------------------------------------------------------------*/

struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd,
			      int mr_access_flags,
			      struct ib_fmr_attr *fmr_attr)
{
	struct ib_fmr *ib_fmr;
	struct ehca_shca *shca =
		container_of(pd->device, struct ehca_shca, ib_device);
	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
	struct ehca_mr *e_fmr;
	int ret;
	u32 tmp_lkey, tmp_rkey;
	struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};

	/* check other parameters */
	if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
	    ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
		/*
		 * Remote Write Access requires Local Write Access
		 * Remote Atomic Access requires Local Write Access
		 */
		ehca_err(pd->device, "bad input values: mr_access_flags=%x",
			 mr_access_flags);
		ib_fmr = ERR_PTR(-EINVAL);
		goto alloc_fmr_exit0;
	}
	if (mr_access_flags & IB_ACCESS_MW_BIND) {
		ehca_err(pd->device, "bad input values: mr_access_flags=%x",
			 mr_access_flags);
		ib_fmr = ERR_PTR(-EINVAL);
		goto alloc_fmr_exit0;
	}
	if ((fmr_attr->max_pages == 0) || (fmr_attr->max_maps == 0)) {
		ehca_err(pd->device, "bad input values: fmr_attr->max_pages=%x "
			 "fmr_attr->max_maps=%x fmr_attr->page_shift=%x",
			 fmr_attr->max_pages, fmr_attr->max_maps,
			 fmr_attr->page_shift);
		ib_fmr = ERR_PTR(-EINVAL);
		goto alloc_fmr_exit0;
	}
	if (((1 << fmr_attr->page_shift) != EHCA_PAGESIZE) &&
	    ((1 << fmr_attr->page_shift) != PAGE_SIZE)) {
		ehca_err(pd->device, "unsupported fmr_attr->page_shift=%x",
			 fmr_attr->page_shift);
		ib_fmr = ERR_PTR(-EINVAL);
		goto alloc_fmr_exit0;
	}

	e_fmr = ehca_mr_new();
	if (!e_fmr) {
		ib_fmr = ERR_PTR(-ENOMEM);
		goto alloc_fmr_exit0;
	}
	e_fmr->flags |= EHCA_MR_FLAG_FMR;

	/* register MR on HCA */
	ret = ehca_reg_mr(shca, e_fmr, NULL,
			  fmr_attr->max_pages * (1 << fmr_attr->page_shift),
			  mr_access_flags, e_pd, &pginfo,
			  &tmp_lkey, &tmp_rkey);
	if (ret) {
		ib_fmr = ERR_PTR(ret);
		goto alloc_fmr_exit1;
	}

	/* successful */
	e_fmr->fmr_page_size = 1 << fmr_attr->page_shift;
	e_fmr->fmr_max_pages = fmr_attr->max_pages;
	e_fmr->fmr_max_maps = fmr_attr->max_maps;
	e_fmr->fmr_map_cnt = 0;
	return &e_fmr->ib.ib_fmr;

alloc_fmr_exit1:
	ehca_mr_delete(e_fmr);
alloc_fmr_exit0:
	if (IS_ERR(ib_fmr))
		ehca_err(pd->device, "rc=%lx pd=%p mr_access_flags=%x "
			 "fmr_attr=%p", PTR_ERR(ib_fmr), pd,
			 mr_access_flags, fmr_attr);
	return ib_fmr;
} /* end ehca_alloc_fmr() */

/*----------------------------------------------------------------------*/
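
/*
 * Illustrative check (not from the original source): on a POWER system with
 * PAGE_SIZE = 64K and EHCA_PAGESIZE = 4K, ehca_alloc_fmr() accepts
 * fmr_attr->page_shift of 12 (4K) or 16 (64K) and rejects anything else,
 * e.g. page_shift = 13 (8K) fails with -EINVAL.
 */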

int ehca_map_phys_fmr(struct ib_fmr *fmr,
		      u64 *page_list,
		      int list_len,
		      u64 iova)
{
	int ret;
	struct ehca_shca *shca =
		container_of(fmr->device, struct ehca_shca, ib_device);
	struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);
	struct ehca_pd *e_pd = container_of(fmr->pd, struct ehca_pd, ib_pd);
	struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
	u32 tmp_lkey, tmp_rkey;

	if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
		ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x",
			 e_fmr, e_fmr->flags);
		ret = -EINVAL;
		goto map_phys_fmr_exit0;
	}
	ret = ehca_fmr_check_page_list(e_fmr, page_list, list_len);
	if (ret)
		goto map_phys_fmr_exit0;
	if (iova % e_fmr->fmr_page_size) {
		/* only whole-numbered pages */
		ehca_err(fmr->device, "bad iova, iova=%lx fmr_page_size=%x",
			 iova, e_fmr->fmr_page_size);
		ret = -EINVAL;
		goto map_phys_fmr_exit0;
	}
	if (e_fmr->fmr_map_cnt >= e_fmr->fmr_max_maps) {
		/* HCAD does not limit the maps, however trace this anyway */
		ehca_info(fmr->device, "map limit exceeded, fmr=%p "
			  "e_fmr->fmr_map_cnt=%x e_fmr->fmr_max_maps=%x",
			  fmr, e_fmr->fmr_map_cnt, e_fmr->fmr_max_maps);
	}

	pginfo.type      = EHCA_MR_PGI_FMR;
	pginfo.num_pages = list_len;
	pginfo.num_4k    = list_len * (e_fmr->fmr_page_size / EHCA_PAGESIZE);
	pginfo.page_list = page_list;
	pginfo.next_4k   = ((iova & (e_fmr->fmr_page_size-1)) /
			    EHCA_PAGESIZE);

	ret = ehca_rereg_mr(shca, e_fmr, (u64 *)iova,
			    list_len * e_fmr->fmr_page_size,
			    e_fmr->acl, e_pd, &pginfo, &tmp_lkey, &tmp_rkey);
	if (ret)
		goto map_phys_fmr_exit0;

	/* successful reregistration */
	e_fmr->fmr_map_cnt++;
	e_fmr->ib.ib_fmr.lkey = tmp_lkey;
	e_fmr->ib.ib_fmr.rkey = tmp_rkey;
	return 0;

map_phys_fmr_exit0:
	if (ret)
		ehca_err(fmr->device, "ret=%x fmr=%p page_list=%p list_len=%x "
			 "iova=%lx",
			 ret, fmr, page_list, list_len, iova);
	return ret;
} /* end ehca_map_phys_fmr() */

/*----------------------------------------------------------------------*/
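
/*
 * Illustrative arithmetic (not from the original source): mapping an FMR
 * with fmr_page_size = 64K and list_len = 8 yields
 *	pginfo.num_4k = 8 * (65536 / 4096) = 128
 * 4K hardware pages handed to the firmware.
 */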

int ehca_unmap_fmr(struct list_head *fmr_list)
{
	int ret = 0;
	struct ib_fmr *ib_fmr;
	struct ehca_shca *shca = NULL;
	struct ehca_shca *prev_shca;
	struct ehca_mr *e_fmr;
	u32 num_fmr = 0;
	u32 unmap_fmr_cnt = 0;

	/* check all FMR belong to same SHCA, and check internal flag */
	list_for_each_entry(ib_fmr, fmr_list, list) {
		prev_shca = shca;
		if (!ib_fmr) {
			ehca_gen_err("bad fmr=%p in list", ib_fmr);
			ret = -EINVAL;
			goto unmap_fmr_exit0;
		}
		shca = container_of(ib_fmr->device, struct ehca_shca,
				    ib_device);
		e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr);
		if ((shca != prev_shca) && prev_shca) {
			ehca_err(&shca->ib_device, "SHCA mismatch, shca=%p "
				 "prev_shca=%p e_fmr=%p",
				 shca, prev_shca, e_fmr);
			ret = -EINVAL;
			goto unmap_fmr_exit0;
		}
		if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
			ehca_err(&shca->ib_device, "not a FMR, e_fmr=%p "
				 "e_fmr->flags=%x", e_fmr, e_fmr->flags);
			ret = -EINVAL;
			goto unmap_fmr_exit0;
		}
		num_fmr++;
	}

	/* loop over all FMRs to unmap */
	list_for_each_entry(ib_fmr, fmr_list, list) {
		unmap_fmr_cnt++;
		e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr);
		shca = container_of(ib_fmr->device, struct ehca_shca,
				    ib_device);
		ret = ehca_unmap_one_fmr(shca, e_fmr);
		if (ret) {
			/* unmap failed, stop unmapping of rest of FMRs */
			ehca_err(&shca->ib_device, "unmap of one FMR failed, "
				 "stop rest, e_fmr=%p num_fmr=%x "
				 "unmap_fmr_cnt=%x lkey=%x", e_fmr, num_fmr,
				 unmap_fmr_cnt, e_fmr->ib.ib_fmr.lkey);
			goto unmap_fmr_exit0;
		}
	}

unmap_fmr_exit0:
	if (ret)
		ehca_gen_err("ret=%x fmr_list=%p num_fmr=%x unmap_fmr_cnt=%x",
			     ret, fmr_list, num_fmr, unmap_fmr_cnt);
	return ret;
} /* end ehca_unmap_fmr() */

/*----------------------------------------------------------------------*/

int ehca_dealloc_fmr(struct ib_fmr *fmr)
{
	int ret;
	u64 h_ret;
	struct ehca_shca *shca =
		container_of(fmr->device, struct ehca_shca, ib_device);
	struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);

	if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
		ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x",
			 e_fmr, e_fmr->flags);
		ret = -EINVAL;
		goto free_fmr_exit0;
	}

	h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
	if (h_ret != H_SUCCESS) {
		ehca_err(fmr->device, "hipz_free_mr failed, h_ret=%lx e_fmr=%p "
			 "hca_hndl=%lx fmr_hndl=%lx fmr->lkey=%x",
			 h_ret, e_fmr, shca->ipz_hca_handle.handle,
			 e_fmr->ipz_mr_handle.handle, fmr->lkey);
		ret = ehca_mrmw_map_hrc_free_mr(h_ret);
		goto free_fmr_exit0;
	}
	/* successful deregistration */
	ehca_mr_delete(e_fmr);
	return 0;

free_fmr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%x fmr=%p", ret, fmr);
	return ret;
} /* end ehca_dealloc_fmr() */

/*----------------------------------------------------------------------*/

int ehca_reg_mr(struct ehca_shca *shca,
		struct ehca_mr *e_mr,
		u64 *iova_start,
		u64 size,
		int acl,
		struct ehca_pd *e_pd,
		struct ehca_mr_pginfo *pginfo,
		u32 *lkey, /*OUT*/
		u32 *rkey) /*OUT*/
{
	int ret;
	u64 h_ret;
	u32 hipz_acl;
	struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};

	ehca_mrmw_map_acl(acl, &hipz_acl);
	ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);
	if (ehca_use_hp_mr == 1)	/* high performance MR */
		hipz_acl |= 0x00000001;

	h_ret = hipz_h_alloc_resource_mr(shca->ipz_hca_handle, e_mr,
					 (u64)iova_start, size, hipz_acl,
					 e_pd->fw_pd, &hipzout);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "hipz_alloc_mr failed, h_ret=%lx "
			 "hca_hndl=%lx", h_ret, shca->ipz_hca_handle.handle);
		ret = ehca_mrmw_map_hrc_alloc(h_ret);
		goto ehca_reg_mr_exit0;
	}

	e_mr->ipz_mr_handle = hipzout.handle;

	ret = ehca_reg_mr_rpages(shca, e_mr, pginfo);
	if (ret)
		goto ehca_reg_mr_exit1;

	/* successful registration */
	e_mr->num_pages = pginfo->num_pages;
	e_mr->num_4k    = pginfo->num_4k;
	e_mr->start     = iova_start;
	e_mr->size      = size;
	e_mr->acl       = acl;
	*lkey = hipzout.lkey;
	*rkey = hipzout.rkey;
	return 0;

ehca_reg_mr_exit1:
	h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "h_ret=%lx shca=%p e_mr=%p "
			 "iova_start=%p size=%lx acl=%x e_pd=%p lkey=%x "
			 "pginfo=%p num_pages=%lx num_4k=%lx ret=%x",
			 h_ret, shca, e_mr, iova_start, size, acl, e_pd,
			 hipzout.lkey, pginfo, pginfo->num_pages,
			 pginfo->num_4k, ret);
		ehca_err(&shca->ib_device, "internal error in ehca_reg_mr, "
			 "not recoverable");
	}
ehca_reg_mr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p "
			 "iova_start=%p size=%lx acl=%x e_pd=%p pginfo=%p "
			 "num_pages=%lx num_4k=%lx",
			 ret, shca, e_mr, iova_start, size, acl, e_pd, pginfo,
			 pginfo->num_pages, pginfo->num_4k);
	return ret;
} /* end ehca_reg_mr() */

/*----------------------------------------------------------------------*/

int ehca_reg_mr_rpages(struct ehca_shca *shca,
		       struct ehca_mr *e_mr,
		       struct ehca_mr_pginfo *pginfo)
{
	int ret = 0;
	u64 h_ret;
	u32 rnum;
	u64 rpage;
	u32 i;
	u64 *kpage;

	kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	if (!kpage) {
		ehca_err(&shca->ib_device, "kpage alloc failed");
		ret = -ENOMEM;
		goto ehca_reg_mr_rpages_exit0;
	}

	/* max 512 pages per shot */
	for (i = 0; i < ((pginfo->num_4k + 512 - 1) / 512); i++) {

		if (i == ((pginfo->num_4k + 512 - 1) / 512) - 1) {
			rnum = pginfo->num_4k % 512; /* last shot */
			if (rnum == 0)
				rnum = 512;      /* last shot is full */
		} else
			rnum = 512;

		if (rnum > 1) {
			ret = ehca_set_pagebuf(e_mr, pginfo, rnum, kpage);
			if (ret) {
				ehca_err(&shca->ib_device, "ehca_set_pagebuf "
					 "bad rc, ret=%x rnum=%x kpage=%p",
					 ret, rnum, kpage);
				ret = -EFAULT;
				goto ehca_reg_mr_rpages_exit1;
			}
			rpage = virt_to_abs(kpage);
			if (!rpage) {
				ehca_err(&shca->ib_device, "kpage=%p i=%x",
					 kpage, i);
				ret = -EFAULT;
				goto ehca_reg_mr_rpages_exit1;
			}
		} else {  /* rnum==1 */
			ret = ehca_set_pagebuf_1(e_mr, pginfo, &rpage);
			if (ret) {
				ehca_err(&shca->ib_device, "ehca_set_pagebuf_1 "
					 "bad rc, ret=%x i=%x", ret, i);
				ret = -EFAULT;
				goto ehca_reg_mr_rpages_exit1;
			}
		}

		h_ret = hipz_h_register_rpage_mr(shca->ipz_hca_handle, e_mr,
						 0, /* pagesize 4k */
						 0, rpage, rnum);

		if (i == ((pginfo->num_4k + 512 - 1) / 512) - 1) {
			/*
			 * check for 'registration complete'==H_SUCCESS
			 * and for 'page registered'==H_PAGE_REGISTERED
			 */
			if (h_ret != H_SUCCESS) {
				ehca_err(&shca->ib_device, "last "
					 "hipz_reg_rpage_mr failed, h_ret=%lx "
					 "e_mr=%p i=%x hca_hndl=%lx mr_hndl=%lx"
					 " lkey=%x", h_ret, e_mr, i,
					 shca->ipz_hca_handle.handle,
					 e_mr->ipz_mr_handle.handle,
					 e_mr->ib.ib_mr.lkey);
				ret = ehca_mrmw_map_hrc_rrpg_last(h_ret);
				break;
			} else
				ret = 0;
		} else if (h_ret != H_PAGE_REGISTERED) {
			ehca_err(&shca->ib_device, "hipz_reg_rpage_mr failed, "
				 "h_ret=%lx e_mr=%p i=%x lkey=%x hca_hndl=%lx "
				 "mr_hndl=%lx", h_ret, e_mr, i,
				 e_mr->ib.ib_mr.lkey,
				 shca->ipz_hca_handle.handle,
				 e_mr->ipz_mr_handle.handle);
			ret = ehca_mrmw_map_hrc_rrpg_notlast(h_ret);
			break;
		} else
			ret = 0;
	}

ehca_reg_mr_rpages_exit1:
	ehca_free_fw_ctrlblock(kpage);
ehca_reg_mr_rpages_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p pginfo=%p "
			 "num_pages=%lx num_4k=%lx", ret, shca, e_mr, pginfo,
			 pginfo->num_pages, pginfo->num_4k);
	return ret;
} /* end ehca_reg_mr_rpages() */

/*----------------------------------------------------------------------*/
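
/*
 * Illustrative arithmetic (not from the original source): with
 * pginfo->num_4k = 1300 the loop above issues three
 * hipz_h_register_rpage_mr() shots of 512, 512 and 276 pages; only the
 * final shot is expected to return H_SUCCESS, the earlier ones
 * H_PAGE_REGISTERED.
 */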

inline int ehca_rereg_mr_rereg1(struct ehca_shca *shca,
				struct ehca_mr *e_mr,
				u64 *iova_start,
				u64 size,
				u32 acl,
				struct ehca_pd *e_pd,
				struct ehca_mr_pginfo *pginfo,
				u32 *lkey, /*OUT*/
				u32 *rkey) /*OUT*/
{
	int ret;
	u64 h_ret;
	u32 hipz_acl;
	u64 *kpage;
	u64 rpage;
	struct ehca_mr_pginfo pginfo_save;
	struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};

	ehca_mrmw_map_acl(acl, &hipz_acl);
	ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);

	kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	if (!kpage) {
		ehca_err(&shca->ib_device, "kpage alloc failed");
		ret = -ENOMEM;
		goto ehca_rereg_mr_rereg1_exit0;
	}

	pginfo_save = *pginfo;
	ret = ehca_set_pagebuf(e_mr, pginfo, pginfo->num_4k, kpage);
	if (ret) {
		ehca_err(&shca->ib_device, "set pagebuf failed, e_mr=%p "
			 "pginfo=%p type=%x num_pages=%lx num_4k=%lx kpage=%p",
			 e_mr, pginfo, pginfo->type, pginfo->num_pages,
			 pginfo->num_4k,kpage);
		goto ehca_rereg_mr_rereg1_exit1;
	}
	rpage = virt_to_abs(kpage);
	if (!rpage) {
		ehca_err(&shca->ib_device, "kpage=%p", kpage);
		ret = -EFAULT;
		goto ehca_rereg_mr_rereg1_exit1;
	}
	h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_mr,
				      (u64)iova_start, size, hipz_acl,
				      e_pd->fw_pd, rpage, &hipzout);
	if (h_ret != H_SUCCESS) {
		/*
		 * reregistration unsuccessful, try it again with the 3 hCalls,
		 * e.g. this is required in case H_MR_CONDITION
		 * (MW bound or MR is shared)
		 */
		ehca_warn(&shca->ib_device, "hipz_h_reregister_pmr failed "
			  "(Rereg1), h_ret=%lx e_mr=%p", h_ret, e_mr);
		*pginfo = pginfo_save;
		ret = -EAGAIN;
	} else if ((u64*)hipzout.vaddr != iova_start) {
		ehca_err(&shca->ib_device, "PHYP changed iova_start in "
			 "rereg_pmr, iova_start=%p iova_start_out=%lx e_mr=%p "
			 "mr_handle=%lx lkey=%x lkey_out=%x", iova_start,
			 hipzout.vaddr, e_mr, e_mr->ipz_mr_handle.handle,
			 e_mr->ib.ib_mr.lkey, hipzout.lkey);
		ret = -EFAULT;
	} else {
		/*
		 * successful reregistration
		 * note: start and start_out are identical for eServer HCAs
		 */
		e_mr->num_pages = pginfo->num_pages;
		e_mr->num_4k    = pginfo->num_4k;
		e_mr->start     = iova_start;
		e_mr->size      = size;
		e_mr->acl       = acl;
		*lkey = hipzout.lkey;
		*rkey = hipzout.rkey;
	}

ehca_rereg_mr_rereg1_exit1:
	ehca_free_fw_ctrlblock(kpage);
ehca_rereg_mr_rereg1_exit0:
	if ( ret && (ret != -EAGAIN) )
		ehca_err(&shca->ib_device, "ret=%x lkey=%x rkey=%x "
			 "pginfo=%p num_pages=%lx num_4k=%lx",
			 ret, *lkey, *rkey, pginfo, pginfo->num_pages,
			 pginfo->num_4k);
	return ret;
} /* end ehca_rereg_mr_rereg1() */

/*----------------------------------------------------------------------*/

int ehca_rereg_mr(struct ehca_shca *shca,
		  struct ehca_mr *e_mr,
		  u64 *iova_start,
		  u64 size,
		  int acl,
		  struct ehca_pd *e_pd,
		  struct ehca_mr_pginfo *pginfo,
		  u32 *lkey,
		  u32 *rkey)
{
	int ret = 0;
	u64 h_ret;
	int rereg_1_hcall = 1; /* 1: use hipz_h_reregister_pmr directly */
	int rereg_3_hcall = 0; /* 1: use 3 hipz calls for reregistration */

	/* first determine reregistration hCall(s) */
	if ((pginfo->num_4k > 512) || (e_mr->num_4k > 512) ||
	    (pginfo->num_4k > e_mr->num_4k)) {
		ehca_dbg(&shca->ib_device, "Rereg3 case, pginfo->num_4k=%lx "
			 "e_mr->num_4k=%x", pginfo->num_4k, e_mr->num_4k);
		rereg_1_hcall = 0;
		rereg_3_hcall = 1;
	}

	if (e_mr->flags & EHCA_MR_FLAG_MAXMR) {	/* check for max-MR */
		rereg_1_hcall = 0;
		rereg_3_hcall = 1;
		e_mr->flags &= ~EHCA_MR_FLAG_MAXMR;
		ehca_err(&shca->ib_device, "Rereg MR for max-MR! e_mr=%p",
			 e_mr);
	}

	if (rereg_1_hcall) {
		ret = ehca_rereg_mr_rereg1(shca, e_mr, iova_start, size,
					   acl, e_pd, pginfo, lkey, rkey);
		if (ret) {
			if (ret == -EAGAIN)
				rereg_3_hcall = 1;
			else
				goto ehca_rereg_mr_exit0;
		}
	}

	if (rereg_3_hcall) {
		struct ehca_mr save_mr;

		/* first deregister old MR */
		h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
		if (h_ret != H_SUCCESS) {
			ehca_err(&shca->ib_device, "hipz_free_mr failed, "
				 "h_ret=%lx e_mr=%p hca_hndl=%lx mr_hndl=%lx "
				 "mr->lkey=%x",
				 h_ret, e_mr, shca->ipz_hca_handle.handle,
				 e_mr->ipz_mr_handle.handle,
				 e_mr->ib.ib_mr.lkey);
			ret = ehca_mrmw_map_hrc_free_mr(h_ret);
			goto ehca_rereg_mr_exit0;
		}
		/* clean ehca_mr_t, without changing struct ib_mr and lock */
		save_mr = *e_mr;
		ehca_mr_deletenew(e_mr);

		/* set some MR values */
		e_mr->flags = save_mr.flags;
		e_mr->fmr_page_size = save_mr.fmr_page_size;
		e_mr->fmr_max_pages = save_mr.fmr_max_pages;
		e_mr->fmr_max_maps = save_mr.fmr_max_maps;
		e_mr->fmr_map_cnt = save_mr.fmr_map_cnt;

		ret = ehca_reg_mr(shca, e_mr, iova_start, size, acl,
				  e_pd, pginfo, lkey, rkey);
		if (ret) {
			u32 offset = (u64)(&e_mr->flags) - (u64)e_mr;
			memcpy(&e_mr->flags, &(save_mr.flags),
			       sizeof(struct ehca_mr) - offset);
			goto ehca_rereg_mr_exit0;
		}
	}

ehca_rereg_mr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p "
			 "iova_start=%p size=%lx acl=%x e_pd=%p pginfo=%p "
			 "num_pages=%lx lkey=%x rkey=%x rereg_1_hcall=%x "
			 "rereg_3_hcall=%x", ret, shca, e_mr, iova_start, size,
			 acl, e_pd, pginfo, pginfo->num_pages, *lkey, *rkey,
			 rereg_1_hcall, rereg_3_hcall);
	return ret;
} /* end ehca_rereg_mr() */

/*----------------------------------------------------------------------*/
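
/*
 * Note (added for clarity): ehca_rereg_mr() prefers the single
 * hipz_h_reregister_pmr hCall (Rereg1) and falls back to the three-call
 * sequence (free old MR + ehca_mr_deletenew + ehca_reg_mr, "Rereg3") when
 * the MR needs more than 512 4K pages, grows, is the max-MR, or when
 * Rereg1 returns -EAGAIN.
 */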

int ehca_unmap_one_fmr(struct ehca_shca *shca,
		       struct ehca_mr *e_fmr)
{
	int ret = 0;
	u64 h_ret;
	int rereg_1_hcall = 1; /* 1: use hipz_mr_reregister directly */
	int rereg_3_hcall = 0; /* 1: use 3 hipz calls for unmapping */
	struct ehca_pd *e_pd =
		container_of(e_fmr->ib.ib_fmr.pd, struct ehca_pd, ib_pd);
	struct ehca_mr save_fmr;
	u32 tmp_lkey, tmp_rkey;
	struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
	struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};

	/* first check if reregistration hCall can be used for unmap */
	if (e_fmr->fmr_max_pages > 512) {
		rereg_1_hcall = 0;
		rereg_3_hcall = 1;
	}

	if (rereg_1_hcall) {
		/*
		 * note: after using rereg hcall with len=0,
		 * rereg hcall must be used again for registering pages
		 */
		h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_fmr, 0,
					      0, 0, e_pd->fw_pd, 0, &hipzout);
		if (h_ret != H_SUCCESS) {
			/*
			 * should not happen, because length checked above,
			 * FMRs are not shared and no MW bound to FMRs
			 */
			ehca_err(&shca->ib_device, "hipz_reregister_pmr failed "
				 "(Rereg1), h_ret=%lx e_fmr=%p hca_hndl=%lx "
				 "mr_hndl=%lx lkey=%x lkey_out=%x",
				 h_ret, e_fmr, shca->ipz_hca_handle.handle,
				 e_fmr->ipz_mr_handle.handle,
				 e_fmr->ib.ib_fmr.lkey, hipzout.lkey);
			rereg_3_hcall = 1;
		} else {
			/* successful reregistration */
			e_fmr->start = NULL;
			e_fmr->size = 0;
			tmp_lkey = hipzout.lkey;
			tmp_rkey = hipzout.rkey;
		}
	}

	if (rereg_3_hcall) {
		struct ehca_mr save_mr;

		/* first free old FMR */
		h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
		if (h_ret != H_SUCCESS) {
			ehca_err(&shca->ib_device, "hipz_free_mr failed, "
				 "h_ret=%lx e_fmr=%p hca_hndl=%lx mr_hndl=%lx "
				 "lkey=%x",
				 h_ret, e_fmr, shca->ipz_hca_handle.handle,
				 e_fmr->ipz_mr_handle.handle,
				 e_fmr->ib.ib_fmr.lkey);
			ret = ehca_mrmw_map_hrc_free_mr(h_ret);
			goto ehca_unmap_one_fmr_exit0;
		}
		/* clean ehca_mr_t, without changing lock */
		save_fmr = *e_fmr;
		ehca_mr_deletenew(e_fmr);

		/* set some MR values */
		e_fmr->flags = save_fmr.flags;
		e_fmr->fmr_page_size = save_fmr.fmr_page_size;
		e_fmr->fmr_max_pages = save_fmr.fmr_max_pages;
		e_fmr->fmr_max_maps = save_fmr.fmr_max_maps;
		e_fmr->fmr_map_cnt = save_fmr.fmr_map_cnt;
		e_fmr->acl = save_fmr.acl;

		pginfo.type      = EHCA_MR_PGI_FMR;
		pginfo.num_pages = 0;
		pginfo.num_4k    = 0;
		ret = ehca_reg_mr(shca, e_fmr, NULL,
				  (e_fmr->fmr_max_pages * e_fmr->fmr_page_size),
				  e_fmr->acl, e_pd, &pginfo, &tmp_lkey,
				  &tmp_rkey);
		if (ret) {
			u32 offset = (u64)(&e_fmr->flags) - (u64)e_fmr;
			memcpy(&e_fmr->flags, &(save_mr.flags),
			       sizeof(struct ehca_mr) - offset);
			goto ehca_unmap_one_fmr_exit0;
		}
	}

ehca_unmap_one_fmr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%x tmp_lkey=%x tmp_rkey=%x "
			 "fmr_max_pages=%x rereg_1_hcall=%x rereg_3_hcall=%x",
			 ret, tmp_lkey, tmp_rkey, e_fmr->fmr_max_pages,
			 rereg_1_hcall, rereg_3_hcall);
	return ret;
} /* end ehca_unmap_one_fmr() */

/*----------------------------------------------------------------------*/

int ehca_reg_smr(struct ehca_shca *shca,
		 struct ehca_mr *e_origmr,
		 struct ehca_mr *e_newmr,
		 u64 *iova_start,
		 int acl,
		 struct ehca_pd *e_pd,
		 u32 *lkey, /*OUT*/
		 u32 *rkey) /*OUT*/
{
	int ret = 0;
	u64 h_ret;
	u32 hipz_acl;
	struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};

	ehca_mrmw_map_acl(acl, &hipz_acl);
	ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);

	h_ret = hipz_h_register_smr(shca->ipz_hca_handle, e_newmr, e_origmr,
				    (u64)iova_start, hipz_acl, e_pd->fw_pd,
				    &hipzout);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lx "
			 "shca=%p e_origmr=%p e_newmr=%p iova_start=%p acl=%x "
			 "e_pd=%p hca_hndl=%lx mr_hndl=%lx lkey=%x",
			 h_ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd,
			 shca->ipz_hca_handle.handle,
			 e_origmr->ipz_mr_handle.handle,
			 e_origmr->ib.ib_mr.lkey);
		ret = ehca_mrmw_map_hrc_reg_smr(h_ret);
		goto ehca_reg_smr_exit0;
	}
	/* successful registration */
	e_newmr->num_pages     = e_origmr->num_pages;
	e_newmr->num_4k        = e_origmr->num_4k;
	e_newmr->start         = iova_start;
	e_newmr->size          = e_origmr->size;
	e_newmr->acl           = acl;
	e_newmr->ipz_mr_handle = hipzout.handle;
	*lkey = hipzout.lkey;
	*rkey = hipzout.rkey;
	return 0;

ehca_reg_smr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%x shca=%p e_origmr=%p "
			 "e_newmr=%p iova_start=%p acl=%x e_pd=%p",
			 ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd);
	return ret;
} /* end ehca_reg_smr() */

/*----------------------------------------------------------------------*/

/* register internal max-MR to internal SHCA */
int ehca_reg_internal_maxmr(
	struct ehca_shca *shca,
	struct ehca_pd *e_pd,
	struct ehca_mr **e_maxmr)  /*OUT*/
{
	int ret;
	struct ehca_mr *e_mr;
	u64 *iova_start;
	u64 size_maxmr;
	struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
	struct ib_phys_buf ib_pbuf;
	u32 num_pages_mr;
	u32 num_pages_4k; /* 4k portion "pages" */

	e_mr = ehca_mr_new();
	if (!e_mr) {
		ehca_err(&shca->ib_device, "out of memory");
		ret = -ENOMEM;
		goto ehca_reg_internal_maxmr_exit0;
	}
	e_mr->flags |= EHCA_MR_FLAG_MAXMR;

	/* register internal max-MR on HCA */
	size_maxmr = (u64)high_memory - PAGE_OFFSET;
	iova_start = (u64 *)KERNELBASE;
	ib_pbuf.addr = 0;
	ib_pbuf.size = size_maxmr;
	num_pages_mr = ((((u64)iova_start % PAGE_SIZE) + size_maxmr +
			 PAGE_SIZE - 1) / PAGE_SIZE);
	num_pages_4k = ((((u64)iova_start % EHCA_PAGESIZE) + size_maxmr +
			 EHCA_PAGESIZE - 1) / EHCA_PAGESIZE);

	pginfo.type           = EHCA_MR_PGI_PHYS;
	pginfo.num_pages      = num_pages_mr;
	pginfo.num_4k         = num_pages_4k;
	pginfo.num_phys_buf   = 1;
	pginfo.phys_buf_array = &ib_pbuf;

	ret = ehca_reg_mr(shca, e_mr, iova_start, size_maxmr, 0, e_pd,
			  &pginfo, &e_mr->ib.ib_mr.lkey,
			  &e_mr->ib.ib_mr.rkey);
	if (ret) {
		ehca_err(&shca->ib_device, "reg of internal max MR failed, "
			 "e_mr=%p iova_start=%p size_maxmr=%lx num_pages_mr=%x "
			 "num_pages_4k=%x", e_mr, iova_start, size_maxmr,
			 num_pages_mr, num_pages_4k);
		goto ehca_reg_internal_maxmr_exit1;
	}

	/* successful registration of all pages */
	e_mr->ib.ib_mr.device = e_pd->ib_pd.device;
	e_mr->ib.ib_mr.pd = &e_pd->ib_pd;
	e_mr->ib.ib_mr.uobject = NULL;
	atomic_inc(&(e_pd->ib_pd.usecnt));
	atomic_set(&(e_mr->ib.ib_mr.usecnt), 0);
	*e_maxmr = e_mr;
	return 0;

ehca_reg_internal_maxmr_exit1:
	ehca_mr_delete(e_mr);
ehca_reg_internal_maxmr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%x shca=%p e_pd=%p e_maxmr=%p",
			 ret, shca, e_pd, e_maxmr);
	return ret;
} /* end ehca_reg_internal_maxmr() */

/*----------------------------------------------------------------------*/

int ehca_reg_maxmr(struct ehca_shca *shca,
		   struct ehca_mr *e_newmr,
		   u64 *iova_start,
		   int acl,
		   struct ehca_pd *e_pd,
		   u32 *lkey,
		   u32 *rkey)
{
	u64 h_ret;
	struct ehca_mr *e_origmr = shca->maxmr;
	u32 hipz_acl;
	struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};

	ehca_mrmw_map_acl(acl, &hipz_acl);
	ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);

	h_ret = hipz_h_register_smr(shca->ipz_hca_handle, e_newmr, e_origmr,
				    (u64)iova_start, hipz_acl, e_pd->fw_pd,
				    &hipzout);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lx "
			 "e_origmr=%p hca_hndl=%lx mr_hndl=%lx lkey=%x",
			 h_ret, e_origmr, shca->ipz_hca_handle.handle,
			 e_origmr->ipz_mr_handle.handle,
			 e_origmr->ib.ib_mr.lkey);
		return ehca_mrmw_map_hrc_reg_smr(h_ret);
	}
	/* successful registration */
	e_newmr->num_pages     = e_origmr->num_pages;
	e_newmr->num_4k        = e_origmr->num_4k;
	e_newmr->start         = iova_start;
	e_newmr->size          = e_origmr->size;
	e_newmr->acl           = acl;
	e_newmr->ipz_mr_handle = hipzout.handle;
	*lkey = hipzout.lkey;
	*rkey = hipzout.rkey;
	return 0;
} /* end ehca_reg_maxmr() */

/*----------------------------------------------------------------------*/

int ehca_dereg_internal_maxmr(struct ehca_shca *shca)
{
	int ret;
	struct ehca_mr *e_maxmr;
	struct ib_pd *ib_pd;

	if (!shca->maxmr) {
		ehca_err(&shca->ib_device, "bad call, shca=%p", shca);
		ret = -EINVAL;
		goto ehca_dereg_internal_maxmr_exit0;
	}

	e_maxmr = shca->maxmr;
	ib_pd = e_maxmr->ib.ib_mr.pd;
	shca->maxmr = NULL; /* remove internal max-MR indication from SHCA */

	ret = ehca_dereg_mr(&e_maxmr->ib.ib_mr);
	if (ret) {
		ehca_err(&shca->ib_device, "dereg internal max-MR failed, "
			 "ret=%x e_maxmr=%p shca=%p lkey=%x",
			 ret, e_maxmr, shca, e_maxmr->ib.ib_mr.lkey);
		shca->maxmr = e_maxmr;
		goto ehca_dereg_internal_maxmr_exit0;
	}

	atomic_dec(&ib_pd->usecnt);

ehca_dereg_internal_maxmr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%x shca=%p shca->maxmr=%p",
			 ret, shca, shca->maxmr);
	return ret;
} /* end ehca_dereg_internal_maxmr() */

/*----------------------------------------------------------------------*/

/*
 * check physical buffer array of MR verbs for validness and
 * calculates MR size
 */
int ehca_mr_chk_buf_and_calc_size(struct ib_phys_buf *phys_buf_array,
				  int num_phys_buf,
				  u64 *iova_start,
				  u64 *size)
{
	struct ib_phys_buf *pbuf = phys_buf_array;
	u64 size_count = 0;
	u32 i;

	if (num_phys_buf == 0) {
		ehca_gen_err("bad phys buf array len, num_phys_buf=0");
		return -EINVAL;
	}
	/* check first buffer */
	if (((u64)iova_start & ~PAGE_MASK) != (pbuf->addr & ~PAGE_MASK)) {
		ehca_gen_err("iova_start/addr mismatch, iova_start=%p "
			     "pbuf->addr=%lx pbuf->size=%lx",
			     iova_start, pbuf->addr, pbuf->size);
		return -EINVAL;
	}
	if (((pbuf->addr + pbuf->size) % PAGE_SIZE) &&
	    (num_phys_buf > 1)) {
		ehca_gen_err("addr/size mismatch in 1st buf, pbuf->addr=%lx "
			     "pbuf->size=%lx", pbuf->addr, pbuf->size);
		return -EINVAL;
	}

	for (i = 0; i < num_phys_buf; i++) {
		if ((i > 0) && (pbuf->addr % PAGE_SIZE)) {
			ehca_gen_err("bad address, i=%x pbuf->addr=%lx "
				     "pbuf->size=%lx",
				     i, pbuf->addr, pbuf->size);
			return -EINVAL;
		}
		if (((i > 0) &&	/* not 1st */
		     (i < (num_phys_buf - 1)) &&	/* not last */
		     (pbuf->size % PAGE_SIZE)) || (pbuf->size == 0)) {
			ehca_gen_err("bad size, i=%x pbuf->size=%lx",
				     i, pbuf->size);
			return -EINVAL;
		}
		size_count += pbuf->size;
		pbuf++;
	}

	*size = size_count;
	return 0;
} /* end ehca_mr_chk_buf_and_calc_size() */

/*----------------------------------------------------------------------*/
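
/*
 * Illustrative example (not from the original source): a two-element
 * phys_buf_array of { addr=0x10000, size=0x1000 } and { addr=0x11000,
 * size=0x800 }, with an iova_start whose page offset matches 0x10000,
 * passes the checks above (first buffer ends page-aligned, middle buffers
 * none, last buffer may be short) and yields *size = 0x1800.
 */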

/* check page list of map FMR verb for validness */
int ehca_fmr_check_page_list(struct ehca_mr *e_fmr,
			     u64 *page_list,
			     int list_len)
{
	u32 i;
	u64 *page;

	if ((list_len == 0) || (list_len > e_fmr->fmr_max_pages)) {
		ehca_gen_err("bad list_len, list_len=%x "
			     "e_fmr->fmr_max_pages=%x fmr=%p",
			     list_len, e_fmr->fmr_max_pages, e_fmr);
		return -EINVAL;
	}

	/* each page must be aligned */
	page = page_list;
	for (i = 0; i < list_len; i++) {
		if (*page % e_fmr->fmr_page_size) {
			ehca_gen_err("bad page, i=%x *page=%lx page=%p fmr=%p "
				     "fmr_page_size=%x", i, *page, page, e_fmr,
				     e_fmr->fmr_page_size);
			return -EINVAL;
		}
		page++;
	}

	return 0;
} /* end ehca_fmr_check_page_list() */

/*----------------------------------------------------------------------*/

/* setup page buffer from page info */
int ehca_set_pagebuf(struct ehca_mr *e_mr,
		     struct ehca_mr_pginfo *pginfo,
		     u32 number,
		     u64 *kpage)
{
	int ret = 0;
	struct ib_umem_chunk *prev_chunk;
	struct ib_umem_chunk *chunk;
	struct ib_phys_buf *pbuf;
	u64 *fmrlist;
	u64 num4k, pgaddr, offs4k;
	u32 i = 0;
	u32 j = 0;

	if (pginfo->type == EHCA_MR_PGI_PHYS) {
		/* loop over desired phys_buf_array entries */
		while (i < number) {
			pbuf   = pginfo->phys_buf_array + pginfo->next_buf;
			num4k  = ((pbuf->addr % EHCA_PAGESIZE) + pbuf->size +
				  EHCA_PAGESIZE - 1) / EHCA_PAGESIZE;
			offs4k = (pbuf->addr & ~PAGE_MASK) / EHCA_PAGESIZE;
			while (pginfo->next_4k < offs4k + num4k) {
				/* sanity check */
				if ((pginfo->page_cnt >= pginfo->num_pages) ||
				    (pginfo->page_4k_cnt >= pginfo->num_4k)) {
					ehca_gen_err("page_cnt >= num_pages, "
						     "page_cnt=%lx "
						     "num_pages=%lx "
						     "page_4k_cnt=%lx "
						     "num_4k=%lx i=%x",
						     pginfo->page_cnt,
						     pginfo->num_pages,
						     pginfo->page_4k_cnt,
						     pginfo->num_4k, i);
					ret = -EFAULT;
					goto ehca_set_pagebuf_exit0;
				}
				*kpage = phys_to_abs(
					(pbuf->addr & EHCA_PAGEMASK)
					+ (pginfo->next_4k * EHCA_PAGESIZE));
				if ( !(*kpage) && pbuf->addr ) {
					ehca_gen_err("pbuf->addr=%lx "
						     "pbuf->size=%lx "
						     "next_4k=%lx", pbuf->addr,
						     pbuf->size,
						     pginfo->next_4k);
					ret = -EFAULT;
					goto ehca_set_pagebuf_exit0;
				}
				(pginfo->page_4k_cnt)++;
				(pginfo->next_4k)++;
				if (pginfo->next_4k %
				    (PAGE_SIZE / EHCA_PAGESIZE) == 0)
					(pginfo->page_cnt)++;
				kpage++;
				i++;
				if (i >= number) break;
			}
			if (pginfo->next_4k >= offs4k + num4k) {
				(pginfo->next_buf)++;
				pginfo->next_4k = 0;
			}
		}
	} else if (pginfo->type == EHCA_MR_PGI_USER) {
		/* loop over desired chunk entries */
		chunk      = pginfo->next_chunk;
		prev_chunk = pginfo->next_chunk;
		list_for_each_entry_continue(chunk,
					     (&(pginfo->region->chunk_list)),
					     list) {
			for (i = pginfo->next_nmap; i < chunk->nmap; ) {
				pgaddr = ( page_to_pfn(chunk->page_list[i].page)
					   << PAGE_SHIFT );
				*kpage = phys_to_abs(pgaddr +
						     (pginfo->next_4k *
						      EHCA_PAGESIZE));
				if ( !(*kpage) ) {
					ehca_gen_err("pgaddr=%lx "
						     "chunk->page_list[i]=%lx "
						     "i=%x next_4k=%lx mr=%p",
						     pgaddr,
						     (u64)sg_dma_address(
							     &chunk->
							     page_list[i]),
						     i, pginfo->next_4k, e_mr);
					ret = -EFAULT;
					goto ehca_set_pagebuf_exit0;
				}
				(pginfo->page_4k_cnt)++;
				(pginfo->next_4k)++;
				kpage++;
				if (pginfo->next_4k %
				    (PAGE_SIZE / EHCA_PAGESIZE) == 0) {
					(pginfo->page_cnt)++;
					(pginfo->next_nmap)++;
					pginfo->next_4k = 0;
					i++;
				}
				j++;
				if (j >= number) break;
			}
			if ((pginfo->next_nmap >= chunk->nmap) &&
			    (j >= number)) {
				pginfo->next_nmap = 0;
				prev_chunk = chunk;
				break;
			} else if (pginfo->next_nmap >= chunk->nmap) {
				pginfo->next_nmap = 0;
				prev_chunk = chunk;
			} else if (j >= number)
				break;
			else
				prev_chunk = chunk;
		}
		pginfo->next_chunk =
			list_prepare_entry(prev_chunk,
					   (&(pginfo->region->chunk_list)),
					   list);
	} else if (pginfo->type == EHCA_MR_PGI_FMR) {
		/* loop over desired page_list entries */
		fmrlist = pginfo->page_list + pginfo->next_listelem;
		for (i = 0; i < number; i++) {
			*kpage = phys_to_abs((*fmrlist & EHCA_PAGEMASK) +
					     pginfo->next_4k * EHCA_PAGESIZE);
			if ( !(*kpage) ) {
				ehca_gen_err("*fmrlist=%lx fmrlist=%p "
					     "next_listelem=%lx next_4k=%lx",
					     *fmrlist, fmrlist,
					     pginfo->next_listelem,
					     pginfo->next_4k);
				ret = -EFAULT;
				goto ehca_set_pagebuf_exit0;
			}
			(pginfo->page_4k_cnt)++;
			(pginfo->next_4k)++;
			kpage++;
			if (pginfo->next_4k %
			    (e_mr->fmr_page_size / EHCA_PAGESIZE) == 0) {
				(pginfo->page_cnt)++;
				(pginfo->next_listelem)++;
				fmrlist++;
				pginfo->next_4k = 0;
			}
		}
	} else {
		ehca_gen_err("bad pginfo->type=%x", pginfo->type);
		ret = -EFAULT;
		goto ehca_set_pagebuf_exit0;
	}

ehca_set_pagebuf_exit0:
	if (ret)
		ehca_gen_err("ret=%x e_mr=%p pginfo=%p type=%x num_pages=%lx "
			     "num_4k=%lx next_buf=%lx next_4k=%lx number=%x "
			     "kpage=%p page_cnt=%lx page_4k_cnt=%lx i=%x "
			     "next_listelem=%lx region=%p next_chunk=%p "
			     "next_nmap=%lx", ret, e_mr, pginfo, pginfo->type,
			     pginfo->num_pages, pginfo->num_4k,
			     pginfo->next_buf, pginfo->next_4k, number, kpage,
			     pginfo->page_cnt, pginfo->page_4k_cnt, i,
			     pginfo->next_listelem, pginfo->region,
			     pginfo->next_chunk, pginfo->next_nmap);
	return ret;
} /* end ehca_set_pagebuf() */

/*----------------------------------------------------------------------*/

/* setup 1 page from page info page buffer */
int ehca_set_pagebuf_1(struct ehca_mr *e_mr,
		       struct ehca_mr_pginfo *pginfo,
		       u64 *rpage)
{
	int ret = 0;
	struct ib_phys_buf *tmp_pbuf;
	u64 *fmrlist;
	struct ib_umem_chunk *chunk;
	struct ib_umem_chunk *prev_chunk;
	u64 pgaddr, num4k, offs4k;

	if (pginfo->type == EHCA_MR_PGI_PHYS) {
		/* sanity check */
		if ((pginfo->page_cnt >= pginfo->num_pages) ||
		    (pginfo->page_4k_cnt >= pginfo->num_4k)) {
			ehca_gen_err("page_cnt >= num_pages, page_cnt=%lx "
				     "num_pages=%lx page_4k_cnt=%lx num_4k=%lx",
				     pginfo->page_cnt, pginfo->num_pages,
				     pginfo->page_4k_cnt, pginfo->num_4k);
			ret = -EFAULT;
			goto ehca_set_pagebuf_1_exit0;
		}
		tmp_pbuf = pginfo->phys_buf_array + pginfo->next_buf;
		num4k  = ((tmp_pbuf->addr % EHCA_PAGESIZE) + tmp_pbuf->size +
			  EHCA_PAGESIZE - 1) / EHCA_PAGESIZE;
		offs4k = (tmp_pbuf->addr & ~PAGE_MASK) / EHCA_PAGESIZE;
		*rpage = phys_to_abs((tmp_pbuf->addr & EHCA_PAGEMASK) +
				     (pginfo->next_4k * EHCA_PAGESIZE));
		if ( !(*rpage) && tmp_pbuf->addr ) {
			ehca_gen_err("tmp_pbuf->addr=%lx"
				     " tmp_pbuf->size=%lx next_4k=%lx",
				     tmp_pbuf->addr, tmp_pbuf->size,
				     pginfo->next_4k);
			ret = -EFAULT;
			goto ehca_set_pagebuf_1_exit0;
		}
		(pginfo->page_4k_cnt)++;
		(pginfo->next_4k)++;
		if (pginfo->next_4k % (PAGE_SIZE / EHCA_PAGESIZE) == 0)
			(pginfo->page_cnt)++;
		if (pginfo->next_4k >= offs4k + num4k) {
			(pginfo->next_buf)++;
			pginfo->next_4k = 0;
		}
	} else if (pginfo->type == EHCA_MR_PGI_USER) {
		chunk      = pginfo->next_chunk;
		prev_chunk = pginfo->next_chunk;
		list_for_each_entry_continue(chunk,
					     (&(pginfo->region->chunk_list)),
					     list) {
			pgaddr = ( page_to_pfn(chunk->page_list[
						       pginfo->next_nmap].page)
				   << PAGE_SHIFT);
			*rpage = phys_to_abs(pgaddr +
					     (pginfo->next_4k * EHCA_PAGESIZE));
			if ( !(*rpage) ) {
				ehca_gen_err("pgaddr=%lx chunk->page_list[]=%lx"
					     " next_nmap=%lx next_4k=%lx mr=%p",
					     pgaddr, (u64)sg_dma_address(
						     &chunk->page_list[
							     pginfo->
							     next_nmap]),
					     pginfo->next_nmap, pginfo->next_4k,
					     e_mr);
				ret = -EFAULT;
				goto ehca_set_pagebuf_1_exit0;
			}
			(pginfo->page_4k_cnt)++;
			(pginfo->next_4k)++;
			if (pginfo->next_4k %
			    (PAGE_SIZE / EHCA_PAGESIZE) == 0) {
				(pginfo->page_cnt)++;
				(pginfo->next_nmap)++;
				pginfo->next_4k = 0;
			}
			if (pginfo->next_nmap >= chunk->nmap) {
				pginfo->next_nmap = 0;
				prev_chunk = chunk;
			}
			break;
		}
		pginfo->next_chunk =
			list_prepare_entry(prev_chunk,
					   (&(pginfo->region->chunk_list)),
					   list);
	} else if (pginfo->type == EHCA_MR_PGI_FMR) {
		fmrlist = pginfo->page_list + pginfo->next_listelem;
		*rpage = phys_to_abs((*fmrlist & EHCA_PAGEMASK) +
				     pginfo->next_4k * EHCA_PAGESIZE);
		if ( !(*rpage) ) {
			ehca_gen_err("*fmrlist=%lx fmrlist=%p "
				     "next_listelem=%lx next_4k=%lx",
				     *fmrlist, fmrlist, pginfo->next_listelem,
				     pginfo->next_4k);
			ret = -EFAULT;
			goto ehca_set_pagebuf_1_exit0;
		}
		(pginfo->page_4k_cnt)++;
		(pginfo->next_4k)++;
		if (pginfo->next_4k %
		    (e_mr->fmr_page_size / EHCA_PAGESIZE) == 0) {
			(pginfo->page_cnt)++;
			(pginfo->next_listelem)++;
			pginfo->next_4k = 0;
		}
	} else {
		ehca_gen_err("bad pginfo->type=%x", pginfo->type);
		ret = -EFAULT;
		goto ehca_set_pagebuf_1_exit0;
	}

ehca_set_pagebuf_1_exit0:
	if (ret)
		ehca_gen_err("ret=%x e_mr=%p pginfo=%p type=%x num_pages=%lx "
			     "num_4k=%lx next_buf=%lx next_4k=%lx rpage=%p "
			     "page_cnt=%lx page_4k_cnt=%lx next_listelem=%lx "
			     "region=%p next_chunk=%p next_nmap=%lx", ret, e_mr,
			     pginfo, pginfo->type, pginfo->num_pages,
			     pginfo->num_4k, pginfo->next_buf, pginfo->next_4k,
			     rpage, pginfo->page_cnt, pginfo->page_4k_cnt,
			     pginfo->next_listelem, pginfo->region,
			     pginfo->next_chunk, pginfo->next_nmap);
	return ret;
} /* end ehca_set_pagebuf_1() */

/*----------------------------------------------------------------------*/

/*
 * check MR if it is a max-MR, i.e. uses whole memory
 * in case it's a max-MR 1 is returned, else 0
 */
int ehca_mr_is_maxmr(u64 size,
		     u64 *iova_start)
{
	/* a MR is treated as max-MR only if it fits following: */
	if ((size == ((u64)high_memory - PAGE_OFFSET)) &&
	    (iova_start == (void *)KERNELBASE)) {
		ehca_gen_dbg("this is a max-MR");
		return 1;
	} else
		return 0;
} /* end ehca_mr_is_maxmr() */

/*----------------------------------------------------------------------*/

/* map access control for MR/MW. This routine is used for MR and MW. */
void ehca_mrmw_map_acl(int ib_acl,
		       u32 *hipz_acl)
{
	*hipz_acl = 0;
	if (ib_acl & IB_ACCESS_REMOTE_READ)
		*hipz_acl |= HIPZ_ACCESSCTRL_R_READ;
	if (ib_acl & IB_ACCESS_REMOTE_WRITE)
		*hipz_acl |= HIPZ_ACCESSCTRL_R_WRITE;
	if (ib_acl & IB_ACCESS_REMOTE_ATOMIC)
		*hipz_acl |= HIPZ_ACCESSCTRL_R_ATOMIC;
	if (ib_acl & IB_ACCESS_LOCAL_WRITE)
		*hipz_acl |= HIPZ_ACCESSCTRL_L_WRITE;
	if (ib_acl & IB_ACCESS_MW_BIND)
		*hipz_acl |= HIPZ_ACCESSCTRL_MW_BIND;
} /* end ehca_mrmw_map_acl() */

/*----------------------------------------------------------------------*/

/* sets page size in hipz access control for MR/MW. */
void ehca_mrmw_set_pgsize_hipz_acl(u32 *hipz_acl) /*INOUT*/
{
	return; /* HCA supports only 4k */
} /* end ehca_mrmw_set_pgsize_hipz_acl() */

/*----------------------------------------------------------------------*/

/*
 * reverse map access control for MR/MW.
 * This routine is used for MR and MW.
 */
void ehca_mrmw_reverse_map_acl(const u32 *hipz_acl,
			       int *ib_acl) /*OUT*/
{
	*ib_acl = 0;
	if (*hipz_acl & HIPZ_ACCESSCTRL_R_READ)
		*ib_acl |= IB_ACCESS_REMOTE_READ;
	if (*hipz_acl & HIPZ_ACCESSCTRL_R_WRITE)
		*ib_acl |= IB_ACCESS_REMOTE_WRITE;
	if (*hipz_acl & HIPZ_ACCESSCTRL_R_ATOMIC)
		*ib_acl |= IB_ACCESS_REMOTE_ATOMIC;
	if (*hipz_acl & HIPZ_ACCESSCTRL_L_WRITE)
		*ib_acl |= IB_ACCESS_LOCAL_WRITE;
	if (*hipz_acl & HIPZ_ACCESSCTRL_MW_BIND)
		*ib_acl |= IB_ACCESS_MW_BIND;
} /* end ehca_mrmw_reverse_map_acl() */

/*----------------------------------------------------------------------*/
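
/*
 * Illustrative mapping (not from the original source): an IB ACL of
 * IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ is translated by
 * ehca_mrmw_map_acl() into HIPZ_ACCESSCTRL_L_WRITE | HIPZ_ACCESSCTRL_R_READ,
 * and ehca_mrmw_reverse_map_acl() undoes exactly this translation when a MR
 * is queried.
 */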

/*
 * map HIPZ rc to IB retcodes for MR/MW allocations
 * Used for hipz_mr_reg_alloc and hipz_mw_alloc.
 */
int ehca_mrmw_map_hrc_alloc(const u64 hipz_rc)
{
	switch (hipz_rc) {
	case H_SUCCESS:	             /* successful completion */
		return 0;
	case H_ADAPTER_PARM:         /* invalid adapter handle */
	case H_RT_PARM:              /* invalid resource type */
	case H_NOT_ENOUGH_RESOURCES: /* insufficient resources */
	case H_MLENGTH_PARM:         /* invalid memory length */
	case H_MEM_ACCESS_PARM:      /* invalid access controls */
	case H_CONSTRAINED:          /* resource constraint */
		return -EINVAL;
	case H_BUSY:                 /* long busy */
		return -EBUSY;
	default:
		return -EINVAL;
	}
} /* end ehca_mrmw_map_hrc_alloc() */

/*----------------------------------------------------------------------*/

/*
 * map HIPZ rc to IB retcodes for MR register rpage
 * Used for hipz_h_register_rpage_mr at registering last page
 */
int ehca_mrmw_map_hrc_rrpg_last(const u64 hipz_rc)
{
	switch (hipz_rc) {
	case H_SUCCESS:         /* registration complete */
		return 0;
	case H_PAGE_REGISTERED:	/* page registered */
	case H_ADAPTER_PARM:    /* invalid adapter handle */
	case H_RH_PARM:         /* invalid resource handle */
/*	case H_QT_PARM:            invalid queue type */
	case H_PARAMETER:       /*
				 * invalid logical address,
				 * or count zero or greater 512
				 */
	case H_TABLE_FULL:      /* page table full */
	case H_HARDWARE:        /* HCA not operational */
		return -EINVAL;
	case H_BUSY:            /* long busy */
		return -EBUSY;
	default:
		return -EINVAL;
	}
} /* end ehca_mrmw_map_hrc_rrpg_last() */

/*----------------------------------------------------------------------*/

/*
 * map HIPZ rc to IB retcodes for MR register rpage
 * Used for hipz_h_register_rpage_mr at registering one page, but not last page
 */
int ehca_mrmw_map_hrc_rrpg_notlast(const u64 hipz_rc)
{
	switch (hipz_rc) {
	case H_PAGE_REGISTERED:	/* page registered */
		return 0;
	case H_SUCCESS:         /* registration complete */
	case H_ADAPTER_PARM:    /* invalid adapter handle */
	case H_RH_PARM:         /* invalid resource handle */
/*	case H_QT_PARM:            invalid queue type */
	case H_PARAMETER:       /*
				 * invalid logical address,
				 * or count zero or greater 512
				 */
	case H_TABLE_FULL:      /* page table full */
	case H_HARDWARE:        /* HCA not operational */
		return -EINVAL;
	case H_BUSY:            /* long busy */
		return -EBUSY;
	default:
		return -EINVAL;
	}
} /* end ehca_mrmw_map_hrc_rrpg_notlast() */

/*----------------------------------------------------------------------*/

/* map HIPZ rc to IB retcodes for MR query. Used for hipz_mr_query. */
int ehca_mrmw_map_hrc_query_mr(const u64 hipz_rc)
{
	switch (hipz_rc) {
	case H_SUCCESS:      /* successful completion */
		return 0;
	case H_ADAPTER_PARM: /* invalid adapter handle */
	case H_RH_PARM:      /* invalid resource handle */
		return -EINVAL;
	case H_BUSY:         /* long busy */
		return -EBUSY;
	default:
		return -EINVAL;
	}
} /* end ehca_mrmw_map_hrc_query_mr() */

/*----------------------------------------------------------------------*/

/*
 * map HIPZ rc to IB retcodes for freeing MR resource
 * Used for hipz_h_free_resource_mr
 */
int ehca_mrmw_map_hrc_free_mr(const u64 hipz_rc)
{
	switch (hipz_rc) {
	case H_SUCCESS:      /* resource freed */
		return 0;
	case H_ADAPTER_PARM: /* invalid adapter handle */
	case H_RH_PARM:      /* invalid resource handle */
	case H_R_STATE:      /* invalid resource state */
	case H_HARDWARE:     /* HCA not operational */
		return -EINVAL;
	case H_RESOURCE:     /* Resource in use */
	case H_BUSY:         /* long busy */
		return -EBUSY;
	default:
		return -EINVAL;
	}
} /* end ehca_mrmw_map_hrc_free_mr() */

/*----------------------------------------------------------------------*/

/*
 * map HIPZ rc to IB retcodes for freeing MW resource
 * Used for hipz_h_free_resource_mw
 */
int ehca_mrmw_map_hrc_free_mw(const u64 hipz_rc)
{
	switch (hipz_rc) {
	case H_SUCCESS:      /* resource freed */
		return 0;
	case H_ADAPTER_PARM: /* invalid adapter handle */
	case H_RH_PARM:      /* invalid resource handle */
	case H_R_STATE:      /* invalid resource state */
	case H_HARDWARE:     /* HCA not operational */
		return -EINVAL;
	case H_RESOURCE:     /* Resource in use */
	case H_BUSY:         /* long busy */
		return -EBUSY;
	default:
		return -EINVAL;
	}
} /* end ehca_mrmw_map_hrc_free_mw() */

/*----------------------------------------------------------------------*/

/*
 * map HIPZ rc to IB retcodes for SMR registrations
 * Used for hipz_h_register_smr.
 */
int ehca_mrmw_map_hrc_reg_smr(const u64 hipz_rc)
{
	switch (hipz_rc) {
	case H_SUCCESS:              /* successful completion */
		return 0;
	case H_ADAPTER_PARM:         /* invalid adapter handle */
	case H_RH_PARM:              /* invalid resource handle */
	case H_MEM_PARM:             /* invalid MR virtual address */
	case H_MEM_ACCESS_PARM:      /* invalid access controls */
	case H_NOT_ENOUGH_RESOURCES: /* insufficient resources */
		return -EINVAL;
	case H_BUSY:                 /* long busy */
		return -EBUSY;
	default:
		return -EINVAL;
	}
} /* end ehca_mrmw_map_hrc_reg_smr() */

/*----------------------------------------------------------------------*/

/*
 * MR destructor and constructor
 * used in Reregister MR verb, sets all fields in ehca_mr_t to 0,
 * except struct ib_mr and spinlock
 */
void ehca_mr_deletenew(struct ehca_mr *mr)
{
	mr->flags         = 0;
	mr->num_pages     = 0;
	mr->num_4k        = 0;
	mr->acl           = 0;
	mr->start         = NULL;
	mr->fmr_page_size = 0;
	mr->fmr_max_pages = 0;
	mr->fmr_max_maps  = 0;
	mr->fmr_map_cnt   = 0;
	memset(&mr->ipz_mr_handle, 0, sizeof(mr->ipz_mr_handle));
	memset(&mr->galpas, 0, sizeof(mr->galpas));
	mr->nr_of_pages   = 0;
	mr->pagearray     = NULL;
} /* end ehca_mr_deletenew() */

int ehca_init_mrmw_cache(void)
{
	mr_cache = kmem_cache_create("ehca_cache_mr",
				     sizeof(struct ehca_mr), 0,
				     SLAB_HWCACHE_ALIGN,
				     NULL, NULL);
	if (!mr_cache)
		return -ENOMEM;
	mw_cache = kmem_cache_create("ehca_cache_mw",
				     sizeof(struct ehca_mw), 0,
				     SLAB_HWCACHE_ALIGN,
				     NULL, NULL);
	if (!mw_cache) {
		kmem_cache_destroy(mr_cache);
		mr_cache = NULL;
		return -ENOMEM;
	}
	return 0;
}

void ehca_cleanup_mrmw_cache(void)
{
	if (mr_cache)
		kmem_cache_destroy(mr_cache);
	if (mw_cache)
		kmem_cache_destroy(mw_cache);
}