/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <rdma/ib_umem.h>
#include <linux/atomic.h>

#include "iw_cxgb4.h"

#define T4_ULPTX_MIN_IO 32
#define C4IW_MAX_INLINE_SIZE 96
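/*
 * write_adapter_mem() below copies 'len' bytes of 'data' into adapter
 * memory at 32-byte-unit address 'addr' by building ULP_TX_MEM_WRITE
 * work requests.  Each work request carries at most C4IW_MAX_INLINE_SIZE
 * (96) bytes of inline payload, padded to a multiple of T4_ULPTX_MIN_IO
 * (32) bytes, and only the last work request asks for a completion,
 * which the caller then waits for.
 *
 * Illustrative arithmetic (not part of the original code): writing 200
 * bytes takes DIV_ROUND_UP(200, 96) = 3 work requests carrying 96, 96
 * and 8 bytes, the last padded up to 32 bytes.
 */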
static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
			     void *data)
{
	struct sk_buff *skb;
	struct ulp_mem_io *req;
	struct ulptx_idata *sc;
	u8 wr_len, *to_dp, *from_dp;
	int copy_len, num_wqe, i, ret = 0;
	struct c4iw_wr_wait wr_wait;

	PDBG("%s addr 0x%x len %u\n", __func__, addr, len);
	num_wqe = DIV_ROUND_UP(len, C4IW_MAX_INLINE_SIZE);
	c4iw_init_wr_wait(&wr_wait);
	for (i = 0; i < num_wqe; i++) {

		copy_len = len > C4IW_MAX_INLINE_SIZE ? C4IW_MAX_INLINE_SIZE :
			   len;
		wr_len = roundup(sizeof *req + sizeof *sc +
				 roundup(copy_len, T4_ULPTX_MIN_IO), 16);

		skb = alloc_skb(wr_len, GFP_KERNEL);
		if (!skb)
			return -ENOMEM;
		set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

		req = (struct ulp_mem_io *)__skb_put(skb, wr_len);
		memset(req, 0, wr_len);
		INIT_ULPTX_WR(req, wr_len, 0, 0);

		if (i == (num_wqe - 1)) {
			req->wr.wr_hi = cpu_to_be32(FW_WR_OP(FW_ULPTX_WR) |
						    FW_WR_COMPL(1));
			req->wr.wr_lo = (__force __be64)(unsigned long)&wr_wait;
		} else
			req->wr.wr_hi = cpu_to_be32(FW_WR_OP(FW_ULPTX_WR));
		req->wr.wr_mid = cpu_to_be32(
				       FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16)));

		req->cmd = cpu_to_be32(ULPTX_CMD(ULP_TX_MEM_WRITE) | (1<<23));
		req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN(
				DIV_ROUND_UP(copy_len, T4_ULPTX_MIN_IO)));
		req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len - sizeof(req->wr),
						      16));
		req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR(addr + i * 3));

		sc = (struct ulptx_idata *)(req + 1);
		sc->cmd_more = cpu_to_be32(ULPTX_CMD(ULP_TX_SC_IMM));
		sc->len = cpu_to_be32(roundup(copy_len, T4_ULPTX_MIN_IO));

		to_dp = (u8 *)(sc + 1);
		from_dp = (u8 *)data + i * C4IW_MAX_INLINE_SIZE;
		if (data)
			memcpy(to_dp, from_dp, copy_len);
		else
			memset(to_dp, 0, copy_len);
		if (copy_len % T4_ULPTX_MIN_IO)
			memset(to_dp + copy_len, 0, T4_ULPTX_MIN_IO -
			       (copy_len % T4_ULPTX_MIN_IO));
		ret = c4iw_ofld_send(rdev, skb);
		if (ret)
			return ret;
		len -= C4IW_MAX_INLINE_SIZE;
	}

	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
	return ret;
}
/*
 * Build and write a TPT entry.
 * IN: stag key, pdid, perm, bind_enabled, zbva, to, len, page_size,
 *     pbl_size and pbl_addr
 * OUT: stag index
 */
static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
			   u32 *stag, u8 stag_state, u32 pdid,
			   enum fw_ri_stag_type type, enum fw_ri_mem_perms perm,
			   int bind_enabled, u32 zbva, u64 to,
			   u64 len, u8 page_size, u32 pbl_size, u32 pbl_addr)
{
	int err;
	struct fw_ri_tpte tpt;
	u32 stag_idx;
	static atomic_t key;

	if (c4iw_fatal_error(rdev))
		return -EIO;

	stag_state = stag_state > 0;
	stag_idx = (*stag) >> 8;

	if ((!reset_tpt_entry) && (*stag == T4_STAG_UNSET)) {
		stag_idx = c4iw_get_resource(&rdev->resource.tpt_fifo,
					     &rdev->resource.tpt_fifo_lock);
		if (!stag_idx)
			return -ENOMEM;
		*stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
	}
	PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
	     __func__, stag_state, type, pdid, stag_idx);

	/* write TPT entry */
	if (reset_tpt_entry)
		memset(&tpt, 0, sizeof(tpt));
	else {
		tpt.valid_to_pdid = cpu_to_be32(F_FW_RI_TPTE_VALID |
			V_FW_RI_TPTE_STAGKEY((*stag & M_FW_RI_TPTE_STAGKEY)) |
			V_FW_RI_TPTE_STAGSTATE(stag_state) |
			V_FW_RI_TPTE_STAGTYPE(type) | V_FW_RI_TPTE_PDID(pdid));
		tpt.locread_to_qpid = cpu_to_be32(V_FW_RI_TPTE_PERM(perm) |
			(bind_enabled ? F_FW_RI_TPTE_MWBINDEN : 0) |
			V_FW_RI_TPTE_ADDRTYPE((zbva ? FW_RI_ZERO_BASED_TO :
						      FW_RI_VA_BASED_TO)) |
			V_FW_RI_TPTE_PS(page_size));
		tpt.nosnoop_pbladdr = !pbl_size ? 0 : cpu_to_be32(
			V_FW_RI_TPTE_PBLADDR(PBL_OFF(rdev, pbl_addr) >> 3));
		tpt.len_lo = cpu_to_be32((u32)(len & 0xffffffffUL));
		tpt.va_hi = cpu_to_be32((u32)(to >> 32));
		tpt.va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL));
		tpt.dca_mwbcnt_pstag = cpu_to_be32(0);
		tpt.len_hi = cpu_to_be32((u32)(len >> 32));
	}
	err = write_adapter_mem(rdev, stag_idx +
				(rdev->lldi.vr->stag.start >> 5),
				sizeof(tpt), &tpt);

	if (reset_tpt_entry)
		c4iw_put_resource(&rdev->resource.tpt_fifo, stag_idx,
				  &rdev->resource.tpt_fifo_lock);
	return err;
}
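/*
 * dereg_mem(), allocate_window(), deallocate_window() and allocate_stag()
 * below are thin wrappers around write_tpt_entry(): the dereg/deallocate
 * paths pass reset_tpt_entry = 1 to clear the entry and return its index
 * to the TPT FIFO, while the allocate paths pass *stag == T4_STAG_UNSET so
 * a fresh index is pulled from the FIFO.  The resulting STag packs the TPT
 * index in the upper 24 bits and an 8-bit rolling key in the low byte
 * ((stag_idx << 8) | key), which is why the rest of this file recovers the
 * mmid as stag >> 8.
 */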
static int write_pbl(struct c4iw_rdev *rdev, __be64 *pbl,
		     u32 pbl_addr, u32 pbl_size)
{
	int err;

	PDBG("%s *pbl_addr 0x%x, pbl_base 0x%x, pbl_size %d\n",
	     __func__, pbl_addr, rdev->lldi.vr->pbl.start,
	     pbl_size);

	err = write_adapter_mem(rdev, pbl_addr >> 5, pbl_size << 3, pbl);
	return err;
}
static int dereg_mem(struct c4iw_rdev *rdev, u32 stag, u32 pbl_size,
		     u32 pbl_addr)
{
	return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0,
			       pbl_size, pbl_addr);
}
static int allocate_window(struct c4iw_rdev *rdev, u32 *stag, u32 pdid)
{
	*stag = T4_STAG_UNSET;
	return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_MW, 0, 0, 0,
			       0UL, 0, 0, 0, 0);
}
static int deallocate_window(struct c4iw_rdev *rdev, u32 stag)
{
	return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0, 0,
			       0);
}
static int allocate_stag(struct c4iw_rdev *rdev, u32 *stag, u32 pdid,
			 u32 pbl_size, u32 pbl_addr)
{
	*stag = T4_STAG_UNSET;
	return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_NSMR, 0, 0, 0,
			       0UL, 0, 0, pbl_size, pbl_addr);
}
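/*
 * finish_mem_reg() below records the newly written STag in the MR,
 * publishes it as both lkey and rkey, and inserts the MR into the
 * device's mmidr idr under stag >> 8 so the driver can later look the
 * MR up by its mmid.
 */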
static int finish_mem_reg(struct c4iw_mr *mhp, u32 stag)
{
	u32 mmid;

	mhp->attr.state = 1;
	mhp->attr.stag = stag;
	mmid = stag >> 8;
	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
	PDBG("%s mmid 0x%x mhp %p\n", __func__, mmid, mhp);
	return insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid);
}
static int register_mem(struct c4iw_dev *rhp, struct c4iw_pd *php,
			struct c4iw_mr *mhp, int shift)
{
	u32 stag = T4_STAG_UNSET;
	int ret;

	ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid,
			      FW_RI_STAG_NSMR, mhp->attr.perms,
			      mhp->attr.mw_bind_enable, mhp->attr.zbva,
			      mhp->attr.va_fbo, mhp->attr.len, shift - 12,
			      mhp->attr.pbl_size, mhp->attr.pbl_addr);
	if (ret)
		return ret;

	ret = finish_mem_reg(mhp, stag);
	if (ret)
		dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
			  mhp->attr.pbl_addr);
	return ret;
}
static int reregister_mem(struct c4iw_dev *rhp, struct c4iw_pd *php,
			  struct c4iw_mr *mhp, int shift, int npages)
{
	u32 stag;
	int ret;

	if (npages > mhp->attr.pbl_size)
		return -ENOMEM;

	stag = mhp->attr.stag;
	ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid,
			      FW_RI_STAG_NSMR, mhp->attr.perms,
			      mhp->attr.mw_bind_enable, mhp->attr.zbva,
			      mhp->attr.va_fbo, mhp->attr.len, shift - 12,
			      mhp->attr.pbl_size, mhp->attr.pbl_addr);
	if (ret)
		return ret;

	ret = finish_mem_reg(mhp, stag);
	if (ret)
		dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
			  mhp->attr.pbl_addr);

	return ret;
}
static int alloc_pbl(struct c4iw_mr *mhp, int npages)
{
	mhp->attr.pbl_addr = c4iw_pblpool_alloc(&mhp->rhp->rdev,
						npages << 3);

	if (!mhp->attr.pbl_addr)
		return -ENOMEM;

	mhp->attr.pbl_size = npages;

	return 0;
}
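/*
 * Note: PBL entries are 64-bit addresses, so a PBL of 'npages' entries
 * occupies npages << 3 bytes of adapter memory; the same shift appears
 * in the c4iw_pblpool_alloc()/c4iw_pblpool_free() pairs and in the
 * pbl_size << 3 passed to write_adapter_mem() by write_pbl() above.
 */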
static int build_phys_page_list(struct ib_phys_buf *buffer_list,
				int num_phys_buf, u64 *iova_start,
				u64 *total_size, int *npages,
				int *shift, __be64 **page_list)
{
	u64 mask;
	int i, j, n;

	mask = 0;
	*total_size = 0;
	for (i = 0; i < num_phys_buf; ++i) {
		if (i != 0 && buffer_list[i].addr & ~PAGE_MASK)
			return -EINVAL;
		if (i != 0 && i != num_phys_buf - 1 &&
		    (buffer_list[i].size & ~PAGE_MASK))
			return -EINVAL;
		*total_size += buffer_list[i].size;
		if (i > 0)
			mask |= buffer_list[i].addr;
		else
			mask |= buffer_list[i].addr & PAGE_MASK;
		if (i != num_phys_buf - 1)
			mask |= buffer_list[i].addr + buffer_list[i].size;
		else
			mask |= (buffer_list[i].addr + buffer_list[i].size +
				PAGE_SIZE - 1) & PAGE_MASK;
	}

	if (*total_size > 0xFFFFFFFFULL)
		return -ENOMEM;

	/* Find largest page shift we can use to cover buffers */
	for (*shift = PAGE_SHIFT; *shift < 27; ++(*shift))
		if ((1ULL << *shift) & mask)
			break;

	buffer_list[0].size += buffer_list[0].addr & ((1ULL << *shift) - 1);
	buffer_list[0].addr &= ~0ull << *shift;

	*npages = 0;
	for (i = 0; i < num_phys_buf; ++i)
		*npages += (buffer_list[i].size +
			(1ULL << *shift) - 1) >> *shift;

	if (!*npages)
		return -EINVAL;

	*page_list = kmalloc(sizeof(u64) * *npages, GFP_KERNEL);
	if (!*page_list)
		return -ENOMEM;

	n = 0;
	for (i = 0; i < num_phys_buf; ++i)
		for (j = 0;
		     j < (buffer_list[i].size + (1ULL << *shift) - 1) >> *shift;
		     ++j)
			(*page_list)[n++] = cpu_to_be64(buffer_list[i].addr +
			    ((u64) j << *shift));

	PDBG("%s va 0x%llx mask 0x%llx shift %d len %lld pbl_size %d\n",
	     __func__, (unsigned long long)*iova_start,
	     (unsigned long long)mask, *shift, (unsigned long long)*total_size,
	     *npages);

	return 0;
}
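/*
 * Worked example for build_phys_page_list() (illustrative values, not
 * from the original code): a single buffer at addr 0x200000 of size
 * 0x200000 with PAGE_SHIFT == 12 gives mask = 0x200000 | 0x400000, so
 * the shift loop stops at 21 (2 MB pages), *npages ends up as 1, and
 * the callers below store page_size = shift - 12 = 9 in the TPT entry.
 */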
int c4iw_reregister_phys_mem(struct ib_mr *mr, int mr_rereg_mask,
			     struct ib_pd *pd, struct ib_phys_buf *buffer_list,
			     int num_phys_buf, int acc, u64 *iova_start)
{

	struct c4iw_mr mh, *mhp;
	struct c4iw_pd *php;
	struct c4iw_dev *rhp;
	__be64 *page_list = NULL;
	int shift = 0;
	u64 total_size;
	int npages;
	int ret;

	PDBG("%s ib_mr %p ib_pd %p\n", __func__, mr, pd);

	/* There can be no memory windows */
	if (atomic_read(&mr->usecnt))
		return -EINVAL;

	mhp = to_c4iw_mr(mr);
	rhp = mhp->rhp;
	php = to_c4iw_pd(mr->pd);

	/* make sure we are on the same adapter */
	if (rhp != php->rhp)
		return -EINVAL;

	memcpy(&mh, mhp, sizeof *mhp);

	if (mr_rereg_mask & IB_MR_REREG_PD)
		php = to_c4iw_pd(pd);
	if (mr_rereg_mask & IB_MR_REREG_ACCESS) {
		mh.attr.perms = c4iw_ib_to_tpt_access(acc);
		mh.attr.mw_bind_enable = (acc & IB_ACCESS_MW_BIND) ==
					 IB_ACCESS_MW_BIND;
	}
	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
		ret = build_phys_page_list(buffer_list, num_phys_buf,
					   iova_start,
					   &total_size, &npages,
					   &shift, &page_list);
		if (ret)
			return ret;
	}

	ret = reregister_mem(rhp, php, &mh, shift, npages);
	kfree(page_list);
	if (ret)
		return ret;
	if (mr_rereg_mask & IB_MR_REREG_PD)
		mhp->attr.pdid = php->pdid;
	if (mr_rereg_mask & IB_MR_REREG_ACCESS)
		mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
		mhp->attr.zbva = 0;
		mhp->attr.va_fbo = *iova_start;
		mhp->attr.page_size = shift - 12;
		mhp->attr.len = (u32) total_size;
		mhp->attr.pbl_size = npages;
	}

	return 0;
}
struct ib_mr *c4iw_register_phys_mem(struct ib_pd *pd,
				     struct ib_phys_buf *buffer_list,
				     int num_phys_buf, int acc, u64 *iova_start)
{
	__be64 *page_list;
	int shift;
	u64 total_size;
	int npages;
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;
	int ret;

	PDBG("%s ib_pd %p\n", __func__, pd);
	php = to_c4iw_pd(pd);
	rhp = php->rhp;

	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;

	/* First check that we have enough alignment */
	if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK)) {
		ret = -EINVAL;
		goto err;
	}

	if (num_phys_buf > 1 &&
	    ((buffer_list[0].addr + buffer_list[0].size) & ~PAGE_MASK)) {
		ret = -EINVAL;
		goto err;
	}

	ret = build_phys_page_list(buffer_list, num_phys_buf, iova_start,
				   &total_size, &npages, &shift,
				   &page_list);
	if (ret)
		goto err;

	ret = alloc_pbl(mhp, npages);
	if (ret) {
		kfree(page_list);
		goto err_pbl;
	}

	ret = write_pbl(&mhp->rhp->rdev, page_list, mhp->attr.pbl_addr,
			npages);
	kfree(page_list);
	if (ret)
		goto err_pbl;

	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;

	mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = *iova_start;
	mhp->attr.page_size = shift - 12;

	mhp->attr.len = (u32) total_size;
	mhp->attr.pbl_size = npages;
	ret = register_mem(rhp, php, mhp, shift);
	if (ret)
		goto err_pbl;

	return &mhp->ibmr;

err_pbl:
	c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
			  mhp->attr.pbl_size << 3);

err:
	kfree(mhp);
	return ERR_PTR(ret);
}
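/*
 * c4iw_get_dma_mr() below backs ib_get_dma_mr(): it writes an NSMR TPT
 * entry with va_fbo 0, length ~0UL and no PBL, i.e. an MR spanning the
 * whole address space for local DMA access with the requested
 * permissions.
 */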
struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;
	int ret;
	u32 stag = T4_STAG_UNSET;

	PDBG("%s ib_pd %p\n", __func__, pd);
	php = to_c4iw_pd(pd);
	rhp = php->rhp;

	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;
	mhp->attr.pdid = php->pdid;
	mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
	mhp->attr.mw_bind_enable = (acc & IB_ACCESS_MW_BIND) == IB_ACCESS_MW_BIND;
	mhp->attr.zbva = 0;
	mhp->attr.va_fbo = 0;
	mhp->attr.page_size = 0;
	mhp->attr.len = ~0UL;
	mhp->attr.pbl_size = 0;

	ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, php->pdid,
			      FW_RI_STAG_NSMR, mhp->attr.perms,
			      mhp->attr.mw_bind_enable, 0, 0, ~0UL, 0, 0, 0);
	if (ret)
		goto err1;

	ret = finish_mem_reg(mhp, stag);
	if (ret)
		goto err2;
	return &mhp->ibmr;
err2:
	dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
		  mhp->attr.pbl_addr);
err1:
	kfree(mhp);
	return ERR_PTR(ret);
}
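/*
 * c4iw_reg_user_mr() below pins the user region with ib_umem_get(), then
 * walks the umem chunk list and streams the DMA addresses into the
 * adapter PBL via write_pbl().  Addresses are batched a page at a time
 * (PAGE_SIZE / sizeof(__be64), i.e. 512 entries with 4K pages) before
 * being flushed, which is what the 'i'/'n' bookkeeping in the nested
 * loops implements.
 */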
struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
			       u64 virt, int acc, struct ib_udata *udata)
{
	__be64 *pages;
	int shift, n, len;
	int i, j, k;
	int err = 0;
	struct ib_umem_chunk *chunk;
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;

	PDBG("%s ib_pd %p\n", __func__, pd);

	if (length == ~0ULL)
		return ERR_PTR(-EINVAL);

	if ((length + start) < start)
		return ERR_PTR(-EINVAL);

	php = to_c4iw_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;

	mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
	if (IS_ERR(mhp->umem)) {
		err = PTR_ERR(mhp->umem);
		kfree(mhp);
		return ERR_PTR(err);
	}

	shift = ffs(mhp->umem->page_size) - 1;

	n = 0;
	list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
		n += chunk->nents;

	err = alloc_pbl(mhp, n);
	if (err)
		goto err;

	pages = (__be64 *) __get_free_page(GFP_KERNEL);
	if (!pages) {
		err = -ENOMEM;
		goto err_pbl;
	}

	i = n = 0;

	list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
		for (j = 0; j < chunk->nmap; ++j) {
			len = sg_dma_len(&chunk->page_list[j]) >> shift;
			for (k = 0; k < len; ++k) {
				pages[i++] = cpu_to_be64(sg_dma_address(
					&chunk->page_list[j]) +
					mhp->umem->page_size * k);
				if (i == PAGE_SIZE / sizeof *pages) {
					err = write_pbl(&mhp->rhp->rdev,
					      pages,
					      mhp->attr.pbl_addr + (n << 3), i);
					if (err)
						goto pbl_done;
					n += i;
					i = 0;
				}
			}
		}

	if (i)
		err = write_pbl(&mhp->rhp->rdev, pages,
				mhp->attr.pbl_addr + (n << 3), i);

pbl_done:
	free_page((unsigned long) pages);
	if (err)
		goto err_pbl;

	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;
	mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = virt;
	mhp->attr.page_size = shift - 12;
	mhp->attr.len = length;

	err = register_mem(rhp, php, mhp, shift);
	if (err)
		goto err_pbl;

	return &mhp->ibmr;

err_pbl:
	c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
			  mhp->attr.pbl_size << 3);

err:
	ib_umem_release(mhp->umem);
	kfree(mhp);
	return ERR_PTR(err);
}
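/*
 * Memory windows: c4iw_alloc_mw() below only allocates a TPT entry of
 * type FW_RI_STAG_MW (no PBL and no length are programmed here); the
 * window is presumably bound to a memory region later through a QP work
 * request elsewhere in the driver, not in this file.
 */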
struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd)
{
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mw *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret;

	php = to_c4iw_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);
	ret = allocate_window(&rhp->rdev, &stag, php->pdid);
	if (ret) {
		kfree(mhp);
		return ERR_PTR(ret);
	}
	mhp->rhp = rhp;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = FW_RI_STAG_MW;
	mhp->attr.stag = stag;
	mmid = (stag) >> 8;
	mhp->ibmw.rkey = stag;
	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
		deallocate_window(&rhp->rdev, mhp->attr.stag);
		kfree(mhp);
		return ERR_PTR(-ENOMEM);
	}
	PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
	return &(mhp->ibmw);
}
int c4iw_dealloc_mw(struct ib_mw *mw)
{
	struct c4iw_dev *rhp;
	struct c4iw_mw *mhp;
	u32 mmid;

	mhp = to_c4iw_mw(mw);
	rhp = mhp->rhp;
	mmid = (mw->rkey) >> 8;
	deallocate_window(&rhp->rdev, mhp->attr.stag);
	remove_handle(rhp, &rhp->mmidr, mmid);
	kfree(mhp);
	PDBG("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp);
	return 0;
}
struct ib_mr *c4iw_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth)
{
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret = 0;

	php = to_c4iw_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp) {
		ret = -ENOMEM;
		goto err;
	}

	mhp->rhp = rhp;
	ret = alloc_pbl(mhp, pbl_depth);
	if (ret)
		goto err1;
	mhp->attr.pbl_size = pbl_depth;
	ret = allocate_stag(&rhp->rdev, &stag, php->pdid,
			    mhp->attr.pbl_size, mhp->attr.pbl_addr);
	if (ret)
		goto err2;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = FW_RI_STAG_NSMR;
	mhp->attr.stag = stag;
	mhp->attr.state = 1;
	mmid = (stag) >> 8;
	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
		ret = -ENOMEM;
		goto err3;
	}

	PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
	return &(mhp->ibmr);
err3:
	dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
		  mhp->attr.pbl_addr);
err2:
	c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
			  mhp->attr.pbl_size << 3);
err1:
	kfree(mhp);
err:
	return ERR_PTR(ret);
}
struct ib_fast_reg_page_list *c4iw_alloc_fastreg_pbl(struct ib_device *device,
						     int page_list_len)
{
	struct c4iw_fr_page_list *c4pl;
	struct c4iw_dev *dev = to_c4iw_dev(device);
	dma_addr_t dma_addr;
	int size = sizeof *c4pl + page_list_len * sizeof(u64);

	c4pl = dma_alloc_coherent(&dev->rdev.lldi.pdev->dev, size,
				  &dma_addr, GFP_KERNEL);
	if (!c4pl)
		return ERR_PTR(-ENOMEM);

	dma_unmap_addr_set(c4pl, mapping, dma_addr);
	c4pl->dma_addr = dma_addr;
	c4pl->dev = dev;
	c4pl->size = size;
	c4pl->ibpl.page_list = (u64 *)(c4pl + 1);
	c4pl->ibpl.max_page_list_len = page_list_len;

	return &c4pl->ibpl;
}
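/*
 * The fast-register page list above is a single coherent DMA buffer:
 * the struct c4iw_fr_page_list header is followed immediately by the
 * u64 page-list entries (c4pl + 1), so the one dma_free_coherent() of
 * c4pl->size in c4iw_free_fastreg_pbl() below releases both.
 */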
void c4iw_free_fastreg_pbl(struct ib_fast_reg_page_list *ibpl)
{
	struct c4iw_fr_page_list *c4pl = to_c4iw_fr_page_list(ibpl);

	dma_free_coherent(&c4pl->dev->rdev.lldi.pdev->dev, c4pl->size,
			  c4pl, dma_unmap_addr(c4pl, mapping));
}
int c4iw_dereg_mr(struct ib_mr *ib_mr)
{
	struct c4iw_dev *rhp;
	struct c4iw_mr *mhp;
	u32 mmid;

	PDBG("%s ib_mr %p\n", __func__, ib_mr);
	/* There can be no memory windows */
	if (atomic_read(&ib_mr->usecnt))
		return -EINVAL;

	mhp = to_c4iw_mr(ib_mr);
	rhp = mhp->rhp;
	mmid = mhp->attr.stag >> 8;
	dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
		  mhp->attr.pbl_addr);
	if (mhp->attr.pbl_size)
		c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
				  mhp->attr.pbl_size << 3);
	remove_handle(rhp, &rhp->mmidr, mmid);
	if (mhp->kva)
		kfree((void *) (unsigned long) mhp->kva);
	if (mhp->umem)
		ib_umem_release(mhp->umem);
	PDBG("%s mmid 0x%x ptr %p\n", __func__, mmid, mhp);
	kfree(mhp);
	return 0;
}