/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_umem.h>
#include <linux/atomic.h>

#include "iw_cxgb4.h"	/* driver-private definitions (c4iw_*, PDBG, ...) */

#define T4_ULPTX_MIN_IO 32
#define C4IW_MAX_INLINE_SIZE 96

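/*
 * Write a block of adapter memory (TPT entries or PBLs) using ULP_TX
 * MEM_WRITE work requests.  The payload is carried inline, so it is
 * split into C4IW_MAX_INLINE_SIZE (96-byte) chunks and each chunk is
 * padded out to the 32-byte ULP_TX minimum I/O unit.  Only the last
 * work request asks for a completion, which is then waited on.
 */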
static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
                             void *data)
{
        struct sk_buff *skb;
        struct ulp_mem_io *req;
        struct ulptx_idata *sc;
        u8 wr_len, *to_dp, *from_dp;
        int copy_len, num_wqe, i, ret = 0;
        struct c4iw_wr_wait wr_wait;

        PDBG("%s addr 0x%x len %u\n", __func__, addr, len);
        num_wqe = DIV_ROUND_UP(len, C4IW_MAX_INLINE_SIZE);
        c4iw_init_wr_wait(&wr_wait);
        for (i = 0; i < num_wqe; i++) {

                copy_len = len > C4IW_MAX_INLINE_SIZE ? C4IW_MAX_INLINE_SIZE :
                           len;
                wr_len = roundup(sizeof *req + sizeof *sc +
                                 roundup(copy_len, T4_ULPTX_MIN_IO), 16);

                skb = alloc_skb(wr_len, GFP_KERNEL);
                if (!skb)
                        return -ENOMEM;
                set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

                req = (struct ulp_mem_io *)__skb_put(skb, wr_len);
                memset(req, 0, wr_len);
                INIT_ULPTX_WR(req, wr_len, 0, 0);

                if (i == (num_wqe-1)) {
                        req->wr.wr_hi = cpu_to_be32(FW_WR_OP(FW_ULPTX_WR) |
                                                    FW_WR_COMPL(1));
                        req->wr.wr_lo = (__force __be64)(unsigned long) &wr_wait;
                } else
                        req->wr.wr_hi = cpu_to_be32(FW_WR_OP(FW_ULPTX_WR));
                req->wr.wr_mid = cpu_to_be32(
                                       FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16)));

                req->cmd = cpu_to_be32(ULPTX_CMD(ULP_TX_MEM_WRITE) | (1<<23));
                req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN(
                                DIV_ROUND_UP(copy_len, T4_ULPTX_MIN_IO)));
                req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(req->wr),
                                                      16));
                req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR(addr + i * 3));

                sc = (struct ulptx_idata *)(req + 1);
                sc->cmd_more = cpu_to_be32(ULPTX_CMD(ULP_TX_SC_IMM));
                sc->len = cpu_to_be32(roundup(copy_len, T4_ULPTX_MIN_IO));

                to_dp = (u8 *)(sc + 1);
                from_dp = (u8 *)data + i * C4IW_MAX_INLINE_SIZE;
                if (data)
                        memcpy(to_dp, from_dp, copy_len);
                else
                        memset(to_dp, 0, copy_len);
                if (copy_len % T4_ULPTX_MIN_IO)
                        memset(to_dp + copy_len, 0, T4_ULPTX_MIN_IO -
                               (copy_len % T4_ULPTX_MIN_IO));
                ret = c4iw_ofld_send(rdev, skb);
                if (ret)
                        return ret;
                len -= C4IW_MAX_INLINE_SIZE;
        }

        ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
        return ret;
}

/*
 * Build and write a TPT entry.
 * IN: stag key, pdid, perm, bind_enabled, zbva, to, len, page_size,
 *     pbl_size and pbl_addr
 * OUT: stag index
 */
static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
                           u32 *stag, u8 stag_state, u32 pdid,
                           enum fw_ri_stag_type type, enum fw_ri_mem_perms perm,
                           int bind_enabled, u32 zbva, u64 to,
                           u64 len, u8 page_size, u32 pbl_size, u32 pbl_addr)
{
        int err;
        struct fw_ri_tpte tpt;
        u32 stag_idx;
        static atomic_t key;

        if (c4iw_fatal_error(rdev))
                return -EIO;

        stag_state = stag_state > 0;
        stag_idx = (*stag) >> 8;

        if ((!reset_tpt_entry) && (*stag == T4_STAG_UNSET)) {
                stag_idx = c4iw_get_resource(&rdev->resource.tpt_table);
                if (!stag_idx)
                        return -ENOMEM;
                mutex_lock(&rdev->stats.lock);
                rdev->stats.stag.cur += 32;
                if (rdev->stats.stag.cur > rdev->stats.stag.max)
                        rdev->stats.stag.max = rdev->stats.stag.cur;
                mutex_unlock(&rdev->stats.lock);
                *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
        }
        PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
             __func__, stag_state, type, pdid, stag_idx);

        /* write TPT entry */
        if (reset_tpt_entry)
                memset(&tpt, 0, sizeof(tpt));
        else {
                tpt.valid_to_pdid = cpu_to_be32(F_FW_RI_TPTE_VALID |
                        V_FW_RI_TPTE_STAGKEY((*stag & M_FW_RI_TPTE_STAGKEY)) |
                        V_FW_RI_TPTE_STAGSTATE(stag_state) |
                        V_FW_RI_TPTE_STAGTYPE(type) | V_FW_RI_TPTE_PDID(pdid));
                tpt.locread_to_qpid = cpu_to_be32(V_FW_RI_TPTE_PERM(perm) |
                        (bind_enabled ? F_FW_RI_TPTE_MWBINDEN : 0) |
                        V_FW_RI_TPTE_ADDRTYPE((zbva ? FW_RI_ZERO_BASED_TO :
                                                      FW_RI_VA_BASED_TO)) |
                        V_FW_RI_TPTE_PS(page_size));
                tpt.nosnoop_pbladdr = !pbl_size ? 0 : cpu_to_be32(
                        V_FW_RI_TPTE_PBLADDR(PBL_OFF(rdev, pbl_addr)>>3));
                tpt.len_lo = cpu_to_be32((u32)(len & 0xffffffffUL));
                tpt.va_hi = cpu_to_be32((u32)(to >> 32));
                tpt.va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL));
                tpt.dca_mwbcnt_pstag = cpu_to_be32(0);
                tpt.len_hi = cpu_to_be32((u32)(len >> 32));
        }
        err = write_adapter_mem(rdev, stag_idx +
                                (rdev->lldi.vr->stag.start >> 5),
                                sizeof(tpt), &tpt);

        if (reset_tpt_entry) {
                c4iw_put_resource(&rdev->resource.tpt_table, stag_idx);
                mutex_lock(&rdev->stats.lock);
                rdev->stats.stag.cur -= 32;
                mutex_unlock(&rdev->stats.lock);
        }
        return err;
}

static int write_pbl(struct c4iw_rdev *rdev, __be64 *pbl,
                     u32 pbl_addr, u32 pbl_size)
{
        int err;

        PDBG("%s *pdb_addr 0x%x, pbl_base 0x%x, pbl_size %d\n",
             __func__, pbl_addr, rdev->lldi.vr->pbl.start,
             pbl_size);

        err = write_adapter_mem(rdev, pbl_addr >> 5, pbl_size << 3, pbl);
        return err;
}

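/*
 * The helpers below thinly wrap write_tpt_entry(): dereg_mem() and
 * deallocate_window() pass reset_tpt_entry = 1 to zero and release a
 * TPT entry, while allocate_window() and allocate_stag() pass
 * T4_STAG_UNSET so that a fresh stag index is pulled from the TPT
 * resource table.
 */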
static int dereg_mem(struct c4iw_rdev *rdev, u32 stag, u32 pbl_size,
                     u32 pbl_addr)
{
        return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0,
                               pbl_size, pbl_addr);
}

static int allocate_window(struct c4iw_rdev *rdev, u32 *stag, u32 pdid)
{
        *stag = T4_STAG_UNSET;
        return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_MW, 0, 0, 0,
                               0UL, 0, 0, 0, 0);
}

static int deallocate_window(struct c4iw_rdev *rdev, u32 stag)
{
        return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0, 0,
                               0);
}

static int allocate_stag(struct c4iw_rdev *rdev, u32 *stag, u32 pdid,
                         u32 pbl_size, u32 pbl_addr)
{
        *stag = T4_STAG_UNSET;
        return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_NSMR, 0, 0, 0,
                               0UL, 0, 0, pbl_size, pbl_addr);
}

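/*
 * Common tail of memory registration: record the stag in the MR
 * attributes, expose it as both lkey and rkey, and insert the MR into
 * the device's mmid idr keyed by stag >> 8.
 */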
static int finish_mem_reg(struct c4iw_mr *mhp, u32 stag)
{
        u32 mmid;

        mhp->attr.state = 1;
        mhp->attr.stag = stag;
        mmid = stag >> 8;
        mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
        PDBG("%s mmid 0x%x mhp %p\n", __func__, mmid, mhp);
        return insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid);
}

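/*
 * register_mem() writes a brand-new non-shared MR (NSMR) TPT entry
 * from the attributes cached in the c4iw_mr; reregister_mem() reuses
 * the existing stag.  Both undo the TPT write if finish_mem_reg()
 * fails.
 */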
static int register_mem(struct c4iw_dev *rhp, struct c4iw_pd *php,
                        struct c4iw_mr *mhp, int shift)
{
        u32 stag = T4_STAG_UNSET;
        int ret;

        ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid,
                              FW_RI_STAG_NSMR, mhp->attr.perms,
                              mhp->attr.mw_bind_enable, mhp->attr.zbva,
                              mhp->attr.va_fbo, mhp->attr.len, shift - 12,
                              mhp->attr.pbl_size, mhp->attr.pbl_addr);
        if (ret)
                return ret;

        ret = finish_mem_reg(mhp, stag);
        if (ret)
                dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
                          mhp->attr.pbl_addr);
        return ret;
}

static int reregister_mem(struct c4iw_dev *rhp, struct c4iw_pd *php,
                          struct c4iw_mr *mhp, int shift, int npages)
{
        u32 stag;
        int ret;

        if (npages > mhp->attr.pbl_size)
                return -ENOMEM;

        stag = mhp->attr.stag;
        ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid,
                              FW_RI_STAG_NSMR, mhp->attr.perms,
                              mhp->attr.mw_bind_enable, mhp->attr.zbva,
                              mhp->attr.va_fbo, mhp->attr.len, shift - 12,
                              mhp->attr.pbl_size, mhp->attr.pbl_addr);
        if (ret)
                return ret;

        ret = finish_mem_reg(mhp, stag);
        if (ret)
                dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
                          mhp->attr.pbl_addr);

        return ret;
}

static int alloc_pbl(struct c4iw_mr *mhp, int npages)
{
        mhp->attr.pbl_addr = c4iw_pblpool_alloc(&mhp->rhp->rdev,
                                                npages << 3);

        if (!mhp->attr.pbl_addr)
                return -ENOMEM;

        mhp->attr.pbl_size = npages;

        return 0;
}

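/*
 * Convert an array of ib_phys_buf regions into a single big-endian
 * page list: validate that interior buffers are page aligned, pick
 * the largest page shift (capped at 26) that still covers every
 * buffer boundary, then emit one 64-bit address per page of that
 * size.
 */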
static int build_phys_page_list(struct ib_phys_buf *buffer_list,
                                int num_phys_buf, u64 *iova_start,
                                u64 *total_size, int *npages,
                                int *shift, __be64 **page_list)
{
        u64 mask;
        int i, j, n;

        mask = 0;
        *total_size = 0;
        for (i = 0; i < num_phys_buf; ++i) {
                if (i != 0 && buffer_list[i].addr & ~PAGE_MASK)
                        return -EINVAL;
                if (i != 0 && i != num_phys_buf - 1 &&
                    (buffer_list[i].size & ~PAGE_MASK))
                        return -EINVAL;
                *total_size += buffer_list[i].size;
                if (i > 0)
                        mask |= buffer_list[i].addr;
                else
                        mask |= buffer_list[i].addr & PAGE_MASK;
                if (i != num_phys_buf - 1)
                        mask |= buffer_list[i].addr + buffer_list[i].size;
                else
                        mask |= (buffer_list[i].addr + buffer_list[i].size +
                                PAGE_SIZE - 1) & PAGE_MASK;
        }

        if (*total_size > 0xFFFFFFFFULL)
                return -ENOMEM;

        /* Find largest page shift we can use to cover buffers */
        for (*shift = PAGE_SHIFT; *shift < 27; ++(*shift))
                if ((1ULL << *shift) & mask)
                        break;

        buffer_list[0].size += buffer_list[0].addr & ((1ULL << *shift) - 1);
        buffer_list[0].addr &= ~0ull << *shift;

        *npages = 0;
        for (i = 0; i < num_phys_buf; ++i)
                *npages += (buffer_list[i].size +
                        (1ULL << *shift) - 1) >> *shift;

        if (!*npages)
                return -EINVAL;

        *page_list = kmalloc(sizeof(u64) * *npages, GFP_KERNEL);
        if (!*page_list)
                return -ENOMEM;

        n = 0;
        for (i = 0; i < num_phys_buf; ++i)
                for (j = 0;
                     j < (buffer_list[i].size + (1ULL << *shift) - 1) >> *shift;
                     ++j)
                        (*page_list)[n++] = cpu_to_be64(buffer_list[i].addr +
                            ((u64) j << *shift));

        PDBG("%s va 0x%llx mask 0x%llx shift %d len %lld pbl_size %d\n",
             __func__, (unsigned long long)*iova_start,
             (unsigned long long)mask, *shift, (unsigned long long)*total_size,
             *npages);

        return 0;
}

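/*
 * ib_mr rereg path: work on a local copy of the c4iw_mr, rewrite the
 * TPT entry with the requested PD/access/translation changes, and
 * only fold the new attributes back into the live MR once the adapter
 * update has succeeded.
 */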
int c4iw_reregister_phys_mem(struct ib_mr *mr, int mr_rereg_mask,
                             struct ib_pd *pd, struct ib_phys_buf *buffer_list,
                             int num_phys_buf, int acc, u64 *iova_start)
{
        struct c4iw_mr mh, *mhp;
        struct c4iw_pd *php;
        struct c4iw_dev *rhp;
        __be64 *page_list = NULL;
        int shift = 0;
        u64 total_size;
        int npages;
        int ret;

        PDBG("%s ib_mr %p ib_pd %p\n", __func__, mr, pd);

        /* There can be no memory windows */
        if (atomic_read(&mr->usecnt))
                return -EINVAL;

        mhp = to_c4iw_mr(mr);
        rhp = mhp->rhp;
        php = to_c4iw_pd(mr->pd);

        /* make sure we are on the same adapter */
        if (rhp != php->rhp)
                return -EINVAL;

        memcpy(&mh, mhp, sizeof *mhp);

        if (mr_rereg_mask & IB_MR_REREG_PD)
                php = to_c4iw_pd(pd);
        if (mr_rereg_mask & IB_MR_REREG_ACCESS) {
                mh.attr.perms = c4iw_ib_to_tpt_access(acc);
                mh.attr.mw_bind_enable = (acc & IB_ACCESS_MW_BIND) ==
                                         IB_ACCESS_MW_BIND;
        }
        if (mr_rereg_mask & IB_MR_REREG_TRANS) {
                ret = build_phys_page_list(buffer_list, num_phys_buf,
                                           iova_start,
                                           &total_size, &npages,
                                           &shift, &page_list);
                if (ret)
                        return ret;
        }

        ret = reregister_mem(rhp, php, &mh, shift, npages);
        kfree(page_list);
        if (ret)
                return ret;
        if (mr_rereg_mask & IB_MR_REREG_PD)
                mhp->attr.pdid = php->pdid;
        if (mr_rereg_mask & IB_MR_REREG_ACCESS)
                mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
        if (mr_rereg_mask & IB_MR_REREG_TRANS) {
                mhp->attr.zbva = 0;
                mhp->attr.va_fbo = *iova_start;
                mhp->attr.page_size = shift - 12;
                mhp->attr.len = (u32) total_size;
                mhp->attr.pbl_size = npages;
        }

        return 0;
}

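/*
 * Register a set of physical buffers: build the page list, carve a
 * PBL out of the pblpool, write the PBL into adapter memory, then
 * write the TPT entry via register_mem().
 */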
struct ib_mr *c4iw_register_phys_mem(struct ib_pd *pd,
                                     struct ib_phys_buf *buffer_list,
                                     int num_phys_buf, int acc, u64 *iova_start)
{
        __be64 *page_list;
        int shift;
        u64 total_size;
        int npages;
        struct c4iw_dev *rhp;
        struct c4iw_pd *php;
        struct c4iw_mr *mhp;
        int ret;

        PDBG("%s ib_pd %p\n", __func__, pd);
        php = to_c4iw_pd(pd);
        rhp = php->rhp;

        mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
        if (!mhp)
                return ERR_PTR(-ENOMEM);

        mhp->rhp = rhp;

        /* First check that we have enough alignment */
        if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK)) {
                ret = -EINVAL;
                goto err;
        }

        if (num_phys_buf > 1 &&
            ((buffer_list[0].addr + buffer_list[0].size) & ~PAGE_MASK)) {
                ret = -EINVAL;
                goto err;
        }

        ret = build_phys_page_list(buffer_list, num_phys_buf, iova_start,
                                   &total_size, &npages, &shift,
                                   &page_list);
        if (ret)
                goto err;

        ret = alloc_pbl(mhp, npages);
        if (ret) {
                kfree(page_list);
                goto err;
        }

        ret = write_pbl(&mhp->rhp->rdev, page_list, mhp->attr.pbl_addr,
                        npages);
        kfree(page_list);
        if (ret)
                goto err_pbl;

        mhp->attr.pdid = php->pdid;
        mhp->attr.zbva = 0;

        mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
        mhp->attr.va_fbo = *iova_start;
        mhp->attr.page_size = shift - 12;

        mhp->attr.len = (u32) total_size;
        mhp->attr.pbl_size = npages;
        ret = register_mem(rhp, php, mhp, shift);
        if (ret)
                goto err_pbl;

        return &mhp->ibmr;

err_pbl:
        c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
                          mhp->attr.pbl_size << 3);

err:
        kfree(mhp);
        return ERR_PTR(ret);
}

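/*
 * DMA MR: a single TPT entry with no PBL that maps the whole address
 * space (va_fbo 0, len ~0UL) with the requested access rights.
 */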
struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc)
{
        struct c4iw_dev *rhp;
        struct c4iw_pd *php;
        struct c4iw_mr *mhp;
        int ret;
        u32 stag = T4_STAG_UNSET;

        PDBG("%s ib_pd %p\n", __func__, pd);
        php = to_c4iw_pd(pd);
        rhp = php->rhp;

        mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
        if (!mhp)
                return ERR_PTR(-ENOMEM);

        mhp->rhp = rhp;
        mhp->attr.pdid = php->pdid;
        mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
        mhp->attr.mw_bind_enable = (acc&IB_ACCESS_MW_BIND) == IB_ACCESS_MW_BIND;
        mhp->attr.zbva = 0;
        mhp->attr.va_fbo = 0;
        mhp->attr.page_size = 0;
        mhp->attr.len = ~0UL;
        mhp->attr.pbl_size = 0;

        ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, php->pdid,
                              FW_RI_STAG_NSMR, mhp->attr.perms,
                              mhp->attr.mw_bind_enable, 0, 0, ~0UL, 0, 0, 0);
        if (ret)
                goto err1;

        ret = finish_mem_reg(mhp, stag);
        if (ret)
                goto err2;
        return &mhp->ibmr;
err2:
        dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
                  mhp->attr.pbl_addr);
err1:
        kfree(mhp);
        return ERR_PTR(ret);
}

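/*
 * User MR: pin the user range with ib_umem_get(), then stream the DMA
 * addresses of its pages into the PBL one PAGE_SIZE worth of entries
 * at a time before writing the TPT entry.
 */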
struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                               u64 virt, int acc, struct ib_udata *udata)
{
        __be64 *pages;
        int shift, n, len;
        int i, j, k;
        int err = 0;
        struct ib_umem_chunk *chunk;
        struct c4iw_dev *rhp;
        struct c4iw_pd *php;
        struct c4iw_mr *mhp;

        PDBG("%s ib_pd %p\n", __func__, pd);

        if (length == ~0ULL)
                return ERR_PTR(-EINVAL);

        if ((length + start) < start)
                return ERR_PTR(-EINVAL);

        php = to_c4iw_pd(pd);
        rhp = php->rhp;
        mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
        if (!mhp)
                return ERR_PTR(-ENOMEM);

        mhp->rhp = rhp;

        mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
        if (IS_ERR(mhp->umem)) {
                err = PTR_ERR(mhp->umem);
                kfree(mhp);
                return ERR_PTR(err);
        }

        shift = ffs(mhp->umem->page_size) - 1;

        n = 0;
        list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
                n += chunk->nents;

        err = alloc_pbl(mhp, n);
        if (err)
                goto err;

        pages = (__be64 *) __get_free_page(GFP_KERNEL);
        if (!pages) {
                err = -ENOMEM;
                goto err_pbl;
        }

        i = n = 0;

        list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
                for (j = 0; j < chunk->nmap; ++j) {
                        len = sg_dma_len(&chunk->page_list[j]) >> shift;
                        for (k = 0; k < len; ++k) {
                                pages[i++] = cpu_to_be64(sg_dma_address(
                                        &chunk->page_list[j]) +
                                        mhp->umem->page_size * k);
                                if (i == PAGE_SIZE / sizeof *pages) {
                                        err = write_pbl(&mhp->rhp->rdev,
                                              pages,
                                              mhp->attr.pbl_addr + (n << 3), i);
                                        if (err)
                                                goto pbl_done;
                                        n += i;
                                        i = 0;
                                }
                        }
                }

        if (i)
                err = write_pbl(&mhp->rhp->rdev, pages,
                                mhp->attr.pbl_addr + (n << 3), i);

pbl_done:
        free_page((unsigned long) pages);
        if (err)
                goto err_pbl;

        mhp->attr.pdid = php->pdid;
        mhp->attr.zbva = 0;
        mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
        mhp->attr.va_fbo = virt;
        mhp->attr.page_size = shift - 12;
        mhp->attr.len = length;

        err = register_mem(rhp, php, mhp, shift);
        if (err)
                goto err_pbl;

        return &mhp->ibmr;

err_pbl:
        c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
                          mhp->attr.pbl_size << 3);

err:
        ib_umem_release(mhp->umem);
        kfree(mhp);
        return ERR_PTR(err);
}

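/*
 * Memory window allocation: a fresh stag of type FW_RI_STAG_MW is
 * written via allocate_window(), and the MW is tracked in the same
 * mmid idr as MRs.
 */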
struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd)
{
        struct c4iw_dev *rhp;
        struct c4iw_pd *php;
        struct c4iw_mw *mhp;
        u32 mmid;
        u32 stag = 0;
        int ret;

        php = to_c4iw_pd(pd);
        rhp = php->rhp;
        mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
        if (!mhp)
                return ERR_PTR(-ENOMEM);
        ret = allocate_window(&rhp->rdev, &stag, php->pdid);
        if (ret) {
                kfree(mhp);
                return ERR_PTR(ret);
        }
        mhp->rhp = rhp;
        mhp->attr.pdid = php->pdid;
        mhp->attr.type = FW_RI_STAG_MW;
        mhp->attr.stag = stag;
        mmid = (stag) >> 8;
        mhp->ibmw.rkey = stag;
        if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
                deallocate_window(&rhp->rdev, mhp->attr.stag);
                kfree(mhp);
                return ERR_PTR(-ENOMEM);
        }
        PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
        return &(mhp->ibmw);
}

int c4iw_dealloc_mw(struct ib_mw *mw)
{
        struct c4iw_dev *rhp;
        struct c4iw_mw *mhp;
        u32 mmid;

        mhp = to_c4iw_mw(mw);
        rhp = mhp->rhp;
        mmid = (mw->rkey) >> 8;
        remove_handle(rhp, &rhp->mmidr, mmid);
        deallocate_window(&rhp->rdev, mhp->attr.stag);
        kfree(mhp);
        PDBG("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp);
        return 0;
}

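/*
 * Fast-register MR: reserve a PBL of pbl_depth entries and a fresh
 * NSMR stag now; the PBL contents and the mapping itself are supplied
 * later by fast-register work requests posted on the QP.
 */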
struct ib_mr *c4iw_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth)
{
        struct c4iw_dev *rhp;
        struct c4iw_pd *php;
        struct c4iw_mr *mhp;
        u32 mmid;
        u32 stag = 0;
        int ret = 0;

        php = to_c4iw_pd(pd);
        rhp = php->rhp;
        mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
        if (!mhp) {
                ret = -ENOMEM;
                goto err;
        }

        mhp->rhp = rhp;
        ret = alloc_pbl(mhp, pbl_depth);
        if (ret)
                goto err1;
        mhp->attr.pbl_size = pbl_depth;
        ret = allocate_stag(&rhp->rdev, &stag, php->pdid,
                            mhp->attr.pbl_size, mhp->attr.pbl_addr);
        if (ret)
                goto err2;
        mhp->attr.pdid = php->pdid;
        mhp->attr.type = FW_RI_STAG_NSMR;
        mhp->attr.stag = stag;
        mhp->attr.state = 1;
        mmid = (stag) >> 8;
        mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
        if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
                ret = -ENOMEM;
                goto err3;
        }

        PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
        return &(mhp->ibmr);
err3:
        dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
                  mhp->attr.pbl_addr);
err2:
        c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
                          mhp->attr.pbl_size << 3);
err1:
        kfree(mhp);
err:
        return ERR_PTR(ret);
}

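/*
 * The fast-register page list lives in a single DMA-coherent
 * allocation: the c4iw_fr_page_list header followed immediately by
 * page_list_len 64-bit entries, with the DMA address recorded for the
 * later unmap/free.
 */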
struct ib_fast_reg_page_list *c4iw_alloc_fastreg_pbl(struct ib_device *device,
                                                     int page_list_len)
{
        struct c4iw_fr_page_list *c4pl;
        struct c4iw_dev *dev = to_c4iw_dev(device);
        dma_addr_t dma_addr;
        int size = sizeof *c4pl + page_list_len * sizeof(u64);

        c4pl = dma_alloc_coherent(&dev->rdev.lldi.pdev->dev, size,
                                  &dma_addr, GFP_KERNEL);
        if (!c4pl)
                return ERR_PTR(-ENOMEM);

        dma_unmap_addr_set(c4pl, mapping, dma_addr);
        c4pl->dma_addr = dma_addr;
        c4pl->dev = dev;
        c4pl->size = size;
        c4pl->ibpl.page_list = (u64 *)(c4pl + 1);
        c4pl->ibpl.max_page_list_len = page_list_len;

        return &c4pl->ibpl;
}

void c4iw_free_fastreg_pbl(struct ib_fast_reg_page_list *ibpl)
{
        struct c4iw_fr_page_list *c4pl = to_c4iw_fr_page_list(ibpl);

        dma_free_coherent(&c4pl->dev->rdev.lldi.pdev->dev, c4pl->size,
                          c4pl, dma_unmap_addr(c4pl, mapping));
}

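/*
 * Deregistration mirrors registration: drop the mmid idr entry, clear
 * the TPT entry, return the PBL to the pblpool, and release any pinned
 * user memory before freeing the c4iw_mr.
 */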
int c4iw_dereg_mr(struct ib_mr *ib_mr)
{
        struct c4iw_dev *rhp;
        struct c4iw_mr *mhp;
        u32 mmid;

        PDBG("%s ib_mr %p\n", __func__, ib_mr);
        /* There can be no memory windows */
        if (atomic_read(&ib_mr->usecnt))
                return -EINVAL;

        mhp = to_c4iw_mr(ib_mr);
        rhp = mhp->rhp;
        mmid = mhp->attr.stag >> 8;
        remove_handle(rhp, &rhp->mmidr, mmid);
        dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
                  mhp->attr.pbl_addr);
        if (mhp->attr.pbl_size)
                c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
                                  mhp->attr.pbl_size << 3);
        if (mhp->kva)
                kfree((void *) (unsigned long) mhp->kva);
        if (mhp->umem)
                ib_umem_release(mhp->umem);
        PDBG("%s mmid 0x%x ptr %p\n", __func__, mmid, mhp);
        kfree(mhp);
        return 0;
}