/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/slab.h>
#include <asm/byteorder.h>

#include <rdma/iw_cm.h>
#include <rdma/ib_verbs.h>

#include "cxio_hal.h"
#include "cxio_resource.h"
#include "iwch.h"
#include "iwch_provider.h"
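
/*
 * Record the newly allocated STag in the MR and publish the MR by
 * inserting it into the device's mmid table.
 */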
static int iwch_finish_mem_reg(struct iwch_mr *mhp, u32 stag)
{
	u32 mmid;

	mhp->attr.state = 1;
	mhp->attr.stag = stag;
	mmid = stag >> 8;
	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
	PDBG("%s mmid 0x%x mhp %p\n", __func__, mmid, mhp);
	return insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid);
}
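
/*
 * Register the physical memory described by the MR with the adapter.
 * On success the new STag is published via iwch_finish_mem_reg(); if
 * publishing fails, the hardware registration is torn down again.
 */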
int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
		      struct iwch_mr *mhp, int shift)
{
	u32 stag;
	int ret;

	if (cxio_register_phys_mem(&rhp->rdev,
				   &stag, mhp->attr.pdid,
				   mhp->attr.perms,
				   mhp->attr.zbva,
				   mhp->attr.va_fbo,
				   mhp->attr.len,
				   shift - 12,
				   mhp->attr.pbl_size, mhp->attr.pbl_addr))
		return -ENOMEM;

	ret = iwch_finish_mem_reg(mhp, stag);
	if (ret)
		cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
			       mhp->attr.pbl_addr);
	return ret;
}
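
/*
 * Re-register an existing MR, reusing its STag and PBL.  The new
 * mapping must fit within the PBL originally allocated for the MR.
 */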
int iwch_reregister_mem(struct iwch_dev *rhp, struct iwch_pd *php,
					struct iwch_mr *mhp,
					int shift,
					int npages)
{
	u32 stag;
	int ret;

	/* We could support this... */
	if (npages > mhp->attr.pbl_size)
		return -ENOMEM;

	stag = mhp->attr.stag;
	if (cxio_reregister_phys_mem(&rhp->rdev,
				   &stag, mhp->attr.pdid,
				   mhp->attr.perms,
				   mhp->attr.zbva,
				   mhp->attr.va_fbo,
				   mhp->attr.len,
				   shift - 12,
				   mhp->attr.pbl_size, mhp->attr.pbl_addr))
		return -ENOMEM;

	ret = iwch_finish_mem_reg(mhp, stag);
	if (ret)
		cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
			       mhp->attr.pbl_addr);

	return ret;
}
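
/* Allocate a PBL of npages entries (8 bytes each) from the PBL pool. */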
int iwch_alloc_pbl(struct iwch_mr *mhp, int npages)
{
	mhp->attr.pbl_addr = cxio_hal_pblpool_alloc(&mhp->rhp->rdev,
						    npages << 3);

	if (!mhp->attr.pbl_addr)
		return -ENOMEM;

	mhp->attr.pbl_size = npages;

	return 0;
}
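
/* Return the MR's PBL to the pool. */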
void iwch_free_pbl(struct iwch_mr *mhp)
{
	cxio_hal_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
			      mhp->attr.pbl_size << 3);
}
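
/* Write page addresses into the MR's PBL, starting at the given entry offset. */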
int iwch_write_pbl(struct iwch_mr *mhp, __be64 *pages, int npages, int offset)
{
	return cxio_write_pbl(&mhp->rhp->rdev, pages,
			      mhp->attr.pbl_addr + (offset << 3), npages);
}
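
/*
 * Build a big-endian page list covering the given physical buffers,
 * using the largest page shift that lines up with all buffer boundaries.
 */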
int build_phys_page_list(struct ib_phys_buf *buffer_list,
					int num_phys_buf,
					u64 *iova_start,
					u64 *total_size,
					int *npages,
					int *shift,
					__be64 **page_list)
{
	u64 mask;
	int i, j, n;

	mask = 0;
	*total_size = 0;
	for (i = 0; i < num_phys_buf; ++i) {
		if (i != 0 && buffer_list[i].addr & ~PAGE_MASK)
			return -EINVAL;
		if (i != 0 && i != num_phys_buf - 1 &&
		    (buffer_list[i].size & ~PAGE_MASK))
			return -EINVAL;
		*total_size += buffer_list[i].size;
		if (i > 0)
			mask |= buffer_list[i].addr;
		else
			mask |= buffer_list[i].addr & PAGE_MASK;
		if (i != num_phys_buf - 1)
			mask |= buffer_list[i].addr + buffer_list[i].size;
		else
			mask |= (buffer_list[i].addr + buffer_list[i].size +
				PAGE_SIZE - 1) & PAGE_MASK;
	}

	if (*total_size > 0xFFFFFFFFULL)
		return -ENOMEM;

	/* Find largest page shift we can use to cover buffers */
	for (*shift = PAGE_SHIFT; *shift < 27; ++(*shift))
		if ((1ULL << *shift) & mask)
			break;

	buffer_list[0].size += buffer_list[0].addr & ((1ULL << *shift) - 1);
	buffer_list[0].addr &= ~0ull << *shift;

	*npages = 0;
	for (i = 0; i < num_phys_buf; ++i)
		*npages += (buffer_list[i].size +
			(1ULL << *shift) - 1) >> *shift;

	if (!*npages)
		return -EINVAL;

	*page_list = kmalloc(sizeof(u64) * *npages, GFP_KERNEL);
	if (!*page_list)
		return -ENOMEM;

	n = 0;
	for (i = 0; i < num_phys_buf; ++i)
		for (j = 0;
		     j < (buffer_list[i].size + (1ULL << *shift) - 1) >> *shift;
		     ++j)
			(*page_list)[n++] = cpu_to_be64(buffer_list[i].addr +
			    ((u64) j << *shift));

	PDBG("%s va 0x%llx mask 0x%llx shift %d len %lld pbl_size %d\n",
	     __func__, (unsigned long long) *iova_start,
	     (unsigned long long) mask, *shift, (unsigned long long) *total_size,
	     *npages);

	return 0;
}