/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <asm/byteorder.h>

#include <rdma/iw_cm.h>
#include <rdma/ib_verbs.h>

#include "cxio_hal.h"
#include "iwch.h"
#include "iwch_provider.h"

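/*
 * Register the memory region described by @mhp with the cxio HAL,
 * record the returned STag in the region's attributes and ib_mr
 * lkey/rkey fields, and make the region reachable through the
 * device's mmid table (rhp->mmidr).  Returns 0 on success or
 * -ENOMEM if the adapter could not register the region.
 */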
int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
                      struct iwch_mr *mhp,
                      int shift,
                      __be64 *page_list)
{
        u32 stag;
        u32 mmid;

        if (cxio_register_phys_mem(&rhp->rdev,
                                   &stag, mhp->attr.pdid,
                                   mhp->attr.perms,
                                   mhp->attr.zbva,
                                   mhp->attr.va_fbo,
                                   mhp->attr.len,
                                   shift - 12,
                                   page_list,
                                   &mhp->attr.pbl_size, &mhp->attr.pbl_addr))
                return -ENOMEM;
        mhp->attr.state = 1;
        mhp->attr.stag = stag;
        mmid = stag >> 8;
        mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
        insert_handle(rhp, &rhp->mmidr, mhp, mmid);
        PDBG("%s mmid 0x%x mhp %p\n", __FUNCTION__, mmid, mhp);
        return 0;
}

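/*
 * Re-register an existing memory region with updated attributes and a
 * new page list.  The new page list must fit within the PBL allocated
 * when the region was first registered; larger remaps are rejected
 * with -ENOMEM.
 */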
int iwch_reregister_mem(struct iwch_dev *rhp, struct iwch_pd *php,
                        struct iwch_mr *mhp,
                        int shift,
                        __be64 *page_list,
                        int npages)
{
        u32 stag;
        u32 mmid;

        /* We could support this... */
        if (npages > mhp->attr.pbl_size)
                return -ENOMEM;

        stag = mhp->attr.stag;
        if (cxio_reregister_phys_mem(&rhp->rdev,
                                     &stag, mhp->attr.pdid,
                                     mhp->attr.perms,
                                     mhp->attr.zbva,
                                     mhp->attr.va_fbo,
                                     mhp->attr.len,
                                     shift - 12,
                                     page_list,
                                     &mhp->attr.pbl_size, &mhp->attr.pbl_addr))
                return -ENOMEM;
        mhp->attr.state = 1;
        mhp->attr.stag = stag;
        mmid = stag >> 8;
        mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
        insert_handle(rhp, &rhp->mmidr, mhp, mmid);
        PDBG("%s mmid 0x%x mhp %p\n", __FUNCTION__, mmid, mhp);
        return 0;
}

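/*
 * Convert a caller-supplied array of physical buffers into the flat,
 * big-endian page list expected by the hardware.  Validates buffer
 * alignment, picks the largest page shift that covers all buffers,
 * and allocates *page_list with kmalloc(); ownership of the list
 * passes to the caller.
 */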
int build_phys_page_list(struct ib_phys_buf *buffer_list,
                         int num_phys_buf,
                         u64 *iova_start,
                         u64 *total_size,
                         int *npages,
                         int *shift,
                         __be64 **page_list)
{
        u64 mask;
        int i, j, n;

        mask = 0;
        *total_size = 0;
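        /*
         * Walk the buffer list once: accumulate the total length and build
         * a mask of the address/size bits that constrain the page size.
         * Buffers after the first must be page aligned, and buffers in the
         * middle of the list must be a multiple of PAGE_SIZE long.
         */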
        for (i = 0; i < num_phys_buf; ++i) {
                if (i != 0 && buffer_list[i].addr & ~PAGE_MASK)
                        return -EINVAL;
                if (i != 0 && i != num_phys_buf - 1 &&
                    (buffer_list[i].size & ~PAGE_MASK))
                        return -EINVAL;
                *total_size += buffer_list[i].size;
                if (i > 0)
                        mask |= buffer_list[i].addr;
                else
                        mask |= buffer_list[i].addr & PAGE_MASK;
                if (i != num_phys_buf - 1)
                        mask |= buffer_list[i].addr + buffer_list[i].size;
                else
                        mask |= (buffer_list[i].addr + buffer_list[i].size +
                                PAGE_SIZE - 1) & PAGE_MASK;
        }

        if (*total_size > 0xFFFFFFFFULL)
                return -ENOMEM;

        /* Find largest page shift we can use to cover buffers */
        for (*shift = PAGE_SHIFT; *shift < 27; ++(*shift))
                /*
                 * With multiple buffers the shift is limited by the lowest
                 * set bit in the accumulated address/size mask; a single
                 * buffer only needs a page large enough to cover its length
                 * plus its starting offset.
                 */
                if (num_phys_buf > 1) {
                        if ((1ULL << *shift) & mask)
                                break;
                } else
                        if (1ULL << *shift >=
                            buffer_list[0].size +
                            (buffer_list[0].addr & ((1ULL << *shift) - 1)))
                                break;

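        /*
         * Fold the first buffer's leading offset into its size and round
         * its address down so that it starts on a boundary of the chosen
         * page size.
         */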
        buffer_list[0].size += buffer_list[0].addr & ((1ULL << *shift) - 1);
        buffer_list[0].addr &= ~0ull << *shift;

        *npages = 0;
        for (i = 0; i < num_phys_buf; ++i)
                *npages += (buffer_list[i].size +
                        (1ULL << *shift) - 1) >> *shift;

        if (!*npages)
                return -EINVAL;

        *page_list = kmalloc(sizeof(u64) * *npages, GFP_KERNEL);
        if (!*page_list)
                return -ENOMEM;

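        /*
         * Emit one big-endian page address per covered page, walking each
         * buffer in 1 << *shift increments.
         */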
        n = 0;
        for (i = 0; i < num_phys_buf; ++i)
                for (j = 0;
                     j < (buffer_list[i].size + (1ULL << *shift) - 1) >> *shift;
                     ++j)
                        (*page_list)[n++] = cpu_to_be64(buffer_list[i].addr +
                                ((u64) j << *shift));

        PDBG("%s va 0x%llx mask 0x%llx shift %d len %lld pbl_size %d\n",
             __FUNCTION__, (unsigned long long) *iova_start,
             (unsigned long long) mask, *shift, (unsigned long long) *total_size,
             *npages);

        return 0;
}