/*
 * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/slab.h>

#include "c2.h"
#include "c2_vq.h"

/* Page Buffer List types handed to send_pbl_messages() */
#define PBL_VIRT 1
#define PBL_PHYS 2

/*
 * Send all the PBL messages to convey the remainder of the PBL.
 * Wait for the adapter's reply on the last one.
 * This is indicated by setting MEM_PBL_COMPLETE in the flags.
 *
 * NOTE: vq_req is _not_ freed by this function.  The VQ Host
 *       Reply buffer _is_ freed by this function.
 */
static int
send_pbl_messages(struct c2_dev *c2dev, __be32 stag_index,
		  unsigned long va, u32 pbl_depth,
		  struct c2_vq_req *vq_req, int pbl_type)
{
	u32 pbe_count;		/* number of PBEs that fit in one PBL msg */
	u32 count;		/* number of PBEs in this PBL msg */
	struct c2wr_nsmr_pbl_req *wr;	/* PBL WR ptr */
	struct c2wr_nsmr_pbl_rep *reply;	/* reply ptr */
	int err, pbl_virt, pbl_index, i;

	switch (pbl_type) {
	case PBL_VIRT:
		pbl_virt = 1;
		break;
	case PBL_PHYS:
		pbl_virt = 0;
		break;
	default:
		return -EINVAL;
	}
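
	/*
	 * Each PBL message can carry only as many 64-bit page buffer
	 * entries (PBEs) as fit in one request-VQ message after the
	 * fixed c2wr_nsmr_pbl_req header.
	 */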
	pbe_count = (c2dev->req_vq.msg_size -
		     sizeof(struct c2wr_nsmr_pbl_req)) / sizeof(u64);
	wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
	if (!wr)
		return -ENOMEM;

	c2_wr_set_id(wr, CCWR_NSMR_PBL);

	/*
	 * Only the last PBL message will generate a reply from the verbs,
	 * so we set the context to 0 indicating there is no kernel verbs
	 * handler blocked awaiting this reply.
	 */
	wr->hdr.context = 0;
	wr->rnic_handle = c2dev->adapter_handle;
	wr->stag_index = stag_index;	/* already swapped */
	wr->flags = 0;
	pbl_index = 0;

	while (pbl_depth) {
		count = min(pbe_count, pbl_depth);
		wr->addrs_length = cpu_to_be32(count);

		/*
		 * If this is the last message, then reference the
		 * vq request struct because we are going to wait for a reply.
		 * Also mark this PBL msg as the last one.
		 */
		if (count == pbl_depth) {
			/*
			 * Reference the request struct.  Dereferenced in the
			 * int handler.
			 */
			vq_req_get(c2dev, vq_req);
			wr->flags = cpu_to_be32(MEM_PBL_COMPLETE);

			/*
			 * This is the last PBL message.
			 * Set the context to our VQ Request Object so we can
			 * wait for the reply.
			 */
			wr->hdr.context = (unsigned long) vq_req;
		}

		/*
		 * If pbl_virt is set then va is a virtual address
		 * that describes a virtually contiguous memory
		 * allocation. The wr needs the start of each virtual page
		 * to be converted to the corresponding physical address
		 * of the page. If pbl_virt is not set then va is an array
		 * of physical addresses and there is no conversion to do.
		 * Just fill in the wr with what is in the array.
		 */
		for (i = 0; i < count; i++) {
			if (pbl_virt) {
				/*
				 * Virtually contiguous case: only advance to
				 * the next page start here.  This path is not
				 * exercised in this file (only PBL_PHYS is
				 * passed to send_pbl_messages()).
				 */
				va += PAGE_SIZE;
			} else {
				wr->paddrs[i] =
				    cpu_to_be64(((u64 *) va)[pbl_index + i]);
			}
		}
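
		/*
		 * Post this PBL message on the adapter's request verbs queue.
		 */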
		err = vq_send_wr(c2dev, (union c2wr *) wr);
		if (err) {
			if (count <= pbe_count) {
				vq_req_put(c2dev, vq_req);
			}
			goto bail0;
		}

		pbl_depth -= count;
		pbl_index += count;
	}

	/*
	 * Now wait for the reply...
	 */
	err = vq_wait_for_reply(c2dev, vq_req);
	if (err)
		goto bail0;

	/*
	 * Process reply
	 */
	reply = (struct c2wr_nsmr_pbl_rep *) (unsigned long) vq_req->reply_msg;
	if (!reply) {
		err = -ENOMEM;
		goto bail0;
	}

	err = c2_errno(reply);

	vq_repbuf_free(c2dev, reply);
bail0:
	kfree(wr);
	return err;
}

#define C2_PBL_MAX_DEPTH 131072

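/*
 * Register a non-shared memory region built from a list of physical
 * page addresses.  The first CCWR_NSMR_REGISTER message carries as many
 * PBEs as fit; any remainder is conveyed with send_pbl_messages().
 */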
int
c2_nsmr_register_phys_kern(struct c2_dev *c2dev, u64 *addr_list,
			   int page_size, int pbl_depth, u32 length,
			   u32 offset, u64 *va, enum c2_acf acf,
			   struct c2_mr *mr)
{
	struct c2_vq_req *vq_req;
	struct c2wr_nsmr_register_req *wr;
	struct c2wr_nsmr_register_rep *reply;
	u16 flags;
	int i, pbe_count, count;
	int err;
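
	/*
	 * Reject obviously bad arguments before talking to the adapter.
	 */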
	if (!va || !length || !addr_list || !pbl_depth)
		return -EINVAL;

	/*
	 * Verify PBL depth is within rnic max
	 */
	if (pbl_depth > C2_PBL_MAX_DEPTH) {
		return -EINVAL;
	}

	/*
	 * allocate verbs request object
	 */
	vq_req = vq_req_alloc(c2dev);
	if (!vq_req)
		return -ENOMEM;

	wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
	if (!wr) {
		err = -ENOMEM;
		goto bail0;
	}

	/*
	 * build the WR
	 */
	c2_wr_set_id(wr, CCWR_NSMR_REGISTER);
	wr->hdr.context = (unsigned long) vq_req;
	wr->rnic_handle = c2dev->adapter_handle;

	flags = (acf | MEM_VA_BASED | MEM_REMOTE);

	/*
	 * compute how many pbes can fit in the message
	 */
	pbe_count = (c2dev->req_vq.msg_size -
		     sizeof(struct c2wr_nsmr_register_req)) / sizeof(u64);

	if (pbl_depth <= pbe_count) {
		flags |= MEM_PBL_COMPLETE;
	}
	wr->flags = cpu_to_be16(flags);
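
	/*
	 * Describe the region: key, base VA, protection domain, page size,
	 * total length, PBL depth and first-byte offset.
	 */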
	wr->stag_key = 0;	//stag_key;
	wr->va = cpu_to_be64(*va);
	wr->pd_id = mr->pd->pd_id;
	wr->pbe_size = cpu_to_be32(page_size);
	wr->length = cpu_to_be32(length);
	wr->pbl_depth = cpu_to_be32(pbl_depth);
	wr->fbo = cpu_to_be32(offset);
	count = min(pbl_depth, pbe_count);
	wr->addrs_length = cpu_to_be32(count);

	/*
	 * fill out the PBL for this message
	 */
	for (i = 0; i < count; i++) {
		wr->paddrs[i] = cpu_to_be64(addr_list[i]);
	}

	/*
	 * reference the request struct
	 */
	vq_req_get(c2dev, vq_req);

	/*
	 * send the WR to the adapter
	 */
	err = vq_send_wr(c2dev, (union c2wr *) wr);
	if (err) {
		vq_req_put(c2dev, vq_req);
		goto bail1;
	}

	/*
	 * wait for reply from adapter
	 */
	err = vq_wait_for_reply(c2dev, vq_req);
	if (err)
		goto bail1;

	/*
	 * process reply
	 */
	reply =
	    (struct c2wr_nsmr_register_rep *) (unsigned long) (vq_req->reply_msg);
	if (!reply) {
		err = -ENOMEM;
		goto bail1;
	}
	if ((err = c2_errno(reply))) {
		goto bail2;
	}
	//*p_pb_entries = be32_to_cpu(reply->pbl_depth);
	mr->ibmr.lkey = mr->ibmr.rkey = be32_to_cpu(reply->stag_index);
	vq_repbuf_free(c2dev, reply);

	/*
	 * if there are still more PBEs we need to send them to
	 * the adapter and wait for a reply on the final one.
	 * reuse vq_req for this purpose.
	 */
	pbl_depth -= count;
	if (pbl_depth) {
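		/*
		 * For example, if only pbe_count PBEs fit in the register
		 * WR above, entries [0, count) have been registered and
		 * send_pbl_messages() below conveys addr_list[count..] in
		 * one or more CCWR_NSMR_PBL messages, the last of which is
		 * flagged MEM_PBL_COMPLETE.
		 */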
		vq_req->reply_msg = (unsigned long) NULL;
		atomic_set(&vq_req->reply_ready, 0);
		err = send_pbl_messages(c2dev,
					cpu_to_be32(mr->ibmr.lkey),
					(unsigned long) &addr_list[i],
					pbl_depth, vq_req, PBL_PHYS);
		if (err)
			goto bail1;
	}

	vq_req_free(c2dev, vq_req);
	kfree(wr);

	return err;

bail2:
	vq_repbuf_free(c2dev, reply);
bail1:
	kfree(wr);
bail0:
	vq_req_free(c2dev, vq_req);
	return err;
}

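/*
 * Deallocate an adapter STag previously set up for a memory region.
 */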
int c2_stag_dealloc(struct c2_dev *c2dev, u32 stag_index)
{
	struct c2_vq_req *vq_req;	/* verbs request object */
	struct c2wr_stag_dealloc_req wr;	/* work request */
	struct c2wr_stag_dealloc_rep *reply;	/* WR reply */
	int err;

	/*
	 * allocate verbs request object
	 */
	vq_req = vq_req_alloc(c2dev);
	if (!vq_req)
		return -ENOMEM;

	/*
	 * Build the WR
	 */
	c2_wr_set_id(&wr, CCWR_STAG_DEALLOC);
	wr.hdr.context = (u64) (unsigned long) vq_req;
	wr.rnic_handle = c2dev->adapter_handle;
	wr.stag_index = cpu_to_be32(stag_index);

	/*
	 * Reference the request struct.  Dereferenced in the int handler.
	 */
	vq_req_get(c2dev, vq_req);

	/*
	 * Send WR to adapter
	 */
	err = vq_send_wr(c2dev, (union c2wr *) &wr);
	if (err) {
		vq_req_put(c2dev, vq_req);
		goto bail0;
	}

	/*
	 * Wait for reply from adapter
	 */
	err = vq_wait_for_reply(c2dev, vq_req);
	if (err)
		goto bail0;

	/*
	 * Process reply
	 */
	reply = (struct c2wr_stag_dealloc_rep *) (unsigned long) vq_req->reply_msg;
	if (!reply) {
		err = -ENOMEM;
		goto bail0;
	}

	err = c2_errno(reply);

	vq_repbuf_free(c2dev, reply);
bail0:
	vq_req_free(c2dev, vq_req);
	return err;
}