/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: verbs.c 1349 2004-12-16 21:09:43Z roland $
 */
#include <linux/errno.h>
#include <linux/err.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
/* Protection domains */
struct ib_pd *ib_alloc_pd(struct ib_device *device)
{
        struct ib_pd *pd;

        pd = device->alloc_pd(device, NULL, NULL);

        if (!IS_ERR(pd)) {
                pd->device  = device;
                pd->uobject = NULL;
                atomic_set(&pd->usecnt, 0);
        }

        return pd;
}
EXPORT_SYMBOL(ib_alloc_pd);
int ib_dealloc_pd(struct ib_pd *pd)
{
        if (atomic_read(&pd->usecnt))
                return -EBUSY;

        return pd->device->dealloc_pd(pd);
}
EXPORT_SYMBOL(ib_dealloc_pd);
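/*
 * Usage sketch (illustrative, not part of the original file): a consumer
 * allocates a PD against a device it was handed at probe time and frees
 * it once nothing references it.  "device" is a hypothetical
 * struct ib_device pointer; ib_dealloc_pd() fails with -EBUSY while
 * pd->usecnt is nonzero.
 *
 *      struct ib_pd *pd;
 *
 *      pd = ib_alloc_pd(device);
 *      if (IS_ERR(pd))
 *              return PTR_ERR(pd);
 *
 *      ... create QPs, AHs, MRs against pd ...
 *
 *      if (ib_dealloc_pd(pd))
 *              printk(KERN_WARNING "PD still in use\n");
 */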
/* Address handles */

struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
{
        struct ib_ah *ah;

        ah = pd->device->create_ah(pd, ah_attr);

        if (!IS_ERR(ah)) {
                ah->device  = pd->device;
                ah->pd      = pd;
                ah->uobject = NULL;
                atomic_inc(&pd->usecnt);
        }

        return ah;
}
EXPORT_SYMBOL(ib_create_ah);
struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
                                   struct ib_grh *grh, u8 port_num)
{
        struct ib_ah_attr ah_attr;
        u32 flow_class;
        u16 gid_index;
        int ret;

        memset(&ah_attr, 0, sizeof ah_attr);
        ah_attr.dlid = wc->slid;
        ah_attr.sl = wc->sl;
        ah_attr.src_path_bits = wc->dlid_path_bits;
        ah_attr.port_num = port_num;

        if (wc->wc_flags & IB_WC_GRH) {
                ah_attr.ah_flags = IB_AH_GRH;
                ah_attr.grh.dgid = grh->dgid;

                ret = ib_find_cached_gid(pd->device, &grh->sgid, &port_num,
                                         &gid_index);
                if (ret)
                        return ERR_PTR(ret);

                ah_attr.grh.sgid_index = (u8) gid_index;
                flow_class = be32_to_cpu(grh->version_tclass_flow);
                ah_attr.grh.flow_label = flow_class & 0xFFFFF;
                ah_attr.grh.traffic_class = (flow_class >> 20) & 0xFF;
                ah_attr.grh.hop_limit = grh->hop_limit;
        }

        return ib_create_ah(pd, &ah_attr);
}
EXPORT_SYMBOL(ib_create_ah_from_wc);
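/*
 * Usage sketch (illustrative): the usual consumer of
 * ib_create_ah_from_wc() is code replying to a datagram it just
 * received.  "wc" is the receive completion and "recv_buf" (hypothetical)
 * is the receive buffer; on UD QPs the first 40 bytes of the buffer hold
 * the GRH when wc->wc_flags has IB_WC_GRH set.
 *
 *      struct ib_ah *ah;
 *
 *      ah = ib_create_ah_from_wc(pd, wc, (struct ib_grh *) recv_buf,
 *                                port_num);
 *      if (IS_ERR(ah))
 *              return PTR_ERR(ah);
 *
 *      ... post the reply on a UD QP with wr.ud.ah = ah and
 *          wr.ud.remote_qpn = wc->src_qp ...
 *
 *      ib_destroy_ah(ah);
 */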
int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
        return ah->device->modify_ah ?
                ah->device->modify_ah(ah, ah_attr) :
                -ENOSYS;
}
EXPORT_SYMBOL(ib_modify_ah);
int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
        return ah->device->query_ah ?
                ah->device->query_ah(ah, ah_attr) :
                -ENOSYS;
}
EXPORT_SYMBOL(ib_query_ah);
int ib_destroy_ah(struct ib_ah *ah)
{
        struct ib_pd *pd;
        int ret;

        pd = ah->pd;
        ret = ah->device->destroy_ah(ah);
        if (!ret)
                atomic_dec(&pd->usecnt);

        return ret;
}
EXPORT_SYMBOL(ib_destroy_ah);
/* Shared receive queues */
struct ib_srq *ib_create_srq(struct ib_pd *pd,
                             struct ib_srq_init_attr *srq_init_attr)
{
        struct ib_srq *srq;

        if (!pd->device->create_srq)
                return ERR_PTR(-ENOSYS);

        srq = pd->device->create_srq(pd, srq_init_attr, NULL);

        if (!IS_ERR(srq)) {
                srq->device        = pd->device;
                srq->pd            = pd;
                srq->uobject       = NULL;
                srq->event_handler = srq_init_attr->event_handler;
                srq->srq_context   = srq_init_attr->srq_context;
                atomic_inc(&pd->usecnt);
                atomic_set(&srq->usecnt, 0);
        }

        return srq;
}
EXPORT_SYMBOL(ib_create_srq);
int ib_modify_srq(struct ib_srq *srq,
                  struct ib_srq_attr *srq_attr,
                  enum ib_srq_attr_mask srq_attr_mask)
{
        return srq->device->modify_srq(srq, srq_attr, srq_attr_mask);
}
EXPORT_SYMBOL(ib_modify_srq);
int ib_query_srq(struct ib_srq *srq,
                 struct ib_srq_attr *srq_attr)
{
        return srq->device->query_srq ?
                srq->device->query_srq(srq, srq_attr) : -ENOSYS;
}
EXPORT_SYMBOL(ib_query_srq);
int ib_destroy_srq(struct ib_srq *srq)
{
        struct ib_pd *pd;
        int ret;

        if (atomic_read(&srq->usecnt))
                return -EBUSY;

        pd = srq->pd;

        ret = srq->device->destroy_srq(srq);
        if (!ret)
                atomic_dec(&pd->usecnt);

        return ret;
}
EXPORT_SYMBOL(ib_destroy_srq);
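/*
 * Usage sketch (illustrative; attribute values are arbitrary and
 * "my_srq_event_handler" is hypothetical): creating an SRQ and arming
 * its limit event with ib_modify_srq().
 *
 *      struct ib_srq_init_attr init = {
 *              .event_handler = my_srq_event_handler,
 *              .attr          = { .max_wr = 256, .max_sge = 1 }
 *      };
 *      struct ib_srq_attr attr = { .srq_limit = 16 };
 *      struct ib_srq *srq;
 *
 *      srq = ib_create_srq(pd, &init);
 *      if (IS_ERR(srq))
 *              return PTR_ERR(srq);
 *
 *      if (ib_modify_srq(srq, &attr, IB_SRQ_LIMIT))
 *              ...
 */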
/* Queue pairs */

struct ib_qp *ib_create_qp(struct ib_pd *pd,
                           struct ib_qp_init_attr *qp_init_attr)
{
        struct ib_qp *qp;

        qp = pd->device->create_qp(pd, qp_init_attr, NULL);

        if (!IS_ERR(qp)) {
                qp->device        = pd->device;
                qp->pd            = pd;
                qp->send_cq       = qp_init_attr->send_cq;
                qp->recv_cq       = qp_init_attr->recv_cq;
                qp->srq           = qp_init_attr->srq;
                qp->uobject       = NULL;
                qp->event_handler = qp_init_attr->event_handler;
                qp->qp_context    = qp_init_attr->qp_context;
                qp->qp_type       = qp_init_attr->qp_type;
                atomic_inc(&pd->usecnt);
                atomic_inc(&qp_init_attr->send_cq->usecnt);
                atomic_inc(&qp_init_attr->recv_cq->usecnt);
                if (qp_init_attr->srq)
                        atomic_inc(&qp_init_attr->srq->usecnt);
        }

        return qp;
}
EXPORT_SYMBOL(ib_create_qp);
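/*
 * Usage sketch (illustrative; capacities are arbitrary): creating an RC
 * QP over two existing CQs.  "send_cq" and "recv_cq" are hypothetical
 * pointers obtained from ib_create_cq().
 *
 *      struct ib_qp_init_attr init = {
 *              .send_cq     = send_cq,
 *              .recv_cq     = recv_cq,
 *              .cap         = { .max_send_wr  = 64, .max_recv_wr  = 64,
 *                               .max_send_sge = 1,  .max_recv_sge = 1 },
 *              .sq_sig_type = IB_SIGNAL_ALL_WR,
 *              .qp_type     = IB_QPT_RC
 *      };
 *      struct ib_qp *qp;
 *
 *      qp = ib_create_qp(pd, &init);
 *      if (IS_ERR(qp))
 *              return PTR_ERR(qp);
 */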
int ib_modify_qp(struct ib_qp *qp,
                 struct ib_qp_attr *qp_attr,
                 int qp_attr_mask)
{
        return qp->device->modify_qp(qp, qp_attr, qp_attr_mask);
}
EXPORT_SYMBOL(ib_modify_qp);
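/*
 * Usage sketch (illustrative): a freshly created QP starts in the RESET
 * state; the first ib_modify_qp() call moves it to INIT, after which the
 * consumer steps it through RTR and RTS with further calls.  The values
 * below are arbitrary examples.
 *
 *      struct ib_qp_attr attr = {
 *              .qp_state        = IB_QPS_INIT,
 *              .pkey_index      = 0,
 *              .port_num        = 1,
 *              .qp_access_flags = 0
 *      };
 *
 *      ret = ib_modify_qp(qp, &attr,
 *                         IB_QP_STATE | IB_QP_PKEY_INDEX |
 *                         IB_QP_PORT  | IB_QP_ACCESS_FLAGS);
 */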
int ib_query_qp(struct ib_qp *qp,
                struct ib_qp_attr *qp_attr,
                int qp_attr_mask,
                struct ib_qp_init_attr *qp_init_attr)
{
        return qp->device->query_qp ?
                qp->device->query_qp(qp, qp_attr, qp_attr_mask, qp_init_attr) :
                -ENOSYS;
}
EXPORT_SYMBOL(ib_query_qp);
int ib_destroy_qp(struct ib_qp *qp)
{
        struct ib_pd *pd;
        struct ib_cq *scq, *rcq;
        struct ib_srq *srq;
        int ret;

        pd  = qp->pd;
        scq = qp->send_cq;
        rcq = qp->recv_cq;
        srq = qp->srq;

        ret = qp->device->destroy_qp(qp);
        if (!ret) {
                atomic_dec(&pd->usecnt);
                atomic_dec(&scq->usecnt);
                atomic_dec(&rcq->usecnt);
                if (srq)
                        atomic_dec(&srq->usecnt);
        }

        return ret;
}
EXPORT_SYMBOL(ib_destroy_qp);
/* Completion queues */
struct ib_cq *ib_create_cq(struct ib_device *device,
                           ib_comp_handler comp_handler,
                           void (*event_handler)(struct ib_event *, void *),
                           void *cq_context, int cqe)
{
        struct ib_cq *cq;

        cq = device->create_cq(device, cqe, NULL, NULL);

        if (!IS_ERR(cq)) {
                cq->device        = device;
                cq->uobject       = NULL;
                cq->comp_handler  = comp_handler;
                cq->event_handler = event_handler;
                cq->cq_context    = cq_context;
                atomic_set(&cq->usecnt, 0);
        }

        return cq;
}
EXPORT_SYMBOL(ib_create_cq);
int ib_destroy_cq(struct ib_cq *cq)
{
        if (atomic_read(&cq->usecnt))
                return -EBUSY;

        return cq->device->destroy_cq(cq);
}
EXPORT_SYMBOL(ib_destroy_cq);
int ib_resize_cq(struct ib_cq *cq,
                 int           cqe)
{
        int ret;

        if (!cq->device->resize_cq)
                return -ENOSYS;

        ret = cq->device->resize_cq(cq, &cqe);
        if (!ret)
                cq->cqe = cqe;

        return ret;
}
EXPORT_SYMBOL(ib_resize_cq);
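/*
 * Usage sketch (illustrative; "my_comp_handler", "my_context" and the
 * sizes are hypothetical): creating a CQ and growing it later.
 *
 *      struct ib_cq *cq;
 *
 *      cq = ib_create_cq(device, my_comp_handler, NULL, my_context, 128);
 *      if (IS_ERR(cq))
 *              return PTR_ERR(cq);
 *
 *      ... drain completions with ib_poll_cq(), rearm notification
 *          with ib_req_notify_cq() ...
 *
 *      if (ib_resize_cq(cq, 256))
 *              ...    the device may not implement resize (-ENOSYS)
 */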
/* Memory regions */

struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
{
        struct ib_mr *mr;

        mr = pd->device->get_dma_mr(pd, mr_access_flags);

        if (!IS_ERR(mr)) {
                mr->device  = pd->device;
                mr->pd      = pd;
                mr->uobject = NULL;
                atomic_inc(&pd->usecnt);
                atomic_set(&mr->usecnt, 0);
        }

        return mr;
}
EXPORT_SYMBOL(ib_get_dma_mr);
struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
                             struct ib_phys_buf *phys_buf_array,
                             int num_phys_buf,
                             int mr_access_flags,
                             u64 *iova_start)
{
        struct ib_mr *mr;

        mr = pd->device->reg_phys_mr(pd, phys_buf_array, num_phys_buf,
                                     mr_access_flags, iova_start);

        if (!IS_ERR(mr)) {
                mr->device  = pd->device;
                mr->pd      = pd;
                mr->uobject = NULL;
                atomic_inc(&pd->usecnt);
                atomic_set(&mr->usecnt, 0);
        }

        return mr;
}
EXPORT_SYMBOL(ib_reg_phys_mr);
int ib_rereg_phys_mr(struct ib_mr *mr,
                     int mr_rereg_mask,
                     struct ib_pd *pd,
                     struct ib_phys_buf *phys_buf_array,
                     int num_phys_buf,
                     int mr_access_flags,
                     u64 *iova_start)
{
        struct ib_pd *old_pd;
        int ret;

        if (!mr->device->rereg_phys_mr)
                return -ENOSYS;

        if (atomic_read(&mr->usecnt))
                return -EBUSY;

        old_pd = mr->pd;

        ret = mr->device->rereg_phys_mr(mr, mr_rereg_mask, pd,
                                        phys_buf_array, num_phys_buf,
                                        mr_access_flags, iova_start);

        if (!ret && (mr_rereg_mask & IB_MR_REREG_PD)) {
                atomic_dec(&old_pd->usecnt);
                atomic_inc(&pd->usecnt);
        }

        return ret;
}
EXPORT_SYMBOL(ib_rereg_phys_mr);
int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
{
        return mr->device->query_mr ?
                mr->device->query_mr(mr, mr_attr) : -ENOSYS;
}
EXPORT_SYMBOL(ib_query_mr);
int ib_dereg_mr(struct ib_mr *mr)
{
        struct ib_pd *pd;
        int ret;

        if (atomic_read(&mr->usecnt))
                return -EBUSY;

        pd = mr->pd;
        ret = mr->device->dereg_mr(mr);
        if (!ret)
                atomic_dec(&pd->usecnt);

        return ret;
}
EXPORT_SYMBOL(ib_dereg_mr);
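/*
 * Usage sketch (illustrative): the common in-kernel pattern is a single
 * DMA MR covering the whole of local memory, whose lkey is then used in
 * the scatter/gather entries of posted work requests.
 *
 *      struct ib_mr *mr;
 *
 *      mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE);
 *      if (IS_ERR(mr))
 *              return PTR_ERR(mr);
 *
 *      ... sge.lkey = mr->lkey in each posted work request ...
 *
 *      ib_dereg_mr(mr);
 */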
/* Memory windows */

struct ib_mw *ib_alloc_mw(struct ib_pd *pd)
{
        struct ib_mw *mw;

        if (!pd->device->alloc_mw)
                return ERR_PTR(-ENOSYS);

        mw = pd->device->alloc_mw(pd);
        if (!IS_ERR(mw)) {
                mw->device  = pd->device;
                mw->pd      = pd;
                mw->uobject = NULL;
                atomic_inc(&pd->usecnt);
        }

        return mw;
}
EXPORT_SYMBOL(ib_alloc_mw);
int ib_dealloc_mw(struct ib_mw *mw)
{
        struct ib_pd *pd;
        int ret;

        pd = mw->pd;
        ret = mw->device->dealloc_mw(mw);
        if (!ret)
                atomic_dec(&pd->usecnt);

        return ret;
}
EXPORT_SYMBOL(ib_dealloc_mw);
474 /* "Fast" memory regions */
struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
                            int mr_access_flags,
                            struct ib_fmr_attr *fmr_attr)
{
        struct ib_fmr *fmr;

        if (!pd->device->alloc_fmr)
                return ERR_PTR(-ENOSYS);

        fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr);
        if (!IS_ERR(fmr)) {
                fmr->device = pd->device;
                fmr->pd     = pd;
                atomic_inc(&pd->usecnt);
        }

        return fmr;
}
EXPORT_SYMBOL(ib_alloc_fmr);
int ib_unmap_fmr(struct list_head *fmr_list)
{
        struct ib_fmr *fmr;

        if (list_empty(fmr_list))
                return 0;

        fmr = list_entry(fmr_list->next, struct ib_fmr, list);
        return fmr->device->unmap_fmr(fmr_list);
}
EXPORT_SYMBOL(ib_unmap_fmr);
int ib_dealloc_fmr(struct ib_fmr *fmr)
{
        struct ib_pd *pd;
        int ret;

        pd = fmr->pd;
        ret = fmr->device->dealloc_fmr(fmr);
        if (!ret)
                atomic_dec(&pd->usecnt);

        return ret;
}
EXPORT_SYMBOL(ib_dealloc_fmr);
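/*
 * Usage sketch (illustrative; attribute values are arbitrary and the
 * field names follow this tree's ib_verbs.h): most consumers go through
 * the ib_fmr_pool helpers rather than calling these verbs directly.
 *
 *      struct ib_fmr_attr attr = {
 *              .max_pages = 64,
 *              .max_maps  = 32,
 *              .page_size = PAGE_SHIFT
 *      };
 *      struct ib_fmr *fmr;
 *
 *      fmr = ib_alloc_fmr(pd, IB_ACCESS_LOCAL_WRITE, &attr);
 *      if (IS_ERR(fmr))
 *              return PTR_ERR(fmr);
 *
 *      ... map with ib_map_phys_fmr(), unmap batches with
 *          ib_unmap_fmr(), finally ib_dealloc_fmr(fmr) ...
 */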
/* Multicast groups */
int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
        return qp->device->attach_mcast ?
                qp->device->attach_mcast(qp, gid, lid) :
                -ENOSYS;
}
EXPORT_SYMBOL(ib_attach_mcast);
int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
        return qp->device->detach_mcast ?
                qp->device->detach_mcast(qp, gid, lid) :
                -ENOSYS;
}
EXPORT_SYMBOL(ib_detach_mcast);
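/*
 * Usage sketch (illustrative): only UD QPs may be attached to multicast
 * groups.  The GID/LID pair would normally come from a multicast group
 * join performed through the subnet administrator; "mgid" and "mlid"
 * here are placeholders.
 *
 *      ret = ib_attach_mcast(qp, &mgid, mlid);
 *      if (ret)
 *              return ret;
 *
 *      ... receive multicast datagrams on qp ...
 *
 *      ib_detach_mcast(qp, &mgid, mlid);
 */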